[v3,12/23] net/cnxk: handling representee notification

Message ID: 20240201130754.194352-13-hkalra@marvell.com (mailing list archive)
State: Changes Requested, archived
Delegated to: Jerin Jacob
Series: net/cnxk: support for port representors

Checks

ci/checkpatch: success (coding style OK)

Commit Message

Harman Kalra Feb. 1, 2024, 1:07 p.m. UTC
  Whenever a representee comes up or goes down, the kernel sends an mbox
up-call, which signals a thread to process these messages and
enable/disable HW resources accordingly.
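
The notification path is a simple producer/consumer hand-off: the mbox
up-call handler queues a message and signals a condition variable, and a
worker thread drains the queue and acts on it. A minimal standalone sketch
of that pattern follows; it is an illustration only, and names such as
repte_msg, notify() and worker() are placeholders rather than driver
symbols (the real code is cnxk_representee_notification() and
cnxk_representee_msg_thread_main() in the patch below).

/* Illustrative sketch of the queue-and-signal pattern; not driver code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct repte_msg {
	uint16_t hw_func;
	bool enable;
	TAILQ_ENTRY(repte_msg) next;
};

static TAILQ_HEAD(, repte_msg) msg_list = TAILQ_HEAD_INITIALIZER(msg_list);
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool running = true;

/* Consumer: stands in for cnxk_representee_msg_thread_main(). */
static void *worker(void *arg)
{
	struct repte_msg *msg;

	(void)arg;
	pthread_mutex_lock(&lock);
	while (running || !TAILQ_EMPTY(&msg_list)) {
		if (TAILQ_EMPTY(&msg_list)) {
			pthread_cond_wait(&cond, &lock);
			continue;
		}
		msg = TAILQ_FIRST(&msg_list);
		TAILQ_REMOVE(&msg_list, msg, next);
		/* Drop the lock while processing so the producer can queue more. */
		pthread_mutex_unlock(&lock);
		printf("repte %#x -> %s\n", (unsigned int)msg->hw_func,
		       msg->enable ? "enable" : "disable");
		free(msg);
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Producer: stands in for cnxk_representee_notification() (mbox context). */
static void notify(uint16_t hw_func, bool enable)
{
	struct repte_msg *msg = calloc(1, sizeof(*msg));

	if (msg == NULL)
		return;
	msg->hw_func = hw_func;
	msg->enable = enable;
	pthread_mutex_lock(&lock);
	TAILQ_INSERT_TAIL(&msg_list, msg, next);
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);
	notify(0x401, true);	/* representee came up */
	notify(0x401, false);	/* representee went down */
	pthread_mutex_lock(&lock);
	running = false;	/* let the worker drain the list and exit */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	pthread_join(tid, NULL);
	return 0;
}

Build with "cc -pthread". The actual patch additionally tears the worker
thread down on device removal, as in cnxk_eswitch_dev_remove() below.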

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
 drivers/net/cnxk/cnxk_eswitch.c |   8 +
 drivers/net/cnxk/cnxk_eswitch.h |  20 +++
 drivers/net/cnxk/cnxk_rep.c     | 263 ++++++++++++++++++++++++++++++++
 drivers/net/cnxk/cnxk_rep.h     |  36 +++++
 4 files changed, 327 insertions(+)
  

Patch

diff --git a/drivers/net/cnxk/cnxk_eswitch.c b/drivers/net/cnxk/cnxk_eswitch.c
index ad6834410d..79f44de06e 100644
--- a/drivers/net/cnxk/cnxk_eswitch.c
+++ b/drivers/net/cnxk/cnxk_eswitch.c
@@ -139,6 +139,14 @@  cnxk_eswitch_dev_remove(struct rte_pci_device *pci_dev)
 				close(sock_fd);
 		}
 
+		if (eswitch_dev->repte_msg_proc.start_thread) {
+			eswitch_dev->repte_msg_proc.start_thread = false;
+			pthread_cond_signal(&eswitch_dev->repte_msg_proc.repte_msg_cond);
+			rte_thread_join(eswitch_dev->repte_msg_proc.repte_msg_thread, NULL);
+			pthread_mutex_destroy(&eswitch_dev->repte_msg_proc.mutex);
+			pthread_cond_destroy(&eswitch_dev->repte_msg_proc.repte_msg_cond);
+		}
+
 		/* Remove representor devices associated with PF */
 		cnxk_rep_dev_remove(eswitch_dev);
 	}
diff --git a/drivers/net/cnxk/cnxk_eswitch.h b/drivers/net/cnxk/cnxk_eswitch.h
index ecf10a8e08..1baf198d72 100644
--- a/drivers/net/cnxk/cnxk_eswitch.h
+++ b/drivers/net/cnxk/cnxk_eswitch.h
@@ -30,6 +30,23 @@  enum cnxk_esw_da_pattern_type {
 	CNXK_ESW_DA_TYPE_PFVF,
 };
 
+struct cnxk_esw_repte_msg {
+	uint16_t hw_func;
+	bool enable;
+
+	TAILQ_ENTRY(cnxk_esw_repte_msg) next;
+};
+
+struct cnxk_esw_repte_msg_proc {
+	bool start_thread;
+	uint8_t msg_avail;
+	rte_thread_t repte_msg_thread;
+	pthread_cond_t repte_msg_cond;
+	pthread_mutex_t mutex;
+
+	TAILQ_HEAD(esw_repte_msg_list, cnxk_esw_repte_msg) msg_list;
+};
+
 struct cnxk_esw_repr_hw_info {
 	/* Representee pcifunc value */
 	uint16_t hw_func;
@@ -139,6 +156,9 @@  struct cnxk_eswitch_dev {
 	bool client_connected;
 	int sock_fd;
 
+	/* Representee notification */
+	struct cnxk_esw_repte_msg_proc repte_msg_proc;
+
 	/* Port representor fields */
 	rte_spinlock_t rep_lock;
 	uint16_t nb_switch_domain;
diff --git a/drivers/net/cnxk/cnxk_rep.c b/drivers/net/cnxk/cnxk_rep.c
index 5b619ebb9e..11901dac87 100644
--- a/drivers/net/cnxk/cnxk_rep.c
+++ b/drivers/net/cnxk/cnxk_rep.c
@@ -4,6 +4,8 @@ 
 #include <cnxk_rep.h>
 #include <cnxk_rep_msg.h>
 
+#define REPTE_MSG_PROC_THRD_NAME_MAX_LEN 30
+
 #define PF_SHIFT 10
 #define PF_MASK	 0x3F
 
@@ -86,6 +88,7 @@  cnxk_rep_dev_remove(struct cnxk_eswitch_dev *eswitch_dev)
 {
 	int i, rc = 0;
 
+	roc_eswitch_nix_process_repte_notify_cb_unregister(&eswitch_dev->nix);
 	for (i = 0; i < eswitch_dev->nb_switch_domain; i++) {
 		rc = rte_eth_switch_domain_free(eswitch_dev->sw_dom[i].switch_domain_id);
 		if (rc)
@@ -95,6 +98,236 @@  cnxk_rep_dev_remove(struct cnxk_eswitch_dev *eswitch_dev)
 	return rc;
 }
 
+static int
+cnxk_representee_release(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func)
+{
+	struct cnxk_rep_dev *rep_dev = NULL;
+	struct rte_eth_dev *rep_eth_dev;
+	int i, rc = 0;
+
+	for (i = 0; i < eswitch_dev->repr_cnt.nb_repr_probed; i++) {
+		rep_eth_dev = eswitch_dev->rep_info[i].rep_eth_dev;
+		if (!rep_eth_dev) {
+			plt_err("Failed to get rep ethdev handle");
+			rc = -EINVAL;
+			goto done;
+		}
+
+		rep_dev = cnxk_rep_pmd_priv(rep_eth_dev);
+		if (rep_dev->hw_func == hw_func &&
+		    (!rep_dev->native_repte || rep_dev->is_vf_active)) {
+			rep_dev->is_vf_active = false;
+			rc = cnxk_rep_dev_stop(rep_eth_dev);
+			if (rc) {
+				plt_err("Failed to stop repr port %d, rep id %d", rep_dev->port_id,
+					rep_dev->rep_id);
+				goto done;
+			}
+
+			cnxk_rep_rx_queue_release(rep_eth_dev, 0);
+			cnxk_rep_tx_queue_release(rep_eth_dev, 0);
+			plt_rep_dbg("Released representor ID %d representing %x", rep_dev->rep_id,
+				    hw_func);
+			break;
+		}
+	}
+done:
+	return rc;
+}
+
+static int
+cnxk_representee_setup(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func, uint16_t rep_id)
+{
+	struct cnxk_rep_dev *rep_dev = NULL;
+	struct rte_eth_dev *rep_eth_dev;
+	int i, rc = 0;
+
+	for (i = 0; i < eswitch_dev->repr_cnt.nb_repr_probed; i++) {
+		rep_eth_dev = eswitch_dev->rep_info[i].rep_eth_dev;
+		if (!rep_eth_dev) {
+			plt_err("Failed to get rep ethdev handle");
+			rc = -EINVAL;
+			goto done;
+		}
+
+		rep_dev = cnxk_rep_pmd_priv(rep_eth_dev);
+		if (rep_dev->hw_func == hw_func && !rep_dev->is_vf_active) {
+			rep_dev->is_vf_active = true;
+			rep_dev->native_repte = true;
+			if (rep_dev->rep_id != rep_id) {
+				plt_err("Rep ID assigned during init %d does not match %d",
+					rep_dev->rep_id, rep_id);
+				rc = -EINVAL;
+				goto done;
+			}
+
+			rc = cnxk_rep_rx_queue_setup(rep_eth_dev, rep_dev->rxq->qid,
+						     rep_dev->rxq->nb_desc, 0,
+						     rep_dev->rxq->rx_conf, rep_dev->rxq->mpool);
+			if (rc) {
+				plt_err("Failed to setup rxq repr port %d, rep id %d",
+					rep_dev->port_id, rep_dev->rep_id);
+				goto done;
+			}
+
+			rc = cnxk_rep_tx_queue_setup(rep_eth_dev, rep_dev->txq->qid,
+						     rep_dev->txq->nb_desc, 0,
+						     rep_dev->txq->tx_conf);
+			if (rc) {
+				plt_err("Failed to setup txq repr port %d, rep id %d",
+					rep_dev->port_id, rep_dev->rep_id);
+				goto done;
+			}
+
+			rc = cnxk_rep_dev_start(rep_eth_dev);
+			if (rc) {
+				plt_err("Failed to start repr port %d, rep id %d", rep_dev->port_id,
+					rep_dev->rep_id);
+				goto done;
+			}
+			break;
+		}
+	}
+done:
+	return rc;
+}
+
+static int
+cnxk_representee_msg_process(struct cnxk_eswitch_dev *eswitch_dev, uint16_t hw_func, bool enable)
+{
+	struct cnxk_eswitch_devargs *esw_da;
+	uint16_t rep_id = UINT16_MAX;
+	int rc = 0, i, j;
+
+	/* Traverse the initialized representor list */
+	for (i = 0; i < eswitch_dev->nb_esw_da; i++) {
+		esw_da = &eswitch_dev->esw_da[i];
+		for (j = 0; j < esw_da->nb_repr_ports; j++) {
+			if (esw_da->repr_hw_info[j].hw_func == hw_func) {
+				rep_id = esw_da->repr_hw_info[j].rep_id;
+				break;
+			}
+		}
+		if (rep_id != UINT16_MAX)
+			break;
+	}
+	/* No action on PF func for which representor has not been created */
+	if (rep_id == UINT16_MAX)
+		goto done;
+
+	if (enable) {
+		rc = cnxk_representee_setup(eswitch_dev, hw_func, rep_id);
+		if (rc) {
+			plt_err("Failed to setup representee, err %d", rc);
+			goto fail;
+		}
+		plt_rep_dbg("		Representor ID %d representing %x", rep_id, hw_func);
+		rc = cnxk_eswitch_flow_rules_install(eswitch_dev, hw_func);
+		if (rc) {
+			plt_err("Failed to install rxtx flow rules for %x", hw_func);
+			goto fail;
+		}
+	} else {
+		rc = cnxk_eswitch_flow_rules_delete(eswitch_dev, hw_func);
+		if (rc) {
+			plt_err("Failed to delete flow rules for %x", hw_func);
+			goto fail;
+		}
+		rc = cnxk_representee_release(eswitch_dev, hw_func);
+		if (rc) {
+			plt_err("Failed to release representee, err %d", rc);
+			goto fail;
+		}
+	}
+
+done:
+	return 0;
+fail:
+	return rc;
+}
+
+static uint32_t
+cnxk_representee_msg_thread_main(void *arg)
+{
+	struct cnxk_eswitch_dev *eswitch_dev = (struct cnxk_eswitch_dev *)arg;
+	struct cnxk_esw_repte_msg_proc *repte_msg_proc;
+	struct cnxk_esw_repte_msg *msg, *next_msg;
+	int count, rc;
+
+	repte_msg_proc = &eswitch_dev->repte_msg_proc;
+	pthread_mutex_lock(&eswitch_dev->repte_msg_proc.mutex);
+	while (eswitch_dev->repte_msg_proc.start_thread) {
+		do {
+			rc = pthread_cond_wait(&eswitch_dev->repte_msg_proc.repte_msg_cond,
+					       &eswitch_dev->repte_msg_proc.mutex);
+		} while (rc != 0);
+
+		/* Go through list pushed from interrupt context and process each message */
+		next_msg = TAILQ_FIRST(&repte_msg_proc->msg_list);
+		count = 0;
+		while (next_msg) {
+			msg = next_msg;
+			count++;
+			plt_rep_dbg("	Processing msg %d: hw_func %x action %s", count,
+				    msg->hw_func, msg->enable ? "enable" : "disable");
+
+			/* Unlock so the interrupt thread can grab the lock
+			 * while this thread processes the message.
+			 */
+			pthread_mutex_unlock(&eswitch_dev->repte_msg_proc.mutex);
+			/* Processing the message */
+			cnxk_representee_msg_process(eswitch_dev, msg->hw_func, msg->enable);
+			/* Re-take the lock before walking the message list again */
+			pthread_mutex_lock(&eswitch_dev->repte_msg_proc.mutex);
+			next_msg = TAILQ_NEXT(msg, next);
+			TAILQ_REMOVE(&repte_msg_proc->msg_list, msg, next);
+			rte_free(msg);
+		}
+	}
+
+	pthread_mutex_unlock(&eswitch_dev->repte_msg_proc.mutex);
+
+	return 0;
+}
+
+static int
+cnxk_representee_notification(void *roc_nix, uint16_t hw_func, bool enable)
+{
+	struct cnxk_esw_repte_msg_proc *repte_msg_proc;
+	struct cnxk_eswitch_dev *eswitch_dev;
+	struct cnxk_esw_repte_msg *msg;
+	int rc = 0;
+
+	RTE_SET_USED(roc_nix);
+	eswitch_dev = cnxk_eswitch_pmd_priv();
+	if (!eswitch_dev) {
+		plt_err("Failed to get PF ethdev handle");
+		rc = -EINVAL;
+		goto done;
+	}
+
+	repte_msg_proc = &eswitch_dev->repte_msg_proc;
+	msg = rte_zmalloc("msg", sizeof(struct cnxk_esw_repte_msg), 0);
+	if (!msg) {
+		plt_err("Failed to allocate memory for repte msg");
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	msg->hw_func = hw_func;
+	msg->enable = enable;
+
+	plt_rep_dbg("Pushing new notification: hw_func %x enable %d", msg->hw_func, enable);
+	pthread_mutex_lock(&eswitch_dev->repte_msg_proc.mutex);
+	TAILQ_INSERT_TAIL(&repte_msg_proc->msg_list, msg, next);
+	/* Signal vf message handler thread */
+	pthread_cond_signal(&eswitch_dev->repte_msg_proc.repte_msg_cond);
+	pthread_mutex_unlock(&eswitch_dev->repte_msg_proc.mutex);
+
+done:
+	return rc;
+}
+
 static int
 cnxk_rep_parent_setup(struct cnxk_eswitch_dev *eswitch_dev)
 {
@@ -263,6 +496,7 @@  create_representor_ethdev(struct rte_pci_device *pci_dev, struct cnxk_eswitch_de
 int
 cnxk_rep_dev_probe(struct rte_pci_device *pci_dev, struct cnxk_eswitch_dev *eswitch_dev)
 {
+	char name[REPTE_MSG_PROC_THRD_NAME_MAX_LEN];
 	struct cnxk_eswitch_devargs *esw_da;
 	uint16_t num_rep;
 	int i, j, rc;
@@ -302,7 +536,36 @@  cnxk_rep_dev_probe(struct rte_pci_device *pci_dev, struct cnxk_eswitch_dev *eswi
 		}
 	}
 
+	if (!eswitch_dev->repte_msg_proc.start_thread) {
+		/* Register callback for representee notification */
+		if (roc_eswitch_nix_process_repte_notify_cb_register(&eswitch_dev->nix,
+							     cnxk_representee_notification)) {
+			plt_err("Failed to register callback for representee notification");
+			rc = -EINVAL;
+			goto fail;
+		}
+
+		/* Create a thread for handling msgs from VFs */
+		TAILQ_INIT(&eswitch_dev->repte_msg_proc.msg_list);
+		pthread_cond_init(&eswitch_dev->repte_msg_proc.repte_msg_cond, NULL);
+		pthread_mutex_init(&eswitch_dev->repte_msg_proc.mutex, NULL);
+
+		rte_strscpy(name, "repte_msg_proc_thrd", REPTE_MSG_PROC_THRD_NAME_MAX_LEN);
+		eswitch_dev->repte_msg_proc.start_thread = true;
+		rc =
+		rte_thread_create_internal_control(&eswitch_dev->repte_msg_proc.repte_msg_thread,
+						   name, cnxk_representee_msg_thread_main,
+						   eswitch_dev);
+		if (rc != 0) {
+			plt_err("Failed to create thread for VF mbox handling");
+			goto thread_fail;
+		}
+	}
+
 	return 0;
+thread_fail:
+	pthread_mutex_destroy(&eswitch_dev->repte_msg_proc.mutex);
+	pthread_cond_destroy(&eswitch_dev->repte_msg_proc.repte_msg_cond);
 fail:
 	return rc;
 }
diff --git a/drivers/net/cnxk/cnxk_rep.h b/drivers/net/cnxk/cnxk_rep.h
index da298823a7..bee141e25b 100644
--- a/drivers/net/cnxk/cnxk_rep.h
+++ b/drivers/net/cnxk/cnxk_rep.h
@@ -10,6 +10,40 @@ 
 /* Common ethdev ops */
 extern struct eth_dev_ops cnxk_rep_dev_ops;
 
+struct cnxk_rep_queue_stats {
+	uint64_t pkts;
+	uint64_t bytes;
+};
+
+struct cnxk_rep_rxq {
+	/* Parent rep device */
+	struct cnxk_rep_dev *rep_dev;
+	/* Queue ID */
+	uint16_t qid;
+	/* No of desc */
+	uint16_t nb_desc;
+	/* mempool handle */
+	struct rte_mempool *mpool;
+	/* RX config parameters */
+	const struct rte_eth_rxconf *rx_conf;
+	/* Per queue RX statistics */
+	struct cnxk_rep_queue_stats stats;
+};
+
+struct cnxk_rep_txq {
+	/* Parent rep device */
+	struct cnxk_rep_dev *rep_dev;
+	/* Queue ID */
+	uint16_t qid;
+	/* No of desc */
+	uint16_t nb_desc;
+	/* TX config parameters */
+	const struct rte_eth_txconf *tx_conf;
+	/* Per queue TX statistics */
+	struct cnxk_rep_queue_stats stats;
+};
+
+/* Representor port configurations */
 struct cnxk_rep_dev {
 	uint16_t port_id;
 	uint16_t rep_id;
@@ -18,6 +52,8 @@  struct cnxk_rep_dev {
 	uint16_t hw_func;
 	bool is_vf_active;
 	bool native_repte;
+	struct cnxk_rep_rxq *rxq;
+	struct cnxk_rep_txq *txq;
 	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
 };