[v3,14/15] common/idpf: add vec queue setup

Message ID 20230117072626.93796-19-beilei.xing@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Qi Zhang
Headers
Series net/idpf: introduce idpf common module |

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Xing, Beilei Jan. 17, 2023, 7:26 a.m. UTC
  From: Beilei Xing <beilei.xing@intel.com>

Move vector queue setup for single queue model to common module.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/common/idpf/idpf_common_rxtx.c | 57 ++++++++++++++++++++++++++
 drivers/common/idpf/idpf_common_rxtx.h |  2 +
 drivers/common/idpf/version.map        |  1 +
 drivers/net/idpf/idpf_rxtx.c           | 57 --------------------------
 drivers/net/idpf/idpf_rxtx.h           |  1 -
 5 files changed, 60 insertions(+), 58 deletions(-)
  

Patch

diff --git a/drivers/common/idpf/idpf_common_rxtx.c b/drivers/common/idpf/idpf_common_rxtx.c
index 459057f20e..bc95fef6bc 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -1399,3 +1399,60 @@  idpf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	return i;
 }
+
+static void __rte_cold
+release_rxq_mbufs_vec(struct idpf_rx_queue *rxq)
+{
+	const uint16_t mask = rxq->nb_rx_desc - 1; /* index wrap mask — assumes ring size is a power of two; TODO confirm */
+	uint16_t i;
+
+	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc) /* no ring, or nothing left to free */
+		return;
+
+	/* free all mbufs that are valid in the ring */
+	if (rxq->rxrearm_nb == 0) { /* nothing rearmed yet: every slot may hold an mbuf */
+		for (i = 0; i < rxq->nb_rx_desc; i++) {
+			if (rxq->sw_ring[i] != NULL)
+				rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+		}
+	} else {
+		for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask) { /* live range [rx_tail, rxrearm_start) */
+			if (rxq->sw_ring[i] != NULL)
+				rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+		}
+	}
+
+	rxq->rxrearm_nb = rxq->nb_rx_desc; /* mark the whole ring as awaiting rearm */
+
+	/* set all entries to NULL */
+	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
+static const struct idpf_rxq_ops def_singleq_rx_ops_vec = { /* queue ops for the single-queue vector Rx path */
+	.release_mbufs = release_rxq_mbufs_vec,
+};
+
+static inline int
+idpf_singleq_rx_vec_setup_default(struct idpf_rx_queue *rxq)
+{
+	uintptr_t p;
+	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+	mb_def.nb_segs = 1;
+	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+	mb_def.port = rxq->port_id;
+	rte_mbuf_refcnt_set(&mb_def, 1);
+
+	/* prevent compiler reordering: rearm_data covers previous fields */
+	rte_compiler_barrier();
+	p = (uintptr_t)&mb_def.rearm_data;
+	rxq->mbuf_initializer = *(uint64_t *)p; /* 64-bit template used to rearm mbufs in the vector path */
+	return 0; /* always succeeds */
+}
+
+int __rte_cold
+idpf_singleq_rx_vec_setup(struct idpf_rx_queue *rxq)
+{
+	rxq->ops = &def_singleq_rx_ops_vec; /* install the vector-aware mbuf release callback */
+	return idpf_singleq_rx_vec_setup_default(rxq);
+}
diff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h
index 827f791505..74d6081638 100644
--- a/drivers/common/idpf/idpf_common_rxtx.h
+++ b/drivers/common/idpf/idpf_common_rxtx.h
@@ -252,5 +252,7 @@  uint16_t idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 __rte_internal
 uint16_t idpf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			uint16_t nb_pkts);
+__rte_internal
+int idpf_singleq_rx_vec_setup(struct idpf_rx_queue *rxq);
 
 #endif /* _IDPF_COMMON_RXTX_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 244c74c209..0f3f4aa758 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -32,6 +32,7 @@  INTERNAL {
 	idpf_reset_split_tx_descq;
 	idpf_rx_queue_release;
 	idpf_singleq_recv_pkts;
+	idpf_singleq_rx_vec_setup;
 	idpf_singleq_xmit_pkts;
 	idpf_splitq_recv_pkts;
 	idpf_splitq_xmit_pkts;
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 74bf207c05..6155531e69 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -743,63 +743,6 @@  idpf_stop_queues(struct rte_eth_dev *dev)
 	}
 }
 
-static void __rte_cold
-release_rxq_mbufs_vec(struct idpf_rx_queue *rxq)
-{
-	const uint16_t mask = rxq->nb_rx_desc - 1;
-	uint16_t i;
-
-	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
-		return;
-
-	/* free all mbufs that are valid in the ring */
-	if (rxq->rxrearm_nb == 0) {
-		for (i = 0; i < rxq->nb_rx_desc; i++) {
-			if (rxq->sw_ring[i] != NULL)
-				rte_pktmbuf_free_seg(rxq->sw_ring[i]);
-		}
-	} else {
-		for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask) {
-			if (rxq->sw_ring[i] != NULL)
-				rte_pktmbuf_free_seg(rxq->sw_ring[i]);
-		}
-	}
-
-	rxq->rxrearm_nb = rxq->nb_rx_desc;
-
-	/* set all entries to NULL */
-	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
-}
-
-static const struct idpf_rxq_ops def_singleq_rx_ops_vec = {
-	.release_mbufs = release_rxq_mbufs_vec,
-};
-
-static inline int
-idpf_singleq_rx_vec_setup_default(struct idpf_rx_queue *rxq)
-{
-	uintptr_t p;
-	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
-
-	mb_def.nb_segs = 1;
-	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
-	mb_def.port = rxq->port_id;
-	rte_mbuf_refcnt_set(&mb_def, 1);
-
-	/* prevent compiler reordering: rearm_data covers previous fields */
-	rte_compiler_barrier();
-	p = (uintptr_t)&mb_def.rearm_data;
-	rxq->mbuf_initializer = *(uint64_t *)p;
-	return 0;
-}
-
-int __rte_cold
-idpf_singleq_rx_vec_setup(struct idpf_rx_queue *rxq)
-{
-	rxq->ops = &def_singleq_rx_ops_vec;
-	return idpf_singleq_rx_vec_setup_default(rxq);
-}
-
 void
 idpf_set_rx_function(struct rte_eth_dev *dev)
 {
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index eab363c3e7..a985dc2cf5 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -44,7 +44,6 @@  void idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 int idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			uint16_t nb_desc, unsigned int socket_id,
 			const struct rte_eth_txconf *tx_conf);
-int idpf_singleq_rx_vec_setup(struct idpf_rx_queue *rxq);
 int idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);