[v7,14/19] common/idpf: add vec queue setup
Checks
Commit Message
From: Beilei Xing <beilei.xing@intel.com>
Move the vector queue setup code for the single queue model from the
net/idpf driver to the common module, so that it can be shared by
drivers built on top of the idpf common code.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_rxtx.c | 57 ++++++++++++++++++++++++++
drivers/common/idpf/idpf_common_rxtx.h | 2 +
drivers/common/idpf/version.map | 1 +
drivers/net/idpf/idpf_rxtx.c | 57 --------------------------
drivers/net/idpf/idpf_rxtx.h | 1 -
5 files changed, 60 insertions(+), 58 deletions(-)
@@ -1399,3 +1399,60 @@ idpf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
return i;
}
+
+static void __rte_cold
+release_rxq_mbufs_vec(struct idpf_rx_queue *rxq)
+{
+ const uint16_t mask = rxq->nb_rx_desc - 1;
+ uint16_t i;
+
+ if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ if (rxq->rxrearm_nb == 0) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i] != NULL)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+ }
+ } else {
+ for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask) {
+ if (rxq->sw_ring[i] != NULL)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+ }
+ }
+
+ rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
+static const struct idpf_rxq_ops def_singleq_rx_ops_vec = {
+ .release_mbufs = release_rxq_mbufs_vec,
+};
+
+static inline int
+idpf_singleq_rx_vec_setup_default(struct idpf_rx_queue *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer = *(uint64_t *)p;
+ return 0;
+}
+
+int __rte_cold
+idpf_singleq_rx_vec_setup(struct idpf_rx_queue *rxq)
+{
+ rxq->ops = &def_singleq_rx_ops_vec;
+ return idpf_singleq_rx_vec_setup_default(rxq);
+}
@@ -252,5 +252,7 @@ uint16_t idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
__rte_internal
uint16_t idpf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+__rte_internal
+int idpf_singleq_rx_vec_setup(struct idpf_rx_queue *rxq);
#endif /* _IDPF_COMMON_RXTX_H_ */
@@ -25,6 +25,7 @@ INTERNAL {
idpf_reset_split_tx_descq;
idpf_rx_queue_release;
idpf_singleq_recv_pkts;
+ idpf_singleq_rx_vec_setup;
idpf_singleq_xmit_pkts;
idpf_splitq_recv_pkts;
idpf_splitq_xmit_pkts;
@@ -743,63 +743,6 @@ idpf_stop_queues(struct rte_eth_dev *dev)
}
}
-static void __rte_cold
-release_rxq_mbufs_vec(struct idpf_rx_queue *rxq)
-{
- const uint16_t mask = rxq->nb_rx_desc - 1;
- uint16_t i;
-
- if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
- return;
-
- /* free all mbufs that are valid in the ring */
- if (rxq->rxrearm_nb == 0) {
- for (i = 0; i < rxq->nb_rx_desc; i++) {
- if (rxq->sw_ring[i] != NULL)
- rte_pktmbuf_free_seg(rxq->sw_ring[i]);
- }
- } else {
- for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask) {
- if (rxq->sw_ring[i] != NULL)
- rte_pktmbuf_free_seg(rxq->sw_ring[i]);
- }
- }
-
- rxq->rxrearm_nb = rxq->nb_rx_desc;
-
- /* set all entries to NULL */
- memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
-}
-
-static const struct idpf_rxq_ops def_singleq_rx_ops_vec = {
- .release_mbufs = release_rxq_mbufs_vec,
-};
-
-static inline int
-idpf_singleq_rx_vec_setup_default(struct idpf_rx_queue *rxq)
-{
- uintptr_t p;
- struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
-
- mb_def.nb_segs = 1;
- mb_def.data_off = RTE_PKTMBUF_HEADROOM;
- mb_def.port = rxq->port_id;
- rte_mbuf_refcnt_set(&mb_def, 1);
-
- /* prevent compiler reordering: rearm_data covers previous fields */
- rte_compiler_barrier();
- p = (uintptr_t)&mb_def.rearm_data;
- rxq->mbuf_initializer = *(uint64_t *)p;
- return 0;
-}
-
-int __rte_cold
-idpf_singleq_rx_vec_setup(struct idpf_rx_queue *rxq)
-{
- rxq->ops = &def_singleq_rx_ops_vec;
- return idpf_singleq_rx_vec_setup_default(rxq);
-}
-
void
idpf_set_rx_function(struct rte_eth_dev *dev)
{
@@ -44,7 +44,6 @@ void idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
int idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
-int idpf_singleq_rx_vec_setup(struct idpf_rx_queue *rxq);
int idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);