[v17,04/18] net/idpf: add Rx queue setup

Message ID 20221031051556.98549-5-beilei.xing@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Andrew Rybchenko
Series: add support for idpf PMD in DPDK

Checks

ci/checkpatch: success (coding style OK)

Commit Message

Xing, Beilei Oct. 31, 2022, 5:15 a.m. UTC
  From: Junfeng Guo <junfeng.guo@intel.com>

Add support for the rx_queue_setup ops for both the single queue and
split queue models.
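
From the application side the new ops is reached through the generic
ethdev API. A minimal sketch, assuming an already-configured idpf port
and an existing mbuf pool (port_id, mbuf_pool and setup_rx are
illustrative names, not part of this patch, and 1024 descriptors is
assumed to satisfy the rx_desc_lim advertised below):

	#include <rte_ethdev.h>
	#include <rte_mempool.h>

	static int
	setup_rx(uint16_t port_id, struct rte_mempool *mbuf_pool)
	{
		/* Queue 0, 1024 descriptors, socket 0. Passing NULL for
		 * rx_conf makes ethdev fall back to the PMD's
		 * default_rxconf, so rx_free_thresh defaults to
		 * IDPF_DEFAULT_RX_FREE_THRESH (32).
		 */
		return rte_eth_rx_queue_setup(port_id, 0, 1024, 0,
					      NULL, mbuf_pool);
	}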

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
 drivers/net/idpf/idpf_ethdev.c |  11 +
 drivers/net/idpf/idpf_rxtx.c   | 400 +++++++++++++++++++++++++++++++++
 drivers/net/idpf/idpf_rxtx.h   |  46 ++++
 3 files changed, 457 insertions(+)
  

Patch

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 54f20d30ca..fb5cd1b111 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -48,12 +48,22 @@  idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.tx_rs_thresh = IDPF_DEFAULT_TX_RS_THRESH,
 	};
 
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_free_thresh = IDPF_DEFAULT_RX_FREE_THRESH,
+	};
+
 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
 		.nb_max = IDPF_MAX_RING_DESC,
 		.nb_min = IDPF_MIN_RING_DESC,
 		.nb_align = IDPF_ALIGN_RING_DESC,
 	};
 
+	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = IDPF_MAX_RING_DESC,
+		.nb_min = IDPF_MIN_RING_DESC,
+		.nb_align = IDPF_ALIGN_RING_DESC,
+	};
+
 	return 0;
 }
 
@@ -643,6 +653,7 @@  idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
 static const struct eth_dev_ops idpf_eth_dev_ops = {
 	.dev_configure			= idpf_dev_configure,
 	.dev_close			= idpf_dev_close,
+	.rx_queue_setup			= idpf_rx_queue_setup,
 	.tx_queue_setup			= idpf_tx_queue_setup,
 	.dev_infos_get			= idpf_dev_info_get,
 };
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 4afa0a2560..25dd5d85d5 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -8,6 +8,21 @@ 
 #include "idpf_ethdev.h"
 #include "idpf_rxtx.h"
 
+static int
+check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
+{
+	/* The following constraint must be satisfied:
+	 *   thresh < nb_desc
+	 */
+	if (thresh >= nb_desc) {
+		PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
+			     thresh, nb_desc);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int
 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
 		uint16_t tx_free_thresh)
@@ -56,6 +71,87 @@  check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
 	return 0;
 }
 
+static void
+reset_split_rx_descq(struct idpf_rx_queue *rxq)
+{
+	uint16_t len;
+	uint32_t i;
+
+	if (rxq == NULL)
+		return;
+
+	len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+
+	for (i = 0; i < len * sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3);
+	     i++)
+		((volatile char *)rxq->rx_ring)[i] = 0;
+
+	rxq->rx_tail = 0;
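+	/* Completion descriptors carry a generation bit that flips on each
+	 * pass over the ring; the ring was just zeroed, so the first valid
+	 * write-back is expected with gen = 1.
+	 */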
+	rxq->expected_gen_id = 1;
+}
+
+static void
+reset_split_rx_bufq(struct idpf_rx_queue *rxq)
+{
+	uint16_t len;
+	uint32_t i;
+
+	if (rxq == NULL)
+		return;
+
+	len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+
+	for (i = 0; i < len * sizeof(struct virtchnl2_splitq_rx_buf_desc);
+	     i++)
+		((volatile char *)rxq->rx_ring)[i] = 0;
+
+	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+
+	for (i = 0; i < IDPF_RX_MAX_BURST; i++)
+		rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
+
+	/* The next descriptor id which can be received. */
+	rxq->rx_next_avail = 0;
+
+	/* The next descriptor id which can be refilled. */
+	rxq->rx_tail = 0;
+	/* The number of descriptors which can be refilled. */
+	rxq->nb_rx_hold = rxq->nb_rx_desc - 1;
+
+	rxq->bufq1 = NULL;
+	rxq->bufq2 = NULL;
+}
+
+static void
+reset_single_rx_queue(struct idpf_rx_queue *rxq)
+{
+	uint16_t len;
+	uint32_t i;
+
+	if (rxq == NULL)
+		return;
+
+	len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+
+	for (i = 0; i < len * sizeof(struct virtchnl2_singleq_rx_buf_desc);
+	     i++)
+		((volatile char *)rxq->rx_ring)[i] = 0;
+
+	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+
+	for (i = 0; i < IDPF_RX_MAX_BURST; i++)
+		rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
+
+	rxq->rx_tail = 0;
+	rxq->nb_rx_hold = 0;
+
+	if (rxq->pkt_first_seg != NULL)
+		rte_pktmbuf_free(rxq->pkt_first_seg);
+
+	rxq->pkt_first_seg = NULL;
+	rxq->pkt_last_seg = NULL;
+}
+
 static void
 reset_split_tx_descq(struct idpf_tx_queue *txq)
 {
@@ -145,6 +241,310 @@  reset_single_tx_queue(struct idpf_tx_queue *txq)
 	txq->next_rs = txq->rs_thresh - 1;
 }
 
+static int
+idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
+			 uint16_t queue_idx, uint16_t rx_free_thresh,
+			 uint16_t nb_desc, unsigned int socket_id,
+			 struct rte_mempool *mp)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	struct idpf_hw *hw = &adapter->hw;
+	const struct rte_memzone *mz;
+	uint32_t ring_size;
+	uint16_t len;
+
+	bufq->mp = mp;
+	bufq->nb_rx_desc = nb_desc;
+	bufq->rx_free_thresh = rx_free_thresh;
+	bufq->queue_id = vport->chunks_info.rx_buf_start_qid + queue_idx;
+	bufq->port_id = dev->data->port_id;
+	bufq->rx_hdr_len = 0;
+	bufq->adapter = adapter;
+
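+	/* Buffer size posted to HW is the mbuf data room less headroom. */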
+	len = rte_pktmbuf_data_room_size(bufq->mp) - RTE_PKTMBUF_HEADROOM;
+	bufq->rx_buf_len = len;
+
+	/* Allocate the software ring. */
+	len = nb_desc + IDPF_RX_MAX_BURST;
+	bufq->sw_ring =
+		rte_zmalloc_socket("idpf rx bufq sw ring",
+				   sizeof(struct rte_mbuf *) * len,
+				   RTE_CACHE_LINE_SIZE,
+				   socket_id);
+	if (bufq->sw_ring == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+		return -ENOMEM;
+	}
+
+	/* Allocate a little more to support bulk allocation. */
+	len = nb_desc + IDPF_RX_MAX_BURST;
+	ring_size = RTE_ALIGN(len *
+			      sizeof(struct virtchnl2_splitq_rx_buf_desc),
+			      IDPF_DMA_MEM_ALIGN);
+	mz = rte_eth_dma_zone_reserve(dev, "rx_buf_ring", queue_idx,
+				      ring_size, IDPF_RING_BASE_ALIGN,
+				      socket_id);
+	if (mz == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX buffer queue.");
+		rte_free(bufq->sw_ring);
+		return -ENOMEM;
+	}
+
+	/* Zero all the descriptors in the ring. */
+	memset(mz->addr, 0, ring_size);
+	bufq->rx_ring_phys_addr = mz->iova;
+	bufq->rx_ring = mz->addr;
+
+	bufq->mz = mz;
+	reset_split_rx_bufq(bufq);
+	bufq->q_set = true;
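+	/* Locate this queue's tail register in the BAR via the queue tail
+	 * chunk info returned by the control plane.
+	 */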
+	bufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +
+			 queue_idx * vport->chunks_info.rx_buf_qtail_spacing);
+
+	/* TODO: allow bulk or vec */
+
+	return 0;
+}
+
+static int
+idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+			  uint16_t nb_desc, unsigned int socket_id,
+			  const struct rte_eth_rxconf *rx_conf,
+			  struct rte_mempool *mp)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	struct idpf_rx_queue *bufq1, *bufq2;
+	const struct rte_memzone *mz;
+	struct idpf_rx_queue *rxq;
+	uint16_t rx_free_thresh;
+	uint32_t ring_size;
+	uint64_t offloads;
+	uint16_t qid;
+	uint16_t len;
+	int ret;
+
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+	/* Check free threshold */
+	rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
+		IDPF_DEFAULT_RX_FREE_THRESH :
+		rx_conf->rx_free_thresh;
+	if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
+		return -EINVAL;
+
+	/* Set up the Rx descriptor queue */
+	rxq = rte_zmalloc_socket("idpf rxq",
+				 sizeof(struct idpf_rx_queue),
+				 RTE_CACHE_LINE_SIZE,
+				 socket_id);
+	if (rxq == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+		return -ENOMEM;
+	}
+
+	rxq->mp = mp;
+	rxq->nb_rx_desc = nb_desc;
+	rxq->rx_free_thresh = rx_free_thresh;
+	rxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;
+	rxq->port_id = dev->data->port_id;
+	rxq->rx_hdr_len = 0;
+	rxq->adapter = adapter;
+	rxq->offloads = offloads;
+
+	len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+	rxq->rx_buf_len = len;
+
+	len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+	ring_size = RTE_ALIGN(len *
+			      sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3),
+			      IDPF_DMA_MEM_ALIGN);
+	mz = rte_eth_dma_zone_reserve(dev, "rx_compl_ring", queue_idx,
+				      ring_size, IDPF_RING_BASE_ALIGN,
+				      socket_id);
+
+	if (mz == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX descriptor ring.");
+		ret = -ENOMEM;
+		goto free_rxq;
+	}
+
+	/* Zero all the descriptors in the ring. */
+	memset(mz->addr, 0, ring_size);
+	rxq->rx_ring_phys_addr = mz->iova;
+	rxq->rx_ring = mz->addr;
+
+	rxq->mz = mz;
+	reset_split_rx_descq(rxq);
+
+	/* TODO: allow bulk or vec */
+
+	/* Set up the pair of Rx buffer queues backing this Rx queue */
+	bufq1 = rte_zmalloc_socket("idpf bufq1",
+				   sizeof(struct idpf_rx_queue),
+				   RTE_CACHE_LINE_SIZE,
+				   socket_id);
+	if (bufq1 == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for rx buffer queue 1.");
+		ret = -ENOMEM;
+		goto free_mz;
+	}
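+	/* Buffer queues come in pairs: Rx queue N owns buffer queues
+	 * 2 * N and 2 * N + 1.
+	 */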
+	qid = 2 * queue_idx;
+	ret = idpf_rx_split_bufq_setup(dev, bufq1, qid, rx_free_thresh,
+				       nb_desc, socket_id, mp);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to setup buffer queue 1");
+		ret = -EINVAL;
+		goto free_bufq1;
+	}
+	rxq->bufq1 = bufq1;
+
+	bufq2 = rte_zmalloc_socket("idpf bufq2",
+				   sizeof(struct idpf_rx_queue),
+				   RTE_CACHE_LINE_SIZE,
+				   socket_id);
+	if (bufq2 == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for rx buffer queue 2.");
+		rte_free(bufq1->sw_ring);
+		rte_memzone_free(bufq1->mz);
+		ret = -ENOMEM;
+		goto free_bufq1;
+	}
+	qid = 2 * queue_idx + 1;
+	ret = idpf_rx_split_bufq_setup(dev, bufq2, qid, rx_free_thresh,
+				       nb_desc, socket_id, mp);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to setup buffer queue 2");
+		rte_free(bufq1->sw_ring);
+		rte_memzone_free(bufq1->mz);
+		ret = -EINVAL;
+		goto free_bufq2;
+	}
+	rxq->bufq2 = bufq2;
+
+	rxq->q_set = true;
+	dev->data->rx_queues[queue_idx] = rxq;
+
+	return 0;
+
+free_bufq2:
+	rte_free(bufq2);
+free_bufq1:
+	rte_free(bufq1);
+free_mz:
+	rte_memzone_free(mz);
+free_rxq:
+	rte_free(rxq);
+
+	return ret;
+}
+
+static int
+idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+			   uint16_t nb_desc, unsigned int socket_id,
+			   const struct rte_eth_rxconf *rx_conf,
+			   struct rte_mempool *mp)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	struct idpf_hw *hw = &adapter->hw;
+	const struct rte_memzone *mz;
+	struct idpf_rx_queue *rxq;
+	uint16_t rx_free_thresh;
+	uint32_t ring_size;
+	uint64_t offloads;
+	uint16_t len;
+
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+	/* Check free threshold */
+	rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
+		IDPF_DEFAULT_RX_FREE_THRESH :
+		rx_conf->rx_free_thresh;
+	if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
+		return -EINVAL;
+
+	/* Set up the Rx descriptor queue */
+	rxq = rte_zmalloc_socket("idpf rxq",
+				 sizeof(struct idpf_rx_queue),
+				 RTE_CACHE_LINE_SIZE,
+				 socket_id);
+	if (rxq == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+		return -ENOMEM;
+	}
+
+	rxq->mp = mp;
+	rxq->nb_rx_desc = nb_desc;
+	rxq->rx_free_thresh = rx_free_thresh;
+	rxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;
+	rxq->port_id = dev->data->port_id;
+	rxq->rx_hdr_len = 0;
+	rxq->adapter = adapter;
+	rxq->offloads = offloads;
+
+	len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+	rxq->rx_buf_len = len;
+
+	len = nb_desc + IDPF_RX_MAX_BURST;
+	rxq->sw_ring =
+		rte_zmalloc_socket("idpf rxq sw ring",
+				   sizeof(struct rte_mbuf *) * len,
+				   RTE_CACHE_LINE_SIZE,
+				   socket_id);
+	if (rxq->sw_ring == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+		rte_free(rxq);
+		return -ENOMEM;
+	}
+
+	/* Allocate a little more to support bulk allocation. */
+	len = nb_desc + IDPF_RX_MAX_BURST;
+	ring_size = RTE_ALIGN(len *
+			      sizeof(struct virtchnl2_singleq_rx_buf_desc),
+			      IDPF_DMA_MEM_ALIGN);
+	mz = rte_eth_dma_zone_reserve(dev, "rx ring", queue_idx,
+				      ring_size, IDPF_RING_BASE_ALIGN,
+				      socket_id);
+	if (mz == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX ring.");
+		rte_free(rxq->sw_ring);
+		rte_free(rxq);
+		return -ENOMEM;
+	}
+
+	/* Zero all the descriptors in the ring. */
+	memset(mz->addr, 0, ring_size);
+	rxq->rx_ring_phys_addr = mz->iova;
+	rxq->rx_ring = mz->addr;
+
+	rxq->mz = mz;
+	reset_single_rx_queue(rxq);
+	rxq->q_set = true;
+	dev->data->rx_queues[queue_idx] = rxq;
+	rxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +
+			queue_idx * vport->chunks_info.rx_qtail_spacing);
+
+	return 0;
+}
+
+int
+idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+		    uint16_t nb_desc, unsigned int socket_id,
+		    const struct rte_eth_rxconf *rx_conf,
+		    struct rte_mempool *mp)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+
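+	/* Dispatch on the Rx queue model configured for this vport. */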
+	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+		return idpf_rx_single_queue_setup(dev, queue_idx, nb_desc,
+						  socket_id, rx_conf, mp);
+	else
+		return idpf_rx_split_queue_setup(dev, queue_idx, nb_desc,
+						 socket_id, rx_conf, mp);
+}
+
 static int
 idpf_tx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			  uint16_t nb_desc, unsigned int socket_id,
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index c7ba15b058..3f3932c3eb 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -15,9 +15,51 @@ 
 /* Base address of the HW descriptor ring should be 128B aligned. */
 #define IDPF_RING_BASE_ALIGN	128
 
+#define IDPF_RX_MAX_BURST		32
+#define IDPF_DEFAULT_RX_FREE_THRESH	32
+
 #define IDPF_DEFAULT_TX_RS_THRESH	32
 #define IDPF_DEFAULT_TX_FREE_THRESH	32
 
+struct idpf_rx_queue {
+	struct idpf_adapter *adapter;   /* the adapter this queue belongs to */
+	struct rte_mempool *mp;         /* mbuf pool to populate Rx ring */
+	const struct rte_memzone *mz;   /* memzone for Rx ring */
+	volatile void *rx_ring;
+	struct rte_mbuf **sw_ring;      /* address of SW ring */
+	uint64_t rx_ring_phys_addr;     /* Rx ring DMA address */
+
+	uint16_t nb_rx_desc;            /* ring length */
+	uint16_t rx_tail;               /* current value of tail */
+	volatile uint8_t *qrx_tail;     /* register address of tail */
+	uint16_t rx_free_thresh;        /* max free RX desc to hold */
+	uint16_t nb_rx_hold;            /* number of held free RX desc */
+	struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
+	struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
+	struct rte_mbuf fake_mbuf;      /* dummy mbuf */
+
+	uint16_t rx_nb_avail;   /* number of staged packets ready to return */
+	uint16_t rx_next_avail; /* index of the next staged packet */
+
+	uint16_t port_id;       /* device port ID */
+	uint16_t queue_id;      /* Rx queue index */
+	uint16_t rx_buf_len;    /* The packet buffer size */
+	uint16_t rx_hdr_len;    /* The header buffer size */
+	uint16_t max_pkt_len;   /* Maximum packet length */
+	uint8_t rxdid;          /* flexible Rx descriptor format id */
+
+	bool q_set;             /* if rx queue has been configured */
+	bool q_started;         /* if rx queue has been started */
+
+	/* only valid for split queue mode */
+	uint8_t expected_gen_id;
+	struct idpf_rx_queue *bufq1;
+	struct idpf_rx_queue *bufq2;
+
+	uint64_t offloads;
+	uint32_t hw_register_set;
+};
+
 struct idpf_tx_entry {
 	struct rte_mbuf *mbuf;
 	uint16_t next_id;
@@ -63,6 +105,10 @@  struct idpf_tx_queue {
 	struct idpf_tx_queue *complq;
 };
 
+int idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+			uint16_t nb_desc, unsigned int socket_id,
+			const struct rte_eth_rxconf *rx_conf,
+			struct rte_mempool *mp);
 int idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			uint16_t nb_desc, unsigned int socket_id,
 			const struct rte_eth_txconf *tx_conf);