[11/11] net/mlx5: support shared Rx queue

Message ID 20210926111904.237736-12-xuemingl@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Raslan Darawsheh
Series: net/mlx5: support shared Rx queue

Checks

Context               Check    Description
ci/Intel-compilation  warning  apply issues
ci/checkpatch         warning  coding style issues

Commit Message

Xueming(Steven) Li Sept. 26, 2021, 11:19 a.m. UTC
This patch introduces shared RXQ. All shared Rx queues with the same
group and queue ID share the same rxq_ctrl. Rxq_ctrl and rxq_data are
shared: all queues from different member ports share the same WQ and
CQ, essentially one Rx WQ, and mbufs are filled into this singleton WQ.

The shared rxq_data is set as the rxq object in the device Rx queues
of all member ports and is used for receiving packets. Polling a queue
of any member port may return packets of any member; mbuf->port
identifies the source port.
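
For illustration only (not part of this patch), a minimal
application-side sketch of the intended usage. It assumes the
RTE_ETH_RX_OFFLOAD_SHARED_RXQ offload and the shared_group field that
earlier patches of this series add to struct rte_eth_rxconf; the port
IDs, descriptor count, and mempool are hypothetical:

    #include <stdio.h>
    #include <rte_ethdev.h>
    #include <rte_lcore.h>
    #include <rte_mbuf.h>

    /* Join queue 0 of member ports 0 and 1 into share group 1, then
     * poll one member queue. Assumes EAL init and port configuration
     * (with RTE_ETH_RX_OFFLOAD_SHARED_RXQ in rxmode.offloads) are
     * already done and "mp" is a valid mbuf mempool.
     */
    static void
    shared_rxq_sketch(struct rte_mempool *mp)
    {
        struct rte_eth_rxconf rxconf = {
            .offloads = RTE_ETH_RX_OFFLOAD_SHARED_RXQ,
            .shared_group = 1, /* members of group 1 share one Rx WQ */
        };
        struct rte_mbuf *bufs[32];
        uint16_t ports[2] = { 0, 1 }; /* member (representor) ports */
        uint16_t n, nb;

        /* Same group and same queue index => one shared rxq_ctrl. */
        for (n = 0; n < 2; n++)
            if (rte_eth_rx_queue_setup(ports[n], 0, 512, SOCKET_ID_ANY,
                                       &rxconf, mp) != 0)
                return;
        /* Packets of any member can show up here; mbuf->port tells
         * which member port actually received each packet.
         */
        nb = rte_eth_rx_burst(ports[0], 0, bufs, 32);
        for (n = 0; n < nb; n++)
            printf("packet from member port %u\n", bufs[n]->port);
    }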

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
---
 doc/guides/nics/features/mlx5.ini |   1 +
 doc/guides/nics/mlx5.rst          |   6 +
 drivers/net/mlx5/linux/mlx5_os.c  |   2 +
 drivers/net/mlx5/mlx5.h           |   2 +
 drivers/net/mlx5/mlx5_devx.c      |   9 +-
 drivers/net/mlx5/mlx5_rx.h        |   7 +
 drivers/net/mlx5/mlx5_rxq.c       | 208 ++++++++++++++++++++++++++----
 drivers/net/mlx5/mlx5_trigger.c   |  76 ++++++-----
 8 files changed, 255 insertions(+), 56 deletions(-)

Patch

diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index f01abd4231f..ff5e669acc1 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -11,6 +11,7 @@  Removal event        = Y
 Rx interrupt         = Y
 Fast mbuf free       = Y
 Queue start/stop     = Y
+Shared Rx queue      = Y
 Burst mode info      = Y
 Power mgmt address monitor = Y
 MTU update           = Y
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index ca3e7f560da..494ee957c1d 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -113,6 +113,7 @@  Features
 - Connection tracking.
 - Sub-Function representors.
 - Sub-Function.
+- Shared Rx queue.
 
 
 Limitations
@@ -464,6 +465,11 @@  Limitations
   - In order to achieve best insertion rate, application should manage the flows per lcore.
   - Better to disable memory reclaim by setting ``reclaim_mem_mode`` to 0 to accelerate the flow object allocation and release with cache.
 
+ Shared Rx queue:
+
+  - Received packet and byte counters of all devices in the same share group are identical.
+  - Received packet and byte counters of all queues with the same group and queue ID are identical.
+
 Statistics
 ----------
 
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 27233b679c6..b631768b4f9 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -457,6 +457,7 @@  mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 			mlx5_glue->dr_create_flow_action_default_miss();
 	if (!sh->default_miss_action)
 		DRV_LOG(WARNING, "Default miss action is not supported.");
+	LIST_INIT(&sh->shared_rxqs);
 	return 0;
 error:
 	/* Rollback the created objects. */
@@ -531,6 +532,7 @@  mlx5_os_free_shared_dr(struct mlx5_priv *priv)
 	MLX5_ASSERT(sh && sh->refcnt);
 	if (sh->refcnt > 1)
 		return;
+	MLX5_ASSERT(LIST_EMPTY(&sh->shared_rxqs));
 #ifdef HAVE_MLX5DV_DR
 	if (sh->rx_domain) {
 		mlx5_glue->dr_destroy_domain(sh->rx_domain);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 6a9c99a8826..c671c8a354f 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1193,6 +1193,7 @@  struct mlx5_dev_ctx_shared {
 	struct mlx5_flex_parser_profiles fp[MLX5_FLEX_PARSER_MAX];
 	/* Flex parser profiles information. */
 	void *devx_rx_uar; /* DevX UAR for Rx. */
+	LIST_HEAD(shared_rxqs, mlx5_rxq_ctrl) shared_rxqs; /* Shared RXQs. */
 	struct mlx5_aso_age_mng *aso_age_mng;
 	/* Management data for aging mechanism using ASO Flow Hit. */
 	struct mlx5_geneve_tlv_option_resource *geneve_tlv_option_resource;
@@ -1257,6 +1258,7 @@  struct mlx5_rxq_obj {
 		};
 		struct mlx5_devx_obj *rq; /* DevX RQ object for hairpin. */
 		struct {
+			struct mlx5_devx_rmp devx_rmp; /* RMP for shared RQ. */
 			struct mlx5_devx_cq cq_obj; /* DevX CQ object. */
 			void *devx_channel;
 		};
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 371ff387c99..01561639038 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -170,6 +170,8 @@  mlx5_rxq_devx_obj_release(struct mlx5_rxq_priv *rxq)
 		memset(&rxq->devx_rq, 0, sizeof(rxq->devx_rq));
 		mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
 		memset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));
+		if (!RXQ_CTRL_LAST(rxq))
+			return;
 		if (rxq_obj->devx_channel) {
 			mlx5_os_devx_destroy_event_channel
 							(rxq_obj->devx_channel);
@@ -270,6 +272,8 @@  mlx5_rxq_create_devx_rq_resources(struct mlx5_rxq_priv *rxq)
 	rq_attr.wq_attr.pd = priv->sh->pdn;
 	rq_attr.counter_set_id = priv->counter_set_id;
 	/* Create RQ using DevX API. */
+	if (rxq_data->shared)
+		rxq->devx_rq.rmp = &rxq_ctrl->obj->devx_rmp;
 	return mlx5_devx_rq_create(priv->sh->ctx, &rxq->devx_rq,
 				   wqe_size, log_desc_n, &rq_attr,
 				   rxq_ctrl->socket);
@@ -495,7 +499,10 @@  mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)
 	ret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);
 	if (ret)
 		goto error;
-	rxq_data->wqes = (void *)(uintptr_t)rxq->devx_rq.wq.umem_buf;
+	if (rxq_data->shared)
+		rxq_data->wqes = (void *)(uintptr_t)tmpl->devx_rmp.wq.umem_buf;
+	else
+		rxq_data->wqes = (void *)(uintptr_t)rxq->devx_rq.wq.umem_buf;
 	rxq_data->rq_db = (uint32_t *)(uintptr_t)rxq->devx_rq.db_rec;
 	mlx5_rxq_initialize(rxq_data);
 	priv->dev_data->rx_queue_state[rxq->idx] = RTE_ETH_QUEUE_STATE_STARTED;
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index 161399c764d..a83fa6e8db1 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -26,6 +26,9 @@ 
 #define RXQ_PORT(rxq_ctrl) LIST_FIRST(&(rxq_ctrl)->owners)->priv
 #define RXQ_DEV(rxq_ctrl) ETH_DEV(RXQ_PORT(rxq_ctrl))
 #define RXQ_PORT_ID(rxq_ctrl) PORT_ID(RXQ_PORT(rxq_ctrl))
+#define RXQ_CTRL_LAST(rxq) \
+	(LIST_FIRST(&(rxq)->ctrl->owners) == (rxq) && \
+	LIST_NEXT((rxq), owner_entry) == NULL)
 
 struct mlx5_rxq_stats {
 #ifdef MLX5_PMD_SOFT_COUNTERS
@@ -107,6 +110,7 @@  struct mlx5_rxq_data {
 	unsigned int lro:1; /* Enable LRO. */
 	unsigned int dynf_meta:1; /* Dynamic metadata is configured. */
 	unsigned int mcqe_format:3; /* CQE compression format. */
+	unsigned int shared:1; /* Shared RXQ. */
 	volatile uint32_t *rq_db;
 	volatile uint32_t *cq_db;
 	uint16_t port_id;
@@ -169,6 +173,9 @@  struct mlx5_rxq_ctrl {
 	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
 	enum mlx5_rxq_type type; /* Rxq type. */
 	unsigned int socket; /* CPU socket ID for allocations. */
+	LIST_ENTRY(mlx5_rxq_ctrl) share_entry; /* Entry in shared RXQ list. */
+	uint32_t share_group; /* Group ID of shared RXQ. */
+	unsigned int started:1; /* Whether (shared) RXQ has been started. */
 	unsigned int irq:1; /* Whether IRQ is enabled. */
 	uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
 	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index cde01a48022..45f78ad076b 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -28,6 +28,7 @@ 
 #include "mlx5_rx.h"
 #include "mlx5_utils.h"
 #include "mlx5_autoconf.h"
+#include "mlx5_devx.h"
 
 
 /* Default RSS hash key also used for ConnectX-3. */
@@ -352,6 +353,9 @@  mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
 	if (MLX5_LRO_SUPPORTED(dev))
 		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+	if (priv->config.hca_attr.mem_rq_rmp &&
+	    priv->obj_ops.rxq_obj_new == devx_obj_ops.rxq_obj_new)
+		offloads |= RTE_ETH_RX_OFFLOAD_SHARED_RXQ;
 	return offloads;
 }
 
@@ -648,6 +652,114 @@  mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
 	return 0;
 }
 
+/**
+ * Get the shared Rx queue object that matches group and queue index.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param group
+ *   Shared RXQ group.
+ * @param idx
+ *   RX queue index.
+ *
+ * @return
+ *   Matching shared RXQ object, or NULL if not found.
+ */
+static struct mlx5_rxq_ctrl *
+mlx5_shared_rxq_get(struct rte_eth_dev *dev, uint32_t group, uint16_t idx)
+{
+	struct mlx5_rxq_ctrl *rxq_ctrl;
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	LIST_FOREACH(rxq_ctrl, &priv->sh->shared_rxqs, share_entry) {
+		if (rxq_ctrl->share_group == group && rxq_ctrl->rxq.idx == idx)
+			return rxq_ctrl;
+	}
+	return NULL;
+}
+
+/**
+ * Check whether requested Rx queue configuration matches shared RXQ.
+ *
+ * @param rxq_ctrl
+ *   Pointer to shared RXQ.
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param idx
+ *   Queue index.
+ * @param desc
+ *   Number of descriptors to configure in queue.
+ * @param socket
+ *   NUMA socket on which memory must be allocated.
+ * @param[in] conf
+ *   Thresholds parameters.
+ * @param mp
+ *   Memory pool for buffer allocations.
+ *
+ * @return
+ *   true if the requested configuration matches the shared RXQ, false otherwise.
+ */
+static bool
+mlx5_shared_rxq_match(struct mlx5_rxq_ctrl *rxq_ctrl, struct rte_eth_dev *dev,
+		      uint16_t idx, uint16_t desc, unsigned int socket,
+		      const struct rte_eth_rxconf *conf,
+		      struct rte_mempool *mp)
+{
+	struct mlx5_priv *spriv = LIST_FIRST(&rxq_ctrl->owners)->priv;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	unsigned int mprq_stride_nums = priv->config.mprq.stride_num_n ?
+		priv->config.mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
+
+	RTE_SET_USED(conf);
+	if (rxq_ctrl->socket != socket) {
+		DRV_LOG(ERR, "port %u queue index %u failed to join shared group: socket mismatch",
+			dev->data->port_id, idx);
+		return false;
+	}
+	if (priv->config.mprq.enabled)
+		desc >>= mprq_stride_nums;
+	if (rxq_ctrl->rxq.elts_n != log2above(desc)) {
+		DRV_LOG(ERR, "port %u queue index %u failed to join shared group: descriptor number mismatch",
+			dev->data->port_id, idx);
+		return false;
+	}
+	if (priv->mtu != spriv->mtu) {
+		DRV_LOG(ERR, "port %u queue index %u failed to join shared group: mtu mismatch",
+			dev->data->port_id, idx);
+		return false;
+	}
+	if (priv->dev_data->dev_conf.intr_conf.rxq !=
+	    spriv->dev_data->dev_conf.intr_conf.rxq) {
+		DRV_LOG(ERR, "port %u queue index %u failed to join shared group: interrupt mismatch",
+			dev->data->port_id, idx);
+		return false;
+	}
+	if (!spriv->config.mprq.enabled && rxq_ctrl->rxq.mp != mp) {
+		DRV_LOG(ERR, "port %u queue index %u failed to join shared group: mempool mismatch",
+			dev->data->port_id, idx);
+		return false;
+	}
+	if (priv->config.hw_padding != spriv->config.hw_padding) {
+		DRV_LOG(ERR, "port %u queue index %u failed to join shared group: padding mismatch",
+			dev->data->port_id, idx);
+		return false;
+	}
+	if (memcmp(&priv->config.mprq, &spriv->config.mprq,
+		   sizeof(priv->config.mprq)) != 0) {
+		DRV_LOG(ERR, "port %u queue index %u failed to join shared group: MPRQ mismatch",
+			dev->data->port_id, idx);
+		return false;
+	}
+	if (priv->config.cqe_comp != spriv->config.cqe_comp ||
+	    (priv->config.cqe_comp &&
+	     priv->config.cqe_comp_fmt != spriv->config.cqe_comp_fmt)) {
+		DRV_LOG(ERR, "port %u queue index %u failed to join shared group: CQE compression mismatch",
+			dev->data->port_id, idx);
+		return false;
+	}
+	return true;
+}
+
 /**
  *
  * @param dev
@@ -673,12 +785,14 @@  mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rxq_priv *rxq;
-	struct mlx5_rxq_ctrl *rxq_ctrl;
+	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
 	struct rte_eth_rxseg_split *rx_seg =
 				(struct rte_eth_rxseg_split *)conf->rx_seg;
 	struct rte_eth_rxseg_split rx_single = {.mp = mp};
 	uint16_t n_seg = conf->rx_nseg;
 	int res;
+	uint64_t offloads = conf->offloads |
+			    dev->data->dev_conf.rxmode.offloads;
 
 	if (mp) {
 		/*
@@ -690,9 +804,6 @@  mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		n_seg = 1;
 	}
 	if (n_seg > 1) {
-		uint64_t offloads = conf->offloads |
-				    dev->data->dev_conf.rxmode.offloads;
-
 		/* The offloads should be checked on rte_eth_dev layer. */
 		MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);
 		if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
@@ -704,9 +815,32 @@  mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		}
 		MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
 	}
+	if (offloads & RTE_ETH_RX_OFFLOAD_SHARED_RXQ) {
+		if (!priv->config.hca_attr.mem_rq_rmp) {
+			DRV_LOG(ERR, "port %u queue index %u shared Rx queue not supported by fw",
+				     dev->data->port_id, idx);
+			rte_errno = EINVAL;
+			return -rte_errno;
+		}
+		if (priv->obj_ops.rxq_obj_new != devx_obj_ops.rxq_obj_new) {
+			DRV_LOG(ERR, "port %u queue index %u shared Rx queue needs DevX api",
+				     dev->data->port_id, idx);
+			rte_errno = EINVAL;
+			return -rte_errno;
+		}
+		/* Try to reuse shared RXQ. */
+		rxq_ctrl = mlx5_shared_rxq_get(dev, conf->shared_group, idx);
+		if (rxq_ctrl != NULL &&
+		    !mlx5_shared_rxq_match(rxq_ctrl, dev, idx, desc, socket,
+					   conf, mp)) {
+			rte_errno = EINVAL;
+			return -rte_errno;
+		}
+	}
 	res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
 	if (res)
 		return res;
+	/* Allocate RXQ. */
 	rxq = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, sizeof(*rxq), 0,
 			  SOCKET_ID_ANY);
 	if (!rxq) {
@@ -718,14 +852,22 @@  mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	rxq->priv = priv;
 	rxq->idx = idx;
 	(*priv->rxq_privs)[idx] = rxq;
-	rxq_ctrl = mlx5_rxq_new(dev, rxq, desc, socket, conf, rx_seg, n_seg);
-	if (!rxq_ctrl) {
-		DRV_LOG(ERR, "port %u unable to allocate rx queue index %u",
-			dev->data->port_id, idx);
-		mlx5_free(rxq);
-		(*priv->rxq_privs)[idx] = NULL;
-		rte_errno = ENOMEM;
-		return -rte_errno;
+	if (rxq_ctrl != NULL) {
+		/* Join owner list of shared RXQ. */
+		LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);
+		rxq->ctrl = rxq_ctrl;
+	} else {
+		/* Create new shared RXQ. */
+		rxq_ctrl = mlx5_rxq_new(dev, rxq, desc, socket, conf, rx_seg,
+					n_seg);
+		if (rxq_ctrl == NULL) {
+			DRV_LOG(ERR, "port %u unable to allocate rx queue index %u",
+				dev->data->port_id, idx);
+			mlx5_free(rxq);
+			(*priv->rxq_privs)[idx] = NULL;
+			rte_errno = ENOMEM;
+			return -rte_errno;
+		}
 	}
 	DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
 		dev->data->port_id, idx);
@@ -1071,6 +1213,9 @@  mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
 	struct mlx5_rxq_obj *rxq_obj;
 
 	LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
+		if (rxq_obj->rxq_ctrl->rxq.shared &&
+		    !LIST_EMPTY(&rxq_obj->rxq_ctrl->owners))
+			continue;
 		DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
 			dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
 		++ret;
@@ -1348,6 +1493,10 @@  mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
 		return NULL;
 	}
 	LIST_INIT(&tmpl->owners);
+	if (offloads & RTE_ETH_RX_OFFLOAD_SHARED_RXQ) {
+		tmpl->rxq.shared = 1;
+		LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);
+	}
 	rxq->ctrl = tmpl;
 	LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
 	MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
@@ -1771,6 +1920,7 @@  mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rxq_priv *rxq;
 	struct mlx5_rxq_ctrl *rxq_ctrl;
+	bool free_ctrl;
 
 	if (priv->rxq_privs == NULL)
 		return 0;
@@ -1780,24 +1930,36 @@  mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
 	if (mlx5_rxq_deref(dev, idx) > 1)
 		return 1;
 	rxq_ctrl = rxq->ctrl;
-	if (rxq_ctrl->obj != NULL) {
+	/* Whether this rxq is the last owner of the shared RXQ control. */
+	free_ctrl = RXQ_CTRL_LAST(rxq);
+	if (rxq->devx_rq.rq != NULL)
 		priv->obj_ops.rxq_obj_release(rxq);
-		LIST_REMOVE(rxq_ctrl->obj, next);
-		mlx5_free(rxq_ctrl->obj);
-		rxq_ctrl->obj = NULL;
+	if (free_ctrl) {
+		if (rxq_ctrl->obj != NULL) {
+			LIST_REMOVE(rxq_ctrl->obj, next);
+			mlx5_free(rxq_ctrl->obj);
+			rxq_ctrl->obj = NULL;
+		}
+		rxq_ctrl->started = false;
 	}
 	if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
-		rxq_free_elts(rxq_ctrl);
+		if (free_ctrl)
+			rxq_free_elts(rxq_ctrl);
 		dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
 	}
 	if (!__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED)) {
-		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
-			mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
-			mlx5_mprq_free_mp(dev, rxq_ctrl);
-		}
 		LIST_REMOVE(rxq, owner_entry);
-		LIST_REMOVE(rxq_ctrl, next);
-		mlx5_free(rxq_ctrl);
+		if (free_ctrl) {
+			if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
+				mlx5_mr_btree_free
+					(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
+				mlx5_mprq_free_mp(dev, rxq_ctrl);
+			}
+			if (rxq_ctrl->rxq.shared)
+				LIST_REMOVE(rxq_ctrl, share_entry);
+			LIST_REMOVE(rxq_ctrl, next);
+			mlx5_free(rxq_ctrl);
+		}
 		dev->data->rx_queues[idx] = NULL;
 		mlx5_free(rxq);
 		(*priv->rxq_privs)[idx] = NULL;
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 1e865e74e39..2fd8c70cce5 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -122,6 +122,46 @@  mlx5_rxq_stop(struct rte_eth_dev *dev)
 		mlx5_rxq_release(dev, i);
 }
 
+static int
+mlx5_rxq_ctrl_prepare(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl,
+		      unsigned int idx)
+{
+	int ret = 0;
+
+	if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
+		if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
+			/* Allocate/reuse/resize mempool for MPRQ. */
+			if (mlx5_mprq_alloc_mp(dev, rxq_ctrl) < 0)
+				return -rte_errno;
+
+			/* Pre-register Rx mempools. */
+			mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
+					  rxq_ctrl->rxq.mprq_mp);
+		} else {
+			uint32_t s;
+			for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++)
+				mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
+						  rxq_ctrl->rxq.rxseg[s].mp);
+		}
+		ret = rxq_alloc_elts(rxq_ctrl);
+		if (ret)
+			return ret;
+	}
+	MLX5_ASSERT(!rxq_ctrl->obj);
+	rxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+				    sizeof(*rxq_ctrl->obj), 0,
+				    rxq_ctrl->socket);
+	if (!rxq_ctrl->obj) {
+		DRV_LOG(ERR, "Port %u Rx queue %u can't allocate resources.",
+			dev->data->port_id, idx);
+		rte_errno = ENOMEM;
+		return -rte_errno;
+	}
+	DRV_LOG(DEBUG, "Port %u rxq %u updated with %p.", dev->data->port_id,
+		idx, (void *)&rxq_ctrl->obj);
+	return 0;
+}
+
 /**
  * Start traffic on Rx queues.
  *
@@ -149,45 +189,17 @@  mlx5_rxq_start(struct rte_eth_dev *dev)
 		if (rxq == NULL)
 			continue;
 		rxq_ctrl = rxq->ctrl;
-		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
-			if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
-				/* Allocate/reuse/resize mempool for MPRQ. */
-				if (mlx5_mprq_alloc_mp(dev, rxq_ctrl) < 0)
-					goto error;
-				/* Pre-register Rx mempools. */
-				mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
-						  rxq_ctrl->rxq.mprq_mp);
-			} else {
-				uint32_t s;
-
-				for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++)
-					mlx5_mr_update_mp
-						(dev, &rxq_ctrl->rxq.mr_ctrl,
-						rxq_ctrl->rxq.rxseg[s].mp);
-			}
-			ret = rxq_alloc_elts(rxq_ctrl);
-			if (ret)
+		if (!rxq_ctrl->started) {
+			if (mlx5_rxq_ctrl_prepare(dev, rxq_ctrl, i) < 0)
 				goto error;
-		}
-		MLX5_ASSERT(!rxq_ctrl->obj);
-		rxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
-					    sizeof(*rxq_ctrl->obj), 0,
-					    rxq_ctrl->socket);
-		if (!rxq_ctrl->obj) {
-			DRV_LOG(ERR,
-				"Port %u Rx queue %u can't allocate resources.",
-				dev->data->port_id, i);
-			rte_errno = ENOMEM;
-			goto error;
+			LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);
+			rxq_ctrl->started = true;
 		}
 		ret = priv->obj_ops.rxq_obj_new(rxq);
 		if (ret) {
 			mlx5_free(rxq_ctrl->obj);
 			goto error;
 		}
-		DRV_LOG(DEBUG, "Port %u rxq %u updated with %p.",
-			dev->data->port_id, i, (void *)&rxq_ctrl->obj);
-		LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);
 	}
 	return 0;
 error: