[v3,3/4] net/mlx5: separate the flow handle resource

Message ID 1585062980-27196-4-git-send-email-bingz@mellanox.com (mailing list archive)
State Superseded, archived
Delegated to: Raslan Darawsheh
Series: net/mlx5: move to non-cached mode for flow rules

Checks

Context              Check    Description
ci/checkpatch        success  coding style OK
ci/Intel-compilation success  Compilation OK

Commit Message

Bing Zhao March 24, 2020, 3:16 p.m. UTC
  Only the members of the flow handle structure are needed when a flow
is destroyed. The other members of the mlx5 device flow resource are
only used during flow creation, and they can be reused for different
flows.
So only the device flow handle structure needs to be saved for further
usage. It can be separated from the whole mlx5 device flow and stored
in a list for each rte flow.
The other members are pre-allocated in an array, and an index is used
to help apply each device flow to the hardware.
The flow handle sizes of Verbs and DV mode differ, so some calculation
can be done before allocating a Verbs handle. The total memory
consumption is then lower for Verbs when no inbox driver is used.

Signed-off-by: Bing Zhao <bingz@mellanox.com>
---
 drivers/net/mlx5/mlx5.c            |   7 ++
 drivers/net/mlx5/mlx5.h            |   4 +
 drivers/net/mlx5/mlx5_flow.c       | 127 +++++++++++++-------
 drivers/net/mlx5/mlx5_flow.h       |  89 ++++++++++++--
 drivers/net/mlx5/mlx5_flow_dv.c    | 230 +++++++++++++++++++++----------------
 drivers/net/mlx5/mlx5_flow_verbs.c | 139 ++++++++++++----------
 drivers/net/mlx5/mlx5_trigger.c    |   1 +
 7 files changed, 381 insertions(+), 216 deletions(-)
  
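In outline, the patch splits the per-flow state as follows. This is a
condensed sketch assembled from the hunks below; field lists are
abbreviated, most members are omitted, and it is meant only as a reading aid:

/* Kept per device flow until destruction, linked into the rte flow. */
struct mlx5_flow_handle {
	LIST_ENTRY(mlx5_flow_handle) next; /* Next handle of the same flow. */
	uint64_t layers;    /* Bit-fields of present layers. */
	uint64_t act_flags; /* Bit-fields of detected actions. */
	/* ... objects needed for destroy: ib_flow, hrxq, dvh, ... */
};

/* Creation-only workspace, reused across flows, never kept. */
struct mlx5_flow {
	struct rte_flow *flow; /* Pointer to the main flow. */
	union {
		struct mlx5_flow_dv_workspace dv;       /* DV translation. */
		struct mlx5_flow_verbs_workspace verbs; /* Verbs specs. */
	};
	struct mlx5_flow_handle *handle; /* The only part that is saved. */
};

/* Per-port intermediate buffer, allocated once at probe time. */
struct mlx5_priv {
	/* ... */
	void *inter_flows; /* Array of MLX5_NUM_MAX_DEV_FLOWS mlx5_flow. */
	int flow_idx;      /* Next free slot, reset on each flow creation. */
};

Since the DV-only fields sit at the tail of struct mlx5_flow_handle, a Verbs
handle can be allocated with MLX5_FLOW_HANDLE_VERBS_SIZE, i.e. without the
trailing DV part, which is what makes the memory saving for Verbs possible.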

Patch

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 0613f70..8dda0c3 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1246,6 +1246,8 @@  struct mlx5_flow_id_pool *
 	 */
 	mlx5_flow_list_flush(dev, &priv->flows, true);
 	mlx5_flow_meter_flush(dev, NULL);
+	/* Free the intermediate buffers for flow creation. */
+	mlx5_flow_free_intermediate(dev);
 	/* Prevent crashes when queues are still in use. */
 	dev->rx_pkt_burst = removed_rx_burst;
 	dev->tx_pkt_burst = removed_tx_burst;
@@ -2768,6 +2770,11 @@  struct mlx5_flow_id_pool *
 			err = ENOTSUP;
 			goto error;
 	}
+	/*
+	 * Allocate the buffer for flow creation, just once.
+	 * The allocation must be done before any flow is created.
+	 */
+	mlx5_flow_alloc_intermediate(eth_dev);
 	/* Query availibility of metadata reg_c's. */
 	err = mlx5_flow_discover_mreg_c(eth_dev);
 	if (err < 0) {
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 98e5fa5..2cc4c76 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -517,6 +517,8 @@  struct mlx5_priv {
 	struct mlx5_drop drop_queue; /* Flow drop queues. */
 	struct mlx5_flows flows; /* RTE Flow rules. */
 	struct mlx5_flows ctrl_flows; /* Control flow rules. */
+	void *inter_flows; /* Intermediate resources for flow creation. */
+	int flow_idx; /* Intermediate device flow index. */
 	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
 	LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
 	LIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */
@@ -728,6 +730,8 @@  int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
 void mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list);
 int mlx5_flow_start_default(struct rte_eth_dev *dev);
 void mlx5_flow_stop_default(struct rte_eth_dev *dev);
+void mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev);
+void mlx5_flow_free_intermediate(struct rte_eth_dev *dev);
 int mlx5_flow_verify(struct rte_eth_dev *dev);
 int mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, uint32_t queue);
 int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 230f071..bf0728d 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -712,17 +712,19 @@  uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  *
  * @param[in] dev
  *   Pointer to the Ethernet device structure.
- * @param[in] dev_flow
- *   Pointer to device flow structure.
+ * @param[in] flow
+ *   Pointer to flow structure.
+ * @param[in] dev_handle
+ *   Pointer to device flow handle structure.
  */
 static void
-flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow,
+		       struct mlx5_flow_handle *dev_handle)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_flow *flow = dev_flow->flow;
-	const int mark = !!(dev_flow->handle.act_flags &
+	const int mark = !!(dev_handle->act_flags &
 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
-	const int tunnel = !!(dev_flow->handle.layers & MLX5_FLOW_LAYER_TUNNEL);
+	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int i;
 
 	for (i = 0; i != flow->rss.queue_num; ++i) {
@@ -751,7 +753,7 @@  uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			/* Increase the counter matching the flow. */
 			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
 				if ((tunnels_info[j].tunnel &
-				     dev_flow->handle.layers) ==
+				     dev_handle->layers) ==
 				    tunnels_info[j].tunnel) {
 					rxq_ctrl->flow_tunnels_n[j]++;
 					break;
@@ -773,10 +775,10 @@  uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 static void
 flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *dev_handle;
 
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
-		flow_drv_rxq_flags_set(dev, dev_flow);
+	LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+		flow_drv_rxq_flags_set(dev, flow, dev_handle);
 }
 
 /**
@@ -785,17 +787,19 @@  uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param[in] dev_flow
- *   Pointer to the device flow.
+ * @param[in] flow
+ *   Pointer to flow structure.
+ * @param[in] dev_handle
+ *   Pointer to the device flow handle structure.
  */
 static void
-flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
+flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow,
+			struct mlx5_flow_handle *dev_handle)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_flow *flow = dev_flow->flow;
-	const int mark = !!(dev_flow->handle.act_flags &
+	const int mark = !!(dev_handle->act_flags &
 			    (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
-	const int tunnel = !!(dev_flow->handle.layers & MLX5_FLOW_LAYER_TUNNEL);
+	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int i;
 
 	MLX5_ASSERT(dev->data->dev_started);
@@ -820,7 +824,7 @@  uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			/* Decrease the counter matching the flow. */
 			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
 				if ((tunnels_info[j].tunnel &
-				     dev_flow->handle.layers) ==
+				     dev_handle->layers) ==
 				    tunnels_info[j].tunnel) {
 					rxq_ctrl->flow_tunnels_n[j]--;
 					break;
@@ -843,10 +847,10 @@  uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 static void
 flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *dev_handle;
 
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
-		flow_drv_rxq_flags_trim(dev, dev_flow);
+	LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+		flow_drv_rxq_flags_trim(dev, flow, dev_handle);
 }
 
 /**
@@ -2309,11 +2313,11 @@  uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
 			     struct rte_flow *flow)
 {
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *dev_handle;
 
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next)
-		if (dev_flow->handle.qrss_id)
-			flow_qrss_free_id(dev, dev_flow->handle.qrss_id);
+	LIST_FOREACH(dev_handle, &flow->dev_handles, next)
+		if (dev_handle->qrss_id)
+			flow_qrss_free_id(dev, dev_handle->qrss_id);
 }
 
 static int
@@ -2329,7 +2333,8 @@  uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 }
 
 static struct mlx5_flow *
-flow_null_prepare(const struct rte_flow_attr *attr __rte_unused,
+flow_null_prepare(struct rte_eth_dev *dev __rte_unused,
+		  const struct rte_flow_attr *attr __rte_unused,
 		  const struct rte_flow_item items[] __rte_unused,
 		  const struct rte_flow_action actions[] __rte_unused,
 		  struct rte_flow_error *error)
@@ -2469,6 +2474,8 @@  uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  *   setting backward reference to the flow should be done out of this function.
  *   layers field is not filled either.
  *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
  * @param[in] attr
  *   Pointer to the flow attributes.
  * @param[in] items
@@ -2482,7 +2489,8 @@  uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
  */
 static inline struct mlx5_flow *
-flow_drv_prepare(const struct rte_flow *flow,
+flow_drv_prepare(struct rte_eth_dev *dev,
+		 const struct rte_flow *flow,
 		 const struct rte_flow_attr *attr,
 		 const struct rte_flow_item items[],
 		 const struct rte_flow_action actions[],
@@ -2493,7 +2501,7 @@  uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 
 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
 	fops = flow_get_drv_ops(type);
-	return fops->prepare(attr, items, actions, error);
+	return fops->prepare(dev, attr, items, actions, error);
 }
 
 /**
@@ -2701,17 +2709,17 @@  uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	 * help to do the optimization work for source code.
 	 * If no decap actions, use the layers directly.
 	 */
-	if (!(dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DECAP))
-		return dev_flow->handle.layers;
+	if (!(dev_flow->handle->act_flags & MLX5_FLOW_ACTION_DECAP))
+		return dev_flow->handle->layers;
 	/* Convert L3 layers with decap action. */
-	if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+	if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
-	else if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+	else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
 		layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
 	/* Convert L4 layers with decap action.  */
-	if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
+	if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
 		layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
-	else if (dev_flow->handle.layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
+	else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
 		layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
 	return layers;
 }
@@ -3412,7 +3420,7 @@  uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  * The last stage of splitting chain, just creates the subflow
  * without any modification.
  *
- * @param dev
+ * @param[in] dev
  *   Pointer to Ethernet device.
  * @param[in] flow
  *   Parent flow structure pointer.
@@ -3445,19 +3453,19 @@  uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 {
 	struct mlx5_flow *dev_flow;
 
-	dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
+	dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, error);
 	if (!dev_flow)
 		return -rte_errno;
 	dev_flow->flow = flow;
 	dev_flow->external = external;
 	/* Subflow object was created, we must include one in the list. */
-	LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+	LIST_INSERT_HEAD(&flow->dev_handles, dev_flow->handle, next);
 	/*
 	 * If dev_flow is as one of the suffix flow, some actions in suffix
 	 * flow may need some user defined item layer flags.
 	 */
 	if (prefix_layers)
-		dev_flow->handle.layers = prefix_layers;
+		dev_flow->handle->layers = prefix_layers;
 	if (sub_flow)
 		*sub_flow = dev_flow;
 	return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
@@ -3972,7 +3980,7 @@  uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			 * reallocation becomes possible (for example, for
 			 * other flows in other threads).
 			 */
-			dev_flow->handle.qrss_id = qrss_id;
+			dev_flow->handle->qrss_id = qrss_id;
 			ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
 						   error);
 			if (ret < 0)
@@ -4085,7 +4093,7 @@  uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			ret = -rte_errno;
 			goto exit;
 		}
-		dev_flow->handle.mtr_flow_id = mtr_tag_id;
+		dev_flow->handle->mtr_flow_id = mtr_tag_id;
 		/* Setting the sfx group atrr. */
 		sfx_attr.group = sfx_attr.transfer ?
 				(MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
@@ -4256,7 +4264,7 @@  uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
 		flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
 	}
-	LIST_INIT(&flow->dev_flows);
+	LIST_INIT(&flow->dev_handles);
 	if (rss && rss->types) {
 		unsigned int graph_root;
 
@@ -4271,6 +4279,8 @@  uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		buf->entries = 1;
 		buf->entry[0].pattern = (void *)(uintptr_t)items;
 	}
+	/* Reset device flow index to 0. */
+	priv->flow_idx = 0;
 	for (i = 0; i < buf->entries; ++i) {
 		/*
 		 * The splitter may create multiple dev_flows,
@@ -4289,13 +4299,13 @@  uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
 		attr_tx.ingress = 0;
 		attr_tx.egress = 1;
-		dev_flow = flow_drv_prepare(flow, &attr_tx, items_tx.items,
+		dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items,
 					    actions_hairpin_tx.actions, error);
 		if (!dev_flow)
 			goto error;
 		dev_flow->flow = flow;
 		dev_flow->external = 0;
-		LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
+		LIST_INSERT_HEAD(&flow->dev_handles, dev_flow->handle, next);
 		ret = flow_drv_translate(dev, dev_flow, &attr_tx,
 					 items_tx.items,
 					 actions_hairpin_tx.actions, error);
@@ -4543,8 +4553,6 @@  struct rte_flow *
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param list
- *   Pointer to a TAILQ flow list.
  */
 void
 mlx5_flow_stop_default(struct rte_eth_dev *dev)
@@ -4570,6 +4578,37 @@  struct rte_flow *
 }
 
 /**
+ * Allocate intermediate resources for flow creation.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	if (!priv->inter_flows)
+		priv->inter_flows = rte_calloc(__func__, MLX5_NUM_MAX_DEV_FLOWS,
+					       sizeof(struct mlx5_flow), 0);
+}
+
+/**
+ * Free intermediate resources for flows.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_flow_free_intermediate(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	rte_free(priv->inter_flows);
+	priv->inter_flows = NULL;
+}
+
+/**
  * Verify the flow list is empty
  *
  * @param dev
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index f3aea53..0f0e59d 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -500,6 +500,8 @@  struct mlx5_flow_handle_dv {
 
 /** Device flow handle structure: used both for creating & destroying. */
 struct mlx5_flow_handle {
+	LIST_ENTRY(mlx5_flow_handle) next;
+	/**< Pointer to next device flow handle. */
 	uint64_t layers;
 	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
 	uint64_t act_flags;
@@ -517,6 +519,18 @@  struct mlx5_flow_handle {
 };
 
 /*
+ * Size of the Verbs device flow handle structure only. Do not use the
+ * DV-only structure in Verbs mode; no DV flow attributes will be accessed.
+ * Macro offsetof() could also be used here.
+ */
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#define MLX5_FLOW_HANDLE_VERBS_SIZE \
+	(sizeof(struct mlx5_flow_handle) - sizeof(struct mlx5_flow_handle_dv))
+#else
+#define MLX5_FLOW_HANDLE_VERBS_SIZE (sizeof(struct mlx5_flow_handle))
+#endif
+
+/*
  * Max number of actions per DV flow.
  * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
  * in rdma-core file providers/mlx5/verbs.c.
@@ -524,7 +538,7 @@  struct mlx5_flow_handle {
 #define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8
 
 /** Device flow structure only for DV flow creation. */
-struct mlx5_flow_resource_dv {
+struct mlx5_flow_dv_workspace {
 	uint32_t group; /**< The group index. */
 	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
 	int actions_n; /**< number of actions. */
@@ -533,27 +547,79 @@  struct mlx5_flow_resource_dv {
 	/**< Holds the value that the packet is compared to. */
 };
 
+/*
+ * Maximal Verbs flow specifications & actions size.
+ * Some elements are mutually exclusive, but enough space should be allocated.
+ * Tunnel cases: 1. Max 2 Ethernet + IP(v6 len > v4 len) + TCP/UDP headers.
+ *               2. One tunnel header (exception: GRE + MPLS),
+ *                  SPEC length: GRE == tunnel.
+ * Actions: 1. 1 Mark OR Flag.
+ *          2. 1 Drop (if any).
+ *          3. No limitation for counters, but it makes no sense to support too
+ *             many counters in a single device flow.
+ */
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+#define MLX5_VERBS_MAX_SPEC_SIZE \
+		( \
+			(2 * (sizeof(struct ibv_flow_spec_eth) + \
+			      sizeof(struct ibv_flow_spec_ipv6) + \
+			      sizeof(struct ibv_flow_spec_tcp_udp)) + \
+			sizeof(struct ibv_flow_spec_gre) + \
+			sizeof(struct ibv_flow_spec_mpls)) \
+		)
+#else
+#define MLX5_VERBS_MAX_SPEC_SIZE \
+		( \
+			(2 * (sizeof(struct ibv_flow_spec_eth) + \
+			      sizeof(struct ibv_flow_spec_ipv6) + \
+			      sizeof(struct ibv_flow_spec_tcp_udp)) + \
+			sizeof(struct ibv_flow_spec_tunnel)) \
+		)
+#endif
+
+#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
+	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
+#define MLX5_VERBS_MAX_ACT_SIZE \
+		( \
+			sizeof(struct ibv_flow_spec_action_tag) + \
+			sizeof(struct ibv_flow_spec_action_drop) + \
+			sizeof(struct ibv_flow_spec_counter_action) * 4 \
+		)
+#else
+#define MLX5_VERBS_MAX_ACT_SIZE \
+		( \
+			sizeof(struct ibv_flow_spec_action_tag) + \
+			sizeof(struct ibv_flow_spec_action_drop) \
+		)
+#endif
+
+#define MLX5_VERBS_MAX_SPEC_ACT_SIZE \
+		(MLX5_VERBS_MAX_SPEC_SIZE + MLX5_VERBS_MAX_ACT_SIZE)
+
 /** Device flow structure only for Verbs flow creation. */
-struct mlx5_flow_resource_verbs {
+struct mlx5_flow_verbs_workspace {
 	unsigned int size; /**< Size of the attribute. */
-	struct ibv_flow_attr *attr; /**< Pointer to the Specification buffer. */
-	uint8_t *specs; /**< Pointer to the specifications. */
+	struct ibv_flow_attr attr; /**< Verbs flow attribute buffer. */
+	uint8_t specs[MLX5_VERBS_MAX_SPEC_ACT_SIZE];
+	/**< Specifications & actions buffer of verbs flow. */
 };
 
+/** Maximal number of device sub-flows supported. */
+#define MLX5_NUM_MAX_DEV_FLOWS 32
+
 /** Device flow structure. */
 struct mlx5_flow {
-	LIST_ENTRY(mlx5_flow) next; /**< Pointer to next device flow. */
 	struct rte_flow *flow; /**< Pointer to the main flow. */
 	uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
 	bool external; /**< true if the flow is created external to PMD. */
 	uint8_t ingress; /**< 1 if the flow is ingress. */
 	union {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-		struct mlx5_flow_resource_dv dv;
+		struct mlx5_flow_dv_workspace dv;
 #endif
-		struct mlx5_flow_resource_verbs verbs;
+		struct mlx5_flow_verbs_workspace verbs;
 	};
-	struct mlx5_flow_handle handle;
+	struct mlx5_flow_handle *handle;
 };
 
 /* Flow meter state. */
@@ -667,8 +733,8 @@  struct rte_flow {
 	struct mlx5_flow_mreg_copy_resource *mreg_copy;
 	/**< pointer to metadata register copy table resource. */
 	struct mlx5_flow_meter *meter; /**< Holds flow meter. */
-	LIST_HEAD(dev_flows, mlx5_flow) dev_flows;
-	/**< Device flows that are part of the flow. */
+	LIST_HEAD(dev_handles, mlx5_flow_handle) dev_handles;
+	/**< Device flow handles that are part of the flow. */
 	struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */
 	uint32_t hairpin_flow_id; /**< The flow id used for hairpin. */
 	uint32_t copy_applied:1; /**< The MARK copy Flow os applied. */
@@ -681,7 +747,8 @@  typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
 				    bool external,
 				    struct rte_flow_error *error);
 typedef struct mlx5_flow *(*mlx5_flow_prepare_t)
-	(const struct rte_flow_attr *attr, const struct rte_flow_item items[],
+	(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+	 const struct rte_flow_item items[],
 	 const struct rte_flow_action actions[], struct rte_flow_error *error);
 typedef int (*mlx5_flow_translate_t)(struct rte_eth_dev *dev,
 				     struct mlx5_flow *dev_flow,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index d1eec96..d532ce0 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -92,7 +92,7 @@ 
 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
 		  struct mlx5_flow *dev_flow, bool tunnel_decap)
 {
-	uint64_t layers = dev_flow->handle.layers;
+	uint64_t layers = dev_flow->handle->layers;
 
 	/*
 	 * If layers is already initialized, it means this dev_flow is the
@@ -2399,7 +2399,7 @@  struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->handle.dvh.encap_decap = cache_resource;
+			dev_flow->handle->dvh.encap_decap = cache_resource;
 			return 0;
 		}
 	}
@@ -2425,7 +2425,7 @@  struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
-	dev_flow->handle.dvh.encap_decap = cache_resource;
+	dev_flow->handle->dvh.encap_decap = cache_resource;
 	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -2476,7 +2476,7 @@  struct field_modify_info modify_tcp[] = {
 			(void *)&tbl_data->jump, cnt);
 	}
 	rte_atomic32_inc(&tbl_data->jump.refcnt);
-	dev_flow->handle.dvh.jump = &tbl_data->jump;
+	dev_flow->handle->dvh.jump = &tbl_data->jump;
 	return 0;
 }
 
@@ -2514,7 +2514,7 @@  struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->handle.dvh.port_id_action = cache_resource;
+			dev_flow->handle->dvh.port_id_action = cache_resource;
 			return 0;
 		}
 	}
@@ -2542,7 +2542,7 @@  struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
-	dev_flow->handle.dvh.port_id_action = cache_resource;
+	dev_flow->handle->dvh.port_id_action = cache_resource;
 	DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -2585,7 +2585,7 @@  struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->handle.dvh.push_vlan_res = cache_resource;
+			dev_flow->handle->dvh.push_vlan_res = cache_resource;
 			return 0;
 		}
 	}
@@ -2614,7 +2614,7 @@  struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
-	dev_flow->handle.dvh.push_vlan_res = cache_resource;
+	dev_flow->handle->dvh.push_vlan_res = cache_resource;
 	DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -3727,7 +3727,7 @@  struct field_modify_info modify_tcp[] = {
 				(void *)cache_resource,
 				rte_atomic32_read(&cache_resource->refcnt));
 			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->handle.dvh.modify_hdr = cache_resource;
+			dev_flow->handle->dvh.modify_hdr = cache_resource;
 			return 0;
 		}
 	}
@@ -3754,7 +3754,7 @@  struct field_modify_info modify_tcp[] = {
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
 	LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
-	dev_flow->handle.dvh.modify_hdr = cache_resource;
+	dev_flow->handle->dvh.modify_hdr = cache_resource;
 	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -5207,6 +5207,8 @@  struct field_modify_info modify_tcp[] = {
  * Internal preparation function. Allocates the DV flow size,
  * this size is constant.
  *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
  * @param[in] attr
  *   Pointer to the flow attributes.
  * @param[in] items
@@ -5221,22 +5223,41 @@  struct field_modify_info modify_tcp[] = {
  *   otherwise NULL and rte_errno is set.
  */
 static struct mlx5_flow *
-flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
+flow_dv_prepare(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr __rte_unused,
 		const struct rte_flow_item items[] __rte_unused,
 		const struct rte_flow_action actions[] __rte_unused,
 		struct rte_flow_error *error)
 {
-	size_t size = sizeof(struct mlx5_flow);
+	size_t size = sizeof(struct mlx5_flow_handle);
 	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *dev_handle;
+	struct mlx5_priv *priv = dev->data->dev_private;
 
-	dev_flow = rte_calloc(__func__, 1, size, 0);
-	if (!dev_flow) {
+	/* Guard against overflowing the pre-allocated device flow array. */
+	if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
+		rte_flow_error_set(error, ENOSPC,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "no free temporary device flow");
+		return NULL;
+	}
+	dev_handle = rte_calloc(__func__, 1, size, 0);
+	if (!dev_handle) {
 		rte_flow_error_set(error, ENOMEM,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-				   "not enough memory to create flow");
+				   "not enough memory to create flow handle");
 		return NULL;
 	}
+	/* No multi-thread support; take the next free workspace slot. */
+	dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
+	dev_flow->handle = dev_handle;
 	dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
+	/*
+	 * The matching value needs to be cleared to 0 before use. In the
+	 * past, it was cleared automatically by the rte_*calloc API. The
+	 * time consumption is almost the same as before.
+	 */
+	memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
 	dev_flow->ingress = attr->ingress;
 	dev_flow->dv.transfer = attr->transfer;
 	return dev_flow;
@@ -5394,7 +5415,7 @@  struct field_modify_info modify_tcp[] = {
 		 * This is workaround, masks are not supported,
 		 * and pre-validated.
 		 */
-		dev_flow->handle.vf_vlan.tag =
+		dev_flow->handle->vf_vlan.tag =
 			rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
 	}
 	tci_m = rte_be_to_cpu_16(vlan_m->tci);
@@ -6895,7 +6916,7 @@  struct field_modify_info modify_tcp[] = {
 				(void *)cache_matcher,
 				rte_atomic32_read(&cache_matcher->refcnt));
 			rte_atomic32_inc(&cache_matcher->refcnt);
-			dev_flow->handle.dvh.matcher = cache_matcher;
+			dev_flow->handle->dvh.matcher = cache_matcher;
 			/* old matcher should not make the table ref++. */
 			flow_dv_tbl_resource_release(dev, tbl);
 			return 0;
@@ -6932,7 +6953,7 @@  struct field_modify_info modify_tcp[] = {
 	/* only matcher ref++, table ref++ already done above in get API. */
 	rte_atomic32_inc(&cache_matcher->refcnt);
 	LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
-	dev_flow->handle.dvh.matcher = cache_matcher;
+	dev_flow->handle->dvh.matcher = cache_matcher;
 	DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
 		key->domain ? "FDB" : "NIC", key->table_id,
 		cache_matcher->priority,
@@ -6974,7 +6995,7 @@  struct field_modify_info modify_tcp[] = {
 		cache_resource = container_of
 			(entry, struct mlx5_flow_dv_tag_resource, entry);
 		rte_atomic32_inc(&cache_resource->refcnt);
-		dev_flow->handle.dvh.tag_resource = cache_resource;
+		dev_flow->handle->dvh.tag_resource = cache_resource;
 		DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
 			(void *)cache_resource,
 			rte_atomic32_read(&cache_resource->refcnt));
@@ -7003,7 +7024,7 @@  struct field_modify_info modify_tcp[] = {
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "cannot insert tag");
 	}
-	dev_flow->handle.dvh.tag_resource = cache_resource;
+	dev_flow->handle->dvh.tag_resource = cache_resource;
 	DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -7148,7 +7169,7 @@  struct field_modify_info modify_tcp[] = {
 flow_dv_hashfields_set(struct mlx5_flow *dev_flow)
 {
 	struct rte_flow *flow = dev_flow->flow;
-	uint64_t items = dev_flow->handle.layers;
+	uint64_t items = dev_flow->handle->layers;
 	int rss_inner = 0;
 	uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types);
 
@@ -7238,6 +7259,7 @@  struct field_modify_info modify_tcp[] = {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_config *dev_conf = &priv->config;
 	struct rte_flow *flow = dev_flow->flow;
+	struct mlx5_flow_handle *handle = dev_flow->handle;
 	uint64_t item_flags = 0;
 	uint64_t last_item = 0;
 	uint64_t action_flags = 0;
@@ -7306,7 +7328,7 @@  struct field_modify_info modify_tcp[] = {
 			    (dev, &port_id_resource, dev_flow, error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.port_id_action->action;
+					handle->dvh.port_id_action->action;
 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
 			break;
 		case RTE_FLOW_ACTION_TYPE_FLAG:
@@ -7324,12 +7346,17 @@  struct field_modify_info modify_tcp[] = {
 				break;
 			}
 			tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
-			if (!dev_flow->handle.dvh.tag_resource)
-				if (flow_dv_tag_resource_register
-				    (dev, tag_be, dev_flow, error))
-					return -rte_errno;
+			/*
+			 * Only one FLAG or MARK is supported per device flow
+			 * right now. So the pointer to the tag resource must be
+			 * zero before the register process.
+			 */
+			MLX5_ASSERT(!handle->dvh.tag_resource);
+			if (flow_dv_tag_resource_register(dev, tag_be,
+							  dev_flow, error))
+				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.tag_resource->action;
+					handle->dvh.tag_resource->action;
 			break;
 		case RTE_FLOW_ACTION_TYPE_MARK:
 			action_flags |= MLX5_FLOW_ACTION_MARK;
@@ -7351,12 +7378,12 @@  struct field_modify_info modify_tcp[] = {
 			tag_be = mlx5_flow_mark_set
 			      (((const struct rte_flow_action_mark *)
 			       (actions->conf))->id);
-			if (!dev_flow->handle.dvh.tag_resource)
-				if (flow_dv_tag_resource_register
-				    (dev, tag_be, dev_flow, error))
-					return -rte_errno;
+			MLX5_ASSERT(!handle->dvh.tag_resource);
+			if (flow_dv_tag_resource_register(dev, tag_be,
+							  dev_flow, error))
+				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.tag_resource->action;
+					handle->dvh.tag_resource->action;
 			break;
 		case RTE_FLOW_ACTION_TYPE_SET_META:
 			if (flow_dv_convert_action_set_meta
@@ -7454,7 +7481,7 @@  struct field_modify_info modify_tcp[] = {
 					    (dev, attr, &vlan, dev_flow, error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.push_vlan_res->action;
+					handle->dvh.push_vlan_res->action;
 			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
 			break;
 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
@@ -7481,7 +7508,7 @@  struct field_modify_info modify_tcp[] = {
 							   error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.encap_decap->verbs_action;
+					handle->dvh.encap_decap->verbs_action;
 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
@@ -7491,7 +7518,7 @@  struct field_modify_info modify_tcp[] = {
 							   error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.encap_decap->verbs_action;
+					handle->dvh.encap_decap->verbs_action;
 			action_flags |= MLX5_FLOW_ACTION_DECAP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
@@ -7501,7 +7528,7 @@  struct field_modify_info modify_tcp[] = {
 					(dev, actions, dev_flow, attr, error))
 					return -rte_errno;
 				dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.encap_decap->verbs_action;
+					handle->dvh.encap_decap->verbs_action;
 			} else {
 				/* Handle encap without preceding decap. */
 				if (flow_dv_create_action_l2_encap
@@ -7509,7 +7536,7 @@  struct field_modify_info modify_tcp[] = {
 				     error))
 					return -rte_errno;
 				dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.encap_decap->verbs_action;
+					handle->dvh.encap_decap->verbs_action;
 			}
 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
 			break;
@@ -7521,7 +7548,7 @@  struct field_modify_info modify_tcp[] = {
 				    (dev, dev_flow, attr->transfer, error))
 					return -rte_errno;
 				dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.encap_decap->verbs_action;
+					handle->dvh.encap_decap->verbs_action;
 			}
 			/* If decap is followed by encap, handle it at encap. */
 			action_flags |= MLX5_FLOW_ACTION_DECAP;
@@ -7553,7 +7580,7 @@  struct field_modify_info modify_tcp[] = {
 						 "cannot create jump action.");
 			}
 			dev_flow->dv.actions[actions_n++] =
-				dev_flow->handle.dvh.jump->action;
+					handle->dvh.jump->action;
 			action_flags |= MLX5_FLOW_ACTION_JUMP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
@@ -7686,7 +7713,7 @@  struct field_modify_info modify_tcp[] = {
 					(dev, mhdr_res, dev_flow, error))
 					return -rte_errno;
 				dev_flow->dv.actions[modify_action_position] =
-				dev_flow->handle.dvh.modify_hdr->verbs_action;
+					handle->dvh.modify_hdr->verbs_action;
 			}
 			break;
 		default:
@@ -7697,7 +7724,7 @@  struct field_modify_info modify_tcp[] = {
 			modify_action_position = actions_n++;
 	}
 	dev_flow->dv.actions_n = actions_n;
-	dev_flow->handle.act_flags = action_flags;
+	handle->act_flags = action_flags;
 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 		int item_type = items->type;
@@ -7902,7 +7929,7 @@  struct field_modify_info modify_tcp[] = {
 	 * Layers may be already initialized from prefix flow if this dev_flow
 	 * is the suffix flow.
 	 */
-	dev_flow->handle.layers |= item_flags;
+	handle->layers |= item_flags;
 	if (action_flags & MLX5_FLOW_ACTION_RSS)
 		flow_dv_hashfields_set(dev_flow);
 	/* Register matcher. */
@@ -7937,19 +7964,21 @@  struct field_modify_info modify_tcp[] = {
 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 		struct rte_flow_error *error)
 {
-	struct mlx5_flow_resource_dv *dv;
+	struct mlx5_flow_dv_workspace *dv;
 	struct mlx5_flow_handle *dh;
 	struct mlx5_flow_handle_dv *dv_h;
 	struct mlx5_flow *dev_flow;
 	struct mlx5_priv *priv = dev->data->dev_private;
 	int n;
 	int err;
+	int idx;
 
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		dh = &dev_flow->handle;
+	for (idx = priv->flow_idx - 1; idx >= 0; idx--) {
+		dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
 		dv = &dev_flow->dv;
-		n = dv->actions_n;
+		dh = dev_flow->handle;
 		dv_h = &dh->dvh;
+		n = dv->actions_n;
 		if (dh->act_flags & MLX5_FLOW_ACTION_DROP) {
 			if (dv->transfer) {
 				dv->actions[n++] = priv->sh->esw_drop_action;
@@ -7982,7 +8011,7 @@  struct field_modify_info modify_tcp[] = {
 					 dev_flow->hash_fields,
 					 (*flow->rss.queue),
 					 flow->rss.queue_num,
-					 !!(dev_flow->handle.layers &
+					 !!(dh->layers &
 					    MLX5_FLOW_LAYER_TUNNEL));
 			}
 			if (!hrxq) {
@@ -8020,17 +8049,16 @@  struct field_modify_info modify_tcp[] = {
 	return 0;
 error:
 	err = rte_errno; /* Save rte_errno before cleanup. */
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		struct mlx5_flow_handle *dh_tmp = &dev_flow->handle;
-		if (dh_tmp->hrxq) {
-			if (dh_tmp->act_flags & MLX5_FLOW_ACTION_DROP)
+	LIST_FOREACH(dh, &flow->dev_handles, next) {
+		if (dh->hrxq) {
+			if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, dh_tmp->hrxq);
-			dh_tmp->hrxq = NULL;
+				mlx5_hrxq_release(dev, dh->hrxq);
+			dh->hrxq = NULL;
 		}
-		if (dh_tmp->vf_vlan.tag && dh_tmp->vf_vlan.created)
-			mlx5_vlan_vmwa_release(dev, &dh_tmp->vf_vlan);
+		if (dh->vf_vlan.tag && dh->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
 	}
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
@@ -8041,17 +8069,17 @@  struct field_modify_info modify_tcp[] = {
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
 flow_dv_matcher_release(struct rte_eth_dev *dev,
-			struct mlx5_flow *flow)
+			struct mlx5_flow_handle *handle)
 {
-	struct mlx5_flow_dv_matcher *matcher = flow->handle.dvh.matcher;
+	struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
 
 	MLX5_ASSERT(matcher->matcher_object);
 	DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
@@ -8074,17 +8102,17 @@  struct field_modify_info modify_tcp[] = {
 /**
  * Release an encap/decap resource.
  *
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
-flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
+flow_dv_encap_decap_resource_release(struct mlx5_flow_handle *handle)
 {
 	struct mlx5_flow_dv_encap_decap_resource *cache_resource =
-						flow->handle.dvh.encap_decap;
+						handle->dvh.encap_decap;
 
 	MLX5_ASSERT(cache_resource->verbs_action);
 	DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
@@ -8107,18 +8135,18 @@  struct field_modify_info modify_tcp[] = {
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
-				  struct mlx5_flow *flow)
+				  struct mlx5_flow_handle *handle)
 {
 	struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
-						flow->handle.dvh.jump;
+							handle->dvh.jump;
 	struct mlx5_flow_tbl_data_entry *tbl_data =
 			container_of(cache_resource,
 				     struct mlx5_flow_tbl_data_entry, jump);
@@ -8142,17 +8170,17 @@  struct field_modify_info modify_tcp[] = {
 /**
  * Release a modify-header resource.
  *
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
-flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
+flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle)
 {
 	struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
-						flow->handle.dvh.modify_hdr;
+							handle->dvh.modify_hdr;
 
 	MLX5_ASSERT(cache_resource->verbs_action);
 	DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
@@ -8173,17 +8201,17 @@  struct field_modify_info modify_tcp[] = {
 /**
  * Release port ID action resource.
  *
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
-flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
+flow_dv_port_id_action_resource_release(struct mlx5_flow_handle *handle)
 {
 	struct mlx5_flow_dv_port_id_action_resource *cache_resource =
-						flow->handle.dvh.port_id_action;
+						handle->dvh.port_id_action;
 
 	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
@@ -8204,17 +8232,17 @@  struct field_modify_info modify_tcp[] = {
 /**
  * Release push vlan action resource.
  *
- * @param flow
- *   Pointer to mlx5_flow.
+ * @param handle
+ *   Pointer to mlx5_flow_handle.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
-flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
+flow_dv_push_vlan_action_resource_release(struct mlx5_flow_handle *handle)
 {
 	struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
-						flow->handle.dvh.push_vlan_res;
+						handle->dvh.push_vlan_res;
 
 	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
@@ -8245,18 +8273,16 @@  struct field_modify_info modify_tcp[] = {
 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
 	struct mlx5_flow_handle *dh;
-	struct mlx5_flow *dev_flow;
 
 	if (!flow)
 		return;
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		dh = &dev_flow->handle;
+	LIST_FOREACH(dh, &flow->dev_handles, next) {
 		if (dh->ib_flow) {
 			claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow));
 			dh->ib_flow = NULL;
 		}
 		if (dh->hrxq) {
-			if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP)
+			if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
 				mlx5_hrxq_release(dev, dh->hrxq);
@@ -8279,7 +8305,7 @@  struct field_modify_info modify_tcp[] = {
 static void
 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *dev_handle;
 
 	if (!flow)
 		return;
@@ -8292,25 +8318,25 @@  struct field_modify_info modify_tcp[] = {
 		mlx5_flow_meter_detach(flow->meter);
 		flow->meter = NULL;
 	}
-	while (!LIST_EMPTY(&flow->dev_flows)) {
-		dev_flow = LIST_FIRST(&flow->dev_flows);
-		LIST_REMOVE(dev_flow, next);
-		if (dev_flow->handle.dvh.matcher)
-			flow_dv_matcher_release(dev, dev_flow);
-		if (dev_flow->handle.dvh.encap_decap)
-			flow_dv_encap_decap_resource_release(dev_flow);
-		if (dev_flow->handle.dvh.modify_hdr)
-			flow_dv_modify_hdr_resource_release(dev_flow);
-		if (dev_flow->handle.dvh.jump)
-			flow_dv_jump_tbl_resource_release(dev, dev_flow);
-		if (dev_flow->handle.dvh.port_id_action)
-			flow_dv_port_id_action_resource_release(dev_flow);
-		if (dev_flow->handle.dvh.push_vlan_res)
-			flow_dv_push_vlan_action_resource_release(dev_flow);
-		if (dev_flow->handle.dvh.tag_resource)
+	while (!LIST_EMPTY(&flow->dev_handles)) {
+		dev_handle = LIST_FIRST(&flow->dev_handles);
+		LIST_REMOVE(dev_handle, next);
+		if (dev_handle->dvh.matcher)
+			flow_dv_matcher_release(dev, dev_handle);
+		if (dev_handle->dvh.encap_decap)
+			flow_dv_encap_decap_resource_release(dev_handle);
+		if (dev_handle->dvh.modify_hdr)
+			flow_dv_modify_hdr_resource_release(dev_handle);
+		if (dev_handle->dvh.jump)
+			flow_dv_jump_tbl_resource_release(dev, dev_handle);
+		if (dev_handle->dvh.port_id_action)
+			flow_dv_port_id_action_resource_release(dev_handle);
+		if (dev_handle->dvh.push_vlan_res)
+			flow_dv_push_vlan_action_resource_release(dev_handle);
+		if (dev_handle->dvh.tag_resource)
 			flow_dv_tag_release(dev,
-					dev_flow->handle.dvh.tag_resource);
-		rte_free(dev_flow);
+					    dev_handle->dvh.tag_resource);
+		rte_free(dev_handle);
 	}
 }
 
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 08185ec..ccd3395 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -253,7 +253,7 @@ 
  *   Size in bytes of the specification to copy.
  */
 static void
-flow_verbs_spec_add(struct mlx5_flow_resource_verbs *verbs,
+flow_verbs_spec_add(struct mlx5_flow_verbs_workspace *verbs,
 		    void *src, unsigned int size)
 {
 	void *dst;
@@ -263,7 +263,7 @@ 
 	MLX5_ASSERT(verbs->specs);
 	dst = (void *)(verbs->specs + verbs->size);
 	memcpy(dst, src, size);
-	++verbs->attr->num_of_specs;
+	++verbs->attr.num_of_specs;
 	verbs->size += size;
 }
 
@@ -392,9 +392,9 @@ 
 	if (!(item_flags & l2m))
 		flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
 	else
-		flow_verbs_item_vlan_update(dev_flow->verbs.attr, &eth);
+		flow_verbs_item_vlan_update(&dev_flow->verbs.attr, &eth);
 	if (!tunnel)
-		dev_flow->handle.vf_vlan.tag =
+		dev_flow->handle->vf_vlan.tag =
 			rte_be_to_cpu_16(spec->tci) & 0x0fff;
 }
 
@@ -744,7 +744,7 @@ 
 			      const struct rte_flow_item *item __rte_unused,
 			      uint64_t item_flags)
 {
-	struct mlx5_flow_resource_verbs *verbs = &dev_flow->verbs;
+	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
 	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
 	struct ibv_flow_spec_tunnel tunnel = {
@@ -774,11 +774,11 @@ 
 	}
 #endif
 	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
-		flow_verbs_item_gre_ip_protocol_update(verbs->attr,
+		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
 						       IBV_FLOW_SPEC_IPV4_EXT,
 						       IPPROTO_GRE);
 	else
-		flow_verbs_item_gre_ip_protocol_update(verbs->attr,
+		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
 						       IBV_FLOW_SPEC_IPV6,
 						       IPPROTO_GRE);
 	flow_verbs_spec_add(verbs, &tunnel, size);
@@ -1385,6 +1385,8 @@ 
  * The required size is calculate based on the actions and items. This function
  * also returns the detected actions and items for later use.
  *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
  * @param[in] attr
  *   Pointer to the flow attributes.
  * @param[in] items
@@ -1399,25 +1401,45 @@ 
  *   is set.
  */
 static struct mlx5_flow *
-flow_verbs_prepare(const struct rte_flow_attr *attr __rte_unused,
+flow_verbs_prepare(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr __rte_unused,
 		   const struct rte_flow_item items[],
 		   const struct rte_flow_action actions[],
 		   struct rte_flow_error *error)
 {
-	size_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
+	size_t size = 0;
 	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *dev_handle;
+	struct mlx5_priv *priv = dev->data->dev_private;
 
 	size += flow_verbs_get_actions_size(actions);
 	size += flow_verbs_get_items_size(items);
-	dev_flow = rte_calloc(__func__, 1, size, 0);
-	if (!dev_flow) {
+	if (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) {
+		rte_flow_error_set(error, E2BIG,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Verbs spec/action size too large");
+		return NULL;
+	}
+	/* Guard against overflowing the pre-allocated device flow array. */
+	if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
+		rte_flow_error_set(error, ENOSPC,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "no free temporary device flow");
+		return NULL;
+	}
+	dev_handle = rte_calloc(__func__, 1, MLX5_FLOW_HANDLE_VERBS_SIZE, 0);
+	if (!dev_handle) {
 		rte_flow_error_set(error, ENOMEM,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-				   "not enough memory to create flow");
+				   "not enough memory to create flow handle");
 		return NULL;
 	}
-	dev_flow->verbs.attr = (void *)(dev_flow + 1);
-	dev_flow->verbs.specs = (void *)(dev_flow->verbs.attr + 1);
+	/* No multi-thread support; take the next free workspace slot. */
+	dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
+	dev_flow->handle = dev_handle;
+	/* Memcpy is used, so only the size and spec count need clearing. */
+	dev_flow->verbs.size = 0;
+	dev_flow->verbs.attr.num_of_specs = 0;
 	dev_flow->ingress = attr->ingress;
 	/* Need to set transfer attribute: not supported in Verbs mode. */
 	return dev_flow;
@@ -1499,7 +1521,7 @@ 
 						  "action not supported");
 		}
 	}
-	dev_flow->handle.act_flags = action_flags;
+	dev_flow->handle->act_flags = action_flags;
 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 
@@ -1601,10 +1623,11 @@ 
 						  "item not supported");
 		}
 	}
-	dev_flow->handle.layers = item_flags;
-	dev_flow->verbs.attr->priority =
+	dev_flow->handle->layers = item_flags;
+	/* Other members of attr will be ignored. */
+	dev_flow->verbs.attr.priority =
 		mlx5_flow_adjust_priority(dev, priority, subpriority);
-	dev_flow->verbs.attr->port = (uint8_t)priv->ibv_port;
+	dev_flow->verbs.attr.port = (uint8_t)priv->ibv_port;
 	return 0;
 }
 
@@ -1619,26 +1642,24 @@ 
 static void
 flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow_handle *dh;
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *handle;
 
 	if (!flow)
 		return;
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		dh = &dev_flow->handle;
-		if (dh->ib_flow) {
-			claim_zero(mlx5_glue->destroy_flow(dh->ib_flow));
-			dh->ib_flow = NULL;
+	LIST_FOREACH(handle, &flow->dev_handles, next) {
+		if (handle->ib_flow) {
+			claim_zero(mlx5_glue->destroy_flow(handle->ib_flow));
+			handle->ib_flow = NULL;
 		}
-		if (dh->hrxq) {
-			if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP)
+		if (handle->hrxq) {
+			if (handle->act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, dh->hrxq);
-			dh->hrxq = NULL;
+				mlx5_hrxq_release(dev, handle->hrxq);
+			handle->hrxq = NULL;
 		}
-		if (dh->vf_vlan.tag && dh->vf_vlan.created)
-			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
+		if (handle->vf_vlan.tag && handle->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
 	}
 }
 
@@ -1653,15 +1674,15 @@ 
 static void
 flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_handle *handle;
 
 	if (!flow)
 		return;
 	flow_verbs_remove(dev, flow);
-	while (!LIST_EMPTY(&flow->dev_flows)) {
-		dev_flow = LIST_FIRST(&flow->dev_flows);
-		LIST_REMOVE(dev_flow, next);
-		rte_free(dev_flow);
+	while (!LIST_EMPTY(&flow->dev_handles)) {
+		handle = LIST_FIRST(&flow->dev_handles);
+		LIST_REMOVE(handle, next);
+		rte_free(handle);
 	}
 	if (flow->counter) {
 		flow_verbs_counter_release(dev, flow->counter);
@@ -1687,15 +1708,17 @@ 
 		 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_handle *dh;
+	struct mlx5_flow_handle *handle;
 	struct mlx5_flow *dev_flow;
 	int err;
-
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		dh = &dev_flow->handle;
-		if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP) {
-			dh->hrxq = mlx5_hrxq_drop_new(dev);
-			if (!dh->hrxq) {
+	int idx;
+
+	for (idx = priv->flow_idx - 1; idx >= 0; idx--) {
+		dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
+		handle = dev_flow->handle;
+		if (handle->act_flags & MLX5_FLOW_ACTION_DROP) {
+			handle->hrxq = mlx5_hrxq_drop_new(dev);
+			if (!handle->hrxq) {
 				rte_flow_error_set
 					(error, errno,
 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -1717,7 +1740,7 @@ 
 						dev_flow->hash_fields,
 						(*flow->rss.queue),
 						flow->rss.queue_num,
-						!!(dev_flow->handle.layers &
+						!!(handle->layers &
 						MLX5_FLOW_LAYER_TUNNEL));
 			if (!hrxq) {
 				rte_flow_error_set
@@ -1726,11 +1749,11 @@ 
 					 "cannot get hash queue");
 				goto error;
 			}
-			dh->hrxq = hrxq;
+			handle->hrxq = hrxq;
 		}
-		dh->ib_flow = mlx5_glue->create_flow(dh->hrxq->qp,
-						     dev_flow->verbs.attr);
-		if (!dh->ib_flow) {
+		handle->ib_flow = mlx5_glue->create_flow(handle->hrxq->qp,
+						     &dev_flow->verbs.attr);
+		if (!handle->ib_flow) {
 			rte_flow_error_set(error, errno,
 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					   NULL,
@@ -1738,31 +1761,29 @@ 
 			goto error;
 		}
 		if (priv->vmwa_context &&
-		    dev_flow->handle.vf_vlan.tag &&
-		    !dev_flow->handle.vf_vlan.created) {
+		    handle->vf_vlan.tag && !handle->vf_vlan.created) {
 			/*
 			 * The rule contains the VLAN pattern.
 			 * For VF we are going to create VLAN
 			 * interface to make hypervisor set correct
 			 * e-Switch vport context.
 			 */
-			mlx5_vlan_vmwa_acquire(dev, &dev_flow->handle.vf_vlan);
+			mlx5_vlan_vmwa_acquire(dev, &handle->vf_vlan);
 		}
 	}
 	return 0;
 error:
 	err = rte_errno; /* Save rte_errno before cleanup. */
-	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
-		dh = &dev_flow->handle;
-		if (dh->hrxq) {
-			if (dev_flow->handle.act_flags & MLX5_FLOW_ACTION_DROP)
+	LIST_FOREACH(handle, &flow->dev_handles, next) {
+		if (handle->hrxq) {
+			if (handle->act_flags & MLX5_FLOW_ACTION_DROP)
 				mlx5_hrxq_drop_release(dev);
 			else
-				mlx5_hrxq_release(dev, dh->hrxq);
-			dh->hrxq = NULL;
+				mlx5_hrxq_release(dev, handle->hrxq);
+			handle->hrxq = NULL;
 		}
-		if (dh->vf_vlan.tag && dh->vf_vlan.created)
-			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
+		if (handle->vf_vlan.tag && handle->vf_vlan.created)
+			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
 	}
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index b686ee8..7bcfe5e 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -307,6 +307,7 @@ 
 		mlx5_txq_stop(dev);
 		return -rte_errno;
 	}
+	/* Set the started flag here for the following steps like control flows. */
 	dev->data->dev_started = 1;
 	ret = mlx5_rx_intr_vec_enable(dev);
 	if (ret) {
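For reference, the resulting lifecycle of the intermediate buffer is roughly
the following (a simplified sketch of the call sequence introduced by this
patch, not literal driver code):

/* Probe time: allocate the shared workspace array once per port. */
mlx5_flow_alloc_intermediate(eth_dev);

/* Inside each flow creation (flow_list_create): */
priv->flow_idx = 0; /* Reset the workspace index. */
for (i = 0; i < buf->entries; ++i) {
	/* prepare() hands out inter_flows[priv->flow_idx++] as workspace
	 * and rte_calloc()s only the small handle structure. */
	dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, error);
	LIST_INSERT_HEAD(&flow->dev_handles, dev_flow->handle, next);
	flow_drv_translate(dev, dev_flow, attr, items, actions, error);
}
/* apply() walks inter_flows[0 .. priv->flow_idx - 1] to program the
 * hardware; afterwards only flow->dev_handles is kept and the workspace
 * slots are free for the next flow creation. */
flow_drv_apply(dev, flow, error);

/* Device close: free the workspace array. */
mlx5_flow_free_intermediate(dev);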