net/mlx5: fix GRE protocol type translation for VERB API

Message ID 20211223131638.15190-1-getelson@nvidia.com (mailing list archive)
State Accepted, archived
Delegated to: Raslan Darawsheh
Series: net/mlx5: fix GRE protocol type translation for VERB API

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/iol-x86_64-compile-testing success Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/github-robot: build success github build: passed
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-aarch64-compile-testing success Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-intel-Functional fail Functional Testing issues
ci/iol-aarch64-unit-testing success Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-abi-testing success Testing PASS
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS

Commit Message

Gregory Etelson Dec. 23, 2021, 1:16 p.m. UTC
  When an application creates several flows to match on a GRE tunnel
without explicitly specifying the GRE protocol type value in the flow
rules, the PMD translates that to a zero mask.
RDMA-CORE cannot distinguish between the different inner flow types and
produces identical matchers for each zero mask.

The patch extracts the inner header type from the flow rule and forces
it into the GRE protocol type if the application did not specify one.

Cc: stable@dpdk.org

Fixes: 84c406e74524 ("net/mlx5: add flow translate function")
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.h       | 14 +++++++++++
 drivers/net/mlx5/mlx5_flow_dv.c    | 14 -----------
 drivers/net/mlx5/mlx5_flow_verbs.c | 37 ++++++++++++++++++++----------
 3 files changed, 39 insertions(+), 26 deletions(-)
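
For illustration, below is a minimal sketch (not part of the patch) of
the kind of rules that trigger the issue. The helper name is
hypothetical; only the public rte_flow API is assumed. Creating two
such rules, one with RTE_FLOW_ITEM_TYPE_IPV4 and one with
RTE_FLOW_ITEM_TYPE_IPV6 as the inner item, previously yielded identical
RDMA-CORE matchers because neither rule sets rte_flow_item_gre.protocol:

#include <rte_flow.h>

/* Hypothetical repro helper: a GRE rule that leaves the GRE protocol
 * type implicit and only varies the inner L3 item. */
static struct rte_flow *
create_gre_rule(uint16_t port_id, enum rte_flow_item_type inner_l3,
		struct rte_flow_error *error)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		/* No spec/mask: the GRE protocol type is left unspecified. */
		{ .type = RTE_FLOW_ITEM_TYPE_GRE },
		{ .type = inner_l3 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}

With the fix, the Verbs translation infers the protocol type from the
inner item (0x0800 for IPv4, 0x86DD for IPv6), so the two matchers
differ as intended.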
  

Comments

Raslan Darawsheh Jan. 9, 2022, 11:56 a.m. UTC | #1
Hi,

> -----Original Message-----
> From: Gregory Etelson <getelson@nvidia.com>
> Sent: Thursday, December 23, 2021 3:17 PM
> To: dev@dpdk.org
> Cc: Gregory Etelson <getelson@nvidia.com>; Raslan Darawsheh
> <rasland@nvidia.com>; stable@dpdk.org; Matan Azrad
> <matan@nvidia.com>; Slava Ovsiienko <viacheslavo@nvidia.com>; Yongseok
> Koh <yskoh@mellanox.com>; Ori Kam <orika@nvidia.com>
> Subject: [PATCH] net/mlx5: fix GRE protocol type translation for VERB API
> 
> When an application creates several flows to match on a GRE tunnel
> without explicitly specifying the GRE protocol type value in the flow
> rules, the PMD translates that to a zero mask.
> RDMA-CORE cannot distinguish between the different inner flow types and
> produces identical matchers for each zero mask.
> 
> The patch extracts the inner header type from the flow rule and forces
> it into the GRE protocol type if the application did not specify one.
> 
> Cc: stable@dpdk.org
> 
> Fixes: 84c406e74524 ("net/mlx5: add flow translate function")
> Signed-off-by: Gregory Etelson <getelson@nvidia.com>
> Acked-by: Matan Azrad <matan@nvidia.com>

Patch applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh
  

Patch

diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 1f54649c69..f80d9454d5 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1450,6 +1450,20 @@  flow_aso_ct_get_by_idx(struct rte_eth_dev *dev, uint32_t own_idx)
 	return ct;
 }
 
+static inline uint16_t
+mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
+{
+	if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+		return RTE_ETHER_TYPE_TEB;
+	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
+		return RTE_ETHER_TYPE_IPV4;
+	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
+		return RTE_ETHER_TYPE_IPV6;
+	else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
+		return RTE_ETHER_TYPE_MPLS;
+	return 0;
+}
+
 int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
 			     const struct mlx5_flow_tunnel *tunnel,
 			     uint32_t group, uint32_t *table,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 1c6cae8779..5bb60dd73c 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -93,20 +93,6 @@  static int
 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
 				  uint32_t rix_jump);
 
-static inline uint16_t
-mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
-{
-	if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
-		return RTE_ETHER_TYPE_TEB;
-	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
-		return RTE_ETHER_TYPE_IPV4;
-	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
-		return RTE_ETHER_TYPE_IPV6;
-	else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
-		return RTE_ETHER_TYPE_MPLS;
-	return 0;
-}
-
 static int16_t
 flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev)
 {
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 29cd694752..192a00d4fd 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -907,6 +907,7 @@  flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
 		.size = size,
 	};
 #else
+	static const struct rte_flow_item_gre empty_gre = {0,};
 	const struct rte_flow_item_gre *spec = item->spec;
 	const struct rte_flow_item_gre *mask = item->mask;
 	unsigned int size = sizeof(struct ibv_flow_spec_gre);
@@ -915,17 +916,29 @@  flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
 		.size = size,
 	};
 
-	if (!mask)
-		mask = &rte_flow_item_gre_mask;
-	if (spec) {
-		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
-		tunnel.val.protocol = spec->protocol;
-		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
-		tunnel.mask.protocol = mask->protocol;
-		/* Remove unwanted bits from values. */
-		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
+	if (!spec) {
+		spec = &empty_gre;
+		mask = &empty_gre;
+	} else {
+		if (!mask)
+			mask = &rte_flow_item_gre_mask;
+	}
+	tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
+	tunnel.val.protocol = spec->protocol;
+	tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
+	tunnel.mask.protocol = mask->protocol;
+	/* Remove unwanted bits from values. */
+	tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
+	tunnel.val.key &= tunnel.mask.key;
+	if (tunnel.mask.protocol) {
 		tunnel.val.protocol &= tunnel.mask.protocol;
-		tunnel.val.key &= tunnel.mask.key;
+	} else {
+		tunnel.val.protocol = mlx5_translate_tunnel_etypes(item_flags);
+		if (tunnel.val.protocol) {
+			tunnel.mask.protocol = 0xFFFF;
+			tunnel.val.protocol =
+				rte_cpu_to_be_16(tunnel.val.protocol);
+		}
 	}
 #endif
 	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
@@ -1803,8 +1816,6 @@  flow_verbs_translate(struct rte_eth_dev *dev,
 			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
 			break;
 		case RTE_FLOW_ITEM_TYPE_GRE:
-			flow_verbs_translate_item_gre(dev_flow, items,
-						      item_flags);
 			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
 			item_flags |= MLX5_FLOW_LAYER_GRE;
 			break;
@@ -1820,6 +1831,8 @@  flow_verbs_translate(struct rte_eth_dev *dev,
 						  NULL, "item not supported");
 		}
 	}
+	if (item_flags & MLX5_FLOW_LAYER_GRE)
+		flow_verbs_translate_item_gre(dev_flow, items, item_flags);
 	dev_flow->handle->layers = item_flags;
 	/* Other members of attr will be ignored. */
 	dev_flow->verbs.attr.priority =
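
To make the inferred values concrete, here is a standalone sketch of
the inference step. The bit definitions below are illustrative
stand-ins, not the driver's real MLX5_FLOW_LAYER_* values; the returned
ethertypes match RTE_ETHER_TYPE_TEB/IPV4/IPV6/MPLS:

#include <assert.h>
#include <stdint.h>

#define INNER_L2	(1u << 0)	/* stand-in for MLX5_FLOW_LAYER_INNER_L2 */
#define INNER_L3_IPV4	(1u << 1)	/* stand-in for MLX5_FLOW_LAYER_INNER_L3_IPV4 */
#define INNER_L3_IPV6	(1u << 2)	/* stand-in for MLX5_FLOW_LAYER_INNER_L3_IPV6 */
#define LAYER_MPLS	(1u << 3)	/* stand-in for MLX5_FLOW_LAYER_MPLS */

static uint16_t
translate_tunnel_etype(uint64_t pattern_flags)
{
	if (pattern_flags & INNER_L2)
		return 0x6558;	/* TEB: transparent Ethernet bridging */
	if (pattern_flags & INNER_L3_IPV4)
		return 0x0800;	/* IPv4 */
	if (pattern_flags & INNER_L3_IPV6)
		return 0x86DD;	/* IPv6 */
	if (pattern_flags & LAYER_MPLS)
		return 0x8847;	/* MPLS */
	return 0;		/* nothing to infer: keep the zero mask */
}

int
main(void)
{
	/* An inner IPv6 item forces protocol 0x86DD into the matcher. */
	assert(translate_tunnel_etype(INNER_L3_IPV6) == 0x86DD);
	/* Inner L2 takes precedence, matching the helper's if/else order. */
	assert(translate_tunnel_etype(INNER_L2 | INNER_L3_IPV4) == 0x6558);
	return 0;
}

As in the patch, a non-zero result is then byte-swapped with
rte_cpu_to_be_16() and matched with a full 0xFFFF mask.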