[v2,2/7] net/mlx5: add support for ptype match in hardware steering

Message ID 20231023210707.1344241-3-akozyrev@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Series: ptype matching support in mlx5

Checks

Context        Check    Description
ci/checkpatch  success  coding style OK

Commit Message

Alexander Kozyrev Oct. 23, 2023, 9:07 p.m. UTC
  Packet type matching provides a quick way of finding out the
L2/L3/L4 protocols in a given packet. It helps to optimize flow
rule matching by eliminating the need to stack all the packet
headers in the matching criteria.
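
For example (an illustrative sketch, not part of this patch), a single
PTYPE item can stand in for stacked ETH/IPV4/TCP items when matching
any plain outer IPv4/TCP packet:

	/* Illustrative only: match outer IPv4/TCP by packet type. */
	struct rte_flow_item_ptype spec = {
		.packet_type = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
	};
	struct rte_flow_item_ptype mask = {
		.packet_type = RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK,
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_PTYPE,
		  .spec = &spec, .mask = &mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};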

Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
 drivers/net/mlx5/hws/mlx5dr_definer.c | 161 ++++++++++++++++++++++++++
 drivers/net/mlx5/hws/mlx5dr_definer.h |   7 ++
 drivers/net/mlx5/mlx5_flow.h          |   3 +
 drivers/net/mlx5/mlx5_flow_hw.c       |   1 +
 4 files changed, 172 insertions(+)
  

Patch

diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 95b5d4b70e..8d846984e7 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -16,11 +16,15 @@
 #define STE_NO_VLAN	0x0
 #define STE_SVLAN	0x1
 #define STE_CVLAN	0x2
+#define STE_NO_L3	0x0
 #define STE_IPV4	0x1
 #define STE_IPV6	0x2
+#define STE_NO_L4	0x0
 #define STE_TCP		0x1
 #define STE_UDP		0x2
 #define STE_ICMP	0x3
+#define STE_NO_TUN	0x0
+#define STE_ESP		0x3
 
 #define MLX5DR_DEFINER_QUOTA_BLOCK 0
 #define MLX5DR_DEFINER_QUOTA_PASS  2
@@ -277,6 +281,82 @@ mlx5dr_definer_conntrack_tag(struct mlx5dr_definer_fc *fc,
 	DR_SET(tag, reg_value, fc->byte_off, fc->bit_off, fc->bit_mask);
 }
 
+static void
+mlx5dr_definer_ptype_l2_set(struct mlx5dr_definer_fc *fc,
+			    const void *item_spec,
+			    uint8_t *tag)
+{
+	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L2_I);
+	const struct rte_flow_item_ptype *v = item_spec;
+	uint32_t packet_type = v->packet_type &
+		(inner ? RTE_PTYPE_INNER_L2_MASK : RTE_PTYPE_L2_MASK);
+	uint8_t l2_type = STE_NO_VLAN;
+
+	if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER : RTE_PTYPE_L2_ETHER))
+		l2_type = STE_NO_VLAN;
+	else if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER_VLAN))
+		l2_type = STE_CVLAN;
+	else if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER_QINQ : RTE_PTYPE_L2_ETHER_QINQ))
+		l2_type = STE_SVLAN;
+
+	DR_SET(tag, l2_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_ptype_l3_set(struct mlx5dr_definer_fc *fc,
+			    const void *item_spec,
+			    uint8_t *tag)
+{
+	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L3_I);
+	const struct rte_flow_item_ptype *v = item_spec;
+	uint32_t packet_type = v->packet_type &
+		(inner ? RTE_PTYPE_INNER_L3_MASK : RTE_PTYPE_L3_MASK);
+	uint8_t l3_type = STE_NO_L3;
+
+	if (packet_type == (inner ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4))
+		l3_type = STE_IPV4;
+	else if (packet_type == (inner ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6))
+		l3_type = STE_IPV6;
+
+	DR_SET(tag, l3_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_ptype_l4_set(struct mlx5dr_definer_fc *fc,
+			    const void *item_spec,
+			    uint8_t *tag)
+{
+	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L4_I);
+	const struct rte_flow_item_ptype *v = item_spec;
+	uint32_t packet_type = v->packet_type &
+		(inner ? RTE_PTYPE_INNER_L4_MASK : RTE_PTYPE_L4_MASK);
+	uint8_t l4_type = STE_NO_L4;
+
+	if (packet_type == (inner ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP))
+		l4_type = STE_TCP;
+	else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP))
+		l4_type = STE_UDP;
+	else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_ICMP : RTE_PTYPE_L4_ICMP))
+		l4_type = STE_ICMP;
+
+	DR_SET(tag, l4_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_ptype_tunnel_set(struct mlx5dr_definer_fc *fc,
+				const void *item_spec,
+				uint8_t *tag)
+{
+	const struct rte_flow_item_ptype *v = item_spec;
+	uint32_t packet_type = v->packet_type & RTE_PTYPE_TUNNEL_MASK;
+	uint8_t tun_type = STE_NO_TUN;
+
+	if (packet_type == RTE_PTYPE_TUNNEL_ESP)
+		tun_type = STE_ESP;
+
+	DR_SET(tag, tun_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
 static void
 mlx5dr_definer_integrity_set(struct mlx5dr_definer_fc *fc,
 			     const void *item_spec,
@@ -1709,6 +1789,83 @@ mlx5dr_definer_conv_item_gre_key(struct mlx5dr_definer_conv_data *cd,
 	return 0;
 }
 
+static int
+mlx5dr_definer_conv_item_ptype(struct mlx5dr_definer_conv_data *cd,
+			       struct rte_flow_item *item,
+			       int item_idx)
+{
+	const struct rte_flow_item_ptype *m = item->mask;
+	struct mlx5dr_definer_fc *fc;
+
+	if (!m)
+		return 0;
+
+	if (!(m->packet_type &
+	      (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK | RTE_PTYPE_TUNNEL_MASK |
+	       RTE_PTYPE_INNER_L2_MASK | RTE_PTYPE_INNER_L3_MASK | RTE_PTYPE_INNER_L4_MASK))) {
+		rte_errno = ENOTSUP;
+		return rte_errno;
+	}
+
+	if (m->packet_type & RTE_PTYPE_L2_MASK) {
+		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L2, false)];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ptype_l2_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, false);
+	}
+
+	if (m->packet_type & RTE_PTYPE_INNER_L2_MASK) {
+		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L2, true)];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ptype_l2_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, true);
+	}
+
+	if (m->packet_type & RTE_PTYPE_L3_MASK) {
+		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L3, false)];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ptype_l3_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, l3_type, false);
+	}
+
+	if (m->packet_type & RTE_PTYPE_INNER_L3_MASK) {
+		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L3, true)];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ptype_l3_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, l3_type, true);
+	}
+
+	if (m->packet_type & RTE_PTYPE_L4_MASK) {
+		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, false)];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ptype_l4_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, l4_type, false);
+	}
+
+	if (m->packet_type & RTE_PTYPE_INNER_L4_MASK) {
+		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, true)];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ptype_l4_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, l4_type, true);
+	}
+
+	if (m->packet_type & RTE_PTYPE_TUNNEL_MASK) {
+		fc = &cd->fc[MLX5DR_DEFINER_FNAME_PTYPE_TUNNEL];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ptype_tunnel_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, l4_type_bwc, false);
+	}
+
+	return 0;
+}
+
 static int
 mlx5dr_definer_conv_item_integrity(struct mlx5dr_definer_conv_data *cd,
 				   struct rte_flow_item *item,
@@ -2332,6 +2489,10 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 			ret = mlx5dr_definer_conv_item_ib_l4(&cd, items, i);
 			item_flags |= MLX5_FLOW_ITEM_IB_BTH;
 			break;
+		case RTE_FLOW_ITEM_TYPE_PTYPE:
+			ret = mlx5dr_definer_conv_item_ptype(&cd, items, i);
+			item_flags |= MLX5_FLOW_ITEM_PTYPE;
+			break;
 		default:
 			DR_LOG(ERR, "Unsupported item type %d", items->type);
 			rte_errno = ENOTSUP;
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.h b/drivers/net/mlx5/hws/mlx5dr_definer.h
index f5a541bc17..ea07f55d52 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.h
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.h
@@ -141,6 +141,13 @@ enum mlx5dr_definer_fname {
 	MLX5DR_DEFINER_FNAME_IB_L4_OPCODE,
 	MLX5DR_DEFINER_FNAME_IB_L4_QPN,
 	MLX5DR_DEFINER_FNAME_IB_L4_A,
+	MLX5DR_DEFINER_FNAME_PTYPE_L2_O,
+	MLX5DR_DEFINER_FNAME_PTYPE_L2_I,
+	MLX5DR_DEFINER_FNAME_PTYPE_L3_O,
+	MLX5DR_DEFINER_FNAME_PTYPE_L3_I,
+	MLX5DR_DEFINER_FNAME_PTYPE_L4_O,
+	MLX5DR_DEFINER_FNAME_PTYPE_L4_I,
+	MLX5DR_DEFINER_FNAME_PTYPE_TUNNEL,
 	MLX5DR_DEFINER_FNAME_MAX,
 };
 
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 903ff66d72..98b267245c 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -233,6 +233,9 @@ enum mlx5_feature_name {
 /* IB BTH ITEM. */
 #define MLX5_FLOW_ITEM_IB_BTH (1ull << 51)
 
+/* PTYPE ITEM */
+#define MLX5_FLOW_ITEM_PTYPE (1ull << 52)
+
 /* NSH ITEM */
 #define MLX5_FLOW_ITEM_NSH (1ull << 53)
 
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 6fcf654e4a..34b3c9e6ad 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -5382,6 +5382,7 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
 		case RTE_FLOW_ITEM_TYPE_ESP:
 		case RTE_FLOW_ITEM_TYPE_FLEX:
 		case RTE_FLOW_ITEM_TYPE_IB_BTH:
+		case RTE_FLOW_ITEM_TYPE_PTYPE:
 			break;
 		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
 			/*