[v3,3/3] net/mlx5/hws: add ICMPv6 ID and sequence match support

Message ID 20230205134154.408984-4-yongquanx@nvidia.com (mailing list archive)
State Accepted, archived
Delegated to: Ferruh Yigit
Headers
Series support match icmpv6 ID and sequence |

Checks

Context Check Description
ci/checkpatch warning coding style issues
ci/loongarch-compilation success Compilation OK
ci/loongarch-unit-testing success Unit Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/github-robot: build success github build: passed
ci/iol-aarch64-unit-testing success Testing PASS
ci/iol-testing success Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS

Commit Message

Leo Xu Feb. 5, 2023, 1:41 p.m. UTC
  This patch adds ICMPv6 ID and sequence match support for HWS.
Since the type and code of an ICMPv6 echo are already specified by the ITEM type:
  RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST
  RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY
the mlx5 PMD will set the appropriate type and code automatically:
  Echo request: type(128), code(0)
  Echo reply:   type(129), code(0)
The type and code provided by the application will be ignored.

Signed-off-by: Leo Xu <yongquanx@nvidia.com>
---
 drivers/net/mlx5/hws/mlx5dr_definer.c | 88 +++++++++++++++++++++++++++
 1 file changed, 88 insertions(+)
  

Comments

Alex Vesker Feb. 7, 2023, 1:05 p.m. UTC | #1
Hi,

> -----Original Message-----
> From: Leo Xu <yongquanx@nvidia.com>
> Sent: Sunday, 5 February 2023 15:42
> To: dev@dpdk.org
> Cc: Matan Azrad <matan@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>
> Subject: [PATCH v3 3/3] net/mlx5/hws: add ICMPv6 ID and sequence match
> support
> 
> This patch adds ICMPv6 ID and sequence match support for HWS.
> Since type and code of ICMPv6 echo is already specified by ITEM type:
>   RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST
>   RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY
> mlx5 pmd will set appropriate type and code automatically:
>   Echo request: type(128), code(0)
>   Echo reply:   type(129), code(0)
> type and code provided by application will be ignored
> 
> Signed-off-by: Leo Xu <yongquanx@nvidia.com>
> ---
>  drivers/net/mlx5/hws/mlx5dr_definer.c | 88 +++++++++++++++++++++++++++
>  1 file changed, 88 insertions(+)
> 
> diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
> index 6b98eb8c96..d56e85631d 100644
> --- a/drivers/net/mlx5/hws/mlx5dr_definer.c
> +++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
> @@ -368,6 +368,47 @@ mlx5dr_definer_icmp6_dw1_set(struct mlx5dr_definer_fc *fc,
>  	DR_SET(tag, icmp_dw1, fc->byte_off, fc->bit_off, fc->bit_mask);
>  }
> 
> +static void
> +mlx5dr_definer_icmp6_echo_dw1_mask_set(struct mlx5dr_definer_fc *fc,
> +				       __rte_unused const void *item_spec,
> +				       uint8_t *tag)
> +{
> +	const struct rte_flow_item_icmp6 spec = {0xFF, 0xFF, 0x0};
> +	mlx5dr_definer_icmp6_dw1_set(fc, &spec, tag);
> +}
> +
> +static void
> +mlx5dr_definer_icmp6_echo_request_dw1_set(struct mlx5dr_definer_fc *fc,
> +					  __rte_unused const void *item_spec,
> +					  uint8_t *tag)
> +{
> +	const struct rte_flow_item_icmp6 spec = {RTE_ICMP6_ECHO_REQUEST, 0, 0};
> +	mlx5dr_definer_icmp6_dw1_set(fc, &spec, tag);
> +}
> +
> +static void
> +mlx5dr_definer_icmp6_echo_reply_dw1_set(struct mlx5dr_definer_fc *fc,
> +					__rte_unused const void *item_spec,
> +					uint8_t *tag)
> +{
> +	const struct rte_flow_item_icmp6 spec = {RTE_ICMP6_ECHO_REPLY, 0, 0};
> +	mlx5dr_definer_icmp6_dw1_set(fc, &spec, tag);
> +}
> +
> +static void
> +mlx5dr_definer_icmp6_echo_dw2_set(struct mlx5dr_definer_fc *fc,
> +				  const void *item_spec,
> +				  uint8_t *tag)
> +{
> +	const struct rte_flow_item_icmp6_echo *v = item_spec;
> +	rte_be32_t dw2;
> +
> +	dw2 = (rte_be_to_cpu_16(v->hdr.identifier) << __mlx5_dw_bit_off(header_icmp, ident)) |
> +	      (rte_be_to_cpu_16(v->hdr.sequence) << __mlx5_dw_bit_off(header_icmp, seq_nb));
> +
> +	DR_SET(tag, dw2, fc->byte_off, fc->bit_off, fc->bit_mask);
> +}
> +
>  static void
>  mlx5dr_definer_ipv6_flow_label_set(struct mlx5dr_definer_fc *fc,
>  				   const void *item_spec,
> @@ -1441,6 +1482,48 @@ mlx5dr_definer_conv_item_icmp6(struct mlx5dr_definer_conv_data *cd,
>  	return 0;
>  }
> 
> +static int
> +mlx5dr_definer_conv_item_icmp6_echo(struct mlx5dr_definer_conv_data *cd,
> +				    struct rte_flow_item *item,
> +				    int item_idx)
> +{
> +	const struct rte_flow_item_icmp6_echo *m = item->mask;
> +	struct mlx5dr_definer_fc *fc;
> +	bool inner = cd->tunnel;
> +
> +	if (!cd->relaxed) {
> +		/* Overwrite match on L4 type ICMP6 */
> +		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
> +		fc->item_idx = item_idx;
> +		fc->tag_set = &mlx5dr_definer_icmp_protocol_set;
> +		fc->tag_mask_set = &mlx5dr_definer_ones_set;
> +		DR_CALC_SET(fc, eth_l2, l4_type, inner);
> +
> +		/* Set fixed type and code for icmp6 echo request/reply */
> +		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW1];
> +		fc->item_idx = item_idx;
> +		fc->tag_mask_set = &mlx5dr_definer_icmp6_echo_dw1_mask_set;
> +		if (item->type == RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST)
> +			fc->tag_set = &mlx5dr_definer_icmp6_echo_request_dw1_set;
> +		else /* RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY */
> +			fc->tag_set = &mlx5dr_definer_icmp6_echo_reply_dw1_set;
> +		DR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw1);
> +	}
> +
> +	if (!m)
> +		return 0;
> +
> +	/* Set identifier & sequence into icmp_dw2 */
> +	if (m->hdr.identifier || m->hdr.sequence) {
> +		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW2];
> +		fc->item_idx = item_idx;
> +		fc->tag_set = &mlx5dr_definer_icmp6_echo_dw2_set;
> +		DR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw2);
> +	}
> +
> +	return 0;
> +}
> +
>  static int
>  mlx5dr_definer_conv_item_meter_color(struct mlx5dr_definer_conv_data *cd,
>  			     struct rte_flow_item *item,
> @@ -1577,6 +1660,11 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
>  			ret = mlx5dr_definer_conv_item_icmp6(&cd, items, i);
>  			item_flags |= MLX5_FLOW_LAYER_ICMP6;
>  			break;
> +		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST:
> +		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY:
> +			ret = mlx5dr_definer_conv_item_icmp6_echo(&cd, items, i);
> +			item_flags |= MLX5_FLOW_LAYER_ICMP6;
> +			break;
>  		case RTE_FLOW_ITEM_TYPE_METER_COLOR:
> +			ret = mlx5dr_definer_conv_item_meter_color(&cd, items, i);
>  			item_flags |= MLX5_FLOW_ITEM_METER_COLOR;
> --
> 2.27.0

Acked-by: Alex Vesker <valex@nvidia.com>
  
Slava Ovsiienko Feb. 7, 2023, 1:49 p.m. UTC | #2
> -----Original Message-----
> From: Leo Xu (Networking SW) <yongquanx@nvidia.com>
> Sent: Sunday, February 5, 2023 3:42 PM
> To: dev@dpdk.org
> Cc: Matan Azrad <matan@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>
> Subject: [PATCH v3 3/3] net/mlx5/hws: add ICMPv6 ID and sequence match
> support
> 
> This patch adds ICMPv6 ID and sequence match support for HWS.
> Since type and code of ICMPv6 echo is already specified by ITEM type:
>   RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST
>   RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY
> mlx5 pmd will set appropriate type and code automatically:
>   Echo request: type(128), code(0)
>   Echo reply:   type(129), code(0)
> type and code provided by application will be ignored
> 
> Signed-off-by: Leo Xu <yongquanx@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
  

Patch

diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 6b98eb8c96..d56e85631d 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -368,6 +368,47 @@  mlx5dr_definer_icmp6_dw1_set(struct mlx5dr_definer_fc *fc,
 	DR_SET(tag, icmp_dw1, fc->byte_off, fc->bit_off, fc->bit_mask);
 }
 
+static void
+mlx5dr_definer_icmp6_echo_dw1_mask_set(struct mlx5dr_definer_fc *fc,
+				       __rte_unused const void *item_spec,
+				       uint8_t *tag)
+{
+	const struct rte_flow_item_icmp6 spec = {0xFF, 0xFF, 0x0};
+	mlx5dr_definer_icmp6_dw1_set(fc, &spec, tag);
+}
+
+static void
+mlx5dr_definer_icmp6_echo_request_dw1_set(struct mlx5dr_definer_fc *fc,
+					  __rte_unused const void *item_spec,
+					  uint8_t *tag)
+{
+	const struct rte_flow_item_icmp6 spec = {RTE_ICMP6_ECHO_REQUEST, 0, 0};
+	mlx5dr_definer_icmp6_dw1_set(fc, &spec, tag);
+}
+
+static void
+mlx5dr_definer_icmp6_echo_reply_dw1_set(struct mlx5dr_definer_fc *fc,
+					__rte_unused const void *item_spec,
+					uint8_t *tag)
+{
+	const struct rte_flow_item_icmp6 spec = {RTE_ICMP6_ECHO_REPLY, 0, 0};
+	mlx5dr_definer_icmp6_dw1_set(fc, &spec, tag);
+}
+
+static void
+mlx5dr_definer_icmp6_echo_dw2_set(struct mlx5dr_definer_fc *fc,
+				  const void *item_spec,
+				  uint8_t *tag)
+{
+	const struct rte_flow_item_icmp6_echo *v = item_spec;
+	rte_be32_t dw2;
+
+	dw2 = (rte_be_to_cpu_16(v->hdr.identifier) << __mlx5_dw_bit_off(header_icmp, ident)) |
+	      (rte_be_to_cpu_16(v->hdr.sequence) << __mlx5_dw_bit_off(header_icmp, seq_nb));
+
+	DR_SET(tag, dw2, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
 static void
 mlx5dr_definer_ipv6_flow_label_set(struct mlx5dr_definer_fc *fc,
 				   const void *item_spec,
@@ -1441,6 +1482,48 @@  mlx5dr_definer_conv_item_icmp6(struct mlx5dr_definer_conv_data *cd,
 	return 0;
 }
 
+static int
+mlx5dr_definer_conv_item_icmp6_echo(struct mlx5dr_definer_conv_data *cd,
+				    struct rte_flow_item *item,
+				    int item_idx)
+{
+	const struct rte_flow_item_icmp6_echo *m = item->mask;
+	struct mlx5dr_definer_fc *fc;
+	bool inner = cd->tunnel;
+
+	if (!cd->relaxed) {
+		/* Overwrite match on L4 type ICMP6 */
+		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_icmp_protocol_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, l4_type, inner);
+
+		/* Set fixed type and code for icmp6 echo request/reply */
+		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW1];
+		fc->item_idx = item_idx;
+		fc->tag_mask_set = &mlx5dr_definer_icmp6_echo_dw1_mask_set;
+		if (item->type == RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST)
+			fc->tag_set = &mlx5dr_definer_icmp6_echo_request_dw1_set;
+		else /* RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY */
+			fc->tag_set = &mlx5dr_definer_icmp6_echo_reply_dw1_set;
+		DR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw1);
+	}
+
+	if (!m)
+		return 0;
+
+	/* Set identifier & sequence into icmp_dw2 */
+	if (m->hdr.identifier || m->hdr.sequence) {
+		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW2];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_icmp6_echo_dw2_set;
+		DR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw2);
+	}
+
+	return 0;
+}
+
 static int
 mlx5dr_definer_conv_item_meter_color(struct mlx5dr_definer_conv_data *cd,
 			     struct rte_flow_item *item,
@@ -1577,6 +1660,11 @@  mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 			ret = mlx5dr_definer_conv_item_icmp6(&cd, items, i);
 			item_flags |= MLX5_FLOW_LAYER_ICMP6;
 			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST:
+		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY:
+			ret = mlx5dr_definer_conv_item_icmp6_echo(&cd, items, i);
+			item_flags |= MLX5_FLOW_LAYER_ICMP6;
+			break;
 		case RTE_FLOW_ITEM_TYPE_METER_COLOR:
 			ret = mlx5dr_definer_conv_item_meter_color(&cd, items, i);
 			item_flags |= MLX5_FLOW_ITEM_METER_COLOR;