[v2,1/9] net/mlx5: update flex parser arc types support

Message ID: 20240918134623.8441-2-viacheslavo@nvidia.com
State: Awaiting Upstream
Delegated to: Raslan Darawsheh
Series: net/mlx5: cumulative fix series for flex item

Checks

Context        Check     Description
ci/checkpatch  success   coding style OK

Commit Message

Viacheslav Ovsiienko Sept. 18, 2024, 1:46 p.m. UTC
Add support for IPv4 input and ESP output flex parser arcs.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_flex.c | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)
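
For context, a minimal sketch of how an application could exercise the two new arcs through the generic rte_flow flex item API, assuming a custom header carried over IPv4 as protocol 253 and followed by ESP. The protocol numbers, the function name, and the omission of the fields describing the custom header itself (next_header, next_protocol, sample_data) are illustrative assumptions, not part of this patch. Note that the IPv4 input-link mask must be exactly a full mask on next_proto_id, as enforced by the mlx5_flex_arc_in_ipv4() check added below.

/*
 * Illustrative sketch (not part of the patch): create a flex item whose
 * input link is IPv4, matched by IP protocol number, and whose output
 * link is ESP. Values 253 and 50 are assumptions for illustration.
 */
#include <rte_common.h>
#include <rte_flow.h>

static struct rte_flow_item_ipv4 ipv4_spec = {
	.hdr.next_proto_id = 253, /* custom header carried as IP protocol 253 */
};
/* The PMD accepts only a full mask on next_proto_id, all other fields zero. */
static struct rte_flow_item_ipv4 ipv4_mask = {
	.hdr.next_proto_id = 0xff,
};

static struct rte_flow_item_flex_link input_links[] = {
	{
		.item = {
			.type = RTE_FLOW_ITEM_TYPE_IPV4,
			.spec = &ipv4_spec,
			.mask = &ipv4_mask,
		},
	},
};

static struct rte_flow_item_flex_link output_links[] = {
	{
		.item = { .type = RTE_FLOW_ITEM_TYPE_ESP },
		/* Value of the flex header's next-protocol field selecting ESP. */
		.next = 50,
	},
};

static struct rte_flow_item_flex_handle *
create_custom_flex_item(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_item_flex_conf conf = {
		.tunnel = FLEX_TUNNEL_MODE_SINGLE,
		/* next_header/next_protocol/sample_data omitted for brevity;
		 * they describe the layout of the custom header itself. */
		.input_link = input_links,
		.nb_inputs = RTE_DIM(input_links),
		.output_link = output_links,
		.nb_outputs = RTE_DIM(output_links),
	};

	return rte_flow_flex_item_create(port_id, &conf, error);
}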
  

Comments

Dariusz Sosnowski Sept. 18, 2024, 1:57 p.m. UTC | #1
> -----Original Message-----
> From: Slava Ovsiienko <viacheslavo@nvidia.com>
> Sent: Wednesday, September 18, 2024 15:46
> To: dev@dpdk.org
> Cc: Matan Azrad <matan@nvidia.com>; Raslan Darawsheh
> <rasland@nvidia.com>; Ori Kam <orika@nvidia.com>; Dariusz Sosnowski
> <dsosnowski@nvidia.com>
> Subject: [PATCH v2 1/9] net/mlx5: update flex parser arc types support
> 
> Add support for IPv4 input and ESP output flex parser arcs.
> 
> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> ---
>  drivers/net/mlx5/mlx5_flow_flex.c | 21 +++++++++++++++++++++
>  1 file changed, 21 insertions(+)

Acked-by: Dariusz Sosnowski <dsosnowski@nvidia.com>

Resending the Ack for each patch separately, because patchwork assigned my Ack for the series to v1, not v2.

Best regards,
Dariusz Sosnowski
  

Patch

diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index 8a02247406..5b104d583c 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -1111,6 +1111,8 @@  mlx5_flex_arc_type(enum rte_flow_item_type type, int in)
 		return MLX5_GRAPH_ARC_NODE_GENEVE;
 	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
 		return MLX5_GRAPH_ARC_NODE_VXLAN_GPE;
+	case RTE_FLOW_ITEM_TYPE_ESP:
+		return MLX5_GRAPH_ARC_NODE_IPSEC_ESP;
 	default:
 		return -EINVAL;
 	}
@@ -1148,6 +1150,22 @@  mlx5_flex_arc_in_udp(const struct rte_flow_item *item,
 	return rte_be_to_cpu_16(spec->hdr.dst_port);
 }
 
+static int
+mlx5_flex_arc_in_ipv4(const struct rte_flow_item *item,
+		      struct rte_flow_error *error)
+{
+	const struct rte_flow_item_ipv4 *spec = item->spec;
+	const struct rte_flow_item_ipv4 *mask = item->mask;
+	struct rte_flow_item_ipv4 ip = { .hdr.next_proto_id = 0xff };
+
+	if (memcmp(mask, &ip, sizeof(struct rte_flow_item_ipv4))) {
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
+			 "invalid ipv4 item mask, full mask is desired");
+	}
+	return spec->hdr.next_proto_id;
+}
+
 static int
 mlx5_flex_arc_in_ipv6(const struct rte_flow_item *item,
 		      struct rte_flow_error *error)
@@ -1210,6 +1228,9 @@  mlx5_flex_translate_arc_in(struct mlx5_hca_flex_attr *attr,
 		case RTE_FLOW_ITEM_TYPE_UDP:
 			ret = mlx5_flex_arc_in_udp(rte_item, error);
 			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ret = mlx5_flex_arc_in_ipv4(rte_item, error);
+			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
 			ret = mlx5_flex_arc_in_ipv6(rte_item, error);
 			break;
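
For reference, a sketch of a flow rule that would traverse both new arcs once the flex item above exists: IPv4 precedes the custom flex header and ESP follows it. The helper name, the queue action, and the flex_handle argument are assumptions for illustration only.

#include <rte_errno.h>
#include <rte_flow.h>

/* Illustrative only: match eth / ipv4 / <custom flex header> / esp. */
static int
create_esp_over_flex_rule(uint16_t port_id,
			  struct rte_flow_item_flex_handle *flex_handle)
{
	struct rte_flow_error error;
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_flex flex_spec = { .handle = flex_handle };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_FLEX, .spec = &flex_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_ESP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* The PMD resolves the FLEX item via the handle created earlier. */
	return rte_flow_create(port_id, &attr, pattern, actions, &error) ?
	       0 : -rte_errno;
}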