[v2] net/mlx5: improve validation of item order

Message ID 099aca2cedb8654adc85923e5497ff8df1003bc5.1568191352.git.jackmin@mellanox.com (mailing list archive)
State Accepted, archived
Delegated to: Raslan Darawsheh
Series [v2] net/mlx5: improve validation of item order

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK
ci/iol-dpdk_compile_ovs success Compile Testing PASS
ci/iol-dpdk_compile_spdk success Compile Testing PASS
ci/iol-dpdk_compile success Compile Testing PASS
ci/mellanox-Performance success Performance Testing PASS
ci/intel-Performance success Performance Testing PASS

Commit Message

Xiaoyu Min Sept. 11, 2019, 8:46 a.m. UTC
Item order validation between the L2 and L3 layers is missing, which leads
to the following invalid flow rule being accepted:

  testpmd> flow create 0 ingress pattern ipv4 / eth / end actions drop /
           end

Only the outer L3 layer should check whether an L2 layer precedes it,
because an inner L3 layer may directly follow the tunnel layer without
any L2 layer.

Meanwhile, an inner L2 layer should check that no inner L3 layer precedes
it.
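
For example (hypothetical rules, assuming a VXLAN-capable setup), the first
rule below should remain valid because the inner L3 item directly follows
the tunnel item, while the second should now be rejected because an inner
L2 item follows an inner L3 item:

  testpmd> flow create 0 ingress pattern eth / ipv4 / udp / vxlan / ipv4 /
           end actions drop / end
  testpmd> flow create 0 ingress pattern eth / ipv4 / udp / vxlan / ipv4 /
           eth / end actions drop / end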

Fixes: 23c1d42c7138 ("net/mlx5: split flow validation to dedicated function")
Cc: stable@dpdk.org

Signed-off-by: Xiaoyu Min <jackmin@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)
  

Comments

Slava Ovsiienko Oct. 8, 2019, 9:43 a.m. UTC | #1
> -----Original Message-----
> From: Xiaoyu Min <jackmin@mellanox.com>
> Sent: Wednesday, September 11, 2019 11:46
> To: Matan Azrad <matan@mellanox.com>; Shahaf Shuler
> <shahafs@mellanox.com>; Slava Ovsiienko <viacheslavo@mellanox.com>
> Cc: dev@dpdk.org; Ori Kam <orika@mellanox.com>; stable@dpdk.org
> Subject: [PATCH v2] net/mlx5: improve validation of item order
> 
> Item order validation between the L2 and L3 layers is missing, which leads
> to the following invalid flow rule being accepted:
> 
>   testpmd> flow create 0 ingress pattern ipv4 / eth / end actions drop /
>            end
> 
> Only the outer L3 layer should check whether an L2 layer precedes it,
> because an inner L3 layer may directly follow the tunnel layer without any
> L2 layer.
> 
> Meanwhile, an inner L2 layer should check that no inner L3 layer precedes it.
> 
> Fixes: 23c1d42c7138 ("net/mlx5: split flow validation to dedicated function")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Xiaoyu Min <jackmin@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>

  
Raslan Darawsheh Oct. 8, 2019, 11:53 a.m. UTC | #2
Hi,

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Xiaoyu Min
> Sent: Wednesday, September 11, 2019 11:46 AM
> To: Matan Azrad <matan@mellanox.com>; Shahaf Shuler
> <shahafs@mellanox.com>; Slava Ovsiienko <viacheslavo@mellanox.com>
> Cc: dev@dpdk.org; Ori Kam <orika@mellanox.com>; stable@dpdk.org
> Subject: [dpdk-dev] [PATCH v2] net/mlx5: improve validation of item order
> 
> Item order validation between the L2 and L3 layers is missing, which leads
> to the following invalid flow rule being accepted:
> 
>   testpmd> flow create 0 ingress pattern ipv4 / eth / end actions drop /
>            end
> 
> Only the outer L3 layer should check whether an L2 layer precedes it,
> because an inner L3 layer may directly follow the tunnel layer without
> any L2 layer.
> 
> Meanwhile, an inner L2 layer should check that no inner L3 layer precedes
> it.
> 
> Fixes: 23c1d42c7138 ("net/mlx5: split flow validation to dedicated function")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Xiaoyu Min <jackmin@mellanox.com>
> ---


Patch applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh
  

Patch

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index eb360525da..45bd9c8025 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1224,6 +1224,11 @@  mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "multiple L2 layers not supported");
+	if (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "inner L2 layer should not "
+					  "follow inner L3 layers");
 	if (!mask)
 		mask = &rte_flow_item_eth_mask;
 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
@@ -1270,6 +1275,8 @@  mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
 	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
 					MLX5_FLOW_LAYER_OUTER_VLAN;
 
+	const uint64_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+				      MLX5_FLOW_LAYER_OUTER_L2;
 	if (item_flags & vlanm)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -1278,6 +1285,10 @@  mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "L2 layer cannot follow L3/L4 layer");
+	else if ((item_flags & l2m) == 0)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "no L2 layer before VLAN");
 	if (!mask)
 		mask = &rte_flow_item_vlan_mask;
 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
@@ -1390,6 +1401,10 @@  mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "L3 cannot follow an NVGRE layer.");
+	else if (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L2))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "no L2 layer before IPV4");
 	if (!mask)
 		mask = &rte_flow_item_ipv4_mask;
 	else if (mask->hdr.next_proto_id != 0 &&
@@ -1481,6 +1496,10 @@  mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "L3 cannot follow an NVGRE layer.");
+	else if (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L2))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "no L2 layer before IPV6");
 	if (!mask)
 		mask = &rte_flow_item_ipv6_mask;
 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
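
For reference, here is a minimal sketch of how an application could exercise
the new checks through rte_flow_validate(). It is illustrative only and not
part of the patch: the helper name is made up, the port is assumed to be a
started mlx5 port, and error handling is trimmed.

#include <stdio.h>
#include <rte_flow.h>

/* Hypothetical helper (illustration only): validate the invalid
 * "ipv4 / eth" pattern from the commit message on the given port.
 */
static int
check_bad_item_order(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 }, /* L3 item with no L2 before it */
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },  /* L2 item following L3 */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error = { 0 };
	int ret;

	/* With this patch the mlx5 PMD is expected to return a negative value
	 * and fill error.message (e.g. "no L2 layer before IPV4") instead of
	 * silently accepting the rule.
	 */
	ret = rte_flow_validate(port_id, &attr, pattern, actions, &error);
	if (ret != 0)
		printf("validation failed: %s\n",
		       error.message ? error.message : "(no message)");
	return ret;
}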