[v5] net/i40e: fix flex payload rule conflict issue
Checks
Commit Message
From: Beilei Xing <beilei.xing@intel.com>
With the following commands, the second flow can't
be created successfully.
1. flow create 0 ingress pattern eth / ipv4 / udp /
raw relative is 1 pattern is 0102030405 / end
actions drop / end
2. flow destroy 0 rule 0
3. flow create 0 ingress pattern eth / ipv4 / udp /
raw relative is 1 pattern is 010203040506 / end
actions drop / end
The root cause is that a flag for flex pit isn't reset.
Fixes: 6ced3dd72f5f ("net/i40e: support flexible payload parsing for FDIR")
Cc: stable@dpdk.org
Reported-by: Chenmin Sun <chenmin.sun@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
v4 changes:
- Code refine.
v3 changes:
- Add flow count for flexible payload flow.
v2 changes:
- Add fix line.
drivers/net/i40e/i40e_ethdev.h | 3 +++
drivers/net/i40e/i40e_fdir.c | 19 ++++++++++++++++---
drivers/net/i40e/i40e_flow.c | 4 ++++
3 files changed, 23 insertions(+), 3 deletions(-)
Comments
Acked-by: Jeff Guo <jia.guo@intel.com>
> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Tuesday, January 5, 2021 11:13 AM
> To: Guo, Jia <jia.guo@intel.com>; dev@dpdk.org
> Cc: Xing, Beilei <beilei.xing@intel.com>; stable@dpdk.org; Sun, Chenmin
> <chenmin.sun@intel.com>
> Subject: [PATCH v5] net/i40e: fix flex payload rule conflict issue
>
> From: Beilei Xing <beilei.xing@intel.com>
>
> With the following commands, the second flow can't be created successfully.
>
> 1. flow create 0 ingress pattern eth / ipv4 / udp /
> raw relative is 1 pattern is 0102030405 / end
> actions drop / end
> 2. flow destroy 0 rule 0
> 3. flow create 0 ingress pattern eth / ipv4 / udp /
> raw relative is 1 pattern is 010203040506 / end
> actions drop / end
>
> The root cause is that a flag for flex pit isn't reset.
>
> Fixes: 6ced3dd72f5f ("net/i40e: support flexible payload parsing for FDIR")
> Cc: stable@dpdk.org
>
> Reported-by: Chenmin Sun<chenmin.sun@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
>
> v4 changes:
> - Code refine.
> v3 changes:
> - Add flow count for flexible payload flow.
> v2 changes:
> - Add fix line.
>
> drivers/net/i40e/i40e_ethdev.h | 3 +++
> drivers/net/i40e/i40e_fdir.c | 19 ++++++++++++++++---
> drivers/net/i40e/i40e_flow.c | 4 ++++
> 3 files changed, 23 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/net/i40e/i40e_ethdev.h
> b/drivers/net/i40e/i40e_ethdev.h index 696c5aaf7e..aac226999c 100644
> --- a/drivers/net/i40e/i40e_ethdev.h
> +++ b/drivers/net/i40e/i40e_ethdev.h
> @@ -636,6 +636,7 @@ struct i40e_fdir_flow_ext {
> bool is_udp; /* ipv4|ipv6 udp flow */
> enum i40e_flxpld_layer_idx layer_idx;
> struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_LAYER *
> I40E_MAX_FLXPLD_FIED];
> + bool is_flex_flow;
> };
>
> /* A structure used to define the input for a flow director filter entry */ @@
> -784,6 +785,8 @@ struct i40e_fdir_info {
> bool flex_mask_flag[I40E_FILTER_PCTYPE_MAX];
>
> bool inset_flag[I40E_FILTER_PCTYPE_MAX]; /* Mark if input set is set
> */
> +
> + uint32_t flex_flow_count[I40E_MAX_FLXPLD_LAYER];
> };
>
> /* Ethertype filter number HW supports */ diff --git
> a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c index
> 50c0eee9f2..0343e8b09a 100644
> --- a/drivers/net/i40e/i40e_fdir.c
> +++ b/drivers/net/i40e/i40e_fdir.c
> @@ -355,6 +355,7 @@ i40e_init_flx_pld(struct i40e_pf *pf)
> I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29);/*non-
> used*/
> I40E_WRITE_REG(hw,
> I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A);/*non-
> used*/
> + pf->fdir.flex_pit_flag[i] = 0;
> }
>
> /* initialize the masks */
> @@ -1513,8 +1514,6 @@ i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
> I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx),
> flx_pit);
> min_next_off++;
> }
> -
> - pf->fdir.flex_pit_flag[layer_idx] = 1;
> }
>
> static int
> @@ -1686,7 +1685,7 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev
> *dev,
> i40e_fdir_filter_convert(filter, &check_filter);
>
> if (add) {
> - if (!filter->input.flow_ext.customized_pctype) {
> + if (filter->input.flow_ext.is_flex_flow) {
> for (i = 0; i < filter->input.flow_ext.raw_id; i++) {
> layer_idx = filter->input.flow_ext.layer_idx;
> field_idx = layer_idx *
> I40E_MAX_FLXPLD_FIED + i; @@ -1738,6 +1737,9 @@
> i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
> fdir_info->fdir_guarantee_free_space > 0)
> wait_status = false;
> } else {
> + if (filter->input.flow_ext.is_flex_flow)
> + layer_idx = filter->input.flow_ext.layer_idx;
> +
> node = i40e_sw_fdir_filter_lookup(fdir_info,
> &check_filter.fdir.input);
> if (!node) {
> @@ -1785,6 +1787,17 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev
> *dev,
> goto error_op;
> }
>
> + if (filter->input.flow_ext.is_flex_flow) {
> + if (add) {
> + fdir_info->flex_flow_count[layer_idx]++;
> + pf->fdir.flex_pit_flag[layer_idx] = 1;
> + } else {
> + fdir_info->flex_flow_count[layer_idx]--;
> + if (!fdir_info->flex_flow_count[layer_idx])
> + pf->fdir.flex_pit_flag[layer_idx] = 0;
> + }
> + }
> +
> if (add) {
> fdir_info->fdir_actual_cnt++;
> if (fdir_info->fdir_invalprio == 1 && diff --git
> a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c index
> b09ff6590d..bbd666b7a0 100644
> --- a/drivers/net/i40e/i40e_flow.c
> +++ b/drivers/net/i40e/i40e_flow.c
> @@ -3069,6 +3069,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev
> *dev,
> &flex_pit, sizeof(struct i40e_fdir_flex_pit));
> filter->input.flow_ext.layer_idx = layer_idx;
> filter->input.flow_ext.raw_id = raw_id;
> + filter->input.flow_ext.is_flex_flow = true;
> break;
> case RTE_FLOW_ITEM_TYPE_VF:
> vf_spec = item->spec;
> @@ -5515,6 +5516,9 @@ i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
> pf->fdir.flex_mask_flag[pctype] = 0;
> }
>
> + for (i = 0; i < I40E_MAX_FLXPLD_LAYER; i++)
> + pf->fdir.flex_pit_flag[i] = 0;
> +
> /* Disable FDIR processing as all FDIR rules are now flushed
> */
> i40e_fdir_rx_proc_enable(dev, 0);
> }
> --
> 2.26.2
> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Guo, Jia
> Sent: Tuesday, January 5, 2021 11:40 AM
> To: Xing, Beilei <beilei.xing@intel.com>; dev@dpdk.org
> Cc: stable@dpdk.org; Sun, Chenmin <chenmin.sun@intel.com>
> Subject: Re: [dpdk-dev] [PATCH v5] net/i40e: fix flex payload rule conflict issue
>
> Acked-by: Jeff Guo <jia.guo@intel.com>
>
> > -----Original Message-----
> > From: Xing, Beilei <beilei.xing@intel.com>
> > Sent: Tuesday, January 5, 2021 11:13 AM
> > To: Guo, Jia <jia.guo@intel.com>; dev@dpdk.org
> > Cc: Xing, Beilei <beilei.xing@intel.com>; stable@dpdk.org; Sun,
> > Chenmin <chenmin.sun@intel.com>
> > Subject: [PATCH v5] net/i40e: fix flex payload rule conflict issue
> >
> > From: Beilei Xing <beilei.xing@intel.com>
> >
> > With the following commands, the second flow can't be created successfully.
> >
> > 1. flow create 0 ingress pattern eth / ipv4 / udp /
> > raw relative is 1 pattern is 0102030405 / end
> > actions drop / end
> > 2. flow destroy 0 rule 0
> > 3. flow create 0 ingress pattern eth / ipv4 / udp /
> > raw relative is 1 pattern is 010203040506 / end
> > actions drop / end
> >
> > The root cause is that a flag for flex pit isn't reset.
> >
> > Fixes: 6ced3dd72f5f ("net/i40e: support flexible payload parsing for
> > FDIR")
> > Cc: stable@dpdk.org
> >
> > Reported-by: Chenmin Sun<chenmin.sun@intel.com>
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Applied to dpdk-next-net-intel.
Thanks
Qi
@@ -636,6 +636,7 @@ struct i40e_fdir_flow_ext {
bool is_udp; /* ipv4|ipv6 udp flow */
enum i40e_flxpld_layer_idx layer_idx;
struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_LAYER * I40E_MAX_FLXPLD_FIED];
+ bool is_flex_flow;
};
/* A structure used to define the input for a flow director filter entry */
@@ -784,6 +785,8 @@ struct i40e_fdir_info {
bool flex_mask_flag[I40E_FILTER_PCTYPE_MAX];
bool inset_flag[I40E_FILTER_PCTYPE_MAX]; /* Mark if input set is set */
+
+ uint32_t flex_flow_count[I40E_MAX_FLXPLD_LAYER];
};
/* Ethertype filter number HW supports */
@@ -355,6 +355,7 @@ i40e_init_flx_pld(struct i40e_pf *pf)
I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29);/*non-used*/
I40E_WRITE_REG(hw,
I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A);/*non-used*/
+ pf->fdir.flex_pit_flag[i] = 0;
}
/* initialize the masks */
@@ -1513,8 +1514,6 @@ i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
min_next_off++;
}
-
- pf->fdir.flex_pit_flag[layer_idx] = 1;
}
static int
@@ -1686,7 +1685,7 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
i40e_fdir_filter_convert(filter, &check_filter);
if (add) {
- if (!filter->input.flow_ext.customized_pctype) {
+ if (filter->input.flow_ext.is_flex_flow) {
for (i = 0; i < filter->input.flow_ext.raw_id; i++) {
layer_idx = filter->input.flow_ext.layer_idx;
field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
@@ -1738,6 +1737,9 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
fdir_info->fdir_guarantee_free_space > 0)
wait_status = false;
} else {
+ if (filter->input.flow_ext.is_flex_flow)
+ layer_idx = filter->input.flow_ext.layer_idx;
+
node = i40e_sw_fdir_filter_lookup(fdir_info,
&check_filter.fdir.input);
if (!node) {
@@ -1785,6 +1787,17 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
goto error_op;
}
+ if (filter->input.flow_ext.is_flex_flow) {
+ if (add) {
+ fdir_info->flex_flow_count[layer_idx]++;
+ pf->fdir.flex_pit_flag[layer_idx] = 1;
+ } else {
+ fdir_info->flex_flow_count[layer_idx]--;
+ if (!fdir_info->flex_flow_count[layer_idx])
+ pf->fdir.flex_pit_flag[layer_idx] = 0;
+ }
+ }
+
if (add) {
fdir_info->fdir_actual_cnt++;
if (fdir_info->fdir_invalprio == 1 &&
@@ -3069,6 +3069,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
&flex_pit, sizeof(struct i40e_fdir_flex_pit));
filter->input.flow_ext.layer_idx = layer_idx;
filter->input.flow_ext.raw_id = raw_id;
+ filter->input.flow_ext.is_flex_flow = true;
break;
case RTE_FLOW_ITEM_TYPE_VF:
vf_spec = item->spec;
@@ -5515,6 +5516,9 @@ i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
pf->fdir.flex_mask_flag[pctype] = 0;
}
+ for (i = 0; i < I40E_MAX_FLXPLD_LAYER; i++)
+ pf->fdir.flex_pit_flag[i] = 0;
+
/* Disable FDIR processing as all FDIR rules are now flushed */
i40e_fdir_rx_proc_enable(dev, 0);
}