[v2,19/20] net/mlx5: add flow MPLS item
Checks
Commit Message
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
drivers/net/mlx5/mlx5_flow.c | 77 +++++++++++++++++++++++++++++++++++-
1 file changed, 76 insertions(+), 1 deletion(-)
Comments
On Wed, Jun 27, 2018 at 05:07:51PM +0200, Nelio Laranjeiro wrote:
> Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
> ---
> drivers/net/mlx5/mlx5_flow.c | 77 +++++++++++++++++++++++++++++++++++-
> 1 file changed, 76 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
> index 636aaabe8..7aa4e6ed5 100644
> --- a/drivers/net/mlx5/mlx5_flow.c
> +++ b/drivers/net/mlx5/mlx5_flow.c
> @@ -55,6 +55,7 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;
> #define MLX5_FLOW_LAYER_VXLAN (1u << 12)
> #define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
> #define MLX5_FLOW_LAYER_GRE (1u << 14)
> +#define MLX5_FLOW_LAYER_MPLS (1u << 15)
>
> /* Outer Masks. */
> #define MLX5_FLOW_LAYER_OUTER_L3 \
> @@ -68,7 +69,7 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;
> /* Tunnel masks. */
> #define MLX5_FLOW_LAYER_TUNNEL \
> (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
> - MLX5_FLOW_LAYER_GRE)
> + MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_MPLS)
>
> /* Inner Masks. */
> #define MLX5_FLOW_LAYER_INNER_L3 \
> @@ -1589,6 +1590,77 @@ mlx5_flow_item_gre(const struct rte_flow_item *item,
> return size;
> }
>
> +/**
> + * Validate MPLS layer and possibly create the Verbs specification.
> + *
> + * @param dev
> + * Pointer to Ethernet device.
> + * @param item[in]
> + * Item specification.
> + * @param flow[in, out]
> + * Pointer to flow structure.
> + * @param flow_size[in]
> + * Size in bytes of the available space for to store the flow information.
> + * @param error
> + * Pointer to error structure.
> + *
> + * @return
> + * size in bytes necessary for the conversion, a negative errno value
> + * otherwise and rte_errno is set.
> + */
> +static int
> +mlx5_flow_item_mpls(const struct rte_flow_item *item __rte_unused,
> + struct rte_flow *flow __rte_unused,
> + const size_t flow_size __rte_unused,
> + struct rte_flow_error *error)
> +{
> +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
> + const struct rte_flow_item_mpls *spec = item->spec;
> + const struct rte_flow_item_mpls *mask = item->mask;
> + unsigned int size = sizeof(struct ibv_flow_spec_mpls);
> + struct ibv_flow_spec_mpls mpls = {
> + .type = IBV_FLOW_SPEC_MPLS,
> + .size = size,
> + };
> + const uint32_t layers = mlx5_flow_layers(flow);
> + int ret;
> +
> + if (layers & MLX5_FLOW_LAYER_TUNNEL)
Just curious, no need to check flow->expand?
> + return rte_flow_error_set(error, ENOTSUP,
> + RTE_FLOW_ERROR_TYPE_ITEM,
> + item,
> + "a tunnel is already"
> + " present");
> + if (!mask)
> + mask = &rte_flow_item_mpls_mask;
> + ret = mlx5_flow_item_validate
> + (item, (const uint8_t *)mask,
> + (const uint8_t *)&rte_flow_item_mpls_mask,
> + sizeof(struct rte_flow_item_mpls), error);
> + if (ret < 0)
> + return ret;
> + if (spec) {
> + memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
> + memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
> + /* Remove unwanted bits from values. */
> + mpls.val.label &= mpls.mask.label;
> + }
> + if (size <= flow_size)
Is it guaranteed flow->cur_verbs isn't null if size fits? Could be obvious but
just want to make sure.
> + mlx5_flow_spec_verbs_add(flow, &mpls, size);
> + mlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_MPLS);
> + if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
> + flow->ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP;
> + else
> + flow->ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE;
> + return size;
> +#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
> + return rte_flow_error_set(error, ENOTSUP,
> + RTE_FLOW_ERROR_TYPE_ITEM,
> + item,
> + "MPLS is not supported by Verbs, please"
> + " update.");
> +}
> +
> /**
> * Validate items provided by the user.
> *
> @@ -1650,6 +1722,9 @@ mlx5_flow_items(struct rte_eth_dev *dev,
> case RTE_FLOW_ITEM_TYPE_GRE:
> ret = mlx5_flow_item_gre(items, flow, remain, error);
> break;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
> + case RTE_FLOW_ITEM_TYPE_MPLS:
> + ret = mlx5_flow_item_mpls(items, flow, remain, error);
> + break;
#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
How about this?
> default:
> return rte_flow_error_set(error, ENOTSUP,
> RTE_FLOW_ERROR_TYPE_ITEM,
> --
> 2.18.0
>
On Fri, Jul 06, 2018 at 05:11:31PM -0700, Yongseok Koh wrote:
> On Wed, Jun 27, 2018 at 05:07:51PM +0200, Nelio Laranjeiro wrote:
> > Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
> > ---
>[...]
> > + if (spec) {
> > + memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
> > + memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
> > + /* Remove unwanted bits from values. */
> > + mpls.val.label &= mpls.mask.label;
> > + }
> > + if (size <= flow_size)
>
> Is it guaranteed flow->cur_verbs isn't null if size fits? Could be obvious but
> just want to make sure.
Yes it is.
> > + mlx5_flow_spec_verbs_add(flow, &mpls, size);
> > + mlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_MPLS);
> > + if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
> > + flow->ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP;
> > + else
> > + flow->ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE;
> > + return size;
> > +#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
> > + return rte_flow_error_set(error, ENOTSUP,
> > + RTE_FLOW_ERROR_TYPE_ITEM,
> > + item,
> > + "MPLS is not supported by Verbs, please"
> > + " update.");
> > +}
> > +
> > /**
> > * Validate items provided by the user.
> > *
> > @@ -1650,6 +1722,9 @@ mlx5_flow_items(struct rte_eth_dev *dev,
> > case RTE_FLOW_ITEM_TYPE_GRE:
> > ret = mlx5_flow_item_gre(items, flow, remain, error);
> > break;
>
> #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
>
> > + case RTE_FLOW_ITEM_TYPE_MPLS:
> > + ret = mlx5_flow_item_mpls(items, flow, remain, error);
> > + break;
>
> #endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
>
> How about this?
>[...]
It adds another couple of #ifdef #endif and the final output won't help
the user much; having an error "MPLS is not supported by Verbs, please
update" will help more than "item not supported".
Regards,
@@ -55,6 +55,7 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;
#define MLX5_FLOW_LAYER_VXLAN (1u << 12)
#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
#define MLX5_FLOW_LAYER_GRE (1u << 14)
+#define MLX5_FLOW_LAYER_MPLS (1u << 15)
/* Outer Masks. */
#define MLX5_FLOW_LAYER_OUTER_L3 \
@@ -68,7 +69,7 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;
/* Tunnel masks. */
#define MLX5_FLOW_LAYER_TUNNEL \
(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
- MLX5_FLOW_LAYER_GRE)
+ MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_MPLS)
/* Inner Masks. */
#define MLX5_FLOW_LAYER_INNER_L3 \
@@ -1589,6 +1590,77 @@ mlx5_flow_item_gre(const struct rte_flow_item *item,
return size;
}
+/**
+ * Validate MPLS layer and possibly create the Verbs specification.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space to store the flow information.
+ * @param error
+ * Pointer to error structure.
+ *
+ * @return
+ * size in bytes necessary for the conversion, a negative errno value
+ * otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_item_mpls(const struct rte_flow_item *item __rte_unused,
+ struct rte_flow *flow __rte_unused,
+ const size_t flow_size __rte_unused,
+ struct rte_flow_error *error)
+{
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ const struct rte_flow_item_mpls *spec = item->spec;
+ const struct rte_flow_item_mpls *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_mpls);
+ struct ibv_flow_spec_mpls mpls = {
+ .type = IBV_FLOW_SPEC_MPLS,
+ .size = size,
+ };
+ const uint32_t layers = mlx5_flow_layers(flow);
+ int ret;
+
+ if (layers & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "a tunnel is already"
+ " present");
+ if (!mask)
+ mask = &rte_flow_item_mpls_mask;
+ ret = mlx5_flow_item_validate
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_mpls_mask,
+ sizeof(struct rte_flow_item_mpls), error);
+ if (ret < 0)
+ return ret;
+ if (spec) {
+ memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
+ memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
+ /* Remove unwanted bits from values. */
+ mpls.val.label &= mpls.mask.label;
+ }
+ if (size <= flow_size)
+ mlx5_flow_spec_verbs_add(flow, &mpls, size);
+ mlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_MPLS);
+ if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
+ flow->ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP;
+ else
+ flow->ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE;
+ return size;
+#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "MPLS is not supported by Verbs, please"
+ " update.");
+}
+
/**
* Validate items provided by the user.
*
@@ -1650,6 +1722,9 @@ mlx5_flow_items(struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_GRE:
ret = mlx5_flow_item_gre(items, flow, remain, error);
break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ ret = mlx5_flow_item_mpls(items, flow, remain, error);
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,