diff mbox series

[v2,10/14] net/mlx5: add flex item API

Message ID 20211001193415.23288-11-viacheslavo@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Headers show
Series ethdev: introduce configurable flexible item | expand

Checks

Context Check Description
ci/checkpatch warning coding style issues

Commit Message

Viacheslav Ovsiienko Oct. 1, 2021, 7:34 p.m. UTC
This patch is a preparation step for implementing
the flex item feature in the driver and it provides:

  - external entry point routines for flex item
    creation/deletion

  - flex item objects management over the ports.

The flex item object keeps information about
the item created over the port - a reference counter
to track whether the item is in use by some active
flows, and the pointer to the underlying shared DevX
object, providing all the data needed to translate
the flow flex pattern into matcher fields according
to the hardware configuration.

Not too many flex items are expected to be
created on the port, so the design is optimized
for flow insertion rate rather than memory savings.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c  |   5 +-
 drivers/net/mlx5/meson.build      |   1 +
 drivers/net/mlx5/mlx5.c           |   2 +-
 drivers/net/mlx5/mlx5.h           |  24 ++++
 drivers/net/mlx5/mlx5_flow.c      |  49 ++++++++
 drivers/net/mlx5/mlx5_flow.h      |  18 ++-
 drivers/net/mlx5/mlx5_flow_dv.c   |   3 +-
 drivers/net/mlx5/mlx5_flow_flex.c | 189 ++++++++++++++++++++++++++++++
 8 files changed, 286 insertions(+), 5 deletions(-)
 create mode 100644 drivers/net/mlx5/mlx5_flow_flex.c
diff mbox series

Patch

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 3746057673..cbbc152782 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -928,7 +928,6 @@  mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,
 	return false;
 }
 
-
 /**
  * Spawn an Ethernet device from Verbs information.
  *
@@ -1787,6 +1786,8 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		err = mlx5_alloc_shared_dr(priv);
 		if (err)
 			goto error;
+		if (mlx5_flex_item_port_init(eth_dev) < 0)
+			goto error;
 	}
 	if (config->devx && config->dv_flow_en && config->dest_tir) {
 		priv->obj_ops = devx_obj_ops;
@@ -1922,6 +1923,8 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
 		if (priv->hrxqs)
 			mlx5_list_destroy(priv->hrxqs);
+		if (eth_dev && priv->flex_item_map)
+			mlx5_flex_item_port_cleanup(eth_dev);
 		mlx5_free(priv);
 		if (eth_dev != NULL)
 			eth_dev->data->dev_private = NULL;
diff --git a/drivers/net/mlx5/meson.build b/drivers/net/mlx5/meson.build
index dac7f1fabf..f9b21c35d9 100644
--- a/drivers/net/mlx5/meson.build
+++ b/drivers/net/mlx5/meson.build
@@ -17,6 +17,7 @@  sources = files(
         'mlx5_flow_meter.c',
         'mlx5_flow_dv.c',
         'mlx5_flow_aso.c',
+        'mlx5_flow_flex.c',
         'mlx5_mac.c',
         'mlx5_mr.c',
         'mlx5_rss.c',
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index aa49542b9d..d902e00ea3 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -376,7 +376,6 @@  static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 	},
 };
 
-
 #define MLX5_FLOW_MIN_ID_POOL_SIZE 512
 #define MLX5_ID_GENERATION_ARRAY_FACTOR 16
 
@@ -1575,6 +1574,7 @@  mlx5_dev_close(struct rte_eth_dev *dev)
 	mlx5_mp_os_req_stop_rxtx(dev);
 	/* Free the eCPRI flex parser resource. */
 	mlx5_flex_parser_ecpri_release(dev);
+	mlx5_flex_item_port_cleanup(dev);
 	if (priv->rxqs != NULL) {
 		/* XXX race condition if mlx5_rx_burst() is still running. */
 		rte_delay_us_sleep(1000);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 5000d2d4c5..89b4d66374 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -49,6 +49,9 @@ 
 #define MLX5_MAX_MODIFY_NUM			32
 #define MLX5_ROOT_TBL_MODIFY_NUM		16
 
+/* Maximal number of flex items created on the port. */
+#define MLX5_PORT_FLEX_ITEM_NUM			4
+
 enum mlx5_ipool_index {
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
 	MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */
@@ -1112,6 +1115,12 @@  struct mlx5_aso_ct_pools_mng {
 	struct mlx5_aso_sq aso_sq; /* ASO queue objects. */
 };
 
+/* Port flex item context. */
+struct mlx5_flex_item {
+	struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
+	uint32_t refcnt; /**< Atomically accessed refcnt by flows. */
+};
+
 /*
  * Shared Infiniband device context for Master/Representors
  * which belong to same IB device with multiple IB ports.
@@ -1448,6 +1457,10 @@  struct mlx5_priv {
 	uint32_t rss_shared_actions; /* RSS shared actions. */
 	struct mlx5_devx_obj *q_counters; /* DevX queue counter object. */
 	uint32_t counter_set_id; /* Queue counter ID to set in DevX objects. */
+	rte_spinlock_t flex_item_sl; /* Flex item list spinlock. */
+	struct mlx5_flex_item flex_item[MLX5_PORT_FLEX_ITEM_NUM];
+	/* Flex items have been created on the port. */
+	uint32_t flex_item_map; /* Map of allocated flex item elements. */
 };
 
 #define PORT_ID(priv) ((priv)->dev_data->port_id)
@@ -1823,4 +1836,15 @@  int mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh,
 int mlx5_aso_ct_available(struct mlx5_dev_ctx_shared *sh,
 			  struct mlx5_aso_ct_action *ct);
 
+/* mlx5_flow_flex.c */
+
+struct rte_flow_item_flex_handle *
+flow_dv_item_create(struct rte_eth_dev *dev,
+		    const struct rte_flow_item_flex_conf *conf,
+		    struct rte_flow_error *error);
+int flow_dv_item_release(struct rte_eth_dev *dev,
+		    const struct rte_flow_item_flex_handle *flex_handle,
+		    struct rte_flow_error *error);
+int mlx5_flex_item_port_init(struct rte_eth_dev *dev);
+void mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev);
 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index c914a7120c..5224daed6c 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -718,6 +718,14 @@  mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
 				  struct rte_mbuf *m,
 				  struct rte_flow_restore_info *info,
 				  struct rte_flow_error *err);
+static struct rte_flow_item_flex_handle *
+mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
+			   const struct rte_flow_item_flex_conf *conf,
+			   struct rte_flow_error *error);
+static int
+mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
+			    const struct rte_flow_item_flex_handle *handle,
+			    struct rte_flow_error *error);
 
 static const struct rte_flow_ops mlx5_flow_ops = {
 	.validate = mlx5_flow_validate,
@@ -737,6 +745,8 @@  static const struct rte_flow_ops mlx5_flow_ops = {
 	.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
 	.tunnel_item_release = mlx5_flow_tunnel_item_release,
 	.get_restore_info = mlx5_flow_tunnel_get_restore_info,
+	.flex_item_create = mlx5_flow_flex_item_create,
+	.flex_item_release = mlx5_flow_flex_item_release,
 };
 
 /* Tunnel information. */
@@ -9398,6 +9408,45 @@  mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh,
 }
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
 
+/* Flex flow item API */
+static struct rte_flow_item_flex_handle *
+mlx5_flow_flex_item_create(struct rte_eth_dev *dev,
+			   const struct rte_flow_item_flex_conf *conf,
+			   struct rte_flow_error *error)
+{
+	static const char err_msg[] = "flex item creation unsupported";
+	struct rte_flow_attr attr = { .transfer = 0 };
+	const struct mlx5_flow_driver_ops *fops =
+			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+
+	if (!fops->item_create) {
+		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, err_msg);
+		return NULL;
+	}
+	return fops->item_create(dev, conf, error);
+}
+
+static int
+mlx5_flow_flex_item_release(struct rte_eth_dev *dev,
+			    const struct rte_flow_item_flex_handle *handle,
+			    struct rte_flow_error *error)
+{
+	static const char err_msg[] = "flex item release unsupported";
+	struct rte_flow_attr attr = { .transfer = 0 };
+	const struct mlx5_flow_driver_ops *fops =
+			flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+
+	if (!fops->item_release) {
+		DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
+		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+				   NULL, err_msg);
+		return -rte_errno;
+	}
+	return fops->item_release(dev, handle, error);
+}
+
 static void
 mlx5_dbg__print_pattern(const struct rte_flow_item *item)
 {
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 5c68d4f7d7..a8f8c49dd2 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1226,6 +1226,19 @@  typedef int (*mlx5_flow_create_def_policy_t)
 			(struct rte_eth_dev *dev);
 typedef void (*mlx5_flow_destroy_def_policy_t)
 			(struct rte_eth_dev *dev);
+typedef struct rte_flow_item_flex_handle *(*mlx5_flow_item_create_t)
+			(struct rte_eth_dev *dev,
+			 const struct rte_flow_item_flex_conf *conf,
+			 struct rte_flow_error *error);
+typedef int (*mlx5_flow_item_release_t)
+			(struct rte_eth_dev *dev,
+			 const struct rte_flow_item_flex_handle *handle,
+			 struct rte_flow_error *error);
+typedef int (*mlx5_flow_item_update_t)
+			(struct rte_eth_dev *dev,
+			 const struct rte_flow_item_flex_handle *handle,
+			 const struct rte_flow_item_flex_conf *conf,
+			 struct rte_flow_error *error);
 
 struct mlx5_flow_driver_ops {
 	mlx5_flow_validate_t validate;
@@ -1260,6 +1273,9 @@  struct mlx5_flow_driver_ops {
 	mlx5_flow_action_update_t action_update;
 	mlx5_flow_action_query_t action_query;
 	mlx5_flow_sync_domain_t sync_domain;
+	mlx5_flow_item_create_t item_create;
+	mlx5_flow_item_release_t item_release;
+	mlx5_flow_item_update_t item_update;
 };
 
 /* mlx5_flow.c */
@@ -1709,6 +1725,4 @@  const struct mlx5_flow_tunnel *
 mlx5_get_tof(const struct rte_flow_item *items,
 	     const struct rte_flow_action *actions,
 	     enum mlx5_tof_rule_type *rule_type);
-
-
 #endif /* RTE_PMD_MLX5_FLOW_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index fc676d3ee4..a3c35a5edf 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -18011,7 +18011,8 @@  const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
 	.action_update = flow_dv_action_update,
 	.action_query = flow_dv_action_query,
 	.sync_domain = flow_dv_sync_domain,
+	.item_create = flow_dv_item_create,
+	.item_release = flow_dv_item_release,
 };
-
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
 
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
new file mode 100644
index 0000000000..b7bc4af6fb
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -0,0 +1,189 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2021 NVIDIA Corporation & Affiliates
+ */
+#include <rte_malloc.h>
+#include <mlx5_devx_cmds.h>
+#include <mlx5_malloc.h>
+#include "mlx5.h"
+#include "mlx5_flow.h"
+
+static_assert(sizeof(uint32_t) * CHAR_BIT >= MLX5_PORT_FLEX_ITEM_NUM,
+	      "Flex item maximal number exceeds uint32_t bit width");
+
+/**
+ *  Routine called once on port initialization to initialize
+ *  the flex item related infrastructure.
+ *
+ * @param dev
+ *   Ethernet device to perform flex item initialization
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flex_item_port_init(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	rte_spinlock_init(&priv->flex_item_sl);
+	MLX5_ASSERT(!priv->flex_item_map);
+	return 0;
+}
+
+/**
+ *  Routine called once on port close to perform flex item
+ *  related infrastructure cleanup.
+ *
+ * @param dev
+ *   Ethernet device to perform cleanup
+ */
+void
+mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	uint32_t i;
+
+	for (i = 0; i < MLX5_PORT_FLEX_ITEM_NUM && priv->flex_item_map ; i++) {
+		if (priv->flex_item_map & (1 << i)) {
+			/* DevX object dereferencing should be provided here. */
+			priv->flex_item_map &= ~(1 << i);
+		}
+	}
+}
+
+static int
+mlx5_flex_index(struct mlx5_priv *priv, struct mlx5_flex_item *item)
+{
+	uintptr_t start = (uintptr_t)&priv->flex_item[0];
+	uintptr_t entry = (uintptr_t)item;
+	uintptr_t idx = (entry - start) / sizeof(struct mlx5_flex_item);
+
+	if (entry < start ||
+	    idx >= MLX5_PORT_FLEX_ITEM_NUM ||
+	    (entry - start) % sizeof(struct mlx5_flex_item) ||
+	    !(priv->flex_item_map & (1u << idx)))
+		return -1;
+	return (int)idx;
+}
+
+static struct mlx5_flex_item *
+mlx5_flex_alloc(struct mlx5_priv *priv)
+{
+	struct mlx5_flex_item *item = NULL;
+
+	rte_spinlock_lock(&priv->flex_item_sl);
+	if (~priv->flex_item_map) {
+		uint32_t idx = rte_bsf32(~priv->flex_item_map);
+
+		if (idx < MLX5_PORT_FLEX_ITEM_NUM) {
+			item = &priv->flex_item[idx];
+			MLX5_ASSERT(!item->refcnt);
+			MLX5_ASSERT(!item->devx_fp);
+			item->devx_fp = NULL;
+			__atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+			priv->flex_item_map |= 1u << idx;
+		}
+	}
+	rte_spinlock_unlock(&priv->flex_item_sl);
+	return item;
+}
+
+static void
+mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
+{
+	int idx = mlx5_flex_index(priv, item);
+
+	MLX5_ASSERT(idx >= 0 &&
+		    idx < MLX5_PORT_FLEX_ITEM_NUM &&
+		    (priv->flex_item_map & (1u << idx)));
+	if (idx >= 0) {
+		rte_spinlock_lock(&priv->flex_item_sl);
+		MLX5_ASSERT(!item->refcnt);
+		MLX5_ASSERT(!item->devx_fp);
+		item->devx_fp = NULL;
+		__atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+		priv->flex_item_map &= ~(1u << idx);
+		rte_spinlock_unlock(&priv->flex_item_sl);
+	}
+}
+
+/**
+ * Create the flex item with specified configuration over the Ethernet device.
+ *
+ * @param dev
+ *   Ethernet device to create flex item on.
+ * @param[in] conf
+ *   Flex item configuration.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. PMDs initialize this
+ *   structure in case of error only.
+ *
+ * @return
+ *   Non-NULL opaque pointer on success, NULL otherwise and rte_errno is set.
+ */
+struct rte_flow_item_flex_handle *
+flow_dv_item_create(struct rte_eth_dev *dev,
+		    const struct rte_flow_item_flex_conf *conf,
+		    struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flex_item *flex;
+
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	flex = mlx5_flex_alloc(priv);
+	if (!flex) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "too many flex items created on the port");
+		return NULL;
+	}
+	RTE_SET_USED(conf);
+	/* Mark initialized flex item valid. */
+	__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
+	return (struct rte_flow_item_flex_handle *)flex;
+}
+
+/**
+ * Release the flex item on the specified Ethernet device.
+ *
+ * @param dev
+ *   Ethernet device to destroy flex item on.
+ * @param[in] handle
+ *   Handle of the item existing on the specified device.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. PMDs initialize this
+ *   structure in case of error only.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+flow_dv_item_release(struct rte_eth_dev *dev,
+		     const struct rte_flow_item_flex_handle *handle,
+		     struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flex_item *flex =
+		(struct mlx5_flex_item *)(uintptr_t)handle;
+	uint32_t old_refcnt = 1;
+
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	rte_spinlock_lock(&priv->flex_item_sl);
+	if (mlx5_flex_index(priv, flex) < 0) {
+		rte_spinlock_unlock(&priv->flex_item_sl);
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					  "invalid flex item handle value");
+	}
+	if (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,
+					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+		rte_spinlock_unlock(&priv->flex_item_sl);
+		return rte_flow_error_set(error, EBUSY,
+					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					  "flex item has flow references");
+	}
+	/* Flex item is marked as invalid, we can leave locked section. */
+	rte_spinlock_unlock(&priv->flex_item_sl);
+	mlx5_flex_free(priv, flex);
+	return 0;
+}