[v2,11/14] net/mlx5: add flex parser DevX object management

Message ID 20211001193415.23288-12-viacheslavo@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Series ethdev: introduce configurable flexible item

Checks

Context        Check    Description
ci/checkpatch  success  coding style OK

Commit Message

Slava Ovsiienko Oct. 1, 2021, 7:34 p.m. UTC
  From: Gregory Etelson <getelson@nvidia.com>

The DevX flex parsers can be shared between representors
within the same IB context. Put the flex parser objects
into the shared list and use the standard mlx5_list_xxx
API to manage them.

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c  |  10 +++
 drivers/net/mlx5/mlx5.c           |   4 +
 drivers/net/mlx5/mlx5.h           |  20 +++++
 drivers/net/mlx5/mlx5_flow_flex.c | 120 +++++++++++++++++++++++++++++-
 4 files changed, 153 insertions(+), 1 deletion(-)
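For readers not familiar with the mlx5_list sharing scheme, the sketch below (not part of the patch) shows how a caller obtains and releases a shared parser through the list created in mlx5_alloc_shared_dr(). The helper names flex_parser_acquire()/flex_parser_release() are hypothetical; the patch itself performs these steps inline in flow_dv_item_create() and flow_dv_item_release().

#include "mlx5.h"	/* struct mlx5_dev_ctx_shared, mlx5_list_*(), claim_zero() */

/* Sketch: identical devx_conf keys resolve to one reference-counted
 * list entry; create_cb builds the DevX object only on the first
 * registration, later callers just take a reference.
 */
static struct mlx5_flex_parser_devx *
flex_parser_acquire(struct mlx5_dev_ctx_shared *sh,
		    const struct mlx5_devx_graph_node_attr *conf,
		    uint32_t num_samples)
{
	struct mlx5_flex_parser_devx key = {
		.num_samples = num_samples,
		.devx_conf = *conf,
	};
	struct mlx5_list_entry *ent;

	ent = mlx5_list_register(sh->flex_parsers_dv, &key);
	if (!ent)
		return NULL; /* create_cb failed or out of memory. */
	return container_of(ent, struct mlx5_flex_parser_devx, entry);
}

/* Dropping the last reference lets remove_cb destroy the DevX object. */
static void
flex_parser_release(struct mlx5_dev_ctx_shared *sh,
		    struct mlx5_flex_parser_devx *fp)
{
	claim_zero(mlx5_list_unregister(sh->flex_parsers_dv, &fp->entry));
}

Since the list is created with lcore sharing disabled (the third argument of mlx5_list_create() in the mlx5_os.c hunk), the clone callbacks exist mainly to satisfy the mlx5_list callback set; per-lcore entries are produced directly by the create callback.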
  

Patch

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index cbbc152782..e4066d134b 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -384,6 +384,16 @@  mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 					      flow_dv_dest_array_clone_free_cb);
 	if (!sh->dest_array_list)
 		goto error;
+	/* Init shared flex parsers list, no need for lcore_share. */
+	snprintf(s, sizeof(s), "%s_flex_parsers_list", sh->ibdev_name);
+	sh->flex_parsers_dv = mlx5_list_create(s, sh, false,
+					       mlx5_flex_parser_create_cb,
+					       mlx5_flex_parser_match_cb,
+					       mlx5_flex_parser_remove_cb,
+					       mlx5_flex_parser_clone_cb,
+					       mlx5_flex_parser_clone_free_cb);
+	if (!sh->flex_parsers_dv)
+		goto error;
 #endif
 #ifdef HAVE_MLX5DV_DR
 	void *domain;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index d902e00ea3..77fe073f5c 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1315,6 +1315,10 @@  mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 	if (LIST_EMPTY(&mlx5_dev_ctx_list))
 		mlx5_flow_os_release_workspace();
 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
+	if (sh->flex_parsers_dv) {
+		mlx5_list_destroy(sh->flex_parsers_dv);
+		sh->flex_parsers_dv = NULL;
+	}
 	/*
 	 *  Ensure there is no async event handler installed.
 	 *  Only primary process handles async device events.
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 89b4d66374..629ff6ebfe 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1115,6 +1115,15 @@  struct mlx5_aso_ct_pools_mng {
 	struct mlx5_aso_sq aso_sq; /* ASO queue objects. */
 };
 
+/* DevX flex parser context. */
+struct mlx5_flex_parser_devx {
+	struct mlx5_list_entry entry;  /* List element at the beginning. */
+	uint32_t num_samples;
+	void *devx_obj;
+	struct mlx5_devx_graph_node_attr devx_conf;
+	uint32_t sample_ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
+};
+
 /* Port flex item context. */
 struct mlx5_flex_item {
 	struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
@@ -1179,6 +1188,7 @@  struct mlx5_dev_ctx_shared {
 	struct mlx5_list *push_vlan_action_list; /* Push VLAN actions. */
 	struct mlx5_list *sample_action_list; /* List of sample actions. */
 	struct mlx5_list *dest_array_list;
 	/* List of destination array actions. */
+	struct mlx5_list *flex_parsers_dv; /* Flex Item parsers. */
 	struct mlx5_flow_counter_mng cmng; /* Counters management structure. */
 	void *default_miss_action; /* Default miss action. */
@@ -1847,4 +1857,14 @@  int flow_dv_item_release(struct rte_eth_dev *dev,
 		    struct rte_flow_error *error);
 int mlx5_flex_item_port_init(struct rte_eth_dev *dev);
 void mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev);
+/* Flex parser list callbacks. */
+struct mlx5_list_entry *mlx5_flex_parser_create_cb(void *list_ctx, void *ctx);
+int mlx5_flex_parser_match_cb(void *list_ctx,
+			      struct mlx5_list_entry *iter, void *ctx);
+void mlx5_flex_parser_remove_cb(void *list_ctx, struct mlx5_list_entry *entry);
+struct mlx5_list_entry *mlx5_flex_parser_clone_cb(void *list_ctx,
+						  struct mlx5_list_entry *entry,
+						  void *ctx);
+void mlx5_flex_parser_clone_free_cb(void *list_ctx,
+				    struct mlx5_list_entry *entry);
 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index b7bc4af6fb..b8a091e259 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -45,7 +45,13 @@  mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev)
 
 	for (i = 0; i < MLX5_PORT_FLEX_ITEM_NUM && priv->flex_item_map ; i++) {
 		if (priv->flex_item_map & (1 << i)) {
-			/* DevX object dereferencing should be provided here. */
+			struct mlx5_flex_item *flex = &priv->flex_item[i];
+
+			claim_zero(mlx5_list_unregister
+					(priv->sh->flex_parsers_dv,
+					 &flex->devx_fp->entry));
+			flex->devx_fp = NULL;
+			flex->refcnt = 0;
 			priv->flex_item_map &= ~(1 << i);
 		}
 	}
@@ -127,7 +133,9 @@  flow_dv_item_create(struct rte_eth_dev *dev,
 		    struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flex_parser_devx devx_config = { .devx_obj = NULL };
 	struct mlx5_flex_item *flex;
+	struct mlx5_list_entry *ent;
 
 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	flex = mlx5_flex_alloc(priv);
@@ -137,10 +145,22 @@  flow_dv_item_create(struct rte_eth_dev *dev,
 				   "too many flex items created on the port");
 		return NULL;
 	}
+	ent = mlx5_list_register(priv->sh->flex_parsers_dv, &devx_config);
+	if (!ent) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "flex item creation failure");
+		goto error;
+	}
+	flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
 	RTE_SET_USED(conf);
 	/* Mark initialized flex item valid. */
 	__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
 	return (struct rte_flow_item_flex_handle *)flex;
+
+error:
+	mlx5_flex_free(priv, flex);
+	return NULL;
 }
 
 /**
@@ -166,6 +186,7 @@  flow_dv_item_release(struct rte_eth_dev *dev,
 	struct mlx5_flex_item *flex =
 		(struct mlx5_flex_item *)(uintptr_t)handle;
 	uint32_t old_refcnt = 1;
+	int rc;
 
 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	rte_spinlock_lock(&priv->flex_item_sl);
@@ -184,6 +205,103 @@  flow_dv_item_release(struct rte_eth_dev *dev,
 	}
 	/* Flex item is marked as invalid, we can leave locked section. */
 	rte_spinlock_unlock(&priv->flex_item_sl);
+	MLX5_ASSERT(flex->devx_fp);
+	rc = mlx5_list_unregister(priv->sh->flex_parsers_dv,
+				  &flex->devx_fp->entry);
+	flex->devx_fp = NULL;
 	mlx5_flex_free(priv, flex);
+	if (rc)
+		return rte_flow_error_set(error, rc,
+					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					  "flex item release failure");
 	return 0;
 }
+
+/* DevX flex parser list callbacks. */
+struct mlx5_list_entry *
+mlx5_flex_parser_create_cb(void *list_ctx, void *ctx)
+{
+	struct mlx5_dev_ctx_shared *sh = list_ctx;
+	struct mlx5_flex_parser_devx *fp, *conf = ctx;
+	int ret;
+
+	fp = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_flex_parser_devx),
+			 0, SOCKET_ID_ANY);
+	if (!fp)
+		return NULL;
+	/* Copy the requested configurations. */
+	fp->num_samples = conf->num_samples;
+	memcpy(&fp->devx_conf, &conf->devx_conf, sizeof(fp->devx_conf));
+	/* Create DevX flex parser. */
+	fp->devx_obj = mlx5_devx_cmd_create_flex_parser(sh->ctx,
+							&fp->devx_conf);
+	if (!fp->devx_obj)
+		goto error;
+	/* Query the firmware-assigned sample IDs. */
+	ret = mlx5_devx_cmd_query_parse_samples(fp->devx_obj,
+						fp->sample_ids,
+						fp->num_samples);
+	if (ret)
+		goto error;
+	DRV_LOG(DEBUG, "DevX flex parser %p created, samples num: %u",
+		(const void *)fp, fp->num_samples);
+	return &fp->entry;
+error:
+	if (fp->devx_obj)
+		mlx5_devx_cmd_destroy(fp->devx_obj);
+	/* fp is always valid at this point. */
+	mlx5_free(fp);
+	return NULL;
+}
+
+int
+mlx5_flex_parser_match_cb(void *list_ctx,
+			  struct mlx5_list_entry *iter, void *ctx)
+{
+	struct mlx5_flex_parser_devx *fp =
+		container_of(iter, struct mlx5_flex_parser_devx, entry);
+	struct mlx5_flex_parser_devx *org =
+		container_of(ctx, struct mlx5_flex_parser_devx, entry);
+
+	RTE_SET_USED(list_ctx);
+	return !iter || !ctx || memcmp(&fp->devx_conf,
+				       &org->devx_conf,
+				       sizeof(fp->devx_conf));
+}
+
+void
+mlx5_flex_parser_remove_cb(void *list_ctx, struct mlx5_list_entry *entry)
+{
+	struct mlx5_flex_parser_devx *fp =
+		container_of(entry, struct mlx5_flex_parser_devx, entry);
+
+	RTE_SET_USED(list_ctx);
+	MLX5_ASSERT(fp->devx_obj);
+	claim_zero(mlx5_devx_cmd_destroy(fp->devx_obj));
+	mlx5_free(fp);
+}
+
+struct mlx5_list_entry *
+mlx5_flex_parser_clone_cb(void *list_ctx,
+			  struct mlx5_list_entry *entry, void *ctx)
+{
+	struct mlx5_flex_parser_devx *fp;
+
+	RTE_SET_USED(list_ctx);
+	RTE_SET_USED(entry);
+	fp = mlx5_malloc(0, sizeof(struct mlx5_flex_parser_devx),
+			 0, SOCKET_ID_ANY);
+	if (!fp)
+		return NULL;
+	memcpy(fp, ctx, sizeof(struct mlx5_flex_parser_devx));
+	return &fp->entry;
+}
+
+void
+mlx5_flex_parser_clone_free_cb(void *list_ctx, struct mlx5_list_entry *entry)
+{
+	struct mlx5_flex_parser_devx *fp =
+		container_of(entry, struct mlx5_flex_parser_devx, entry);
+	RTE_SET_USED(list_ctx);
+	mlx5_free(fp);
+}