[08/20] net/mlx5: remove DevX flag duplication

Message ID 20220127153950.812953-9-michaelba@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Raslan Darawsheh
Series mlx5: refactor devargs management

Checks

Context | Check | Description
ci/checkpatch | success | coding style OK

Commit Message

Michael Baum Jan. 27, 2022, 3:39 p.m. UTC
The shared device context structure has a field named "devx" which
indicates whether DevX is supported.
The common configuration structure also has a field named "devx" with
the same meaning.

There is no need for this duplication, because there is a reference to
the common structure from within the shared device context structure.

This patch removes the field from the shared device context structure
and uses the common configuration structure instead.
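
For orientation, a minimal sketch of the structures involved follows, assuming
a simplified layout of the common mlx5 driver types (the exact definitions live
under drivers/common/mlx5 and are not part of this patch):

/* Simplified, assumed layouts -- illustration only. */
struct mlx5_common_dev_config {
	unsigned int devx:1; /* DevX opened successfully on the device. */
	/* ... other common configuration ... */
};

struct mlx5_common_device {
	struct mlx5_common_dev_config config;
	/* ... */
};

struct mlx5_dev_ctx_shared {
	struct mlx5_common_device *cdev; /* Reference to the common device. */
	/* The duplicated "uint32_t devx:1" bit-field is removed; code that
	 * used to test "sh->devx" now reads the flag through the reference:
	 *
	 *	if (!sh->cdev->config.devx)
	 *		return -ENOTSUP;
	 */
	/* ... */
};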

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c    | 16 ++++++++--------
 drivers/net/mlx5/linux/mlx5_verbs.c |  4 ++--
 drivers/net/mlx5/mlx5.c             |  3 +--
 drivers/net/mlx5/mlx5.h             |  1 -
 drivers/net/mlx5/mlx5_ethdev.c      |  3 ++-
 drivers/net/mlx5/mlx5_flow.c        |  2 +-
 drivers/net/mlx5/mlx5_flow_dv.c     | 23 ++++++++++++-----------
 drivers/net/mlx5/mlx5_trigger.c     |  2 +-
 drivers/net/mlx5/windows/mlx5_os.c  |  6 +++---
 9 files changed, 30 insertions(+), 30 deletions(-)

Patch

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 39ca145e4a..b579be25cb 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -682,7 +682,7 @@  mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
 	fallback = true;
 #else
 	fallback = false;
-	if (!sh->devx || !priv->config.dv_flow_en ||
+	if (!sh->cdev->config.devx || !priv->config.dv_flow_en ||
 	    !hca_attr->flow_counters_dump ||
 	    !(hca_attr->flow_counter_bulk_alloc_bitmap & 0x4) ||
 	    (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
@@ -1316,7 +1316,7 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
 		config->mps == MLX5_MPW ? "legacy " : "",
 		config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
-	if (sh->devx) {
+	if (sh->cdev->config.devx) {
 		sh->steering_format_version = hca_attr->steering_format_version;
 		/* Check for LRO support. */
 		if (config->dest_tir && hca_attr->lro_cap &&
@@ -1434,13 +1434,13 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		config->cqe_comp = 0;
 	}
 	if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
-	    (!sh->devx || !hca_attr->mini_cqe_resp_flow_tag)) {
+	    (!sh->cdev->config.devx || !hca_attr->mini_cqe_resp_flow_tag)) {
 		DRV_LOG(WARNING, "Flow Tag CQE compression"
 				 " format isn't supported.");
 		config->cqe_comp = 0;
 	}
 	if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
-	    (!sh->devx || !hca_attr->mini_cqe_resp_l3_l4_tag)) {
+	    (!sh->cdev->config.devx || !hca_attr->mini_cqe_resp_l3_l4_tag)) {
 		DRV_LOG(WARNING, "L3/L4 Header CQE compression"
 				 " format isn't supported.");
 		config->cqe_comp = 0;
@@ -1463,7 +1463,7 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 			hca_attr->log_max_static_sq_wq);
 		DRV_LOG(DEBUG, "WQE rate PP mode is %ssupported",
 			hca_attr->qos.wqe_rate_pp ? "" : "not ");
-		if (!sh->devx) {
+		if (!sh->cdev->config.devx) {
 			DRV_LOG(ERR, "DevX is required for packet pacing");
 			err = ENODEV;
 			goto error;
@@ -1519,7 +1519,7 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 				priv->dev_port);
 		}
 	}
-	if (sh->devx) {
+	if (sh->cdev->config.devx) {
 		uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
 
 		err = hca_attr->access_register_user ?
@@ -1676,7 +1676,7 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		if (mlx5_flex_item_port_init(eth_dev) < 0)
 			goto error;
 	}
-	if (sh->devx && config->dv_flow_en && config->dest_tir) {
+	if (sh->cdev->config.devx && config->dv_flow_en && config->dest_tir) {
 		priv->obj_ops = devx_obj_ops;
 		mlx5_queue_counter_id_prepare(eth_dev);
 		priv->obj_ops.lb_dummy_queue_create =
@@ -2735,7 +2735,7 @@  mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
 			rte_intr_fd_set(sh->intr_handle, -1);
 		}
 	}
-	if (sh->devx) {
+	if (sh->cdev->config.devx) {
 #ifdef HAVE_IBV_DEVX_ASYNC
 		sh->intr_handle_devx =
 			rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index 2b6eef44a7..722017efa4 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -998,7 +998,7 @@  mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 	qp.comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET;
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	/* If using DevX, need additional mask to read tisn value. */
-	if (priv->sh->devx && !priv->sh->tdn)
+	if (priv->sh->cdev->config.devx && !priv->sh->tdn)
 		qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES;
 #endif
 	obj.cq.in = txq_obj->cq;
@@ -1042,7 +1042,7 @@  mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 	 * This is done once per port.
 	 * Will use this value on Rx, when creating matching TIR.
 	 */
-	if (priv->sh->devx && !priv->sh->tdn) {
+	if (priv->sh->cdev->config.devx && !priv->sh->tdn) {
 		ret = mlx5_devx_cmd_qp_query_tis_td(txq_obj->qp, qp.tisn,
 						    &priv->sh->tdn);
 		if (ret) {
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 920b174b96..b371a87355 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1182,7 +1182,6 @@  mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 	pthread_mutex_init(&sh->txpp.mutex, NULL);
 	sh->numa_node = spawn->cdev->dev->numa_node;
 	sh->cdev = spawn->cdev;
-	sh->devx = sh->cdev->config.devx;
 	if (spawn->bond_info)
 		sh->bond = *spawn->bond_info;
 	err = mlx5_os_get_dev_attr(sh->cdev, &sh->device_attr);
@@ -1205,7 +1204,7 @@  mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 		sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
 		sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
 	}
-	if (sh->devx) {
+	if (sh->cdev->config.devx) {
 		sh->td = mlx5_devx_cmd_create_td(sh->cdev->ctx);
 		if (!sh->td) {
 			DRV_LOG(ERR, "TD allocation failure");
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index ee485343ff..6bc7a34f60 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1146,7 +1146,6 @@  struct mlx5_flex_item {
 struct mlx5_dev_ctx_shared {
 	LIST_ENTRY(mlx5_dev_ctx_shared) next;
 	uint32_t refcnt;
-	uint32_t devx:1; /* Opened with DV. */
 	uint32_t flow_hit_aso_en:1; /* Flow Hit ASO is supported. */
 	uint32_t steering_format_version:4;
 	/* Indicates the device steering logic format. */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 5b0eee3321..801c467bba 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -723,7 +723,8 @@  mlx5_hairpin_cap_get(struct rte_eth_dev *dev, struct rte_eth_hairpin_cap *cap)
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_config *config = &priv->config;
 
-	if (!priv->sh->devx || !config->dest_tir || !config->dv_flow_en) {
+	if (!priv->sh->cdev->config.devx || !config->dest_tir ||
+	    !config->dv_flow_en) {
 		rte_errno = ENOTSUP;
 		return -rte_errno;
 	}
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 5a21803e18..907f3fd75a 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -9966,7 +9966,7 @@  mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
 	type = mlx5_flow_os_get_type();
 	if (type == MLX5_FLOW_TYPE_MAX) {
 		type = MLX5_FLOW_TYPE_VERBS;
-		if (priv->sh->devx && priv->config.dv_flow_en)
+		if (priv->sh->cdev->config.devx && priv->config.dv_flow_en)
 			type = MLX5_FLOW_TYPE_DV;
 	}
 	fops = flow_get_drv_ops(type);
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index b0ed9f93a0..4e60a54df3 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -3291,7 +3291,7 @@  flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	if (!priv->sh->devx)
+	if (!priv->sh->cdev->config.devx)
 		goto notsup_err;
 	if (action_flags & MLX5_FLOW_ACTION_COUNT)
 		return rte_flow_error_set(error, EINVAL,
@@ -5302,8 +5302,8 @@  flow_dv_validate_action_age(uint64_t action_flags,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	const struct rte_flow_action_age *age = action->conf;
 
-	if (!priv->sh->devx || (priv->sh->cmng.counter_fallback &&
-	    !priv->sh->aso_age_mng))
+	if (!priv->sh->cdev->config.devx ||
+	    (priv->sh->cmng.counter_fallback && !priv->sh->aso_age_mng))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL,
@@ -5587,7 +5587,8 @@  flow_dv_validate_action_sample(uint64_t *action_flags,
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
 					  "ratio value starts from 1");
-	if (!priv->sh->devx || (sample->ratio > 0 && !priv->sampler_en))
+	if (!priv->sh->cdev->config.devx ||
+	    (sample->ratio > 0 && !priv->sampler_en))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL,
@@ -6175,7 +6176,7 @@  flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
 			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
 	uint32_t cnt_idx;
 
-	if (!priv->sh->devx) {
+	if (!priv->sh->cdev->config.devx) {
 		rte_errno = ENOTSUP;
 		return 0;
 	}
@@ -6498,7 +6499,7 @@  flow_dv_mtr_alloc(struct rte_eth_dev *dev)
 	struct mlx5_aso_mtr_pool *pool;
 	uint32_t mtr_idx = 0;
 
-	if (!priv->sh->devx) {
+	if (!priv->sh->cdev->config.devx) {
 		rte_errno = ENOTSUP;
 		return 0;
 	}
@@ -12515,7 +12516,7 @@  flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
 	uint32_t ct_idx;
 
 	MLX5_ASSERT(mng);
-	if (!priv->sh->devx) {
+	if (!priv->sh->cdev->config.devx) {
 		rte_errno = ENOTSUP;
 		return 0;
 	}
@@ -12953,7 +12954,7 @@  flow_dv_translate(struct rte_eth_dev *dev,
 			}
 			break;
 		case RTE_FLOW_ACTION_TYPE_COUNT:
-			if (!priv->sh->devx) {
+			if (!priv->sh->cdev->config.devx) {
 				return rte_flow_error_set
 					      (error, ENOTSUP,
 					       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -15834,7 +15835,7 @@  flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow_query_count *qc = data;
 
-	if (!priv->sh->devx)
+	if (!priv->sh->cdev->config.devx)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL,
@@ -15887,7 +15888,7 @@  flow_dv_query_count_ptr(struct rte_eth_dev *dev, uint32_t cnt_idx,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	if (!priv->sh->devx || !action_ptr)
+	if (!priv->sh->cdev->config.devx || !action_ptr)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL,
@@ -17491,7 +17492,7 @@  flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
 	uint64_t inn_pkts, inn_bytes;
 	int ret;
 
-	if (!priv->sh->devx)
+	if (!priv->sh->cdev->config.devx)
 		return -1;
 
 	ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 4bb0331464..1dfe7da435 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1104,7 +1104,7 @@  mlx5_dev_start(struct rte_eth_dev *dev)
 			dev->data->port_id, strerror(rte_errno));
 		goto error;
 	}
-	if ((priv->sh->devx && priv->config.dv_flow_en &&
+	if ((priv->sh->cdev->config.devx && priv->config.dv_flow_en &&
 	    priv->config.dest_tir) && priv->obj_ops.lb_dummy_queue_create) {
 		ret = priv->obj_ops.lb_dummy_queue_create(dev);
 		if (ret)
diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index 3d8bd2e100..5396365ec9 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -275,7 +275,7 @@  mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
 	fallback = true;
 #else
 	fallback = false;
-	if (!sh->devx || !priv->config.dv_flow_en ||
+	if (!sh->cdev->config.devx || !priv->config.dv_flow_en ||
 	    !hca_attr->flow_counters_dump ||
 	    !(hca_attr->flow_counter_bulk_alloc_bitmap & 0x4) ||
 	    (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
@@ -476,7 +476,7 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		DRV_LOG(WARNING, "Rx CQE compression isn't supported.");
 		config->cqe_comp = 0;
 	}
-	if (sh->devx) {
+	if (sh->cdev->config.devx) {
 		hca_attr = &sh->cdev->config.hca_attr;
 		config->hw_csum = hca_attr->csum_cap;
 		DRV_LOG(DEBUG, "checksum offloading is %ssupported",
@@ -661,7 +661,7 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 			goto error;
 		}
 	}
-	if (sh->devx) {
+	if (sh->cdev->config.devx) {
 		priv->obj_ops = devx_obj_ops;
 	} else {
 		DRV_LOG(ERR, "Windows flow must be DevX.");