[RFC,2/3] net/mlx5: refactor devargs handling in spawn

Message ID 20220102065927.2210733-3-michaelba@nvidia.com (mailing list archive)
State RFC, archived
Delegated to: Thomas Monjalon
Series: common/mlx5: refactor devargs management

Checks

Context        Check     Description
ci/checkpatch  warning   coding style issues

Commit Message

Michael Baum Jan. 2, 2022, 6:59 a.m. UTC
  From: Michael Baum <michaelba@nvidia.com>

Rearrange the configuration, classifying it into three main
categories:

1. Configurations that come from the device.
   They are updated once for all ports and must not change at any
   stage.
   They are collected in one structure within the shared device
   context structure.

2. Arguments that come from the user and relate to the shared device
   context.
   They are updated once for all ports and must not change at any
   stage.
   They too are gathered in one structure within the shared device
   context structure.
   If the user requests to change any of these arguments on probe
   again, an error is returned.

3. Arguments that come from the user and are used by each port
   independently.
   They may be updated for new ports on probe again.
   They are collected in one structure under the private structure of
   each port and updated anew for each port (see the sketch below).
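
As a quick orientation, here is a minimal sketch of the resulting
layout, abridged from the patch below. The structure names (mlx5_cap,
mlx5_sh_config, mlx5_port_config) and the sh->dev_cap, sh->config and
priv->config members are taken from the diff; the field lists are
shortened and purely illustrative:

	/* Abridged, illustrative field lists -- not the real definitions. */
	struct mlx5_cap {                /* 1: from the device, set once */
		unsigned int vf:1;
		unsigned int tunnel_en:3;
		/* ... MPRQ limits, TSO, SWP, ... */
	};
	struct mlx5_sh_config {          /* 2: from the user, shared, frozen */
		unsigned int dv_flow_en:1;
		unsigned int reclaim_mode:2;
		int tx_pp;
		/* ... */
	};
	struct mlx5_port_config {        /* 3: from the user, per port */
		int mps;
		unsigned int cqe_comp:1;
		/* ... MPRQ, LRO, Tx inline thresholds, ... */
	};
	struct mlx5_dev_ctx_shared {
		struct mlx5_cap dev_cap;        /* category 1 */
		struct mlx5_sh_config config;   /* category 2 */
		/* ... */
	};
	struct mlx5_priv {
		struct mlx5_dev_ctx_shared *sh;
		struct mlx5_port_config config; /* category 3 */
		/* ... */
	};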

Signed-off-by: Michael Baum <michaelba@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c      | 1062 ++++++++++++++-----------
 drivers/net/mlx5/linux/mlx5_vlan_os.c |    3 +-
 drivers/net/mlx5/mlx5.c               |  484 ++++++-----
 drivers/net/mlx5/mlx5.h               |  140 ++--
 drivers/net/mlx5/mlx5_devx.c          |    4 +-
 drivers/net/mlx5/mlx5_ethdev.c        |   25 +-
 drivers/net/mlx5/mlx5_flow.c          |   87 +-
 drivers/net/mlx5/mlx5_flow.h          |    2 +-
 drivers/net/mlx5/mlx5_flow_dv.c       |  104 +--
 drivers/net/mlx5/mlx5_flow_meter.c    |  122 +--
 drivers/net/mlx5/mlx5_rxmode.c        |    8 +-
 drivers/net/mlx5/mlx5_rxq.c           |   43 +-
 drivers/net/mlx5/mlx5_trigger.c       |   15 +-
 drivers/net/mlx5/mlx5_tx.c            |    2 +-
 drivers/net/mlx5/mlx5_txpp.c          |   12 +-
 drivers/net/mlx5/mlx5_txq.c           |   45 +-
 drivers/net/mlx5/mlx5_vlan.c          |    4 +-
 drivers/net/mlx5/windows/mlx5_os.c    |  385 ++++++---
 18 files changed, 1471 insertions(+), 1076 deletions(-)
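
Most of this diff is mechanical relocation: call sites that used to
read devargs-derived flags through the per-port priv->config now read
them from the shared sh->config (or from sh->dev_cap for device
capabilities). A representative change, repeated throughout the patch
below (this pair is from the mlx5_alloc_shared_dr() hunk):

	-	if (priv->config.dv_esw_en) {
	+	if (sh->config.dv_esw_en) {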
  

Patch

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 7c503cceec..f90888fe72 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -263,7 +263,7 @@  __mlx5_discovery_misc5_cap(struct mlx5_priv *priv)
 	dv_attr.priority = 3;
 #ifdef HAVE_MLX5DV_DR_ESWITCH
 	void *misc2_m;
-	if (priv->config.dv_esw_en) {
+	if (priv->sh->config.dv_esw_en) {
 		/* FDB enabled reg_c_0 */
 		dv_attr.match_criteria_enable |=
 				(1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT);
@@ -384,7 +384,7 @@  mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 	}
 	sh->tx_domain = domain;
 #ifdef HAVE_MLX5DV_DR_ESWITCH
-	if (priv->config.dv_esw_en) {
+	if (sh->config.dv_esw_en) {
 		domain = mlx5_glue->dr_create_domain(sh->cdev->ctx,
 						     MLX5DV_DR_DOMAIN_TYPE_FDB);
 		if (!domain) {
@@ -406,20 +406,20 @@  mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 		goto error;
 	}
 #endif
-	if (!sh->tunnel_hub && priv->config.dv_miss_info)
+	if (!sh->tunnel_hub && sh->config.dv_miss_info)
 		err = mlx5_alloc_tunnel_hub(sh);
 	if (err) {
 		DRV_LOG(ERR, "mlx5_alloc_tunnel_hub failed err=%d", err);
 		goto error;
 	}
-	if (priv->config.reclaim_mode == MLX5_RCM_AGGR) {
+	if (sh->config.reclaim_mode == MLX5_RCM_AGGR) {
 		mlx5_glue->dr_reclaim_domain_memory(sh->rx_domain, 1);
 		mlx5_glue->dr_reclaim_domain_memory(sh->tx_domain, 1);
 		if (sh->fdb_domain)
 			mlx5_glue->dr_reclaim_domain_memory(sh->fdb_domain, 1);
 	}
 	sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
-	if (!priv->config.allow_duplicate_pattern) {
+	if (!sh->config.allow_duplicate_pattern) {
 #ifndef HAVE_MLX5_DR_ALLOW_DUPLICATE
 		DRV_LOG(WARNING, "Disallow duplicate pattern is not supported - maybe old rdma-core version?");
 #endif
@@ -686,7 +686,7 @@  mlx5_flow_drop_action_config(struct rte_eth_dev *dev __rte_unused)
 #ifdef HAVE_MLX5DV_DR
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	if (!priv->config.dv_flow_en || !priv->sh->dr_drop_action)
+	if (!priv->sh->config.dv_flow_en || !priv->sh->dr_drop_action)
 		return;
 	/**
 	 * DR supports drop action placeholder when it is supported;
@@ -820,6 +820,556 @@  mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,
 	return false;
 }
 
+/**
+ * Parse user port parameters and adjust them according to device capabilities.
+ *
+ * @param priv
+ *   Pointer to the port's private data structure.
+ * @param devargs
+ *   Device arguments structure.
+ * @param config
+ *   Pointer to port configuration structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_os_port_args_config(struct mlx5_priv *priv, struct rte_devargs *devargs,
+			 struct mlx5_port_config *config)
+{
+	struct rte_kvargs *kvlist;
+	struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
+	struct mlx5_dev_attr *dev_attr = &priv->sh->device_attr;
+	bool devx = priv->sh->cdev->config.devx;
+	uint32_t hw_padding = 0;
+	uint32_t mps;
+	int ret = 0;
+
+	/* Default configuration. */
+	memset(config, 0, sizeof(*config));
+	config->mps = MLX5_ARG_UNSET;
+	config->cqe_comp = 1;
+	config->rx_vec_en = 1;
+	config->txq_inline_max = MLX5_ARG_UNSET;
+	config->txq_inline_min = MLX5_ARG_UNSET;
+	config->txq_inline_mpw = MLX5_ARG_UNSET;
+	config->txqs_inline = MLX5_ARG_UNSET;
+	config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
+	config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
+	config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM;
+	config->log_hp_size = MLX5_ARG_UNSET;
+	config->std_delay_drop = 0;
+	config->hp_delay_drop = 0;
+	/* Parse device parameters. */
+	if (devargs != NULL) {
+		kvlist = rte_kvargs_parse(devargs->args, NULL);
+		if (kvlist == NULL) {
+			DRV_LOG(ERR,
+				"Failed to parse device arguments.");
+			rte_errno = EINVAL;
+			return -rte_errno;
+		}
+		/* Process parameters. */
+		ret = rte_kvargs_process(kvlist, NULL,
+					 mlx5_port_args_check_handler, config);
+		rte_kvargs_free(kvlist);
+		if (ret) {
+			DRV_LOG(ERR, "Failed to process port arguments: %s",
+				strerror(rte_errno));
+			return -rte_errno;
+		}
+	}
+	/* Adjust parameters according to device capabilities. */
+#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
+	hw_padding = !!dev_attr->rx_pad_end_addr_align;
+#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
+	hw_padding = !!(dev_attr->device_cap_flags_ex &
+			IBV_DEVICE_PCI_WRITE_END_PADDING);
+#endif
+	if (config->hw_padding && !hw_padding) {
+		DRV_LOG(DEBUG, "Rx end alignment padding isn't supported.");
+		config->hw_padding = 0;
+	} else if (config->hw_padding) {
+		DRV_LOG(DEBUG, "Rx end alignment padding is enabled.");
+	}
+	/*
+	 * Multi-packet send is supported by ConnectX-4 Lx PF as well
+	 * as all ConnectX-5 devices.
+	 */
+	if (dev_attr->flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
+		if (dev_attr->flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
+			DRV_LOG(DEBUG, "enhanced MPW is supported.");
+			mps = MLX5_MPW_ENHANCED;
+		} else {
+			DRV_LOG(DEBUG, "MPW is supported.");
+			mps = MLX5_MPW;
+		}
+	} else {
+		DRV_LOG(DEBUG, "MPW isn't supported.");
+		mps = MLX5_MPW_DISABLED;
+	}
+	/*
+	 * MPW is disabled by default, while the Enhanced MPW is enabled
+	 * by default.
+	 */
+	if (config->mps == MLX5_ARG_UNSET)
+		config->mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED :
+							  MLX5_MPW_DISABLED;
+	else
+		config->mps = config->mps ? mps : MLX5_MPW_DISABLED;
+	DRV_LOG(INFO, "%sMPS is %s",
+		config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
+		config->mps == MLX5_MPW ? "legacy " : "",
+		config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
+	/* Check for LRO support. */
+	if (priv->sh->dev_cap.devx_obj_ops_en && hca_attr->lro_cap) {
+		/* TBD check tunnel lro caps. */
+		config->lro.supported = hca_attr->lro_cap;
+		DRV_LOG(DEBUG, "Device supports LRO.");
+		/*
+		 * If LRO timeout is not configured by application,
+		 * use the minimal supported value.
+		 */
+		if (!config->lro.timeout)
+			config->lro.timeout =
+				       hca_attr->lro_timer_supported_periods[0];
+		DRV_LOG(DEBUG, "LRO session timeout set to %d usec.",
+			config->lro.timeout);
+		DRV_LOG(DEBUG,
+			"LRO minimal size of TCP segment required for coalescing is %d bytes",
+			hca_attr->lro_min_mss_size);
+	}
+#if (RTE_CACHE_LINE_SIZE == 128)
+	if (config->cqe_comp &&
+	    !(dev_attr->flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) {
+		DRV_LOG(WARNING, "Rx CQE 128B compression is not supported");
+		config->cqe_comp = 0;
+	}
+#endif
+	if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
+	    (!devx || !hca_attr->mini_cqe_resp_flow_tag)) {
+		DRV_LOG(WARNING,
+			"Flow Tag CQE compression format isn't supported.");
+		config->cqe_comp = 0;
+	}
+	if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
+	    (!devx || !hca_attr->mini_cqe_resp_l3_l4_tag)) {
+		DRV_LOG(WARNING,
+			"L3/L4 Header CQE compression format isn't supported.");
+		config->cqe_comp = 0;
+	}
+	DRV_LOG(DEBUG, "Rx CQE compression is %ssupported.",
+		config->cqe_comp ? "" : "not ");
+	if (config->std_delay_drop || config->hp_delay_drop) {
+		if (!hca_attr->rq_delay_drop) {
+			config->std_delay_drop = 0;
+			config->hp_delay_drop = 0;
+			DRV_LOG(WARNING,
+				"dev_port-%u: Rxq delay drop is not supported.",
+				priv->dev_port);
+		}
+	}
+	if (config->mprq.enabled && !priv->sh->dev_cap.mprq.enabled) {
+		DRV_LOG(WARNING, "Multi-Packet RQ isn't supported.");
+		config->mprq.enabled = 0;
+	}
+	if (config->max_dump_files_num == 0)
+		config->max_dump_files_num = 128;
+	/* Detect minimal data bytes to inline. */
+	mlx5_set_min_inline(priv);
+	DRV_LOG(DEBUG, "VLAN insertion in WQE is %ssupported.",
+		config->hw_vlan_insert ? "" : "not ");
+	DRV_LOG(DEBUG, "\"rxq_pkt_pad_en\" is %u.", config->hw_padding);
+	DRV_LOG(DEBUG, "\"rxq_cqe_comp_en\" is %u.", config->cqe_comp);
+	DRV_LOG(DEBUG, "\"cqe_comp_fmt\" is %u.", config->cqe_comp_fmt);
+	DRV_LOG(DEBUG, "\"rx_vec_en\" is %u.", config->rx_vec_en);
+	DRV_LOG(DEBUG, "Standard \"delay_drop\" is %u.",
+		config->std_delay_drop);
+	DRV_LOG(DEBUG, "Hairpin \"delay_drop\" is %u.", config->hp_delay_drop);
+	DRV_LOG(DEBUG, "\"max_dump_files_num\" is %u.",
+		config->max_dump_files_num);
+	DRV_LOG(DEBUG, "\"log_hp_size\" is %u.", config->log_hp_size);
+	DRV_LOG(DEBUG, "\"mprq_en\" is %u.", config->mprq.enabled);
+	DRV_LOG(DEBUG, "\"mprq_log_stride_num\" is %u.",
+		config->mprq.log_stride_num);
+	DRV_LOG(DEBUG, "\"mprq_log_stride_size\" is %u.",
+		config->mprq.log_stride_size);
+	DRV_LOG(DEBUG, "\"mprq_max_memcpy_len\" is %u.",
+		config->mprq.max_memcpy_len);
+	DRV_LOG(DEBUG, "\"rxqs_min_mprq\" is %u.", config->mprq.min_rxqs_num);
+	DRV_LOG(DEBUG, "\"lro_timeout_usec\" is %u.", config->lro.timeout);
+	DRV_LOG(DEBUG, "\"txq_mpw_en\" is %d.", config->mps);
+	DRV_LOG(DEBUG, "\"txqs_min_inline\" is %d.", config->txqs_inline);
+	DRV_LOG(DEBUG, "\"txq_inline_min\" is %d.", config->txq_inline_min);
+	DRV_LOG(DEBUG, "\"txq_inline_max\" is %d.", config->txq_inline_max);
+	DRV_LOG(DEBUG, "\"txq_inline_mpw\" is %d.", config->txq_inline_mpw);
+	return 0;
+}
+
+/**
+ * Parse user device parameters and adjust them according to device
+ * capabilities.
+ *
+ * @param sh
+ *   Pointer to shared device context.
+ * @param devargs
+ *   Device arguments structure.
+ * @param config
+ *   Pointer to shared device configuration structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_os_shared_dev_ctx_args_config(struct mlx5_dev_ctx_shared *sh,
+				   struct rte_devargs *devargs,
+				   struct mlx5_sh_config *config)
+{
+	struct rte_kvargs *kvlist;
+	struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
+	int ret = 0;
+
+	/* Default configuration. */
+	memset(config, 0, sizeof(*config));
+	config->vf_nl_en = 1;
+	config->dv_esw_en = 1;
+	config->dv_flow_en = 1;
+	config->decap_en = 1;
+	config->allow_duplicate_pattern = 1;
+	/* Parse device parameters. */
+	if (devargs != NULL) {
+		kvlist = rte_kvargs_parse(devargs->args, NULL);
+		if (kvlist == NULL) {
+			DRV_LOG(ERR,
+				"Failed to parse shared device arguments.");
+			rte_errno = EINVAL;
+			return -rte_errno;
+		}
+		/* Process parameters. */
+		ret = rte_kvargs_process(kvlist, NULL,
+					 mlx5_dev_args_check_handler, config);
+		rte_kvargs_free(kvlist);
+		if (ret) {
+			DRV_LOG(ERR, "Failed to process device arguments: %s",
+				strerror(rte_errno));
+			return -rte_errno;
+		}
+	}
+	/* Adjust parameters according to device capabilities. */
+#if !defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_MLX5DV_DR)
+	if (config->dv_flow_en) {
+		DRV_LOG(WARNING, "DV flow is not supported.");
+		config->dv_flow_en = 0;
+	}
+#endif
+#ifdef HAVE_MLX5DV_DR_ESWITCH
+	if (!(hca_attr->eswitch_manager && config->dv_flow_en && sh->esw_mode))
+		config->dv_esw_en = 0;
+#else
+	config->dv_esw_en = 0;
+#endif
+	if (config->dv_miss_info && config->dv_esw_en)
+		config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
+	if (!config->dv_esw_en &&
+	    config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+		DRV_LOG(WARNING,
+			"Metadata mode %u is not supported (no E-Switch).",
+			config->dv_xmeta_en);
+		config->dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
+	}
+	if (config->tx_pp) {
+		DRV_LOG(DEBUG, "Timestamp counter frequency %u kHz",
+			hca_attr->dev_freq_khz);
+		DRV_LOG(DEBUG, "Packet pacing is %ssupported.",
+			hca_attr->qos.packet_pacing ? "" : "not ");
+		DRV_LOG(DEBUG, "Cross channel ops are %ssupported.",
+			hca_attr->cross_channel ? "" : "not ");
+		DRV_LOG(DEBUG, "WQE index ignore is %ssupported.",
+			hca_attr->wqe_index_ignore ? "" : "not ");
+		DRV_LOG(DEBUG, "Non-wire SQ feature is %ssupported.",
+			hca_attr->non_wire_sq ? "" : "not ");
+		DRV_LOG(DEBUG, "Static WQE SQ feature is %ssupported (%d).",
+			hca_attr->log_max_static_sq_wq ? "" : "not ",
+			hca_attr->log_max_static_sq_wq);
+		DRV_LOG(DEBUG, "WQE rate PP mode is %ssupported.",
+			hca_attr->qos.wqe_rate_pp ? "" : "not ");
+		if (!sh->cdev->config.devx) {
+			DRV_LOG(ERR, "DevX is required for packet pacing.");
+			rte_errno = ENODEV;
+			return -rte_errno;
+		}
+		if (!hca_attr->qos.packet_pacing) {
+			DRV_LOG(ERR, "Packet pacing is not supported.");
+			rte_errno = ENODEV;
+			return -rte_errno;
+		}
+		if (!hca_attr->cross_channel) {
+			DRV_LOG(ERR,
+				"Cross channel operations are required for packet pacing.");
+			rte_errno = ENODEV;
+			return -rte_errno;
+		}
+		if (!hca_attr->wqe_index_ignore) {
+			DRV_LOG(ERR,
+				"WQE index ignore feature is required for packet pacing.");
+			rte_errno = ENODEV;
+			return -rte_errno;
+		}
+		if (!hca_attr->non_wire_sq) {
+			DRV_LOG(ERR,
+				"Non-wire SQ feature is required for packet pacing.");
+			rte_errno = ENODEV;
+			return -rte_errno;
+		}
+		if (!hca_attr->log_max_static_sq_wq) {
+			DRV_LOG(ERR,
+				"Static WQE SQ feature is required for packet pacing.");
+			rte_errno = ENODEV;
+			return -rte_errno;
+		}
+		if (!hca_attr->qos.wqe_rate_pp) {
+			DRV_LOG(ERR,
+				"WQE rate mode is required for packet pacing.");
+			rte_errno = ENODEV;
+			return -rte_errno;
+		}
+#ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
+		DRV_LOG(ERR,
+			"DevX does not provide UAR offset, can't create queues for packet pacing.");
+		rte_errno = ENODEV;
+		return -rte_errno;
+#endif
+	} else if (config->tx_skew) {
+		DRV_LOG(WARNING,
+			"\"tx_skew\" has no effect without \"tx_pp\".");
+	}
+	DRV_LOG(DEBUG, "\"tx_pp\" is %d.", config->tx_pp);
+	DRV_LOG(DEBUG, "\"tx_skew\" is %d.", config->tx_skew);
+	DRV_LOG(DEBUG, "\"reclaim_mode\" is %u.", config->reclaim_mode);
+	DRV_LOG(DEBUG, "\"dv_esw_en\" is %u.", config->dv_esw_en);
+	DRV_LOG(DEBUG, "\"dv_flow_en\" is %u.", config->dv_flow_en);
+	DRV_LOG(DEBUG, "\"dv_xmeta_en\" is %u.", config->dv_xmeta_en);
+	DRV_LOG(DEBUG, "\"dv_miss_info\" is %u.", config->dv_miss_info);
+	DRV_LOG(DEBUG, "\"l3_vxlan_en\" is %u.", config->l3_vxlan_en);
+	DRV_LOG(DEBUG, "\"vf_nl_en\" is %u.", config->vf_nl_en);
+	DRV_LOG(DEBUG, "\"lacp_by_user\" is %u.", config->lacp_by_user);
+	DRV_LOG(DEBUG, "\"decap_en\" is %u.", config->decap_en);
+	DRV_LOG(DEBUG, "\"allow_duplicate_pattern\" is %u.",
+		config->allow_duplicate_pattern);
+	return 0;
+}
+
+/**
+ * Configure all device capabilities.
+ *
+ * @param sh
+ *   Pointer to mlx5_dev_ctx_shared object.
+ */
+void
+mlx5_os_cap_config(struct mlx5_dev_ctx_shared *sh)
+{
+	struct mlx5_cap *cap = &sh->dev_cap;
+	struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
+	bool devx = sh->cdev->config.devx;
+
+	memset(cap, 0, sizeof(*cap));
+	if (mlx5_dev_is_pci(sh->cdev->dev))
+		cap->vf = mlx5_dev_is_vf_pci(RTE_DEV_TO_PCI(sh->cdev->dev));
+	else
+		cap->sf = 1;
+	cap->hw_csum = !!(sh->device_attr.device_cap_flags_ex &
+			  IBV_DEVICE_RAW_IP_CSUM);
+	DRV_LOG(DEBUG, "Checksum offloading is %ssupported.",
+		(cap->hw_csum ? "" : "not "));
+	cap->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
+				IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
+	DRV_LOG(DEBUG, "VLAN stripping is %ssupported.",
+		(cap->hw_vlan_strip ? "" : "not "));
+	cap->hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
+				 IBV_RAW_PACKET_CAP_SCATTER_FCS);
+	/*
+	 * If HW has bug working with tunnel packet decapsulation and scatter
+	 * FCS, and decapsulation is needed, clear the hw_fcs_strip bit.
+	 * Then RTE_ETH_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
+	 */
+	if (hca_attr->scatter_fcs_w_decap_disable && sh->config.decap_en)
+		cap->hw_fcs_strip = 0;
+	DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported.",
+		(cap->hw_fcs_strip ? "" : "not "));
+	/*
+	 * DPDK doesn't support larger/variable indirection tables.
+	 * Once DPDK supports it, take max size from device attr.
+	 */
+	cap->ind_table_max_size =
+		RTE_MIN(sh->device_attr.max_rwq_indirection_table_size,
+			(unsigned int)RTE_ETH_RSS_RETA_SIZE_512);
+	DRV_LOG(DEBUG, "Maximum Rx indirection table size is %u",
+		cap->ind_table_max_size);
+#if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
+	!defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
+	DRV_LOG(DEBUG, "Counters are not supported.");
+#endif
+#ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
+	/*
+	 * When advanced DR API is available and DV flow is supported and
+	 * DevX is supported, HW objects operations are created by DevX.
+	 */
+	if (devx && sh->config.dv_flow_en)
+		cap->devx_obj_ops_en = 1;
+#endif
+	DRV_LOG(DEBUG, "HW objects operations creation by DevX is %ssupported",
+		cap->devx_obj_ops_en ? "" : "not ");
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+	cap->mpls_en = ((sh->device_attr.tunnel_offloads_caps &
+		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
+		   (sh->device_attr.tunnel_offloads_caps &
+		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
+	DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
+		cap->mpls_en ? "" : "not ");
+#else
+	DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to old OFED/rdma-core version or firmware configuration");
+#endif
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+	if (sh->device_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
+		cap->mprq.enabled = 1;
+		cap->mprq.log_min_stride_size =
+			sh->device_attr.min_single_stride_log_num_of_bytes;
+		cap->mprq.log_max_stride_size =
+			sh->device_attr.max_single_stride_log_num_of_bytes;
+		cap->mprq.log_min_stride_num =
+			sh->device_attr.min_single_wqe_log_num_of_strides;
+		cap->mprq.log_max_stride_num =
+			sh->device_attr.max_single_wqe_log_num_of_strides;
+		cap->mprq.log_min_stride_wqe_size = devx ?
+					hca_attr->log_min_stride_wqe_sz :
+					MLX5_MPRQ_LOG_MIN_STRIDE_WQE_SIZE;
+		DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %u",
+			cap->mprq.log_min_stride_size);
+		DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %u",
+			cap->mprq.log_max_stride_size);
+		DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %u",
+			cap->mprq.log_min_stride_num);
+		DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %u",
+			cap->mprq.log_max_stride_num);
+		DRV_LOG(DEBUG, "\tmin_stride_wqe_log_size: %u",
+			cap->mprq.log_min_stride_wqe_size);
+		DRV_LOG(DEBUG, "\tsupported_qpts: %d",
+			sh->device_attr.stride_supported_qpts);
+		DRV_LOG(DEBUG, "Device supports Multi-Packet RQ.");
+	}
+#endif
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+	if (sh->device_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
+		cap->tunnel_en = sh->device_attr.tunnel_offloads_caps &
+				(MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN |
+				 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE |
+				 MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE);
+	}
+	if (cap->tunnel_en) {
+		DRV_LOG(DEBUG, "Tunnel offloading is supported for %s%s%s",
+			cap->tunnel_en &
+			MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN ?
+			"[VXLAN]" : "",
+			cap->tunnel_en &
+			MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE ?
+			"[GRE]" : "",
+			cap->tunnel_en &
+			MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE ?
+			"[GENEVE]" : "");
+	} else {
+		DRV_LOG(DEBUG, "Tunnel offloading is not supported.");
+	}
+#else
+	DRV_LOG(WARNING,
+		"Tunnel offloading disabled due to old OFED/rdma-core version");
+#endif
+#ifdef HAVE_IBV_MLX5_MOD_SWP
+	if (sh->device_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
+		cap->swp = sh->device_attr.sw_parsing_offloads &
+			   (MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP |
+			    MLX5_SW_PARSING_TSO_CAP);
+	DRV_LOG(DEBUG, "SWP support: %u", cap->swp);
+#endif
+	cap->tso = sh->device_attr.max_tso > 0 &&
+		   (sh->device_attr.tso_supported_qpts &
+		    (1 << IBV_QPT_RAW_PACKET));
+	if (cap->tso)
+		cap->tso_max_payload_sz = sh->device_attr.max_tso;
+	if (!devx)
+		return;
+	cap->steering_format_version = hca_attr->steering_format_version;
+#if defined(HAVE_MLX5DV_DR) && \
+	(defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER) || \
+	 defined(HAVE_MLX5_DR_CREATE_ACTION_ASO))
+	if (hca_attr->qos.sup && hca_attr->qos.flow_meter_old &&
+	    sh->config.dv_flow_en) {
+		uint8_t reg_c_mask = hca_attr->qos.flow_meter_reg_c_ids;
+		/*
+		 * Meter needs two REG_C's for color match and pre-sfx
+		 * flow match. Here get the REG_C for color match.
+		 * REG_C_0 and REG_C_1 are reserved for the metadata feature.
+		 */
+		reg_c_mask &= 0xfc;
+		if (__builtin_popcount(reg_c_mask) < 1) {
+			cap->mtr_en = 0;
+			DRV_LOG(WARNING, "No available register for meter.");
+		} else {
+			uint8_t color_reg;
+
+			/*
+			 * The meter color register is used by the flow-hit
+			 * feature as well.
+			 * The flow-hit feature must use REG_C_3.
+			 * Prefer REG_C_3 if it is available.
+			 */
+			if (reg_c_mask & (1 << (REG_C_3 - REG_C_0)))
+				color_reg = REG_C_3;
+			else
+				color_reg = ffs(reg_c_mask) - 1 + REG_C_0;
+			cap->mtr_en = 1;
+			cap->mtr_color_reg = color_reg;
+			cap->mtr_reg_share = hca_attr->qos.flow_meter;
+			DRV_LOG(DEBUG, "The REG_C meter uses is %d", color_reg);
+		}
+	}
+	if (hca_attr->qos.sup && hca_attr->qos.flow_meter_aso_sup) {
+		uint32_t log_obj_size =
+				rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
+
+		if (log_obj_size >= hca_attr->qos.log_meter_aso_granularity &&
+		    log_obj_size <= hca_attr->qos.log_meter_aso_max_alloc)
+			cap->meter_aso_en = 1;
+	}
+	cap->tunnel_header_0_1 = hca_attr->flow.tunnel_header_0_1;
+#endif
+#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
+	if (hca_attr->flow_hit_aso && cap->mtr_color_reg == REG_C_3) {
+		cap->flow_hit_aso_en = 1;
+		DRV_LOG(DEBUG, "Flow Hit ASO is supported.");
+	}
+#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
+#if defined(HAVE_MLX5_DR_CREATE_ACTION_ASO) && \
+	defined(HAVE_MLX5_DR_ACTION_ASO_CT)
+	if (hca_attr->ct_offload && cap->mtr_color_reg == REG_C_3) {
+		cap->ct_aso_en = 1;
+		DRV_LOG(DEBUG, "CT ASO is supported.");
+	}
+#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO && HAVE_MLX5_DR_ACTION_ASO_CT */
+#if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_SAMPLE)
+	if (hca_attr->log_max_ft_sampler_num > 0  && sh->config.dv_flow_en) {
+		cap->sampler_en = 1;
+		DRV_LOG(DEBUG, "Sampler enabled!");
+	} else {
+		cap->sampler_en = 0;
+		if (!hca_attr->log_max_ft_sampler_num)
+			DRV_LOG(WARNING, "No available register for sampler.");
+		else
+			DRV_LOG(DEBUG, "DV flow is not supported!");
+	}
+#endif
+	mlx5_rt_timestamp_config(sh, hca_attr);
+}
+
 /**
  * Spawn an Ethernet device from Verbs information.
  *
@@ -827,8 +1377,6 @@  mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,
  *   Backing DPDK device.
  * @param spawn
  *   Verbs device parameters (name, port, switch_info) to spawn.
- * @param config
- *   Device configuration parameters.
  * @param eth_da
  *   Device arguments.
  *
@@ -842,27 +1390,20 @@  mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,
 static struct rte_eth_dev *
 mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	       struct mlx5_dev_spawn_data *spawn,
-	       struct mlx5_dev_config *config,
 	       struct rte_eth_devargs *eth_da)
 {
 	const struct mlx5_switch_info *switch_info = &spawn->info;
 	struct mlx5_dev_ctx_shared *sh = NULL;
-	struct mlx5_hca_attr *hca_attr = &spawn->cdev->config.hca_attr;
 	struct ibv_port_attr port_attr = { .state = IBV_PORT_NOP };
 	struct rte_eth_dev *eth_dev = NULL;
 	struct mlx5_priv *priv = NULL;
 	int err = 0;
-	unsigned int hw_padding = 0;
-	unsigned int mps;
-	unsigned int mpls_en = 0;
-	unsigned int swp = 0;
-	unsigned int mprq = 0;
 	struct rte_ether_addr mac;
 	char name[RTE_ETH_NAME_MAX_LEN];
 	int own_domain_id = 0;
 	uint16_t port_id;
 	struct mlx5_port_info vport_info = { .query_flags = 0 };
-	int nl_rdma = -1;
+	int nl_rdma;
 	int i;
 
 	/* Determine if this port representor is supposed to be spawned. */
@@ -940,132 +1481,9 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		mlx5_dev_close(eth_dev);
 		return NULL;
 	}
-	/* Process parameters. */
-	err = mlx5_args(config, dpdk_dev->devargs);
-	if (err) {
-		DRV_LOG(ERR, "failed to process device arguments: %s",
-			strerror(rte_errno));
-		return NULL;
-	}
-	sh = mlx5_alloc_shared_dev_ctx(spawn, config);
+	sh = mlx5_alloc_shared_dev_ctx(spawn);
 	if (!sh)
 		return NULL;
-	/* Update final values for devargs before check sibling config. */
-	if (config->dv_miss_info) {
-		if (switch_info->master || switch_info->representor)
-			config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
-	}
-#if !defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_MLX5DV_DR)
-	if (config->dv_flow_en) {
-		DRV_LOG(WARNING, "DV flow is not supported.");
-		config->dv_flow_en = 0;
-	}
-#endif
-#ifdef HAVE_MLX5DV_DR_ESWITCH
-	if (!(hca_attr->eswitch_manager && config->dv_flow_en &&
-	      (switch_info->representor || switch_info->master)))
-		config->dv_esw_en = 0;
-#else
-	config->dv_esw_en = 0;
-#endif
-	if (!config->dv_esw_en &&
-	    config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
-		DRV_LOG(WARNING,
-			"Metadata mode %u is not supported (no E-Switch).",
-			config->dv_xmeta_en);
-		config->dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
-	}
-	/* Check sibling device configurations. */
-	err = mlx5_dev_check_sibling_config(sh, config, dpdk_dev);
-	if (err)
-		goto error;
-#ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
-	config->dest_tir = 1;
-#endif
-	/*
-	 * Multi-packet send is supported by ConnectX-4 Lx PF as well
-	 * as all ConnectX-5 devices.
-	 */
-	if (sh->device_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
-		if (sh->device_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
-			DRV_LOG(DEBUG, "enhanced MPW is supported");
-			mps = MLX5_MPW_ENHANCED;
-		} else {
-			DRV_LOG(DEBUG, "MPW is supported");
-			mps = MLX5_MPW;
-		}
-	} else {
-		DRV_LOG(DEBUG, "MPW isn't supported");
-		mps = MLX5_MPW_DISABLED;
-	}
-#ifdef HAVE_IBV_MLX5_MOD_SWP
-	if (sh->device_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
-		swp = sh->device_attr.sw_parsing_offloads;
-	DRV_LOG(DEBUG, "SWP support: %u", swp);
-#endif
-	config->swp = swp & (MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP |
-		MLX5_SW_PARSING_TSO_CAP);
-#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
-	if (sh->device_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
-		DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
-			sh->device_attr.min_single_stride_log_num_of_bytes);
-		DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
-			sh->device_attr.max_single_stride_log_num_of_bytes);
-		DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
-			sh->device_attr.min_single_wqe_log_num_of_strides);
-		DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
-			sh->device_attr.max_single_wqe_log_num_of_strides);
-		DRV_LOG(DEBUG, "\tsupported_qpts: %d",
-			sh->device_attr.stride_supported_qpts);
-		DRV_LOG(DEBUG, "\tmin_stride_wqe_log_size: %d",
-			config->mprq.log_min_stride_wqe_size);
-		DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
-		mprq = 1;
-		config->mprq.log_min_stride_size =
-			sh->device_attr.min_single_stride_log_num_of_bytes;
-		config->mprq.log_max_stride_size =
-			sh->device_attr.max_single_stride_log_num_of_bytes;
-		config->mprq.log_min_stride_num =
-			sh->device_attr.min_single_wqe_log_num_of_strides;
-		config->mprq.log_max_stride_num =
-			sh->device_attr.max_single_wqe_log_num_of_strides;
-	}
-#endif
-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-	if (sh->device_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
-		config->tunnel_en = sh->device_attr.tunnel_offloads_caps &
-			     (MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN |
-			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE |
-			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE);
-	}
-	if (config->tunnel_en) {
-		DRV_LOG(DEBUG, "tunnel offloading is supported for %s%s%s",
-		config->tunnel_en &
-		MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN ? "[VXLAN]" : "",
-		config->tunnel_en &
-		MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE ? "[GRE]" : "",
-		config->tunnel_en &
-		MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE ? "[GENEVE]" : ""
-		);
-	} else {
-		DRV_LOG(DEBUG, "tunnel offloading is not supported");
-	}
-#else
-	DRV_LOG(WARNING,
-		"tunnel offloading disabled due to old OFED/rdma-core version");
-#endif
-#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
-	mpls_en = ((sh->device_attr.tunnel_offloads_caps &
-		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
-		   (sh->device_attr.tunnel_offloads_caps &
-		    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
-	DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
-		mpls_en ? "" : "not ");
-#else
-	DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
-		" old OFED/rdma-core version or firmware configuration");
-#endif
-	config->mpls_en = mpls_en;
 	nl_rdma = mlx5_nl_init(NETLINK_RDMA);
 	/* Check port status. */
 	if (spawn->phys_port <= UINT8_MAX) {
@@ -1133,7 +1551,7 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	 * register to match on vport index. The engaged part of metadata
 	 * register is defined by mask.
 	 */
-	if (switch_info->representor || switch_info->master) {
+	if (sh->esw_mode) {
 		err = mlx5_glue->devx_port_query(sh->cdev->ctx,
 						 spawn->phys_port,
 						 &vport_info);
@@ -1164,8 +1582,7 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	}
 	if (vport_info.query_flags & MLX5_PORT_QUERY_VPORT) {
 		priv->vport_id = vport_info.vport_id;
-	} else if (spawn->pf_bond >= 0 &&
-		   (switch_info->representor || switch_info->master)) {
+	} else if (spawn->pf_bond >= 0 && sh->esw_mode) {
 		DRV_LOG(ERR,
 			"Cannot deduce vport index for port %d on bonding device %s",
 			spawn->phys_port, spawn->phys_dev_name);
@@ -1219,280 +1636,14 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		DRV_LOG(DEBUG, "dev_port-%u new domain_id=%u\n",
 			priv->dev_port, priv->domain_id);
 	}
-	config->hw_csum = !!(sh->device_attr.device_cap_flags_ex &
-			    IBV_DEVICE_RAW_IP_CSUM);
-	DRV_LOG(DEBUG, "checksum offloading is %ssupported",
-		(config->hw_csum ? "" : "not "));
-#if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
-	!defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
-	DRV_LOG(DEBUG, "counters are not supported");
-#endif
-	config->ind_table_max_size =
-		sh->device_attr.max_rwq_indirection_table_size;
-	/*
-	 * Remove this check once DPDK supports larger/variable
-	 * indirection tables.
-	 */
-	if (config->ind_table_max_size > (unsigned int)RTE_ETH_RSS_RETA_SIZE_512)
-		config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
-	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
-		config->ind_table_max_size);
-	config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
-				  IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
-	DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
-		(config->hw_vlan_strip ? "" : "not "));
-	config->hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
-				 IBV_RAW_PACKET_CAP_SCATTER_FCS);
-#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
-	hw_padding = !!sh->device_attr.rx_pad_end_addr_align;
-#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
-	hw_padding = !!(sh->device_attr.device_cap_flags_ex &
-			IBV_DEVICE_PCI_WRITE_END_PADDING);
-#endif
-	if (config->hw_padding && !hw_padding) {
-		DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
-		config->hw_padding = 0;
-	} else if (config->hw_padding) {
-		DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
-	}
-	config->tso = (sh->device_attr.max_tso > 0 &&
-		      (sh->device_attr.tso_supported_qpts &
-		       (1 << IBV_QPT_RAW_PACKET)));
-	if (config->tso)
-		config->tso_max_payload_sz = sh->device_attr.max_tso;
-	/*
-	 * MPW is disabled by default, while the Enhanced MPW is enabled
-	 * by default.
-	 */
-	if (config->mps == MLX5_ARG_UNSET)
-		config->mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED :
-							  MLX5_MPW_DISABLED;
-	else
-		config->mps = config->mps ? mps : MLX5_MPW_DISABLED;
-	DRV_LOG(INFO, "%sMPS is %s",
-		config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
-		config->mps == MLX5_MPW ? "legacy " : "",
-		config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
-	if (sh->cdev->config.devx) {
-		sh->steering_format_version = hca_attr->steering_format_version;
-		/* Check for LRO support. */
-		if (config->dest_tir && hca_attr->lro_cap &&
-		    config->dv_flow_en) {
-			/* TBD check tunnel lro caps. */
-			config->lro.supported = hca_attr->lro_cap;
-			DRV_LOG(DEBUG, "Device supports LRO");
-			/*
-			 * If LRO timeout is not configured by application,
-			 * use the minimal supported value.
-			 */
-			if (!config->lro.timeout)
-				config->lro.timeout =
-				       hca_attr->lro_timer_supported_periods[0];
-			DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
-				config->lro.timeout);
-			DRV_LOG(DEBUG, "LRO minimal size of TCP segment "
-				"required for coalescing is %d bytes",
-				hca_attr->lro_min_mss_size);
-		}
-#if defined(HAVE_MLX5DV_DR) && \
-	(defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER) || \
-	 defined(HAVE_MLX5_DR_CREATE_ACTION_ASO))
-		if (hca_attr->qos.sup && hca_attr->qos.flow_meter_old &&
-		    config->dv_flow_en) {
-			uint8_t reg_c_mask = hca_attr->qos.flow_meter_reg_c_ids;
-			/*
-			 * Meter needs two REG_C's for color match and pre-sfx
-			 * flow match. Here get the REG_C for color match.
-			 * REG_C_0 and REG_C_1 is reserved for metadata feature.
-			 */
-			reg_c_mask &= 0xfc;
-			if (__builtin_popcount(reg_c_mask) < 1) {
-				priv->mtr_en = 0;
-				DRV_LOG(WARNING, "No available register for"
-					" meter.");
-			} else {
-				/*
-				 * The meter color register is used by the
-				 * flow-hit feature as well.
-				 * The flow-hit feature must use REG_C_3
-				 * Prefer REG_C_3 if it is available.
-				 */
-				if (reg_c_mask & (1 << (REG_C_3 - REG_C_0)))
-					priv->mtr_color_reg = REG_C_3;
-				else
-					priv->mtr_color_reg = ffs(reg_c_mask)
-							      - 1 + REG_C_0;
-				priv->mtr_en = 1;
-				priv->mtr_reg_share = hca_attr->qos.flow_meter;
-				DRV_LOG(DEBUG, "The REG_C meter uses is %d",
-					priv->mtr_color_reg);
-			}
-		}
-		if (hca_attr->qos.sup && hca_attr->qos.flow_meter_aso_sup) {
-			uint32_t log_obj_size =
-				rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
-			if (log_obj_size >=
-			    hca_attr->qos.log_meter_aso_granularity &&
-			    log_obj_size <=
-			    hca_attr->qos.log_meter_aso_max_alloc)
-				sh->meter_aso_en = 1;
-		}
-		if (priv->mtr_en) {
-			err = mlx5_aso_flow_mtrs_mng_init(priv->sh);
-			if (err) {
-				err = -err;
-				goto error;
-			}
-		}
-		if (hca_attr->flow.tunnel_header_0_1)
-			sh->tunnel_header_0_1 = 1;
-#endif
-#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
-		if (hca_attr->flow_hit_aso && priv->mtr_color_reg == REG_C_3) {
-			sh->flow_hit_aso_en = 1;
-			err = mlx5_flow_aso_age_mng_init(sh);
-			if (err) {
-				err = -err;
-				goto error;
-			}
-			DRV_LOG(DEBUG, "Flow Hit ASO is supported.");
-		}
-#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
-#if defined(HAVE_MLX5_DR_CREATE_ACTION_ASO) && \
-	defined(HAVE_MLX5_DR_ACTION_ASO_CT)
-		if (hca_attr->ct_offload && priv->mtr_color_reg == REG_C_3) {
-			err = mlx5_flow_aso_ct_mng_init(sh);
-			if (err) {
-				err = -err;
-				goto error;
-			}
-			DRV_LOG(DEBUG, "CT ASO is supported.");
-			sh->ct_aso_en = 1;
-		}
-#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO && HAVE_MLX5_DR_ACTION_ASO_CT */
-#if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_SAMPLE)
-		if (hca_attr->log_max_ft_sampler_num > 0  &&
-		    config->dv_flow_en) {
-			priv->sampler_en = 1;
-			DRV_LOG(DEBUG, "Sampler enabled!");
-		} else {
-			priv->sampler_en = 0;
-			if (!hca_attr->log_max_ft_sampler_num)
-				DRV_LOG(WARNING,
-					"No available register for sampler.");
-			else
-				DRV_LOG(DEBUG, "DV flow is not supported!");
-		}
-#endif
-	}
-	if (config->cqe_comp && RTE_CACHE_LINE_SIZE == 128 &&
-	    !(sh->device_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) {
-		DRV_LOG(WARNING, "Rx CQE 128B compression is not supported");
-		config->cqe_comp = 0;
-	}
-	if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
-	    (!sh->cdev->config.devx || !hca_attr->mini_cqe_resp_flow_tag)) {
-		DRV_LOG(WARNING, "Flow Tag CQE compression"
-				 " format isn't supported.");
-		config->cqe_comp = 0;
-	}
-	if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_L34H_STRIDX &&
-	    (!sh->cdev->config.devx || !hca_attr->mini_cqe_resp_l3_l4_tag)) {
-		DRV_LOG(WARNING, "L3/L4 Header CQE compression"
-				 " format isn't supported.");
-		config->cqe_comp = 0;
-	}
-	DRV_LOG(DEBUG, "Rx CQE compression is %ssupported",
-			config->cqe_comp ? "" : "not ");
-	if (config->tx_pp) {
-		DRV_LOG(DEBUG, "Timestamp counter frequency %u kHz",
-			hca_attr->dev_freq_khz);
-		DRV_LOG(DEBUG, "Packet pacing is %ssupported",
-			hca_attr->qos.packet_pacing ? "" : "not ");
-		DRV_LOG(DEBUG, "Cross channel ops are %ssupported",
-			hca_attr->cross_channel ? "" : "not ");
-		DRV_LOG(DEBUG, "WQE index ignore is %ssupported",
-			hca_attr->wqe_index_ignore ? "" : "not ");
-		DRV_LOG(DEBUG, "Non-wire SQ feature is %ssupported",
-			hca_attr->non_wire_sq ? "" : "not ");
-		DRV_LOG(DEBUG, "Static WQE SQ feature is %ssupported (%d)",
-			hca_attr->log_max_static_sq_wq ? "" : "not ",
-			hca_attr->log_max_static_sq_wq);
-		DRV_LOG(DEBUG, "WQE rate PP mode is %ssupported",
-			hca_attr->qos.wqe_rate_pp ? "" : "not ");
-		if (!sh->cdev->config.devx) {
-			DRV_LOG(ERR, "DevX is required for packet pacing");
-			err = ENODEV;
-			goto error;
-		}
-		if (!hca_attr->qos.packet_pacing) {
-			DRV_LOG(ERR, "Packet pacing is not supported");
-			err = ENODEV;
-			goto error;
-		}
-		if (!hca_attr->cross_channel) {
-			DRV_LOG(ERR, "Cross channel operations are"
-				     " required for packet pacing");
-			err = ENODEV;
-			goto error;
-		}
-		if (!hca_attr->wqe_index_ignore) {
-			DRV_LOG(ERR, "WQE index ignore feature is"
-				     " required for packet pacing");
-			err = ENODEV;
-			goto error;
-		}
-		if (!hca_attr->non_wire_sq) {
-			DRV_LOG(ERR, "Non-wire SQ feature is"
-				     " required for packet pacing");
-			err = ENODEV;
-			goto error;
-		}
-		if (!hca_attr->log_max_static_sq_wq) {
-			DRV_LOG(ERR, "Static WQE SQ feature is"
-				     " required for packet pacing");
-			err = ENODEV;
-			goto error;
-		}
-		if (!hca_attr->qos.wqe_rate_pp) {
-			DRV_LOG(ERR, "WQE rate mode is required"
-				     " for packet pacing");
-			err = ENODEV;
-			goto error;
-		}
-#ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
-		DRV_LOG(ERR, "DevX does not provide UAR offset,"
-			     " can't create queues for packet pacing");
-		err = ENODEV;
+	/* Process parameters and store port configuration in priv structure. */
+	err = mlx5_os_port_args_config(priv, dpdk_dev->devargs, &priv->config);
+	if (err) {
+		err = rte_errno;
+		DRV_LOG(ERR, "Failed to process port configuration: %s",
+			strerror(rte_errno));
 		goto error;
-#endif
 	}
-	if (config->std_delay_drop || config->hp_delay_drop) {
-		if (!hca_attr->rq_delay_drop) {
-			config->std_delay_drop = 0;
-			config->hp_delay_drop = 0;
-			DRV_LOG(WARNING,
-				"dev_port-%u: Rxq delay drop is not supported",
-				priv->dev_port);
-		}
-	}
-	if (sh->cdev->config.devx)
-		mlx5_rt_timestamp_config(sh, config, hca_attr);
-	/*
-	 * If HW has bug working with tunnel packet decapsulation and
-	 * scatter FCS, and decapsulation is needed, clear the hw_fcs_strip
-	 * bit. Then RTE_ETH_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
-	 */
-	if (hca_attr->scatter_fcs_w_decap_disable && config->decap_en)
-		config->hw_fcs_strip = 0;
-	DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
-		(config->hw_fcs_strip ? "" : "not "));
-	if (config->mprq.enabled && !mprq) {
-		DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
-		config->mprq.enabled = 0;
-	}
-	if (config->max_dump_files_num == 0)
-		config->max_dump_files_num = 128;
 	eth_dev = rte_eth_dev_allocate(name);
 	if (eth_dev == NULL) {
 		DRV_LOG(ERR, "can not allocate rte ethdev");
@@ -1572,7 +1723,7 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	eth_dev->rx_queue_count = mlx5_rx_queue_count;
 	/* Register MAC address. */
 	claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
-	if (config->vf && config->vf_nl_en)
+	if (sh->dev_cap.vf && sh->config.vf_nl_en)
 		mlx5_nl_mac_addr_sync(priv->nl_socket_route,
 				      mlx5_ifindex(eth_dev),
 				      eth_dev->data->mac_addrs,
@@ -1593,13 +1744,9 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	 * Verbs context returned by ibv_open_device().
 	 */
 	mlx5_link_update(eth_dev, 0);
-	/* Detect minimal data bytes to inline. */
-	mlx5_set_min_inline(spawn, config);
-	/* Store device configuration on private structure. */
-	priv->config = *config;
 	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
-		icfg[i].release_mem_en = !!config->reclaim_mode;
-		if (config->reclaim_mode)
+		icfg[i].release_mem_en = !!sh->config.reclaim_mode;
+		if (sh->config.reclaim_mode)
 			icfg[i].per_core_cache = 0;
 		priv->flows[i] = mlx5_ipool_create(&icfg[i]);
 		if (!priv->flows[i])
@@ -1607,14 +1754,14 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	}
 	/* Create context for virtual machine VLAN workaround. */
 	priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex);
-	if (config->dv_flow_en) {
+	if (sh->config.dv_flow_en) {
 		err = mlx5_alloc_shared_dr(priv);
 		if (err)
 			goto error;
 		if (mlx5_flex_item_port_init(eth_dev) < 0)
 			goto error;
 	}
-	if (sh->cdev->config.devx && config->dv_flow_en && config->dest_tir) {
+	if (sh->dev_cap.devx_obj_ops_en) {
 		priv->obj_ops = devx_obj_ops;
 		mlx5_queue_counter_id_prepare(eth_dev);
 		priv->obj_ops.lb_dummy_queue_create =
@@ -1629,7 +1776,7 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	} else {
 		priv->obj_ops = ibv_obj_ops;
 	}
-	if (config->tx_pp &&
+	if (sh->config.tx_pp &&
 	    priv->obj_ops.txq_obj_new != mlx5_txq_devx_obj_new) {
 		/*
 		 * HAVE_MLX5DV_DEVX_UAR_OFFSET is required to support
@@ -1660,11 +1807,11 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		goto error;
 	}
 	mlx5_set_metadata_mask(eth_dev);
-	if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+	if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
 	    !priv->sh->dv_regc0_mask) {
 		DRV_LOG(ERR, "metadata mode %u is not supported "
 			     "(no metadata reg_c[0] is available)",
-			     priv->config.dv_xmeta_en);
+			     sh->config.dv_xmeta_en);
 			err = ENOTSUP;
 			goto error;
 	}
@@ -1689,16 +1836,16 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		DRV_LOG(DEBUG,
 			"port %u extensive metadata register is not supported",
 			eth_dev->data->port_id);
-		if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+		if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
 			DRV_LOG(ERR, "metadata mode %u is not supported "
 				     "(no metadata registers available)",
-				     priv->config.dv_xmeta_en);
+				     sh->config.dv_xmeta_en);
 			err = ENOTSUP;
 			goto error;
 		}
 	}
-	if (priv->config.dv_flow_en &&
-	    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+	if (sh->config.dv_flow_en &&
+	    sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
 	    mlx5_flow_ext_mreg_supported(eth_dev) &&
 	    priv->sh->dv_regc0_mask) {
 		priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
@@ -1717,7 +1864,7 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	rte_spinlock_init(&priv->shared_act_sl);
 	mlx5_flow_counter_mode_config(eth_dev);
 	mlx5_flow_drop_action_config(eth_dev);
-	if (priv->config.dv_flow_en)
+	if (sh->config.dv_flow_en)
 		eth_dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
 	return eth_dev;
 error:
@@ -1964,34 +2111,6 @@  mlx5_device_bond_pci_match(const char *ibdev_name,
 	return pf;
 }
 
-static void
-mlx5_os_config_default(struct mlx5_dev_config *config,
-		       struct mlx5_common_dev_config *cconf)
-{
-	memset(config, 0, sizeof(*config));
-	config->mps = MLX5_ARG_UNSET;
-	config->cqe_comp = 1;
-	config->rx_vec_en = 1;
-	config->txq_inline_max = MLX5_ARG_UNSET;
-	config->txq_inline_min = MLX5_ARG_UNSET;
-	config->txq_inline_mpw = MLX5_ARG_UNSET;
-	config->txqs_inline = MLX5_ARG_UNSET;
-	config->vf_nl_en = 1;
-	config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
-	config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
-	config->mprq.log_min_stride_wqe_size = cconf->devx ?
-					cconf->hca_attr.log_min_stride_wqe_sz :
-					MLX5_MPRQ_LOG_MIN_STRIDE_WQE_SIZE;
-	config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM;
-	config->dv_esw_en = 1;
-	config->dv_flow_en = 1;
-	config->decap_en = 1;
-	config->log_hp_size = MLX5_ARG_UNSET;
-	config->allow_duplicate_pattern = 1;
-	config->std_delay_drop = 0;
-	config->hp_delay_drop = 0;
-}
-
 /**
  * Register a PCI device within bonding.
  *
@@ -2040,7 +2159,6 @@  mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
 	int bd = -1;
 	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cdev->dev);
 	struct mlx5_dev_spawn_data *list = NULL;
-	struct mlx5_dev_config dev_config;
 	struct rte_eth_devargs eth_da = *req_eth_da;
 	struct rte_pci_addr owner_pci = pci_dev->addr; /* Owner PF. */
 	struct mlx5_bond_info bond_info;
@@ -2382,11 +2500,7 @@  mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
 	for (i = 0; i != ns; ++i) {
 		uint32_t restore;
 
-		/* Default configuration. */
-		mlx5_os_config_default(&dev_config, &cdev->config);
-		dev_config.vf = mlx5_dev_is_vf_pci(pci_dev);
-		list[i].eth_dev = mlx5_dev_spawn(cdev->dev, &list[i],
-						 &dev_config, &eth_da);
+		list[i].eth_dev = mlx5_dev_spawn(cdev->dev, &list[i], &eth_da);
 		if (!list[i].eth_dev) {
 			if (rte_errno != EBUSY && rte_errno != EEXIST)
 				break;
@@ -2541,7 +2655,6 @@  static int
 mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev)
 {
 	struct rte_eth_devargs eth_da = { .nb_ports = 0 };
-	struct mlx5_dev_config config;
 	struct mlx5_dev_spawn_data spawn = { .pf_bond = -1 };
 	struct rte_device *dev = cdev->dev;
 	struct rte_auxiliary_device *adev = RTE_DEV_TO_AUXILIARY(dev);
@@ -2552,9 +2665,6 @@  mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev)
 	ret = mlx5_os_parse_eth_devargs(dev, &eth_da);
 	if (ret != 0)
 		return ret;
-	/* Set default config data. */
-	mlx5_os_config_default(&config, &cdev->config);
-	config.sf = 1;
 	/* Init spawn data. */
 	spawn.max_port = 1;
 	spawn.phys_port = 1;
@@ -2567,7 +2677,7 @@  mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev)
 	spawn.ifindex = ret;
 	spawn.cdev = cdev;
 	/* Spawn device. */
-	eth_dev = mlx5_dev_spawn(dev, &spawn, &config, &eth_da);
+	eth_dev = mlx5_dev_spawn(dev, &spawn, &eth_da);
 	if (eth_dev == NULL)
 		return -rte_errno;
 	/* Post create. */
@@ -2605,6 +2715,12 @@  mlx5_os_net_probe(struct mlx5_common_device *cdev)
 			strerror(rte_errno));
 		return -rte_errno;
 	}
+	ret = mlx5_probe_again_args_validate(cdev);
+	if (ret) {
+		DRV_LOG(ERR, "Probe again parameters are not compatible: %s",
+			strerror(rte_errno));
+		return -rte_errno;
+	}
 	if (mlx5_dev_is_pci(cdev->dev))
 		return mlx5_os_pci_probe(cdev);
 	else
@@ -2785,7 +2901,7 @@  void
 mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	const int vf = priv->config.vf;
+	const int vf = priv->sh->dev_cap.vf;
 
 	if (vf)
 		mlx5_nl_mac_addr_remove(priv->nl_socket_route,
@@ -2811,7 +2927,7 @@  mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
 		     uint32_t index)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	const int vf = priv->config.vf;
+	const int vf = priv->sh->dev_cap.vf;
 	int ret = 0;
 
 	if (vf)
diff --git a/drivers/net/mlx5/linux/mlx5_vlan_os.c b/drivers/net/mlx5/linux/mlx5_vlan_os.c
index 005904bdfe..80ccd5a460 100644
--- a/drivers/net/mlx5/linux/mlx5_vlan_os.c
+++ b/drivers/net/mlx5/linux/mlx5_vlan_os.c
@@ -103,12 +103,11 @@  void *
 mlx5_vlan_vmwa_init(struct rte_eth_dev *dev, uint32_t ifindex)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
 	struct mlx5_nl_vlan_vmwa_context *vmwa;
 	enum rte_hypervisor hv_type;
 
 	/* Do not engage workaround over PF. */
-	if (!config->vf)
+	if (!priv->sh->dev_cap.vf)
 		return NULL;
 	/* Check whether there is desired virtual environment */
 	hv_type = rte_hypervisor_get();
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index cce4d4448c..12113f2811 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -428,13 +428,13 @@  mlx5_is_sf_repr(struct rte_eth_dev *dev)
 /**
  * Initialize the ASO aging management structure.
  *
- * @param[in] sh
- *   Pointer to mlx5_dev_ctx_shared object to free
+ * @param[out] sh
+ *   Pointer to mlx5_dev_ctx_shared object.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-int
+static int
 mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh)
 {
 	int err;
@@ -533,7 +533,7 @@  mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
 	fallback = true;
 #else
 	fallback = false;
-	if (!sh->cdev->config.devx || !priv->config.dv_flow_en ||
+	if (!sh->cdev->config.devx || !sh->config.dv_flow_en ||
 	    !hca_attr->flow_counters_dump ||
 	    !(hca_attr->flow_counter_bulk_alloc_bitmap & 0x4) ||
 	    (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
@@ -556,8 +556,8 @@  mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
 /**
  * Initialize the counters management structure.
  *
- * @param[in] sh
- *   Pointer to mlx5_dev_ctx_shared object to free
+ * @param[out] sh
+ *   Pointer to mlx5_dev_ctx_shared object.
  */
 static void
 mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
@@ -651,11 +651,14 @@  mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
 /**
  * Initialize the aso flow meters management structure.
  *
- * @param[in] sh
- *   Pointer to mlx5_dev_ctx_shared object to free
+ * @param[out] sh
+ *   Pointer to mlx5_dev_ctx_shared object.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-int
-mlx5_aso_flow_mtrs_mng_init(struct mlx5_dev_ctx_shared *sh)
+static int
+mlx5_flow_aso_mtrs_mng_init(struct mlx5_dev_ctx_shared *sh)
 {
 	if (!sh->mtrmng) {
 		sh->mtrmng = mlx5_malloc(MLX5_MEM_ZERO,
@@ -667,7 +670,7 @@  mlx5_aso_flow_mtrs_mng_init(struct mlx5_dev_ctx_shared *sh)
 			rte_errno = ENOMEM;
 			return -ENOMEM;
 		}
-		if (sh->meter_aso_en) {
+		if (sh->dev_cap.meter_aso_en) {
 			rte_spinlock_init(&sh->mtrmng->pools_mng.mtrsl);
 			rte_rwlock_init(&sh->mtrmng->pools_mng.resize_mtrwl);
 			LIST_INIT(&sh->mtrmng->pools_mng.meters);
@@ -685,7 +688,7 @@  mlx5_aso_flow_mtrs_mng_init(struct mlx5_dev_ctx_shared *sh)
  *   Pointer to mlx5_dev_ctx_shared object to free.
  */
 static void
-mlx5_aso_flow_mtrs_mng_close(struct mlx5_dev_ctx_shared *sh)
+mlx5_flow_aso_mtrs_mng_close(struct mlx5_dev_ctx_shared *sh)
 {
 	struct mlx5_aso_mtr_pool *mtr_pool;
 	struct mlx5_flow_mtr_mng *mtrmng = sh->mtrmng;
@@ -695,7 +698,7 @@  mlx5_aso_flow_mtrs_mng_close(struct mlx5_dev_ctx_shared *sh)
 	int i;
 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
 
-	if (sh->meter_aso_en) {
+	if (sh->dev_cap.meter_aso_en) {
 		mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_POLICER);
 		idx = mtrmng->pools_mng.n_valid;
 		while (idx--) {
@@ -744,13 +747,13 @@  mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh)
 /*
  * Initialize the ASO connection tracking structure.
  *
- * @param[in] sh
+ * @param[out] sh
  *   Pointer to mlx5_dev_ctx_shared object.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-int
+static int
 mlx5_flow_aso_ct_mng_init(struct mlx5_dev_ctx_shared *sh)
 {
 	int err;
@@ -831,17 +834,74 @@  mlx5_flow_aso_ct_mng_close(struct mlx5_dev_ctx_shared *sh)
 	sh->ct_mng = NULL;
 }
 
+/**
+ * Close and release all the resources of the ASO flow management structures.
+ *
+ * @param[in] sh
+ *   Pointer to mlx5_dev_ctx_shared object to free.
+ */
+static void
+mlx5_flow_aso_close(struct mlx5_dev_ctx_shared *sh)
+{
+	if (sh->ct_mng) {
+		mlx5_flow_aso_ct_mng_close(sh);
+		sh->ct_mng = NULL;
+	}
+	if (sh->aso_age_mng) {
+		mlx5_flow_aso_age_mng_close(sh);
+		sh->aso_age_mng = NULL;
+	}
+	if (sh->mtrmng) {
+		mlx5_flow_aso_mtrs_mng_close(sh);
+		sh->mtrmng = NULL;
+	}
+}
+
+/**
+ * Initialize the ASO management structures (Age, Meter and Connection Tracking).
+ *
+ * @param[out] sh
+ *   Pointer to mlx5_dev_ctx_shared object.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_aso_init(struct mlx5_dev_ctx_shared *sh)
+{
+	int err;
+
+	if (!sh->cdev->config.devx)
+		return 0;
+	if (sh->dev_cap.mtr_en) {
+		err = mlx5_flow_aso_mtrs_mng_init(sh);
+		if (err)
+			goto error;
+	}
+	if (sh->dev_cap.flow_hit_aso_en) {
+		err = mlx5_flow_aso_age_mng_init(sh);
+		if (err)
+			goto error;
+	}
+	if (sh->dev_cap.ct_aso_en) {
+		err = mlx5_flow_aso_ct_mng_init(sh);
+		if (err)
+			goto error;
+	}
+	return 0;
+error:
+	mlx5_flow_aso_close(sh);
+	return err;
+}
+
 /**
  * Initialize the flow resources' indexed mempool.
  *
  * @param[in] sh
  *   Pointer to mlx5_dev_ctx_shared object.
- * @param[in] config
- *   Pointer to user dev config.
  */
 static void
-mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh,
-		       const struct mlx5_dev_config *config)
+mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh)
 {
 	uint8_t i;
 	struct mlx5_indexed_pool_config cfg;
@@ -856,12 +916,12 @@  mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh,
 		 * according to PCI function flow configuration.
 		 */
 		case MLX5_IPOOL_MLX5_FLOW:
-			cfg.size = config->dv_flow_en ?
+			cfg.size = sh->config.dv_flow_en ?
 				sizeof(struct mlx5_flow_handle) :
 				MLX5_FLOW_HANDLE_VERBS_SIZE;
 			break;
 		}
-		if (config->reclaim_mode) {
+		if (sh->config.reclaim_mode) {
 			cfg.release_mem_en = 1;
 			cfg.per_core_cache = 0;
 		} else {
@@ -1174,14 +1234,11 @@  mlx5_setup_tis(struct mlx5_dev_ctx_shared *sh)
  *
  * @param sh
  *   Pointer to mlx5_dev_ctx_shared object.
- * @param config
- *   Device configuration parameters.
  * @param hca_attr
  *   Pointer to DevX HCA capabilities structure.
  */
 void
 mlx5_rt_timestamp_config(struct mlx5_dev_ctx_shared *sh,
-			 struct mlx5_dev_config *config,
 			 struct mlx5_hca_attr *hca_attr)
 {
 	uint32_t dw_cnt = MLX5_ST_SZ_DW(register_mtutc);
@@ -1198,11 +1255,11 @@  mlx5_rt_timestamp_config(struct mlx5_dev_ctx_shared *sh,
 		/* MTUTC register is read successfully. */
 		ts_mode = MLX5_GET(register_mtutc, reg, time_stamp_mode);
 		if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
-			config->rt_timestamp = 1;
+			sh->dev_cap.rt_timestamp = 1;
 	} else {
 		/* Kernel does not support register reading. */
 		if (hca_attr->dev_freq_khz == (NS_PER_S / MS_PER_S))
-			config->rt_timestamp = 1;
+			sh->dev_cap.rt_timestamp = 1;
 	}
 }
 
@@ -1219,16 +1276,13 @@  mlx5_rt_timestamp_config(struct mlx5_dev_ctx_shared *sh,
  *
  * @param[in] spawn
  *   Pointer to the device attributes (name, port, etc).
- * @param[in] config
- *   Pointer to device configuration structure.
  *
  * @return
  *   Pointer to mlx5_dev_ctx_shared object on success,
  *   otherwise NULL and rte_errno is set.
  */
 struct mlx5_dev_ctx_shared *
-mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
-			  const struct mlx5_dev_config *config)
+mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn)
 {
 	struct mlx5_dev_ctx_shared *sh;
 	int err = 0;
@@ -1259,6 +1313,7 @@  mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 	pthread_mutex_init(&sh->txpp.mutex, NULL);
 	sh->numa_node = spawn->cdev->dev->numa_node;
 	sh->cdev = spawn->cdev;
+	sh->esw_mode = !!(spawn->info.master || spawn->info.representor);
 	if (spawn->bond_info)
 		sh->bond = *spawn->bond_info;
 	err = mlx5_os_get_dev_attr(sh->cdev, &sh->device_attr);
@@ -1266,9 +1321,16 @@  mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 		DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed");
 		goto error;
 	}
+	err = mlx5_os_shared_dev_ctx_args_config(sh, sh->cdev->dev->devargs,
+						 &sh->config);
+	if (err) {
+		DRV_LOG(ERR, "Failed to process device arguments: %s",
+			strerror(rte_errno));
+		goto error;
+	}
+	mlx5_os_cap_config(sh);
 	sh->refcnt = 1;
 	sh->max_port = spawn->max_port;
-	sh->reclaim_mode = config->reclaim_mode;
 	strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->cdev->ctx),
 		sizeof(sh->ibdev_name) - 1);
 	strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->cdev->ctx),
@@ -1296,6 +1358,9 @@  mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 		err = mlx5_rxtx_uars_prepare(sh);
 		if (err)
 			goto error;
+		err = mlx5_flow_aso_init(sh);
+		if (err)
+			goto error;
 #ifndef RTE_ARCH_64
 	} else {
 		/* Initialize UAR access locks for 32bit implementations. */
@@ -1312,7 +1377,7 @@  mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 	}
 	mlx5_flow_aging_init(sh);
 	mlx5_flow_counters_mng_init(sh);
-	mlx5_flow_ipool_create(sh, config);
+	mlx5_flow_ipool_create(sh);
 	/* Add context to the global device list. */
 	LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);
 	rte_spinlock_init(&sh->geneve_tlv_opt_sl);
@@ -1324,6 +1389,7 @@  mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 	pthread_mutex_destroy(&sh->txpp.mutex);
 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
 	MLX5_ASSERT(sh);
+	mlx5_flow_aso_close(sh);
 	mlx5_rxtx_uars_release(sh);
 	i = 0;
 	do {
@@ -1395,16 +1461,9 @@  mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 	 *  Only primary process handles async device events.
 	 **/
 	mlx5_flow_counters_mng_close(sh);
-	if (sh->ct_mng)
-		mlx5_flow_aso_ct_mng_close(sh);
-	if (sh->aso_age_mng) {
-		mlx5_flow_aso_age_mng_close(sh);
-		sh->aso_age_mng = NULL;
-	}
-	if (sh->mtrmng)
-		mlx5_aso_flow_mtrs_mng_close(sh);
 	mlx5_flow_ipool_destroy(sh);
 	mlx5_os_dev_shared_handler_uninstall(sh);
+	mlx5_flow_aso_close(sh);
 	mlx5_rxtx_uars_release(sh);
 	do {
 		if (sh->tis[i])
@@ -1675,7 +1734,7 @@  mlx5_dev_close(struct rte_eth_dev *dev)
 		mlx5_free(priv->rss_conf.rss_key);
 	if (priv->reta_idx != NULL)
 		mlx5_free(priv->reta_idx);
-	if (priv->config.vf)
+	if (priv->sh->dev_cap.vf)
 		mlx5_os_mac_addr_flush(dev);
 	if (priv->nl_socket_route >= 0)
 		close(priv->nl_socket_route);
@@ -1904,6 +1963,89 @@  const struct eth_dev_ops mlx5_dev_ops_isolate = {
 	.get_monitor_addr = mlx5_get_monitor_addr,
 };
 
+/**
+ * Verify and store value for shared device argument.
+ *
+ * @param[in] key
+ *   Key argument to verify.
+ * @param[in] val
+ *   Value associated with key.
+ * @param opaque
+ *   User data.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_dev_args_check_handler(const char *key, const char *val, void *opaque)
+{
+	struct mlx5_sh_config *config = opaque;
+	signed long tmp;
+
+	errno = 0;
+	tmp = strtol(val, NULL, 0);
+	if (errno) {
+		rte_errno = errno;
+		DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
+		return -rte_errno;
+	}
+	if (tmp < 0 && strcmp(MLX5_TX_PP, key) && strcmp(MLX5_TX_SKEW, key)) {
+		/* Negative values are acceptable for some keys only. */
+		rte_errno = EINVAL;
+		DRV_LOG(WARNING, "%s: invalid negative value \"%s\"", key, val);
+		return -rte_errno;
+	}
+	if (strcmp(MLX5_TX_PP, key) == 0) {
+		unsigned long mod = tmp >= 0 ? tmp : -tmp;
+
+		if (!mod) {
+			DRV_LOG(ERR, "Zero Tx packet pacing parameter.");
+			rte_errno = EINVAL;
+			return -rte_errno;
+		}
+		config->tx_pp = tmp;
+	} else if (strcmp(MLX5_TX_SKEW, key) == 0) {
+		config->tx_skew = tmp;
+	} else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
+		config->l3_vxlan_en = !!tmp;
+	} else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
+		config->vf_nl_en = !!tmp;
+	} else if (strcmp(MLX5_DV_ESW_EN, key) == 0) {
+		config->dv_esw_en = !!tmp;
+	} else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
+		config->dv_flow_en = !!tmp;
+	} else if (strcmp(MLX5_DV_XMETA_EN, key) == 0) {
+		if (tmp != MLX5_XMETA_MODE_LEGACY &&
+		    tmp != MLX5_XMETA_MODE_META16 &&
+		    tmp != MLX5_XMETA_MODE_META32 &&
+		    tmp != MLX5_XMETA_MODE_MISS_INFO) {
+			DRV_LOG(ERR, "Invalid extensive metadata parameter.");
+			rte_errno = EINVAL;
+			return -rte_errno;
+		}
+		if (tmp != MLX5_XMETA_MODE_MISS_INFO)
+			config->dv_xmeta_en = tmp;
+		else
+			config->dv_miss_info = 1;
+	} else if (strcmp(MLX5_LACP_BY_USER, key) == 0) {
+		config->lacp_by_user = !!tmp;
+	} else if (strcmp(MLX5_RECLAIM_MEM, key) == 0) {
+		if (tmp != MLX5_RCM_NONE &&
+		    tmp != MLX5_RCM_LIGHT &&
+		    tmp != MLX5_RCM_AGGR) {
+			DRV_LOG(ERR, "Unrecognized %s: \"%s\"", key, val);
+			rte_errno = EINVAL;
+			return -rte_errno;
+		}
+		config->reclaim_mode = tmp;
+	} else if (strcmp(MLX5_DECAP_EN, key) == 0) {
+		config->decap_en = !!tmp;
+	} else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) {
+		config->allow_duplicate_pattern = !!tmp;
+	}
+	return 0;
+}
+
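
For context, mlx5_dev_args_check_handler() is a rte_kvargs callback: it is invoked once per key=value pair, the same way the removed mlx5_args() drove mlx5_args_check(). A minimal sketch of the wiring, assuming the real caller is mlx5_os_shared_dev_ctx_args_config() and using a hypothetical helper name:

  #include <rte_errno.h>
  #include <rte_kvargs.h>

  /* Hypothetical sketch; mirrors the removed mlx5_args() below. */
  static int
  parse_sh_devargs(struct rte_devargs *devargs, struct mlx5_sh_config *config)
  {
  	struct rte_kvargs *kvlist;
  	int ret;

  	if (devargs == NULL)
  		return 0;
  	kvlist = rte_kvargs_parse(devargs->args, NULL);
  	if (kvlist == NULL) {
  		rte_errno = EINVAL;
  		return -rte_errno;
  	}
  	/* Runs the handler for every key=value pair in the devargs. */
  	ret = rte_kvargs_process(kvlist, NULL,
  				 mlx5_dev_args_check_handler, config);
  	rte_kvargs_free(kvlist);
  	return ret;
  }

With the split, a devargs string such as dv_flow_en=1,txq_inline_max=204 is routed so that dv_flow_en lands in the shared mlx5_sh_config here, while txq_inline_max stays per port in mlx5_port_args_check_handler() (key spellings per the mlx5 PMD documentation).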
 /**
  * Verify and store value for device argument.
  *
@@ -1917,18 +2059,22 @@  const struct eth_dev_ops mlx5_dev_ops_isolate = {
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
-mlx5_args_check(const char *key, const char *val, void *opaque)
+int
+mlx5_port_args_check_handler(const char *key, const char *val, void *opaque)
 {
-	struct mlx5_dev_config *config = opaque;
-	unsigned long mod;
+	struct mlx5_port_config *config = opaque;
 	signed long tmp;
 
 	/* No-op, port representors are processed in mlx5_dev_spawn(). */
 	if (!strcmp(MLX5_DRIVER_KEY, key) || !strcmp(MLX5_REPRESENTOR, key) ||
 	    !strcmp(MLX5_SYS_MEM_EN, key) || !strcmp(MLX5_TX_DB_NC, key) ||
-	    !strcmp(MLX5_MR_MEMPOOL_REG_EN, key) ||
-	    !strcmp(MLX5_MR_EXT_MEMSEG_EN, key))
+	    !strcmp(MLX5_MR_MEMPOOL_REG_EN, key) || !strcmp(MLX5_TX_PP, key) ||
+	    !strcmp(MLX5_MR_EXT_MEMSEG_EN, key) || !strcmp(MLX5_TX_SKEW, key) ||
+	    !strcmp(MLX5_RECLAIM_MEM, key) || !strcmp(MLX5_DECAP_EN, key) ||
+	    !strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) ||
+	    !strcmp(MLX5_L3_VXLAN_EN, key) || !strcmp(MLX5_VF_NL_EN, key) ||
+	    !strcmp(MLX5_DV_ESW_EN, key) || !strcmp(MLX5_DV_FLOW_EN, key) ||
+	    !strcmp(MLX5_DV_XMETA_EN, key) || !strcmp(MLX5_LACP_BY_USER, key))
 		return 0;
 	errno = 0;
 	tmp = strtol(val, NULL, 0);
@@ -1937,13 +2083,12 @@  mlx5_args_check(const char *key, const char *val, void *opaque)
 		DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
 		return -rte_errno;
 	}
-	if (tmp < 0 && strcmp(MLX5_TX_PP, key) && strcmp(MLX5_TX_SKEW, key)) {
+	if (tmp < 0) {
 		/* Negative values are acceptable for some keys only. */
 		rte_errno = EINVAL;
 		DRV_LOG(WARNING, "%s: invalid negative value \"%s\"", key, val);
 		return -rte_errno;
 	}
-	mod = tmp >= 0 ? tmp : -tmp;
 	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
 		if (tmp > MLX5_CQE_RESP_FORMAT_L34H_STRIDX) {
 			DRV_LOG(ERR, "invalid CQE compression "
@@ -1989,41 +2134,8 @@  mlx5_args_check(const char *key, const char *val, void *opaque)
 		config->txq_inline_mpw = tmp;
 	} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
 		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
-	} else if (strcmp(MLX5_TX_PP, key) == 0) {
-		if (!mod) {
-			DRV_LOG(ERR, "Zero Tx packet pacing parameter");
-			rte_errno = EINVAL;
-			return -rte_errno;
-		}
-		config->tx_pp = tmp;
-	} else if (strcmp(MLX5_TX_SKEW, key) == 0) {
-		config->tx_skew = tmp;
 	} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
 		config->rx_vec_en = !!tmp;
-	} else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
-		config->l3_vxlan_en = !!tmp;
-	} else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
-		config->vf_nl_en = !!tmp;
-	} else if (strcmp(MLX5_DV_ESW_EN, key) == 0) {
-		config->dv_esw_en = !!tmp;
-	} else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
-		config->dv_flow_en = !!tmp;
-	} else if (strcmp(MLX5_DV_XMETA_EN, key) == 0) {
-		if (tmp != MLX5_XMETA_MODE_LEGACY &&
-		    tmp != MLX5_XMETA_MODE_META16 &&
-		    tmp != MLX5_XMETA_MODE_META32 &&
-		    tmp != MLX5_XMETA_MODE_MISS_INFO) {
-			DRV_LOG(ERR, "invalid extensive "
-				     "metadata parameter");
-			rte_errno = EINVAL;
-			return -rte_errno;
-		}
-		if (tmp != MLX5_XMETA_MODE_MISS_INFO)
-			config->dv_xmeta_en = tmp;
-		else
-			config->dv_miss_info = 1;
-	} else if (strcmp(MLX5_LACP_BY_USER, key) == 0) {
-		config->lacp_by_user = !!tmp;
 	} else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) {
 		config->max_dump_files_num = tmp;
 	} else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) {
@@ -2032,19 +2144,6 @@  mlx5_args_check(const char *key, const char *val, void *opaque)
 		DRV_LOG(DEBUG, "class argument is %s.", val);
 	} else if (strcmp(MLX5_HP_BUF_SIZE, key) == 0) {
 		config->log_hp_size = tmp;
-	} else if (strcmp(MLX5_RECLAIM_MEM, key) == 0) {
-		if (tmp != MLX5_RCM_NONE &&
-		    tmp != MLX5_RCM_LIGHT &&
-		    tmp != MLX5_RCM_AGGR) {
-			DRV_LOG(ERR, "Unrecognize %s: \"%s\"", key, val);
-			rte_errno = EINVAL;
-			return -rte_errno;
-		}
-		config->reclaim_mode = tmp;
-	} else if (strcmp(MLX5_DECAP_EN, key) == 0) {
-		config->decap_en = !!tmp;
-	} else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) {
-		config->allow_duplicate_pattern = !!tmp;
 	} else if (strcmp(MLX5_DELAY_DROP, key) == 0) {
 		config->std_delay_drop = !!(tmp & MLX5_DELAY_DROP_STANDARD);
 		config->hp_delay_drop = !!(tmp & MLX5_DELAY_DROP_HAIRPIN);
@@ -2056,41 +2155,6 @@  mlx5_args_check(const char *key, const char *val, void *opaque)
 	return 0;
 }
 
-/**
- * Parse device parameters.
- *
- * @param config
- *   Pointer to device configuration structure.
- * @param devargs
- *   Device arguments structure.
- *
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
- */
-int
-mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
-{
-	struct rte_kvargs *kvlist;
-	int ret = 0;
-
-	if (devargs == NULL)
-		return 0;
-	/* Following UGLY cast is done to pass checkpatch. */
-	kvlist = rte_kvargs_parse(devargs->args, NULL);
-	if (kvlist == NULL) {
-		rte_errno = EINVAL;
-		return -rte_errno;
-	}
-	/* Process parameters. */
-	ret = rte_kvargs_process(kvlist, NULL, mlx5_args_check, config);
-	if (ret) {
-		rte_errno = EINVAL;
-		ret = -rte_errno;
-	}
-	rte_kvargs_free(kvlist);
-	return ret;
-}
-
 /**
  * Configures the minimal amount of data to inline into WQE
  * while sending packets.
@@ -2102,21 +2166,19 @@  mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
  * - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4 Lx
  *   and none (0 bytes) for other NICs
  *
- * @param spawn
- *   Verbs device parameters (name, port, switch_info) to spawn.
- * @param config
- *   Device configuration parameters.
+ * @param priv
+ *   Pointer to the private device data structure.
  */
 void
-mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
-		    struct mlx5_dev_config *config)
+mlx5_set_min_inline(struct mlx5_priv *priv)
 {
-	struct mlx5_hca_attr *hca_attr = &spawn->cdev->config.hca_attr;
+	struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
+	struct mlx5_port_config *config = &priv->config;
 
 	if (config->txq_inline_min != MLX5_ARG_UNSET) {
 		/* Application defines size of inlined data explicitly. */
-		if (spawn->pci_dev != NULL) {
-			switch (spawn->pci_dev->id.device_id) {
+		if (priv->pci_dev != NULL) {
+			switch (priv->pci_dev->id.device_id) {
 			case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
 			case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
 				if (config->txq_inline_min <
@@ -2182,7 +2244,7 @@  mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
 			}
 		}
 	}
-	if (spawn->pci_dev == NULL) {
+	if (priv->pci_dev == NULL) {
 		config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
 		goto exit;
 	}
@@ -2191,7 +2253,7 @@  mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
 	 * inline data size with DevX. Try PCI ID
 	 * to determine old NICs.
 	 */
-	switch (spawn->pci_dev->id.device_id) {
+	switch (priv->pci_dev->id.device_id) {
 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
 	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
@@ -2233,7 +2295,7 @@  mlx5_set_metadata_mask(struct rte_eth_dev *dev)
 	uint32_t meta, mark, reg_c0;
 
 	reg_c0 = ~priv->vport_meta_mask;
-	switch (priv->config.dv_xmeta_en) {
+	switch (sh->config.dv_xmeta_en) {
 	case MLX5_XMETA_MODE_LEGACY:
 		meta = UINT32_MAX;
 		mark = MLX5_FLOW_MARK_MASK;
@@ -2267,7 +2329,7 @@  mlx5_set_metadata_mask(struct rte_eth_dev *dev)
 				 sh->dv_meta_mask, reg_c0);
 	else
 		sh->dv_regc0_mask = reg_c0;
-	DRV_LOG(DEBUG, "metadata mode %u", priv->config.dv_xmeta_en);
+	DRV_LOG(DEBUG, "metadata mode %u", sh->config.dv_xmeta_en);
 	DRV_LOG(DEBUG, "metadata MARK mask %08X", sh->dv_mark_mask);
 	DRV_LOG(DEBUG, "metadata META mask %08X", sh->dv_meta_mask);
 	DRV_LOG(DEBUG, "metadata reg_c0 mask %08X", sh->dv_regc0_mask);
@@ -2294,58 +2356,120 @@  rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n)
 }
 
 /**
- * Check sibling device configurations.
+ * Check sibling device configurations when probing again.
  *
- * Sibling devices sharing the Infiniband device context should have compatible
+ * Sibling devices sharing the Infiniband device context should have compatible
  * configurations. This regards representors and bonding slaves.
  *
- * @param sh
- *   Shared device context.
- * @param config
- *   Configuration of the device is going to be created.
- * @param dpdk_dev
- *   Backing DPDK device.
+ * @param cdev
+ *   Pointer to mlx5 device structure.
  *
  * @return
- *   0 on success, EINVAL otherwise
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_dev_check_sibling_config(struct mlx5_dev_ctx_shared *sh,
-			      struct mlx5_dev_config *config,
-			      struct rte_device *dpdk_dev)
+mlx5_probe_again_args_validate(struct mlx5_common_device *cdev)
 {
-	struct mlx5_dev_config *sh_conf = NULL;
-	uint16_t port_id;
+	struct mlx5_dev_ctx_shared *sh = NULL;
+	struct mlx5_sh_config *config;
+	int ret;
 
-	MLX5_ASSERT(sh);
-	/* Nothing to compare for the single/first device. */
-	if (sh->refcnt == 1)
+	/* Secondary process should not handle devargs. */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
-	/* Find the device with shared context. */
-	MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
-		struct mlx5_priv *opriv =
-			rte_eth_devices[port_id].data->dev_private;
-
-		if (opriv && opriv->sh == sh) {
-			sh_conf = &opriv->config;
+	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
+	/* Search for IB context by common device pointer. */
+	LIST_FOREACH(sh, &mlx5_dev_ctx_list, next)
+		if (sh->cdev == cdev)
 			break;
-		}
-	}
-	if (!sh_conf)
+	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
+	/* If no sh is found, this is the first probe of the device. */
+	if (sh == NULL)
 		return 0;
-	if (sh_conf->dv_flow_en ^ config->dv_flow_en) {
-		DRV_LOG(ERR, "\"dv_flow_en\" configuration mismatch"
-			     " for shared %s context", sh->ibdev_name);
-		rte_errno = EINVAL;
-		return rte_errno;
+	config = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
+			     sizeof(struct mlx5_sh_config),
+			     RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+	if (config == NULL) {
+		rte_errno = ENOMEM;
+		return -rte_errno;
 	}
-	if (sh_conf->dv_xmeta_en ^ config->dv_xmeta_en) {
-		DRV_LOG(ERR, "\"dv_xmeta_en\" configuration mismatch"
-			     " for shared %s context", sh->ibdev_name);
-		rte_errno = EINVAL;
-		return rte_errno;
+	ret = mlx5_os_shared_dev_ctx_args_config(sh, sh->cdev->dev->devargs,
+						 config);
+	if (ret) {
+		DRV_LOG(ERR, "Failed to process device arguments: %s",
+			strerror(rte_errno));
+		mlx5_free(config);
+		return ret;
+	}
+	/* Check sibling device configurations. */
+	if (sh->config.dv_flow_en ^ config->dv_flow_en) {
+		DRV_LOG(ERR, "\"dv_flow_en\" "
+			"configuration mismatch for shared %s context.",
+			sh->ibdev_name);
+		goto error;
+	}
+	if ((sh->config.dv_xmeta_en ^ config->dv_xmeta_en) ||
+	    (sh->config.dv_miss_info ^ config->dv_miss_info)) {
+		DRV_LOG(ERR, "\"dv_xmeta_en\" "
+			"configuration mismatch for shared %s context.",
+			sh->ibdev_name);
+		goto error;
+	}
+	if (sh->config.dv_esw_en ^ config->dv_esw_en) {
+		DRV_LOG(ERR, "\"dv_esw_en\" "
+			"configuration mismatch for shared %s context.",
+			sh->ibdev_name);
+		goto error;
 	}
+	if (sh->config.reclaim_mode ^ config->reclaim_mode) {
+		DRV_LOG(ERR, "\"reclaim_mode\" "
+			"configuration mismatch for shared %s context.",
+			sh->ibdev_name);
+		goto error;
+	}
+	if (sh->config.allow_duplicate_pattern ^
+	    config->allow_duplicate_pattern) {
+		DRV_LOG(ERR, "\"allow_duplicate_pattern\" "
+			"configuration mismatch for shared %s context.",
+			sh->ibdev_name);
+		goto error;
+	}
+	if (sh->config.l3_vxlan_en ^ config->l3_vxlan_en) {
+		DRV_LOG(ERR, "\"l3_vxlan_en\" "
+			"configuration mismatch for shared %s context.",
+			sh->ibdev_name);
+		goto error;
+	}
+	if (sh->config.decap_en ^ config->decap_en) {
+		DRV_LOG(ERR, "\"decap_en\" "
+			"configuration mismatch for shared %s context.",
+			sh->ibdev_name);
+		goto error;
+	}
+	if (sh->config.lacp_by_user ^ config->lacp_by_user) {
+		DRV_LOG(ERR, "\"lacp_by_user\" "
+			"configuration mismatch for shared %s context.",
+			sh->ibdev_name);
+		goto error;
+	}
+	if (sh->config.tx_pp ^ config->tx_pp) {
+		DRV_LOG(ERR, "\"tx_pp\" "
+			"configuration mismatch for shared %s context.",
+			sh->ibdev_name);
+		goto error;
+	}
+	if (sh->config.tx_skew ^ config->tx_skew) {
+		DRV_LOG(ERR, "\"tx_skew\" "
+			"configuration mismatch for shared %s context.",
+			sh->ibdev_name);
+		goto error;
+	}
+	mlx5_free(config);
 	return 0;
+error:
+	mlx5_free(config);
+	rte_errno = EINVAL;
+	return -rte_errno;
 }
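
Each comparison above relies on the XOR idiom: for these fields, old ^ new is non-zero exactly when the two parsed values differ. The same pattern as a stand-alone sketch, with a hypothetical helper name:

  /* Hypothetical helper; returns -1 and logs on a config mismatch. */
  static int
  sh_config_mismatch(uint32_t old_val, uint32_t new_val, const char *name,
  		   const char *ibdev_name)
  {
  	if (old_val ^ new_val) {
  		DRV_LOG(ERR,
  			"\"%s\" configuration mismatch for shared %s context.",
  			name, ibdev_name);
  		return -1;
  	}
  	return 0;
  }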
 
 /**
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 874ac36071..ed4c71aca9 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -234,74 +234,95 @@  struct mlx5_lro_config {
 };
 
 /*
- * Device configuration structure.
- *
- * Merged configuration from:
- *
- *  - Device capabilities,
- *  - User device parameters disabled features.
+ * Port configuration structure.
+ * User device parameters that are applied per port and may disable
+ * features.
+ * TODO: complete it...
  */
-struct mlx5_dev_config {
-	unsigned int hw_csum:1; /* Checksum offload is supported. */
-	unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */
+struct mlx5_port_config {
 	unsigned int hw_vlan_insert:1; /* VLAN insertion in WQE is supported. */
-	unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
 	unsigned int hw_padding:1; /* End alignment padding is supported. */
-	unsigned int vf:1; /* This is a VF. */
-	unsigned int sf:1; /* This is a SF. */
-	unsigned int tunnel_en:3;
-	/* Whether tunnel stateless offloads are supported. */
-	unsigned int mpls_en:1; /* MPLS over GRE/UDP is enabled. */
 	unsigned int cqe_comp:1; /* CQE compression is enabled. */
 	unsigned int cqe_comp_fmt:3; /* CQE compression format. */
-	unsigned int tso:1; /* Whether TSO is supported. */
 	unsigned int rx_vec_en:1; /* Rx vector is enabled. */
-	unsigned int l3_vxlan_en:1; /* Enable L3 VXLAN flow creation. */
-	unsigned int vf_nl_en:1; /* Enable Netlink requests in VF mode. */
-	unsigned int dv_esw_en:1; /* Enable E-Switch DV flow. */
-	unsigned int dv_flow_en:1; /* Enable DV flow. */
-	unsigned int dv_xmeta_en:2; /* Enable extensive flow metadata. */
-	unsigned int lacp_by_user:1;
-	/* Enable user to manage LACP traffic. */
-	unsigned int swp:3; /* Tx generic tunnel checksum and TSO offload. */
-	unsigned int dest_tir:1; /* Whether advanced DR API is available. */
-	unsigned int reclaim_mode:2; /* Memory reclaim mode. */
-	unsigned int rt_timestamp:1; /* realtime timestamp format. */
-	unsigned int decap_en:1; /* Whether decap will be used or not. */
-	unsigned int dv_miss_info:1; /* restore packet after partial hw miss */
-	unsigned int allow_duplicate_pattern:1;
-	/* Allow/Prevent the duplicate rules pattern. */
 	unsigned int std_delay_drop:1; /* Enable standard Rxq delay drop. */
 	unsigned int hp_delay_drop:1; /* Enable hairpin Rxq delay drop. */
 	struct {
 		unsigned int enabled:1; /* Whether MPRQ is enabled. */
 		unsigned int log_stride_num; /* Log number of strides. */
 		unsigned int log_stride_size; /* Log size of a stride. */
-		unsigned int log_min_stride_size; /* Log min size of a stride.*/
-		unsigned int log_max_stride_size; /* Log max size of a stride.*/
-		unsigned int log_min_stride_num; /* Log min num of strides. */
-		unsigned int log_max_stride_num; /* Log max num of strides. */
-		unsigned int log_min_stride_wqe_size;
-		/* Log min WQE size, (size of single stride)*(num of strides).*/
 		unsigned int max_memcpy_len;
 		/* Maximum packet size to memcpy Rx packets. */
 		unsigned int min_rxqs_num;
 		/* Rx queue count threshold to enable MPRQ. */
 	} mprq; /* Configurations for Multi-Packet RQ. */
 	int mps; /* Multi-packet send supported mode. */
-	unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */
-	unsigned int ind_table_max_size; /* Maximum indirection table size. */
 	unsigned int max_dump_files_num; /* Maximum dump files per queue. */
 	unsigned int log_hp_size; /* Single hairpin queue data size in total. */
 	int txqs_inline; /* Queue number threshold for inlining. */
 	int txq_inline_min; /* Minimal amount of data bytes to inline. */
 	int txq_inline_max; /* Max packet size for inlining with SEND. */
 	int txq_inline_mpw; /* Max packet size for inlining with eMPW. */
+	struct mlx5_lro_config lro; /* LRO configuration. */
+};
+
+/*
+ * Shared device configuration structure.
+ * User device parameters that are shared by all ports and may disable
+ * features.
+ * TODO: complete it...
+ */
+struct mlx5_sh_config {
 	int tx_pp; /* Timestamp scheduling granularity in nanoseconds. */
 	int tx_skew; /* Tx scheduling skew between WQE and data on wire. */
-	struct mlx5_lro_config lro; /* LRO configuration. */
+	uint32_t reclaim_mode:2; /* Memory reclaim mode. */
+	uint32_t dv_esw_en:1; /* Enable E-Switch DV flow. */
+	uint32_t dv_flow_en:1; /* Enable DV flow. */
+	uint32_t dv_xmeta_en:2; /* Enable extensive flow metadata. */
+	uint32_t dv_miss_info:1; /* Restore packet after partial HW miss. */
+	uint32_t l3_vxlan_en:1; /* Enable L3 VXLAN flow creation. */
+	uint32_t vf_nl_en:1; /* Enable Netlink requests in VF mode. */
+	uint32_t lacp_by_user:1; /* Enable user to manage LACP traffic. */
+	uint32_t decap_en:1; /* Whether decap will be used or not. */
+	uint32_t allow_duplicate_pattern:1;
+	/* Allow/Prevent the duplicate rules pattern. */
 };
 
+/* Device capabilities structure. */
+struct mlx5_cap {
+	uint32_t vf:1; /* This is a VF. */
+	uint32_t sf:1; /* This is a SF. */
+	uint32_t hw_csum:1; /* Checksum offload is supported. */
+	uint32_t hw_vlan_strip:1; /* VLAN stripping is supported. */
+	uint32_t hw_fcs_strip:1; /* FCS stripping is supported. */
+	uint32_t flow_hit_aso_en:1; /* Flow Hit ASO is supported. */
+	uint32_t meter_aso_en:1; /* Flow Meter ASO is supported. */
+	uint32_t ct_aso_en:1; /* Connection Tracking ASO is supported. */
+	uint32_t tunnel_header_0_1:1; /* tunnel_header_0_1 is supported. */
+	uint32_t tunnel_en:3;
+	/* Whether tunnel stateless offloads are supported. */
+	uint32_t steering_format_version:4;
+	/* Indicates the device steering logic format. */
+	uint32_t devx_obj_ops_en:1;
+	/* Whether HW objects operations are created by DevX. */
+	uint32_t tso:1; /* Whether TSO is supported. */
+	uint32_t swp:3; /* Tx generic tunnel checksum and TSO offload. */
+	uint32_t rt_timestamp:1; /* Realtime timestamp format. */
+	uint32_t mpls_en:1; /* MPLS over GRE/UDP is enabled. */
+	uint32_t sampler_en:1; /* Whether support sampler. */
+	uint32_t mtr_en:1; /* Whether support meter. */
+	uint32_t mtr_reg_share:1; /* Whether support meter REG_C share. */
+	uint8_t mtr_color_reg; /* Meter color match REG_C. */
+	struct {
+		uint32_t enabled:1; /* Whether MPRQ is enabled. */
+		uint32_t log_min_stride_size; /* Log min size of a stride. */
+		uint32_t log_max_stride_size; /* Log max size of a stride. */
+		uint32_t log_min_stride_num; /* Log min num of strides. */
+		uint32_t log_max_stride_num; /* Log max num of strides. */
+		uint32_t log_min_stride_wqe_size;
+		/* Log min WQE size, (size of single stride)*(num of strides).*/
+	} mprq; /* Capability for Multi-Packet RQ. */
+	uint32_t tso_max_payload_sz; /* Maximum TCP payload for TSO. */
+	uint32_t ind_table_max_size; /* Maximum indirection table size. */
+};
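
The MPRQ limits in mlx5_cap are log2-encoded; linear values are recovered by shifting. A minimal sketch (RTE_BIT32() from rte_bitops.h; the debug print is illustrative only):

  #include <rte_bitops.h>

  /* Recover linear MPRQ limits from the log2-encoded capabilities. */
  static void
  mprq_print_limits(const struct mlx5_cap *cap)
  {
  	uint32_t min_stride_size = RTE_BIT32(cap->mprq.log_min_stride_size);
  	uint32_t max_stride_size = RTE_BIT32(cap->mprq.log_max_stride_size);
  	/* Min WQE size = (size of single stride) * (num of strides). */
  	uint32_t min_wqe_size = RTE_BIT32(cap->mprq.log_min_stride_wqe_size);

  	DRV_LOG(DEBUG, "MPRQ stride size %u..%u bytes, min WQE size %u",
  		min_stride_size, max_stride_size, min_wqe_size);
  }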
 
 /* Structure for VF VLAN workaround. */
 struct mlx5_vf_vlan {
@@ -1146,14 +1167,8 @@  struct mlx5_flex_item {
 struct mlx5_dev_ctx_shared {
 	LIST_ENTRY(mlx5_dev_ctx_shared) next;
 	uint32_t refcnt;
-	uint32_t flow_hit_aso_en:1; /* Flow Hit ASO is supported. */
-	uint32_t steering_format_version:4;
-	/* Indicates the device steering logic format. */
-	uint32_t meter_aso_en:1; /* Flow Meter ASO is supported. */
-	uint32_t ct_aso_en:1; /* Connection Tracking ASO is supported. */
-	uint32_t tunnel_header_0_1:1; /* tunnel_header_0_1 is supported. */
+	uint32_t esw_mode:1; /* Whether device is in E-Switch mode. */
 	uint32_t misc5_cap:1; /* misc5 matcher parameter is supported. */
-	uint32_t reclaim_mode:1; /* Reclaim memory. */
 	uint32_t dr_drop_action_en:1; /* Use DR drop action. */
 	uint32_t drop_action_check_flag:1; /* Check Flag for drop action. */
 	uint32_t flow_priority_check_flag:1; /* Check Flag for flow priority. */
@@ -1165,6 +1180,8 @@  struct mlx5_dev_ctx_shared {
 	char ibdev_name[MLX5_FS_NAME_MAX]; /* SYSFS dev name. */
 	char ibdev_path[MLX5_FS_PATH_MAX]; /* SYSFS dev path for secondary */
 	struct mlx5_dev_attr device_attr; /* Device properties. */
+	struct mlx5_cap dev_cap; /* Device capabilities. */
+	struct mlx5_sh_config config; /* Device configuration. */
 	int numa_node; /* Numa node of backing physical device. */
 	/* Packet pacing related structure. */
 	struct mlx5_dev_txpp txpp;
@@ -1407,9 +1424,6 @@  struct mlx5_priv {
 	unsigned int representor:1; /* Device is a port representor. */
 	unsigned int master:1; /* Device is a E-Switch master. */
 	unsigned int txpp_en:1; /* Tx packet pacing enabled. */
-	unsigned int sampler_en:1; /* Whether support sampler. */
-	unsigned int mtr_en:1; /* Whether support meter. */
-	unsigned int mtr_reg_share:1; /* Whether support meter REG_C share. */
 	unsigned int lb_used:1; /* Loopback queue is referred to. */
 	uint16_t domain_id; /* Switch domain identifier. */
 	uint16_t vport_id; /* Associated VF vport index (if any). */
@@ -1450,15 +1464,13 @@  struct mlx5_priv {
 	uint32_t link_speed_capa; /* Link speed capabilities. */
 	struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
 	struct mlx5_stats_ctrl stats_ctrl; /* Stats control. */
-	struct mlx5_dev_config config; /* Device configuration. */
+	struct mlx5_port_config config; /* Port configuration. */
 	/* Context for Verbs allocator. */
 	int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */
 	int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */
 	struct mlx5_nl_vlan_vmwa_context *vmwa_context; /* VLAN WA context. */
 	struct mlx5_hlist *mreg_cp_tbl;
 	/* Hash table of Rx metadata register copy table. */
-	uint8_t mtr_sfx_reg; /* Meter prefix-suffix flow match REG_C. */
-	uint8_t mtr_color_reg; /* Meter color match REG_C. */
 	struct mlx5_legacy_flow_meters flow_meters; /* MTR list. */
 	struct mlx5_l3t_tbl *mtr_profile_tbl; /* Meter index lookup table. */
 	struct mlx5_l3t_tbl *policy_idx_tbl; /* Policy index lookup table. */
@@ -1515,23 +1527,20 @@  void mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh);
 	for (port_id = mlx5_eth_find_next(0, dev); \
 	     port_id < RTE_MAX_ETHPORTS; \
 	     port_id = mlx5_eth_find_next(port_id + 1, dev))
-int mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs);
 void mlx5_rt_timestamp_config(struct mlx5_dev_ctx_shared *sh,
-			      struct mlx5_dev_config *config,
 			      struct mlx5_hca_attr *hca_attr);
 struct mlx5_dev_ctx_shared *
-mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
-			   const struct mlx5_dev_config *config);
+mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn);
 void mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh);
 int mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev);
 void mlx5_free_table_hash_list(struct mlx5_priv *priv);
 int mlx5_alloc_table_hash_list(struct mlx5_priv *priv);
-void mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
-			 struct mlx5_dev_config *config);
+void mlx5_set_min_inline(struct mlx5_priv *priv);
 void mlx5_set_metadata_mask(struct rte_eth_dev *dev);
-int mlx5_dev_check_sibling_config(struct mlx5_dev_ctx_shared *sh,
-				  struct mlx5_dev_config *config,
-				  struct rte_device *dpdk_dev);
+int mlx5_port_args_check_handler(const char *key, const char *val,
+				 void *opaque);
+int mlx5_dev_args_check_handler(const char *key, const char *val, void *opaque);
+int mlx5_probe_again_args_validate(struct mlx5_common_device *cdev);
 int mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info);
 int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size);
 int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
@@ -1540,9 +1549,6 @@  int mlx5_hairpin_cap_get(struct rte_eth_dev *dev,
 bool mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev);
 int mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev);
 void mlx5_flow_counter_mode_config(struct rte_eth_dev *dev);
-int mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh);
-int mlx5_aso_flow_mtrs_mng_init(struct mlx5_dev_ctx_shared *sh);
-int mlx5_flow_aso_ct_mng_init(struct mlx5_dev_ctx_shared *sh);
 
 /* mlx5_ethdev.c */
 
@@ -1799,6 +1805,10 @@  void mlx5_flow_meter_rxq_flush(struct rte_eth_dev *dev);
 struct rte_pci_driver;
 int mlx5_os_get_dev_attr(struct mlx5_common_device *dev,
 			 struct mlx5_dev_attr *dev_attr);
+int mlx5_os_shared_dev_ctx_args_config(struct mlx5_dev_ctx_shared *sh,
+				       struct rte_devargs *devargs,
+				       struct mlx5_sh_config *config);
+void mlx5_os_cap_config(struct mlx5_dev_ctx_shared *sh);
 void mlx5_os_free_shared_dr(struct mlx5_priv *priv);
 int mlx5_os_net_probe(struct mlx5_common_device *cdev);
 void mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh);
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 97c8925044..478acec470 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -571,7 +571,7 @@  mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
 		rte_errno = ENOMEM;
 		return NULL;
 	}
-	rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
+	rqt_attr->rqt_max_size = priv->sh->dev_cap.ind_table_max_size;
 	rqt_attr->rqt_actual_size = rqt_n;
 	if (queues == NULL) {
 		for (i = 0; i < rqt_n; i++)
@@ -1196,7 +1196,7 @@  mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
 		.flush_in_error_en = 1,
 		.allow_multi_pkt_send_wqe = !!priv->config.mps,
 		.min_wqe_inline_mode = cdev->config.hca_attr.vport_inline_mode,
-		.allow_swp = !!priv->config.swp,
+		.allow_swp = !!priv->sh->dev_cap.swp,
 		.cqn = txq_obj->cq_obj.cq->id,
 		.tis_lst_sz = 1,
 		.wq_attr = (struct mlx5_devx_wq_attr){
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 801c467bba..246bd15e43 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -121,7 +121,7 @@  mlx5_dev_configure(struct rte_eth_dev *dev)
 			dev->data->port_id, priv->txqs_n, txqs_n);
 		priv->txqs_n = txqs_n;
 	}
-	if (rxqs_n > priv->config.ind_table_max_size) {
+	if (rxqs_n > priv->sh->dev_cap.ind_table_max_size) {
 		DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
 			dev->data->port_id, rxqs_n);
 		rte_errno = EINVAL;
@@ -177,7 +177,7 @@  mlx5_dev_configure_rss_reta(struct rte_eth_dev *dev)
 			rss_queue_arr[j++] = i;
 	}
 	rss_queue_n = j;
-	if (rss_queue_n > priv->config.ind_table_max_size) {
+	if (rss_queue_n > priv->sh->dev_cap.ind_table_max_size) {
 		DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
 			dev->data->port_id, rss_queue_n);
 		rte_errno = EINVAL;
@@ -193,8 +193,8 @@  mlx5_dev_configure_rss_reta(struct rte_eth_dev *dev)
 	 * The result is always rounded to the next power of two.
 	 */
 	reta_idx_n = (1 << log2above((rss_queue_n & (rss_queue_n - 1)) ?
-				priv->config.ind_table_max_size :
-				rss_queue_n));
+				     priv->sh->dev_cap.ind_table_max_size :
+				     rss_queue_n));
 	ret = mlx5_rss_reta_index_resize(dev, reta_idx_n);
 	if (ret) {
 		mlx5_free(rss_queue_arr);
@@ -266,7 +266,7 @@  static void
 mlx5_set_txlimit_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_port_config *config = &priv->config;
 	unsigned int inlen;
 	uint16_t nb_max;
 
@@ -302,7 +302,6 @@  int
 mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
 	unsigned int max;
 
 	/* FIXME: we should ask the device for these values. */
@@ -322,8 +321,8 @@  mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 	info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES;
 	info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev);
 	info->rx_seg_capa.max_nseg = MLX5_MAX_RXQ_NSEG;
-	info->rx_seg_capa.multi_pools = !config->mprq.enabled;
-	info->rx_seg_capa.offset_allowed = !config->mprq.enabled;
+	info->rx_seg_capa.multi_pools = !priv->config.mprq.enabled;
+	info->rx_seg_capa.offset_allowed = !priv->config.mprq.enabled;
 	info->rx_seg_capa.offset_align_log2 = 0;
 	info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
 				 info->rx_queue_offload_capa);
@@ -331,7 +330,7 @@  mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 	info->dev_capa = RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP;
 	info->if_index = mlx5_ifindex(dev);
 	info->reta_size = priv->reta_idx_n ?
-		priv->reta_idx_n : config->ind_table_max_size;
+		priv->reta_idx_n : priv->sh->dev_cap.ind_table_max_size;
 	info->hash_key_size = MLX5_RSS_HASH_KEY_LEN;
 	info->speed_capa = priv->link_speed_capa;
 	info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
@@ -672,7 +671,7 @@  mlx5_port_to_eswitch_info(uint16_t port, bool valid)
 	}
 	dev = &rte_eth_devices[port];
 	priv = dev->data->dev_private;
-	if (!(priv->representor || priv->master)) {
+	if (!priv->sh->esw_mode) {
 		rte_errno = EINVAL;
 		return NULL;
 	}
@@ -699,7 +698,7 @@  mlx5_dev_to_eswitch_info(struct rte_eth_dev *dev)
 	struct mlx5_priv *priv;
 
 	priv = dev->data->dev_private;
-	if (!(priv->representor || priv->master)) {
+	if (!priv->sh->esw_mode) {
 		rte_errno = EINVAL;
 		return NULL;
 	}
@@ -721,10 +720,8 @@  int
 mlx5_hairpin_cap_get(struct rte_eth_dev *dev, struct rte_eth_hairpin_cap *cap)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
 
-	if (!priv->sh->cdev->config.devx || !config->dest_tir ||
-	    !config->dv_flow_en) {
+	if (!priv->sh->dev_cap.devx_obj_ops_en) {
 		rte_errno = ENOTSUP;
 		return -rte_errno;
 	}
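
The hairpin check above folds the former three-way condition (devx && dest_tir && dv_flow_en) into the single devx_obj_ops_en capability bit. How that bit is derived is not shown in this patch; a sketch of the presumed computation in mlx5_os_cap_config(), stated as an assumption only:

  /* Assumption: devx_obj_ops_en subsumes the old hairpin precondition.
   * 'dest_tir' stood for "advanced DR API available" in the removed
   * mlx5_dev_config.
   */
  static void
  set_devx_obj_ops_cap(struct mlx5_dev_ctx_shared *sh, bool dest_tir)
  {
  	sh->dev_cap.devx_obj_ops_en = sh->cdev->config.devx &&
  				      sh->config.dv_flow_en && dest_tir;
  }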
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index d15407e8f6..7058c40fc5 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -886,7 +886,8 @@  mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
 		     struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_sh_config *config = &priv->sh->config;
+	uint8_t mtr_color_reg = priv->sh->dev_cap.mtr_color_reg;
 	enum modify_reg start_reg;
 	bool skip_mtr_reg = false;
 
@@ -932,24 +933,23 @@  mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
 		 * If meter color and meter id share one register, flow match
 		 * should use the meter color register for match.
 		 */
-		if (priv->mtr_reg_share)
-			return priv->mtr_color_reg;
+		if (priv->sh->dev_cap.mtr_reg_share)
+			return mtr_color_reg;
 		else
-			return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
-			       REG_C_3;
+			return mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
 	case MLX5_MTR_COLOR:
 	case MLX5_ASO_FLOW_HIT:
 	case MLX5_ASO_CONNTRACK:
 	case MLX5_SAMPLE_ID:
 		/* All features use the same REG_C. */
-		MLX5_ASSERT(priv->mtr_color_reg != REG_NON);
-		return priv->mtr_color_reg;
+		MLX5_ASSERT(mtr_color_reg != REG_NON);
+		return mtr_color_reg;
 	case MLX5_COPY_MARK:
 		/*
 		 * Metadata COPY_MARK register using is in meter suffix sub
 		 * flow while with meter. It's safe to share the same register.
 		 */
-		return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
+		return mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
 	case MLX5_APP_TAG:
 		/*
 		 * If meter is enable, it will engage the register for color
@@ -958,9 +958,11 @@  mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
 		 * match.
 		 * If meter is disable, free to use all available registers.
 		 */
-		start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
-			    (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
-		skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
+		start_reg = mtr_color_reg != REG_C_2 ? REG_C_2 :
+			    (priv->sh->dev_cap.mtr_reg_share ? REG_C_3 :
+			     REG_C_4);
+		skip_mtr_reg = !!(priv->sh->dev_cap.mtr_en &&
+				  start_reg == REG_C_2);
 		if (id > (uint32_t)(REG_C_7 - start_reg))
 			return rte_flow_error_set(error, EINVAL,
 						  RTE_FLOW_ERROR_TYPE_ITEM,
@@ -976,7 +978,7 @@  mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
 		 * color register.
 		 */
 		if (skip_mtr_reg && priv->sh->flow_mreg_c
-		    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
+				[id + start_reg - REG_C_0] >= mtr_color_reg) {
 			if (id >= (uint32_t)(REG_C_7 - start_reg))
 				return rte_flow_error_set(error, EINVAL,
 						       RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1254,8 +1256,8 @@  flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
 		 * this must be always enabled (metadata may arive
 		 * from other port - not from local flows only.
 		 */
-		if (priv->config.dv_flow_en &&
-		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+		if (priv->sh->config.dv_flow_en &&
+		    priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
 		    mlx5_flow_ext_mreg_supported(dev)) {
 			rxq_ctrl->rxq.mark = 1;
 			rxq_ctrl->flow_mark_n = 1;
@@ -1345,8 +1347,8 @@  flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
 		MLX5_ASSERT(rxq_ctrl != NULL);
 		if (rxq_ctrl == NULL)
 			continue;
-		if (priv->config.dv_flow_en &&
-		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+		if (priv->sh->config.dv_flow_en &&
+		    priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
 		    mlx5_flow_ext_mreg_supported(dev)) {
 			rxq_ctrl->rxq.mark = 1;
 			rxq_ctrl->flow_mark_n = 1;
@@ -1746,7 +1748,7 @@  mlx5_validate_action_rss(struct rte_eth_dev *dev,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					  &rss->key_len,
 					  "RSS hash key too large");
-	if (rss->queue_num > priv->config.ind_table_max_size)
+	if (rss->queue_num > priv->sh->dev_cap.ind_table_max_size)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					  &rss->queue_num,
@@ -1981,7 +1983,7 @@  mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
 					  "egress is not supported");
-	if (attributes->transfer && !priv->config.dv_esw_en)
+	if (attributes->transfer && !priv->sh->config.dv_esw_en)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
 					  NULL, "transfer is not supported");
@@ -2637,7 +2639,7 @@  mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
 	if (!mask)
 		mask = &rte_flow_item_vxlan_mask;
 
-	if (priv->sh->steering_format_version !=
+	if (priv->sh->dev_cap.steering_format_version !=
 	    MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 ||
 	    !udp_dport || udp_dport == MLX5_UDP_PORT_VXLAN) {
 		/* FDB domain & NIC domain non-zero group */
@@ -2645,7 +2647,7 @@  mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
 			valid_mask = &nic_mask;
 		/* Group zero in NIC domain */
 		if (!attr->group && !attr->transfer &&
-		    priv->sh->tunnel_header_0_1)
+		    priv->sh->dev_cap.tunnel_header_0_1)
 			valid_mask = &nic_mask;
 	}
 	ret = mlx5_flow_item_acceptable
@@ -2698,7 +2700,7 @@  mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
 		uint8_t vni[4];
 	} id = { .vlan_id = 0, };
 
-	if (!priv->config.l3_vxlan_en)
+	if (!priv->sh->config.l3_vxlan_en)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "L3 VXLAN is not enabled by device"
@@ -3125,7 +3127,7 @@  mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	int ret;
 
-	if (!priv->config.mpls_en)
+	if (!priv->sh->dev_cap.mpls_en)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "MPLS not supported or"
@@ -3416,11 +3418,11 @@  flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
 	if (type != MLX5_FLOW_TYPE_MAX)
 		return type;
 	/* If no OS specific type - continue with DV/VERBS selection */
-	if (attr->transfer && priv->config.dv_esw_en)
+	if (attr->transfer && priv->sh->config.dv_esw_en)
 		type = MLX5_FLOW_TYPE_DV;
 	if (!attr->transfer)
-		type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
-						 MLX5_FLOW_TYPE_VERBS;
+		type = priv->sh->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
+						     MLX5_FLOW_TYPE_VERBS;
 	return type;
 }
 
@@ -3866,7 +3868,7 @@  flow_action_handles_translate(struct rte_eth_dev *dev,
 			translated[handle->index].conf = (void *)(uintptr_t)idx;
 			break;
 		case MLX5_INDIRECT_ACTION_TYPE_AGE:
-			if (priv->sh->flow_hit_aso_en) {
+			if (priv->sh->dev_cap.flow_hit_aso_en) {
 				translated[handle->index].type =
 					(enum rte_flow_action_type)
 					MLX5_RTE_FLOW_ACTION_TYPE_AGE;
@@ -3876,7 +3878,7 @@  flow_action_handles_translate(struct rte_eth_dev *dev,
 			}
 			/* Fall-through */
 		case MLX5_INDIRECT_ACTION_TYPE_CT:
-			if (priv->sh->ct_aso_en) {
+			if (priv->sh->dev_cap.ct_aso_en) {
 				translated[handle->index].type =
 					RTE_FLOW_ACTION_TYPE_CONNTRACK;
 				translated[handle->index].conf =
@@ -4092,7 +4094,7 @@  static bool flow_check_modify_action_type(struct rte_eth_dev *dev,
 		return true;
 	case RTE_FLOW_ACTION_TYPE_FLAG:
 	case RTE_FLOW_ACTION_TYPE_MARK:
-		if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
+		if (priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
 			return true;
 		else
 			return false;
@@ -4531,8 +4533,8 @@  flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
 	uint32_t mark_id;
 
 	/* Check whether extensive metadata feature is engaged. */
-	if (!priv->config.dv_flow_en ||
-	    priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
+	if (!priv->sh->config.dv_flow_en ||
+	    priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
 	    !mlx5_flow_ext_mreg_supported(dev) ||
 	    !priv->sh->dv_regc0_mask)
 		return 0;
@@ -4591,7 +4593,7 @@  flow_mreg_update_copy_table(struct rte_eth_dev *dev,
 			    struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_sh_config *config = &priv->sh->config;
 	struct mlx5_flow_mreg_copy_resource *mcp_res;
 	const struct rte_flow_action_mark *mark;
 
@@ -5009,8 +5011,9 @@  flow_meter_split_prep(struct rte_eth_dev *dev,
 	struct rte_flow_action *action_pre_head = NULL;
 	int32_t flow_src_port = priv->representor_id;
 	bool mtr_first;
-	uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
-	uint8_t mtr_reg_bits = priv->mtr_reg_share ?
+	uint8_t mtr_id_offset = priv->sh->dev_cap.mtr_reg_share ?
+				MLX5_MTR_COLOR_BITS : 0;
+	uint8_t mtr_reg_bits = priv->sh->dev_cap.mtr_reg_share ?
 				MLX5_MTR_IDLE_BITS_IN_COLOR_REG : MLX5_REG_BITS;
 	uint32_t flow_id = 0;
 	uint32_t flow_id_reversed = 0;
@@ -5060,7 +5063,7 @@  flow_meter_split_prep(struct rte_eth_dev *dev,
 	}
 	sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
 	sfx_items++;
-	mtr_first = priv->sh->meter_aso_en &&
+	mtr_first = priv->sh->dev_cap.meter_aso_en &&
 		(attr->egress || (attr->transfer && flow_src_port != UINT16_MAX));
 	/* For ASO meter, meter must be before tag in TX direction. */
 	if (mtr_first) {
@@ -5119,7 +5122,7 @@  flow_meter_split_prep(struct rte_eth_dev *dev,
 	}
 	/* Add end action to the actions. */
 	actions_sfx->type = RTE_FLOW_ACTION_TYPE_END;
-	if (priv->sh->meter_aso_en) {
+	if (priv->sh->dev_cap.meter_aso_en) {
 		/**
 		 * For ASO meter, need to add an extra jump action explicitly,
 		 * to jump from meter to policer table.
@@ -5724,7 +5727,7 @@  flow_create_split_metadata(struct rte_eth_dev *dev,
 			   struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_sh_config *config = &priv->sh->config;
 	const struct rte_flow_action *qrss = NULL;
 	struct rte_flow_action *ext_actions = NULL;
 	struct mlx5_flow *dev_flow = NULL;
@@ -6012,7 +6015,7 @@  flow_create_split_meter(struct rte_eth_dev *dev,
 	int actions_n = 0;
 	int ret = 0;
 
-	if (priv->mtr_en)
+	if (priv->sh->dev_cap.mtr_en)
 		actions_n = flow_check_meter_action(dev, actions, &has_mtr,
 						    &has_modify, &meter_id);
 	if (has_mtr) {
@@ -6207,7 +6210,7 @@  flow_create_split_sample(struct rte_eth_dev *dev,
 	const uint32_t next_ft_step = 1;
 	int ret = 0;
 
-	if (priv->sampler_en)
+	if (priv->sh->dev_cap.sampler_en)
 		actions_n = flow_check_match_action(actions, attr,
 					RTE_FLOW_ACTION_TYPE_SAMPLE,
 					&sample_action_pos, &qrss_action_pos,
@@ -8231,7 +8234,7 @@  mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
 		struct rte_flow *flow;
 		struct rte_flow_error error;
 
-		if (!priv->config.dv_flow_en)
+		if (!priv->sh->config.dv_flow_en)
 			break;
 		/* Create internal flow, validation skips copy action. */
 		flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr,
@@ -8545,7 +8548,7 @@  mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
 	struct mlx5_flow_handle *dh;
 	struct rte_flow *flow;
 
-	if (!priv->config.dv_flow_en) {
+	if (!sh->config.dv_flow_en) {
 		if (fputs("device dv flow disabled\n", file) <= 0)
 			return -errno;
 		return -ENOTSUP;
@@ -9529,7 +9532,7 @@  mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	if (!priv->config.dv_flow_en)
+	if (!priv->sh->config.dv_flow_en)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
 					  "flow DV interface is off");
@@ -9948,7 +9951,7 @@  mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
 	type = mlx5_flow_os_get_type();
 	if (type == MLX5_FLOW_TYPE_MAX) {
 		type = MLX5_FLOW_TYPE_VERBS;
-		if (priv->sh->cdev->config.devx && priv->config.dv_flow_en)
+		if (priv->sh->cdev->config.devx && priv->sh->config.dv_flow_en)
 			type = MLX5_FLOW_TYPE_DV;
 	}
 	fops = flow_get_drv_ops(type);
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 1f54649c69..6cd758922f 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -964,7 +964,7 @@  is_tunnel_offload_active(const struct rte_eth_dev *dev)
 {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	const struct mlx5_priv *priv = dev->data->dev_private;
-	return !!priv->config.dv_miss_info;
+	return !!priv->sh->config.dv_miss_info;
 #else
 	RTE_SET_USED(dev);
 	return false;
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index be48eb0b1b..e2912a7e36 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -1160,7 +1160,8 @@  flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
 		uint32_t reg_c0 = priv->sh->dv_regc0_mask;
 
 		MLX5_ASSERT(reg_c0);
-		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
+		MLX5_ASSERT(priv->sh->config.dv_xmeta_en !=
+			    MLX5_XMETA_MODE_LEGACY);
 		if (conf->dst == REG_C_0) {
 			/* Copy to reg_c[0], within mask only. */
 			reg_dst.offset = rte_bsf32(reg_c0);
@@ -1931,7 +1932,7 @@  flow_dv_validate_item_mark(struct rte_eth_dev *dev,
 			   struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_sh_config *config = &priv->sh->config;
 	const struct rte_flow_item_mark *spec = item->spec;
 	const struct rte_flow_item_mark *mask = item->mask;
 	const struct rte_flow_item_mark nic_mask = {
@@ -2005,7 +2006,7 @@  flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
 			   struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_sh_config *config = &priv->sh->config;
 	const struct rte_flow_item_meta *spec = item->spec;
 	const struct rte_flow_item_meta *mask = item->mask;
 	struct rte_flow_item_meta nic_mask = {
@@ -2743,8 +2744,8 @@  flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
 	/* Pop VLAN is not supported in egress except for CX6 FDB mode. */
 	if (attr->transfer) {
 		bool fdb_tx = priv->representor_id != UINT16_MAX;
-		bool is_cx5 = sh->steering_format_version ==
-		    MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
+		bool is_cx5 = sh->dev_cap.steering_format_version ==
+			      MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
 
 		if (fdb_tx && is_cx5)
 			direction_error = true;
@@ -2895,8 +2896,8 @@  flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
 	/* Push VLAN is not supported in ingress except for CX6 FDB mode. */
 	if (attr->transfer) {
 		bool fdb_tx = priv->representor_id != UINT16_MAX;
-		bool is_cx5 = sh->steering_format_version ==
-		    MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
+		bool is_cx5 = sh->dev_cap.steering_format_version ==
+			      MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
 
 		if (!fdb_tx && is_cx5)
 			direction_error = true;
@@ -3055,7 +3056,7 @@  flow_dv_validate_action_flag(struct rte_eth_dev *dev,
 			     struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_sh_config *config = &priv->sh->config;
 	int ret;
 
 	/* Fall back if no extended metadata register support. */
@@ -3114,7 +3115,7 @@  flow_dv_validate_action_mark(struct rte_eth_dev *dev,
 			     struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_sh_config *config = &priv->sh->config;
 	const struct rte_flow_action_mark *mark = action->conf;
 	int ret;
 
@@ -3188,7 +3189,7 @@  flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
 				 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_sh_config *config = &priv->sh->config;
 	const struct rte_flow_action_set_meta *conf;
 	uint32_t nic_mask = UINT32_MAX;
 	int reg;
@@ -3312,7 +3313,7 @@  flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "duplicate count actions set");
 	if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
-	    !priv->sh->flow_hit_aso_en)
+	    !priv->sh->dev_cap.flow_hit_aso_en)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "old age and shared count combination is not supported");
@@ -3400,7 +3401,7 @@  flow_dv_validate_action_decap(struct rte_eth_dev *dev,
 	const struct mlx5_priv *priv = dev->data->dev_private;
 
 	if (priv->sh->cdev->config.hca_attr.scatter_fcs_w_decap_disable &&
-	    !priv->config.decap_en)
+	    !priv->sh->config.decap_en)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "decap is not enabled");
@@ -4825,7 +4826,7 @@  flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
 {
 	int ret = 0;
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_sh_config *config = &priv->sh->config;
 	const struct rte_flow_action_modify_field *action_modify_field =
 		action->conf;
 	uint32_t dst_width = mlx5_flow_item_field_width(dev,
@@ -5175,7 +5176,7 @@  mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "meter with jump not support");
-	if (!priv->mtr_en)
+	if (!priv->sh->dev_cap.mtr_en)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL,
@@ -5186,7 +5187,7 @@  mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "Meter not found");
 	/* aso meter can always be shared by different domains */
-	if (fm->ref_cnt && !priv->sh->meter_aso_en &&
+	if (fm->ref_cnt && !priv->sh->dev_cap.meter_aso_en &&
 	    !(fm->transfer == attr->transfer ||
 	      (!fm->ingress && !attr->ingress && attr->egress) ||
 	      (!fm->egress && !attr->egress && attr->ingress)))
@@ -5433,8 +5434,9 @@  flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
 		       .grow_trunk = 3,
 		       .grow_shift = 2,
 		       .need_lock = 1,
-		       .release_mem_en = !!sh->reclaim_mode,
-		       .per_core_cache = sh->reclaim_mode ? 0 : (1 << 16),
+		       .release_mem_en = !!sh->config.reclaim_mode,
+		       .per_core_cache =
+				       sh->config.reclaim_mode ? 0 : (1 << 16),
 		       .malloc = mlx5_malloc,
 		       .free = mlx5_free,
 		       .type = "mlx5_modify_action_resource",
@@ -5581,7 +5583,7 @@  flow_dv_validate_action_sample(uint64_t *action_flags,
 			       struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *dev_conf = &priv->config;
+	struct mlx5_sh_config *dev_conf = &priv->sh->config;
 	const struct rte_flow_action_sample *sample = action->conf;
 	const struct rte_flow_action *act;
 	uint64_t sub_action_flags = 0;
@@ -5598,7 +5600,7 @@  flow_dv_validate_action_sample(uint64_t *action_flags,
 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
 					  "ratio value starts from 1");
 	if (!priv->sh->cdev->config.devx ||
-	    (sample->ratio > 0 && !priv->sampler_en))
+	    (sample->ratio > 0 && !priv->sh->dev_cap.sampler_en))
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL,
@@ -6605,11 +6607,12 @@  flow_dv_validate_attributes(struct rte_eth_dev *dev,
 					  NULL,
 					  "priority out of range");
 	if (attributes->transfer) {
-		if (!priv->config.dv_esw_en)
+		if (!priv->sh->config.dv_esw_en)
 			return rte_flow_error_set
 				(error, ENOTSUP,
 				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 				 "E-Switch dr is not supported");
+		/* TODO: unreachable, decide about the solution. */
 		if (!(priv->representor || priv->master))
 			return rte_flow_error_set
 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -6895,7 +6898,7 @@  flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 		},
 	};
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *dev_conf = &priv->config;
+	struct mlx5_sh_config *dev_conf = &priv->sh->config;
 	uint16_t queue_index = 0xFFFF;
 	const struct rte_flow_item_vlan *vlan_m = NULL;
 	uint32_t rw_act_num = 0;
@@ -6919,7 +6922,7 @@  flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	tunnel = is_tunnel_offload_active(dev) ?
 		 mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
 	if (tunnel) {
-		if (!priv->config.dv_flow_en)
+		if (!dev_conf->dv_flow_en)
 			return rte_flow_error_set
 				(error, ENOTSUP,
 				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -7756,7 +7759,7 @@  flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			 * Validate the regular AGE action (using counter)
 			 * mutual exclusion with share counter actions.
 			 */
-			if (!priv->sh->flow_hit_aso_en) {
+			if (!priv->sh->dev_cap.flow_hit_aso_en) {
 				if (shared_count)
 					return rte_flow_error_set
 						(error, EINVAL,
@@ -8923,6 +8926,7 @@  flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
 	uint32_t *tunnel_header_m;
 	uint16_t dport;
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	const struct rte_flow_item_vxlan nic_mask = {
 		.vni = "\xff\xff\xff",
 		.rsvd1 = 0xff,
@@ -8947,17 +8951,18 @@  flow_dv_translate_item_vxlan(struct rte_eth_dev *dev,
 	if (!vxlan_v)
 		return;
 	if (!vxlan_m) {
-		if ((!attr->group && !priv->sh->tunnel_header_0_1) ||
-		    (attr->group && !priv->sh->misc5_cap))
+		if ((!attr->group && !sh->dev_cap.tunnel_header_0_1) ||
+		    (attr->group && !sh->misc5_cap))
 			vxlan_m = &rte_flow_item_vxlan_mask;
 		else
 			vxlan_m = &nic_mask;
 	}
-	if ((priv->sh->steering_format_version ==
-	    MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 &&
-	    dport != MLX5_UDP_PORT_VXLAN) ||
-	    (!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
-	    ((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
+	if ((sh->dev_cap.steering_format_version ==
+	     MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 &&
+	     dport != MLX5_UDP_PORT_VXLAN) ||
+	    (!attr->group && !attr->transfer &&
+	     !sh->dev_cap.tunnel_header_0_1) ||
+	    ((attr->group || attr->transfer) && !sh->misc5_cap)) {
 		void *misc_m;
 		void *misc_v;
 		char *vni_m;
@@ -12607,7 +12612,7 @@  flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
 	struct mlx5_aso_ct_action *ct;
 	uint32_t idx;
 
-	if (!sh->ct_aso_en)
+	if (!sh->dev_cap.ct_aso_en)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "Connection is not supported");
@@ -12655,7 +12660,7 @@  flow_dv_translate(struct rte_eth_dev *dev,
 		  struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *dev_conf = &priv->config;
+	struct mlx5_sh_config *dev_conf = &priv->sh->config;
 	struct rte_flow *flow = dev_flow->flow;
 	struct mlx5_flow_handle *handle = dev_flow->handle;
 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
@@ -13307,7 +13312,7 @@  flow_dv_translate(struct rte_eth_dev *dev,
 			 */
 			if (action_flags & MLX5_FLOW_ACTION_AGE) {
 				if ((non_shared_age && count) ||
-				    !(priv->sh->flow_hit_aso_en &&
+				    !(priv->sh->dev_cap.flow_hit_aso_en &&
 				      (attr->group || attr->transfer))) {
 					/* Creates age by counters. */
 					cnt_act = flow_dv_prepare_counter
@@ -13622,8 +13627,7 @@  flow_dv_translate(struct rte_eth_dev *dev,
 	 * E-Switch rule where no port_id item was found. In both cases
 	 * the source port is set according the current port in use.
 	 */
-	if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
-	    (priv->representor || priv->master)) {
+	if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) && priv->sh->esw_mode) {
 		if (flow_dv_translate_item_port_id(dev, match_mask,
 						   match_value, NULL, attr))
 			return -rte_errno;
@@ -14010,7 +14014,7 @@  flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 				(error, errno,
 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 				NULL,
-				(!priv->config.allow_duplicate_pattern &&
+				(!priv->sh->config.allow_duplicate_pattern &&
 				errno == EEXIST) ?
 				"duplicating pattern is not allowed" :
 				"hardware refuses to create flow");
@@ -15706,7 +15710,9 @@  __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
 				struct rte_flow_action tag_action;
 				struct mlx5_rte_flow_action_set_tag set_tag;
 				uint32_t next_mtr_idx = 0;
+				bool mtr_reg_share;
 
+				mtr_reg_share = priv->sh->dev_cap.mtr_reg_share;
 				mtr = act->conf;
 				next_fm = mlx5_flow_meter_find(priv,
 							mtr->mtr_id,
@@ -15729,9 +15735,9 @@  __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
 						MLX5_MTR_ID,
 						0,
 						(struct rte_flow_error *)error);
-					set_tag.offset = (priv->mtr_reg_share ?
+					set_tag.offset = (mtr_reg_share ?
 						MLX5_MTR_COLOR_BITS : 0);
-					set_tag.length = (priv->mtr_reg_share ?
+					set_tag.length = (mtr_reg_share ?
 					       MLX5_MTR_IDLE_BITS_IN_COLOR_REG :
 					       MLX5_REG_BITS);
 					set_tag.data = next_mtr_idx;
@@ -16078,7 +16084,7 @@  flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	int i;
 
-	if (!fm || !priv->config.dv_flow_en)
+	if (!fm || !priv->sh->config.dv_flow_en)
 		return;
 	for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
 		if (fm->drop_rule[i]) {
@@ -16181,7 +16187,7 @@  __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	uint8_t misc_mask;
 
-	if (match_src_port && (priv->representor || priv->master)) {
+	if (match_src_port && priv->sh->esw_mode) {
 		if (flow_dv_translate_item_port_id(dev, matcher.buf,
 						   value.buf, item, attr)) {
 			DRV_LOG(ERR, "Failed to create meter policy%d flow's"
@@ -16233,7 +16239,7 @@  __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	const uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
 
-	if (match_src_port && (priv->representor || priv->master)) {
+	if (match_src_port && priv->sh->esw_mode) {
 		if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
 						   value.buf, item, attr)) {
 			DRV_LOG(ERR, "Failed to register meter policy%d matcher"
@@ -16684,7 +16690,8 @@  flow_dv_create_def_policy(struct rte_eth_dev *dev)
 
 	/* Non-termination policy table. */
 	for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
-		if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
+		if (!priv->sh->config.dv_esw_en &&
+		    i == MLX5_MTR_DOMAIN_TRANSFER)
 			continue;
 		if (__flow_dv_create_domain_def_policy(dev, i)) {
 			DRV_LOG(ERR, "Failed to create default policy");
@@ -16734,7 +16741,8 @@  flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
 	int mtr_id_reg_c = mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
 						     0, &error);
 	uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
-	uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
+	uint8_t mtr_id_offset = priv->sh->dev_cap.mtr_reg_share ?
+				MLX5_MTR_COLOR_BITS : 0;
 	struct mlx5_list_entry *entry;
 	struct mlx5_flow_dv_matcher matcher = {
 		.mask = {
@@ -16748,7 +16756,7 @@  flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
 	};
 	uint8_t misc_mask;
 
-	if (!priv->mtr_en || mtr_id_reg_c < 0) {
+	if (!priv->sh->dev_cap.mtr_en || mtr_id_reg_c < 0) {
 		rte_errno = ENOTSUP;
 		return -1;
 	}
@@ -17638,7 +17646,7 @@  flow_dv_action_validate(struct rte_eth_dev *dev,
 	case RTE_FLOW_ACTION_TYPE_COUNT:
 		return flow_dv_validate_action_count(dev, true, 0, err);
 	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
-		if (!priv->sh->ct_aso_en)
+		if (!priv->sh->dev_cap.ct_aso_en)
 			return rte_flow_error_set(err, ENOTSUP,
 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 					"ASO CT is not supported");
@@ -17793,7 +17801,7 @@  flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
 			struct rte_mtr_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *dev_conf = &priv->config;
+	struct mlx5_sh_config *dev_conf = &priv->sh->config;
 	const struct rte_flow_action *act;
 	uint64_t action_flags[RTE_COLORS] = {0};
 	int actions_n;
@@ -17807,7 +17815,7 @@  flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
 	bool def_yellow = false;
 	const struct rte_flow_action_rss *rss_color[RTE_COLORS] = {NULL};
 
-	if (!priv->config.dv_esw_en)
+	if (!dev_conf->dv_esw_en)
 		def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
 	*domain_bitmap = def_domain;
 	/* Red color could only support DROP action. */
@@ -17851,7 +17859,7 @@  flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
 			switch (act->type) {
 			case RTE_FLOW_ACTION_TYPE_PORT_ID:
 			case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
-				if (!priv->config.dv_esw_en)
+				if (!dev_conf->dv_esw_en)
 					return -rte_mtr_error_set(error,
 					ENOTSUP,
 					RTE_MTR_ERROR_TYPE_METER_POLICY,
diff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c
index 2f91c0074e..dd97f51024 100644
--- a/drivers/net/mlx5/mlx5_flow_meter.c
+++ b/drivers/net/mlx5/mlx5_flow_meter.c
@@ -64,7 +64,7 @@  mlx5_flow_meter_action_create(struct mlx5_priv *priv,
 	val = (ebs_eir >> ASO_DSEG_EBS_MAN_OFFSET) & ASO_DSEG_MAN_MASK;
 	MLX5_SET(flow_meter_parameters, fmp, ebs_mantissa, val);
 	mtr_init.next_table = def_policy->sub_policy.tbl_rsc->obj;
-	mtr_init.reg_c_index = priv->mtr_color_reg - REG_C_0;
+	mtr_init.reg_c_index = priv->sh->dev_cap.mtr_color_reg - REG_C_0;
 	mtr_init.flow_meter_parameter = fmp;
 	mtr_init.flow_meter_parameter_sz =
 		MLX5_ST_SZ_BYTES(flow_meter_parameters);
@@ -153,7 +153,7 @@  mlx5_flow_meter_profile_validate(struct rte_eth_dev *dev,
 					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
 					  NULL,
 					  "Meter profile already exists.");
-	if (!priv->sh->meter_aso_en) {
+	if (!priv->sh->dev_cap.meter_aso_en) {
 		/* Old version is even not supported. */
 		if (!priv->sh->cdev->config.hca_attr.qos.flow_meter_old)
 			return -rte_mtr_error_set(error, ENOTSUP,
@@ -428,12 +428,12 @@  mlx5_flow_mtr_cap_get(struct rte_eth_dev *dev,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hca_qos_attr *qattr = &priv->sh->cdev->config.hca_attr.qos;
 
-	if (!priv->mtr_en)
+	if (!priv->sh->dev_cap.mtr_en)
 		return -rte_mtr_error_set(error, ENOTSUP,
 					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
 					  "Meter is not supported");
 	memset(cap, 0, sizeof(*cap));
-	if (priv->sh->meter_aso_en) {
+	if (priv->sh->dev_cap.meter_aso_en) {
 		/* 2 meters per one ASO cache line. */
 		cap->n_max = 1 << (qattr->log_max_num_meter_aso + 1);
 		cap->srtcm_rfc2697_packet_mode_supported = 1;
@@ -481,7 +481,7 @@  mlx5_flow_meter_profile_add(struct rte_eth_dev *dev,
 	union mlx5_l3t_data data;
 	int ret;
 
-	if (!priv->mtr_en)
+	if (!priv->sh->dev_cap.mtr_en)
 		return -rte_mtr_error_set(error, ENOTSUP,
 					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
 					  "Meter is not supported");
@@ -539,7 +539,7 @@  mlx5_flow_meter_profile_delete(struct rte_eth_dev *dev,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_meter_profile *fmp;
 
-	if (!priv->mtr_en)
+	if (!priv->sh->dev_cap.mtr_en)
 		return -rte_mtr_error_set(error, ENOTSUP,
 					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
 					  "Meter is not supported");
@@ -650,14 +650,14 @@  mlx5_flow_meter_policy_validate(struct rte_eth_dev *dev,
 	struct rte_mtr_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_flow_attr attr = { .transfer =
-			priv->config.dv_esw_en ? 1 : 0};
+	struct rte_flow_attr attr = { .transfer = priv->sh->config.dv_esw_en ?
+						  1 : 0 };
 	bool is_rss = false;
 	uint8_t policy_mode;
 	uint8_t domain_bitmap;
 	int ret;
 
-	if (!priv->mtr_en || !priv->sh->meter_aso_en)
+	if (!priv->sh->dev_cap.mtr_en || !priv->sh->dev_cap.meter_aso_en)
 		return -rte_mtr_error_set(error, ENOTSUP,
 				RTE_MTR_ERROR_TYPE_METER_POLICY,
 				NULL, "meter policy unsupported.");
@@ -738,8 +738,8 @@  mlx5_flow_meter_policy_add(struct rte_eth_dev *dev,
 			struct rte_mtr_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_flow_attr attr = { .transfer =
-			priv->config.dv_esw_en ? 1 : 0};
+	struct rte_flow_attr attr = { .transfer = priv->sh->config.dv_esw_en ?
+						  1 : 0 };
 	uint32_t sub_policy_idx = 0;
 	uint32_t policy_idx = 0;
 	struct mlx5_flow_meter_policy *mtr_policy = NULL;
@@ -754,7 +754,7 @@  mlx5_flow_meter_policy_add(struct rte_eth_dev *dev,
 	union mlx5_l3t_data data;
 	bool skip_rule = false;
 
-	if (!priv->mtr_en)
+	if (!priv->sh->dev_cap.mtr_en)
 		return -rte_mtr_error_set(error, ENOTSUP,
 					  RTE_MTR_ERROR_TYPE_METER_POLICY,
 					  NULL, "meter policy unsupported. ");
@@ -794,7 +794,7 @@  mlx5_flow_meter_policy_add(struct rte_eth_dev *dev,
 		priv->sh->mtrmng->def_policy_id = policy_id;
 		return 0;
 	}
-	if (!priv->sh->meter_aso_en)
+	if (!priv->sh->dev_cap.meter_aso_en)
 		return -rte_mtr_error_set(error, ENOTSUP,
 			RTE_MTR_ERROR_TYPE_METER_POLICY, NULL,
 			"no ASO capability to support the policy ");
@@ -1015,8 +1015,8 @@  mlx5_flow_meter_validate(struct mlx5_priv *priv, uint32_t meter_id,
 /**
  * Modify the flow meter action.
  *
- * @param[in] priv
- *   Pointer to mlx5 private data structure.
+ * @param[in] sh
+ *   Pointer to shared device context structure.
  * @param[in] fm
  *   Pointer to flow meter to be modified.
  * @param[in] srtcm
@@ -1029,7 +1029,7 @@  mlx5_flow_meter_validate(struct mlx5_priv *priv, uint32_t meter_id,
  *   0 on success, o negative value otherwise.
  */
 static int
-mlx5_flow_meter_action_modify(struct mlx5_priv *priv,
+mlx5_flow_meter_action_modify(struct mlx5_dev_ctx_shared *sh,
 		struct mlx5_flow_meter_info *fm,
 		const struct mlx5_flow_meter_srtcm_rfc2697_prm *srtcm,
 		uint64_t modify_bits, uint32_t active_state, uint32_t is_enable)
@@ -1042,18 +1042,18 @@  mlx5_flow_meter_action_modify(struct mlx5_priv *priv,
 	struct mlx5_aso_mtr *aso_mtr = NULL;
 	uint32_t cbs_cir, ebs_eir, val;
 
-	if (priv->sh->meter_aso_en) {
+	if (sh->dev_cap.meter_aso_en) {
 		fm->is_enable = !!is_enable;
 		aso_mtr = container_of(fm, struct mlx5_aso_mtr, fm);
-		ret = mlx5_aso_meter_update_by_wqe(priv->sh, aso_mtr);
+		ret = mlx5_aso_meter_update_by_wqe(sh, aso_mtr);
 		if (ret)
 			return ret;
-		ret = mlx5_aso_mtr_wait(priv->sh, aso_mtr);
+		ret = mlx5_aso_mtr_wait(sh, aso_mtr);
 		if (ret)
 			return ret;
 	} else {
 		/* Fill command parameters. */
-		mod_attr.reg_c_index = priv->mtr_color_reg - REG_C_0;
+		mod_attr.reg_c_index = sh->dev_cap.mtr_color_reg - REG_C_0;
 		mod_attr.flow_meter_parameter = in;
 		mod_attr.flow_meter_parameter_sz =
 				MLX5_ST_SZ_BYTES(flow_meter_parameters);
@@ -1107,7 +1107,7 @@  mlx5_flow_meter_action_modify(struct mlx5_priv *priv,
 	}
 	return 0;
 #else
-	(void)priv;
+	(void)sh;
 	(void)fm;
 	(void)srtcm;
 	(void)modify_bits;
@@ -1164,6 +1164,7 @@  mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id,
 		       struct rte_mtr_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	struct mlx5_legacy_flow_meters *fms = &priv->flow_meters;
 	struct mlx5_flow_meter_profile *fmp;
 	struct mlx5_flow_meter_info *fm;
@@ -1181,10 +1182,10 @@  mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id,
 	int ret;
 	uint8_t domain_bitmap;
 	uint8_t mtr_id_bits;
-	uint8_t mtr_reg_bits = priv->mtr_reg_share ?
+	uint8_t mtr_reg_bits = sh->dev_cap.mtr_reg_share ?
 				MLX5_MTR_IDLE_BITS_IN_COLOR_REG : MLX5_REG_BITS;
 
-	if (!priv->mtr_en)
+	if (!sh->dev_cap.mtr_en)
 		return -rte_mtr_error_set(error, ENOTSUP,
 					RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
 					"Meter is not supported");
@@ -1199,15 +1200,14 @@  mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id,
 			RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
 			NULL, "Meter profile id not valid.");
 	/* Meter policy must exist. */
-	if (params->meter_policy_id == priv->sh->mtrmng->def_policy_id) {
-		__atomic_add_fetch
-			(&priv->sh->mtrmng->def_policy_ref_cnt,
-			1, __ATOMIC_RELAXED);
+	if (params->meter_policy_id == sh->mtrmng->def_policy_id) {
+		__atomic_add_fetch(&sh->mtrmng->def_policy_ref_cnt, 1,
+				   __ATOMIC_RELAXED);
 		domain_bitmap = MLX5_MTR_ALL_DOMAIN_BIT;
-		if (!priv->config.dv_esw_en)
+		if (!sh->config.dv_esw_en)
 			domain_bitmap &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
 	} else {
-		if (!priv->sh->meter_aso_en)
+		if (!sh->dev_cap.meter_aso_en)
 			return -rte_mtr_error_set(error, ENOTSUP,
 				RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
 				"Part of the policies cannot be "
@@ -1234,7 +1234,7 @@  mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id,
 					NULL, "Meter yellow policy is empty.");
 	}
 	/* Allocate the flow meter memory. */
-	if (priv->sh->meter_aso_en) {
+	if (sh->dev_cap.meter_aso_en) {
 		mtr_idx = mlx5_flow_mtr_alloc(dev);
 		if (!mtr_idx)
 			return -rte_mtr_error_set(error, ENOMEM,
@@ -1247,8 +1247,8 @@  mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id,
 			return -rte_mtr_error_set(error, ENOMEM,
 				RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
 				"Unsupported profile with yellow.");
-		legacy_fm = mlx5_ipool_zmalloc
-				(priv->sh->ipool[MLX5_IPOOL_MTR], &mtr_idx);
+		legacy_fm = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_MTR],
+					       &mtr_idx);
 		if (legacy_fm == NULL)
 			return -rte_mtr_error_set(error, ENOMEM,
 				RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -1257,13 +1257,12 @@  mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id,
 		fm = &legacy_fm->fm;
 	}
 	mtr_id_bits = MLX5_REG_BITS - __builtin_clz(mtr_idx);
-	if ((mtr_id_bits + priv->sh->mtrmng->max_mtr_flow_bits) >
-	    mtr_reg_bits) {
+	if ((mtr_id_bits + sh->mtrmng->max_mtr_flow_bits) > mtr_reg_bits) {
 		DRV_LOG(ERR, "Meter number exceeds max limit.");
 		goto error;
 	}
-	if (mtr_id_bits > priv->sh->mtrmng->max_mtr_bits)
-		priv->sh->mtrmng->max_mtr_bits = mtr_id_bits;
+	if (mtr_id_bits > sh->mtrmng->max_mtr_bits)
+		sh->mtrmng->max_mtr_bits = mtr_id_bits;
 	/* Fill the flow meter parameters. */
 	fm->meter_id = meter_id;
 	fm->policy_id = params->meter_policy_id;
@@ -1273,14 +1272,14 @@  mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id,
 	if (mlx5_flow_create_mtr_tbls(dev, fm, mtr_idx, domain_bitmap))
 		goto error;
 	/* Add to the flow meter list. */
-	if (!priv->sh->meter_aso_en)
+	if (!sh->dev_cap.meter_aso_en)
 		TAILQ_INSERT_TAIL(fms, legacy_fm, next);
 	/* Add to the flow meter list. */
 	fm->active_state = 1; /* Config meter starts as active. */
 	fm->is_enable = 1;
 	fm->shared = !!shared;
 	__atomic_add_fetch(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);
-	if (params->meter_policy_id == priv->sh->mtrmng->def_policy_id) {
+	if (params->meter_policy_id == sh->mtrmng->def_policy_id) {
 		fm->def_policy = 1;
 		fm->flow_ipool = mlx5_ipool_create(&flow_ipool_cfg);
 		if (!fm->flow_ipool)
@@ -1288,9 +1287,9 @@  mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id,
 	}
 	rte_spinlock_init(&fm->sl);
 	/* If ASO meter supported, update ASO flow meter by wqe. */
-	if (priv->sh->meter_aso_en) {
+	if (sh->dev_cap.meter_aso_en) {
 		aso_mtr = container_of(fm, struct mlx5_aso_mtr, fm);
-		ret = mlx5_aso_meter_update_by_wqe(priv->sh, aso_mtr);
+		ret = mlx5_aso_meter_update_by_wqe(sh, aso_mtr);
 		if (ret)
 			goto error;
 		if (!priv->mtr_idx_tbl) {
@@ -1311,10 +1310,10 @@  mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id,
 	/* Free policer counters. */
 	if (fm->drop_cnt)
 		mlx5_counter_free(dev, fm->drop_cnt);
-	if (priv->sh->meter_aso_en)
+	if (sh->dev_cap.meter_aso_en)
 		mlx5_flow_mtr_free(dev, mtr_idx);
 	else
-		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR], mtr_idx);
+		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_MTR], mtr_idx);
 	return -rte_mtr_error_set(error, ENOTSUP,
 		RTE_MTR_ERROR_TYPE_UNSPECIFIED,
 		NULL, "Failed to create devx meter.");
@@ -1341,7 +1340,7 @@  mlx5_flow_meter_params_flush(struct rte_eth_dev *dev,
 	__atomic_sub_fetch(&fmp->ref_cnt, 1, __ATOMIC_RELAXED);
 	fm->profile = NULL;
 	/* Remove from list. */
-	if (!priv->sh->meter_aso_en) {
+	if (!priv->sh->dev_cap.meter_aso_en) {
 		legacy_fm = container_of(fm,
 			struct mlx5_legacy_flow_meter, fm);
 		TAILQ_REMOVE(fms, legacy_fm, next);
@@ -1358,7 +1357,7 @@  mlx5_flow_meter_params_flush(struct rte_eth_dev *dev,
 	if (fm->def_policy)
 		__atomic_sub_fetch(&priv->sh->mtrmng->def_policy_ref_cnt,
 				1, __ATOMIC_RELAXED);
-	if (priv->sh->meter_aso_en) {
+	if (priv->sh->dev_cap.meter_aso_en) {
 		if (!fm->def_policy) {
 			mtr_policy = mlx5_flow_meter_policy_find(dev,
 						fm->policy_id, NULL);
@@ -1399,7 +1398,7 @@  mlx5_flow_meter_destroy(struct rte_eth_dev *dev, uint32_t meter_id,
 	struct mlx5_flow_meter_info *fm;
 	uint32_t mtr_idx = 0;
 
-	if (!priv->mtr_en)
+	if (!priv->sh->dev_cap.mtr_en)
 		return -rte_mtr_error_set(error, ENOTSUP,
 					  RTE_MTR_ERROR_TYPE_UNSPECIFIED,
 					  NULL,
@@ -1456,10 +1455,10 @@  mlx5_flow_meter_modify_state(struct mlx5_priv *priv,
 	int ret;
 
 	if (new_state == MLX5_FLOW_METER_DISABLE)
-		ret = mlx5_flow_meter_action_modify(priv, fm,
+		ret = mlx5_flow_meter_action_modify(priv->sh, fm,
 				&srtcm, modify_bits, 0, 0);
 	else
-		ret = mlx5_flow_meter_action_modify(priv, fm,
+		ret = mlx5_flow_meter_action_modify(priv->sh, fm,
 						    &fm->profile->srtcm_prm,
 						    modify_bits, 0, 1);
 	if (ret)
@@ -1494,7 +1493,7 @@  mlx5_flow_meter_enable(struct rte_eth_dev *dev,
 	struct mlx5_flow_meter_info *fm;
 	int ret;
 
-	if (!priv->mtr_en)
+	if (!priv->sh->dev_cap.mtr_en)
 		return -rte_mtr_error_set(error, ENOTSUP,
 					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
 					  "Meter is not supported");
@@ -1535,7 +1534,7 @@  mlx5_flow_meter_disable(struct rte_eth_dev *dev,
 	struct mlx5_flow_meter_info *fm;
 	int ret;
 
-	if (!priv->mtr_en)
+	if (!priv->sh->dev_cap.mtr_en)
 		return -rte_mtr_error_set(error, ENOTSUP,
 					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
 					  "Meter is not supported");
@@ -1583,7 +1582,7 @@  mlx5_flow_meter_profile_update(struct rte_eth_dev *dev,
 			       MLX5_FLOW_METER_OBJ_MODIFY_FIELD_CIR;
 	int ret;
 
-	if (!priv->mtr_en)
+	if (!priv->sh->dev_cap.mtr_en)
 		return -rte_mtr_error_set(error, ENOTSUP,
 					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
 					  "Meter is not supported");
@@ -1608,8 +1607,9 @@  mlx5_flow_meter_profile_update(struct rte_eth_dev *dev,
 	/* Update meter params in HW (if not disabled). */
 	if (fm->active_state == MLX5_FLOW_METER_DISABLE)
 		return 0;
-	ret = mlx5_flow_meter_action_modify(priv, fm, &fm->profile->srtcm_prm,
-					      modify_bits, fm->active_state, 1);
+	ret = mlx5_flow_meter_action_modify(priv->sh, fm,
+					    &fm->profile->srtcm_prm,
+					    modify_bits, fm->active_state, 1);
 	if (ret) {
 		fm->profile = old_fmp;
 		return -rte_mtr_error_set(error, -ret,
@@ -1646,7 +1646,7 @@  mlx5_flow_meter_stats_update(struct rte_eth_dev *dev,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_meter_info *fm;
 
-	if (!priv->mtr_en)
+	if (!priv->sh->dev_cap.mtr_en)
 		return -rte_mtr_error_set(error, ENOTSUP,
 					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
 					  "Meter is not supported");
@@ -1697,7 +1697,7 @@  mlx5_flow_meter_stats_read(struct rte_eth_dev *dev,
 	uint64_t bytes;
 	int ret = 0;
 
-	if (!priv->mtr_en)
+	if (!priv->sh->dev_cap.mtr_en)
 		return -rte_mtr_error_set(error, ENOTSUP,
 					  RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL,
 					  "Meter is not supported");
@@ -1791,7 +1791,7 @@  mlx5_flow_meter_find(struct mlx5_priv *priv, uint32_t meter_id,
 	union mlx5_l3t_data data;
 	uint16_t n_valid;
 
-	if (priv->sh->meter_aso_en) {
+	if (priv->sh->dev_cap.meter_aso_en) {
 		rte_rwlock_read_lock(&pools_mng->resize_mtrwl);
 		n_valid = pools_mng->n_valid;
 		rte_rwlock_read_unlock(&pools_mng->resize_mtrwl);
@@ -1833,7 +1833,7 @@  flow_dv_meter_find_by_idx(struct mlx5_priv *priv, uint32_t idx)
 {
 	struct mlx5_aso_mtr *aso_mtr;
 
-	if (priv->sh->meter_aso_en) {
+	if (priv->sh->dev_cap.meter_aso_en) {
 		aso_mtr = mlx5_aso_meter_by_idx(priv, idx);
 		if (!aso_mtr)
 			return NULL;
@@ -1868,7 +1868,7 @@  mlx5_flow_meter_attach(struct mlx5_priv *priv,
 {
 	int ret = 0;
 
-	if (priv->sh->meter_aso_en) {
+	if (priv->sh->dev_cap.meter_aso_en) {
 		struct mlx5_aso_mtr *aso_mtr;
 
 		aso_mtr = container_of(fm, struct mlx5_aso_mtr, fm);
@@ -1943,7 +1943,7 @@  mlx5_flow_meter_detach(struct mlx5_priv *priv,
 #ifdef HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER
 	rte_spinlock_lock(&fm->sl);
 	MLX5_ASSERT(fm->ref_cnt);
-	if (--fm->ref_cnt == 0 && !priv->sh->meter_aso_en) {
+	if (--fm->ref_cnt == 0 && !priv->sh->dev_cap.meter_aso_en) {
 		mlx5_glue->destroy_flow_action(fm->meter_action);
 		fm->meter_action = NULL;
 		fm->ingress = 0;
@@ -1972,7 +1972,7 @@  mlx5_flow_meter_rxq_flush(struct rte_eth_dev *dev)
 	void *entry;
 	uint32_t i, policy_idx;
 
-	if (!priv->mtr_en)
+	if (!priv->sh->dev_cap.mtr_en)
 		return;
 	if (priv->policy_idx_tbl) {
 		MLX5_L3T_FOREACH(priv->policy_idx_tbl, i, entry) {
@@ -2145,9 +2145,9 @@  mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error)
 	void *entry;
 	struct mlx5_aso_mtr *aso_mtr;
 
-	if (!priv->mtr_en)
+	if (!priv->sh->dev_cap.mtr_en)
 		return 0;
-	if (priv->sh->meter_aso_en) {
+	if (priv->sh->dev_cap.meter_aso_en) {
 		if (mlx5_flow_meter_flush_all_hierarchies(dev, error))
 			return -rte_errno;
 		if (priv->mtr_idx_tbl) {
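
For readers tracking the mtr_reg_share reads above (both in flow_dv.c and here), a minimal sketch of the REG_C bit layout they select. The helper below is hypothetical and only restates the constants already used in the hunks:

/*
 * Hypothetical helper, not part of this patch: when the meter register
 * is shared with the color, the low MLX5_MTR_COLOR_BITS carry the
 * color and the meter ID is packed into the remaining (idle) bits;
 * otherwise the meter ID may occupy the full 32-bit register.
 */
static inline void
mtr_id_layout(bool mtr_reg_share, uint8_t *offset, uint8_t *length)
{
	*offset = mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
	*length = mtr_reg_share ? MLX5_MTR_IDLE_BITS_IN_COLOR_REG :
				  MLX5_REG_BITS;
}
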
diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c
index 7f19b235c2..f44906e1a7 100644
--- a/drivers/net/mlx5/mlx5_rxmode.c
+++ b/drivers/net/mlx5/mlx5_rxmode.c
@@ -36,7 +36,7 @@  mlx5_promiscuous_enable(struct rte_eth_dev *dev)
 			dev->data->port_id);
 		return 0;
 	}
-	if (priv->config.vf || priv->config.sf) {
+	if (priv->sh->dev_cap.vf || priv->sh->dev_cap.sf) {
 		ret = mlx5_os_set_promisc(dev, 1);
 		if (ret)
 			return ret;
@@ -69,7 +69,7 @@  mlx5_promiscuous_disable(struct rte_eth_dev *dev)
 	int ret;
 
 	dev->data->promiscuous = 0;
-	if (priv->config.vf || priv->config.sf) {
+	if (priv->sh->dev_cap.vf || priv->sh->dev_cap.sf) {
 		ret = mlx5_os_set_promisc(dev, 0);
 		if (ret)
 			return ret;
@@ -109,7 +109,7 @@  mlx5_allmulticast_enable(struct rte_eth_dev *dev)
 			dev->data->port_id);
 		return 0;
 	}
-	if (priv->config.vf || priv->config.sf) {
+	if (priv->sh->dev_cap.vf || priv->sh->dev_cap.sf) {
 		ret = mlx5_os_set_allmulti(dev, 1);
 		if (ret)
 			goto error;
@@ -142,7 +142,7 @@  mlx5_allmulticast_disable(struct rte_eth_dev *dev)
 	int ret;
 
 	dev->data->all_multicast = 0;
-	if (priv->config.vf || priv->config.sf) {
+	if (priv->sh->dev_cap.vf || priv->sh->dev_cap.sf) {
 		ret = mlx5_os_set_allmulti(dev, 0);
 		if (ret)
 			goto error;
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 62561eb335..1ac1949be0 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -357,20 +357,19 @@  uint64_t
 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
 	uint64_t offloads = (RTE_ETH_RX_OFFLOAD_SCATTER |
 			     RTE_ETH_RX_OFFLOAD_TIMESTAMP |
 			     RTE_ETH_RX_OFFLOAD_RSS_HASH);
 
-	if (!config->mprq.enabled)
+	if (!priv->config.mprq.enabled)
 		offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
-	if (config->hw_fcs_strip)
+	if (priv->sh->dev_cap.hw_fcs_strip)
 		offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
-	if (config->hw_csum)
+	if (priv->sh->dev_cap.hw_csum)
 		offloads |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
 			     RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
 			     RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
-	if (config->hw_vlan_strip)
+	if (priv->sh->dev_cap.hw_vlan_strip)
 		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 	if (MLX5_LRO_SUPPORTED(dev))
 		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
@@ -1561,15 +1560,16 @@  mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		  uint32_t *actual_log_stride_size)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
-	uint32_t log_min_stride_num = config->mprq.log_min_stride_num;
-	uint32_t log_max_stride_num = config->mprq.log_max_stride_num;
+	struct mlx5_cap *dev_cap = &priv->sh->dev_cap;
+	struct mlx5_port_config *config = &priv->config;
+	uint32_t log_min_stride_num = dev_cap->mprq.log_min_stride_num;
+	uint32_t log_max_stride_num = dev_cap->mprq.log_max_stride_num;
 	uint32_t log_def_stride_num =
 			RTE_MIN(RTE_MAX(MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM,
 					log_min_stride_num),
 				log_max_stride_num);
-	uint32_t log_min_stride_size = config->mprq.log_min_stride_size;
-	uint32_t log_max_stride_size = config->mprq.log_max_stride_size;
+	uint32_t log_min_stride_size = dev_cap->mprq.log_min_stride_size;
+	uint32_t log_max_stride_size = dev_cap->mprq.log_max_stride_size;
 	uint32_t log_def_stride_size =
 			RTE_MIN(RTE_MAX(MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE,
 					log_min_stride_size),
@@ -1608,7 +1608,7 @@  mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	}
 	log_stride_wqe_size = *actual_log_stride_num + *actual_log_stride_size;
 	/* Check if WQE buffer size is supported by hardware. */
-	if (log_stride_wqe_size < config->mprq.log_min_stride_wqe_size) {
+	if (log_stride_wqe_size < dev_cap->mprq.log_min_stride_wqe_size) {
 		*actual_log_stride_num = log_def_stride_num;
 		*actual_log_stride_size = log_def_stride_size;
 		DRV_LOG(WARNING,
@@ -1617,7 +1617,8 @@  mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			RTE_BIT32(log_def_stride_size));
 		log_stride_wqe_size = log_def_stride_num + log_def_stride_size;
 	}
-	MLX5_ASSERT(log_stride_wqe_size < config->mprq.log_min_stride_wqe_size);
+	MLX5_ASSERT(log_stride_wqe_size >=
+		    dev_cap->mprq.log_min_stride_wqe_size);
 	if (desc <= RTE_BIT32(*actual_log_stride_num))
 		goto unsupport;
 	if (min_mbuf_size > RTE_BIT32(log_stride_wqe_size)) {
@@ -1646,9 +1647,9 @@  mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			RTE_BIT32(config->mprq.log_stride_size),
 			RTE_BIT32(config->mprq.log_stride_num),
 			config->mprq.min_rxqs_num,
-			RTE_BIT32(config->mprq.log_min_stride_wqe_size),
-			RTE_BIT32(config->mprq.log_min_stride_size),
-			RTE_BIT32(config->mprq.log_max_stride_size),
+			RTE_BIT32(dev_cap->mprq.log_min_stride_wqe_size),
+			RTE_BIT32(dev_cap->mprq.log_min_stride_size),
+			RTE_BIT32(dev_cap->mprq.log_max_stride_size),
 			rx_seg_en ? "" : "not ");
 	return -1;
 }
@@ -1678,7 +1679,7 @@  mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rxq_ctrl *tmpl;
 	unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_port_config *config = &priv->config;
 	uint64_t offloads = conf->offloads |
 			   dev->data->dev_conf.rxmode.offloads;
 	unsigned int lro_on_queue = !!(offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO);
@@ -1892,7 +1893,7 @@  mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
 	tmpl->rxq.crc_present = 0;
 	tmpl->rxq.lro = lro_on_queue;
 	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
-		if (config->hw_fcs_strip) {
+		if (priv->sh->dev_cap.hw_fcs_strip) {
 			/*
 			 * RQs used for LRO-enabled TIRs should not be
 			 * configured to scatter the FCS.
@@ -2368,7 +2369,7 @@  mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
 	int ret = 0, err;
 	const unsigned int n = rte_is_power_of_2(queues_n) ?
 			       log2above(queues_n) :
-			       log2above(priv->config.ind_table_max_size);
+			       log2above(priv->sh->dev_cap.ind_table_max_size);
 
 	if (ref_qs)
 		for (i = 0; i != queues_n; ++i) {
@@ -2493,7 +2494,7 @@  mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
 	int ret = 0, err;
 	const unsigned int n = rte_is_power_of_2(queues_n) ?
 			       log2above(queues_n) :
-			       log2above(priv->config.ind_table_max_size);
+			       log2above(priv->sh->dev_cap.ind_table_max_size);
 
 	MLX5_ASSERT(standalone);
 	RTE_SET_USED(standalone);
@@ -2574,7 +2575,7 @@  mlx5_ind_table_obj_detach(struct rte_eth_dev *dev,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	const unsigned int n = rte_is_power_of_2(ind_tbl->queues_n) ?
 			       log2above(ind_tbl->queues_n) :
-			       log2above(priv->config.ind_table_max_size);
+			       log2above(priv->sh->dev_cap.ind_table_max_size);
 	unsigned int i;
 	int ret;
 
@@ -2992,6 +2993,6 @@  mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
 		if (data == NULL)
 			continue;
 		data->sh = sh;
-		data->rt_timestamp = priv->config.rt_timestamp;
+		data->rt_timestamp = sh->dev_cap.rt_timestamp;
 	}
 }
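
The three ind_table_max_size hunks above share one sizing rule; here is an illustrative wrapper (hypothetical, not part of the patch) showing what is computed now that the capability lives in sh->dev_cap:

/*
 * Log2 depth of the RSS indirection table: a power-of-two queue count
 * maps directly, otherwise the table is stretched to the device
 * maximum so that every queue index can still be reached.
 */
static inline unsigned int
ind_tbl_log_n(unsigned int queues_n, unsigned int ind_table_max_size)
{
	return rte_is_power_of_2(queues_n) ?
	       log2above(queues_n) : log2above(ind_table_max_size);
}
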
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 1dfe7da435..c13adc9199 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1104,8 +1104,8 @@  mlx5_dev_start(struct rte_eth_dev *dev)
 			dev->data->port_id, strerror(rte_errno));
 		goto error;
 	}
-	if ((priv->sh->cdev->config.devx && priv->config.dv_flow_en &&
-	    priv->config.dest_tir) && priv->obj_ops.lb_dummy_queue_create) {
+	if (priv->sh->dev_cap.devx_obj_ops_en &&
+	    priv->obj_ops.lb_dummy_queue_create) {
 		ret = priv->obj_ops.lb_dummy_queue_create(dev);
 		if (ret)
 			goto error;
@@ -1117,7 +1117,7 @@  mlx5_dev_start(struct rte_eth_dev *dev)
 		goto error;
 	}
 	if (priv->config.std_delay_drop || priv->config.hp_delay_drop) {
-		if (!priv->config.vf && !priv->config.sf &&
+		if (!priv->sh->dev_cap.vf && !priv->sh->dev_cap.sf &&
 		    !priv->representor) {
 			ret = mlx5_get_flag_dropless_rq(dev);
 			if (ret < 0)
@@ -1276,8 +1276,6 @@  mlx5_dev_stop(struct rte_eth_dev *dev)
  * Enable traffic flows configured by control plane
  *
  * @param dev
- *   Pointer to Ethernet device private data.
- * @param dev
  *   Pointer to Ethernet device structure.
  *
  * @return
@@ -1330,8 +1328,7 @@  mlx5_traffic_enable(struct rte_eth_dev *dev)
 				goto error;
 			}
 		}
-		if ((priv->representor || priv->master) &&
-		    priv->config.dv_esw_en) {
+		if (priv->sh->config.dv_esw_en) {
 			if (mlx5_flow_create_devx_sq_miss_flow(dev, i) == 0) {
 				DRV_LOG(ERR,
 					"Port %u Tx queue %u SQ create representor devx default miss rule failed.",
@@ -1341,7 +1338,7 @@  mlx5_traffic_enable(struct rte_eth_dev *dev)
 		}
 		mlx5_txq_release(dev, i);
 	}
-	if ((priv->master || priv->representor) && priv->config.dv_esw_en) {
+	if (priv->sh->config.dv_esw_en) {
 		if (mlx5_flow_create_esw_table_zero_flow(dev))
 			priv->fdb_def_rule = 1;
 		else
@@ -1349,7 +1346,7 @@  mlx5_traffic_enable(struct rte_eth_dev *dev)
 				" configured - only Eswitch group 0 flows are"
 				" supported.", dev->data->port_id);
 	}
-	if (!priv->config.lacp_by_user && priv->pf_bond >= 0) {
+	if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) {
 		ret = mlx5_flow_lacp_miss(dev);
 		if (ret)
 			DRV_LOG(INFO, "port %u LACP rule cannot be created - "
diff --git a/drivers/net/mlx5/mlx5_tx.c b/drivers/net/mlx5/mlx5_tx.c
index 5492d64cae..d670936e52 100644
--- a/drivers/net/mlx5/mlx5_tx.c
+++ b/drivers/net/mlx5/mlx5_tx.c
@@ -517,7 +517,7 @@  eth_tx_burst_t
 mlx5_select_tx_function(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_port_config *config = &priv->config;
 	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
 	unsigned int diff = 0, olx = 0, i, m;
 
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index 1d16ebcb41..fe74317fe8 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -816,15 +816,15 @@  mlx5_txpp_start_service(struct mlx5_dev_ctx_shared *sh)
  * Returns 0 on success, negative otherwise
  */
 static int
-mlx5_txpp_create(struct mlx5_dev_ctx_shared *sh, struct mlx5_priv *priv)
+mlx5_txpp_create(struct mlx5_dev_ctx_shared *sh)
 {
-	int tx_pp = priv->config.tx_pp;
+	int tx_pp = sh->config.tx_pp;
 	int ret;
 
 	/* Store the requested pacing parameters. */
 	sh->txpp.tick = tx_pp >= 0 ? tx_pp : -tx_pp;
 	sh->txpp.test = !!(tx_pp < 0);
-	sh->txpp.skew = priv->config.tx_skew;
+	sh->txpp.skew = sh->config.tx_skew;
 	sh->txpp.freq = sh->cdev->config.hca_attr.dev_freq_khz;
 	ret = mlx5_txpp_create_event_channel(sh);
 	if (ret)
@@ -891,7 +891,7 @@  mlx5_txpp_start(struct rte_eth_dev *dev)
 	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	int err = 0;
 
-	if (!priv->config.tx_pp) {
+	if (!sh->config.tx_pp) {
 		/* Packet pacing is not requested for the device. */
 		MLX5_ASSERT(priv->txpp_en == 0);
 		return 0;
@@ -901,7 +901,7 @@  mlx5_txpp_start(struct rte_eth_dev *dev)
 		MLX5_ASSERT(sh->txpp.refcnt);
 		return 0;
 	}
-	if (priv->config.tx_pp > 0) {
+	if (sh->config.tx_pp > 0) {
 		err = rte_mbuf_dynflag_lookup
 			(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
 		/* No flag registered means no service needed. */
@@ -914,7 +914,7 @@  mlx5_txpp_start(struct rte_eth_dev *dev)
 		priv->txpp_en = 1;
 		++sh->txpp.refcnt;
 	} else {
-		err = mlx5_txpp_create(sh, priv);
+		err = mlx5_txpp_create(sh);
 		if (!err) {
 			MLX5_ASSERT(sh->txpp.tick);
 			priv->txpp_en = 1;
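
As a reference for the tx_pp reads above, the devargs sign convention consumed by mlx5_txpp_create() (values are illustrative only):

/* Illustration, not part of this patch: a negative "tx_pp" devarg
 * requests the clock-queue test mode; the absolute value is the
 * scheduling tick in nanoseconds. */
int tx_pp = -500;				/* devargs tx_pp=-500 */
uint32_t tick = tx_pp >= 0 ? tx_pp : -tx_pp;	/* 500 ns tick */
uint32_t test = !!(tx_pp < 0);			/* 1 = test mode */
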
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 4e0bf7af9c..51ab8a578b 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -100,39 +100,39 @@  mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 	struct mlx5_priv *priv = dev->data->dev_private;
 	uint64_t offloads = (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
 			     RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_cap *dev_cap = &priv->sh->dev_cap;
 
-	if (config->hw_csum)
+	if (dev_cap->hw_csum)
 		offloads |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
 			     RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
 			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
-	if (config->tso)
+	if (dev_cap->tso)
 		offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
-	if (config->tx_pp)
+	if (priv->sh->config.tx_pp)
 		offloads |= RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP;
-	if (config->swp) {
-		if (config->swp & MLX5_SW_PARSING_CSUM_CAP)
+	if (dev_cap->swp) {
+		if (dev_cap->swp & MLX5_SW_PARSING_CSUM_CAP)
 			offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
-		if (config->swp & MLX5_SW_PARSING_TSO_CAP)
+		if (dev_cap->swp & MLX5_SW_PARSING_TSO_CAP)
 			offloads |= (RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
 				     RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
 	}
-	if (config->tunnel_en) {
-		if (config->hw_csum)
+	if (dev_cap->tunnel_en) {
+		if (dev_cap->hw_csum)
 			offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
-		if (config->tso) {
-			if (config->tunnel_en &
+		if (dev_cap->tso) {
+			if (dev_cap->tunnel_en &
 				MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)
 				offloads |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
-			if (config->tunnel_en &
+			if (dev_cap->tunnel_en &
 				MLX5_TUNNELED_OFFLOADS_GRE_CAP)
 				offloads |= RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO;
-			if (config->tunnel_en &
+			if (dev_cap->tunnel_en &
 				MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)
 				offloads |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
 		}
 	}
-	if (!config->mprq.enabled)
+	if (!priv->config.mprq.enabled)
 		offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 	return offloads;
 }
@@ -740,7 +740,8 @@  static void
 txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 {
 	struct mlx5_priv *priv = txq_ctrl->priv;
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_port_config *config = &priv->config;
+	struct mlx5_cap *dev_cap = &priv->sh->dev_cap;
 	unsigned int inlen_send; /* Inline data for ordinary SEND.*/
 	unsigned int inlen_empw; /* Inline data for enhanced MPW. */
 	unsigned int inlen_mode; /* Minimal required Inline data. */
@@ -924,19 +925,19 @@  txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 		txq_ctrl->txq.tso_en = 1;
 	}
 	if (((RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO & txq_ctrl->txq.offloads) &&
-	    (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)) |
+	    (dev_cap->tunnel_en & MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)) |
 	   ((RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO & txq_ctrl->txq.offloads) &&
-	    (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_GRE_CAP)) |
+	    (dev_cap->tunnel_en & MLX5_TUNNELED_OFFLOADS_GRE_CAP)) |
 	   ((RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO & txq_ctrl->txq.offloads) &&
-	    (config->tunnel_en & MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)) |
-	   (config->swp  & MLX5_SW_PARSING_TSO_CAP))
+	    (dev_cap->tunnel_en & MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)) |
+	    (dev_cap->swp & MLX5_SW_PARSING_TSO_CAP))
 		txq_ctrl->txq.tunnel_en = 1;
 	txq_ctrl->txq.swp_en = (((RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
 				  RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO) &
-				  txq_ctrl->txq.offloads) && (config->swp &
+				  txq_ctrl->txq.offloads) && (dev_cap->swp &
 				  MLX5_SW_PARSING_TSO_CAP)) |
 				((RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM &
-				 txq_ctrl->txq.offloads) && (config->swp &
+				 txq_ctrl->txq.offloads) && (dev_cap->swp &
 				 MLX5_SW_PARSING_CSUM_CAP));
 }
 
@@ -958,7 +959,7 @@  static int
 txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
 {
 	struct mlx5_priv *priv = txq_ctrl->priv;
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_port_config *config = &priv->config;
 	unsigned int max_inline;
 
 	max_inline = txq_calc_inline_max(txq_ctrl);
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index ea841bb32f..e7161b66fe 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -97,7 +97,7 @@  mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
 
 	MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL);
 	/* Validate hw support */
-	if (!priv->config.hw_vlan_strip) {
+	if (!priv->sh->dev_cap.hw_vlan_strip) {
 		DRV_LOG(ERR, "port %u VLAN stripping is not supported",
 			dev->data->port_id);
 		return;
@@ -146,7 +146,7 @@  mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		int hw_vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
 				       RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
 
-		if (!priv->config.hw_vlan_strip) {
+		if (!priv->sh->dev_cap.hw_vlan_strip) {
 			DRV_LOG(ERR, "port %u VLAN stripping is not supported",
 				dev->data->port_id);
 			return 0;
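
Before the Windows-specific changes, a compact sketch of the three access paths the hunks above keep switching between (the function is hypothetical; the fields and flags are the ones exercised in mlx5_get_tx_port_offloads()):

/* Hypothetical example, not part of this patch. */
static inline uint64_t
example_tx_paths(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint64_t offloads = 0;

	if (priv->sh->dev_cap.tso)		/* device capability */
		offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
	if (priv->sh->config.tx_pp)		/* shared devargs */
		offloads |= RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP;
	if (!priv->config.mprq.enabled)		/* per-port devargs */
		offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	return offloads;
}
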
diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index 9effbb9201..2e24f864d9 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -239,6 +239,245 @@  mlx5_os_set_nonblock_channel_fd(int fd)
 	return -ENOTSUP;
 }
 
+/**
+ * Parse user port parameters and adjust them according to device capabilities.
+ *
+ * @param priv
+ *   Pointer to mlx5 private data structure.
+ * @param devargs
+ *   Device arguments structure.
+ * @param config
+ *   Pointer to port configuration structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_os_port_args_config(struct mlx5_priv *priv, struct rte_devargs *devargs,
+			 struct mlx5_port_config *config)
+{
+	struct rte_kvargs *kvlist;
+	int ret = 0;
+
+	/* Default configuration. */
+	memset(config, 0, sizeof(*config));
+	config->rx_vec_en = 1;
+	config->txq_inline_max = MLX5_ARG_UNSET;
+	config->txq_inline_min = MLX5_ARG_UNSET;
+	config->txq_inline_mpw = MLX5_ARG_UNSET;
+	config->txqs_inline = MLX5_ARG_UNSET;
+	config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
+	config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
+	config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM;
+	config->log_hp_size = MLX5_ARG_UNSET;
+	config->std_delay_drop = 0;
+	config->hp_delay_drop = 0;
+	/* Parse device parameters. */
+	if (devargs != NULL) {
+		kvlist = rte_kvargs_parse(devargs->args, NULL);
+		if (kvlist == NULL) {
+			DRV_LOG(ERR,
+				"Failed to parse device arguments.");
+			rte_errno = EINVAL;
+			return -rte_errno;
+		}
+		/* Process parameters. */
+		ret = rte_kvargs_process(kvlist, NULL,
+					 mlx5_port_args_check_handler, config);
+		rte_kvargs_free(kvlist);
+		if (ret) {
+			DRV_LOG(ERR, "Failed to process port arguments: %s",
+				strerror(rte_errno));
+			return -rte_errno;
+		}
+	}
+	if (config->hw_padding) {
+		DRV_LOG(DEBUG, "Rx end alignment padding isn't supported.");
+		config->hw_padding = 0;
+	}
+	DRV_LOG(DEBUG, "%sMPS is %s.",
+		config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
+		config->mps == MLX5_MPW ? "legacy " : "",
+		config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
+	if (config->cqe_comp) {
+		DRV_LOG(WARNING, "Rx CQE compression isn't supported.");
+		config->cqe_comp = 0;
+	}
+	if (config->mprq.enabled) {
+		DRV_LOG(WARNING, "Multi-Packet RQ isn't supported.");
+		config->mprq.enabled = 0;
+	}
+	if (config->max_dump_files_num == 0)
+		config->max_dump_files_num = 128;
+	/* Detect minimal data bytes to inline. */
+	mlx5_set_min_inline(priv);
+	DRV_LOG(DEBUG, "VLAN insertion in WQE is %ssupported.",
+		config->hw_vlan_insert ? "" : "not ");
+	DRV_LOG(DEBUG, "\"rx_vec_en\" is %u.", config->rx_vec_en);
+	DRV_LOG(DEBUG, "Standard \"delay_drop\" is %u.",
+		config->std_delay_drop);
+	DRV_LOG(DEBUG, "Hairpin \"delay_drop\" is %u.", config->hp_delay_drop);
+	DRV_LOG(DEBUG, "\"max_dump_files_num\" is %u.",
+		config->max_dump_files_num);
+	DRV_LOG(DEBUG, "\"log_hp_size\" is %u.", config->log_hp_size);
+	DRV_LOG(DEBUG, "\"txq_mpw_en\" is %d.", config->mps);
+	DRV_LOG(DEBUG, "\"txqs_min_inline\" is %d.", config->txqs_inline);
+	DRV_LOG(DEBUG, "\"txq_inline_min\" is %d.", config->txq_inline_min);
+	DRV_LOG(DEBUG, "\"txq_inline_max\" is %d.", config->txq_inline_max);
+	DRV_LOG(DEBUG, "\"txq_inline_mpw\" is %d.", config->txq_inline_mpw);
+	return 0;
+}
+
+/**
+ * Parse user device parameters and adjust them according to device
+ * capabilities.
+ *
+ * @param sh
+ *   Pointer to shared device context.
+ * @param devargs
+ *   Device arguments structure.
+ * @param config
+ *   Pointer to shared device configuration structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_os_shared_dev_ctx_args_config(struct mlx5_dev_ctx_shared *sh,
+				   struct rte_devargs *devargs,
+				   struct mlx5_sh_config *config)
+{
+	struct rte_kvargs *kvlist;
+	int ret = 0;
+	RTE_SET_USED(sh);
+
+	/* Default configuration. */
+	memset(config, 0, sizeof(*config));
+	config->dv_flow_en = 1;
+	config->decap_en = 1; /* TODO: check it with Tal */
+	/* Parse device parameters. */
+	if (devargs != NULL) {
+		kvlist = rte_kvargs_parse(devargs->args, NULL);
+		if (kvlist == NULL) {
+			DRV_LOG(ERR,
+				"Failed to parse shared device arguments.");
+			rte_errno = EINVAL;
+			return -rte_errno;
+		}
+		/* Process parameters. */
+		ret = rte_kvargs_process(kvlist, NULL,
+					 mlx5_dev_args_check_handler, config);
+		rte_kvargs_free(kvlist);
+		if (ret) {
+			DRV_LOG(ERR, "Failed to process device arguments: %s",
+				strerror(rte_errno));
+			return -rte_errno;
+		}
+	}
+	/* Adjust parameters according to device capabilities. */
+	if (config->dv_esw_en) {
+		DRV_LOG(WARNING, "E-Switch isn't supported.");
+		config->dv_esw_en = 0;
+	}
+	if (!config->dv_flow_en) {
+		DRV_LOG(ERR, "DV flow mode must be enabled on Windows.");
+		rte_errno = ENOTSUP;
+		return -rte_errno;
+	}
+	if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+		DRV_LOG(WARNING,
+			"Metadata mode %u is not supported (no E-Switch).",
+			config->dv_xmeta_en);
+		config->dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
+	}
+	if (config->allow_duplicate_pattern) {
+		DRV_LOG(WARNING, "Pattern duplication isn't supported.");
+		config->allow_duplicate_pattern = 0;
+	}
+	if (config->vf_nl_en) {
+		DRV_LOG(WARNING, "VF netlink isn't supported.");
+		config->vf_nl_en = 0;
+	}
+	if (config->tx_pp || config->tx_skew) {
+		DRV_LOG(WARNING, "Packet pacing isn't supported.");
+		config->tx_pp = 0;
+		config->tx_skew = 0;
+	}
+	DRV_LOG(DEBUG, "\"tx_pp\" is %d.", config->tx_pp);
+	DRV_LOG(DEBUG, "\"tx_skew\" is %d.", config->tx_skew);
+	DRV_LOG(DEBUG, "\"reclaim_mode\" is %u.", config->reclaim_mode);
+	DRV_LOG(DEBUG, "\"dv_esw_en\" is %u.", config->dv_esw_en);
+	DRV_LOG(DEBUG, "\"dv_flow_en\" is %u.", config->dv_flow_en);
+	DRV_LOG(DEBUG, "\"dv_xmeta_en\" is %u.", config->dv_xmeta_en);
+	DRV_LOG(DEBUG, "\"dv_miss_info\" is %u.", config->dv_miss_info);
+	DRV_LOG(DEBUG, "\"l3_vxlan_en\" is %u.", config->l3_vxlan_en);
+	DRV_LOG(DEBUG, "\"vf_nl_en\" is %u.", config->vf_nl_en);
+	DRV_LOG(DEBUG, "\"lacp_by_user\" is %u.", config->lacp_by_user);
+	DRV_LOG(DEBUG, "\"decap_en\" is %u.", config->decap_en);
+	DRV_LOG(DEBUG, "\"allow_duplicate_pattern\" is %u.",
+		config->allow_duplicate_pattern);
+	return 0;
+}
+
+/**
+ * Configure all device capabilities.
+ *
+ * @param sh
+ *   Pointer to mlx5_dev_ctx_shared object.
+ */
+void
+mlx5_os_cap_config(struct mlx5_dev_ctx_shared *sh)
+{
+	struct mlx5_cap *cap = &sh->dev_cap;
+	struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
+
+	MLX5_ASSERT(sh->cdev->config.devx);
+	MLX5_ASSERT(mlx5_dev_is_pci(sh->cdev->dev));
+	memset(cap, 0, sizeof(*cap));
+	cap->vf = mlx5_dev_is_vf_pci(RTE_DEV_TO_PCI(sh->cdev->dev));
+	cap->hw_csum = hca_attr->csum_cap;
+	DRV_LOG(DEBUG, "Checksum offloading is %ssupported.",
+		(cap->hw_csum ? "" : "not "));
+	cap->hw_vlan_strip = hca_attr->vlan_cap;
+	DRV_LOG(DEBUG, "VLAN stripping is %ssupported.",
+		(cap->hw_vlan_strip ? "" : "not "));
+	cap->hw_fcs_strip = hca_attr->scatter_fcs;
+	/*
+	 * DPDK doesn't support larger/variable indirection tables.
+	 * Once DPDK supports it, take max size from device attr.
+	 */
+	cap->ind_table_max_size =
+		RTE_MIN(sh->device_attr.max_rwq_indirection_table_size,
+			(unsigned int)RTE_ETH_RSS_RETA_SIZE_512);
+	DRV_LOG(DEBUG, "Maximum Rx indirection table size is %u",
+		cap->ind_table_max_size);
+	DRV_LOG(DEBUG, "Counters are not supported.");
+	cap->tso = !!(sh->device_attr.max_tso > 0);
+	if (cap->tso)
+		cap->tso_max_payload_sz = sh->device_attr.max_tso;
+	cap->tunnel_en = sh->device_attr.tunnel_offloads_caps &
+			 (MLX5_TUNNELED_OFFLOADS_VXLAN_CAP |
+			  MLX5_TUNNELED_OFFLOADS_GRE_CAP |
+			  MLX5_TUNNELED_OFFLOADS_GENEVE_CAP);
+	if (cap->tunnel_en) {
+		DRV_LOG(DEBUG, "Tunnel offloading is supported for %s%s%s",
+			cap->tunnel_en & MLX5_TUNNELED_OFFLOADS_VXLAN_CAP ?
+			"[VXLAN]" : "",
+			cap->tunnel_en & MLX5_TUNNELED_OFFLOADS_GRE_CAP ?
+			"[GRE]" : "",
+			cap->tunnel_en & MLX5_TUNNELED_OFFLOADS_GENEVE_CAP ?
+			"[GENEVE]" : "");
+	} else {
+		DRV_LOG(DEBUG, "Tunnel offloading is not supported.");
+	}
+	DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is not supported.");
+	cap->swp = sh->device_attr.sw_parsing_offloads &
+		   (MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP |
+		    MLX5_SW_PARSING_TSO_CAP);
+	DRV_LOG(DEBUG, "SWP support: %u", cap->swp);
+	mlx5_rt_timestamp_config(sh, hca_attr);
+}
+
 /**
  * Spawn an Ethernet device from DevX information.
  *
@@ -246,8 +485,6 @@  mlx5_os_set_nonblock_channel_fd(int fd)
  *   Backing DPDK device.
  * @param spawn
  *   Verbs device parameters (name, port, switch_info) to spawn.
- * @param config
- *   Device configuration parameters.
  *
  * @return
  *   A valid Ethernet device object on success, NULL otherwise and rte_errno
@@ -257,12 +494,10 @@  mlx5_os_set_nonblock_channel_fd(int fd)
  */
 static struct rte_eth_dev *
 mlx5_dev_spawn(struct rte_device *dpdk_dev,
-	       struct mlx5_dev_spawn_data *spawn,
-	       struct mlx5_dev_config *config)
+	       struct mlx5_dev_spawn_data *spawn)
 {
 	const struct mlx5_switch_info *switch_info = &spawn->info;
 	struct mlx5_dev_ctx_shared *sh = NULL;
-	struct mlx5_hca_attr *hca_attr;
 	struct rte_eth_dev *eth_dev = NULL;
 	struct mlx5_priv *priv = NULL;
 	int err = 0;
@@ -280,35 +515,9 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		return NULL;
 	}
 	DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
-	/* Process parameters. */
-	err = mlx5_args(config, dpdk_dev->devargs);
-	if (err) {
-		err = rte_errno;
-		DRV_LOG(ERR, "failed to process device arguments: %s",
-			strerror(rte_errno));
-		goto error;
-	}
-	sh = mlx5_alloc_shared_dev_ctx(spawn, config);
+	sh = mlx5_alloc_shared_dev_ctx(spawn);
 	if (!sh)
 		return NULL;
-	/* Update final values for devargs before check sibling config. */
-	config->dv_esw_en = 0;
-	if (!config->dv_flow_en) {
-		DRV_LOG(ERR, "Windows flow mode must be DV flow enable.");
-		err = ENOTSUP;
-		goto error;
-	}
-	if (!config->dv_esw_en &&
-	    config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
-		DRV_LOG(WARNING,
-			"Metadata mode %u is not supported (no E-Switch).",
-			config->dv_xmeta_en);
-		config->dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
-	}
-	/* Check sibling device configurations. */
-	err = mlx5_dev_check_sibling_config(sh, config, dpdk_dev);
-	if (err)
-		goto error;
 	/* Initialize the shutdown event in mlx5_dev_spawn to
 	 * support mlx5_is_removed for Windows.
 	 */
@@ -319,29 +528,6 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		goto error;
 	}
 	DRV_LOG(DEBUG, "MPW isn't supported");
-	config->swp = sh->device_attr.sw_parsing_offloads &
-		(MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP |
-		 MLX5_SW_PARSING_TSO_CAP);
-	config->ind_table_max_size =
-		sh->device_attr.max_rwq_indirection_table_size;
-	config->tunnel_en = sh->device_attr.tunnel_offloads_caps &
-		(MLX5_TUNNELED_OFFLOADS_VXLAN_CAP |
-		 MLX5_TUNNELED_OFFLOADS_GRE_CAP |
-		 MLX5_TUNNELED_OFFLOADS_GENEVE_CAP);
-	if (config->tunnel_en) {
-		DRV_LOG(DEBUG, "tunnel offloading is supported for %s%s%s",
-		config->tunnel_en &
-		MLX5_TUNNELED_OFFLOADS_VXLAN_CAP ? "[VXLAN]" : "",
-		config->tunnel_en &
-		MLX5_TUNNELED_OFFLOADS_GRE_CAP ? "[GRE]" : "",
-		config->tunnel_en &
-		MLX5_TUNNELED_OFFLOADS_GENEVE_CAP ? "[GENEVE]" : ""
-		);
-	} else {
-		DRV_LOG(DEBUG, "tunnel offloading is not supported");
-	}
-	DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is no supported");
-	config->mpls_en = 0;
 	/* Allocate private eth device data. */
 	priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
 			   sizeof(*priv),
@@ -392,49 +578,14 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		}
 		own_domain_id = 1;
 	}
-	DRV_LOG(DEBUG, "counters are not supported");
-	config->ind_table_max_size =
-		sh->device_attr.max_rwq_indirection_table_size;
-	/*
-	 * Remove this check once DPDK supports larger/variable
-	 * indirection tables.
-	 */
-	if (config->ind_table_max_size > (unsigned int)RTE_ETH_RSS_RETA_SIZE_512)
-		config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
-	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
-		config->ind_table_max_size);
-	if (config->hw_padding) {
-		DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
-		config->hw_padding = 0;
-	}
-	config->tso = (sh->device_attr.max_tso > 0);
-	if (config->tso)
-		config->tso_max_payload_sz = sh->device_attr.max_tso;
-	DRV_LOG(DEBUG, "%sMPS is %s.",
-		config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
-		config->mps == MLX5_MPW ? "legacy " : "",
-		config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
-	if (config->cqe_comp) {
-		DRV_LOG(WARNING, "Rx CQE compression isn't supported.");
-		config->cqe_comp = 0;
-	}
-	if (sh->cdev->config.devx) {
-		hca_attr = &sh->cdev->config.hca_attr;
-		config->hw_csum = hca_attr->csum_cap;
-		DRV_LOG(DEBUG, "checksum offloading is %ssupported",
-			(config->hw_csum ? "" : "not "));
-		config->hw_vlan_strip = hca_attr->vlan_cap;
-		DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
-			(config->hw_vlan_strip ? "" : "not "));
-		config->hw_fcs_strip = hca_attr->scatter_fcs;
-		mlx5_rt_timestamp_config(sh, config, hca_attr);
-	}
-	if (config->mprq.enabled) {
-		DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
-		config->mprq.enabled = 0;
+	/* Process parameters and store port configuration in priv structure. */
+	err = mlx5_os_port_args_config(priv, dpdk_dev->devargs, &priv->config);
+	if (err) {
+		err = rte_errno;
+		DRV_LOG(ERR, "Failed to process port configuration: %s",
+			strerror(rte_errno));
+		goto error;
 	}
-	if (config->max_dump_files_num == 0)
-		config->max_dump_files_num = 128;
 	eth_dev = rte_eth_dev_allocate(name);
 	if (eth_dev == NULL) {
 		DRV_LOG(ERR, "can not allocate rte ethdev");
@@ -529,13 +680,9 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	 * Verbs context returned by ibv_open_device().
 	 */
 	mlx5_link_update(eth_dev, 0);
-	/* Detect minimal data bytes to inline. */
-	mlx5_set_min_inline(spawn, config);
-	/* Store device configuration on private structure. */
-	priv->config = *config;
 	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
-		icfg[i].release_mem_en = !!config->reclaim_mode;
-		if (config->reclaim_mode)
+		icfg[i].release_mem_en = !!sh->config.reclaim_mode;
+		if (sh->config.reclaim_mode)
 			icfg[i].per_core_cache = 0;
 		priv->flows[i] = mlx5_ipool_create(&icfg[i]);
 		if (!priv->flows[i])
@@ -543,7 +690,7 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	}
 	/* Create context for virtual machine VLAN workaround. */
 	priv->vmwa_context = NULL;
-	if (config->dv_flow_en) {
+	if (sh->config.dv_flow_en) {
 		err = mlx5_alloc_shared_dr(priv);
 		if (err)
 			goto error;
@@ -551,11 +698,11 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	/* No supported flow priority number detection. */
 	priv->sh->flow_max_priority = -1;
 	mlx5_set_metadata_mask(eth_dev);
-	if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+	if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
 	    !priv->sh->dv_regc0_mask) {
 		DRV_LOG(ERR, "metadata mode %u is not supported "
 			     "(no metadata reg_c[0] is available).",
-			     priv->config.dv_xmeta_en);
+			     sh->config.dv_xmeta_en);
 			err = ENOTSUP;
 			goto error;
 	}
@@ -575,10 +722,10 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		DRV_LOG(DEBUG,
 			"port %u extensive metadata register is not supported.",
 			eth_dev->data->port_id);
-		if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+		if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
 			DRV_LOG(ERR, "metadata mode %u is not supported "
 				     "(no metadata registers available).",
-				     priv->config.dv_xmeta_en);
+				     sh->config.dv_xmeta_en);
 			err = ENOTSUP;
 			goto error;
 		}
@@ -835,20 +982,6 @@  mlx5_os_net_probe(struct mlx5_common_device *cdev)
 			.name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK,
 		},
 	};
-	struct mlx5_dev_config dev_config = {
-		.rx_vec_en = 1,
-		.txq_inline_max = MLX5_ARG_UNSET,
-		.txq_inline_min = MLX5_ARG_UNSET,
-		.txq_inline_mpw = MLX5_ARG_UNSET,
-		.txqs_inline = MLX5_ARG_UNSET,
-		.mprq = {
-			.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
-			.min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
-		},
-		.dv_flow_en = 1,
-		.log_hp_size = MLX5_ARG_UNSET,
-		.vf = mlx5_dev_is_vf_pci(pci_dev),
-	};
 	int ret;
 	uint32_t restore;
 
@@ -862,7 +995,13 @@  mlx5_os_net_probe(struct mlx5_common_device *cdev)
 			strerror(rte_errno));
 		return -rte_errno;
 	}
-	spawn.eth_dev = mlx5_dev_spawn(cdev->dev, &spawn, &dev_config);
+	ret = mlx5_probe_again_args_validate(cdev);
+	if (ret) {
+		DRV_LOG(ERR, "Probe again parameters are not compatible: %s",
+			strerror(rte_errno));
+		return -rte_errno;
+	}
+	spawn.eth_dev = mlx5_dev_spawn(cdev->dev, &spawn);
 	if (!spawn.eth_dev)
 		return -rte_errno;
 	restore = spawn.eth_dev->data->dev_flags;