[16/20] net/mlx5: add share device context config structure

Message ID 20220127153950.812953-17-michaelba@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Raslan Darawsheh
Series mlx5: refactor devargs management

Checks

Context        Check    Description
ci/checkpatch  success  coding style OK

Commit Message

Michael Baum Jan. 27, 2022, 3:39 p.m. UTC
  Add a configuration structure for the shared device context. This
structure holds all the device-oriented configurations coming from
devargs. It is a field of the shared device context (SH) structure and
is updated once, in the mlx5_alloc_shared_dev_ctx() function.
This structure must not change when probing again, so add a function to
enforce that. The mlx5_probe_again_args_validate() function builds a
temporary IB context configuration structure from the new devargs
attached on probe again, then checks that it matches the existing IB
context configuration structure.

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
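For illustration only (not part of the patch), the probe-again validation
added below in mlx5.c conceptually reduces to a sketch like this: parse the
new devargs into a temporary mlx5_sh_config and fail on any field mismatch
with the existing shared configuration. The real code compares every field
explicitly so it can report which devarg differs.

	struct mlx5_sh_config tmp;

	/* Parse devargs attached on probe again into a temporary config. */
	if (mlx5_shared_dev_ctx_args_config(sh, sh->cdev->dev->devargs, &tmp))
		return -rte_errno;
	/* Shared (SH) configuration must match across sibling probes. */
	if (sh->config.dv_flow_en != tmp.dv_flow_en ||
	    sh->config.dv_esw_en != tmp.dv_esw_en ||
	    sh->config.dv_xmeta_en != tmp.dv_xmeta_en ||
	    sh->config.reclaim_mode != tmp.reclaim_mode) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
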
 drivers/net/mlx5/linux/mlx5_os.c   |  95 ++----
 drivers/net/mlx5/mlx5.c            | 453 +++++++++++++++++++++--------
 drivers/net/mlx5/mlx5.h            |  43 +--
 drivers/net/mlx5/mlx5_ethdev.c     |   3 +-
 drivers/net/mlx5/mlx5_flow.c       |  30 +-
 drivers/net/mlx5/mlx5_flow.h       |   2 +-
 drivers/net/mlx5/mlx5_flow_dv.c    |  45 +--
 drivers/net/mlx5/mlx5_flow_meter.c |  10 +-
 drivers/net/mlx5/mlx5_rxq.c        |   7 +-
 drivers/net/mlx5/mlx5_trigger.c    |  10 +-
 drivers/net/mlx5/mlx5_txpp.c       |  12 +-
 drivers/net/mlx5/mlx5_txq.c        |   2 +-
 drivers/net/mlx5/windows/mlx5_os.c |  35 +--
 13 files changed, 457 insertions(+), 290 deletions(-)
  

Patch

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 13db399b5e..50cc287e73 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -436,7 +436,7 @@  __mlx5_discovery_misc5_cap(struct mlx5_priv *priv)
 	dv_attr.priority = 3;
 #ifdef HAVE_MLX5DV_DR_ESWITCH
 	void *misc2_m;
-	if (priv->config.dv_esw_en) {
+	if (priv->sh->config.dv_esw_en) {
 		/* FDB enabled reg_c_0 */
 		dv_attr.match_criteria_enable |=
 				(1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT);
@@ -557,7 +557,7 @@  mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 	}
 	sh->tx_domain = domain;
 #ifdef HAVE_MLX5DV_DR_ESWITCH
-	if (priv->config.dv_esw_en) {
+	if (sh->config.dv_esw_en) {
 		domain = mlx5_glue->dr_create_domain(sh->cdev->ctx,
 						     MLX5DV_DR_DOMAIN_TYPE_FDB);
 		if (!domain) {
@@ -579,20 +579,20 @@  mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 		goto error;
 	}
 #endif
-	if (!sh->tunnel_hub && priv->config.dv_miss_info)
+	if (!sh->tunnel_hub && sh->config.dv_miss_info)
 		err = mlx5_alloc_tunnel_hub(sh);
 	if (err) {
 		DRV_LOG(ERR, "mlx5_alloc_tunnel_hub failed err=%d", err);
 		goto error;
 	}
-	if (priv->config.reclaim_mode == MLX5_RCM_AGGR) {
+	if (sh->config.reclaim_mode == MLX5_RCM_AGGR) {
 		mlx5_glue->dr_reclaim_domain_memory(sh->rx_domain, 1);
 		mlx5_glue->dr_reclaim_domain_memory(sh->tx_domain, 1);
 		if (sh->fdb_domain)
 			mlx5_glue->dr_reclaim_domain_memory(sh->fdb_domain, 1);
 	}
 	sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
-	if (!priv->config.allow_duplicate_pattern) {
+	if (!sh->config.allow_duplicate_pattern) {
 #ifndef HAVE_MLX5_DR_ALLOW_DUPLICATE
 		DRV_LOG(WARNING, "Disallow duplicate pattern is not supported - maybe old rdma-core version?");
 #endif
@@ -859,7 +859,7 @@  mlx5_flow_drop_action_config(struct rte_eth_dev *dev __rte_unused)
 #ifdef HAVE_MLX5DV_DR
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	if (!priv->config.dv_flow_en || !priv->sh->dr_drop_action)
+	if (!priv->sh->config.dv_flow_en || !priv->sh->dr_drop_action)
 		return;
 	/**
 	 * DR supports drop action placeholder when it is supported;
@@ -1115,31 +1115,9 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 			strerror(rte_errno));
 		return NULL;
 	}
-	sh = mlx5_alloc_shared_dev_ctx(spawn, config);
+	sh = mlx5_alloc_shared_dev_ctx(spawn);
 	if (!sh)
 		return NULL;
-	/* Update final values for devargs before check sibling config. */
-	if (config->dv_flow_en && !sh->dev_cap.dv_flow_en) {
-		DRV_LOG(WARNING, "DV flow is not supported.");
-		config->dv_flow_en = 0;
-	}
-	if (config->dv_esw_en && !sh->dev_cap.dv_esw_en) {
-		DRV_LOG(WARNING, "E-Switch DV flow is not supported.");
-		config->dv_esw_en = 0;
-	}
-	if (config->dv_miss_info && config->dv_esw_en)
-		config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
-	if (!config->dv_esw_en &&
-	    config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
-		DRV_LOG(WARNING,
-			"Metadata mode %u is not supported (no E-Switch).",
-			config->dv_xmeta_en);
-		config->dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
-	}
-	/* Check sibling device configurations. */
-	err = mlx5_dev_check_sibling_config(sh, config, dpdk_dev);
-	if (err)
-		goto error;
 	nl_rdma = mlx5_nl_init(NETLINK_RDMA);
 	/* Check port status. */
 	if (spawn->phys_port <= UINT8_MAX) {
@@ -1314,7 +1292,7 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	if (sh->cdev->config.devx) {
 		sh->steering_format_version = hca_attr->steering_format_version;
 		/* LRO is supported only when DV flow enabled. */
-		if (sh->dev_cap.lro_supported && config->dv_flow_en)
+		if (sh->dev_cap.lro_supported && sh->config.dv_flow_en)
 			sh->dev_cap.lro_supported = 0;
 		if (sh->dev_cap.lro_supported) {
 			/*
@@ -1331,7 +1309,7 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	(defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER) || \
 	 defined(HAVE_MLX5_DR_CREATE_ACTION_ASO))
 		if (hca_attr->qos.sup && hca_attr->qos.flow_meter_old &&
-		    config->dv_flow_en) {
+		    sh->config.dv_flow_en) {
 			uint8_t reg_c_mask = hca_attr->qos.flow_meter_reg_c_ids;
 			/*
 			 * Meter needs two REG_C's for color match and pre-sfx
@@ -1405,7 +1383,7 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO && HAVE_MLX5_DR_ACTION_ASO_CT */
 #if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_SAMPLE)
 		if (hca_attr->log_max_ft_sampler_num > 0  &&
-		    config->dv_flow_en) {
+		    sh->config.dv_flow_en) {
 			priv->sampler_en = 1;
 			DRV_LOG(DEBUG, "Sampler enabled!");
 		} else {
@@ -1436,11 +1414,6 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	}
 	DRV_LOG(DEBUG, "Rx CQE compression is %ssupported",
 			config->cqe_comp ? "" : "not ");
-	if (config->tx_pp && !sh->dev_cap.txpp_en) {
-		DRV_LOG(ERR, "Packet pacing is not supported.");
-		err = ENODEV;
-		goto error;
-	}
 	if (config->std_delay_drop || config->hp_delay_drop) {
 		if (!hca_attr->rq_delay_drop) {
 			config->std_delay_drop = 0;
@@ -1450,17 +1423,6 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 				priv->dev_port);
 		}
 	}
-	/*
-	 * If HW has bug working with tunnel packet decapsulation and
-	 * scatter FCS, and decapsulation is needed, clear the hw_fcs_strip
-	 * bit. Then RTE_ETH_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
-	 */
-	if (sh->dev_cap.scatter_fcs_w_decap_disable && config->decap_en)
-		config->hw_fcs_strip = 0;
-	else
-		config->hw_fcs_strip = sh->dev_cap.hw_fcs_strip;
-	DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
-		(config->hw_fcs_strip ? "" : "not "));
 	if (config->mprq.enabled && !sh->dev_cap.mprq.enabled) {
 		DRV_LOG(WARNING, "Multi-Packet RQ isn't supported.");
 		config->mprq.enabled = 0;
@@ -1546,7 +1508,7 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	eth_dev->rx_queue_count = mlx5_rx_queue_count;
 	/* Register MAC address. */
 	claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
-	if (sh->dev_cap.vf && config->vf_nl_en)
+	if (sh->dev_cap.vf && sh->config.vf_nl_en)
 		mlx5_nl_mac_addr_sync(priv->nl_socket_route,
 				      mlx5_ifindex(eth_dev),
 				      eth_dev->data->mac_addrs,
@@ -1572,8 +1534,8 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	/* Store device configuration on private structure. */
 	priv->config = *config;
 	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
-		icfg[i].release_mem_en = !!config->reclaim_mode;
-		if (config->reclaim_mode)
+		icfg[i].release_mem_en = !!sh->config.reclaim_mode;
+		if (sh->config.reclaim_mode)
 			icfg[i].per_core_cache = 0;
 		priv->flows[i] = mlx5_ipool_create(&icfg[i]);
 		if (!priv->flows[i])
@@ -1581,14 +1543,14 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	}
 	/* Create context for virtual machine VLAN workaround. */
 	priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex);
-	if (config->dv_flow_en) {
+	if (sh->config.dv_flow_en) {
 		err = mlx5_alloc_shared_dr(priv);
 		if (err)
 			goto error;
 		if (mlx5_flex_item_port_init(eth_dev) < 0)
 			goto error;
 	}
-	if (sh->cdev->config.devx && config->dv_flow_en &&
+	if (sh->cdev->config.devx && sh->config.dv_flow_en &&
 	    sh->dev_cap.dest_tir) {
 		priv->obj_ops = devx_obj_ops;
 		mlx5_queue_counter_id_prepare(eth_dev);
@@ -1604,7 +1566,7 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	} else {
 		priv->obj_ops = ibv_obj_ops;
 	}
-	if (config->tx_pp &&
+	if (sh->config.tx_pp &&
 	    priv->obj_ops.txq_obj_new != mlx5_txq_devx_obj_new) {
 		/*
 		 * HAVE_MLX5DV_DEVX_UAR_OFFSET is required to support
@@ -1635,11 +1597,11 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		goto error;
 	}
 	mlx5_set_metadata_mask(eth_dev);
-	if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+	if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
 	    !priv->sh->dv_regc0_mask) {
 		DRV_LOG(ERR, "metadata mode %u is not supported "
 			     "(no metadata reg_c[0] is available)",
-			     priv->config.dv_xmeta_en);
+			     sh->config.dv_xmeta_en);
 			err = ENOTSUP;
 			goto error;
 	}
@@ -1664,16 +1626,16 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		DRV_LOG(DEBUG,
 			"port %u extensive metadata register is not supported",
 			eth_dev->data->port_id);
-		if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+		if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
 			DRV_LOG(ERR, "metadata mode %u is not supported "
 				     "(no metadata registers available)",
-				     priv->config.dv_xmeta_en);
+				     sh->config.dv_xmeta_en);
 			err = ENOTSUP;
 			goto error;
 		}
 	}
-	if (priv->config.dv_flow_en &&
-	    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+	if (sh->config.dv_flow_en &&
+	    sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
 	    mlx5_flow_ext_mreg_supported(eth_dev) &&
 	    priv->sh->dv_regc0_mask) {
 		priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
@@ -1692,7 +1654,7 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	rte_spinlock_init(&priv->shared_act_sl);
 	mlx5_flow_counter_mode_config(eth_dev);
 	mlx5_flow_drop_action_config(eth_dev);
-	if (priv->config.dv_flow_en)
+	if (sh->config.dv_flow_en)
 		eth_dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
 	return eth_dev;
 error:
@@ -1950,15 +1912,10 @@  mlx5_os_config_default(struct mlx5_dev_config *config)
 	config->txq_inline_min = MLX5_ARG_UNSET;
 	config->txq_inline_mpw = MLX5_ARG_UNSET;
 	config->txqs_inline = MLX5_ARG_UNSET;
-	config->vf_nl_en = 1;
 	config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
 	config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
 	config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM;
-	config->dv_esw_en = 1;
-	config->dv_flow_en = 1;
-	config->decap_en = 1;
 	config->log_hp_size = MLX5_ARG_UNSET;
-	config->allow_duplicate_pattern = 1;
 	config->std_delay_drop = 0;
 	config->hp_delay_drop = 0;
 }
@@ -2574,6 +2531,12 @@  mlx5_os_net_probe(struct mlx5_common_device *cdev)
 			strerror(rte_errno));
 		return -rte_errno;
 	}
+	ret = mlx5_probe_again_args_validate(cdev);
+	if (ret) {
+		DRV_LOG(ERR, "Probe again parameters are not compatible : %s",
+			strerror(rte_errno));
+		return -rte_errno;
+	}
 	if (mlx5_dev_is_pci(cdev->dev))
 		return mlx5_os_pci_probe(cdev);
 	else
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index bd23ce5afd..75ff11c357 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -533,7 +533,7 @@  mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
 	fallback = true;
 #else
 	fallback = false;
-	if (!sh->cdev->config.devx || !priv->config.dv_flow_en ||
+	if (!sh->cdev->config.devx || !sh->config.dv_flow_en ||
 	    !hca_attr->flow_counters_dump ||
 	    !(hca_attr->flow_counter_bulk_alloc_bitmap & 0x4) ||
 	    (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
@@ -836,12 +836,9 @@  mlx5_flow_aso_ct_mng_close(struct mlx5_dev_ctx_shared *sh)
  *
  * @param[in] sh
  *   Pointer to mlx5_dev_ctx_shared object.
- * @param[in] config
- *   Pointer to user dev config.
  */
 static void
-mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh,
-		       const struct mlx5_dev_config *config)
+mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh)
 {
 	uint8_t i;
 	struct mlx5_indexed_pool_config cfg;
@@ -856,12 +853,12 @@  mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh,
 		 * according to PCI function flow configuration.
 		 */
 		case MLX5_IPOOL_MLX5_FLOW:
-			cfg.size = config->dv_flow_en ?
+			cfg.size = sh->config.dv_flow_en ?
 				sizeof(struct mlx5_flow_handle) :
 				MLX5_FLOW_HANDLE_VERBS_SIZE;
 			break;
 		}
-		if (config->reclaim_mode) {
+		if (sh->config.reclaim_mode) {
 			cfg.release_mem_en = 1;
 			cfg.per_core_cache = 0;
 		} else {
@@ -1169,6 +1166,191 @@  mlx5_setup_tis(struct mlx5_dev_ctx_shared *sh)
 	return 0;
 }
 
+/**
+ * Verify and store value for share device argument.
+ *
+ * @param[in] key
+ *   Key argument to verify.
+ * @param[in] val
+ *   Value associated with key.
+ * @param opaque
+ *   User data.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_dev_args_check_handler(const char *key, const char *val, void *opaque)
+{
+	struct mlx5_sh_config *config = opaque;
+	signed long tmp;
+
+	errno = 0;
+	tmp = strtol(val, NULL, 0);
+	if (errno) {
+		rte_errno = errno;
+		DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
+		return -rte_errno;
+	}
+	if (tmp < 0 && strcmp(MLX5_TX_PP, key) && strcmp(MLX5_TX_SKEW, key)) {
+		/* Negative values are acceptable for some keys only. */
+		rte_errno = EINVAL;
+		DRV_LOG(WARNING, "%s: invalid negative value \"%s\"", key, val);
+		return -rte_errno;
+	}
+	if (strcmp(MLX5_TX_PP, key) == 0) {
+		unsigned long mod = tmp >= 0 ? tmp : -tmp;
+
+		if (!mod) {
+			DRV_LOG(ERR, "Zero Tx packet pacing parameter.");
+			rte_errno = EINVAL;
+			return -rte_errno;
+		}
+		config->tx_pp = tmp;
+	} else if (strcmp(MLX5_TX_SKEW, key) == 0) {
+		config->tx_skew = tmp;
+	} else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
+		config->l3_vxlan_en = !!tmp;
+	} else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
+		config->vf_nl_en = !!tmp;
+	} else if (strcmp(MLX5_DV_ESW_EN, key) == 0) {
+		config->dv_esw_en = !!tmp;
+	} else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
+		config->dv_flow_en = !!tmp;
+	} else if (strcmp(MLX5_DV_XMETA_EN, key) == 0) {
+		if (tmp != MLX5_XMETA_MODE_LEGACY &&
+		    tmp != MLX5_XMETA_MODE_META16 &&
+		    tmp != MLX5_XMETA_MODE_META32 &&
+		    tmp != MLX5_XMETA_MODE_MISS_INFO) {
+			DRV_LOG(ERR, "Invalid extensive metadata parameter.");
+			rte_errno = EINVAL;
+			return -rte_errno;
+		}
+		if (tmp != MLX5_XMETA_MODE_MISS_INFO)
+			config->dv_xmeta_en = tmp;
+		else
+			config->dv_miss_info = 1;
+	} else if (strcmp(MLX5_LACP_BY_USER, key) == 0) {
+		config->lacp_by_user = !!tmp;
+	} else if (strcmp(MLX5_RECLAIM_MEM, key) == 0) {
+		if (tmp != MLX5_RCM_NONE &&
+		    tmp != MLX5_RCM_LIGHT &&
+		    tmp != MLX5_RCM_AGGR) {
+			DRV_LOG(ERR, "Unrecognize %s: \"%s\"", key, val);
+			rte_errno = EINVAL;
+			return -rte_errno;
+		}
+		config->reclaim_mode = tmp;
+	} else if (strcmp(MLX5_DECAP_EN, key) == 0) {
+		config->decap_en = !!tmp;
+	} else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) {
+		config->allow_duplicate_pattern = !!tmp;
+	}
+	return 0;
+}
+
+/**
+ * Parse user device parameters and adjust them according to device
+ * capabilities.
+ *
+ * @param sh
+ *   Pointer to shared device context.
+ * @param devargs
+ *   Device arguments structure.
+ * @param config
+ *   Pointer to shared device configuration structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_shared_dev_ctx_args_config(struct mlx5_dev_ctx_shared *sh,
+				struct rte_devargs *devargs,
+				struct mlx5_sh_config *config)
+{
+	struct rte_kvargs *kvlist;
+	int ret = 0;
+
+	/* Default configuration. */
+	memset(config, 0, sizeof(*config));
+	config->vf_nl_en = 1;
+	config->dv_esw_en = 1;
+	config->dv_flow_en = 1;
+	config->decap_en = 1;
+	config->allow_duplicate_pattern = 1;
+	/* Parse device parameters. */
+	if (devargs != NULL) {
+		kvlist = rte_kvargs_parse(devargs->args, NULL);
+		if (kvlist == NULL) {
+			DRV_LOG(ERR,
+				"Failed to parse shared device arguments.");
+			rte_errno = EINVAL;
+			return -rte_errno;
+		}
+		/* Process parameters. */
+		ret = rte_kvargs_process(kvlist, NULL,
+					 mlx5_dev_args_check_handler, config);
+		rte_kvargs_free(kvlist);
+		if (ret) {
+			DRV_LOG(ERR, "Failed to process device arguments: %s",
+				strerror(rte_errno));
+			return -rte_errno;
+		}
+	}
+	/* Adjust parameters according to device capabilities. */
+	if (config->dv_flow_en && !sh->dev_cap.dv_flow_en) {
+		DRV_LOG(WARNING, "DV flow is not supported.");
+		config->dv_flow_en = 0;
+	}
+	if (config->dv_esw_en && !sh->dev_cap.dv_esw_en) {
+		DRV_LOG(DEBUG, "E-Switch DV flow is not supported.");
+		config->dv_esw_en = 0;
+	}
+	if (config->dv_miss_info && config->dv_esw_en)
+		config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
+	if (!config->dv_esw_en &&
+	    config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+		DRV_LOG(WARNING,
+			"Metadata mode %u is not supported (no E-Switch).",
+			config->dv_xmeta_en);
+		config->dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
+	}
+	if (config->tx_pp && !sh->dev_cap.txpp_en) {
+		DRV_LOG(ERR, "Packet pacing is not supported.");
+		rte_errno = ENODEV;
+		return -rte_errno;
+	}
+	if (!config->tx_pp && config->tx_skew) {
+		DRV_LOG(WARNING,
+			"\"tx_skew\" doesn't affect without \"tx_pp\".");
+	}
+	/*
+	 * If HW has bug working with tunnel packet decapsulation and scatter
+	 * FCS, and decapsulation is needed, clear the hw_fcs_strip bit.
+	 * Then RTE_ETH_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
+	 */
+	if (sh->dev_cap.scatter_fcs_w_decap_disable && sh->config.decap_en)
+		config->hw_fcs_strip = 0;
+	else
+		config->hw_fcs_strip = sh->dev_cap.hw_fcs_strip;
+	DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
+		(config->hw_fcs_strip ? "" : "not "));
+	DRV_LOG(DEBUG, "\"tx_pp\" is %d.", config->tx_pp);
+	DRV_LOG(DEBUG, "\"tx_skew\" is %d.", config->tx_skew);
+	DRV_LOG(DEBUG, "\"reclaim_mode\" is %u.", config->reclaim_mode);
+	DRV_LOG(DEBUG, "\"dv_esw_en\" is %u.", config->dv_esw_en);
+	DRV_LOG(DEBUG, "\"dv_flow_en\" is %u.", config->dv_flow_en);
+	DRV_LOG(DEBUG, "\"dv_xmeta_en\" is %u.", config->dv_xmeta_en);
+	DRV_LOG(DEBUG, "\"dv_miss_info\" is %u.", config->dv_miss_info);
+	DRV_LOG(DEBUG, "\"l3_vxlan_en\" is %u.", config->l3_vxlan_en);
+	DRV_LOG(DEBUG, "\"vf_nl_en\" is %u.", config->vf_nl_en);
+	DRV_LOG(DEBUG, "\"lacp_by_user\" is %u.", config->lacp_by_user);
+	DRV_LOG(DEBUG, "\"decap_en\" is %u.", config->decap_en);
+	DRV_LOG(DEBUG, "\"allow_duplicate_pattern\" is %u.",
+		config->allow_duplicate_pattern);
+	return 0;
+}
+
 /**
  * Configure realtime timestamp format.
  *
@@ -1216,16 +1398,13 @@  mlx5_rt_timestamp_config(struct mlx5_dev_ctx_shared *sh,
  *
  * @param[in] spawn
  *   Pointer to the device attributes (name, port, etc).
- * @param[in] config
- *   Pointer to device configuration structure.
  *
  * @return
  *   Pointer to mlx5_dev_ctx_shared object on success,
  *   otherwise NULL and rte_errno is set.
  */
 struct mlx5_dev_ctx_shared *
-mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
-			  const struct mlx5_dev_config *config)
+mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn)
 {
 	struct mlx5_dev_ctx_shared *sh;
 	int err = 0;
@@ -1264,9 +1443,15 @@  mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 		DRV_LOG(ERR, "Fail to configure device capabilities.");
 		goto error;
 	}
+	err = mlx5_shared_dev_ctx_args_config(sh, sh->cdev->dev->devargs,
+					      &sh->config);
+	if (err) {
+		DRV_LOG(ERR, "Failed to process device configure: %s",
+			strerror(rte_errno));
+		goto error;
+	}
 	sh->refcnt = 1;
 	sh->max_port = spawn->max_port;
-	sh->reclaim_mode = config->reclaim_mode;
 	strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->cdev->ctx),
 		sizeof(sh->ibdev_name) - 1);
 	strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->cdev->ctx),
@@ -1310,7 +1495,7 @@  mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 	}
 	mlx5_flow_aging_init(sh);
 	mlx5_flow_counters_mng_init(sh);
-	mlx5_flow_ipool_create(sh, config);
+	mlx5_flow_ipool_create(sh);
 	/* Add context to the global device list. */
 	LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);
 	rte_spinlock_init(&sh->geneve_tlv_opt_sl);
@@ -1919,14 +2104,18 @@  static int
 mlx5_args_check(const char *key, const char *val, void *opaque)
 {
 	struct mlx5_dev_config *config = opaque;
-	unsigned long mod;
 	signed long tmp;
 
 	/* No-op, port representors are processed in mlx5_dev_spawn(). */
 	if (!strcmp(MLX5_DRIVER_KEY, key) || !strcmp(MLX5_REPRESENTOR, key) ||
 	    !strcmp(MLX5_SYS_MEM_EN, key) || !strcmp(MLX5_TX_DB_NC, key) ||
-	    !strcmp(MLX5_MR_MEMPOOL_REG_EN, key) ||
-	    !strcmp(MLX5_MR_EXT_MEMSEG_EN, key))
+	    !strcmp(MLX5_MR_MEMPOOL_REG_EN, key) || !strcmp(MLX5_TX_PP, key) ||
+	    !strcmp(MLX5_MR_EXT_MEMSEG_EN, key) || !strcmp(MLX5_TX_SKEW, key) ||
+	    !strcmp(MLX5_RECLAIM_MEM, key) || !strcmp(MLX5_DECAP_EN, key) ||
+	    !strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) ||
+	    !strcmp(MLX5_L3_VXLAN_EN, key) || !strcmp(MLX5_VF_NL_EN, key) ||
+	    !strcmp(MLX5_DV_ESW_EN, key) || !strcmp(MLX5_DV_FLOW_EN, key) ||
+	    !strcmp(MLX5_DV_XMETA_EN, key) || !strcmp(MLX5_LACP_BY_USER, key))
 		return 0;
 	errno = 0;
 	tmp = strtol(val, NULL, 0);
@@ -1935,13 +2124,12 @@  mlx5_args_check(const char *key, const char *val, void *opaque)
 		DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
 		return -rte_errno;
 	}
-	if (tmp < 0 && strcmp(MLX5_TX_PP, key) && strcmp(MLX5_TX_SKEW, key)) {
+	if (tmp < 0) {
 		/* Negative values are acceptable for some keys only. */
 		rte_errno = EINVAL;
 		DRV_LOG(WARNING, "%s: invalid negative value \"%s\"", key, val);
 		return -rte_errno;
 	}
-	mod = tmp >= 0 ? tmp : -tmp;
 	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
 		if (tmp > MLX5_CQE_RESP_FORMAT_L34H_STRIDX) {
 			DRV_LOG(ERR, "invalid CQE compression "
@@ -1987,41 +2175,8 @@  mlx5_args_check(const char *key, const char *val, void *opaque)
 		config->txq_inline_mpw = tmp;
 	} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
 		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
-	} else if (strcmp(MLX5_TX_PP, key) == 0) {
-		if (!mod) {
-			DRV_LOG(ERR, "Zero Tx packet pacing parameter");
-			rte_errno = EINVAL;
-			return -rte_errno;
-		}
-		config->tx_pp = tmp;
-	} else if (strcmp(MLX5_TX_SKEW, key) == 0) {
-		config->tx_skew = tmp;
 	} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
 		config->rx_vec_en = !!tmp;
-	} else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
-		config->l3_vxlan_en = !!tmp;
-	} else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
-		config->vf_nl_en = !!tmp;
-	} else if (strcmp(MLX5_DV_ESW_EN, key) == 0) {
-		config->dv_esw_en = !!tmp;
-	} else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
-		config->dv_flow_en = !!tmp;
-	} else if (strcmp(MLX5_DV_XMETA_EN, key) == 0) {
-		if (tmp != MLX5_XMETA_MODE_LEGACY &&
-		    tmp != MLX5_XMETA_MODE_META16 &&
-		    tmp != MLX5_XMETA_MODE_META32 &&
-		    tmp != MLX5_XMETA_MODE_MISS_INFO) {
-			DRV_LOG(ERR, "invalid extensive "
-				     "metadata parameter");
-			rte_errno = EINVAL;
-			return -rte_errno;
-		}
-		if (tmp != MLX5_XMETA_MODE_MISS_INFO)
-			config->dv_xmeta_en = tmp;
-		else
-			config->dv_miss_info = 1;
-	} else if (strcmp(MLX5_LACP_BY_USER, key) == 0) {
-		config->lacp_by_user = !!tmp;
 	} else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) {
 		config->max_dump_files_num = tmp;
 	} else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) {
@@ -2030,19 +2185,6 @@  mlx5_args_check(const char *key, const char *val, void *opaque)
 		DRV_LOG(DEBUG, "class argument is %s.", val);
 	} else if (strcmp(MLX5_HP_BUF_SIZE, key) == 0) {
 		config->log_hp_size = tmp;
-	} else if (strcmp(MLX5_RECLAIM_MEM, key) == 0) {
-		if (tmp != MLX5_RCM_NONE &&
-		    tmp != MLX5_RCM_LIGHT &&
-		    tmp != MLX5_RCM_AGGR) {
-			DRV_LOG(ERR, "Unrecognized %s: \"%s\"", key, val);
-			rte_errno = EINVAL;
-			return -rte_errno;
-		}
-		config->reclaim_mode = tmp;
-	} else if (strcmp(MLX5_DECAP_EN, key) == 0) {
-		config->decap_en = !!tmp;
-	} else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) {
-		config->allow_duplicate_pattern = !!tmp;
 	} else if (strcmp(MLX5_DELAY_DROP, key) == 0) {
 		config->std_delay_drop = !!(tmp & MLX5_DELAY_DROP_STANDARD);
 		config->hp_delay_drop = !!(tmp & MLX5_DELAY_DROP_HAIRPIN);
@@ -2089,6 +2231,130 @@  mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
 	return ret;
 }
 
+/**
+ * Check sibling device configurations when probing again.
+ *
+ * Sibling devices sharing infiniband device context should have compatible
+ * configurations. This regards representors and bonding device.
+ *
+ * @param cdev
+ *   Pointer to mlx5 device structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_probe_again_args_validate(struct mlx5_common_device *cdev)
+{
+	struct mlx5_dev_ctx_shared *sh = NULL;
+	struct mlx5_sh_config *config;
+	int ret;
+
+	/* Secondary process should not handle devargs. */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
+	/* Search for IB context by common device pointer. */
+	LIST_FOREACH(sh, &mlx5_dev_ctx_list, next)
+		if (sh->cdev == cdev)
+			break;
+	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
+	/* There is sh for this device -> it isn't probe again. */
+	if (sh == NULL)
+		return 0;
+	config = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
+			     sizeof(struct mlx5_sh_config),
+			     RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+	if (config == NULL) {
+		rte_errno = -ENOMEM;
+		return -rte_errno;
+	}
+	/*
+	 * Creates a temporary IB context configure structure according to new
+	 * devargs attached in probing again.
+	 */
+	ret = mlx5_shared_dev_ctx_args_config(sh, sh->cdev->dev->devargs,
+					      config);
+	if (ret) {
+		DRV_LOG(ERR, "Failed to process device configure: %s",
+			strerror(rte_errno));
+		mlx5_free(config);
+		return ret;
+	}
+	/*
+	 * Checks the match between the temporary structure and the existing
+	 * IB context structure.
+	 */
+	if (sh->config.dv_flow_en ^ config->dv_flow_en) {
+		DRV_LOG(ERR, "\"dv_flow_en\" "
+			"configuration mismatch for shared %s context.",
+			sh->ibdev_name);
+		goto error;
+	}
+	if ((sh->config.dv_xmeta_en ^ config->dv_xmeta_en) ||
+	    (sh->config.dv_miss_info ^ config->dv_miss_info)) {
+		DRV_LOG(ERR, "\"dv_xmeta_en\" "
+			"configuration mismatch for shared %s context.",
+			sh->ibdev_name);
+		goto error;
+	}
+	if (sh->config.dv_esw_en ^ config->dv_esw_en) {
+		DRV_LOG(ERR, "\"dv_esw_en\" "
+			"configuration mismatch for shared %s context.",
+			sh->ibdev_name);
+		goto error;
+	}
+	if (sh->config.reclaim_mode ^ config->reclaim_mode) {
+		DRV_LOG(ERR, "\"reclaim_mode\" "
+			"configuration mismatch for shared %s context.",
+			sh->ibdev_name);
+		goto error;
+	}
+	if (sh->config.allow_duplicate_pattern ^
+	    config->allow_duplicate_pattern) {
+		DRV_LOG(ERR, "\"allow_duplicate_pattern\" "
+			"configuration mismatch for shared %s context.",
+			sh->ibdev_name);
+		goto error;
+	}
+	if (sh->config.l3_vxlan_en ^ config->l3_vxlan_en) {
+		DRV_LOG(ERR, "\"l3_vxlan_en\" "
+			"configuration mismatch for shared %s context.",
+			sh->ibdev_name);
+		goto error;
+	}
+	if (sh->config.decap_en ^ config->decap_en) {
+		DRV_LOG(ERR, "\"decap_en\" "
+			"configuration mismatch for shared %s context.",
+			sh->ibdev_name);
+		goto error;
+	}
+	if (sh->config.lacp_by_user ^ config->lacp_by_user) {
+		DRV_LOG(ERR, "\"lacp_by_user\" "
+			"configuration mismatch for shared %s context.",
+			sh->ibdev_name);
+		goto error;
+	}
+	if (sh->config.tx_pp ^ config->tx_pp) {
+		DRV_LOG(ERR, "\"tx_pp\" "
+			"configuration mismatch for shared %s context.",
+			sh->ibdev_name);
+		goto error;
+	}
+	if (sh->config.tx_skew ^ config->tx_skew) {
+		DRV_LOG(ERR, "\"tx_skew\" "
+			"configuration mismatch for shared %s context.",
+			sh->ibdev_name);
+		goto error;
+	}
+	mlx5_free(config);
+	return 0;
+error:
+	mlx5_free(config);
+	rte_errno = EINVAL;
+	return -rte_errno;
+}
+
 /**
  * Configures the minimal amount of data to inline into WQE
  * while sending packets.
@@ -2231,7 +2497,7 @@  mlx5_set_metadata_mask(struct rte_eth_dev *dev)
 	uint32_t meta, mark, reg_c0;
 
 	reg_c0 = ~priv->vport_meta_mask;
-	switch (priv->config.dv_xmeta_en) {
+	switch (sh->config.dv_xmeta_en) {
 	case MLX5_XMETA_MODE_LEGACY:
 		meta = UINT32_MAX;
 		mark = MLX5_FLOW_MARK_MASK;
@@ -2265,7 +2531,7 @@  mlx5_set_metadata_mask(struct rte_eth_dev *dev)
 				 sh->dv_meta_mask, reg_c0);
 	else
 		sh->dv_regc0_mask = reg_c0;
-	DRV_LOG(DEBUG, "metadata mode %u", priv->config.dv_xmeta_en);
+	DRV_LOG(DEBUG, "metadata mode %u", sh->config.dv_xmeta_en);
 	DRV_LOG(DEBUG, "metadata MARK mask %08X", sh->dv_mark_mask);
 	DRV_LOG(DEBUG, "metadata META mask %08X", sh->dv_meta_mask);
 	DRV_LOG(DEBUG, "metadata reg_c0 mask %08X", sh->dv_regc0_mask);
@@ -2291,61 +2557,6 @@  rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n)
 	return RTE_DIM(dynf_names);
 }
 
-/**
- * Check sibling device configurations.
- *
- * Sibling devices sharing the Infiniband device context should have compatible
- * configurations. This regards representors and bonding device.
- *
- * @param sh
- *   Shared device context.
- * @param config
- *   Configuration of the device is going to be created.
- * @param dpdk_dev
- *   Backing DPDK device.
- *
- * @return
- *   0 on success, EINVAL otherwise
- */
-int
-mlx5_dev_check_sibling_config(struct mlx5_dev_ctx_shared *sh,
-			      struct mlx5_dev_config *config,
-			      struct rte_device *dpdk_dev)
-{
-	struct mlx5_dev_config *sh_conf = NULL;
-	uint16_t port_id;
-
-	MLX5_ASSERT(sh);
-	/* Nothing to compare for the single/first device. */
-	if (sh->refcnt == 1)
-		return 0;
-	/* Find the device with shared context. */
-	MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
-		struct mlx5_priv *opriv =
-			rte_eth_devices[port_id].data->dev_private;
-
-		if (opriv && opriv->sh == sh) {
-			sh_conf = &opriv->config;
-			break;
-		}
-	}
-	if (!sh_conf)
-		return 0;
-	if (sh_conf->dv_flow_en ^ config->dv_flow_en) {
-		DRV_LOG(ERR, "\"dv_flow_en\" configuration mismatch"
-			     " for shared %s context", sh->ibdev_name);
-		rte_errno = EINVAL;
-		return rte_errno;
-	}
-	if (sh_conf->dv_xmeta_en ^ config->dv_xmeta_en) {
-		DRV_LOG(ERR, "\"dv_xmeta_en\" configuration mismatch"
-			     " for shared %s context", sh->ibdev_name);
-		rte_errno = EINVAL;
-		return rte_errno;
-	}
-	return 0;
-}
-
 /**
  * Look for the ethernet device belonging to mlx5 driver.
  *
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index bda09cf96e..5ca48ef68f 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -252,23 +252,10 @@  struct mlx5_stats_ctrl {
  */
 struct mlx5_dev_config {
 	unsigned int hw_vlan_insert:1; /* VLAN insertion in WQE is supported. */
-	unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
 	unsigned int hw_padding:1; /* End alignment padding is supported. */
 	unsigned int cqe_comp:1; /* CQE compression is enabled. */
 	unsigned int cqe_comp_fmt:3; /* CQE compression format. */
 	unsigned int rx_vec_en:1; /* Rx vector is enabled. */
-	unsigned int l3_vxlan_en:1; /* Enable L3 VXLAN flow creation. */
-	unsigned int vf_nl_en:1; /* Enable Netlink requests in VF mode. */
-	unsigned int dv_esw_en:1; /* Enable E-Switch DV flow. */
-	unsigned int dv_flow_en:1; /* Enable DV flow. */
-	unsigned int dv_xmeta_en:2; /* Enable extensive flow metadata. */
-	unsigned int lacp_by_user:1;
-	/* Enable user to manage LACP traffic. */
-	unsigned int reclaim_mode:2; /* Memory reclaim mode. */
-	unsigned int decap_en:1; /* Whether decap will be used or not. */
-	unsigned int dv_miss_info:1; /* restore packet after partial hw miss */
-	unsigned int allow_duplicate_pattern:1;
-	/* Allow/Prevent the duplicate rules pattern. */
 	unsigned int std_delay_drop:1; /* Enable standard Rxq delay drop. */
 	unsigned int hp_delay_drop:1; /* Enable hairpin Rxq delay drop. */
 	struct {
@@ -288,8 +275,29 @@  struct mlx5_dev_config {
 	int txq_inline_min; /* Minimal amount of data bytes to inline. */
 	int txq_inline_max; /* Max packet size for inlining with SEND. */
 	int txq_inline_mpw; /* Max packet size for inlining with eMPW. */
+};
+
+/*
+ * Share context device configuration structure.
+ * User device parameters disabled features.
+ * This structure updated once for device in mlx5_alloc_shared_dev_ctx()
+ * function and cannot change even when probing again.
+ */
+struct mlx5_sh_config {
 	int tx_pp; /* Timestamp scheduling granularity in nanoseconds. */
 	int tx_skew; /* Tx scheduling skew between WQE and data on wire. */
+	uint32_t reclaim_mode:2; /* Memory reclaim mode. */
+	uint32_t dv_esw_en:1; /* Enable E-Switch DV flow. */
+	uint32_t dv_flow_en:1; /* Enable DV flow. */
+	uint32_t dv_xmeta_en:2; /* Enable extensive flow metadata. */
+	uint32_t dv_miss_info:1; /* Restore packet after partial hw miss. */
+	uint32_t l3_vxlan_en:1; /* Enable L3 VXLAN flow creation. */
+	uint32_t vf_nl_en:1; /* Enable Netlink requests in VF mode. */
+	uint32_t lacp_by_user:1; /* Enable user to manage LACP traffic. */
+	uint32_t decap_en:1; /* Whether decap will be used or not. */
+	uint32_t hw_fcs_strip:1; /* FCS stripping is supported. */
+	uint32_t allow_duplicate_pattern:1;
+	/* Allow/Prevent the duplicate rules pattern. */
 };
 
 
@@ -1144,7 +1152,6 @@  struct mlx5_dev_ctx_shared {
 	uint32_t ct_aso_en:1; /* Connection Tracking ASO is supported. */
 	uint32_t tunnel_header_0_1:1; /* tunnel_header_0_1 is supported. */
 	uint32_t misc5_cap:1; /* misc5 matcher parameter is supported. */
-	uint32_t reclaim_mode:1; /* Reclaim memory. */
 	uint32_t dr_drop_action_en:1; /* Use DR drop action. */
 	uint32_t drop_action_check_flag:1; /* Check Flag for drop action. */
 	uint32_t flow_priority_check_flag:1; /* Check Flag for flow priority. */
@@ -1156,6 +1163,7 @@  struct mlx5_dev_ctx_shared {
 	char ibdev_name[MLX5_FS_NAME_MAX]; /* SYSFS dev name. */
 	char ibdev_path[MLX5_FS_PATH_MAX]; /* SYSFS dev path for secondary */
 	struct mlx5_dev_cap dev_cap; /* Device capabilities. */
+	struct mlx5_sh_config config; /* Device configuration. */
 	int numa_node; /* Numa node of backing physical device. */
 	/* Packet pacing related structure. */
 	struct mlx5_dev_txpp txpp;
@@ -1511,8 +1519,7 @@  int mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs);
 void mlx5_rt_timestamp_config(struct mlx5_dev_ctx_shared *sh,
 			      struct mlx5_hca_attr *hca_attr);
 struct mlx5_dev_ctx_shared *
-mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
-			   const struct mlx5_dev_config *config);
+mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn);
 void mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh);
 int mlx5_dev_ctx_shared_mempool_subscribe(struct rte_eth_dev *dev);
 void mlx5_free_table_hash_list(struct mlx5_priv *priv);
@@ -1520,9 +1527,7 @@  int mlx5_alloc_table_hash_list(struct mlx5_priv *priv);
 void mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
 			 struct mlx5_dev_config *config);
 void mlx5_set_metadata_mask(struct rte_eth_dev *dev);
-int mlx5_dev_check_sibling_config(struct mlx5_dev_ctx_shared *sh,
-				  struct mlx5_dev_config *config,
-				  struct rte_device *dpdk_dev);
+int mlx5_probe_again_args_validate(struct mlx5_common_device *cdev);
 bool mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev);
 int mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev);
 void mlx5_flow_counter_mode_config(struct rte_eth_dev *dev);
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index b7fe781d3a..9e478db8df 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -720,10 +720,9 @@  int
 mlx5_hairpin_cap_get(struct rte_eth_dev *dev, struct rte_eth_hairpin_cap *cap)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
 
 	if (!priv->sh->cdev->config.devx || !priv->sh->dev_cap.dest_tir ||
-	    !config->dv_flow_en) {
+	    !priv->sh->config.dv_flow_en) {
 		rte_errno = ENOTSUP;
 		return -rte_errno;
 	}
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 8bb9a72ba5..f8fc4621c9 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -901,7 +901,7 @@  mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
 		     struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_sh_config *config = &priv->sh->config;
 	enum modify_reg start_reg;
 	bool skip_mtr_reg = false;
 
@@ -1994,7 +1994,7 @@  mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
 					  "egress is not supported");
-	if (attributes->transfer && !priv->config.dv_esw_en)
+	if (attributes->transfer && !priv->sh->config.dv_esw_en)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
 					  NULL, "transfer is not supported");
@@ -2711,7 +2711,7 @@  mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
 		uint8_t vni[4];
 	} id = { .vlan_id = 0, };
 
-	if (!priv->config.l3_vxlan_en)
+	if (!priv->sh->config.l3_vxlan_en)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "L3 VXLAN is not enabled by device"
@@ -3429,11 +3429,11 @@  flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
 	if (type != MLX5_FLOW_TYPE_MAX)
 		return type;
 	/* If no OS specific type - continue with DV/VERBS selection */
-	if (attr->transfer && priv->config.dv_esw_en)
+	if (attr->transfer && priv->sh->config.dv_esw_en)
 		type = MLX5_FLOW_TYPE_DV;
 	if (!attr->transfer)
-		type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
-						 MLX5_FLOW_TYPE_VERBS;
+		type = priv->sh->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
+						     MLX5_FLOW_TYPE_VERBS;
 	return type;
 }
 
@@ -4105,7 +4105,7 @@  static bool flow_check_modify_action_type(struct rte_eth_dev *dev,
 		return true;
 	case RTE_FLOW_ACTION_TYPE_FLAG:
 	case RTE_FLOW_ACTION_TYPE_MARK:
-		if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
+		if (priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
 			return true;
 		else
 			return false;
@@ -4544,8 +4544,8 @@  flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
 	uint32_t mark_id;
 
 	/* Check whether extensive metadata feature is engaged. */
-	if (!priv->config.dv_flow_en ||
-	    priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
+	if (!priv->sh->config.dv_flow_en ||
+	    priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
 	    !mlx5_flow_ext_mreg_supported(dev) ||
 	    !priv->sh->dv_regc0_mask)
 		return 0;
@@ -4604,7 +4604,7 @@  flow_mreg_update_copy_table(struct rte_eth_dev *dev,
 			    struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_sh_config *config = &priv->sh->config;
 	struct mlx5_flow_mreg_copy_resource *mcp_res;
 	const struct rte_flow_action_mark *mark;
 
@@ -5740,7 +5740,7 @@  flow_create_split_metadata(struct rte_eth_dev *dev,
 			   struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_sh_config *config = &priv->sh->config;
 	const struct rte_flow_action *qrss = NULL;
 	struct rte_flow_action *ext_actions = NULL;
 	struct mlx5_flow *dev_flow = NULL;
@@ -8249,7 +8249,7 @@  mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
 		struct rte_flow *flow;
 		struct rte_flow_error error;
 
-		if (!priv->config.dv_flow_en)
+		if (!priv->sh->config.dv_flow_en)
 			break;
 		/* Create internal flow, validation skips copy action. */
 		flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr,
@@ -8563,7 +8563,7 @@  mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
 	struct mlx5_flow_handle *dh;
 	struct rte_flow *flow;
 
-	if (!priv->config.dv_flow_en) {
+	if (!sh->config.dv_flow_en) {
 		if (fputs("device dv flow disabled\n", file) <= 0)
 			return -errno;
 		return -ENOTSUP;
@@ -9547,7 +9547,7 @@  mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	if (!priv->config.dv_flow_en)
+	if (!priv->sh->config.dv_flow_en)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
 					  "flow DV interface is off");
@@ -9966,7 +9966,7 @@  mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
 	type = mlx5_flow_os_get_type();
 	if (type == MLX5_FLOW_TYPE_MAX) {
 		type = MLX5_FLOW_TYPE_VERBS;
-		if (priv->sh->cdev->config.devx && priv->config.dv_flow_en)
+		if (priv->sh->cdev->config.devx && priv->sh->config.dv_flow_en)
 			type = MLX5_FLOW_TYPE_DV;
 	}
 	fops = flow_get_drv_ops(type);
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 7fec79afb3..583e8b7321 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -963,7 +963,7 @@  is_tunnel_offload_active(const struct rte_eth_dev *dev)
 {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	const struct mlx5_priv *priv = dev->data->dev_private;
-	return !!priv->config.dv_miss_info;
+	return !!priv->sh->config.dv_miss_info;
 #else
 	RTE_SET_USED(dev);
 	return false;
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 6a5ac01c2a..c30cb4c203 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -1146,7 +1146,8 @@  flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
 		uint32_t reg_c0 = priv->sh->dv_regc0_mask;
 
 		MLX5_ASSERT(reg_c0);
-		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
+		MLX5_ASSERT(priv->sh->config.dv_xmeta_en !=
+			    MLX5_XMETA_MODE_LEGACY);
 		if (conf->dst == REG_C_0) {
 			/* Copy to reg_c[0], within mask only. */
 			reg_dst.offset = rte_bsf32(reg_c0);
@@ -1917,7 +1918,7 @@  flow_dv_validate_item_mark(struct rte_eth_dev *dev,
 			   struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_sh_config *config = &priv->sh->config;
 	const struct rte_flow_item_mark *spec = item->spec;
 	const struct rte_flow_item_mark *mask = item->mask;
 	const struct rte_flow_item_mark nic_mask = {
@@ -1991,7 +1992,7 @@  flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
 			   struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_sh_config *config = &priv->sh->config;
 	const struct rte_flow_item_meta *spec = item->spec;
 	const struct rte_flow_item_meta *mask = item->mask;
 	struct rte_flow_item_meta nic_mask = {
@@ -3041,7 +3042,7 @@  flow_dv_validate_action_flag(struct rte_eth_dev *dev,
 			     struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_sh_config *config = &priv->sh->config;
 	int ret;
 
 	/* Fall back if no extended metadata register support. */
@@ -3100,7 +3101,7 @@  flow_dv_validate_action_mark(struct rte_eth_dev *dev,
 			     struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_sh_config *config = &priv->sh->config;
 	const struct rte_flow_action_mark *mark = action->conf;
 	int ret;
 
@@ -3174,7 +3175,7 @@  flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
 				 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_sh_config *config = &priv->sh->config;
 	const struct rte_flow_action_set_meta *conf;
 	uint32_t nic_mask = UINT32_MAX;
 	int reg;
@@ -3386,7 +3387,7 @@  flow_dv_validate_action_decap(struct rte_eth_dev *dev,
 	const struct mlx5_priv *priv = dev->data->dev_private;
 
 	if (priv->sh->cdev->config.hca_attr.scatter_fcs_w_decap_disable &&
-	    !priv->config.decap_en)
+	    !priv->sh->config.decap_en)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "decap is not enabled");
@@ -4811,7 +4812,7 @@  flow_dv_validate_action_modify_field(struct rte_eth_dev *dev,
 {
 	int ret = 0;
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_sh_config *config = &priv->sh->config;
 	const struct rte_flow_action_modify_field *action_modify_field =
 		action->conf;
 	uint32_t dst_width = mlx5_flow_item_field_width(dev,
@@ -5423,8 +5424,9 @@  flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
 		       .grow_trunk = 3,
 		       .grow_shift = 2,
 		       .need_lock = 1,
-		       .release_mem_en = !!sh->reclaim_mode,
-		       .per_core_cache = sh->reclaim_mode ? 0 : (1 << 16),
+		       .release_mem_en = !!sh->config.reclaim_mode,
+		       .per_core_cache =
+				       sh->config.reclaim_mode ? 0 : (1 << 16),
 		       .malloc = mlx5_malloc,
 		       .free = mlx5_free,
 		       .type = "mlx5_modify_action_resource",
@@ -5571,7 +5573,7 @@  flow_dv_validate_action_sample(uint64_t *action_flags,
 			       struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *dev_conf = &priv->config;
+	struct mlx5_sh_config *dev_conf = &priv->sh->config;
 	const struct rte_flow_action_sample *sample = action->conf;
 	const struct rte_flow_action *act;
 	uint64_t sub_action_flags = 0;
@@ -6595,7 +6597,7 @@  flow_dv_validate_attributes(struct rte_eth_dev *dev,
 					  NULL,
 					  "priority out of range");
 	if (attributes->transfer) {
-		if (!priv->config.dv_esw_en)
+		if (!priv->sh->config.dv_esw_en)
 			return rte_flow_error_set
 				(error, ENOTSUP,
 				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -6880,7 +6882,7 @@  flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 		},
 	};
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *dev_conf = &priv->config;
+	struct mlx5_sh_config *dev_conf = &priv->sh->config;
 	uint16_t queue_index = 0xFFFF;
 	const struct rte_flow_item_vlan *vlan_m = NULL;
 	uint32_t rw_act_num = 0;
@@ -6904,7 +6906,7 @@  flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	tunnel = is_tunnel_offload_active(dev) ?
 		 mlx5_get_tof(items, actions, &tof_rule_type) : NULL;
 	if (tunnel) {
-		if (!priv->config.dv_flow_en)
+		if (!dev_conf->dv_flow_en)
 			return rte_flow_error_set
 				(error, ENOTSUP,
 				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -12640,7 +12642,7 @@  flow_dv_translate(struct rte_eth_dev *dev,
 		  struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *dev_conf = &priv->config;
+	struct mlx5_sh_config *dev_conf = &priv->sh->config;
 	struct rte_flow *flow = dev_flow->flow;
 	struct mlx5_flow_handle *handle = dev_flow->handle;
 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
@@ -13994,7 +13996,7 @@  flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 				(error, errno,
 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 				NULL,
-				(!priv->config.allow_duplicate_pattern &&
+				(!priv->sh->config.allow_duplicate_pattern &&
 				errno == EEXIST) ?
 				"duplicating pattern is not allowed" :
 				"hardware refuses to create flow");
@@ -16064,7 +16066,7 @@  flow_dv_destroy_mtr_tbls(struct rte_eth_dev *dev,
 	struct mlx5_priv *priv = dev->data->dev_private;
 	int i;
 
-	if (!fm || !priv->config.dv_flow_en)
+	if (!fm || !priv->sh->config.dv_flow_en)
 		return;
 	for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
 		if (fm->drop_rule[i]) {
@@ -16670,7 +16672,8 @@  flow_dv_create_def_policy(struct rte_eth_dev *dev)
 
 	/* Non-termination policy table. */
 	for (i = 0; i < MLX5_MTR_DOMAIN_MAX; i++) {
-		if (!priv->config.dv_esw_en && i == MLX5_MTR_DOMAIN_TRANSFER)
+		if (!priv->sh->config.dv_esw_en &&
+		    i == MLX5_MTR_DOMAIN_TRANSFER)
 			continue;
 		if (__flow_dv_create_domain_def_policy(dev, i)) {
 			DRV_LOG(ERR, "Failed to create default policy");
@@ -17781,7 +17784,7 @@  flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
 			struct rte_mtr_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *dev_conf = &priv->config;
+	struct mlx5_sh_config *dev_conf = &priv->sh->config;
 	const struct rte_flow_action *act;
 	uint64_t action_flags[RTE_COLORS] = {0};
 	int actions_n;
@@ -17795,7 +17798,7 @@  flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
 	bool def_yellow = false;
 	const struct rte_flow_action_rss *rss_color[RTE_COLORS] = {NULL};
 
-	if (!priv->config.dv_esw_en)
+	if (!dev_conf->dv_esw_en)
 		def_domain &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
 	*domain_bitmap = def_domain;
 	/* Red color could only support DROP action. */
@@ -17839,7 +17842,7 @@  flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev,
 			switch (act->type) {
 			case RTE_FLOW_ACTION_TYPE_PORT_ID:
 			case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
-				if (!priv->config.dv_esw_en)
+				if (!dev_conf->dv_esw_en)
 					return -rte_mtr_error_set(error,
 					ENOTSUP,
 					RTE_MTR_ERROR_TYPE_METER_POLICY,
diff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c
index 77ae326bb7..393c13e3c5 100644
--- a/drivers/net/mlx5/mlx5_flow_meter.c
+++ b/drivers/net/mlx5/mlx5_flow_meter.c
@@ -650,8 +650,8 @@  mlx5_flow_meter_policy_validate(struct rte_eth_dev *dev,
 	struct rte_mtr_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_flow_attr attr = { .transfer =
-			priv->config.dv_esw_en ? 1 : 0};
+	struct rte_flow_attr attr = { .transfer = priv->sh->config.dv_esw_en ?
+						  1 : 0 };
 	bool is_rss = false;
 	uint8_t policy_mode;
 	uint8_t domain_bitmap;
@@ -738,8 +738,8 @@  mlx5_flow_meter_policy_add(struct rte_eth_dev *dev,
 			struct rte_mtr_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_flow_attr attr = { .transfer =
-			priv->config.dv_esw_en ? 1 : 0};
+	struct rte_flow_attr attr = { .transfer = priv->sh->config.dv_esw_en ?
+						  1 : 0 };
 	uint32_t sub_policy_idx = 0;
 	uint32_t policy_idx = 0;
 	struct mlx5_flow_meter_policy *mtr_policy = NULL;
@@ -1205,7 +1205,7 @@  mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id,
 			(&priv->sh->mtrmng->def_policy_ref_cnt,
 			1, __ATOMIC_RELAXED);
 		domain_bitmap = MLX5_MTR_ALL_DOMAIN_BIT;
-		if (!priv->config.dv_esw_en)
+		if (!priv->sh->config.dv_esw_en)
 			domain_bitmap &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
 	} else {
 		if (!priv->sh->meter_aso_en)
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index bcb04018f8..1d1f2556de 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -359,14 +359,13 @@  uint64_t
 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_config *config = &priv->config;
 	uint64_t offloads = (RTE_ETH_RX_OFFLOAD_SCATTER |
 			     RTE_ETH_RX_OFFLOAD_TIMESTAMP |
 			     RTE_ETH_RX_OFFLOAD_RSS_HASH);
 
-	if (!config->mprq.enabled)
+	if (!priv->config.mprq.enabled)
 		offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
-	if (config->hw_fcs_strip)
+	if (priv->sh->config.hw_fcs_strip)
 		offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 	if (priv->sh->dev_cap.hw_csum)
 		offloads |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
@@ -1896,7 +1895,7 @@  mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
 	tmpl->rxq.crc_present = 0;
 	tmpl->rxq.lro = lro_on_queue;
 	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
-		if (config->hw_fcs_strip) {
+		if (priv->sh->config.hw_fcs_strip) {
 			/*
 			 * RQs used for LRO-enabled TIRs should not be
 			 * configured to scatter the FCS.
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 72dfb2128a..eb03e9f7b1 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1104,7 +1104,7 @@  mlx5_dev_start(struct rte_eth_dev *dev)
 			dev->data->port_id, strerror(rte_errno));
 		goto error;
 	}
-	if ((priv->sh->cdev->config.devx && priv->config.dv_flow_en &&
+	if ((priv->sh->cdev->config.devx && priv->sh->config.dv_flow_en &&
 	     priv->sh->dev_cap.dest_tir) &&
 	    priv->obj_ops.lb_dummy_queue_create) {
 		ret = priv->obj_ops.lb_dummy_queue_create(dev);
@@ -1277,8 +1277,6 @@  mlx5_dev_stop(struct rte_eth_dev *dev)
  * Enable traffic flows configured by control plane
  *
  * @param dev
- *   Pointer to Ethernet device private data.
- * @param dev
  *   Pointer to Ethernet device structure.
  *
  * @return
@@ -1331,7 +1329,7 @@  mlx5_traffic_enable(struct rte_eth_dev *dev)
 				goto error;
 			}
 		}
-		if (priv->config.dv_esw_en) {
+		if (priv->sh->config.dv_esw_en) {
 			if (mlx5_flow_create_devx_sq_miss_flow(dev, i) == 0) {
 				DRV_LOG(ERR,
 					"Port %u Tx queue %u SQ create representor devx default miss rule failed.",
@@ -1341,7 +1339,7 @@  mlx5_traffic_enable(struct rte_eth_dev *dev)
 		}
 		mlx5_txq_release(dev, i);
 	}
-	if (priv->config.dv_esw_en) {
+	if (priv->sh->config.dv_esw_en) {
 		if (mlx5_flow_create_esw_table_zero_flow(dev))
 			priv->fdb_def_rule = 1;
 		else
@@ -1349,7 +1347,7 @@  mlx5_traffic_enable(struct rte_eth_dev *dev)
 				" configured - only Eswitch group 0 flows are"
 				" supported.", dev->data->port_id);
 	}
-	if (!priv->config.lacp_by_user && priv->pf_bond >= 0) {
+	if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) {
 		ret = mlx5_flow_lacp_miss(dev);
 		if (ret)
 			DRV_LOG(INFO, "port %u LACP rule cannot be created - "
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index 1d16ebcb41..fe74317fe8 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -816,15 +816,15 @@  mlx5_txpp_start_service(struct mlx5_dev_ctx_shared *sh)
  * Returns 0 on success, negative otherwise
  */
 static int
-mlx5_txpp_create(struct mlx5_dev_ctx_shared *sh, struct mlx5_priv *priv)
+mlx5_txpp_create(struct mlx5_dev_ctx_shared *sh)
 {
-	int tx_pp = priv->config.tx_pp;
+	int tx_pp = sh->config.tx_pp;
 	int ret;
 
 	/* Store the requested pacing parameters. */
 	sh->txpp.tick = tx_pp >= 0 ? tx_pp : -tx_pp;
 	sh->txpp.test = !!(tx_pp < 0);
-	sh->txpp.skew = priv->config.tx_skew;
+	sh->txpp.skew = sh->config.tx_skew;
 	sh->txpp.freq = sh->cdev->config.hca_attr.dev_freq_khz;
 	ret = mlx5_txpp_create_event_channel(sh);
 	if (ret)
@@ -891,7 +891,7 @@  mlx5_txpp_start(struct rte_eth_dev *dev)
 	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	int err = 0;
 
-	if (!priv->config.tx_pp) {
+	if (!sh->config.tx_pp) {
 		/* Packet pacing is not requested for the device. */
 		MLX5_ASSERT(priv->txpp_en == 0);
 		return 0;
@@ -901,7 +901,7 @@  mlx5_txpp_start(struct rte_eth_dev *dev)
 		MLX5_ASSERT(sh->txpp.refcnt);
 		return 0;
 	}
-	if (priv->config.tx_pp > 0) {
+	if (sh->config.tx_pp > 0) {
 		err = rte_mbuf_dynflag_lookup
 			(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
 		/* No flag registered means no service needed. */
@@ -914,7 +914,7 @@  mlx5_txpp_start(struct rte_eth_dev *dev)
 		priv->txpp_en = 1;
 		++sh->txpp.refcnt;
 	} else {
-		err = mlx5_txpp_create(sh, priv);
+		err = mlx5_txpp_create(sh);
 		if (!err) {
 			MLX5_ASSERT(sh->txpp.tick);
 			priv->txpp_en = 1;
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 47bca9e3ea..3373ee66b4 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -109,7 +109,7 @@  mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 			     RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
 	if (dev_cap->tso)
 		offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
-	if (config->tx_pp)
+	if (priv->sh->config.tx_pp)
 		offloads |= RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP;
 	if (dev_cap->swp) {
 		if (dev_cap->swp & MLX5_SW_PARSING_CSUM_CAP)
diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index dfcd28901a..04f9590096 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -325,27 +325,18 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 			strerror(rte_errno));
 		goto error;
 	}
-	sh = mlx5_alloc_shared_dev_ctx(spawn, config);
+	sh = mlx5_alloc_shared_dev_ctx(spawn);
 	if (!sh)
 		return NULL;
-	/* Update final values for devargs before check sibling config. */
-	config->dv_esw_en = 0;
-	if (!config->dv_flow_en) {
+	if (!sh->config.dv_flow_en) {
 		DRV_LOG(ERR, "Windows flow mode must be DV flow enable.");
 		err = ENOTSUP;
 		goto error;
 	}
-	if (!config->dv_esw_en &&
-	    config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
-		DRV_LOG(WARNING,
-			"Metadata mode %u is not supported (no E-Switch).",
-			config->dv_xmeta_en);
-		config->dv_xmeta_en = MLX5_XMETA_MODE_LEGACY;
+	if (sh->config.vf_nl_en) {
+		DRV_LOG(DEBUG, "VF netlink isn't supported.");
+		sh->config.vf_nl_en = 0;
 	}
-	/* Check sibling device configurations. */
-	err = mlx5_dev_check_sibling_config(sh, config, dpdk_dev);
-	if (err)
-		goto error;
 	/* Initialize the shutdown event in mlx5_dev_spawn to
 	 * support mlx5_is_removed for Windows.
 	 */
@@ -417,7 +408,6 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		DRV_LOG(WARNING, "Rx CQE compression isn't supported.");
 		config->cqe_comp = 0;
 	}
-	config->hw_fcs_strip = sh->dev_cap.hw_fcs_strip;
 	if (config->mprq.enabled) {
 		DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
 		config->mprq.enabled = 0;
@@ -523,8 +513,8 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	/* Store device configuration on private structure. */
 	priv->config = *config;
 	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
-		icfg[i].release_mem_en = !!config->reclaim_mode;
-		if (config->reclaim_mode)
+		icfg[i].release_mem_en = !!sh->config.reclaim_mode;
+		if (sh->config.reclaim_mode)
 			icfg[i].per_core_cache = 0;
 		priv->flows[i] = mlx5_ipool_create(&icfg[i]);
 		if (!priv->flows[i])
@@ -532,7 +522,7 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	}
 	/* Create context for virtual machine VLAN workaround. */
 	priv->vmwa_context = NULL;
-	if (config->dv_flow_en) {
+	if (sh->config.dv_flow_en) {
 		err = mlx5_alloc_shared_dr(priv);
 		if (err)
 			goto error;
@@ -540,11 +530,11 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	/* No supported flow priority number detection. */
 	priv->sh->flow_max_priority = -1;
 	mlx5_set_metadata_mask(eth_dev);
-	if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+	if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
 	    !priv->sh->dv_regc0_mask) {
 		DRV_LOG(ERR, "metadata mode %u is not supported "
 			     "(no metadata reg_c[0] is available).",
-			     priv->config.dv_xmeta_en);
+			     sh->config.dv_xmeta_en);
 			err = ENOTSUP;
 			goto error;
 	}
@@ -564,10 +554,10 @@  mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		DRV_LOG(DEBUG,
 			"port %u extensive metadata register is not supported.",
 			eth_dev->data->port_id);
-		if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+		if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
 			DRV_LOG(ERR, "metadata mode %u is not supported "
 				     "(no metadata registers available).",
-				     priv->config.dv_xmeta_en);
+				     sh->config.dv_xmeta_en);
 			err = ENOTSUP;
 			goto error;
 		}
@@ -837,7 +827,6 @@  mlx5_os_net_probe(struct mlx5_common_device *cdev)
 			.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
 			.min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
 		},
-		.dv_flow_en = 1,
 		.log_hp_size = MLX5_ARG_UNSET,
 	};
 	int ret;