@@ -1114,7 +1114,6 @@ struct mlx5_dev_spawn_data {
priv->ctx = sh->ctx;
priv->ibv_port = spawn->ibv_port;
priv->device_attr = sh->device_attr;
- priv->pd = sh->pd;
priv->mtu = ETHER_MTU;
#ifndef RTE_ARCH_64
/* Initialize UAR access locks for 32bit implementations. */
@@ -228,7 +228,6 @@ struct mlx5_priv {
uint32_t ibv_port; /* IB device port number. */
struct ibv_context *ctx; /* Verbs context. */
struct ibv_device_attr_ex device_attr; /* Device properties. */
- struct ibv_pd *pd; /* Protection Domain. */
struct ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES);
/* Bit-field of MAC addresses owned by the PMD. */
@@ -719,7 +719,7 @@ struct mr_update_mp_data {
* mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()
* through mlx5_alloc_verbs_buf().
*/
- mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)data.start, len,
+ mr->ibv_mr = mlx5_glue->reg_mr(priv->sh->pd, (void *)data.start, len,
IBV_ACCESS_LOCAL_WRITE);
if (mr->ibv_mr == NULL) {
DEBUG("port %u fail to create a verbs MR for address (%p)",
@@ -1156,7 +1156,7 @@ struct mr_update_mp_data {
}
DRV_LOG(DEBUG, "port %u register MR for chunk #%d of mempool (%s)",
dev->data->port_id, mem_idx, mp->name);
- mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)addr, len,
+ mr->ibv_mr = mlx5_glue->reg_mr(priv->sh->pd, (void *)addr, len,
IBV_ACCESS_LOCAL_WRITE);
if (mr->ibv_mr == NULL) {
DRV_LOG(WARNING,
@@ -867,7 +867,7 @@ struct mlx5_rxq_ibv *
.max_wr = wqe_n >> rxq_data->sges_n,
/* Max number of scatter/gather elements in a WR. */
.max_sge = 1 << rxq_data->sges_n,
- .pd = priv->pd,
+ .pd = priv->sh->pd,
.cq = tmpl->cq,
.comp_mask =
IBV_WQ_FLAGS_CVLAN_STRIPPING |
@@ -1831,7 +1831,7 @@ struct mlx5_hrxq *
.rx_hash_fields_mask = hash_fields,
},
.rwq_ind_tbl = ind_tbl->ind_table,
- .pd = priv->pd,
+ .pd = priv->sh->pd,
},
&qp_init_attr);
#else
@@ -1850,7 +1850,7 @@ struct mlx5_hrxq *
.rx_hash_fields_mask = hash_fields,
},
.rwq_ind_tbl = ind_tbl->ind_table,
- .pd = priv->pd,
+ .pd = priv->sh->pd,
});
#endif
if (!qp) {
@@ -2006,7 +2006,7 @@ struct mlx5_rxq_ibv *
.wq_type = IBV_WQT_RQ,
.max_wr = 1,
.max_sge = 1,
- .pd = priv->pd,
+ .pd = priv->sh->pd,
.cq = cq,
});
if (!wq) {
@@ -2160,7 +2160,7 @@ struct mlx5_hrxq *
.rx_hash_fields_mask = 0,
},
.rwq_ind_tbl = ind_tbl->ind_table,
- .pd = priv->pd
+ .pd = priv->sh->pd
});
if (!qp) {
DEBUG("port %u cannot allocate QP for drop queue",
@@ -426,7 +426,7 @@ struct mlx5_txq_ibv *
* Tx burst.
*/
.sq_sig_all = 0,
- .pd = priv->pd,
+ .pd = priv->sh->pd,
.comp_mask = IBV_QP_INIT_ATTR_PD,
};
if (txq_data->max_inline)
The PMD code is updated to use the Protection Domain from the shared IB device context. The Protection Domain is shared between all devices belonging to the same multiport InfiniBand device. If the IB device has only one port, the PD is not shared, because there is only one Ethernet device created over the IB one. Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> --- drivers/net/mlx5/mlx5.c | 1 - drivers/net/mlx5/mlx5.h | 1 - drivers/net/mlx5/mlx5_mr.c | 4 ++-- drivers/net/mlx5/mlx5_rxq.c | 10 +++++----- drivers/net/mlx5/mlx5_txq.c | 2 +- 5 files changed, 8 insertions(+), 10 deletions(-)