From patchwork Thu Mar 18 09:56:07 2021
X-Patchwork-Id: 89479
From: Michael Baum <michaelba@nvidia.com>
To: dev@dpdk.org
Cc: Matan Azrad, Raslan Darawsheh, Viacheslav Ovsiienko, stable@dpdk.org
Date: Thu, 18 Mar 2021 09:56:07 +0000
Message-Id: <1616061368-29768-2-git-send-email-michaelba@nvidia.com>
In-Reply-To: <1616061368-29768-1-git-send-email-michaelba@nvidia.com>
References: <1616061368-29768-1-git-send-email-michaelba@nvidia.com>
Subject: [dpdk-dev] [PATCH 1/2] net/mlx5: workaround ASO memory region creation

Due to a kernel issue in direct MKEY creation using the DevX API for
physical memory, this patch switches the ASO MR creation to the Verbs
API.

Fixes: f935ed4b645a ("net/mlx5: support flow hit action for aging")
Cc: stable@dpdk.org

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad
---
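Background for reviewers (a sketch, not part of the commit itself): on
Linux, the reg_mr_cb callback used by the new code resolves to plain
Verbs registration through mlx5_glue->reg_mr(). A minimal sketch of that
Verbs path follows; reg_aso_buf() is a hypothetical helper name used
only for illustration, since the driver itself calls through the glue
layer and the share_cache callback rather than invoking ibv_reg_mr()
directly.

#include <infiniband/verbs.h>

/* Sketch: register the ASO bits buffer the Verbs way. The kernel builds
 * the MKEY inside ibv_reg_mr(), so the DevX direct-MKEY path for
 * physical memory, which hits the kernel issue, is never exercised. */
static struct ibv_mr *
reg_aso_buf(struct ibv_pd *pd, void *addr, size_t length)
{
	return ibv_reg_mr(pd, addr, length, IBV_ACCESS_LOCAL_WRITE);
}

The returned MR's lkey is what the ASO WQEs consume afterwards
(sq->mr.lkey in the diff), replacing the DevX MKEY id (sq->mr.mkey->id)
used previously.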
 drivers/common/mlx5/linux/mlx5_common_verbs.c |   1 -
 drivers/common/mlx5/windows/mlx5_common_os.c  |  23 +++---
 drivers/net/mlx5/mlx5.h                       |  10 +--
 drivers/net/mlx5/mlx5_flow_age.c              | 106 +++++++++++---------------
 4 files changed, 58 insertions(+), 82 deletions(-)

diff --git a/drivers/common/mlx5/linux/mlx5_common_verbs.c b/drivers/common/mlx5/linux/mlx5_common_verbs.c
index 339535d..aa560f0 100644
--- a/drivers/common/mlx5/linux/mlx5_common_verbs.c
+++ b/drivers/common/mlx5/linux/mlx5_common_verbs.c
@@ -37,7 +37,6 @@
 {
 	struct ibv_mr *ibv_mr;
 
-	memset(pmd_mr, 0, sizeof(*pmd_mr));
 	ibv_mr = mlx5_glue->reg_mr(pd, addr, length,
 				   IBV_ACCESS_LOCAL_WRITE |
 				   (haswell_broadwell_cpu ? 0 :
diff --git a/drivers/common/mlx5/windows/mlx5_common_os.c b/drivers/common/mlx5/windows/mlx5_common_os.c
index f2d781a..cebf42d 100644
--- a/drivers/common/mlx5/windows/mlx5_common_os.c
+++ b/drivers/common/mlx5/windows/mlx5_common_os.c
@@ -155,23 +155,22 @@
 	struct mlx5_devx_mkey_attr mkey_attr;
 	struct mlx5_pd *mlx5_pd = (struct mlx5_pd *)pd;
 	struct mlx5_hca_attr attr;
+	struct mlx5_devx_obj *mkey;
+	void *obj;
 
 	if (!pd || !addr) {
 		rte_errno = EINVAL;
 		return -1;
 	}
-	memset(pmd_mr, 0, sizeof(*pmd_mr));
 	if (mlx5_devx_cmd_query_hca_attr(mlx5_pd->devx_ctx, &attr))
 		return -1;
-	pmd_mr->addr = addr;
-	pmd_mr->len = length;
-	pmd_mr->obj = mlx5_os_umem_reg(mlx5_pd->devx_ctx, pmd_mr->addr,
-				       pmd_mr->len, IBV_ACCESS_LOCAL_WRITE);
-	if (!pmd_mr->obj)
+	obj = mlx5_os_umem_reg(mlx5_pd->devx_ctx, addr, length,
+			       IBV_ACCESS_LOCAL_WRITE);
+	if (!obj)
 		return -1;
 	mkey_attr.addr = (uintptr_t)addr;
 	mkey_attr.size = length;
-	mkey_attr.umem_id = ((struct mlx5_devx_umem *)(pmd_mr->obj))->umem_id;
+	mkey_attr.umem_id = ((struct mlx5_devx_umem *)(obj))->umem_id;
 	mkey_attr.pd = mlx5_pd->pdn;
 	mkey_attr.log_entity_size = 0;
 	mkey_attr.pg_access = 0;
@@ -183,11 +182,15 @@
 		mkey_attr.relaxed_ordering_write = attr.relaxed_ordering_write;
 		mkey_attr.relaxed_ordering_read = attr.relaxed_ordering_read;
 	}
-	pmd_mr->mkey = mlx5_devx_cmd_mkey_create(mlx5_pd->devx_ctx, &mkey_attr);
-	if (!pmd_mr->mkey) {
-		claim_zero(mlx5_os_umem_dereg(pmd_mr->obj));
+	mkey = mlx5_devx_cmd_mkey_create(mlx5_pd->devx_ctx, &mkey_attr);
+	if (!mkey) {
+		claim_zero(mlx5_os_umem_dereg(obj));
 		return -1;
 	}
+	pmd_mr->addr = addr;
+	pmd_mr->len = length;
+	pmd_mr->obj = obj;
+	pmd_mr->mkey = mkey;
 	pmd_mr->lkey = pmd_mr->mkey->id;
 	return 0;
 }
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 14043b6..e2eb4db 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -471,14 +471,6 @@ struct mlx5_aso_cq {
 	uint64_t errors;
 };
 
-struct mlx5_aso_devx_mr {
-	void *buf;
-	uint64_t length;
-	struct mlx5dv_devx_umem *umem;
-	struct mlx5_devx_obj *mkey;
-	bool is_indirect;
-};
-
 struct mlx5_aso_sq_elem {
 	struct mlx5_aso_age_pool *pool;
 	uint16_t burst_size;
@@ -489,7 +481,7 @@ struct mlx5_aso_sq {
 	struct mlx5_aso_cq cq;
 	struct mlx5_devx_sq sq_obj;
 	volatile uint64_t *uar_addr;
-	struct mlx5_aso_devx_mr mr;
+	struct mlx5_pmd_mr mr;
 	uint16_t pi;
 	uint32_t head;
 	uint32_t tail;
diff --git a/drivers/net/mlx5/mlx5_flow_age.c b/drivers/net/mlx5/mlx5_flow_age.c
index 00cb20d..c0be7c3 100644
--- a/drivers/net/mlx5/mlx5_flow_age.c
+++ b/drivers/net/mlx5/mlx5_flow_age.c
@@ -61,90 +61,72 @@
 /**
  * Free MR resources.
  *
+ * @param[in] sh
+ *   Pointer to shared device context.
  * @param[in] mr
  *   MR to free.
  */
 static void
-mlx5_aso_devx_dereg_mr(struct mlx5_aso_devx_mr *mr)
+mlx5_aso_dereg_mr(struct mlx5_dev_ctx_shared *sh, struct mlx5_pmd_mr *mr)
 {
-	claim_zero(mlx5_devx_cmd_destroy(mr->mkey));
-	if (!mr->is_indirect && mr->umem)
-		claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
-	mlx5_free(mr->buf);
+	void *addr = mr->addr;
+
+	sh->share_cache.dereg_mr_cb(mr);
+	mlx5_free(addr);
 	memset(mr, 0, sizeof(*mr));
 }
 
 /**
  * Register Memory Region.
  *
- * @param[in] ctx
- *   Context returned from mlx5 open_device() glue function.
+ * @param[in] sh
+ *   Pointer to shared device context.
  * @param[in] length
  *   Size of MR buffer.
  * @param[in/out] mr
  *   Pointer to MR to create.
  * @param[in] socket
  *   Socket to use for allocation.
- * @param[in] pdn
- *   Protection Domain number to use.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-mlx5_aso_devx_reg_mr(void *ctx, size_t length, struct mlx5_aso_devx_mr *mr,
-		     int socket, int pdn)
+mlx5_aso_reg_mr(struct mlx5_dev_ctx_shared *sh, size_t length,
+		struct mlx5_pmd_mr *mr, int socket)
 {
-	struct mlx5_devx_mkey_attr mkey_attr;
-	mr->buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, length, 4096,
-			      socket);
-	if (!mr->buf) {
-		DRV_LOG(ERR, "Failed to create ASO bits mem for MR by Devx.");
+	int ret;
+
+	mr->addr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, length, 4096,
+			       socket);
+	if (!mr->addr) {
+		DRV_LOG(ERR, "Failed to create ASO bits mem for MR.");
 		return -1;
 	}
-	mr->umem = mlx5_os_umem_reg(ctx, mr->buf, length,
-				    IBV_ACCESS_LOCAL_WRITE);
-	if (!mr->umem) {
-		DRV_LOG(ERR, "Failed to register Umem for MR by Devx.");
-		goto error;
-	}
-	mkey_attr.addr = (uintptr_t)mr->buf;
-	mkey_attr.size = length;
-	mkey_attr.umem_id = mlx5_os_get_umem_id(mr->umem);
-	mkey_attr.pd = pdn;
-	mkey_attr.pg_access = 1;
-	mkey_attr.klm_array = NULL;
-	mkey_attr.klm_num = 0;
-	mkey_attr.relaxed_ordering_read = 0;
-	mkey_attr.relaxed_ordering_write = 0;
-	mr->mkey = mlx5_devx_cmd_mkey_create(ctx, &mkey_attr);
-	if (!mr->mkey) {
+	ret = sh->share_cache.reg_mr_cb(sh->pd, mr->addr, length, mr);
+	if (ret) {
 		DRV_LOG(ERR, "Failed to create direct Mkey.");
-		goto error;
+		mlx5_free(mr->addr);
+		return -1;
 	}
-	mr->length = length;
-	mr->is_indirect = false;
 	return 0;
-error:
-	if (mr->umem)
-		claim_zero(mlx5_glue->devx_umem_dereg(mr->umem));
-	mlx5_free(mr->buf);
-	return -1;
 }
 
 /**
  * Destroy Send Queue used for ASO access.
  *
+ * @param[in] sh
+ *   Pointer to shared device context.
  * @param[in] sq
  *   ASO SQ to destroy.
  */
 static void
-mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
+mlx5_aso_destroy_sq(struct mlx5_dev_ctx_shared *sh, struct mlx5_aso_sq *sq)
 {
 	mlx5_devx_sq_destroy(&sq->sq_obj);
 	mlx5_aso_cq_destroy(&sq->cq);
-	mlx5_aso_devx_dereg_mr(&sq->mr);
+	mlx5_aso_dereg_mr(sh, &sq->mr);
 	memset(sq, 0, sizeof(*sq));
 }
 
@@ -166,8 +148,8 @@
 	for (i = 0, wqe = &sq->sq_obj.aso_wqes[0]; i < size; ++i, ++wqe) {
 		wqe->general_cseg.sq_ds = rte_cpu_to_be_32((sq->sqn << 8) |
 							   (sizeof(*wqe) >> 4));
-		wqe->aso_cseg.lkey = rte_cpu_to_be_32(sq->mr.mkey->id);
-		addr = (uint64_t)((uint64_t *)sq->mr.buf + i *
+		wqe->aso_cseg.lkey = rte_cpu_to_be_32(sq->mr.lkey);
+		addr = (uint64_t)((uint64_t *)sq->mr.addr + i *
 				  MLX5_ASO_AGE_ACTIONS_PER_POOL / 64);
 		wqe->aso_cseg.va_h = rte_cpu_to_be_32((uint32_t)(addr >> 32));
 		wqe->aso_cseg.va_l_r = rte_cpu_to_be_32((uint32_t)addr | 1u);
@@ -184,31 +166,31 @@
 /**
  * Create Send Queue used for ASO access.
  *
- * @param[in] ctx
- *   Context returned from mlx5 open_device() glue function.
+ * @param[in] sh
+ *   Pointer to shared device context.
  * @param[in/out] sq
  *   Pointer to SQ to create.
  * @param[in] socket
  *   Socket to use for allocation.
  * @param[in] uar
  *   User Access Region object.
- * @param[in] pdn
- *   Protection Domain number to use.
  * @param[in] log_desc_n
  *   Log of number of descriptors in queue.
+ * @param[in] ts_format
+ *   timestamp format supported by the queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
-		   void *uar, uint32_t pdn, uint16_t log_desc_n,
+mlx5_aso_sq_create(struct mlx5_dev_ctx_shared *sh, struct mlx5_aso_sq *sq,
+		   int socket, void *uar, uint16_t log_desc_n,
 		   uint32_t ts_format)
 {
 	struct mlx5_devx_create_sq_attr attr = {
 		.user_index = 0xFFFF,
 		.wq_attr = (struct mlx5_devx_wq_attr){
-			.pd = pdn,
+			.pd = sh->pdn,
 			.uar_page = mlx5_os_get_devx_uar_page_id(uar),
 		},
 		.ts_format = mlx5_ts_format_conv(ts_format),
@@ -220,17 +202,18 @@
 	uint16_t log_wqbb_n;
 	int ret;
 
-	if (mlx5_aso_devx_reg_mr(ctx, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
-				 sq_desc_n, &sq->mr, socket, pdn))
+	if (mlx5_aso_reg_mr(sh, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) * sq_desc_n,
+			    &sq->mr, socket))
 		return -1;
-	if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n, socket,
+	if (mlx5_aso_cq_create(sh->ctx, &sq->cq, log_desc_n, socket,
 			       mlx5_os_get_devx_uar_page_id(uar)))
 		goto error;
 	sq->log_desc_n = log_desc_n;
 	attr.cqn = sq->cq.cq_obj.cq->id;
 	/* for mlx5_aso_wqe that is twice the size of mlx5_wqe */
 	log_wqbb_n = log_desc_n + 1;
-	ret = mlx5_devx_sq_create(ctx, &sq->sq_obj, log_wqbb_n, &attr, socket);
+	ret = mlx5_devx_sq_create(sh->ctx, &sq->sq_obj, log_wqbb_n, &attr,
+				  socket);
 	if (ret) {
 		DRV_LOG(ERR, "Can't create SQ object.");
 		rte_errno = ENOMEM;
@@ -250,7 +233,7 @@
 	mlx5_aso_init_sq(sq);
 	return 0;
 error:
-	mlx5_aso_destroy_sq(sq);
+	mlx5_aso_destroy_sq(sh, sq);
 	return -1;
 }
 
@@ -266,9 +249,8 @@
 int
 mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh)
 {
-	return mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,
-				  sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
-				  sh->sq_ts_format);
+	return mlx5_aso_sq_create(sh, &sh->aso_age_mng->aso_sq, 0, sh->tx_uar,
+				  MLX5_ASO_QUEUE_LOG_DESC, sh->sq_ts_format);
 }
 
 /**
@@ -280,7 +262,7 @@
 void
 mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh)
 {
-	mlx5_aso_destroy_sq(&sh->aso_age_mng->aso_sq);
+	mlx5_aso_destroy_sq(sh, &sh->aso_age_mng->aso_sq);
 }
 
 /**
@@ -410,7 +392,7 @@
 		uint16_t idx = (sq->tail + i) & mask;
 		struct mlx5_aso_age_pool *pool = sq->elts[idx].pool;
 		uint64_t diff = curr - pool->time_of_last_age_check;
-		uint64_t *addr = sq->mr.buf;
+		uint64_t *addr = sq->mr.addr;
 		int j;
 
 		addr += idx * MLX5_ASO_AGE_ACTIONS_PER_POOL / 64;

From patchwork Thu Mar 18 09:56:08 2021
X-Patchwork-Id: 89477
From: Michael Baum <michaelba@nvidia.com>
To: dev@dpdk.org
Cc: Matan Azrad, Raslan Darawsheh, Viacheslav Ovsiienko, stable@dpdk.org
Date: Thu, 18 Mar 2021 09:56:08 +0000
Message-Id: <1616061368-29768-3-git-send-email-michaelba@nvidia.com>
In-Reply-To: <1616061368-29768-1-git-send-email-michaelba@nvidia.com>
References: <1616061368-29768-1-git-send-email-michaelba@nvidia.com>
Subject: [dpdk-dev] [PATCH 2/2] net/mlx5: workaround counter memory region creation

Due to a kernel issue in direct MKEY creation using the DevX API for
physical memory, this patch switches the counter MR creation to the
Verbs API.

Fixes: 3aa279157fa0 ("net/mlx5: synchronize flow counter pool creation")
Cc: stable@dpdk.org

Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad
---
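Background for reviewers (a sketch, not part of the commit itself): the
counter statistics memory now takes the same registration path as the
ASO buffer in patch 1/2. A condensed sketch of the new flow is below,
assuming the driver-internal types from mlx5.h;
register_counter_stats_mem() is a hypothetical wrapper name, since the
patch inlines this logic in mlx5_flow_create_counter_stat_mem_mng().

/* Sketch: register counter stats memory through the OS-dispatched
 * callback (Verbs registration on Linux, umem plus MKEY on Windows);
 * either implementation fills the generic mlx5_pmd_mr descriptor. */
static int
register_counter_stats_mem(struct mlx5_dev_ctx_shared *sh,
			   uint8_t *mem, size_t size,
			   struct mlx5_pmd_mr *dm)
{
	if (sh->share_cache.reg_mr_cb(sh->pd, (void *)mem, size, dm))
		return -1;
	/* Flow counter queries then pass dm->lkey where the id of a
	 * directly created DevX MKEY (mng->dm->id) was passed before. */
	return 0;
}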
 drivers/net/mlx5/linux/mlx5_os.c   | 10 ----------
 drivers/net/mlx5/mlx5.c            | 11 +++++++----
 drivers/net/mlx5/mlx5.h            |  5 +----
 drivers/net/mlx5/mlx5_flow.c       | 27 +++++----------------------
 drivers/net/mlx5/windows/mlx5_os.c |  9 ---------
 5 files changed, 13 insertions(+), 49 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 5e3ae9f..5740214 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1163,16 +1163,6 @@
 			err = -err;
 			goto error;
 		}
-		/* Check relax ordering support. */
-		if (!haswell_broadwell_cpu) {
-			sh->cmng.relaxed_ordering_write =
-				config->hca_attr.relaxed_ordering_write;
-			sh->cmng.relaxed_ordering_read =
-				config->hca_attr.relaxed_ordering_read;
-		} else {
-			sh->cmng.relaxed_ordering_read = 0;
-			sh->cmng.relaxed_ordering_write = 0;
-		}
 		sh->rq_ts_format = config->hca_attr.rq_ts_format;
 		sh->sq_ts_format = config->hca_attr.sq_ts_format;
 		sh->qp_ts_format = config->hca_attr.qp_ts_format;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index abd7ff7..fb58631 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -469,17 +469,20 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
 /**
  * Destroy all the resources allocated for a counter memory management.
  *
+ * @param[in] sh
+ *   Pointer to mlx5_dev_ctx_shared object to free.
  * @param[in] mng
  *   Pointer to the memory management structure.
  */
 static void
-mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
+mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh,
+				       struct mlx5_counter_stats_mem_mng *mng)
 {
 	uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;
 
 	LIST_REMOVE(mng, next);
-	claim_zero(mlx5_devx_cmd_destroy(mng->dm));
-	claim_zero(mlx5_os_umem_dereg(mng->umem));
+	sh->share_cache.dereg_mr_cb(&mng->dm);
+	memset(&mng->dm, 0, sizeof(mng->dm));
 	mlx5_free(mem);
 }
 
@@ -533,7 +536,7 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
 	}
 	mng = LIST_FIRST(&sh->cmng.mem_mngs);
 	while (mng) {
-		mlx5_flow_destroy_counter_stat_mem_mng(mng);
+		mlx5_flow_destroy_counter_stat_mem_mng(sh, mng);
 		mng = LIST_FIRST(&sh->cmng.mem_mngs);
 	}
 	memset(&sh->cmng, 0, sizeof(sh->cmng));
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index e2eb4db..8e8727a 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -422,8 +421,7 @@ struct mlx5_flow_counter_pool {
 struct mlx5_counter_stats_mem_mng {
 	LIST_ENTRY(mlx5_counter_stats_mem_mng) next;
 	struct mlx5_counter_stats_raw *raws;
-	struct mlx5_devx_obj *dm;
-	void *umem;
+	struct mlx5_pmd_mr dm;
 };
 
 /* Raw memory structure for the counter statistics values of a pool. */
@@ -454,8 +453,6 @@
 	uint8_t pending_queries;
 	uint16_t pool_index;
 	uint8_t query_thread_on;
-	bool relaxed_ordering_read;
-	bool relaxed_ordering_write;
 	bool counter_fallback; /* Use counter fallback management. */
 	LIST_HEAD(mem_mngs, mlx5_counter_stats_mem_mng) mem_mngs;
 	LIST_HEAD(stat_raws, mlx5_counter_stats_raw) free_stat_raws;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index d46fc33..afa8ab4 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -6717,7 +6717,6 @@ struct mlx5_meter_domains_infos *
 static int
 mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
 {
-	struct mlx5_devx_mkey_attr mkey_attr;
 	struct mlx5_counter_stats_mem_mng *mem_mng;
 	volatile struct flow_counter_stats *raw_data;
 	int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES;
@@ -6727,6 +6726,7 @@ struct mlx5_meter_domains_infos *
 			sizeof(struct mlx5_counter_stats_mem_mng);
 	size_t pgsize = rte_mem_page_size();
 	uint8_t *mem;
+	int ret;
 	int i;
 
 	if (pgsize == (size_t)-1) {
@@ -6741,26 +6741,9 @@ struct mlx5_meter_domains_infos *
 	}
 	mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
 	size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
-	mem_mng->umem = mlx5_os_umem_reg(sh->ctx, mem, size,
-					 IBV_ACCESS_LOCAL_WRITE);
-	if (!mem_mng->umem) {
-		rte_errno = errno;
-		mlx5_free(mem);
-		return -rte_errno;
-	}
-	mkey_attr.addr = (uintptr_t)mem;
-	mkey_attr.size = size;
-	mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
-	mkey_attr.pd = sh->pdn;
-	mkey_attr.log_entity_size = 0;
-	mkey_attr.pg_access = 0;
-	mkey_attr.klm_array = NULL;
-	mkey_attr.klm_num = 0;
-	mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
-	mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
-	mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
-	if (!mem_mng->dm) {
-		mlx5_os_umem_dereg(mem_mng->umem);
+	ret = sh->share_cache.reg_mr_cb(sh->pd, (void *)mem, size,
+					&mem_mng->dm);
+	if (ret) {
 		rte_errno = errno;
 		mlx5_free(mem);
 		return -rte_errno;
 	}
@@ -6879,7 +6862,7 @@ struct mlx5_meter_domains_infos *
 	ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0,
 					       MLX5_COUNTERS_PER_POOL,
 					       NULL, NULL,
-					       pool->raw_hw->mem_mng->dm->id,
+					       pool->raw_hw->mem_mng->dm.lkey,
 					       (void *)(uintptr_t)
 					       pool->raw_hw->data,
 					       sh->devx_comp,
diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index 6f39276..3e72e36 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -466,15 +466,6 @@
 			err = -err;
 			goto error;
 		}
-		/* Check relax ordering support. */
-		sh->cmng.relaxed_ordering_read = 0;
-		sh->cmng.relaxed_ordering_write = 0;
-		if (!haswell_broadwell_cpu) {
-			sh->cmng.relaxed_ordering_write =
-				config->hca_attr.relaxed_ordering_write;
-			sh->cmng.relaxed_ordering_read =
-				config->hca_attr.relaxed_ordering_read;
-		}
 	}
 	if (config->devx) {
 		uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];