From patchwork Fri Oct 23 07:14:52 2020
X-Patchwork-Submitter: Suanming Mou
X-Patchwork-Id: 81891
X-Patchwork-Delegate: rasland@nvidia.com
From: Suanming Mou
To: Matan Azrad, Shahaf Shuler, Viacheslav Ovsiienko
Cc: dev@dpdk.org
Date: Fri, 23 Oct 2020 15:14:52 +0800
Message-Id: <1603437295-119083-23-git-send-email-suanmingm@nvidia.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1603437295-119083-1-git-send-email-suanmingm@nvidia.com>
References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com>
 <1603437295-119083-1-git-send-email-suanmingm@nvidia.com>
Subject: [dpdk-dev] [PATCH v2 22/25] net/mlx5: make Rx queue thread safe

This commit applies the cache linked list to the Rx queue to make it
thread safe.

Signed-off-by: Suanming Mou
Acked-by: Matan Azrad
---
 drivers/net/mlx5/linux/mlx5_os.c   |   5 +
 drivers/net/mlx5/mlx5.c            |   1 +
 drivers/net/mlx5/mlx5.h            |  24 +++-
 drivers/net/mlx5/mlx5_flow.h       |  16 ---
 drivers/net/mlx5/mlx5_flow_dv.c    |  61 ++++------
 drivers/net/mlx5/mlx5_flow_verbs.c |  19 +--
 drivers/net/mlx5/mlx5_rxq.c        | 235 +++++++++++++++++++------------------
 drivers/net/mlx5/mlx5_rxtx.h       |  20 ++--
 8 files changed, 182 insertions(+), 199 deletions(-)
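For context, the three callbacks wired up below are driven by the generic
cache list added earlier in this series (drivers/net/mlx5/mlx5_utils.c).
A minimal sketch of the register path follows, assuming a simplified
layout; the field names, locking granularity and the re-check on the
write path are illustrative only, not the exact implementation:

/*
 * Sketch only -- not part of this patch. Simplified model of the
 * mlx5_cache_list register path; see mlx5_utils.c for the real code.
 */
#include <sys/queue.h>
#include <rte_atomic.h>
#include <rte_rwlock.h>

struct cache_entry_s {
	LIST_ENTRY(cache_entry_s) next; /* List linkage. */
	rte_atomic32_t ref_cnt;         /* Per-entry reference counter. */
};

struct cache_list_s {
	rte_rwlock_t lock;              /* Protects the list head. */
	LIST_HEAD(, cache_entry_s) head;
	/* User callbacks: match returns 0 on hit, create builds an entry. */
	int (*cb_match)(struct cache_list_s *, struct cache_entry_s *, void *);
	struct cache_entry_s *(*cb_create)(struct cache_list_s *,
					   struct cache_entry_s *, void *);
};

static struct cache_entry_s *
cache_register_sketch(struct cache_list_s *list, void *cb_ctx)
{
	struct cache_entry_s *entry;

	/* Fast path: shared lock, reuse a matching entry. */
	rte_rwlock_read_lock(&list->lock);
	LIST_FOREACH(entry, &list->head, next) {
		if (!list->cb_match(list, entry, cb_ctx)) {
			rte_atomic32_inc(&entry->ref_cnt);
			rte_rwlock_read_unlock(&list->lock);
			return entry;
		}
	}
	rte_rwlock_read_unlock(&list->lock);
	/* Slow path: exclusive lock, re-check for a racing insertion. */
	rte_rwlock_write_lock(&list->lock);
	LIST_FOREACH(entry, &list->head, next) {
		if (!list->cb_match(list, entry, cb_ctx)) {
			rte_atomic32_inc(&entry->ref_cnt);
			goto out;
		}
	}
	/* Still missing: create and insert while holding the write lock. */
	entry = list->cb_create(list, NULL, cb_ctx);
	if (entry) {
		rte_atomic32_set(&entry->ref_cnt, 1);
		LIST_INSERT_HEAD(&list->head, entry, next);
	}
out:
	rte_rwlock_write_unlock(&list->lock);
	return entry;
}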
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 9d1a5d7..b0dcb40 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1450,6 +1450,10 @@
 		err = ENOTSUP;
 		goto error;
 	}
+	mlx5_cache_list_init(&priv->hrxqs, "hrxq", 0, eth_dev,
+			     mlx5_hrxq_create_cb,
+			     mlx5_hrxq_match_cb,
+			     mlx5_hrxq_remove_cb);
 	/* Query availability of metadata reg_c's. */
 	err = mlx5_flow_discover_mreg_c(eth_dev);
 	if (err < 0) {
@@ -1502,6 +1506,7 @@
 		mlx5_drop_action_destroy(eth_dev);
 	if (own_domain_id)
 		claim_zero(rte_eth_switch_domain_free(priv->domain_id));
+	mlx5_cache_list_destroy(&priv->hrxqs);
 	mlx5_free(priv);
 	if (eth_dev != NULL)
 		eth_dev->data->dev_private = NULL;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index fa769cd..cacc799 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1276,6 +1276,7 @@ struct mlx5_dev_ctx_shared *
 	if (ret)
 		DRV_LOG(WARNING, "port %u some flows still remain",
 			dev->data->port_id);
+	mlx5_cache_list_destroy(&priv->hrxqs);
 	/*
 	 * Free the shared context in last turn, because the cleanup
 	 * routines above may use some shared fields, like
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 0e4917a..7157dbf 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -63,6 +63,13 @@ enum mlx5_reclaim_mem_mode {
 	MLX5_RCM_AGGR, /* Reclaim PMD and rdma-core level. */
 };
 
+/* Hash and cache list callback context. */
+struct mlx5_flow_cb_ctx {
+	struct rte_eth_dev *dev;
+	struct rte_flow_error *error;
+	void *data;
+};
+
 /* Device attributes used in mlx5 PMD */
 struct mlx5_dev_attr {
 	uint64_t	device_cap_flags_ex;
@@ -671,6 +678,18 @@ struct mlx5_proc_priv {
 /* MTR list. */
 TAILQ_HEAD(mlx5_flow_meters, mlx5_flow_meter);
 
+/* RSS description. */
+struct mlx5_flow_rss_desc {
+	uint32_t level;
+	uint32_t queue_num; /**< Number of entries in @p queue. */
+	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+	uint64_t hash_fields; /* Verbs Hash fields. */
+	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
+	uint32_t key_len; /**< RSS hash key len. */
+	uint32_t tunnel; /**< Queue in tunnel. */
+	uint16_t *queue; /**< Destination queues. */
+};
+
 #define MLX5_PROC_PRIV(port_id) \
 	((struct mlx5_proc_priv *)rte_eth_devices[port_id].process_private)
@@ -709,7 +728,7 @@ struct mlx5_ind_table_obj {
 
 /* Hash Rx queue. */
 struct mlx5_hrxq {
-	ILIST_ENTRY(uint32_t)next; /* Index to the next element. */
+	struct mlx5_cache_entry entry; /* Cache entry. */
 	rte_atomic32_t refcnt; /* Reference counter. */
 	struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
 	RTE_STD_C11
@@ -722,6 +741,7 @@ struct mlx5_hrxq {
 #endif
 	uint64_t hash_fields; /* Verbs Hash fields. */
 	uint32_t rss_key_len; /* Hash key length in bytes. */
+	uint32_t idx; /* Hash Rx queue index. */
 	uint8_t rss_key[]; /* Hash key. */
 };
 
@@ -835,7 +855,7 @@ struct mlx5_priv {
 	struct mlx5_obj_ops obj_ops; /* HW objects operations. */
 	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
 	LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
-	uint32_t hrxqs; /* Verbs Hash Rx queues. */
+	struct mlx5_cache_list hrxqs; /* Hash Rx queues. */
 	LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
 	LIST_HEAD(txqobj, mlx5_txq_obj) txqsobj; /* Verbs/DevX Tx queues. */
 	/* Indirection tables. */
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index fd53c4d..c332308 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -381,13 +381,6 @@ enum mlx5_flow_fate_type {
 	MLX5_FLOW_FATE_MAX,
 };
 
-/* Hash list callback context */
-struct mlx5_flow_cb_ctx {
-	struct rte_eth_dev *dev;
-	struct rte_flow_error *error;
-	void *data;
-};
-
 /* Matcher PRM representation */
 struct mlx5_flow_dv_match_params {
 	size_t size;
@@ -594,15 +587,6 @@ struct ibv_spec_header {
 	uint16_t size;
 };
 
-/* RSS description. */
-struct mlx5_flow_rss_desc {
-	uint32_t level;
-	uint32_t queue_num; /**< Number of entries in @p queue. */
-	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
-	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
-	uint16_t *queue; /**< Destination queues. */
-};
-
 /* PMD flow priority for tunnel */
 #define MLX5_TUNNEL_PRIO_GET(rss_desc) \
 	((rss_desc)->level >= 2 ? MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4)
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 92eb91f..2ddaf75 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -8350,7 +8350,7 @@ struct mlx5_hlist_entry *
 }
 
 /**
- * Create an Rx Hash queue.
+ * Prepare an Rx Hash queue.
  *
  * @param dev
  *   Pointer to Ethernet device.
@@ -8365,33 +8365,22 @@ struct mlx5_hlist_entry *
  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
  */
 static struct mlx5_hrxq *
-flow_dv_handle_rx_queue(struct rte_eth_dev *dev,
-			struct mlx5_flow *dev_flow,
-			struct mlx5_flow_rss_desc *rss_desc,
-			uint32_t *hrxq_idx)
+flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
+		     struct mlx5_flow *dev_flow,
+		     struct mlx5_flow_rss_desc *rss_desc,
+		     uint32_t *hrxq_idx)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_handle *dh = dev_flow->handle;
 	struct mlx5_hrxq *hrxq;
 
 	MLX5_ASSERT(rss_desc->queue_num);
-	*hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
-				  MLX5_RSS_HASH_KEY_LEN,
-				  dev_flow->hash_fields,
-				  rss_desc->queue,
-				  rss_desc->queue_num);
-	if (!*hrxq_idx) {
-		*hrxq_idx = mlx5_hrxq_new
-				(dev, rss_desc->key,
-				 MLX5_RSS_HASH_KEY_LEN,
-				 dev_flow->hash_fields,
-				 rss_desc->queue,
-				 rss_desc->queue_num,
-				 !!(dh->layers &
-				 MLX5_FLOW_LAYER_TUNNEL));
-		if (!*hrxq_idx)
-			return NULL;
-	}
+	rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
+	rss_desc->hash_fields = dev_flow->hash_fields;
+	rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
+	*hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
+	if (!*hrxq_idx)
+		return NULL;
 	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
 			      *hrxq_idx);
 	return hrxq;
@@ -8753,8 +8742,8 @@ struct mlx5_hlist_entry *
 			queue = sub_actions->conf;
 			rss_desc->queue_num = 1;
 			rss_desc->queue[0] = queue->index;
-			hrxq = flow_dv_handle_rx_queue(dev, dev_flow,
-					rss_desc, &hrxq_idx);
+			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
+					rss_desc, &hrxq_idx);
 			if (!hrxq)
 				return rte_flow_error_set
					(error, rte_errno,
@@ -8951,8 +8940,8 @@ struct mlx5_hlist_entry *
 	if (num_of_dest > 1) {
 		if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
 			/* Handle QP action for mirroring */
-			hrxq = flow_dv_handle_rx_queue(dev, dev_flow,
-						       rss_desc, &hrxq_idx);
+			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
+						    rss_desc, &hrxq_idx);
 			if (!hrxq)
 				return rte_flow_error_set
					(error, rte_errno,
@@ -9952,21 +9941,11 @@ struct mlx5_hlist_entry *
				[!!wks->flow_nested_idx];
 
 		MLX5_ASSERT(rss_desc->queue_num);
-		hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
-					 MLX5_RSS_HASH_KEY_LEN,
-					 dev_flow->hash_fields,
-					 rss_desc->queue,
-					 rss_desc->queue_num);
-		if (!hrxq_idx) {
-			hrxq_idx = mlx5_hrxq_new
-					(dev, rss_desc->key,
-					 MLX5_RSS_HASH_KEY_LEN,
-					 dev_flow->hash_fields,
-					 rss_desc->queue,
-					 rss_desc->queue_num,
-					 !!(dh->layers &
-					 MLX5_FLOW_LAYER_TUNNEL));
-		}
+		rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
+		rss_desc->hash_fields = dev_flow->hash_fields;
+		rss_desc->tunnel = !!(dh->layers &
+				      MLX5_FLOW_LAYER_TUNNEL);
+		hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
 		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
 				      hrxq_idx);
 		if (!hrxq) {
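With flow_dv_hrxq_prepare() above, both flow engines converge on one call
shape: fill the lookup key fields into the rss_desc and let mlx5_hrxq_get()
find or create the queue in a single registration. A sketch of that
caller-side pattern (the helper name is hypothetical, the calls are the
ones this patch introduces):

/* Sketch only: the caller-side pattern shared by flow_dv and flow_verbs. */
static uint32_t
hrxq_lookup_sketch(struct rte_eth_dev *dev,
		   struct mlx5_flow_rss_desc *rss_desc,
		   uint64_t hash_fields, int tunnel)
{
	/* The descriptor itself now carries the whole lookup key... */
	rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
	rss_desc->hash_fields = hash_fields;
	rss_desc->tunnel = !!tunnel;
	/* ...so a single call finds an existing hrxq or creates one. */
	return mlx5_hrxq_get(dev, rss_desc);
}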
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index cedec4e..0f3056e 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1963,20 +1963,11 @@
 			&wks->rss_desc[!!wks->flow_nested_idx];
 
 		MLX5_ASSERT(rss_desc->queue_num);
-		hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
-					 MLX5_RSS_HASH_KEY_LEN,
-					 dev_flow->hash_fields,
-					 rss_desc->queue,
-					 rss_desc->queue_num);
-		if (!hrxq_idx)
-			hrxq_idx = mlx5_hrxq_new
-				(dev, rss_desc->key,
-				 MLX5_RSS_HASH_KEY_LEN,
-				 dev_flow->hash_fields,
-				 rss_desc->queue,
-				 rss_desc->queue_num,
-				 !!(handle->layers &
-				 MLX5_FLOW_LAYER_TUNNEL));
+		rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
+		rss_desc->hash_fields = dev_flow->hash_fields;
+		rss_desc->tunnel = !!(handle->layers &
+				      MLX5_FLOW_LAYER_TUNNEL);
+		hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
 		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
 				      hrxq_idx);
 		if (!hrxq) {
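The Rx queue changes below rely on the match callback contract: 0 means
"reuse this entry", any nonzero value means "no match", so boolean
comparisons can be returned directly. A toy shape of such a callback
(reduced to one field for brevity; not the patch code):

/* Toy match callback: 0 = reuse entry, nonzero = keep searching. */
static int
match_cb_shape(struct mlx5_cache_list *list __rte_unused,
	       struct mlx5_cache_entry *entry, void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx; /* Generic -> typed context. */
	struct mlx5_flow_rss_desc *rss_desc = ctx->data;
	struct mlx5_hrxq *hrxq = container_of(entry, struct mlx5_hrxq, entry);

	/* A boolean "differs" test maps straight onto the contract. */
	return hrxq->hash_fields != rss_desc->hash_fields;
}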
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index ca1625e..09290c0 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1870,156 +1870,167 @@ struct mlx5_ind_table_obj *
 }
 
 /**
- * Get an Rx Hash queue.
+ * Match an Rx Hash queue.
  *
- * @param dev
- *   Pointer to Ethernet device.
- * @param rss_conf
- *   RSS configuration for the Rx hash queue.
- * @param queues
- *   Queues entering in hash queue. In case of empty hash_fields only the
- *   first queue index will be taken for the indirection table.
- * @param queues_n
- *   Number of queues.
+ * @param list
+ *   Cache list pointer.
+ * @param entry
+ *   Hash queue entry pointer.
+ * @param cb_ctx
+ *   Context of the callback function.
  *
  * @return
- *   An hash Rx queue index on success.
+ *   0 if match, nonzero otherwise.
  */
-uint32_t
-mlx5_hrxq_get(struct rte_eth_dev *dev,
-	      const uint8_t *rss_key, uint32_t rss_key_len,
-	      uint64_t hash_fields,
-	      const uint16_t *queues, uint32_t queues_n)
+int
+mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
+		   struct mlx5_cache_entry *entry,
+		   void *cb_ctx)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_hrxq *hrxq;
-	uint32_t idx;
-
-	queues_n = hash_fields ? queues_n : 1;
-	ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
-		      hrxq, next) {
-		struct mlx5_ind_table_obj *ind_tbl;
+	struct rte_eth_dev *dev = list->ctx;
+	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+	struct mlx5_flow_rss_desc *rss_desc = ctx->data;
+	struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
+	struct mlx5_ind_table_obj *ind_tbl;
+	uint32_t queues_n;
 
-		if (hrxq->rss_key_len != rss_key_len)
-			continue;
-		if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
-			continue;
-		if (hrxq->hash_fields != hash_fields)
-			continue;
-		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
-		if (!ind_tbl)
-			continue;
-		if (ind_tbl != hrxq->ind_table) {
-			mlx5_ind_table_obj_release(dev, ind_tbl);
-			continue;
-		}
-		rte_atomic32_inc(&hrxq->refcnt);
-		return idx;
-	}
-	return 0;
+	if (hrxq->rss_key_len != rss_desc->key_len ||
+	    memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||
+	    hrxq->hash_fields != rss_desc->hash_fields)
+		return 1;
+	queues_n = rss_desc->hash_fields ? rss_desc->queue_num : 1;
+	ind_tbl = mlx5_ind_table_obj_get(dev, rss_desc->queue, queues_n);
+	if (ind_tbl)
+		mlx5_ind_table_obj_release(dev, ind_tbl);
+	return ind_tbl != hrxq->ind_table;
 }
 
 /**
- * Release the hash Rx queue.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param hrxq
- *   Index to Hash Rx queue to release.
+ * Remove the Rx Hash queue.
  *
- * @return
- *   1 while a reference on it exists, 0 when freed.
+ * @param list
+ *   Cache list pointer.
+ * @param entry
+ *   Hash queue entry pointer.
  */
-int
-mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
+void
+mlx5_hrxq_remove_cb(struct mlx5_cache_list *list,
+		    struct mlx5_cache_entry *entry)
 {
+	struct rte_eth_dev *dev = list->ctx;
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_hrxq *hrxq;
+	struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
 
-	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
-	if (!hrxq)
-		return 0;
-	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-		mlx5_glue->destroy_flow_action(hrxq->action);
+	mlx5_glue->destroy_flow_action(hrxq->action);
 #endif
-		priv->obj_ops.hrxq_destroy(hrxq);
-		mlx5_ind_table_obj_release(dev, hrxq->ind_table);
-		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
-			     hrxq_idx, hrxq, next);
-		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
-		return 0;
-	}
-	claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
-	return 1;
+	priv->obj_ops.hrxq_destroy(hrxq);
+	mlx5_ind_table_obj_release(dev, hrxq->ind_table);
+	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
 }
 
 /**
  * Create an Rx Hash queue.
  *
- * @param dev
- *   Pointer to Ethernet device.
- * @param rss_key
- *   RSS key for the Rx hash queue.
- * @param rss_key_len
- *   RSS key length.
- * @param hash_fields
- *   Verbs protocol hash field to make the RSS on.
- * @param queues
- *   Queues entering in hash queue. In case of empty hash_fields only the
- *   first queue index will be taken for the indirection table.
- * @param queues_n
- *   Number of queues.
- * @param tunnel
- *   Tunnel type.
+ * @param list
+ *   Cache list pointer.
+ * @param entry
+ *   Hash queue entry pointer.
+ * @param cb_ctx
+ *   Context of the callback function.
  *
  * @return
- *   The DevX object initialized index, 0 otherwise and rte_errno is set.
+ *   Queue entry on success, NULL otherwise.
  */
-uint32_t
-mlx5_hrxq_new(struct rte_eth_dev *dev,
-	      const uint8_t *rss_key, uint32_t rss_key_len,
-	      uint64_t hash_fields,
-	      const uint16_t *queues, uint32_t queues_n,
-	      int tunnel __rte_unused)
+struct mlx5_cache_entry *
+mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
+		    struct mlx5_cache_entry *entry __rte_unused,
+		    void *cb_ctx)
 {
+	struct rte_eth_dev *dev = list->ctx;
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+	struct mlx5_flow_rss_desc *rss_desc = ctx->data;
+	const uint8_t *rss_key = rss_desc->key;
+	uint32_t rss_key_len = rss_desc->key_len;
+	const uint16_t *queues = rss_desc->queue;
+	uint32_t queues_n = rss_desc->queue_num;
 	struct mlx5_hrxq *hrxq = NULL;
 	uint32_t hrxq_idx = 0;
 	struct mlx5_ind_table_obj *ind_tbl;
 	int ret;
 
-	queues_n = hash_fields ? queues_n : 1;
+	queues_n = rss_desc->hash_fields ? queues_n : 1;
 	ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
 	if (!ind_tbl)
 		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
-	if (!ind_tbl) {
-		rte_errno = ENOMEM;
-		return 0;
-	}
+	if (!ind_tbl)
+		return NULL;
 	hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
 	if (!hrxq)
 		goto error;
+	hrxq->idx = hrxq_idx;
 	hrxq->ind_table = ind_tbl;
 	hrxq->rss_key_len = rss_key_len;
-	hrxq->hash_fields = hash_fields;
+	hrxq->hash_fields = rss_desc->hash_fields;
 	memcpy(hrxq->rss_key, rss_key, rss_key_len);
-	ret = priv->obj_ops.hrxq_new(dev, hrxq, tunnel);
-	if (ret < 0) {
-		rte_errno = errno;
+	ret = priv->obj_ops.hrxq_new(dev, hrxq, rss_desc->tunnel);
+	if (ret < 0)
 		goto error;
-	}
-	rte_atomic32_inc(&hrxq->refcnt);
-	ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
-		     hrxq, next);
-	return hrxq_idx;
+	return &hrxq->entry;
 error:
-	ret = rte_errno; /* Save rte_errno before cleanup. */
 	mlx5_ind_table_obj_release(dev, ind_tbl);
 	if (hrxq)
 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
-	rte_errno = ret; /* Restore rte_errno. */
-	return 0;
+	return NULL;
+}
+
+/**
+ * Get an Rx Hash queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param rss_desc
+ *   RSS configuration for the Rx hash queue.
+ *
+ * @return
+ *   A hash Rx queue index on success.
+ */
+uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
+		       struct mlx5_flow_rss_desc *rss_desc)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hrxq *hrxq;
+	struct mlx5_cache_entry *entry;
+	struct mlx5_flow_cb_ctx ctx = {
+		.data = rss_desc,
+	};
+
+	entry = mlx5_cache_register(&priv->hrxqs, &ctx);
+	if (!entry)
+		return 0;
+	hrxq = container_of(entry, typeof(*hrxq), entry);
+	return hrxq->idx;
+}
+
+/**
+ * Release the hash Rx queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param hrxq_idx
+ *   Index to Hash Rx queue to release.
+ *
+ * @return
+ *   1 while a reference on it exists, 0 when freed.
+ */
+int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hrxq *hrxq;
+
+	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+	return mlx5_cache_unregister(&priv->hrxqs, &hrxq->entry);
 }
 
 /**
@@ -2103,22 +2114,12 @@ struct mlx5_hrxq *
  *
  * @return
  *   The number of object not released.
  */
-int
+uint32_t
 mlx5_hrxq_verify(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_hrxq *hrxq;
-	uint32_t idx;
-	int ret = 0;
 
-	ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
-		      hrxq, next) {
-		DRV_LOG(DEBUG,
-			"port %u hash Rx queue %p still referenced",
-			dev->data->port_id, (void *)hrxq);
-		++ret;
-	}
-	return ret;
+	return mlx5_cache_list_get_entry_num(&priv->hrxqs);
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 0eafa22..a7196cf 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -335,17 +335,19 @@ struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
						  uint32_t queues_n);
 int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
			       struct mlx5_ind_table_obj *ind_tbl);
-uint32_t mlx5_hrxq_new(struct rte_eth_dev *dev,
-		       const uint8_t *rss_key, uint32_t rss_key_len,
-		       uint64_t hash_fields,
-		       const uint16_t *queues, uint32_t queues_n,
-		       int tunnel __rte_unused);
+struct mlx5_cache_entry *mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
+		struct mlx5_cache_entry *entry __rte_unused, void *cb_ctx);
+int mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
+		       struct mlx5_cache_entry *entry,
+		       void *cb_ctx);
+void mlx5_hrxq_remove_cb(struct mlx5_cache_list *list,
+			 struct mlx5_cache_entry *entry);
 uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
-		       const uint8_t *rss_key, uint32_t rss_key_len,
-		       uint64_t hash_fields,
-		       const uint16_t *queues, uint32_t queues_n);
+		       struct mlx5_flow_rss_desc *rss_desc);
 int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hxrq_idx);
-int mlx5_hrxq_verify(struct rte_eth_dev *dev);
+uint32_t mlx5_hrxq_verify(struct rte_eth_dev *dev);
+
+
 enum mlx5_rxq_type mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx);
 struct mlx5_hrxq *mlx5_drop_action_create(struct rte_eth_dev *dev);
 void mlx5_drop_action_destroy(struct rte_eth_dev *dev);
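Taken together, the hrxq lifecycle seen by flow code reduces to a
get/release pair on the cache list. A sketch under the same assumptions,
error handling trimmed and the helper name hypothetical:

/* Sketch only: hrxq lifecycle after this patch, error paths trimmed. */
static void
hrxq_lifecycle_sketch(struct rte_eth_dev *dev,
		      struct mlx5_flow_rss_desc *rss_desc)
{
	uint32_t hrxq_idx;

	/* Find-or-create; safe against concurrent threads. */
	hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
	if (!hrxq_idx)
		return;
	/* ... attach hrxq_idx to a flow handle and run traffic ... */
	/* Drop the reference; the last owner triggers mlx5_hrxq_remove_cb(). */
	mlx5_hrxq_release(dev, hrxq_idx);
}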