[v2] net/mlx5: fix flow workspace destruction
Checks
Commit Message
From: Gregory Etelson <getelson@nvidia.com>
The PMD uses a pthread key to allocate and access per-thread flow
workspace memory buffers.
The PMD registered a key destructor function to clean up flow workspace
buffers. However, the key destructor was not called by the pthread
library.
The patch keeps track of per-thread flow workspaces in PMD.
Flow workspace memory release is activated from the PMD destructor.
Additionally, with this patch the workspace buffer and the RSS queue
array are allocated in a single memory chunk. The maximal number of
queues, RTE_ETH_RSS_RETA_SIZE_512, is chosen. The workspace
adjustment can then be removed, avoiding the software hiccups of:
1. realloc and content copy
2. spinlock acquire and release
Fixes: 5d55a494f4e6 ("net/mlx5: split multi-thread flow handling per OS")
Cc: stable@dpdk.org
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Signed-off-by: Bing Zhao <bingz@nvidia.com>
---
v2: fix typo in the commit message and remove the needless NULL
pointer initialization for static variable.
---
drivers/net/mlx5/linux/mlx5_flow_os.c | 2 +-
drivers/net/mlx5/mlx5.c | 1 +
drivers/net/mlx5/mlx5_flow.c | 76 +++++++++++----------------
drivers/net/mlx5/mlx5_flow.h | 4 +-
4 files changed, 36 insertions(+), 47 deletions(-)
Comments
From: Bing Zhao <bingz@nvidia.com>
> PMD uses pthread key to allocate and access per thread flow workspace
> memory buffers.
>
> PMD registered a key destructor function to clean up flow workspace buffers.
> However, the key destructor was not called by the pthread library.
>
> The patch keeps track of per-thread flow workspaces in PMD.
> Flow workspaces memory release is activated from PMD destructor.
>
> In the meanwhile, workspace buffer and RSS queues array are allocated in a
> single memory chunk with this patch. The maximal number of queues
> RTE_ETH_RSS_RETA_SIZE_512 is chosen. Then the workspace adjustment can
> be removed to reduce the software hiccup:
> 1. realloc and content copy
> 2. spinlock acquire and release
>
> Fixes: 5d55a494f4e6 ("net/mlx5: split multi-thread flow handling per OS")
> Cc: stable@dpdk.org
>
> Signed-off-by: Gregory Etelson <getelson@nvidia.com>
> Signed-off-by: Bing Zhao <bingz@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
@@ -51,7 +51,7 @@ mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
int
mlx5_flow_os_init_workspace_once(void)
{
- if (rte_thread_key_create(&key_workspace, flow_release_workspace)) {
+ if (rte_thread_key_create(&key_workspace, NULL)) {
DRV_LOG(ERR, "Can't create flow workspace data thread key.");
rte_errno = ENOMEM;
return -rte_errno;
@@ -1838,6 +1838,7 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
mlx5_os_net_cleanup();
mlx5_flow_os_release_workspace();
+ mlx5_flow_workspace_gc_release();
}
pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
if (sh->flex_parsers_dv) {
@@ -7155,36 +7155,6 @@ flow_tunnel_from_rule(const struct mlx5_flow *flow)
return tunnel;
}
-/**
- * Adjust flow RSS workspace if needed.
- *
- * @param wks
- * Pointer to thread flow work space.
- * @param rss_desc
- * Pointer to RSS descriptor.
- * @param[in] nrssq_num
- * New RSS queue number.
- *
- * @return
- * 0 on success, -1 otherwise and rte_errno is set.
- */
-static int
-flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks,
- struct mlx5_flow_rss_desc *rss_desc,
- uint32_t nrssq_num)
-{
- if (likely(nrssq_num <= wks->rssq_num))
- return 0;
- rss_desc->queue = realloc(rss_desc->queue,
- sizeof(*rss_desc->queue) * RTE_ALIGN(nrssq_num, 2));
- if (!rss_desc->queue) {
- rte_errno = ENOMEM;
- return -1;
- }
- wks->rssq_num = RTE_ALIGN(nrssq_num, 2);
- return 0;
-}
-
/**
* Create a flow and add it to @p list.
*
@@ -7303,8 +7273,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
if (attr->ingress)
rss = flow_get_rss_action(dev, p_actions_rx);
if (rss) {
- if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num))
- return 0;
+ MLX5_ASSERT(rss->queue_num <= RTE_ETH_RSS_RETA_SIZE_512);
/*
* The following information is required by
* mlx5_flow_hashfields_adjust() in advance.
@@ -8072,12 +8041,34 @@ flow_release_workspace(void *data)
while (wks) {
next = wks->next;
- free(wks->rss_desc.queue);
free(wks);
wks = next;
}
}
+static struct mlx5_flow_workspace *gc_head;
+static rte_spinlock_t mlx5_flow_workspace_lock = RTE_SPINLOCK_INITIALIZER;
+
+static void
+mlx5_flow_workspace_gc_add(struct mlx5_flow_workspace *ws)
+{
+ rte_spinlock_lock(&mlx5_flow_workspace_lock);
+ ws->gc = gc_head;
+ gc_head = ws;
+ rte_spinlock_unlock(&mlx5_flow_workspace_lock);
+}
+
+void
+mlx5_flow_workspace_gc_release(void)
+{
+ while (gc_head) {
+ struct mlx5_flow_workspace *wks = gc_head;
+
+ gc_head = wks->gc;
+ flow_release_workspace(wks);
+ }
+}
+
/**
* Get thread specific current flow workspace.
*
@@ -8103,23 +8094,17 @@ mlx5_flow_get_thread_workspace(void)
static struct mlx5_flow_workspace*
flow_alloc_thread_workspace(void)
{
- struct mlx5_flow_workspace *data = calloc(1, sizeof(*data));
+ size_t data_size = RTE_ALIGN(sizeof(struct mlx5_flow_workspace), sizeof(long));
+ size_t rss_queue_array_size = sizeof(uint16_t) * RTE_ETH_RSS_RETA_SIZE_512;
+ struct mlx5_flow_workspace *data = calloc(1, data_size +
+ rss_queue_array_size);
if (!data) {
- DRV_LOG(ERR, "Failed to allocate flow workspace "
- "memory.");
+ DRV_LOG(ERR, "Failed to allocate flow workspace memory.");
return NULL;
}
- data->rss_desc.queue = calloc(1,
- sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
- if (!data->rss_desc.queue)
- goto err;
- data->rssq_num = MLX5_RSSQ_DEFAULT_NUM;
+ data->rss_desc.queue = RTE_PTR_ADD(data, data_size);
return data;
-err:
- free(data->rss_desc.queue);
- free(data);
- return NULL;
}
/**
@@ -8140,6 +8125,7 @@ mlx5_flow_push_thread_workspace(void)
data = flow_alloc_thread_workspace();
if (!data)
return NULL;
+ mlx5_flow_workspace_gc_add(data);
} else if (!curr->inuse) {
data = curr;
} else if (curr->next) {
@@ -1496,10 +1496,10 @@ struct mlx5_flow_workspace {
/* If creating another flow in same thread, push new as stack. */
struct mlx5_flow_workspace *prev;
struct mlx5_flow_workspace *next;
+ struct mlx5_flow_workspace *gc;
uint32_t inuse; /* can't create new flow with current. */
struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS];
struct mlx5_flow_rss_desc rss_desc;
- uint32_t rssq_num; /* Allocated queue num in rss_desc. */
uint32_t flow_idx; /* Intermediate device flow index. */
struct mlx5_flow_meter_info *fm; /* Pointer to the meter in flow. */
struct mlx5_flow_meter_policy *policy;
@@ -2022,6 +2022,8 @@ struct mlx5_flow_driver_ops {
struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
void mlx5_flow_pop_thread_workspace(void);
struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void);
+void mlx5_flow_workspace_gc_release(void);
+
__extension__
struct flow_grp_info {
uint64_t external:1;