net/mlx5: fix the workspace double free in Windows
Checks
Commit Message
The thread-specific workspace data indicated by "key_workspace"
should be freed explicitly when closing a device. For example, in
Linux, when an application exits, its threads may not exit
explicitly, so the per-thread resources are never destructed.
The commit that solved this introduced a global list to manage the
workspace resources as a garbage collector. This collector is also
executed in Windows, but there the workspaces have already been freed
in mlx5_flow_os_release_workspace(), causing a double free.
With this commit, the garbage collector is only executed in Linux.
The workspace resource management in Windows remains the same, with
stub functions added where needed.
Fixes: dc7c5e0aa905 ("net/mlx5: fix flow workspace destruction")
Cc: getelson@nvidia.com
Cc: stable@dpdk.org
Signed-off-by: Bing Zhao <bingz@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
drivers/net/mlx5/linux/mlx5_flow_os.c | 25 +++++++++++++++++++++++++
drivers/net/mlx5/linux/mlx5_flow_os.h | 8 ++++++++
drivers/net/mlx5/mlx5.c | 1 -
drivers/net/mlx5/mlx5_flow.c | 25 +------------------------
drivers/net/mlx5/mlx5_flow.h | 1 -
drivers/net/mlx5/windows/mlx5_flow_os.c | 6 ++++++
drivers/net/mlx5/windows/mlx5_flow_os.h | 8 ++++++++
7 files changed, 48 insertions(+), 26 deletions(-)
@@ -8,6 +8,10 @@
/* Key of thread specific flow workspace data. */
static rte_thread_key key_workspace;
+/* Flow workspace global list head for garbage collector. */
+static struct mlx5_flow_workspace *gc_head;
+/* Spinlock for operating flow workspace list. */
+static rte_spinlock_t mlx5_flow_workspace_lock = RTE_SPINLOCK_INITIALIZER;
int
mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
@@ -48,6 +52,26 @@ mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
return 0;
}
+void
+mlx5_flow_os_workspace_gc_add(struct mlx5_flow_workspace *ws)
+{
+ rte_spinlock_lock(&mlx5_flow_workspace_lock);
+ ws->gc = gc_head;
+ gc_head = ws;
+ rte_spinlock_unlock(&mlx5_flow_workspace_lock);
+}
+
+static void
+mlx5_flow_os_workspace_gc_release(void)
+{
+ while (gc_head) {
+ struct mlx5_flow_workspace *wks = gc_head;
+
+ gc_head = wks->gc;
+ flow_release_workspace(wks);
+ }
+}
+
int
mlx5_flow_os_init_workspace_once(void)
{
@@ -75,4 +99,5 @@ void
mlx5_flow_os_release_workspace(void)
{
rte_thread_key_delete(key_workspace);
+ mlx5_flow_os_workspace_gc_release();
}
@@ -526,4 +526,12 @@ mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
uint8_t target_protocol,
struct rte_flow_error *error);
+/**
+ * Add per thread workspace to the global list for garbage collection.
+ *
+ * @param[in] ws
+ * Pointer to the flow workspace.
+ */
+void mlx5_flow_os_workspace_gc_add(struct mlx5_flow_workspace *ws);
+
#endif /* RTE_PMD_MLX5_FLOW_OS_H_ */
@@ -1851,7 +1851,6 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
mlx5_os_net_cleanup();
mlx5_flow_os_release_workspace();
- mlx5_flow_workspace_gc_release();
}
pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
if (sh->flex_parsers_dv) {
@@ -8093,29 +8093,6 @@ flow_release_workspace(void *data)
}
}
-static struct mlx5_flow_workspace *gc_head;
-static rte_spinlock_t mlx5_flow_workspace_lock = RTE_SPINLOCK_INITIALIZER;
-
-static void
-mlx5_flow_workspace_gc_add(struct mlx5_flow_workspace *ws)
-{
- rte_spinlock_lock(&mlx5_flow_workspace_lock);
- ws->gc = gc_head;
- gc_head = ws;
- rte_spinlock_unlock(&mlx5_flow_workspace_lock);
-}
-
-void
-mlx5_flow_workspace_gc_release(void)
-{
- while (gc_head) {
- struct mlx5_flow_workspace *wks = gc_head;
-
- gc_head = wks->gc;
- flow_release_workspace(wks);
- }
-}
-
/**
* Get thread specific current flow workspace.
*
@@ -8172,7 +8149,7 @@ mlx5_flow_push_thread_workspace(void)
data = flow_alloc_thread_workspace();
if (!data)
return NULL;
- mlx5_flow_workspace_gc_add(data);
+ mlx5_flow_os_workspace_gc_add(data);
} else if (!curr->inuse) {
data = curr;
} else if (curr->next) {
@@ -2032,7 +2032,6 @@ struct mlx5_flow_driver_ops {
struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
void mlx5_flow_pop_thread_workspace(void);
struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void);
-void mlx5_flow_workspace_gc_release(void);
__extension__
struct flow_grp_info {
@@ -417,6 +417,12 @@ mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data)
return err;
}
+void
+mlx5_flow_os_workspace_gc_add(struct mlx5_flow_workspace *ws)
+{
+ RTE_SET_USED(ws);
+}
+
int
mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
uint64_t item_flags,
@@ -473,4 +473,12 @@ mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
uint8_t target_protocol,
struct rte_flow_error *error);
+/**
+ * Add per thread workspace to the global list for garbage collection.
+ *
+ * @param[in] ws
+ * Pointer to the flow workspace.
+ */
+void mlx5_flow_os_workspace_gc_add(struct mlx5_flow_workspace *ws);
+
#endif /* RTE_PMD_MLX5_FLOW_OS_H_ */