@@ -1354,13 +1354,13 @@
mlx5_flow_ext_mreg_supported(eth_dev) &&
priv->sh->dv_regc0_mask) {
priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
- MLX5_FLOW_MREG_HTABLE_SZ,
- 0, false,
- NULL, NULL, NULL);
+ MLX5_FLOW_MREG_HTABLE_SZ, 0, false,
+ flow_dv_mreg_create_cb, NULL, flow_dv_mreg_remove_cb);
if (!priv->mreg_cp_tbl) {
err = ENOMEM;
goto error;
}
+ priv->mreg_cp_tbl->ctx = eth_dev;
}
return eth_dev;
error:
@@ -2950,36 +2950,17 @@ struct mlx5_flow_tunnel_info {
flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
uint32_t flow_idx);
-/**
- * Add a flow of copying flow metadata registers in RX_CP_TBL.
- *
- * As mark_id is unique, if there's already a registered flow for the mark_id,
- * return by increasing the reference counter of the resource. Otherwise, create
- * the resource (mcp_res) and flow.
- *
- * Flow looks like,
- * - If ingress port is ANY and reg_c[1] is mark_id,
- * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
- *
- * For default flow (zero mark_id), flow is like,
- * - If ingress port is ANY,
- * reg_b := reg_c[0] and jump to RX_ACT_TBL.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param mark_id
- * ID of MARK action, zero means default flow for META.
- * @param[out] error
- * Perform verbose error reporting if not NULL.
- *
- * @return
- * Associated resource on success, NULL otherwise and rte_errno is set.
- */
-static struct mlx5_flow_mreg_copy_resource *
-flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
- struct rte_flow_error *error)
+struct mlx5_hlist_entry *
+flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key,
+ void *cb_ctx)
{
+ struct rte_eth_dev *dev = list->ctx;
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_mreg_copy_resource *mcp_res;
+ uint32_t idx = 0;
+ int ret;
+ uint32_t mark_id = key;
struct rte_flow_attr attr = {
.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
.ingress = 1,
@@ -3003,30 +2984,22 @@ struct mlx5_flow_tunnel_info {
struct rte_flow_action actions[] = {
[3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
};
- struct mlx5_flow_mreg_copy_resource *mcp_res;
- uint32_t idx = 0;
- int ret;
/* Fill the register fileds in the flow. */
- ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
+ ret = mlx5_flow_get_reg_id(ctx->dev, MLX5_FLOW_MARK, 0, ctx->error);
if (ret < 0)
return NULL;
tag_spec.id = ret;
- ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
+ ret = mlx5_flow_get_reg_id(ctx->dev, MLX5_METADATA_RX, 0, ctx->error);
if (ret < 0)
return NULL;
cp_mreg.src = ret;
- /* Check if already registered. */
- MLX5_ASSERT(priv->mreg_cp_tbl);
- mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, NULL);
- if (mcp_res) {
- /* For non-default rule. */
- if (mark_id != MLX5_DEFAULT_COPY_ID)
- mcp_res->refcnt++;
- MLX5_ASSERT(mark_id != MLX5_DEFAULT_COPY_ID ||
- mcp_res->refcnt == 1);
- return mcp_res;
+ mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
+ if (!mcp_res) {
+ rte_errno = ENOMEM;
+ return NULL;
}
+ mcp_res->idx = idx;
/* Provide the full width of FLAG specific value. */
if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT))
tag_spec.data = MLX5_FLOW_MARK_DEFAULT;
@@ -3076,39 +3049,68 @@ struct mlx5_flow_tunnel_info {
.type = RTE_FLOW_ACTION_TYPE_END,
};
}
- /* Build a new entry. */
- mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
- if (!mcp_res) {
- rte_errno = ENOMEM;
- return NULL;
- }
- mcp_res->idx = idx;
/*
* The copy Flows are not included in any list. There
* ones are referenced from other Flows and can not
- * be applied, removed, deleted in ardbitrary order
+ * be applied, removed, deleted in arbitrary order
* by list traversing.
*/
- mcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items,
- actions, false, error);
- if (!mcp_res->rix_flow)
- goto error;
- mcp_res->refcnt++;
- mcp_res->hlist_ent.key = mark_id;
- ret = !mlx5_hlist_insert(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
- MLX5_ASSERT(!ret);
- if (ret)
- goto error;
- return mcp_res;
-error:
- if (mcp_res->rix_flow)
- flow_list_destroy(dev, NULL, mcp_res->rix_flow);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
- return NULL;
+ mcp_res->rix_flow = flow_list_create(ctx->dev, NULL, &attr, items,
+ actions, false, ctx->error);
+ if (!mcp_res->rix_flow) {
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx);
+ return NULL;
+ }
+ return &mcp_res->hlist_ent;
}
/**
- * Release flow in RX_CP_TBL.
+ * Add a flow of copying flow metadata registers in RX_CP_TBL.
+ *
+ * As mark_id is unique, if there's already a registered flow for the mark_id,
+ * return by increasing the reference counter of the resource. Otherwise, create
+ * the resource (mcp_res) and flow.
+ *
+ * Flow looks like,
+ * - If ingress port is ANY and reg_c[1] is mark_id,
+ * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
+ *
+ * For default flow (zero mark_id), flow is like,
+ * - If ingress port is ANY,
+ * reg_b := reg_c[0] and jump to RX_ACT_TBL.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mark_id
+ * ID of MARK action, zero means default flow for META.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * Associated resource on success, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_flow_mreg_copy_resource *
+flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hlist_entry *entry;
+ struct mlx5_flow_cb_ctx ctx = {
+ .dev = dev,
+ .error = error,
+ };
+
+ /* Check if already registered. */
+ MLX5_ASSERT(priv->mreg_cp_tbl);
+ entry = mlx5_hlist_register(priv->mreg_cp_tbl, mark_id, &ctx);
+ if (!entry)
+ return NULL;
+ return container_of(entry, struct mlx5_flow_mreg_copy_resource,
+ hlist_ent);
+}
+
+/**
+ * Stop flow in RX_CP_TBL.
*
* @param dev
* Pointer to Ethernet device.
@@ -3116,117 +3118,102 @@ struct mlx5_flow_tunnel_info {
* Parent flow for wich copying is provided.
*/
static void
-flow_mreg_del_copy_action(struct rte_eth_dev *dev,
- struct rte_flow *flow)
+flow_mreg_stop_copy_action(struct rte_eth_dev *dev,
+ struct rte_flow *flow)
{
struct mlx5_flow_mreg_copy_resource *mcp_res;
struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow *mcp_flow;
- if (!flow->rix_mreg_copy)
+ if (!flow->rix_mreg_copy || !flow->copy_applied)
return;
mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
flow->rix_mreg_copy);
- if (!mcp_res || !priv->mreg_cp_tbl)
+ if (!mcp_res)
return;
- if (flow->copy_applied) {
- MLX5_ASSERT(mcp_res->appcnt);
- flow->copy_applied = 0;
- --mcp_res->appcnt;
- if (!mcp_res->appcnt) {
- struct rte_flow *mcp_flow = mlx5_ipool_get
- (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
- mcp_res->rix_flow);
-
- if (mcp_flow)
- flow_drv_remove(dev, mcp_flow);
- }
- }
- /*
- * We do not check availability of metadata registers here,
- * because copy resources are not allocated in this case.
- */
- if (--mcp_res->refcnt)
+	flow->copy_applied = 0;
+	if (__atomic_sub_fetch(&mcp_res->appcnt, 1, __ATOMIC_ACQ_REL))
	return;
+	mcp_flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
+				  mcp_res->rix_flow);
+	if (mcp_flow)
+		flow_drv_remove(dev, mcp_flow);
+}
+
+void
+flow_dv_mreg_remove_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry)
+{
+ struct mlx5_flow_mreg_copy_resource *mcp_res =
+ container_of(entry, typeof(*mcp_res), hlist_ent);
+ struct rte_eth_dev *dev = list->ctx;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
MLX5_ASSERT(mcp_res->rix_flow);
flow_list_destroy(dev, NULL, mcp_res->rix_flow);
- mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
- flow->rix_mreg_copy = 0;
}
/**
- * Start flow in RX_CP_TBL.
+ * Release flow in RX_CP_TBL.
*
* @param dev
* Pointer to Ethernet device.
* @flow
* Parent flow for wich copying is provided.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-flow_mreg_start_copy_action(struct rte_eth_dev *dev,
- struct rte_flow *flow)
+static void
+flow_mreg_del_copy_action(struct rte_eth_dev *dev, struct rte_flow *flow)
{
struct mlx5_flow_mreg_copy_resource *mcp_res;
struct mlx5_priv *priv = dev->data->dev_private;
- int ret;
- if (!flow->rix_mreg_copy || flow->copy_applied)
- return 0;
+ if (!flow->rix_mreg_copy)
+ return;
mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
flow->rix_mreg_copy);
- if (!mcp_res)
- return 0;
- if (!mcp_res->appcnt) {
- struct rte_flow *mcp_flow = mlx5_ipool_get
- (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
- mcp_res->rix_flow);
-
- if (mcp_flow) {
- ret = flow_drv_apply(dev, mcp_flow, NULL);
- if (ret)
- return ret;
- }
- }
- ++mcp_res->appcnt;
- flow->copy_applied = 1;
- return 0;
+ if (!mcp_res || !priv->mreg_cp_tbl)
+ return;
+ flow_mreg_stop_copy_action(dev, flow);
+ mlx5_hlist_unregister(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
}
/**
- * Stop flow in RX_CP_TBL.
+ * Start flow in RX_CP_TBL.
*
* @param dev
* Pointer to Ethernet device.
* @flow
* Parent flow for wich copying is provided.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static void
-flow_mreg_stop_copy_action(struct rte_eth_dev *dev,
- struct rte_flow *flow)
+static int
+flow_mreg_start_copy_action(struct rte_eth_dev *dev,
+ struct rte_flow *flow)
{
struct mlx5_flow_mreg_copy_resource *mcp_res;
struct mlx5_priv *priv = dev->data->dev_private;
+ int ret;
- if (!flow->rix_mreg_copy || !flow->copy_applied)
- return;
+ if (!flow->rix_mreg_copy || flow->copy_applied)
+ return 0;
mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
flow->rix_mreg_copy);
if (!mcp_res)
- return;
- MLX5_ASSERT(mcp_res->appcnt);
- --mcp_res->appcnt;
- flow->copy_applied = 0;
- if (!mcp_res->appcnt) {
- struct rte_flow *mcp_flow = mlx5_ipool_get
- (priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
- mcp_res->rix_flow);
-
- if (mcp_flow)
- flow_drv_remove(dev, mcp_flow);
+ return 0;
+	if (__atomic_fetch_add(&mcp_res->appcnt, 1, __ATOMIC_ACQ_REL)) {
+		flow->copy_applied = 1;
+		return 0;
+	}
+	struct rte_flow *mcp_flow = mlx5_ipool_get
+		(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], mcp_res->rix_flow);
+	if (mcp_flow && (ret = flow_drv_apply(dev, mcp_flow, NULL))) {
+		__atomic_sub_fetch(&mcp_res->appcnt, 1, __ATOMIC_ACQ_REL);
+		return ret;
}
+ flow->copy_applied = 1;
+ return 0;
}
/**
@@ -3238,20 +3225,17 @@ struct mlx5_flow_tunnel_info {
static void
flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
{
- struct mlx5_flow_mreg_copy_resource *mcp_res;
+ struct mlx5_hlist_entry *entry;
struct mlx5_priv *priv = dev->data->dev_private;
/* Check if default flow is registered. */
if (!priv->mreg_cp_tbl)
return;
- mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl,
- MLX5_DEFAULT_COPY_ID, NULL);
- if (!mcp_res)
+ entry = mlx5_hlist_lookup(priv->mreg_cp_tbl,
+ MLX5_DEFAULT_COPY_ID, NULL);
+ if (!entry)
return;
- MLX5_ASSERT(mcp_res->rix_flow);
- flow_list_destroy(dev, NULL, mcp_res->rix_flow);
- mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
+ mlx5_hlist_unregister(priv->mreg_cp_tbl, entry);
}
/**
@@ -3345,7 +3329,8 @@ struct mlx5_flow_tunnel_info {
return -rte_errno;
flow->rix_mreg_copy = mcp_res->idx;
if (dev->data->dev_started) {
- mcp_res->appcnt++;
+ __atomic_add_fetch(&mcp_res->appcnt, 1,
+ __ATOMIC_RELAXED);
flow->copy_applied = 1;
}
return 0;
@@ -3358,7 +3343,8 @@ struct mlx5_flow_tunnel_info {
return -rte_errno;
flow->rix_mreg_copy = mcp_res->idx;
if (dev->data->dev_started) {
- mcp_res->appcnt++;
+ __atomic_add_fetch(&mcp_res->appcnt, 1,
+ __ATOMIC_RELAXED);
flow->copy_applied = 1;
}
return 0;
@@ -508,7 +508,6 @@ struct mlx5_flow_mreg_copy_resource {
struct mlx5_hlist_entry hlist_ent;
LIST_ENTRY(mlx5_flow_mreg_copy_resource) next;
/* List entry for device flows. */
- uint32_t refcnt; /* Reference counter. */
uint32_t appcnt; /* Apply/Remove counter. */
uint32_t idx;
uint32_t rix_flow; /* Built flow for copy. */
@@ -1101,4 +1100,9 @@ struct mlx5_hlist_entry *flow_dv_modify_create_cb(struct mlx5_hlist *list,
void flow_dv_modify_remove_cb(struct mlx5_hlist *list,
struct mlx5_hlist_entry *entry);
+struct mlx5_hlist_entry *flow_dv_mreg_create_cb(struct mlx5_hlist *list,
+ uint64_t key, void *ctx);
+void flow_dv_mreg_remove_cb(struct mlx5_hlist *list,
+ struct mlx5_hlist_entry *entry);
+
#endif /* RTE_PMD_MLX5_FLOW_H_ */