From patchwork Tue Nov  5 08:01:55 2019
X-Patchwork-Submitter: Slava Ovsiienko
X-Patchwork-Id: 62440
X-Patchwork-Delegate: rasland@nvidia.com
From: Viacheslav Ovsiienko
To: dev@dpdk.org
Cc: matan@mellanox.com, rasland@mellanox.com, thomas@monjalon.net,
 orika@mellanox.com, Yongseok Koh
Date: Tue, 5 Nov 2019 08:01:55 +0000
Message-Id: <1572940915-29416-21-git-send-email-viacheslavo@mellanox.com>
In-Reply-To: <1572940915-29416-1-git-send-email-viacheslavo@mellanox.com>
References: <1572940915-29416-1-git-send-email-viacheslavo@mellanox.com>
Subject: [dpdk-dev] [PATCH 20/20] net/mlx5: add metadata register copy table

While reg_c[meta] can be copied to reg_b simply by a modify-header action
(this is supported by hardware), it is not possible to copy reg_c[mark] to
the STE flow_tag, as flow_tag is not a metadata register and such a copy
is not supported by hardware. Instead, flow_tag must be set manually by a
flow per each unique MARK ID. For this purpose there should be a dedicated
flow table - RX_CP_TBL - and all Rx flows should pass through this table
to properly copy values from the register to the flow tag field.
And for each MARK action, a copy flow should be added to RX_CP_TBL
according to the MARK ID, like:

  (if reg_c[mark] == mark_id),
  flow_tag := mark_id / reg_b := reg_c[meta] / jump to RX_ACT_TBL

For the SET_META action, there can be only one default flow, like:

  reg_b := reg_c[meta] / jump to RX_ACT_TBL

Signed-off-by: Yongseok Koh
Signed-off-by: Viacheslav Ovsiienko
---
 drivers/net/mlx5/mlx5.h         |   7 +-
 drivers/net/mlx5/mlx5_defs.h    |   3 +
 drivers/net/mlx5/mlx5_flow.c    | 432 +++++++++++++++++++++++++++++++++++++++-
 drivers/net/mlx5/mlx5_flow.h    |  19 ++
 drivers/net/mlx5/mlx5_flow_dv.c |  10 +-
 5 files changed, 464 insertions(+), 7 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 9c1a88a..470778b 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -567,8 +567,9 @@ struct mlx5_flow_tbl_resource {
 #define MLX5_HAIRPIN_TX_TABLE (UINT16_MAX - 1)
 /* Reserve the last two tables for metadata register copy. */
 #define MLX5_FLOW_MREG_ACT_TABLE_GROUP (MLX5_MAX_TABLES - 1)
-#define MLX5_FLOW_MREG_CP_TABLE_GROUP \
-	(MLX5_FLOW_MREG_ACT_TABLE_GROUP - 1)
+#define MLX5_FLOW_MREG_CP_TABLE_GROUP (MLX5_MAX_TABLES - 2)
+/* Tables for metering splits should be added here. */
+#define MLX5_MAX_TABLES_EXTERNAL (MLX5_MAX_TABLES - 3)
 #define MLX5_MAX_TABLES_FDB UINT16_MAX
 
 #define MLX5_DBR_PAGE_SIZE 4096 /* Must be >= 512. */
@@ -734,6 +735,8 @@ struct mlx5_priv {
 	LIST_HEAD(dbrpage, mlx5_devx_dbr_page) dbrpgs; /* Door-bell pages. */
 	struct mlx5_vlan_vmwa_context *vmwa_context; /* VLAN WA context. */
 	struct mlx5_flow_id_pool *qrss_id_pool;
+	struct mlx5_shtable_bucket mreg_cp_tbl[MLX5_FLOW_MREG_HTABLE_SZ];
+	/* Hash table of Rx metadata register copy table. */
 #ifndef RTE_ARCH_64
 	rte_spinlock_t uar_lock_cq; /* CQs share a common distinct UAR */
 	rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX];
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index a77c430..fa2009c 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -145,6 +145,9 @@
 #define MLX5_XMETA_MODE_META16 1
 #define MLX5_XMETA_MODE_META32 2
 
+/* Size of the simple hash table for metadata register table. */
+#define MLX5_FLOW_MREG_HTABLE_SZ 1024
+
 /* Definition of static_assert found in /usr/include/assert.h */
 #ifndef HAVE_STATIC_ASSERT
 #define static_assert _Static_assert
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 0ed8308..2f6cd92 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -667,7 +667,17 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
 
-		if (mark) {
+		/*
+		 * To support metadata register copy on Tx loopback,
+		 * this must be always enabled (metadata may arrive
+		 * from other port - not from local flows only).
+		 */
+		if (priv->config.dv_flow_en &&
+		    priv->config.dv_xmeta_en &&
+		    mlx5_flow_ext_mreg_supported(dev)) {
+			rxq_ctrl->rxq.mark = 1;
+			rxq_ctrl->flow_mark_n = 1;
+		} else if (mark) {
 			rxq_ctrl->rxq.mark = 1;
 			rxq_ctrl->flow_mark_n++;
 		}
@@ -731,7 +741,12 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
 
-		if (mark) {
+		if (priv->config.dv_flow_en &&
+		    priv->config.dv_xmeta_en &&
+		    mlx5_flow_ext_mreg_supported(dev)) {
+			rxq_ctrl->rxq.mark = 1;
+			rxq_ctrl->flow_mark_n = 1;
+		} else if (mark) {
 			rxq_ctrl->flow_mark_n--;
 			rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
 		}
@@ -2727,6 +2742,392 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	return 0;
 }
 
+/* Declare flow create/destroy prototype in advance. */
+static struct rte_flow *
+flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item items[],
+		 const struct rte_flow_action actions[],
+		 bool external, struct rte_flow_error *error);
+
+static void
+flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
+		  struct rte_flow *flow);
+
+/**
+ * Add a flow of copying flow metadata registers in RX_CP_TBL.
+ *
+ * As mark_id is unique, if there's already a registered flow for the mark_id,
+ * return by increasing the reference counter of the resource. Otherwise,
+ * create the resource (mcp_res) and flow.
+ *
+ * Flow looks like,
+ *   - If ingress port is ANY and reg_c[1] is mark_id,
+ *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
+ *
+ * For default flow (zero mark_id), flow is like,
+ *   - If ingress port is ANY,
+ *     reg_b := reg_c[0] and jump to RX_ACT_TBL.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param mark_id
+ *   ID of MARK action, zero means default flow for META.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   Associated resource on success, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_flow_mreg_copy_resource *
+flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
+			  struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct rte_flow_attr attr = {
+		.group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
+		.ingress = 1,
+	};
+	struct mlx5_rte_flow_item_tag tag_spec = {
+		.data = mark_id,
+	};
+	struct rte_flow_item items[] = {
+		[1] = { .type = RTE_FLOW_ITEM_TYPE_END, },
+	};
+	struct rte_flow_action_mark ftag = {
+		.id = mark_id,
+	};
+	struct mlx5_flow_action_copy_mreg cp_mreg = {
+		.dst = REG_B,
+		.src = 0,
+	};
+	struct rte_flow_action_jump jump = {
+		.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
+	};
+	struct rte_flow_action actions[] = {
+		[3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
+	};
+	struct mlx5_flow_mreg_copy_resource *mcp_res;
+	int ret;
+
+	/* Fill the register fields in the flow. */
+	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
+	if (ret < 0)
+		return NULL;
+	tag_spec.id = ret;
+	ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
+	if (ret < 0)
+		return NULL;
+	cp_mreg.src = ret;
+	/* Check if already registered. */
+	mcp_res = (void *)mlx5_shtable_search(priv->mreg_cp_tbl,
+					      RTE_DIM(priv->mreg_cp_tbl),
+					      mark_id);
+	if (mcp_res) {
+		/* For non-default rule. */
+		if (mark_id)
+			mcp_res->refcnt++;
+		assert(mark_id || mcp_res->refcnt == 1);
+		return mcp_res;
+	}
+	/* Provide the full width of FLAG specific value.
*/ + if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT)) + tag_spec.data = MLX5_FLOW_MARK_DEFAULT; + /* Build a new flow. */ + if (mark_id) { + items[0] = (struct rte_flow_item){ + .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG, + .spec = &tag_spec, + }; + items[1] = (struct rte_flow_item){ + .type = RTE_FLOW_ITEM_TYPE_END, + }; + actions[0] = (struct rte_flow_action){ + .type = MLX5_RTE_FLOW_ACTION_TYPE_MARK, + .conf = &ftag, + }; + actions[1] = (struct rte_flow_action){ + .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, + .conf = &cp_mreg, + }; + actions[2] = (struct rte_flow_action){ + .type = RTE_FLOW_ACTION_TYPE_JUMP, + .conf = &jump, + }; + actions[3] = (struct rte_flow_action){ + .type = RTE_FLOW_ACTION_TYPE_END, + }; + } else { + /* Default rule, wildcard match. */ + attr.priority = MLX5_FLOW_PRIO_RSVD; + items[0] = (struct rte_flow_item){ + .type = RTE_FLOW_ITEM_TYPE_END, + }; + actions[0] = (struct rte_flow_action){ + .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, + .conf = &cp_mreg, + }; + actions[1] = (struct rte_flow_action){ + .type = RTE_FLOW_ACTION_TYPE_JUMP, + .conf = &jump, + }; + actions[2] = (struct rte_flow_action){ + .type = RTE_FLOW_ACTION_TYPE_END, + }; + } + /* Build a new entry. */ + mcp_res = rte_zmalloc(__func__, sizeof(*mcp_res), 0); + if (!mcp_res) { + rte_errno = ENOMEM; + return NULL; + } + /* + * The copy Flows are not included in any list. There + * ones are referenced from other Flows and can not + * be applied, removed, deleted in ardbitrary order + * by list traversing. + */ + mcp_res->flow = flow_list_create(dev, NULL, &attr, items, + actions, false, error); + if (!mcp_res->flow) + goto error; + mcp_res->refcnt++; + mcp_res->htbl_ent.key = mark_id; + ret = mlx5_shtable_insert(priv->mreg_cp_tbl, + RTE_DIM(priv->mreg_cp_tbl), + &mcp_res->htbl_ent); + assert(!ret); + return mcp_res; +error: + rte_free(mcp_res); + return NULL; +} + +/** + * Release flow in RX_CP_TBL. + * + * @param dev + * Pointer to Ethernet device. + * @flow + * Parent flow for wich copying is provided. + */ +static void +flow_mreg_del_copy_action(struct rte_eth_dev *dev, + struct rte_flow *flow) +{ + struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; + + if (!mcp_res) + return; + if (flow->copy_applied) { + assert(mcp_res->appcnt); + flow->copy_applied = 0; + --mcp_res->appcnt; + if (!mcp_res->appcnt) + flow_drv_remove(dev, mcp_res->flow); + } + /* + * We do not check availability of metadata registers here, + * because copy resources are allocated in this case. + */ + if (--mcp_res->refcnt) + return; + flow_list_destroy(dev, NULL, mcp_res->flow); + mlx5_shtable_remove(&mcp_res->htbl_ent); + rte_free(mcp_res); + flow->mreg_copy = NULL; +} + +/** + * Start flow in RX_CP_TBL. + * + * @param dev + * Pointer to Ethernet device. + * @flow + * Parent flow for wich copying is provided. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +flow_mreg_start_copy_action(struct rte_eth_dev *dev, + struct rte_flow *flow) +{ + struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy; + int ret; + + if (!mcp_res || flow->copy_applied) + return 0; + if (!mcp_res->appcnt) { + ret = flow_drv_apply(dev, mcp_res->flow, NULL); + if (ret) + return ret; + } + ++mcp_res->appcnt; + flow->copy_applied = 1; + return 0; +} + +/** + * Stop flow in RX_CP_TBL. + * + * @param dev + * Pointer to Ethernet device. + * @flow + * Parent flow for wich copying is provided. 
+ */
+static void
+flow_mreg_stop_copy_action(struct rte_eth_dev *dev,
+			   struct rte_flow *flow)
+{
+	struct mlx5_flow_mreg_copy_resource *mcp_res = flow->mreg_copy;
+
+	if (!mcp_res || !flow->copy_applied)
+		return;
+	assert(mcp_res->appcnt);
+	--mcp_res->appcnt;
+	flow->copy_applied = 0;
+	if (!mcp_res->appcnt)
+		flow_drv_remove(dev, mcp_res->flow);
+}
+
+/**
+ * Remove the default copy action from RX_CP_TBL.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+static void
+flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
+{
+	struct mlx5_flow_mreg_copy_resource *mcp_res;
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	/* Check if default flow is registered. */
+	mcp_res = (void *)mlx5_shtable_search(priv->mreg_cp_tbl,
+					      RTE_DIM(priv->mreg_cp_tbl), 0);
+	if (!mcp_res)
+		return;
+	flow_list_destroy(dev, NULL, mcp_res->flow);
+	mlx5_shtable_remove(&mcp_res->htbl_ent);
+	rte_free(mcp_res);
+}
+
+/**
+ * Add the default copy action in RX_CP_TBL.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   0 for success, negative value otherwise and rte_errno is set.
+ */
+static int
+flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
+				  struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_mreg_copy_resource *mcp_res;
+
+	/* Check whether extensive metadata feature is engaged. */
+	if (!priv->config.dv_flow_en ||
+	    !priv->config.dv_xmeta_en ||
+	    !mlx5_flow_ext_mreg_supported(dev) ||
+	    !priv->sh->dv_regc0_mask)
+		return 0;
+	mcp_res = flow_mreg_add_copy_action(dev, 0, error);
+	if (!mcp_res)
+		return -rte_errno;
+	return 0;
+}
+
+/**
+ * Add a flow of copying flow metadata registers in RX_CP_TBL.
+ *
+ * All the flows having a Q/RSS action should be split by
+ * flow_mreg_split_qrss_prep() to pass through RX_CP_TBL. A flow in the
+ * RX_CP_TBL performs the following,
+ *   - CQE->flow_tag := reg_c[1] (MARK)
+ *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
+ * As CQE's flow_tag is not a register, it can't be simply copied from
+ * reg_c[1], but there should be a flow per each MARK ID set by MARK action.
+ *
+ * For the aforementioned reason, if there's a MARK action in flow's action
+ * list, a corresponding flow should be added to the RX_CP_TBL in order to
+ * copy the MARK ID to CQE's flow_tag like,
+ *   - If reg_c[1] is mark_id,
+ *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
+ *
+ * For SET_META action which stores value in reg_c[0], as the destination is
+ * also a flow metadata register (reg_b), adding a default flow is enough.
+ * Zero MARK ID means the default flow. The default flow looks like,
+ *   - For all flows, reg_b := reg_c[0] and jump to RX_ACT_TBL.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param flow
+ *   Pointer to flow structure.
+ * @param[in] actions
+ *   Pointer to the list of actions.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   0 on success, negative value otherwise and rte_errno is set.
+ */
+static int
+flow_mreg_update_copy_table(struct rte_eth_dev *dev,
+			    struct rte_flow *flow,
+			    const struct rte_flow_action *actions,
+			    struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_config *config = &priv->config;
+	struct mlx5_flow_mreg_copy_resource *mcp_res;
+	const struct rte_flow_action_mark *mark;
+
+	/* Check whether extensive metadata feature is engaged.
*/ + if (!config->dv_flow_en || + !config->dv_xmeta_en || + !mlx5_flow_ext_mreg_supported(dev) || + !priv->sh->dv_regc0_mask) + return 0; + /* Find MARK action. */ + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_FLAG: + mcp_res = flow_mreg_add_copy_action + (dev, MLX5_FLOW_MARK_DEFAULT, error); + if (!mcp_res) + return -rte_errno; + flow->mreg_copy = mcp_res; + if (dev->data->dev_started) { + mcp_res->appcnt++; + flow->copy_applied = 1; + } + return 0; + case RTE_FLOW_ACTION_TYPE_MARK: + mark = (const struct rte_flow_action_mark *) + actions->conf; + mcp_res = + flow_mreg_add_copy_action(dev, mark->id, error); + if (!mcp_res) + return -rte_errno; + flow->mreg_copy = mcp_res; + if (dev->data->dev_started) { + mcp_res->appcnt++; + flow->copy_applied = 1; + } + return 0; + default: + break; + } + } + return 0; +} + #define MLX5_MAX_SPLIT_ACTIONS 24 #define MLX5_MAX_SPLIT_ITEMS 24 @@ -3450,6 +3851,17 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, if (ret < 0) goto error; } + /* + * Update the metadata register copy table. If extensive + * metadata feature is enabled and registers are supported + * we might create the extra rte_flow for each unique + * MARK/FLAG action ID. + */ + if (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP) { + ret = flow_mreg_update_copy_table(dev, flow, actions, error); + if (ret) + goto error; + } if (dev->data->dev_started) { ret = flow_drv_apply(dev, flow, error); if (ret < 0) @@ -3465,6 +3877,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, hairpin_id); return NULL; error: + assert(flow); + flow_mreg_del_copy_action(dev, flow); ret = rte_errno; /* Save rte_errno before cleanup. */ if (flow->hairpin_flow_id) mlx5_flow_id_release(priv->sh->flow_id_pool, @@ -3573,6 +3987,7 @@ struct rte_flow * flow_drv_destroy(dev, flow); if (list) TAILQ_REMOVE(list, flow, next); + flow_mreg_del_copy_action(dev, flow); rte_free(flow->fdir); rte_free(flow); } @@ -3609,8 +4024,11 @@ struct rte_flow * { struct rte_flow *flow; - TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) + TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) { flow_drv_remove(dev, flow); + flow_mreg_stop_copy_action(dev, flow); + } + flow_mreg_del_default_copy_action(dev); flow_rxq_flags_clear(dev); } @@ -3632,7 +4050,15 @@ struct rte_flow * struct rte_flow_error error; int ret = 0; + /* Make sure default copy action (reg_c[0] -> reg_b) is created. */ + ret = flow_mreg_add_default_copy_action(dev, &error); + if (ret < 0) + return -rte_errno; + /* Apply Flows created by application. */ TAILQ_FOREACH(flow, list, next) { + ret = flow_mreg_start_copy_action(dev, flow); + if (ret < 0) + goto error; ret = flow_drv_apply(dev, flow, &error); if (ret < 0) goto error; diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index c71938b..1fa5fb9 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -38,6 +38,7 @@ enum mlx5_rte_flow_item_type { enum mlx5_rte_flow_action_type { MLX5_RTE_FLOW_ACTION_TYPE_END = INT_MIN, MLX5_RTE_FLOW_ACTION_TYPE_TAG, + MLX5_RTE_FLOW_ACTION_TYPE_MARK, MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, }; @@ -417,6 +418,21 @@ struct mlx5_flow_dv_push_vlan_action_resource { rte_be32_t vlan_tag; /**< VLAN tag value. */ }; +/* Metadata register copy table entry. */ +struct mlx5_flow_mreg_copy_resource { + /* + * Hash table for copy table. + * - Key is 32-bit MARK action ID. + * - MUST be the first entry. 
+	 */
+	struct mlx5_shtable_entry htbl_ent;
+	LIST_ENTRY(mlx5_flow_mreg_copy_resource) next;
+	/* List entry for device flows. */
+	uint32_t refcnt; /* Reference counter. */
+	uint32_t appcnt; /* Apply/Remove counter. */
+	struct rte_flow *flow; /* Built flow for copy. */
+};
+
 /*
  * Max number of actions per DV flow.
  * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
@@ -510,10 +526,13 @@ struct rte_flow {
 	enum mlx5_flow_drv_type drv_type; /**< Driver type. */
 	struct mlx5_flow_rss rss; /**< RSS context. */
 	struct mlx5_flow_counter *counter; /**< Holds flow counter. */
+	struct mlx5_flow_mreg_copy_resource *mreg_copy;
+	/**< Pointer to metadata register copy table resource. */
 	LIST_HEAD(dev_flows, mlx5_flow) dev_flows;
 	/**< Device flows that are part of the flow. */
 	struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */
 	uint32_t hairpin_flow_id; /**< The flow id used for hairpin. */
+	uint32_t copy_applied:1; /**< The MARK copy Flow is applied. */
 };
 
 typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 19f58cb..6a7e612 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -4064,8 +4064,11 @@ struct field_modify_info modify_tcp[] = {
 						  NULL,
 						  "groups are not supported");
 #else
-	uint32_t max_group = attributes->transfer ? MLX5_MAX_TABLES_FDB :
-						    MLX5_MAX_TABLES;
+	uint32_t max_group = attributes->transfer ?
+			     MLX5_MAX_TABLES_FDB :
+				external ?
+				MLX5_MAX_TABLES_EXTERNAL :
+				MLX5_MAX_TABLES;
 	uint32_t table;
 	int ret;
 
@@ -4672,6 +4675,7 @@ struct field_modify_info modify_tcp[] = {
 					MLX5_FLOW_ACTION_DEC_TCP_ACK;
 			break;
 		case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
+		case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
 		case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
 			break;
 		default:
@@ -6508,6 +6512,8 @@ struct field_modify_info modify_tcp[] = {
 				action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
 				break;
 			}
+			/* Fall-through */
+		case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
 			/* Legacy (non-extensive) MARK action. */
 			tag_resource.tag = mlx5_flow_mark_set
 				(((const struct rte_flow_action_mark *)
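
For reviewers, a minimal application-side sketch of the rule that exercises this
path is given below. It is illustrative only and not part of the patch: the
port_id, the MARK ID 0x10 and the queue index are arbitrary, and the port is
assumed to be probed with the mlx5 devargs dv_flow_en=1 and dv_xmeta_en set to a
non-zero mode, so that the PMD is expected to install a matching copy flow
(reg_c[1] == 0x10 -> flow_tag := 0x10, reg_b := reg_c[0], jump to RX_ACT_TBL)
in RX_CP_TBL behind the scenes.

#include <rte_flow.h>

/* Illustrative only: create an ingress rule with a MARK action.
 * With extensive metadata enabled, the mlx5 PMD should add a copy
 * flow for this MARK ID to RX_CP_TBL via flow_mreg_update_copy_table().
 */
static struct rte_flow *
create_marked_flow(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_mark mark = { .id = 0x10 };
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* The PMD splits/augments this flow internally; on Rx the CQE is
	 * expected to carry flow_tag == 0x10 for matched packets.
	 */
	return rte_flow_create(port_id, &attr, pattern, actions, error);
}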