From patchwork Wed Oct 24 12:36:13 2018 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Shahaf Shuler X-Patchwork-Id: 47321 X-Patchwork-Delegate: shahafs@mellanox.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 5169F1B1FF; Wed, 24 Oct 2018 14:38:51 +0200 (CEST) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id EAA431B1FD for ; Wed, 24 Oct 2018 14:38:49 +0200 (CEST) Received: from Internal Mail-Server by MTLPINE1 (envelope-from shahafs@mellanox.com) with ESMTPS (AES256-SHA encrypted); 24 Oct 2018 14:43:54 +0200 Received: from unicorn01.mtl.labs.mlnx. (unicorn01.mtl.labs.mlnx [10.7.12.62]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id w9OCaLct001458; Wed, 24 Oct 2018 15:38:46 +0300 From: Shahaf Shuler To: shahafs@mellanox.com Cc: dev@dpdk.org, yskoh@mellanox.com Date: Wed, 24 Oct 2018 15:36:13 +0300 Message-Id: <808a0ac238cd8a9924e7cc881a18f42c421197ee.1540384475.git.shahafs@mellanox.com> X-Mailer: git-send-email 2.12.0 In-Reply-To: References: Subject: [dpdk-dev] [PATCH 1/3] net/mlx5: rename static functions X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Yongseok Koh In mlx5_flow*.c, static functions have names starting from 'flow_' while shared ones start from "mlx5_flow_'. 
Signed-off-by: Yongseok Koh --- drivers/net/mlx5/mlx5_flow.c | 62 +++++++++++++++++++-------------------- 1 file changed, 30 insertions(+), 32 deletions(-) diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 6219c9d68a..68eb7da3f6 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -505,7 +505,7 @@ mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow, * Rx queue to update. */ static void -mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) +flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) { unsigned int i; uint32_t tunnel_ptype = 0; @@ -533,7 +533,7 @@ mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) * Pointer to flow structure. */ static void -mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) +flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) { struct priv *priv = dev->data->dev_private; const int mark = !!(flow->actions & @@ -562,7 +562,7 @@ mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) break; } } - mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl); + flow_rxq_tunnel_ptype_update(rxq_ctrl); } } } @@ -577,7 +577,7 @@ mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) * Pointer to the flow. */ static void -mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) +flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) { struct priv *priv = dev->data->dev_private; const int mark = !!(flow->actions & @@ -607,7 +607,7 @@ mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) break; } } - mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl); + flow_rxq_tunnel_ptype_update(rxq_ctrl); } } } @@ -619,7 +619,7 @@ mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) * Pointer to Ethernet device. 
*/ static void -mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev) +flow_rxq_flags_clear(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; unsigned int i; @@ -1914,7 +1914,7 @@ mlx5_flow_validate(struct rte_eth_dev *dev, * Pointer to the RSS action if exist, else return NULL. */ static const struct rte_flow_action_rss* -mlx5_flow_get_rss_action(const struct rte_flow_action actions[]) +flow_get_rss_action(const struct rte_flow_action actions[]) { for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { switch (actions->type) { @@ -1929,7 +1929,7 @@ mlx5_flow_get_rss_action(const struct rte_flow_action actions[]) } static unsigned int -mlx5_find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) +find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) { const struct rte_flow_item *item; unsigned int has_vlan = 0; @@ -1967,12 +1967,11 @@ mlx5_find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level) * A flow on success, NULL otherwise and rte_errno is set. 
*/ static struct rte_flow * -mlx5_flow_list_create(struct rte_eth_dev *dev, - struct mlx5_flows *list, - const struct rte_flow_attr *attr, - const struct rte_flow_item items[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) +flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, + const struct rte_flow_attr *attr, + const struct rte_flow_item items[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) { struct rte_flow *flow = NULL; struct mlx5_flow *dev_flow; @@ -1992,7 +1991,7 @@ mlx5_flow_list_create(struct rte_eth_dev *dev, if (ret < 0) return NULL; flow_size = sizeof(struct rte_flow); - rss = mlx5_flow_get_rss_action(actions); + rss = flow_get_rss_action(actions); if (rss) flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t), sizeof(void *)); @@ -2007,7 +2006,7 @@ mlx5_flow_list_create(struct rte_eth_dev *dev, if (rss && rss->types) { unsigned int graph_root; - graph_root = mlx5_find_graph_root(items, rss->level); + graph_root = find_graph_root(items, rss->level); ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer), items, rss->types, mlx5_support_expansion, @@ -2038,7 +2037,7 @@ mlx5_flow_list_create(struct rte_eth_dev *dev, goto error; } TAILQ_INSERT_TAIL(list, flow, next); - mlx5_flow_rxq_flags_set(dev, flow); + flow_rxq_flags_set(dev, flow); return flow; error: ret = rte_errno; /* Save rte_errno before cleanup. */ @@ -2062,9 +2061,9 @@ mlx5_flow_create(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { - return mlx5_flow_list_create - (dev, &((struct priv *)dev->data->dev_private)->flows, - attr, items, actions, error); + return flow_list_create(dev, + &((struct priv *)dev->data->dev_private)->flows, + attr, items, actions, error); } /** @@ -2078,8 +2077,8 @@ mlx5_flow_create(struct rte_eth_dev *dev, * Flow to destroy. 
*/ static void -mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, - struct rte_flow *flow) +flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, + struct rte_flow *flow) { flow_drv_destroy(dev, flow); TAILQ_REMOVE(list, flow, next); @@ -2088,7 +2087,7 @@ mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, * already clean. */ if (dev->data->dev_started) - mlx5_flow_rxq_flags_trim(dev, flow); + flow_rxq_flags_trim(dev, flow); rte_free(flow); } @@ -2107,7 +2106,7 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list) struct rte_flow *flow; flow = TAILQ_FIRST(list); - mlx5_flow_list_destroy(dev, list, flow); + flow_list_destroy(dev, list, flow); } } @@ -2126,7 +2125,7 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) flow_drv_remove(dev, flow); - mlx5_flow_rxq_flags_clear(dev); + flow_rxq_flags_clear(dev); } /** @@ -2151,7 +2150,7 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) ret = flow_drv_apply(dev, flow, &error); if (ret < 0) goto error; - mlx5_flow_rxq_flags_set(dev, flow); + flow_rxq_flags_set(dev, flow); } return 0; error: @@ -2260,8 +2259,8 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev, } for (i = 0; i != priv->reta_idx_n; ++i) queue[i] = (*priv->reta_idx)[i]; - flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items, - actions, &error); + flow = flow_list_create(dev, &priv->ctrl_flows, + &attr, items, actions, &error); if (!flow) return -rte_errno; return 0; @@ -2301,7 +2300,7 @@ mlx5_flow_destroy(struct rte_eth_dev *dev, { struct priv *priv = dev->data->dev_private; - mlx5_flow_list_destroy(dev, &priv->flows, flow); + flow_list_destroy(dev, &priv->flows, flow); return 0; } @@ -2609,9 +2608,8 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev, ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes); if (ret) return ret; - flow = mlx5_flow_list_create(dev, &priv->flows, 
&attributes.attr, - attributes.items, attributes.actions, - &error); + flow = flow_list_create(dev, &priv->flows, &attributes.attr, + attributes.items, attributes.actions, &error); if (flow) { DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id, (void *)flow); From patchwork Wed Oct 24 12:36:14 2018 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Shahaf Shuler X-Patchwork-Id: 47323 X-Patchwork-Delegate: shahafs@mellanox.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 898DF1B212; Wed, 24 Oct 2018 14:38:55 +0200 (CEST) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id F0D0D1B1FF for ; Wed, 24 Oct 2018 14:38:49 +0200 (CEST) Received: from Internal Mail-Server by MTLPINE1 (envelope-from shahafs@mellanox.com) with ESMTPS (AES256-SHA encrypted); 24 Oct 2018 14:43:54 +0200 Received: from unicorn01.mtl.labs.mlnx. (unicorn01.mtl.labs.mlnx [10.7.12.62]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id w9OCaLcu001458; Wed, 24 Oct 2018 15:38:46 +0300 From: Shahaf Shuler To: shahafs@mellanox.com Cc: dev@dpdk.org, yskoh@mellanox.com, orika@mellanox.com Date: Wed, 24 Oct 2018 15:36:14 +0300 Message-Id: <8493774f50d4044312dfda70f0a096d135e6e3ea.1540384475.git.shahafs@mellanox.com> X-Mailer: git-send-email 2.12.0 In-Reply-To: References: Subject: [dpdk-dev] [PATCH 2/3] net/mlx5: fix flow tunnel handling X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Yongseok Koh Both rte_flow and mlx5_flow redundantly have item flags. And it is not properly set in the code. This causes wrong tunnel flag handling. 
A rte_flow can have multiple expanded device flows if the flow has an RSS action. Therefore, mlx5_flow should have the layers field. Fixes: c4d9b9f7f382 ("net/mlx5: add Direct Verbs final functions") Fixes: fc2c498ccb94 ("net/mlx5: add Direct Verbs translate items") Fixes: 3d69434113d1 ("net/mlx5: add Direct Verbs validation function") Fixes: 84c406e74524 ("net/mlx5: add flow translate function") Fixes: 4e05a229c5da ("net/mlx5: add flow prepare function") Fixes: 23c1d42c7138 ("net/mlx5: split flow validation to dedicated function") Cc: orika@mellanox.com Signed-off-by: Yongseok Koh --- drivers/net/mlx5/mlx5_flow.c | 69 +++++++++++++++++++++++++++------ drivers/net/mlx5/mlx5_flow.h | 8 ++-- drivers/net/mlx5/mlx5_flow_dv.c | 12 +++--- drivers/net/mlx5/mlx5_flow_verbs.c | 15 ++++--- 4 files changed, 77 insertions(+), 27 deletions(-) diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 68eb7da3f6..0d9a03b632 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -525,20 +525,22 @@ flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl) } /** - * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow. + * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device + * flow. * * @param[in] dev * Pointer to the Ethernet device structure. - * @param[in] flow - * Pointer to flow structure. + * @param[in] dev_flow + * Pointer to device flow structure. 
*/ static void -flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) +flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) { struct priv *priv = dev->data->dev_private; + struct rte_flow *flow = dev_flow->flow; const int mark = !!(flow->actions & (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); - const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL); unsigned int i; for (i = 0; i != flow->rss.queue_num; ++i) { @@ -556,7 +558,8 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) /* Increase the counter matching the flow. */ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { - if ((tunnels_info[j].tunnel & flow->layers) == + if ((tunnels_info[j].tunnel & + dev_flow->layers) == tunnels_info[j].tunnel) { rxq_ctrl->flow_tunnels_n[j]++; break; @@ -568,21 +571,39 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) } /** + * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] flow + * Pointer to flow structure. + */ +static void +flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + struct mlx5_flow *dev_flow; + + LIST_FOREACH(dev_flow, &flow->dev_flows, next) + flow_drv_rxq_flags_set(dev, dev_flow); +} + +/** * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the - * @p flow if no other flow uses it with the same kind of request. + * device flow if no other flow uses it with the same kind of request. * * @param dev * Pointer to Ethernet device. - * @param[in] flow - * Pointer to the flow. + * @param[in] dev_flow + * Pointer to the device flow. 
*/ static void -flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) +flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow) { struct priv *priv = dev->data->dev_private; + struct rte_flow *flow = dev_flow->flow; const int mark = !!(flow->actions & (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK)); - const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL); + const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL); unsigned int i; assert(dev->data->dev_started); @@ -601,7 +622,8 @@ flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) /* Decrease the counter matching the flow. */ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) { - if ((tunnels_info[j].tunnel & flow->layers) == + if ((tunnels_info[j].tunnel & + dev_flow->layers) == tunnels_info[j].tunnel) { rxq_ctrl->flow_tunnels_n[j]--; break; @@ -613,6 +635,24 @@ flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) } /** + * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the + * @p flow if no other flow uses it with the same kind of request. + * + * @param dev + * Pointer to Ethernet device. + * @param[in] flow + * Pointer to the flow. + */ +static void +flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + struct mlx5_flow *dev_flow; + + LIST_FOREACH(dev_flow, &flow->dev_flows, next) + flow_drv_rxq_flags_trim(dev, dev_flow); +} + +/** * Clear the Mark/Flag and Tunnel ptype information in all Rx queues. * * @param dev @@ -2024,6 +2064,11 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, if (!dev_flow) goto error; dev_flow->flow = flow; + dev_flow->layers = item_flags; + /* Store actions once as expanded flows have same actions. 
*/ + if (i == 0) + flow->actions = action_flags; + assert(flow->actions == action_flags); LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next); ret = flow_drv_translate(dev, dev_flow, attr, buf->entry[i].pattern, diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 38635c9fdb..1cc989ae91 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -215,7 +215,8 @@ struct mlx5_flow_verbs { struct mlx5_flow { LIST_ENTRY(mlx5_flow) next; struct rte_flow *flow; /**< Pointer to the main flow. */ - uint32_t layers; /**< Bit-fields that holds the detected layers. */ + uint32_t layers; + /**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */ union { #ifdef HAVE_IBV_FLOW_DV_SUPPORT struct mlx5_flow_dv dv; @@ -240,15 +241,14 @@ struct mlx5_flow_counter { struct rte_flow { TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */ enum mlx5_flow_drv_type drv_type; /**< Drvier type. */ - uint32_t layers; - /**< Bit-fields of present layers see MLX5_FLOW_LAYER_*. */ struct mlx5_flow_counter *counter; /**< Holds flow counter. */ struct rte_flow_action_rss rss;/**< RSS context. */ uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */ uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */ LIST_HEAD(dev_flows, mlx5_flow) dev_flows; /**< Device flows that are part of the flow. */ - uint32_t actions; /**< Bit-fields which mark all detected actions. */ + uint32_t actions; + /**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. 
*/ }; typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 071f31d0fd..53e9a170c3 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -177,6 +177,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, if (ret < 0) return ret; for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { + tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); switch (items->type) { case RTE_FLOW_ITEM_TYPE_VOID: break; @@ -285,7 +286,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions, "too many actions"); - tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); switch (actions->type) { case RTE_FLOW_ACTION_TYPE_VOID: break; @@ -1253,13 +1253,15 @@ flow_dv_translate(struct rte_eth_dev *dev, }, }; void *match_value = dev_flow->dv.value.buf; - uint8_t inner = 0; + int tunnel = 0; if (priority == MLX5_FLOW_PRIO_RSVD) priority = priv->config.flow_prio - 1; - for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { + tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL); flow_dv_create_item(&matcher, match_value, items, dev_flow, - inner); + tunnel); + } matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf, matcher.mask.size); if (priority == MLX5_FLOW_PRIO_RSVD) @@ -1324,7 +1326,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, (dev, flow->key, MLX5_RSS_HASH_KEY_LEN, dv->hash_fields, (*flow->queue), flow->rss.queue_num, - !!(flow->layers & + !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL)); if (!hrxq) { rte_flow_error_set diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c index 4ae974b072..75b950e16f 100644 --- a/drivers/net/mlx5/mlx5_flow_verbs.c +++ b/drivers/net/mlx5/mlx5_flow_verbs.c @@ -343,7 
+343,7 @@ flow_verbs_translate_item_ipv6(const struct rte_flow_item *item, { const struct rte_flow_item_ipv6 *spec = item->spec; const struct rte_flow_item_ipv6 *mask = item->mask; - const int tunnel = !!(dev_flow->flow->layers & MLX5_FLOW_LAYER_TUNNEL); + const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL); unsigned int size = sizeof(struct ibv_flow_spec_ipv6); struct ibv_flow_spec_ipv6 ipv6 = { .type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0), @@ -467,7 +467,7 @@ flow_verbs_translate_item_tcp(const struct rte_flow_item *item, { const struct rte_flow_item_tcp *spec = item->spec; const struct rte_flow_item_tcp *mask = item->mask; - const int tunnel = !!(dev_flow->flow->layers & MLX5_FLOW_LAYER_TUNNEL); + const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL); unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp); struct ibv_flow_spec_tcp_udp tcp = { .type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0), @@ -999,6 +999,8 @@ flow_verbs_validate(struct rte_eth_dev *dev, return ret; for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { int ret = 0; + + tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); switch (items->type) { case RTE_FLOW_ITEM_TYPE_VOID: break; @@ -1110,7 +1112,6 @@ flow_verbs_validate(struct rte_eth_dev *dev, } } for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { - tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); switch (actions->type) { case RTE_FLOW_ACTION_TYPE_VOID: break; @@ -1252,9 +1253,10 @@ flow_verbs_get_items_and_size(const struct rte_flow_item items[], { int size = 0; uint64_t detected_items = 0; - const int tunnel = !!(*item_flags & MLX5_FLOW_LAYER_TUNNEL); for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { + int tunnel = !!(detected_items & MLX5_FLOW_LAYER_TUNNEL); + switch (items->type) { case RTE_FLOW_ITEM_TYPE_VOID: break; @@ -1450,7 +1452,8 @@ flow_verbs_translate(struct rte_eth_dev *dev, "action not supported"); } } - dev_flow->flow->actions |= action_flags; + /* 
Device flow should have action flags by flow_drv_prepare(). */ + assert(dev_flow->flow->actions == action_flags); for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { switch (items->type) { case RTE_FLOW_ITEM_TYPE_VOID: @@ -1613,7 +1616,7 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow, verbs->hash_fields, (*flow->queue), flow->rss.queue_num, - !!(flow->layers & + !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL)); if (!hrxq) { rte_flow_error_set From patchwork Wed Oct 24 12:36:15 2018 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Shahaf Shuler X-Patchwork-Id: 47322 X-Patchwork-Delegate: shahafs@mellanox.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 5CA881B20B; Wed, 24 Oct 2018 14:38:53 +0200 (CEST) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id 016861B202 for ; Wed, 24 Oct 2018 14:38:49 +0200 (CEST) Received: from Internal Mail-Server by MTLPINE1 (envelope-from shahafs@mellanox.com) with ESMTPS (AES256-SHA encrypted); 24 Oct 2018 14:43:54 +0200 Received: from unicorn01.mtl.labs.mlnx. 
(unicorn01.mtl.labs.mlnx [10.7.12.62]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id w9OCaLcv001458; Wed, 24 Oct 2018 15:38:46 +0300 From: Shahaf Shuler To: shahafs@mellanox.com Cc: dev@dpdk.org, yskoh@mellanox.com, orika@mellanox.com Date: Wed, 24 Oct 2018 15:36:15 +0300 Message-Id: X-Mailer: git-send-email 2.12.0 In-Reply-To: References: Subject: [dpdk-dev] [PATCH 3/3] net/mlx5: fix bit width of item and action flags X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Yongseok Koh Most of the code uses uint64_t for MLX5_FLOW_LAYER_* and MLX5_FLOW_ACTION_*, but there're some code using uint32_t. Fixes: 2ed2fe5f0a9c ("net/mlx5: rewrite IP address UDP/TCP port by E-Switch") Fixes: 57123c00c1b8 ("net/mlx5: add Linux TC flower driver for E-Switch flow") Fixes: fc2c498ccb94 ("net/mlx5: add Direct Verbs translate items") Fixes: 3d69434113d1 ("net/mlx5: add Direct Verbs validation function") Fixes: 84c406e74524 ("net/mlx5: add flow translate function") Fixes: 23c1d42c7138 ("net/mlx5: split flow validation to dedicated function") Cc: orika@mellanox.com Signed-off-by: Yongseok Koh --- drivers/net/mlx5/mlx5_flow.c | 2 +- drivers/net/mlx5/mlx5_flow.h | 6 +++--- drivers/net/mlx5/mlx5_flow_dv.c | 4 ++-- drivers/net/mlx5/mlx5_flow_tcf.c | 6 +++--- drivers/net/mlx5/mlx5_flow_verbs.c | 4 ++-- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 0d9a03b632..c757552771 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -477,7 +477,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item, */ uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow, - int tunnel __rte_unused, uint32_t layer_types, + int tunnel __rte_unused, uint64_t layer_types, uint64_t hash_fields) { struct rte_flow 
*flow = dev_flow->flow; diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 1cc989ae91..f2d1202285 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -215,7 +215,7 @@ struct mlx5_flow_verbs { struct mlx5_flow { LIST_ENTRY(mlx5_flow) next; struct rte_flow *flow; /**< Pointer to the main flow. */ - uint32_t layers; + uint64_t layers; /**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */ union { #ifdef HAVE_IBV_FLOW_DV_SUPPORT @@ -247,7 +247,7 @@ struct rte_flow { uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */ LIST_HEAD(dev_flows, mlx5_flow) dev_flows; /**< Device flows that are part of the flow. */ - uint32_t actions; + uint64_t actions; /**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */ }; typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev, @@ -289,7 +289,7 @@ struct mlx5_flow_driver_ops { /* mlx5_flow.c */ uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow, int tunnel, - uint32_t layer_types, + uint64_t layer_types, uint64_t hash_fields); uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, uint32_t subpriority); diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c index 53e9a170c3..8f729f44f8 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -165,8 +165,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, struct rte_flow_error *error) { int ret; - uint32_t action_flags = 0; - uint32_t item_flags = 0; + uint64_t action_flags = 0; + uint64_t item_flags = 0; int tunnel = 0; uint8_t next_protocol = 0xff; int actions_n = 0; diff --git a/drivers/net/mlx5/mlx5_flow_tcf.c b/drivers/net/mlx5/mlx5_flow_tcf.c index 05ffbd3114..a3c1449db7 100644 --- a/drivers/net/mlx5/mlx5_flow_tcf.c +++ b/drivers/net/mlx5/mlx5_flow_tcf.c @@ -968,8 +968,8 @@ flow_tcf_validate(struct rte_eth_dev *dev, const struct rte_flow_action_set_ipv4 *set_ipv4; const 
struct rte_flow_action_set_ipv6 *set_ipv6; } conf; - uint32_t item_flags = 0; - uint32_t action_flags = 0; + uint64_t item_flags = 0; + uint64_t action_flags = 0; uint8_t next_protocol = -1; unsigned int tcm_ifindex = 0; uint8_t pedit_validated = 0; @@ -1186,7 +1186,7 @@ flow_tcf_validate(struct rte_eth_dev *dev, } for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { unsigned int i; - uint32_t current_action_flag = 0; + uint64_t current_action_flag = 0; switch (actions->type) { case RTE_FLOW_ACTION_TYPE_VOID: diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c index 75b950e16f..9481edcf7f 100644 --- a/drivers/net/mlx5/mlx5_flow_verbs.c +++ b/drivers/net/mlx5/mlx5_flow_verbs.c @@ -987,8 +987,8 @@ flow_verbs_validate(struct rte_eth_dev *dev, struct rte_flow_error *error) { int ret; - uint32_t action_flags = 0; - uint32_t item_flags = 0; + uint64_t action_flags = 0; + uint64_t item_flags = 0; int tunnel = 0; uint8_t next_protocol = 0xff;