From patchwork Wed Sep 29 20:57:26 2021
X-Patchwork-Submitter: Ivan Malov
X-Patchwork-Id: 100026
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Ivan Malov
To: dev@dpdk.org
Cc: Andrew Rybchenko
Date: Wed, 29 Sep 2021 23:57:26 +0300
Message-Id: <20210929205730.775-7-ivan.malov@oktetlabs.ru>
In-Reply-To: <20210929205730.775-1-ivan.malov@oktetlabs.ru>
References: <20210929205730.775-1-ivan.malov@oktetlabs.ru>
X-Mailer: git-send-email 2.20.1
Subject: [dpdk-dev] [PATCH 06/10] net/sfc: implement control path operations in tunnel offload

Support the generic tunnel offload callbacks that callers invoke to obtain
the PMD-specific actions and items used to produce JUMP and GROUP flows and
to retrieve (restore) tunnel information for received packets.
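
The expected call sequence on the application side is roughly as follows
(an illustrative sketch only, not part of this patch; the port ID, VNI,
group numbers and the inner pattern are arbitrary application choices,
and the rte_flow tunnel offload API used here is experimental):

	#include <rte_flow.h>

	static int
	setup_vxlan_tunnel_offload(uint16_t port_id)
	{
		struct rte_flow_tunnel tunnel = {
			.type = RTE_FLOW_ITEM_TYPE_VXLAN,
			.tun_id = 42,	/* example VNI */
		};
		struct rte_flow_action *pmd_actions;
		struct rte_flow_item *pmd_items;
		uint32_t nb_pmd_actions;
		uint32_t nb_pmd_items;
		struct rte_flow_error error;
		int rc;

		/* PMD-specific actions to prepend to the JUMP flow actions. */
		rc = rte_flow_tunnel_decap_set(port_id, &tunnel, &pmd_actions,
					       &nb_pmd_actions, &error);
		if (rc != 0)
			return rc;

		/* PMD-specific items to prepend to the GROUP flow pattern. */
		rc = rte_flow_tunnel_match(port_id, &tunnel, &pmd_items,
					   &nb_pmd_items, &error);
		if (rc != 0)
			return rc;

		/*
		 * Here the application would build the JUMP flow (outer VXLAN
		 * pattern, pmd_actions followed by JUMP) and the GROUP flow
		 * (pmd_items followed by the inner pattern) and create them
		 * with rte_flow_create(). Once the PMD objects are no longer
		 * needed, they are released:
		 */
		rte_flow_tunnel_action_decap_release(port_id, pmd_actions,
						     nb_pmd_actions, &error);
		rte_flow_tunnel_item_release(port_id, pmd_items,
					     nb_pmd_items, &error);

		return 0;
	}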
Signed-off-by: Ivan Malov
Reviewed-by: Andrew Rybchenko
---
 drivers/net/sfc/sfc_dp.c          |  48 +++++
 drivers/net/sfc/sfc_dp.h          |   9 +
 drivers/net/sfc/sfc_ef100_rx.c    |  12 ++
 drivers/net/sfc/sfc_flow.c        |   5 +
 drivers/net/sfc/sfc_flow_tunnel.c | 316 ++++++++++++++++++++++++++++++
 drivers/net/sfc/sfc_flow_tunnel.h |  37 ++++
 6 files changed, 427 insertions(+)

diff --git a/drivers/net/sfc/sfc_dp.c b/drivers/net/sfc/sfc_dp.c
index 24ed0898c8..509c95890d 100644
--- a/drivers/net/sfc/sfc_dp.c
+++ b/drivers/net/sfc/sfc_dp.c
@@ -12,6 +12,7 @@
 #include
 #include
+#include

 #include "sfc_dp.h"
 #include "sfc_log.h"
@@ -77,3 +78,50 @@ sfc_dp_register(struct sfc_dp_list *head, struct sfc_dp *entry)

 	return 0;
 }
+
+int sfc_dp_ft_id_offset = -1;
+uint64_t sfc_dp_ft_id_valid;
+
+int
+sfc_dp_ft_id_register(void)
+{
+	static const struct rte_mbuf_dynfield ft_id = {
+		.name = "rte_net_sfc_dynfield_ft_id",
+		.size = sizeof(uint8_t),
+		.align = __alignof__(uint8_t),
+	};
+	static const struct rte_mbuf_dynflag ft_id_valid = {
+		.name = "rte_net_sfc_dynflag_ft_id_valid",
+	};
+
+	int field_offset;
+	int flag;
+
+	SFC_GENERIC_LOG(INFO, "%s() entry", __func__);
+
+	if (sfc_dp_ft_id_valid != 0) {
+		SFC_GENERIC_LOG(INFO, "%s() already registered", __func__);
+		return 0;
+	}
+
+	field_offset = rte_mbuf_dynfield_register(&ft_id);
+	if (field_offset < 0) {
+		SFC_GENERIC_LOG(ERR, "%s() failed to register ft_id dynfield",
+				__func__);
+		return -1;
+	}
+
+	flag = rte_mbuf_dynflag_register(&ft_id_valid);
+	if (flag < 0) {
+		SFC_GENERIC_LOG(ERR, "%s() failed to register ft_id dynflag",
+				__func__);
+		return -1;
+	}
+
+	sfc_dp_ft_id_offset = field_offset;
+	sfc_dp_ft_id_valid = UINT64_C(1) << flag;
+
+	SFC_GENERIC_LOG(INFO, "%s() done", __func__);
+
+	return 0;
+}
diff --git a/drivers/net/sfc/sfc_dp.h b/drivers/net/sfc/sfc_dp.h
index 7fd8f34b0f..b27420d4fc 100644
--- a/drivers/net/sfc/sfc_dp.h
+++ b/drivers/net/sfc/sfc_dp.h
@@ -126,6 +126,15 @@ struct sfc_dp *sfc_dp_find_by_caps(struct sfc_dp_list *head,
 				   unsigned int avail_caps);
 int sfc_dp_register(struct sfc_dp_list *head, struct sfc_dp *entry);

+/** Dynamically registered mbuf "ft_id" validity flag (as a bitmask). */
+extern uint64_t sfc_dp_ft_id_valid;
+
+/** Dynamically registered mbuf field "ft_id" (mbuf byte offset). */
+extern int sfc_dp_ft_id_offset;
+
+/** Register dynamic mbuf field "ft_id" and its validity flag. */
+int sfc_dp_ft_id_register(void);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/drivers/net/sfc/sfc_ef100_rx.c b/drivers/net/sfc/sfc_ef100_rx.c
index 3219c972db..a4f4500d74 100644
--- a/drivers/net/sfc/sfc_ef100_rx.c
+++ b/drivers/net/sfc/sfc_ef100_rx.c
@@ -422,6 +422,7 @@ sfc_ef100_rx_prefix_to_offloads(const struct sfc_ef100_rxq *rxq,
 	}

 	if (rxq->flags & SFC_EF100_RXQ_USER_MARK) {
+		uint8_t tunnel_mark;
 		uint32_t user_mark;
 		uint32_t mark;

@@ -434,6 +435,17 @@ sfc_ef100_rx_prefix_to_offloads(const struct sfc_ef100_rxq *rxq,
 			ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
 			m->hash.fdir.hi = user_mark;
 		}
+
+		tunnel_mark = SFC_FT_GET_TUNNEL_MARK(mark);
+		if (tunnel_mark != SFC_FT_TUNNEL_MARK_INVALID) {
+			sfc_ft_id_t ft_id;
+
+			ft_id = SFC_FT_TUNNEL_MARK_TO_ID(tunnel_mark);
+
+			ol_flags |= sfc_dp_ft_id_valid;
+			*RTE_MBUF_DYNFIELD(m, sfc_dp_ft_id_offset,
+					   sfc_ft_id_t *) = ft_id;
+		}
 	}

 	m->ol_flags = ol_flags;
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index fe726afc9c..c3e75bae84 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -2926,6 +2926,11 @@ const struct rte_flow_ops sfc_flow_ops = {
 	.flush = sfc_flow_flush,
 	.query = sfc_flow_query,
 	.isolate = sfc_flow_isolate,
+	.tunnel_decap_set = sfc_flow_tunnel_decap_set,
+	.tunnel_match = sfc_flow_tunnel_match,
+	.tunnel_action_decap_release = sfc_flow_tunnel_action_decap_release,
+	.tunnel_item_release = sfc_flow_tunnel_item_release,
+	.get_restore_info = sfc_flow_tunnel_get_restore_info,
 };

 void
diff --git a/drivers/net/sfc/sfc_flow_tunnel.c b/drivers/net/sfc/sfc_flow_tunnel.c
index b03c90c9a4..2de401148e 100644
--- a/drivers/net/sfc/sfc_flow_tunnel.c
+++ b/drivers/net/sfc/sfc_flow_tunnel.c
@@ -6,7 +6,10 @@
 #include
 #include

+#include
+
 #include "sfc.h"
+#include "sfc_dp.h"
 #include "sfc_flow.h"
 #include "sfc_dp_rx.h"
 #include "sfc_flow_tunnel.h"
@@ -143,3 +146,316 @@ sfc_flow_tunnel_detect_jump_rule(struct sfc_adapter *sa,
 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 				"tunnel offload: JUMP: preparsing failed");
 }
+
+static int
+sfc_flow_tunnel_attach(struct sfc_adapter *sa,
+		       struct rte_flow_tunnel *tunnel,
+		       struct sfc_flow_tunnel **ftp)
+{
+	struct sfc_flow_tunnel *ft;
+	const char *ft_status;
+	int ft_id_free = -1;
+	sfc_ft_id_t ft_id;
+	int rc;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	rc = sfc_dp_ft_id_register();
+	if (rc != 0)
+		return rc;
+
+	if (tunnel->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+		sfc_err(sa, "tunnel offload: unsupported tunnel (encapsulation) type");
+		return ENOTSUP;
+	}
+
+	for (ft_id = 0; ft_id < SFC_FT_MAX_NTUNNELS; ++ft_id) {
+		ft = &sa->flow_tunnels[ft_id];
+
+		if (ft->refcnt == 0) {
+			if (ft_id_free == -1)
+				ft_id_free = ft_id;
+
+			continue;
+		}
+
+		if (memcmp(tunnel, &ft->rte_tunnel, sizeof(*tunnel)) == 0) {
+			ft_status = "existing";
+			goto attach;
+		}
+	}
+
+	if (ft_id_free == -1) {
+		sfc_err(sa, "tunnel offload: no free slot for the new tunnel");
+		return ENOBUFS;
+	}
+
+	ft_id = ft_id_free;
+	ft = &sa->flow_tunnels[ft_id];
+
+	memcpy(&ft->rte_tunnel, tunnel, sizeof(*tunnel));
+
+	ft->encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
+
+	ft->action_mark.id = SFC_FT_ID_TO_MARK(ft_id_free);
+	ft->action.type = RTE_FLOW_ACTION_TYPE_MARK;
+	ft->action.conf = &ft->action_mark;
+
+	ft->item.type = RTE_FLOW_ITEM_TYPE_MARK;
+	ft->item_mark_v.id = ft->action_mark.id;
+	ft->item.spec = &ft->item_mark_v;
+	ft->item.mask = &ft->item_mark_m;
+	ft->item_mark_m.id = UINT32_MAX;
+
+	ft->jump_rule_is_set = B_FALSE;
+
+	ft->refcnt = 0;
+
+	ft_status = "newly added";
+
+attach:
+	sfc_dbg(sa, "tunnel offload: attaching to %s tunnel=%u",
+		ft_status, ft_id);
+
+	++(ft->refcnt);
+	*ftp = ft;
+
+	return 0;
+}
+
+static int
+sfc_flow_tunnel_detach(struct sfc_adapter *sa,
+		       uint32_t ft_mark)
+{
+	struct sfc_flow_tunnel *ft;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	ft = sfc_flow_tunnel_pick(sa, ft_mark);
+	if (ft == NULL) {
+		sfc_err(sa, "tunnel offload: invalid tunnel");
+		return EINVAL;
+	}
+
+	if (ft->refcnt == 0) {
+		sfc_err(sa, "tunnel offload: tunnel=%u does not exist", ft->id);
+		return ENOENT;
+	}
+
+	--(ft->refcnt);
+
+	return 0;
+}
+
+int
+sfc_flow_tunnel_decap_set(struct rte_eth_dev *dev,
+			  struct rte_flow_tunnel *tunnel,
+			  struct rte_flow_action **pmd_actions,
+			  uint32_t *num_of_actions,
+			  struct rte_flow_error *err)
+{
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	struct sfc_flow_tunnel *ft;
+	int rc;
+
+	sfc_adapter_lock(sa);
+
+	if (!sfc_flow_tunnel_is_active(sa)) {
+		rc = ENOTSUP;
+		goto fail;
+	}
+
+	rc = sfc_flow_tunnel_attach(sa, tunnel, &ft);
+	if (rc != 0)
+		goto fail;
+
+	*pmd_actions = &ft->action;
+	*num_of_actions = 1;
+
+	sfc_adapter_unlock(sa);
+
+	return 0;
+
+fail:
+	sfc_adapter_unlock(sa);
+
+	return rte_flow_error_set(err, rc,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				  "tunnel offload: decap_set failed");
+}
+
+int
+sfc_flow_tunnel_match(struct rte_eth_dev *dev,
+		      struct rte_flow_tunnel *tunnel,
+		      struct rte_flow_item **pmd_items,
+		      uint32_t *num_of_items,
+		      struct rte_flow_error *err)
+{
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	struct sfc_flow_tunnel *ft;
+	int rc;
+
+	sfc_adapter_lock(sa);
+
+	if (!sfc_flow_tunnel_is_active(sa)) {
+		rc = ENOTSUP;
+		goto fail;
+	}
+
+	rc = sfc_flow_tunnel_attach(sa, tunnel, &ft);
+	if (rc != 0)
+		goto fail;
+
+	*pmd_items = &ft->item;
+	*num_of_items = 1;
+
+	sfc_adapter_unlock(sa);
+
+	return 0;
+
+fail:
+	sfc_adapter_unlock(sa);
+
+	return rte_flow_error_set(err, rc,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				  "tunnel offload: tunnel_match failed");
+}
+
+int
+sfc_flow_tunnel_item_release(struct rte_eth_dev *dev,
+			     struct rte_flow_item *pmd_items,
+			     uint32_t num_items,
+			     struct rte_flow_error *err)
+{
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	const struct rte_flow_item_mark *item_mark;
+	struct rte_flow_item *item = pmd_items;
+	int rc;
+
+	sfc_adapter_lock(sa);
+
+	if (!sfc_flow_tunnel_is_active(sa)) {
+		rc = ENOTSUP;
+		goto fail;
+	}
+
+	if (num_items != 1 || item == NULL || item->spec == NULL ||
+	    item->type != RTE_FLOW_ITEM_TYPE_MARK) {
+		sfc_err(sa, "tunnel offload: item_release: wrong input");
+		rc = EINVAL;
+		goto fail;
+	}
+
+	item_mark = item->spec;
+
+	rc = sfc_flow_tunnel_detach(sa, item_mark->id);
+	if (rc != 0)
+		goto fail;
+
+	sfc_adapter_unlock(sa);
+
+	return 0;
+
+fail:
+	sfc_adapter_unlock(sa);
+
+	return rte_flow_error_set(err, rc,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				  "tunnel offload: item_release failed");
+}
+
+int
+sfc_flow_tunnel_action_decap_release(struct rte_eth_dev *dev,
+				     struct rte_flow_action *pmd_actions,
+				     uint32_t num_actions,
+				     struct rte_flow_error *err)
+{
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	const struct rte_flow_action_mark *action_mark;
+	struct rte_flow_action *action = pmd_actions;
+	int rc;
+
+	sfc_adapter_lock(sa);
+
+	if (!sfc_flow_tunnel_is_active(sa)) {
+		rc = ENOTSUP;
+		goto fail;
+	}
+
+	if (num_actions != 1 || action == NULL || action->conf == NULL ||
+	    action->type != RTE_FLOW_ACTION_TYPE_MARK) {
+		sfc_err(sa, "tunnel offload: action_decap_release: wrong input");
+		rc = EINVAL;
+		goto fail;
+	}
+
+	action_mark = action->conf;
+
+	rc = sfc_flow_tunnel_detach(sa, action_mark->id);
+	if (rc != 0)
+		goto fail;
+
+	sfc_adapter_unlock(sa);
+
+	return 0;
+
+fail:
+	sfc_adapter_unlock(sa);
+
+	return rte_flow_error_set(err, rc,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				  "tunnel offload: action_decap_release failed");
+}
+
+int
+sfc_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
+				 struct rte_mbuf *m,
+				 struct rte_flow_restore_info *info,
+				 struct rte_flow_error *err)
+{
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	const struct sfc_flow_tunnel *ft;
+	sfc_ft_id_t ft_id;
+	int rc;
+
+	sfc_adapter_lock(sa);
+
+	if ((m->ol_flags & sfc_dp_ft_id_valid) == 0) {
+		sfc_dbg(sa, "tunnel offload: get_restore_info: no tunnel mark in the packet");
+		rc = EINVAL;
+		goto fail;
+	}
+
+	ft_id = *RTE_MBUF_DYNFIELD(m, sfc_dp_ft_id_offset, sfc_ft_id_t *);
+	ft = &sa->flow_tunnels[ft_id];
+
+	if (ft->refcnt == 0) {
+		sfc_err(sa, "tunnel offload: get_restore_info: tunnel=%u does not exist",
+			ft_id);
+		rc = ENOENT;
+		goto fail;
+	}
+
+	memcpy(&info->tunnel, &ft->rte_tunnel, sizeof(info->tunnel));
+
+	/*
+	 * The packet still has the encapsulation header; JUMP rules never
+	 * strip it. Therefore, set RTE_FLOW_RESTORE_INFO_ENCAPSULATED.
+	 */
+	info->flags = RTE_FLOW_RESTORE_INFO_ENCAPSULATED |
+		      RTE_FLOW_RESTORE_INFO_GROUP_ID |
+		      RTE_FLOW_RESTORE_INFO_TUNNEL;
+
+	info->group_id = 0;
+
+	sfc_adapter_unlock(sa);
+
+	return 0;
+
+fail:
+	sfc_adapter_unlock(sa);
+
+	return rte_flow_error_set(err, rc,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				  "tunnel offload: get_restore_info failed");
+}
diff --git a/drivers/net/sfc/sfc_flow_tunnel.h b/drivers/net/sfc/sfc_flow_tunnel.h
index 27a8fa5ae7..573585ca80 100644
--- a/drivers/net/sfc/sfc_flow_tunnel.h
+++ b/drivers/net/sfc/sfc_flow_tunnel.h
@@ -10,6 +10,8 @@
 #include
 #include

+#include
+
 #include "efx.h"

 #ifdef __cplusplus
@@ -51,8 +53,16 @@ typedef uint8_t sfc_ft_id_t;
 struct sfc_flow_tunnel {
 	bool jump_rule_is_set;
 	efx_tunnel_protocol_t encap_type;
+	struct rte_flow_tunnel rte_tunnel;
 	unsigned int refcnt;
 	sfc_ft_id_t id;
+
+	struct rte_flow_action_mark action_mark;
+	struct rte_flow_action action;
+
+	struct rte_flow_item_mark item_mark_v;
+	struct rte_flow_item_mark item_mark_m;
+	struct rte_flow_item item;
 };

 struct sfc_adapter;
@@ -69,6 +79,33 @@ int sfc_flow_tunnel_detect_jump_rule(struct sfc_adapter *sa,
 				     struct sfc_flow_spec_mae *spec,
 				     struct rte_flow_error *error);

+int sfc_flow_tunnel_decap_set(struct rte_eth_dev *dev,
+			      struct rte_flow_tunnel *tunnel,
+			      struct rte_flow_action **pmd_actions,
+			      uint32_t *num_of_actions,
+			      struct rte_flow_error *err);
+
+int sfc_flow_tunnel_match(struct rte_eth_dev *dev,
+			  struct rte_flow_tunnel *tunnel,
+			  struct rte_flow_item **pmd_items,
+			  uint32_t *num_of_items,
+			  struct rte_flow_error *err);
+
+int sfc_flow_tunnel_item_release(struct rte_eth_dev *dev,
+				 struct rte_flow_item *pmd_items,
+				 uint32_t num_items,
+				 struct rte_flow_error *err);
+
+int sfc_flow_tunnel_action_decap_release(struct rte_eth_dev *dev,
+					 struct rte_flow_action *pmd_actions,
+					 uint32_t num_actions,
+					 struct rte_flow_error *err);
+
+int sfc_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
+				     struct rte_mbuf *m,
+				     struct rte_flow_restore_info *info,
+				     struct rte_flow_error *err);
+
 #ifdef __cplusplus
 }
 #endif
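
On the datapath side, packets that hit the JUMP rule but miss in the tunnel
group carry the tunnel mark in the dynamic "ft_id" mbuf field set above. A
rough sketch of how an application consumes this via the generic restore
API (illustrative only, not part of this patch; handle_tunnel_miss() and
handle_plain_packet() are hypothetical application helpers):

	#include <rte_flow.h>
	#include <rte_mbuf.h>

	/* Hypothetical application helpers, declared for completeness. */
	static void handle_tunnel_miss(struct rte_mbuf *m,
				       const struct rte_flow_tunnel *tunnel);
	static void handle_plain_packet(struct rte_mbuf *m);

	static void
	process_rx_packet(uint16_t port_id, struct rte_mbuf *m)
	{
		struct rte_flow_restore_info info;
		struct rte_flow_error error;

		if (rte_flow_get_restore_info(port_id, m, &info, &error) == 0 &&
		    (info.flags & RTE_FLOW_RESTORE_INFO_TUNNEL) != 0) {
			/*
			 * info.tunnel identifies the tunnel; the PMD also sets
			 * RTE_FLOW_RESTORE_INFO_ENCAPSULATED because JUMP
			 * rules never strip the outer header.
			 */
			handle_tunnel_miss(m, &info.tunnel);
		} else {
			handle_plain_packet(m);
		}
	}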