From patchwork Tue Aug 22 01:02:21 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Zhang, Yuying" X-Patchwork-Id: 131798 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 1455942608; Thu, 21 Sep 2023 18:59:14 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 2556840DD8; Thu, 21 Sep 2023 18:59:04 +0200 (CEST) Received: from mgamail.intel.com (mgamail.intel.com [192.55.52.120]) by mails.dpdk.org (Postfix) with ESMTP id 6CFD740DCE for ; Thu, 21 Sep 2023 18:58:59 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1695315539; x=1726851539; h=from:to:subject:date:message-id:in-reply-to:references: mime-version:content-transfer-encoding; bh=xiOx28K9bjnO8C95rUxSi2y2ajtK5929vntEuBOfAOQ=; b=DWfiAIPyC5HgNCxTGqBMIQlVfSEHyjPxV0aacivlCbVJycv3euxQ0cIY Xl0ZY0ciRsFixLwbmjByasNJMAT3dqjpUDMxUudRux24AQai/9GXCrO/B 4fyyWQ77Oh3Nes2Q4rzWWJJBSE1LM83vfFKCe2Dt4JNkSi2KTZMajRl/L V+s6QazdPBJzexVi2YUxc9FPqYLmEdInb0ZhaT56YGrs12Oz0Y9hzaOUl 0Xe04atr8jjUMRuWdGzatHKxk1uf20YqS0KtDyZTRQi3CCKOipf1S4g+7 ZJnyl4XWqScj8dt9wg5pktpBVC3+UHCAH8MY9TRw3lDAAQ/NZhgv5bv6N w==; X-IronPort-AV: E=McAfee;i="6600,9927,10840"; a="379468268" X-IronPort-AV: E=Sophos;i="6.03,165,1694761200"; d="scan'208";a="379468268" Received: from fmsmga004.fm.intel.com ([10.253.24.48]) by fmsmga104.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 21 Sep 2023 09:58:58 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10840"; a="817472275" X-IronPort-AV: E=Sophos;i="6.03,165,1694761200"; d="scan'208";a="817472275" Received: from dpdk-pengyuan-mev.sh.intel.com ([10.67.119.128]) by fmsmga004.fm.intel.com with ESMTP; 21 Sep 2023 09:58:57 -0700 From: 
"Zhang, Yuying" To: yuying.zhang@intel.com, dev@dpdk.org, qi.z.zhang@intel.com, jingjing.wu@intel.com, beilei.xing@intel.com Subject: [PATCH v6 3/8] net/cpfl: set up rte flow skeleton Date: Tue, 22 Aug 2023 01:02:21 +0000 Message-Id: <20230822010226.17783-4-yuying.zhang@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230822010226.17783-1-yuying.zhang@intel.com> References: <20230915100047.90153-1-yuying.zhang@intel.com> <20230822010226.17783-1-yuying.zhang@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Yuying Zhang Set up the rte_flow backend skeleton. Introduce the framework to support different engines as rte_flow backend. Bridge rte_flow driver API to flow engines. Signed-off-by: Yuying Zhang --- drivers/net/cpfl/cpfl_ethdev.c | 53 ++++++ drivers/net/cpfl/cpfl_ethdev.h | 5 + drivers/net/cpfl/cpfl_flow.c | 339 +++++++++++++++++++++++++++++++++ drivers/net/cpfl/cpfl_flow.h | 85 +++++++++ drivers/net/cpfl/meson.build | 1 + 5 files changed, 483 insertions(+) create mode 100644 drivers/net/cpfl/cpfl_flow.c create mode 100644 drivers/net/cpfl/cpfl_flow.h diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index 1745f703c8..c350728861 100644 --- a/drivers/net/cpfl/cpfl_ethdev.c +++ b/drivers/net/cpfl/cpfl_ethdev.c @@ -15,6 +15,7 @@ #include "cpfl_ethdev.h" #include #include "cpfl_rxtx.h" +#include "cpfl_flow.h" #define CPFL_REPRESENTOR "representor" #define CPFL_TX_SINGLE_Q "tx_single" @@ -1074,6 +1075,19 @@ cpfl_dev_stop(struct rte_eth_dev *dev) return 0; } +static void +cpfl_flow_free(struct cpfl_vport *vport) +{ + struct rte_flow *p_flow; + + while ((p_flow = TAILQ_FIRST(&vport->itf.flow_list))) { + TAILQ_REMOVE(&vport->itf.flow_list, p_flow, next); + if (p_flow->engine->free) + p_flow->engine->free(p_flow); + rte_free(p_flow); + } 
+} + static int cpfl_p2p_queue_grps_del(struct idpf_vport *vport) { @@ -1105,6 +1119,7 @@ cpfl_dev_close(struct rte_eth_dev *dev) if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) cpfl_p2p_queue_grps_del(vport); + cpfl_flow_free(cpfl_vport); idpf_vport_deinit(vport); rte_free(cpfl_vport->p2p_q_chunks_info); @@ -1117,6 +1132,29 @@ cpfl_dev_close(struct rte_eth_dev *dev) return 0; } +static int +cpfl_dev_flow_ops_get(struct rte_eth_dev *dev, + const struct rte_flow_ops **ops) +{ + struct cpfl_itf *itf; + + if (!dev) + return -EINVAL; + + itf = CPFL_DEV_TO_ITF(dev); + + /* only vport support rte_flow */ + if (itf->type != CPFL_ITF_TYPE_VPORT) + return -ENOTSUP; +#ifdef RTE_HAS_JANSSON + *ops = &cpfl_flow_ops; +#else + *ops = NULL; + PMD_DRV_LOG(NOTICE, "not support rte_flow, please install json-c library."); +#endif + return 0; +} + static int cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports, size_t len, uint32_t tx) @@ -1318,6 +1356,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = { .xstats_get = cpfl_dev_xstats_get, .xstats_get_names = cpfl_dev_xstats_get_names, .xstats_reset = cpfl_dev_xstats_reset, + .flow_ops_get = cpfl_dev_flow_ops_get, .hairpin_cap_get = cpfl_hairpin_cap_get, .rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup, .tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup, @@ -2021,6 +2060,13 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a goto err_vports_alloc; } +#ifdef RTE_HAS_JANSSON + ret = cpfl_flow_init(adapter); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init flow module"); + goto err_flow_init; + } +#endif adapter->cur_vports = 0; adapter->cur_vport_nb = 0; @@ -2028,6 +2074,9 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a return ret; +#ifdef RTE_HAS_JANSSON +err_flow_init: +#endif err_vports_alloc: rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter); cpfl_repr_allowlist_uninit(adapter); @@ -2182,6 +2231,7 @@ 
cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params) cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT; cpfl_vport->itf.adapter = adapter; cpfl_vport->itf.data = dev->data; + TAILQ_INIT(&cpfl_vport->itf.flow_list); adapter->vports[param->idx] = cpfl_vport; adapter->cur_vports |= RTE_BIT32(param->devarg_id); adapter->cur_vport_nb++; @@ -2262,6 +2312,9 @@ cpfl_find_adapter_ext(struct rte_pci_device *pci_dev) static void cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter) { +#ifdef RTE_HAS_JANSSON + cpfl_flow_uninit(adapter); +#endif rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter); cpfl_vport_map_uninit(adapter); idpf_adapter_deinit(&adapter->base); diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h index 383dbd14c6..69bf32cfbd 100644 --- a/drivers/net/cpfl/cpfl_ethdev.h +++ b/drivers/net/cpfl/cpfl_ethdev.h @@ -140,9 +140,12 @@ enum cpfl_itf_type { CPFL_ITF_TYPE_REPRESENTOR, }; +TAILQ_HEAD(cpfl_flow_list, rte_flow); + struct cpfl_itf { enum cpfl_itf_type type; struct cpfl_adapter_ext *adapter; + struct cpfl_flow_list flow_list; void *data; }; @@ -206,6 +209,8 @@ struct cpfl_adapter_ext { rte_spinlock_t repr_lock; struct rte_hash *repr_allowlist_hash; + struct cpfl_flow_js_parser *flow_parser; + struct cpfl_metadata meta; }; diff --git a/drivers/net/cpfl/cpfl_flow.c b/drivers/net/cpfl/cpfl_flow.c new file mode 100644 index 0000000000..03dd1ffa44 --- /dev/null +++ b/drivers/net/cpfl/cpfl_flow.c @@ -0,0 +1,339 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Intel Corporation + */ +#include +#include + +#include "cpfl_flow.h" +#include "cpfl_flow_parser.h" + +TAILQ_HEAD(cpfl_flow_engine_list, cpfl_flow_engine); + +static struct cpfl_flow_engine_list engine_list = TAILQ_HEAD_INITIALIZER(engine_list); + +void +cpfl_flow_engine_register(struct cpfl_flow_engine *engine) +{ + TAILQ_INSERT_TAIL(&engine_list, engine, node); +} + +struct cpfl_flow_engine * +cpfl_flow_engine_match(struct rte_eth_dev *dev, + const struct 
rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta) +{ + struct cpfl_flow_engine *engine = NULL; + void *temp; + + RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) { + if (!engine->parse_pattern_action) + continue; + + if (engine->parse_pattern_action(dev, attr, pattern, actions, meta) < 0) + continue; + return engine; + } + + return NULL; +} + +int +cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter) +{ + struct cpfl_flow_engine *engine = NULL; + void *temp; + int ret; + + RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) { + if (!engine->init) { + PMD_INIT_LOG(ERR, "Invalid engine type (%d)", + engine->type); + return -ENOTSUP; + } + + ret = engine->init(adapter); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to initialize engine %d", + engine->type); + return ret; + } + } + + return 0; +} + +void +cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter) +{ + struct cpfl_flow_engine *engine = NULL; + void *temp; + + RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) { + if (engine->uninit) + engine->uninit(adapter); + } +} + +static int +cpfl_flow_attr_valid(const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + if (attr->priority > CPFL_PREC_MAX) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Only support priority 0-7."); + return -rte_errno; + } + + return 0; +} + +static int +cpfl_flow_param_valid(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + int ret; + + if (!pattern) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + ret = cpfl_flow_attr_valid(attr, error); + if (ret) + return ret; + + if (!actions || 
actions->type == RTE_FLOW_ACTION_TYPE_END) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + return 0; +} + +static int +__cpfl_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta, + struct cpfl_flow_engine **engine, + struct rte_flow_error *error) +{ + int ret; + + ret = cpfl_flow_param_valid(attr, pattern, actions, error); + if (ret) + return ret; + + *engine = cpfl_flow_engine_match(dev, attr, pattern, actions, meta); + if (!*engine) { + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "No matched engine."); + return -rte_errno; + } + + return 0; +} + +int +cpfl_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct cpfl_flow_engine *engine = NULL; + int ret; + + ret = __cpfl_flow_validate(dev, attr, pattern, actions, NULL, &engine, error); + + return ret; +} + +struct rte_flow * +cpfl_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev); + struct cpfl_flow_engine *engine = NULL; + struct rte_flow *flow; + void *meta; + int ret; + + flow = rte_malloc(NULL, sizeof(struct rte_flow), 0); + if (!flow) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to allocate memory"); + return NULL; + } + + ret = __cpfl_flow_validate(dev, attr, pattern, actions, &meta, &engine, error); + if (ret) { + rte_free(flow); + return NULL; + } + + if (!engine->create) { + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "No matched flow creation function"); + rte_free(flow); + return 
NULL; + } + + ret = engine->create(dev, flow, meta, error); + if (ret) { + rte_free(flow); + return NULL; + } + + flow->engine = engine; + TAILQ_INSERT_TAIL(&itf->flow_list, flow, next); + + return flow; +} + +int +cpfl_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev); + int ret = 0; + + if (!flow || !flow->engine || !flow->engine->destroy) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Invalid flow"); + return -rte_errno; + } + + ret = flow->engine->destroy(dev, flow, error); + if (!ret) + TAILQ_REMOVE(&itf->flow_list, flow, next); + else + PMD_DRV_LOG(ERR, "Failed to destroy flow"); + + return ret; +} + +int +cpfl_flow_flush(struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev); + struct rte_flow *p_flow; + void *temp; + int ret = 0; + + RTE_TAILQ_FOREACH_SAFE(p_flow, &itf->flow_list, next, temp) { + ret = cpfl_flow_destroy(dev, p_flow, error); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to flush flows"); + return -EINVAL; + } + } + + return ret; +} + +int +cpfl_flow_query(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_action *actions, + void *data, + struct rte_flow_error *error) +{ + struct rte_flow_query_count *count = data; + int ret = -EINVAL; + + if (!flow || !flow->engine || !flow->engine->query_count) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Invalid flow"); + return -rte_errno; + } + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + ret = flow->engine->query_count(dev, flow, count, error); + break; + default: + ret = rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "action not supported"); + break; + } + } + + return ret; +} + +const struct rte_flow_ops cpfl_flow_ops = { + 
.validate = cpfl_flow_validate, + .create = cpfl_flow_create, + .destroy = cpfl_flow_destroy, + .flush = cpfl_flow_flush, + .query = cpfl_flow_query, +}; + +int +cpfl_flow_init(struct cpfl_adapter_ext *ad) +{ + int ret; + + if (ad->devargs.flow_parser[0] == '\0') { + PMD_INIT_LOG(WARNING, "flow module is not initialized"); + return 0; + } + + ret = cpfl_flow_engine_init(ad); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to init flow engines"); + goto err; + } + + ret = cpfl_parser_create(&ad->flow_parser, ad->devargs.flow_parser); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to create flow parser"); + goto err; + } + + return ret; + +err: + cpfl_flow_engine_uninit(ad); + return ret; +} + +void +cpfl_flow_uninit(struct cpfl_adapter_ext *ad) +{ + if (ad->devargs.flow_parser[0] == '\0') + return; + + cpfl_parser_destroy(ad->flow_parser); + cpfl_flow_engine_uninit(ad); +} diff --git a/drivers/net/cpfl/cpfl_flow.h b/drivers/net/cpfl/cpfl_flow.h new file mode 100644 index 0000000000..8c19b853ca --- /dev/null +++ b/drivers/net/cpfl/cpfl_flow.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Intel Corporation + */ + +#ifndef _CPFL_FLOW_H_ +#define _CPFL_FLOW_H_ + +#include +#include "cpfl_ethdev.h" + +#define CPFL_PREC_MAX 7 + +extern const struct rte_flow_ops cpfl_flow_ops; + +enum cpfl_flow_engine_type { + CPFL_FLOW_ENGINE_NONE = 0, + CPFL_FLOW_ENGINE_FXP, +}; + +typedef int (*engine_init_t)(struct cpfl_adapter_ext *ad); +typedef void (*engine_uninit_t)(struct cpfl_adapter_ext *ad); +typedef int (*engine_create_t)(struct rte_eth_dev *dev, + struct rte_flow *flow, + void *meta, + struct rte_flow_error *error); +typedef int (*engine_destroy_t)(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error); +typedef int (*engine_query_t)(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_query_count *count, + struct rte_flow_error *error); +typedef void (*engine_free_t) (struct rte_flow *flow); +typedef int 
(*engine_parse_pattern_action_t)(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta); + +struct cpfl_flow_engine { + TAILQ_ENTRY(cpfl_flow_engine) node; + enum cpfl_flow_engine_type type; + engine_init_t init; + engine_uninit_t uninit; + engine_create_t create; + engine_destroy_t destroy; + engine_query_t query_count; + engine_free_t free; + engine_parse_pattern_action_t parse_pattern_action; +}; + +struct rte_flow { + TAILQ_ENTRY(rte_flow) next; + struct cpfl_flow_engine *engine; + void *rule; +}; + +void cpfl_flow_engine_register(struct cpfl_flow_engine *engine); +struct cpfl_flow_engine *cpfl_flow_engine_match(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta); +int cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter); +void cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter); +int cpfl_flow_init(struct cpfl_adapter_ext *ad); +void cpfl_flow_uninit(struct cpfl_adapter_ext *ad); +struct rte_flow *cpfl_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); +int cpfl_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); +int cpfl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error); +int cpfl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error); +int cpfl_flow_query(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_action *actions, + void *data, + struct rte_flow_error *error); +#endif diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build index d767818eb7..f5654d5b0e 100644 --- 
a/drivers/net/cpfl/meson.build +++ b/drivers/net/cpfl/meson.build @@ -41,6 +41,7 @@ endif if dpdk_conf.has('RTE_HAS_JANSSON') sources += files( + 'cpfl_flow.c', 'cpfl_flow_parser.c', ) ext_deps += jansson_dep