From patchwork Sat Aug 12 07:55:02 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Zhang, Yuying" X-Patchwork-Id: 130251 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id D6D894303E; Sat, 12 Aug 2023 09:32:19 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id D663F43259; Sat, 12 Aug 2023 09:32:16 +0200 (CEST) Received: from mgamail.intel.com (mgamail.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id 8100F43251 for ; Sat, 12 Aug 2023 09:32:14 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1691825534; x=1723361534; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=CCizFiabS2nT+rwlNhkWqXYQsc6nhJz2TB6hSSruu3Q=; b=cd8PR1WiD/tiT3IYwnizIx/Lwg91Q28tTG5iPsLkwcdMAh/K/6DRUGSw xKFMLdkHGfvtaVUUJFLF5zj3nkoao+FR4MFYPQHv+P592lWEUkhcKuD+9 zg5MY4jf6Trdj7WwNm0gQgYBHL9SCoFNu7M+1aP1C7yv3HXjA/ZbrP3HB HJniolZQ69hVw8Dp2tWvsWo49AGM5UfucHV/vmJS11YXd/nPPsF77E3Lz olCSgIE2RJWUYWhFxciBKLKIjonL4NeqJ4ACQbLJUdcVkUHuPk2n8DhRc UYz3DftZY7Fhb/05IJxhnxsdJbjMG+UUjIxzRqZNo8Z5aXms/qtjZDOEG w==; X-IronPort-AV: E=McAfee;i="6600,9927,10799"; a="369281977" X-IronPort-AV: E=Sophos;i="6.01,167,1684825200"; d="scan'208";a="369281977" Received: from fmsmga008.fm.intel.com ([10.253.24.58]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Aug 2023 00:32:14 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10799"; a="798268333" X-IronPort-AV: E=Sophos;i="6.01,167,1684825200"; d="scan'208";a="798268333" Received: from dpdk-yuyingzh-icelake.sh.intel.com ([10.67.116.226]) by fmsmga008.fm.intel.com with ESMTP; 12 Aug 2023 00:32:12 -0700 From: Yuying Zhang To: dev@dpdk.org, beilei.xing@intel.com, qi.z.zhang@intel.com, jingjing.wu@intel.com Cc: Yuying Zhang Subject: [PATCH v1 1/5] net/cpfl: setup rte flow skeleton Date: Sat, 12 Aug 2023 07:55:02 +0000 Message-Id: <20230812075506.361769-2-yuying.zhang@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230812075506.361769-1-yuying.zhang@intel.com> References: <20230812075506.361769-1-yuying.zhang@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Setup the rte_flow backend skeleton. Introduce the framework to support different engines as rte_flow backend. Bridge rte_flow driver API to flow engines. 
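For reference, a backend plugs into this skeleton by filling a struct cpfl_flow_engine with its callbacks and handing it to cpfl_flow_engine_register(); cpfl_flow_create()/validate() then walk the registered list via cpfl_flow_engine_match() and dispatch to the first engine whose parse_pattern_action() accepts the pattern and actions. The sketch below is illustrative only: the dummy_* callbacks and the RTE_INIT-time registration are assumptions for the example, while the structure fields, enum value and register call come from this patch (cpfl_flow.h / cpfl_flow.c).

    /* Minimal sketch of an engine registering with the new framework.
     * dummy_* names are hypothetical; a real engine (e.g. the FXP engine
     * added later in this series) supplies create/destroy/query_count/free
     * callbacks as well.
     */
    #include <rte_common.h>
    #include "cpfl_flow.h"

    static int
    dummy_engine_init(struct cpfl_adapter_ext *ad __rte_unused)
    {
        /* allocate any per-adapter resources the engine needs */
        return 0;
    }

    static int
    dummy_engine_parse(struct rte_eth_dev *dev __rte_unused,
                       const struct rte_flow_attr *attr __rte_unused,
                       const struct rte_flow_item pattern[] __rte_unused,
                       const struct rte_flow_action actions[] __rte_unused,
                       void **meta __rte_unused)
    {
        /* return 0 only for pattern/action combinations this engine handles;
         * a negative value lets cpfl_flow_engine_match() try the next engine
         */
        return -1;
    }

    static struct cpfl_flow_engine dummy_engine = {
        .type = CPFL_FLOW_ENGINE_FXP,
        .init = dummy_engine_init,
        .parse_pattern_action = dummy_engine_parse,
        /* .create, .destroy, .query_count, .free set by a real engine */
    };

    RTE_INIT(dummy_engine_register)
    {
        cpfl_flow_engine_register(&dummy_engine);
    }

With such an engine registered, cpfl_flow_engine_init() invokes its init callback during cpfl_flow_init(), and a matched rte_flow_create() call ends up in the engine's create callback with the meta pointer produced by its parser.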
Signed-off-by: Yuying Zhang Signed-off-by: Qi Zhang --- drivers/net/cpfl/cpfl_ethdev.c | 54 ++++++ drivers/net/cpfl/cpfl_ethdev.h | 5 + drivers/net/cpfl/cpfl_flow.c | 331 +++++++++++++++++++++++++++++++++ drivers/net/cpfl/cpfl_flow.h | 88 +++++++++ drivers/net/cpfl/meson.build | 3 +- 5 files changed, 480 insertions(+), 1 deletion(-) create mode 100644 drivers/net/cpfl/cpfl_flow.c create mode 100644 drivers/net/cpfl/cpfl_flow.h diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index 34b7c22ee1..23e5181588 100644 --- a/drivers/net/cpfl/cpfl_ethdev.c +++ b/drivers/net/cpfl/cpfl_ethdev.c @@ -16,6 +16,7 @@ #include "cpfl_ethdev.h" #include #include "cpfl_rxtx.h" +#include "cpfl_flow.h" #define CPFL_REPRESENTOR "representor" #define CPFL_TX_SINGLE_Q "tx_single" @@ -1199,6 +1200,19 @@ cpfl_dev_stop(struct rte_eth_dev *dev) return 0; } +static void +cpfl_flow_free(struct cpfl_vport *vport) +{ + struct rte_flow *p_flow; + + while ((p_flow = TAILQ_FIRST(&vport->itf.flow_list))) { + TAILQ_REMOVE(&vport->itf.flow_list, p_flow, next); + if (p_flow->engine->free) + p_flow->engine->free(p_flow); + rte_free(p_flow); + } +} + static int cpfl_p2p_queue_grps_del(struct idpf_vport *vport) { @@ -1231,6 +1245,7 @@ cpfl_dev_close(struct rte_eth_dev *dev) cpfl_p2p_queue_grps_del(vport); if (!cpfl_vport->exceptional) { + cpfl_flow_free(cpfl_vport); adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id); adapter->cur_vport_nb--; adapter->vports[vport->sw_idx] = NULL; @@ -1248,6 +1263,29 @@ cpfl_dev_close(struct rte_eth_dev *dev) return 0; } +static int +cpfl_dev_flow_ops_get(struct rte_eth_dev *dev, + const struct rte_flow_ops **ops) +{ + struct cpfl_itf *itf; + + if (!dev) + return -EINVAL; + + itf = CPFL_DEV_TO_ITF(dev); + + /* only vport support rte_flow */ + if (itf->type != CPFL_ITF_TYPE_VPORT) + return -ENOTSUP; +#ifdef CPFL_FLOW_JSON_SUPPORT + *ops = &cpfl_flow_ops; +#else + *ops = NULL; + PMD_DRV_LOG(NOTICE, "not support rte_flow, please install json-c library."); +#endif + return 0; +} + static int cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports, size_t len, uint32_t tx) @@ -1449,6 +1487,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = { .xstats_get = cpfl_dev_xstats_get, .xstats_get_names = cpfl_dev_xstats_get_names, .xstats_reset = cpfl_dev_xstats_reset, + .flow_ops_get = cpfl_dev_flow_ops_get, .hairpin_cap_get = cpfl_hairpin_cap_get, .rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup, .tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup, @@ -2411,6 +2450,13 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a goto err_create_ctrl_vport; } +#ifdef CPFL_FLOW_JSON_SUPPORT + ret = cpfl_flow_init(adapter); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to init flow module"); + goto err_flow_init; + } +#endif adapter->cur_vports = 0; adapter->cur_vport_nb = 0; @@ -2418,6 +2464,10 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a return ret; +#ifdef CPFL_FLOW_JSON_SUPPORT +err_flow_init: + cpfl_ctrl_path_close(adapter); +#endif err_create_ctrl_vport: rte_free(adapter->vports); err_vports_alloc: @@ -2574,6 +2624,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params) cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT; cpfl_vport->itf.adapter = adapter; cpfl_vport->itf.data = dev->data; + TAILQ_INIT(&cpfl_vport->itf.flow_list); adapter->vports[param->idx] = cpfl_vport; adapter->cur_vports |= RTE_BIT32(param->devarg_id); adapter->cur_vport_nb++; @@ -2713,6 +2764,9 @@ 
cpfl_find_adapter_ext(struct rte_pci_device *pci_dev) static void cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter) { +#ifdef CPFL_FLOW_JSON_SUPPORT + cpfl_flow_uninit(adapter); +#endif cpfl_ctrl_path_close(adapter); rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter); cpfl_vport_map_uninit(adapter); diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h index 2e9480ffc1..c71f16ac60 100644 --- a/drivers/net/cpfl/cpfl_ethdev.h +++ b/drivers/net/cpfl/cpfl_ethdev.h @@ -143,9 +143,12 @@ enum cpfl_itf_type { CPFL_ITF_TYPE_REPRESENTOR }; +TAILQ_HEAD(cpfl_flow_list, rte_flow); + struct cpfl_itf { enum cpfl_itf_type type; struct cpfl_adapter_ext *adapter; + struct cpfl_flow_list flow_list; void *data; }; @@ -222,6 +225,8 @@ struct cpfl_adapter_ext { rte_spinlock_t repr_lock; struct rte_hash *repr_whitelist_hash; + struct cpfl_flow_js_parser *flow_parser; + /* ctrl vport and ctrl queues. */ struct cpfl_vport ctrl_vport; uint8_t ctrl_vport_recv_info[IDPF_DFLT_MBX_BUF_SIZE]; diff --git a/drivers/net/cpfl/cpfl_flow.c b/drivers/net/cpfl/cpfl_flow.c new file mode 100644 index 0000000000..e303936081 --- /dev/null +++ b/drivers/net/cpfl/cpfl_flow.c @@ -0,0 +1,331 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Intel Corporation + */ +#include +#include + +#include "cpfl_flow.h" +#include "cpfl_flow_parser.h" + +TAILQ_HEAD(cpfl_flow_engine_list, cpfl_flow_engine); + +static struct cpfl_flow_engine_list engine_list = TAILQ_HEAD_INITIALIZER(engine_list); + +void +cpfl_flow_engine_register(struct cpfl_flow_engine *engine) +{ + TAILQ_INSERT_TAIL(&engine_list, engine, node); +} + +struct cpfl_flow_engine * +cpfl_flow_engine_match(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta) +{ + struct cpfl_flow_engine *engine = NULL; + void *temp; + + RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) { + if (!engine->parse_pattern_action) + continue; + if (engine->parse_pattern_action(dev, attr, pattern, actions, meta) < 0) + continue; + return engine; + } + + return NULL; +} + +int +cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter) +{ + struct cpfl_flow_engine *engine = NULL; + void *temp; + int ret; + + RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) { + if (!engine->init) { + PMD_INIT_LOG(ERR, "Invalid engine type (%d)", + engine->type); + return -ENOTSUP; + } + + ret = engine->init(adapter); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to initialize engine %d", + engine->type); + return ret; + } + } + + return 0; +} + +void +cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter) +{ + struct cpfl_flow_engine *engine = NULL; + void *temp; + + RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) { + if (engine->uninit) + engine->uninit(adapter); + } +} + +static int +cpfl_flow_valid_attr(const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + if (attr->priority > 6) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Only support priority 0-6."); + return -rte_errno; + } + + return 0; +} + +static int +cpfl_flow_param_valid(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + int ret; + + if (!pattern) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, "NULL pattern."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, EINVAL, + 
RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "NULL attribute."); + return -rte_errno; + } + + ret = cpfl_flow_valid_attr(attr, error); + if (ret) + return ret; + + if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, "NULL action."); + return -rte_errno; + } + + return 0; +} + +int +cpfl_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct cpfl_flow_engine *engine; + int ret; + + ret = cpfl_flow_param_valid(attr, pattern, actions, error); + if (ret) + return ret; + + engine = cpfl_flow_engine_match(dev, attr, pattern, actions, NULL); + + if (!engine) { + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "No matched engine."); + return -rte_errno; + } + + return 0; +} + +struct rte_flow * +cpfl_flow_create(struct rte_eth_dev *dev __rte_unused, + const struct rte_flow_attr *attr __rte_unused, + const struct rte_flow_item pattern[] __rte_unused, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev); + struct cpfl_flow_engine *engine; + struct rte_flow *flow; + void *meta; + int ret; + + flow = rte_malloc(NULL, sizeof(struct rte_flow), 0); + if (!flow) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to allocate memory"); + return NULL; + } + + ret = cpfl_flow_param_valid(attr, pattern, actions, error); + if (ret) { + rte_free(flow); + return NULL; + } + + engine = cpfl_flow_engine_match(dev, attr, pattern, actions, &meta); + if (!engine) { + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "No matched engine"); + rte_free(flow); + return NULL; + } + + if (!engine->create) { + rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "No matched flow creation function"); + rte_free(flow); + return NULL; + } + + ret = engine->create(dev, flow, meta, error); + if (ret) { + rte_free(flow); + return NULL; + } + + flow->engine = engine; + TAILQ_INSERT_TAIL(&itf->flow_list, flow, next); + + return flow; +} + +int +cpfl_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev); + int ret = 0; + + if (!flow || !flow->engine || !flow->engine->destroy) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Invalid flow"); + return -rte_errno; + } + + ret = flow->engine->destroy(dev, flow, error); + if (!ret) + TAILQ_REMOVE(&itf->flow_list, flow, next); + else + PMD_DRV_LOG(ERR, "Failed to destroy flow"); + + return ret; +} + +int +cpfl_flow_flush(struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev); + struct rte_flow *p_flow; + void *temp; + int ret = 0; + + RTE_TAILQ_FOREACH_SAFE(p_flow, &itf->flow_list, next, temp) { + ret = cpfl_flow_destroy(dev, p_flow, error); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to flush flows"); + return -EINVAL; + } + } + + return ret; +} + +int +cpfl_flow_query(struct rte_eth_dev *dev __rte_unused, + struct rte_flow *flow __rte_unused, + const struct rte_flow_action *actions __rte_unused, + void *data __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + struct rte_flow_query_count *count = data; + int ret = -EINVAL; + + if (!flow || !flow->engine || !flow->engine->query_count) { + 
rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Invalid flow"); + return -rte_errno; + } + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + ret = flow->engine->query_count(dev, flow, count, error); + break; + default: + ret = rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "action not supported"); + break; + } + } + + return ret; +} + +const struct rte_flow_ops cpfl_flow_ops = { + .validate = cpfl_flow_validate, + .create = cpfl_flow_create, + .destroy = cpfl_flow_destroy, + .flush = cpfl_flow_flush, + .query = cpfl_flow_query, +}; + +int +cpfl_flow_init(struct cpfl_adapter_ext *ad) +{ + int ret; + + if (ad->devargs.flow_parser[0] == '\0') { + PMD_INIT_LOG(WARNING, "flow module is not initialized"); + return 0; + } + + ret = cpfl_flow_engine_init(ad); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to init flow engines"); + goto err; + } + + ret = cpfl_parser_create(&ad->flow_parser, ad->devargs.flow_parser); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to create flow parser"); + goto err; + } + + return ret; + +err: + cpfl_flow_engine_uninit(ad); + return ret; +} + +void +cpfl_flow_uninit(struct cpfl_adapter_ext *ad) +{ + if (ad->devargs.flow_parser[0] == '\0') + return; + + cpfl_parser_destroy(ad->flow_parser); + cpfl_flow_engine_uninit(ad); +} diff --git a/drivers/net/cpfl/cpfl_flow.h b/drivers/net/cpfl/cpfl_flow.h new file mode 100644 index 0000000000..04f4cc1149 --- /dev/null +++ b/drivers/net/cpfl/cpfl_flow.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Intel Corporation + */ + +#ifndef _CPFL_FLOW_H_ +#define _CPFL_FLOW_H_ + +#include +#include "cpfl_ethdev.h" + +extern const struct rte_flow_ops cpfl_flow_ops; + +enum cpfl_flow_engine_type { + CPFL_FLOW_ENGINE_NONE = 0, + CPFL_FLOW_ENGINE_FXP, +}; + +typedef int (*engine_init_t)(struct cpfl_adapter_ext *ad); +typedef void (*engine_uninit_t)(struct cpfl_adapter_ext *ad); +typedef int (*engine_create_t)(struct rte_eth_dev *dev, + struct rte_flow *flow, + void *meta, + struct rte_flow_error *error); +typedef int (*engine_destroy_t)(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error); +typedef int (*engine_query_t)(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_query_count *count, + struct rte_flow_error *error); +typedef void (*engine_free_t) (struct rte_flow *flow); +typedef int (*engine_parse_pattern_action_t)(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta); + +struct cpfl_flow_engine { + TAILQ_ENTRY(cpfl_flow_engine) node; + enum cpfl_flow_engine_type type; + engine_init_t init; + engine_uninit_t uninit; + engine_create_t create; + engine_destroy_t destroy; + engine_query_t query_count; + engine_free_t free; + engine_parse_pattern_action_t parse_pattern_action; +}; + +struct rte_flow { + TAILQ_ENTRY(rte_flow) next; + struct cpfl_flow_engine *engine; + void *rule; +}; + +void cpfl_flow_engine_register(struct cpfl_flow_engine *engine); + +struct cpfl_flow_engine * +cpfl_flow_engine_match(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta); +int +cpfl_flow_engine_init(struct cpfl_adapter_ext *adapter); +void +cpfl_flow_engine_uninit(struct cpfl_adapter_ext *adapter); + +int 
cpfl_flow_init(struct cpfl_adapter_ext *ad); +void cpfl_flow_uninit(struct cpfl_adapter_ext *ad); +struct rte_flow *cpfl_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); +int cpfl_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error); +int cpfl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error); +int cpfl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error); +int cpfl_flow_query(struct rte_eth_dev *dev, + struct rte_flow *flow, + const struct rte_flow_action *actions, + void *data, + struct rte_flow_error *error); +#endif diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build index 84ba994469..222497f7c2 100644 --- a/drivers/net/cpfl/meson.build +++ b/drivers/net/cpfl/meson.build @@ -42,10 +42,11 @@ endif js_dep = dependency('json-c', required: false, method : 'pkg-config') if js_dep.found() sources += files( + 'cpfl_flow.c', 'cpfl_flow_parser.c', 'cpfl_rules.c', 'cpfl_controlq.c', ) dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true) ext_deps += js_dep -endif \ No newline at end of file +endif From patchwork Sat Aug 12 07:55:03 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Zhang, Yuying" X-Patchwork-Id: 130252 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id BC3BA4303E; Sat, 12 Aug 2023 09:32:26 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 1169143260; Sat, 12 Aug 2023 09:32:19 +0200 (CEST) Received: from mgamail.intel.com (mgamail.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id A4EED4325E for ; Sat, 12 Aug 2023 09:32:17 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1691825537; x=1723361537; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=KOZfqCpDKFeLpQsepCi76NPOXLAy0IEyUHB6yJOWruk=; b=eQuXrbw9PqLXRlD3vlWOxz00/xnxtdq8j2wg1Gtw3HbmgTEUvrBIY1Qu AFiP1Niy1UdiuEN3dacWOeEUKxKVN5DGFwvOzGfJm/P9TU4vw9w6CJ5B0 IzG3+uUkoxGpt6ATmYq65WjlVVrkeDINJ3Gvcyj0gnrZoSsL59PAsJfiq NoaRrbXIEvX0K2d9C+GZwzlml6oZr1xX/EsXy7wQwOHB1FJdxwjK9fPao TiSQlFMzXP30OijwVHbct7f4spcT0t8V3SlKSgTNpbLy583JFghRdLkkN FV2MY6eeuLAn7jFQ0tNVF91CQGuJv30B/I5pfUqjPj6pHmNRMeoZDyjU6 w==; X-IronPort-AV: E=McAfee;i="6600,9927,10799"; a="369281984" X-IronPort-AV: E=Sophos;i="6.01,167,1684825200"; d="scan'208";a="369281984" Received: from fmsmga008.fm.intel.com ([10.253.24.58]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Aug 2023 00:32:17 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10799"; a="798268341" X-IronPort-AV: E=Sophos;i="6.01,167,1684825200"; d="scan'208";a="798268341" Received: from dpdk-yuyingzh-icelake.sh.intel.com ([10.67.116.226]) by fmsmga008.fm.intel.com with ESMTP; 12 Aug 2023 00:32:15 -0700 From: Yuying Zhang To: dev@dpdk.org, beilei.xing@intel.com, qi.z.zhang@intel.com, jingjing.wu@intel.com Cc: Yuying Zhang Subject: [PATCH v1 2/5] common/idpf/base: refine idpf ctlq message structure Date: 
Sat, 12 Aug 2023 07:55:03 +0000 Message-Id: <20230812075506.361769-3-yuying.zhang@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230812075506.361769-1-yuying.zhang@intel.com> References: <20230812075506.361769-1-yuying.zhang@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Add cfg data in idpf_ctlq_msg. Signed-off-by: Yuying Zhang --- drivers/common/idpf/base/idpf_controlq_api.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/common/idpf/base/idpf_controlq_api.h b/drivers/common/idpf/base/idpf_controlq_api.h index 3780304256..b38b10465c 100644 --- a/drivers/common/idpf/base/idpf_controlq_api.h +++ b/drivers/common/idpf/base/idpf_controlq_api.h @@ -65,6 +65,9 @@ struct idpf_ctlq_msg { u32 chnl_opcode; u32 chnl_retval; } mbx; + struct { + u64 data; + } cfg; } cookie; union { #define IDPF_DIRECT_CTX_SIZE 16 From patchwork Sat Aug 12 07:55:04 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Zhang, Yuying" X-Patchwork-Id: 130253 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 54B7F4303E; Sat, 12 Aug 2023 09:32:34 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 413D643000; Sat, 12 Aug 2023 09:32:27 +0200 (CEST) Received: from mgamail.intel.com (mgamail.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id BBE6943266 for ; Sat, 12 Aug 2023 09:32:24 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1691825545; x=1723361545; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=vTX0z4O8yUn9DyW3C1yodyi/tOL/EfRk5V2rwu3bvyQ=; b=UnJO7jqUfGxyig0cunYe7DZhQoI1eRKFwC8sEQw3tOi9wGagf3mWf8ac 8UvZJTzYGLpG/k3gK9NrNBwGP9PNIci76vEVY7rAefiUfwpjT8dkRDZHs ghBUvpSkYhLsbV/ncZdVfUMUOnxRPTi2sYR+ClIiJpcl6pKDfrtOBNO3q fTTivgYANR9Rn22NmvzOwtscGe6Bf6xJ7fWacr34G0uRb1jfzNgDf3g7M 0AGqG8ny+NDn460UY151r/6PumY/1ZI77SAhGpqaJX23Fn4lHmNcUzLO2 QvjZtcmTifdJ+SBY75DjGr8fKqGAWS0A6YRD8rxiFgfmf22rnDkBJObIq Q==; X-IronPort-AV: E=McAfee;i="6600,9927,10799"; a="369281996" X-IronPort-AV: E=Sophos;i="6.01,167,1684825200"; d="scan'208";a="369281996" Received: from fmsmga008.fm.intel.com ([10.253.24.58]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Aug 2023 00:32:24 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10799"; a="798268348" X-IronPort-AV: E=Sophos;i="6.01,167,1684825200"; d="scan'208";a="798268348" Received: from dpdk-yuyingzh-icelake.sh.intel.com ([10.67.116.226]) by fmsmga008.fm.intel.com with ESMTP; 12 Aug 2023 00:32:22 -0700 From: Yuying Zhang To: dev@dpdk.org, beilei.xing@intel.com, qi.z.zhang@intel.com, jingjing.wu@intel.com Cc: Yuying Zhang Subject: [PATCH v1 3/5] net/cpfl: add cpfl control queue message handle Date: Sat, 12 Aug 2023 07:55:04 +0000 Message-Id: <20230812075506.361769-4-yuying.zhang@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230812075506.361769-1-yuying.zhang@intel.com> References: <20230812075506.361769-1-yuying.zhang@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 
Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Add cpfl driver control queue message handle, including send/receive/clean/post_rx_buffs. Signed-off-by: Yuying Zhang --- drivers/net/cpfl/cpfl_controlq.c | 419 ++++++++++++++++++++++++++++++- drivers/net/cpfl/cpfl_controlq.h | 24 ++ 2 files changed, 442 insertions(+), 1 deletion(-) diff --git a/drivers/net/cpfl/cpfl_controlq.c b/drivers/net/cpfl/cpfl_controlq.c index 97a6bdd042..c696a529a7 100644 --- a/drivers/net/cpfl/cpfl_controlq.c +++ b/drivers/net/cpfl/cpfl_controlq.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2001-2023 Intel Corporation + * Copyright(c) 2023 Intel Corporation */ #include "cpfl_controlq.h" @@ -332,6 +332,395 @@ cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo, return status; } +/** + * cpfl_ctlq_send - send command to Control Queue (CTQ) + * @hw: pointer to hw struct + * @cq: handle to control queue struct to send on + * @num_q_msg: number of messages to send on control queue + * @q_msg: pointer to array of queue messages to be sent + * + * The caller is expected to allocate DMAable buffers and pass them to the + * send routine via the q_msg struct / control queue specific data struct. + * The control queue will hold a reference to each send message until + * the completion for that message has been cleaned. + */ +int +cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq, + uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[]) +{ + struct idpf_ctlq_desc *desc; + int num_desc_avail = 0; + int status = 0; + int i = 0; + + if (!cq || !cq->ring_size) + return -ENOBUFS; + + idpf_acquire_lock(&cq->cq_lock); + + /* Ensure there are enough descriptors to send all messages */ + num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq); + if (num_desc_avail == 0 || num_desc_avail < num_q_msg) { + status = -ENOSPC; + goto sq_send_command_out; + } + + for (i = 0; i < num_q_msg; i++) { + struct idpf_ctlq_msg *msg = &q_msg[i]; + uint64_t msg_cookie; + + desc = IDPF_CTLQ_DESC(cq, cq->next_to_use); + desc->opcode = CPU_TO_LE16(msg->opcode); + desc->pfid_vfid = CPU_TO_LE16(msg->func_id); + msg_cookie = *(u64 *)&msg->cookie; + desc->cookie_high = + CPU_TO_LE32(IDPF_HI_DWORD(msg_cookie)); + desc->cookie_low = + CPU_TO_LE32(IDPF_LO_DWORD(msg_cookie)); + desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) << + IDPF_CTLQ_FLAG_HOST_ID_S); + if (msg->data_len) { + struct idpf_dma_mem *buff = msg->ctx.indirect.payload; + + desc->datalen |= CPU_TO_LE16(msg->data_len); + desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF); + desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD); + /* Update the address values in the desc with the pa + * value for respective buffer + */ + desc->params.indirect.addr_high = + CPU_TO_LE32(IDPF_HI_DWORD(buff->pa)); + desc->params.indirect.addr_low = + CPU_TO_LE32(IDPF_LO_DWORD(buff->pa)); + idpf_memcpy(&desc->params, msg->ctx.indirect.context, + IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA); + } else { + idpf_memcpy(&desc->params, msg->ctx.direct, + IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA); + } + + /* Store buffer info */ + cq->bi.tx_msg[cq->next_to_use] = msg; + (cq->next_to_use)++; + if (cq->next_to_use == cq->ring_size) + cq->next_to_use = 0; + } + + /* Force memory write to complete before letting hardware + * know that there are new descriptors to fetch. 
+ */ + idpf_wmb(); + wr32(hw, cq->reg.tail, cq->next_to_use); + +sq_send_command_out: + idpf_release_lock(&cq->cq_lock); + + return status; +} + +/** + * __cpfl_ctlq_clean_sq - helper function to reclaim descriptors on HW write + * back for the requested queue + * @cq: pointer to the specific Control queue + * @clean_count: (input|output) number of descriptors to clean as input, and + * number of descriptors actually cleaned as output + * @msg_status: (output) pointer to msg pointer array to be populated; needs + * to be allocated by caller + * @force: (input) clean descriptors which were not done yet. Use with caution + * in kernel mode only + * + * Returns an array of message pointers associated with the cleaned + * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned + * descriptors. The status will be returned for each; any messages that failed + * to send will have a non-zero status. The caller is expected to free original + * ctlq_msgs and free or reuse the DMA buffers. + */ +static int +__cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count, + struct idpf_ctlq_msg *msg_status[], bool force) +{ + struct idpf_ctlq_desc *desc; + uint16_t i = 0, num_to_clean; + uint16_t ntc, desc_err; + int ret = 0; + + if (!cq || !cq->ring_size) + return -ENOBUFS; + if (*clean_count == 0) + return 0; + if (*clean_count > cq->ring_size) + return -EINVAL; + + idpf_acquire_lock(&cq->cq_lock); + ntc = cq->next_to_clean; + num_to_clean = *clean_count; + + for (i = 0; i < num_to_clean; i++) { + /* Fetch next descriptor and check if marked as done */ + desc = IDPF_CTLQ_DESC(cq, ntc); + if (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD)) + break; + + desc_err = LE16_TO_CPU(desc->ret_val); + if (desc_err) { + /* strip off FW internal code */ + desc_err &= 0xff; + } + + msg_status[i] = cq->bi.tx_msg[ntc]; + if (!msg_status[i]) + break; + msg_status[i]->status = desc_err; + cq->bi.tx_msg[ntc] = NULL; + /* Zero out any stale data */ + idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM); + ntc++; + if (ntc == cq->ring_size) + ntc = 0; + } + + cq->next_to_clean = ntc; + idpf_release_lock(&cq->cq_lock); + + /* Return number of descriptors actually cleaned */ + *clean_count = i; + + return ret; +} + +/** + * cpfl_ctlq_clean_sq - reclaim send descriptors on HW write back for the + * requested queue + * @cq: pointer to the specific Control queue + * @clean_count: (input|output) number of descriptors to clean as input, and + * number of descriptors actually cleaned as output + * @msg_status: (output) pointer to msg pointer array to be populated; needs + * to be allocated by caller + * + * Returns an array of message pointers associated with the cleaned + * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned + * descriptors. The status will be returned for each; any messages that failed + * to send will have a non-zero status. The caller is expected to free original + * ctlq_msgs and free or reuse the DMA buffers. 
+ */ +int +cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count, + struct idpf_ctlq_msg *msg_status[]) +{ + return __cpfl_ctlq_clean_sq(cq, clean_count, msg_status, false); +} + +/** + * cpfl_ctlq_post_rx_buffs - post buffers to descriptor ring + * @hw: pointer to hw struct + * @cq: pointer to control queue handle + * @buff_count: (input|output) input is number of buffers caller is trying to + * return; output is number of buffers that were not posted + * @buffs: array of pointers to dma mem structs to be given to hardware + * + * Caller uses this function to return DMA buffers to the descriptor ring after + * consuming them; buff_count will be the number of buffers. + * + * Note: this function needs to be called after a receive call even + * if there are no DMA buffers to be returned, i.e. buff_count = 0, + * buffs = NULL to support direct commands + */ +int +cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, + uint16_t *buff_count, struct idpf_dma_mem **buffs) +{ + struct idpf_ctlq_desc *desc; + uint16_t ntp = cq->next_to_post; + bool buffs_avail = false; + uint16_t tbp = ntp + 1; + int status = 0; + int i = 0; + + if (*buff_count > cq->ring_size) + return -EINVAL; + if (*buff_count > 0) + buffs_avail = true; + idpf_acquire_lock(&cq->cq_lock); + if (tbp >= cq->ring_size) + tbp = 0; + if (tbp == cq->next_to_clean) + goto post_buffs_out; + + /* Post buffers for as many as provided or up until the last one used */ + while (ntp != cq->next_to_clean) { + desc = IDPF_CTLQ_DESC(cq, ntp); + if (cq->bi.rx_buff[ntp]) + goto fill_desc; + if (!buffs_avail) { + /* If the caller hasn't given us any buffers or + * there are none left, search the ring itself + * for an available buffer to move to this + * entry starting at the next entry in the ring + */ + tbp = ntp + 1; + /* Wrap ring if necessary */ + if (tbp >= cq->ring_size) + tbp = 0; + + while (tbp != cq->next_to_clean) { + if (cq->bi.rx_buff[tbp]) { + cq->bi.rx_buff[ntp] = + cq->bi.rx_buff[tbp]; + cq->bi.rx_buff[tbp] = NULL; + /* Found a buffer, no need to + * search anymore + */ + break; + } + /* Wrap ring if necessary */ + tbp++; + if (tbp >= cq->ring_size) + tbp = 0; + } + + if (tbp == cq->next_to_clean) + goto post_buffs_out; + } else { + /* Give back pointer to DMA buffer */ + cq->bi.rx_buff[ntp] = buffs[i]; + i++; + + if (i >= *buff_count) + buffs_avail = false; + } + +fill_desc: + desc->flags = + CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD); + + /* Post buffers to descriptor */ + desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size); + desc->params.indirect.addr_high = + CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa)); + desc->params.indirect.addr_low = + CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa)); + + ntp++; + if (ntp == cq->ring_size) + ntp = 0; + } + +post_buffs_out: + /* Only update tail if buffers were actually posted */ + if (cq->next_to_post != ntp) { + if (ntp) + /* Update next_to_post to ntp - 1 since current ntp + * will not have a buffer + */ + cq->next_to_post = ntp - 1; + else + /* Wrap to end of end ring since current ntp is 0 */ + cq->next_to_post = cq->ring_size - 1; + + wr32(hw, cq->reg.tail, cq->next_to_post); + } + + idpf_release_lock(&cq->cq_lock); + + /* return the number of buffers that were not posted */ + *buff_count = *buff_count - i; + + return status; +} + +/** + * cpfl_ctlq_recv - receive control queue message call back + * @cq: pointer to control queue handle to receive on + * @num_q_msg: (input|output) input number of messages that should be received; + * 
output number of messages actually received + * @q_msg: (output) array of received control queue messages on this q; + * needs to be pre-allocated by caller for as many messages as requested + * + * Called by interrupt handler or polling mechanism. Caller is expected + * to free buffers + */ +int +cpfl_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg, + struct idpf_ctlq_msg *q_msg) +{ + uint16_t num_to_clean, ntc, ret_val, flags; + struct idpf_ctlq_desc *desc; + int ret_code = 0; + uint16_t i = 0; + + if (!cq || !cq->ring_size) + return -ENOBUFS; + + if (*num_q_msg == 0) + return 0; + else if (*num_q_msg > cq->ring_size) + return -EINVAL; + + /* take the lock before we start messing with the ring */ + idpf_acquire_lock(&cq->cq_lock); + ntc = cq->next_to_clean; + num_to_clean = *num_q_msg; + + for (i = 0; i < num_to_clean; i++) { + /* Fetch next descriptor and check if marked as done */ + desc = IDPF_CTLQ_DESC(cq, ntc); + flags = LE16_TO_CPU(desc->flags); + if (!(flags & IDPF_CTLQ_FLAG_DD)) + break; + + ret_val = LE16_TO_CPU(desc->ret_val); + q_msg[i].vmvf_type = (flags & + (IDPF_CTLQ_FLAG_FTYPE_VM | + IDPF_CTLQ_FLAG_FTYPE_PF)) >> + IDPF_CTLQ_FLAG_FTYPE_S; + + if (flags & IDPF_CTLQ_FLAG_ERR) + ret_code = -EBADMSG; + + q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high); + q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low); + q_msg[i].opcode = LE16_TO_CPU(desc->opcode); + q_msg[i].data_len = LE16_TO_CPU(desc->datalen); + q_msg[i].status = ret_val; + + if (desc->datalen) { + idpf_memcpy(q_msg[i].ctx.indirect.context, + &desc->params.indirect, + IDPF_INDIRECT_CTX_SIZE, + IDPF_DMA_TO_NONDMA); + /* Assign pointer to dma buffer to ctlq_msg array + * to be given to upper layer + */ + q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc]; + /* Zero out pointer to DMA buffer info; + * will be repopulated by post buffers API + */ + cq->bi.rx_buff[ntc] = NULL; + } else { + idpf_memcpy(q_msg[i].ctx.direct, + desc->params.raw, + IDPF_DIRECT_CTX_SIZE, + IDPF_DMA_TO_NONDMA); + } + + /* Zero out stale data in descriptor */ + idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc), + IDPF_DMA_MEM); + + ntc++; + if (ntc == cq->ring_size) + ntc = 0; + }; + + cq->next_to_clean = ntc; + idpf_release_lock(&cq->cq_lock); + *num_q_msg = i; + if (*num_q_msg == 0) + ret_code = -ENOMSG; + + return ret_code; +} + int cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo, struct idpf_ctlq_info **cq) @@ -378,3 +767,31 @@ cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq) { cpfl_ctlq_remove(hw, cq); } + +int +cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq, + uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[]) +{ + return cpfl_ctlq_send(hw, cq, num_q_msg, q_msg); +} + +int +cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg, + struct idpf_ctlq_msg q_msg[]) +{ + return cpfl_ctlq_recv(cq, num_q_msg, q_msg); +} + +int +cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, + uint16_t *buff_count, struct idpf_dma_mem **buffs) +{ + return cpfl_ctlq_post_rx_buffs(hw, cq, buff_count, buffs); +} + +int +cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count, + struct idpf_ctlq_msg *msg_status[]) +{ + return cpfl_ctlq_clean_sq(cq, clean_count, msg_status); +} diff --git a/drivers/net/cpfl/cpfl_controlq.h b/drivers/net/cpfl/cpfl_controlq.h index 930d717f63..3fd658cc36 100644 --- a/drivers/net/cpfl/cpfl_controlq.h +++ b/drivers/net/cpfl/cpfl_controlq.h @@ -14,6 +14,13 @@ #define 
CPFL_DFLT_MBX_RING_LEN 512 #define CPFL_CFGQ_RING_LEN 512 +/* CRQ/CSQ specific error codes */ +#define CPFL_ERR_CTLQ_ERROR -74 /* -EBADMSG */ +#define CPFL_ERR_CTLQ_TIMEOUT -110 /* -ETIMEDOUT */ +#define CPFL_ERR_CTLQ_FULL -28 /* -ENOSPC */ +#define CPFL_ERR_CTLQ_NO_WORK -42 /* -ENOMSG */ +#define CPFL_ERR_CTLQ_EMPTY -105 /* -ENOBUFS */ + /* Generic queue info structures */ /* MB, CONFIG and EVENT q do not have extended info */ struct cpfl_ctlq_create_info { @@ -44,8 +51,25 @@ int cpfl_ctlq_alloc_ring_res(struct idpf_hw *hw, int cpfl_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo, struct idpf_ctlq_info **cq); +int cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq, + uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[]); +int cpfl_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count, + struct idpf_ctlq_msg *msg_status[]); +int cpfl_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, + uint16_t *buff_count, struct idpf_dma_mem **buffs); +int cpfl_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg, + struct idpf_ctlq_msg *q_msg); int cpfl_vport_ctlq_add(struct idpf_hw *hw, struct cpfl_ctlq_create_info *qinfo, struct idpf_ctlq_info **cq); void cpfl_vport_ctlq_remove(struct idpf_hw *hw, struct idpf_ctlq_info *cq); +int cpfl_vport_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq, + uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[]); +int cpfl_vport_ctlq_recv(struct idpf_ctlq_info *cq, uint16_t *num_q_msg, + struct idpf_ctlq_msg q_msg[]); + +int cpfl_vport_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, + uint16_t *buff_count, struct idpf_dma_mem **buffs); +int cpfl_vport_ctlq_clean_sq(struct idpf_ctlq_info *cq, uint16_t *clean_count, + struct idpf_ctlq_msg *msg_status[]); #endif From patchwork Sat Aug 12 07:55:05 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Zhang, Yuying" X-Patchwork-Id: 130254 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 9C96D4303E; Sat, 12 Aug 2023 09:32:44 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id D411B4326E; Sat, 12 Aug 2023 09:32:29 +0200 (CEST) Received: from mgamail.intel.com (mgamail.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id 9D9F54325B for ; Sat, 12 Aug 2023 09:32:27 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1691825547; x=1723361547; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=2TAvMwrXMKcUfgR2tcJnA9vWr5cNwRlq2+BE0D7R1bY=; b=mGuXSeig9P5slrRQapc25eN4I+ZvravwQuQH56zBolP7F06w7yfySUZK EUBts9T0VyNEEb2jNRGWTbUL6kRHaXTG0YGFwzO6CKdsHeZRCgaoT78/N se3YO1xXno0vzig+a4J+gXeHG4PY51pg9XRWS6jWqKHezcoySk87pTelL VAqTv2Yw3yX9K35JVNaYgp6Dh0F6CYyIxWp7Ez6C2HaXGmPC9NLx7YXDU BHBqF8BuWlQQICuf6Y0WDxBfX5Qi7Jsin56J3Wggui55CCw+2+Pv42e2X JkgnZQJ+xNLhUePRRXjXIDsLHjkYxZVpzzCTEIqynk2nl7sPal5dRhVuz A==; X-IronPort-AV: E=McAfee;i="6600,9927,10799"; a="369282002" X-IronPort-AV: E=Sophos;i="6.01,167,1684825200"; d="scan'208";a="369282002" Received: from fmsmga008.fm.intel.com ([10.253.24.58]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Aug 2023 00:32:27 -0700 X-ExtLoop1: 1 X-IronPort-AV: 
E=McAfee;i="6600,9927,10799"; a="798268358" X-IronPort-AV: E=Sophos;i="6.01,167,1684825200"; d="scan'208";a="798268358" Received: from dpdk-yuyingzh-icelake.sh.intel.com ([10.67.116.226]) by fmsmga008.fm.intel.com with ESMTP; 12 Aug 2023 00:32:25 -0700 From: Yuying Zhang To: dev@dpdk.org, beilei.xing@intel.com, qi.z.zhang@intel.com, jingjing.wu@intel.com Cc: Yuying Zhang Subject: [PATCH v1 4/5] net/cpfl: add fxp rule module Date: Sat, 12 Aug 2023 07:55:05 +0000 Message-Id: <20230812075506.361769-5-yuying.zhang@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230812075506.361769-1-yuying.zhang@intel.com> References: <20230812075506.361769-1-yuying.zhang@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Added low level fxp module for rule packing / creation / destroying. Signed-off-by: Yuying Zhang --- drivers/net/cpfl/cpfl_ethdev.h | 4 + drivers/net/cpfl/cpfl_fxp_rule.c | 288 +++++++++++++++++++++++++++++++ drivers/net/cpfl/cpfl_fxp_rule.h | 87 ++++++++++ drivers/net/cpfl/meson.build | 1 + 4 files changed, 380 insertions(+) create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h index c71f16ac60..63bcc5551f 100644 --- a/drivers/net/cpfl/cpfl_ethdev.h +++ b/drivers/net/cpfl/cpfl_ethdev.h @@ -145,10 +145,14 @@ enum cpfl_itf_type { TAILQ_HEAD(cpfl_flow_list, rte_flow); +#define CPFL_FLOW_BATCH_SIZE 490 struct cpfl_itf { enum cpfl_itf_type type; struct cpfl_adapter_ext *adapter; struct cpfl_flow_list flow_list; + struct idpf_dma_mem flow_dma; + struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE]; + struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE]; void *data; }; diff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c new file mode 100644 index 0000000000..936f57e4fa --- /dev/null +++ b/drivers/net/cpfl/cpfl_fxp_rule.c @@ -0,0 +1,288 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Intel Corporation + */ +#include "cpfl_ethdev.h" + +#include "cpfl_fxp_rule.h" +#include "cpfl_logs.h" + +#define CTLQ_SEND_RETRIES 100 +#define CTLQ_RECEIVE_RETRIES 100 + +int +cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg, + struct idpf_ctlq_msg q_msg[]) +{ + struct idpf_ctlq_msg **msg_ptr_list; + uint16_t clean_count = 0; + int num_cleaned = 0; + int retries = 0; + int ret = 0; + + msg_ptr_list = calloc(num_q_msg, sizeof(struct idpf_ctlq_msg *)); + if (!msg_ptr_list) { + PMD_INIT_LOG(ERR, "no memory for cleaning ctlq"); + ret = -ENOMEM; + goto err; + } + + ret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg); + if (ret) { + PMD_INIT_LOG(ERR, "cpfl_vport_ctlq_send() failed with error: 0x%4x", ret); + goto send_err; + } + + while (retries <= CTLQ_SEND_RETRIES) { + clean_count = num_q_msg - num_cleaned; + ret = cpfl_vport_ctlq_clean_sq(cq, &clean_count, + &msg_ptr_list[num_cleaned]); + if (ret) { + PMD_INIT_LOG(ERR, "clean ctlq failed: 0x%4x", ret); + goto send_err; + } + + num_cleaned += clean_count; + retries++; + if (num_cleaned >= num_q_msg) + break; + rte_delay_us_sleep(10); + } + + if (retries > CTLQ_SEND_RETRIES) { + PMD_INIT_LOG(ERR, "timed out while polling for completions"); + ret = -1; + goto send_err; + } + +send_err: + if (msg_ptr_list) + free(msg_ptr_list); +err: + return ret; +} + +static int 
+cpfl_process_rx_ctlq_msg(uint16_t num_q_msg, struct idpf_ctlq_msg *q_msg) +{ + uint16_t i; + int ret = 0; + + if (!num_q_msg || !q_msg) + return -EINVAL; + + for (i = 0; i < num_q_msg; i++) { + switch (q_msg[i].status) { + case CPFL_CFG_PKT_ERR_OK: + continue; + case CPFL_CFG_PKT_ERR_EEXIST: + PMD_INIT_LOG(ERR, "The rule has confliction with already existed one"); + return -EINVAL; + case CPFL_CFG_PKT_ERR_ENOTFND: + PMD_INIT_LOG(ERR, "The rule has already deleted"); + return -EINVAL; + default: + PMD_INIT_LOG(ERR, "Invalid rule"); + return -EINVAL; + } + } + + return ret; +} + +int +cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg, + struct idpf_ctlq_msg q_msg[]) +{ + int retries = 0; + struct idpf_dma_mem *dma; + uint16_t i; + uint16_t buff_cnt; + int ret = 0; + + retries = 0; + while (retries <= CTLQ_RECEIVE_RETRIES) { + rte_delay_us_sleep(10); + ret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]); + + if (ret && ret != CPFL_ERR_CTLQ_NO_WORK && + ret != CPFL_ERR_CTLQ_ERROR) { + PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err: 0x%4x\n", ret); + retries++; + continue; + } + + if (ret == CPFL_ERR_CTLQ_NO_WORK) { + retries++; + continue; + } + + if (ret == CPFL_ERR_CTLQ_EMPTY) + break; + + ret = cpfl_process_rx_ctlq_msg(num_q_msg, q_msg); + if (ret) { + PMD_INIT_LOG(WARNING, "failed to process rx_ctrlq msg"); + break; + } + + for (i = 0; i < num_q_msg; i++) { + if (q_msg[i].data_len > 0) + dma = q_msg[i].ctx.indirect.payload; + else + dma = NULL; + + buff_cnt = dma ? 1 : 0; + ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma); + if (ret) + PMD_INIT_LOG(WARNING, "could not posted recv bufs\n"); + } + break; + } + + if (retries > CTLQ_RECEIVE_RETRIES) { + PMD_INIT_LOG(ERR, "timed out while polling for receive response"); + ret = -1; + } + + return ret; +} + +static int +pack_mod_rule(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma, + struct idpf_ctlq_msg *msg) +{ + struct cpfl_mod_rule_info *minfo = &rinfo->mod; + union cpfl_rule_cfg_pkt_record *blob = NULL; + struct cpfl_rule_cfg_data cfg = {0}; + + /* prepare rule blob */ + if (!dma->va) { + PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__); + return -1; + } + blob = (union cpfl_rule_cfg_pkt_record *)dma->va; + memset(blob, 0, sizeof(*blob)); + memset(&cfg, 0, sizeof(cfg)); + + /* fill info for both query and add/update */ + cpfl_fill_rule_mod_content(minfo->mod_obj_size, + minfo->pin_mod_content, + minfo->mod_index, + &cfg.ext.mod_content); + + /* only fill content for add/update */ + memcpy(blob->mod_blob, minfo->mod_content, + minfo->mod_content_byte_len); + +#define NO_HOST_NEEDED 0 + /* pack message */ + cpfl_fill_rule_cfg_data_common(cpfl_ctlq_mod_add_update_rule, + rinfo->cookie, + 0, /* vsi_id not used for mod */ + rinfo->port_num, + NO_HOST_NEEDED, + 0, /* time_sel */ + 0, /* time_sel_val */ + 0, /* cache_wr_thru */ + rinfo->resp_req, + (u16)sizeof(*blob), + (void *)dma, + &cfg.common); + cpfl_prep_rule_desc(&cfg, msg); + return 0; +} + +static int pack_default_rule(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma, + struct idpf_ctlq_msg *msg, bool add) +{ + union cpfl_rule_cfg_pkt_record *blob = NULL; + enum cpfl_ctlq_rule_cfg_opc opc; + struct cpfl_rule_cfg_data cfg; + uint16_t cfg_ctrl; + + if (!dma->va) { + PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__); + return -1; + } + blob = (union cpfl_rule_cfg_pkt_record *)dma->va; + memset(blob, 0, sizeof(*blob)); + memset(msg, 0, sizeof(*msg)); + + if (rinfo->type == CPFL_RULE_TYPE_SEM) { + 
cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(rinfo->sem.prof_id, + rinfo->sem.sub_prof_id, + rinfo->sem.pin_to_cache, + rinfo->sem.fixed_fetch); + cpfl_prep_sem_rule_blob(rinfo->sem.key, rinfo->sem.key_byte_len, + rinfo->act_bytes, rinfo->act_byte_len, + cfg_ctrl, blob); + opc = add ? cpfl_ctlq_sem_add_rule : cpfl_ctlq_sem_del_rule; + } + + cpfl_fill_rule_cfg_data_common(opc, + rinfo->cookie, + rinfo->vsi, + rinfo->port_num, + rinfo->host_id, + 0, /* time_sel */ + 0, /* time_sel_val */ + 0, /* cache_wr_thru */ + rinfo->resp_req, + sizeof(union cpfl_rule_cfg_pkt_record), + dma, + &cfg.common); + cpfl_prep_rule_desc(&cfg, msg); + return 0; +} + +static int pack_rule(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma, + struct idpf_ctlq_msg *msg, bool add) +{ + int ret = 0; + + if (rinfo->type == CPFL_RULE_TYPE_SEM) { + if (pack_default_rule(rinfo, dma, msg, add) < 0) + ret = -1; + } else if (rinfo->type == CPFL_RULE_TYPE_MOD) { + if (pack_mod_rule(rinfo, dma, msg) < 0) + ret = -1; + } + + return ret; +} + +int +cpfl_rule_update(struct cpfl_itf *itf, + struct idpf_ctlq_info *tx_cq, + struct idpf_ctlq_info *rx_cq, + struct cpfl_rule_info *rinfo, + int rule_num, + bool add) +{ + struct idpf_hw *hw = &itf->adapter->base.hw; + int i; + int ret = 0; + + if (rule_num == 0) + return 0; + + for (i = 0; i < rule_num; i++) { + ret = pack_rule(&rinfo[i], &itf->dma[i], &itf->msg[i], add); + if (ret) { + PMD_INIT_LOG(ERR, "Could not create rule"); + return ret; + } + } + ret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to send rule"); + return ret; + } + ret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to create rule"); + return ret; + } + + return 0; +} diff --git a/drivers/net/cpfl/cpfl_fxp_rule.h b/drivers/net/cpfl/cpfl_fxp_rule.h new file mode 100644 index 0000000000..68efa8e3f8 --- /dev/null +++ b/drivers/net/cpfl/cpfl_fxp_rule.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Intel Corporation + */ + +#ifndef _CPFL_FXP_RULE_H_ +#define _CPFL_FXP_RULE_H_ + +#include "cpfl_rules.h" + +#define CPFL_MAX_KEY_LEN 128 +#define CPFL_MAX_RULE_ACTIONS 32 + +struct cpfl_sem_rule_info { + uint16_t prof_id; + uint8_t sub_prof_id; + uint8_t key[CPFL_MAX_KEY_LEN]; + uint8_t key_byte_len; + uint8_t pin_to_cache; + uint8_t fixed_fetch; +}; + +struct cpfl_lem_rule_info { + uint16_t prof_id; + uint8_t key[CPFL_MAX_KEY_LEN]; + uint8_t key_byte_len; + uint8_t pin_to_cache; + uint8_t fixed_fetch; +}; + +#define CPFL_MAX_MOD_CONTENT_LEN 256 +struct cpfl_mod_rule_info { + uint8_t mod_content[CPFL_MAX_MOD_CONTENT_LEN]; + uint8_t mod_content_byte_len; + uint32_t mod_index; + uint8_t pin_mod_content; + uint8_t mod_obj_size; +}; + +enum cpfl_rule_type { + CPFL_RULE_TYPE_NONE, + CPFL_RULE_TYPE_SEM, + CPFL_RULE_TYPE_LEM, + CPFL_RULE_TYPE_MOD +}; + +struct cpfl_rule_info { + enum cpfl_rule_type type; + uint64_t cookie; + uint8_t host_id; + uint8_t port_num; + uint8_t resp_req; + /* TODO: change this to be dynamically allocated/reallocated */ + uint8_t act_bytes[CPFL_MAX_RULE_ACTIONS * sizeof(union cpfl_action_set)]; + uint8_t act_byte_len; + /* vsi is used for lem and lpm rules */ + uint16_t vsi; + uint8_t clear_mirror_1st_state; + /* mod related fields */ + union { + struct cpfl_mod_rule_info mod; + struct cpfl_sem_rule_info sem; + struct cpfl_lem_rule_info lem; + }; +}; + +struct cpfl_meter_action_info { + uint8_t meter_logic_bank_id; + uint32_t meter_logic_idx; + uint8_t prof_id; + uint8_t 
slot; +}; + +extern struct cpfl_vport_ext *vport; + +int cpfl_rule_update(struct cpfl_itf *itf, + struct idpf_ctlq_info *tx_cq, + struct idpf_ctlq_info *rx_cq, + struct cpfl_rule_info *rinfo, + int rule_num, + bool add); +int +cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg, + struct idpf_ctlq_msg q_msg[]); +int +cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg, + struct idpf_ctlq_msg q_msg[]); +#endif /*CPFL_FXP_RULE_H*/ diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build index 222497f7c2..4061123034 100644 --- a/drivers/net/cpfl/meson.build +++ b/drivers/net/cpfl/meson.build @@ -46,6 +46,7 @@ if js_dep.found() 'cpfl_flow_parser.c', 'cpfl_rules.c', 'cpfl_controlq.c', + 'cpfl_fxp_rule.c', ) dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true) ext_deps += js_dep From patchwork Sat Aug 12 07:55:06 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Zhang, Yuying" X-Patchwork-Id: 130255 X-Patchwork-Delegate: qi.z.zhang@intel.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 3C0C84303E; Sat, 12 Aug 2023 09:32:51 +0200 (CEST) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id E0DC843268; Sat, 12 Aug 2023 09:32:32 +0200 (CEST) Received: from mgamail.intel.com (mgamail.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id C316143271 for ; Sat, 12 Aug 2023 09:32:30 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1691825551; x=1723361551; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=Vanq2r40Yx3ZA0+7jgIy4QaelsnHSDtyrJ6nqfnTEwY=; b=YbWNukDRe92ufm27Qb4yDOHaDIv/UxESjoziZQGFi4SodFsXHCJDwto6 dgJRIcnUBDeYjtuH3Szqm71YRv9r6TUEws9FiPFj7U0oy492ix08GTKfZ Ugr6ezdwonc6sgy56NRWj2Ma2s8UGTMBFDk5ZD8AqHbDSYQJKOCE/6Va+ p81PatTnoBm0DTvIrNJWH7dtW1QVadSEn6/PzSwR81Y29HZswangVhcP1 19uqCXdyctJTObP0y49nwvxuovx5P94l88d/iiBPJJqIU+A1OFYXGfMEI 0buogaUUbL0pqqya9Pqs/v/vw/7YpK3lzQAduBlef9OSAfY/HGWyXd0DJ Q==; X-IronPort-AV: E=McAfee;i="6600,9927,10799"; a="369282010" X-IronPort-AV: E=Sophos;i="6.01,167,1684825200"; d="scan'208";a="369282010" Received: from fmsmga008.fm.intel.com ([10.253.24.58]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Aug 2023 00:32:30 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6600,9927,10799"; a="798268365" X-IronPort-AV: E=Sophos;i="6.01,167,1684825200"; d="scan'208";a="798268365" Received: from dpdk-yuyingzh-icelake.sh.intel.com ([10.67.116.226]) by fmsmga008.fm.intel.com with ESMTP; 12 Aug 2023 00:32:28 -0700 From: Yuying Zhang To: dev@dpdk.org, beilei.xing@intel.com, qi.z.zhang@intel.com, jingjing.wu@intel.com Cc: Yuying Zhang Subject: [PATCH v1 5/5] net/cpfl: add fxp flow engine Date: Sat, 12 Aug 2023 07:55:06 +0000 Message-Id: <20230812075506.361769-6-yuying.zhang@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230812075506.361769-1-yuying.zhang@intel.com> References: <20230812075506.361769-1-yuying.zhang@intel.com> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Adapt fxp low level as a flow 
engine. Signed-off-by: Yuying Zhang Signed-off-by: Qi Zhang --- drivers/net/cpfl/cpfl_ethdev.h | 85 ++++ drivers/net/cpfl/cpfl_flow_engine_fxp.c | 610 ++++++++++++++++++++++++ drivers/net/cpfl/meson.build | 1 + 3 files changed, 696 insertions(+) create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h index 63bcc5551f..d7e9ea1a74 100644 --- a/drivers/net/cpfl/cpfl_ethdev.h +++ b/drivers/net/cpfl/cpfl_ethdev.h @@ -92,6 +92,8 @@ #define CPFL_RX_CFGQ_NUM 4 #define CPFL_TX_CFGQ_NUM 4 +#define CPFL_FPCP_CFGQ_TX 0 +#define CPFL_FPCP_CFGQ_RX 1 #define CPFL_CFGQ_NUM 8 struct cpfl_vport_param { @@ -230,6 +232,8 @@ struct cpfl_adapter_ext { struct rte_hash *repr_whitelist_hash; struct cpfl_flow_js_parser *flow_parser; + struct rte_bitmap *mod_bm; + void *mod_bm_mem; /* ctrl vport and ctrl queues. */ struct cpfl_vport ctrl_vport; @@ -265,5 +269,86 @@ int cpfl_packets_dispatch(void *arg); ((struct cpfl_repr *)((dev)->data->dev_private)) #define CPFL_DEV_TO_ITF(dev) \ ((struct cpfl_itf *)((dev)->data->dev_private)) +#define CPFL_INVALID_HW_ID UINT16_MAX + +static inline uint16_t +cpfl_get_port_id(struct cpfl_itf *itf) +{ + if (!itf) + return CPFL_INVALID_HW_ID; + + if (itf->type == CPFL_ITF_TYPE_VPORT) { + struct cpfl_vport *vport = (void *)itf; + + return vport->base.devarg_id; + } + + return CPFL_INVALID_HW_ID; +} + +static inline uint16_t +cpfl_get_vsi_id(struct cpfl_itf *itf) +{ + struct cpfl_adapter_ext *adapter = itf->adapter; + struct cpfl_vport_info *info; + uint32_t vport_id; + int ret; + struct cpfl_vport_id vport_identity; + + if (!itf) + return CPFL_INVALID_HW_ID; + + if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) { + struct cpfl_repr *repr = (void *)itf; + + return repr->vport_info->vport_info.vsi_id; + } else if (itf->type == CPFL_ITF_TYPE_VPORT) { + vport_id = ((struct cpfl_vport *)itf)->base.vport_id; + vport_identity.func_type = CPCHNL2_FUNC_TYPE_PF; + /* host: HOST0_CPF_ID, acc: ACC_CPF_ID */ + vport_identity.pf_id = ACC_CPF_ID; + vport_identity.vf_id = 0; + vport_identity.vport_id = vport_id; + + ret = rte_hash_lookup_data(adapter->vport_map_hash, &vport_identity, + (void **)&info); + if (ret < 0) { + PMD_DRV_LOG(ERR, "vport id not exist"); + goto err; + } + + /* rte_spinlock_unlock(&adapter->vport_map_lock); */ + return info->vport_info.vsi_id; + } + +err: + /* rte_spinlock_unlock(&adapter->vport_map_lock); */ + return CPFL_INVALID_HW_ID; +} + +static inline struct cpfl_itf * +cpfl_get_itf_by_port_id(uint16_t port_id) +{ + struct rte_eth_dev *dev; + + if (port_id >= RTE_MAX_ETHPORTS) { + PMD_DRV_LOG(ERR, "port_id should be < %d.", RTE_MAX_ETHPORTS); + return NULL; + } + + dev = &rte_eth_devices[port_id]; + + if (dev->state == RTE_ETH_DEV_UNUSED) { + PMD_DRV_LOG(ERR, "eth_dev[%d] is unused.", port_id); + return NULL; + } + + if (!dev->data) { + PMD_DRV_LOG(ERR, "eth_dev[%d] data not be allocated.", port_id); + return NULL; + } + + return CPFL_DEV_TO_ITF(dev); +} #endif /* _CPFL_ETHDEV_H_ */ diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c new file mode 100644 index 0000000000..e10639c842 --- /dev/null +++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c @@ -0,0 +1,610 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cpfl_rules.h" +#include 
"cpfl_logs.h" +#include "cpfl_ethdev.h" +#include "cpfl_flow.h" +#include "cpfl_fxp_rule.h" +#include "cpfl_flow_parser.h" +#include "rte_memcpy.h" + +#define COOKIE_DEF 0x1000 +#define PREC_MAX 7 +#define PREC_DEF 1 +#define PREC_SET 5 +#define TYPE_ID 3 +#define OFFSET 0x0a +#define HOST_ID_DEF 0 +#define PF_NUM_DEF 0 +#define PORT_NUM_DEF 0 +#define RESP_REQ_DEF 2 +#define PIN_TO_CACHE_DEF 0 +#define CLEAR_MIRROR_1ST_STATE_DEF 0 +#define FIXED_FETCH_DEF 0 +#define PTI_DEF 0 +#define MOD_OBJ_SIZE_DEF 0 +#define PIN_MOD_CONTENT_DEF 0 + +#define MAX_MOD_CONTENT_INDEX 256 +#define MAX_MR_ACTION_NUM 8 + +struct rule_info_meta { + struct cpfl_flow_pr_action pr_action; + uint32_t pr_num; + uint32_t mr_num; + uint32_t rule_num; + struct cpfl_rule_info rules[0]; +}; + +static uint32_t fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad); +static void fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx); +uint64_t rule_cookie = COOKIE_DEF; + +static int +cpfl_fxp_create(struct rte_eth_dev *dev, + struct rte_flow *flow, + void *meta, + struct rte_flow_error *error) +{ + int ret = 0; + uint32_t cpq_id = 0; + struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev); + struct cpfl_adapter_ext *ad = itf->adapter; + struct rule_info_meta *rim = meta; + struct cpfl_vport *vport; + + if (!rim) + return ret; + + if (itf->type == CPFL_ITF_TYPE_VPORT) { + vport = (struct cpfl_vport *)itf; + cpq_id = vport->base.devarg_id * 2; + } else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) { + cpq_id = CPFL_FPCP_CFGQ_TX; + } else { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "fail to find correct control queue"); + return -rte_errno; + } + + ret = cpfl_rule_update(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1], + rim->rules, rim->rule_num, true); + if (ret < 0) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "cpfl filter create flow fail"); + rte_free(rim); + return ret; + } + + flow->rule = rim; + + return ret; +} + +static inline void +cpfl_fxp_rule_free(struct rte_flow *flow) +{ + rte_free(flow->rule); + flow->rule = NULL; +} + +static int +cpfl_fxp_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + int ret = 0; + uint32_t cpq_id = 0; + struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev); + struct cpfl_adapter_ext *ad = itf->adapter; + struct rule_info_meta *rim; + uint32_t i; + struct cpfl_vport *vport; + + rim = flow->rule; + if (!rim) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "no such flow create by cpfl filter"); + + cpfl_fxp_rule_free(flow); + + return -rte_errno; + } + + if (itf->type == CPFL_ITF_TYPE_VPORT) { + vport = (struct cpfl_vport *)itf; + cpq_id = vport->base.devarg_id * 2; + } else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) { + cpq_id = CPFL_FPCP_CFGQ_TX; + } else { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "fail to find correct control queue"); + return -rte_errno; + } + + ret = cpfl_rule_update(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1], rim->rules, + rim->rule_num, false); + if (ret < 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "fail to destroy cpfl filter rule"); + goto err; + } + + /* free mod index */ + for (i = rim->pr_num; i < rim->rule_num; i++) + fxp_mod_idx_free(ad, rim->rules[i].mod.mod_index); +err: + cpfl_fxp_rule_free(flow); + return ret; +} + +static bool +cpfl_fxp_parse_pattern(const struct cpfl_flow_pr_action *pr_action, + struct rule_info_meta *rim, + int i) +{ + if (pr_action->type == 
CPFL_JS_PR_ACTION_TYPE_SEM) { + struct cpfl_rule_info *rinfo = &rim->rules[i]; + + rinfo->type = CPFL_RULE_TYPE_SEM; + rinfo->sem.prof_id = pr_action->sem.prof; + rinfo->sem.sub_prof_id = pr_action->sem.subprof; + rinfo->sem.key_byte_len = pr_action->sem.keysize; + rte_memcpy(rinfo->sem.key, pr_action->sem.cpfl_flow_pr_fv, rinfo->sem.key_byte_len); + rinfo->sem.pin_to_cache = PIN_TO_CACHE_DEF; + rinfo->sem.fixed_fetch = FIXED_FETCH_DEF; + } else { + PMD_DRV_LOG(ERR, "Invalid pattern item."); + return false; + } + + return true; +} + +static int +cpfl_parse_mod_content(struct cpfl_adapter_ext *adapter, + struct cpfl_rule_info *match_rinfo, + struct cpfl_rule_info *mod_rinfo, + const struct cpfl_flow_mr_action *mr_action) +{ + struct cpfl_mod_rule_info *minfo = &mod_rinfo->mod; + uint32_t mod_idx; + int i; + int next = match_rinfo->act_byte_len / (sizeof(union cpfl_action_set)); + union cpfl_action_set *act_set = + &((union cpfl_action_set *)match_rinfo->act_bytes)[next]; + + if (!mr_action || mr_action->type != CPFL_JS_MR_ACTION_TYPE_MOD) + return -EINVAL; + + *act_set = cpfl_act_mod_profile(PREC_DEF, + mr_action->mod.prof, + PTI_DEF, + 0, /* append */ + 0, /* prepend */ + CPFL_ACT_MOD_PROFILE_PREFETCH_256B); + + act_set++; + match_rinfo->act_byte_len += sizeof(union cpfl_action_set); + + mod_idx = fxp_mod_idx_alloc(adapter); + if (mod_idx == MAX_MOD_CONTENT_INDEX) { + PMD_DRV_LOG(ERR, "Out of Mod Index."); + return -ENOMEM; + } + + *act_set = cpfl_act_mod_addr(PREC_DEF, mod_idx); + + act_set++; + match_rinfo->act_byte_len += sizeof(union cpfl_action_set); + + mod_rinfo->type = CPFL_RULE_TYPE_MOD; + minfo->mod_obj_size = MOD_OBJ_SIZE_DEF; + minfo->pin_mod_content = PIN_MOD_CONTENT_DEF; + minfo->mod_index = mod_idx; + mod_rinfo->cookie = 0x1237561; + mod_rinfo->port_num = PORT_NUM_DEF; + mod_rinfo->resp_req = RESP_REQ_DEF; + + minfo->mod_content_byte_len = mr_action->mod.byte_len + 2; + for (i = 0; i < minfo->mod_content_byte_len; i++) + minfo->mod_content[i] = mr_action->mod.data[i]; + + return 0; +} + +static int +cpfl_fxp_parse_action(struct cpfl_itf *itf, + const struct rte_flow_action *actions, + const struct cpfl_flow_mr_action *mr_action, + struct rule_info_meta *rim, + int priority, + int index, + bool is_vport_rule) +{ + const struct rte_flow_action_ethdev *act_ethdev; + const struct rte_flow_action *action; + const struct rte_flow_action_queue *act_q; + const struct rte_flow_action_rss *rss; + struct rte_eth_dev_data *data; + enum rte_flow_action_type action_type; + struct cpfl_vport *vport; + /* used when action is REPRESENTED_PORT or REPRESENTED_PORT type */ + struct cpfl_itf *dst_itf; + uint16_t dev_id; /*vsi_id or phyical port id*/ + bool is_vsi; + bool set_meta_valid = false; + int queue_id = -1; + bool fwd_vsi = false; + bool fwd_q = false; + bool fwd_jump = false; + uint32_t i; + struct cpfl_rule_info *rinfo = &rim->rules[index]; + union cpfl_action_set *act_set = (void *)rinfo->act_bytes; + + priority = PREC_MAX - priority; + for (action = actions; action->type != + RTE_FLOW_ACTION_TYPE_END; action++) { + action_type = action->type; + switch (action_type) { + case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: + case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: + if (!fwd_vsi && !fwd_jump) + fwd_vsi = true; + else + goto err; + if (is_vport_rule) { + dst_itf = itf; + } else { + act_ethdev = action->conf; + dst_itf = cpfl_get_itf_by_port_id(act_ethdev->port_id); + } + + if (!dst_itf) + goto err; + + if (dst_itf->type == CPFL_ITF_TYPE_VPORT) { + vport = (struct cpfl_vport *)dst_itf; + 
queue_id = vport->base.chunks_info.rx_start_qid; + } else { + queue_id = -2; + } + + is_vsi = (action_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR || + dst_itf->type == CPFL_ITF_TYPE_REPRESENTOR); + if (is_vsi || is_vport_rule) + dev_id = cpfl_get_vsi_id(dst_itf); + else + dev_id = cpfl_get_port_id(dst_itf); + + if (dev_id == CPFL_INVALID_HW_ID) + goto err; + + if (is_vsi || is_vport_rule) + *act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id); + else + *act_set = cpfl_act_fwd_port(0, priority, 0, dev_id); + act_set++; + rinfo->act_byte_len += sizeof(union cpfl_action_set); + break; + case RTE_FLOW_ACTION_TYPE_QUEUE: + if (!fwd_q && !fwd_jump) + fwd_q = true; + else + goto err; + if (queue_id == -2) + goto err; + act_q = action->conf; + data = itf->data; + if (act_q->index >= data->nb_rx_queues) + goto err; + + vport = (struct cpfl_vport *)itf; + if (queue_id < 0) + queue_id = vport->base.chunks_info.rx_start_qid; + queue_id += act_q->index; + *act_set = cpfl_act_set_hash_queue(priority, 0, queue_id, 0); + act_set++; + rinfo->act_byte_len += sizeof(union cpfl_action_set); + break; + case RTE_FLOW_ACTION_TYPE_RSS: + rss = action->conf; + if (rss->queue_num <= 1) + goto err; + for (i = 0; i < rss->queue_num - 1; i++) { + if (rss->queue[i + 1] != rss->queue[i] + 1) + goto err; + } + data = itf->data; + if (rss->queue[rss->queue_num - 1] >= data->nb_rx_queues) + goto err; +#define FXP_MAX_QREGION_SIZE 128 + if (!(rte_is_power_of_2(rss->queue_num) && + rss->queue_num <= FXP_MAX_QREGION_SIZE)) + goto err; + + if (!fwd_q && !fwd_jump) + fwd_q = true; + else + goto err; + if (queue_id == -2) + goto err; + vport = (struct cpfl_vport *)itf; + if (queue_id < 0) + queue_id = vport->base.chunks_info.rx_start_qid; + queue_id += rss->queue[0]; + *act_set = cpfl_act_set_hash_queue_region(priority, 0, queue_id, + log(rss->queue_num) / log(2), 0); + act_set++; + rinfo->act_byte_len += sizeof(union cpfl_action_set); + break; + case RTE_FLOW_ACTION_TYPE_DROP: + (*act_set).data = cpfl_act_drop(priority).data; + act_set++; + rinfo->act_byte_len += sizeof(union cpfl_action_set); + (*act_set).data = cpfl_act_set_commit_mode(priority, 0).data; + act_set++; + rinfo->act_byte_len += sizeof(union cpfl_action_set); + break; + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: + break; + case RTE_FLOW_ACTION_TYPE_VOID: + break; + default: + goto err; + } + } + + if (mr_action != NULL && !set_meta_valid) { + uint32_t i; + + for (i = 0; i < rim->mr_num; i++) + if (cpfl_parse_mod_content(itf->adapter, rinfo, + &rim->rules[rim->pr_num + i], + &mr_action[i])) + goto err; + } + + return 0; + +err: + PMD_DRV_LOG(ERR, "Invalid action type"); + return -EINVAL; +} + +static void +cpfl_fill_rinfo_default_value(struct cpfl_rule_info *rinfo) +{ + if (rule_cookie == ~0llu) + rule_cookie = COOKIE_DEF; + rinfo->cookie = rule_cookie++; + rinfo->host_id = HOST_ID_DEF; + rinfo->port_num = PORT_NUM_DEF; + rinfo->resp_req = RESP_REQ_DEF; + rinfo->clear_mirror_1st_state = CLEAR_MIRROR_1ST_STATE_DEF; +} + +static bool is_mod_action(const struct rte_flow_action actions[], bool *set_meta_valid) +{ + const struct rte_flow_action *action; + enum rte_flow_action_type action_type; + + if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END) + return false; + + for (action = actions; action->type != + RTE_FLOW_ACTION_TYPE_END; action++) { + action_type = action->type; + switch (action_type) { + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: + case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD: + 
return true; + case RTE_FLOW_ACTION_TYPE_SET_TAG: + *set_meta_valid = true; + return true; + default: + continue; + } + } + return false; +} + +static int +cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta) +{ + struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev); + struct cpfl_flow_pr_action pr_action = { 0 }; + struct cpfl_adapter_ext *adapter = itf->adapter; + struct cpfl_flow_mr_action mr_action[MAX_MR_ACTION_NUM] = { 0 }; + uint32_t pr_num = 0, mr_num = 0; + struct cpfl_vport *vport; + struct rule_info_meta *rim; + bool set_meta_valid = false; + int ret; + + if (itf->type == CPFL_ITF_TYPE_VPORT) { + vport = (struct cpfl_vport *)itf; + if (vport->exceptional) { + PMD_DRV_LOG(ERR, "Can't create rte_flow with exceptional vport."); + return -EINVAL; + } + } + + ret = cpfl_flow_parse_items(adapter->flow_parser, pattern, attr, &pr_action); + if (ret) { + PMD_DRV_LOG(ERR, "No Match pattern support."); + return -EINVAL; + } + + if (is_mod_action(actions, &set_meta_valid)) { + ret = cpfl_flow_parse_actions(adapter->flow_parser, actions, mr_action); + if (ret) { + PMD_DRV_LOG(ERR, "action parse fails."); + return -EINVAL; + } + if (!set_meta_valid) + mr_num++; + } + + pr_num = 1; + rim = rte_zmalloc(NULL, + sizeof(struct rule_info_meta) + + (pr_num + mr_num) * sizeof(struct cpfl_rule_info), + 0); + if (!rim) + return -ENOMEM; + + rim->pr_action = pr_action; + rim->pr_num = pr_num; + rim->mr_num = mr_num; + rim->rule_num = pr_num + mr_num; + + if (!cpfl_fxp_parse_pattern(&pr_action, rim, 0)) { + PMD_DRV_LOG(ERR, "Invalid input set"); + rte_free(rim); + return -rte_errno; + } + + if (cpfl_fxp_parse_action(itf, actions, mr_action, rim, attr->priority, + 0, false)) { + PMD_DRV_LOG(ERR, "Invalid input set"); + rte_free(rim); + return -rte_errno; + } + + cpfl_fill_rinfo_default_value(&rim->rules[0]); + + if (!meta) + rte_free(rim); + else + *meta = rim; + + return 0; +} + +static int fxp_mod_init(struct cpfl_adapter_ext *ad) +{ + uint32_t size = rte_bitmap_get_memory_footprint(MAX_MOD_CONTENT_INDEX); + + void *mem = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE); + + if (!mem) + return -ENOMEM; + + /* a set bit represent a free slot */ + ad->mod_bm = rte_bitmap_init_with_all_set(MAX_MOD_CONTENT_INDEX, mem, size); + if (!ad->mod_bm) { + rte_free(mem); + return -EINVAL; + } + + ad->mod_bm_mem = mem; + + return 0; +} + +static void fxp_mod_uninit(struct cpfl_adapter_ext *ad) +{ + rte_free(ad->mod_bm_mem); + ad->mod_bm_mem = NULL; + ad->mod_bm = NULL; +} + +static uint32_t fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad) +{ + uint64_t slab = 0; + uint32_t pos = 0; + + if (!rte_bitmap_scan(ad->mod_bm, &pos, &slab)) + return MAX_MOD_CONTENT_INDEX; + + pos += __builtin_ffsll(slab) - 1; + rte_bitmap_clear(ad->mod_bm, pos); + + return pos; +} + +static void fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx) +{ + rte_bitmap_set(ad->mod_bm, idx); +} + +static int +cpfl_fxp_query(struct rte_eth_dev *dev __rte_unused, + struct rte_flow *flow __rte_unused, + struct rte_flow_query_count *count __rte_unused, + struct rte_flow_error *error) +{ + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "count action not supported by this module"); + + return -rte_errno; +} + +static void +cpfl_fxp_uninit(struct cpfl_adapter_ext *ad) +{ + fxp_mod_uninit(ad); +} + +static int +cpfl_fxp_init(struct cpfl_adapter_ext *ad) +{ + int ret = 0; + + ret = fxp_mod_init(ad); + if 
(ret) { + PMD_DRV_LOG(ERR, "Failed to init mod content bitmap."); + return ret; + } + + return ret; +} + +static struct +cpfl_flow_engine cpfl_fxp_engine = { + .type = CPFL_FLOW_ENGINE_FXP, + .init = cpfl_fxp_init, + .uninit = cpfl_fxp_uninit, + .create = cpfl_fxp_create, + .destroy = cpfl_fxp_destroy, + .query_count = cpfl_fxp_query, + .parse_pattern_action = cpfl_fxp_parse_pattern_action, +}; + +RTE_INIT(cpfl_sw_engine_init) +{ + struct cpfl_flow_engine *engine = &cpfl_fxp_engine; + + cpfl_flow_engine_register(engine); +} diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build index 4061123034..ce46d7e76e 100644 --- a/drivers/net/cpfl/meson.build +++ b/drivers/net/cpfl/meson.build @@ -43,6 +43,7 @@ js_dep = dependency('json-c', required: false, method : 'pkg-config') if js_dep.found() sources += files( 'cpfl_flow.c', + 'cpfl_flow_engine_fxp.c', 'cpfl_flow_parser.c', 'cpfl_rules.c', 'cpfl_controlq.c',
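
A minimal application-side usage sketch, assuming the device is a cpfl vport with the FXP engine available: rules created through the generic rte_flow API reach this series via cpfl_dev_flow_ops_get(), then cpfl_fxp_parse_pattern_action() and cpfl_fxp_create(). The port id, queue index and the eth/ipv4 pattern below are illustrative placeholders; whether a given pattern is accepted depends on the json-c parser profile loaded at adapter init, and the rte_flow priority is inverted against PREC_MAX before it is programmed as hardware precedence.

#include <rte_ethdev.h>
#include <rte_flow.h>

/* Illustrative only: port_id and the queue index are placeholders. */
static int
create_ipv4_to_queue_rule(uint16_t port_id)
{
	struct rte_flow_error err;
	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow *flow;

	/* Both calls go through cpfl_flow_ops; the framework asks the
	 * registered engines to parse the pattern/actions and the engine
	 * that accepts them (here FXP) programs the rule via the control queues.
	 */
	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0)
		return -1;

	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
	return flow == NULL ? -1 : 0;
}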
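
The RTE_INIT registration at the end of cpfl_flow_engine_fxp.c is the extension point of the flow framework from patch 1/5. A skeleton of how a further engine could plug into the same framework, assuming a hypothetical CPFL_FLOW_ENGINE_DUMMY type value and callback signatures matching the FXP engine above; nothing in this sketch is part of the submitted patches.

#include <errno.h>
#include <rte_common.h>
#include <rte_flow.h>
#include "cpfl_ethdev.h"
#include "cpfl_flow.h"

static int
dummy_engine_init(struct cpfl_adapter_ext *ad)
{
	RTE_SET_USED(ad);
	return 0;
}

static int
dummy_parse_pattern_action(struct rte_eth_dev *dev,
			   const struct rte_flow_attr *attr,
			   const struct rte_flow_item pattern[],
			   const struct rte_flow_action actions[],
			   void **meta)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(attr);
	RTE_SET_USED(pattern);
	RTE_SET_USED(actions);
	RTE_SET_USED(meta);
	/* Non-zero tells the framework this engine does not handle the rule
	 * (assumed fall-through behaviour of the engine dispatch loop).
	 */
	return -ENOTSUP;
}

static struct cpfl_flow_engine dummy_engine = {
	.type = CPFL_FLOW_ENGINE_DUMMY, /* hypothetical enum value */
	.init = dummy_engine_init,
	.parse_pattern_action = dummy_parse_pattern_action,
};

RTE_INIT(cpfl_dummy_engine_init)
{
	cpfl_flow_engine_register(&dummy_engine);
}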