From patchwork Wed Mar 18 05:41:57 2020
X-Patchwork-Id: 66828
From: Simei Su
To: xiaolong.ye@intel.com, qi.z.zhang@intel.com
Cc: dev@dpdk.org, yahui.cao@intel.com, jingjing.wu@intel.com, simei.su@intel.com
Date: Wed, 18 Mar 2020 13:41:57 +0800
Message-Id: <1584510121-377747-2-git-send-email-simei.su@intel.com>
Subject: [dpdk-dev] [PATCH 1/5] net/iavf: add support for FDIR basic rule

This patch adds the FDIR create/destroy/validate functions in AVF.
Common patterns and the queue/qgroup/passthru/drop actions are supported.
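A note on usage: rules of this kind are requested from the application through
the generic rte_flow API. The sketch below is illustrative only and not part of
the patch -- port_id, the address and the queue index are assumed values -- but
it shows the validate/create sequence that maps onto iavf_fdir_check()
(validate_only = 1) and iavf_fdir_add():

    #include <rte_flow.h>
    #include <rte_byteorder.h>
    #include <rte_ip.h>

    /* Sketch: steer IPv4/UDP flows to 192.168.0.1:5000 into Rx queue 4.
     * port_id, the address and the queue index are assumptions.
     */
    static struct rte_flow *
    fdir_udp_to_queue(uint16_t port_id, struct rte_flow_error *err)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_ipv4 ip4_spec = {
            .hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1)),
        };
        struct rte_flow_item_ipv4 ip4_mask = {
            .hdr.dst_addr = RTE_BE32(UINT32_MAX), /* full mask required */
        };
        struct rte_flow_item_udp udp_spec = { .hdr.dst_port = RTE_BE16(5000) };
        struct rte_flow_item_udp udp_mask = { .hdr.dst_port = RTE_BE16(UINT16_MAX) };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4,
              .spec = &ip4_spec, .mask = &ip4_mask },
            { .type = RTE_FLOW_ITEM_TYPE_UDP,
              .spec = &udp_spec, .mask = &udp_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 4 };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        /* Ends up in iavf_fdir_check(): the PF validates without programming. */
        if (rte_flow_validate(port_id, &attr, pattern, actions, err))
            return NULL;

        /* Ends up in iavf_fdir_create() -> iavf_fdir_add(). */
        return rte_flow_create(port_id, &attr, pattern, actions, err);
    }

Calling rte_flow_validate() first lets the PF reject a rule before any HW
resource is consumed, which is exactly what the validation hook is for.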
Signed-off-by: Simei Su --- drivers/net/iavf/Makefile | 1 + drivers/net/iavf/iavf.h | 16 + drivers/net/iavf/iavf_fdir.c | 762 ++++++++++++++++++++++++++++++++++++++++++ drivers/net/iavf/iavf_vchnl.c | 128 ++++++- drivers/net/iavf/meson.build | 1 + 5 files changed, 907 insertions(+), 1 deletion(-) create mode 100644 drivers/net/iavf/iavf_fdir.c diff --git a/drivers/net/iavf/Makefile b/drivers/net/iavf/Makefile index 1bf0f26..193bc55 100644 --- a/drivers/net/iavf/Makefile +++ b/drivers/net/iavf/Makefile @@ -24,6 +24,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_ethdev.c SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_vchnl.c SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx.c SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_generic_flow.c +SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_fdir.c ifeq ($(CONFIG_RTE_ARCH_X86), y) SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_sse.c endif diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index 48b9509..62a3eb8 100644 --- a/drivers/net/iavf/iavf.h +++ b/drivers/net/iavf/iavf.h @@ -99,6 +99,16 @@ struct iavf_vsi { struct iavf_flow_parser_node; TAILQ_HEAD(iavf_parser_list, iavf_flow_parser_node); +struct iavf_fdir_conf { + struct virtchnl_fdir_fltr input; + uint64_t input_set; + uint32_t flow_id; +}; + +struct iavf_fdir_info { + struct iavf_fdir_conf conf; +}; + /* TODO: is that correct to assume the max number to be 16 ?*/ #define IAVF_MAX_MSIX_VECTORS 16 @@ -138,6 +148,8 @@ struct iavf_info { struct iavf_flow_list flow_list; struct iavf_parser_list rss_parser_list; struct iavf_parser_list dist_parser_list; + + struct iavf_fdir_info fdir; /* flow director info */ }; #define IAVF_MAX_PKT_TYPE 1024 @@ -260,4 +272,8 @@ int iavf_config_promisc(struct iavf_adapter *adapter, bool enable_unicast, int iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr, bool add); int iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add); +int iavf_fdir_add(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter); +int iavf_fdir_del(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter); +int iavf_fdir_check(struct iavf_adapter *adapter, + struct iavf_fdir_conf *filter); #endif /* _IAVF_ETHDEV_H_ */ diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c new file mode 100644 index 0000000..dd321ba --- /dev/null +++ b/drivers/net/iavf/iavf_fdir.c @@ -0,0 +1,762 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "iavf.h" +#include "iavf_generic_flow.h" +#include "virtchnl.h" + +#define IAVF_FDIR_MAX_QREGION_SIZE 128 + +#define IAVF_FDIR_IPV6_TC_OFFSET 20 +#define IAVF_IPV6_TC_MASK (0xFF << IAVF_FDIR_IPV6_TC_OFFSET) + +#define IAVF_FDIR_INSET_ETH (\ + IAVF_INSET_ETHERTYPE) + +#define IAVF_FDIR_INSET_ETH_IPV4 (\ + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \ + IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \ + IAVF_INSET_IPV4_TTL) + +#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\ + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \ + IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \ + IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT) + +#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\ + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \ + IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \ + IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT) + +#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\ + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \ + IAVF_INSET_IPV4_TOS | 
IAVF_INSET_IPV4_TTL | \ + IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT) + +#define IAVF_FDIR_INSET_ETH_IPV6 (\ + IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \ + IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \ + IAVF_INSET_IPV6_HOP_LIMIT) + +#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\ + IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \ + IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \ + IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT) + +#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\ + IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \ + IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \ + IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT) + +#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\ + IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \ + IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \ + IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT) + +static struct iavf_pattern_match_item iavf_fdir_pattern[] = { + {iavf_pattern_ethertype, IAVF_FDIR_INSET_ETH, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv4, IAVF_FDIR_INSET_ETH_IPV4, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv4_udp, IAVF_FDIR_INSET_ETH_IPV4_UDP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv4_tcp, IAVF_FDIR_INSET_ETH_IPV4_TCP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv4_sctp, IAVF_FDIR_INSET_ETH_IPV4_SCTP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv6, IAVF_FDIR_INSET_ETH_IPV6, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv6_udp, IAVF_FDIR_INSET_ETH_IPV6_UDP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv6_tcp, IAVF_FDIR_INSET_ETH_IPV6_TCP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv6_sctp, IAVF_FDIR_INSET_ETH_IPV6_SCTP, IAVF_INSET_NONE}, +}; + +static struct iavf_flow_parser iavf_fdir_parser; + +static int +iavf_fdir_init(struct iavf_adapter *ad) +{ + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad); + struct iavf_flow_parser *parser; + + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF) + parser = &iavf_fdir_parser; + else + return -ENOTSUP; + + return iavf_register_parser(parser, ad); +} + +static void +iavf_fdir_uninit(struct iavf_adapter *ad) +{ + struct iavf_flow_parser *parser; + + parser = &iavf_fdir_parser; + + iavf_unregister_parser(parser, ad); +} + +static int +iavf_fdir_create(struct iavf_adapter *ad, + struct rte_flow *flow, + void *meta, + struct rte_flow_error *error) +{ + struct iavf_fdir_conf *filter = meta; + struct iavf_fdir_conf *rule; + int ret; + + rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0); + if (!rule) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to allocate memory"); + return -rte_errno; + } + + ret = iavf_fdir_add(ad, filter); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Add filter rule failed."); + goto free_entry; + } + + rte_memcpy(rule, filter, sizeof(*rule)); + flow->rule = rule; + + return 0; + +free_entry: + rte_free(rule); + return -rte_errno; +} + +static int +iavf_fdir_destroy(struct iavf_adapter *ad, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct iavf_fdir_conf *filter; + int ret; + + filter = (struct iavf_fdir_conf *)flow->rule; + + ret = iavf_fdir_del(ad, filter); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Del filter rule failed."); + return -rte_errno; + } + + flow->rule = NULL; + rte_free(filter); + + return 0; +} + +static int +iavf_fdir_validation(struct iavf_adapter *ad, + __rte_unused struct rte_flow *flow, + void *meta, + struct rte_flow_error *error) +{ + struct iavf_fdir_conf *filter = meta; + int ret; + + ret = iavf_fdir_check(ad, filter); + if (ret) { + 
rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Validate filter rule failed."); + return -rte_errno; + } + + return 0; +}; + +static struct iavf_flow_engine iavf_fdir_engine = { + .init = iavf_fdir_init, + .uninit = iavf_fdir_uninit, + .create = iavf_fdir_create, + .destroy = iavf_fdir_destroy, + .validation = iavf_fdir_validation, + .type = IAVF_FLOW_ENGINE_FDIR, +}; + +static int +iavf_fdir_parse_action_qregion(struct iavf_adapter *ad, + struct rte_flow_error *error, + const struct rte_flow_action *act, + struct virtchnl_filter_action *filter_action) +{ + const struct rte_flow_action_rss *rss = act->conf; + uint32_t i; + + if (act->type != RTE_FLOW_ACTION_TYPE_RSS) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Invalid action."); + return -rte_errno; + } + + if (rss->queue_num <= 1) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Queue region size can't be 0 or 1."); + return -rte_errno; + } + + /* check if queue index for queue region is continuous */ + for (i = 0; i < rss->queue_num - 1; i++) { + if (rss->queue[i + 1] != rss->queue[i] + 1) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Discontinuous queue region"); + return -rte_errno; + } + } + + if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Invalid queue region indexes."); + return -rte_errno; + } + + if (!(rte_is_power_of_2(rss->queue_num) && + (rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE))) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "The region size should be any of the following values:" + "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number " + "of queues do not exceed the VSI allocation."); + return -rte_errno; + } + + filter_action->q_index = rss->queue[0]; + filter_action->q_region = rte_fls_u32(rss->queue_num) - 1; + + return 0; +} + +static int +iavf_fdir_parse_action(struct iavf_adapter *ad, + const struct rte_flow_action actions[], + struct rte_flow_error *error, + struct iavf_fdir_conf *filter) +{ + const struct rte_flow_action_queue *act_q; + uint32_t dest_num = 0; + int ret; + + int number = 0; + struct virtchnl_filter_action *filter_action; + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + + case RTE_FLOW_ACTION_TYPE_PASSTHRU: + dest_num++; + + filter_action = &filter->input.rule_cfg. + action_set.actions[number]; + + filter_action->type = VIRTCHNL_FDIR_ACT_PASSTHRU; + + filter->input.rule_cfg.action_set.count = ++number; + break; + + case RTE_FLOW_ACTION_TYPE_DROP: + dest_num++; + + filter_action = &filter->input.rule_cfg. + action_set.actions[number]; + + filter_action->type = VIRTCHNL_FDIR_ACT_DROP; + + filter->input.rule_cfg.action_set.count = ++number; + break; + + case RTE_FLOW_ACTION_TYPE_QUEUE: + dest_num++; + + act_q = actions->conf; + filter_action = &filter->input.rule_cfg. + action_set.actions[number]; + + filter_action->type = VIRTCHNL_FDIR_ACT_QUEUE; + filter_action->q_index = act_q->index; + + if (filter_action->q_index >= + ad->eth_dev->data->nb_rx_queues) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, "Invalid queue for FDIR."); + return -rte_errno; + } + + filter->input.rule_cfg.action_set.count = ++number; + break; + + case RTE_FLOW_ACTION_TYPE_RSS: + dest_num++; + + filter_action = &filter->input.rule_cfg. 
+ action_set.actions[number]; + + filter_action->type = VIRTCHNL_FDIR_ACT_Q_REGION; + + ret = iavf_fdir_parse_action_qregion(ad, + error, actions, filter_action); + if (ret) + return ret; + + filter->input.rule_cfg.action_set.count = ++number; + break; + + default: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Invalid action."); + return -rte_errno; + } + } + + if (dest_num == 0 || dest_num >= 2) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Unsupported action combination"); + return -rte_errno; + } + + return 0; +} + +static int +iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad, + const struct rte_flow_item pattern[], + struct rte_flow_error *error, + struct iavf_fdir_conf *filter) +{ + const struct rte_flow_item *item = pattern; + enum rte_flow_item_type item_type; + enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END; + const struct rte_flow_item_eth *eth_spec, *eth_mask; + const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask; + const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask; + const struct rte_flow_item_udp *udp_spec, *udp_mask; + const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; + const struct rte_flow_item_sctp *sctp_spec, *sctp_mask; + uint64_t input_set = IAVF_INSET_NONE; + + enum rte_flow_item_type next_type; + uint16_t ether_type; + + int layer = 0; + struct virtchnl_proto_hdr *hdr; + + uint8_t ipv6_addr_mask[16] = { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF + }; + + for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Not support range"); + } + + item_type = item->type; + + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_ETH: + eth_spec = item->spec; + eth_mask = item->mask; + next_type = (item + 1)->type; + + hdr = &filter->input.rule_cfg.proto_stack. + proto_hdr[layer]; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH); + + if (next_type == RTE_FLOW_ITEM_TYPE_END && + (!eth_spec || !eth_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "NULL eth spec/mask."); + return -rte_errno; + } + + if (eth_spec && eth_mask) { + if (!rte_is_zero_ether_addr(ð_mask->src) || + !rte_is_zero_ether_addr(ð_mask->dst)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Invalid MAC_addr mask."); + return -rte_errno; + } + } + + if (eth_spec && eth_mask && eth_mask->type) { + if (eth_mask->type != RTE_BE16(0xffff)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid type mask."); + return -rte_errno; + } + + ether_type = rte_be_to_cpu_16(eth_spec->type); + if (ether_type == RTE_ETHER_TYPE_IPV4 || + ether_type == RTE_ETHER_TYPE_IPV6) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported ether_type."); + return -rte_errno; + } + + input_set |= IAVF_INSET_ETHERTYPE; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, + ETH, ETHERTYPE); + + rte_memcpy(hdr->buffer, + eth_spec, sizeof(*eth_spec)); + } + + filter->input.rule_cfg.proto_stack.count = ++layer; + break; + + case RTE_FLOW_ITEM_TYPE_IPV4: + l3 = RTE_FLOW_ITEM_TYPE_IPV4; + ipv4_spec = item->spec; + ipv4_mask = item->mask; + + hdr = &filter->input.rule_cfg.proto_stack. 
+ proto_hdr[layer]; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4); + + if (ipv4_spec && ipv4_mask) { + if (ipv4_mask->hdr.version_ihl || + ipv4_mask->hdr.total_length || + ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.fragment_offset || + ipv4_mask->hdr.hdr_checksum) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid IPv4 mask."); + return -rte_errno; + } + + if (ipv4_mask->hdr.type_of_service == + UINT8_MAX) { + input_set |= IAVF_INSET_IPV4_TOS; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT( + hdr, IPV4, DSCP); + } + if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) { + input_set |= IAVF_INSET_IPV4_PROTO; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT( + hdr, IPV4, PROT); + } + if (ipv4_mask->hdr.time_to_live == UINT8_MAX) { + input_set |= IAVF_INSET_IPV4_TTL; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT( + hdr, IPV4, TTL); + } + if (ipv4_mask->hdr.src_addr == UINT32_MAX) { + input_set |= IAVF_INSET_IPV4_SRC; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT( + hdr, IPV4, SRC); + } + if (ipv4_mask->hdr.dst_addr == UINT32_MAX) { + input_set |= IAVF_INSET_IPV4_DST; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT( + hdr, IPV4, DST); + } + + rte_memcpy(hdr->buffer, + &ipv4_spec->hdr, + sizeof(ipv4_spec->hdr)); + } + + filter->input.rule_cfg.proto_stack.count = ++layer; + break; + + case RTE_FLOW_ITEM_TYPE_IPV6: + l3 = RTE_FLOW_ITEM_TYPE_IPV6; + ipv6_spec = item->spec; + ipv6_mask = item->mask; + + hdr = &filter->input.rule_cfg.proto_stack. + proto_hdr[layer]; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6); + + if (ipv6_spec && ipv6_mask) { + if (ipv6_mask->hdr.payload_len) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid IPv6 mask"); + return -rte_errno; + } + + if ((ipv6_mask->hdr.vtc_flow & + rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) + == rte_cpu_to_be_32( + IAVF_IPV6_TC_MASK)) { + input_set |= IAVF_INSET_IPV6_TC; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT( + hdr, IPV6, TC); + } + if (ipv6_mask->hdr.proto == UINT8_MAX) { + input_set |= IAVF_INSET_IPV6_NEXT_HDR; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT( + hdr, IPV6, PROT); + } + if (ipv6_mask->hdr.hop_limits == UINT8_MAX) { + input_set |= IAVF_INSET_IPV6_HOP_LIMIT; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT( + hdr, IPV6, HOP_LIMIT); + } + if (!memcmp(ipv6_mask->hdr.src_addr, + ipv6_addr_mask, + RTE_DIM(ipv6_mask->hdr.src_addr))) { + input_set |= IAVF_INSET_IPV6_SRC; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT( + hdr, IPV6, SRC); + } + if (!memcmp(ipv6_mask->hdr.dst_addr, + ipv6_addr_mask, + RTE_DIM(ipv6_mask->hdr.dst_addr))) { + input_set |= IAVF_INSET_IPV6_DST; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT( + hdr, IPV6, DST); + } + + rte_memcpy(hdr->buffer, + &ipv6_spec->hdr, + sizeof(ipv6_spec->hdr)); + } + + filter->input.rule_cfg.proto_stack.count = ++layer; + break; + + case RTE_FLOW_ITEM_TYPE_UDP: + udp_spec = item->spec; + udp_mask = item->mask; + + hdr = &filter->input.rule_cfg.proto_stack. 
+ proto_hdr[layer]; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP); + + if (udp_spec && udp_mask) { + if (udp_mask->hdr.dgram_len || + udp_mask->hdr.dgram_cksum) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Invalid UDP mask"); + return -rte_errno; + } + + if (udp_mask->hdr.src_port == UINT16_MAX) { + input_set |= IAVF_INSET_UDP_SRC_PORT; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT( + hdr, UDP, SRC_PORT); + } + if (udp_mask->hdr.dst_port == UINT16_MAX) { + input_set |= IAVF_INSET_UDP_DST_PORT; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT( + hdr, UDP, DST_PORT); + } + + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) + rte_memcpy(hdr->buffer, + &udp_spec->hdr, + sizeof(udp_spec->hdr)); + else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) + rte_memcpy(hdr->buffer, + &udp_spec->hdr, + sizeof(udp_spec->hdr)); + } + + filter->input.rule_cfg.proto_stack.count = ++layer; + break; + + case RTE_FLOW_ITEM_TYPE_TCP: + tcp_spec = item->spec; + tcp_mask = item->mask; + + hdr = &filter->input.rule_cfg.proto_stack. + proto_hdr[layer]; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP); + + if (tcp_spec && tcp_mask) { + if (tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.tcp_flags || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Invalid TCP mask"); + return -rte_errno; + } + + if (tcp_mask->hdr.src_port == UINT16_MAX) { + input_set |= IAVF_INSET_TCP_SRC_PORT; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT( + hdr, TCP, SRC_PORT); + } + if (tcp_mask->hdr.dst_port == UINT16_MAX) { + input_set |= IAVF_INSET_TCP_DST_PORT; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT( + hdr, TCP, DST_PORT); + } + + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) + rte_memcpy(hdr->buffer, + &tcp_spec->hdr, + sizeof(tcp_spec->hdr)); + else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) + rte_memcpy(hdr->buffer, + &tcp_spec->hdr, + sizeof(tcp_spec->hdr)); + } + + filter->input.rule_cfg.proto_stack.count = ++layer; + break; + + case RTE_FLOW_ITEM_TYPE_SCTP: + sctp_spec = item->spec; + sctp_mask = item->mask; + + hdr = &filter->input.rule_cfg.proto_stack. 
+				proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
+
+			if (sctp_spec && sctp_mask) {
+				if (sctp_mask->hdr.cksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM, item,
+						"Invalid SCTP mask");
+					return -rte_errno;
+				}
+
+				if (sctp_mask->hdr.src_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_SCTP_SRC_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, SCTP, SRC_PORT);
+				}
+				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+					input_set |= IAVF_INSET_SCTP_DST_PORT;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, SCTP, DST_PORT);
+				}
+
+				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+					rte_memcpy(hdr->buffer,
+						&sctp_spec->hdr,
+						sizeof(sctp_spec->hdr));
+				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+					rte_memcpy(hdr->buffer,
+						&sctp_spec->hdr,
+						sizeof(sctp_spec->hdr));
+			}
+
+			filter->input.rule_cfg.proto_stack.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VOID:
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM, item,
+				"Invalid pattern item.");
+			return -rte_errno;
+		}
+	}
+
+	filter->input_set = input_set;
+
+	return 0;
+}
+
+static int
+iavf_fdir_parse(struct iavf_adapter *ad,
+	struct iavf_pattern_match_item *array,
+	uint32_t array_len,
+	const struct rte_flow_item pattern[],
+	const struct rte_flow_action actions[],
+	void **meta,
+	struct rte_flow_error *error)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_fdir_conf *filter = &vf->fdir.conf;
+	struct iavf_pattern_match_item *item = NULL;
+	uint64_t input_set;
+	int ret;
+
+	memset(filter, 0, sizeof(*filter));
+
+	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
+	if (!item)
+		return -rte_errno;
+
+	ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
+	if (ret)
+		goto error;
+
+	input_set = filter->input_set;
+	if (!input_set || input_set & ~item->input_set_mask) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
+			"Invalid input set");
+		ret = -rte_errno;
+		goto error;
+	}
+
+	ret = iavf_fdir_parse_action(ad, actions, error, filter);
+	if (ret)
+		goto error;
+
+	if (meta)
+		*meta = filter;
+
+error:
+	rte_free(item);
+	return ret;
+}
+
+static struct iavf_flow_parser iavf_fdir_parser = {
+	.engine = &iavf_fdir_engine,
+	.array = iavf_fdir_pattern,
+	.array_len = RTE_DIM(iavf_fdir_pattern),
+	.parse_pattern_action = iavf_fdir_parse,
+	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
+};
+
+RTE_INIT(iavf_fdir_engine_register)
+{
+	iavf_register_flow_engine(&iavf_fdir_engine);
+}
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 11c70f5..77bfd1b 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -342,7 +342,8 @@
 	caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
 		VIRTCHNL_VF_OFFLOAD_QUERY_DDP |
-		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
+		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
+		VIRTCHNL_VF_OFFLOAD_FDIR_PF;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
@@ -867,3 +868,128 @@
 	return err;
 }
+
+int
+iavf_fdir_add(struct iavf_adapter *adapter,
+	struct iavf_fdir_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_fdir_status *fdir_status;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->input.vsi_id = vf->vsi_res->vsi_id;
+	filter->input.validate_only = 0;
+
+	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
+	args.in_args = (uint8_t *)(&filter->input);
+	args.in_args_size = sizeof(filter->input);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
+		return err;
+	}
+
+	fdir_status = (struct virtchnl_fdir_status *)args.out_buffer;
+	filter->flow_id = fdir_status->flow_id;
+
+	if (fdir_status->status == VIRTCHNL_FDIR_SUCCESS)
+		PMD_DRV_LOG(INFO,
+			"add rule request was successfully processed by PF");
+	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE)
+		PMD_DRV_LOG(INFO,
+			"add rule request failed due to no HW resource");
+	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT)
+		PMD_DRV_LOG(INFO,
+			"add rule request failed because the rule already exists");
+	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID)
+		PMD_DRV_LOG(INFO,
+			"add rule request failed because the HW doesn't support it");
+	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT)
+		PMD_DRV_LOG(INFO,
+			"add rule request failed due to a programming timeout");
+
+	return 0;
+}
+
+int
+iavf_fdir_del(struct iavf_adapter *adapter,
+	struct iavf_fdir_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_fdir_status *fdir_status;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->input.vsi_id = vf->vsi_res->vsi_id;
+	filter->input.flow_id = filter->flow_id;
+
+	args.ops = VIRTCHNL_OP_DEL_FDIR_FILTER;
+	args.in_args = (uint8_t *)(&filter->input);
+	args.in_args_size = sizeof(filter->input);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
+		return err;
+	}
+
+	fdir_status = (struct virtchnl_fdir_status *)args.out_buffer;
+
+	if (fdir_status->status == VIRTCHNL_FDIR_SUCCESS)
+		PMD_DRV_LOG(INFO,
+			"delete rule request was successfully processed by PF");
+	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST)
+		PMD_DRV_LOG(INFO,
+			"delete rule request failed because this rule doesn't exist");
+	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT)
+		PMD_DRV_LOG(INFO,
+			"delete rule request failed due to a programming timeout");
+
+	return 0;
+}
+
+int
+iavf_fdir_check(struct iavf_adapter *adapter,
+	struct iavf_fdir_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_fdir_status *fdir_status;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->input.vsi_id = vf->vsi_res->vsi_id;
+	filter->input.validate_only = 1;
+
+	args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
+	args.in_args = (uint8_t *)(&filter->input);
+	args.in_args_size = sizeof(filter->input);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to check flow director rule");
+		return err;
+	}
+
+	fdir_status = (struct virtchnl_fdir_status *)args.out_buffer;
+
+	if (fdir_status->status == VIRTCHNL_FDIR_SUCCESS)
+		PMD_DRV_LOG(INFO,
+			"check rule request was successfully processed by PF");
+	else if (fdir_status->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID)
+		PMD_DRV_LOG(INFO,
+			"check rule request failed due to parameter validation"
+			" or the HW doesn't support it");
+
+	return 0;
+}
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 32eabca..ce71054 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -13,6 +13,7 @@ sources = files(
 	'iavf_rxtx.c',
 	'iavf_vchnl.c',
 	'iavf_generic_flow.c',
+	'iavf_fdir.c',
 )
 
 if arch_subdir == 'x86'

From patchwork Wed Mar 18 05:41:58 2020
X-Patchwork-Id: 66829
From: Simei Su
To: xiaolong.ye@intel.com, qi.z.zhang@intel.com
Cc: dev@dpdk.org, yahui.cao@intel.com, jingjing.wu@intel.com, simei.su@intel.com
Date: Wed, 18 Mar 2020 13:41:58 +0800
Message-Id: <1584510121-377747-3-git-send-email-simei.su@intel.com>
Subject: [dpdk-dev] [PATCH 2/5] net/iavf: add support for FDIR GTPU

This patch enables the GTPU pattern for RTE_FLOW.
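As a usage sketch (not part of the patch), a GTP-U TEID rule could be requested
as follows. The ETH/IPV4/UDP/GTPU item sequence is an assumption that has to
match the PMD's iavf_pattern_eth_ipv4_gtpu table, and port_id, the TEID and the
queue index are made-up values:

    #include <rte_flow.h>
    #include <rte_byteorder.h>

    /* Sketch: steer GTP-U traffic with TEID 0x12345678 to Rx queue 2. */
    static struct rte_flow *
    fdir_gtpu_teid(uint16_t port_id, struct rte_flow_error *err)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_gtp gtp_spec = {
            .teid = RTE_BE32(0x12345678),
        };
        struct rte_flow_item_gtp gtp_mask = {
            .teid = RTE_BE32(UINT32_MAX), /* TEID must be fully masked */
        };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            { .type = RTE_FLOW_ITEM_TYPE_UDP },
            { .type = RTE_FLOW_ITEM_TYPE_GTPU,
              .spec = &gtp_spec, .mask = &gtp_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 2 };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err);
    }

Per the parser below, v_pt_rsv_flags, msg_type and msg_len must be left
unmasked; only the TEID (and the QFI via GTP_PSC) enter the input set.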
Signed-off-by: Simei Su --- drivers/net/iavf/iavf_fdir.c | 67 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c index dd321ba..ad100c8 100644 --- a/drivers/net/iavf/iavf_fdir.c +++ b/drivers/net/iavf/iavf_fdir.c @@ -67,6 +67,14 @@ IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \ IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT) +#define IAVF_FDIR_INSET_GTPU (\ + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \ + IAVF_INSET_GTPU_TEID) + +#define IAVF_FDIR_INSET_GTPU_EH (\ + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \ + IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI) + static struct iavf_pattern_match_item iavf_fdir_pattern[] = { {iavf_pattern_ethertype, IAVF_FDIR_INSET_ETH, IAVF_INSET_NONE}, {iavf_pattern_eth_ipv4, IAVF_FDIR_INSET_ETH_IPV4, IAVF_INSET_NONE}, @@ -77,6 +85,8 @@ {iavf_pattern_eth_ipv6_udp, IAVF_FDIR_INSET_ETH_IPV6_UDP, IAVF_INSET_NONE}, {iavf_pattern_eth_ipv6_tcp, IAVF_FDIR_INSET_ETH_IPV6_TCP, IAVF_INSET_NONE}, {iavf_pattern_eth_ipv6_sctp, IAVF_FDIR_INSET_ETH_IPV6_SCTP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv4_gtpu, IAVF_FDIR_INSET_GTPU, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv4_gtpu_eh, IAVF_FDIR_INSET_GTPU_EH, IAVF_INSET_NONE}, }; static struct iavf_flow_parser iavf_fdir_parser; @@ -360,6 +370,8 @@ const struct rte_flow_item_udp *udp_spec, *udp_mask; const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; const struct rte_flow_item_sctp *sctp_spec, *sctp_mask; + const struct rte_flow_item_gtp *gtp_spec, *gtp_mask; + const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask; uint64_t input_set = IAVF_INSET_NONE; enum rte_flow_item_type next_type; @@ -686,6 +698,61 @@ filter->input.rule_cfg.proto_stack.count = ++layer; break; + case RTE_FLOW_ITEM_TYPE_GTPU: + gtp_spec = item->spec; + gtp_mask = item->mask; + + hdr = &filter->input.rule_cfg.proto_stack. + proto_hdr[layer]; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP); + + if (gtp_spec && gtp_mask) { + if (gtp_mask->v_pt_rsv_flags || + gtp_mask->msg_type || + gtp_mask->msg_len) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, "Invalid GTP mask"); + return -rte_errno; + } + + if (gtp_mask->teid == UINT32_MAX) { + input_set |= IAVF_INSET_GTPU_TEID; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT( + hdr, GTPU_IP, TEID); + } + + rte_memcpy(hdr->buffer, + gtp_spec, sizeof(*gtp_spec)); + } + + filter->input.rule_cfg.proto_stack.count = ++layer; + break; + + case RTE_FLOW_ITEM_TYPE_GTP_PSC: + gtp_psc_spec = item->spec; + gtp_psc_mask = item->mask; + + hdr = &filter->input.rule_cfg.proto_stack. 
+				proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
+
+			if (gtp_psc_spec && gtp_psc_mask) {
+				if (gtp_psc_mask->qfi == UINT8_MAX) {
+					input_set |= IAVF_INSET_GTPU_QFI;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, GTPU_EH, QFI);
+				}
+
+				rte_memcpy(hdr->buffer, gtp_psc_spec,
+					sizeof(*gtp_psc_spec));
+			}
+
+			filter->input.rule_cfg.proto_stack.count = ++layer;
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_VOID:
 			break;

From patchwork Wed Mar 18 05:41:59 2020
X-Patchwork-Id: 66830
From: Simei Su
To: xiaolong.ye@intel.com, qi.z.zhang@intel.com
Cc: dev@dpdk.org, yahui.cao@intel.com, jingjing.wu@intel.com, simei.su@intel.com
Date: Wed, 18 Mar 2020 13:41:59 +0800
Message-Id: <1584510121-377747-4-git-send-email-simei.su@intel.com>
Subject: [dpdk-dev] [PATCH 3/5] net/iavf: add support for FDIR L2TPv3 and IPSec

This patch enables the L2TPv3, ESP, AH and NAT-T patterns for RTE_FLOW.
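As a usage sketch (not part of the patch), an ESP rule could look like the
following; port_id, the SPI and the queue index are assumed values, and the SPI
mask must be all-ones since IAVF_FDIR_INSET_ESP only covers a fully specified
SPI:

    #include <rte_flow.h>
    #include <rte_byteorder.h>

    /* Sketch: direct IPsec ESP traffic with SPI 0x1000 to Rx queue 3. */
    static struct rte_flow *
    fdir_esp_spi(uint16_t port_id, struct rte_flow_error *err)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_esp esp_spec = {
            .hdr.spi = RTE_BE32(0x1000),
        };
        struct rte_flow_item_esp esp_mask = {
            .hdr.spi = RTE_BE32(UINT32_MAX),
        };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            { .type = RTE_FLOW_ITEM_TYPE_ESP,
              .spec = &esp_spec, .mask = &esp_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 3 };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err);
    }

For NAT-T (UDP-encapsulated ESP), a UDP item would sit between the IPv4/IPv6
and ESP items, matching the eth_ipv4_udp_esp/eth_ipv6_udp_esp patterns above.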
Signed-off-by: Simei Su --- drivers/net/iavf/iavf_fdir.c | 97 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c index ad100c8..70437d6 100644 --- a/drivers/net/iavf/iavf_fdir.c +++ b/drivers/net/iavf/iavf_fdir.c @@ -75,6 +75,23 @@ IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \ IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI) +#define IAVF_FDIR_INSET_L2TPV3OIP (\ + IAVF_L2TPV3OIP_SESSION_ID) + +#define IAVF_FDIR_INSET_ESP (\ + IAVF_INSET_ESP_SPI) + +#define IAVF_FDIR_INSET_AH (\ + IAVF_INSET_AH_SPI) + +#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\ + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \ + IAVF_INSET_ESP_SPI) + +#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\ + IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \ + IAVF_INSET_ESP_SPI) + static struct iavf_pattern_match_item iavf_fdir_pattern[] = { {iavf_pattern_ethertype, IAVF_FDIR_INSET_ETH, IAVF_INSET_NONE}, {iavf_pattern_eth_ipv4, IAVF_FDIR_INSET_ETH_IPV4, IAVF_INSET_NONE}, @@ -87,6 +104,14 @@ {iavf_pattern_eth_ipv6_sctp, IAVF_FDIR_INSET_ETH_IPV6_SCTP, IAVF_INSET_NONE}, {iavf_pattern_eth_ipv4_gtpu, IAVF_FDIR_INSET_GTPU, IAVF_INSET_NONE}, {iavf_pattern_eth_ipv4_gtpu_eh, IAVF_FDIR_INSET_GTPU_EH, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv4_l2tpv3, IAVF_FDIR_INSET_L2TPV3OIP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv6_l2tpv3, IAVF_FDIR_INSET_L2TPV3OIP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv4_esp, IAVF_FDIR_INSET_ESP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv6_esp, IAVF_FDIR_INSET_ESP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv4_ah, IAVF_FDIR_INSET_AH, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv6_ah, IAVF_FDIR_INSET_AH, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv4_udp_esp, IAVF_FDIR_INSET_IPV4_NATT_ESP, IAVF_INSET_NONE}, + {iavf_pattern_eth_ipv6_udp_esp, IAVF_FDIR_INSET_IPV6_NATT_ESP, IAVF_INSET_NONE}, }; static struct iavf_flow_parser iavf_fdir_parser; @@ -372,6 +397,9 @@ const struct rte_flow_item_sctp *sctp_spec, *sctp_mask; const struct rte_flow_item_gtp *gtp_spec, *gtp_mask; const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask; + const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask; + const struct rte_flow_item_esp *esp_spec, *esp_mask; + const struct rte_flow_item_ah *ah_spec, *ah_mask; uint64_t input_set = IAVF_INSET_NONE; enum rte_flow_item_type next_type; @@ -753,6 +781,75 @@ filter->input.rule_cfg.proto_stack.count = ++layer; break; + case RTE_FLOW_ITEM_TYPE_L2TPV3OIP: + l2tpv3oip_spec = item->spec; + l2tpv3oip_mask = item->mask; + + hdr = &filter->input.rule_cfg.proto_stack. + proto_hdr[layer]; + + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3); + + if (l2tpv3oip_spec && l2tpv3oip_mask) { + if (l2tpv3oip_mask->session_id == UINT32_MAX) { + input_set |= IAVF_L2TPV3OIP_SESSION_ID; + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT( + hdr, L2TPV3, SESS_ID); + } + + rte_memcpy(hdr->buffer, l2tpv3oip_spec, + sizeof(*l2tpv3oip_spec)); + } + + filter->input.rule_cfg.proto_stack.count = ++layer; + break; + + case RTE_FLOW_ITEM_TYPE_ESP: + esp_spec = item->spec; + esp_mask = item->mask; + + hdr = &filter->input.rule_cfg.proto_stack. 
+				proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
+
+			if (esp_spec && esp_mask) {
+				if (esp_mask->hdr.spi == UINT32_MAX) {
+					input_set |= IAVF_INSET_ESP_SPI;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, ESP, SPI);
+				}
+
+				rte_memcpy(hdr->buffer, &esp_spec->hdr,
+					sizeof(esp_spec->hdr));
+			}
+
+			filter->input.rule_cfg.proto_stack.count = ++layer;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_AH:
+			ah_spec = item->spec;
+			ah_mask = item->mask;
+
+			hdr = &filter->input.rule_cfg.proto_stack.
+				proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);
+
+			if (ah_spec && ah_mask) {
+				if (ah_mask->spi == UINT32_MAX) {
+					input_set |= IAVF_INSET_AH_SPI;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, AH, SPI);
+				}
+
+				rte_memcpy(hdr->buffer, ah_spec,
+					sizeof(*ah_spec));
+			}
+
+			filter->input.rule_cfg.proto_stack.count = ++layer;
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_VOID:
 			break;

From patchwork Wed Mar 18 05:42:00 2020
X-Patchwork-Id: 66831
From: Simei Su
To: xiaolong.ye@intel.com, qi.z.zhang@intel.com
Cc: dev@dpdk.org, yahui.cao@intel.com, jingjing.wu@intel.com, simei.su@intel.com
Date: Wed, 18 Mar 2020 13:42:00 +0800
Message-Id: <1584510121-377747-5-git-send-email-simei.su@intel.com>
Subject: [dpdk-dev] [PATCH 4/5] net/iavf: add support for FDIR PFCP

This patch enables the PFCP node and session patterns for RTE_FLOW.
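As a usage sketch (not part of the patch), a PFCP session rule could be
requested as below. Whether the UDP item is required in front of the PFCP item
depends on the PMD's iavf_pattern_eth_ipv4_pfcp definition (PFCP runs over UDP
port 8805); port_id and the drop action are illustrative:

    #include <rte_flow.h>

    /* Sketch: match PFCP session packets (S field set) and drop them. */
    static struct rte_flow *
    fdir_pfcp_session(uint16_t port_id, struct rte_flow_error *err)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_pfcp pfcp_spec = { .s_field = 1 };
        struct rte_flow_item_pfcp pfcp_mask = { .s_field = 0xff };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            { .type = RTE_FLOW_ITEM_TYPE_UDP },
            { .type = RTE_FLOW_ITEM_TYPE_PFCP,
              .spec = &pfcp_spec, .mask = &pfcp_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_DROP },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err);
    }

s_field = 0 with a full mask would select PFCP node messages instead.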
Signed-off-by: Simei Su
---
 drivers/net/iavf/iavf_fdir.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 70437d6..8d49c28 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -92,6 +92,9 @@
 	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
 	IAVF_INSET_ESP_SPI)
 
+#define IAVF_FDIR_INSET_PFCP (\
+	IAVF_INSET_PFCP_S_FIELD)
+
 static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
 	{iavf_pattern_ethertype, IAVF_FDIR_INSET_ETH, IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4, IAVF_FDIR_INSET_ETH_IPV4, IAVF_INSET_NONE},
@@ -112,6 +115,8 @@
 	{iavf_pattern_eth_ipv6_ah, IAVF_FDIR_INSET_AH, IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4_udp_esp, IAVF_FDIR_INSET_IPV4_NATT_ESP, IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv6_udp_esp, IAVF_FDIR_INSET_IPV6_NATT_ESP, IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_pfcp, IAVF_FDIR_INSET_PFCP, IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_pfcp, IAVF_FDIR_INSET_PFCP, IAVF_INSET_NONE},
 };
 
 static struct iavf_flow_parser iavf_fdir_parser;
@@ -400,6 +405,7 @@
 	const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
 	const struct rte_flow_item_esp *esp_spec, *esp_mask;
 	const struct rte_flow_item_ah *ah_spec, *ah_mask;
+	const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
 	uint64_t input_set = IAVF_INSET_NONE;
 
 	enum rte_flow_item_type next_type;
@@ -850,6 +856,29 @@
 			filter->input.rule_cfg.proto_stack.count = ++layer;
 			break;
 
+		case RTE_FLOW_ITEM_TYPE_PFCP:
+			pfcp_spec = item->spec;
+			pfcp_mask = item->mask;
+
+			hdr = &filter->input.rule_cfg.proto_stack.
+				proto_hdr[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);
+
+			if (pfcp_spec && pfcp_mask) {
+				if (pfcp_mask->s_field == UINT8_MAX) {
+					input_set |= IAVF_INSET_PFCP_S_FIELD;
+					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(
+						hdr, PFCP, S_FIELD);
+				}
+
+				rte_memcpy(hdr->buffer, pfcp_spec,
+					sizeof(*pfcp_spec));
+			}
+
+			filter->input.rule_cfg.proto_stack.count = ++layer;
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_VOID:
 			break;

From patchwork Wed Mar 18 05:42:01 2020
X-Patchwork-Id: 66832
From: Simei Su
To: xiaolong.ye@intel.com, qi.z.zhang@intel.com
Cc: dev@dpdk.org, yahui.cao@intel.com, jingjing.wu@intel.com, simei.su@intel.com
Date: Wed, 18 Mar 2020 13:42:01 +0800
Message-Id: <1584510121-377747-6-git-send-email-simei.su@intel.com>
Subject: [dpdk-dev] [PATCH 5/5] net/iavf: add support for FDIR mark action

This patch enables mark action support and takes the mark-only case into
consideration.

Signed-off-by: Simei Su
---
 drivers/net/iavf/iavf.h      |  1 +
 drivers/net/iavf/iavf_fdir.c | 46 +++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 46 insertions(+), 1 deletion(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 62a3eb8..178d481 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -103,6 +103,7 @@ struct iavf_fdir_conf {
 	struct virtchnl_fdir_fltr input;
 	uint64_t input_set;
 	uint32_t flow_id;
+	uint32_t mark_flag;
 };
 
 struct iavf_fdir_info {
diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 8d49c28..a03bc09 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -18,6 +18,7 @@
 #include "iavf.h"
 #include "iavf_generic_flow.h"
 #include "virtchnl.h"
+#include "iavf_rxtx.h"
 
 #define IAVF_FDIR_MAX_QREGION_SIZE 128
 
@@ -171,6 +172,9 @@
 		goto free_entry;
 	}
 
+	if (filter->mark_flag == 1)
+		iavf_fdir_rx_proc_enable(ad, 1);
+
 	rte_memcpy(rule, filter, sizeof(*rule));
 	flow->rule = rule;
 
@@ -199,6 +203,9 @@
 		return -rte_errno;
 	}
 
+	if (filter->mark_flag == 1)
+		iavf_fdir_rx_proc_enable(ad, 0);
+
 	flow->rule = NULL;
 	rte_free(filter);
 
@@ -297,7 +304,9 @@
 	struct iavf_fdir_conf *filter)
 {
 	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_mark *mark_spec = NULL;
 	uint32_t dest_num = 0;
+	uint32_t mark_num = 0;
 	int ret;
 
 	int number = 0;
@@ -367,6 +376,20 @@
 			filter->input.rule_cfg.action_set.count = ++number;
 			break;
 
+		case RTE_FLOW_ACTION_TYPE_MARK:
+			mark_num++;
+
+			filter->mark_flag = 1;
+			mark_spec = actions->conf;
+			filter_action = &filter->input.rule_cfg.
+				action_set.actions[number];
+
+			filter_action->type = VIRTCHNL_FDIR_ACT_MARK;
+			filter_action->mark_id = mark_spec->id;
+
+			filter->input.rule_cfg.action_set.count = ++number;
+			break;
+
 		default:
 			rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ACTION, actions,
@@ -375,13 +398,34 @@
 		}
 	}
 
-	if (dest_num == 0 || dest_num >= 2) {
+	if (dest_num >= 2) {
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_ACTION, actions,
 			"Unsupported action combination");
 		return -rte_errno;
 	}
 
+	if (mark_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			"Too many mark actions");
+		return -rte_errno;
+	}
+
+	if (dest_num + mark_num == 0) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			"Empty action");
+		return -rte_errno;
+	}
+
+	/* Mark only is equal to mark + passthru. */
+	if (dest_num == 0) {
+		filter_action = &filter->input.rule_cfg.
+			action_set.actions[number];
+		filter_action->type = VIRTCHNL_FDIR_ACT_PASSTHRU;
+	}
+
 	return 0;
 }
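As a usage sketch (not part of the patch series), a mark-only rule handled by
the code above could be requested as follows; per the parser, the PMD appends
the implicit passthru action and enables FDIR processing on the Rx path
(iavf_fdir_rx_proc_enable). port_id, the matched address and the mark ID are
assumed values:

    #include <rte_flow.h>
    #include <rte_mbuf.h>
    #include <rte_byteorder.h>

    /* Sketch: tag IPv4 FDIR hits with mark ID 0x1234 without steering them. */
    static struct rte_flow *
    fdir_mark_only(uint16_t port_id, struct rte_flow_error *err)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_ipv4 ip4_spec = {
            .hdr.src_addr = RTE_BE32(0xc0a80002), /* 192.168.0.2 */
        };
        struct rte_flow_item_ipv4 ip4_mask = {
            .hdr.src_addr = RTE_BE32(UINT32_MAX),
        };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4,
              .spec = &ip4_spec, .mask = &ip4_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_mark mark = { .id = 0x1234 };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err);
    }

    /* On Rx, a matched packet carries the mark in the mbuf FDIR field. */
    static uint32_t
    read_fdir_mark(const struct rte_mbuf *m)
    {
        return (m->ol_flags & PKT_RX_FDIR_ID) ? m->hash.fdir.hi : 0;
    }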