From patchwork Thu Mar 18 08:48:24 2021
X-Patchwork-Submitter: "Zhang, Yuying" <yuying.zhang@intel.com>
X-Patchwork-Id: 89458
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Yuying Zhang <yuying.zhang@intel.com>
To: dev@dpdk.org, qi.z.zhang@intel.com
Cc: Yuying Zhang <yuying.zhang@intel.com>
Date: Thu, 18 Mar 2021 08:48:24 +0000
Message-Id: <20210318084824.165907-1-yuying.zhang@intel.com>
Subject: [dpdk-dev] [PATCH v1] net/ice: support GTPU TEID pattern for switch filter

Enable the GTPU pattern for the CVL switch filter, matching on the GTPU
TEID and on the QFI of the GTPU extension header. This patch supports
outer L3/L4 filtering only.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 doc/guides/rel_notes/release_21_05.rst |  3 +
 drivers/net/ice/ice_switch_filter.c    | 91 ++++++++++++++++++++++++++
 2 files changed, 94 insertions(+)

diff --git a/doc/guides/rel_notes/release_21_05.rst b/doc/guides/rel_notes/release_21_05.rst
index 88e7607a08..8507dc948f 100644
--- a/doc/guides/rel_notes/release_21_05.rst
+++ b/doc/guides/rel_notes/release_21_05.rst
@@ -91,6 +91,9 @@ New Features
   * Added a command line option to configure forced speed for Ethernet port.
     ``dpdk-testpmd -c 0xff -- -i --eth-link-speed N``
 
+* **Updated Intel ice driver.**
+
+  * Added GTP TEID support for DCF switch filter.
 
 Removed Items
 -------------
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index ada3ecf60b..9147a5fdbe 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -137,6 +137,17 @@
 #define ICE_SW_INSET_MAC_IPV6_PFCP ( \
 	ICE_SW_INSET_MAC_IPV6 | \
 	ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID)
+#define ICE_SW_INSET_MAC_IPV4_GTPU ( \
+	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_GTPU_TEID)
+#define ICE_SW_INSET_MAC_IPV4_GTPU_EH ( \
+	ICE_SW_INSET_MAC_IPV4 | ICE_INSET_GTPU_TEID | \
+	ICE_INSET_GTPU_QFI)
+#define ICE_SW_INSET_MAC_IPV6_GTPU ( \
+	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_GTPU_TEID)
+#define ICE_SW_INSET_MAC_IPV6_GTPU_EH ( \
+	ICE_SW_INSET_MAC_IPV6 | ICE_INSET_GTPU_TEID | \
+	ICE_INSET_GTPU_QFI)
+
 
 struct sw_meta {
 	struct ice_adv_lkup_elem *list;
@@ -198,6 +209,10 @@ ice_pattern_match_item ice_switch_pattern_dist_list[] = {
 	{pattern_eth_qinq_pppoes_proto,	ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
 	{pattern_eth_qinq_pppoes_ipv4,	ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
 	{pattern_eth_qinq_pppoes_ipv6,	ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
+	{pattern_eth_ipv4_gtpu,	ICE_SW_INSET_MAC_IPV4_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
+	{pattern_eth_ipv4_gtpu_eh,	ICE_SW_INSET_MAC_IPV4_GTPU_EH, ICE_INSET_NONE, ICE_INSET_NONE},
+	{pattern_eth_ipv6_gtpu,	ICE_SW_INSET_MAC_IPV6_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
+	{pattern_eth_ipv6_gtpu_eh,	ICE_SW_INSET_MAC_IPV6_GTPU_EH, ICE_INSET_NONE, ICE_INSET_NONE},
 };
 
 static struct
@@ -251,6 +266,10 @@ ice_pattern_match_item ice_switch_pattern_perm_list[] = {
 	{pattern_eth_qinq_pppoes_proto,	ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE},
 	{pattern_eth_qinq_pppoes_ipv4,	ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
 	{pattern_eth_qinq_pppoes_ipv6,	ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
+	{pattern_eth_ipv4_gtpu,	ICE_SW_INSET_MAC_IPV4_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
+	{pattern_eth_ipv4_gtpu_eh,	ICE_SW_INSET_MAC_IPV4_GTPU_EH, ICE_INSET_NONE, ICE_INSET_NONE},
+	{pattern_eth_ipv6_gtpu,	ICE_SW_INSET_MAC_IPV6_GTPU, ICE_INSET_NONE, ICE_INSET_NONE},
+	{pattern_eth_ipv6_gtpu_eh,	ICE_SW_INSET_MAC_IPV6_GTPU_EH, ICE_INSET_NONE, ICE_INSET_NONE},
 };
 
 static int
@@ -378,6 +397,8 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 	const struct rte_flow_item_ah *ah_spec, *ah_mask;
 	const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
 	const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
+	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
+	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
 	uint64_t input_set = ICE_INSET_NONE;
 	uint16_t input_set_byte = 0;
 	bool pppoe_elem_valid = 0;
@@ -1255,6 +1276,76 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 			}
 			break;
 
+		case RTE_FLOW_ITEM_TYPE_GTPU:
+			gtp_spec = item->spec;
+			gtp_mask = item->mask;
+			if (gtp_spec && !gtp_mask) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item,
+					"Invalid GTPU item");
+				return 0;
+			}
+			if (gtp_spec && gtp_mask) {
+				if (gtp_mask->v_pt_rsv_flags ||
+				    gtp_mask->msg_type ||
+				    gtp_mask->msg_len) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid GTPU mask");
+					return 0;
+				}
+				if (gtp_mask->teid)
+					input_set |= ICE_INSET_GTPU_TEID;
+				list[t].type = ICE_GTP;
+				list[t].h_u.gtp_hdr.teid =
+					gtp_spec->teid;
+				list[t].m_u.gtp_hdr.teid =
+					gtp_mask->teid;
+				input_set_byte += 4;
+				t++;
+			}
+			if (ipv4_valid)
+				*tun_type = ICE_SW_TUN_GTP_IPV4;
+			else if (ipv6_valid)
+				*tun_type = ICE_SW_TUN_GTP_IPV6;
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
+			gtp_psc_spec = item->spec;
+			gtp_psc_mask = item->mask;
+			if (gtp_psc_spec && !gtp_psc_mask) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item,
+					"Invalid GTPU_EH item");
+				return 0;
+			}
+			if (gtp_psc_spec && gtp_psc_mask) {
+				if (gtp_psc_mask->pdu_type) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid GTPU_EH mask");
+					return 0;
+				}
+				if (gtp_psc_mask->qfi)
+					input_set |= ICE_INSET_GTPU_QFI;
+				list[t].type = ICE_GTP;
+				list[t].h_u.gtp_hdr.qfi =
+					gtp_psc_spec->qfi;
+				list[t].m_u.gtp_hdr.qfi =
+					gtp_psc_mask->qfi;
+				input_set_byte += 1;
+				t++;
+			}
+			if (ipv4_valid)
+				*tun_type = ICE_SW_TUN_GTP_IPV4_EH;
+			else if (ipv6_valid)
+				*tun_type = ICE_SW_TUN_GTP_IPV6_EH;
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_VOID:
 			break;
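
---

For reviewers, a minimal illustrative sketch (not part of the patch) of how
an application could exercise the new eth/ipv4/udp/gtpu pattern through the
rte_flow API once this series is applied. The port id, queue index and TEID
value are placeholder assumptions, and the queue action is just one of the
actions the switch filter may accept:

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Match GTP-U packets carrying TEID 0x12345678 (placeholder value) and
 * steer them to Rx queue 1.  Only the TEID is masked: the parser in this
 * patch rejects masks on v_pt_rsv_flags, msg_type and msg_len. */
static struct rte_flow *
create_gtpu_teid_rule(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_gtp gtp_spec = {
		.teid = RTE_BE32(0x12345678),
	};
	struct rte_flow_item_gtp gtp_mask = {
		.teid = RTE_BE32(0xffffffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_GTPU,
		  .spec = &gtp_spec, .mask = &gtp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}

The testpmd equivalent should be along the lines of
"flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x12345678 /
end actions queue index 1 / end" (values again placeholders).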
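The eth/ipv4/udp/gtpu/gtpu_eh variant differs only in the pattern: a
GTP_PSC item with the QFI masked follows the GTPU item. A hedged sketch
under the same assumptions; the QFI value is a placeholder, and pdu_type
must stay unmasked since the parser above rejects it:

	/* GTPU_EH: match on the QFI of the PDU session container.
	 * QFI is 6 bits wide, hence the 0x3f full mask. */
	struct rte_flow_item_gtp_psc psc_spec = { .qfi = 0x9 };
	struct rte_flow_item_gtp_psc psc_mask = { .qfi = 0x3f };
	struct rte_flow_item pattern_eh[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_GTPU },
		{ .type = RTE_FLOW_ITEM_TYPE_GTP_PSC,
		  .spec = &psc_spec, .mask = &psc_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};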