From patchwork Tue Mar 22 17:58:39 2022
X-Patchwork-Submitter: Nithin Dabilpuram
X-Patchwork-Id: 108814
X-Patchwork-Delegate: gakhil@marvell.com
From: Nithin Dabilpuram
To: , Radu Nicolau , Akhil Goyal
CC: , , Nithin Dabilpuram
Subject: [PATCH 1/7] examples/ipsec-secgw: disable Tx chksum offload for inline
Date: Tue, 22 Mar 2022 23:28:39 +0530
Message-ID: <20220322175902.363520-1-ndabilpuram@marvell.com>

Enable Tx IPv4 checksum offload only when Tx inline crypto, lookaside crypto/protocol or CPU crypto is needed. For Tx inline protocol offload, checksum computation is implicitly taken care of by HW.
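For reference, a condensed sketch of the per-SA decision this patch implements in sa_check_offloads(); the helper name and signature below are invented for illustration only:

#include <stdbool.h>
#include <stdint.h>

#include <rte_ethdev.h>
#include <rte_security.h>

/* Illustrative only: which Tx offloads an outbound SA of a given action
 * type asks for. The real logic also filters on the SA's port id.
 */
static uint64_t
tx_offloads_for_action(enum rte_security_session_action_type type, bool has_mss)
{
	uint64_t offloads = 0;

	switch (type) {
	case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
		/* HW computes the outer IPv4 checksum implicitly. */
		offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
		offloads |= RTE_ETH_TX_OFFLOAD_SECURITY |
			    RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
		if (has_mss)
			offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
		break;
	default:
		/* Lookaside crypto/protocol and CPU crypto: the application
		 * builds the outer IP header itself, so it wants IPv4
		 * checksum offload (falling back to SW checksum otherwise).
		 */
		offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
		break;
	}
	return offloads;
}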
Signed-off-by: Nithin Dabilpuram --- examples/ipsec-secgw/ipsec-secgw.c | 3 --- examples/ipsec-secgw/sa.c | 32 +++++++++++++++++++++++++------- 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index 42b5081..76919e5 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -2330,9 +2330,6 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads) local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; - if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) - local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM; - printf("port %u configuring rx_offloads=0x%" PRIx64 ", tx_offloads=0x%" PRIx64 "\n", portid, local_port_conf.rxmode.offloads, diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c index 1839ac7..36d890f 100644 --- a/examples/ipsec-secgw/sa.c +++ b/examples/ipsec-secgw/sa.c @@ -1785,13 +1785,31 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads, for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) { rule = &sa_out[idx_sa]; rule_type = ipsec_get_action_type(rule); - if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO || - rule_type == - RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) - && rule->portid == port_id) { - *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY; - if (rule->mss) - *tx_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO; + switch (rule_type) { + case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL: + /* Checksum offload is not needed for inline protocol as + * all processing for Outbound IPSec packets will be + * implicitly taken care and for non-IPSec packets, + * there is no need of IPv4 Checksum offload. + */ + if (rule->portid == port_id) + *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY; + break; + case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO: + if (rule->portid == port_id) { + *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY; + if (rule->mss) + *tx_offloads |= + RTE_ETH_TX_OFFLOAD_TCP_TSO; + *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM; + } + break; + default: + /* Enable IPv4 checksum offload even if one of lookaside + * SA's are present. 
+ */ + *tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM; + break; } } return 0;
From patchwork Tue Mar 22 17:58:40 2022
X-Patchwork-Submitter: Nithin Dabilpuram
X-Patchwork-Id: 108815
X-Patchwork-Delegate: gakhil@marvell.com
From: Nithin Dabilpuram
To: , Radu Nicolau , Akhil Goyal
CC: , , Nithin Dabilpuram
Subject: [PATCH 2/7] examples/ipsec-secgw: use HW parsed packet type in poll mode
Date: Tue, 22 Mar 2022 23:28:40 +0530
Message-ID: <20220322175902.363520-2-ndabilpuram@marvell.com>
In-Reply-To: <20220322175902.363520-1-ndabilpuram@marvell.com>
References: <20220322175902.363520-1-ndabilpuram@marvell.com>

Use the HW parsed packet type when the ethdev supports the necessary protocols. If the packet type is not supported, register ethdev Rx callbacks to parse the packet type in SW.
This is better for performance as it effects fast path. Signed-off-by: Nithin Dabilpuram --- examples/ipsec-secgw/ipsec-secgw.c | 259 +++++++++++++++++++++++++++---------- 1 file changed, 194 insertions(+), 65 deletions(-) diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index 76919e5..e8f9e90 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -374,53 +374,30 @@ print_stats_cb(__rte_unused void *param) static inline void prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t) { + uint32_t ptype = pkt->packet_type; const struct rte_ether_hdr *eth; const struct rte_ipv4_hdr *iph4; const struct rte_ipv6_hdr *iph6; - const struct rte_udp_hdr *udp; - uint16_t ip4_hdr_len; - uint16_t nat_port; + uint32_t tun_type, l3_type; + + tun_type = ptype & RTE_PTYPE_TUNNEL_MASK; + l3_type = ptype & RTE_PTYPE_L3_MASK; eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *); - if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) { - + if (l3_type == RTE_PTYPE_L3_IPV4) { iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN); adjust_ipv4_pktlen(pkt, iph4, 0); - switch (iph4->next_proto_id) { - case IPPROTO_ESP: + if (tun_type == RTE_PTYPE_TUNNEL_ESP) { t->ipsec.pkts[(t->ipsec.num)++] = pkt; - break; - case IPPROTO_UDP: - if (app_sa_prm.udp_encap == 1) { - ip4_hdr_len = ((iph4->version_ihl & - RTE_IPV4_HDR_IHL_MASK) * - RTE_IPV4_IHL_MULTIPLIER); - udp = rte_pktmbuf_mtod_offset(pkt, - struct rte_udp_hdr *, ip4_hdr_len); - nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT); - if (udp->src_port == nat_port || - udp->dst_port == nat_port){ - t->ipsec.pkts[(t->ipsec.num)++] = pkt; - pkt->packet_type |= - MBUF_PTYPE_TUNNEL_ESP_IN_UDP; - break; - } - } - /* Fall through */ - default: + } else { t->ip4.data[t->ip4.num] = &iph4->next_proto_id; t->ip4.pkts[(t->ip4.num)++] = pkt; } pkt->l2_len = 0; pkt->l3_len = sizeof(*iph4); - pkt->packet_type |= RTE_PTYPE_L3_IPV4; - if (pkt->packet_type & RTE_PTYPE_L4_TCP) - pkt->l4_len = sizeof(struct rte_tcp_hdr); - else if (pkt->packet_type & RTE_PTYPE_L4_UDP) - pkt->l4_len = sizeof(struct rte_udp_hdr); - } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) { + } else if (l3_type & RTE_PTYPE_L3_IPV6) { int next_proto; size_t l3len, ext_len; uint8_t *p; @@ -430,47 +407,37 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t) RTE_ETHER_HDR_LEN); adjust_ipv6_pktlen(pkt, iph6, 0); - next_proto = iph6->proto; - - /* determine l3 header size up to ESP extension */ l3len = sizeof(struct ip6_hdr); - p = rte_pktmbuf_mtod(pkt, uint8_t *); - while (next_proto != IPPROTO_ESP && l3len < pkt->data_len && - (next_proto = rte_ipv6_get_next_ext(p + l3len, - next_proto, &ext_len)) >= 0) - l3len += ext_len; - /* drop packet when IPv6 header exceeds first segment length */ - if (unlikely(l3len > pkt->data_len)) { - free_pkts(&pkt, 1); - return; - } - - switch (next_proto) { - case IPPROTO_ESP: + if (tun_type == RTE_PTYPE_TUNNEL_ESP) { t->ipsec.pkts[(t->ipsec.num)++] = pkt; - break; - case IPPROTO_UDP: - if (app_sa_prm.udp_encap == 1) { - udp = rte_pktmbuf_mtod_offset(pkt, - struct rte_udp_hdr *, l3len); - nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT); - if (udp->src_port == nat_port || - udp->dst_port == nat_port){ - t->ipsec.pkts[(t->ipsec.num)++] = pkt; - pkt->packet_type |= - MBUF_PTYPE_TUNNEL_ESP_IN_UDP; - break; - } - } - /* Fall through */ - default: + } else { t->ip6.data[t->ip6.num] = &iph6->proto; t->ip6.pkts[(t->ip6.num)++] = pkt; } + + /* Determine 
l3 header size up to ESP extension by walking + * through extension headers. + */ + if (l3_type == RTE_PTYPE_L3_IPV6_EXT || + l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) { + p = rte_pktmbuf_mtod(pkt, uint8_t *); + next_proto = iph6->proto; + while (next_proto != IPPROTO_ESP && + l3len < pkt->data_len && + (next_proto = rte_ipv6_get_next_ext(p + l3len, + next_proto, &ext_len)) >= 0) + l3len += ext_len; + + /* Drop pkt when IPv6 header exceeds first seg size */ + if (unlikely(l3len > pkt->data_len)) { + free_pkts(&pkt, 1); + return; + } + } + pkt->l2_len = 0; pkt->l3_len = l3len; - pkt->packet_type |= RTE_PTYPE_L3_IPV6; } else { /* Unknown/Unsupported type, drop the packet */ RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n", @@ -479,6 +446,11 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t) return; } + if (ptype & RTE_PTYPE_L4_TCP) + pkt->l4_len = sizeof(struct rte_tcp_hdr); + else if (ptype & RTE_PTYPE_L4_UDP) + pkt->l4_len = sizeof(struct rte_udp_hdr); + /* Check if the packet has been processed inline. For inline protocol * processed packets, the metadata in the mbuf can be used to identify * the security processing done on the packet. The metadata will be @@ -2249,6 +2221,147 @@ cryptodevs_init(uint16_t req_queue_num) return total_nb_qps; } +static int +check_ptype(int portid) +{ + int l3_ipv4 = 0, l3_ipv6 = 0, l4_udp = 0, tunnel_esp = 0; + int i, nb_ptypes; + uint32_t mask; + + mask = (RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK | + RTE_PTYPE_TUNNEL_MASK); + + nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, NULL, 0); + if (nb_ptypes <= 0) + return 0; + + uint32_t ptypes[nb_ptypes]; + + nb_ptypes = rte_eth_dev_get_supported_ptypes(portid, mask, ptypes, nb_ptypes); + for (i = 0; i < nb_ptypes; ++i) { + if (ptypes[i] & RTE_PTYPE_L3_IPV4) + l3_ipv4 = 1; + if (ptypes[i] & RTE_PTYPE_L3_IPV6) + l3_ipv6 = 1; + if (ptypes[i] & RTE_PTYPE_TUNNEL_ESP) + tunnel_esp = 1; + if (ptypes[i] & RTE_PTYPE_L4_UDP) + l4_udp = 1; + } + + if (l3_ipv4 == 0) + printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid); + + if (l3_ipv6 == 0) + printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid); + + if (l4_udp == 0) + printf("port %d cannot parse RTE_PTYPE_L4_UDP\n", portid); + + if (tunnel_esp == 0) + printf("port %d cannot parse RTE_PTYPE_TUNNEL_ESP\n", portid); + + if (l3_ipv4 && l3_ipv6 && l4_udp && tunnel_esp) + return 1; + + return 0; + +} + +static inline void +parse_ptype(struct rte_mbuf *m) +{ + uint32_t packet_type = RTE_PTYPE_UNKNOWN; + const struct rte_ipv4_hdr *iph4; + const struct rte_ipv6_hdr *iph6; + const struct rte_ether_hdr *eth; + const struct rte_udp_hdr *udp; + uint16_t nat_port, ether_type; + int next_proto = 0; + size_t ext_len = 0; + const uint8_t *p; + uint32_t l3len; + + eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); + ether_type = eth->ether_type; + + if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) { + iph4 = (const struct rte_ipv4_hdr *)(eth + 1); + l3len = ((iph4->version_ihl & RTE_IPV4_HDR_IHL_MASK) * + RTE_IPV4_IHL_MULTIPLIER); + + if (l3len == sizeof(struct rte_ipv4_hdr)) + packet_type |= RTE_PTYPE_L3_IPV4; + else + packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; + + next_proto = iph4->next_proto_id; + p = (const uint8_t *)iph4; + } else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) { + iph6 = (const struct rte_ipv6_hdr *)(eth + 1); + l3len = sizeof(struct ip6_hdr); + + /* determine l3 header size up to ESP extension */ + next_proto = iph6->proto; + p = (const uint8_t *)iph6; + while (next_proto != IPPROTO_ESP && l3len < 
m->data_len && + (next_proto = rte_ipv6_get_next_ext(p + l3len, + next_proto, &ext_len)) >= 0) + l3len += ext_len; + + /* Skip IPv6 header exceeds first segment length */ + if (unlikely(l3len + RTE_ETHER_HDR_LEN > m->data_len)) + goto exit; + + if (l3len == sizeof(struct ip6_hdr)) + packet_type |= RTE_PTYPE_L3_IPV6; + else + packet_type |= RTE_PTYPE_L3_IPV6_EXT; + } + + switch (next_proto) { + case IPPROTO_ESP: + packet_type |= RTE_PTYPE_TUNNEL_ESP; + break; + case IPPROTO_UDP: + if (app_sa_prm.udp_encap == 1) { + udp = (const struct rte_udp_hdr *)(p + l3len); + nat_port = rte_cpu_to_be_16(IPSEC_NAT_T_PORT); + if (udp->src_port == nat_port || + udp->dst_port == nat_port) + packet_type |= + MBUF_PTYPE_TUNNEL_ESP_IN_UDP; + } + break; + default: + break; + } +exit: + m->packet_type = packet_type; +} + +static uint16_t +parse_ptype_cb(uint16_t port __rte_unused, uint16_t queue __rte_unused, + struct rte_mbuf *pkts[], uint16_t nb_pkts, + uint16_t max_pkts __rte_unused, + void *user_param __rte_unused) +{ + uint32_t i; + + if (unlikely(nb_pkts == 0)) + return nb_pkts; + + rte_prefetch0(rte_pktmbuf_mtod(pkts[0], struct ether_hdr *)); + for (i = 0; i < (unsigned int) (nb_pkts - 1); ++i) { + rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1], + struct ether_hdr *)); + parse_ptype(pkts[i]); + } + parse_ptype(pkts[i]); + + return nb_pkts; +} + static void port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads) { @@ -2260,6 +2373,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads) struct lcore_conf *qconf; struct rte_ether_addr ethaddr; struct rte_eth_conf local_port_conf = port_conf; + int ptype_supported; ret = rte_eth_dev_info_get(portid, &dev_info); if (ret != 0) @@ -2357,6 +2471,11 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads) rte_exit(EXIT_FAILURE, "Cannot adjust number of descriptors: " "err=%d, port=%d\n", ret, portid); + /* Check if required ptypes are supported */ + ptype_supported = check_ptype(portid); + if (!ptype_supported) + printf("Port %d: softly parse packet type info\n", portid); + /* init one TX queue per lcore */ tx_queueid = 0; for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { @@ -2418,6 +2537,16 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads) rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d, " "port=%d\n", ret, portid); + + /* Register Rx callback if ptypes are not supported */ + if (!ptype_supported && + !rte_eth_add_rx_callback(portid, queue, + parse_ptype_cb, NULL)) { + printf("Failed to add rx callback: port=%d, " + "queue=%d\n", portid, queue); + } + + } } printf("\n"); From patchwork Tue Mar 22 17:58:41 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Nithin Dabilpuram X-Patchwork-Id: 108816 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 4CC40A04FF; Tue, 22 Mar 2022 18:59:29 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 2E104427F5; Tue, 22 Mar 2022 18:59:22 +0100 (CET) Received: from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com [67.231.148.174]) by mails.dpdk.org (Postfix) with ESMTP id 99486427FA for ; Tue, 22 Mar 2022 18:59:20 +0100 (CET) Received: from pps.filterd (m0045849.ppops.net [127.0.0.1]) by 
mx0a-0016f401.pphosted.com (8.16.1.2/8.16.1.2) with ESMTP id 22MG9rZu022265; Tue, 22 Mar 2022 10:59:19 -0700
From: Nithin Dabilpuram
To: , Radu Nicolau , Akhil Goyal
CC: , , Nithin Dabilpuram
Subject: [PATCH 3/7] examples/ipsec-secgw: allow larger burst size for vectors
Date: Tue, 22 Mar 2022 23:28:41 +0530
Message-ID: <20220322175902.363520-3-ndabilpuram@marvell.com>
In-Reply-To: <20220322175902.363520-1-ndabilpuram@marvell.com>
References: <20220322175902.363520-1-ndabilpuram@marvell.com>

Allow a larger burst size for vector event mode instead of restricting it to 32. Also restructure the traffic type struct so that the packet count comes first and always sits in the first cacheline, and cache align the struct. Since MAX_PKT_BURST is not used by the vector event mode worker, define a separate macro for its burst size so that poll mode performance is not affected.
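As a side note, a small standalone sketch (not part of the patch) of compile-time checks for the layout property the commit message relies on, assuming the reworked struct traffic_type from ipsec-secgw.h and C11 static_assert:

#include <assert.h>
#include <stdalign.h>
#include <stddef.h>

#include <rte_common.h>

#include "ipsec-secgw.h"	/* struct traffic_type, MAX_PKTS */

/* 'num' is the hot counter; placing it first keeps it, and the start of
 * pkts[], in the first cache line of the cache-aligned struct.
 */
static_assert(offsetof(struct traffic_type, num) == 0,
	      "num must be the first member of struct traffic_type");
static_assert(offsetof(struct traffic_type, pkts) < RTE_CACHE_LINE_SIZE,
	      "pkts[] must start within the first cache line");
static_assert(alignof(struct traffic_type) == RTE_CACHE_LINE_SIZE,
	      "struct traffic_type must be cache aligned");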
Signed-off-by: Nithin Dabilpuram --- examples/ipsec-secgw/ipsec-secgw.c | 2 +- examples/ipsec-secgw/ipsec-secgw.h | 15 ++++++++++----- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index e8f9e90..7e01495 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -1858,7 +1858,7 @@ parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf) case CMD_LINE_OPT_VECTOR_SIZE_NUM: ret = parse_decimal(optarg); - if (ret > MAX_PKT_BURST) { + if (ret > MAX_PKT_BURST_VEC) { printf("Invalid argument for \'%s\': %s\n", CMD_LINE_OPT_VECTOR_SIZE, optarg); print_usage(prgname); diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h index 24f11ad..c347175 100644 --- a/examples/ipsec-secgw/ipsec-secgw.h +++ b/examples/ipsec-secgw/ipsec-secgw.h @@ -10,6 +10,11 @@ #define NB_SOCKETS 4 #define MAX_PKT_BURST 32 +#define MAX_PKT_BURST_VEC 256 + +#define MAX_PKTS \ + ((MAX_PKT_BURST_VEC > MAX_PKT_BURST ? \ + MAX_PKT_BURST_VEC : MAX_PKT_BURST) * 2) #define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1 @@ -48,12 +53,12 @@ #define MBUF_PTYPE_TUNNEL_ESP_IN_UDP (RTE_PTYPE_TUNNEL_ESP | RTE_PTYPE_L4_UDP) struct traffic_type { - const uint8_t *data[MAX_PKT_BURST * 2]; - struct rte_mbuf *pkts[MAX_PKT_BURST * 2]; - void *saptr[MAX_PKT_BURST * 2]; - uint32_t res[MAX_PKT_BURST * 2]; uint32_t num; -}; + struct rte_mbuf *pkts[MAX_PKTS]; + const uint8_t *data[MAX_PKTS]; + void *saptr[MAX_PKTS]; + uint32_t res[MAX_PKTS]; +} __rte_cache_aligned; struct ipsec_traffic { struct traffic_type ipsec; From patchwork Tue Mar 22 17:58:42 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Nithin Dabilpuram X-Patchwork-Id: 108817 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 08FF8A04FF; Tue, 22 Mar 2022 18:59:36 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 29468427FD; Tue, 22 Mar 2022 18:59:25 +0100 (CET) Received: from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com [67.231.156.173]) by mails.dpdk.org (Postfix) with ESMTP id 0D964427FA for ; Tue, 22 Mar 2022 18:59:22 +0100 (CET) Received: from pps.filterd (m0045851.ppops.net [127.0.0.1]) by mx0b-0016f401.pphosted.com (8.16.1.2/8.16.1.2) with ESMTP id 22MGjbw6026124; Tue, 22 Mar 2022 10:59:22 -0700 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h=from : to : cc : subject : date : message-id : in-reply-to : references : mime-version : content-transfer-encoding : content-type; s=pfpt0220; bh=XszT3TAtAS0uF6IjhgPeXAo8kp+3JWiLU9CKCHuSSp4=; b=eNriHoFctNxDLSe1ROMKsANlKjLHyPzjftPbk8QrUDcNAYy3PBvDbkBwHLoxNxIFwwfb EDOSCCmcg8XT2KJhUpY/rcDD5B38PNMvEqRw7B1lfMWmFVBN6g27qq+hUOkcIj0aQNhI ItIaON/ijI0x0yIPiWgNAL2bsRMXrbHM83f87S9+ZPu4y/FXk1oIW0R/66Ew8/Lv/k+M OnlI2DGKcSG3Fw77hYbd+AChGUIXVIowf13onkOfuWpuUpKfmw0zTybghLhNJ2YO5vxa Npdq0/ySiMWq/5/VPOVupdVa6HdAdyY3L3zIqVobNGo8qa1hNtgk4qEfMKvU/s9Imhhm 3w== Received: from dc5-exch01.marvell.com ([199.233.59.181]) by mx0b-0016f401.pphosted.com (PPS) with ESMTPS id 3ewepn6chu-1 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT); Tue, 22 Mar 2022 10:59:22 -0700 Received: from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH01.marvell.com (10.69.176.38) with 
Microsoft SMTP Server (TLS) id 15.0.1497.2; Tue, 22 Mar 2022 10:59:19 -0700 Received: from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.18 via Frontend Transport; Tue, 22 Mar 2022 10:59:19 -0700 Received: from localhost.localdomain (unknown [10.28.34.24]) by maili.marvell.com (Postfix) with ESMTP id DA7F33F706B; Tue, 22 Mar 2022 10:59:17 -0700 (PDT) From: Nithin Dabilpuram To: , Radu Nicolau , Akhil Goyal CC: , , Nithin Dabilpuram Subject: [PATCH 4/7] examples/ipsec-secgw: move fast path helper functions Date: Tue, 22 Mar 2022 23:28:42 +0530 Message-ID: <20220322175902.363520-4-ndabilpuram@marvell.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220322175902.363520-1-ndabilpuram@marvell.com> References: <20220322175902.363520-1-ndabilpuram@marvell.com> MIME-Version: 1.0 X-Proofpoint-ORIG-GUID: R6vycT8ZGiBKTsAikA2nry3feQQqCuY1 X-Proofpoint-GUID: R6vycT8ZGiBKTsAikA2nry3feQQqCuY1 X-Proofpoint-Virus-Version: vendor=baseguard engine=ICAP:2.0.205,Aquarius:18.0.850,Hydra:6.0.425,FMLib:17.11.64.514 definitions=2022-03-22_07,2022-03-22_01,2022-02-23_01 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Move fast path helper functions to header file for easy access. Signed-off-by: Nithin Dabilpuram --- examples/ipsec-secgw/ipsec-secgw.c | 547 +--------------------------------- examples/ipsec-secgw/ipsec-secgw.h | 4 + examples/ipsec-secgw/ipsec.h | 34 +++ examples/ipsec-secgw/ipsec_process.c | 49 +-- examples/ipsec-secgw/ipsec_worker.h | 560 +++++++++++++++++++++++++++++++++++ 5 files changed, 602 insertions(+), 592 deletions(-) diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index 7e01495..1d0ce3a 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -70,11 +70,6 @@ volatile bool force_quit; #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ -/* Configure how many packets ahead to prefetch, when reading packets */ -#define PREFETCH_OFFSET 3 - -#define MAX_RX_QUEUE_PER_LCORE 16 - #define MAX_LCORE_PARAMS 1024 /* @@ -191,9 +186,9 @@ static uint64_t dev_tx_offload = UINT64_MAX; /* * global values that determine multi-seg policy */ -static uint32_t frag_tbl_sz; +uint32_t frag_tbl_sz; static uint32_t frame_buf_size = RTE_MBUF_DEFAULT_BUF_SIZE; -static uint32_t mtu_size = RTE_ETHER_MTU; +uint32_t mtu_size = RTE_ETHER_MTU; static uint64_t frag_ttl_ns = MAX_FRAG_TTL_NS; static uint32_t stats_interval; @@ -205,11 +200,6 @@ struct app_sa_prm app_sa_prm = { }; static const char *cfgfile; -struct lcore_rx_queue { - uint16_t port_id; - uint8_t queue_id; -} __rte_cache_aligned; - struct lcore_params { uint16_t port_id; uint8_t queue_id; @@ -224,28 +214,7 @@ static uint16_t nb_lcore_params; static struct rte_hash *cdev_map_in; static struct rte_hash *cdev_map_out; -struct buffer { - uint16_t len; - struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *)); -}; - -struct lcore_conf { - uint16_t nb_rx_queue; - struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE]; - uint16_t tx_queue_id[RTE_MAX_ETHPORTS]; - struct buffer tx_mbufs[RTE_MAX_ETHPORTS]; - struct ipsec_ctx inbound; - struct ipsec_ctx outbound; - struct rt_ctx *rt4_ctx; - struct rt_ctx *rt6_ctx; - struct { - struct rte_ip_frag_tbl *tbl; - struct rte_mempool *pool_indir; - struct rte_ip_frag_death_row dr; - } frag; -} 
__rte_cache_aligned; - -static struct lcore_conf lcore_conf[RTE_MAX_LCORE]; +struct lcore_conf lcore_conf[RTE_MAX_LCORE]; static struct rte_eth_conf port_conf = { .rxmode = { @@ -281,32 +250,6 @@ multi_seg_required(void) frame_buf_size || frag_tbl_sz != 0); } -static inline void -adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph, - uint32_t l2_len) -{ - uint32_t plen, trim; - - plen = rte_be_to_cpu_16(iph->total_length) + l2_len; - if (plen < m->pkt_len) { - trim = m->pkt_len - plen; - rte_pktmbuf_trim(m, trim); - } -} - -static inline void -adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph, - uint32_t l2_len) -{ - uint32_t plen, trim; - - plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len; - if (plen < m->pkt_len) { - trim = m->pkt_len - plen; - rte_pktmbuf_trim(m, trim); - } -} - struct ipsec_core_statistics core_statistics[RTE_MAX_LCORE]; @@ -371,341 +314,6 @@ print_stats_cb(__rte_unused void *param) rte_eal_alarm_set(stats_interval * US_PER_S, print_stats_cb, NULL); } -static inline void -prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t) -{ - uint32_t ptype = pkt->packet_type; - const struct rte_ether_hdr *eth; - const struct rte_ipv4_hdr *iph4; - const struct rte_ipv6_hdr *iph6; - uint32_t tun_type, l3_type; - - tun_type = ptype & RTE_PTYPE_TUNNEL_MASK; - l3_type = ptype & RTE_PTYPE_L3_MASK; - - eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *); - if (l3_type == RTE_PTYPE_L3_IPV4) { - iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt, - RTE_ETHER_HDR_LEN); - adjust_ipv4_pktlen(pkt, iph4, 0); - - if (tun_type == RTE_PTYPE_TUNNEL_ESP) { - t->ipsec.pkts[(t->ipsec.num)++] = pkt; - } else { - t->ip4.data[t->ip4.num] = &iph4->next_proto_id; - t->ip4.pkts[(t->ip4.num)++] = pkt; - } - pkt->l2_len = 0; - pkt->l3_len = sizeof(*iph4); - } else if (l3_type & RTE_PTYPE_L3_IPV6) { - int next_proto; - size_t l3len, ext_len; - uint8_t *p; - - /* get protocol type */ - iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt, - RTE_ETHER_HDR_LEN); - adjust_ipv6_pktlen(pkt, iph6, 0); - - l3len = sizeof(struct ip6_hdr); - - if (tun_type == RTE_PTYPE_TUNNEL_ESP) { - t->ipsec.pkts[(t->ipsec.num)++] = pkt; - } else { - t->ip6.data[t->ip6.num] = &iph6->proto; - t->ip6.pkts[(t->ip6.num)++] = pkt; - } - - /* Determine l3 header size up to ESP extension by walking - * through extension headers. - */ - if (l3_type == RTE_PTYPE_L3_IPV6_EXT || - l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) { - p = rte_pktmbuf_mtod(pkt, uint8_t *); - next_proto = iph6->proto; - while (next_proto != IPPROTO_ESP && - l3len < pkt->data_len && - (next_proto = rte_ipv6_get_next_ext(p + l3len, - next_proto, &ext_len)) >= 0) - l3len += ext_len; - - /* Drop pkt when IPv6 header exceeds first seg size */ - if (unlikely(l3len > pkt->data_len)) { - free_pkts(&pkt, 1); - return; - } - } - - pkt->l2_len = 0; - pkt->l3_len = l3len; - } else { - /* Unknown/Unsupported type, drop the packet */ - RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n", - rte_be_to_cpu_16(eth->ether_type)); - free_pkts(&pkt, 1); - return; - } - - if (ptype & RTE_PTYPE_L4_TCP) - pkt->l4_len = sizeof(struct rte_tcp_hdr); - else if (ptype & RTE_PTYPE_L4_UDP) - pkt->l4_len = sizeof(struct rte_udp_hdr); - - /* Check if the packet has been processed inline. For inline protocol - * processed packets, the metadata in the mbuf can be used to identify - * the security processing done on the packet. 
The metadata will be - * used to retrieve the application registered userdata associated - * with the security session. - */ - - if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD && - rte_security_dynfield_is_registered()) { - struct ipsec_sa *sa; - struct ipsec_mbuf_metadata *priv; - struct rte_security_ctx *ctx = (struct rte_security_ctx *) - rte_eth_dev_get_sec_ctx( - pkt->port); - - /* Retrieve the userdata registered. Here, the userdata - * registered is the SA pointer. - */ - sa = (struct ipsec_sa *)rte_security_get_userdata(ctx, - *rte_security_dynfield(pkt)); - if (sa == NULL) { - /* userdata could not be retrieved */ - return; - } - - /* Save SA as priv member in mbuf. This will be used in the - * IPsec selector(SP-SA) check. - */ - - priv = get_priv(pkt); - priv->sa = sa; - } -} - -static inline void -prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t, - uint16_t nb_pkts) -{ - int32_t i; - - t->ipsec.num = 0; - t->ip4.num = 0; - t->ip6.num = 0; - - for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) { - rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET], - void *)); - prepare_one_packet(pkts[i], t); - } - /* Process left packets */ - for (; i < nb_pkts; i++) - prepare_one_packet(pkts[i], t); -} - -static inline void -prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port, - const struct lcore_conf *qconf) -{ - struct ip *ip; - struct rte_ether_hdr *ethhdr; - - ip = rte_pktmbuf_mtod(pkt, struct ip *); - - ethhdr = (struct rte_ether_hdr *) - rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN); - - if (ip->ip_v == IPVERSION) { - pkt->ol_flags |= qconf->outbound.ipv4_offloads; - pkt->l3_len = sizeof(struct ip); - pkt->l2_len = RTE_ETHER_HDR_LEN; - - ip->ip_sum = 0; - - /* calculate IPv4 cksum in SW */ - if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0) - ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip); - - ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); - } else { - pkt->ol_flags |= qconf->outbound.ipv6_offloads; - pkt->l3_len = sizeof(struct ip6_hdr); - pkt->l2_len = RTE_ETHER_HDR_LEN; - - ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); - } - - memcpy(ðhdr->src_addr, ðaddr_tbl[port].src, - sizeof(struct rte_ether_addr)); - memcpy(ðhdr->dst_addr, ðaddr_tbl[port].dst, - sizeof(struct rte_ether_addr)); -} - -static inline void -prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port, - const struct lcore_conf *qconf) -{ - int32_t i; - const int32_t prefetch_offset = 2; - - for (i = 0; i < (nb_pkts - prefetch_offset); i++) { - rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]); - prepare_tx_pkt(pkts[i], port, qconf); - } - /* Process left packets */ - for (; i < nb_pkts; i++) - prepare_tx_pkt(pkts[i], port, qconf); -} - -/* Send burst of packets on an output interface */ -static inline int32_t -send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port) -{ - struct rte_mbuf **m_table; - int32_t ret; - uint16_t queueid; - - queueid = qconf->tx_queue_id[port]; - m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table; - - prepare_tx_burst(m_table, n, port, qconf); - - ret = rte_eth_tx_burst(port, queueid, m_table, n); - - core_stats_update_tx(ret); - - if (unlikely(ret < n)) { - do { - free_pkts(&m_table[ret], 1); - } while (++ret < n); - } - - return 0; -} - -/* - * Helper function to fragment and queue for TX one packet. 
- */ -static inline uint32_t -send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m, - uint16_t port, uint8_t proto) -{ - struct buffer *tbl; - uint32_t len, n; - int32_t rc; - - tbl = qconf->tx_mbufs + port; - len = tbl->len; - - /* free space for new fragments */ - if (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >= RTE_DIM(tbl->m_table)) { - send_burst(qconf, len, port); - len = 0; - } - - n = RTE_DIM(tbl->m_table) - len; - - if (proto == IPPROTO_IP) - rc = rte_ipv4_fragment_packet(m, tbl->m_table + len, - n, mtu_size, m->pool, qconf->frag.pool_indir); - else - rc = rte_ipv6_fragment_packet(m, tbl->m_table + len, - n, mtu_size, m->pool, qconf->frag.pool_indir); - - if (rc >= 0) - len += rc; - else - RTE_LOG(ERR, IPSEC, - "%s: failed to fragment packet with size %u, " - "error code: %d\n", - __func__, m->pkt_len, rte_errno); - - free_pkts(&m, 1); - return len; -} - -/* Enqueue a single packet, and send burst if queue is filled */ -static inline int32_t -send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto) -{ - uint32_t lcore_id; - uint16_t len; - struct lcore_conf *qconf; - - lcore_id = rte_lcore_id(); - - qconf = &lcore_conf[lcore_id]; - len = qconf->tx_mbufs[port].len; - - if (m->pkt_len <= mtu_size) { - qconf->tx_mbufs[port].m_table[len] = m; - len++; - - /* need to fragment the packet */ - } else if (frag_tbl_sz > 0) - len = send_fragment_packet(qconf, m, port, proto); - else - free_pkts(&m, 1); - - /* enough pkts to be sent */ - if (unlikely(len == MAX_PKT_BURST)) { - send_burst(qconf, MAX_PKT_BURST, port); - len = 0; - } - - qconf->tx_mbufs[port].len = len; - return 0; -} - -static inline void -inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip, - uint16_t lim, struct ipsec_spd_stats *stats) -{ - struct rte_mbuf *m; - uint32_t i, j, res, sa_idx; - - if (ip->num == 0 || sp == NULL) - return; - - rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, - ip->num, DEFAULT_MAX_CATEGORIES); - - j = 0; - for (i = 0; i < ip->num; i++) { - m = ip->pkts[i]; - res = ip->res[i]; - if (res == BYPASS) { - ip->pkts[j++] = m; - stats->bypass++; - continue; - } - if (res == DISCARD) { - free_pkts(&m, 1); - stats->discard++; - continue; - } - - /* Only check SPI match for processed IPSec packets */ - if (i < lim && ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)) { - stats->discard++; - free_pkts(&m, 1); - continue; - } - - sa_idx = res - 1; - if (!inbound_sa_check(sa, m, sa_idx)) { - stats->discard++; - free_pkts(&m, 1); - continue; - } - ip->pkts[j++] = m; - stats->protect++; - } - ip->num = j; -} - static void split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num) { @@ -934,140 +542,6 @@ process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx, } } -static inline int32_t -get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6) -{ - struct ipsec_mbuf_metadata *priv; - struct ipsec_sa *sa; - - priv = get_priv(pkt); - - sa = priv->sa; - if (unlikely(sa == NULL)) { - RTE_LOG(ERR, IPSEC, "SA not saved in private data\n"); - goto fail; - } - - if (is_ipv6) - return sa->portid; - - /* else */ - return (sa->portid | RTE_LPM_LOOKUP_SUCCESS); - -fail: - if (is_ipv6) - return -1; - - /* else */ - return 0; -} - -static inline void -route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) -{ - uint32_t hop[MAX_PKT_BURST * 2]; - uint32_t dst_ip[MAX_PKT_BURST * 2]; - int32_t pkt_hop = 0; - uint16_t i, offset; - uint16_t lpm_pkts = 0; - unsigned int lcoreid = rte_lcore_id(); - - if (nb_pkts == 0) - return; - - /* Need 
to do an LPM lookup for non-inline packets. Inline packets will - * have port ID in the SA - */ - - for (i = 0; i < nb_pkts; i++) { - if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) { - /* Security offload not enabled. So an LPM lookup is - * required to get the hop - */ - offset = offsetof(struct ip, ip_dst); - dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i], - uint32_t *, offset); - dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]); - lpm_pkts++; - } - } - - rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts); - - lpm_pkts = 0; - - for (i = 0; i < nb_pkts; i++) { - if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) { - /* Read hop from the SA */ - pkt_hop = get_hop_for_offload_pkt(pkts[i], 0); - } else { - /* Need to use hop returned by lookup */ - pkt_hop = hop[lpm_pkts++]; - } - - if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) { - core_statistics[lcoreid].lpm4.miss++; - free_pkts(&pkts[i], 1); - continue; - } - send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP); - } -} - -static inline void -route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) -{ - int32_t hop[MAX_PKT_BURST * 2]; - uint8_t dst_ip[MAX_PKT_BURST * 2][16]; - uint8_t *ip6_dst; - int32_t pkt_hop = 0; - uint16_t i, offset; - uint16_t lpm_pkts = 0; - unsigned int lcoreid = rte_lcore_id(); - - if (nb_pkts == 0) - return; - - /* Need to do an LPM lookup for non-inline packets. Inline packets will - * have port ID in the SA - */ - - for (i = 0; i < nb_pkts; i++) { - if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) { - /* Security offload not enabled. So an LPM lookup is - * required to get the hop - */ - offset = offsetof(struct ip6_hdr, ip6_dst); - ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *, - offset); - memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16); - lpm_pkts++; - } - } - - rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop, - lpm_pkts); - - lpm_pkts = 0; - - for (i = 0; i < nb_pkts; i++) { - if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) { - /* Read hop from the SA */ - pkt_hop = get_hop_for_offload_pkt(pkts[i], 1); - } else { - /* Need to use hop returned by lookup */ - pkt_hop = hop[lpm_pkts++]; - } - - if (pkt_hop == -1) { - core_statistics[lcoreid].lpm6.miss++; - free_pkts(&pkts[i], 1); - continue; - } - send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6); - } -} - static inline void process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts, uint8_t nb_pkts, uint16_t portid) @@ -1093,21 +567,6 @@ process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts, } static inline void -drain_tx_buffers(struct lcore_conf *qconf) -{ - struct buffer *buf; - uint32_t portid; - - for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { - buf = &qconf->tx_mbufs[portid]; - if (buf->len == 0) - continue; - send_burst(qconf, buf->len, portid); - buf->len = 0; - } -} - -static inline void drain_crypto_buffers(struct lcore_conf *qconf) { uint32_t i; diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h index c347175..2edf631 100644 --- a/examples/ipsec-secgw/ipsec-secgw.h +++ b/examples/ipsec-secgw/ipsec-secgw.h @@ -6,6 +6,7 @@ #include +#define MAX_RX_QUEUE_PER_LCORE 16 #define NB_SOCKETS 4 @@ -141,6 +142,9 @@ extern uint32_t nb_bufs_in_pool; extern bool per_port_pool; +extern uint32_t mtu_size; +extern uint32_t frag_tbl_sz; + static inline uint8_t is_unprotected_port(uint16_t port_id) { diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h index ccfde8e..9a4e7ea 100644 --- 
a/examples/ipsec-secgw/ipsec.h +++ b/examples/ipsec-secgw/ipsec.h @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -37,6 +38,11 @@ #define IP6_VERSION (6) +#define SATP_OUT_IPV4(t) \ + ((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \ + (((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \ + ((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4) + struct rte_crypto_xform; struct ipsec_xform; struct rte_mbuf; @@ -260,6 +266,34 @@ struct cnt_blk { uint32_t cnt; } __rte_packed; +struct lcore_rx_queue { + uint16_t port_id; + uint8_t queue_id; +} __rte_cache_aligned; + +struct buffer { + uint16_t len; + struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *)); +}; + +struct lcore_conf { + uint16_t nb_rx_queue; + struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE]; + uint16_t tx_queue_id[RTE_MAX_ETHPORTS]; + struct buffer tx_mbufs[RTE_MAX_ETHPORTS]; + struct ipsec_ctx inbound; + struct ipsec_ctx outbound; + struct rt_ctx *rt4_ctx; + struct rt_ctx *rt6_ctx; + struct { + struct rte_ip_frag_tbl *tbl; + struct rte_mempool *pool_indir; + struct rte_ip_frag_death_row dr; + } frag; +} __rte_cache_aligned; + +extern struct lcore_conf lcore_conf[RTE_MAX_LCORE]; + /* Socket ctx */ extern struct socket_ctx socket_ctx[NB_SOCKETS]; diff --git a/examples/ipsec-secgw/ipsec_process.c b/examples/ipsec-secgw/ipsec_process.c index 285e9c7..089d89f 100644 --- a/examples/ipsec-secgw/ipsec_process.c +++ b/examples/ipsec-secgw/ipsec_process.c @@ -13,11 +13,7 @@ #include "ipsec.h" #include "ipsec-secgw.h" - -#define SATP_OUT_IPV4(t) \ - ((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \ - (((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \ - ((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4) +#include "ipsec_worker.h" /* helper routine to free bulk of crypto-ops and related packets */ static inline void @@ -209,49 +205,6 @@ ipsec_prepare_crypto_group(struct ipsec_ctx *ctx, struct ipsec_sa *sa, } /* - * helper routine for inline and cpu(synchronous) processing - * this is just to satisfy inbound_sa_check() and get_hop_for_offload_pkt(). - * Should be removed in future. 
- */ -static inline void -prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt) -{ - uint32_t j; - struct ipsec_mbuf_metadata *priv; - - for (j = 0; j != cnt; j++) { - priv = get_priv(mb[j]); - priv->sa = sa; - /* setup TSO related fields if TSO enabled*/ - if (priv->sa->mss) { - uint32_t ptype = mb[j]->packet_type; - /* only TCP is supported */ - if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) { - mb[j]->tso_segsz = priv->sa->mss; - if ((IS_TUNNEL(priv->sa->flags))) { - mb[j]->outer_l3_len = mb[j]->l3_len; - mb[j]->outer_l2_len = mb[j]->l2_len; - mb[j]->ol_flags |= - RTE_MBUF_F_TX_TUNNEL_ESP; - if (RTE_ETH_IS_IPV4_HDR(ptype)) - mb[j]->ol_flags |= - RTE_MBUF_F_TX_OUTER_IP_CKSUM; - } - mb[j]->l4_len = sizeof(struct rte_tcp_hdr); - mb[j]->ol_flags |= (RTE_MBUF_F_TX_TCP_SEG | - RTE_MBUF_F_TX_TCP_CKSUM); - if (RTE_ETH_IS_IPV4_HDR(ptype)) - mb[j]->ol_flags |= - RTE_MBUF_F_TX_OUTER_IPV4; - else - mb[j]->ol_flags |= - RTE_MBUF_F_TX_OUTER_IPV6; - } - } - } -} - -/* * finish processing of packets successfully decrypted by an inline processor */ static uint32_t diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h index 5d85cf1..eb966a6 100644 --- a/examples/ipsec-secgw/ipsec_worker.h +++ b/examples/ipsec-secgw/ipsec_worker.h @@ -4,8 +4,15 @@ #ifndef _IPSEC_WORKER_H_ #define _IPSEC_WORKER_H_ +#include +#include +#include +#include + #include "ipsec.h" +/* Configure how many packets ahead to prefetch, when reading packets */ +#define PREFETCH_OFFSET 3 enum pkt_type { PKT_TYPE_PLAIN_IPV4 = 1, PKT_TYPE_IPSEC_IPV4, @@ -38,4 +45,557 @@ void ipsec_poll_mode_worker(void); int ipsec_launch_one_lcore(void *args); +/* + * helper routine for inline and cpu(synchronous) processing + * this is just to satisfy inbound_sa_check() and get_hop_for_offload_pkt(). + * Should be removed in future. 
+ */ +static inline void +prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt) +{ + uint32_t j; + struct ipsec_mbuf_metadata *priv; + + for (j = 0; j != cnt; j++) { + priv = get_priv(mb[j]); + priv->sa = sa; + /* setup TSO related fields if TSO enabled*/ + if (priv->sa->mss) { + uint32_t ptype = mb[j]->packet_type; + /* only TCP is supported */ + if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) { + mb[j]->tso_segsz = priv->sa->mss; + if ((IS_TUNNEL(priv->sa->flags))) { + mb[j]->outer_l3_len = mb[j]->l3_len; + mb[j]->outer_l2_len = mb[j]->l2_len; + mb[j]->ol_flags |= + RTE_MBUF_F_TX_TUNNEL_ESP; + if (RTE_ETH_IS_IPV4_HDR(ptype)) + mb[j]->ol_flags |= + RTE_MBUF_F_TX_OUTER_IP_CKSUM; + } + mb[j]->l4_len = sizeof(struct rte_tcp_hdr); + mb[j]->ol_flags |= (RTE_MBUF_F_TX_TCP_SEG | + RTE_MBUF_F_TX_TCP_CKSUM); + if (RTE_ETH_IS_IPV4_HDR(ptype)) + mb[j]->ol_flags |= + RTE_MBUF_F_TX_OUTER_IPV4; + else + mb[j]->ol_flags |= + RTE_MBUF_F_TX_OUTER_IPV6; + } + } + } +} + +static __rte_always_inline void +adjust_ipv4_pktlen(struct rte_mbuf *m, const struct rte_ipv4_hdr *iph, + uint32_t l2_len) +{ + uint32_t plen, trim; + + plen = rte_be_to_cpu_16(iph->total_length) + l2_len; + if (plen < m->pkt_len) { + trim = m->pkt_len - plen; + rte_pktmbuf_trim(m, trim); + } +} + +static __rte_always_inline void +adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph, + uint32_t l2_len) +{ + uint32_t plen, trim; + + plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len; + if (plen < m->pkt_len) { + trim = m->pkt_len - plen; + rte_pktmbuf_trim(m, trim); + } +} + +static __rte_always_inline void +prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t) +{ + uint32_t ptype = pkt->packet_type; + const struct rte_ether_hdr *eth; + const struct rte_ipv4_hdr *iph4; + const struct rte_ipv6_hdr *iph6; + uint32_t tun_type, l3_type; + + tun_type = ptype & RTE_PTYPE_TUNNEL_MASK; + l3_type = ptype & RTE_PTYPE_L3_MASK; + + eth = rte_pktmbuf_mtod(pkt, const struct rte_ether_hdr *); + if (l3_type == RTE_PTYPE_L3_IPV4) { + iph4 = (const struct rte_ipv4_hdr *)rte_pktmbuf_adj(pkt, + RTE_ETHER_HDR_LEN); + adjust_ipv4_pktlen(pkt, iph4, 0); + + if (tun_type == RTE_PTYPE_TUNNEL_ESP) { + t->ipsec.pkts[(t->ipsec.num)++] = pkt; + } else { + t->ip4.data[t->ip4.num] = &iph4->next_proto_id; + t->ip4.pkts[(t->ip4.num)++] = pkt; + } + pkt->l2_len = 0; + pkt->l3_len = sizeof(*iph4); + } else if (l3_type & RTE_PTYPE_L3_IPV6) { + int next_proto; + size_t l3len, ext_len; + uint8_t *p; + + /* get protocol type */ + iph6 = (const struct rte_ipv6_hdr *)rte_pktmbuf_adj(pkt, + RTE_ETHER_HDR_LEN); + adjust_ipv6_pktlen(pkt, iph6, 0); + + l3len = sizeof(struct ip6_hdr); + + if (tun_type == RTE_PTYPE_TUNNEL_ESP) { + t->ipsec.pkts[(t->ipsec.num)++] = pkt; + } else { + t->ip6.data[t->ip6.num] = &iph6->proto; + t->ip6.pkts[(t->ip6.num)++] = pkt; + } + + /* Determine l3 header size up to ESP extension by walking + * through extension headers. 
+ */ + if (l3_type == RTE_PTYPE_L3_IPV6_EXT || + l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) { + p = rte_pktmbuf_mtod(pkt, uint8_t *); + next_proto = iph6->proto; + while (next_proto != IPPROTO_ESP && + l3len < pkt->data_len && + (next_proto = rte_ipv6_get_next_ext(p + l3len, + next_proto, &ext_len)) >= 0) + l3len += ext_len; + + /* Drop pkt when IPv6 header exceeds first seg size */ + if (unlikely(l3len > pkt->data_len)) { + free_pkts(&pkt, 1); + return; + } + } + + pkt->l2_len = 0; + pkt->l3_len = l3len; + } else { + /* Unknown/Unsupported type, drop the packet */ + RTE_LOG(ERR, IPSEC, "Unsupported packet type 0x%x\n", + rte_be_to_cpu_16(eth->ether_type)); + free_pkts(&pkt, 1); + return; + } + + if (ptype & RTE_PTYPE_L4_TCP) + pkt->l4_len = sizeof(struct rte_tcp_hdr); + else if (ptype & RTE_PTYPE_L4_UDP) + pkt->l4_len = sizeof(struct rte_udp_hdr); + + /* Check if the packet has been processed inline. For inline protocol + * processed packets, the metadata in the mbuf can be used to identify + * the security processing done on the packet. The metadata will be + * used to retrieve the application registered userdata associated + * with the security session. + */ + + if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD && + rte_security_dynfield_is_registered()) { + struct ipsec_sa *sa; + struct ipsec_mbuf_metadata *priv; + struct rte_security_ctx *ctx = (struct rte_security_ctx *) + rte_eth_dev_get_sec_ctx( + pkt->port); + + /* Retrieve the userdata registered. Here, the userdata + * registered is the SA pointer. + */ + sa = (struct ipsec_sa *)rte_security_get_userdata(ctx, + *rte_security_dynfield(pkt)); + if (sa == NULL) { + /* userdata could not be retrieved */ + return; + } + + /* Save SA as priv member in mbuf. This will be used in the + * IPsec selector(SP-SA) check. 
+ */ + + priv = get_priv(pkt); + priv->sa = sa; + } +} + +static __rte_always_inline void +prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t, + uint16_t nb_pkts) +{ + int32_t i; + + t->ipsec.num = 0; + t->ip4.num = 0; + t->ip6.num = 0; + + for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) { + rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET], + void *)); + prepare_one_packet(pkts[i], t); + } + /* Process left packets */ + for (; i < nb_pkts; i++) + prepare_one_packet(pkts[i], t); +} + +static __rte_always_inline void +prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port, + const struct lcore_conf *qconf) +{ + struct ip *ip; + struct rte_ether_hdr *ethhdr; + + ip = rte_pktmbuf_mtod(pkt, struct ip *); + + ethhdr = (struct rte_ether_hdr *) + rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN); + + if (ip->ip_v == IPVERSION) { + pkt->ol_flags |= qconf->outbound.ipv4_offloads; + pkt->l3_len = sizeof(struct ip); + pkt->l2_len = RTE_ETHER_HDR_LEN; + + ip->ip_sum = 0; + + /* calculate IPv4 cksum in SW */ + if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0) + ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip); + + ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + } else { + pkt->ol_flags |= qconf->outbound.ipv6_offloads; + pkt->l3_len = sizeof(struct ip6_hdr); + pkt->l2_len = RTE_ETHER_HDR_LEN; + + ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); + } + + memcpy(ðhdr->src_addr, ðaddr_tbl[port].src, + sizeof(struct rte_ether_addr)); + memcpy(ðhdr->dst_addr, ðaddr_tbl[port].dst, + sizeof(struct rte_ether_addr)); +} + +static __rte_always_inline void +prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port, + const struct lcore_conf *qconf) +{ + int32_t i; + const int32_t prefetch_offset = 2; + + for (i = 0; i < (nb_pkts - prefetch_offset); i++) { + rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]); + prepare_tx_pkt(pkts[i], port, qconf); + } + /* Process left packets */ + for (; i < nb_pkts; i++) + prepare_tx_pkt(pkts[i], port, qconf); +} + +/* Send burst of packets on an output interface */ +static __rte_always_inline int32_t +send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port) +{ + struct rte_mbuf **m_table; + int32_t ret; + uint16_t queueid; + + queueid = qconf->tx_queue_id[port]; + m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table; + + prepare_tx_burst(m_table, n, port, qconf); + + ret = rte_eth_tx_burst(port, queueid, m_table, n); + + core_stats_update_tx(ret); + + if (unlikely(ret < n)) { + do { + free_pkts(&m_table[ret], 1); + } while (++ret < n); + } + + return 0; +} + +/* + * Helper function to fragment and queue for TX one packet. 
+ */ +static __rte_always_inline uint32_t +send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m, + uint16_t port, uint8_t proto) +{ + struct buffer *tbl; + uint32_t len, n; + int32_t rc; + + tbl = qconf->tx_mbufs + port; + len = tbl->len; + + /* free space for new fragments */ + if (len + RTE_LIBRTE_IP_FRAG_MAX_FRAG >= RTE_DIM(tbl->m_table)) { + send_burst(qconf, len, port); + len = 0; + } + + n = RTE_DIM(tbl->m_table) - len; + + if (proto == IPPROTO_IP) + rc = rte_ipv4_fragment_packet(m, tbl->m_table + len, + n, mtu_size, m->pool, qconf->frag.pool_indir); + else + rc = rte_ipv6_fragment_packet(m, tbl->m_table + len, + n, mtu_size, m->pool, qconf->frag.pool_indir); + + if (rc >= 0) + len += rc; + else + RTE_LOG(ERR, IPSEC, + "%s: failed to fragment packet with size %u, " + "error code: %d\n", + __func__, m->pkt_len, rte_errno); + + free_pkts(&m, 1); + return len; +} + +/* Enqueue a single packet, and send burst if queue is filled */ +static __rte_always_inline int32_t +send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto) +{ + uint32_t lcore_id; + uint16_t len; + struct lcore_conf *qconf; + + lcore_id = rte_lcore_id(); + + qconf = &lcore_conf[lcore_id]; + len = qconf->tx_mbufs[port].len; + + if (m->pkt_len <= mtu_size) { + qconf->tx_mbufs[port].m_table[len] = m; + len++; + + /* need to fragment the packet */ + } else if (frag_tbl_sz > 0) + len = send_fragment_packet(qconf, m, port, proto); + else + free_pkts(&m, 1); + + /* enough pkts to be sent */ + if (unlikely(len == MAX_PKT_BURST)) { + send_burst(qconf, MAX_PKT_BURST, port); + len = 0; + } + + qconf->tx_mbufs[port].len = len; + return 0; +} + +static __rte_always_inline void +inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip, + uint16_t lim, struct ipsec_spd_stats *stats) +{ + struct rte_mbuf *m; + uint32_t i, j, res, sa_idx; + + if (ip->num == 0 || sp == NULL) + return; + + rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, + ip->num, DEFAULT_MAX_CATEGORIES); + + j = 0; + for (i = 0; i < ip->num; i++) { + m = ip->pkts[i]; + res = ip->res[i]; + if (res == BYPASS) { + ip->pkts[j++] = m; + stats->bypass++; + continue; + } + if (res == DISCARD) { + free_pkts(&m, 1); + stats->discard++; + continue; + } + + /* Only check SPI match for processed IPSec packets */ + if (i < lim && ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)) { + stats->discard++; + free_pkts(&m, 1); + continue; + } + + sa_idx = res - 1; + if (!inbound_sa_check(sa, m, sa_idx)) { + stats->discard++; + free_pkts(&m, 1); + continue; + } + ip->pkts[j++] = m; + stats->protect++; + } + ip->num = j; +} + +static __rte_always_inline int32_t +get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6) +{ + struct ipsec_mbuf_metadata *priv; + struct ipsec_sa *sa; + + priv = get_priv(pkt); + + sa = priv->sa; + if (unlikely(sa == NULL)) { + RTE_LOG(ERR, IPSEC, "SA not saved in private data\n"); + goto fail; + } + + if (is_ipv6) + return sa->portid; + + /* else */ + return (sa->portid | RTE_LPM_LOOKUP_SUCCESS); + +fail: + if (is_ipv6) + return -1; + + /* else */ + return 0; +} + +static __rte_always_inline void +route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) +{ + uint32_t hop[MAX_PKT_BURST * 2]; + uint32_t dst_ip[MAX_PKT_BURST * 2]; + int32_t pkt_hop = 0; + uint16_t i, offset; + uint16_t lpm_pkts = 0; + unsigned int lcoreid = rte_lcore_id(); + + if (nb_pkts == 0) + return; + + /* Need to do an LPM lookup for non-inline packets. 
Inline packets will + * have port ID in the SA + */ + + for (i = 0; i < nb_pkts; i++) { + if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) { + /* Security offload not enabled. So an LPM lookup is + * required to get the hop + */ + offset = offsetof(struct ip, ip_dst); + dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i], + uint32_t *, offset); + dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]); + lpm_pkts++; + } + } + + rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, lpm_pkts); + + lpm_pkts = 0; + + for (i = 0; i < nb_pkts; i++) { + if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) { + /* Read hop from the SA */ + pkt_hop = get_hop_for_offload_pkt(pkts[i], 0); + } else { + /* Need to use hop returned by lookup */ + pkt_hop = hop[lpm_pkts++]; + } + + if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) { + core_statistics[lcoreid].lpm4.miss++; + free_pkts(&pkts[i], 1); + continue; + } + send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP); + } +} + +static __rte_always_inline void +route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) +{ + int32_t hop[MAX_PKT_BURST * 2]; + uint8_t dst_ip[MAX_PKT_BURST * 2][16]; + uint8_t *ip6_dst; + int32_t pkt_hop = 0; + uint16_t i, offset; + uint16_t lpm_pkts = 0; + unsigned int lcoreid = rte_lcore_id(); + + if (nb_pkts == 0) + return; + + /* Need to do an LPM lookup for non-inline packets. Inline packets will + * have port ID in the SA + */ + + for (i = 0; i < nb_pkts; i++) { + if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) { + /* Security offload not enabled. So an LPM lookup is + * required to get the hop + */ + offset = offsetof(struct ip6_hdr, ip6_dst); + ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *, + offset); + memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16); + lpm_pkts++; + } + } + + rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip, hop, + lpm_pkts); + + lpm_pkts = 0; + + for (i = 0; i < nb_pkts; i++) { + if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) { + /* Read hop from the SA */ + pkt_hop = get_hop_for_offload_pkt(pkts[i], 1); + } else { + /* Need to use hop returned by lookup */ + pkt_hop = hop[lpm_pkts++]; + } + + if (pkt_hop == -1) { + core_statistics[lcoreid].lpm6.miss++; + free_pkts(&pkts[i], 1); + continue; + } + send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6); + } +} + +static __rte_always_inline void +drain_tx_buffers(struct lcore_conf *qconf) +{ + struct buffer *buf; + uint32_t portid; + + for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { + buf = &qconf->tx_mbufs[portid]; + if (buf->len == 0) + continue; + send_burst(qconf, buf->len, portid); + buf->len = 0; + } +} + #endif /* _IPSEC_WORKER_H_ */ From patchwork Tue Mar 22 17:58:43 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Nithin Dabilpuram X-Patchwork-Id: 108818 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id BD37CA04FF; Tue, 22 Mar 2022 18:59:44 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 5FE3842807; Tue, 22 Mar 2022 18:59:26 +0100 (CET) Received: from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com [67.231.148.174]) by mails.dpdk.org (Postfix) with ESMTP id AC4B6427FA for ; Tue, 22 Mar 2022 18:59:24 +0100 (CET) Received: from pps.filterd (m0045849.ppops.net 
[127.0.0.1]) by mx0a-0016f401.pphosted.com (8.16.1.2/8.16.1.2) with ESMTP id 22MG9rZv022265; Tue, 22 Mar 2022 10:59:23 -0700 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h=from : to : cc : subject : date : message-id : in-reply-to : references : mime-version : content-transfer-encoding : content-type; s=pfpt0220; bh=N6E6jheVAT5hQN1LMSWsQ6vVQbv4NixeQXlwnUQQj+E=; b=RsifufJNGIgfvYEYNmocr7eVsFs9j4yEvE38b5wfc3uTsW7joHo8GB3Xu6VhoUxwdWZ6 BrJAqqIWpr3KgSg47BNZLlvuKJk4lyPDgzun4s8/cYIQhslFLH4AGxVCA1FUq1pMwdH4 0k+xnzVktAlnmr4eRw2ea8TWBNjUcZ8/J/OTNI8XTNtMVMNi42s0mbh42RonB07GyGCv c35BCAspgUkcdEIDfbTFx87payG6YTvSYz2hHaLM4jHEHl2MzY6DOQEXlunidDfWwnCG y1ITpoAc8E/xUv+pXDkX2D1q0Vo8DohKwSkPdW+lQQxNr4jg5PctXp5FXTzxNweYNMKM Ag== Received: from dc5-exch01.marvell.com ([199.233.59.181]) by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3eyhqw0j6c-1 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT); Tue, 22 Mar 2022 10:59:23 -0700 Received: from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH01.marvell.com (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Tue, 22 Mar 2022 10:59:22 -0700 Received: from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.2 via Frontend Transport; Tue, 22 Mar 2022 10:59:22 -0700 Received: from localhost.localdomain (unknown [10.28.34.24]) by maili.marvell.com (Postfix) with ESMTP id 64B823F704A; Tue, 22 Mar 2022 10:59:20 -0700 (PDT) From: Nithin Dabilpuram To: , Radu Nicolau , Akhil Goyal CC: , , Nithin Dabilpuram Subject: [PATCH 5/7] examples/ipsec-secgw: get security context from lcore conf Date: Tue, 22 Mar 2022 23:28:43 +0530 Message-ID: <20220322175902.363520-5-ndabilpuram@marvell.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220322175902.363520-1-ndabilpuram@marvell.com> References: <20220322175902.363520-1-ndabilpuram@marvell.com> MIME-Version: 1.0 X-Proofpoint-GUID: myyF0IBIjkyi29Vd7KiWNnT6cU72vkD_ X-Proofpoint-ORIG-GUID: myyF0IBIjkyi29Vd7KiWNnT6cU72vkD_ X-Proofpoint-Virus-Version: vendor=baseguard engine=ICAP:2.0.205,Aquarius:18.0.850,Hydra:6.0.425,FMLib:17.11.64.514 definitions=2022-03-22_07,2022-03-22_01,2022-02-23_01 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Store the security context pointer in the lcore Rx queue config and fetch it from there in the fast path for better performance. Currently rte_eth_dev_get_sec_ctx(), which is meant to be a control-path API, is called on a per-packet basis, and every call to it re-checks the ethdev port status.
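For illustration, the intent of this change can be sketched as a small control-path helper plus a cached pointer, using only names that appear in the diff that follows (struct lcore_rx_queue gaining a sec_ctx member, rte_eth_dev_get_sec_ctx(), rte_security_dynfield_is_registered()); the helper and struct below are not part of the patch and only show the idea.

#include <stddef.h>
#include <rte_ethdev.h>
#include <rte_security.h>

/* Illustrative sketch: resolve the device security context once per Rx
 * queue at configuration time, so the Rx fast path can use the cached
 * pointer instead of calling rte_eth_dev_get_sec_ctx(), and thus
 * re-checking ethdev port status, for every received packet.
 */
struct rx_queue_sketch {
	uint16_t port_id;
	uint8_t queue_id;
	struct rte_security_ctx *sec_ctx; /* cached for fast path use */
};

static void
cache_sec_ctx(struct rx_queue_sketch *rxq)
{
	/* Caching only helps when the security dynfield is registered,
	 * i.e. inline-processed mbufs can carry session metadata.
	 */
	if (rte_security_dynfield_is_registered())
		rxq->sec_ctx = (struct rte_security_ctx *)
			rte_eth_dev_get_sec_ctx(rxq->port_id);
	else
		rxq->sec_ctx = NULL;
}

In the patch itself this is done in main() after flow_init() for every Rx queue of every lcore, and prepare_traffic()/prepare_one_packet() receive the cached pointer as a new argument.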
Signed-off-by: Nithin Dabilpuram --- examples/ipsec-secgw/ipsec-secgw.c | 22 +++++++++++++++++++--- examples/ipsec-secgw/ipsec.h | 1 + examples/ipsec-secgw/ipsec_worker.h | 17 +++++++---------- 3 files changed, 27 insertions(+), 13 deletions(-) diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index 1d0ce3a..a04b5e8 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -544,11 +544,11 @@ process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx, static inline void process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts, - uint8_t nb_pkts, uint16_t portid) + uint8_t nb_pkts, uint16_t portid, struct rte_security_ctx *ctx) { struct ipsec_traffic traffic; - prepare_traffic(pkts, &traffic, nb_pkts); + prepare_traffic(ctx, pkts, &traffic, nb_pkts); if (unlikely(single_sa)) { if (is_unprotected_port(portid)) @@ -740,7 +740,8 @@ ipsec_poll_mode_worker(void) if (nb_rx > 0) { core_stats_update_rx(nb_rx); - process_pkts(qconf, pkts, nb_rx, portid); + process_pkts(qconf, pkts, nb_rx, portid, + rxql->sec_ctx); } /* dequeue and process completed crypto-ops */ @@ -3060,6 +3061,21 @@ main(int32_t argc, char **argv) flow_init(); + /* Get security context if available and only if dynamic field is + * registered for fast path access. + */ + if (!rte_security_dynfield_is_registered()) + goto skip_sec_ctx; + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + for (i = 0; i < lcore_conf[lcore_id].nb_rx_queue; i++) { + portid = lcore_conf[lcore_id].rx_queue_list[i].port_id; + lcore_conf[lcore_id].rx_queue_list[i].sec_ctx = + rte_eth_dev_get_sec_ctx(portid); + } + } +skip_sec_ctx: + check_all_ports_link_status(enabled_port_mask); if (stats_interval > 0) diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h index 9a4e7ea..ecad262 100644 --- a/examples/ipsec-secgw/ipsec.h +++ b/examples/ipsec-secgw/ipsec.h @@ -269,6 +269,7 @@ struct cnt_blk { struct lcore_rx_queue { uint16_t port_id; uint8_t queue_id; + struct rte_security_ctx *sec_ctx; } __rte_cache_aligned; struct buffer { diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h index eb966a6..838b3f6 100644 --- a/examples/ipsec-secgw/ipsec_worker.h +++ b/examples/ipsec-secgw/ipsec_worker.h @@ -115,7 +115,8 @@ adjust_ipv6_pktlen(struct rte_mbuf *m, const struct rte_ipv6_hdr *iph, } static __rte_always_inline void -prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t) +prepare_one_packet(struct rte_security_ctx *ctx, struct rte_mbuf *pkt, + struct ipsec_traffic *t) { uint32_t ptype = pkt->packet_type; const struct rte_ether_hdr *eth; @@ -201,13 +202,9 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t) * with the security session. */ - if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD && - rte_security_dynfield_is_registered()) { + if (ctx && pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) { struct ipsec_sa *sa; struct ipsec_mbuf_metadata *priv; - struct rte_security_ctx *ctx = (struct rte_security_ctx *) - rte_eth_dev_get_sec_ctx( - pkt->port); /* Retrieve the userdata registered. Here, the userdata * registered is the SA pointer. 
@@ -229,8 +226,8 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t) } static __rte_always_inline void -prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t, - uint16_t nb_pkts) +prepare_traffic(struct rte_security_ctx *ctx, struct rte_mbuf **pkts, + struct ipsec_traffic *t, uint16_t nb_pkts) { int32_t i; @@ -241,11 +238,11 @@ prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t, for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) { rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET], void *)); - prepare_one_packet(pkts[i], t); + prepare_one_packet(ctx, pkts[i], t); } /* Process left packets */ for (; i < nb_pkts; i++) - prepare_one_packet(pkts[i], t); + prepare_one_packet(ctx, pkts[i], t); } static __rte_always_inline void From patchwork Tue Mar 22 17:58:44 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Nithin Dabilpuram X-Patchwork-Id: 108819 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id D615EA04FF; Tue, 22 Mar 2022 18:59:50 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 39D9A42800; Tue, 22 Mar 2022 18:59:29 +0100 (CET) Received: from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com [67.231.156.173]) by mails.dpdk.org (Postfix) with ESMTP id B72DF42800 for ; Tue, 22 Mar 2022 18:59:27 +0100 (CET) Received: from pps.filterd (m0045851.ppops.net [127.0.0.1]) by mx0b-0016f401.pphosted.com (8.16.1.2/8.16.1.2) with ESMTP id 22MG7w17024192; Tue, 22 Mar 2022 10:59:27 -0700 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h=from : to : cc : subject : date : message-id : in-reply-to : references : mime-version : content-transfer-encoding : content-type; s=pfpt0220; bh=GWMFwZTC802pJASMQNx33a4+r1keDAZeXoKV4C9oB3M=; b=Izgq8nfBtsOzv0j2Mfe3K5OpRY3LEkrfhk7m0o+F1UUbsyhi2M0C346Vs0EydUJeUhL9 lOGUyiU5mfPUh44l2LPjSOeSmVfVdUYnWFnDrLqJVkrQh1B//J0xkKTS3X58xu1m2dSi G4MU/8og37WyDFhQqnxj4+ptDWoLXRW4igHhXRCmSQ02bGEf+N7ugkZjmfGwZkq5Y0gV YL7tUdGZa/YA1HD4wOg8aWcNRXAhANUGEfNcz86LiIRwyMuBNSpsAN5OzM0rKruTyI/v O1pW9xBUOVRzFScxFrM268h+5PMuNtbnOYZCg83F86oT386UDaC2MRntosCwgoNi8BcI +w== Received: from dc5-exch01.marvell.com ([199.233.59.181]) by mx0b-0016f401.pphosted.com (PPS) with ESMTPS id 3ewepn6cjb-1 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT); Tue, 22 Mar 2022 10:59:27 -0700 Received: from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH01.marvell.com (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Tue, 22 Mar 2022 10:59:24 -0700 Received: from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.2 via Frontend Transport; Tue, 22 Mar 2022 10:59:24 -0700 Received: from localhost.localdomain (unknown [10.28.34.24]) by maili.marvell.com (Postfix) with ESMTP id DF2B63F7051; Tue, 22 Mar 2022 10:59:22 -0700 (PDT) From: Nithin Dabilpuram To: , Radu Nicolau , Akhil Goyal CC: , , Nithin Dabilpuram Subject: [PATCH 6/7] examples/ipsec-secgw: update eth header during route lookup Date: Tue, 22 Mar 2022 23:28:44 +0530 Message-ID: <20220322175902.363520-6-ndabilpuram@marvell.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220322175902.363520-1-ndabilpuram@marvell.com> References: <20220322175902.363520-1-ndabilpuram@marvell.com> 
MIME-Version: 1.0 X-Proofpoint-ORIG-GUID: g4pTE5EjiLmpwqibUp-z3mXARJBNlvbv X-Proofpoint-GUID: g4pTE5EjiLmpwqibUp-z3mXARJBNlvbv X-Proofpoint-Virus-Version: vendor=baseguard engine=ICAP:2.0.205,Aquarius:18.0.850,Hydra:6.0.425,FMLib:17.11.64.514 definitions=2022-03-22_07,2022-03-22_01,2022-02-23_01 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Update the ethernet header during route lookup instead of much later, while preparing the Tx burst. The advantage of doing it at route lookup time is that no additional IP version checks based on packet data are needed, and the packet data is already in cache because the route lookup is consuming it anyway. This is also useful for inline protocol offload cases of v4-in-v6 or v6-in-v4 outbound tunnel operations, where the packet data carries no information about the tunnel protocol. Signed-off-by: Nithin Dabilpuram --- examples/ipsec-secgw/ipsec-secgw.c | 9 +- examples/ipsec-secgw/ipsec_worker.h | 197 ++++++++++++++++++++++-------------- 2 files changed, 129 insertions(+), 77 deletions(-) diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index a04b5e8..84f6150 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -562,7 +562,8 @@ process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts, process_pkts_outbound(&qconf->outbound, &traffic); } - route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num); + route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num, + qconf->outbound.ipv4_offloads, true); route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num); } @@ -613,7 +614,8 @@ drain_inbound_crypto_queues(const struct lcore_conf *qconf, if (trf.ip4.num != 0) { inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0, &core_statistics[lcoreid].inbound.spd4); - route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num); + route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num, + qconf->outbound.ipv4_offloads, true); } /* process ipv6 packets */ @@ -647,7 +649,8 @@ drain_outbound_crypto_queues(const struct lcore_conf *qconf, /* process ipv4 packets */ if (trf.ip4.num != 0) - route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num); + route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num, + qconf->outbound.ipv4_offloads, true); /* process ipv6 packets */ if (trf.ip6.num != 0) diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h index 838b3f6..b183248 100644 --- a/examples/ipsec-secgw/ipsec_worker.h +++ b/examples/ipsec-secgw/ipsec_worker.h @@ -245,60 +245,6 @@ prepare_traffic(struct rte_security_ctx *ctx, struct rte_mbuf **pkts, prepare_one_packet(ctx, pkts[i], t); } -static __rte_always_inline void -prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port, - const struct lcore_conf *qconf) -{ - struct ip *ip; - struct rte_ether_hdr *ethhdr; - - ip = rte_pktmbuf_mtod(pkt, struct ip *); - - ethhdr = (struct rte_ether_hdr *) - rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN); - - if (ip->ip_v == IPVERSION) { - pkt->ol_flags |= qconf->outbound.ipv4_offloads; - pkt->l3_len = sizeof(struct ip); - pkt->l2_len = RTE_ETHER_HDR_LEN; - - ip->ip_sum = 0; - - /* calculate IPv4 cksum in SW */ - if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0) - ip->ip_sum = rte_ipv4_cksum((struct rte_ipv4_hdr *)ip); - - ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); - } else { - pkt->ol_flags |= qconf->outbound.ipv6_offloads; -
pkt->l3_len = sizeof(struct ip6_hdr); - pkt->l2_len = RTE_ETHER_HDR_LEN; - - ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); - } - - memcpy(ðhdr->src_addr, ðaddr_tbl[port].src, - sizeof(struct rte_ether_addr)); - memcpy(ðhdr->dst_addr, ðaddr_tbl[port].dst, - sizeof(struct rte_ether_addr)); -} - -static __rte_always_inline void -prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port, - const struct lcore_conf *qconf) -{ - int32_t i; - const int32_t prefetch_offset = 2; - - for (i = 0; i < (nb_pkts - prefetch_offset); i++) { - rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]); - prepare_tx_pkt(pkts[i], port, qconf); - } - /* Process left packets */ - for (; i < nb_pkts; i++) - prepare_tx_pkt(pkts[i], port, qconf); -} - /* Send burst of packets on an output interface */ static __rte_always_inline int32_t send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port) @@ -310,8 +256,6 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port) queueid = qconf->tx_queue_id[port]; m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table; - prepare_tx_burst(m_table, n, port, qconf); - ret = rte_eth_tx_burst(port, queueid, m_table, n); core_stats_update_tx(ret); @@ -332,8 +276,11 @@ static __rte_always_inline uint32_t send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m, uint16_t port, uint8_t proto) { + struct rte_ether_hdr *ethhdr; + struct rte_ipv4_hdr *ip; + struct rte_mbuf *pkt; struct buffer *tbl; - uint32_t len, n; + uint32_t len, n, i; int32_t rc; tbl = qconf->tx_mbufs + port; @@ -347,6 +294,9 @@ send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m, n = RTE_DIM(tbl->m_table) - len; + /* Strip the ethernet header that was prepended earlier */ + rte_pktmbuf_adj(m, RTE_ETHER_HDR_LEN); + if (proto == IPPROTO_IP) rc = rte_ipv4_fragment_packet(m, tbl->m_table + len, n, mtu_size, m->pool, qconf->frag.pool_indir); @@ -354,13 +304,51 @@ send_fragment_packet(struct lcore_conf *qconf, struct rte_mbuf *m, rc = rte_ipv6_fragment_packet(m, tbl->m_table + len, n, mtu_size, m->pool, qconf->frag.pool_indir); - if (rc >= 0) - len += rc; - else + if (rc < 0) { RTE_LOG(ERR, IPSEC, "%s: failed to fragment packet with size %u, " "error code: %d\n", __func__, m->pkt_len, rte_errno); + rc = 0; + } + + i = len; + len += rc; + for (; i < len; i++) { + pkt = tbl->m_table[i]; + + /* Update Ethernet header */ + ethhdr = (struct rte_ether_hdr *) + rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN); + pkt->l2_len = RTE_ETHER_HDR_LEN; + + if (proto == IPPROTO_IP) { + ethhdr->ether_type = + rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + /* Update minimum offload data */ + pkt->l3_len = sizeof(struct rte_ipv4_hdr); + pkt->ol_flags |= qconf->outbound.ipv4_offloads; + + ip = (struct rte_ipv4_hdr *)(ethhdr + 1); + ip->hdr_checksum = 0; + + /* calculate IPv4 cksum in SW */ + if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0) + ip->hdr_checksum = rte_ipv4_cksum(ip); + } else { + ethhdr->ether_type = + rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); + + /* Update minimum offload data */ + pkt->l3_len = sizeof(struct rte_ipv6_hdr); + pkt->ol_flags |= qconf->outbound.ipv6_offloads; + } + + memcpy(ðhdr->src_addr, ðaddr_tbl[port].src, + sizeof(struct rte_ether_addr)); + memcpy(ðhdr->dst_addr, ðaddr_tbl[port].dst, + sizeof(struct rte_ether_addr)); + } free_pkts(&m, 1); return len; @@ -379,7 +367,8 @@ send_single_packet(struct rte_mbuf *m, uint16_t port, uint8_t proto) qconf = &lcore_conf[lcore_id]; len = qconf->tx_mbufs[port].len; - if (m->pkt_len <= mtu_size) { + /* L2 header is already 
part of packet */ + if (m->pkt_len - RTE_ETHER_HDR_LEN <= mtu_size) { qconf->tx_mbufs[port].m_table[len] = m; len++; @@ -475,14 +464,18 @@ get_hop_for_offload_pkt(struct rte_mbuf *pkt, int is_ipv6) } static __rte_always_inline void -route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) +route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], + uint8_t nb_pkts, uint64_t tx_offloads, bool ip_cksum) { uint32_t hop[MAX_PKT_BURST * 2]; uint32_t dst_ip[MAX_PKT_BURST * 2]; + struct rte_ether_hdr *ethhdr; int32_t pkt_hop = 0; uint16_t i, offset; uint16_t lpm_pkts = 0; unsigned int lcoreid = rte_lcore_id(); + struct rte_mbuf *pkt; + uint16_t port; if (nb_pkts == 0) return; @@ -492,12 +485,13 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) */ for (i = 0; i < nb_pkts; i++) { - if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) { + pkt = pkts[i]; + if (!(pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) { /* Security offload not enabled. So an LPM lookup is * required to get the hop */ offset = offsetof(struct ip, ip_dst); - dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkts[i], + dst_ip[lpm_pkts] = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset); dst_ip[lpm_pkts] = rte_be_to_cpu_32(dst_ip[lpm_pkts]); lpm_pkts++; @@ -509,9 +503,10 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) lpm_pkts = 0; for (i = 0; i < nb_pkts; i++) { - if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) { + pkt = pkts[i]; + if (pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) { /* Read hop from the SA */ - pkt_hop = get_hop_for_offload_pkt(pkts[i], 0); + pkt_hop = get_hop_for_offload_pkt(pkt, 0); } else { /* Need to use hop returned by lookup */ pkt_hop = hop[lpm_pkts++]; @@ -519,10 +514,41 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) if ((pkt_hop & RTE_LPM_LOOKUP_SUCCESS) == 0) { core_statistics[lcoreid].lpm4.miss++; - free_pkts(&pkts[i], 1); + free_pkts(&pkt, 1); continue; } - send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IP); + + port = pkt_hop & 0xff; + + /* Update minimum offload data */ + pkt->l3_len = sizeof(struct rte_ipv4_hdr); + pkt->l2_len = RTE_ETHER_HDR_LEN; + pkt->ol_flags |= RTE_MBUF_F_TX_IPV4; + + /* Update Ethernet header */ + ethhdr = (struct rte_ether_hdr *) + rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN); + + if (ip_cksum) { + struct rte_ipv4_hdr *ip; + + pkt->ol_flags |= tx_offloads; + + ip = (struct rte_ipv4_hdr *)(ethhdr + 1); + ip->hdr_checksum = 0; + + /* calculate IPv4 cksum in SW */ + if ((pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0) + ip->hdr_checksum = rte_ipv4_cksum(ip); + } + + ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + memcpy(ðhdr->src_addr, ðaddr_tbl[port].src, + sizeof(struct rte_ether_addr)); + memcpy(ðhdr->dst_addr, ðaddr_tbl[port].dst, + sizeof(struct rte_ether_addr)); + + send_single_packet(pkt, port, IPPROTO_IP); } } @@ -531,11 +557,14 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) { int32_t hop[MAX_PKT_BURST * 2]; uint8_t dst_ip[MAX_PKT_BURST * 2][16]; + struct rte_ether_hdr *ethhdr; uint8_t *ip6_dst; int32_t pkt_hop = 0; uint16_t i, offset; uint16_t lpm_pkts = 0; unsigned int lcoreid = rte_lcore_id(); + struct rte_mbuf *pkt; + uint16_t port; if (nb_pkts == 0) return; @@ -545,12 +574,13 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) */ for (i = 0; i < nb_pkts; i++) { - if (!(pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) { + pkt = pkts[i]; + if (!(pkt->ol_flags & 
RTE_MBUF_F_TX_SEC_OFFLOAD)) { /* Security offload not enabled. So an LPM lookup is * required to get the hop */ offset = offsetof(struct ip6_hdr, ip6_dst); - ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *, + ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset); memcpy(&dst_ip[lpm_pkts][0], ip6_dst, 16); lpm_pkts++; @@ -563,9 +593,10 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) lpm_pkts = 0; for (i = 0; i < nb_pkts; i++) { - if (pkts[i]->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) { + pkt = pkts[i]; + if (pkt->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) { /* Read hop from the SA */ - pkt_hop = get_hop_for_offload_pkt(pkts[i], 1); + pkt_hop = get_hop_for_offload_pkt(pkt, 1); } else { /* Need to use hop returned by lookup */ pkt_hop = hop[lpm_pkts++]; @@ -573,10 +604,28 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) if (pkt_hop == -1) { core_statistics[lcoreid].lpm6.miss++; - free_pkts(&pkts[i], 1); + free_pkts(&pkt, 1); continue; } - send_single_packet(pkts[i], pkt_hop & 0xff, IPPROTO_IPV6); + + port = pkt_hop & 0xff; + + /* Update minimum offload data */ + pkt->ol_flags |= RTE_MBUF_F_TX_IPV6; + pkt->l3_len = sizeof(struct ip6_hdr); + pkt->l2_len = RTE_ETHER_HDR_LEN; + + /* Update Ethernet header */ + ethhdr = (struct rte_ether_hdr *) + rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN); + + ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); + memcpy(ðhdr->src_addr, ðaddr_tbl[port].src, + sizeof(struct rte_ether_addr)); + memcpy(ðhdr->dst_addr, ðaddr_tbl[port].dst, + sizeof(struct rte_ether_addr)); + + send_single_packet(pkt, port, IPPROTO_IPV6); } } From patchwork Tue Mar 22 17:58:45 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Nithin Dabilpuram X-Patchwork-Id: 108820 X-Patchwork-Delegate: gakhil@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 27BB8A04FF; Tue, 22 Mar 2022 18:59:58 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 20DC24280D; Tue, 22 Mar 2022 18:59:32 +0100 (CET) Received: from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com [67.231.148.174]) by mails.dpdk.org (Postfix) with ESMTP id D71744280B for ; Tue, 22 Mar 2022 18:59:29 +0100 (CET) Received: from pps.filterd (m0045849.ppops.net [127.0.0.1]) by mx0a-0016f401.pphosted.com (8.16.1.2/8.16.1.2) with ESMTP id 22MG9rZw022265; Tue, 22 Mar 2022 10:59:29 -0700 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h=from : to : cc : subject : date : message-id : in-reply-to : references : mime-version : content-transfer-encoding : content-type; s=pfpt0220; bh=/k0wRJ/Dfy7sh1FwCA/c1WMhXUTOXBftclhMypOgrqM=; b=d9TOsFMlUYz2cN6kz4W8c/0PkfkKehRjBvdWqgtrZJtI9ednT7x+2i/GIAjHtGIGG8NB JwUj/DS/aXfzmdvmXEf2wzY8ZAbF+4RN9amvUt77uIiXphuGBSRfNyt4ZUVy1kCPxxdo fXncTh+Bfcnm8uV484Lg1dGUWQRzTbWmO2lR9590WOwN4o0ln6gSM3Cl7OmoCsnWLpSF gVE80iEA6UEeD2v0dWe0Zu92kbk4zNMluhTuENKmEi80YYFTmGOeLYEulPz7LRc+Sz6m vA8ilo5sY4FU11JtryZPlNsRq8PmF/5bGzM/hXmglg0R00qLeKkWrxlO2irDHXy5te22 zQ== Received: from dc5-exch01.marvell.com ([199.233.59.181]) by mx0a-0016f401.pphosted.com (PPS) with ESMTPS id 3eyhqw0j72-1 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT); Tue, 22 Mar 2022 10:59:28 -0700 Received: from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH01.marvell.com 
(10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Tue, 22 Mar 2022 10:59:27 -0700 Received: from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.18 via Frontend Transport; Tue, 22 Mar 2022 10:59:27 -0700 Received: from localhost.localdomain (unknown [10.28.34.24]) by maili.marvell.com (Postfix) with ESMTP id 6BED43F704A; Tue, 22 Mar 2022 10:59:25 -0700 (PDT) From: Nithin Dabilpuram To: , Radu Nicolau , Akhil Goyal CC: , , Nithin Dabilpuram Subject: [PATCH 7/7] examples/ipsec-secgw: add poll mode worker for inline proto Date: Tue, 22 Mar 2022 23:28:45 +0530 Message-ID: <20220322175902.363520-7-ndabilpuram@marvell.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20220322175902.363520-1-ndabilpuram@marvell.com> References: <20220322175902.363520-1-ndabilpuram@marvell.com> MIME-Version: 1.0 X-Proofpoint-GUID: f1heMVx2fjbCxqFZaTi9_B2iTNEe7yoE X-Proofpoint-ORIG-GUID: f1heMVx2fjbCxqFZaTi9_B2iTNEe7yoE X-Proofpoint-Virus-Version: vendor=baseguard engine=ICAP:2.0.205,Aquarius:18.0.850,Hydra:6.0.425,FMLib:17.11.64.514 definitions=2022-03-22_07,2022-03-22_01,2022-02-23_01 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Add a separate worker thread for the case where all SAs are of type inline protocol offload and librte_ipsec is enabled, to make that case more optimal. The current default worker supports every kind of SA, which leads to many per-packet checks and branches based on the SA type, of which there can be five. Also make a provision for choosing different poll mode workers for different combinations of SA types, with the default being the existing poll mode worker that supports all SA types. Signed-off-by: Nithin Dabilpuram --- examples/ipsec-secgw/ipsec-secgw.c | 6 +- examples/ipsec-secgw/ipsec-secgw.h | 10 + examples/ipsec-secgw/ipsec_worker.c | 378 +++++++++++++++++++++++++++++++++++- examples/ipsec-secgw/ipsec_worker.h | 4 + examples/ipsec-secgw/sa.c | 9 + 5 files changed, 403 insertions(+), 4 deletions(-) diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index 84f6150..515b344 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -68,8 +68,6 @@ volatile bool force_quit; #define CDEV_MP_CACHE_MULTIPLIER 1.5 /* from rte_mempool.c */ #define MAX_QUEUE_PAIRS 1 -#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ - #define MAX_LCORE_PARAMS 1024 /* @@ -173,7 +171,7 @@ static uint64_t enabled_cryptodev_mask = UINT64_MAX; static int32_t promiscuous_on = 1; static int32_t numa_on = 1; /**< NUMA is enabled by default.
*/ static uint32_t nb_lcores; -static uint32_t single_sa; +uint32_t single_sa; uint32_t nb_bufs_in_pool; /* @@ -238,6 +236,7 @@ struct socket_ctx socket_ctx[NB_SOCKETS]; bool per_port_pool; +uint16_t wrkr_flags; /* * Determine is multi-segment support required: * - either frame buffer size is smaller then mtu @@ -1233,6 +1232,7 @@ parse_args(int32_t argc, char **argv, struct eh_conf *eh_conf) single_sa = 1; single_sa_idx = ret; eh_conf->ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER; + wrkr_flags |= SS_F; printf("Configured with single SA index %u\n", single_sa_idx); break; diff --git a/examples/ipsec-secgw/ipsec-secgw.h b/examples/ipsec-secgw/ipsec-secgw.h index 2edf631..f027360 100644 --- a/examples/ipsec-secgw/ipsec-secgw.h +++ b/examples/ipsec-secgw/ipsec-secgw.h @@ -135,6 +135,7 @@ extern uint32_t unprotected_port_mask; /* Index of SA in single mode */ extern uint32_t single_sa_idx; +extern uint32_t single_sa; extern volatile bool force_quit; @@ -145,6 +146,15 @@ extern bool per_port_pool; extern uint32_t mtu_size; extern uint32_t frag_tbl_sz; +#define SS_F (1U << 0) /* Single SA mode */ +#define INL_PR_F (1U << 1) /* Inline Protocol */ +#define INL_CR_F (1U << 2) /* Inline Crypto */ +#define LA_PR_F (1U << 3) /* Lookaside Protocol */ +#define LA_ANY_F (1U << 4) /* Lookaside Any */ +#define MAX_F (LA_ANY_F << 1) + +extern uint16_t wrkr_flags; + static inline uint8_t is_unprotected_port(uint16_t port_id) { diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c index 8639426..2b96951 100644 --- a/examples/ipsec-secgw/ipsec_worker.c +++ b/examples/ipsec-secgw/ipsec_worker.c @@ -17,6 +17,8 @@ struct port_drv_mode_data { struct rte_security_ctx *ctx; }; +typedef void (*ipsec_worker_fn_t)(void); + static inline enum pkt_type process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp) { @@ -1004,6 +1006,380 @@ ipsec_eventmode_worker(struct eh_conf *conf) eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param); } +static __rte_always_inline void +outb_inl_pro_spd_process(struct sp_ctx *sp, + struct sa_ctx *sa_ctx, + struct traffic_type *ip, + struct traffic_type *match, + struct traffic_type *mismatch, + bool match_flag, + struct ipsec_spd_stats *stats) +{ + uint32_t prev_sa_idx = UINT32_MAX; + struct rte_mbuf *ipsec[MAX_PKT_BURST]; + struct rte_ipsec_session *ips; + uint32_t i, j, j_mis, sa_idx; + struct ipsec_sa *sa = NULL; + uint32_t ipsec_num = 0; + struct rte_mbuf *m; + uint64_t satp; + + if (ip->num == 0 || sp == NULL) + return; + + rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, + ip->num, DEFAULT_MAX_CATEGORIES); + + j = match->num; + j_mis = mismatch->num; + + for (i = 0; i < ip->num; i++) { + m = ip->pkts[i]; + sa_idx = ip->res[i] - 1; + + if (unlikely(ip->res[i] == DISCARD)) { + free_pkts(&m, 1); + + stats->discard++; + } else if (unlikely(ip->res[i] == BYPASS)) { + match->pkts[j++] = m; + + stats->bypass++; + } else { + if (prev_sa_idx == UINT32_MAX) { + prev_sa_idx = sa_idx; + sa = &sa_ctx->sa[sa_idx]; + ips = ipsec_get_primary_session(sa); + satp = rte_ipsec_sa_type(ips->sa); + } + + if (sa_idx != prev_sa_idx) { + prep_process_group(sa, ipsec, ipsec_num); + + /* Prepare packets for outbound */ + rte_ipsec_pkt_process(ips, ipsec, ipsec_num); + + /* Copy to current tr or a different tr */ + if (SATP_OUT_IPV4(satp) == match_flag) { + memcpy(&match->pkts[j], ipsec, + ipsec_num * sizeof(void *)); + j += ipsec_num; + } else { + memcpy(&mismatch->pkts[j_mis], ipsec, + ipsec_num * sizeof(void *)); + j_mis += ipsec_num; + } + + /* Update to new SA */ + 
sa = &sa_ctx->sa[sa_idx]; + ips = ipsec_get_primary_session(sa); + satp = rte_ipsec_sa_type(ips->sa); + ipsec_num = 0; + } + + ipsec[ipsec_num++] = m; + stats->protect++; + } + } + + if (ipsec_num) { + prep_process_group(sa, ipsec, ipsec_num); + + /* Prepare pacekts for outbound */ + rte_ipsec_pkt_process(ips, ipsec, ipsec_num); + + /* Copy to current tr or a different tr */ + if (SATP_OUT_IPV4(satp) == match_flag) { + memcpy(&match->pkts[j], ipsec, + ipsec_num * sizeof(void *)); + j += ipsec_num; + } else { + memcpy(&mismatch->pkts[j_mis], ipsec, + ipsec_num * sizeof(void *)); + j_mis += ipsec_num; + } + } + match->num = j; + mismatch->num = j_mis; +} + +/* Poll mode worker when all SA's are of type inline protocol */ +void +ipsec_poll_mode_wrkr_inl_pr(void) +{ + const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) + / US_PER_S * BURST_TX_DRAIN_US; + struct sp_ctx *sp4_in, *sp6_in, *sp4_out, *sp6_out; + struct rte_mbuf *pkts[MAX_PKT_BURST]; + uint64_t prev_tsc, diff_tsc, cur_tsc; + struct ipsec_core_statistics *stats; + struct rt_ctx *rt4_ctx, *rt6_ctx; + struct sa_ctx *sa_in, *sa_out; + struct traffic_type ip4, ip6; + struct lcore_rx_queue *rxql; + struct rte_mbuf **v4, **v6; + struct ipsec_traffic trf; + struct lcore_conf *qconf; + uint16_t v4_num, v6_num; + int32_t socket_id; + uint32_t lcore_id; + int32_t i, nb_rx; + uint16_t portid; + uint8_t queueid; + + prev_tsc = 0; + lcore_id = rte_lcore_id(); + qconf = &lcore_conf[lcore_id]; + rxql = qconf->rx_queue_list; + socket_id = rte_lcore_to_socket_id(lcore_id); + stats = &core_statistics[lcore_id]; + + rt4_ctx = socket_ctx[socket_id].rt_ip4; + rt6_ctx = socket_ctx[socket_id].rt_ip6; + + sp4_in = socket_ctx[socket_id].sp_ip4_in; + sp6_in = socket_ctx[socket_id].sp_ip6_in; + sa_in = socket_ctx[socket_id].sa_in; + + sp4_out = socket_ctx[socket_id].sp_ip4_out; + sp6_out = socket_ctx[socket_id].sp_ip6_out; + sa_out = socket_ctx[socket_id].sa_out; + + qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir; + + if (qconf->nb_rx_queue == 0) { + RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n", + lcore_id); + return; + } + + RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id); + + for (i = 0; i < qconf->nb_rx_queue; i++) { + portid = rxql[i].port_id; + queueid = rxql[i].queue_id; + RTE_LOG(INFO, IPSEC, + " -- lcoreid=%u portid=%u rxqueueid=%hhu\n", + lcore_id, portid, queueid); + } + + while (!force_quit) { + cur_tsc = rte_rdtsc(); + + /* TX queue buffer drain */ + diff_tsc = cur_tsc - prev_tsc; + + if (unlikely(diff_tsc > drain_tsc)) { + drain_tx_buffers(qconf); + prev_tsc = cur_tsc; + } + + for (i = 0; i < qconf->nb_rx_queue; ++i) { + /* Read packets from RX queues */ + portid = rxql[i].port_id; + queueid = rxql[i].queue_id; + nb_rx = rte_eth_rx_burst(portid, queueid, + pkts, MAX_PKT_BURST); + + if (nb_rx <= 0) + continue; + + core_stats_update_rx(nb_rx); + + prepare_traffic(rxql[i].sec_ctx, pkts, &trf, nb_rx); + + /* Drop any IPsec traffic */ + free_pkts(trf.ipsec.pkts, trf.ipsec.num); + + if (is_unprotected_port(portid)) { + inbound_sp_sa(sp4_in, sa_in, &trf.ip4, + trf.ip4.num, + &stats->inbound.spd4); + + inbound_sp_sa(sp6_in, sa_in, &trf.ip6, + trf.ip6.num, + &stats->inbound.spd6); + + v4 = trf.ip4.pkts; + v4_num = trf.ip4.num; + v6 = trf.ip6.pkts; + v6_num = trf.ip6.num; + } else { + ip4.num = 0; + ip6.num = 0; + + outb_inl_pro_spd_process(sp4_out, sa_out, + &trf.ip4, &ip4, &ip6, + true, + &stats->outbound.spd4); + + outb_inl_pro_spd_process(sp6_out, sa_out, + &trf.ip6, &ip6, &ip4, + false, + 
&stats->outbound.spd6); + v4 = ip4.pkts; + v4_num = ip4.num; + v6 = ip6.pkts; + v6_num = ip6.num; + } + + route4_pkts(rt4_ctx, v4, v4_num, 0, false); + route6_pkts(rt6_ctx, v6, v6_num); + } + } +} + +/* Poll mode worker when all SA's are of type inline protocol + * and single sa mode is enabled. + */ +void +ipsec_poll_mode_wrkr_inl_pr_ss(void) +{ + const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) + / US_PER_S * BURST_TX_DRAIN_US; + struct rte_mbuf *pkts[MAX_PKT_BURST], *pkt; + uint64_t prev_tsc, diff_tsc, cur_tsc; + struct rte_ipsec_session *ips; + struct lcore_rx_queue *rxql; + struct lcore_conf *qconf; + struct ipsec_traffic trf; + struct sa_ctx *sa_out; + uint32_t i, nb_rx, j; + struct ipsec_sa *sa; + int32_t socket_id; + uint32_t lcore_id; + uint16_t portid; + uint8_t queueid; + + prev_tsc = 0; + lcore_id = rte_lcore_id(); + qconf = &lcore_conf[lcore_id]; + rxql = qconf->rx_queue_list; + socket_id = rte_lcore_to_socket_id(lcore_id); + + /* Get SA info */ + sa_out = socket_ctx[socket_id].sa_out; + sa = &sa_out->sa[single_sa_idx]; + ips = ipsec_get_primary_session(sa); + + qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir; + + if (qconf->nb_rx_queue == 0) { + RTE_LOG(DEBUG, IPSEC, "lcore %u has nothing to do\n", + lcore_id); + return; + } + + RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id); + + for (i = 0; i < qconf->nb_rx_queue; i++) { + portid = rxql[i].port_id; + queueid = rxql[i].queue_id; + RTE_LOG(INFO, IPSEC, + " -- lcoreid=%u portid=%u rxqueueid=%hhu\n", + lcore_id, portid, queueid); + } + + while (!force_quit) { + cur_tsc = rte_rdtsc(); + + /* TX queue buffer drain */ + diff_tsc = cur_tsc - prev_tsc; + + if (unlikely(diff_tsc > drain_tsc)) { + drain_tx_buffers(qconf); + prev_tsc = cur_tsc; + } + + for (i = 0; i < qconf->nb_rx_queue; ++i) { + /* Read packets from RX queues */ + portid = rxql[i].port_id; + queueid = rxql[i].queue_id; + nb_rx = rte_eth_rx_burst(portid, queueid, + pkts, MAX_PKT_BURST); + + if (nb_rx <= 0) + continue; + + core_stats_update_rx(nb_rx); + + if (is_unprotected_port(portid)) { + /* Nothing much to do for inbound inline + * decrypted traffic. 
+ */ + for (j = 0; j < nb_rx; j++) { + uint32_t ptype, proto; + + pkt = pkts[j]; + ptype = pkt->packet_type & + RTE_PTYPE_L3_MASK; + if (ptype == RTE_PTYPE_L3_IPV4) + proto = IPPROTO_IP; + else + proto = IPPROTO_IPV6; + + send_single_packet(pkt, portid, proto); + } + + continue; + } + + /* Prepare packets for outbound */ + prepare_traffic(rxql[i].sec_ctx, pkts, &trf, nb_rx); + + /* Drop any IPsec traffic */ + free_pkts(trf.ipsec.pkts, trf.ipsec.num); + + rte_ipsec_pkt_process(ips, trf.ip4.pkts, + trf.ip4.num); + rte_ipsec_pkt_process(ips, trf.ip6.pkts, + trf.ip6.num); + portid = sa->portid; + + /* Send v4 pkts out */ + for (j = 0; j < trf.ip4.num; j++) { + pkt = trf.ip4.pkts[j]; + + rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN); + pkt->l2_len = RTE_ETHER_HDR_LEN; + send_single_packet(pkt, portid, IPPROTO_IP); + } + + /* Send v6 pkts out */ + for (j = 0; j < trf.ip6.num; j++) { + pkt = trf.ip6.pkts[j]; + + rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN); + pkt->l2_len = RTE_ETHER_HDR_LEN; + send_single_packet(pkt, portid, IPPROTO_IPV6); + } + } + } +} + +static void +ipsec_poll_mode_wrkr_launch(void) +{ + static ipsec_worker_fn_t poll_mode_wrkrs[MAX_F] = { + [INL_PR_F] = ipsec_poll_mode_wrkr_inl_pr, + [INL_PR_F | SS_F] = ipsec_poll_mode_wrkr_inl_pr_ss, + }; + ipsec_worker_fn_t fn; + + if (!app_sa_prm.enable) { + fn = ipsec_poll_mode_worker; + } else { + fn = poll_mode_wrkrs[wrkr_flags]; + + /* Always default to all mode worker */ + if (!fn) + fn = ipsec_poll_mode_worker; + } + + /* Launch worker */ + (*fn)(); +} + int ipsec_launch_one_lcore(void *args) { struct eh_conf *conf; @@ -1012,7 +1388,7 @@ int ipsec_launch_one_lcore(void *args) if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) { /* Run in poll mode */ - ipsec_poll_mode_worker(); + ipsec_poll_mode_wrkr_launch(); } else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) { /* Run in event mode */ ipsec_eventmode_worker(conf); diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h index b183248..a040d94 100644 --- a/examples/ipsec-secgw/ipsec_worker.h +++ b/examples/ipsec-secgw/ipsec_worker.h @@ -13,6 +13,8 @@ /* Configure how many packets ahead to prefetch, when reading packets */ #define PREFETCH_OFFSET 3 +#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ + enum pkt_type { PKT_TYPE_PLAIN_IPV4 = 1, PKT_TYPE_IPSEC_IPV4, @@ -42,6 +44,8 @@ struct lcore_conf_ev_tx_int_port_wrkr { } __rte_cache_aligned; void ipsec_poll_mode_worker(void); +void ipsec_poll_mode_wrkr_inl_pr(void); +void ipsec_poll_mode_wrkr_inl_pr_ss(void); int ipsec_launch_one_lcore(void *args); diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c index 36d890f..db3d6bb 100644 --- a/examples/ipsec-secgw/sa.c +++ b/examples/ipsec-secgw/sa.c @@ -936,6 +936,15 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens, ips->type = RTE_SECURITY_ACTION_TYPE_NONE; } + if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) + wrkr_flags |= INL_CR_F; + else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) + wrkr_flags |= INL_PR_F; + else if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) + wrkr_flags |= LA_PR_F; + else + wrkr_flags |= LA_ANY_F; + nb_crypto_sessions++; *ri = *ri + 1; }
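To summarise how the worker selection in this last patch fits together across the files touched: parse_sa_tokens() ORs one flag per SA type into wrkr_flags (sa.c hunk above), the single-SA option adds SS_F, and the launch path indexes a table of specialised workers with the combined value, falling back to the generic ipsec_poll_mode_worker() when librte_ipsec is not enabled or no specialised worker matches. A condensed sketch, reusing the flag values and worker names from the patch (the selection helper itself is illustrative only):

#include <stdbool.h>
#include <stdint.h>

typedef void (*ipsec_worker_fn_t)(void);

/* Flag values mirror examples/ipsec-secgw/ipsec-secgw.h */
#define SS_F		(1U << 0)	/* Single SA mode */
#define INL_PR_F	(1U << 1)	/* Inline Protocol */
#define INL_CR_F	(1U << 2)	/* Inline Crypto */
#define LA_PR_F		(1U << 3)	/* Lookaside Protocol */
#define LA_ANY_F	(1U << 4)	/* Lookaside Any */
#define MAX_F		(LA_ANY_F << 1)

void ipsec_poll_mode_worker(void);	   /* generic, handles any SA mix */
void ipsec_poll_mode_wrkr_inl_pr(void);	   /* all SAs inline protocol */
void ipsec_poll_mode_wrkr_inl_pr_ss(void); /* inline protocol, single SA */

/* Illustrative sketch: pick a specialised worker if one is registered
 * for the accumulated SA-type flags, otherwise use the generic worker.
 */
static ipsec_worker_fn_t
select_poll_worker(uint16_t wrkr_flags, bool librte_ipsec_enabled)
{
	static ipsec_worker_fn_t tbl[MAX_F] = {
		[INL_PR_F] = ipsec_poll_mode_wrkr_inl_pr,
		[INL_PR_F | SS_F] = ipsec_poll_mode_wrkr_inl_pr_ss,
	};

	if (!librte_ipsec_enabled || wrkr_flags >= MAX_F ||
	    !tbl[wrkr_flags])
		return ipsec_poll_mode_worker;
	return tbl[wrkr_flags];
}

For example, a configuration where every SA is inline protocol and the single-SA option is given ends up with wrkr_flags == (INL_PR_F | SS_F) and gets the dedicated single-SA worker; adding even one lookaside SA sets LA_PR_F or LA_ANY_F, no table entry matches, and the generic worker is used.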
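One more note on the ethernet-header change in [PATCH 6/7] earlier in this series: the L2 header can be written at route-lookup time because the routing stage already knows which table (IPv4 or IPv6) produced the hop, so ether_type, l2_len and the checksum flags follow from that decision instead of being derived by re-reading packet data, which for inline protocol v4-in-v6 or v6-in-v4 tunnels does not yet describe the final outer header. A minimal sketch of that idea, with the source and destination MAC addresses passed in explicitly (the application reads them from ethaddr_tbl[port]); the helper is illustrative and assumes enough mbuf headroom, as the application does:

#include <stdbool.h>
#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_mbuf.h>

/* Illustrative sketch: prepend and fill the L2 header based on which
 * route lookup matched, not on the packet contents.
 */
static void
write_l2_from_route(struct rte_mbuf *pkt, bool route_is_ipv4,
		    const struct rte_ether_addr *src,
		    const struct rte_ether_addr *dst)
{
	struct rte_ether_hdr *eth = (struct rte_ether_hdr *)
		rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);

	pkt->l2_len = RTE_ETHER_HDR_LEN;
	eth->ether_type = rte_cpu_to_be_16(route_is_ipv4 ?
		RTE_ETHER_TYPE_IPV4 : RTE_ETHER_TYPE_IPV6);
	rte_ether_addr_copy(src, &eth->src_addr);
	rte_ether_addr_copy(dst, &eth->dst_addr);
}

The same reasoning explains why send_fragment_packet() in that patch strips and re-adds the L2 header around rte_ipv4_fragment_packet()/rte_ipv6_fragment_packet(): the fragmentation library expects the mbuf to start at the L3 header, so the header prepended at routing time is removed first and rebuilt on each fragment.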