From patchwork Wed Sep 25 04:54:07 2019
X-Patchwork-Submitter: Nithin Dabilpuram
X-Patchwork-Id: 59692
X-Patchwork-Delegate: jerinj@marvell.com
From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: Jerin Jacob, Nithin Dabilpuram, Kiran Kumar K
Cc: dev@dpdk.org
Date: Wed, 25 Sep 2019 10:24:07 +0530
Message-ID: <20190925045408.131578-1-ndabilpuram@marvell.com>
Subject: [dpdk-dev] [PATCH] net/octeontx2: add GRE TSO offload support

Extend the existing TSO support to GRE tunneled packets. The offload is
advertised on the same SoC revisions that already support TSO.
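For context, and not part of the diff below: a minimal sketch of how an
application is expected to consume the new offload through the standard
rte_ethdev/rte_mbuf API. The helper names (enable_gre_tso, mark_gre_tso)
and the assumption of plain GRE over IPv4 with no inner Ethernet header
are illustrative only; checksum-related flags are omitted for brevity.

#include <errno.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_tcp.h>

/* Request GRE tunnel TSO at configure time, only if the PMD advertises it.
 * On 96xx A0 / 95xx Ax the capability stays masked, so this returns -ENOTSUP.
 */
static int
enable_gre_tso(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO))
		return -ENOTSUP;

	conf->txmode.offloads |= DEV_TX_OFFLOAD_GRE_TNL_TSO;
	return 0;
}

/* Per packet: flag the mbuf as a GRE-tunneled TCP segment and describe the
 * header layout so the PMD can select the matching LSO format.
 */
static void
mark_gre_tso(struct rte_mbuf *m, uint16_t mss)
{
	m->ol_flags |= PKT_TX_TUNNEL_GRE | PKT_TX_TCP_SEG |
		       PKT_TX_OUTER_IPV4 | PKT_TX_IPV4;
	m->outer_l2_len = sizeof(struct rte_ether_hdr);
	m->outer_l3_len = sizeof(struct rte_ipv4_hdr);
	m->l2_len = 4; /* base GRE header, no inner Ethernet */
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->l4_len = sizeof(struct rte_tcp_hdr);
	m->tso_segsz = mss;
}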
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
Note: This patch depends on "net/octeontx2: add TSO offload support",
i.e. http://patches.dpdk.org/patch/59691/

 drivers/net/octeontx2/otx2_ethdev.c | 111 +++++++++++++++++++++++++++++++++++-
 drivers/net/octeontx2/otx2_ethdev.h |   1 +
 drivers/net/octeontx2/otx2_tx.h     |   2 +-
 3 files changed, 111 insertions(+), 3 deletions(-)

diff --git a/drivers/net/octeontx2/otx2_ethdev.c b/drivers/net/octeontx2/otx2_ethdev.c
index c447f83..428351d 100644
--- a/drivers/net/octeontx2/otx2_ethdev.c
+++ b/drivers/net/octeontx2/otx2_ethdev.c
@@ -33,7 +33,8 @@ nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
 	if (otx2_dev_is_96xx_A0(dev) || otx2_dev_is_95xx_Ax(dev))
 		capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
 			  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-			  DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+			  DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+			  DEV_TX_OFFLOAD_GRE_TNL_TSO);
 
 	return capa;
 }
@@ -655,7 +656,8 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 
 	/* Enable Inner and Outer checksum for Tunnel TSO */
 	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
+		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+		    DEV_TX_OFFLOAD_GRE_TNL_TSO))
 		flags |= (NIX_TX_OFFLOAD_TSO_F |
 			  NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
 			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);
@@ -1322,6 +1324,61 @@ nix_lso_udp_tun_tcp(struct nix_lso_format_cfg *req,
 	field++;
 }
 
+static void
+nix_lso_tun_tcp(struct nix_lso_format_cfg *req,
+		bool outer_v4, bool inner_v4)
+{
+	volatile struct nix_lso_format *field;
+
+	field = (volatile struct nix_lso_format *)&req->fields[0];
+	req->field_mask = NIX_LSO_FIELD_MASK;
+	/* Outer IPv4/IPv6 len */
+	field->layer = NIX_TXLAYER_OL3;
+	field->offset = outer_v4 ? 2 : 4;
+	field->sizem1 = 1; /* 2B */
+	field->alg = NIX_LSOALG_ADD_PAYLEN;
+	field++;
+	if (outer_v4) {
+		/* IPID */
+		field->layer = NIX_TXLAYER_OL3;
+		field->offset = 4;
+		field->sizem1 = 1;
+		/* Incremented linearly per segment */
+		field->alg = NIX_LSOALG_ADD_SEGNUM;
+		field++;
+	}
+
+	/* Inner IPv4/IPv6 */
+	field->layer = NIX_TXLAYER_IL3;
+	field->offset = inner_v4 ? 2 : 4;
+	field->sizem1 = 1; /* 2B */
+	field->alg = NIX_LSOALG_ADD_PAYLEN;
+	field++;
+	if (inner_v4) {
+		/* IPID field */
+		field->layer = NIX_TXLAYER_IL3;
+		field->offset = 4;
+		field->sizem1 = 1;
+		/* Incremented linearly per segment */
+		field->alg = NIX_LSOALG_ADD_SEGNUM;
+		field++;
+	}
+
+	/* TCP sequence number update */
+	field->layer = NIX_TXLAYER_IL4;
+	field->offset = 4;
+	field->sizem1 = 3; /* 4 bytes */
+	field->alg = NIX_LSOALG_ADD_OFFSET;
+	field++;
+
+	/* TCP flags field */
+	field->layer = NIX_TXLAYER_IL4;
+	field->offset = 12;
+	field->sizem1 = 1;
+	field->alg = NIX_LSOALG_TCP_FLAGS;
+	field++;
+}
+
 static int
 nix_setup_lso_formats(struct otx2_eth_dev *dev)
 {
@@ -1414,6 +1471,56 @@ nix_setup_lso_formats(struct otx2_eth_dev *dev)
 		return -EFAULT;
 	otx2_nix_dbg("udp tun v6v6 fmt=%u\n", base + 5);
 
+	/*
+	 * IPv4/TUN HDR/IPv4/TCP LSO
+	 */
+	req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+	nix_lso_tun_tcp(req, true, true);
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		return rc;
+
+	if (rsp->lso_format_idx != base + 6)
+		return -EFAULT;
+	otx2_nix_dbg("tun v4v4 fmt=%u\n", base + 6);
+
+	/*
+	 * IPv4/TUN HDR/IPv6/TCP LSO
+	 */
+	req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+	nix_lso_tun_tcp(req, true, false);
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		return rc;
+
+	if (rsp->lso_format_idx != base + 7)
+		return -EFAULT;
+	otx2_nix_dbg("tun v4v6 fmt=%u\n", base + 7);
+
+	/*
+	 * IPv6/TUN HDR/IPv4/TCP LSO
+	 */
+	req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+	nix_lso_tun_tcp(req, false, true);
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		return rc;
+
+	if (rsp->lso_format_idx != base + 8)
+		return -EFAULT;
+	otx2_nix_dbg("tun v6v4 fmt=%u\n", base + 8);
+
+	/*
+	 * IPv6/TUN HDR/IPv6/TCP LSO
+	 */
+	req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+	nix_lso_tun_tcp(req, false, false);
+	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+	if (rc)
+		return rc;
+	if (rsp->lso_format_idx != base + 9)
+		return -EFAULT;
+	otx2_nix_dbg("tun v6v6 fmt=%u\n", base + 9);
 	return 0;
 }
 
diff --git a/drivers/net/octeontx2/otx2_ethdev.h b/drivers/net/octeontx2/otx2_ethdev.h
index 38f341e..0150baa 100644
--- a/drivers/net/octeontx2/otx2_ethdev.h
+++ b/drivers/net/octeontx2/otx2_ethdev.h
@@ -130,6 +130,7 @@
 	DEV_TX_OFFLOAD_TCP_TSO | \
 	DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
 	DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
+	DEV_TX_OFFLOAD_GRE_TNL_TSO | \
 	DEV_TX_OFFLOAD_MULTI_SEGS | \
 	DEV_TX_OFFLOAD_IPV4_CKSUM)
 
diff --git a/drivers/net/octeontx2/otx2_tx.h b/drivers/net/octeontx2/otx2_tx.h
index e919198..04e859b 100644
--- a/drivers/net/octeontx2/otx2_tx.h
+++ b/drivers/net/octeontx2/otx2_tx.h
@@ -342,7 +342,7 @@ otx2_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
 			w1.il4type = NIX_SENDL4TYPE_TCP_CKSUM;
 			w1.ol4type = is_udp_tun ? NIX_SENDL4TYPE_UDP_CKSUM : 0;
 			/* Update format for UDP tunneled packet */
-			send_hdr_ext->w0.lso_format += (is_udp_tun << 1);
+			send_hdr_ext->w0.lso_format += is_udp_tun ? 2 : 6;
 
 			send_hdr_ext->w0.lso_format +=
 				!!(ol_flags & PKT_TX_OUTER_IPV6) << 1;
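
Addendum (not part of the patch): with the otx2_tx.h change above, the LSO
format index programmed into the send descriptor is composed from the base
index returned by the mailbox plus fixed offsets. The helper below only
restates that arithmetic for clarity; the name lso_format_idx and the
simplified flag handling are assumptions, not driver code.

#include <stdbool.h>
#include <stdint.h>

/* base + 0..1 : plain TCP, inner IPv4/IPv6 (inner IPv6 adds 1)
 * base + 2..5 : UDP tunnels (VXLAN/GENEVE), outer IPv6 adds 2
 * base + 6..9 : other tunnels such as GRE, same inner/outer layout
 */
static inline uint8_t
lso_format_idx(uint8_t base, bool is_tunnel, bool is_udp_tun,
	       bool outer_ipv6, bool inner_ipv6)
{
	uint8_t idx = base + inner_ipv6;	/* inner IPv6 adds 1 */

	if (is_tunnel) {
		idx += is_udp_tun ? 2 : 6;	/* UDP tunnels at base + 2, GRE at base + 6 */
		idx += outer_ipv6 ? 2 : 0;	/* outer IPv6 adds 2 */
	}
	return idx;
}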