From patchwork Mon Jun 17 11:31:37 2019
X-Patchwork-Submitter: Andrew Rybchenko
X-Patchwork-Id: 54843
X-Patchwork-Delegate: maxime.coquelin@redhat.com
From: Andrew Rybchenko
To: Maxime Coquelin , Tiwei Bie , Zhihong Wang
CC: , Dilshod Urazov ,
Date: Mon, 17 Jun 2019 12:31:37 +0100
Message-ID: <1560771098-20375-1-git-send-email-arybchenko@solarflare.com>
In-Reply-To: <1559587805-1637-1-git-send-email-arybchenko@solarflare.com>
References: <1559587805-1637-1-git-send-email-arybchenko@solarflare.com>
Subject: [dpdk-dev] [PATCH v4 1/2] net/virtio: add Tx preparation

From: Dilshod Urazov

Virtio requires the pseudo-header checksum to be set in the TCP/UDP
checksum field to do checksum offload, but this step was lost when Tx
prepare was introduced. Also, rte_validate_tx_offload() should be used
to validate Tx offloads.

It is also incorrect to call virtio_tso_fix_cksum() after the header is
prepended to the mbuf without taking the prepended size into account,
since the layer 2/3/4 lengths then provide incorrect offsets.

Fixes: 4fb7e803eb1a ("ethdev: add Tx preparation")
Cc: stable@dpdk.org

Signed-off-by: Dilshod Urazov
Signed-off-by: Andrew Rybchenko
Reviewed-by: Tiwei Bie
---
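For reference, the sketch below shows how an application is expected to pair
Tx prepare with Tx burst so the PMD can validate offloads and patch up
checksums before transmission. It is illustrative only and not part of the
patch; the port/queue IDs, burst helper name and error handling are
placeholders.

#include <stdio.h>
#include <rte_errno.h>   /* rte_errno, rte_strerror() */
#include <rte_ethdev.h>  /* rte_eth_tx_prepare(), rte_eth_tx_burst() */
#include <rte_mbuf.h>

/* Illustrative helper: port 0, queue 0, caller-filled burst of mbufs. */
static uint16_t
send_burst(struct rte_mbuf **pkts, uint16_t nb_pkts)
{
        /* Let the PMD validate offload flags and fix up headers (for
         * virtio: seed the TCP/UDP pseudo-header checksum) in advance.
         */
        uint16_t nb_prep = rte_eth_tx_prepare(0, 0, pkts, nb_pkts);

        if (nb_prep < nb_pkts) {
                /* pkts[nb_prep] failed the offload checks; the reason
                 * is reported via rte_errno.
                 */
                printf("tx_prepare stopped at %u: %s\n", nb_prep,
                       rte_strerror(rte_errno));
        }

        /* Transmit only the successfully prepared packets. */
        return rte_eth_tx_burst(0, 0, pkts, nb_prep);
}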
 drivers/net/virtio/virtio_ethdev.c |  1 +
 drivers/net/virtio/virtio_ethdev.h |  3 +++
 drivers/net/virtio/virtio_rxtx.c   | 32 +++++++++++++++++++++++++++++-
 3 files changed, 35 insertions(+), 1 deletion(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index c4570bbf8..97d3c293e 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1473,6 +1473,7 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 {
         struct virtio_hw *hw = eth_dev->data->dev_private;
 
+        eth_dev->tx_pkt_prepare = virtio_xmit_pkts_prepare;
         if (vtpci_packed_queue(hw)) {
                 PMD_INIT_LOG(INFO,
                         "virtio: using packed ring %s Tx path on port %u",
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 45e96f32b..20d331795 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -89,6 +89,9 @@ uint16_t virtio_recv_mergeable_pkts_packed(void *rx_queue,
 uint16_t virtio_recv_pkts_inorder(void *rx_queue,
                 struct rte_mbuf **rx_pkts,
                 uint16_t nb_pkts);
+uint16_t virtio_xmit_pkts_prepare(void *tx_queue, struct rte_mbuf **tx_pkts,
+                uint16_t nb_pkts);
+
 uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                 uint16_t nb_pkts);
 uint16_t virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 1f1178467..07f8f47de 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -559,7 +559,6 @@ virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
 
                 /* TCP Segmentation Offload */
                 if (cookie->ol_flags & PKT_TX_TCP_SEG) {
-                        virtio_tso_fix_cksum(cookie);
                         hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
                                 VIRTIO_NET_HDR_GSO_TCPV6 :
                                 VIRTIO_NET_HDR_GSO_TCPV4;
@@ -1949,6 +1948,37 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
         return nb_rx;
 }
 
+uint16_t
+virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
+                uint16_t nb_pkts)
+{
+        uint16_t nb_tx;
+        int error;
+
+        for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+                struct rte_mbuf *m = tx_pkts[nb_tx];
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+                error = rte_validate_tx_offload(m);
+                if (unlikely(error)) {
+                        rte_errno = -error;
+                        break;
+                }
+#endif
+
+                error = rte_net_intel_cksum_prepare(m);
+                if (unlikely(error)) {
+                        rte_errno = -error;
+                        break;
+                }
+
+                if (m->ol_flags & PKT_TX_TCP_SEG)
+                        virtio_tso_fix_cksum(m);
+        }
+
+        return nb_tx;
+}
+
 uint16_t
 virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
                 uint16_t nb_pkts)
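As an aside on the pseudo-header checksum requirement above, the sketch below
shows in simplified form how the checksum seed is computed for an IPv4/TCP TSO
packet. It is illustrative only, not the exact rte_net_intel_cksum_prepare()
implementation: it uses the current rte_ip.h/rte_tcp.h type names and ignores
IPv6 and fragmented headers. With the TSO flag set, rte_ipv4_phdr_cksum()
leaves the length out of the seed; virtio_tso_fix_cksum() later adds the
length back, since virtio expects it to be included.

#include <rte_ip.h>    /* rte_ipv4_phdr_cksum() */
#include <rte_mbuf.h>
#include <rte_tcp.h>

/* Simplified sketch: seed the TCP checksum with the IPv4 pseudo-header
 * checksum so the device can complete it while segmenting.  Assumes the
 * whole header chain sits in the first segment and l2_len/l3_len are set.
 */
static void
seed_tcp_phdr_cksum_ipv4(struct rte_mbuf *m)
{
        struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod_offset(m,
                        struct rte_ipv4_hdr *, m->l2_len);
        struct rte_tcp_hdr *tcp = rte_pktmbuf_mtod_offset(m,
                        struct rte_tcp_hdr *, m->l2_len + m->l3_len);

        /* With PKT_TX_TCP_SEG in ol_flags the helper omits the length. */
        tcp->cksum = rte_ipv4_phdr_cksum(ip, m->ol_flags);
}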
From patchwork Mon Jun 17 11:31:38 2019
X-Patchwork-Submitter: Andrew Rybchenko
X-Patchwork-Id: 54844
X-Patchwork-Delegate: maxime.coquelin@redhat.com
From: Andrew Rybchenko
To: Maxime Coquelin , Tiwei Bie , Zhihong Wang
CC: , Dilshod Urazov ,
Date: Mon, 17 Jun 2019 12:31:38 +0100
Message-ID: <1560771098-20375-2-git-send-email-arybchenko@solarflare.com>
In-Reply-To: <1560771098-20375-1-git-send-email-arybchenko@solarflare.com>
References: <1559587805-1637-1-git-send-email-arybchenko@solarflare.com> <1560771098-20375-1-git-send-email-arybchenko@solarflare.com>
Subject: [dpdk-dev] [PATCH v4 2/2] net/virtio: move VLAN tag insertion to Tx prepare

From: Dilshod Urazov

VLAN tag insertion should be done in Tx prepare, not in the Tx burst
functions. One of the goals of Tx prepare is to allow preparations to be
done in advance (possibly on a different CPU core) so that the actual
transmit is fast. Tx prepare can also report that a packet does not pass
the Tx offload checks, e.g. that it does not have enough headroom to
insert the VLAN header.

Fixes: 4fb7e803eb1a ("ethdev: add Tx preparation")
Cc: stable@dpdk.org

Signed-off-by: Dilshod Urazov
Signed-off-by: Andrew Rybchenko
Reviewed-by: Tiwei Bie
---
v4:
 - add more details to commit log
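To illustrate what this change means on the application side, here is a
minimal sketch of requesting VLAN insertion and catching a prepare failure.
It is not part of the patch; the port/queue IDs, VLAN ID and helper name are
placeholders.

#include <errno.h>
#include <rte_errno.h>   /* rte_errno */
#include <rte_ethdev.h>  /* rte_eth_tx_prepare(), rte_eth_tx_burst() */
#include <rte_mbuf.h>

/* Illustrative: ask for software VLAN insertion and let Tx prepare do the
 * header manipulation ahead of the Tx burst (possibly on another core).
 */
static int
xmit_with_vlan(struct rte_mbuf *m, uint16_t vlan_id)
{
        m->ol_flags |= PKT_TX_VLAN_PKT;
        m->vlan_tci = vlan_id;

        /* With this patch the tag is inserted here; tx_pkts[0] (and thus
         * the local pointer) may be updated, and on failure the packet is
         * no longer silently dropped by the burst function.
         */
        if (rte_eth_tx_prepare(0, 0, &m, 1) != 1)
                return -rte_errno;  /* e.g. ENOSPC: no headroom for the tag */

        return rte_eth_tx_burst(0, 0, &m, 1) == 1 ? 0 : -EAGAIN;
}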
 drivers/net/virtio/virtio_rxtx.c | 50 +++++++++-----------------------
 1 file changed, 14 insertions(+), 36 deletions(-)

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 07f8f47de..dcce39e8c 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -1966,6 +1966,20 @@ virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
                 }
 #endif
 
+                /* Do VLAN tag insertion */
+                if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
+                        error = rte_vlan_insert(&m);
+                        /* rte_vlan_insert() may change pointer
+                         * even in the case of failure
+                         */
+                        tx_pkts[nb_tx] = m;
+
+                        if (unlikely(error)) {
+                                rte_errno = -error;
+                                break;
+                        }
+                }
+
                 error = rte_net_intel_cksum_prepare(m);
                 if (unlikely(error)) {
                         rte_errno = -error;
@@ -1989,7 +2003,6 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
         uint16_t hdr_size = hw->vtnet_hdr_size;
         uint16_t nb_tx = 0;
         bool in_order = hw->use_inorder_tx;
-        int error;
 
         if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
                 return nb_tx;
@@ -2007,17 +2020,6 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
                 struct rte_mbuf *txm = tx_pkts[nb_tx];
                 int can_push = 0, slots, need;
 
-                /* Do VLAN tag insertion */
-                if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
-                        error = rte_vlan_insert(&txm);
-                        if (unlikely(error)) {
-                                rte_pktmbuf_free(txm);
-                                continue;
-                        }
-                        /* vlan_insert may add a header mbuf */
-                        tx_pkts[nb_tx] = txm;
-                }
-
                 /* optimize ring usage */
                 if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
                      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
@@ -2077,7 +2079,6 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
         struct virtio_hw *hw = vq->hw;
         uint16_t hdr_size = hw->vtnet_hdr_size;
         uint16_t nb_used, nb_tx = 0;
-        int error;
 
         if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
                 return nb_tx;
@@ -2096,17 +2097,6 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                 struct rte_mbuf *txm = tx_pkts[nb_tx];
                 int can_push = 0, use_indirect = 0, slots, need;
 
-                /* Do VLAN tag insertion */
-                if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
-                        error = rte_vlan_insert(&txm);
-                        if (unlikely(error)) {
-                                rte_pktmbuf_free(txm);
-                                continue;
-                        }
-                        /* vlan_insert may add a header mbuf */
-                        tx_pkts[nb_tx] = txm;
-                }
-
                 /* optimize ring usage */
                 if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
                      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
@@ -2176,7 +2166,6 @@ virtio_xmit_pkts_inorder(void *tx_queue,
         uint16_t hdr_size = hw->vtnet_hdr_size;
         uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
         struct rte_mbuf *inorder_pkts[nb_pkts];
-        int error;
 
         if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
                 return nb_tx;
@@ -2201,17 +2190,6 @@ virtio_xmit_pkts_inorder(void *tx_queue,
                 struct rte_mbuf *txm = tx_pkts[nb_tx];
                 int slots, need;
 
-                /* Do VLAN tag insertion */
-                if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
-                        error = rte_vlan_insert(&txm);
-                        if (unlikely(error)) {
-                                rte_pktmbuf_free(txm);
-                                continue;
-                        }
-                        /* vlan_insert may add a header mbuf */
-                        tx_pkts[nb_tx] = txm;
-                }
-
                 /* optimize ring usage */
                 if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
                      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&