[v3,2/2] net/virtio: move VLAN tag insertion to Tx prepare

Message ID 1560680229-8166-2-git-send-email-arybchenko@solarflare.com (mailing list archive)
State Superseded, archived
Delegated to: Maxime Coquelin
Headers
Series [v3,1/2] net/virtio: add Tx preparation

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation fail Compilation issues

Commit Message

Andrew Rybchenko June 16, 2019, 10:17 a.m. UTC
  From: Dilshod Urazov <Dilshod.Urazov@oktetlabs.ru>

VLAN tag insertion should be in Tx prepare, not in Tx burst functions.

Fixes: 4fb7e803eb1a ("ethdev: add Tx preparation")
Cc: stable@dpdk.org

Signed-off-by: Dilshod Urazov <Dilshod.Urazov@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
---
 drivers/net/virtio/virtio_rxtx.c | 50 +++++++++-----------------------
 1 file changed, 14 insertions(+), 36 deletions(-)
  

Comments

Tiwei Bie June 17, 2019, 8:54 a.m. UTC | #1
On Sun, Jun 16, 2019 at 11:17:09AM +0100, Andrew Rybchenko wrote:
> From: Dilshod Urazov <Dilshod.Urazov@oktetlabs.ru>
> 
> VLAN tag insertion should be in Tx prepare, not in Tx burst functions.

Please add some details in the commit log.

Thanks,
Tiwei


> 
> Fixes: 4fb7e803eb1a ("ethdev: add Tx preparation")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Dilshod Urazov <Dilshod.Urazov@oktetlabs.ru>
> Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
> ---
>  drivers/net/virtio/virtio_rxtx.c | 50 +++++++++-----------------------
>  1 file changed, 14 insertions(+), 36 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index 07f8f47de..dcce39e8c 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -1966,6 +1966,20 @@ virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
>  		}
>  #endif
>  
> +		/* Do VLAN tag insertion */
> +		if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
> +			error = rte_vlan_insert(&m);
> +			/* rte_vlan_insert() may change pointer
> +			 * even in the case of failure
> +			 */
> +			tx_pkts[nb_tx] = m;
> +
> +			if (unlikely(error)) {
> +				rte_errno = -error;
> +				break;
> +			}
> +		}
> +
>  		error = rte_net_intel_cksum_prepare(m);
>  		if (unlikely(error)) {
>  			rte_errno = -error;
> @@ -1989,7 +2003,6 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
>  	uint16_t hdr_size = hw->vtnet_hdr_size;
>  	uint16_t nb_tx = 0;
>  	bool in_order = hw->use_inorder_tx;
> -	int error;
>  
>  	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
>  		return nb_tx;
> @@ -2007,17 +2020,6 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
>  		struct rte_mbuf *txm = tx_pkts[nb_tx];
>  		int can_push = 0, slots, need;
>  
> -		/* Do VLAN tag insertion */
> -		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
> -			error = rte_vlan_insert(&txm);
> -			if (unlikely(error)) {
> -				rte_pktmbuf_free(txm);
> -				continue;
> -			}
> -			/* vlan_insert may add a header mbuf */
> -			tx_pkts[nb_tx] = txm;
> -		}
> -
>  		/* optimize ring usage */
>  		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
>  		      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
> @@ -2077,7 +2079,6 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  	struct virtio_hw *hw = vq->hw;
>  	uint16_t hdr_size = hw->vtnet_hdr_size;
>  	uint16_t nb_used, nb_tx = 0;
> -	int error;
>  
>  	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
>  		return nb_tx;
> @@ -2096,17 +2097,6 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  		struct rte_mbuf *txm = tx_pkts[nb_tx];
>  		int can_push = 0, use_indirect = 0, slots, need;
>  
> -		/* Do VLAN tag insertion */
> -		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
> -			error = rte_vlan_insert(&txm);
> -			if (unlikely(error)) {
> -				rte_pktmbuf_free(txm);
> -				continue;
> -			}
> -			/* vlan_insert may add a header mbuf */
> -			tx_pkts[nb_tx] = txm;
> -		}
> -
>  		/* optimize ring usage */
>  		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
>  		      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
> @@ -2176,7 +2166,6 @@ virtio_xmit_pkts_inorder(void *tx_queue,
>  	uint16_t hdr_size = hw->vtnet_hdr_size;
>  	uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
>  	struct rte_mbuf *inorder_pkts[nb_pkts];
> -	int error;
>  
>  	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
>  		return nb_tx;
> @@ -2201,17 +2190,6 @@ virtio_xmit_pkts_inorder(void *tx_queue,
>  		struct rte_mbuf *txm = tx_pkts[nb_tx];
>  		int slots, need;
>  
> -		/* Do VLAN tag insertion */
> -		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
> -			error = rte_vlan_insert(&txm);
> -			if (unlikely(error)) {
> -				rte_pktmbuf_free(txm);
> -				continue;
> -			}
> -			/* vlan_insert may add a header mbuf */
> -			tx_pkts[nb_tx] = txm;
> -		}
> -
>  		/* optimize ring usage */
>  		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
>  		     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
> -- 
> 2.17.1
>
  
Andrew Rybchenko June 17, 2019, 11:35 a.m. UTC | #2
On 6/17/19 11:54 AM, Tiwei Bie wrote:
> On Sun, Jun 16, 2019 at 11:17:09AM +0100, Andrew Rybchenko wrote:
>> From: Dilshod Urazov <Dilshod.Urazov@oktetlabs.ru>
>>
>> VLAN tag insertion should be in Tx prepare, not in Tx burst functions.
> Please add some details in the commit log.

Done; please see v4.

Also, please, note that the following patch [1] fixes the problem with
Tx checksum offload after VLAN insertion.

Andrew.

[1] https://patches.dpdk.org/patch/54830/
  

Patch

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 07f8f47de..dcce39e8c 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -1966,6 +1966,20 @@  virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
 		}
 #endif
 
+		/* Do VLAN tag insertion */
+		if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
+			error = rte_vlan_insert(&m);
+			/* rte_vlan_insert() may change pointer
+			 * even in the case of failure
+			 */
+			tx_pkts[nb_tx] = m;
+
+			if (unlikely(error)) {
+				rte_errno = -error;
+				break;
+			}
+		}
+
 		error = rte_net_intel_cksum_prepare(m);
 		if (unlikely(error)) {
 			rte_errno = -error;
@@ -1989,7 +2003,6 @@  virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t hdr_size = hw->vtnet_hdr_size;
 	uint16_t nb_tx = 0;
 	bool in_order = hw->use_inorder_tx;
-	int error;
 
 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
 		return nb_tx;
@@ -2007,17 +2020,6 @@  virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
 		struct rte_mbuf *txm = tx_pkts[nb_tx];
 		int can_push = 0, slots, need;
 
-		/* Do VLAN tag insertion */
-		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
-			error = rte_vlan_insert(&txm);
-			if (unlikely(error)) {
-				rte_pktmbuf_free(txm);
-				continue;
-			}
-			/* vlan_insert may add a header mbuf */
-			tx_pkts[nb_tx] = txm;
-		}
-
 		/* optimize ring usage */
 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
 		      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
@@ -2077,7 +2079,6 @@  virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	struct virtio_hw *hw = vq->hw;
 	uint16_t hdr_size = hw->vtnet_hdr_size;
 	uint16_t nb_used, nb_tx = 0;
-	int error;
 
 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
 		return nb_tx;
@@ -2096,17 +2097,6 @@  virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		struct rte_mbuf *txm = tx_pkts[nb_tx];
 		int can_push = 0, use_indirect = 0, slots, need;
 
-		/* Do VLAN tag insertion */
-		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
-			error = rte_vlan_insert(&txm);
-			if (unlikely(error)) {
-				rte_pktmbuf_free(txm);
-				continue;
-			}
-			/* vlan_insert may add a header mbuf */
-			tx_pkts[nb_tx] = txm;
-		}
-
 		/* optimize ring usage */
 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
 		      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
@@ -2176,7 +2166,6 @@  virtio_xmit_pkts_inorder(void *tx_queue,
 	uint16_t hdr_size = hw->vtnet_hdr_size;
 	uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
 	struct rte_mbuf *inorder_pkts[nb_pkts];
-	int error;
 
 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
 		return nb_tx;
@@ -2201,17 +2190,6 @@  virtio_xmit_pkts_inorder(void *tx_queue,
 		struct rte_mbuf *txm = tx_pkts[nb_tx];
 		int slots, need;
 
-		/* Do VLAN tag insertion */
-		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
-			error = rte_vlan_insert(&txm);
-			if (unlikely(error)) {
-				rte_pktmbuf_free(txm);
-				continue;
-			}
-			/* vlan_insert may add a header mbuf */
-			tx_pkts[nb_tx] = txm;
-		}
-
 		/* optimize ring usage */
 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
 		     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&