[dpdk-dev,RFC,1/6] mbuf: update mbuf structure for QinQ support

Message ID 1430793143-3610-2-git-send-email-helin.zhang@intel.com (mailing list archive)
State Superseded, archived

Commit Message

Zhang, Helin May 5, 2015, 2:32 a.m. UTC
  To support QinQ, 'vlan_tci' should be replaced by 'vlan_tci0' and
'vlan_tci1', and new offload flags 'PKT_RX_QINQ_PKT' and
'PKT_TX_QINQ_PKT' should be added.

Signed-off-by: Helin Zhang <helin.zhang@intel.com>
---
 app/test-pmd/flowgen.c                |  2 +-
 app/test-pmd/macfwd.c                 |  2 +-
 app/test-pmd/macswap.c                |  2 +-
 app/test-pmd/rxonly.c                 |  2 +-
 app/test-pmd/txonly.c                 |  2 +-
 app/test/packet_burst_generator.c     |  4 ++--
 lib/librte_ether/rte_ether.h          |  4 ++--
 lib/librte_mbuf/rte_mbuf.h            | 22 +++++++++++++++++++---
 lib/librte_pmd_e1000/em_rxtx.c        |  8 ++++----
 lib/librte_pmd_e1000/igb_rxtx.c       |  8 ++++----
 lib/librte_pmd_enic/enic_ethdev.c     |  2 +-
 lib/librte_pmd_enic/enic_main.c       |  2 +-
 lib/librte_pmd_fm10k/fm10k_rxtx.c     |  2 +-
 lib/librte_pmd_i40e/i40e_rxtx.c       |  8 ++++----
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c     | 11 +++++------
 lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c |  6 +++---
 16 files changed, 51 insertions(+), 36 deletions(-)
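
To make the proposed API concrete, here is a minimal usage sketch built only from the names this RFC introduces (vlan_tci0, vlan_tci1, PKT_TX_QINQ_PKT, PKT_RX_QINQ_PKT). The helper names, and which of the two TCI fields carries the outer tag, are assumptions made for illustration; the RFC itself does not define them.

#include <stdio.h>
#include <rte_mbuf.h>

/* Illustrative only: ask the PMD to insert two VLAN tags on transmit.
 * The outer/inner assignment below is an assumption, not defined by the RFC. */
static inline void
qinq_tx_prepare(struct rte_mbuf *m, uint16_t outer_tci, uint16_t inner_tci)
{
	m->vlan_tci0 = outer_tci;
	m->vlan_tci1 = inner_tci;
	m->ol_flags |= PKT_TX_QINQ_PKT;
}

/* On receive, a PMD that stripped both tags would be expected to set
 * PKT_RX_QINQ_PKT and fill both TCI fields. */
static inline void
qinq_rx_print(const struct rte_mbuf *m)
{
	if (m->ol_flags & PKT_RX_QINQ_PKT)
		printf(" - QinQ tci0=0x%x tci1=0x%x\n",
		       m->vlan_tci0, m->vlan_tci1);
}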
  

Comments

Ananyev, Konstantin May 5, 2015, 11:04 a.m. UTC | #1
Hi Helin,

> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Helin Zhang
> Sent: Tuesday, May 05, 2015 3:32 AM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH RFC 1/6] mbuf: update mbuf structure for QinQ support
> 
> To support QinQ, 'vlan_tci' should be replaced by 'vlan_tci0' and
> 'vlan_tci1', and new offload flags 'PKT_RX_QINQ_PKT' and
> 'PKT_TX_QINQ_PKT' should be added.
> 
> Signed-off-by: Helin Zhang <helin.zhang@intel.com>
> ---
>  app/test-pmd/flowgen.c                |  2 +-
>  app/test-pmd/macfwd.c                 |  2 +-
>  app/test-pmd/macswap.c                |  2 +-
>  app/test-pmd/rxonly.c                 |  2 +-
>  app/test-pmd/txonly.c                 |  2 +-
>  app/test/packet_burst_generator.c     |  4 ++--
>  lib/librte_ether/rte_ether.h          |  4 ++--
>  lib/librte_mbuf/rte_mbuf.h            | 22 +++++++++++++++++++---
>  lib/librte_pmd_e1000/em_rxtx.c        |  8 ++++----
>  lib/librte_pmd_e1000/igb_rxtx.c       |  8 ++++----
>  lib/librte_pmd_enic/enic_ethdev.c     |  2 +-
>  lib/librte_pmd_enic/enic_main.c       |  2 +-
>  lib/librte_pmd_fm10k/fm10k_rxtx.c     |  2 +-
>  lib/librte_pmd_i40e/i40e_rxtx.c       |  8 ++++----
>  lib/librte_pmd_ixgbe/ixgbe_rxtx.c     | 11 +++++------
>  lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c |  6 +++---
>  16 files changed, 51 insertions(+), 36 deletions(-)
> 
> diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
> index 72016c9..f24b00c 100644
> --- a/app/test-pmd/flowgen.c
> +++ b/app/test-pmd/flowgen.c
> @@ -207,7 +207,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
>  		pkt->nb_segs		= 1;
>  		pkt->pkt_len		= pkt_size;
>  		pkt->ol_flags		= ol_flags;
> -		pkt->vlan_tci		= vlan_tci;
> +		pkt->vlan_tci0		= vlan_tci;
>  		pkt->l2_len		= sizeof(struct ether_hdr);
>  		pkt->l3_len		= sizeof(struct ipv4_hdr);
>  		pkts_burst[nb_pkt]	= pkt;
> diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
> index 035e5eb..590b613 100644
> --- a/app/test-pmd/macfwd.c
> +++ b/app/test-pmd/macfwd.c
> @@ -120,7 +120,7 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
>  		mb->ol_flags = ol_flags;
>  		mb->l2_len = sizeof(struct ether_hdr);
>  		mb->l3_len = sizeof(struct ipv4_hdr);
> -		mb->vlan_tci = txp->tx_vlan_id;
> +		mb->vlan_tci0 = txp->tx_vlan_id;
>  	}
>  	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
>  	fs->tx_packets += nb_tx;
> diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
> index 6729849..c355399 100644
> --- a/app/test-pmd/macswap.c
> +++ b/app/test-pmd/macswap.c
> @@ -122,7 +122,7 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
>  		mb->ol_flags = ol_flags;
>  		mb->l2_len = sizeof(struct ether_hdr);
>  		mb->l3_len = sizeof(struct ipv4_hdr);
> -		mb->vlan_tci = txp->tx_vlan_id;
> +		mb->vlan_tci0 = txp->tx_vlan_id;
>  	}
>  	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
>  	fs->tx_packets += nb_tx;
> diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
> index ac56090..aa2cf7f 100644
> --- a/app/test-pmd/rxonly.c
> +++ b/app/test-pmd/rxonly.c
> @@ -159,7 +159,7 @@ pkt_burst_receive(struct fwd_stream *fs)
>  				       mb->hash.fdir.hash, mb->hash.fdir.id);
>  		}
>  		if (ol_flags & PKT_RX_VLAN_PKT)
> -			printf(" - VLAN tci=0x%x", mb->vlan_tci);
> +			printf(" - VLAN tci=0x%x", mb->vlan_tci0);
>  		if (is_encapsulation) {
>  			struct ipv4_hdr *ipv4_hdr;
>  			struct ipv6_hdr *ipv6_hdr;
> diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
> index ca32c85..4a2827f 100644
> --- a/app/test-pmd/txonly.c
> +++ b/app/test-pmd/txonly.c
> @@ -266,7 +266,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
>  		pkt->nb_segs = tx_pkt_nb_segs;
>  		pkt->pkt_len = tx_pkt_length;
>  		pkt->ol_flags = ol_flags;
> -		pkt->vlan_tci  = vlan_tci;
> +		pkt->vlan_tci0  = vlan_tci;
>  		pkt->l2_len = sizeof(struct ether_hdr);
>  		pkt->l3_len = sizeof(struct ipv4_hdr);
>  		pkts_burst[nb_pkt] = pkt;
> diff --git a/app/test/packet_burst_generator.c b/app/test/packet_burst_generator.c
> index b46eed7..959644c 100644
> --- a/app/test/packet_burst_generator.c
> +++ b/app/test/packet_burst_generator.c
> @@ -270,7 +270,7 @@ nomore_mbuf:
>  		pkt->l2_len = eth_hdr_size;
> 
>  		if (ipv4) {
> -			pkt->vlan_tci  = ETHER_TYPE_IPv4;
> +			pkt->vlan_tci0  = ETHER_TYPE_IPv4;
>  			pkt->l3_len = sizeof(struct ipv4_hdr);
> 
>  			if (vlan_enabled)
> @@ -278,7 +278,7 @@ nomore_mbuf:
>  			else
>  				pkt->ol_flags = PKT_RX_IPV4_HDR;
>  		} else {
> -			pkt->vlan_tci  = ETHER_TYPE_IPv6;
> +			pkt->vlan_tci0  = ETHER_TYPE_IPv6;
>  			pkt->l3_len = sizeof(struct ipv6_hdr);
> 
>  			if (vlan_enabled)
> diff --git a/lib/librte_ether/rte_ether.h b/lib/librte_ether/rte_ether.h
> index 49f4576..6d682a2 100644
> --- a/lib/librte_ether/rte_ether.h
> +++ b/lib/librte_ether/rte_ether.h
> @@ -357,7 +357,7 @@ static inline int rte_vlan_strip(struct rte_mbuf *m)
> 
>  	struct vlan_hdr *vh = (struct vlan_hdr *)(eh + 1);
>  	m->ol_flags |= PKT_RX_VLAN_PKT;
> -	m->vlan_tci = rte_be_to_cpu_16(vh->vlan_tci);
> +	m->vlan_tci0 = rte_be_to_cpu_16(vh->vlan_tci);
> 
>  	/* Copy ether header over rather than moving whole packet */
>  	memmove(rte_pktmbuf_adj(m, sizeof(struct vlan_hdr)),
> @@ -404,7 +404,7 @@ static inline int rte_vlan_insert(struct rte_mbuf **m)
>  	nh->ether_type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
> 
>  	vh = (struct vlan_hdr *) (nh + 1);
> -	vh->vlan_tci = rte_cpu_to_be_16((*m)->vlan_tci);
> +	vh->vlan_tci = rte_cpu_to_be_16((*m)->vlan_tci0);
> 
>  	return 0;
>  }
> diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
> index 70b0987..6eed54f 100644
> --- a/lib/librte_mbuf/rte_mbuf.h
> +++ b/lib/librte_mbuf/rte_mbuf.h
> @@ -101,11 +101,17 @@ extern "C" {
>  #define PKT_RX_TUNNEL_IPV6_HDR (1ULL << 12) /**< RX tunnel packet with IPv6 header. */
>  #define PKT_RX_FDIR_ID       (1ULL << 13) /**< FD id reported if FDIR match. */
>  #define PKT_RX_FDIR_FLX      (1ULL << 14) /**< Flexible bytes reported if FDIR match. */
> +#define PKT_RX_QINQ_PKT     (1ULL << 15)  /**< RX packet with double VLAN stripped. */
>  /* add new RX flags here */
> 
>  /* add new TX flags here */
> 
>  /**
> + * Second VLAN insertion (QinQ) flag.
> + */
> +#define PKT_TX_QINQ_PKT    (1ULL << 49)
> +
> +/**
>   * TCP segmentation offload. To enable this offload feature for a
>   * packet to be transmitted on hardware supporting TSO:
>   *  - set the PKT_TX_TCP_SEG flag in mbuf->ol_flags (this flag implies
> @@ -268,7 +274,6 @@ struct rte_mbuf {
> 
>  	uint16_t data_len;        /**< Amount of data in segment buffer. */
>  	uint32_t pkt_len;         /**< Total pkt len: sum of all segments. */
> -	uint16_t vlan_tci;        /**< VLAN Tag Control Identifier (CPU order) */
>  	uint16_t reserved;

Now there is an implicit 2-byte hole between 'reserved' and 'rss'.
Probably better to make it explicit: make 'reserved' uint32_t.
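
A sketch of what that suggestion would look like in the struct (an illustration of the intent, not code from the patch): widening 'reserved' turns the two bytes freed by removing 'vlan_tci' into explicit padding instead of an unnamed hole in front of the 'hash' union.

	uint16_t data_len;        /**< Amount of data in segment buffer. */
	uint32_t pkt_len;         /**< Total pkt len: sum of all segments. */
	uint32_t reserved;        /* widened from uint16_t so the 2 bytes freed
	                           * by removing vlan_tci stay explicit */

	union {
		uint32_t rss;     /**< RSS hash result if RSS enabled */
		/* ... other hash members unchanged ... */
	} hash;                   /**< hash information */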

Another thing: it looks like your change will break ixgbe vector RX.

>  	union {
>  		uint32_t rss;     /**< RSS hash result if RSS enabled */
> @@ -289,6 +294,15 @@ struct rte_mbuf {
>  		uint32_t usr;	  /**< User defined tags. See rte_distributor_process() */
>  	} hash;                   /**< hash information */
> 
> +	/* VLAN tags */
> +	union {
> +		uint32_t vlan_tags;
> +		struct {
> +			uint16_t vlan_tci0;
> +			uint16_t vlan_tci1;

Do you really need to change vlan_tci to vlan_tci0?
Can't you keep 'vlan_tci' for the first VLAN tag, and add something like 'vlan_tci_ext' or 'vlan_tci_next' for the second one?
That would save you a lot of changes, and users who only use a single VLAN wouldn't need to update their code for 2.1.

> +		};
> +	};
> +
>  	uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */
> 
>  	/* second cache line - fields only used in slow path or on TX */
> @@ -766,7 +780,8 @@ static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
>  	m->next = NULL;
>  	m->pkt_len = 0;
>  	m->tx_offload = 0;
> -	m->vlan_tci = 0;
> +	m->vlan_tci0 = 0;
> +	m->vlan_tci1 = 0;

Why not just:
m->vlan_tags = 0;
?

>  	m->nb_segs = 1;
>  	m->port = 0xff;
> 
> @@ -838,7 +853,8 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
>  	mi->data_off = m->data_off;
>  	mi->data_len = m->data_len;
>  	mi->port = m->port;
> -	mi->vlan_tci = m->vlan_tci;
> +	mi->vlan_tci0 = m->vlan_tci0;
> +	mi->vlan_tci1 = m->vlan_tci1;

Same thing, why not:
mi->vlan_tags = m->vlan_tags;
?

>  	mi->tx_offload = m->tx_offload;
>  	mi->hash = m->hash;
> 
> diff --git a/lib/librte_pmd_e1000/em_rxtx.c b/lib/librte_pmd_e1000/em_rxtx.c
> index 64d067c..422f4ed 100644
> --- a/lib/librte_pmd_e1000/em_rxtx.c
> +++ b/lib/librte_pmd_e1000/em_rxtx.c
> @@ -438,7 +438,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
>  		/* If hardware offload required */
>  		tx_ol_req = (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK));
>  		if (tx_ol_req) {
> -			hdrlen.f.vlan_tci = tx_pkt->vlan_tci;
> +			hdrlen.f.vlan_tci = tx_pkt->vlan_tci0;
>  			hdrlen.f.l2_len = tx_pkt->l2_len;
>  			hdrlen.f.l3_len = tx_pkt->l3_len;
>  			/* If new context to be built or reuse the exist ctx. */
> @@ -534,7 +534,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
>  		/* Set VLAN Tag offload fields. */
>  		if (ol_flags & PKT_TX_VLAN_PKT) {
>  			cmd_type_len |= E1000_TXD_CMD_VLE;
> -			popts_spec = tx_pkt->vlan_tci << E1000_TXD_VLAN_SHIFT;
> +			popts_spec = tx_pkt->vlan_tci0 << E1000_TXD_VLAN_SHIFT;
>  		}
> 
>  		if (tx_ol_req) {
> @@ -800,7 +800,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>  				rx_desc_error_to_pkt_flags(rxd.errors);
> 
>  		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
> -		rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
> +		rxm->vlan_tci0 = rte_le_to_cpu_16(rxd.special);
> 
>  		/*
>  		 * Store the mbuf address into the next entry of the array
> @@ -1026,7 +1026,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>  					rx_desc_error_to_pkt_flags(rxd.errors);
> 
>  		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
> -		rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
> +		rxm->vlan_tci0 = rte_le_to_cpu_16(rxd.special);
> 
>  		/* Prefetch data of first segment, if configured to do so. */
>  		rte_packet_prefetch((char *)first_seg->buf_addr +
> diff --git a/lib/librte_pmd_e1000/igb_rxtx.c b/lib/librte_pmd_e1000/igb_rxtx.c
> index 80d05c0..fda273f 100644
> --- a/lib/librte_pmd_e1000/igb_rxtx.c
> +++ b/lib/librte_pmd_e1000/igb_rxtx.c
> @@ -401,7 +401,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
>  		ol_flags = tx_pkt->ol_flags;
>  		l2_l3_len.l2_len = tx_pkt->l2_len;
>  		l2_l3_len.l3_len = tx_pkt->l3_len;
> -		vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
> +		vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci0;
>  		vlan_macip_lens.f.l2_l3_len = l2_l3_len.u16;
>  		tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
> 
> @@ -784,7 +784,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>  		rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
>  		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
>  		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
> -		rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
> +		rxm->vlan_tci0 = rte_le_to_cpu_16(rxd.wb.upper.vlan);
> 
>  		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
>  		pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
> @@ -1015,10 +1015,10 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>  		first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
> 
>  		/*
> -		 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
> +		 * The vlan_tci0 field is only valid when PKT_RX_VLAN_PKT is
>  		 * set in the pkt_flags field.
>  		 */
> -		first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
> +		first_seg->vlan_tci0 = rte_le_to_cpu_16(rxd.wb.upper.vlan);
>  		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
>  		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
>  		pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
> diff --git a/lib/librte_pmd_enic/enic_ethdev.c b/lib/librte_pmd_enic/enic_ethdev.c
> index 69ad01b..45c0e14 100644
> --- a/lib/librte_pmd_enic/enic_ethdev.c
> +++ b/lib/librte_pmd_enic/enic_ethdev.c
> @@ -506,7 +506,7 @@ static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
>  				return index;
>  		}
>  		pkt_len = tx_pkt->pkt_len;
> -		vlan_id = tx_pkt->vlan_tci;
> +		vlan_id = tx_pkt->vlan_tci0;
>  		ol_flags = tx_pkt->ol_flags;
>  		for (frags = 0; inc_len < pkt_len; frags++) {
>  			if (!tx_pkt)
> diff --git a/lib/librte_pmd_enic/enic_main.c b/lib/librte_pmd_enic/enic_main.c
> index 15313c2..d1660a1 100644
> --- a/lib/librte_pmd_enic/enic_main.c
> +++ b/lib/librte_pmd_enic/enic_main.c
> @@ -490,7 +490,7 @@ static int enic_rq_indicate_buf(struct vnic_rq *rq,
> 
>  	if (vlan_tci) {
>  		rx_pkt->ol_flags |= PKT_RX_VLAN_PKT;
> -		rx_pkt->vlan_tci = vlan_tci;
> +		rx_pkt->vlan_tci0 = vlan_tci;
>  	}
> 
>  	return eop;
> diff --git a/lib/librte_pmd_fm10k/fm10k_rxtx.c b/lib/librte_pmd_fm10k/fm10k_rxtx.c
> index 83bddfc..ba3b8aa 100644
> --- a/lib/librte_pmd_fm10k/fm10k_rxtx.c
> +++ b/lib/librte_pmd_fm10k/fm10k_rxtx.c
> @@ -410,7 +410,7 @@ static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
> 
>  	/* set vlan if requested */
>  	if (mb->ol_flags & PKT_TX_VLAN_PKT)
> -		q->hw_ring[q->next_free].vlan = mb->vlan_tci;
> +		q->hw_ring[q->next_free].vlan = mb->vlan_tci0;
> 
>  	/* fill up the rings */
>  	for (; mb != NULL; mb = mb->next) {
> diff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c
> index 493cfa3..1fe377c 100644
> --- a/lib/librte_pmd_i40e/i40e_rxtx.c
> +++ b/lib/librte_pmd_i40e/i40e_rxtx.c
> @@ -703,7 +703,7 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
>  				I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
>  			mb->data_len = pkt_len;
>  			mb->pkt_len = pkt_len;
> -			mb->vlan_tci = rx_status &
> +			mb->vlan_tci0 = rx_status &
>  				(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
>  			rte_le_to_cpu_16(\
>  				rxdp[j].wb.qword0.lo_dword.l2tag1) : 0;
> @@ -947,7 +947,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
>  		rxm->data_len = rx_packet_len;
>  		rxm->port = rxq->port_id;
> 
> -		rxm->vlan_tci = rx_status &
> +		rxm->vlan_tci0 = rx_status &
>  			(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
>  			rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
>  		pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
> @@ -1106,7 +1106,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
>  		}
> 
>  		first_seg->port = rxq->port_id;
> -		first_seg->vlan_tci = (rx_status &
> +		first_seg->vlan_tci0 = (rx_status &
>  			(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
>  			rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
>  		pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
> @@ -1291,7 +1291,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> 
>  		/* Descriptor based VLAN insertion */
>  		if (ol_flags & PKT_TX_VLAN_PKT) {
> -			tx_flags |= tx_pkt->vlan_tci <<
> +			tx_flags |= tx_pkt->vlan_tci0 <<
>  					I40E_TX_FLAG_L2TAG1_SHIFT;
>  			tx_flags |= I40E_TX_FLAG_INSERT_VLAN;
>  			td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
> diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> index 7f15f15..fd664da 100644
> --- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> +++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> @@ -612,7 +612,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
>  			tx_offload.l2_len = tx_pkt->l2_len;
>  			tx_offload.l3_len = tx_pkt->l3_len;
>  			tx_offload.l4_len = tx_pkt->l4_len;
> -			tx_offload.vlan_tci = tx_pkt->vlan_tci;
> +			tx_offload.vlan_tci = tx_pkt->vlan_tci0;
>  			tx_offload.tso_segsz = tx_pkt->tso_segsz;
> 
>  			/* If new context need be built or reuse the exist ctx. */
> @@ -981,8 +981,7 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
>  			pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len);
>  			mb->data_len = pkt_len;
>  			mb->pkt_len = pkt_len;
> -			mb->vlan_tci = rxdp[j].wb.upper.vlan;
> -			mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
> +			mb->vlan_tci0 = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
> 
>  			/* convert descriptor fields to rte mbuf flags */
>  			pkt_flags  = rx_desc_hlen_type_rss_to_pkt_flags(
> @@ -1327,7 +1326,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> 
>  		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
>  		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
> -		rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
> +		rxm->vlan_tci0 = rte_le_to_cpu_16(rxd.wb.upper.vlan);
> 
>  		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
>  		pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
> @@ -1412,10 +1411,10 @@ ixgbe_fill_cluster_head_buf(
>  	head->port = port_id;
> 
>  	/*
> -	 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
> +	 * The vlan_tci0 field is only valid when PKT_RX_VLAN_PKT is
>  	 * set in the pkt_flags field.
>  	 */
> -	head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
> +	head->vlan_tci0 = rte_le_to_cpu_16(desc->wb.upper.vlan);
>  	hlen_type_rss = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
>  	pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
>  	pkt_flags |= rx_desc_status_to_pkt_flags(staterr);
> diff --git a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
> index d8019f5..57a33c9 100644
> --- a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
> +++ b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
> @@ -405,7 +405,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
>  			/* Add VLAN tag if requested */
>  			if (txm->ol_flags & PKT_TX_VLAN_PKT) {
>  				txd->ti = 1;
> -				txd->tci = rte_cpu_to_le_16(txm->vlan_tci);
> +				txd->tci = rte_cpu_to_le_16(txm->vlan_tci0);
>  			}
> 
>  			/* Record current mbuf for freeing it later in tx complete */
> @@ -629,10 +629,10 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
>  				   rcd->tci);
>  			rxm->ol_flags = PKT_RX_VLAN_PKT;
>  			/* Copy vlan tag in packet buffer */
> -			rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
> +			rxm->vlan_tci0 = rte_le_to_cpu_16((uint16_t)rcd->tci);
>  		} else {
>  			rxm->ol_flags = 0;
> -			rxm->vlan_tci = 0;
> +			rxm->vlan_tci0 = 0;
>  		}
> 
>  		/* Initialize newly received packet buffer */
> --
> 1.9.3
  
Chilikin, Andrey May 5, 2015, 3:42 p.m. UTC | #2
Hi Helin,

I would agree with Konstantin about the new naming for the VLAN tags. I think we can keep the existing name vlan_tci and just name the new VLAN tag differently; I was thinking along the lines of "vlan_tci_outer" or "stag_tci". So vlan_tci will hold the single VLAN when only one L2 tag is present, or the inner VLAN when there are two tags, and "vlan_tci_outer" will hold the outer VLAN when two L2 tags are present. "stag_tci" also looks like a good candidate, as in most cases where two tags are present the outer VLAN is referred to as the S-Tag, even if it is simple tag stacking.

Regards,
Andrey
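
The field layout Andrey is describing would look roughly like this (a sketch of the proposal using the names suggested in this thread; it is not code from the patch):

	uint16_t vlan_tci;        /* single tag, or the inner tag (C-Tag)
	                           * when two L2 tags are present */
	uint16_t vlan_tci_outer;  /* outer tag (S-Tag); only meaningful when
	                           * two L2 tags are present ("stag_tci" is the
	                           * alternative name suggested above) */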

> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Ananyev,
> Konstantin
> Sent: Tuesday, May 5, 2015 12:05 PM
> To: Zhang, Helin; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH RFC 1/6] mbuf: update mbuf structure for
> QinQ support
  
Ananyev, Konstantin May 5, 2015, 10:37 p.m. UTC | #3
> -----Original Message-----
> From: Chilikin, Andrey
> Sent: Tuesday, May 05, 2015 4:43 PM
> To: Ananyev, Konstantin; Zhang, Helin; dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH RFC 1/6] mbuf: update mbuf structure for QinQ support
> 
> Hi Helin,
> 
> I would agree with Konstantin about the new naming for the VLAN tags. I think we can keep the existing name vlan_tci and just name the
> new VLAN tag differently; I was thinking along the lines of "vlan_tci_outer" or "stag_tci". So vlan_tci will hold the single VLAN when only
> one L2 tag is present, or the inner VLAN when there are two tags, and "vlan_tci_outer" will hold the outer VLAN when two L2 tags are
> present. "stag_tci" also looks like a good candidate, as in most cases where two tags are present the outer VLAN is referred to as the
> S-Tag, even if it is simple tag stacking.

Yep, I suppose "vlan_tci_outer" or "stag_tci" is a better name than what I suggested.
Konstantin

> 
> Regards,
> Andrey
> > >  	 */
> > > -	head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
> > > +	head->vlan_tci0 = rte_le_to_cpu_16(desc->wb.upper.vlan);
> > >  	hlen_type_rss = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
> > >  	pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
> > >  	pkt_flags |= rx_desc_status_to_pkt_flags(staterr);
> > > diff --git a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
> > > b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
> > > index d8019f5..57a33c9 100644
> > > --- a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
> > > +++ b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
> > > @@ -405,7 +405,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct
> > rte_mbuf **tx_pkts,
> > >  			/* Add VLAN tag if requested */
> > >  			if (txm->ol_flags & PKT_TX_VLAN_PKT) {
> > >  				txd->ti = 1;
> > > -				txd->tci = rte_cpu_to_le_16(txm->vlan_tci);
> > > +				txd->tci = rte_cpu_to_le_16(txm->vlan_tci0);
> > >  			}
> > >
> > >  			/* Record current mbuf for freeing it later in tx
> > complete */ @@
> > > -629,10 +629,10 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf
> > **rx_pkts, uint16_t nb_pkts)
> > >  				   rcd->tci);
> > >  			rxm->ol_flags = PKT_RX_VLAN_PKT;
> > >  			/* Copy vlan tag in packet buffer */
> > > -			rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd-
> > >tci);
> > > +			rxm->vlan_tci0 = rte_le_to_cpu_16((uint16_t)rcd-
> > >tci);
> > >  		} else {
> > >  			rxm->ol_flags = 0;
> > > -			rxm->vlan_tci = 0;
> > > +			rxm->vlan_tci0 = 0;
> > >  		}
> > >
> > >  		/* Initialize newly received packet buffer */
> > > --
> > > 1.9.3
  
Zhang, Helin May 6, 2015, 4:06 a.m. UTC | #4
> -----Original Message-----
> From: Ananyev, Konstantin
> Sent: Tuesday, May 5, 2015 7:05 PM
> To: Zhang, Helin; dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH RFC 1/6] mbuf: update mbuf structure for
> QinQ support
> 
> Hi Helin,
> 
> > -----Original Message-----
> > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Helin Zhang
> > Sent: Tuesday, May 05, 2015 3:32 AM
> > To: dev@dpdk.org
> > Subject: [dpdk-dev] [PATCH RFC 1/6] mbuf: update mbuf structure for
> > QinQ support
> >
> > To support QinQ, 'vlan_tci' should be replaced by 'vlan_tci0' and
> > 'vlan_tci1'. Also new offload flags of 'PKT_RX_QINQ_PKT' and
> > 'PKT_TX_QINQ_PKT' should be added.
> >
> > Signed-off-by: Helin Zhang <helin.zhang@intel.com>
> > ---
> >  app/test-pmd/flowgen.c                |  2 +-
> >  app/test-pmd/macfwd.c                 |  2 +-
> >  app/test-pmd/macswap.c                |  2 +-
> >  app/test-pmd/rxonly.c                 |  2 +-
> >  app/test-pmd/txonly.c                 |  2 +-
> >  app/test/packet_burst_generator.c     |  4 ++--
> >  lib/librte_ether/rte_ether.h          |  4 ++--
> >  lib/librte_mbuf/rte_mbuf.h            | 22
> +++++++++++++++++++---
> >  lib/librte_pmd_e1000/em_rxtx.c        |  8 ++++----
> >  lib/librte_pmd_e1000/igb_rxtx.c       |  8 ++++----
> >  lib/librte_pmd_enic/enic_ethdev.c     |  2 +-
> >  lib/librte_pmd_enic/enic_main.c       |  2 +-
> >  lib/librte_pmd_fm10k/fm10k_rxtx.c     |  2 +-
> >  lib/librte_pmd_i40e/i40e_rxtx.c       |  8 ++++----
> >  lib/librte_pmd_ixgbe/ixgbe_rxtx.c     | 11 +++++------
> >  lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c |  6 +++---
> >  16 files changed, 51 insertions(+), 36 deletions(-)
> >
> > diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c index
> > 72016c9..f24b00c 100644
> > --- a/app/test-pmd/flowgen.c
> > +++ b/app/test-pmd/flowgen.c
> > @@ -207,7 +207,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
> >  		pkt->nb_segs		= 1;
> >  		pkt->pkt_len		= pkt_size;
> >  		pkt->ol_flags		= ol_flags;
> > -		pkt->vlan_tci		= vlan_tci;
> > +		pkt->vlan_tci0		= vlan_tci;
> >  		pkt->l2_len		= sizeof(struct ether_hdr);
> >  		pkt->l3_len		= sizeof(struct ipv4_hdr);
> >  		pkts_burst[nb_pkt]	= pkt;
> > diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c index
> > 035e5eb..590b613 100644
> > --- a/app/test-pmd/macfwd.c
> > +++ b/app/test-pmd/macfwd.c
> > @@ -120,7 +120,7 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
> >  		mb->ol_flags = ol_flags;
> >  		mb->l2_len = sizeof(struct ether_hdr);
> >  		mb->l3_len = sizeof(struct ipv4_hdr);
> > -		mb->vlan_tci = txp->tx_vlan_id;
> > +		mb->vlan_tci0 = txp->tx_vlan_id;
> >  	}
> >  	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst,
> nb_rx);
> >  	fs->tx_packets += nb_tx;
> > diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c index
> > 6729849..c355399 100644
> > --- a/app/test-pmd/macswap.c
> > +++ b/app/test-pmd/macswap.c
> > @@ -122,7 +122,7 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
> >  		mb->ol_flags = ol_flags;
> >  		mb->l2_len = sizeof(struct ether_hdr);
> >  		mb->l3_len = sizeof(struct ipv4_hdr);
> > -		mb->vlan_tci = txp->tx_vlan_id;
> > +		mb->vlan_tci0 = txp->tx_vlan_id;
> >  	}
> >  	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst,
> nb_rx);
> >  	fs->tx_packets += nb_tx;
> > diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c index
> > ac56090..aa2cf7f 100644
> > --- a/app/test-pmd/rxonly.c
> > +++ b/app/test-pmd/rxonly.c
> > @@ -159,7 +159,7 @@ pkt_burst_receive(struct fwd_stream *fs)
> >  				       mb->hash.fdir.hash, mb->hash.fdir.id);
> >  		}
> >  		if (ol_flags & PKT_RX_VLAN_PKT)
> > -			printf(" - VLAN tci=0x%x", mb->vlan_tci);
> > +			printf(" - VLAN tci=0x%x", mb->vlan_tci0);
> >  		if (is_encapsulation) {
> >  			struct ipv4_hdr *ipv4_hdr;
> >  			struct ipv6_hdr *ipv6_hdr;
> > diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c index
> > ca32c85..4a2827f 100644
> > --- a/app/test-pmd/txonly.c
> > +++ b/app/test-pmd/txonly.c
> > @@ -266,7 +266,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
> >  		pkt->nb_segs = tx_pkt_nb_segs;
> >  		pkt->pkt_len = tx_pkt_length;
> >  		pkt->ol_flags = ol_flags;
> > -		pkt->vlan_tci  = vlan_tci;
> > +		pkt->vlan_tci0  = vlan_tci;
> >  		pkt->l2_len = sizeof(struct ether_hdr);
> >  		pkt->l3_len = sizeof(struct ipv4_hdr);
> >  		pkts_burst[nb_pkt] = pkt;
> > diff --git a/app/test/packet_burst_generator.c
> > b/app/test/packet_burst_generator.c
> > index b46eed7..959644c 100644
> > --- a/app/test/packet_burst_generator.c
> > +++ b/app/test/packet_burst_generator.c
> > @@ -270,7 +270,7 @@ nomore_mbuf:
> >  		pkt->l2_len = eth_hdr_size;
> >
> >  		if (ipv4) {
> > -			pkt->vlan_tci  = ETHER_TYPE_IPv4;
> > +			pkt->vlan_tci0  = ETHER_TYPE_IPv4;
> >  			pkt->l3_len = sizeof(struct ipv4_hdr);
> >
> >  			if (vlan_enabled)
> > @@ -278,7 +278,7 @@ nomore_mbuf:
> >  			else
> >  				pkt->ol_flags = PKT_RX_IPV4_HDR;
> >  		} else {
> > -			pkt->vlan_tci  = ETHER_TYPE_IPv6;
> > +			pkt->vlan_tci0  = ETHER_TYPE_IPv6;
> >  			pkt->l3_len = sizeof(struct ipv6_hdr);
> >
> >  			if (vlan_enabled)
> > diff --git a/lib/librte_ether/rte_ether.h
> > b/lib/librte_ether/rte_ether.h index 49f4576..6d682a2 100644
> > --- a/lib/librte_ether/rte_ether.h
> > +++ b/lib/librte_ether/rte_ether.h
> > @@ -357,7 +357,7 @@ static inline int rte_vlan_strip(struct rte_mbuf
> > *m)
> >
> >  	struct vlan_hdr *vh = (struct vlan_hdr *)(eh + 1);
> >  	m->ol_flags |= PKT_RX_VLAN_PKT;
> > -	m->vlan_tci = rte_be_to_cpu_16(vh->vlan_tci);
> > +	m->vlan_tci0 = rte_be_to_cpu_16(vh->vlan_tci);
> >
> >  	/* Copy ether header over rather than moving whole packet */
> >  	memmove(rte_pktmbuf_adj(m, sizeof(struct vlan_hdr)), @@ -404,7
> > +404,7 @@ static inline int rte_vlan_insert(struct rte_mbuf **m)
> >  	nh->ether_type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
> >
> >  	vh = (struct vlan_hdr *) (nh + 1);
> > -	vh->vlan_tci = rte_cpu_to_be_16((*m)->vlan_tci);
> > +	vh->vlan_tci = rte_cpu_to_be_16((*m)->vlan_tci0);
> >
> >  	return 0;
> >  }
> > diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
> > index 70b0987..6eed54f 100644
> > --- a/lib/librte_mbuf/rte_mbuf.h
> > +++ b/lib/librte_mbuf/rte_mbuf.h
> > @@ -101,11 +101,17 @@ extern "C" {
> >  #define PKT_RX_TUNNEL_IPV6_HDR (1ULL << 12) /**< RX tunnel
> packet with IPv6 header. */
> >  #define PKT_RX_FDIR_ID       (1ULL << 13) /**< FD id reported if
> FDIR match. */
> >  #define PKT_RX_FDIR_FLX      (1ULL << 14) /**< Flexible bytes
> reported if FDIR match. */
> > +#define PKT_RX_QINQ_PKT     (1ULL << 15)  /**< RX packet with
> double VLAN stripped. */
> >  /* add new RX flags here */
> >
> >  /* add new TX flags here */
> >
> >  /**
> > + * Second VLAN insertion (QinQ) flag.
> > + */
> > +#define PKT_TX_QINQ_PKT    (1ULL << 49)
> > +
> > +/**
> >   * TCP segmentation offload. To enable this offload feature for a
> >   * packet to be transmitted on hardware supporting TSO:
> >   *  - set the PKT_TX_TCP_SEG flag in mbuf->ol_flags (this flag
> > implies @@ -268,7 +274,6 @@ struct rte_mbuf {
> >
> >  	uint16_t data_len;        /**< Amount of data in segment buffer. */
> >  	uint32_t pkt_len;         /**< Total pkt len: sum of all segments. */
> > -	uint16_t vlan_tci;        /**< VLAN Tag Control Identifier (CPU
> order) */
> >  	uint16_t reserved;
> 
> Now here is an implicit 2-byte hole between 'reserved' and 'rss'.
> Probably better to make it explicit - make 'reserved' uint32_t.
Yes, the layout will be changed according to the demands of Vector PMD.
The vlan structure will be kept the same, but the mbuf structure layout will
be re-organized a bit.
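
For illustration, a minimal sketch of the alignment issue being discussed
(field names taken from the patch context above; this is not the final layout):

	uint32_t pkt_len;    /* 4 bytes */
	uint16_t reserved;   /* 2 bytes; with vlan_tci removed the compiler
	                      * inserts a 2-byte pad here so that the 32-bit
	                      * 'hash' union below stays 4-byte aligned */
	union {
		uint32_t rss;
		/* ... */
	} hash;

	/* making the reserved space explicit, e.g. 'uint32_t reserved;',
	 * removes the hidden hole */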

> 
> Another thing - it looks like your change will break  ixgbe vector RX.
Yes, in the cover-letter, I noted that the vector PMD will be updated soon
together with the code changes.

> 
> >  	union {
> >  		uint32_t rss;     /**< RSS hash result if RSS enabled */
> > @@ -289,6 +294,15 @@ struct rte_mbuf {
> >  		uint32_t usr;	  /**< User defined tags. See
> rte_distributor_process() */
> >  	} hash;                   /**< hash information */
> >
> > +	/* VLAN tags */
> > +	union {
> > +		uint32_t vlan_tags;
> > +		struct {
> > +			uint16_t vlan_tci0;
> > +			uint16_t vlan_tci1;
> 
> Do you really need to change vlan_tci to vlan_tci0?
> Can't you keep 'vlan_tci' for first vlan tag, and add something like
> 'vlan_tci_ext', or 'vlan_tci_next' for second one?
> Would save you a lot of changes, again users who use single vlan wouldn't
> need to update their code for 2.1.
Yes, good point! The names came from the original mbuf definition done by
Bruce long ago. If more people prefer keeping the old name and just adding a new
one, I will do that in the next version of the patch set.
Thank you all!
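
For reference, a rough sketch of that alternative, keeping the existing field
name so single-VLAN users need no change (the name of the second field is
still to be decided):

	/* sketch only - not the final definition */
	union {
		uint32_t vlan_tags;          /* both tags as one 32-bit word */
		struct {
			uint16_t vlan_tci;       /* existing name, first/inner tag */
			uint16_t vlan_tci_ext;   /* second tag, name TBD */
		};
	};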

> 
> > +		};
> > +	};
> > +
> >  	uint32_t seqn; /**< Sequence number. See also rte_reorder_insert()
> > */
> >
> >  	/* second cache line - fields only used in slow path or on TX */ @@
> > -766,7 +780,8 @@ static inline void rte_pktmbuf_reset(struct rte_mbuf
> *m)
> >  	m->next = NULL;
> >  	m->pkt_len = 0;
> >  	m->tx_offload = 0;
> > -	m->vlan_tci = 0;
> > +	m->vlan_tci0 = 0;
> > +	m->vlan_tci1 = 0;
> 
> Why just not:
> m-> vlan_tags = 0;
> ?
Accepted. Good point!

> 
> >  	m->nb_segs = 1;
> >  	m->port = 0xff;
> >
> > @@ -838,7 +853,8 @@ static inline void rte_pktmbuf_attach(struct
> rte_mbuf *mi, struct rte_mbuf *m)
> >  	mi->data_off = m->data_off;
> >  	mi->data_len = m->data_len;
> >  	mi->port = m->port;
> > -	mi->vlan_tci = m->vlan_tci;
> > +	mi->vlan_tci0 = m->vlan_tci0;
> > +	mi->vlan_tci1 = m->vlan_tci1;
> 
> Same thing, why not:
> mi-> vlan_tags = m-> vlan_tags;
> ?
Accepted. Good point!
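
Taken together, both places then reduce to a single 32-bit access on the
union, e.g. (sketch only):

	m->vlan_tags = 0;              /* in rte_pktmbuf_reset() */
	mi->vlan_tags = m->vlan_tags;  /* in rte_pktmbuf_attach() */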

Regards,
Helin

> 
> >  	mi->tx_offload = m->tx_offload;
> >  	mi->hash = m->hash;
> >
> > diff --git a/lib/librte_pmd_e1000/em_rxtx.c
> > b/lib/librte_pmd_e1000/em_rxtx.c index 64d067c..422f4ed 100644
> > --- a/lib/librte_pmd_e1000/em_rxtx.c
> > +++ b/lib/librte_pmd_e1000/em_rxtx.c
> > @@ -438,7 +438,7 @@ eth_em_xmit_pkts(void *tx_queue, struct
> rte_mbuf **tx_pkts,
> >  		/* If hardware offload required */
> >  		tx_ol_req = (ol_flags & (PKT_TX_IP_CKSUM |
> PKT_TX_L4_MASK));
> >  		if (tx_ol_req) {
> > -			hdrlen.f.vlan_tci = tx_pkt->vlan_tci;
> > +			hdrlen.f.vlan_tci = tx_pkt->vlan_tci0;
> >  			hdrlen.f.l2_len = tx_pkt->l2_len;
> >  			hdrlen.f.l3_len = tx_pkt->l3_len;
> >  			/* If new context to be built or reuse the exist ctx. */ @@
> -534,7
> > +534,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf
> **tx_pkts,
> >  		/* Set VLAN Tag offload fields. */
> >  		if (ol_flags & PKT_TX_VLAN_PKT) {
> >  			cmd_type_len |= E1000_TXD_CMD_VLE;
> > -			popts_spec = tx_pkt->vlan_tci << E1000_TXD_VLAN_SHIFT;
> > +			popts_spec = tx_pkt->vlan_tci0 << E1000_TXD_VLAN_SHIFT;
> >  		}
> >
> >  		if (tx_ol_req) {
> > @@ -800,7 +800,7 @@ eth_em_recv_pkts(void *rx_queue, struct
> rte_mbuf **rx_pkts,
> >  				rx_desc_error_to_pkt_flags(rxd.errors);
> >
> >  		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
> > -		rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
> > +		rxm->vlan_tci0 = rte_le_to_cpu_16(rxd.special);
> >
> >  		/*
> >  		 * Store the mbuf address into the next entry of the array @@
> > -1026,7 +1026,7 @@ eth_em_recv_scattered_pkts(void *rx_queue,
> struct rte_mbuf **rx_pkts,
> >  					rx_desc_error_to_pkt_flags(rxd.errors);
> >
> >  		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
> > -		rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
> > +		rxm->vlan_tci0 = rte_le_to_cpu_16(rxd.special);
> >
> >  		/* Prefetch data of first segment, if configured to do so. */
> >  		rte_packet_prefetch((char *)first_seg->buf_addr + diff --git
> > a/lib/librte_pmd_e1000/igb_rxtx.c b/lib/librte_pmd_e1000/igb_rxtx.c
> > index 80d05c0..fda273f 100644
> > --- a/lib/librte_pmd_e1000/igb_rxtx.c
> > +++ b/lib/librte_pmd_e1000/igb_rxtx.c
> > @@ -401,7 +401,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct
> rte_mbuf **tx_pkts,
> >  		ol_flags = tx_pkt->ol_flags;
> >  		l2_l3_len.l2_len = tx_pkt->l2_len;
> >  		l2_l3_len.l3_len = tx_pkt->l3_len;
> > -		vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
> > +		vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci0;
> >  		vlan_macip_lens.f.l2_l3_len = l2_l3_len.u16;
> >  		tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
> >
> > @@ -784,7 +784,7 @@ eth_igb_recv_pkts(void *rx_queue, struct
> rte_mbuf **rx_pkts,
> >  		rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
> >  		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
> >  		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
> > -		rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
> > +		rxm->vlan_tci0 = rte_le_to_cpu_16(rxd.wb.upper.vlan);
> >
> >  		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
> >  		pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
> > @@ -1015,10 +1015,10 @@ eth_igb_recv_scattered_pkts(void
> *rx_queue, struct rte_mbuf **rx_pkts,
> >  		first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
> >
> >  		/*
> > -		 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
> > +		 * The vlan_tci0 field is only valid when PKT_RX_VLAN_PKT is
> >  		 * set in the pkt_flags field.
> >  		 */
> > -		first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
> > +		first_seg->vlan_tci0 = rte_le_to_cpu_16(rxd.wb.upper.vlan);
> >  		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
> >  		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
> >  		pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
> > diff --git a/lib/librte_pmd_enic/enic_ethdev.c
> > b/lib/librte_pmd_enic/enic_ethdev.c
> > index 69ad01b..45c0e14 100644
> > --- a/lib/librte_pmd_enic/enic_ethdev.c
> > +++ b/lib/librte_pmd_enic/enic_ethdev.c
> > @@ -506,7 +506,7 @@ static uint16_t enicpmd_xmit_pkts(void
> *tx_queue, struct rte_mbuf **tx_pkts,
> >  				return index;
> >  		}
> >  		pkt_len = tx_pkt->pkt_len;
> > -		vlan_id = tx_pkt->vlan_tci;
> > +		vlan_id = tx_pkt->vlan_tci0;
> >  		ol_flags = tx_pkt->ol_flags;
> >  		for (frags = 0; inc_len < pkt_len; frags++) {
> >  			if (!tx_pkt)
> > diff --git a/lib/librte_pmd_enic/enic_main.c
> > b/lib/librte_pmd_enic/enic_main.c index 15313c2..d1660a1 100644
> > --- a/lib/librte_pmd_enic/enic_main.c
> > +++ b/lib/librte_pmd_enic/enic_main.c
> > @@ -490,7 +490,7 @@ static int enic_rq_indicate_buf(struct vnic_rq
> > *rq,
> >
> >  	if (vlan_tci) {
> >  		rx_pkt->ol_flags |= PKT_RX_VLAN_PKT;
> > -		rx_pkt->vlan_tci = vlan_tci;
> > +		rx_pkt->vlan_tci0 = vlan_tci;
> >  	}
> >
> >  	return eop;
> > diff --git a/lib/librte_pmd_fm10k/fm10k_rxtx.c
> > b/lib/librte_pmd_fm10k/fm10k_rxtx.c
> > index 83bddfc..ba3b8aa 100644
> > --- a/lib/librte_pmd_fm10k/fm10k_rxtx.c
> > +++ b/lib/librte_pmd_fm10k/fm10k_rxtx.c
> > @@ -410,7 +410,7 @@ static inline void tx_xmit_pkt(struct
> > fm10k_tx_queue *q, struct rte_mbuf *mb)
> >
> >  	/* set vlan if requested */
> >  	if (mb->ol_flags & PKT_TX_VLAN_PKT)
> > -		q->hw_ring[q->next_free].vlan = mb->vlan_tci;
> > +		q->hw_ring[q->next_free].vlan = mb->vlan_tci0;
> >
> >  	/* fill up the rings */
> >  	for (; mb != NULL; mb = mb->next) {
> > diff --git a/lib/librte_pmd_i40e/i40e_rxtx.c
> > b/lib/librte_pmd_i40e/i40e_rxtx.c index 493cfa3..1fe377c 100644
> > --- a/lib/librte_pmd_i40e/i40e_rxtx.c
> > +++ b/lib/librte_pmd_i40e/i40e_rxtx.c
> > @@ -703,7 +703,7 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue
> *rxq)
> >  				I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
> >  			mb->data_len = pkt_len;
> >  			mb->pkt_len = pkt_len;
> > -			mb->vlan_tci = rx_status &
> > +			mb->vlan_tci0 = rx_status &
> >  				(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
> >  			rte_le_to_cpu_16(\
> >  				rxdp[j].wb.qword0.lo_dword.l2tag1) : 0; @@ -947,7
> +947,7 @@
> > i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t
> nb_pkts)
> >  		rxm->data_len = rx_packet_len;
> >  		rxm->port = rxq->port_id;
> >
> > -		rxm->vlan_tci = rx_status &
> > +		rxm->vlan_tci0 = rx_status &
> >  			(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
> >  			rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
> >  		pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
> > @@ -1106,7 +1106,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
> >  		}
> >
> >  		first_seg->port = rxq->port_id;
> > -		first_seg->vlan_tci = (rx_status &
> > +		first_seg->vlan_tci0 = (rx_status &
> >  			(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
> >  			rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
> >  		pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
> > @@ -1291,7 +1291,7 @@ i40e_xmit_pkts(void *tx_queue, struct
> rte_mbuf
> > **tx_pkts, uint16_t nb_pkts)
> >
> >  		/* Descriptor based VLAN insertion */
> >  		if (ol_flags & PKT_TX_VLAN_PKT) {
> > -			tx_flags |= tx_pkt->vlan_tci <<
> > +			tx_flags |= tx_pkt->vlan_tci0 <<
> >  					I40E_TX_FLAG_L2TAG1_SHIFT;
> >  			tx_flags |= I40E_TX_FLAG_INSERT_VLAN;
> >  			td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; diff --git
> > a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> > b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> > index 7f15f15..fd664da 100644
> > --- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> > +++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> > @@ -612,7 +612,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct
> rte_mbuf **tx_pkts,
> >  			tx_offload.l2_len = tx_pkt->l2_len;
> >  			tx_offload.l3_len = tx_pkt->l3_len;
> >  			tx_offload.l4_len = tx_pkt->l4_len;
> > -			tx_offload.vlan_tci = tx_pkt->vlan_tci;
> > +			tx_offload.vlan_tci = tx_pkt->vlan_tci0;
> >  			tx_offload.tso_segsz = tx_pkt->tso_segsz;
> >
> >  			/* If new context need be built or reuse the exist ctx. */
> @@
> > -981,8 +981,7 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue
> *rxq)
> >  			pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len);
> >  			mb->data_len = pkt_len;
> >  			mb->pkt_len = pkt_len;
> > -			mb->vlan_tci = rxdp[j].wb.upper.vlan;
> > -			mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
> > +			mb->vlan_tci0 = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
> >
> >  			/* convert descriptor fields to rte mbuf flags */
> >  			pkt_flags  = rx_desc_hlen_type_rss_to_pkt_flags(
> > @@ -1327,7 +1326,7 @@ ixgbe_recv_pkts(void *rx_queue, struct
> rte_mbuf
> > **rx_pkts,
> >
> >  		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
> >  		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
> > -		rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
> > +		rxm->vlan_tci0 = rte_le_to_cpu_16(rxd.wb.upper.vlan);
> >
> >  		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
> >  		pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
> > @@ -1412,10 +1411,10 @@ ixgbe_fill_cluster_head_buf(
> >  	head->port = port_id;
> >
> >  	/*
> > -	 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
> > +	 * The vlan_tci0 field is only valid when PKT_RX_VLAN_PKT is
> >  	 * set in the pkt_flags field.
> >  	 */
> > -	head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
> > +	head->vlan_tci0 = rte_le_to_cpu_16(desc->wb.upper.vlan);
> >  	hlen_type_rss = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
> >  	pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
> >  	pkt_flags |= rx_desc_status_to_pkt_flags(staterr);
> > diff --git a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
> > b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
> > index d8019f5..57a33c9 100644
> > --- a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
> > +++ b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
> > @@ -405,7 +405,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct
> rte_mbuf **tx_pkts,
> >  			/* Add VLAN tag if requested */
> >  			if (txm->ol_flags & PKT_TX_VLAN_PKT) {
> >  				txd->ti = 1;
> > -				txd->tci = rte_cpu_to_le_16(txm->vlan_tci);
> > +				txd->tci = rte_cpu_to_le_16(txm->vlan_tci0);
> >  			}
> >
> >  			/* Record current mbuf for freeing it later in tx complete */
> @@
> > -629,10 +629,10 @@ vmxnet3_recv_pkts(void *rx_queue, struct
> rte_mbuf **rx_pkts, uint16_t nb_pkts)
> >  				   rcd->tci);
> >  			rxm->ol_flags = PKT_RX_VLAN_PKT;
> >  			/* Copy vlan tag in packet buffer */
> > -			rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
> > +			rxm->vlan_tci0 = rte_le_to_cpu_16((uint16_t)rcd->tci);
> >  		} else {
> >  			rxm->ol_flags = 0;
> > -			rxm->vlan_tci = 0;
> > +			rxm->vlan_tci0 = 0;
> >  		}
> >
> >  		/* Initialize newly received packet buffer */
> > --
> > 1.9.3
  
Zhang, Helin May 6, 2015, 4:07 a.m. UTC | #5
> -----Original Message-----
> From: Ananyev, Konstantin
> Sent: Wednesday, May 6, 2015 6:38 AM
> To: Chilikin, Andrey; Zhang, Helin; dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH RFC 1/6] mbuf: update mbuf structure for
> QinQ support
> 
> 
> 
> > -----Original Message-----
> > From: Chilikin, Andrey
> > Sent: Tuesday, May 05, 2015 4:43 PM
> > To: Ananyev, Konstantin; Zhang, Helin; dev@dpdk.org
> > Subject: RE: [dpdk-dev] [PATCH RFC 1/6] mbuf: update mbuf structure
> > for QinQ support
> >
> > Hi Helin,
> >
> > I would agree with Konstantin about the new naming for the VLAN tags. I
> > think we can keep the existing vlan_tci name and just name the new VLAN
> > tag differently. I was thinking along the lines of "vlan_tci_outer" or
> > "stag_tci". So vlan_tci will store the single VLAN when only one L2 tag is
> > present, or the inner VLAN when two tags are present, and "vlan_tci_outer"
> > will store the outer VLAN when two L2 tags are present.
> > The "stag_tci" name also looks like a good candidate, as in most cases
> > when two tags are present the outer VLAN is addressed as the S-Tag, even
> > if it is simple tag stacking.
> 
> Yep, I suppose "vlan_tci_outer" or "stag_tci" is a better name than what I
> suggested.
> Konstantin

Agreed, that naming seems to be the more popular option. Thank you, Andrey and Konstantin!
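
For reference, a hypothetical RX-side snippet assuming that naming settles as
'vlan_tci' plus 'vlan_tci_outer', together with the PKT_RX_QINQ_PKT flag from
this RFC (sketch only, not code from the patch):

	static inline void
	show_vlan_tags(const struct rte_mbuf *m)
	{
		if (m->ol_flags & PKT_RX_QINQ_PKT)
			/* both tags stripped: outer is the S-Tag */
			printf(" - QinQ outer tci=0x%x, inner tci=0x%x\n",
			       m->vlan_tci_outer, m->vlan_tci);
		else if (m->ol_flags & PKT_RX_VLAN_PKT)
			printf(" - VLAN tci=0x%x\n", m->vlan_tci);
	}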

Regards,
Helin

> 
> >
> > Regards,
> > Andrey
> >
> > > -----Original Message-----
> > > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Ananyev,
> > > Konstantin
> > > Sent: Tuesday, May 5, 2015 12:05 PM
> > > To: Zhang, Helin; dev@dpdk.org
> > > Subject: Re: [dpdk-dev] [PATCH RFC 1/6] mbuf: update mbuf structure
> > > for QinQ support
> > >
> > > Hi Helin,
> > >
> > > > -----Original Message-----
> > > > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Helin Zhang
> > > > Sent: Tuesday, May 05, 2015 3:32 AM
> > > > To: dev@dpdk.org
> > > > Subject: [dpdk-dev] [PATCH RFC 1/6] mbuf: update mbuf structure
> > > > for QinQ support
> > > >
> > > > To support QinQ, 'vlan_tci' should be replaced by 'vlan_tci0' and
> > > > 'vlan_tci1'. Also new offload flags of 'PKT_RX_QINQ_PKT' and
> > > > 'PKT_TX_QINQ_PKT' should be added.
> > > >
> > > > Signed-off-by: Helin Zhang <helin.zhang@intel.com>
> > > > ---
> > > >  app/test-pmd/flowgen.c                |  2 +-
> > > >  app/test-pmd/macfwd.c                 |  2 +-
> > > >  app/test-pmd/macswap.c                |  2 +-
> > > >  app/test-pmd/rxonly.c                 |  2 +-
> > > >  app/test-pmd/txonly.c                 |  2 +-
> > > >  app/test/packet_burst_generator.c     |  4 ++--
> > > >  lib/librte_ether/rte_ether.h          |  4 ++--
> > > >  lib/librte_mbuf/rte_mbuf.h            | 22
> +++++++++++++++++++---
> > > >  lib/librte_pmd_e1000/em_rxtx.c        |  8 ++++----
> > > >  lib/librte_pmd_e1000/igb_rxtx.c       |  8 ++++----
> > > >  lib/librte_pmd_enic/enic_ethdev.c     |  2 +-
> > > >  lib/librte_pmd_enic/enic_main.c       |  2 +-
> > > >  lib/librte_pmd_fm10k/fm10k_rxtx.c     |  2 +-
> > > >  lib/librte_pmd_i40e/i40e_rxtx.c       |  8 ++++----
> > > >  lib/librte_pmd_ixgbe/ixgbe_rxtx.c     | 11 +++++------
> > > >  lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c |  6 +++---
> > > >  16 files changed, 51 insertions(+), 36 deletions(-)
> > > >
> > > > diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c index
> > > > 72016c9..f24b00c 100644
> > > > --- a/app/test-pmd/flowgen.c
> > > > +++ b/app/test-pmd/flowgen.c
> > > > @@ -207,7 +207,7 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
> > > >  		pkt->nb_segs		= 1;
> > > >  		pkt->pkt_len		= pkt_size;
> > > >  		pkt->ol_flags		= ol_flags;
> > > > -		pkt->vlan_tci		= vlan_tci;
> > > > +		pkt->vlan_tci0		= vlan_tci;
> > > >  		pkt->l2_len		= sizeof(struct ether_hdr);
> > > >  		pkt->l3_len		= sizeof(struct ipv4_hdr);
> > > >  		pkts_burst[nb_pkt]	= pkt;
> > > > diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c index
> > > > 035e5eb..590b613 100644
> > > > --- a/app/test-pmd/macfwd.c
> > > > +++ b/app/test-pmd/macfwd.c
> > > > @@ -120,7 +120,7 @@ pkt_burst_mac_forward(struct fwd_stream
> *fs)
> > > >  		mb->ol_flags = ol_flags;
> > > >  		mb->l2_len = sizeof(struct ether_hdr);
> > > >  		mb->l3_len = sizeof(struct ipv4_hdr);
> > > > -		mb->vlan_tci = txp->tx_vlan_id;
> > > > +		mb->vlan_tci0 = txp->tx_vlan_id;
> > > >  	}
> > > >  	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst,
> > > nb_rx);
> > > >  	fs->tx_packets += nb_tx;
> > > > diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
> index
> > > > 6729849..c355399 100644
> > > > --- a/app/test-pmd/macswap.c
> > > > +++ b/app/test-pmd/macswap.c
> > > > @@ -122,7 +122,7 @@ pkt_burst_mac_swap(struct fwd_stream
> *fs)
> > > >  		mb->ol_flags = ol_flags;
> > > >  		mb->l2_len = sizeof(struct ether_hdr);
> > > >  		mb->l3_len = sizeof(struct ipv4_hdr);
> > > > -		mb->vlan_tci = txp->tx_vlan_id;
> > > > +		mb->vlan_tci0 = txp->tx_vlan_id;
> > > >  	}
> > > >  	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst,
> > > nb_rx);
> > > >  	fs->tx_packets += nb_tx;
> > > > diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c index
> > > > ac56090..aa2cf7f 100644
> > > > --- a/app/test-pmd/rxonly.c
> > > > +++ b/app/test-pmd/rxonly.c
> > > > @@ -159,7 +159,7 @@ pkt_burst_receive(struct fwd_stream *fs)
> > > >  				       mb->hash.fdir.hash, mb->hash.fdir.id);
> > > >  		}
> > > >  		if (ol_flags & PKT_RX_VLAN_PKT)
> > > > -			printf(" - VLAN tci=0x%x", mb->vlan_tci);
> > > > +			printf(" - VLAN tci=0x%x", mb->vlan_tci0);
> > > >  		if (is_encapsulation) {
> > > >  			struct ipv4_hdr *ipv4_hdr;
> > > >  			struct ipv6_hdr *ipv6_hdr;
> > > > diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c index
> > > > ca32c85..4a2827f 100644
> > > > --- a/app/test-pmd/txonly.c
> > > > +++ b/app/test-pmd/txonly.c
> > > > @@ -266,7 +266,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
> > > >  		pkt->nb_segs = tx_pkt_nb_segs;
> > > >  		pkt->pkt_len = tx_pkt_length;
> > > >  		pkt->ol_flags = ol_flags;
> > > > -		pkt->vlan_tci  = vlan_tci;
> > > > +		pkt->vlan_tci0  = vlan_tci;
> > > >  		pkt->l2_len = sizeof(struct ether_hdr);
> > > >  		pkt->l3_len = sizeof(struct ipv4_hdr);
> > > >  		pkts_burst[nb_pkt] = pkt;
> > > > diff --git a/app/test/packet_burst_generator.c
> > > > b/app/test/packet_burst_generator.c
> > > > index b46eed7..959644c 100644
> > > > --- a/app/test/packet_burst_generator.c
> > > > +++ b/app/test/packet_burst_generator.c
> > > > @@ -270,7 +270,7 @@ nomore_mbuf:
> > > >  		pkt->l2_len = eth_hdr_size;
> > > >
> > > >  		if (ipv4) {
> > > > -			pkt->vlan_tci  = ETHER_TYPE_IPv4;
> > > > +			pkt->vlan_tci0  = ETHER_TYPE_IPv4;
> > > >  			pkt->l3_len = sizeof(struct ipv4_hdr);
> > > >
> > > >  			if (vlan_enabled)
> > > > @@ -278,7 +278,7 @@ nomore_mbuf:
> > > >  			else
> > > >  				pkt->ol_flags = PKT_RX_IPV4_HDR;
> > > >  		} else {
> > > > -			pkt->vlan_tci  = ETHER_TYPE_IPv6;
> > > > +			pkt->vlan_tci0  = ETHER_TYPE_IPv6;
> > > >  			pkt->l3_len = sizeof(struct ipv6_hdr);
> > > >
> > > >  			if (vlan_enabled)
> > > > diff --git a/lib/librte_ether/rte_ether.h
> > > > b/lib/librte_ether/rte_ether.h index 49f4576..6d682a2 100644
> > > > --- a/lib/librte_ether/rte_ether.h
> > > > +++ b/lib/librte_ether/rte_ether.h
> > > > @@ -357,7 +357,7 @@ static inline int rte_vlan_strip(struct
> > > > rte_mbuf
> > > > *m)
> > > >
> > > >  	struct vlan_hdr *vh = (struct vlan_hdr *)(eh + 1);
> > > >  	m->ol_flags |= PKT_RX_VLAN_PKT;
> > > > -	m->vlan_tci = rte_be_to_cpu_16(vh->vlan_tci);
> > > > +	m->vlan_tci0 = rte_be_to_cpu_16(vh->vlan_tci);
> > > >
> > > >  	/* Copy ether header over rather than moving whole packet */
> > > >  	memmove(rte_pktmbuf_adj(m, sizeof(struct vlan_hdr)), @@
> -404,7
> > > > +404,7 @@ static inline int rte_vlan_insert(struct rte_mbuf **m)
> > > >  	nh->ether_type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
> > > >
> > > >  	vh = (struct vlan_hdr *) (nh + 1);
> > > > -	vh->vlan_tci = rte_cpu_to_be_16((*m)->vlan_tci);
> > > > +	vh->vlan_tci = rte_cpu_to_be_16((*m)->vlan_tci0);
> > > >
> > > >  	return 0;
> > > >  }
> > > > diff --git a/lib/librte_mbuf/rte_mbuf.h
> > > > b/lib/librte_mbuf/rte_mbuf.h index 70b0987..6eed54f 100644
> > > > --- a/lib/librte_mbuf/rte_mbuf.h
> > > > +++ b/lib/librte_mbuf/rte_mbuf.h
> > > > @@ -101,11 +101,17 @@ extern "C" {  #define
> PKT_RX_TUNNEL_IPV6_HDR
> > > > (1ULL << 12) /**< RX tunnel packet
> > > with IPv6 header. */
> > > >  #define PKT_RX_FDIR_ID       (1ULL << 13) /**< FD id reported
> if FDIR
> > > match. */
> > > >  #define PKT_RX_FDIR_FLX      (1ULL << 14) /**< Flexible bytes
> reported if
> > > FDIR match. */
> > > > +#define PKT_RX_QINQ_PKT     (1ULL << 15)  /**< RX packet
> with double
> > > VLAN stripped. */
> > > >  /* add new RX flags here */
> > > >
> > > >  /* add new TX flags here */
> > > >
> > > >  /**
> > > > + * Second VLAN insertion (QinQ) flag.
> > > > + */
> > > > +#define PKT_TX_QINQ_PKT    (1ULL << 49)
> > > > +
> > > > +/**
> > > >   * TCP segmentation offload. To enable this offload feature for a
> > > >   * packet to be transmitted on hardware supporting TSO:
> > > >   *  - set the PKT_TX_TCP_SEG flag in mbuf->ol_flags (this flag
> > > > implies @@ -268,7 +274,6 @@ struct rte_mbuf {
> > > >
> > > >  	uint16_t data_len;        /**< Amount of data in segment
> buffer. */
> > > >  	uint32_t pkt_len;         /**< Total pkt len: sum of all
> segments. */
> > > > -	uint16_t vlan_tci;        /**< VLAN Tag Control Identifier (CPU
> order) */
> > > >  	uint16_t reserved;
> > >
> > > Now here is an implicit 2-byte hole between 'reserved' and 'rss'.
> > > Probably better to make it explicit - make 'reserved' uint32_t.
> > >
> > > Another thing - it looks like your change will break  ixgbe vector RX.
> > >
> > > >  	union {
> > > >  		uint32_t rss;     /**< RSS hash result if RSS enabled */
> > > > @@ -289,6 +294,15 @@ struct rte_mbuf {
> > > >  		uint32_t usr;	  /**< User defined tags. See
> > > rte_distributor_process() */
> > > >  	} hash;                   /**< hash information */
> > > >
> > > > +	/* VLAN tags */
> > > > +	union {
> > > > +		uint32_t vlan_tags;
> > > > +		struct {
> > > > +			uint16_t vlan_tci0;
> > > > +			uint16_t vlan_tci1;
> > >
> > > Do you really need to change vlan_tci to vlan_tci0?
> > > Can't you keep 'vlan_tci' for first vlan tag, and add something like
> > > 'vlan_tci_ext', or 'vlan_tci_next' for second one?
> > > Would save you a lot of changes, again users who use single vlan
> > > wouldn't need to update their code for 2.1.
> > >
> > > > +		};
> > > > +	};
> > > > +
> > > >  	uint32_t seqn; /**< Sequence number. See also
> > > rte_reorder_insert()
> > > > */
> > > >
> > > >  	/* second cache line - fields only used in slow path or on TX */
> > > > @@
> > > > -766,7 +780,8 @@ static inline void rte_pktmbuf_reset(struct
> > > > rte_mbuf
> > > *m)
> > > >  	m->next = NULL;
> > > >  	m->pkt_len = 0;
> > > >  	m->tx_offload = 0;
> > > > -	m->vlan_tci = 0;
> > > > +	m->vlan_tci0 = 0;
> > > > +	m->vlan_tci1 = 0;
> > >
> > > Why just not:
> > > m-> vlan_tags = 0;
> > > ?
> > >
> > > >  	m->nb_segs = 1;
> > > >  	m->port = 0xff;
> > > >
> > > > @@ -838,7 +853,8 @@ static inline void rte_pktmbuf_attach(struct
> > > rte_mbuf *mi, struct rte_mbuf *m)
> > > >  	mi->data_off = m->data_off;
> > > >  	mi->data_len = m->data_len;
> > > >  	mi->port = m->port;
> > > > -	mi->vlan_tci = m->vlan_tci;
> > > > +	mi->vlan_tci0 = m->vlan_tci0;
> > > > +	mi->vlan_tci1 = m->vlan_tci1;
> > >
> > > Same thing, why not:
> > > mi-> vlan_tags = m-> vlan_tags;
> > > ?
> > >
> > > >  	mi->tx_offload = m->tx_offload;
> > > >  	mi->hash = m->hash;
> > > >
> > > > diff --git a/lib/librte_pmd_e1000/em_rxtx.c
> > > > b/lib/librte_pmd_e1000/em_rxtx.c index 64d067c..422f4ed 100644
> > > > --- a/lib/librte_pmd_e1000/em_rxtx.c
> > > > +++ b/lib/librte_pmd_e1000/em_rxtx.c
> > > > @@ -438,7 +438,7 @@ eth_em_xmit_pkts(void *tx_queue, struct
> > > rte_mbuf **tx_pkts,
> > > >  		/* If hardware offload required */
> > > >  		tx_ol_req = (ol_flags & (PKT_TX_IP_CKSUM |
> > > PKT_TX_L4_MASK));
> > > >  		if (tx_ol_req) {
> > > > -			hdrlen.f.vlan_tci = tx_pkt->vlan_tci;
> > > > +			hdrlen.f.vlan_tci = tx_pkt->vlan_tci0;
> > > >  			hdrlen.f.l2_len = tx_pkt->l2_len;
> > > >  			hdrlen.f.l3_len = tx_pkt->l3_len;
> > > >  			/* If new context to be built or reuse the exist ctx. */
> > > @@ -534,7
> > > > +534,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf
> > > **tx_pkts,
> > > >  		/* Set VLAN Tag offload fields. */
> > > >  		if (ol_flags & PKT_TX_VLAN_PKT) {
> > > >  			cmd_type_len |= E1000_TXD_CMD_VLE;
> > > > -			popts_spec = tx_pkt->vlan_tci <<
> > > E1000_TXD_VLAN_SHIFT;
> > > > +			popts_spec = tx_pkt->vlan_tci0 <<
> > > E1000_TXD_VLAN_SHIFT;
> > > >  		}
> > > >
> > > >  		if (tx_ol_req) {
> > > > @@ -800,7 +800,7 @@ eth_em_recv_pkts(void *rx_queue, struct
> > > rte_mbuf **rx_pkts,
> > > >  				rx_desc_error_to_pkt_flags(rxd.errors);
> > > >
> > > >  		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
> > > > -		rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
> > > > +		rxm->vlan_tci0 = rte_le_to_cpu_16(rxd.special);
> > > >
> > > >  		/*
> > > >  		 * Store the mbuf address into the next entry of the array
> > > @@
> > > > -1026,7 +1026,7 @@ eth_em_recv_scattered_pkts(void
> *rx_queue,
> > > > struct
> > > rte_mbuf **rx_pkts,
> > > >
> > > 	rx_desc_error_to_pkt_flags(rxd.errors);
> > > >
> > > >  		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
> > > > -		rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
> > > > +		rxm->vlan_tci0 = rte_le_to_cpu_16(rxd.special);
> > > >
> > > >  		/* Prefetch data of first segment, if configured to do so. */
> > > >  		rte_packet_prefetch((char *)first_seg->buf_addr + diff --git
> > > > a/lib/librte_pmd_e1000/igb_rxtx.c
> > > > b/lib/librte_pmd_e1000/igb_rxtx.c index 80d05c0..fda273f 100644
> > > > --- a/lib/librte_pmd_e1000/igb_rxtx.c
> > > > +++ b/lib/librte_pmd_e1000/igb_rxtx.c
> > > > @@ -401,7 +401,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct
> > > rte_mbuf **tx_pkts,
> > > >  		ol_flags = tx_pkt->ol_flags;
> > > >  		l2_l3_len.l2_len = tx_pkt->l2_len;
> > > >  		l2_l3_len.l3_len = tx_pkt->l3_len;
> > > > -		vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
> > > > +		vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci0;
> > > >  		vlan_macip_lens.f.l2_l3_len = l2_l3_len.u16;
> > > >  		tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
> > > >
> > > > @@ -784,7 +784,7 @@ eth_igb_recv_pkts(void *rx_queue, struct
> > > rte_mbuf **rx_pkts,
> > > >  		rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
> > > >  		hlen_type_rss =
> > > rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
> > > >  		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
> > > > -		rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
> > > > +		rxm->vlan_tci0 = rte_le_to_cpu_16(rxd.wb.upper.vlan);
> > > >
> > > >  		pkt_flags =
> > > rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
> > > >  		pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
> > > > @@ -1015,10 +1015,10 @@ eth_igb_recv_scattered_pkts(void
> > > > *rx_queue,
> > > struct rte_mbuf **rx_pkts,
> > > >  		first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
> > > >
> > > >  		/*
> > > > -		 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
> > > > +		 * The vlan_tci0 field is only valid when PKT_RX_VLAN_PKT
> is
> > > >  		 * set in the pkt_flags field.
> > > >  		 */
> > > > -		first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
> > > > +		first_seg->vlan_tci0 = rte_le_to_cpu_16(rxd.wb.upper.vlan);
> > > >  		hlen_type_rss =
> > > rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
> > > >  		pkt_flags =
> > > rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
> > > >  		pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
> > > > diff --git a/lib/librte_pmd_enic/enic_ethdev.c
> > > > b/lib/librte_pmd_enic/enic_ethdev.c
> > > > index 69ad01b..45c0e14 100644
> > > > --- a/lib/librte_pmd_enic/enic_ethdev.c
> > > > +++ b/lib/librte_pmd_enic/enic_ethdev.c
> > > > @@ -506,7 +506,7 @@ static uint16_t enicpmd_xmit_pkts(void
> > > > *tx_queue,
> > > struct rte_mbuf **tx_pkts,
> > > >  				return index;
> > > >  		}
> > > >  		pkt_len = tx_pkt->pkt_len;
> > > > -		vlan_id = tx_pkt->vlan_tci;
> > > > +		vlan_id = tx_pkt->vlan_tci0;
> > > >  		ol_flags = tx_pkt->ol_flags;
> > > >  		for (frags = 0; inc_len < pkt_len; frags++) {
> > > >  			if (!tx_pkt)
> > > > diff --git a/lib/librte_pmd_enic/enic_main.c
> > > > b/lib/librte_pmd_enic/enic_main.c index 15313c2..d1660a1
> 100644
> > > > --- a/lib/librte_pmd_enic/enic_main.c
> > > > +++ b/lib/librte_pmd_enic/enic_main.c
> > > > @@ -490,7 +490,7 @@ static int enic_rq_indicate_buf(struct
> vnic_rq
> > > > *rq,
> > > >
> > > >  	if (vlan_tci) {
> > > >  		rx_pkt->ol_flags |= PKT_RX_VLAN_PKT;
> > > > -		rx_pkt->vlan_tci = vlan_tci;
> > > > +		rx_pkt->vlan_tci0 = vlan_tci;
> > > >  	}
> > > >
> > > >  	return eop;
> > > > diff --git a/lib/librte_pmd_fm10k/fm10k_rxtx.c
> > > > b/lib/librte_pmd_fm10k/fm10k_rxtx.c
> > > > index 83bddfc..ba3b8aa 100644
> > > > --- a/lib/librte_pmd_fm10k/fm10k_rxtx.c
> > > > +++ b/lib/librte_pmd_fm10k/fm10k_rxtx.c
> > > > @@ -410,7 +410,7 @@ static inline void tx_xmit_pkt(struct
> > > > fm10k_tx_queue *q, struct rte_mbuf *mb)
> > > >
> > > >  	/* set vlan if requested */
> > > >  	if (mb->ol_flags & PKT_TX_VLAN_PKT)
> > > > -		q->hw_ring[q->next_free].vlan = mb->vlan_tci;
> > > > +		q->hw_ring[q->next_free].vlan = mb->vlan_tci0;
> > > >
> > > >  	/* fill up the rings */
> > > >  	for (; mb != NULL; mb = mb->next) { diff --git
> > > > a/lib/librte_pmd_i40e/i40e_rxtx.c
> > > > b/lib/librte_pmd_i40e/i40e_rxtx.c index 493cfa3..1fe377c 100644
> > > > --- a/lib/librte_pmd_i40e/i40e_rxtx.c
> > > > +++ b/lib/librte_pmd_i40e/i40e_rxtx.c
> > > > @@ -703,7 +703,7 @@ i40e_rx_scan_hw_ring(struct
> i40e_rx_queue *rxq)
> > > >  				I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq-
> crc_len;
> > > >  			mb->data_len = pkt_len;
> > > >  			mb->pkt_len = pkt_len;
> > > > -			mb->vlan_tci = rx_status &
> > > > +			mb->vlan_tci0 = rx_status &
> > > >  				(1 <<
> > > I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
> > > >  			rte_le_to_cpu_16(\
> > > >  				rxdp[j].wb.qword0.lo_dword.l2tag1) : 0; @@
> > > -947,7 +947,7 @@
> > > > i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> uint16_t
> > > nb_pkts)
> > > >  		rxm->data_len = rx_packet_len;
> > > >  		rxm->port = rxq->port_id;
> > > >
> > > > -		rxm->vlan_tci = rx_status &
> > > > +		rxm->vlan_tci0 = rx_status &
> > > >  			(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
> > > >  			rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) :
> > > 0;
> > > >  		pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
> > > > @@ -1106,7 +1106,7 @@ i40e_recv_scattered_pkts(void
> *rx_queue,
> > > >  		}
> > > >
> > > >  		first_seg->port = rxq->port_id;
> > > > -		first_seg->vlan_tci = (rx_status &
> > > > +		first_seg->vlan_tci0 = (rx_status &
> > > >  			(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
> > > >  			rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) :
> > > 0;
> > > >  		pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
> > > > @@ -1291,7 +1291,7 @@ i40e_xmit_pkts(void *tx_queue, struct
> > > > rte_mbuf **tx_pkts, uint16_t nb_pkts)
> > > >
> > > >  		/* Descriptor based VLAN insertion */
> > > >  		if (ol_flags & PKT_TX_VLAN_PKT) {
> > > > -			tx_flags |= tx_pkt->vlan_tci <<
> > > > +			tx_flags |= tx_pkt->vlan_tci0 <<
> > > >  					I40E_TX_FLAG_L2TAG1_SHIFT;
> > > >  			tx_flags |= I40E_TX_FLAG_INSERT_VLAN;
> > > >  			td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; diff --git
> > > > a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> > > > b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> > > > index 7f15f15..fd664da 100644
> > > > --- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> > > > +++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> > > > @@ -612,7 +612,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct
> > > > rte_mbuf
> > > **tx_pkts,
> > > >  			tx_offload.l2_len = tx_pkt->l2_len;
> > > >  			tx_offload.l3_len = tx_pkt->l3_len;
> > > >  			tx_offload.l4_len = tx_pkt->l4_len;
> > > > -			tx_offload.vlan_tci = tx_pkt->vlan_tci;
> > > > +			tx_offload.vlan_tci = tx_pkt->vlan_tci0;
> > > >  			tx_offload.tso_segsz = tx_pkt->tso_segsz;
> > > >
> > > >  			/* If new context need be built or reuse the exist ctx.
> > > */ @@
> > > > -981,8 +981,7 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue
> *rxq)
> > > >  			pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq-
> crc_len);
> > > >  			mb->data_len = pkt_len;
> > > >  			mb->pkt_len = pkt_len;
> > > > -			mb->vlan_tci = rxdp[j].wb.upper.vlan;
> > > > -			mb->vlan_tci =
> > > rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
> > > > +			mb->vlan_tci0 =
> > > rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
> > > >
> > > >  			/* convert descriptor fields to rte mbuf flags */
> > > >  			pkt_flags  = rx_desc_hlen_type_rss_to_pkt_flags(
> > > > @@ -1327,7 +1326,7 @@ ixgbe_recv_pkts(void *rx_queue, struct
> > > rte_mbuf
> > > > **rx_pkts,
> > > >
> > > >  		hlen_type_rss =
> > > rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
> > > >  		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
> > > > -		rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
> > > > +		rxm->vlan_tci0 = rte_le_to_cpu_16(rxd.wb.upper.vlan);
> > > >
> > > >  		pkt_flags =
> > > rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
> > > >  		pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
> > > > @@ -1412,10 +1411,10 @@ ixgbe_fill_cluster_head_buf(
> > > >  	head->port = port_id;
> > > >
> > > >  	/*
> > > > -	 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
> > > > +	 * The vlan_tci0 field is only valid when PKT_RX_VLAN_PKT is
> > > >  	 * set in the pkt_flags field.
> > > >  	 */
> > > > -	head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
> > > > +	head->vlan_tci0 = rte_le_to_cpu_16(desc->wb.upper.vlan);
> > > >  	hlen_type_rss =
> rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
> > > >  	pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
> > > >  	pkt_flags |= rx_desc_status_to_pkt_flags(staterr);
> > > > diff --git a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
> > > > b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
> > > > index d8019f5..57a33c9 100644
> > > > --- a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
> > > > +++ b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
> > > > @@ -405,7 +405,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct
> > > rte_mbuf **tx_pkts,
> > > >  			/* Add VLAN tag if requested */
> > > >  			if (txm->ol_flags & PKT_TX_VLAN_PKT) {
> > > >  				txd->ti = 1;
> > > > -				txd->tci = rte_cpu_to_le_16(txm->vlan_tci);
> > > > +				txd->tci = rte_cpu_to_le_16(txm->vlan_tci0);
> > > >  			}
> > > >
> > > >  			/* Record current mbuf for freeing it later in tx
> > > complete */ @@
> > > > -629,10 +629,10 @@ vmxnet3_recv_pkts(void *rx_queue, struct
> > > > rte_mbuf
> > > **rx_pkts, uint16_t nb_pkts)
> > > >  				   rcd->tci);
> > > >  			rxm->ol_flags = PKT_RX_VLAN_PKT;
> > > >  			/* Copy vlan tag in packet buffer */
> > > > -			rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd-
> > > >tci);
> > > > +			rxm->vlan_tci0 = rte_le_to_cpu_16((uint16_t)rcd-
> > > >tci);
> > > >  		} else {
> > > >  			rxm->ol_flags = 0;
> > > > -			rxm->vlan_tci = 0;
> > > > +			rxm->vlan_tci0 = 0;
> > > >  		}
> > > >
> > > >  		/* Initialize newly received packet buffer */
> > > > --
> > > > 1.9.3
  
Bruce Richardson May 6, 2015, 8:39 a.m. UTC | #6
On Wed, May 06, 2015 at 04:06:17AM +0000, Zhang, Helin wrote:
> 
> 
> > -----Original Message-----
> > From: Ananyev, Konstantin
> > Sent: Tuesday, May 5, 2015 7:05 PM
> > To: Zhang, Helin; dev@dpdk.org
> > Subject: RE: [dpdk-dev] [PATCH RFC 1/6] mbuf: update mbuf structure for
> > QinQ support
> > 
> > Hi Helin,
> > 
> > > -----Original Message-----
> > > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Helin Zhang
> > > Sent: Tuesday, May 05, 2015 3:32 AM
> > > To: dev@dpdk.org
> > > Subject: [dpdk-dev] [PATCH RFC 1/6] mbuf: update mbuf structure for
> > > QinQ support
> > >
> > > To support QinQ, 'vlan_tci' should be replaced by 'vlan_tci0' and
> > > 'vlan_tci1'. Also new offload flags of 'PKT_RX_QINQ_PKT' and
> > > 'PKT_TX_QINQ_PKT' should be added.
> > >
> > > Signed-off-by: Helin Zhang <helin.zhang@intel.com>
> > > ---
> > >  app/test-pmd/flowgen.c                |  2 +-
> > >  app/test-pmd/macfwd.c                 |  2 +-
> > >  app/test-pmd/macswap.c                |  2 +-
> > >  app/test-pmd/rxonly.c                 |  2 +-
> > >  app/test-pmd/txonly.c                 |  2 +-
> > >  app/test/packet_burst_generator.c     |  4 ++--
> > >  lib/librte_ether/rte_ether.h          |  4 ++--
> > >  lib/librte_mbuf/rte_mbuf.h            | 22
> > +++++++++++++++++++---
> > >  lib/librte_pmd_e1000/em_rxtx.c        |  8 ++++----
> > >  lib/librte_pmd_e1000/igb_rxtx.c       |  8 ++++----
> > >  lib/librte_pmd_enic/enic_ethdev.c     |  2 +-
> > >  lib/librte_pmd_enic/enic_main.c       |  2 +-
> > >  lib/librte_pmd_fm10k/fm10k_rxtx.c     |  2 +-
> > >  lib/librte_pmd_i40e/i40e_rxtx.c       |  8 ++++----
> > >  lib/librte_pmd_ixgbe/ixgbe_rxtx.c     | 11 +++++------
> > >  lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c |  6 +++---
> > >  16 files changed, 51 insertions(+), 36 deletions(-)
> > >
<snip>
> > --- a/lib/librte_mbuf/rte_mbuf.h
> > > +++ b/lib/librte_mbuf/rte_mbuf.h
> > > @@ -101,11 +101,17 @@ extern "C" {
> > >  #define PKT_RX_TUNNEL_IPV6_HDR (1ULL << 12) /**< RX tunnel
> > packet with IPv6 header. */
> > >  #define PKT_RX_FDIR_ID       (1ULL << 13) /**< FD id reported if
> > FDIR match. */
> > >  #define PKT_RX_FDIR_FLX      (1ULL << 14) /**< Flexible bytes
> > reported if FDIR match. */
> > > +#define PKT_RX_QINQ_PKT     (1ULL << 15)  /**< RX packet with
> > double VLAN stripped. */
> > >  /* add new RX flags here */
> > >
> > >  /* add new TX flags here */
> > >
> > >  /**
> > > + * Second VLAN insertion (QinQ) flag.
> > > + */
> > > +#define PKT_TX_QINQ_PKT    (1ULL << 49)
> > > +
> > > +/**
> > >   * TCP segmentation offload. To enable this offload feature for a
> > >   * packet to be transmitted on hardware supporting TSO:
> > >   *  - set the PKT_TX_TCP_SEG flag in mbuf->ol_flags (this flag
> > > implies @@ -268,7 +274,6 @@ struct rte_mbuf {
> > >
> > >  	uint16_t data_len;        /**< Amount of data in segment buffer. */
> > >  	uint32_t pkt_len;         /**< Total pkt len: sum of all segments. */
> > > -	uint16_t vlan_tci;        /**< VLAN Tag Control Identifier (CPU
> > order) */
> > >  	uint16_t reserved;
> > 
> > Now here is an implicit 2-byte hole between 'reserved' and 'rss'.
> > Probably better to make it explicit - make 'reserved' uint32_t.
> Yes, the layout will be changed according to the demands of Vector PMD.
> The vlan structure will be kept the same, but the mbuf structure layout will
> be re-organized a bit.

Why not just put the extra vlan tag into the reserved space? In the original
work to restructure the mbuf, that is what the reserved space was put there
for [it was marked as reserved because it was requested that fields not be
fully dedicated until used, and we did not have double-vlan support at that
time]. However, it seems more sensible to put the vlans there now, unless
there is a good reason to move them to the new location in the mbuf that you
propose below.
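
A minimal sketch of that option, with the second tag simply taking over the
16-bit slot that is currently named 'reserved' (second field name illustrative
only):

	uint16_t data_len;        /**< Amount of data in segment buffer. */
	uint32_t pkt_len;         /**< Total pkt len: sum of all segments. */
	uint16_t vlan_tci;        /**< first VLAN tag, unchanged */
	uint16_t vlan_tci_outer;  /**< second VLAN tag, was 'reserved' */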

/Bruce

> 
> > 
> > Another thing - it looks like your change will break  ixgbe vector RX.
> Yes, in the cover-letter, I noted that the vector PMD will be updated soon
> together with the code changes.
> 
> > 
> > >  	union {
> > >  		uint32_t rss;     /**< RSS hash result if RSS enabled */
> > > @@ -289,6 +294,15 @@ struct rte_mbuf {
> > >  		uint32_t usr;	  /**< User defined tags. See
> > rte_distributor_process() */
> > >  	} hash;                   /**< hash information */
> > >
> > > +	/* VLAN tags */
> > > +	union {
> > > +		uint32_t vlan_tags;
> > > +		struct {
> > > +			uint16_t vlan_tci0;
> > > +			uint16_t vlan_tci1;
> > 
> > Do you really need to change vlan_tci to vlan_tci0?
> > Can't you keep 'vlan_tci' for first vlan tag, and add something like
> > 'vlan_tci_ext', or 'vlan_tci_next' for second one?
> > Would save you a lot of changes, again users who use single vlan wouldn't
> > need to update their code for 2.1.
> Yes, good point! The names came from the original mbuf definition done by
> Bruce long ago. If more people prefer keeping the old name and just adding a new
> one, I will do that in the next version of the patch set.
> Thank you all!
> 
> > 
> > > +		};
> > > +	};
> > > +
> > >  	uint32_t seqn; /**< Sequence number. See also rte_reorder_insert()
> > > */
> > >
> > >  	/* second cache line - fields only used in slow path or on TX */ @@
> > > -766,7 +780,8 @@ static inline void rte_pktmbuf_reset(struct rte_mbuf
> > *m)
> > >  	m->next = NULL;
> > >  	m->pkt_len = 0;
> > >  	m->tx_offload = 0;
> > > -	m->vlan_tci = 0;
> > > +	m->vlan_tci0 = 0;
> > > +	m->vlan_tci1 = 0;
> > 
> > Why just not:
> > m-> vlan_tags = 0;
> > ?
> Accepted. Good point!
> 
> > 
> > >  	m->nb_segs = 1;
> > >  	m->port = 0xff;
> > >
> > > @@ -838,7 +853,8 @@ static inline void rte_pktmbuf_attach(struct
> > rte_mbuf *mi, struct rte_mbuf *m)
> > >  	mi->data_off = m->data_off;
> > >  	mi->data_len = m->data_len;
> > >  	mi->port = m->port;
> > > -	mi->vlan_tci = m->vlan_tci;
> > > +	mi->vlan_tci0 = m->vlan_tci0;
> > > +	mi->vlan_tci1 = m->vlan_tci1;
> > 
> > Same thing, why not:
> > mi-> vlan_tags = m-> vlan_tags;
> > ?
> Accepted. Good point!
> 
> Regards,
> Helin

<snip>
  
Zhang, Helin May 6, 2015, 8:48 a.m. UTC | #7
> -----Original Message-----
> From: Richardson, Bruce
> Sent: Wednesday, May 6, 2015 4:39 PM
> To: Zhang, Helin
> Cc: Ananyev, Konstantin; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH RFC 1/6] mbuf: update mbuf structure for
> QinQ support
> 
> On Wed, May 06, 2015 at 04:06:17AM +0000, Zhang, Helin wrote:
> >
> >
> > > -----Original Message-----
> > > From: Ananyev, Konstantin
> > > Sent: Tuesday, May 5, 2015 7:05 PM
> > > To: Zhang, Helin; dev@dpdk.org
> > > Subject: RE: [dpdk-dev] [PATCH RFC 1/6] mbuf: update mbuf structure
> > > for QinQ support
> > >
> > > Hi Helin,
> > >
> > > > -----Original Message-----
> > > > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Helin Zhang
> > > > Sent: Tuesday, May 05, 2015 3:32 AM
> > > > To: dev@dpdk.org
> > > > Subject: [dpdk-dev] [PATCH RFC 1/6] mbuf: update mbuf structure
> > > > for QinQ support
> > > >
> > > > To support QinQ, 'vlan_tci' should be replaced by 'vlan_tci0' and
> > > > 'vlan_tci1'. Also new offload flags of 'PKT_RX_QINQ_PKT' and
> > > > 'PKT_TX_QINQ_PKT' should be added.
> > > >
> > > > Signed-off-by: Helin Zhang <helin.zhang@intel.com>
> > > > ---
> > > >  app/test-pmd/flowgen.c                |  2 +-
> > > >  app/test-pmd/macfwd.c                 |  2 +-
> > > >  app/test-pmd/macswap.c                |  2 +-
> > > >  app/test-pmd/rxonly.c                 |  2 +-
> > > >  app/test-pmd/txonly.c                 |  2 +-
> > > >  app/test/packet_burst_generator.c     |  4 ++--
> > > >  lib/librte_ether/rte_ether.h          |  4 ++--
> > > >  lib/librte_mbuf/rte_mbuf.h            | 22
> > > +++++++++++++++++++---
> > > >  lib/librte_pmd_e1000/em_rxtx.c        |  8 ++++----
> > > >  lib/librte_pmd_e1000/igb_rxtx.c       |  8 ++++----
> > > >  lib/librte_pmd_enic/enic_ethdev.c     |  2 +-
> > > >  lib/librte_pmd_enic/enic_main.c       |  2 +-
> > > >  lib/librte_pmd_fm10k/fm10k_rxtx.c     |  2 +-
> > > >  lib/librte_pmd_i40e/i40e_rxtx.c       |  8 ++++----
> > > >  lib/librte_pmd_ixgbe/ixgbe_rxtx.c     | 11 +++++------
> > > >  lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c |  6 +++---
> > > >  16 files changed, 51 insertions(+), 36 deletions(-)
> > > >
> <snip>
> > > > --- a/lib/librte_mbuf/rte_mbuf.h
> > > > +++ b/lib/librte_mbuf/rte_mbuf.h
> > > > @@ -101,11 +101,17 @@ extern "C" {
> > > >  #define PKT_RX_TUNNEL_IPV6_HDR (1ULL << 12) /**< RX tunnel packet with IPv6 header. */
> > > >  #define PKT_RX_FDIR_ID       (1ULL << 13) /**< FD id reported if FDIR match. */
> > > >  #define PKT_RX_FDIR_FLX      (1ULL << 14) /**< Flexible bytes reported if FDIR match. */
> > > > +#define PKT_RX_QINQ_PKT     (1ULL << 15)  /**< RX packet with double VLAN stripped. */
> > > >  /* add new RX flags here */
> > > >
> > > >  /* add new TX flags here */
> > > >
> > > >  /**
> > > > + * Second VLAN insertion (QinQ) flag.
> > > > + */
> > > > +#define PKT_TX_QINQ_PKT    (1ULL << 49)
> > > > +
> > > > +/**
> > > >   * TCP segmentation offload. To enable this offload feature for a
> > > >   * packet to be transmitted on hardware supporting TSO:
> > > >  *  - set the PKT_TX_TCP_SEG flag in mbuf->ol_flags (this flag implies
> > > > @@ -268,7 +274,6 @@ struct rte_mbuf {
> > > >
> > > >  	uint16_t data_len;        /**< Amount of data in segment buffer. */
> > > >  	uint32_t pkt_len;         /**< Total pkt len: sum of all segments. */
> > > > -	uint16_t vlan_tci;        /**< VLAN Tag Control Identifier (CPU order) */
> > > >  	uint16_t reserved;
> > >
> > > Now there is an implicit 2-byte hole between 'reserved' and 'rss'.
> > > Probably better to make it explicit - make 'reserved' uint32_t.
> > Yes, the layout will be changed according to the demands of the vector PMD.
> > The vlan structure will be kept the same, but the mbuf structure
> > layout will be reorganized a bit.
> 
> Why not just put the extra vlan tag into the reserved space? In the original
> work to restructure the mbuf, that was what the reserved space was put
> there for [it was marked as reserved because it was requested that fields not
> be fully dedicated until used, and we did not have double-vlan support at that
> time].
> However, it seems more sensible to put the vlans there now, unless there
> is a good reason to move them to the new location in the mbuf that you
> propose below.
> 
> /Bruce

Thank you very much for the reminder!
The main reason is that we plan to enlarge the packet_type field, so we have to move the vlan fields down. Hopefully the unified packet type can be merged before this one.

Regards,
Helin
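Purely as an illustration of the two placements discussed in this exchange (not part of the patch), the option mentioned above keeps the tags in the slot freed by 'vlan_tci' plus the adjacent 16-bit 'reserved' field, while the RFC moves them below the 'hash' union to leave room for a wider packet_type later:

	/* Option discussed above: reuse the existing slot; only the two
	 * tag fields change, the surrounding fields are illustrative. */
	uint16_t data_len;  /* amount of data in segment buffer */
	uint32_t pkt_len;   /* total pkt len: sum of all segments */
	uint16_t vlan_tci0; /* formerly 'vlan_tci' (naming still under discussion) */
	uint16_t vlan_tci1; /* formerly the 16-bit 'reserved' field */
	/* ... hash union, seqn, etc. unchanged ... */

	/* Option taken in this RFC (see the diff below): the vlan_tags
	 * union is instead placed after the 'hash' union. */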

> 
> >
> > >
> > > Another thing - it looks like your change will break ixgbe vector RX.
> > Yes, in the cover letter I noted that the vector PMD will be updated
> > soon together with the code changes.
> >
> > >
> > > >  	union {
> > > >  		uint32_t rss;     /**< RSS hash result if RSS enabled */
> > > > @@ -289,6 +294,15 @@ struct rte_mbuf {
> > > >  		uint32_t usr;	  /**< User defined tags. See rte_distributor_process() */
> > > >  	} hash;                   /**< hash information */
> > > >
> > > > +	/* VLAN tags */
> > > > +	union {
> > > > +		uint32_t vlan_tags;
> > > > +		struct {
> > > > +			uint16_t vlan_tci0;
> > > > +			uint16_t vlan_tci1;
> > >
> > > Do you really need to change vlan_tci to vlan_tci0?
> > > Can't you keep 'vlan_tci' for the first vlan tag, and add something like
> > > 'vlan_tci_ext' or 'vlan_tci_next' for the second one?
> > > That would save you a lot of changes; again, users who use a single vlan
> > > wouldn't need to update their code for 2.1.
> > Yes, good point! The names came from the original mbuf definition done
> > by Bruce long long ago. If more guys suggest keeping the old one and
> > just adding a new one, I will do that in the next version of the patch set.
> > Thank you all!
> >
> > >
> > > > +		};
> > > > +	};
> > > > +
> > > >  	uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */
> > > >
> > > >  	/* second cache line - fields only used in slow path or on TX */
> > > > @@ -766,7 +780,8 @@ static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
> > > >  	m->next = NULL;
> > > >  	m->pkt_len = 0;
> > > >  	m->tx_offload = 0;
> > > > -	m->vlan_tci = 0;
> > > > +	m->vlan_tci0 = 0;
> > > > +	m->vlan_tci1 = 0;
> > >
> > > Why not just:
> > > m->vlan_tags = 0;
> > > ?
> > Accepted. Good point!
> >
> > >
> > > >  	m->nb_segs = 1;
> > > >  	m->port = 0xff;
> > > >
> > > > @@ -838,7 +853,8 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
> > > >  	mi->data_off = m->data_off;
> > > >  	mi->data_len = m->data_len;
> > > >  	mi->port = m->port;
> > > > -	mi->vlan_tci = m->vlan_tci;
> > > > +	mi->vlan_tci0 = m->vlan_tci0;
> > > > +	mi->vlan_tci1 = m->vlan_tci1;
> > >
> > > Same thing, why not:
> > > mi->vlan_tags = m->vlan_tags;
> > > ?
> > Accepted. Good point!
> >
> > Regards,
> > Helin
> 
> <snip>
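Given the review comments accepted in the thread above, the rte_pktmbuf_reset() and rte_pktmbuf_attach() hunks in the diff below would presumably collapse to single 32-bit accesses in the next revision; a sketch only, relying on the 'vlan_tags' union member:

	/* in rte_pktmbuf_reset(): clear both TCIs with one store */
	m->vlan_tags = 0;

	/* in rte_pktmbuf_attach(): copy both TCIs with one assignment */
	mi->vlan_tags = m->vlan_tags;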
  

Patch
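Before the diff itself, a minimal usage sketch of the new RX flags and fields, for illustration only; 'process_vlan_tags()' is a placeholder for application logic, and which stripped tag a driver reports in which field is not spelled out by this RFC:

	static void
	handle_rx_vlan(struct rte_mbuf *m)
	{
		if (m->ol_flags & PKT_RX_QINQ_PKT) {
			/* double VLAN stripped: both TCIs are valid */
			process_vlan_tags(m->vlan_tci0, m->vlan_tci1);
		} else if (m->ol_flags & PKT_RX_VLAN_PKT) {
			/* single VLAN stripped: only the first TCI is valid */
			process_vlan_tags(m->vlan_tci0, 0);
		}
	}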

diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index 72016c9..f24b00c 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -207,7 +207,7 @@  pkt_burst_flow_gen(struct fwd_stream *fs)
 		pkt->nb_segs		= 1;
 		pkt->pkt_len		= pkt_size;
 		pkt->ol_flags		= ol_flags;
-		pkt->vlan_tci		= vlan_tci;
+		pkt->vlan_tci0		= vlan_tci;
 		pkt->l2_len		= sizeof(struct ether_hdr);
 		pkt->l3_len		= sizeof(struct ipv4_hdr);
 		pkts_burst[nb_pkt]	= pkt;
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index 035e5eb..590b613 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -120,7 +120,7 @@  pkt_burst_mac_forward(struct fwd_stream *fs)
 		mb->ol_flags = ol_flags;
 		mb->l2_len = sizeof(struct ether_hdr);
 		mb->l3_len = sizeof(struct ipv4_hdr);
-		mb->vlan_tci = txp->tx_vlan_id;
+		mb->vlan_tci0 = txp->tx_vlan_id;
 	}
 	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
 	fs->tx_packets += nb_tx;
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index 6729849..c355399 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -122,7 +122,7 @@  pkt_burst_mac_swap(struct fwd_stream *fs)
 		mb->ol_flags = ol_flags;
 		mb->l2_len = sizeof(struct ether_hdr);
 		mb->l3_len = sizeof(struct ipv4_hdr);
-		mb->vlan_tci = txp->tx_vlan_id;
+		mb->vlan_tci0 = txp->tx_vlan_id;
 	}
 	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
 	fs->tx_packets += nb_tx;
diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
index ac56090..aa2cf7f 100644
--- a/app/test-pmd/rxonly.c
+++ b/app/test-pmd/rxonly.c
@@ -159,7 +159,7 @@  pkt_burst_receive(struct fwd_stream *fs)
 				       mb->hash.fdir.hash, mb->hash.fdir.id);
 		}
 		if (ol_flags & PKT_RX_VLAN_PKT)
-			printf(" - VLAN tci=0x%x", mb->vlan_tci);
+			printf(" - VLAN tci=0x%x", mb->vlan_tci0);
 		if (is_encapsulation) {
 			struct ipv4_hdr *ipv4_hdr;
 			struct ipv6_hdr *ipv6_hdr;
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index ca32c85..4a2827f 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -266,7 +266,7 @@  pkt_burst_transmit(struct fwd_stream *fs)
 		pkt->nb_segs = tx_pkt_nb_segs;
 		pkt->pkt_len = tx_pkt_length;
 		pkt->ol_flags = ol_flags;
-		pkt->vlan_tci  = vlan_tci;
+		pkt->vlan_tci0  = vlan_tci;
 		pkt->l2_len = sizeof(struct ether_hdr);
 		pkt->l3_len = sizeof(struct ipv4_hdr);
 		pkts_burst[nb_pkt] = pkt;
diff --git a/app/test/packet_burst_generator.c b/app/test/packet_burst_generator.c
index b46eed7..959644c 100644
--- a/app/test/packet_burst_generator.c
+++ b/app/test/packet_burst_generator.c
@@ -270,7 +270,7 @@  nomore_mbuf:
 		pkt->l2_len = eth_hdr_size;
 
 		if (ipv4) {
-			pkt->vlan_tci  = ETHER_TYPE_IPv4;
+			pkt->vlan_tci0  = ETHER_TYPE_IPv4;
 			pkt->l3_len = sizeof(struct ipv4_hdr);
 
 			if (vlan_enabled)
@@ -278,7 +278,7 @@  nomore_mbuf:
 			else
 				pkt->ol_flags = PKT_RX_IPV4_HDR;
 		} else {
-			pkt->vlan_tci  = ETHER_TYPE_IPv6;
+			pkt->vlan_tci0  = ETHER_TYPE_IPv6;
 			pkt->l3_len = sizeof(struct ipv6_hdr);
 
 			if (vlan_enabled)
diff --git a/lib/librte_ether/rte_ether.h b/lib/librte_ether/rte_ether.h
index 49f4576..6d682a2 100644
--- a/lib/librte_ether/rte_ether.h
+++ b/lib/librte_ether/rte_ether.h
@@ -357,7 +357,7 @@  static inline int rte_vlan_strip(struct rte_mbuf *m)
 
 	struct vlan_hdr *vh = (struct vlan_hdr *)(eh + 1);
 	m->ol_flags |= PKT_RX_VLAN_PKT;
-	m->vlan_tci = rte_be_to_cpu_16(vh->vlan_tci);
+	m->vlan_tci0 = rte_be_to_cpu_16(vh->vlan_tci);
 
 	/* Copy ether header over rather than moving whole packet */
 	memmove(rte_pktmbuf_adj(m, sizeof(struct vlan_hdr)),
@@ -404,7 +404,7 @@  static inline int rte_vlan_insert(struct rte_mbuf **m)
 	nh->ether_type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
 
 	vh = (struct vlan_hdr *) (nh + 1);
-	vh->vlan_tci = rte_cpu_to_be_16((*m)->vlan_tci);
+	vh->vlan_tci = rte_cpu_to_be_16((*m)->vlan_tci0);
 
 	return 0;
 }
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 70b0987..6eed54f 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -101,11 +101,17 @@  extern "C" {
 #define PKT_RX_TUNNEL_IPV6_HDR (1ULL << 12) /**< RX tunnel packet with IPv6 header. */
 #define PKT_RX_FDIR_ID       (1ULL << 13) /**< FD id reported if FDIR match. */
 #define PKT_RX_FDIR_FLX      (1ULL << 14) /**< Flexible bytes reported if FDIR match. */
+#define PKT_RX_QINQ_PKT     (1ULL << 15)  /**< RX packet with double VLAN stripped. */
 /* add new RX flags here */
 
 /* add new TX flags here */
 
 /**
+ * Second VLAN insertion (QinQ) flag.
+ */
+#define PKT_TX_QINQ_PKT    (1ULL << 49)
+
+/**
  * TCP segmentation offload. To enable this offload feature for a
  * packet to be transmitted on hardware supporting TSO:
  *  - set the PKT_TX_TCP_SEG flag in mbuf->ol_flags (this flag implies
@@ -268,7 +274,6 @@  struct rte_mbuf {
 
 	uint16_t data_len;        /**< Amount of data in segment buffer. */
 	uint32_t pkt_len;         /**< Total pkt len: sum of all segments. */
-	uint16_t vlan_tci;        /**< VLAN Tag Control Identifier (CPU order) */
 	uint16_t reserved;
 	union {
 		uint32_t rss;     /**< RSS hash result if RSS enabled */
@@ -289,6 +294,15 @@  struct rte_mbuf {
 		uint32_t usr;	  /**< User defined tags. See rte_distributor_process() */
 	} hash;                   /**< hash information */
 
+	/* VLAN tags */
+	union {
+		uint32_t vlan_tags;
+		struct {
+			uint16_t vlan_tci0;
+			uint16_t vlan_tci1;
+		};
+	};
+
 	uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */
 
 	/* second cache line - fields only used in slow path or on TX */
@@ -766,7 +780,8 @@  static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
 	m->next = NULL;
 	m->pkt_len = 0;
 	m->tx_offload = 0;
-	m->vlan_tci = 0;
+	m->vlan_tci0 = 0;
+	m->vlan_tci1 = 0;
 	m->nb_segs = 1;
 	m->port = 0xff;
 
@@ -838,7 +853,8 @@  static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
 	mi->data_off = m->data_off;
 	mi->data_len = m->data_len;
 	mi->port = m->port;
-	mi->vlan_tci = m->vlan_tci;
+	mi->vlan_tci0 = m->vlan_tci0;
+	mi->vlan_tci1 = m->vlan_tci1;
 	mi->tx_offload = m->tx_offload;
 	mi->hash = m->hash;
 
diff --git a/lib/librte_pmd_e1000/em_rxtx.c b/lib/librte_pmd_e1000/em_rxtx.c
index 64d067c..422f4ed 100644
--- a/lib/librte_pmd_e1000/em_rxtx.c
+++ b/lib/librte_pmd_e1000/em_rxtx.c
@@ -438,7 +438,7 @@  eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		/* If hardware offload required */
 		tx_ol_req = (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK));
 		if (tx_ol_req) {
-			hdrlen.f.vlan_tci = tx_pkt->vlan_tci;
+			hdrlen.f.vlan_tci = tx_pkt->vlan_tci0;
 			hdrlen.f.l2_len = tx_pkt->l2_len;
 			hdrlen.f.l3_len = tx_pkt->l3_len;
 			/* If new context to be built or reuse the exist ctx. */
@@ -534,7 +534,7 @@  eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		/* Set VLAN Tag offload fields. */
 		if (ol_flags & PKT_TX_VLAN_PKT) {
 			cmd_type_len |= E1000_TXD_CMD_VLE;
-			popts_spec = tx_pkt->vlan_tci << E1000_TXD_VLAN_SHIFT;
+			popts_spec = tx_pkt->vlan_tci0 << E1000_TXD_VLAN_SHIFT;
 		}
 
 		if (tx_ol_req) {
@@ -800,7 +800,7 @@  eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 				rx_desc_error_to_pkt_flags(rxd.errors);
 
 		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
-		rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
+		rxm->vlan_tci0 = rte_le_to_cpu_16(rxd.special);
 
 		/*
 		 * Store the mbuf address into the next entry of the array
@@ -1026,7 +1026,7 @@  eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 					rx_desc_error_to_pkt_flags(rxd.errors);
 
 		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
-		rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
+		rxm->vlan_tci0 = rte_le_to_cpu_16(rxd.special);
 
 		/* Prefetch data of first segment, if configured to do so. */
 		rte_packet_prefetch((char *)first_seg->buf_addr +
diff --git a/lib/librte_pmd_e1000/igb_rxtx.c b/lib/librte_pmd_e1000/igb_rxtx.c
index 80d05c0..fda273f 100644
--- a/lib/librte_pmd_e1000/igb_rxtx.c
+++ b/lib/librte_pmd_e1000/igb_rxtx.c
@@ -401,7 +401,7 @@  eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		ol_flags = tx_pkt->ol_flags;
 		l2_l3_len.l2_len = tx_pkt->l2_len;
 		l2_l3_len.l3_len = tx_pkt->l3_len;
-		vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
+		vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci0;
 		vlan_macip_lens.f.l2_l3_len = l2_l3_len.u16;
 		tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
 
@@ -784,7 +784,7 @@  eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
 		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
 		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
-		rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+		rxm->vlan_tci0 = rte_le_to_cpu_16(rxd.wb.upper.vlan);
 
 		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
 		pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
@@ -1015,10 +1015,10 @@  eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
 
 		/*
-		 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
+		 * The vlan_tci0 field is only valid when PKT_RX_VLAN_PKT is
 		 * set in the pkt_flags field.
 		 */
-		first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+		first_seg->vlan_tci0 = rte_le_to_cpu_16(rxd.wb.upper.vlan);
 		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
 		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
 		pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
diff --git a/lib/librte_pmd_enic/enic_ethdev.c b/lib/librte_pmd_enic/enic_ethdev.c
index 69ad01b..45c0e14 100644
--- a/lib/librte_pmd_enic/enic_ethdev.c
+++ b/lib/librte_pmd_enic/enic_ethdev.c
@@ -506,7 +506,7 @@  static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 				return index;
 		}
 		pkt_len = tx_pkt->pkt_len;
-		vlan_id = tx_pkt->vlan_tci;
+		vlan_id = tx_pkt->vlan_tci0;
 		ol_flags = tx_pkt->ol_flags;
 		for (frags = 0; inc_len < pkt_len; frags++) {
 			if (!tx_pkt)
diff --git a/lib/librte_pmd_enic/enic_main.c b/lib/librte_pmd_enic/enic_main.c
index 15313c2..d1660a1 100644
--- a/lib/librte_pmd_enic/enic_main.c
+++ b/lib/librte_pmd_enic/enic_main.c
@@ -490,7 +490,7 @@  static int enic_rq_indicate_buf(struct vnic_rq *rq,
 
 	if (vlan_tci) {
 		rx_pkt->ol_flags |= PKT_RX_VLAN_PKT;
-		rx_pkt->vlan_tci = vlan_tci;
+		rx_pkt->vlan_tci0 = vlan_tci;
 	}
 
 	return eop;
diff --git a/lib/librte_pmd_fm10k/fm10k_rxtx.c b/lib/librte_pmd_fm10k/fm10k_rxtx.c
index 83bddfc..ba3b8aa 100644
--- a/lib/librte_pmd_fm10k/fm10k_rxtx.c
+++ b/lib/librte_pmd_fm10k/fm10k_rxtx.c
@@ -410,7 +410,7 @@  static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
 
 	/* set vlan if requested */
 	if (mb->ol_flags & PKT_TX_VLAN_PKT)
-		q->hw_ring[q->next_free].vlan = mb->vlan_tci;
+		q->hw_ring[q->next_free].vlan = mb->vlan_tci0;
 
 	/* fill up the rings */
 	for (; mb != NULL; mb = mb->next) {
diff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c
index 493cfa3..1fe377c 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.c
+++ b/lib/librte_pmd_i40e/i40e_rxtx.c
@@ -703,7 +703,7 @@  i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
 				I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
 			mb->data_len = pkt_len;
 			mb->pkt_len = pkt_len;
-			mb->vlan_tci = rx_status &
+			mb->vlan_tci0 = rx_status &
 				(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
 			rte_le_to_cpu_16(\
 				rxdp[j].wb.qword0.lo_dword.l2tag1) : 0;
@@ -947,7 +947,7 @@  i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rxm->data_len = rx_packet_len;
 		rxm->port = rxq->port_id;
 
-		rxm->vlan_tci = rx_status &
+		rxm->vlan_tci0 = rx_status &
 			(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
 			rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
 		pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
@@ -1106,7 +1106,7 @@  i40e_recv_scattered_pkts(void *rx_queue,
 		}
 
 		first_seg->port = rxq->port_id;
-		first_seg->vlan_tci = (rx_status &
+		first_seg->vlan_tci0 = (rx_status &
 			(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
 			rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
 		pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
@@ -1291,7 +1291,7 @@  i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		/* Descriptor based VLAN insertion */
 		if (ol_flags & PKT_TX_VLAN_PKT) {
-			tx_flags |= tx_pkt->vlan_tci <<
+			tx_flags |= tx_pkt->vlan_tci0 <<
 					I40E_TX_FLAG_L2TAG1_SHIFT;
 			tx_flags |= I40E_TX_FLAG_INSERT_VLAN;
 			td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 7f15f15..fd664da 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -612,7 +612,7 @@  ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tx_offload.l2_len = tx_pkt->l2_len;
 			tx_offload.l3_len = tx_pkt->l3_len;
 			tx_offload.l4_len = tx_pkt->l4_len;
-			tx_offload.vlan_tci = tx_pkt->vlan_tci;
+			tx_offload.vlan_tci = tx_pkt->vlan_tci0;
 			tx_offload.tso_segsz = tx_pkt->tso_segsz;
 
 			/* If new context need be built or reuse the exist ctx. */
@@ -981,8 +981,7 @@  ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
 			pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len);
 			mb->data_len = pkt_len;
 			mb->pkt_len = pkt_len;
-			mb->vlan_tci = rxdp[j].wb.upper.vlan;
-			mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
+			mb->vlan_tci0 = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
 
 			/* convert descriptor fields to rte mbuf flags */
 			pkt_flags  = rx_desc_hlen_type_rss_to_pkt_flags(
@@ -1327,7 +1326,7 @@  ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 		hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
 		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
-		rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+		rxm->vlan_tci0 = rte_le_to_cpu_16(rxd.wb.upper.vlan);
 
 		pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
 		pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
@@ -1412,10 +1411,10 @@  ixgbe_fill_cluster_head_buf(
 	head->port = port_id;
 
 	/*
-	 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
+	 * The vlan_tci0 field is only valid when PKT_RX_VLAN_PKT is
 	 * set in the pkt_flags field.
 	 */
-	head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
+	head->vlan_tci0 = rte_le_to_cpu_16(desc->wb.upper.vlan);
 	hlen_type_rss = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
 	pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
 	pkt_flags |= rx_desc_status_to_pkt_flags(staterr);
diff --git a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
index d8019f5..57a33c9 100644
--- a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
+++ b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
@@ -405,7 +405,7 @@  vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			/* Add VLAN tag if requested */
 			if (txm->ol_flags & PKT_TX_VLAN_PKT) {
 				txd->ti = 1;
-				txd->tci = rte_cpu_to_le_16(txm->vlan_tci);
+				txd->tci = rte_cpu_to_le_16(txm->vlan_tci0);
 			}
 
 			/* Record current mbuf for freeing it later in tx complete */
@@ -629,10 +629,10 @@  vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 				   rcd->tci);
 			rxm->ol_flags = PKT_RX_VLAN_PKT;
 			/* Copy vlan tag in packet buffer */
-			rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
+			rxm->vlan_tci0 = rte_le_to_cpu_16((uint16_t)rcd->tci);
 		} else {
 			rxm->ol_flags = 0;
-			rxm->vlan_tci = 0;
+			rxm->vlan_tci0 = 0;
 		}
 
 		/* Initialize newly received packet buffer */