@@ -432,8 +432,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
}
/* Combine the packet header writes. VLAN is not considered here */
- mb->vlan_macip.f.l2_len = l2_len;
- mb->vlan_macip.f.l3_len = l3_len;
+ mb->l2_len = l2_len;
+ mb->l3_len = l3_len;
mb->ol_flags = ol_flags;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
@@ -205,13 +205,13 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_size -
sizeof(*eth_hdr) -
sizeof(*ip_hdr));
- pkt->nb_segs = 1;
- pkt->pkt_len = pkt_size;
- pkt->ol_flags = ol_flags;
- pkt->vlan_macip.f.vlan_tci = vlan_tci;
- pkt->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
- pkts_burst[nb_pkt] = pkt;
+ pkt->nb_segs = 1;
+ pkt->pkt_len = pkt_size;
+ pkt->ol_flags = ol_flags;
+ pkt->vlan_tci = vlan_tci;
+ pkt->l2_len = sizeof(struct ether_hdr);
+ pkt->l3_len = sizeof(struct ipv4_hdr);
+ pkts_burst[nb_pkt] = pkt;
next_flow = (next_flow + 1) % cfg_n_flows;
}
@@ -116,9 +116,9 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
ether_addr_copy(&ports[fs->tx_port].eth_addr,
&eth_hdr->s_addr);
mb->ol_flags = txp->tx_ol_flags;
- mb->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- mb->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
- mb->vlan_macip.f.vlan_tci = txp->tx_vlan_id;
+ mb->l2_len = sizeof(struct ether_hdr);
+ mb->l3_len = sizeof(struct ipv4_hdr);
+ mb->vlan_tci = txp->tx_vlan_id;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
fs->tx_packets += nb_tx;
@@ -118,9 +118,9 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
ether_addr_copy(&addr, &eth_hdr->s_addr);
mb->ol_flags = txp->tx_ol_flags;
- mb->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- mb->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
- mb->vlan_macip.f.vlan_tci = txp->tx_vlan_id;
+ mb->l2_len = sizeof(struct ether_hdr);
+ mb->l3_len = sizeof(struct ipv4_hdr);
+ mb->vlan_tci = txp->tx_vlan_id;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
fs->tx_packets += nb_tx;
@@ -165,8 +165,7 @@ pkt_burst_receive(struct fwd_stream *fs)
printf(" - FDIR hash=0x%x - FDIR id=0x%x ",
mb->hash.fdir.hash, mb->hash.fdir.id);
if (ol_flags & PKT_RX_VLAN_PKT)
- printf(" - VLAN tci=0x%x",
- mb->vlan_macip.f.vlan_tci);
+ printf(" - VLAN tci=0x%x", mb->vlan_tci);
printf("\n");
if (ol_flags != 0) {
int rxf;
@@ -406,7 +406,9 @@ testpmd_mbuf_ctor(struct rte_mempool *mp,
mb->ol_flags = 0;
mb->data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
mb->nb_segs = 1;
- mb->vlan_macip.data = 0;
+ mb->l2_len = 0;
+ mb->l3_len = 0;
+ mb->vlan_tci = 0;
mb->hash.rss = 0;
}
@@ -263,9 +263,9 @@ pkt_burst_transmit(struct fwd_stream *fs)
pkt->nb_segs = tx_pkt_nb_segs;
pkt->pkt_len = tx_pkt_length;
pkt->ol_flags = ol_flags;
- pkt->vlan_macip.f.vlan_tci = vlan_tci;
- pkt->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ pkt->vlan_tci = vlan_tci;
+ pkt->l2_len = sizeof(struct ether_hdr);
+ pkt->l3_len = sizeof(struct ipv4_hdr);
pkts_burst[nb_pkt] = pkt;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
@@ -260,19 +260,19 @@ nomore_mbuf:
*/
pkt->nb_segs = tx_pkt_nb_segs;
pkt->pkt_len = tx_pkt_length;
- pkt->vlan_macip.f.l2_len = eth_hdr_size;
+ pkt->l2_len = eth_hdr_size;
if (ipv4) {
- pkt->vlan_macip.f.vlan_tci = ETHER_TYPE_IPv4;
- pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ pkt->vlan_tci = ETHER_TYPE_IPv4;
+ pkt->l3_len = sizeof(struct ipv4_hdr);
if (vlan_enabled)
pkt->ol_flags = PKT_RX_IPV4_HDR | PKT_RX_VLAN_PKT;
else
pkt->ol_flags = PKT_RX_IPV4_HDR;
} else {
- pkt->vlan_macip.f.vlan_tci = ETHER_TYPE_IPv6;
- pkt->vlan_macip.f.l3_len = sizeof(struct ipv6_hdr);
+ pkt->vlan_tci = ETHER_TYPE_IPv6;
+ pkt->l3_len = sizeof(struct ipv6_hdr);
if (vlan_enabled)
pkt->ol_flags = PKT_RX_IPV6_HDR | PKT_RX_VLAN_PKT;
@@ -413,7 +413,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
rte_panic("No headroom in mbuf.\n");
}
- m->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+ m->l2_len = sizeof(struct ether_hdr);
/* 02:00:00:00:00:xx */
d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
@@ -255,8 +255,8 @@ app_pkt_metadata_fill(struct rte_mbuf *m)
/* Pop Ethernet header */
if (app.ether_hdr_pop_push) {
rte_pktmbuf_adj(m, (uint16_t)sizeof(struct ether_hdr));
- m->vlan_macip.f.l2_len = 0;
- m->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ m->l2_len = 0;
+ m->l3_len = sizeof(struct ipv4_hdr);
}
}
@@ -66,7 +66,7 @@ app_pkt_metadata_flush(struct rte_mbuf *pkt)
ether_addr_copy(&pkt_meta->nh_arp, &ether_hdr->d_addr);
ether_addr_copy(&local_ether_addr, &ether_hdr->s_addr);
ether_hdr->ether_type = rte_bswap16(ETHER_TYPE_IPv4);
- pkt->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+ pkt->l2_len = sizeof(struct ether_hdr);
}
static int
@@ -412,8 +412,8 @@ reassemble(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
dr = &qconf->death_row;
/* prepare mbuf: set up l2_len/l3_len. */
- m->vlan_macip.f.l2_len = sizeof(*eth_hdr);
- m->vlan_macip.f.l3_len = sizeof(*ip_hdr);
+ m->l2_len = sizeof(*eth_hdr);
+ m->l3_len = sizeof(*ip_hdr);
/* process this fragment. */
mo = rte_ipv4_frag_reassemble_packet(tbl, dr, m, tms, ip_hdr);
@@ -455,8 +455,8 @@ reassemble(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
dr = &qconf->death_row;
/* prepare mbuf: set up l2_len/l3_len. */
- m->vlan_macip.f.l2_len = sizeof(*eth_hdr);
- m->vlan_macip.f.l3_len = sizeof(*ip_hdr) + sizeof(*frag_hdr);
+ m->l2_len = sizeof(*eth_hdr);
+ m->l3_len = sizeof(*ip_hdr) + sizeof(*frag_hdr);
mo = rte_ipv6_frag_reassemble_packet(tbl, dr, m, tms, ip_hdr, frag_hdr);
if (mo == NULL)
@@ -338,7 +338,9 @@ mcast_out_pkt(struct rte_mbuf *pkt, int use_clone)
/* copy metadata from source packet */
hdr->port = pkt->port;
- hdr->vlan_macip = pkt->vlan_macip;
+ hdr->vlan_tci = pkt->vlan_tci;
+ hdr->l2_len = pkt->l2_len;
+ hdr->l3_len = pkt->l3_len;
hdr->hash = pkt->hash;
hdr->ol_flags = pkt->ol_flags;
@@ -2690,9 +2690,9 @@ virtio_tx_route_zcp(struct virtio_net *dev, struct rte_mbuf *m,
mbuf->buf_addr = m->buf_addr;
}
mbuf->ol_flags = PKT_TX_VLAN_PKT;
- mbuf->vlan_macip.f.vlan_tci = vlan_tag;
- mbuf->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- mbuf->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ mbuf->vlan_tci = vlan_tag;
+ mbuf->l2_len = sizeof(struct ether_hdr);
+ mbuf->l3_len = sizeof(struct ipv4_hdr);
MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;
tx_q->m_table[len] = mbuf;
@@ -173,8 +173,7 @@ ip_frag_chain(struct rte_mbuf *mn, struct rte_mbuf *mp)
struct rte_mbuf *ms;
/* adjust start of the last fragment data. */
- rte_pktmbuf_adj(mp, (uint16_t)(mp->vlan_macip.f.l2_len +
- mp->vlan_macip.f.l3_len));
+ rte_pktmbuf_adj(mp, (uint16_t)(mp->l2_len + mp->l3_len));
/* chain two fragments. */
ms = rte_pktmbuf_lastseg(mn);
@@ -198,7 +198,7 @@ rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
out_pkt->pkt_len - sizeof(struct ipv4_hdr));
out_pkt->ol_flags |= PKT_TX_IP_CKSUM;
- out_pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ out_pkt->l3_len = sizeof(struct ipv4_hdr);
/* Write the fragment to the output list */
pkts_out[out_pkt_pos] = out_pkt;
@@ -87,10 +87,10 @@ ipv4_frag_reassemble(const struct ip_frag_pkt *fp)
/* update ipv4 header for the reassembled packet */
ip_hdr = (struct ipv4_hdr*)(rte_pktmbuf_mtod(m, uint8_t *) +
- m->vlan_macip.f.l2_len);
+ m->l2_len);
ip_hdr->total_length = rte_cpu_to_be_16((uint16_t)(fp->total_size +
- m->vlan_macip.f.l3_len));
+ m->l3_len));
ip_hdr->fragment_offset = (uint16_t)(ip_hdr->fragment_offset &
rte_cpu_to_be_16(IPV4_HDR_DF_FLAG));
ip_hdr->hdr_checksum = 0;
@@ -137,7 +137,7 @@ rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
ip_ofs *= IPV4_HDR_OFFSET_UNITS;
ip_len = (uint16_t)(rte_be_to_cpu_16(ip_hdr->total_length) -
- mb->vlan_macip.f.l3_len);
+ mb->l3_len);
IP_FRAG_LOG(DEBUG, "%s:%d:\n"
"mbuf: %p, tms: %" PRIu64
@@ -109,7 +109,7 @@ ipv6_frag_reassemble(const struct ip_frag_pkt *fp)
/* update ipv6 header for the reassembled datagram */
ip_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(m, uint8_t *) +
- m->vlan_macip.f.l2_len);
+ m->l2_len);
ip_hdr->payload_len = rte_cpu_to_be_16(payload_len);
@@ -120,8 +120,7 @@ ipv6_frag_reassemble(const struct ip_frag_pkt *fp)
* other headers, so we assume there are no other headers and thus update
* the main IPv6 header instead.
*/
- move_len = m->vlan_macip.f.l2_len + m->vlan_macip.f.l3_len -
- sizeof(*frag_hdr);
+ move_len = m->l2_len + m->l3_len - sizeof(*frag_hdr);
frag_hdr = (struct ipv6_extension_fragment *) (ip_hdr + 1);
ip_hdr->proto = frag_hdr->next_header;
@@ -111,27 +111,6 @@ extern "C" {
*/
#define PKT_TX_OFFLOAD_MASK (PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)
-/** Offload features */
-union rte_vlan_macip {
- uint32_t data;
- struct {
- uint16_t l3_len:9; /**< L3 (IP) Header Length. */
- uint16_t l2_len:7; /**< L2 (MAC) Header Length. */
- uint16_t vlan_tci;
- /**< VLAN Tag Control Identifier (CPU order). */
- } f;
-};
-
-/*
- * Compare mask for vlan_macip_len.data,
- * should be in sync with rte_vlan_macip.f layout.
- * */
-#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN length - 16-bits. */
-#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC length - 7-bits. */
-#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP length - 9-bits. */
-/**< MAC+IP length. */
-#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
-
/**
* The generic rte_mbuf, containing a packet mbuf.
*/
@@ -170,7 +149,9 @@ struct rte_mbuf {
uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
/* offload features, valid for first segment only */
- union rte_vlan_macip vlan_macip;
+ uint16_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint16_t l2_len:7; /**< L2 (MAC) Header Length. */
+ uint16_t vlan_tci; /**< VLAN Tag Control Identifier (CPU order). */
union {
uint32_t rss; /**< RSS hash result if RSS enabled */
struct {
@@ -540,7 +521,9 @@ static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
m->next = NULL;
m->pkt_len = 0;
- m->vlan_macip.data = 0;
+ m->l2_len = 0;
+ m->l3_len = 0;
+ m->vlan_tci = 0;
m->nb_segs = 1;
m->port = 0xff;
@@ -607,7 +590,9 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *md)
mi->data = md->data;
mi->data_len = md->data_len;
mi->port = md->port;
- mi->vlan_macip = md->vlan_macip;
+ mi->vlan_tci = md->vlan_tci;
+ mi->l2_len = md->l2_len;
+ mi->l3_len = md->l3_len;
mi->hash = md->hash;
mi->next = NULL;
@@ -144,13 +144,34 @@ enum {
EM_CTX_NUM = 1, /**< CTX NUM */
};
+/** Offload features */
+union em_vlan_macip {
+ uint32_t data;
+ struct {
+ uint16_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint16_t l2_len:7; /**< L2 (MAC) Header Length. */
+ uint16_t vlan_tci;
+ /**< VLAN Tag Control Identifier (CPU order). */
+ } f;
+};
+
+/*
+ * Compare mask for em_vlan_macip.data,
+ * should be in sync with em_vlan_macip.f layout.
+ */
+#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN length - 16-bits. */
+#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC length - 7-bits. */
+#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP length - 9-bits. */
+/** MAC+IP length. */
+#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
+
/**
* Structure to check if a new context needs to be built
*/
struct em_ctx_info {
uint16_t flags; /**< ol_flags related to context build. */
uint32_t cmp_mask; /**< compare mask */
- union rte_vlan_macip hdrlen; /**< L2 and L3 header lenghts */
+ union em_vlan_macip hdrlen; /**< L2 and L3 header lengths */
};
/**
@@ -219,7 +240,7 @@ static inline void
em_set_xmit_ctx(struct em_tx_queue* txq,
volatile struct e1000_context_desc *ctx_txd,
uint16_t flags,
- union rte_vlan_macip hdrlen)
+ union em_vlan_macip hdrlen)
{
uint32_t cmp_mask, cmd_len;
uint16_t ipcse, l2len;
@@ -285,7 +306,7 @@ em_set_xmit_ctx(struct em_tx_queue* txq,
*/
static inline uint32_t
what_ctx_update(struct em_tx_queue *txq, uint16_t flags,
- union rte_vlan_macip hdrlen)
+ union em_vlan_macip hdrlen)
{
/* If match with the current context */
if (likely (txq->ctx_cache.flags == flags &&
@@ -391,7 +412,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t tx_ol_req;
uint32_t ctx;
uint32_t new_ctx;
- union rte_vlan_macip hdrlen;
+ union em_vlan_macip hdrlen;
txq = tx_queue;
sw_ring = txq->sw_ring;
@@ -421,7 +442,9 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_ol_req = (uint16_t)(ol_flags & (PKT_TX_IP_CKSUM |
PKT_TX_L4_MASK));
if (tx_ol_req) {
- hdrlen = tx_pkt->vlan_macip;
+ hdrlen.f.vlan_tci = tx_pkt->vlan_tci;
+ hdrlen.f.l2_len = tx_pkt->l2_len;
+ hdrlen.f.l3_len = tx_pkt->l3_len;
/* If a new context needs to be built, or the existing ctx can be reused. */
ctx = what_ctx_update(txq, tx_ol_req, hdrlen);
@@ -516,8 +539,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Set VLAN Tag offload fields. */
if (ol_flags & PKT_TX_VLAN_PKT) {
cmd_type_len |= E1000_TXD_CMD_VLE;
- popts_spec = tx_pkt->vlan_macip.f.vlan_tci <<
- E1000_TXD_VLAN_SHIFT;
+ popts_spec = tx_pkt->vlan_tci << E1000_TXD_VLAN_SHIFT;
}
if (tx_ol_req) {
@@ -784,7 +806,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rx_desc_error_to_pkt_flags(rxd.errors));
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
+ rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
/*
* Store the mbuf address into the next entry of the array
@@ -1010,7 +1032,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rx_desc_error_to_pkt_flags(rxd.errors));
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
+ rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
/* Prefetch data of first segment, if configured to do so. */
rte_packet_prefetch(first_seg->data);
@@ -153,13 +153,34 @@ enum igb_advctx_num {
IGB_CTX_NUM = 2, /**< CTX_NUM */
};
+/** Offload features */
+union igb_vlan_macip {
+ uint32_t data;
+ struct {
+ uint16_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint16_t l2_len:7; /**< L2 (MAC) Header Length. */
+ uint16_t vlan_tci;
+ /**< VLAN Tag Control Identifier (CPU order). */
+ } f;
+};
+
+/*
+ * Compare mask for vlan_macip_lens.data,
+ * should be in sync with igb_vlan_macip.f layout.
+ */
+#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN length - 16-bits. */
+#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC length - 7-bits. */
+#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP length - 9-bits. */
+/** MAC+IP length. */
+#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
+
/**
* Structure to check if a new context needs to be built
*/
struct igb_advctx_info {
uint16_t flags; /**< ol_flags related to context build. */
uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */
- union rte_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
+ union igb_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
};
/**
@@ -342,6 +363,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
volatile union e1000_adv_tx_desc *txd;
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
+ union igb_vlan_macip vlan_macip_lens;
uint64_t buf_dma_addr;
uint32_t olinfo_status;
uint32_t cmd_type_len;
@@ -355,7 +377,6 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t tx_ol_req;
uint32_t new_ctx = 0;
uint32_t ctx = 0;
- uint32_t vlan_macip_lens;
txq = tx_queue;
sw_ring = txq->sw_ring;
@@ -380,13 +401,15 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
ol_flags = tx_pkt->ol_flags;
- vlan_macip_lens = tx_pkt->vlan_macip.data;
+ vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
+ vlan_macip_lens.f.l2_len = tx_pkt->l2_len;
+ vlan_macip_lens.f.l3_len = tx_pkt->l3_len;
tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
/* If a Context Descriptor needs to be built. */
if (tx_ol_req) {
ctx = what_advctx_update(txq, tx_ol_req,
- vlan_macip_lens);
+ vlan_macip_lens.data);
/* Only allocate context descriptor if required */
new_ctx = (ctx == IGB_CTX_NUM);
ctx = txq->ctx_curr;
@@ -502,7 +525,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
- vlan_macip_lens);
+ vlan_macip_lens.data);
txe->last_id = tx_last;
tx_id = txe->next_id;
@@ -764,8 +787,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->vlan_macip.f.vlan_tci =
- rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
pkt_flags = (uint16_t)(pkt_flags |
@@ -1001,8 +1023,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
* set in the pkt_flags field.
*/
- first_seg->vlan_macip.f.vlan_tci =
- rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
pkt_flags = (uint16_t)(pkt_flags |
@@ -613,7 +613,7 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
mb->data_len = pkt_len;
mb->pkt_len = pkt_len;
- mb->vlan_macip.f.vlan_tci = rx_status &
+ mb->vlan_tci = rx_status &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
rte_le_to_cpu_16(\
rxdp[j].wb.qword0.lo_dword.l2tag1) : 0;
@@ -850,7 +850,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxm->data_len = rx_packet_len;
rxm->port = rxq->port_id;
- rxm->vlan_macip.f.vlan_tci = rx_status &
+ rxm->vlan_tci = rx_status &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
@@ -1003,7 +1003,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
}
first_seg->port = rxq->port_id;
- first_seg->vlan_macip.f.vlan_tci = (rx_status &
+ first_seg->vlan_tci = (rx_status &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
@@ -1105,8 +1105,8 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
ol_flags = tx_pkt->ol_flags;
- l2_len = tx_pkt->vlan_macip.f.l2_len;
- l3_len = tx_pkt->vlan_macip.f.l3_len;
+ l2_len = tx_pkt->l2_len;
+ l3_len = tx_pkt->l3_len;
/* Calculate the number of context descriptors needed. */
nb_ctx = i40e_calc_context_desc(ol_flags);
@@ -1142,8 +1142,8 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Descriptor based VLAN insertion */
if (ol_flags & PKT_TX_VLAN_PKT) {
- tx_flags |= tx_pkt->vlan_macip.f.vlan_tci <<
- I40E_TX_FLAG_L2TAG1_SHIFT;
+ tx_flags |= tx_pkt->vlan_tci <<
+ I40E_TX_FLAG_L2TAG1_SHIFT;
tx_flags |= I40E_TX_FLAG_INSERT_VLAN;
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
td_tag = (tx_flags & I40E_TX_FLAG_L2TAG1_MASK) >>
@@ -540,6 +540,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
volatile union ixgbe_adv_tx_desc *txd;
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
+ union ixgbe_vlan_macip vlan_macip_lens;
uint64_t buf_dma_addr;
uint32_t olinfo_status;
uint32_t cmd_type_len;
@@ -551,7 +552,6 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_tx;
uint16_t nb_used;
uint16_t tx_ol_req;
- uint32_t vlan_macip_lens;
uint32_t ctx = 0;
uint32_t new_ctx;
@@ -579,14 +579,16 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* are needed for offload functionality.
*/
ol_flags = tx_pkt->ol_flags;
- vlan_macip_lens = tx_pkt->vlan_macip.data;
+ vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
+ vlan_macip_lens.f.l2_len = tx_pkt->l2_len;
+ vlan_macip_lens.f.l3_len = tx_pkt->l3_len;
/* If hardware offload required */
tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
if (tx_ol_req) {
/* If a new context needs to be built, or the existing ctx can be reused. */
ctx = what_advctx_update(txq, tx_ol_req,
- vlan_macip_lens);
+ vlan_macip_lens.data);
/* Only allocate context descriptor if required */
new_ctx = (ctx == IXGBE_CTX_NUM);
ctx = txq->ctx_curr;
@@ -728,7 +730,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
- vlan_macip_lens);
+ vlan_macip_lens.data);
txe->last_id = tx_last;
tx_id = txe->next_id;
@@ -939,7 +941,7 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
rxq->crc_len);
mb->data_len = pkt_len;
mb->pkt_len = pkt_len;
- mb->vlan_macip.f.vlan_tci = rxdp[j].wb.upper.vlan;
+ mb->vlan_tci = rxdp[j].wb.upper.vlan;
mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
/* convert descriptor fields to rte mbuf flags */
@@ -1257,8 +1259,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->vlan_macip.f.vlan_tci =
- rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
pkt_flags = (uint16_t)(pkt_flags |
@@ -1502,8 +1503,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
* set in the pkt_flags field.
*/
- first_seg->vlan_macip.f.vlan_tci =
- rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
pkt_flags = (uint16_t)(pkt_flags |
@@ -153,6 +153,27 @@ enum ixgbe_advctx_num {
IXGBE_CTX_NUM = 2, /**< CTX NUMBER */
};
+/** Offload features */
+union ixgbe_vlan_macip {
+ uint32_t data;
+ struct {
+ uint16_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint16_t l2_len:7; /**< L2 (MAC) Header Length. */
+ uint16_t vlan_tci;
+ /**< VLAN Tag Control Identifier (CPU order). */
+ } f;
+};
+
+/*
+ * Compare mask for vlan_macip_lens.data,
+ * should be in sync with ixgbe_vlan_macip.f layout.
+ */
+#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN length - 16-bits. */
+#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC length - 7-bits. */
+#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP length - 9-bits. */
+/** MAC+IP length. */
+#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
+
/**
* Structure to check if a new context needs to be built
*/
@@ -160,7 +181,7 @@ enum ixgbe_advctx_num {
struct ixgbe_advctx_info {
uint16_t flags; /**< ol_flags for context build. */
uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */
- union rte_vlan_macip vlan_macip_lens; /**< vlan, mac ip length. */
+ union ixgbe_vlan_macip vlan_macip_lens; /**< vlan, mac ip length. */
};
/**
@@ -550,8 +550,8 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rte_pktmbuf_mtod(rxm, void *));
#endif
/* Copy vlan tag in packet buffer */
- rxm->vlan_macip.f.vlan_tci =
- rte_le_to_cpu_16((uint16_t)rcd->tci);
+ rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
} else
rxm->ol_flags = 0;
@@ -563,7 +563,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxm->pkt_len = (uint16_t)rcd->len;
rxm->data_len = (uint16_t)rcd->len;
rxm->port = rxq->port_id;
- rxm->vlan_macip.f.vlan_tci = 0;
+ rxm->vlan_tci = 0;
rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
rx_pkts[nb_rx++] = rxm;