@@ -100,9 +100,9 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
tx_offloads = ports[fs->tx_port].dev_conf.txmode.offloads;
if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
- ol_flags |= PKT_TX_VLAN_PKT;
+ ol_flags |= PKT_TX_VLAN;
if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
- ol_flags |= PKT_TX_QINQ_PKT;
+ ol_flags |= PKT_TX_QINQ;
if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
@@ -73,9 +73,9 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
txp = &ports[fs->tx_port];
tx_offloads = txp->dev_conf.txmode.offloads;
if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
- ol_flags = PKT_TX_VLAN_PKT;
+ ol_flags = PKT_TX_VLAN;
if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
- ol_flags |= PKT_TX_QINQ_PKT;
+ ol_flags |= PKT_TX_QINQ;
if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
for (i = 0; i < nb_rx; i++) {
@@ -353,9 +353,9 @@ pkt_burst_transmit(struct fwd_stream *fs)
vlan_tci = txp->tx_vlan_id;
vlan_tci_outer = txp->tx_vlan_id_outer;
if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
- ol_flags = PKT_TX_VLAN_PKT;
+ ol_flags = PKT_TX_VLAN;
if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
- ol_flags |= PKT_TX_QINQ_PKT;
+ ol_flags |= PKT_TX_QINQ;
if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
ol_flags |= PKT_TX_MACSEC;
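
These three testpmd hunks follow one pattern: the DEV_TX_OFFLOAD_* bits the user enabled in the port's txmode are translated into per-mbuf PKT_TX_* flags before the burst is built. A minimal sketch of that pattern with the renamed flags (standalone, with the configured offload mask passed in by the caller):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/*
 * Sketch: map the Tx offloads configured in rte_eth_conf.txmode.offloads
 * to the per-mbuf flag names introduced by this patch.
 */
static inline uint64_t
tx_conf_to_ol_flags(uint64_t tx_offloads)
{
	uint64_t ol_flags = 0;

	if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
		ol_flags |= PKT_TX_VLAN;	/* was PKT_TX_VLAN_PKT */
	if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
		ol_flags |= PKT_TX_QINQ;	/* was PKT_TX_QINQ_PKT */
	if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
		ol_flags |= PKT_TX_MACSEC;	/* name unchanged */
	return ol_flags;
}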
@@ -159,11 +159,6 @@ Deprecation Notices
will be limited to maximum 256 queues.
Also compile time flag ``RTE_ETHDEV_QUEUE_STAT_CNTRS`` will be removed.
-* ethdev: The offload flag ``PKT_RX_EIP_CKSUM_BAD`` will be removed and
- replaced by the new flag ``PKT_RX_OUTER_IP_CKSUM_BAD``. The new name is more
- consistent with existing outer header checksum status flag naming, which
- should help in reducing confusion about its usage.
-
* i40e: As there are both i40evf and iavf pmd, the functions of them are
duplicated. And now more and more advanced features are developed on iavf.
To keep consistent with kernel driver's name
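
The notice removed above is resolved by the rte_mbuf_core.h hunk at the end of this patch: PKT_RX_EIP_CKSUM_BAD is dropped in favour of PKT_RX_OUTER_IP_CKSUM_BAD, and the standalone single-bit checksum flags give way to the 2-bit status masks. A hedged sketch of the resulting RX-path checks:

#include <rte_mbuf.h>

/*
 * Sketch: test the renamed outer-IP flag directly, and read the L4
 * checksum status through PKT_RX_L4_CKSUM_MASK instead of one bit.
 */
static inline int
rx_cksum_ok(const struct rte_mbuf *m)
{
	if (m->ol_flags & PKT_RX_OUTER_IP_CKSUM_BAD)
		return 0;	/* outer IP header checksum is wrong */

	switch (m->ol_flags & PKT_RX_L4_CKSUM_MASK) {
	case PKT_RX_L4_CKSUM_BAD:
		return 0;
	case PKT_RX_L4_CKSUM_GOOD:
		return 1;
	default:
		/* UNKNOWN or NONE: verify in software if it matters */
		return 1;
	}
}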
@@ -224,7 +224,7 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
/* insert vlan info if necessary */
- if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ if (mbuf->ol_flags & PKT_TX_VLAN) {
if (rte_vlan_insert(&mbuf)) {
rte_pktmbuf_free(mbuf);
continue;
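
af_packet has no hardware tag insertion, so PKT_TX_VLAN triggers a software insert; the dpaa2, vhost and virtio hunks below use the same fallback. For reference, the core of that pattern — rte_vlan_insert() builds the 802.1Q header from m->vlan_tci and may change the mbuf pointer, hence the double indirection:

#include <rte_ether.h>
#include <rte_mbuf.h>

/* Sketch: insert the VLAN tag in software when the device cannot. */
static int
sw_vlan_insert(struct rte_mbuf **m)
{
	if (!((*m)->ol_flags & PKT_TX_VLAN))
		return 0;		/* nothing requested */
	return rte_vlan_insert(m);	/* 0 on success, negative on error */
}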
@@ -1674,7 +1674,7 @@ avp_dev_copy_to_buffers(struct avp_dev *avp,
first_buf->nb_segs = count;
first_buf->pkt_len = total_length;
- if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ if (mbuf->ol_flags & PKT_TX_VLAN) {
first_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
first_buf->vlan_tci = mbuf->vlan_tci;
}
@@ -1905,7 +1905,7 @@ avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
pkt_buf->nb_segs = 1;
pkt_buf->next = NULL;
- if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m->ol_flags & PKT_TX_VLAN) {
pkt_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
pkt_buf->vlan_tci = m->vlan_tci;
}
@@ -811,7 +811,7 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
rte_wmb();
- if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+ if (mbuf->ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
/* Mark it as a CONTEXT descriptor */
AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
CTXT, 1);
@@ -2189,7 +2189,7 @@ int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf *m0)
tx_start_bd->nbd = rte_cpu_to_le_16(2);
- if (m0->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m0->ol_flags & PKT_TX_VLAN) {
tx_start_bd->vlan_or_ethertype =
rte_cpu_to_le_16(m0->vlan_tci);
tx_start_bd->bd_flags.as_bitfield |=
@@ -110,10 +110,10 @@ bnxt_xmit_need_long_bd(struct rte_mbuf *tx_pkt, struct bnxt_tx_queue *txq)
{
if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
- PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM |
+ PKT_TX_VLAN | PKT_TX_OUTER_IP_CKSUM |
PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN |
PKT_TX_TUNNEL_GENEVE | PKT_TX_IEEE1588_TMST |
- PKT_TX_QINQ_PKT) ||
+ PKT_TX_QINQ) ||
(BNXT_TRUFLOW_EN(txq->bp) &&
(txq->bp->tx_cfa_action || txq->vfr_tx_cfa_action)))
return true;
@@ -200,13 +200,13 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
vlan_tag_flags = 0;
/* HW can accelerate only outer vlan in QinQ mode */
- if (tx_pkt->ol_flags & PKT_TX_QINQ_PKT) {
+ if (tx_pkt->ol_flags & PKT_TX_QINQ) {
vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
tx_pkt->vlan_tci_outer;
outer_tpid_bd = txq->bp->outer_tpid_bd &
BNXT_OUTER_TPID_BD_MASK;
vlan_tag_flags |= outer_tpid_bd;
- } else if (tx_pkt->ol_flags & PKT_TX_VLAN_PKT) {
+ } else if (tx_pkt->ol_flags & PKT_TX_VLAN) {
/* shurd: Should this mask at
* TX_BD_LONG_CFA_META_VLAN_VID_MASK?
*/
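
On the application side, the QinQ case handled here is requested per mbuf by filling both TCI fields; per the rte_mbuf documentation further down, vlan_tci and vlan_tci_outer must both be valid when PKT_TX_QINQ is set. A sketch — setting PKT_TX_VLAN alongside PKT_TX_QINQ mirrors what testpmd does when both offloads are enabled, and whether the inner tag is also inserted by hardware is device-specific, as the bnxt comment above notes:

#include <rte_mbuf.h>

/* Sketch: request double VLAN insertion for one packet. */
static inline void
request_qinq(struct rte_mbuf *m, uint16_t inner_tci, uint16_t outer_tci)
{
	m->vlan_tci = inner_tci;		/* inner tag */
	m->vlan_tci_outer = outer_tci;		/* outer tag */
	m->ol_flags |= PKT_TX_VLAN | PKT_TX_QINQ;
}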
@@ -1037,7 +1037,7 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
}
- if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ if (mbuf->ol_flags & PKT_TX_VLAN) {
txq->stats.vlan_ins++;
cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(mbuf->vlan_tci);
}
@@ -1258,7 +1258,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
txq->stats.tx_cso += m->tso_segsz;
}
- if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m->ol_flags & PKT_TX_VLAN) {
txq->stats.vlan_ins++;
cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->vlan_tci);
}
@@ -1228,7 +1228,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
(*bufs)->nb_segs == 1 &&
rte_mbuf_refcnt_read((*bufs)) == 1)) {
if (unlikely(((*bufs)->ol_flags
- & PKT_TX_VLAN_PKT) ||
+ & PKT_TX_VLAN) ||
(eth_data->dev_conf.txmode.offloads
& DEV_TX_OFFLOAD_VLAN_INSERT))) {
ret = rte_vlan_insert(bufs);
@@ -1271,7 +1271,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
goto send_n_return;
}
- if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
+ if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN) ||
(eth_data->dev_conf.txmode.offloads
& DEV_TX_OFFLOAD_VLAN_INSERT))) {
int ret = rte_vlan_insert(bufs);
@@ -1532,7 +1532,7 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
(*bufs)->nb_segs == 1 &&
rte_mbuf_refcnt_read((*bufs)) == 1)) {
if (unlikely((*bufs)->ol_flags
- & PKT_TX_VLAN_PKT)) {
+ & PKT_TX_VLAN)) {
ret = rte_vlan_insert(bufs);
if (ret)
goto send_n_return;
@@ -55,7 +55,7 @@
PKT_TX_IPV4 | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
- PKT_TX_VLAN_PKT)
+ PKT_TX_VLAN)
#define E1000_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ E1000_TX_OFFLOAD_MASK)
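
Mask pairs like this back each driver's tx_prepare callback: any flag outside the supported set is rejected before the packet reaches the descriptor ring. An illustrative sketch of that check, with the driver-specific NOTSUP macro (E1000_TX_OFFLOAD_NOTSUP_MASK above) passed in as a parameter:

#include <errno.h>
#include <rte_errno.h>
#include <rte_mbuf.h>

/* Illustrative only: a driver's prep loop rejecting unsupported flags. */
static uint16_t
drv_prep_pkts(struct rte_mbuf **tx_pkts, uint16_t nb_pkts,
	      uint64_t drv_notsup_mask)
{
	uint16_t i;

	for (i = 0; i < nb_pkts; i++) {
		if (tx_pkts[i]->ol_flags & drv_notsup_mask) {
			rte_errno = ENOTSUP;
			break;	/* i = count of packets that passed */
		}
	}
	return i;
}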
@@ -506,7 +506,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
popts_spec = 0;
/* Set VLAN Tag offload fields. */
- if (ol_flags & PKT_TX_VLAN_PKT) {
+ if (ol_flags & PKT_TX_VLAN) {
cmd_type_len |= E1000_TXD_CMD_VLE;
popts_spec = tx_pkt->vlan_tci << E1000_TXD_VLAN_SHIFT;
}
@@ -54,7 +54,7 @@
PKT_TX_OUTER_IPV4 | \
PKT_TX_IPV6 | \
PKT_TX_IPV4 | \
- PKT_TX_VLAN_PKT | \
+ PKT_TX_VLAN | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG | \
@@ -260,7 +260,7 @@ igbe_set_xmit_ctx(struct igb_tx_queue* txq,
/* Specify which HW CTX to upload. */
mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
- if (ol_flags & PKT_TX_VLAN_PKT)
+ if (ol_flags & PKT_TX_VLAN)
tx_offload_mask.data |= TX_VLAN_CMP_MASK;
/* check if TCP segmentation required for this packet */
@@ -369,7 +369,7 @@ tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
uint32_t cmdtype;
static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
- cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
+ cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN) != 0];
cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
return cmdtype;
}
@@ -38,7 +38,7 @@ static inline void dump_rxd(union fm10k_rx_desc *rxd)
#endif
#define FM10K_TX_OFFLOAD_MASK ( \
- PKT_TX_VLAN_PKT | \
+ PKT_TX_VLAN | \
PKT_TX_IPV6 | \
PKT_TX_IPV4 | \
PKT_TX_IP_CKSUM | \
@@ -609,7 +609,7 @@ static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_CSUM;
/* set vlan if requested */
- if (mb->ol_flags & PKT_TX_VLAN_PKT)
+ if (mb->ol_flags & PKT_TX_VLAN)
q->hw_ring[q->next_free].vlan = mb->vlan_tci;
else
q->hw_ring[q->next_free].vlan = 0;
@@ -592,7 +592,7 @@ hinic_fill_tx_offload_info(struct rte_mbuf *mbuf,
task->pkt_info2 = 0;
/* Base VLAN */
- if (unlikely(ol_flags & PKT_TX_VLAN_PKT)) {
+ if (unlikely(ol_flags & PKT_TX_VLAN)) {
vlan_tag = mbuf->vlan_tci;
hinic_set_vlan_tx_offload(task, queue_info, vlan_tag,
vlan_tag >> VLAN_PRIO_SHIFT);
@@ -3190,11 +3190,11 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
* To avoid the VLAN of Tx descriptor is overwritten by PVID, it should
* be added to the position close to the IP header when PVID is enabled.
*/
- if (!txq->pvid_sw_shift_en && ol_flags & (PKT_TX_VLAN_PKT |
- PKT_TX_QINQ_PKT)) {
+ if (!txq->pvid_sw_shift_en && ol_flags & (PKT_TX_VLAN |
+ PKT_TX_QINQ)) {
desc->tx.ol_type_vlan_len_msec |=
rte_cpu_to_le_32(BIT(HNS3_TXD_OVLAN_B));
- if (ol_flags & PKT_TX_QINQ_PKT)
+ if (ol_flags & PKT_TX_QINQ)
desc->tx.outer_vlan_tag =
rte_cpu_to_le_16(rxm->vlan_tci_outer);
else
@@ -3202,8 +3202,8 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc,
rte_cpu_to_le_16(rxm->vlan_tci);
}
- if (ol_flags & PKT_TX_QINQ_PKT ||
- ((ol_flags & PKT_TX_VLAN_PKT) && txq->pvid_sw_shift_en)) {
+ if (ol_flags & PKT_TX_QINQ ||
+ ((ol_flags & PKT_TX_VLAN) && txq->pvid_sw_shift_en)) {
desc->tx.type_cs_vlan_tso_len |=
rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B));
desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
@@ -3742,12 +3742,12 @@ hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m)
* implementation function named hns3_prep_pkts to inform users that
* these packets will be discarded.
*/
- if (m->ol_flags & PKT_TX_QINQ_PKT)
+ if (m->ol_flags & PKT_TX_QINQ)
return -EINVAL;
eh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
- if (m->ol_flags & PKT_TX_VLAN_PKT)
+ if (m->ol_flags & PKT_TX_VLAN)
return -EINVAL;
/* Ensure the incoming packet is not a QinQ packet */
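
Checks like these run from the device's tx_prepare path, so an application setting VLAN flags should call rte_eth_tx_prepare() and look at rte_errno before bursting. A simplified usage sketch (port/queue/packet setup assumed; the offending packet is freed here, and any packets after it are left to the caller):

#include <rte_ethdev.h>
#include <rte_errno.h>
#include <rte_mbuf.h>

static uint16_t
send_burst_checked(uint16_t port, uint16_t queue,
		   struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t nb_ok = rte_eth_tx_prepare(port, queue, pkts, n);

	if (nb_ok != n) {
		/* pkts[nb_ok] failed validation; rte_errno holds the
		 * reason, e.g. EINVAL for the hns3 VLAN cases above. */
		rte_pktmbuf_free(pkts[nb_ok]);
	}
	return rte_eth_tx_burst(port, queue, pkts, nb_ok);
}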
@@ -64,8 +64,8 @@
PKT_TX_L4_MASK | \
PKT_TX_OUTER_IP_CKSUM | \
PKT_TX_TCP_SEG | \
- PKT_TX_QINQ_PKT | \
- PKT_TX_VLAN_PKT | \
+ PKT_TX_QINQ | \
+ PKT_TX_VLAN | \
PKT_TX_TUNNEL_MASK | \
I40E_TX_IEEE1588_TMST)
@@ -1006,7 +1006,7 @@ i40e_calc_context_desc(uint64_t flags)
{
static uint64_t mask = PKT_TX_OUTER_IP_CKSUM |
PKT_TX_TCP_SEG |
- PKT_TX_QINQ_PKT |
+ PKT_TX_QINQ |
PKT_TX_TUNNEL_MASK;
#ifdef RTE_LIBRTE_IEEE1588
@@ -1151,7 +1151,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
/* Descriptor based VLAN insertion */
- if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+ if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
td_tag = tx_pkt->vlan_tci;
}
@@ -1200,7 +1200,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
ctx_txd->tunneling_params =
rte_cpu_to_le_32(cd_tunneling_params);
- if (ol_flags & PKT_TX_QINQ_PKT) {
+ if (ol_flags & PKT_TX_QINQ) {
cd_l2tag2 = tx_pkt->vlan_tci_outer;
cd_type_cmd_tso_mss |=
((uint64_t)I40E_TX_CTX_DESC_IL2TAG2 <<
@@ -2074,7 +2074,7 @@ iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
{
if (flags & PKT_TX_TCP_SEG)
return 1;
- if (flags & PKT_TX_VLAN_PKT &&
+ if (flags & PKT_TX_VLAN &&
vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
return 1;
return 0;
@@ -2260,7 +2260,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
/* Descriptor based VLAN insertion */
- if (ol_flags & PKT_TX_VLAN_PKT &&
+ if (ol_flags & PKT_TX_VLAN &&
txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
td_tag = tx_pkt->vlan_tci;
@@ -2301,7 +2301,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
cd_type_cmd_tso_mss |=
iavf_set_tso_ctx(tx_pkt, tx_offload);
- if (ol_flags & PKT_TX_VLAN_PKT &&
+ if (ol_flags & PKT_TX_VLAN &&
txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
@@ -62,7 +62,7 @@
PKT_TX_OUTER_IPV4 | \
PKT_TX_IPV6 | \
PKT_TX_IPV4 | \
- PKT_TX_VLAN_PKT | \
+ PKT_TX_VLAN | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG)
@@ -78,7 +78,7 @@
PKT_TX_OUTER_IPV4 | \
PKT_TX_IPV6 | \
PKT_TX_IPV4 | \
- PKT_TX_VLAN_PKT | \
+ PKT_TX_VLAN | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG | \
@@ -1530,7 +1530,7 @@ igc_set_xmit_ctx(struct igc_tx_queue *txq,
/* Specify which HW CTX to upload. */
mss_l4len_idx = (ctx_curr << IGC_ADVTXD_IDX_SHIFT);
- if (ol_flags & PKT_TX_VLAN_PKT)
+ if (ol_flags & PKT_TX_VLAN)
tx_offload_mask.vlan_tci = 0xffff;
/* check if TCP segmentation required for this packet */
@@ -1604,7 +1604,7 @@ tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
uint32_t cmdtype;
static uint32_t vlan_cmd[2] = {0, IGC_ADVTXD_DCMD_VLE};
static uint32_t tso_cmd[2] = {0, IGC_ADVTXD_DCMD_TSE};
- cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
+ cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN) != 0];
cmdtype |= tso_cmd[(ol_flags & IGC_TX_OFFLOAD_SEG) != 0];
return cmdtype;
}
@@ -356,7 +356,7 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
uint32_t offset = 0;
bool start, done;
bool encap;
- bool has_vlan = !!(txm->ol_flags & PKT_TX_VLAN_PKT);
+ bool has_vlan = !!(txm->ol_flags & PKT_TX_VLAN);
uint16_t vlan_tci = txm->vlan_tci;
uint64_t ol_flags = txm->ol_flags;
@@ -495,7 +495,7 @@ ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
stats->no_csum++;
- has_vlan = (ol_flags & PKT_TX_VLAN_PKT);
+ has_vlan = (ol_flags & PKT_TX_VLAN);
encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
(ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
((ol_flags & PKT_TX_OUTER_IPV4) ||
@@ -64,7 +64,7 @@
PKT_TX_OUTER_IPV4 | \
PKT_TX_IPV6 | \
PKT_TX_IPV4 | \
- PKT_TX_VLAN_PKT | \
+ PKT_TX_VLAN | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG | \
@@ -384,7 +384,7 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
/* Specify which HW CTX to upload. */
mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
- if (ol_flags & PKT_TX_VLAN_PKT) {
+ if (ol_flags & PKT_TX_VLAN) {
tx_offload_mask.vlan_tci |= ~0;
}
@@ -543,7 +543,7 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
{
uint32_t cmdtype = 0;
- if (ol_flags & PKT_TX_VLAN_PKT)
+ if (ol_flags & PKT_TX_VLAN)
cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
if (ol_flags & PKT_TX_TCP_SEG)
cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
@@ -538,7 +538,7 @@ txq_mbuf_to_swp(struct mlx5_txq_local *__rte_restrict loc,
* should be set regardless of HW offload.
*/
off = loc->mbuf->outer_l2_len;
- if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN_PKT)
+ if (MLX5_TXOFF_CONFIG(VLAN) && ol & PKT_TX_VLAN)
off += sizeof(struct rte_vlan_hdr);
set = (off >> 1) << 8; /* Outer L3 offset. */
off += loc->mbuf->outer_l3_len;
@@ -956,7 +956,7 @@ mlx5_tx_eseg_none(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
*RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
/* Engage VLAN tag insertion feature if requested. */
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ loc->mbuf->ol_flags & PKT_TX_VLAN) {
/*
* We should get here only if device support
* this feature correctly.
@@ -1814,7 +1814,7 @@ mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
* the required space in WQE ring buffer.
*/
dlen = rte_pktmbuf_pkt_len(loc->mbuf);
- if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
+ if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN)
vlan = sizeof(struct rte_vlan_hdr);
inlen = loc->mbuf->l2_len + vlan +
loc->mbuf->l3_len + loc->mbuf->l4_len;
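
A detail worth noting across the mlx5 hunks: whenever PKT_TX_VLAN is set, sizeof(struct rte_vlan_hdr) (4 bytes) is added to the length calculations, because the tag the NIC will insert is not present in the mbuf data. The accounting, reduced to a sketch:

#include <rte_ether.h>
#include <rte_mbuf.h>

/* Sketch: on-wire length of a packet whose tag the NIC will insert. */
static inline uint32_t
tx_wire_len(const struct rte_mbuf *m)
{
	uint32_t len = rte_pktmbuf_pkt_len(m);

	if (m->ol_flags & PKT_TX_VLAN)
		len += sizeof(struct rte_vlan_hdr);	/* 4 bytes */
	return len;
}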
@@ -1929,7 +1929,7 @@ mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
/* Update sent data bytes counter. */
txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
+ loc->mbuf->ol_flags & PKT_TX_VLAN)
txq->stats.obytes += sizeof(struct rte_vlan_hdr);
#endif
/*
@@ -2028,7 +2028,7 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
* to estimate the required space for WQE.
*/
dlen = rte_pktmbuf_pkt_len(loc->mbuf);
- if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
+ if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN)
vlan = sizeof(struct rte_vlan_hdr);
inlen = dlen + vlan;
/* Check against minimal length. */
@@ -2291,7 +2291,7 @@ mlx5_tx_burst_tso(struct mlx5_txq_data *__rte_restrict txq,
}
dlen = rte_pktmbuf_data_len(loc->mbuf);
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ loc->mbuf->ol_flags & PKT_TX_VLAN) {
vlan = sizeof(struct rte_vlan_hdr);
}
/*
@@ -2416,7 +2416,7 @@ mlx5_tx_able_to_empw(struct mlx5_txq_data *__rte_restrict txq,
return MLX5_TXCMP_CODE_SINGLE;
/* Check if eMPW can be engaged. */
if (MLX5_TXOFF_CONFIG(VLAN) &&
- unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) &&
+ unlikely(loc->mbuf->ol_flags & PKT_TX_VLAN) &&
(!MLX5_TXOFF_CONFIG(INLINE) ||
unlikely((rte_pktmbuf_data_len(loc->mbuf) +
sizeof(struct rte_vlan_hdr)) > txq->inlen_empw))) {
@@ -2478,7 +2478,7 @@ mlx5_tx_match_empw(struct mlx5_txq_data *__rte_restrict txq,
return false;
/* There must be no VLAN packets in eMPW loop. */
if (MLX5_TXOFF_CONFIG(VLAN))
- MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
+ MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN));
/* Check if the scheduling is requested. */
if (MLX5_TXOFF_CONFIG(TXPP) &&
loc->mbuf->ol_flags & txq->ts_mask)
@@ -2939,7 +2939,7 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
}
/* Inline entire packet, optional VLAN insertion. */
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ loc->mbuf->ol_flags & PKT_TX_VLAN) {
/*
* The packet length must be checked in
* mlx5_tx_able_to_empw() and packet
@@ -3004,7 +3004,7 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
if (MLX5_TXOFF_CONFIG(VLAN))
MLX5_ASSERT(!(loc->mbuf->ol_flags &
- PKT_TX_VLAN_PKT));
+ PKT_TX_VLAN));
mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
/* We have to store mbuf in elts.*/
txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
@@ -3149,7 +3149,7 @@ mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
inlen = rte_pktmbuf_data_len(loc->mbuf);
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ loc->mbuf->ol_flags & PKT_TX_VLAN) {
vlan = sizeof(struct rte_vlan_hdr);
inlen += vlan;
}
@@ -3380,7 +3380,7 @@ mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
/* Update sent data bytes counter. */
txq->stats.obytes += rte_pktmbuf_data_len(loc->mbuf);
if (MLX5_TXOFF_CONFIG(VLAN) &&
- loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
+ loc->mbuf->ol_flags & PKT_TX_VLAN)
txq->stats.obytes +=
sizeof(struct rte_vlan_hdr);
#endif
@@ -1331,7 +1331,7 @@ static void hn_encap(struct rndis_packet_msg *pkt,
NDIS_PKTINFO_TYPE_HASHVAL);
*pi_data = queue_id;
- if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m->ol_flags & PKT_TX_VLAN) {
pi_data = hn_rndis_pktinfo_append(pkt, NDIS_VLAN_INFO_SIZE,
NDIS_PKTINFO_TYPE_VLAN);
*pi_data = m->vlan_tci;
@@ -929,7 +929,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
nfp_net_tx_tso(txq, &txd, pkt);
nfp_net_tx_cksum(txq, &txd, pkt);
- if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
+ if ((pkt->ol_flags & PKT_TX_VLAN) &&
(hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
txd.flags |= PCIE_DESC_TX_VLAN;
txd.vlan = pkt->vlan_tci;
@@ -2587,7 +2587,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
/* Descriptor based VLAN insertion */
- if (tx_ol_flags & PKT_TX_VLAN_PKT) {
+ if (tx_ol_flags & PKT_TX_VLAN) {
vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
@@ -153,7 +153,7 @@
PKT_TX_IPV6)
#define QEDE_TX_OFFLOAD_MASK (QEDE_TX_CSUM_OFFLOAD_MASK | \
- PKT_TX_VLAN_PKT | \
+ PKT_TX_VLAN | \
PKT_TX_TUNNEL_MASK)
#define QEDE_TX_OFFLOAD_NOTSUP_MASK \
@@ -382,7 +382,7 @@ sfc_ef100_tx_qdesc_send_create(const struct rte_mbuf *m, efx_oword_t *tx_desc)
ESF_GZ_TX_SEND_CSO_OUTER_L4, outer_l4,
ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEND);
- if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m->ol_flags & PKT_TX_VLAN) {
efx_oword_t tx_desc_extra_fields;
EFX_POPULATE_OWORD_2(tx_desc_extra_fields,
@@ -464,7 +464,7 @@ sfc_ef100_tx_qdesc_tso_create(const struct rte_mbuf *m,
EFX_OR_OWORD(*tx_desc, tx_desc_extra_fields);
- if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m->ol_flags & PKT_TX_VLAN) {
EFX_POPULATE_OWORD_2(tx_desc_extra_fields,
ESF_GZ_TX_TSO_VLAN_INSERT_EN, 1,
ESF_GZ_TX_TSO_VLAN_INSERT_TCI, m->vlan_tci);
@@ -805,7 +805,7 @@ sfc_ef10_simple_prepare_pkts(__rte_unused void *tx_queue,
/* ef10_simple does not support TSO and VLAN insertion */
if (unlikely(m->ol_flags &
- (PKT_TX_TCP_SEG | PKT_TX_VLAN_PKT))) {
+ (PKT_TX_TCP_SEG | PKT_TX_VLAN))) {
rte_errno = ENOTSUP;
break;
}
@@ -766,7 +766,7 @@ static unsigned int
sfc_efx_tx_maybe_insert_tag(struct sfc_efx_txq *txq, struct rte_mbuf *m,
efx_desc_t **pend)
{
- uint16_t this_tag = ((m->ol_flags & PKT_TX_VLAN_PKT) ?
+ uint16_t this_tag = ((m->ol_flags & PKT_TX_VLAN) ?
m->vlan_tci : 0);
if (this_tag == txq->hw_vlan_tci)
@@ -54,7 +54,7 @@ static const u64 TXGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM |
PKT_TX_OUTER_IPV4 |
PKT_TX_IPV6 |
PKT_TX_IPV4 |
- PKT_TX_VLAN_PKT |
+ PKT_TX_VLAN |
PKT_TX_L4_MASK |
PKT_TX_TCP_SEG |
PKT_TX_TUNNEL_MASK |
@@ -408,7 +408,7 @@ txgbe_set_xmit_ctx(struct txgbe_tx_queue *txq,
vlan_macip_lens |= TXGBE_TXD_MACLEN(tx_offload.l2_len);
}
- if (ol_flags & PKT_TX_VLAN_PKT) {
+ if (ol_flags & PKT_TX_VLAN) {
tx_offload_mask.vlan_tci |= ~0;
vlan_macip_lens |= TXGBE_TXD_VLAN(tx_offload.vlan_tci);
}
@@ -496,7 +496,7 @@ tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
tmp |= TXGBE_TXD_IPCS;
tmp |= TXGBE_TXD_L4CS;
}
- if (ol_flags & PKT_TX_VLAN_PKT)
+ if (ol_flags & PKT_TX_VLAN)
tmp |= TXGBE_TXD_CC;
return tmp;
@@ -507,7 +507,7 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
{
uint32_t cmdtype = 0;
- if (ol_flags & PKT_TX_VLAN_PKT)
+ if (ol_flags & PKT_TX_VLAN)
cmdtype |= TXGBE_TXD_VLE;
if (ol_flags & PKT_TX_TCP_SEG)
cmdtype |= TXGBE_TXD_TSE;
@@ -444,7 +444,7 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
struct rte_mbuf *m = bufs[i];
/* Do VLAN tag insertion */
- if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m->ol_flags & PKT_TX_VLAN) {
int error = rte_vlan_insert(&m);
if (unlikely(error)) {
rte_pktmbuf_free(m);
@@ -1747,7 +1747,7 @@ virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
#endif
/* Do VLAN tag insertion */
- if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
+ if (unlikely(m->ol_flags & PKT_TX_VLAN)) {
error = rte_vlan_insert(&m);
/* rte_vlan_insert() may change pointer
* even in the case of failure
@@ -49,7 +49,7 @@
#include "vmxnet3_ethdev.h"
#define VMXNET3_TX_OFFLOAD_MASK ( \
- PKT_TX_VLAN_PKT | \
+ PKT_TX_VLAN | \
PKT_TX_IPV6 | \
PKT_TX_IPV4 | \
PKT_TX_L4_MASK | \
@@ -520,7 +520,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Add VLAN tag if present */
gdesc = txq->cmd_ring.base + first2fill;
- if (txm->ol_flags & PKT_TX_VLAN_PKT) {
+ if (txm->ol_flags & PKT_TX_VLAN) {
gdesc->txd.ti = 1;
gdesc->txd.tci = txm->vlan_tci;
}
@@ -1115,7 +1115,7 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
(vh->vlan_tci != vlan_tag_be))
vh->vlan_tci = vlan_tag_be;
} else {
- m->ol_flags |= PKT_TX_VLAN_PKT;
+ m->ol_flags |= PKT_TX_VLAN;
/*
* Find the right seg to adjust the data len when offset is
@@ -55,37 +55,12 @@ extern "C" {
/** RX packet with FDIR match indicate. */
#define PKT_RX_FDIR (1ULL << 2)
-/**
- * Deprecated.
- * Checking this flag alone is deprecated: check the 2 bits of
- * PKT_RX_L4_CKSUM_MASK.
- * This flag was set when the L4 checksum of a packet was detected as
- * wrong by the hardware.
- */
-#define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
-
-/**
- * Deprecated.
- * Checking this flag alone is deprecated: check the 2 bits of
- * PKT_RX_IP_CKSUM_MASK.
- * This flag was set when the IP checksum of a packet was detected as
- * wrong by the hardware.
- */
-#define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
-
/**
* This flag is set when the outermost IP header checksum is detected as
* wrong by the hardware.
*/
#define PKT_RX_OUTER_IP_CKSUM_BAD (1ULL << 5)
-/**
- * Deprecated.
- * This flag has been renamed, use PKT_RX_OUTER_IP_CKSUM_BAD instead.
- */
-#define PKT_RX_EIP_CKSUM_BAD \
- RTE_DEPRECATED(PKT_RX_EIP_CKSUM_BAD) PKT_RX_OUTER_IP_CKSUM_BAD
-
/**
* A vlan has been stripped by the hardware and its tci is saved in
* mbuf->vlan_tci. This can only happen if vlan stripping is enabled
@@ -289,8 +264,6 @@ extern "C" {
* mbuf 'vlan_tci' & 'vlan_tci_outer' must be valid when this flag is set.
*/
#define PKT_TX_QINQ (1ULL << 49)
-/** This old name is deprecated. */
-#define PKT_TX_QINQ_PKT PKT_TX_QINQ
/**
* TCP segmentation offload. To enable this offload feature for a
@@ -358,8 +331,6 @@ extern "C" {
* mbuf 'vlan_tci' field must be valid when this flag is set.
*/
#define PKT_TX_VLAN (1ULL << 57)
-/* this old name is deprecated */
-#define PKT_TX_VLAN_PKT PKT_TX_VLAN
/**
* Offload the IP checksum of an external header in the hardware. The
@@ -391,14 +362,14 @@ extern "C" {
PKT_TX_OUTER_IPV6 | \
PKT_TX_OUTER_IPV4 | \
PKT_TX_OUTER_IP_CKSUM | \
- PKT_TX_VLAN_PKT | \
+ PKT_TX_VLAN | \
PKT_TX_IPV6 | \
PKT_TX_IPV4 | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_IEEE1588_TMST | \
PKT_TX_TCP_SEG | \
- PKT_TX_QINQ_PKT | \
+ PKT_TX_QINQ | \
PKT_TX_TUNNEL_MASK | \
PKT_TX_MACSEC | \
PKT_TX_SEC_OFFLOAD | \
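
Since the old names are removed outright by this patch (not merely marked deprecated), applications that must keep building against earlier releases can paper over the rename themselves. A hypothetical shim, assuming only releases where either the old or the new spelling is defined:

#include <rte_mbuf.h>

/* Hypothetical compatibility shim; on releases predating the new names,
 * fall back to the old spellings removed by this patch. */
#ifndef PKT_TX_VLAN
#define PKT_TX_VLAN PKT_TX_VLAN_PKT
#endif
#ifndef PKT_TX_QINQ
#define PKT_TX_QINQ PKT_TX_QINQ_PKT
#endif
#ifndef PKT_RX_OUTER_IP_CKSUM_BAD
#define PKT_RX_OUTER_IP_CKSUM_BAD PKT_RX_EIP_CKSUM_BAD
#endif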