@@ -186,7 +186,7 @@
rte_mempool_get_priv(rxq_ctrl->rxq.mp);
int j;
- /* Initialize default rearm_data for vPMD. */
+ /* Initialize default mbuf_rearm_data for vPMD. */
mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
rte_mbuf_refcnt_set(mbuf_init, 1);
mbuf_init->nb_segs = 1;
@@ -196,11 +196,11 @@
mbuf_init->ol_flags = RTE_MBUF_F_EXTERNAL;
/*
* prevent compiler reordering:
- * rearm_data covers previous fields.
+ * mbuf_rearm_data covers previous fields.
*/
rte_compiler_barrier();
rxq->mbuf_initializer =
- *(rte_xmm_t *)&mbuf_init->rearm_data;
+ *(rte_xmm_t *)&mbuf_init->mbuf_rearm_data;
/* Padding with a fake mbuf for vectorized Rx. */
for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
(*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
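For context, this rename assumes the zero-width RTE_MARKER64 rearm_data in rte_mbuf is replaced by an addressable union member, roughly as below (a sketch assuming the long-standing mbuf field order; the exact definition in rte_mbuf_core.h may differ):

	union {
		uint64_t mbuf_rearm_data; /* sketch of the renamed member */
		__extension__
		struct {
			uint16_t data_off;
			uint16_t refcnt;
			uint16_t nb_segs;
			uint16_t port;
		};
	};
	uint64_t ol_flags; /* the 16B rte_xmm_t read above deliberately covers this too */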
@@ -31,23 +31,23 @@
/* rxq_cq_decompress_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
- offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 4);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
- offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 8);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, hash) ==
- offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+ offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 12);
/* rxq_cq_to_ptype_oflags_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, ol_flags) ==
- offsetof(struct rte_mbuf, rearm_data) + 8);
-S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, rearm_data) ==
- RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
+ offsetof(struct rte_mbuf, mbuf_rearm_data) + 8);
+S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, mbuf_rearm_data) ==
+ RTE_ALIGN(offsetof(struct rte_mbuf, mbuf_rearm_data), 16));
/* rxq_burst_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
- offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+ offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 4);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
- offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+ offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 8);
#if (RTE_CACHE_LINE_SIZE == 128)
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 64);
#else
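The offsets pinned down by these asserts correspond to the mbuf Rx descriptor region; a sketch of the union shape they imply (hash_rss is a stand-in for the mbuf hash union; widths follow the documented mbuf layout):

	union {
		uint8_t mbuf_rx_descriptor_fields1[16]; /* illustrative width */
		__extension__
		struct {
			uint32_t packet_type; /* +0  */
			uint32_t pkt_len;     /* +4  */
			uint16_t data_len;    /* +8  */
			uint16_t vlan_tci;    /* +10 */
			uint32_t hash_rss;    /* +12 */
		};
	};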
@@ -101,10 +101,10 @@
uint16_t pkts_n = mcqe_n;
const __vector unsigned char rearm =
(__vector unsigned char)vec_vsx_ld(0,
- (signed int const *)&t_pkt->rearm_data);
+ (signed int const *)&t_pkt->mbuf_rearm_data);
const __vector unsigned char rxdf =
(__vector unsigned char)vec_vsx_ld(0,
- (signed int const *)&t_pkt->rx_descriptor_fields1);
+ (signed int const *)&t_pkt->mbuf_rx_descriptor_fields1);
const __vector unsigned char crc_adj =
(__vector unsigned char)(__vector unsigned short){
0, 0, rxq->crc_present * RTE_ETHER_CRC_LEN, 0,
@@ -132,8 +132,8 @@
/*
* A. load mCQEs into a 128bit register.
* B. store rearm data to mbuf.
- * C. combine data from mCQEs with rx_descriptor_fields1.
- * D. store rx_descriptor_fields1.
+ * C. combine data from mCQEs with mbuf_rx_descriptor_fields1.
+ * D. store mbuf_rx_descriptor_fields1.
* E. store flow tag (rte_flow mark).
*/
cycle:
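In scalar terms, one iteration of this decompression loop does roughly the following per mbuf (a sketch using names from the surrounding function; flow_tag is a hypothetical stand-in for the tag taken from the title packet or mCQE, and the mini-CQE byte counts include the FCS when CRC stripping is off, which is what crc_adj compensates for):

	struct rte_mbuf *m = elts[i];
	uint32_t byte_cnt = rte_be_to_cpu_32(mcq[i % 8].byte_cnt);
	uint16_t crc = rxq->crc_present ? RTE_ETHER_CRC_LEN : 0;

	/* B: copy the 16B rearm template (8B rearm fields + 8B ol_flags). */
	memcpy((void *)&m->mbuf_rearm_data, &rearm, 16);
	/* C + D: merge the per-packet byte count into the descriptor region. */
	m->pkt_len = byte_cnt - crc;
	m->data_len = byte_cnt - crc;
	/* E: propagate the flow tag for rte_flow MARK. */
	m->hash.fdir.hi = flow_tag;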
@@ -173,11 +173,11 @@
/* B.1 store rearm data to mbuf. */
*(__vector unsigned char *)
- &elts[pos]->rearm_data = rearm;
+ &elts[pos]->mbuf_rearm_data = rearm;
*(__vector unsigned char *)
- &elts[pos + 1]->rearm_data = rearm;
+ &elts[pos + 1]->mbuf_rearm_data = rearm;
- /* C.1 combine data from mCQEs with rx_descriptor_fields1. */
+ /* C.1 combine data from mCQEs with mbuf_rx_descriptor_fields1. */
rxdf1 = vec_perm(mcqe1, zero, shuf_mask1);
rxdf2 = vec_perm(mcqe1, zero, shuf_mask2);
rxdf1 = (__vector unsigned char)
@@ -193,19 +193,19 @@
vec_sel((__vector unsigned short)rxdf2,
(__vector unsigned short)rxdf, rxdf_sel_mask);
- /* D.1 store rx_descriptor_fields1. */
+ /* D.1 store mbuf_rx_descriptor_fields1. */
*(__vector unsigned char *)
- &elts[pos]->rx_descriptor_fields1 = rxdf1;
+ &elts[pos]->mbuf_rx_descriptor_fields1 = rxdf1;
*(__vector unsigned char *)
- &elts[pos + 1]->rx_descriptor_fields1 = rxdf2;
+ &elts[pos + 1]->mbuf_rx_descriptor_fields1 = rxdf2;
/* B.1 store rearm data to mbuf. */
*(__vector unsigned char *)
- &elts[pos + 2]->rearm_data = rearm;
+ &elts[pos + 2]->mbuf_rearm_data = rearm;
*(__vector unsigned char *)
- &elts[pos + 3]->rearm_data = rearm;
+ &elts[pos + 3]->mbuf_rearm_data = rearm;
- /* C.1 combine data from mCQEs with rx_descriptor_fields1. */
+ /* C.1 combine data from mCQEs with mbuf_rx_descriptor_fields1. */
rxdf1 = vec_perm(mcqe2, zero, shuf_mask1);
rxdf2 = vec_perm(mcqe2, zero, shuf_mask2);
rxdf1 = (__vector unsigned char)
@@ -221,11 +221,11 @@
vec_sel((__vector unsigned short)rxdf2,
(__vector unsigned short)rxdf, rxdf_sel_mask);
- /* D.1 store rx_descriptor_fields1. */
+ /* D.1 store mbuf_rx_descriptor_fields1. */
*(__vector unsigned char *)
- &elts[pos + 2]->rx_descriptor_fields1 = rxdf1;
+ &elts[pos + 2]->mbuf_rx_descriptor_fields1 = rxdf1;
*(__vector unsigned char *)
- &elts[pos + 3]->rx_descriptor_fields1 = rxdf2;
+ &elts[pos + 3]->mbuf_rx_descriptor_fields1 = rxdf2;
#ifdef MLX5_PMD_SOFT_COUNTERS
invalid_mask = (__vector unsigned char)(__vector unsigned long){
@@ -767,15 +767,15 @@
vec_sro((__vector unsigned short)ol_flags,
(__vector unsigned char){32}), rearm_sel_mask);
- /* Write 8B rearm_data and 8B ol_flags. */
+ /* Write 8B mbuf_rearm_data and 8B ol_flags. */
vec_vsx_st(rearm0, 0,
- (__vector unsigned char *)&pkts[0]->rearm_data);
+ (__vector unsigned char *)&pkts[0]->mbuf_rearm_data);
vec_vsx_st(rearm1, 0,
- (__vector unsigned char *)&pkts[1]->rearm_data);
+ (__vector unsigned char *)&pkts[1]->mbuf_rearm_data);
vec_vsx_st(rearm2, 0,
- (__vector unsigned char *)&pkts[2]->rearm_data);
+ (__vector unsigned char *)&pkts[2]->mbuf_rearm_data);
vec_vsx_st(rearm3, 0,
- (__vector unsigned char *)&pkts[3]->rearm_data);
+ (__vector unsigned char *)&pkts[3]->mbuf_rearm_data);
}
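Each 16B store above is equivalent to this scalar sequence per packet (a sketch; ol_flags_n stands for the lane computed for packet n, and the template half comes from rxq->mbuf_initializer captured at queue setup in the first hunk):

	pkts[n]->data_off = RTE_PKTMBUF_HEADROOM;
	rte_mbuf_refcnt_set(pkts[n], 1);
	pkts[n]->nb_segs = 1;
	pkts[n]->port = rxq->port_id;
	pkts[n]->ol_flags = ol_flags_n;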
/**
@@ -1046,7 +1046,7 @@
((__vector unsigned int)pkt_mb2 +
(__vector unsigned int)flow_mark_adj);
- /* D.1 fill in mbuf - rx_descriptor_fields1. */
+ /* D.1 fill in mbuf - mbuf_rx_descriptor_fields1. */
*(__vector unsigned char *)
&pkts[pos + 3]->pkt_len = pkt_mb3;
*(__vector unsigned char *)
@@ -1115,7 +1115,7 @@
vec_mergel((__vector unsigned long)op_own_tmp1,
(__vector unsigned long)op_own_tmp2);
- /* D.1 fill in mbuf - rx_descriptor_fields1. */
+ /* D.1 fill in mbuf - mbuf_rx_descriptor_fields1. */
*(__vector unsigned char *)
&pkts[pos + 1]->pkt_len = pkt_mb1;
*(__vector unsigned char *)
@@ -1245,7 +1245,7 @@
/* D.4 mark if any error is set */
*err |= ((__vector unsigned long)opcode)[0];
- /* D.5 fill in mbuf - rearm_data and packet_type. */
+ /* D.5 fill in mbuf - mbuf_rearm_data and packet_type. */
rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
if (unlikely(rxq->shared)) {
pkts[pos]->port = cq[pos].user_index_low;
@@ -99,7 +99,7 @@
t_pkt->data_len + (rxq->crc_present * RTE_ETHER_CRC_LEN);
uint16_t pkts_n = mcqe_n;
const uint64x2_t rearm =
- vld1q_u64((void *)&t_pkt->rearm_data);
+ vld1q_u64((void *)&t_pkt->mbuf_rearm_data);
const uint32x4_t rxdf_mask = {
0xffffffff, /* packet_type */
0, /* skip pkt_len */
@@ -107,7 +107,7 @@
0, /* skip hash.rss */
};
const uint8x16_t rxdf =
- vandq_u8(vld1q_u8((void *)&t_pkt->rx_descriptor_fields1),
+ vandq_u8(vld1q_u8((void *)&t_pkt->mbuf_rx_descriptor_fields1),
vreinterpretq_u8_u32(rxdf_mask));
const uint16x8_t crc_adj = {
0, 0,
@@ -131,8 +131,8 @@
/*
* A. load mCQEs into a 128bit register.
* B. store rearm data to mbuf.
- * C. combine data from mCQEs with rx_descriptor_fields1.
- * D. store rx_descriptor_fields1.
+ * C. combine data from mCQEs with mbuf_rx_descriptor_fields1.
+ * D. store mbuf_rx_descriptor_fields1.
* E. store flow tag (rte_flow mark).
*/
cycle:
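A note on the pointer bumps in the inline asm below: each eN starts at &mbuf->mbuf_rearm_data, and after "add %[eN], %[eN], #16" it points at the descriptor region, because the 8B ol_flags immediately follows the 8B rearm fields. Expressed as a guard (a sketch; it follows from the asserts above plus ol_flags being 64-bit):

	static_assert(offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) ==
		      offsetof(struct rte_mbuf, mbuf_rearm_data) + 16,
		      "D.1 stores rely on a 16B rearm-to-descriptor distance");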
@@ -140,10 +140,10 @@
rte_prefetch0((void *)(cq + mcqe_n));
for (pos = 0; pos < mcqe_n; ) {
uint8_t *p = (void *)&mcq[pos % 8];
- uint8_t *e0 = (void *)&elts[pos]->rearm_data;
- uint8_t *e1 = (void *)&elts[pos + 1]->rearm_data;
- uint8_t *e2 = (void *)&elts[pos + 2]->rearm_data;
- uint8_t *e3 = (void *)&elts[pos + 3]->rearm_data;
+ uint8_t *e0 = (void *)&elts[pos]->mbuf_rearm_data;
+ uint8_t *e1 = (void *)&elts[pos + 1]->mbuf_rearm_data;
+ uint8_t *e2 = (void *)&elts[pos + 2]->mbuf_rearm_data;
+ uint8_t *e3 = (void *)&elts[pos + 3]->mbuf_rearm_data;
uint16x4_t byte_cnt;
#ifdef MLX5_PMD_SOFT_COUNTERS
uint16x4_t invalid_mask =
@@ -164,14 +164,14 @@
"add %[e0], %[e0], #16 \n\t"
"st1 {%[rearm].2d}, [%[e1]] \n\t"
"add %[e1], %[e1], #16 \n\t"
- /* C.1 combine data from mCQEs with rx_descriptor_fields1. */
+ /* C.1 combine data from mCQEs with mbuf_rx_descriptor_fields1. */
"tbl v18.16b, {v16.16b}, %[mcqe_shuf_m1].16b \n\t"
"tbl v19.16b, {v16.16b}, %[mcqe_shuf_m2].16b \n\t"
"sub v18.8h, v18.8h, %[crc_adj].8h \n\t"
"sub v19.8h, v19.8h, %[crc_adj].8h \n\t"
"orr v18.16b, v18.16b, %[rxdf].16b \n\t"
"orr v19.16b, v19.16b, %[rxdf].16b \n\t"
- /* D.1 store rx_descriptor_fields1. */
+ /* D.1 store mbuf_rx_descriptor_fields1. */
"st1 {v18.2d}, [%[e0]] \n\t"
"st1 {v19.2d}, [%[e1]] \n\t"
/* B.1 store rearm data to mbuf. */
@@ -179,14 +179,14 @@
"add %[e2], %[e2], #16 \n\t"
"st1 {%[rearm].2d}, [%[e3]] \n\t"
"add %[e3], %[e3], #16 \n\t"
- /* C.1 combine data from mCQEs with rx_descriptor_fields1. */
+ /* C.1 combine data from mCQEs with mbuf_rx_descriptor_fields1. */
"tbl v18.16b, {v17.16b}, %[mcqe_shuf_m1].16b \n\t"
"tbl v19.16b, {v17.16b}, %[mcqe_shuf_m2].16b \n\t"
"sub v18.8h, v18.8h, %[crc_adj].8h \n\t"
"sub v19.8h, v19.8h, %[crc_adj].8h \n\t"
"orr v18.16b, v18.16b, %[rxdf].16b \n\t"
"orr v19.16b, v19.16b, %[rxdf].16b \n\t"
- /* D.1 store rx_descriptor_fields1. */
+ /* D.1 store mbuf_rx_descriptor_fields1. */
"st1 {v18.2d}, [%[e2]] \n\t"
"st1 {v19.2d}, [%[e3]] \n\t"
#ifdef MLX5_PMD_SOFT_COUNTERS
@@ -513,10 +513,10 @@
(vgetq_lane_u32(ol_flags, 0),
vreinterpretq_u32_u64(mbuf_init), 2));
- vst1q_u64((void *)&pkts[0]->rearm_data, rearm0);
- vst1q_u64((void *)&pkts[1]->rearm_data, rearm1);
- vst1q_u64((void *)&pkts[2]->rearm_data, rearm2);
- vst1q_u64((void *)&pkts[3]->rearm_data, rearm3);
+ vst1q_u64((void *)&pkts[0]->mbuf_rearm_data, rearm0);
+ vst1q_u64((void *)&pkts[1]->mbuf_rearm_data, rearm1);
+ vst1q_u64((void *)&pkts[2]->mbuf_rearm_data, rearm2);
+ vst1q_u64((void *)&pkts[3]->mbuf_rearm_data, rearm3);
}
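The rearmN values above are built by dropping one packet's 32-bit Rx flags word into 32-bit lane 2 of the 16B template, i.e. bytes 8..11, exactly where ol_flags begins; a standalone sketch of the lane trick for one packet:

	uint32_t flags_word = vgetq_lane_u32(ol_flags, 0); /* lane varies per packet */
	uint64x2_t rearm0 = vreinterpretq_u64_u32(vsetq_lane_u32(
		flags_word, vreinterpretq_u32_u64(mbuf_init), 2));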
/**
@@ -736,17 +736,17 @@
"tbl %[op_own].8b, {v20.16b - v23.16b}, %[owner_shuf_m].8b \n\t"
/* C.2 (CQE 3) adjust flow mark. */
"add v15.4s, v15.4s, %[flow_mark_adj].4s \n\t"
- /* C.3 (CQE 3) fill in mbuf - rx_descriptor_fields1. */
+ /* C.3 (CQE 3) fill in mbuf - mbuf_rx_descriptor_fields1. */
"st1 {v15.2d}, [%[e3]] \n\t"
/* C.2 (CQE 2) adjust flow mark. */
"add v14.4s, v14.4s, %[flow_mark_adj].4s \n\t"
- /* C.3 (CQE 2) fill in mbuf - rx_descriptor_fields1. */
+ /* C.3 (CQE 2) fill in mbuf - mbuf_rx_descriptor_fields1. */
"st1 {v14.2d}, [%[e2]] \n\t"
/* C.1 (CQE 0) generate final structure for mbuf. */
"tbl v12.16b, {v20.16b}, %[mb_shuf_m].16b \n\t"
/* C.2 (CQE 1) adjust flow mark. */
"add v13.4s, v13.4s, %[flow_mark_adj].4s \n\t"
- /* C.3 (CQE 1) fill in mbuf - rx_descriptor_fields1. */
+ /* C.3 (CQE 1) fill in mbuf - mbuf_rx_descriptor_fields1. */
"st1 {v13.2d}, [%[e1]] \n\t"
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Extract byte_cnt. */
@@ -760,7 +760,7 @@
"st1 {v24.2d - v25.2d}, [%[pkts_p]] \n\t"
/* C.2 (CQE 0) adjust flow mark. */
"add v12.4s, v12.4s, %[flow_mark_adj].4s \n\t"
- /* C.3 (CQE 1) fill in mbuf - rx_descriptor_fields1. */
+ /* C.3 (CQE 0) fill in mbuf - mbuf_rx_descriptor_fields1. */
"st1 {v12.2d}, [%[e0]] \n\t"
:[op_own]"=&w"(op_own),
[byte_cnt]"=&w"(byte_cnt),
@@ -831,7 +831,7 @@
opcode = vbic_u16(opcode, mini_mask);
/* D.4 mark if any error is set */
*err |= vget_lane_u64(vreinterpret_u64_u16(opcode), 0);
- /* C.4 fill in mbuf - rearm_data and packet_type. */
+ /* C.4 fill in mbuf - mbuf_rearm_data and packet_type. */
rxq_cq_to_ptype_oflags_v(rxq, ptype_info, flow_tag,
opcode, &elts[pos]);
if (unlikely(rxq->shared)) {
@@ -98,9 +98,9 @@
t_pkt->data_len + (rxq->crc_present * RTE_ETHER_CRC_LEN);
uint16_t pkts_n = mcqe_n;
const __m128i rearm =
- _mm_loadu_si128((__m128i *)&t_pkt->rearm_data);
+ _mm_loadu_si128((__m128i *)&t_pkt->mbuf_rearm_data);
const __m128i rxdf =
- _mm_loadu_si128((__m128i *)&t_pkt->rx_descriptor_fields1);
+ _mm_loadu_si128((__m128i *)&t_pkt->mbuf_rx_descriptor_fields1);
const __m128i crc_adj =
_mm_set_epi16(0, 0, 0,
rxq->crc_present * RTE_ETHER_CRC_LEN,
@@ -123,8 +123,8 @@
/*
* A. load mCQEs into a 128bit register.
* B. store rearm data to mbuf.
- * C. combine data from mCQEs with rx_descriptor_fields1.
- * D. store rx_descriptor_fields1.
+ * C. combine data from mCQEs with mbuf_rx_descriptor_fields1.
+ * D. store mbuf_rx_descriptor_fields1.
* E. store flow tag (rte_flow mark).
*/
cycle:
@@ -145,38 +145,38 @@
mcqe1 = _mm_loadu_si128((__m128i *)&mcq[pos % 8]);
mcqe2 = _mm_loadu_si128((__m128i *)&mcq[pos % 8 + 2]);
/* B.1 store rearm data to mbuf. */
- _mm_storeu_si128((__m128i *)&elts[pos]->rearm_data, rearm);
- _mm_storeu_si128((__m128i *)&elts[pos + 1]->rearm_data, rearm);
- /* C.1 combine data from mCQEs with rx_descriptor_fields1. */
+ _mm_storeu_si128((__m128i *)&elts[pos]->mbuf_rearm_data, rearm);
+ _mm_storeu_si128((__m128i *)&elts[pos + 1]->mbuf_rearm_data, rearm);
+ /* C.1 combine data from mCQEs with mbuf_rx_descriptor_fields1. */
rxdf1 = _mm_shuffle_epi8(mcqe1, shuf_mask1);
rxdf2 = _mm_shuffle_epi8(mcqe1, shuf_mask2);
rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
- /* D.1 store rx_descriptor_fields1. */
+ /* D.1 store mbuf_rx_descriptor_fields1. */
_mm_storeu_si128((__m128i *)
- &elts[pos]->rx_descriptor_fields1,
+ &elts[pos]->mbuf_rx_descriptor_fields1,
rxdf1);
_mm_storeu_si128((__m128i *)
- &elts[pos + 1]->rx_descriptor_fields1,
+ &elts[pos + 1]->mbuf_rx_descriptor_fields1,
rxdf2);
/* B.1 store rearm data to mbuf. */
- _mm_storeu_si128((__m128i *)&elts[pos + 2]->rearm_data, rearm);
- _mm_storeu_si128((__m128i *)&elts[pos + 3]->rearm_data, rearm);
- /* C.1 combine data from mCQEs with rx_descriptor_fields1. */
+ _mm_storeu_si128((__m128i *)&elts[pos + 2]->mbuf_rearm_data, rearm);
+ _mm_storeu_si128((__m128i *)&elts[pos + 3]->mbuf_rearm_data, rearm);
+ /* C.1 combine data from mCQEs with mbuf_rx_descriptor_fields1. */
rxdf1 = _mm_shuffle_epi8(mcqe2, shuf_mask1);
rxdf2 = _mm_shuffle_epi8(mcqe2, shuf_mask2);
rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
- /* D.1 store rx_descriptor_fields1. */
+ /* D.1 store mbuf_rx_descriptor_fields1. */
_mm_storeu_si128((__m128i *)
- &elts[pos + 2]->rx_descriptor_fields1,
+ &elts[pos + 2]->mbuf_rx_descriptor_fields1,
rxdf1);
_mm_storeu_si128((__m128i *)
- &elts[pos + 3]->rx_descriptor_fields1,
+ &elts[pos + 3]->mbuf_rx_descriptor_fields1,
rxdf2);
#ifdef MLX5_PMD_SOFT_COUNTERS
invalid_mask = _mm_set_epi64x(0,
@@ -510,11 +510,11 @@
rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 4), 0x30);
rearm2 = _mm_blend_epi16(mbuf_init, ol_flags, 0x30);
rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(ol_flags, 4), 0x30);
- /* Write 8B rearm_data and 8B ol_flags. */
- _mm_store_si128((__m128i *)&pkts[0]->rearm_data, rearm0);
- _mm_store_si128((__m128i *)&pkts[1]->rearm_data, rearm1);
- _mm_store_si128((__m128i *)&pkts[2]->rearm_data, rearm2);
- _mm_store_si128((__m128i *)&pkts[3]->rearm_data, rearm3);
+ /* Write 8B mbuf_rearm_data and 8B ol_flags. */
+ _mm_store_si128((__m128i *)&pkts[0]->mbuf_rearm_data, rearm0);
+ _mm_store_si128((__m128i *)&pkts[1]->mbuf_rearm_data, rearm1);
+ _mm_store_si128((__m128i *)&pkts[2]->mbuf_rearm_data, rearm2);
+ _mm_store_si128((__m128i *)&pkts[3]->mbuf_rearm_data, rearm3);
}
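In the blends above, immediate 0x30 (binary 110000) selects 16-bit words 4 and 5, i.e. bytes 8..11, from the second operand, so each rearmN is the template with one packet's 32-bit flags word patched in at the start of ol_flags; the byte shifts line the right packet's lane up with that window. A self-contained sketch of the primitive (patch_ol_flags is a hypothetical helper name):

	#include <smmintrin.h> /* SSE4.1: _mm_blend_epi16 */

	static inline __m128i
	patch_ol_flags(__m128i mbuf_init, __m128i ol_flags)
	{
		/* imm 0x30: take words 4-5 (bytes 8..11) from ol_flags. */
		return _mm_blend_epi16(mbuf_init, ol_flags, 0x30);
	}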
/**
@@ -693,7 +693,7 @@
/* C.4 adjust flow mark. */
pkt_mb3 = _mm_add_epi32(pkt_mb3, flow_mark_adj);
pkt_mb2 = _mm_add_epi32(pkt_mb2, flow_mark_adj);
- /* D.1 fill in mbuf - rx_descriptor_fields1. */
+ /* D.1 fill in mbuf - mbuf_rx_descriptor_fields1. */
_mm_storeu_si128((void *)&pkts[pos + 3]->pkt_len, pkt_mb3);
_mm_storeu_si128((void *)&pkts[pos + 2]->pkt_len, pkt_mb2);
/* E.1 extract op_own field. */
@@ -723,7 +723,7 @@
/* E.1 extract op_own byte. */
op_own_tmp1 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
op_own = _mm_unpackhi_epi64(op_own_tmp1, op_own_tmp2);
- /* D.1 fill in mbuf - rx_descriptor_fields1. */
+ /* D.1 fill in mbuf - mbuf_rx_descriptor_fields1. */
_mm_storeu_si128((void *)&pkts[pos + 1]->pkt_len, pkt_mb1);
_mm_storeu_si128((void *)&pkts[pos]->pkt_len, pkt_mb0);
/* E.2 mask out CQEs belonging to HW. */
@@ -779,7 +779,7 @@
opcode = _mm_andnot_si128(mini_mask, opcode);
/* D.4 mark if any error is set */
*err |= _mm_cvtsi128_si64(opcode);
- /* D.5 fill in mbuf - rearm_data and packet_type. */
+ /* D.5 fill in mbuf - mbuf_rearm_data and packet_type. */
rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
if (unlikely(rxq->shared)) {
pkts[pos]->port = cq[pos].user_index_low;
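While migrating drivers like this, a layout guard helps catch a misplaced union member early (a sketch; static_assert needs C11 <assert.h>, and the member names follow this patch's convention):

	#include <assert.h>
	#include <stddef.h>

	static_assert(offsetof(struct rte_mbuf, mbuf_rearm_data) ==
		      offsetof(struct rte_mbuf, data_off),
		      "mbuf_rearm_data must alias the first rearm field");
	static_assert(offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) ==
		      offsetof(struct rte_mbuf, packet_type),
		      "mbuf_rx_descriptor_fields1 must alias packet_type");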