[v4,04/18] net/iavf: stop using zero sized marker fields

Message ID 1707978080-28859-5-git-send-email-roretzla@linux.microsoft.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Series: stop using zero sized marker fields

Checks

Context       | Check   | Description
ci/checkpatch | success | coding style OK

Commit Message

Tyler Retzlaff Feb. 15, 2024, 6:21 a.m. UTC
Update to reference the newly named anonymous union markers, which are
supported by standard C, and stop referencing the zero-sized compiler
extension markers.
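For illustration, a minimal standalone sketch of the two marker styles; the
field names, sizes and layout below are simplified assumptions for the sake of
the example, not the actual struct rte_mbuf definition:

/*
 * Illustrative sketch only: simplified fields, not the real rte_mbuf.
 * Shows why replacing a zero-sized marker with a named member of an
 * anonymous union keeps offsetof()-based checks working while using
 * only standard C.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#ifdef __GNUC__
/* Old style: a zero-sized array marks the start of the region that
 * follows. Zero-sized members are a compiler extension, which is what
 * the series removes.
 */
struct old_mbuf {
	uint8_t rearm_data[0];	/* marker, GNU/Clang extension */
	uint16_t refcnt;
	uint16_t nb_segs;
	uint16_t port;
	uint16_t pad;
	uint64_t ol_flags;
};
#endif

/* New style: an anonymous union (standard C11) overlays a named,
 * fixed-size marker on the same fields, so taking the marker's address
 * or offset still denotes the start of the region.
 */
struct new_mbuf {
	union {
		uint8_t mbuf_rearm_data[16];	/* named marker */
		struct {
			uint16_t refcnt;
			uint16_t nb_segs;
			uint16_t port;
			uint16_t pad;
			uint64_t ol_flags;
		};
	};
};

int main(void)
{
	/* The RTE_BUILD_BUG_ON() checks in the driver rely on offsets
	 * like these; the marker's offset is the offset of the first
	 * covered field.
	 */
	printf("marker offset: %zu, ol_flags offset: %zu\n",
	       offsetof(struct new_mbuf, mbuf_rearm_data),
	       offsetof(struct new_mbuf, ol_flags));
	return 0;
}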

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
 drivers/net/iavf/iavf_rxtx_vec_avx2.c   | 60 ++++++++++++++---------------
 drivers/net/iavf/iavf_rxtx_vec_avx512.c | 60 ++++++++++++++---------------
 drivers/net/iavf/iavf_rxtx_vec_common.h |  4 +-
 drivers/net/iavf/iavf_rxtx_vec_neon.c   | 16 ++++----
 drivers/net/iavf/iavf_rxtx_vec_sse.c    | 68 ++++++++++++++++-----------------
 5 files changed, 104 insertions(+), 104 deletions(-)
  

Patch

diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
index 510b4d8..e763b96 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
@@ -104,13 +104,13 @@ 
 	 * calls above.
 	 */
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+			offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 4);
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+			offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 8);
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+			offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 10);
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+			offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 12);
 
 	/* Status/Error flag masks */
 	/**
@@ -374,10 +374,10 @@ 
 		 */
 		/* check the structure matches expectations */
 		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
-				 offsetof(struct rte_mbuf, rearm_data) + 8);
-		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+				 offsetof(struct rte_mbuf, mbuf_rearm_data) + 8);
+		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, mbuf_rearm_data) !=
 				 RTE_ALIGN(offsetof(struct rte_mbuf,
-						    rearm_data),
+						    mbuf_rearm_data),
 					   16));
 		/* build up data and do writes */
 		__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
@@ -398,13 +398,13 @@ 
 		rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
 		rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
 		/* write to mbuf */
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->mbuf_rearm_data,
 				    rearm6);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->mbuf_rearm_data,
 				    rearm4);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->mbuf_rearm_data,
 				    rearm2);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->mbuf_rearm_data,
 				    rearm0);
 
 		/* repeat for the odd mbufs */
@@ -427,13 +427,13 @@ 
 		rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
 		rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
 		/* again write to mbufs */
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->mbuf_rearm_data,
 				    rearm7);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->mbuf_rearm_data,
 				    rearm5);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->mbuf_rearm_data,
 				    rearm3);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->mbuf_rearm_data,
 				    rearm1);
 
 		/* extract and record EOP bit */
@@ -628,13 +628,13 @@ 
 	 * calls above.
 	 */
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+			offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 4);
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+			offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 8);
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+			offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 10);
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+			offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 12);
 
 	/* Status/Error flag masks */
 	/**
@@ -1281,10 +1281,10 @@ 
 		 */
 		/* check the structure matches expectations */
 		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
-				 offsetof(struct rte_mbuf, rearm_data) + 8);
-		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+				 offsetof(struct rte_mbuf, mbuf_rearm_data) + 8);
+		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, mbuf_rearm_data) !=
 				 RTE_ALIGN(offsetof(struct rte_mbuf,
-						    rearm_data),
+						    mbuf_rearm_data),
 					   16));
 		/* build up data and do writes */
 		__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
@@ -1305,13 +1305,13 @@ 
 		rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
 		rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
 		/* write to mbuf */
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->mbuf_rearm_data,
 				    rearm6);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->mbuf_rearm_data,
 				    rearm4);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->mbuf_rearm_data,
 				    rearm2);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->mbuf_rearm_data,
 				    rearm0);
 
 		/* repeat for the odd mbufs */
@@ -1334,13 +1334,13 @@ 
 		rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
 		rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
 		/* again write to mbufs */
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->mbuf_rearm_data,
 				    rearm7);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->mbuf_rearm_data,
 				    rearm5);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->mbuf_rearm_data,
 				    rearm3);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->mbuf_rearm_data,
 				    rearm1);
 
 		/* extract and record EOP bit */
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 3bb6f30..febc4fc 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -141,13 +141,13 @@ 
 	 * calls above.
 	 */
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
-			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+			 offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 4);
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
-			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+			 offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 8);
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
-			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+			 offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 10);
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
-			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+			 offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 12);
 
 	uint16_t i, received;
 
@@ -414,10 +414,10 @@ 
 		 */
 		/* check the structure matches expectations */
 		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
-				 offsetof(struct rte_mbuf, rearm_data) + 8);
-		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+				 offsetof(struct rte_mbuf, mbuf_rearm_data) + 8);
+		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, mbuf_rearm_data) !=
 				 RTE_ALIGN(offsetof(struct rte_mbuf,
-						    rearm_data),
+						    mbuf_rearm_data),
 						    16));
 		/* build up data and do writes */
 		__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
@@ -450,13 +450,13 @@ 
 			rearm0 = _mm256_permute2f128_si256(mbuf_init, mb0_1, 0x20);
 		}
 		/* write to mbuf */
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->mbuf_rearm_data,
 				    rearm6);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->mbuf_rearm_data,
 				    rearm4);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->mbuf_rearm_data,
 				    rearm2);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->mbuf_rearm_data,
 				    rearm0);
 
 		/* repeat for the odd mbufs */
@@ -486,13 +486,13 @@ 
 			rearm1 = _mm256_blend_epi32(mbuf_init, mb0_1, 0xF0);
 		}
 		/* again write to mbufs */
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->mbuf_rearm_data,
 				    rearm7);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->mbuf_rearm_data,
 				    rearm5);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->mbuf_rearm_data,
 				    rearm3);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->mbuf_rearm_data,
 				    rearm1);
 
 		/* extract and record EOP bit */
@@ -709,13 +709,13 @@ 
 	 * calls above.
 	 */
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
-			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+			 offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 4);
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
-			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+			 offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 8);
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
-			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+			 offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 10);
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
-			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+			 offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 12);
 
 	uint16_t i, received;
 
@@ -1437,10 +1437,10 @@ 
 		 */
 		/* check the structure matches expectations */
 		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
-				 offsetof(struct rte_mbuf, rearm_data) + 8);
-		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+				 offsetof(struct rte_mbuf, mbuf_rearm_data) + 8);
+		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, mbuf_rearm_data) !=
 				 RTE_ALIGN(offsetof(struct rte_mbuf,
-						    rearm_data),
+						    mbuf_rearm_data),
 						    16));
 		/* build up data and do writes */
 		__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
@@ -1461,13 +1461,13 @@ 
 		rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
 		rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
 		/* write to mbuf */
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->mbuf_rearm_data,
 				    rearm6);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->mbuf_rearm_data,
 				    rearm4);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->mbuf_rearm_data,
 				    rearm2);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->mbuf_rearm_data,
 				    rearm0);
 
 		/* repeat for the odd mbufs */
@@ -1490,13 +1490,13 @@ 
 		rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
 		rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
 		/* again write to mbufs */
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->mbuf_rearm_data,
 				    rearm7);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->mbuf_rearm_data,
 				    rearm5);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->mbuf_rearm_data,
 				    rearm3);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->mbuf_rearm_data,
 				    rearm1);
 
 		/* extract and record EOP bit */
diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h b/drivers/net/iavf/iavf_rxtx_vec_common.h
index 5c52200..4ce1196 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -205,9 +205,9 @@ 
 	mb_def.port = rxq->port_id;
 	rte_mbuf_refcnt_set(&mb_def, 1);
 
-	/* prevent compiler reordering: rearm_data covers previous fields */
+	/* prevent compiler reordering: mbuf_rearm_data covers previous fields */
 	rte_compiler_barrier();
-	p = (uintptr_t)&mb_def.rearm_data;
+	p = (uintptr_t)&mb_def.mbuf_rearm_data;
 	rxq->mbuf_initializer = *(uint64_t *)p;
 	return 0;
 }
diff --git a/drivers/net/iavf/iavf_rxtx_vec_neon.c b/drivers/net/iavf/iavf_rxtx_vec_neon.c
index 83825aa..f4f6033 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_neon.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_neon.c
@@ -159,10 +159,10 @@ 
 	rearm2 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 2), mbuf_init, 1);
 	rearm3 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 3), mbuf_init, 1);
 
-	vst1q_u64((uint64_t *)&rx_pkts[0]->rearm_data, rearm0);
-	vst1q_u64((uint64_t *)&rx_pkts[1]->rearm_data, rearm1);
-	vst1q_u64((uint64_t *)&rx_pkts[2]->rearm_data, rearm2);
-	vst1q_u64((uint64_t *)&rx_pkts[3]->rearm_data, rearm3);
+	vst1q_u64((uint64_t *)&rx_pkts[0]->mbuf_rearm_data, rearm0);
+	vst1q_u64((uint64_t *)&rx_pkts[1]->mbuf_rearm_data, rearm1);
+	vst1q_u64((uint64_t *)&rx_pkts[2]->mbuf_rearm_data, rearm2);
+	vst1q_u64((uint64_t *)&rx_pkts[3]->mbuf_rearm_data, rearm3);
 }
 
 #define PKTLEN_SHIFT     10
@@ -332,13 +332,13 @@ 
 		pkt_mb1 = vreinterpretq_u8_u16(tmp);
 
 		/* D.3 copy final data to rx_pkts */
-		vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
+		vst1q_u8((void *)&rx_pkts[pos + 3]->mbuf_rx_descriptor_fields1,
 				pkt_mb4);
-		vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
+		vst1q_u8((void *)&rx_pkts[pos + 2]->mbuf_rx_descriptor_fields1,
 				pkt_mb3);
-		vst1q_u8((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
+		vst1q_u8((void *)&rx_pkts[pos + 1]->mbuf_rx_descriptor_fields1,
 				pkt_mb2);
-		vst1q_u8((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+		vst1q_u8((void *)&rx_pkts[pos]->mbuf_rx_descriptor_fields1,
 				pkt_mb1);
 
 		desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 96f187f..fe33507 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -180,13 +180,13 @@ 
 
 	/* write the rearm data and the olflags in one write */
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
-			offsetof(struct rte_mbuf, rearm_data) + 8);
-	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
-			RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
-	_mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
-	_mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
-	_mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
-	_mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
+			offsetof(struct rte_mbuf, mbuf_rearm_data) + 8);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, mbuf_rearm_data) !=
+			RTE_ALIGN(offsetof(struct rte_mbuf, mbuf_rearm_data), 16));
+	_mm_store_si128((__m128i *)&rx_pkts[0]->mbuf_rearm_data, rearm0);
+	_mm_store_si128((__m128i *)&rx_pkts[1]->mbuf_rearm_data, rearm1);
+	_mm_store_si128((__m128i *)&rx_pkts[2]->mbuf_rearm_data, rearm2);
+	_mm_store_si128((__m128i *)&rx_pkts[3]->mbuf_rearm_data, rearm3);
 }
 
 static inline __m128i
@@ -413,13 +413,13 @@ 
 
 	/* write the rearm data and the olflags in one write */
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
-			 offsetof(struct rte_mbuf, rearm_data) + 8);
-	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
-			 RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
-	_mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
-	_mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
-	_mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
-	_mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
+			 offsetof(struct rte_mbuf, mbuf_rearm_data) + 8);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, mbuf_rearm_data) !=
+			 RTE_ALIGN(offsetof(struct rte_mbuf, mbuf_rearm_data), 16));
+	_mm_store_si128((__m128i *)&rx_pkts[0]->mbuf_rearm_data, rearm0);
+	_mm_store_si128((__m128i *)&rx_pkts[1]->mbuf_rearm_data, rearm1);
+	_mm_store_si128((__m128i *)&rx_pkts[2]->mbuf_rearm_data, rearm2);
+	_mm_store_si128((__m128i *)&rx_pkts[3]->mbuf_rearm_data, rearm3);
 }
 
 #define PKTLEN_SHIFT     10
@@ -493,9 +493,9 @@ 
 	 * call above.
 	 */
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+			offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 4);
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+			offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 8);
 	__m128i dd_check, eop_check;
 
 	/* nb_pkts has to be floor-aligned to IAVF_VPMD_DESCS_PER_LOOP */
@@ -541,13 +541,13 @@ 
 	 * here for completeness in case of future modifications.
 	 */
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+			offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 4);
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+			offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 8);
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+			offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 10);
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+			offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 12);
 
 	/* Cache is empty -> need to scan the buffer rings, but first move
 	 * the next 'n' mbufs into the cache
@@ -651,10 +651,10 @@ 
 
 		/* D.3 copy final 3,4 data to rx_pkts */
 		_mm_storeu_si128(
-			(void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
+			(void *)&rx_pkts[pos + 3]->mbuf_rx_descriptor_fields1,
 			pkt_mb4);
 		_mm_storeu_si128(
-			(void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
+			(void *)&rx_pkts[pos + 2]->mbuf_rx_descriptor_fields1,
 			pkt_mb3);
 
 		/* D.2 pkt 1,2 remove crc */
@@ -689,9 +689,9 @@ 
 
 		/* D.3 copy final 1,2 data to rx_pkts */
 		_mm_storeu_si128(
-			(void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
+			(void *)&rx_pkts[pos + 1]->mbuf_rx_descriptor_fields1,
 			pkt_mb2);
-		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+		_mm_storeu_si128((void *)&rx_pkts[pos]->mbuf_rx_descriptor_fields1,
 				 pkt_mb1);
 		desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
 		/* C.4 calc available number of desc */
@@ -766,9 +766,9 @@ 
 	 * call above.
 	 */
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
-			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+			 offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 4);
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
-			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+			 offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 8);
 
 	/* 4 packets DD mask */
 	const __m128i dd_check = _mm_set_epi64x(0x0000000100000001LL,
@@ -824,13 +824,13 @@ 
 	 * here for completeness in case of future modifications.
 	 */
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
-			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+			 offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 4);
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
-			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+			 offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 8);
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
-			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+			 offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 10);
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
-			 offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+			 offsetof(struct rte_mbuf, mbuf_rx_descriptor_fields1) + 12);
 
 	/* Cache is empty -> need to scan the buffer rings, but first move
 	 * the next 'n' mbufs into the cache
@@ -1089,10 +1089,10 @@ 
 
 		/* D.3 copy final 3,4 data to rx_pkts */
 		_mm_storeu_si128
-			((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
+			((void *)&rx_pkts[pos + 3]->mbuf_rx_descriptor_fields1,
 			 pkt_mb3);
 		_mm_storeu_si128
-			((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
+			((void *)&rx_pkts[pos + 2]->mbuf_rx_descriptor_fields1,
 			 pkt_mb2);
 
 		/* C* extract and record EOP bit */
@@ -1116,9 +1116,9 @@ 
 
 		/* D.3 copy final 1,2 data to rx_pkts */
 		_mm_storeu_si128
-			((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
+			((void *)&rx_pkts[pos + 1]->mbuf_rx_descriptor_fields1,
 			 pkt_mb1);
-		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+		_mm_storeu_si128((void *)&rx_pkts[pos]->mbuf_rx_descriptor_fields1,
 				 pkt_mb0);
 		flex_desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
 		/* C.4 calc available number of desc */