@@ -550,6 +550,25 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
return received;
}
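+
+/* Build the PKT_RX_FDIR/PKT_RX_FDIR_ID ol_flags for 8 packets at once from
+ * the flow_id dwords gathered out of their flex descriptors.
+ */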
+static inline __m256i
+flex_rxd_to_fdir_flags_vec_avx2(const __m256i fdir_id0_7)
+{
+#define FDID_MIS_MAGIC 0xFFFFFFFF
+ RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
+ RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
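+	/* both flags must stay in the low 32 bits of ol_flags for the
+	 * 32-bit lane operations below to be valid
+	 */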
+ const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR |
+ PKT_RX_FDIR_ID);
+ /* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
+ const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC);
+ __m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7,
+ fdir_mis_mask);
+	/* XOR with the all-ones mask inverts fdir_mask, so lanes holding a
+	 * valid flow id become all-ones
+	 */
+	fdir_mask = _mm256_xor_si256(fdir_mask, fdir_mis_mask);
+ const __m256i fdir_flags = _mm256_and_si256(fdir_mask, pkt_fdir_bit);
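+	/* per lane: a matched flow id (e.g. 0x1234) compares unequal to the
+	 * magic, so cmpeq gives 0, the XOR flips it to all-ones, and the AND
+	 * yields PKT_RX_FDIR | PKT_RX_FDIR_ID; a mismatch lane yields 0
+	 */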
+
+ return fdir_flags;
+}
+
static inline uint16_t
_iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
struct rte_mbuf **rx_pkts,
@@ -612,8 +631,8 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
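+	/* Octets 12~15 of the flex descriptor carry the FDIR flow id (it is
+	 * extracted further down when fdir is enabled), so the shuffle below
+	 * no longer copies them out as an rss hash.
+	 */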
const __m256i shuf_msk =
_mm256_set_epi8
(/* first descriptor */
- 15, 14,
- 13, 12, /* octet 12~15, 32 bits rss */
+			0xFF, 0xFF,
+			0xFF, 0xFF,	/* octet 12~15: rss not supported, zero out */
11, 10, /* octet 10~11, 16 bits vlan_macip */
5, 4, /* octet 4~5, 16 bits data_len */
0xFF, 0xFF, /* skip hi 16 bits pkt_len, zero out */
@@ -621,8 +640,8 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
0xFF, 0xFF, /* pkt_type set as unknown */
			0xFF, 0xFF,	/* pkt_type set as unknown */
/* second descriptor */
- 15, 14,
- 13, 12, /* octet 12~15, 32 bits rss */
+			0xFF, 0xFF,
+			0xFF, 0xFF,	/* octet 12~15: rss not supported, zero out */
11, 10, /* octet 10~11, 16 bits vlan_macip */
5, 4, /* octet 4~5, 16 bits data_len */
0xFF, 0xFF, /* skip hi 16 bits pkt_len, zero out */
@@ -864,8 +883,61 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
rss_vlan_flag_bits);
/* merge flags */
- const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
+ __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
rss_vlan_flags);
+
+ if (rxq->vsi->adapter->fdir_enabled) {
+ const __m256i fdir_id4_7 =
+ _mm256_unpackhi_epi32(raw_desc6_7, raw_desc4_5);
+
+ const __m256i fdir_id0_3 =
+ _mm256_unpackhi_epi32(raw_desc2_3, raw_desc0_1);
+
+ const __m256i fdir_id0_7 =
+ _mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);
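+			/* after the two unpacks the dword lanes of fdir_id0_7
+			 * hold the flow ids of descriptors 6, 4, 2, 0, 7, 5,
+			 * 3, 1 (low to high), which is why the extract
+			 * indices below look out of order
+			 */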
+
+ const __m256i fdir_flags =
+ flex_rxd_to_fdir_flags_vec_avx2(fdir_id0_7);
+
+ /* merge with fdir_flags */
+ mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags);
+
+ /* write to mbuf: have to use scalar store here */
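+		/* AVX2 has no scatter store and _mm256_extract_epi32 requires
+		 * a compile-time constant index, hence eight unrolled
+		 * extract-and-store pairs
+		 */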
+ uint32_t fdir_id_extr;
+
+ fdir_id_extr = _mm256_extract_epi32(fdir_id0_7, 3);
+ if (fdir_id_extr != FDID_MIS_MAGIC)
+ rx_pkts[i + 0]->hash.fdir.hi = fdir_id_extr;
+
+ fdir_id_extr = _mm256_extract_epi32(fdir_id0_7, 7);
+ if (fdir_id_extr != FDID_MIS_MAGIC)
+ rx_pkts[i + 1]->hash.fdir.hi = fdir_id_extr;
+
+ fdir_id_extr = _mm256_extract_epi32(fdir_id0_7, 2);
+ if (fdir_id_extr != FDID_MIS_MAGIC)
+ rx_pkts[i + 2]->hash.fdir.hi = fdir_id_extr;
+
+ fdir_id_extr = _mm256_extract_epi32(fdir_id0_7, 6);
+ if (fdir_id_extr != FDID_MIS_MAGIC)
+ rx_pkts[i + 3]->hash.fdir.hi = fdir_id_extr;
+
+ fdir_id_extr = _mm256_extract_epi32(fdir_id0_7, 1);
+ if (fdir_id_extr != FDID_MIS_MAGIC)
+ rx_pkts[i + 4]->hash.fdir.hi = fdir_id_extr;
+
+ fdir_id_extr = _mm256_extract_epi32(fdir_id0_7, 5);
+ if (fdir_id_extr != FDID_MIS_MAGIC)
+ rx_pkts[i + 5]->hash.fdir.hi = fdir_id_extr;
+
+ fdir_id_extr = _mm256_extract_epi32(fdir_id0_7, 0);
+ if (fdir_id_extr != FDID_MIS_MAGIC)
+ rx_pkts[i + 6]->hash.fdir.hi = fdir_id_extr;
+
+ fdir_id_extr = _mm256_extract_epi32(fdir_id0_7, 4);
+ if (fdir_id_extr != FDID_MIS_MAGIC)
+ rx_pkts[i + 7]->hash.fdir.hi = fdir_id_extr;
+ } /* if() on fdir_enabled */
+
/**
* At this point, we have the 8 sets of flags in the low 16-bits
* of each 32-bit value in vlan0.