[v3] net/ice: refactor the Rx FlexiMD handling

Message ID 20200918010535.27089-1-haiyue.wang@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Qi Zhang
Headers
Series [v3] net/ice: refactor the Rx FlexiMD handling |

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-testing success Testing PASS
ci/Intel-compilation success Compilation OK

Commit Message

Wang, Haiyue Sept. 18, 2020, 1:05 a.m. UTC
  The hardware supports many kinds of FlexiMDs set into the Rx descriptor, and
the FlexiMDs can have different offsets in the descriptor according to the
DDP package setting.

The FlexiMDs type and offset are identified by the RXDID, which will be
used to setup the queue.

For expanding to support different RXDIDs in the future, refactor the Rx
FlexiMD handling by the functions mapped to related RXDIDs.

Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
---
v3: remove the typedef's __rte_unused, and rewrite the commit title and
    message.
v2: assign the handle for ICE_RXDID_COMMS_OVS directly, not use
    fall-through.
---
 drivers/net/ice/ice_rxtx.c | 263 ++++++++++++++++++++++---------------
 drivers/net/ice/ice_rxtx.h |   5 +
 2 files changed, 163 insertions(+), 105 deletions(-)
  

Comments

Guo, Jia Sept. 22, 2020, 5:35 a.m. UTC | #1
Hi, haiyue

> -----Original Message-----
> From: Wang, Haiyue <haiyue.wang@intel.com>
> Sent: Friday, September 18, 2020 9:06 AM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX
> <junyux.jiang@intel.com>; Rong, Leyi <leyi.rong@intel.com>; Yang, Qiming
> <qiming.yang@intel.com>; Sun, GuinanX <guinanx.sun@intel.com>; Guo,
> Junfeng <junfeng.guo@intel.com>; Guo, Jia <jia.guo@intel.com>; Wang,
> Haiyue <haiyue.wang@intel.com>
> Subject: [PATCH v3] net/ice: refactor the Rx FlexiMD handling
> 
> The hardware supports many kinds of FlexiMDs set into Rx descriptor, and
> the FlexiMDs can have different offsets in the descriptor according the DDP
> package setting.
> 
> The FlexiMDs type and offset are identified by the RXDID, which will be used
> to setup the queue.
> 
> For expanding to support different RXDIDs in the future, refactor the Rx
> FlexiMD handling by the functions mapped to related RXDIDs.
> 
> Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
> ---
> v3: remove the typedef's ___rte_unused, and rewrite the commit title and
>     message.
> v2: assign the handle for ICE_RXDID_COMMS_OVS directly, not use
>     fall-through.
> ---
>  drivers/net/ice/ice_rxtx.c | 263 ++++++++++++++++++++++---------------
>  drivers/net/ice/ice_rxtx.h |   5 +
>  2 files changed, 163 insertions(+), 105 deletions(-)
> 
> diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index
> fef6ad454..93a0ac691 100644
> --- a/drivers/net/ice/ice_rxtx.c
> +++ b/drivers/net/ice/ice_rxtx.c
> @@ -25,40 +25,6 @@ uint64_t
> rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
>  uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
>  uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
> 
> -static inline uint64_t
> -ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid, bool *chk_valid) -{
> -	static struct {
> -		uint64_t *ol_flag;
> -		bool chk_valid;
> -	} ol_flag_map[] = {
> -		[ICE_RXDID_COMMS_AUX_VLAN] = {
> -			&rte_net_ice_dynflag_proto_xtr_vlan_mask, true },
> -		[ICE_RXDID_COMMS_AUX_IPV4] = {
> -			&rte_net_ice_dynflag_proto_xtr_ipv4_mask, true },
> -		[ICE_RXDID_COMMS_AUX_IPV6] = {
> -			&rte_net_ice_dynflag_proto_xtr_ipv6_mask, true },
> -		[ICE_RXDID_COMMS_AUX_IPV6_FLOW] = {
> -			&rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask,
> true },
> -		[ICE_RXDID_COMMS_AUX_TCP] = {
> -			&rte_net_ice_dynflag_proto_xtr_tcp_mask, true },
> -		[ICE_RXDID_COMMS_AUX_IP_OFFSET] = {
> -			&rte_net_ice_dynflag_proto_xtr_ip_offset_mask,
> false },
> -	};
> -	uint64_t *ol_flag;
> -
> -	if (rxdid < RTE_DIM(ol_flag_map)) {
> -		ol_flag = ol_flag_map[rxdid].ol_flag;
> -		if (!ol_flag)
> -			return 0ULL;
> -
> -		*chk_valid = ol_flag_map[rxdid].chk_valid;
> -		return *ol_flag;
> -	}
> -
> -	return 0ULL;
> -}
> -
>  static inline uint8_t
>  ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)  { @@ -76,6 +42,159 @@
> ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
>  				rxdid_map[xtr_type] :
> ICE_RXDID_COMMS_OVS;  }
> 
> +static inline void
> +ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue
> *rxq,
> +				   struct rte_mbuf *mb,
> +				   volatile union ice_rx_flex_desc *rxdp) {
> +	volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
> +			(volatile struct ice_32b_rx_flex_desc_comms_ovs
> *)rxdp; #ifndef
> +RTE_LIBRTE_ICE_16BYTE_RX_DESC
> +	uint16_t stat_err;
> +#endif

This #ifndef could be better combined with the #ifndef below.

> +
> +	if (desc->flow_id != 0xFFFFFFFF) {
> +		mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
> +		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
> +	}
> +
> +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> +	stat_err = rte_le_to_cpu_16(desc->status_error0);
> +	if (likely(stat_err & (1 <<
> ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> +		mb->ol_flags |= PKT_RX_RSS_HASH;
> +		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> +	}
> +#endif
> +}
> +
> +static inline void
> +ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
> +				      struct rte_mbuf *mb,
> +				      volatile union ice_rx_flex_desc *rxdp) {
> +	volatile struct ice_32b_rx_flex_desc_comms *desc =
> +			(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
> +	uint16_t stat_err;
> +
> +	stat_err = rte_le_to_cpu_16(desc->status_error0);
> +	if (likely(stat_err & (1 <<
> ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> +		mb->ol_flags |= PKT_RX_RSS_HASH;
> +		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> +	}
> +
> +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> +	if (desc->flow_id != 0xFFFFFFFF) {
> +		mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
> +		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
> +	}
> +
> +	if (rxq->xtr_ol_flag) {
> +		uint32_t metadata = 0;
> +
> +		stat_err = rte_le_to_cpu_16(desc->status_error1);
> +
> +		if (stat_err & (1 <<
> ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
> +			metadata = rte_le_to_cpu_16(desc-
> >flex_ts.flex.aux0);
> +
> +		if (stat_err & (1 <<
> ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
> +			metadata |=
> +				rte_le_to_cpu_16(desc->flex_ts.flex.aux1)
> << 16;
> +
> +		if (metadata) {
> +			mb->ol_flags |= rxq->xtr_ol_flag;
> +
> +			*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb)
> = metadata;
> +		}
> +	}
> +#endif
> +}
> +
> +static inline void
> +ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
> +				      struct rte_mbuf *mb,
> +				      volatile union ice_rx_flex_desc *rxdp) {
> +	volatile struct ice_32b_rx_flex_desc_comms *desc =
> +			(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
> +	uint16_t stat_err;
> +
> +	stat_err = rte_le_to_cpu_16(desc->status_error0);
> +	if (likely(stat_err & (1 <<
> ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> +		mb->ol_flags |= PKT_RX_RSS_HASH;
> +		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> +	}
> +
> +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> +	if (desc->flow_id != 0xFFFFFFFF) {
> +		mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
> +		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
> +	}
> +
> +	if (rxq->xtr_ol_flag) {
> +		uint32_t metadata = 0;
> +
> +		if (desc->flex_ts.flex.aux0 != 0xFFFF)
> +			metadata = rte_le_to_cpu_16(desc-
> >flex_ts.flex.aux0);
> +		else if (desc->flex_ts.flex.aux1 != 0xFFFF)
> +			metadata = rte_le_to_cpu_16(desc-
> >flex_ts.flex.aux1);

So you mean that ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S and ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S cannot be used to identify the IPv4 hdr offset and IPv6 hdr offset here in rxdid # 25?
And if they can, and they will not be set at the same time, is it really necessary to separate this v2 from v1?

> +
> +		if (metadata) {
> +			mb->ol_flags |= rxq->xtr_ol_flag;
> +
> +			*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb)
> = metadata;
> +		}
> +	}
> +#endif
> +}
> +
> +static void
> +ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t
> +rxdid) {
> +	switch (rxdid) {
> +	case ICE_RXDID_COMMS_AUX_VLAN:
> +		rxq->xtr_ol_flag =
> rte_net_ice_dynflag_proto_xtr_vlan_mask;
> +		rxq->rxd_to_pkt_fields =
> ice_rxd_to_pkt_fields_by_comms_aux_v1;
> +		break;
> +
> +	case ICE_RXDID_COMMS_AUX_IPV4:
> +		rxq->xtr_ol_flag =
> rte_net_ice_dynflag_proto_xtr_ipv4_mask;
> +		rxq->rxd_to_pkt_fields =
> ice_rxd_to_pkt_fields_by_comms_aux_v1;
> +		break;
> +
> +	case ICE_RXDID_COMMS_AUX_IPV6:
> +		rxq->xtr_ol_flag =
> rte_net_ice_dynflag_proto_xtr_ipv6_mask;
> +		rxq->rxd_to_pkt_fields =
> ice_rxd_to_pkt_fields_by_comms_aux_v1;
> +		break;
> +
> +	case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
> +		rxq->xtr_ol_flag =
> rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
> +		rxq->rxd_to_pkt_fields =
> ice_rxd_to_pkt_fields_by_comms_aux_v1;
> +		break;
> +
> +	case ICE_RXDID_COMMS_AUX_TCP:
> +		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
> +		rxq->rxd_to_pkt_fields =
> ice_rxd_to_pkt_fields_by_comms_aux_v1;
> +		break;
> +
> +	case ICE_RXDID_COMMS_AUX_IP_OFFSET:
> +		rxq->xtr_ol_flag =
> rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
> +		rxq->rxd_to_pkt_fields =
> ice_rxd_to_pkt_fields_by_comms_aux_v2;
> +		break;
> +
> +	case ICE_RXDID_COMMS_OVS:
> +		rxq->rxd_to_pkt_fields =
> ice_rxd_to_pkt_fields_by_comms_ovs;
> +		break;
> +
> +	default:
> +		/* update this according to the RXDID for PROTO_XTR_NONE
> */
> +		rxq->rxd_to_pkt_fields =
> ice_rxd_to_pkt_fields_by_comms_ovs;
> +		break;
> +	}
> +
> +	if (!rte_net_ice_dynf_proto_xtr_metadata_avail())
> +		rxq->xtr_ol_flag = 0;
> +}
> +
>  static enum ice_status
>  ice_program_hw_rx_queue(struct ice_rx_queue *rxq)  { @@ -158,6 +277,8
> @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
>  		return -EINVAL;
>  	}
> 
> +	ice_select_rxd_to_pkt_fields_handler(rxq, rxdid);
> +
>  	/* Enable Flexible Descriptors in the queue context which
>  	 * allows this driver to select a specific receive descriptor format
>  	 */
> @@ -1338,74 +1459,6 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile
> union ice_rx_flex_desc *rxdp)
>  		   mb->vlan_tci, mb->vlan_tci_outer);  }
> 
> -#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> -#define ICE_RX_PROTO_XTR_VALID \
> -	((1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) | \
> -	 (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
> -
> -static void
> -ice_rxd_to_proto_xtr(struct rte_mbuf *mb,
> -		     volatile struct ice_32b_rx_flex_desc_comms_ovs *desc)
> -{
> -	uint16_t stat_err = rte_le_to_cpu_16(desc->status_error1);
> -	uint32_t metadata = 0;
> -	uint64_t ol_flag;
> -	bool chk_valid;
> -
> -	ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid, &chk_valid);
> -	if (unlikely(!ol_flag))
> -		return;
> -
> -	if (chk_valid) {
> -		if (stat_err & (1 <<
> ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
> -			metadata = rte_le_to_cpu_16(desc-
> >flex_ts.flex.aux0);
> -
> -		if (stat_err & (1 <<
> ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
> -			metadata |=
> -				rte_le_to_cpu_16(desc->flex_ts.flex.aux1)
> << 16;
> -	} else {
> -		if (rte_le_to_cpu_16(desc->flex_ts.flex.aux0) != 0xFFFF)
> -			metadata = rte_le_to_cpu_16(desc-
> >flex_ts.flex.aux0);
> -		else if (rte_le_to_cpu_16(desc->flex_ts.flex.aux1) != 0xFFFF)
> -			metadata = rte_le_to_cpu_16(desc-
> >flex_ts.flex.aux1);
> -	}
> -
> -	if (!metadata)
> -		return;
> -
> -	mb->ol_flags |= ol_flag;
> -
> -	*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
> -}
> -#endif
> -
> -static inline void
> -ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
> -		      volatile union ice_rx_flex_desc *rxdp)
> -{
> -	volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
> -			(volatile struct ice_32b_rx_flex_desc_comms_ovs
> *)rxdp;
> -#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> -	uint16_t stat_err;
> -
> -	stat_err = rte_le_to_cpu_16(desc->status_error0);
> -	if (likely(stat_err & (1 <<
> ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> -		mb->ol_flags |= PKT_RX_RSS_HASH;
> -		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> -	}
> -#endif
> -
> -	if (desc->flow_id != 0xFFFFFFFF) {
> -		mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
> -		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
> -	}
> -
> -#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> -	if (unlikely(rte_net_ice_dynf_proto_xtr_metadata_avail()))
> -		ice_rxd_to_proto_xtr(mb, desc);
> -#endif
> -}
> -
>  #define ICE_LOOK_AHEAD 8
>  #if (ICE_LOOK_AHEAD != 8)
>  #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
> @@ -1463,7 +1516,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
>  			mb->packet_type =
> ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
> 
> 	rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
>  			ice_rxd_to_vlan_tci(mb, &rxdp[j]);
> -			ice_rxd_to_pkt_fields(mb, &rxdp[j]);
> +			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
> 
>  			mb->ol_flags |= pkt_flags;
>  		}
> @@ -1760,7 +1813,7 @@ ice_recv_scattered_pkts(void *rx_queue,
>  		first_seg->packet_type =
> ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
>  			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
>  		ice_rxd_to_vlan_tci(first_seg, &rxd);
> -		ice_rxd_to_pkt_fields(first_seg, &rxd);
> +		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
>  		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
>  		first_seg->ol_flags |= pkt_flags;
>  		/* Prefetch data of first segment, if configured to do so. */
> @@ -2160,7 +2213,7 @@ ice_recv_pkts(void *rx_queue,
>  		rxm->packet_type =
> ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
>  			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
>  		ice_rxd_to_vlan_tci(rxm, &rxd);
> -		ice_rxd_to_pkt_fields(rxm, &rxd);
> +		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
>  		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
>  		rxm->ol_flags |= pkt_flags;
>  		/* copy old mbuf to rx_pkts */
> diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h index
> 9fa57b3b2..de2291788 100644
> --- a/drivers/net/ice/ice_rxtx.h
> +++ b/drivers/net/ice/ice_rxtx.h
> @@ -42,6 +42,9 @@
> 
>  typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
> typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
> +typedef void (*ice_rxd_to_pkt_fields_t)(struct ice_rx_queue *rxq,
> +					struct rte_mbuf *mb,
> +					volatile union ice_rx_flex_desc
> *rxdp);
> 
>  struct ice_rx_entry {
>  	struct rte_mbuf *mbuf;
> @@ -82,6 +85,8 @@ struct ice_rx_queue {
>  	bool q_set; /* indicate if rx queue has been configured */
>  	bool rx_deferred_start; /* don't start this queue in dev start */
>  	uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */
> +	uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
> +	ice_rxd_to_pkt_fields_t rxd_to_pkt_fields;

If you create a function pointer here in the .h, it is better to add some documentation for it.

>  	ice_rx_release_mbufs_t rx_rel_mbufs;
>  };
> 
> --
> 2.28.0
  
Wang, Haiyue Sept. 22, 2020, 5:42 a.m. UTC | #2
Hi Jeff,

> -----Original Message-----
> From: Guo, Jia <jia.guo@intel.com>
> Sent: Tuesday, September 22, 2020 13:35
> To: Wang, Haiyue <haiyue.wang@intel.com>; dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX <junyux.jiang@intel.com>; Rong, Leyi
> <leyi.rong@intel.com>; Yang, Qiming <qiming.yang@intel.com>; Sun, GuinanX <guinanx.sun@intel.com>; Guo,
> Junfeng <junfeng.guo@intel.com>
> Subject: RE: [PATCH v3] net/ice: refactor the Rx FlexiMD handling
> 
> Hi, haiyue
> 
> > -----Original Message-----
> > From: Wang, Haiyue <haiyue.wang@intel.com>
> > Sent: Friday, September 18, 2020 9:06 AM
> > To: dev@dpdk.org
> > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX
> > <junyux.jiang@intel.com>; Rong, Leyi <leyi.rong@intel.com>; Yang, Qiming
> > <qiming.yang@intel.com>; Sun, GuinanX <guinanx.sun@intel.com>; Guo,
> > Junfeng <junfeng.guo@intel.com>; Guo, Jia <jia.guo@intel.com>; Wang,
> > Haiyue <haiyue.wang@intel.com>
> > Subject: [PATCH v3] net/ice: refactor the Rx FlexiMD handling
> >
> > The hardware supports many kinds of FlexiMDs set into Rx descriptor, and
> > the FlexiMDs can have different offsets in the descriptor according the DDP
> > package setting.
> >
> > The FlexiMDs type and offset are identified by the RXDID, which will be used
> > to setup the queue.
> >
> > For expanding to support different RXDIDs in the future, refactor the Rx
> > FlexiMD handling by the functions mapped to related RXDIDs.
> >
> > Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
> > ---
> > v3: remove the typedef's ___rte_unused, and rewrite the commit title and
> >     message.
> > v2: assign the handle for ICE_RXDID_COMMS_OVS directly, not use
> >     fall-through.
> > ---
> >  drivers/net/ice/ice_rxtx.c | 263 ++++++++++++++++++++++---------------
> >  drivers/net/ice/ice_rxtx.h |   5 +
> >  2 files changed, 163 insertions(+), 105 deletions(-)
> >
> > diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index
> > fef6ad454..93a0ac691 100644
> > --- a/drivers/net/ice/ice_rxtx.c
> > +++ b/drivers/net/ice/ice_rxtx.c
> > @@ -25,40 +25,6 @@ uint64_t
> > rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
> >  uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
> >  uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
> >
> > -static inline uint64_t
> > -ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid, bool *chk_valid) -{
> > -static struct {
> > -uint64_t *ol_flag;
> > -bool chk_valid;
> > -} ol_flag_map[] = {
> > -[ICE_RXDID_COMMS_AUX_VLAN] = {
> > -&rte_net_ice_dynflag_proto_xtr_vlan_mask, true },
> > -[ICE_RXDID_COMMS_AUX_IPV4] = {
> > -&rte_net_ice_dynflag_proto_xtr_ipv4_mask, true },
> > -[ICE_RXDID_COMMS_AUX_IPV6] = {
> > -&rte_net_ice_dynflag_proto_xtr_ipv6_mask, true },
> > -[ICE_RXDID_COMMS_AUX_IPV6_FLOW] = {
> > -&rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask,
> > true },
> > -[ICE_RXDID_COMMS_AUX_TCP] = {
> > -&rte_net_ice_dynflag_proto_xtr_tcp_mask, true },
> > -[ICE_RXDID_COMMS_AUX_IP_OFFSET] = {
> > -&rte_net_ice_dynflag_proto_xtr_ip_offset_mask,
> > false },
> > -};
> > -uint64_t *ol_flag;
> > -
> > -if (rxdid < RTE_DIM(ol_flag_map)) {
> > -ol_flag = ol_flag_map[rxdid].ol_flag;
> > -if (!ol_flag)
> > -return 0ULL;
> > -
> > -*chk_valid = ol_flag_map[rxdid].chk_valid;
> > -return *ol_flag;
> > -}
> > -
> > -return 0ULL;
> > -}
> > -
> >  static inline uint8_t
> >  ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)  { @@ -76,6 +42,159 @@
> > ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
> >  rxdid_map[xtr_type] :
> > ICE_RXDID_COMMS_OVS;  }
> >
> > +static inline void
> > +ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue
> > *rxq,
> > +   struct rte_mbuf *mb,
> > +   volatile union ice_rx_flex_desc *rxdp) {
> > +volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
> > +(volatile struct ice_32b_rx_flex_desc_comms_ovs
> > *)rxdp; #ifndef
> > +RTE_LIBRTE_ICE_16BYTE_RX_DESC
> > +uint16_t stat_err;
> > +#endif
> 
> This #ifndef could be better combine with below #ifndef.
> 

I changed it according to the different offsets: e.g. ovs has the RSS hash
in Qword 3, which comes after the flow ID in Qword 1, while the others are the
opposite. This way the handling order reflects the offset difference, although
it MAY not look so beautiful. What do you think? :)

> > +
> > +if (desc->flow_id != 0xFFFFFFFF) {
> > +mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
> > +mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
> > +}
> > +
> > +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> > +stat_err = rte_le_to_cpu_16(desc->status_error0);
> > +if (likely(stat_err & (1 <<
> > ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> > +mb->ol_flags |= PKT_RX_RSS_HASH;
> > +mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> > +}
> > +#endif
> > +}
> > +
> > +static inline void
> > +ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
> > +      struct rte_mbuf *mb,
> > +      volatile union ice_rx_flex_desc *rxdp) {
> > +volatile struct ice_32b_rx_flex_desc_comms *desc =
> > +(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
> > +uint16_t stat_err;
> > +
> > +stat_err = rte_le_to_cpu_16(desc->status_error0);
> > +if (likely(stat_err & (1 <<
> > ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> > +mb->ol_flags |= PKT_RX_RSS_HASH;
> > +mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> > +}
> > +
> > +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> > +if (desc->flow_id != 0xFFFFFFFF) {
> > +mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
> > +mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
> > +}
> > +
> > +if (rxq->xtr_ol_flag) {
> > +uint32_t metadata = 0;
> > +
> > +stat_err = rte_le_to_cpu_16(desc->status_error1);
> > +
> > +if (stat_err & (1 <<
> > ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
> > +metadata = rte_le_to_cpu_16(desc-
> > >flex_ts.flex.aux0);
> > +
> > +if (stat_err & (1 <<
> > ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
> > +metadata |=
> > +rte_le_to_cpu_16(desc->flex_ts.flex.aux1)
> > << 16;
> > +
> > +if (metadata) {
> > +mb->ol_flags |= rxq->xtr_ol_flag;
> > +
> > +*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb)
> > = metadata;
> > +}
> > +}
> > +#endif
> > +}
> > +
> > +static inline void
> > +ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
> > +      struct rte_mbuf *mb,
> > +      volatile union ice_rx_flex_desc *rxdp) {
> > +volatile struct ice_32b_rx_flex_desc_comms *desc =
> > +(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
> > +uint16_t stat_err;
> > +
> > +stat_err = rte_le_to_cpu_16(desc->status_error0);
> > +if (likely(stat_err & (1 <<
> > ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> > +mb->ol_flags |= PKT_RX_RSS_HASH;
> > +mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> > +}
> > +
> > +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> > +if (desc->flow_id != 0xFFFFFFFF) {
> > +mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
> > +mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
> > +}
> > +
> > +if (rxq->xtr_ol_flag) {
> > +uint32_t metadata = 0;
> > +
> > +if (desc->flex_ts.flex.aux0 != 0xFFFF)
> > +metadata = rte_le_to_cpu_16(desc-
> > >flex_ts.flex.aux0);
> > +else if (desc->flex_ts.flex.aux1 != 0xFFFF)
> > +metadata = rte_le_to_cpu_16(desc-
> > >flex_ts.flex.aux1);
> 
> So you mean the ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S and ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S
> could not use to identify the IPv4 hdr offset and IPv6 hdr offset here in rxdid # 25?
> And if yes they can and they will not set at the same time, is that separate this v2 from v1 necessary?
> 
> > +
> > +if (metadata) {
> > +mb->ol_flags |= rxq->xtr_ol_flag;
> > +
> > +*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb)
> > = metadata;
> > +}
> > +}
> > +#endif
> > +}
> > +
> > +static void
> > +ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t
> > +rxdid) {
> > +switch (rxdid) {
> > +case ICE_RXDID_COMMS_AUX_VLAN:
> > +rxq->xtr_ol_flag =
> > rte_net_ice_dynflag_proto_xtr_vlan_mask;
> > +rxq->rxd_to_pkt_fields =
> > ice_rxd_to_pkt_fields_by_comms_aux_v1;
> > +break;
> > +
> > +case ICE_RXDID_COMMS_AUX_IPV4:
> > +rxq->xtr_ol_flag =
> > rte_net_ice_dynflag_proto_xtr_ipv4_mask;
> > +rxq->rxd_to_pkt_fields =
> > ice_rxd_to_pkt_fields_by_comms_aux_v1;
> > +break;
> > +
> > +case ICE_RXDID_COMMS_AUX_IPV6:
> > +rxq->xtr_ol_flag =
> > rte_net_ice_dynflag_proto_xtr_ipv6_mask;
> > +rxq->rxd_to_pkt_fields =
> > ice_rxd_to_pkt_fields_by_comms_aux_v1;
> > +break;
> > +
> > +case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
> > +rxq->xtr_ol_flag =
> > rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
> > +rxq->rxd_to_pkt_fields =
> > ice_rxd_to_pkt_fields_by_comms_aux_v1;
> > +break;
> > +
> > +case ICE_RXDID_COMMS_AUX_TCP:
> > +rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
> > +rxq->rxd_to_pkt_fields =
> > ice_rxd_to_pkt_fields_by_comms_aux_v1;
> > +break;
> > +
> > +case ICE_RXDID_COMMS_AUX_IP_OFFSET:
> > +rxq->xtr_ol_flag =
> > rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
> > +rxq->rxd_to_pkt_fields =
> > ice_rxd_to_pkt_fields_by_comms_aux_v2;
> > +break;
> > +
> > +case ICE_RXDID_COMMS_OVS:
> > +rxq->rxd_to_pkt_fields =
> > ice_rxd_to_pkt_fields_by_comms_ovs;
> > +break;
> > +
> > +default:
> > +/* update this according to the RXDID for PROTO_XTR_NONE
> > */
> > +rxq->rxd_to_pkt_fields =
> > ice_rxd_to_pkt_fields_by_comms_ovs;
> > +break;
> > +}
> > +
> > +if (!rte_net_ice_dynf_proto_xtr_metadata_avail())
> > +rxq->xtr_ol_flag = 0;
> > +}
> > +
> >  static enum ice_status
> >  ice_program_hw_rx_queue(struct ice_rx_queue *rxq)  { @@ -158,6 +277,8
> > @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
> >  return -EINVAL;
> >  }
> >
> > +ice_select_rxd_to_pkt_fields_handler(rxq, rxdid);
> > +
> >  /* Enable Flexible Descriptors in the queue context which
> >   * allows this driver to select a specific receive descriptor format
> >   */
> > @@ -1338,74 +1459,6 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile
> > union ice_rx_flex_desc *rxdp)
> >     mb->vlan_tci, mb->vlan_tci_outer);  }
> >
> > -#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> > -#define ICE_RX_PROTO_XTR_VALID \
> > -((1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) | \
> > - (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
> > -
> > -static void
> > -ice_rxd_to_proto_xtr(struct rte_mbuf *mb,
> > -     volatile struct ice_32b_rx_flex_desc_comms_ovs *desc)
> > -{
> > -uint16_t stat_err = rte_le_to_cpu_16(desc->status_error1);
> > -uint32_t metadata = 0;
> > -uint64_t ol_flag;
> > -bool chk_valid;
> > -
> > -ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid, &chk_valid);
> > -if (unlikely(!ol_flag))
> > -return;
> > -
> > -if (chk_valid) {
> > -if (stat_err & (1 <<
> > ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
> > -metadata = rte_le_to_cpu_16(desc-
> > >flex_ts.flex.aux0);
> > -
> > -if (stat_err & (1 <<
> > ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
> > -metadata |=
> > -rte_le_to_cpu_16(desc->flex_ts.flex.aux1)
> > << 16;
> > -} else {
> > -if (rte_le_to_cpu_16(desc->flex_ts.flex.aux0) != 0xFFFF)
> > -metadata = rte_le_to_cpu_16(desc-
> > >flex_ts.flex.aux0);
> > -else if (rte_le_to_cpu_16(desc->flex_ts.flex.aux1) != 0xFFFF)
> > -metadata = rte_le_to_cpu_16(desc-
> > >flex_ts.flex.aux1);
> > -}
> > -
> > -if (!metadata)
> > -return;
> > -
> > -mb->ol_flags |= ol_flag;
> > -
> > -*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
> > -}
> > -#endif
> > -
> > -static inline void
> > -ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
> > -      volatile union ice_rx_flex_desc *rxdp)
> > -{
> > -volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
> > -(volatile struct ice_32b_rx_flex_desc_comms_ovs
> > *)rxdp;
> > -#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> > -uint16_t stat_err;
> > -
> > -stat_err = rte_le_to_cpu_16(desc->status_error0);
> > -if (likely(stat_err & (1 <<
> > ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> > -mb->ol_flags |= PKT_RX_RSS_HASH;
> > -mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> > -}
> > -#endif
> > -
> > -if (desc->flow_id != 0xFFFFFFFF) {
> > -mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
> > -mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
> > -}
> > -
> > -#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> > -if (unlikely(rte_net_ice_dynf_proto_xtr_metadata_avail()))
> > -ice_rxd_to_proto_xtr(mb, desc);
> > -#endif
> > -}
> > -
> >  #define ICE_LOOK_AHEAD 8
> >  #if (ICE_LOOK_AHEAD != 8)
> >  #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
> > @@ -1463,7 +1516,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
> >  mb->packet_type =
> > ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
> >
> > rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
> >  ice_rxd_to_vlan_tci(mb, &rxdp[j]);
> > -ice_rxd_to_pkt_fields(mb, &rxdp[j]);
> > +rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
> >
> >  mb->ol_flags |= pkt_flags;
> >  }
> > @@ -1760,7 +1813,7 @@ ice_recv_scattered_pkts(void *rx_queue,
> >  first_seg->packet_type =
> > ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
> >  rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
> >  ice_rxd_to_vlan_tci(first_seg, &rxd);
> > -ice_rxd_to_pkt_fields(first_seg, &rxd);
> > +rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
> >  pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
> >  first_seg->ol_flags |= pkt_flags;
> >  /* Prefetch data of first segment, if configured to do so. */
> > @@ -2160,7 +2213,7 @@ ice_recv_pkts(void *rx_queue,
> >  rxm->packet_type =
> > ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
> >  rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
> >  ice_rxd_to_vlan_tci(rxm, &rxd);
> > -ice_rxd_to_pkt_fields(rxm, &rxd);
> > +rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
> >  pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
> >  rxm->ol_flags |= pkt_flags;
> >  /* copy old mbuf to rx_pkts */
> > diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h index
> > 9fa57b3b2..de2291788 100644
> > --- a/drivers/net/ice/ice_rxtx.h
> > +++ b/drivers/net/ice/ice_rxtx.h
> > @@ -42,6 +42,9 @@
> >
> >  typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
> > typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
> > +typedef void (*ice_rxd_to_pkt_fields_t)(struct ice_rx_queue *rxq,
> > +struct rte_mbuf *mb,
> > +volatile union ice_rx_flex_desc
> > *rxdp);
> >
> >  struct ice_rx_entry {
> >  struct rte_mbuf *mbuf;
> > @@ -82,6 +85,8 @@ struct ice_rx_queue {
> >  bool q_set; /* indicate if rx queue has been configured */
> >  bool rx_deferred_start; /* don't start this queue in dev start */
> >  uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */
> > +uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
> > +ice_rxd_to_pkt_fields_t rxd_to_pkt_fields;
> 
> If create a function pointer here in .h, it is better add some doc.
> 
> >  ice_rx_release_mbufs_t rx_rel_mbufs;
> >  };
> >
> > --
> > 2.28.0
>
  
Guo, Jia Sept. 22, 2020, 5:50 a.m. UTC | #3
> -----Original Message-----
> From: Wang, Haiyue <haiyue.wang@intel.com>
> Sent: Tuesday, September 22, 2020 1:42 PM
> To: Guo, Jia <jia.guo@intel.com>; dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX
> <junyux.jiang@intel.com>; Rong, Leyi <leyi.rong@intel.com>; Yang, Qiming
> <qiming.yang@intel.com>; Sun, GuinanX <guinanx.sun@intel.com>; Guo,
> Junfeng <junfeng.guo@intel.com>
> Subject: RE: [PATCH v3] net/ice: refactor the Rx FlexiMD handling
> 
> Hi Jeff,
> 
> > -----Original Message-----
> > From: Guo, Jia <jia.guo@intel.com>
> > Sent: Tuesday, September 22, 2020 13:35
> > To: Wang, Haiyue <haiyue.wang@intel.com>; dev@dpdk.org
> > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX
> > <junyux.jiang@intel.com>; Rong, Leyi <leyi.rong@intel.com>; Yang,
> > Qiming <qiming.yang@intel.com>; Sun, GuinanX <guinanx.sun@intel.com>;
> > Guo, Junfeng <junfeng.guo@intel.com>
> > Subject: RE: [PATCH v3] net/ice: refactor the Rx FlexiMD handling
> >
> > Hi, haiyue
> >
> > > -----Original Message-----
> > > From: Wang, Haiyue <haiyue.wang@intel.com>
> > > Sent: Friday, September 18, 2020 9:06 AM
> > > To: dev@dpdk.org
> > > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX
> > > <junyux.jiang@intel.com>; Rong, Leyi <leyi.rong@intel.com>; Yang,
> > > Qiming <qiming.yang@intel.com>; Sun, GuinanX
> > > <guinanx.sun@intel.com>; Guo, Junfeng <junfeng.guo@intel.com>; Guo,
> > > Jia <jia.guo@intel.com>; Wang, Haiyue <haiyue.wang@intel.com>
> > > Subject: [PATCH v3] net/ice: refactor the Rx FlexiMD handling
> > >
> > > The hardware supports many kinds of FlexiMDs set into Rx descriptor,
> > > and the FlexiMDs can have different offsets in the descriptor
> > > according the DDP package setting.
> > >
> > > The FlexiMDs type and offset are identified by the RXDID, which will
> > > be used to setup the queue.
> > >
> > > For expanding to support different RXDIDs in the future, refactor
> > > the Rx FlexiMD handling by the functions mapped to related RXDIDs.
> > >
> > > Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
> > > ---
> > > v3: remove the typedef's ___rte_unused, and rewrite the commit title
> and
> > >     message.
> > > v2: assign the handle for ICE_RXDID_COMMS_OVS directly, not use
> > >     fall-through.
> > > ---
> > >  drivers/net/ice/ice_rxtx.c | 263 ++++++++++++++++++++++---------------
> > >  drivers/net/ice/ice_rxtx.h |   5 +
> > >  2 files changed, 163 insertions(+), 105 deletions(-)
> > >
> > > diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
> > > index
> > > fef6ad454..93a0ac691 100644
> > > --- a/drivers/net/ice/ice_rxtx.c
> > > +++ b/drivers/net/ice/ice_rxtx.c
> > > @@ -25,40 +25,6 @@ uint64_t
> > > rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
> > >  uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
> > >  uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
> > >
> > > -static inline uint64_t
> > > -ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid, bool *chk_valid) -{
> > > -static struct { -uint64_t *ol_flag; -bool chk_valid; -}
> > > ol_flag_map[] = { -[ICE_RXDID_COMMS_AUX_VLAN] = {
> > > -&rte_net_ice_dynflag_proto_xtr_vlan_mask, true },
> > > -[ICE_RXDID_COMMS_AUX_IPV4] = {
> > > -&rte_net_ice_dynflag_proto_xtr_ipv4_mask, true },
> > > -[ICE_RXDID_COMMS_AUX_IPV6] = {
> > > -&rte_net_ice_dynflag_proto_xtr_ipv6_mask, true },
> > > -[ICE_RXDID_COMMS_AUX_IPV6_FLOW] = {
> > > -&rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask,
> > > true },
> > > -[ICE_RXDID_COMMS_AUX_TCP] = {
> > > -&rte_net_ice_dynflag_proto_xtr_tcp_mask, true },
> > > -[ICE_RXDID_COMMS_AUX_IP_OFFSET] = {
> > > -&rte_net_ice_dynflag_proto_xtr_ip_offset_mask,
> > > false },
> > > -};
> > > -uint64_t *ol_flag;
> > > -
> > > -if (rxdid < RTE_DIM(ol_flag_map)) { -ol_flag =
> > > ol_flag_map[rxdid].ol_flag; -if (!ol_flag) -return 0ULL;
> > > -
> > > -*chk_valid = ol_flag_map[rxdid].chk_valid; -return *ol_flag; -}
> > > -
> > > -return 0ULL;
> > > -}
> > > -
> > >  static inline uint8_t
> > >  ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)  { @@ -76,6 +42,159
> > > @@ ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
> > > rxdid_map[xtr_type] :
> > > ICE_RXDID_COMMS_OVS;  }
> > >
> > > +static inline void
> > > +ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct
> ice_rx_queue
> > > *rxq,
> > > +   struct rte_mbuf *mb,
> > > +   volatile union ice_rx_flex_desc *rxdp) { volatile struct
> > > +ice_32b_rx_flex_desc_comms_ovs *desc = (volatile struct
> > > +ice_32b_rx_flex_desc_comms_ovs
> > > *)rxdp; #ifndef
> > > +RTE_LIBRTE_ICE_16BYTE_RX_DESC
> > > +uint16_t stat_err;
> > > +#endif
> >
> > This #ifndef could be better combine with below #ifndef.
> >
> 
> I changed it to according to the different offsets, like ovs has rss hash in
> Qword 3, which is after flow Id Qword 1, others are opposite. So that this
> handling order can reflect the offset difference, although, it MAY looks not
> so beautiful. What do you think ? :)
> 

I am not sure I got your point about the reason for the order, but I think the code, whether inside or outside the #ifndef, should clearly show the offset difference. I would insist on that unless others agree with you. And I still have 2 other comments; assuming you agree with them by default, could I expect a new version or not?

> > > +
> > > +if (desc->flow_id != 0xFFFFFFFF) {
> > > +mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; hash.fdir.hi =
> > > +mb->rte_le_to_cpu_32(desc->flow_id);
> > > +}
> > > +
> > > +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC stat_err =
> > > +rte_le_to_cpu_16(desc->status_error0);
> > > +if (likely(stat_err & (1 <<
> > > ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> > > +mb->ol_flags |= PKT_RX_RSS_HASH;
> > > +mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> > > +}
> > > +#endif
> > > +}
> > > +
> > > +static inline void
> > > +ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
> > > +      struct rte_mbuf *mb,
> > > +      volatile union ice_rx_flex_desc *rxdp) { volatile struct
> > > +ice_32b_rx_flex_desc_comms *desc = (volatile struct
> > > +ice_32b_rx_flex_desc_comms *)rxdp; uint16_t stat_err;
> > > +
> > > +stat_err = rte_le_to_cpu_16(desc->status_error0);
> > > +if (likely(stat_err & (1 <<
> > > ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> > > +mb->ol_flags |= PKT_RX_RSS_HASH;
> > > +mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> > > +}
> > > +
> > > +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC if (desc->flow_id !=
> > > +0xFFFFFFFF) {
> > > +mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; hash.fdir.hi =
> > > +mb->rte_le_to_cpu_32(desc->flow_id);
> > > +}
> > > +
> > > +if (rxq->xtr_ol_flag) {
> > > +uint32_t metadata = 0;
> > > +
> > > +stat_err = rte_le_to_cpu_16(desc->status_error1);
> > > +
> > > +if (stat_err & (1 <<
> > > ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
> > > +metadata = rte_le_to_cpu_16(desc-
> > > >flex_ts.flex.aux0);
> > > +
> > > +if (stat_err & (1 <<
> > > ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
> > > +metadata |=
> > > +rte_le_to_cpu_16(desc->flex_ts.flex.aux1)
> > > << 16;
> > > +
> > > +if (metadata) {
> > > +mb->ol_flags |= rxq->xtr_ol_flag;
> > > +
> > > +*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb)
> > > = metadata;
> > > +}
> > > +}
> > > +#endif
> > > +}
> > > +
> > > +static inline void
> > > +ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
> > > +      struct rte_mbuf *mb,
> > > +      volatile union ice_rx_flex_desc *rxdp) { volatile struct
> > > +ice_32b_rx_flex_desc_comms *desc = (volatile struct
> > > +ice_32b_rx_flex_desc_comms *)rxdp; uint16_t stat_err;
> > > +
> > > +stat_err = rte_le_to_cpu_16(desc->status_error0);
> > > +if (likely(stat_err & (1 <<
> > > ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> > > +mb->ol_flags |= PKT_RX_RSS_HASH;
> > > +mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> > > +}
> > > +
> > > +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC if (desc->flow_id !=
> > > +0xFFFFFFFF) {
> > > +mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; hash.fdir.hi =
> > > +mb->rte_le_to_cpu_32(desc->flow_id);
> > > +}
> > > +
> > > +if (rxq->xtr_ol_flag) {
> > > +uint32_t metadata = 0;
> > > +
> > > +if (desc->flex_ts.flex.aux0 != 0xFFFF) metadata =
> > > +rte_le_to_cpu_16(desc-
> > > >flex_ts.flex.aux0);
> > > +else if (desc->flex_ts.flex.aux1 != 0xFFFF) metadata =
> > > +rte_le_to_cpu_16(desc-
> > > >flex_ts.flex.aux1);
> >
> > So you mean the ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S and
> > ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S
> > could not use to identify the IPv4 hdr offset and IPv6 hdr offset here in
> rxdid # 25?
> > And if yes they can and they will not set at the same time, is that separate
> this v2 from v1 necessary?
> >
> > > +
> > > +if (metadata) {
> > > +mb->ol_flags |= rxq->xtr_ol_flag;
> > > +
> > > +*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb)
> > > = metadata;
> > > +}
> > > +}
> > > +#endif
> > > +}
> > > +
> > > +static void
> > > +ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq,
> > > +uint32_t
> > > +rxdid) {
> > > +switch (rxdid) {
> > > +case ICE_RXDID_COMMS_AUX_VLAN:
> > > +rxq->xtr_ol_flag =
> > > rte_net_ice_dynflag_proto_xtr_vlan_mask;
> > > +rxq->rxd_to_pkt_fields =
> > > ice_rxd_to_pkt_fields_by_comms_aux_v1;
> > > +break;
> > > +
> > > +case ICE_RXDID_COMMS_AUX_IPV4:
> > > +rxq->xtr_ol_flag =
> > > rte_net_ice_dynflag_proto_xtr_ipv4_mask;
> > > +rxq->rxd_to_pkt_fields =
> > > ice_rxd_to_pkt_fields_by_comms_aux_v1;
> > > +break;
> > > +
> > > +case ICE_RXDID_COMMS_AUX_IPV6:
> > > +rxq->xtr_ol_flag =
> > > rte_net_ice_dynflag_proto_xtr_ipv6_mask;
> > > +rxq->rxd_to_pkt_fields =
> > > ice_rxd_to_pkt_fields_by_comms_aux_v1;
> > > +break;
> > > +
> > > +case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
> > > +rxq->xtr_ol_flag =
> > > rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
> > > +rxq->rxd_to_pkt_fields =
> > > ice_rxd_to_pkt_fields_by_comms_aux_v1;
> > > +break;
> > > +
> > > +case ICE_RXDID_COMMS_AUX_TCP:
> > > +rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
> > > +rxq->rxd_to_pkt_fields =
> > > ice_rxd_to_pkt_fields_by_comms_aux_v1;
> > > +break;
> > > +
> > > +case ICE_RXDID_COMMS_AUX_IP_OFFSET:
> > > +rxq->xtr_ol_flag =
> > > rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
> > > +rxq->rxd_to_pkt_fields =
> > > ice_rxd_to_pkt_fields_by_comms_aux_v2;
> > > +break;
> > > +
> > > +case ICE_RXDID_COMMS_OVS:
> > > +rxq->rxd_to_pkt_fields =
> > > ice_rxd_to_pkt_fields_by_comms_ovs;
> > > +break;
> > > +
> > > +default:
> > > +/* update this according to the RXDID for PROTO_XTR_NONE
> > > */
> > > +rxq->rxd_to_pkt_fields =
> > > ice_rxd_to_pkt_fields_by_comms_ovs;
> > > +break;
> > > +}
> > > +
> > > +if (!rte_net_ice_dynf_proto_xtr_metadata_avail())
> > > +rxq->xtr_ol_flag = 0;
> > > +}
> > > +
> > >  static enum ice_status
> > >  ice_program_hw_rx_queue(struct ice_rx_queue *rxq)  { @@ -158,6
> > > +277,8 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
> return
> > > -EINVAL;  }
> > >
> > > +ice_select_rxd_to_pkt_fields_handler(rxq, rxdid);
> > > +
> > >  /* Enable Flexible Descriptors in the queue context which
> > >   * allows this driver to select a specific receive descriptor format
> > >   */
> > > @@ -1338,74 +1459,6 @@ ice_rxd_to_vlan_tci(struct rte_mbuf *mb,
> > > volatile union ice_rx_flex_desc *rxdp)
> > >     mb->vlan_tci, mb->vlan_tci_outer);  }
> > >
> > > -#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC -#define
> > > ICE_RX_PROTO_XTR_VALID \
> > > -((1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) | \
> > > - (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
> > > -
> > > -static void
> > > -ice_rxd_to_proto_xtr(struct rte_mbuf *mb,
> > > -     volatile struct ice_32b_rx_flex_desc_comms_ovs *desc)
> > > -{
> > > -uint16_t stat_err = rte_le_to_cpu_16(desc->status_error1);
> > > -uint32_t metadata = 0;
> > > -uint64_t ol_flag;
> > > -bool chk_valid;
> > > -
> > > -ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid, &chk_valid);
> > > -if (unlikely(!ol_flag)) -return;
> > > -
> > > -if (chk_valid) {
> > > -if (stat_err & (1 <<
> > > ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
> > > -metadata = rte_le_to_cpu_16(desc-
> > > >flex_ts.flex.aux0);
> > > -
> > > -if (stat_err & (1 <<
> > > ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
> > > -metadata |=
> > > -rte_le_to_cpu_16(desc->flex_ts.flex.aux1)
> > > << 16;
> > > -} else {
> > > -if (rte_le_to_cpu_16(desc->flex_ts.flex.aux0) != 0xFFFF) -metadata
> > > = rte_le_to_cpu_16(desc-
> > > >flex_ts.flex.aux0);
> > > -else if (rte_le_to_cpu_16(desc->flex_ts.flex.aux1) != 0xFFFF)
> > > -metadata = rte_le_to_cpu_16(desc-
> > > >flex_ts.flex.aux1);
> > > -}
> > > -
> > > -if (!metadata)
> > > -return;
> > > -
> > > -mb->ol_flags |= ol_flag;
> > > -
> > > -*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata; -} -
> #endif
> > > -
> > > -static inline void
> > > -ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
> > > -      volatile union ice_rx_flex_desc *rxdp)
> > > -{
> > > -volatile struct ice_32b_rx_flex_desc_comms_ovs *desc = -(volatile
> > > struct ice_32b_rx_flex_desc_comms_ovs *)rxdp; -#ifndef
> > > RTE_LIBRTE_ICE_16BYTE_RX_DESC -uint16_t stat_err;
> > > -
> > > -stat_err = rte_le_to_cpu_16(desc->status_error0);
> > > -if (likely(stat_err & (1 <<
> > > ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> > > -mb->ol_flags |= PKT_RX_RSS_HASH;
> > > -mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> > > -}
> > > -#endif
> > > -
> > > -if (desc->flow_id != 0xFFFFFFFF) {
> > > -mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; hash.fdir.hi =
> > > -mb->rte_le_to_cpu_32(desc->flow_id);
> > > -}
> > > -
> > > -#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC -if
> > > (unlikely(rte_net_ice_dynf_proto_xtr_metadata_avail()))
> > > -ice_rxd_to_proto_xtr(mb, desc);
> > > -#endif
> > > -}
> > > -
> > >  #define ICE_LOOK_AHEAD 8
> > >  #if (ICE_LOOK_AHEAD != 8)
> > >  #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
> > > @@ -1463,7 +1516,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
> > > mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
> > >
> > > rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
> > >  ice_rxd_to_vlan_tci(mb, &rxdp[j]);
> > > -ice_rxd_to_pkt_fields(mb, &rxdp[j]);
> > > +rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
> > >
> > >  mb->ol_flags |= pkt_flags;
> > >  }
> > > @@ -1760,7 +1813,7 @@ ice_recv_scattered_pkts(void *rx_queue,
> > > first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
> > > rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
> > >  ice_rxd_to_vlan_tci(first_seg, &rxd);
> > > -ice_rxd_to_pkt_fields(first_seg, &rxd);
> > > +rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
> > >  pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
> > >  first_seg->ol_flags |= pkt_flags;
> > >  /* Prefetch data of first segment, if configured to do so. */ @@
> > > -2160,7 +2213,7 @@ ice_recv_pkts(void *rx_queue,  rxm->packet_type =
> > > ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
> > > rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
> > >  ice_rxd_to_vlan_tci(rxm, &rxd);
> > > -ice_rxd_to_pkt_fields(rxm, &rxd);
> > > +rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
> > >  pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
> > >  rxm->ol_flags |= pkt_flags;
> > >  /* copy old mbuf to rx_pkts */
> > > diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
> > > index
> > > 9fa57b3b2..de2291788 100644
> > > --- a/drivers/net/ice/ice_rxtx.h
> > > +++ b/drivers/net/ice/ice_rxtx.h
> > > @@ -42,6 +42,9 @@
> > >
> > >  typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
> > > typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
> > > +typedef void (*ice_rxd_to_pkt_fields_t)(struct ice_rx_queue *rxq,
> > > +struct rte_mbuf *mb, volatile union ice_rx_flex_desc
> > > *rxdp);
> > >
> > >  struct ice_rx_entry {
> > >  struct rte_mbuf *mbuf;
> > > @@ -82,6 +85,8 @@ struct ice_rx_queue {
> > >  bool q_set; /* indicate if rx queue has been configured */
> > >  bool rx_deferred_start; /* don't start this queue in dev start */
> > >  uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */
> > > +uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
> > > +ice_rxd_to_pkt_fields_t rxd_to_pkt_fields;
> >
> > If create a function pointer here in .h, it is better add some doc.
> >
> > >  ice_rx_release_mbufs_t rx_rel_mbufs;
> > >  };
> > >
> > > --
> > > 2.28.0
> >
>
  
Wang, Haiyue Sept. 22, 2020, 6:09 a.m. UTC | #4
> -----Original Message-----
> From: Guo, Jia <jia.guo@intel.com>
> Sent: Tuesday, September 22, 2020 13:51
> To: Wang, Haiyue <haiyue.wang@intel.com>; dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX <junyux.jiang@intel.com>; Rong, Leyi
> <leyi.rong@intel.com>; Yang, Qiming <qiming.yang@intel.com>; Sun, GuinanX <guinanx.sun@intel.com>; Guo,
> Junfeng <junfeng.guo@intel.com>
> Subject: RE: [PATCH v3] net/ice: refactor the Rx FlexiMD handling
> 
> 
> > -----Original Message-----
> > From: Wang, Haiyue <haiyue.wang@intel.com>
> > Sent: Tuesday, September 22, 2020 1:42 PM
> > To: Guo, Jia <jia.guo@intel.com>; dev@dpdk.org
> > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX
> > <junyux.jiang@intel.com>; Rong, Leyi <leyi.rong@intel.com>; Yang, Qiming
> > <qiming.yang@intel.com>; Sun, GuinanX <guinanx.sun@intel.com>; Guo,
> > Junfeng <junfeng.guo@intel.com>
> > Subject: RE: [PATCH v3] net/ice: refactor the Rx FlexiMD handling
> >
> > Hi Jeff,
> >
> > > -----Original Message-----
> > > From: Guo, Jia <jia.guo@intel.com>
> > > Sent: Tuesday, September 22, 2020 13:35
> > > To: Wang, Haiyue <haiyue.wang@intel.com>; dev@dpdk.org
> > > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX
> > > <junyux.jiang@intel.com>; Rong, Leyi <leyi.rong@intel.com>; Yang,
> > > Qiming <qiming.yang@intel.com>; Sun, GuinanX <guinanx.sun@intel.com>;
> > > Guo, Junfeng <junfeng.guo@intel.com>
> > > Subject: RE: [PATCH v3] net/ice: refactor the Rx FlexiMD handling
> > >
> > > Hi, haiyue
> > >

> > > > +   struct rte_mbuf *mb,
> > > > +   volatile union ice_rx_flex_desc *rxdp) { volatile struct
> > > > +ice_32b_rx_flex_desc_comms_ovs *desc = (volatile struct
> > > > +ice_32b_rx_flex_desc_comms_ovs
> > > > *)rxdp; #ifndef
> > > > +RTE_LIBRTE_ICE_16BYTE_RX_DESC
> > > > +uint16_t stat_err;
> > > > +#endif
> > >
> > > This #ifndef could be better combine with below #ifndef.
> > >
> >
> > I changed it to according to the different offsets, like ovs has rss hash in
> > Qword 3, which is after flow Id Qword 1, others are opposite. So that this
> > handling order can reflect the offset difference, although, it MAY looks not
> > so beautiful. What do you think ? :)
> >
> 
> I am not sure I got you point about the order reason, but I think in or out #ifndef should be clear
> show the offset difference, 

Do you mean the below? If so, 'uint16_t stat_err;' is not so good in the middle of two code blocks.

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
	uint16_t stat_err;

	stat_err = rte_le_to_cpu_16(desc->status_error0);
	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
		mb->ol_flags |= PKT_RX_RSS_HASH;
		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
	}
#endif


I am insist about that but if other also agree. And I still have 2
> comments assume you are default agree and could I expect a new version or not?
> 

Oh, I missed your other two comments. Your reply is embedded among many lines of my original code. You could remove the unnecessary code so that people can get to your comments quickly without scrolling down to check line by line. ;-)


> > > > +if (rxq->xtr_ol_flag) {
> > > > +uint32_t metadata = 0;
> > > > +
> > > > +if (desc->flex_ts.flex.aux0 != 0xFFFF) metadata =
> > > > +rte_le_to_cpu_16(desc-
> > > > >flex_ts.flex.aux0);
> > > > +else if (desc->flex_ts.flex.aux1 != 0xFFFF) metadata =
> > > > +rte_le_to_cpu_16(desc-
> > > > >flex_ts.flex.aux1);
> > >
> > > So you mean the ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S and
> > > ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S
> > > could not use to identify the IPv4 hdr offset and IPv6 hdr offset here in
> > rxdid # 25?
> > > And if yes they can and they will not set at the same time, is that separate
> > this v2 from v1 necessary?

XTRMD4/5_VALID_S can't be used to detect whether the IP offset value is available; a value other than 0xffffffff is the right check condition.

> > > > +uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
> > > > +ice_rxd_to_pkt_fields_t rxd_to_pkt_fields;
> > >
> > > If create a function pointer here in .h, it is better add some doc.

C comments: /* .... */ ?

> > >
> > > >  ice_rx_release_mbufs_t rx_rel_mbufs;
> > > >  };
> > > >
> > > > --
> > > > 2.28.0
> > >
> >
>
  
Guo, Jia Sept. 22, 2020, 6:14 a.m. UTC | #5
> -----Original Message-----
> From: Wang, Haiyue <haiyue.wang@intel.com>
> Sent: Tuesday, September 22, 2020 2:09 PM
> To: Guo, Jia <jia.guo@intel.com>; dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX
> <junyux.jiang@intel.com>; Rong, Leyi <leyi.rong@intel.com>; Yang, Qiming
> <qiming.yang@intel.com>; Sun, GuinanX <guinanx.sun@intel.com>; Guo,
> Junfeng <junfeng.guo@intel.com>
> Subject: RE: [PATCH v3] net/ice: refactor the Rx FlexiMD handling
> 
> > -----Original Message-----
> > From: Guo, Jia <jia.guo@intel.com>
> > Sent: Tuesday, September 22, 2020 13:51
> > To: Wang, Haiyue <haiyue.wang@intel.com>; dev@dpdk.org
> > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX
> > <junyux.jiang@intel.com>; Rong, Leyi <leyi.rong@intel.com>; Yang,
> > Qiming <qiming.yang@intel.com>; Sun, GuinanX <guinanx.sun@intel.com>;
> > Guo, Junfeng <junfeng.guo@intel.com>
> > Subject: RE: [PATCH v3] net/ice: refactor the Rx FlexiMD handling
> >
> >
> > > -----Original Message-----
> > > From: Wang, Haiyue <haiyue.wang@intel.com>
> > > Sent: Tuesday, September 22, 2020 1:42 PM
> > > To: Guo, Jia <jia.guo@intel.com>; dev@dpdk.org
> > > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX
> > > <junyux.jiang@intel.com>; Rong, Leyi <leyi.rong@intel.com>; Yang,
> > > Qiming <qiming.yang@intel.com>; Sun, GuinanX
> > > <guinanx.sun@intel.com>; Guo, Junfeng <junfeng.guo@intel.com>
> > > Subject: RE: [PATCH v3] net/ice: refactor the Rx FlexiMD handling
> > >
> > > Hi Jeff,
> > >
> > > > -----Original Message-----
> > > > From: Guo, Jia <jia.guo@intel.com>
> > > > Sent: Tuesday, September 22, 2020 13:35
> > > > To: Wang, Haiyue <haiyue.wang@intel.com>; dev@dpdk.org
> > > > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX
> > > > <junyux.jiang@intel.com>; Rong, Leyi <leyi.rong@intel.com>; Yang,
> > > > Qiming <qiming.yang@intel.com>; Sun, GuinanX
> > > > <guinanx.sun@intel.com>; Guo, Junfeng <junfeng.guo@intel.com>
> > > > Subject: RE: [PATCH v3] net/ice: refactor the Rx FlexiMD handling
> > > >
> > > > Hi, haiyue
> > > >
> 
> > > > > +   struct rte_mbuf *mb,
> > > > > +   volatile union ice_rx_flex_desc *rxdp) { volatile struct
> > > > > +ice_32b_rx_flex_desc_comms_ovs *desc = (volatile struct
> > > > > +ice_32b_rx_flex_desc_comms_ovs
> > > > > *)rxdp; #ifndef
> > > > > +RTE_LIBRTE_ICE_16BYTE_RX_DESC
> > > > > +uint16_t stat_err;
> > > > > +#endif
> > > >
> > > > This #ifndef could be better combine with below #ifndef.
> > > >
> > >
> > > I changed it to according to the different offsets, like ovs has rss
> > > hash in Qword 3, which is after flow Id Qword 1, others are
> > > opposite. So that this handling order can reflect the offset
> > > difference, although, it MAY looks not so beautiful. What do you
> > > think ? :)
> > >
> >
> > I am not sure I got you point about the order reason, but I think in
> > or out #ifndef should be clear show the offset difference,
> 
> You mean below ? If so, 'uint16_t stat_err;' is not so good in the middle of
> two code blocks.
> 
> #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> uint16_t stat_err;
> 
> stat_err = rte_le_to_cpu_16(desc->status_error0);
> if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> mb->ol_flags |= PKT_RX_RSS_HASH;
> mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> }
> #endif
> 

Sorry, let me show it more clearly as below:

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		uint16_t stat_err;

		stat_err = rte_le_to_cpu_16(desc->status_error0);
		if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
			mb->ol_flags |= PKT_RX_RSS_HASH;
			mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
		}
#endif
	
		if (desc->flow_id != 0xFFFFFFFF) {
			mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
			mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
		}

> 
> I am insist about that but if other also agree. And I still have 2
> > comments assume you are default agree and could I expect a new version
> or not?
> >
> 
> Oh, I missed you other two comments. Your reply is embedded into my
> original many code lines. You can remove the unnecessary codes, so that
> people can get your comments quickly without moving mouse down to check
> line by line. ;-)
> 
> 

That is a good idea.

> > > > > +if (rxq->xtr_ol_flag) {
> > > > > +uint32_t metadata = 0;
> > > > > +
> > > > > +if (desc->flex_ts.flex.aux0 != 0xFFFF) metadata =
> > > > > +rte_le_to_cpu_16(desc-
> > > > > >flex_ts.flex.aux0);
> > > > > +else if (desc->flex_ts.flex.aux1 != 0xFFFF) metadata =
> > > > > +rte_le_to_cpu_16(desc-
> > > > > >flex_ts.flex.aux1);
> > > >
> > > > So you mean the ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S and
> > > > ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S
> > > > could not use to identify the IPv4 hdr offset and IPv6 hdr offset
> > > > here in
> > > rxdid # 25?
> > > > And if yes they can and they will not set at the same time, is
> > > > that separate
> > > this v2 from v1 necessary?
> 
> XTRMD4/5_VALID_S can't detect the value of IP offset available, non
> 0xffffffff is the right check condition.
> 

OK, that clarifies it for me now.

> > > > > +uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
> > > > > +ice_rxd_to_pkt_fields_t rxd_to_pkt_fields;
> > > >
> > > > If create a function pointer here in .h, it is better add some doc.
> 
> C comments: /* .... */ ?
> 

Yes.

> > > >
> > > > >  ice_rx_release_mbufs_t rx_rel_mbufs;  };
> > > > >
> > > > > --
> > > > > 2.28.0
> > > >
> > >
> >
>
  
Wang, Haiyue Sept. 22, 2020, 6:23 a.m. UTC | #6
> -----Original Message-----
> From: Guo, Jia <jia.guo@intel.com>
> Sent: Tuesday, September 22, 2020 14:15
> To: Wang, Haiyue <haiyue.wang@intel.com>; dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX <junyux.jiang@intel.com>; Rong, Leyi
> <leyi.rong@intel.com>; Yang, Qiming <qiming.yang@intel.com>; Sun, GuinanX <guinanx.sun@intel.com>; Guo,
> Junfeng <junfeng.guo@intel.com>
> Subject: RE: [PATCH v3] net/ice: refactor the Rx FlexiMD handling
> 
> 
> > -----Original Message-----
> > From: Wang, Haiyue <haiyue.wang@intel.com>
> > Sent: Tuesday, September 22, 2020 2:09 PM
> > To: Guo, Jia <jia.guo@intel.com>; dev@dpdk.org
> > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX
> > <junyux.jiang@intel.com>; Rong, Leyi <leyi.rong@intel.com>; Yang, Qiming
> > <qiming.yang@intel.com>; Sun, GuinanX <guinanx.sun@intel.com>; Guo,
> > Junfeng <junfeng.guo@intel.com>
> > Subject: RE: [PATCH v3] net/ice: refactor the Rx FlexiMD handling
> >
> > > -----Original Message-----
> > > From: Guo, Jia <jia.guo@intel.com>
> > > Sent: Tuesday, September 22, 2020 13:51
> > > To: Wang, Haiyue <haiyue.wang@intel.com>; dev@dpdk.org
> > > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX
> > > <junyux.jiang@intel.com>; Rong, Leyi <leyi.rong@intel.com>; Yang,
> > > Qiming <qiming.yang@intel.com>; Sun, GuinanX <guinanx.sun@intel.com>;
> > > Guo, Junfeng <junfeng.guo@intel.com>
> > > Subject: RE: [PATCH v3] net/ice: refactor the Rx FlexiMD handling
> > >
> > >
> > > > -----Original Message-----
> > > > From: Wang, Haiyue <haiyue.wang@intel.com>
> > > > Sent: Tuesday, September 22, 2020 1:42 PM
> > > > To: Guo, Jia <jia.guo@intel.com>; dev@dpdk.org
> > > > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX
> > > > <junyux.jiang@intel.com>; Rong, Leyi <leyi.rong@intel.com>; Yang,
> > > > Qiming <qiming.yang@intel.com>; Sun, GuinanX
> > > > <guinanx.sun@intel.com>; Guo, Junfeng <junfeng.guo@intel.com>
> > > > Subject: RE: [PATCH v3] net/ice: refactor the Rx FlexiMD handling
> > > >
> > > > Hi Jeff,
> > > >
> > > > > -----Original Message-----
> > > > > From: Guo, Jia <jia.guo@intel.com>
> > > > > Sent: Tuesday, September 22, 2020 13:35
> > > > > To: Wang, Haiyue <haiyue.wang@intel.com>; dev@dpdk.org
> > > > > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX
> > > > > <junyux.jiang@intel.com>; Rong, Leyi <leyi.rong@intel.com>; Yang,
> > > > > Qiming <qiming.yang@intel.com>; Sun, GuinanX
> > > > > <guinanx.sun@intel.com>; Guo, Junfeng <junfeng.guo@intel.com>
> > > > > Subject: RE: [PATCH v3] net/ice: refactor the Rx FlexiMD handling
> > > > >
> > > > > Hi, haiyue
> > > > >
> >
> > > > > > +   struct rte_mbuf *mb,
> > > > > > +   volatile union ice_rx_flex_desc *rxdp) { volatile struct
> > > > > > +ice_32b_rx_flex_desc_comms_ovs *desc = (volatile struct
> > > > > > +ice_32b_rx_flex_desc_comms_ovs
> > > > > > *)rxdp; #ifndef
> > > > > > +RTE_LIBRTE_ICE_16BYTE_RX_DESC
> > > > > > +uint16_t stat_err;
> > > > > > +#endif
> > > > >
> > > > > This #ifndef could be better combine with below #ifndef.
> > > > >
> > > >
> > > > I changed it to according to the different offsets, like ovs has rss
> > > > hash in Qword 3, which is after flow Id Qword 1, others are
> > > > opposite. So that this handling order can reflect the offset
> > > > difference, although, it MAY looks not so beautiful. What do you
> > > > think ? :)
> > > >
> > >
> > > I am not sure I got you point about the order reason, but I think in
> > > or out #ifndef should be clear show the offset difference,
> >
> > You mean below ? If so, 'uint16_t stat_err;' is not so good in the middle of
> > two code blocks.
> >
> > #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> > uint16_t stat_err;
> >
> > stat_err = rte_le_to_cpu_16(desc->status_error0);
> > if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> > mb->ol_flags |= PKT_RX_RSS_HASH;
> > mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> > }
> > #endif
> >
> 
> Sorry, let me show clear as below
> 
> #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> uint16_t stat_err;
> 
> stat_err = rte_le_to_cpu_16(desc->status_error0);
> if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> mb->ol_flags |= PKT_RX_RSS_HASH;
> mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> }
> #endif
> 
> if (desc->flow_id != 0xFFFFFFFF) {
> mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
> mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
> }
> 

That's what I said about the order: compare "ice_rxd_to_pkt_fields_by_comms_ovs" with "ice_rxd_to_pkt_fields_by_comms_aux_v1" regarding the "desc->flow_id" and "desc->rss_hash" handling order — the field at the lower offset is handled first.

> > > > > > +uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
> > > > > > +ice_rxd_to_pkt_fields_t rxd_to_pkt_fields;
> > > > >
> > > > > If create a function pointer here in .h, it is better add some doc.
> >
> > C comments: /* .... */ ?
> >
> 
> Yes.

OK.

> 
> > > > >
> > > > > >  ice_rx_release_mbufs_t rx_rel_mbufs;  };
> > > > > >
> > > > > > --
> > > > > > 2.28.0
> > > > >
> > > >
> > >
> >
>
  
Guo, Jia Sept. 22, 2020, 6:33 a.m. UTC | #7
> -----Original Message-----
> From: Wang, Haiyue <haiyue.wang@intel.com>
> Sent: Tuesday, September 22, 2020 2:24 PM
> To: Guo, Jia <jia.guo@intel.com>; dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX
> <junyux.jiang@intel.com>; Rong, Leyi <leyi.rong@intel.com>; Yang, Qiming
> <qiming.yang@intel.com>; Sun, GuinanX <guinanx.sun@intel.com>; Guo,
> Junfeng <junfeng.guo@intel.com>
> Subject: RE: [PATCH v3] net/ice: refactor the Rx FlexiMD handling
> 
> > -----Original Message-----
> > From: Guo, Jia <jia.guo@intel.com>
> > Sent: Tuesday, September 22, 2020 14:15
> > To: Wang, Haiyue <haiyue.wang@intel.com>; dev@dpdk.org
> > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX
> > <junyux.jiang@intel.com>; Rong, Leyi <leyi.rong@intel.com>; Yang,
> > Qiming <qiming.yang@intel.com>; Sun, GuinanX <guinanx.sun@intel.com>;
> > Guo, Junfeng <junfeng.guo@intel.com>
> > Subject: RE: [PATCH v3] net/ice: refactor the Rx FlexiMD handling
> >
> >
> > > -----Original Message-----
> > > From: Wang, Haiyue <haiyue.wang@intel.com>
> > > Sent: Tuesday, September 22, 2020 2:09 PM
> > > To: Guo, Jia <jia.guo@intel.com>; dev@dpdk.org
> > > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX
> > > <junyux.jiang@intel.com>; Rong, Leyi <leyi.rong@intel.com>; Yang,
> > > Qiming <qiming.yang@intel.com>; Sun, GuinanX
> > > <guinanx.sun@intel.com>; Guo, Junfeng <junfeng.guo@intel.com>
> > > Subject: RE: [PATCH v3] net/ice: refactor the Rx FlexiMD handling
> > >
> > > > -----Original Message-----
> > > > From: Guo, Jia <jia.guo@intel.com>
> > > > Sent: Tuesday, September 22, 2020 13:51
> > > > To: Wang, Haiyue <haiyue.wang@intel.com>; dev@dpdk.org
> > > > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX
> > > > <junyux.jiang@intel.com>; Rong, Leyi <leyi.rong@intel.com>; Yang,
> > > > Qiming <qiming.yang@intel.com>; Sun, GuinanX
> > > > <guinanx.sun@intel.com>; Guo, Junfeng <junfeng.guo@intel.com>
> > > > Subject: RE: [PATCH v3] net/ice: refactor the Rx FlexiMD handling
> > > >
> > > >
> > > > > -----Original Message-----
> > > > > From: Wang, Haiyue <haiyue.wang@intel.com>
> > > > > Sent: Tuesday, September 22, 2020 1:42 PM
> > > > > To: Guo, Jia <jia.guo@intel.com>; dev@dpdk.org
> > > > > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX
> > > > > <junyux.jiang@intel.com>; Rong, Leyi <leyi.rong@intel.com>;
> > > > > Yang, Qiming <qiming.yang@intel.com>; Sun, GuinanX
> > > > > <guinanx.sun@intel.com>; Guo, Junfeng <junfeng.guo@intel.com>
> > > > > Subject: RE: [PATCH v3] net/ice: refactor the Rx FlexiMD
> > > > > handling
> > > > >
> > > > > Hi Jeff,
> > > > >
> > > > > > -----Original Message-----
> > > > > > From: Guo, Jia <jia.guo@intel.com>
> > > > > > Sent: Tuesday, September 22, 2020 13:35
> > > > > > To: Wang, Haiyue <haiyue.wang@intel.com>; dev@dpdk.org
> > > > > > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Jiang, JunyuX
> > > > > > <junyux.jiang@intel.com>; Rong, Leyi <leyi.rong@intel.com>;
> > > > > > Yang, Qiming <qiming.yang@intel.com>; Sun, GuinanX
> > > > > > <guinanx.sun@intel.com>; Guo, Junfeng <junfeng.guo@intel.com>
> > > > > > Subject: RE: [PATCH v3] net/ice: refactor the Rx FlexiMD
> > > > > > handling
> > > > > >
> > > > > > Hi, haiyue
> > > > > >
> > >
> > > > > > > +   struct rte_mbuf *mb,
> > > > > > > +   volatile union ice_rx_flex_desc *rxdp) { volatile struct
> > > > > > > +ice_32b_rx_flex_desc_comms_ovs *desc = (volatile struct
> > > > > > > +ice_32b_rx_flex_desc_comms_ovs
> > > > > > > *)rxdp; #ifndef
> > > > > > > +RTE_LIBRTE_ICE_16BYTE_RX_DESC uint16_t stat_err; #endif
> > > > > >
> > > > > > This #ifndef could be better combine with below #ifndef.
> > > > > >
> > > > >
> > > > > I changed it to according to the different offsets, like ovs has
> > > > > rss hash in Qword 3, which is after flow Id Qword 1, others are
> > > > > opposite. So that this handling order can reflect the offset
> > > > > difference, although, it MAY looks not so beautiful. What do you
> > > > > think ? :)
> > > > >
> > > >
> > > > I am not sure I got you point about the order reason, but I think
> > > > in or out #ifndef should be clear show the offset difference,
> > >
> > > You mean below ? If so, 'uint16_t stat_err;' is not so good in the
> > > middle of two code blocks.
> > >
> > > #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC uint16_t stat_err;
> > >
> > > stat_err = rte_le_to_cpu_16(desc->status_error0);
> > > if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S)))
> > > {
> > > mb->ol_flags |= PKT_RX_RSS_HASH;
> > > mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> > > }
> > > #endif
> > >
> >
> > Sorry, let me show clear as below
> >
> > #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> > uint16_t stat_err;
> >
> > stat_err = rte_le_to_cpu_16(desc->status_error0);
> > if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
> > mb->ol_flags |= PKT_RX_RSS_HASH;
> > mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
> > }
> > #endif
> >
> > if (desc->flow_id != 0xFFFFFFFF) {
> > mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; hash.fdir.hi =
> > mb->rte_le_to_cpu_32(desc->flow_id);
> > }
> >
> 
> That's what I said about the order, you can check
> "ice_rxd_to_pkt_fields_by_comms_ovs"
> vs "ice_rxd_to_pkt_fields_by_comms_aux_v1" about "desc->flow_id" and
> "desc->rss_hash"
> handling order: offset first, handle it firstly.
> 

I see your point, and you can choose whether more documentation is needed to highlight this offset difference. Anyway, I don't insist on objecting to this order.

> > > > > > > +uint64_t xtr_ol_flag; /* Protocol extraction offload flag
> > > > > > > +*/ ice_rxd_to_pkt_fields_t rxd_to_pkt_fields;
> > > > > >
> > > > > > If create a function pointer here in .h, it is better add some doc.
> > >
> > > C comments: /* .... */ ?
> > >
> >
> > Yes.
> 
> OK.
> 
> >
> > > > > >
> > > > > > >  ice_rx_release_mbufs_t rx_rel_mbufs;  };
> > > > > > >
> > > > > > > --
> > > > > > > 2.28.0
> > > > > >
> > > > >
> > > >
> > >
> >
>
  

Patch

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index fef6ad454..93a0ac691 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -25,40 +25,6 @@  uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
 uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
 uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
 
-static inline uint64_t
-ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid, bool *chk_valid)
-{
-	static struct {
-		uint64_t *ol_flag;
-		bool chk_valid;
-	} ol_flag_map[] = {
-		[ICE_RXDID_COMMS_AUX_VLAN] = {
-			&rte_net_ice_dynflag_proto_xtr_vlan_mask, true },
-		[ICE_RXDID_COMMS_AUX_IPV4] = {
-			&rte_net_ice_dynflag_proto_xtr_ipv4_mask, true },
-		[ICE_RXDID_COMMS_AUX_IPV6] = {
-			&rte_net_ice_dynflag_proto_xtr_ipv6_mask, true },
-		[ICE_RXDID_COMMS_AUX_IPV6_FLOW] = {
-			&rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask, true },
-		[ICE_RXDID_COMMS_AUX_TCP] = {
-			&rte_net_ice_dynflag_proto_xtr_tcp_mask, true },
-		[ICE_RXDID_COMMS_AUX_IP_OFFSET] = {
-			&rte_net_ice_dynflag_proto_xtr_ip_offset_mask, false },
-	};
-	uint64_t *ol_flag;
-
-	if (rxdid < RTE_DIM(ol_flag_map)) {
-		ol_flag = ol_flag_map[rxdid].ol_flag;
-		if (!ol_flag)
-			return 0ULL;
-
-		*chk_valid = ol_flag_map[rxdid].chk_valid;
-		return *ol_flag;
-	}
-
-	return 0ULL;
-}
-
 static inline uint8_t
 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
 {
@@ -76,6 +42,159 @@  ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
 				rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
 }
 
+static inline void
+ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
+				   struct rte_mbuf *mb,
+				   volatile union ice_rx_flex_desc *rxdp)
+{
+	volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
+			(volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+	uint16_t stat_err;
+#endif
+
+	if (desc->flow_id != 0xFFFFFFFF) {
+		mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+	}
+
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+	stat_err = rte_le_to_cpu_16(desc->status_error0);
+	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
+		mb->ol_flags |= PKT_RX_RSS_HASH;
+		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
+	}
+#endif
+}
+
+static inline void
+ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
+				      struct rte_mbuf *mb,
+				      volatile union ice_rx_flex_desc *rxdp)
+{
+	volatile struct ice_32b_rx_flex_desc_comms *desc =
+			(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
+	uint16_t stat_err;
+
+	stat_err = rte_le_to_cpu_16(desc->status_error0);
+	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
+		mb->ol_flags |= PKT_RX_RSS_HASH;
+		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
+	}
+
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+	if (desc->flow_id != 0xFFFFFFFF) {
+		mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+	}
+
+	if (rxq->xtr_ol_flag) {
+		uint32_t metadata = 0;
+
+		stat_err = rte_le_to_cpu_16(desc->status_error1);
+
+		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
+			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
+
+		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
+			metadata |=
+				rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
+
+		if (metadata) {
+			mb->ol_flags |= rxq->xtr_ol_flag;
+
+			*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
+		}
+	}
+#endif
+}
+
+static inline void
+ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
+				      struct rte_mbuf *mb,
+				      volatile union ice_rx_flex_desc *rxdp)
+{
+	volatile struct ice_32b_rx_flex_desc_comms *desc =
+			(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
+	uint16_t stat_err;
+
+	stat_err = rte_le_to_cpu_16(desc->status_error0);
+	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
+		mb->ol_flags |= PKT_RX_RSS_HASH;
+		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
+	}
+
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+	if (desc->flow_id != 0xFFFFFFFF) {
+		mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+	}
+
+	if (rxq->xtr_ol_flag) {
+		uint32_t metadata = 0;
+
+		if (desc->flex_ts.flex.aux0 != 0xFFFF)
+			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
+		else if (desc->flex_ts.flex.aux1 != 0xFFFF)
+			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
+
+		if (metadata) {
+			mb->ol_flags |= rxq->xtr_ol_flag;
+
+			*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
+		}
+	}
+#endif
+}
+
+static void
+ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
+{
+	switch (rxdid) {
+	case ICE_RXDID_COMMS_AUX_VLAN:
+		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
+		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
+		break;
+
+	case ICE_RXDID_COMMS_AUX_IPV4:
+		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
+		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
+		break;
+
+	case ICE_RXDID_COMMS_AUX_IPV6:
+		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
+		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
+		break;
+
+	case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
+		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
+		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
+		break;
+
+	case ICE_RXDID_COMMS_AUX_TCP:
+		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
+		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
+		break;
+
+	case ICE_RXDID_COMMS_AUX_IP_OFFSET:
+		rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
+		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
+		break;
+
+	case ICE_RXDID_COMMS_OVS:
+		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
+		break;
+
+	default:
+		/* update this according to the RXDID for PROTO_XTR_NONE */
+		rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
+		break;
+	}
+
+	if (!rte_net_ice_dynf_proto_xtr_metadata_avail())
+		rxq->xtr_ol_flag = 0;
+}
+
 static enum ice_status
 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 {
@@ -158,6 +277,8 @@  ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 		return -EINVAL;
 	}
 
+	ice_select_rxd_to_pkt_fields_handler(rxq, rxdid);
+
 	/* Enable Flexible Descriptors in the queue context which
 	 * allows this driver to select a specific receive descriptor format
 	 */
@@ -1338,74 +1459,6 @@  ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
 		   mb->vlan_tci, mb->vlan_tci_outer);
 }
 
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-#define ICE_RX_PROTO_XTR_VALID \
-	((1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) | \
-	 (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
-
-static void
-ice_rxd_to_proto_xtr(struct rte_mbuf *mb,
-		     volatile struct ice_32b_rx_flex_desc_comms_ovs *desc)
-{
-	uint16_t stat_err = rte_le_to_cpu_16(desc->status_error1);
-	uint32_t metadata = 0;
-	uint64_t ol_flag;
-	bool chk_valid;
-
-	ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid, &chk_valid);
-	if (unlikely(!ol_flag))
-		return;
-
-	if (chk_valid) {
-		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
-			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
-
-		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
-			metadata |=
-				rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
-	} else {
-		if (rte_le_to_cpu_16(desc->flex_ts.flex.aux0) != 0xFFFF)
-			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
-		else if (rte_le_to_cpu_16(desc->flex_ts.flex.aux1) != 0xFFFF)
-			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
-	}
-
-	if (!metadata)
-		return;
-
-	mb->ol_flags |= ol_flag;
-
-	*RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
-}
-#endif
-
-static inline void
-ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
-		      volatile union ice_rx_flex_desc *rxdp)
-{
-	volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
-			(volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-	uint16_t stat_err;
-
-	stat_err = rte_le_to_cpu_16(desc->status_error0);
-	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
-		mb->ol_flags |= PKT_RX_RSS_HASH;
-		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
-	}
-#endif
-
-	if (desc->flow_id != 0xFFFFFFFF) {
-		mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
-		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
-	}
-
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-	if (unlikely(rte_net_ice_dynf_proto_xtr_metadata_avail()))
-		ice_rxd_to_proto_xtr(mb, desc);
-#endif
-}
-
 #define ICE_LOOK_AHEAD 8
 #if (ICE_LOOK_AHEAD != 8)
 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
@@ -1463,7 +1516,7 @@  ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 			mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
 			ice_rxd_to_vlan_tci(mb, &rxdp[j]);
-			ice_rxd_to_pkt_fields(mb, &rxdp[j]);
+			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 
 			mb->ol_flags |= pkt_flags;
 		}
@@ -1760,7 +1813,7 @@  ice_recv_scattered_pkts(void *rx_queue,
 		first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		ice_rxd_to_vlan_tci(first_seg, &rxd);
-		ice_rxd_to_pkt_fields(first_seg, &rxd);
+		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
 		first_seg->ol_flags |= pkt_flags;
 		/* Prefetch data of first segment, if configured to do so. */
@@ -2160,7 +2213,7 @@  ice_recv_pkts(void *rx_queue,
 		rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		ice_rxd_to_vlan_tci(rxm, &rxd);
-		ice_rxd_to_pkt_fields(rxm, &rxd);
+		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
 		/* copy old mbuf to rx_pkts */
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 9fa57b3b2..de2291788 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -42,6 +42,9 @@ 
 
 typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
 typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
+typedef void (*ice_rxd_to_pkt_fields_t)(struct ice_rx_queue *rxq,
+					struct rte_mbuf *mb,
+					volatile union ice_rx_flex_desc *rxdp);
 
 struct ice_rx_entry {
 	struct rte_mbuf *mbuf;
@@ -82,6 +85,8 @@  struct ice_rx_queue {
 	bool q_set; /* indicate if rx queue has been configured */
 	bool rx_deferred_start; /* don't start this queue in dev start */
 	uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */
+	uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
+	ice_rxd_to_pkt_fields_t rxd_to_pkt_fields;
 	ice_rx_release_mbufs_t rx_rel_mbufs;
 };