[v2] net/ice: fix DCF Rx segmentation fault
Checks
Commit Message
The initialization of selecting the handler for scalar Rx path FlexiMD
fields extraction into mbuf is missing, which will cause a segmentation
fault (core dumped).
Also add the missing support for handling RXDID 16, which has the RSS hash
value on Qword 1.
Fixes: 7a340b0b4e03 ("net/ice: refactor Rx FlexiMD handling")
Reported-by: Alvin Zhang <alvinx.zhang@intel.com>
Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
---
v2: Fix the wrong RSS hash handling.
---
drivers/net/ice/ice_dcf.c | 1 +
drivers/net/ice/ice_rxtx.c | 28 +++++++++++++++++++++++++++-
drivers/net/ice/ice_rxtx.h | 2 ++
3 files changed, 30 insertions(+), 1 deletion(-)
Comments
> -----Original Message-----
> From: Wang, Haiyue <haiyue.wang@intel.com>
> Sent: Thursday, October 29, 2020 9:13 AM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Wang, Haiyue
> <haiyue.wang@intel.com>; Zhang, AlvinX <alvinx.zhang@intel.com>; Yang,
> Qiming <qiming.yang@intel.com>; Guo, Jia <jia.guo@intel.com>
> Subject: [PATCH v2] net/ice: fix DCF Rx segmentation fault
>
> The initialization of selecting the handler for scalar Rx path FlexiMD fields
> extraction into mbuf is missed, it will cause segmentation fault (core dumped).
>
> Also add the missed support to handle RXDID 16, which has RSS hash value on
> Qword 1.
>
> Fixes: 7a340b0b4e03 ("net/ice: refactor Rx FlexiMD handling")
>
> Reported-by: Alvin Zhang <alvinx.zhang@intel.com>
> Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
Applied to dpdk-next-net-intel after revert v1
Thanks
Qi
@@ -899,6 +899,7 @@ ice_dcf_configure_queues(struct ice_dcf_hw *hw)
return -EINVAL;
}
#endif
+ ice_select_rxd_to_pkt_fields_handler(rxq[i], vc_qp->rxq.rxdid);
}
memset(&args, 0, sizeof(args));
@@ -43,6 +43,28 @@ ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
}
+static inline void
+ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq,
+ struct rte_mbuf *mb,
+ volatile union ice_rx_flex_desc *rxdp)
+{
+ volatile struct ice_32b_rx_flex_desc_comms *desc =
+ (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
+ uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);
+
+ if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
+ mb->ol_flags |= PKT_RX_RSS_HASH;
+ mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
+ }
+
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ if (desc->flow_id != 0xFFFFFFFF) {
+ mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+ }
+#endif
+}
+
static inline void
ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
struct rte_mbuf *mb,
@@ -148,7 +170,7 @@ ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
#endif
}
-static void
+void
ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
{
switch (rxdid) {
@@ -182,6 +204,10 @@ ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
break;
+ case ICE_RXDID_COMMS_GENERIC:
+ rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic;
+ break;
+
case ICE_RXDID_COMMS_OVS:
rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
break;
@@ -234,6 +234,8 @@ int ice_rx_descriptor_status(void *rx_queue, uint16_t offset);
int ice_tx_descriptor_status(void *tx_queue, uint16_t offset);
void ice_set_default_ptype_table(struct rte_eth_dev *dev);
const uint32_t *ice_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+void ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq,
+ uint32_t rxdid);
int ice_rx_vec_dev_check(struct rte_eth_dev *dev);
int ice_tx_vec_dev_check(struct rte_eth_dev *dev);