net/ice: track the DCF state of PF

Message ID 20211122092913.262886-1-dapengx.yu@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Qi Zhang
Series net/ice: track the DCF state of PF

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/github-robot: build success github build: passed
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS
ci/iol-aarch64-unit-testing success Testing PASS
ci/iol-aarch64-compile-testing success Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS

Commit Message

Yu, DapengX Nov. 22, 2021, 9:29 a.m. UTC
  From: Dapeng Yu <dapengx.yu@intel.com>

When the VF is reset, the PF changes the DCF state from ON to another
state. If a flow creation, destruction, or redirect command is sent to
the DCF at that time, the command fails.

This patch tracks the DCF state and returns a try-again error to the
caller when the DCF state is not ON.

Cc: stable@dpdk.org

Signed-off-by: Dapeng Yu <dapengx.yu@intel.com>
---
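For reference only (not part of the applied patch): a minimal sketch of
how an application could react to the new try-again behaviour when the
DCF state is temporarily not ON. The port_id, attr, pattern, actions and
retry budget below are placeholders, and the sketch assumes the EAGAIN
code set by the PMD through rte_flow_error_set() is visible to the
caller via rte_errno, as usual for rte_flow.

#include <errno.h>
#include <rte_errno.h>
#include <rte_flow.h>
#include <rte_cycles.h>

/* Retry rule creation while the DCF reports it is temporarily not ready. */
static struct rte_flow *
create_flow_with_retry(uint16_t port_id,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[])
{
	struct rte_flow_error err;
	struct rte_flow *flow;
	int tries = 10;	/* arbitrary retry budget */

	do {
		flow = rte_flow_create(port_id, attr, pattern, actions, &err);
		if (flow != NULL || rte_errno != EAGAIN)
			break;
		/* DCF state is not ON yet (e.g. a VF reset is in progress) */
		rte_delay_us_sleep(10 * 1000);
	} while (--tries > 0);

	return flow;
}
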
 drivers/net/ice/base/ice_common.c   | 14 +++++++++++---
 drivers/net/ice/ice_dcf.c           | 23 +++++++++++++++++------
 drivers/net/ice/ice_dcf.h           |  3 +++
 drivers/net/ice/ice_dcf_parent.c    |  1 +
 drivers/net/ice/ice_generic_flow.c  |  4 +++-
 drivers/net/ice/ice_switch_filter.c | 22 ++++++++++++++++++----
 6 files changed, 53 insertions(+), 14 deletions(-)
  

Patch

diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index ae55bebaa2..1626db263c 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -1764,12 +1764,14 @@  ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
 		u16 buf_size, struct ice_sq_cd *cd)
 {
 	if (hw->aq_send_cmd_fn) {
-		enum ice_status status = ICE_ERR_NOT_READY;
+		enum ice_status status;
 		u16 retval = ICE_AQ_RC_OK;
 
 		ice_acquire_lock(&hw->adminq.sq_lock);
-		if (!hw->aq_send_cmd_fn(hw->aq_send_cmd_param, desc,
-					buf, buf_size)) {
+		status = hw->aq_send_cmd_fn(hw->aq_send_cmd_param, desc, buf,
+					    buf_size);
+		switch (status) {
+		case ICE_SUCCESS:
 			retval = LE16_TO_CPU(desc->retval);
 			/* strip off FW internal code */
 			if (retval)
@@ -1778,6 +1780,12 @@  ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
 				status = ICE_SUCCESS;
 			else
 				status = ICE_ERR_AQ_ERROR;
+			break;
+		case ICE_ERR_NOT_READY:
+			break;
+		default:
+			status = ICE_ERR_AQ_ERROR;
+			break;
 		}
 
 		hw->adminq.sq_last_status = (enum ice_aq_err)retval;
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index cca1d7bf46..63c2e547ff 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -269,6 +269,8 @@  ice_dcf_get_vf_resource(struct ice_dcf_hw *hw)
 	hw->vsi_id = hw->vsi_res->vsi_id;
 	PMD_DRV_LOG(DEBUG, "VSI ID is %u", hw->vsi_id);
 
+	__atomic_store_n(&hw->state_on, true, __ATOMIC_RELAXED);
+
 	return 0;
 }
 
@@ -476,9 +478,12 @@  ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc,
 	int err = 0;
 	int i = 0;
 
+	if (!__atomic_load_n(&hw->state_on, __ATOMIC_RELAXED))
+		return ICE_ERR_NOT_READY;
+
 	if ((buf && !buf_size) || (!buf && buf_size) ||
 	    buf_size > ICE_DCF_AQ_BUF_SZ)
-		return -EINVAL;
+		return ICE_ERR_PARAM;
 
 	desc_cmd.v_op = VIRTCHNL_OP_DCF_CMD_DESC;
 	desc_cmd.req_msglen = sizeof(*desc);
@@ -503,7 +508,7 @@  ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc,
 
 	if (ice_dcf_vc_cmd_send(hw, &desc_cmd) ||
 	    ice_dcf_vc_cmd_send(hw, &buff_cmd)) {
-		err = -1;
+		err = ICE_ERR_AQ_ERROR;
 		PMD_DRV_LOG(ERR, "fail to send OP_DCF_CMD_DESC/BUFF");
 		goto ret;
 	}
@@ -516,10 +521,15 @@  ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc,
 	} while (i++ < ICE_DCF_ARQ_MAX_RETRIES);
 
 	if (desc_cmd.v_ret != IAVF_SUCCESS || buff_cmd.v_ret != IAVF_SUCCESS) {
-		err = -1;
-		PMD_DRV_LOG(ERR,
-			    "No response (%d times) or return failure (desc: %d / buff: %d)",
-			    i, desc_cmd.v_ret, buff_cmd.v_ret);
+		if (!__atomic_load_n(&hw->state_on, __ATOMIC_RELAXED)) {
+			err = ICE_ERR_NOT_READY;
+			PMD_DRV_LOG(ERR, "DCF is not on temporarily");
+		} else {
+			err = ICE_ERR_AQ_ERROR;
+			PMD_DRV_LOG(ERR,
+				    "No response (%d times) or return failure (desc: %d / buff: %d)",
+				    i, desc_cmd.v_ret, buff_cmd.v_ret);
+		}
 	}
 
 ret:
@@ -594,6 +604,7 @@  ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 	int ret, size;
 
 	hw->resetting = false;
+	__atomic_store_n(&hw->state_on, false, __ATOMIC_RELAXED);
 
 	hw->avf.hw_addr = pci_dev->mem_resource[0].addr;
 	hw->avf.back = hw;
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index 6ec766ebda..c3a2bb2b03 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -113,6 +113,9 @@  struct ice_dcf_hw {
 	uint32_t link_speed;
 
 	bool resetting;
+
+	/* True if DCF state of the associated PF is on */
+	bool state_on;
 };
 
 int ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index 1ff2c47172..84c170e79b 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -258,6 +258,7 @@  ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw,
 		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE event : VF%u with VSI num %u",
 			    pf_msg->event_data.vf_vsi_map.vf_id,
 			    pf_msg->event_data.vf_vsi_map.vsi_id);
+		__atomic_store_n(&dcf_hw->state_on, false, __ATOMIC_RELAXED);
 		start_vsi_reset_thread(dcf_hw, true,
 				       pf_msg->event_data.vf_vsi_map.vf_id);
 		break;
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index c673feb7a6..406a0a953f 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -2515,7 +2515,9 @@  ice_flow_flush(struct rte_eth_dev *dev,
 		ret = ice_flow_destroy(dev, p_flow, error);
 		if (ret) {
 			PMD_DRV_LOG(ERR, "Failed to flush flows");
-			return -EINVAL;
+			if (ret != -EAGAIN)
+				ret = -EINVAL;
+			return ret;
 		}
 	}
 
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index ed29c00d77..9e35c2d1ea 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -423,7 +423,11 @@  ice_switch_create(struct ice_adapter *ad,
 
 		flow->rule = filter_conf_ptr;
 	} else {
-		rte_flow_error_set(error, EINVAL,
+		if (ret == ICE_ERR_NOT_READY)
+			ret = -EAGAIN;
+		else
+			ret = -EINVAL;
+		rte_flow_error_set(error, -ret,
 			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 			"switch filter create flow fail");
 		goto error;
@@ -477,7 +481,11 @@  ice_switch_destroy(struct ice_adapter *ad,
 
 	ret = ice_rem_adv_rule_by_id(hw, &filter_conf_ptr->sw_query_data);
 	if (ret) {
-		rte_flow_error_set(error, EINVAL,
+		if (ret == ICE_ERR_NOT_READY)
+			ret = -EAGAIN;
+		else
+			ret = -EINVAL;
+		rte_flow_error_set(error, -ret,
 			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 			"fail to destroy switch filter rule");
 		return -rte_errno;
@@ -2023,7 +2031,10 @@  ice_switch_redirect(struct ice_adapter *ad,
 			    rdata->rule_id);
 		filter_conf_ptr->fltr_status =
 			ICE_SW_FLTR_RMV_FAILED_ON_RIDRECT;
-		ret = -EINVAL;
+		if (ret == ICE_ERR_NOT_READY)
+			ret = -EAGAIN;
+		else
+			ret = -EINVAL;
 		goto out;
 	}
 
@@ -2038,7 +2049,10 @@  ice_switch_redirect(struct ice_adapter *ad,
 		PMD_DRV_LOG(ERR, "Failed to replay the rule");
 		filter_conf_ptr->fltr_status =
 			ICE_SW_FLTR_ADD_FAILED_ON_RIDRECT;
-		ret = -EINVAL;
+		if (ret == ICE_ERR_NOT_READY)
+			ret = -EAGAIN;
+		else
+			ret = -EINVAL;
 	} else {
 		filter_conf_ptr->sw_query_data = added_rdata;
 		/* Save VSI number for failure recover */