[04/10] vdpa/nfp: refactor the logic of datapath update

Message ID: 20240426074831.1729792-5-chaoyong.he@corigine.com
State: New
Delegated to: Maxime Coquelin
Series: support software live migration

Checks

Context        Check     Description
ci/checkpatch  success   coding style OK

Commit Message

Chaoyong He April 26, 2024, 7:48 a.m. UTC
  From: Xinying Yu <xinying.yu@corigine.com>

To prepare for the new configuration logic of software live
migration, split the datapath update logic into two parts:
queue configuration and VF configuration.

Signed-off-by: Xinying Yu <xinying.yu@corigine.com>
Reviewed-by: Chaoyong He <chaoyong.he@corigine.com>
Reviewed-by: Long Wu <long.wu@corigine.com>
Reviewed-by: Peng Zhang <peng.zhang@corigine.com>
---
 drivers/vdpa/nfp/nfp_vdpa_core.c | 54 +++++++++++++++++++++-----------
 1 file changed, 36 insertions(+), 18 deletions(-)
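
For readability, here is the resulting shape of the start path,
reconstructed as plain C from the diff below: nfp_vdpa_hw_start()
becomes a thin wrapper over the two new static helpers. The prototype
comments only summarize the hunks below; they add no behavior.

	/* Program TX/RX vring addresses/sizes into the BAR, then rte_wmb(). */
	static void nfp_vdpa_queue_config(struct nfp_vdpa_hw *vdpa_hw);

	/* Per-VF device setup: MTU, buffer size, MAC, ctrl/ext-ctrl words. */
	static int nfp_vdpa_vf_config(struct nfp_hw *hw, int vid);

	int
	nfp_vdpa_hw_start(struct nfp_vdpa_hw *vdpa_hw,
			int vid)
	{
		struct nfp_hw *hw = &vdpa_hw->super;

		/* 1. Write the vring layout to the device. */
		nfp_vdpa_queue_config(vdpa_hw);

		/* 2. Quiesce, then re-enable all queues with the new layout. */
		nfp_disable_queues(hw);
		nfp_enable_queues(hw, NFP_VDPA_MAX_QUEUES, NFP_VDPA_MAX_QUEUES);

		/* 3. VF configuration, moved out of the start path unchanged. */
		return nfp_vdpa_vf_config(hw, vid);
	}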
  

Patch

diff --git a/drivers/vdpa/nfp/nfp_vdpa_core.c b/drivers/vdpa/nfp/nfp_vdpa_core.c
index 6d07356581..79ecd2b4fc 100644
--- a/drivers/vdpa/nfp/nfp_vdpa_core.c
+++ b/drivers/vdpa/nfp/nfp_vdpa_core.c
@@ -105,8 +105,8 @@  nfp_vdpa_check_offloads(void)
 			NFP_NET_CFG_CTRL_IN_ORDER;
 }
 
-int
-nfp_vdpa_hw_start(struct nfp_vdpa_hw *vdpa_hw,
+static int
+nfp_vdpa_vf_config(struct nfp_hw *hw,
 		int vid)
 {
 	int ret;
@@ -114,24 +114,8 @@  nfp_vdpa_hw_start(struct nfp_vdpa_hw *vdpa_hw,
 	uint32_t new_ctrl;
 	uint32_t new_ext_ctrl;
 	struct timespec wait_tst;
-	struct nfp_hw *hw = &vdpa_hw->super;
 	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
 
-	nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(0), vdpa_hw->vring[1].desc);
-	nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(0), rte_log2_u32(vdpa_hw->vring[1].size));
-	nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(1), vdpa_hw->vring[1].avail);
-	nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(2), vdpa_hw->vring[1].used);
-
-	nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(0), vdpa_hw->vring[0].desc);
-	nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(0), rte_log2_u32(vdpa_hw->vring[0].size));
-	nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(1), vdpa_hw->vring[0].avail);
-	nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(2), vdpa_hw->vring[0].used);
-
-	rte_wmb();
-
-	nfp_disable_queues(hw);
-	nfp_enable_queues(hw, NFP_VDPA_MAX_QUEUES, NFP_VDPA_MAX_QUEUES);
-
 	nn_cfg_writel(hw, NFP_NET_CFG_MTU, 9216);
 	nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, 10240);
 
@@ -177,6 +161,40 @@  nfp_vdpa_hw_start(struct nfp_vdpa_hw *vdpa_hw,
 	return 0;
 }
 
+static void
+nfp_vdpa_queue_config(struct nfp_vdpa_hw *vdpa_hw)
+{
+	struct nfp_hw *hw = &vdpa_hw->super;
+
+	nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(0), vdpa_hw->vring[1].desc);
+	nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(0),
+			rte_log2_u32(vdpa_hw->vring[1].size));
+	nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(1), vdpa_hw->vring[1].avail);
+	nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(2), vdpa_hw->vring[1].used);
+
+	nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(0), vdpa_hw->vring[0].desc);
+	nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(0),
+			rte_log2_u32(vdpa_hw->vring[0].size));
+	nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(1), vdpa_hw->vring[0].avail);
+	nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(2), vdpa_hw->vring[0].used);
+
+	rte_wmb();
+}
+
+int
+nfp_vdpa_hw_start(struct nfp_vdpa_hw *vdpa_hw,
+		int vid)
+{
+	struct nfp_hw *hw = &vdpa_hw->super;
+
+	nfp_vdpa_queue_config(vdpa_hw);
+
+	nfp_disable_queues(hw);
+	nfp_enable_queues(hw, NFP_VDPA_MAX_QUEUES, NFP_VDPA_MAX_QUEUES);
+
+	return nfp_vdpa_vf_config(hw, vid);
+}
+
 void
 nfp_vdpa_hw_stop(struct nfp_vdpa_hw *vdpa_hw)
 {
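
A minimal sketch of why the split matters for this series: a later
software live-migration step could re-program only the rings, leaving
the VF configuration done at start time untouched. The function name
nfp_vdpa_lm_requeue() and its body are assumptions for illustration
only, not part of this patch.

	static int
	nfp_vdpa_lm_requeue(struct nfp_vdpa_hw *vdpa_hw)
	{
		struct nfp_hw *hw = &vdpa_hw->super;

		/* Rewrite desc/avail/used addresses only; the VF config
		 * applied by nfp_vdpa_vf_config() at start time still holds.
		 */
		nfp_vdpa_queue_config(vdpa_hw);

		nfp_disable_queues(hw);
		nfp_enable_queues(hw, NFP_VDPA_MAX_QUEUES, NFP_VDPA_MAX_QUEUES);

		return 0;
	}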