[v2,4/8] net/nfp: add the process private structure

Message ID 20240419052349.1294696-5-chaoyong.he@corigine.com (mailing list archive)
State Accepted
Delegated to: Ferruh Yigit
Headers
Series [v2,1/8] net/nfp: fix resource leak of secondary process |

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Chaoyong He April 19, 2024, 5:23 a.m. UTC
  Add a new data structure to hold the process private data, and modify
the logic to ensure 'pf_dev' is only obtained from the process private data.

Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
Reviewed-by: Long Wu <long.wu@corigine.com>
Reviewed-by: Peng Zhang <peng.zhang@corigine.com>
---
 drivers/net/nfp/flower/nfp_flower.c           |  71 ++++++------
 drivers/net/nfp/flower/nfp_flower.h           |   8 +-
 drivers/net/nfp/flower/nfp_flower_cmsg.c      |   5 +-
 drivers/net/nfp/flower/nfp_flower_cmsg.h      |   3 +-
 .../net/nfp/flower/nfp_flower_representor.c   |  85 ++++++++++----
 .../net/nfp/flower/nfp_flower_representor.h   |   3 +-
 drivers/net/nfp/flower/nfp_flower_service.c   |  32 +++---
 drivers/net/nfp/flower/nfp_flower_service.h   |  10 +-
 drivers/net/nfp/nfd3/nfp_nfd3_dp.c            |   1 +
 drivers/net/nfp/nfdk/nfp_nfdk_dp.c            |   1 +
 drivers/net/nfp/nfp_ethdev.c                  | 107 +++++++++++++-----
 drivers/net/nfp/nfp_net_common.c              |  67 +++++++----
 drivers/net/nfp/nfp_net_common.h              |  15 ++-
 drivers/net/nfp/nfp_net_flow.c                |  20 +++-
 drivers/net/nfp/nfp_rxtx.c                    |   5 +-
 drivers/net/nfp/nfp_rxtx.h                    |  10 +-
 16 files changed, 284 insertions(+), 159 deletions(-)
  

Patch

diff --git a/drivers/net/nfp/flower/nfp_flower.c b/drivers/net/nfp/flower/nfp_flower.c
index 303f6bd3f6..8dd9e23c78 100644
--- a/drivers/net/nfp/flower/nfp_flower.c
+++ b/drivers/net/nfp/flower/nfp_flower.c
@@ -102,14 +102,14 @@  static const struct eth_dev_ops nfp_flower_pf_vnic_ops = {
 };
 
 static inline struct nfp_flower_representor *
-nfp_flower_get_repr(struct nfp_net_hw *hw,
+nfp_flower_get_repr(struct nfp_net_hw_priv *hw_priv,
 		uint32_t port_id)
 {
 	uint8_t port;
 	struct nfp_app_fw_flower *app_fw_flower;
 
 	/* Obtain handle to app_fw_flower here */
-	app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(hw->pf_dev->app_fw_priv);
+	app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(hw_priv->pf_dev->app_fw_priv);
 
 	switch (NFP_FLOWER_CMSG_PORT_TYPE(port_id)) {
 	case NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT:
@@ -126,15 +126,15 @@  nfp_flower_get_repr(struct nfp_net_hw *hw,
 }
 
 bool
-nfp_flower_pf_dispatch_pkts(struct nfp_net_hw *hw,
+nfp_flower_pf_dispatch_pkts(struct nfp_net_hw_priv *hw_priv,
 		struct rte_mbuf *mbuf,
 		uint32_t port_id)
 {
 	struct nfp_flower_representor *repr;
 
-	repr = nfp_flower_get_repr(hw, port_id);
+	repr = nfp_flower_get_repr(hw_priv, port_id);
 	if (repr == NULL) {
-		PMD_RX_LOG(ERR, "Can not get repr for port %u", hw->idx);
+		PMD_RX_LOG(ERR, "Can not get repr for port %u", port_id);
 		return false;
 	}
 
@@ -189,13 +189,14 @@  nfp_flower_pf_xmit_pkts(void *tx_queue,
 	struct nfp_app_fw_flower *app_fw_flower;
 
 	txq = tx_queue;
-	app_fw_flower = txq->hw->pf_dev->app_fw_priv;
+	app_fw_flower = txq->hw_priv->pf_dev->app_fw_priv;
 
 	return app_fw_flower->nfd_func.pf_xmit_t(tx_queue, tx_pkts, nb_pkts);
 }
 
 static int
-nfp_flower_init_vnic_common(struct nfp_net_hw *hw,
+nfp_flower_init_vnic_common(struct nfp_net_hw_priv *hw_priv,
+		struct nfp_net_hw *hw,
 		const char *vnic_type)
 {
 	int err;
@@ -205,8 +206,8 @@  nfp_flower_init_vnic_common(struct nfp_net_hw *hw,
 	struct nfp_pf_dev *pf_dev;
 	struct rte_pci_device *pci_dev;
 
-	pf_dev = hw->pf_dev;
-	pci_dev = hw->pf_dev->pci_dev;
+	pf_dev = hw_priv->pf_dev;
+	pci_dev = pf_dev->pci_dev;
 
 	PMD_INIT_LOG(DEBUG, "%s vNIC ctrl bar: %p", vnic_type, hw->super.ctrl_bar);
 
@@ -239,13 +240,15 @@  nfp_flower_init_vnic_common(struct nfp_net_hw *hw,
 }
 
 static int
-nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
+nfp_flower_init_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower,
+		struct nfp_net_hw_priv *hw_priv)
 {
 	uint16_t i;
 	int ret = 0;
 	uint16_t n_txq;
 	uint16_t n_rxq;
 	const char *pci_name;
+	struct nfp_net_hw *hw;
 	unsigned int numa_node;
 	struct rte_mempool *mp;
 	struct nfp_net_rxq *rxq;
@@ -253,16 +256,15 @@  nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 	struct nfp_pf_dev *pf_dev;
 	struct rte_eth_dev *eth_dev;
 	const struct rte_memzone *tz;
-	struct nfp_app_fw_flower *app_fw_flower;
 	char ctrl_rxring_name[RTE_MEMZONE_NAMESIZE];
 	char ctrl_txring_name[RTE_MEMZONE_NAMESIZE];
 	char ctrl_pktmbuf_pool_name[RTE_MEMZONE_NAMESIZE];
 
 	/* Set up some pointers here for ease of use */
-	pf_dev = hw->pf_dev;
-	app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(pf_dev->app_fw_priv);
+	pf_dev = hw_priv->pf_dev;
+	hw = app_fw_flower->ctrl_hw;
 
-	ret = nfp_flower_init_vnic_common(hw, "ctrl_vnic");
+	ret = nfp_flower_init_vnic_common(hw_priv, hw, "ctrl_vnic");
 	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "Could not init pf vnic");
 		return -EINVAL;
@@ -397,6 +399,7 @@  nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 		nfp_net_reset_rx_queue(rxq);
 
 		rxq->hw = hw;
+		rxq->hw_priv = hw_priv;
 
 		/*
 		 * Telling the HW about the physical address of the RX ring and number
@@ -462,6 +465,7 @@  nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 		nfp_net_reset_tx_queue(txq);
 
 		txq->hw = hw;
+		txq->hw_priv = hw_priv;
 
 		/*
 		 * Telling the HW about the physical address of the TX ring and number
@@ -472,7 +476,7 @@  nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 	}
 
 	/* Alloc sync memory zone */
-	ret = nfp_flower_service_sync_alloc(app_fw_flower);
+	ret = nfp_flower_service_sync_alloc(hw_priv);
 	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "Alloc sync memory zone failed");
 		goto tx_queue_setup_cleanup;
@@ -512,21 +516,22 @@  nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 }
 
 static void
-nfp_flower_cleanup_ctrl_vnic(struct nfp_net_hw *hw)
+nfp_flower_cleanup_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower,
+		struct nfp_net_hw_priv *hw_priv)
 {
 	uint32_t i;
 	const char *pci_name;
+	struct nfp_net_hw *hw;
 	struct nfp_net_rxq *rxq;
 	struct nfp_net_txq *txq;
 	struct rte_eth_dev *eth_dev;
-	struct nfp_app_fw_flower *app_fw_flower;
 	char ctrl_txring_name[RTE_MEMZONE_NAMESIZE];
 	char ctrl_rxring_name[RTE_MEMZONE_NAMESIZE];
 
+	hw = app_fw_flower->ctrl_hw;
 	eth_dev = hw->eth_dev;
-	app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(hw->pf_dev->app_fw_priv);
 
-	pci_name = strchr(app_fw_flower->pf_hw->pf_dev->pci_dev->name, ':') + 1;
+	pci_name = strchr(hw_priv->pf_dev->pci_dev->name, ':') + 1;
 
 	snprintf(ctrl_txring_name, sizeof(ctrl_txring_name), "%s_cttx_ring", pci_name);
 	for (i = 0; i < hw->max_tx_queues; i++) {
@@ -548,7 +553,7 @@  nfp_flower_cleanup_ctrl_vnic(struct nfp_net_hw *hw)
 		}
 	}
 
-	nfp_flower_service_sync_free(app_fw_flower);
+	nfp_flower_service_sync_free(hw_priv);
 	rte_free(eth_dev->data->tx_queues);
 	rte_free(eth_dev->data->rx_queues);
 	rte_mempool_free(app_fw_flower->ctrl_pktmbuf_pool);
@@ -634,7 +639,7 @@  nfp_flower_nfd_func_register(struct nfp_app_fw_flower *app_fw_flower)
 }
 
 int
-nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev,
+nfp_init_app_fw_flower(struct nfp_net_hw_priv *hw_priv,
 		const struct nfp_dev_info *dev_info)
 {
 	int ret;
@@ -644,6 +649,7 @@  nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev,
 	struct nfp_net_hw *pf_hw;
 	struct nfp_net_hw *ctrl_hw;
 	struct nfp_app_fw_flower *app_fw_flower;
+	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;
 
 	numa_node = rte_socket_id();
 
@@ -702,11 +708,10 @@  nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev,
 	/* Fill in the PF vNIC and populate app struct */
 	app_fw_flower->pf_hw = pf_hw;
 	pf_hw->super.ctrl_bar = pf_dev->ctrl_bar;
-	pf_hw->pf_dev = pf_dev;
 	pf_hw->cpp = pf_dev->cpp;
 	pf_hw->dev_info = dev_info;
 
-	ret = nfp_flower_init_vnic_common(app_fw_flower->pf_hw, "pf_vnic");
+	ret = nfp_flower_init_vnic_common(hw_priv, pf_hw, "pf_vnic");
 	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "Could not initialize flower PF vNIC");
 		goto pf_cpp_area_cleanup;
@@ -728,11 +733,10 @@  nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev,
 	}
 
 	/* Now populate the ctrl vNIC */
-	ctrl_hw->pf_dev = pf_dev;
 	ctrl_hw->cpp = pf_dev->cpp;
 	ctrl_hw->dev_info = dev_info;
 
-	ret = nfp_flower_init_ctrl_vnic(app_fw_flower->ctrl_hw);
+	ret = nfp_flower_init_ctrl_vnic(app_fw_flower, hw_priv);
 	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "Could not initialize flower ctrl vNIC");
 		goto ctrl_cpp_area_cleanup;
@@ -746,14 +750,14 @@  nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev,
 	}
 
 	/* Start up flower services */
-	ret = nfp_flower_service_start(app_fw_flower);
+	ret = nfp_flower_service_start(app_fw_flower, hw_priv);
 	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "Could not enable flower services");
 		ret = -ESRCH;
 		goto ctrl_vnic_cleanup;
 	}
 
-	ret = nfp_flower_repr_create(app_fw_flower);
+	ret = nfp_flower_repr_create(app_fw_flower, hw_priv);
 	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "Could not create representor ports");
 		goto ctrl_vnic_service_stop;
@@ -762,9 +766,9 @@  nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev,
 	return 0;
 
 ctrl_vnic_service_stop:
-	nfp_flower_service_stop(app_fw_flower);
+	nfp_flower_service_stop(app_fw_flower, hw_priv);
 ctrl_vnic_cleanup:
-	nfp_flower_cleanup_ctrl_vnic(app_fw_flower->ctrl_hw);
+	nfp_flower_cleanup_ctrl_vnic(app_fw_flower, hw_priv);
 ctrl_cpp_area_cleanup:
 	nfp_cpp_area_free(ctrl_hw->ctrl_area);
 pf_cpp_area_cleanup:
@@ -782,12 +786,13 @@  nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev,
 }
 
 void
-nfp_uninit_app_fw_flower(struct nfp_pf_dev *pf_dev)
+nfp_uninit_app_fw_flower(struct nfp_net_hw_priv *hw_priv)
 {
 	struct nfp_app_fw_flower *app_fw_flower;
+	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;
 
 	app_fw_flower = pf_dev->app_fw_priv;
-	nfp_flower_cleanup_ctrl_vnic(app_fw_flower->ctrl_hw);
+	nfp_flower_cleanup_ctrl_vnic(app_fw_flower, hw_priv);
 	nfp_cpp_area_free(app_fw_flower->ctrl_hw->ctrl_area);
 	nfp_cpp_area_free(pf_dev->ctrl_area);
 	rte_free(app_fw_flower->pf_hw);
@@ -799,7 +804,7 @@  nfp_uninit_app_fw_flower(struct nfp_pf_dev *pf_dev)
 }
 
 int
-nfp_secondary_init_app_fw_flower(struct nfp_pf_dev *pf_dev)
+nfp_secondary_init_app_fw_flower(struct nfp_net_hw_priv *hw_priv)
 {
 	struct rte_eth_dev *eth_dev;
 	const char *port_name = "pf_vnic_eth_dev";
@@ -812,7 +817,7 @@  nfp_secondary_init_app_fw_flower(struct nfp_pf_dev *pf_dev)
 		return -ENODEV;
 	}
 
-	eth_dev->process_private = pf_dev;
+	eth_dev->process_private = hw_priv;
 	eth_dev->dev_ops = &nfp_flower_pf_vnic_ops;
 	eth_dev->rx_pkt_burst = nfp_net_recv_pkts;
 	eth_dev->tx_pkt_burst = nfp_flower_pf_xmit_pkts;
diff --git a/drivers/net/nfp/flower/nfp_flower.h b/drivers/net/nfp/flower/nfp_flower.h
index 8393de66c5..5cc29dcc87 100644
--- a/drivers/net/nfp/flower/nfp_flower.h
+++ b/drivers/net/nfp/flower/nfp_flower.h
@@ -104,11 +104,11 @@  nfp_flower_support_decap_v2(const struct nfp_app_fw_flower *app_fw_flower)
 	return app_fw_flower->ext_features & NFP_FL_FEATS_DECAP_V2;
 }
 
-int nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev,
+int nfp_init_app_fw_flower(struct nfp_net_hw_priv *hw_priv,
 		const struct nfp_dev_info *dev_info);
-void nfp_uninit_app_fw_flower(struct nfp_pf_dev *pf_dev);
-int nfp_secondary_init_app_fw_flower(struct nfp_pf_dev *pf_dev);
-bool nfp_flower_pf_dispatch_pkts(struct nfp_net_hw *hw,
+void nfp_uninit_app_fw_flower(struct nfp_net_hw_priv *hw_priv);
+int nfp_secondary_init_app_fw_flower(struct nfp_net_hw_priv *hw_priv);
+bool nfp_flower_pf_dispatch_pkts(struct nfp_net_hw_priv *hw_priv,
 		struct rte_mbuf *mbuf,
 		uint32_t port_id);
 uint16_t nfp_flower_pf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
diff --git a/drivers/net/nfp/flower/nfp_flower_cmsg.c b/drivers/net/nfp/flower/nfp_flower_cmsg.c
index eefeb0d887..2a0e9afe90 100644
--- a/drivers/net/nfp/flower/nfp_flower_cmsg.c
+++ b/drivers/net/nfp/flower/nfp_flower_cmsg.c
@@ -81,7 +81,8 @@  nfp_flower_cmsg_mac_repr_fill(struct rte_mbuf *m,
 }
 
 int
-nfp_flower_cmsg_mac_repr(struct nfp_app_fw_flower *app_fw_flower)
+nfp_flower_cmsg_mac_repr(struct nfp_app_fw_flower *app_fw_flower,
+		struct nfp_eth_table *nfp_eth_table)
 {
 	uint8_t i;
 	uint16_t cnt;
@@ -89,7 +90,6 @@  nfp_flower_cmsg_mac_repr(struct nfp_app_fw_flower *app_fw_flower)
 	uint32_t nbi_port;
 	uint32_t phys_port;
 	struct rte_mbuf *mbuf;
-	struct nfp_eth_table *nfp_eth_table;
 
 	mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
 	if (mbuf == NULL) {
@@ -100,7 +100,6 @@  nfp_flower_cmsg_mac_repr(struct nfp_app_fw_flower *app_fw_flower)
 	nfp_flower_cmsg_mac_repr_init(mbuf, app_fw_flower);
 
 	/* Fill in the mac repr cmsg */
-	nfp_eth_table = app_fw_flower->pf_hw->pf_dev->nfp_eth_table;
 	for (i = 0; i < app_fw_flower->num_phyport_reprs; i++) {
 		nbi = nfp_eth_table->ports[i].nbi;
 		nbi_port = nfp_eth_table->ports[i].base;
diff --git a/drivers/net/nfp/flower/nfp_flower_cmsg.h b/drivers/net/nfp/flower/nfp_flower_cmsg.h
index c94ea706bb..afaf733ef7 100644
--- a/drivers/net/nfp/flower/nfp_flower_cmsg.h
+++ b/drivers/net/nfp/flower/nfp_flower_cmsg.h
@@ -975,7 +975,8 @@  struct nfp_fl_act_mark {
 	rte_be32_t mark;
 };
 
-int nfp_flower_cmsg_mac_repr(struct nfp_app_fw_flower *app_fw_flower);
+int nfp_flower_cmsg_mac_repr(struct nfp_app_fw_flower *app_fw_flower,
+		struct nfp_eth_table *nfp_eth_table);
 int nfp_flower_cmsg_repr_reify(struct nfp_app_fw_flower *app_fw_flower,
 		struct nfp_flower_representor *repr);
 int nfp_flower_cmsg_port_mod(struct nfp_app_fw_flower *app_fw_flower,
diff --git a/drivers/net/nfp/flower/nfp_flower_representor.c b/drivers/net/nfp/flower/nfp_flower_representor.c
index d4c3c30682..934f078dca 100644
--- a/drivers/net/nfp/flower/nfp_flower_representor.c
+++ b/drivers/net/nfp/flower/nfp_flower_representor.c
@@ -83,17 +83,17 @@  nfp_flower_repr_dev_infos_get(__rte_unused struct rte_eth_dev *dev,
 static int
 nfp_flower_repr_dev_start(struct rte_eth_dev *dev)
 {
+	uint16_t i;
+	struct nfp_net_hw_priv *hw_priv;
 	struct nfp_flower_representor *repr;
 	struct nfp_app_fw_flower *app_fw_flower;
-	uint16_t i;
 
 	repr = dev->data->dev_private;
+	hw_priv = dev->process_private;
 	app_fw_flower = repr->app_fw_flower;
 
-	if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT) {
-		nfp_eth_set_configured(app_fw_flower->pf_hw->pf_dev->cpp,
-				repr->nfp_idx, 1);
-	}
+	if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT)
+		nfp_eth_set_configured(hw_priv->pf_dev->cpp, repr->nfp_idx, 1);
 
 	nfp_flower_cmsg_port_mod(app_fw_flower, repr->port_id, true);
 
@@ -108,19 +108,19 @@  nfp_flower_repr_dev_start(struct rte_eth_dev *dev)
 static int
 nfp_flower_repr_dev_stop(struct rte_eth_dev *dev)
 {
+	uint16_t i;
+	struct nfp_net_hw_priv *hw_priv;
 	struct nfp_flower_representor *repr;
 	struct nfp_app_fw_flower *app_fw_flower;
-	uint16_t i;
 
 	repr = dev->data->dev_private;
+	hw_priv = dev->process_private;
 	app_fw_flower = repr->app_fw_flower;
 
 	nfp_flower_cmsg_port_mod(app_fw_flower, repr->port_id, false);
 
-	if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT) {
-		nfp_eth_set_configured(app_fw_flower->pf_hw->pf_dev->cpp,
-				repr->nfp_idx, 0);
-	}
+	if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT)
+		nfp_eth_set_configured(hw_priv->pf_dev->cpp, repr->nfp_idx, 0);
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -408,8 +408,8 @@  static int
 nfp_flower_repr_dev_close(struct rte_eth_dev *dev)
 {
 	uint16_t i;
-	struct nfp_net_hw *hw;
 	struct nfp_pf_dev *pf_dev;
+	struct nfp_net_hw_priv *hw_priv;
 	struct nfp_flower_representor *repr;
 	struct nfp_app_fw_flower *app_fw_flower;
 
@@ -417,9 +417,10 @@  nfp_flower_repr_dev_close(struct rte_eth_dev *dev)
 		return 0;
 
 	repr = dev->data->dev_private;
+	hw_priv = dev->process_private;
+
 	app_fw_flower = repr->app_fw_flower;
-	hw = app_fw_flower->pf_hw;
-	pf_dev = hw->pf_dev;
+	pf_dev = hw_priv->pf_dev;
 
 	if (pf_dev->app_fw_id != NFP_APP_FW_FLOWER_NIC)
 		return -EINVAL;
@@ -442,14 +443,14 @@  nfp_flower_repr_dev_close(struct rte_eth_dev *dev)
 		return 0;
 
 	/* Stop flower service first */
-	nfp_flower_service_stop(app_fw_flower);
+	nfp_flower_service_stop(app_fw_flower, hw_priv);
 
 	/* Disable cpp service */
 	nfp_service_disable(&pf_dev->cpp_service_info);
 
 	/* Now it is safe to free all PF resources */
-	nfp_uninit_app_fw_flower(pf_dev);
-	nfp_pf_uninit(pf_dev);
+	nfp_uninit_app_fw_flower(hw_priv);
+	nfp_pf_uninit(hw_priv);
 
 	return 0;
 }
@@ -708,8 +709,43 @@  nfp_flower_repr_free_all(struct nfp_app_fw_flower *app_fw_flower)
 	}
 }
 
+static void
+nfp_flower_repr_priv_init(struct nfp_app_fw_flower *app_fw_flower,
+		struct nfp_net_hw_priv *hw_priv)
+{
+	uint32_t i;
+	struct rte_eth_dev *eth_dev;
+	struct nfp_flower_representor *repr;
+
+	repr = app_fw_flower->pf_repr;
+	if (repr != NULL) {
+		eth_dev = repr->eth_dev;
+		if (eth_dev != NULL)
+			eth_dev->process_private = hw_priv;
+	}
+
+	for (i = 0; i < NFP_MAX_PHYPORTS; i++) {
+		repr = app_fw_flower->phy_reprs[i];
+		if (repr != NULL) {
+			eth_dev = repr->eth_dev;
+			if (eth_dev != NULL)
+				eth_dev->process_private = hw_priv;
+		}
+	}
+
+	for (i = 0; i < MAX_FLOWER_VFS; i++) {
+		repr = app_fw_flower->vf_reprs[i];
+		if (repr != NULL) {
+			eth_dev = repr->eth_dev;
+			if (eth_dev != NULL)
+				eth_dev->process_private = hw_priv;
+		}
+	}
+}
+
 static int
-nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower)
+nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower,
+		struct nfp_net_hw_priv *hw_priv)
 {
 	int i;
 	int ret;
@@ -723,11 +759,11 @@  nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower)
 		.app_fw_flower    = app_fw_flower,
 	};
 
-	nfp_eth_table = app_fw_flower->pf_hw->pf_dev->nfp_eth_table;
+	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
 	eth_dev = app_fw_flower->ctrl_hw->eth_dev;
 
 	/* Send a NFP_FLOWER_CMSG_TYPE_MAC_REPR cmsg to hardware */
-	ret = nfp_flower_cmsg_mac_repr(app_fw_flower);
+	ret = nfp_flower_cmsg_mac_repr(app_fw_flower, nfp_eth_table);
 	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "Cloud not send mac repr cmsgs");
 		return ret;
@@ -739,7 +775,7 @@  nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower)
 	/* PF vNIC reprs get a random MAC address */
 	rte_eth_random_addr(flower_repr.mac_addr.addr_bytes);
 
-	pci_dev = app_fw_flower->pf_hw->pf_dev->pci_dev;
+	pci_dev = hw_priv->pf_dev->pci_dev;
 
 	pci_name = strchr(pci_dev->name, ':') + 1;
 
@@ -813,6 +849,8 @@  nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower)
 	if (i < app_fw_flower->num_vf_reprs)
 		goto repr_free;
 
+	nfp_flower_repr_priv_init(app_fw_flower, hw_priv);
+
 	return 0;
 
 repr_free:
@@ -822,7 +860,8 @@  nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower)
 }
 
 int
-nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower)
+nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower,
+		struct nfp_net_hw_priv *hw_priv)
 {
 	int ret;
 	struct nfp_pf_dev *pf_dev;
@@ -832,7 +871,7 @@  nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower)
 		.nb_representor_ports = 0
 	};
 
-	pf_dev = app_fw_flower->pf_hw->pf_dev;
+	pf_dev = hw_priv->pf_dev;
 	pci_dev = pf_dev->pci_dev;
 
 	/* Allocate a switch domain for the flower app */
@@ -876,7 +915,7 @@  nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower)
 	PMD_INIT_LOG(INFO, "%d number of VF reprs", app_fw_flower->num_vf_reprs);
 	PMD_INIT_LOG(INFO, "%d number of phyport reprs", app_fw_flower->num_phyport_reprs);
 
-	ret = nfp_flower_repr_alloc(app_fw_flower);
+	ret = nfp_flower_repr_alloc(app_fw_flower, hw_priv);
 	if (ret != 0) {
 		PMD_INIT_LOG(ERR, "representors allocation failed");
 		ret = -EINVAL;
diff --git a/drivers/net/nfp/flower/nfp_flower_representor.h b/drivers/net/nfp/flower/nfp_flower_representor.h
index 7a4e7ecfc1..41965f6e0e 100644
--- a/drivers/net/nfp/flower/nfp_flower_representor.h
+++ b/drivers/net/nfp/flower/nfp_flower_representor.h
@@ -23,7 +23,8 @@  struct nfp_flower_representor {
 	struct rte_eth_dev *eth_dev;
 };
 
-int nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower);
+int nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower,
+		struct nfp_net_hw_priv *hw_priv);
 bool nfp_flower_repr_is_vf(struct nfp_flower_representor *repr);
 
 #endif /* __NFP_FLOWER_REPRESENTOR_H__ */
diff --git a/drivers/net/nfp/flower/nfp_flower_service.c b/drivers/net/nfp/flower/nfp_flower_service.c
index 5e8811fe24..515b4abef0 100644
--- a/drivers/net/nfp/flower/nfp_flower_service.c
+++ b/drivers/net/nfp/flower/nfp_flower_service.c
@@ -28,9 +28,9 @@  struct nfp_flower_service {
 };
 
 static struct nfp_flower_service *
-nfp_flower_service_handle_get(struct nfp_app_fw_flower *app)
+nfp_flower_service_handle_get(struct nfp_net_hw_priv *hw_priv)
 {
-	return app->pf_hw->pf_dev->process_share.fl_service;
+	return hw_priv->pf_dev->process_share.fl_service;
 }
 
 static int
@@ -100,13 +100,13 @@  nfp_flower_service_insert(struct nfp_app_fw_flower *app,
 }
 
 int
-nfp_flower_service_start(void *app_fw_flower)
+nfp_flower_service_start(void *app_fw_flower,
+		struct nfp_net_hw_priv *hw_priv)
 {
 	int ret;
 	struct nfp_flower_service *service_handle;
-	struct nfp_app_fw_flower *app = app_fw_flower;
 
-	service_handle = nfp_flower_service_handle_get(app);
+	service_handle = nfp_flower_service_handle_get(hw_priv);
 	if (service_handle == NULL) {
 		PMD_DRV_LOG(ERR, "Can not get service handle");
 		return -EINVAL;
@@ -122,7 +122,7 @@  nfp_flower_service_start(void *app_fw_flower)
 	}
 
 	/* Insert the NIC to flower service slot */
-	ret = nfp_flower_service_insert(app, service_handle);
+	ret = nfp_flower_service_insert(app_fw_flower, service_handle);
 	if (ret == MAX_FLOWER_SERVICE_SLOT) {
 		PMD_DRV_LOG(ERR, "Flower ctrl vnic service slot over %u",
 				MAX_FLOWER_SERVICE_SLOT);
@@ -133,14 +133,14 @@  nfp_flower_service_start(void *app_fw_flower)
 }
 
 void
-nfp_flower_service_stop(void *app_fw_flower)
+nfp_flower_service_stop(void *app_fw_flower,
+		struct nfp_net_hw_priv *hw_priv)
 {
 	uint16_t slot;
 	uint16_t count;
 	struct nfp_flower_service *service_handle;
-	struct nfp_app_fw_flower *app = app_fw_flower;
 
-	service_handle = nfp_flower_service_handle_get(app);
+	service_handle = nfp_flower_service_handle_get(hw_priv);
 	if (service_handle == NULL) {
 		PMD_DRV_LOG(ERR, "Can not get service handle");
 		return;
@@ -149,7 +149,7 @@  nfp_flower_service_stop(void *app_fw_flower)
 	rte_spinlock_lock(&service_handle->spinlock);
 	for (slot = 0; slot < MAX_FLOWER_SERVICE_SLOT; slot++) {
 		/* The app only in one slot */
-		if (service_handle->slots[slot] != app)
+		if (service_handle->slots[slot] != app_fw_flower)
 			continue;
 
 		service_handle->slots[slot] = NULL;
@@ -157,7 +157,7 @@  nfp_flower_service_stop(void *app_fw_flower)
 	rte_spinlock_unlock(&service_handle->spinlock);
 
 	/* Determine whether to disable service */
-	count = nfp_sync_handle_count_get(app->pf_hw->pf_dev->sync, NULL,
+	count = nfp_sync_handle_count_get(hw_priv->pf_dev->sync, NULL,
 			service_handle);
 	if (count > 1)
 		return;
@@ -167,11 +167,10 @@  nfp_flower_service_stop(void *app_fw_flower)
 }
 
 int
-nfp_flower_service_sync_alloc(void *app_fw_flower)
+nfp_flower_service_sync_alloc(struct nfp_net_hw_priv *hw_priv)
 {
 	struct nfp_flower_service *service_handle;
-	struct nfp_app_fw_flower *app = app_fw_flower;
-	struct nfp_pf_dev *pf_dev = app->pf_hw->pf_dev;
+	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;
 
 	service_handle = nfp_sync_handle_alloc(pf_dev->sync, NULL,
 			NFP_SYNC_MAGIC_FL_SERVICE,
@@ -185,10 +184,9 @@  nfp_flower_service_sync_alloc(void *app_fw_flower)
 }
 
 void
-nfp_flower_service_sync_free(void *app_fw_flower)
+nfp_flower_service_sync_free(struct nfp_net_hw_priv *hw_priv)
 {
-	struct nfp_app_fw_flower *app = app_fw_flower;
-	struct nfp_pf_dev *pf_dev = app->pf_hw->pf_dev;
+	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;
 
 	nfp_sync_handle_free(pf_dev->sync, NULL, pf_dev->process_share.fl_service);
 
diff --git a/drivers/net/nfp/flower/nfp_flower_service.h b/drivers/net/nfp/flower/nfp_flower_service.h
index a75780274f..2fbe4fa601 100644
--- a/drivers/net/nfp/flower/nfp_flower_service.h
+++ b/drivers/net/nfp/flower/nfp_flower_service.h
@@ -6,12 +6,12 @@ 
 #ifndef __NFP_FLOWER_SERVICE_H__
 #define __NFP_FLOWER_SERVICE_H__
 
-struct nfp_flower_service;
+#include "../nfp_net_common.h"
 
-int nfp_flower_service_start(void *app_fw_flower);
-void nfp_flower_service_stop(void *app_fw_flower);
+int nfp_flower_service_start(void *app_fw_flower, struct nfp_net_hw_priv *hw_priv);
+void nfp_flower_service_stop(void *app_fw_flower, struct nfp_net_hw_priv *hw_priv);
 
-int nfp_flower_service_sync_alloc(void *app_fw_flower);
-void nfp_flower_service_sync_free(void *app_fw_flower);
+int nfp_flower_service_sync_alloc(struct nfp_net_hw_priv *hw_priv);
+void nfp_flower_service_sync_free(struct nfp_net_hw_priv *hw_priv);
 
 #endif /* __NFP_FLOWER_SERVICE_H__ */
diff --git a/drivers/net/nfp/nfd3/nfp_nfd3_dp.c b/drivers/net/nfp/nfd3/nfp_nfd3_dp.c
index 253872f4a1..681a63eacd 100644
--- a/drivers/net/nfp/nfd3/nfp_nfd3_dp.c
+++ b/drivers/net/nfp/nfd3/nfp_nfd3_dp.c
@@ -469,6 +469,7 @@  nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev,
 	nfp_net_reset_tx_queue(txq);
 
 	txq->hw = hw;
+	txq->hw_priv = dev->process_private;
 
 	/*
 	 * Telling the HW about the physical address of the TX ring and number
diff --git a/drivers/net/nfp/nfdk/nfp_nfdk_dp.c b/drivers/net/nfp/nfdk/nfp_nfdk_dp.c
index 41cdfd3a40..83eaec9c24 100644
--- a/drivers/net/nfp/nfdk/nfp_nfdk_dp.c
+++ b/drivers/net/nfp/nfdk/nfp_nfdk_dp.c
@@ -555,6 +555,7 @@  nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,
 
 	dev->data->tx_queues[queue_idx] = txq;
 	txq->hw = hw;
+	txq->hw_priv = dev->process_private;
 
 	/*
 	 * Telling the HW about the physical address of the TX ring and number
diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c
index 98d8e87028..58e155e92b 100644
--- a/drivers/net/nfp/nfp_ethdev.c
+++ b/drivers/net/nfp/nfp_ethdev.c
@@ -103,12 +103,14 @@  nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic,
 		uint16_t port)
 {
 	struct nfp_net_hw *hw;
+	struct nfp_net_hw_priv *hw_priv;
 	struct nfp_eth_table *nfp_eth_table;
 
 	/* Grab a pointer to the correct physical port */
 	hw = app_fw_nic->ports[port];
+	hw_priv = hw->eth_dev->process_private;
 
-	nfp_eth_table = app_fw_nic->pf_dev->nfp_eth_table;
+	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
 
 	rte_ether_addr_copy(&nfp_eth_table->ports[port].mac_addr, &hw->super.mac_addr);
 }
@@ -183,8 +185,7 @@  nfp_net_nfp4000_speed_configure_check(uint16_t port_id,
 }
 
 static int
-nfp_net_speed_configure(struct rte_eth_dev *dev,
-		struct nfp_net_hw *net_hw)
+nfp_net_speed_configure(struct rte_eth_dev *dev)
 {
 	int ret;
 	uint32_t speed_capa;
@@ -193,11 +194,13 @@  nfp_net_speed_configure(struct rte_eth_dev *dev,
 	uint32_t configure_speed;
 	struct nfp_eth_table_port *eth_port;
 	struct nfp_eth_table *nfp_eth_table;
+	struct nfp_net_hw *net_hw = dev->data->dev_private;
+	struct nfp_net_hw_priv *hw_priv = dev->process_private;
 
-	nfp_eth_table = net_hw->pf_dev->nfp_eth_table;
+	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
 	eth_port = &nfp_eth_table->ports[net_hw->idx];
 
-	speed_capa = net_hw->pf_dev->speed_capa;
+	speed_capa = hw_priv->pf_dev->speed_capa;
 	if (speed_capa == 0) {
 		PMD_DRV_LOG(ERR, "Speed_capa is invalid.");
 		return -EINVAL;
@@ -272,12 +275,14 @@  nfp_net_start(struct rte_eth_dev *dev)
 	struct nfp_net_hw *net_hw;
 	struct nfp_pf_dev *pf_dev;
 	struct rte_eth_rxmode *rxmode;
+	struct nfp_net_hw_priv *hw_priv;
 	struct nfp_app_fw_nic *app_fw_nic;
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
 
 	net_hw = dev->data->dev_private;
-	pf_dev = net_hw->pf_dev;
+	hw_priv = dev->process_private;
+	pf_dev = hw_priv->pf_dev;
 	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
 	hw = &net_hw->super;
 
@@ -288,7 +293,7 @@  nfp_net_start(struct rte_eth_dev *dev)
 	nfp_net_enable_queues(dev);
 
 	/* Configure the port speed and the auto-negotiation mode. */
-	ret = nfp_net_speed_configure(dev, net_hw);
+	ret = nfp_net_speed_configure(dev);
 	if (ret < 0) {
 		PMD_DRV_LOG(ERR, "Failed to set the speed and auto-negotiation mode.");
 		return ret;
@@ -400,7 +405,7 @@  nfp_net_start(struct rte_eth_dev *dev)
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 		cpp = net_hw->cpp;
 	else
-		cpp = ((struct nfp_pf_dev *)(dev->process_private))->cpp;
+		cpp = pf_dev->cpp;
 
 	/* Configure the physical port up */
 	nfp_eth_set_configured(cpp, net_hw->nfp_idx, 1);
@@ -437,13 +442,15 @@  nfp_net_set_link_up(struct rte_eth_dev *dev)
 {
 	struct nfp_cpp *cpp;
 	struct nfp_net_hw *hw;
+	struct nfp_net_hw_priv *hw_priv;
 
 	hw = dev->data->dev_private;
+	hw_priv = dev->process_private;
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 		cpp = hw->cpp;
 	else
-		cpp = ((struct nfp_pf_dev *)(dev->process_private))->cpp;
+		cpp = hw_priv->pf_dev->cpp;
 
 	return nfp_eth_set_configured(cpp, hw->nfp_idx, 1);
 }
@@ -454,13 +461,15 @@  nfp_net_set_link_down(struct rte_eth_dev *dev)
 {
 	struct nfp_cpp *cpp;
 	struct nfp_net_hw *hw;
+	struct nfp_net_hw_priv *hw_priv;
 
 	hw = dev->data->dev_private;
+	hw_priv = dev->process_private;
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 		cpp = hw->cpp;
 	else
-		cpp = ((struct nfp_pf_dev *)(dev->process_private))->cpp;
+		cpp = hw_priv->pf_dev->cpp;
 
 	return nfp_eth_set_configured(cpp, hw->nfp_idx, 0);
 }
@@ -573,11 +582,13 @@  static void
 nfp_net_uninit(struct rte_eth_dev *eth_dev)
 {
 	struct nfp_net_hw *net_hw;
+	struct nfp_net_hw_priv *hw_priv;
 
 	net_hw = eth_dev->data->dev_private;
+	hw_priv = eth_dev->process_private;
 
 	if ((net_hw->super.cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER) != 0)
-		nfp_net_flow_priv_uninit(net_hw->pf_dev, net_hw->idx);
+		nfp_net_flow_priv_uninit(hw_priv->pf_dev, net_hw->idx);
 
 	rte_free(net_hw->eth_xstats_base);
 	if ((net_hw->super.cap & NFP_NET_CFG_CTRL_TXRWB) != 0)
@@ -612,8 +623,10 @@  nfp_uninit_app_fw_nic(struct nfp_pf_dev *pf_dev)
 }
 
 void
-nfp_pf_uninit(struct nfp_pf_dev *pf_dev)
+nfp_pf_uninit(struct nfp_net_hw_priv *hw_priv)
 {
+	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;
+
 	nfp_cpp_area_release_free(pf_dev->qc_area);
 	free(pf_dev->sym_tbl);
 	if (pf_dev->multi_pf.enabled) {
@@ -626,15 +639,19 @@  nfp_pf_uninit(struct nfp_pf_dev *pf_dev)
 	nfp_cpp_free(pf_dev->cpp);
 	nfp_sync_free(pf_dev->sync);
 	rte_free(pf_dev);
+	rte_free(hw_priv);
 }
 
 static int
-nfp_pf_secondary_uninit(struct nfp_pf_dev *pf_dev)
+nfp_pf_secondary_uninit(struct nfp_net_hw_priv *hw_priv)
 {
+	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;
+
 	free(pf_dev->sym_tbl);
 	nfp_cpp_free(pf_dev->cpp);
 	nfp_sync_free(pf_dev->sync);
 	rte_free(pf_dev);
+	rte_free(hw_priv);
 
 	return 0;
 }
@@ -648,8 +665,11 @@  nfp_net_close(struct rte_eth_dev *dev)
 	struct nfp_net_hw *hw;
 	struct nfp_pf_dev *pf_dev;
 	struct rte_pci_device *pci_dev;
+	struct nfp_net_hw_priv *hw_priv;
 	struct nfp_app_fw_nic *app_fw_nic;
 
+	hw_priv = dev->process_private;
+
 	/*
 	 * In secondary process, a released eth device can be found by its name
 	 * in shared memory.
@@ -660,12 +680,12 @@  nfp_net_close(struct rte_eth_dev *dev)
 		if (dev->state == RTE_ETH_DEV_UNUSED)
 			return 0;
 
-		nfp_pf_secondary_uninit(dev->process_private);
+		nfp_pf_secondary_uninit(hw_priv);
 		return 0;
 	}
 
 	hw = dev->data->dev_private;
-	pf_dev = hw->pf_dev;
+	pf_dev = hw_priv->pf_dev;
 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
 
@@ -707,7 +727,7 @@  nfp_net_close(struct rte_eth_dev *dev)
 			nfp_net_dev_interrupt_handler, (void *)dev);
 
 	nfp_uninit_app_fw_nic(pf_dev);
-	nfp_pf_uninit(pf_dev);
+	nfp_pf_uninit(hw_priv);
 
 	return 0;
 }
@@ -887,13 +907,15 @@  nfp_net_init(struct rte_eth_dev *eth_dev)
 	struct nfp_net_hw *net_hw;
 	struct nfp_pf_dev *pf_dev;
 	struct rte_pci_device *pci_dev;
+	struct nfp_net_hw_priv *hw_priv;
 	struct nfp_app_fw_nic *app_fw_nic;
 
 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	net_hw = eth_dev->data->dev_private;
 
 	/* Use backpointer here to the PF of this eth_dev */
-	pf_dev = net_hw->pf_dev;
+	hw_priv = eth_dev->process_private;
+	pf_dev = hw_priv->pf_dev;
 
 	/* Use backpointer to the CoreNIC app struct */
 	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
@@ -915,8 +937,8 @@  nfp_net_init(struct rte_eth_dev *eth_dev)
 		uint32_t min_size;
 
 		hw->ctrl_bar = pf_dev->ctrl_bar;
-		min_size = NFP_MAC_STATS_SIZE * net_hw->pf_dev->nfp_eth_table->max_index;
-		net_hw->mac_stats_bar = nfp_rtsym_map(net_hw->pf_dev->sym_tbl, "_mac_stats",
+		min_size = NFP_MAC_STATS_SIZE * pf_dev->nfp_eth_table->max_index;
+		net_hw->mac_stats_bar = nfp_rtsym_map(pf_dev->sym_tbl, "_mac_stats",
 				min_size, &net_hw->mac_stats_area);
 		if (net_hw->mac_stats_bar == NULL) {
 			PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _mac_stats_bar");
@@ -1472,7 +1494,7 @@  nfp_enable_multi_pf(struct nfp_pf_dev *pf_dev)
 }
 
 static int
-nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,
+nfp_init_app_fw_nic(struct nfp_net_hw_priv *hw_priv,
 		const struct nfp_dev_info *dev_info)
 {
 	uint8_t i;
@@ -1487,6 +1509,7 @@  nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,
 	char bar_name[RTE_ETH_NAME_MAX_LEN];
 	char port_name[RTE_ETH_NAME_MAX_LEN];
 	char vnic_name[RTE_ETH_NAME_MAX_LEN];
+	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;
 
 	nfp_eth_table = pf_dev->nfp_eth_table;
 	PMD_INIT_LOG(INFO, "Total physical ports: %d", nfp_eth_table->count);
@@ -1529,7 +1552,6 @@  nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,
 
 	/* Populate coreNIC app properties */
 	app_fw_nic->total_phyports = total_vnics;
-	app_fw_nic->pf_dev = pf_dev;
 	if (total_vnics > 1)
 		app_fw_nic->multiport = true;
 
@@ -1580,13 +1602,13 @@  nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,
 		app_fw_nic->ports[id] = hw;
 
 		hw->dev_info = dev_info;
-		hw->pf_dev = pf_dev;
 		hw->cpp = pf_dev->cpp;
 		hw->eth_dev = eth_dev;
 		hw->idx = id;
 		hw->nfp_idx = nfp_eth_table->ports[id].index;
 
 		eth_dev->device = &pf_dev->pci_dev->device;
+		eth_dev->process_private = hw_priv;
 
 		/*
 		 * Ctrl/tx/rx BAR mappings and remaining init happens in
@@ -1792,6 +1814,7 @@  nfp_pf_init(struct rte_pci_device *pci_dev)
 	enum nfp_app_fw_id app_fw_id;
 	char name[RTE_ETH_NAME_MAX_LEN];
 	struct nfp_rtsym_table *sym_tbl;
+	struct nfp_net_hw_priv *hw_priv;
 	char app_name[RTE_ETH_NAME_MAX_LEN];
 	struct nfp_eth_table *nfp_eth_table;
 	const struct nfp_dev_info *dev_info;
@@ -1810,13 +1833,20 @@  nfp_pf_init(struct rte_pci_device *pci_dev)
 		return -ENODEV;
 	}
 
+	hw_priv = rte_zmalloc(NULL, sizeof(*hw_priv), 0);
+	if (hw_priv == NULL) {
+		PMD_INIT_LOG(ERR, "Can not alloc memory for hw priv data");
+		return -ENOMEM;
+	}
+
 	/* Allocate memory for the PF "device" */
 	function_id = (pci_dev->addr.function) & 0x07;
 	snprintf(name, sizeof(name), "nfp_pf%u", function_id);
 	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
 	if (pf_dev == NULL) {
 		PMD_INIT_LOG(ERR, "Can't allocate memory for the PF device");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto hw_priv_free;
 	}
 
 	sync = nfp_sync_alloc();
@@ -1937,6 +1967,8 @@  nfp_pf_init(struct rte_pci_device *pci_dev)
 
 	PMD_INIT_LOG(DEBUG, "qc_bar address: %p", pf_dev->qc_bar);
 
+	hw_priv->pf_dev = pf_dev;
+
 	/*
 	 * PF initialization has been done at this point. Call app specific
 	 * init code now.
@@ -1950,7 +1982,7 @@  nfp_pf_init(struct rte_pci_device *pci_dev)
 		}
 
 		PMD_INIT_LOG(INFO, "Initializing coreNIC");
-		ret = nfp_init_app_fw_nic(pf_dev, dev_info);
+		ret = nfp_init_app_fw_nic(hw_priv, dev_info);
 		if (ret != 0) {
 			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
 			goto hwqueues_cleanup;
@@ -1958,7 +1990,7 @@  nfp_pf_init(struct rte_pci_device *pci_dev)
 		break;
 	case NFP_APP_FW_FLOWER_NIC:
 		PMD_INIT_LOG(INFO, "Initializing Flower");
-		ret = nfp_init_app_fw_flower(pf_dev, dev_info);
+		ret = nfp_init_app_fw_flower(hw_priv, dev_info);
 		if (ret != 0) {
 			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
 			goto hwqueues_cleanup;
@@ -1996,12 +2028,14 @@  nfp_pf_init(struct rte_pci_device *pci_dev)
 	nfp_sync_free(sync);
 pf_cleanup:
 	rte_free(pf_dev);
+hw_priv_free:
+	rte_free(hw_priv);
 
 	return ret;
 }
 
 static int
-nfp_secondary_init_app_fw_nic(struct nfp_pf_dev *pf_dev)
+nfp_secondary_init_app_fw_nic(struct nfp_net_hw_priv *hw_priv)
 {
 	uint32_t i;
 	int err = 0;
@@ -2010,6 +2044,7 @@  nfp_secondary_init_app_fw_nic(struct nfp_pf_dev *pf_dev)
 	uint32_t total_vnics;
 	struct nfp_net_hw *hw;
 	char pf_name[RTE_ETH_NAME_MAX_LEN];
+	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;
 
 	/* Read the number of vNIC's created for the PF */
 	function_id = (pf_dev->pci_dev->addr.function) & 0x07;
@@ -2039,7 +2074,7 @@  nfp_secondary_init_app_fw_nic(struct nfp_pf_dev *pf_dev)
 			break;
 		}
 
-		eth_dev->process_private = pf_dev;
+		eth_dev->process_private = hw_priv;
 		hw = eth_dev->data->dev_private;
 		nfp_net_ethdev_ops_mount(hw, eth_dev);
 
@@ -2065,6 +2100,7 @@  nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
 	enum nfp_app_fw_id app_fw_id;
 	char name[RTE_ETH_NAME_MAX_LEN];
 	struct nfp_rtsym_table *sym_tbl;
+	struct nfp_net_hw_priv *hw_priv;
 	const struct nfp_dev_info *dev_info;
 	char app_name[RTE_ETH_NAME_MAX_LEN];
 
@@ -2082,12 +2118,19 @@  nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
 		return -ENODEV;
 	}
 
+	hw_priv = rte_zmalloc(NULL, sizeof(*hw_priv), 0);
+	if (hw_priv == NULL) {
+		PMD_INIT_LOG(ERR, "Can not alloc memory for hw priv data");
+		return -ENOMEM;
+	}
+
 	/* Allocate memory for the PF "device" */
 	snprintf(name, sizeof(name), "nfp_pf%d", 0);
 	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
 	if (pf_dev == NULL) {
 		PMD_INIT_LOG(ERR, "Can't allocate memory for the PF device");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto hw_priv_free;
 	}
 
 	sync = nfp_sync_alloc();
@@ -2143,11 +2186,13 @@  nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
 	pf_dev->pci_dev = pci_dev;
 	pf_dev->sync = sync;
 
+	hw_priv->pf_dev = pf_dev;
+
 	/* Call app specific init code now */
 	switch (app_fw_id) {
 	case NFP_APP_FW_CORE_NIC:
 		PMD_INIT_LOG(INFO, "Initializing coreNIC");
-		ret = nfp_secondary_init_app_fw_nic(pf_dev);
+		ret = nfp_secondary_init_app_fw_nic(hw_priv);
 		if (ret != 0) {
 			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
 			goto sym_tbl_cleanup;
@@ -2155,7 +2200,7 @@  nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
 		break;
 	case NFP_APP_FW_FLOWER_NIC:
 		PMD_INIT_LOG(INFO, "Initializing Flower");
-		ret = nfp_secondary_init_app_fw_flower(pf_dev);
+		ret = nfp_secondary_init_app_fw_flower(hw_priv);
 		if (ret != 0) {
 			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
 			goto sym_tbl_cleanup;
@@ -2177,6 +2222,8 @@  nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
 	nfp_sync_free(sync);
 pf_cleanup:
 	rte_free(pf_dev);
+hw_priv_free:
+	rte_free(hw_priv);
 
 	return ret;
 }
diff --git a/drivers/net/nfp/nfp_net_common.c b/drivers/net/nfp/nfp_net_common.c
index da1a7e7be1..60d77b6178 100644
--- a/drivers/net/nfp/nfp_net_common.c
+++ b/drivers/net/nfp/nfp_net_common.c
@@ -685,6 +685,7 @@  nfp_net_allmulticast_disable(struct rte_eth_dev *dev)
 static int
 nfp_net_speed_aneg_update(struct rte_eth_dev *dev,
 		struct nfp_net_hw *hw,
+		struct nfp_net_hw_priv *hw_priv,
 		struct rte_eth_link *link)
 {
 	uint32_t i;
@@ -700,11 +701,11 @@  nfp_net_speed_aneg_update(struct rte_eth_dev *dev,
 			return -EIO;
 		}
 
-		hw->pf_dev->nfp_eth_table->ports[hw->idx] = nfp_eth_table->ports[hw->idx];
+		hw_priv->pf_dev->nfp_eth_table->ports[hw->idx] = nfp_eth_table->ports[hw->idx];
 		free(nfp_eth_table);
 	}
 
-	nfp_eth_table = hw->pf_dev->nfp_eth_table;
+	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
 	eth_port = &nfp_eth_table->ports[hw->idx];
 	speed = eth_port->speed;
 
@@ -730,10 +731,12 @@  nfp_net_link_update_common(struct rte_eth_dev *dev,
 {
 	int ret;
 	uint32_t nn_link_status;
+	struct nfp_net_hw_priv *hw_priv;
 
+	hw_priv = dev->process_private;
 	if (link->link_status == RTE_ETH_LINK_UP) {
-		if (hw->pf_dev != NULL) {
-			ret = nfp_net_speed_aneg_update(dev, hw, link);
+		if (hw_priv->pf_dev != NULL) {
+			ret = nfp_net_speed_aneg_update(dev, hw, hw_priv, link);
 			if (ret != 0) {
 				PMD_DRV_LOG(DEBUG, "Failed to update speed and aneg.");
 				return ret;
@@ -775,8 +778,10 @@  nfp_net_link_update(struct rte_eth_dev *dev,
 	struct nfp_net_hw *hw;
 	uint32_t nn_link_status;
 	struct rte_eth_link link;
+	struct nfp_net_hw_priv *hw_priv;
 
 	hw = nfp_net_get_hw(dev);
+	hw_priv = dev->process_private;
 
 	memset(&link, 0, sizeof(struct rte_eth_link));
 
@@ -796,7 +801,7 @@  nfp_net_link_update(struct rte_eth_dev *dev,
 	 * Not applicable for VFs as the associated PF is still attached to the
 	 * kernel driver.
 	 */
-	if (hw->pf_dev != NULL)
+	if (hw_priv != NULL && hw_priv->pf_dev != NULL)
 		nfp_net_notify_port_speed(hw, &link);
 
 	return ret;
@@ -1173,8 +1178,10 @@  nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	uint16_t min_tx_desc;
 	uint16_t max_tx_desc;
 	struct nfp_net_hw *hw;
+	struct nfp_net_hw_priv *hw_priv;
 
 	hw = nfp_net_get_hw(dev);
+	hw_priv = dev->process_private;
 
 	nfp_net_rx_desc_limits(hw, &min_rx_desc, &max_rx_desc);
 	nfp_net_tx_desc_limits(hw, &min_tx_desc, &max_tx_desc);
@@ -1274,8 +1281,8 @@  nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	}
 
 	/* Only PF supports getting speed capability. */
-	if (hw->pf_dev != NULL)
-		dev_info->speed_capa = hw->pf_dev->speed_capa;
+	if (hw_priv != NULL && hw_priv->pf_dev != NULL)
+		dev_info->speed_capa = hw_priv->pf_dev->speed_capa;
 
 	return 0;
 }
@@ -2135,10 +2142,10 @@  nfp_net_get_mip_name(struct nfp_net_hw *hw,
 }
 
 static void
-nfp_net_get_app_name(struct nfp_net_hw *hw,
+nfp_net_get_app_name(struct nfp_net_hw_priv *hw_priv,
 		char *app_name)
 {
-	switch (hw->pf_dev->app_fw_id) {
+	switch (hw_priv->pf_dev->app_fw_id) {
 	case NFP_APP_FW_CORE_NIC:
 		snprintf(app_name, FW_VER_LEN, "%s", "nic");
 		break;
@@ -2161,11 +2168,13 @@  nfp_net_firmware_version_get(struct rte_eth_dev *dev,
 	char app_name[FW_VER_LEN];
 	char nsp_version[FW_VER_LEN];
 	char vnic_version[FW_VER_LEN];
+	struct nfp_net_hw_priv *hw_priv;
 
 	if (fw_size < FW_VER_LEN)
 		return FW_VER_LEN;
 
 	hw = nfp_net_get_hw(dev);
+	hw_priv = dev->process_private;
 
 	if (!rte_eth_dev_is_repr(dev)) {
 		snprintf(vnic_version, FW_VER_LEN, "%d.%d.%d.%d",
@@ -2177,7 +2186,7 @@  nfp_net_firmware_version_get(struct rte_eth_dev *dev,
 
 	nfp_net_get_nsp_info(hw, nsp_version);
 	nfp_net_get_mip_name(hw, mip_name);
-	nfp_net_get_app_name(hw, app_name);
+	nfp_net_get_app_name(hw_priv, app_name);
 
 	snprintf(fw_version, FW_VER_LEN, "%s %s %s %s",
 			vnic_version, nsp_version, mip_name, app_name);
@@ -2212,8 +2221,10 @@  nfp_net_stop(struct rte_eth_dev *dev)
 {
 	struct nfp_cpp *cpp;
 	struct nfp_net_hw *hw;
+	struct nfp_net_hw_priv *hw_priv;
 
 	hw = nfp_net_get_hw(dev);
+	hw_priv = dev->process_private;
 
 	nfp_net_disable_queues(dev);
 
@@ -2224,7 +2235,7 @@  nfp_net_stop(struct rte_eth_dev *dev)
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 		cpp = hw->cpp;
 	else
-		cpp = ((struct nfp_pf_dev *)(dev->process_private))->cpp;
+		cpp = hw_priv->pf_dev->cpp;
 
 	nfp_eth_set_configured(cpp, hw->nfp_idx, 0);
 
@@ -2255,15 +2266,15 @@  int
 nfp_net_flow_ctrl_get(struct rte_eth_dev *dev,
 		struct rte_eth_fc_conf *fc_conf)
 {
-	struct nfp_net_hw *net_hw;
+	struct nfp_net_hw_priv *hw_priv;
 	struct nfp_eth_table *nfp_eth_table;
 	struct nfp_eth_table_port *eth_port;
 
-	net_hw = nfp_net_get_hw(dev);
-	if (net_hw->pf_dev == NULL)
+	hw_priv = dev->process_private;
+	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
 		return -EINVAL;
 
-	nfp_eth_table = net_hw->pf_dev->nfp_eth_table;
+	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
 	eth_port = &nfp_eth_table->ports[dev->data->port_id];
 
 	/* Currently only RX/TX switch are supported */
@@ -2319,15 +2330,17 @@  nfp_net_flow_ctrl_set(struct rte_eth_dev *dev,
 	int ret;
 	struct nfp_net_hw *net_hw;
 	enum rte_eth_fc_mode set_mode;
+	struct nfp_net_hw_priv *hw_priv;
 	enum rte_eth_fc_mode original_mode;
 	struct nfp_eth_table *nfp_eth_table;
 	struct nfp_eth_table_port *eth_port;
 
 	net_hw = nfp_net_get_hw(dev);
-	if (net_hw->pf_dev == NULL)
+	hw_priv = dev->process_private;
+	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
 		return -EINVAL;
 
-	nfp_eth_table = net_hw->pf_dev->nfp_eth_table;
+	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
 	eth_port = &nfp_eth_table->ports[net_hw->idx];
 
 	original_mode = nfp_net_get_pause_mode(eth_port);
@@ -2355,14 +2368,16 @@  nfp_net_fec_get_capability(struct rte_eth_dev *dev,
 	uint16_t speed;
 	struct nfp_net_hw *hw;
 	uint32_t supported_fec;
+	struct nfp_net_hw_priv *hw_priv;
 	struct nfp_eth_table *nfp_eth_table;
 	struct nfp_eth_table_port *eth_port;
 
 	hw = nfp_net_get_hw(dev);
-	if (hw->pf_dev == NULL)
+	hw_priv = dev->process_private;
+	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
 		return -EINVAL;
 
-	nfp_eth_table = hw->pf_dev->nfp_eth_table;
+	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
 	eth_port = &nfp_eth_table->ports[hw->idx];
 
 	speed = eth_port->speed;
@@ -2412,20 +2427,22 @@  nfp_net_fec_get(struct rte_eth_dev *dev,
 		uint32_t *fec_capa)
 {
 	struct nfp_net_hw *hw;
+	struct nfp_net_hw_priv *hw_priv;
 	struct nfp_eth_table *nfp_eth_table;
 	struct nfp_eth_table_port *eth_port;
 
 	hw = nfp_net_get_hw(dev);
-	if (hw->pf_dev == NULL)
+	hw_priv = dev->process_private;
+	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
 		return -EINVAL;
 
 	if (dev->data->dev_link.link_status == RTE_ETH_LINK_DOWN) {
 		nfp_eth_table = nfp_eth_read_ports(hw->cpp);
-		hw->pf_dev->nfp_eth_table->ports[hw->idx] = nfp_eth_table->ports[hw->idx];
+		hw_priv->pf_dev->nfp_eth_table->ports[hw->idx] = nfp_eth_table->ports[hw->idx];
 		free(nfp_eth_table);
 	}
 
-	nfp_eth_table = hw->pf_dev->nfp_eth_table;
+	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
 	eth_port = &nfp_eth_table->ports[hw->idx];
 
 	if (!nfp_eth_can_support_fec(eth_port)) {
@@ -2473,14 +2490,16 @@  nfp_net_fec_set(struct rte_eth_dev *dev,
 	enum nfp_eth_fec fec;
 	struct nfp_net_hw *hw;
 	uint32_t supported_fec;
+	struct nfp_net_hw_priv *hw_priv;
 	struct nfp_eth_table *nfp_eth_table;
 	struct nfp_eth_table_port *eth_port;
 
 	hw = nfp_net_get_hw(dev);
-	if (hw->pf_dev == NULL)
+	hw_priv = dev->process_private;
+	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
 		return -EINVAL;
 
-	nfp_eth_table = hw->pf_dev->nfp_eth_table;
+	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
 	eth_port = &nfp_eth_table->ports[hw->idx];
 
 	supported_fec = nfp_eth_supported_fec_modes(eth_port);
diff --git a/drivers/net/nfp/nfp_net_common.h b/drivers/net/nfp/nfp_net_common.h
index 8066e77e6f..8dbe2fb973 100644
--- a/drivers/net/nfp/nfp_net_common.h
+++ b/drivers/net/nfp/nfp_net_common.h
@@ -12,7 +12,6 @@ 
 #include <nfp_dev.h>
 #include <rte_spinlock.h>
 
-#include "flower/nfp_flower_service.h"
 #include "nfpcore/nfp_sync.h"
 #include "nfp_net_ctrl.h"
 #include "nfp_service.h"
@@ -86,6 +85,8 @@  struct nfp_multi_pf {
 	uint8_t *beat_addr;
 };
 
+struct nfp_flower_service;
+
 struct nfp_process_share {
 	struct nfp_flower_service *fl_service;
 };
@@ -145,9 +146,6 @@  struct nfp_net_priv {
 };
 
 struct nfp_app_fw_nic {
-	/** Backpointer to the PF device */
-	struct nfp_pf_dev *pf_dev;
-
 	/**
 	 * Array of physical ports belonging to this CoreNIC app.
 	 * This is really a list of vNIC's, one for each physical port.
@@ -158,13 +156,14 @@  struct nfp_app_fw_nic {
 	uint8_t total_phyports;
 };
 
+struct nfp_net_hw_priv {
+	struct nfp_pf_dev *pf_dev;
+};
+
 struct nfp_net_hw {
 	/** The parent class */
 	struct nfp_hw super;
 
-	/** Backpointer to the PF this port belongs to */
-	struct nfp_pf_dev *pf_dev;
-
 	/** Backpointer to the eth_dev of this port */
 	struct rte_eth_dev *eth_dev;
 
@@ -314,7 +313,7 @@  int nfp_net_flow_ctrl_get(struct rte_eth_dev *dev,
 		struct rte_eth_fc_conf *fc_conf);
 int nfp_net_flow_ctrl_set(struct rte_eth_dev *dev,
 		struct rte_eth_fc_conf *fc_conf);
-void nfp_pf_uninit(struct nfp_pf_dev *pf_dev);
+void nfp_pf_uninit(struct nfp_net_hw_priv *hw_priv);
 int nfp_net_fec_get_capability(struct rte_eth_dev *dev,
 		struct rte_eth_fec_capa *speed_fec_capa,
 		unsigned int num);
diff --git a/drivers/net/nfp/nfp_net_flow.c b/drivers/net/nfp/nfp_net_flow.c
index ea743eed73..b0d1a57d99 100644
--- a/drivers/net/nfp/nfp_net_flow.c
+++ b/drivers/net/nfp/nfp_net_flow.c
@@ -658,10 +658,12 @@  nfp_net_flow_setup(struct rte_eth_dev *dev,
 	struct nfp_net_priv *priv;
 	struct rte_flow *nfp_flow;
 	struct rte_flow *flow_find;
+	struct nfp_net_hw_priv *hw_priv;
 	struct nfp_app_fw_nic *app_fw_nic;
 
 	hw = dev->data->dev_private;
-	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw->pf_dev->app_fw_priv);
+	hw_priv = dev->process_private;
+	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw_priv->pf_dev->app_fw_priv);
 	priv = app_fw_nic->ports[hw->idx]->priv;
 
 	ret = nfp_net_flow_calculate_items(items, &match_len);
@@ -775,10 +777,12 @@  nfp_net_flow_validate(struct rte_eth_dev *dev,
 	struct nfp_net_hw *hw;
 	struct rte_flow *nfp_flow;
 	struct nfp_net_priv *priv;
+	struct nfp_net_hw_priv *hw_priv;
 	struct nfp_app_fw_nic *app_fw_nic;
 
 	hw = dev->data->dev_private;
-	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw->pf_dev->app_fw_priv);
+	hw_priv = dev->process_private;
+	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw_priv->pf_dev->app_fw_priv);
 	priv = app_fw_nic->ports[hw->idx]->priv;
 
 	nfp_flow = nfp_net_flow_setup(dev, attr, items, actions);
@@ -811,10 +815,12 @@  nfp_net_flow_create(struct rte_eth_dev *dev,
 	struct nfp_net_hw *hw;
 	struct rte_flow *nfp_flow;
 	struct nfp_net_priv *priv;
+	struct nfp_net_hw_priv *hw_priv;
 	struct nfp_app_fw_nic *app_fw_nic;
 
 	hw = dev->data->dev_private;
-	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw->pf_dev->app_fw_priv);
+	hw_priv = dev->process_private;
+	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw_priv->pf_dev->app_fw_priv);
 	priv = app_fw_nic->ports[hw->idx]->priv;
 
 	nfp_flow = nfp_net_flow_setup(dev, attr, items, actions);
@@ -861,10 +867,12 @@  nfp_net_flow_destroy(struct rte_eth_dev *dev,
 	struct nfp_net_hw *hw;
 	struct nfp_net_priv *priv;
 	struct rte_flow *flow_find;
+	struct nfp_net_hw_priv *hw_priv;
 	struct nfp_app_fw_nic *app_fw_nic;
 
 	hw = dev->data->dev_private;
-	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw->pf_dev->app_fw_priv);
+	hw_priv = dev->process_private;
+	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw_priv->pf_dev->app_fw_priv);
 	priv = app_fw_nic->ports[hw->idx]->priv;
 
 	/* Find the flow in flow hash table */
@@ -920,10 +928,12 @@  nfp_net_flow_flush(struct rte_eth_dev *dev,
 	struct nfp_net_hw *hw;
 	struct rte_flow *nfp_flow;
 	struct rte_hash *flow_table;
+	struct nfp_net_hw_priv *hw_priv;
 	struct nfp_app_fw_nic *app_fw_nic;
 
 	hw = dev->data->dev_private;
-	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw->pf_dev->app_fw_priv);
+	hw_priv = dev->process_private;
+	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(hw_priv->pf_dev->app_fw_priv);
 	flow_table = app_fw_nic->ports[hw->idx]->priv->flow_table;
 
 	while (rte_hash_iterate(flow_table, &next_key, &next_data, &iter) >= 0) {
diff --git a/drivers/net/nfp/nfp_rxtx.c b/drivers/net/nfp/nfp_rxtx.c
index f9c4636688..12f362a4f4 100644
--- a/drivers/net/nfp/nfp_rxtx.c
+++ b/drivers/net/nfp/nfp_rxtx.c
@@ -418,6 +418,7 @@  nfp_net_recv_pkts(void *rx_queue,
 	struct nfp_net_dp_buf *rxb;
 	struct nfp_net_rx_desc *rxds;
 	uint16_t avail_multiplexed = 0;
+	struct nfp_net_hw_priv *hw_priv;
 
 	rxq = rx_queue;
 	if (unlikely(rxq == NULL)) {
@@ -430,6 +431,7 @@  nfp_net_recv_pkts(void *rx_queue,
 	}
 
 	hw = rxq->hw;
+	hw_priv = rxq->hw_priv;
 
 	while (avail + avail_multiplexed < nb_pkts) {
 		rxb = &rxq->rxbufs[rxq->rd_p];
@@ -520,7 +522,7 @@  nfp_net_recv_pkts(void *rx_queue,
 
 		if (((meta.flags >> NFP_NET_META_PORTID) & 0x1) == 0) {
 			rx_pkts[avail++] = mb;
-		} else if (nfp_flower_pf_dispatch_pkts(hw, mb, meta.port_id)) {
+		} else if (nfp_flower_pf_dispatch_pkts(hw_priv, mb, meta.port_id)) {
 			avail_multiplexed++;
 		} else {
 			rte_pktmbuf_free(mb);
@@ -684,6 +686,7 @@  nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
 	nfp_net_reset_rx_queue(rxq);
 
 	rxq->hw = hw;
+	rxq->hw_priv = dev->process_private;
 
 	/*
 	 * Telling the HW about the physical address of the RX ring and number
diff --git a/drivers/net/nfp/nfp_rxtx.h b/drivers/net/nfp/nfp_rxtx.h
index 0aa73a6432..8cd3649882 100644
--- a/drivers/net/nfp/nfp_rxtx.h
+++ b/drivers/net/nfp/nfp_rxtx.h
@@ -30,6 +30,7 @@  struct nfp_tx_ipsec_desc_msg {
 struct nfp_net_txq {
 	/** Backpointer to nfp_net structure */
 	struct nfp_net_hw *hw;
+	struct nfp_net_hw_priv *hw_priv;
 
 	/**
 	 * For each descriptor keep a reference to the mbuf and
@@ -69,8 +70,8 @@  struct nfp_net_txq {
 	uint16_t data_pending;
 
 	/**
-	 * At this point 50 bytes have been used for all the fields in the
-	 * TX critical path. We have room for 14 bytes and still all placed
+	 * At this point 58 bytes have been used for all the fields in the
+	 * TX critical path. We have room for 6 bytes and still all placed
 	 * in a cache line.
 	 */
 	uint64_t dma;
@@ -138,6 +139,7 @@  struct nfp_net_rx_desc {
 struct nfp_net_rxq {
 	/** Backpointer to nfp_net structure */
 	struct nfp_net_hw *hw;
+	struct nfp_net_hw_priv *hw_priv;
 
 	/**
 	 * Point to the base addresses of the freelist queue
@@ -188,8 +190,8 @@  struct nfp_net_rxq {
 	uint16_t qidx;
 
 	/**
-	 * At this point 54 bytes have been used for all the fields in the
-	 * RX critical path. We have room for 10 bytes and still all placed
+	 * At this point 62 bytes have been used for all the fields in the
+	 * RX critical path. We have room for 2 bytes and still all placed
 	 * in a cache line.
 	 */