@@ -166,15 +166,15 @@ nfp_flower_pf_nfdk_xmit_pkts(void *tx_queue,
}
static void
-nfp_flower_pf_xmit_pkts_register(struct nfp_app_fw_flower *app_fw_flower)
+nfp_flower_pf_xmit_pkts_register(struct nfp_pf_dev *pf_dev)
{
- struct nfp_net_hw *hw;
struct nfp_flower_nfd_func *nfd_func;
+ struct nfp_app_fw_flower *app_fw_flower;
- hw = app_fw_flower->pf_hw;
+ app_fw_flower = pf_dev->app_fw_priv;
nfd_func = &app_fw_flower->nfd_func;
- if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
+ if (pf_dev->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
nfd_func->pf_xmit_t = nfp_flower_pf_nfd3_xmit_pkts;
else
nfd_func->pf_xmit_t = nfp_flower_pf_nfdk_xmit_pkts;
@@ -204,14 +204,12 @@ nfp_flower_init_vnic_common(struct nfp_net_hw_priv *hw_priv,
uint64_t rx_bar_off;
uint64_t tx_bar_off;
struct nfp_pf_dev *pf_dev;
- struct rte_pci_device *pci_dev;
pf_dev = hw_priv->pf_dev;
- pci_dev = pf_dev->pci_dev;
PMD_INIT_LOG(DEBUG, "%s vNIC ctrl bar: %p", vnic_type, hw->super.ctrl_bar);
- err = nfp_net_common_init(pci_dev, hw);
+ err = nfp_net_common_init(pf_dev, hw);
if (err != 0)
return err;
@@ -612,15 +610,15 @@ nfp_flower_start_ctrl_vnic(struct nfp_app_fw_flower *app_fw_flower)
}
static void
-nfp_flower_pkt_add_metadata_register(struct nfp_app_fw_flower *app_fw_flower)
+nfp_flower_pkt_add_metadata_register(struct nfp_pf_dev *pf_dev)
{
- struct nfp_net_hw *hw;
struct nfp_flower_nfd_func *nfd_func;
+ struct nfp_app_fw_flower *app_fw_flower;
- hw = app_fw_flower->pf_hw;
+ app_fw_flower = pf_dev->app_fw_priv;
nfd_func = &app_fw_flower->nfd_func;
- if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
+ if (pf_dev->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
nfd_func->pkt_add_metadata_t = nfp_flower_nfd3_pkt_add_metadata;
else
nfd_func->pkt_add_metadata_t = nfp_flower_nfdk_pkt_add_metadata;
@@ -635,11 +633,11 @@ nfp_flower_pkt_add_metadata(struct nfp_app_fw_flower *app_fw_flower,
}
static void
-nfp_flower_nfd_func_register(struct nfp_app_fw_flower *app_fw_flower)
+nfp_flower_nfd_func_register(struct nfp_pf_dev *pf_dev)
{
- nfp_flower_pkt_add_metadata_register(app_fw_flower);
- nfp_flower_ctrl_vnic_xmit_register(app_fw_flower);
- nfp_flower_pf_xmit_pkts_register(app_fw_flower);
+ nfp_flower_pkt_add_metadata_register(pf_dev);
+ nfp_flower_ctrl_vnic_xmit_register(pf_dev);
+ nfp_flower_pf_xmit_pkts_register(pf_dev);
}
int
@@ -730,7 +728,7 @@ nfp_init_app_fw_flower(struct nfp_net_hw_priv *hw_priv)
goto pf_cpp_area_cleanup;
}
- nfp_flower_nfd_func_register(app_fw_flower);
+ nfp_flower_nfd_func_register(pf_dev);
/* The ctrl vNIC struct comes directly after the PF one */
app_fw_flower->ctrl_hw = pf_hw + 1;
@@ -343,15 +343,15 @@ nfp_flower_ctrl_vnic_nfdk_xmit(struct nfp_app_fw_flower *app_fw_flower,
}
void
-nfp_flower_ctrl_vnic_xmit_register(struct nfp_app_fw_flower *app_fw_flower)
+nfp_flower_ctrl_vnic_xmit_register(struct nfp_pf_dev *pf_dev)
{
- struct nfp_net_hw *hw;
struct nfp_flower_nfd_func *nfd_func;
+ struct nfp_app_fw_flower *app_fw_flower;
- hw = app_fw_flower->pf_hw;
+ app_fw_flower = pf_dev->app_fw_priv;
nfd_func = &app_fw_flower->nfd_func;
- if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
+ if (pf_dev->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
nfd_func->ctrl_vnic_xmit_t = nfp_flower_ctrl_vnic_nfd3_xmit;
else
nfd_func->ctrl_vnic_xmit_t = nfp_flower_ctrl_vnic_nfdk_xmit;
@@ -11,6 +11,6 @@
void nfp_flower_ctrl_vnic_process(struct nfp_net_hw_priv *hw_priv);
uint16_t nfp_flower_ctrl_vnic_xmit(struct nfp_app_fw_flower *app_fw_flower,
struct rte_mbuf *mbuf);
-void nfp_flower_ctrl_vnic_xmit_register(struct nfp_app_fw_flower *app_fw_flower);
+void nfp_flower_ctrl_vnic_xmit_register(struct nfp_pf_dev *pf_dev);
#endif /* __NFP_FLOWER_CTRL_H__ */
@@ -390,7 +390,7 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev,
hw = nfp_net_get_hw(dev);
hw_priv = dev->process_private;
- nfp_net_tx_desc_limits(hw, hw_priv, &min_tx_desc, &max_tx_desc);
+ nfp_net_tx_desc_limits(hw_priv, &min_tx_desc, &max_tx_desc);
/* Validating number of descriptors */
tx_desc_sz = nb_desc * sizeof(struct nfp_net_nfd3_tx_desc);
@@ -424,7 +424,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,
hw = nfp_net_get_hw(dev);
hw_priv = dev->process_private;
- nfp_net_tx_desc_limits(hw, hw_priv, &min_tx_desc, &max_tx_desc);
+ nfp_net_tx_desc_limits(hw_priv, &min_tx_desc, &max_tx_desc);
/* Validating number of descriptors */
tx_desc_sz = nb_desc * sizeof(struct nfp_net_nfdk_tx_desc);
@@ -959,10 +959,10 @@ static const struct eth_dev_ops nfp_net_eth_dev_ops = {
};
static inline void
-nfp_net_ethdev_ops_mount(struct nfp_net_hw *hw,
+nfp_net_ethdev_ops_mount(struct nfp_pf_dev *pf_dev,
struct rte_eth_dev *eth_dev)
{
- if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
+ if (pf_dev->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
eth_dev->tx_pkt_burst = nfp_net_nfd3_xmit_pkts;
else
nfp_net_nfdk_xmit_pkts_set(eth_dev);
@@ -1030,7 +1030,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev,
PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
PMD_INIT_LOG(DEBUG, "MAC stats: %p", net_hw->mac_stats);
- err = nfp_net_common_init(pci_dev, net_hw);
+ err = nfp_net_common_init(pf_dev, net_hw);
if (err != 0)
return err;
@@ -1046,7 +1046,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev,
return err;
}
- nfp_net_ethdev_ops_mount(net_hw, eth_dev);
+ nfp_net_ethdev_ops_mount(pf_dev, eth_dev);
net_hw->eth_xstats_base = rte_malloc("rte_eth_xstat", sizeof(struct rte_eth_xstat) *
nfp_net_xstats_size(eth_dev), 0);
@@ -1074,7 +1074,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev,
if ((hw->cap & NFP_NET_CFG_CTRL_LSO2) != 0)
hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
- nfp_net_log_device_information(net_hw);
+ nfp_net_log_device_information(net_hw, pf_dev);
/* Initializing spinlock for reconfigs */
rte_spinlock_init(&hw->reconfig_lock);
@@ -1552,9 +1552,6 @@ nfp_enable_multi_pf(struct nfp_pf_dev *pf_dev)
struct nfp_cpp_area *area;
char name[RTE_ETH_NAME_MAX_LEN];
- if (!pf_dev->multi_pf.enabled)
- return 0;
-
memset(&net_hw, 0, sizeof(struct nfp_net_hw));
/* Map the symbol table */
@@ -1570,6 +1567,16 @@ nfp_enable_multi_pf(struct nfp_pf_dev *pf_dev)
hw = &net_hw.super;
hw->ctrl_bar = ctrl_bar;
+ /* Check the version from firmware */
+ if (!nfp_net_version_check(hw, pf_dev)) {
+ PMD_INIT_LOG(ERR, "Invalid firmware version.");
+ err = -EINVAL;
+ goto end;
+ }
+
+ if (!pf_dev->multi_pf.enabled)
+ goto end;
+
cap_extend = nn_cfg_readl(hw, NFP_NET_CFG_CAP_WORD1);
if ((cap_extend & NFP_NET_CFG_CTRL_MULTI_PF) == 0) {
PMD_INIT_LOG(ERR, "Loaded firmware doesn't support multiple PF");
@@ -2358,10 +2365,10 @@ static int
nfp_secondary_net_init(struct rte_eth_dev *eth_dev,
void *para)
{
- struct nfp_net_hw *net_hw;
+ struct nfp_net_hw_priv *hw_priv;
- net_hw = eth_dev->data->dev_private;
- nfp_net_ethdev_ops_mount(net_hw, eth_dev);
+ hw_priv = para;
+ nfp_net_ethdev_ops_mount(hw_priv->pf_dev, eth_dev);
eth_dev->process_private = para;
@@ -235,10 +235,10 @@ static const struct eth_dev_ops nfp_netvf_eth_dev_ops = {
};
static inline void
-nfp_netvf_ethdev_ops_mount(struct nfp_net_hw *hw,
+nfp_netvf_ethdev_ops_mount(struct nfp_pf_dev *pf_dev,
struct rte_eth_dev *eth_dev)
{
- if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
+ if (pf_dev->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
eth_dev->tx_pkt_burst = nfp_net_nfd3_xmit_pkts;
else
nfp_net_nfdk_xmit_pkts_set(eth_dev);
@@ -256,6 +256,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
uint32_t start_q;
struct nfp_hw *hw;
struct nfp_net_hw *net_hw;
+ struct nfp_pf_dev *pf_dev;
uint64_t tx_bar_off = 0;
uint64_t rx_bar_off = 0;
struct rte_pci_device *pci_dev;
@@ -280,13 +281,27 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
return -ENODEV;
}
+ pf_dev = rte_zmalloc(NULL, sizeof(*pf_dev), 0);
+ if (pf_dev == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate memory for the PF device.");
+ return -ENOMEM;
+ }
+
+ pf_dev->pci_dev = pci_dev;
+
+ /* Check the version from firmware */
+ if (!nfp_net_version_check(hw, pf_dev)) {
+ err = -EINVAL;
+ goto pf_dev_free;
+ }
+
PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
- err = nfp_net_common_init(pci_dev, net_hw);
+ err = nfp_net_common_init(pf_dev, net_hw);
if (err != 0)
- return err;
+ goto pf_dev_free;
- nfp_netvf_ethdev_ops_mount(net_hw, eth_dev);
+ nfp_netvf_ethdev_ops_mount(pf_dev, eth_dev);
hw_priv = rte_zmalloc(NULL, sizeof(*hw_priv), 0);
if (hw_priv == NULL) {
@@ -296,6 +311,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
}
hw_priv->dev_info = dev_info;
+ hw_priv->pf_dev = pf_dev;
eth_dev->process_private = hw_priv;
@@ -330,7 +346,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
if ((hw->cap & NFP_NET_CFG_CTRL_LSO2) != 0)
hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
- nfp_net_log_device_information(net_hw);
+ nfp_net_log_device_information(net_hw, pf_dev);
/* Initializing spinlock for reconfigs */
rte_spinlock_init(&hw->reconfig_lock);
@@ -381,6 +397,8 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
rte_free(net_hw->eth_xstats_base);
hw_priv_free:
rte_free(hw_priv);
+pf_dev_free:
+ rte_free(pf_dev);
return err;
}
@@ -349,13 +349,14 @@ nfp_net_configure(struct rte_eth_dev *dev)
}
void
-nfp_net_log_device_information(const struct nfp_net_hw *hw)
+nfp_net_log_device_information(const struct nfp_net_hw *hw,
+ struct nfp_pf_dev *pf_dev)
{
uint32_t cap = hw->super.cap;
uint32_t cap_ext = hw->super.cap_ext;
PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
- hw->ver.major, hw->ver.minor, hw->max_mtu);
+ pf_dev->ver.major, pf_dev->ver.minor, hw->max_mtu);
PMD_INIT_LOG(INFO, "CAP: %#x", cap);
PMD_INIT_LOG(INFO, "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
@@ -1235,14 +1236,13 @@ nfp_net_rx_desc_limits(struct nfp_net_hw_priv *hw_priv,
}
void
-nfp_net_tx_desc_limits(struct nfp_net_hw *hw,
- struct nfp_net_hw_priv *hw_priv,
+nfp_net_tx_desc_limits(struct nfp_net_hw_priv *hw_priv,
uint16_t *min_tx_desc,
uint16_t *max_tx_desc)
{
uint16_t tx_dpp;
- if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
+ if (hw_priv->pf_dev->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
tx_dpp = NFD3_TX_DESC_PER_PKT;
else
tx_dpp = NFDK_TX_DESC_PER_SIMPLE_PKT;
@@ -1269,7 +1269,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
return -EINVAL;
nfp_net_rx_desc_limits(hw_priv, &min_rx_desc, &max_rx_desc);
- nfp_net_tx_desc_limits(hw, hw_priv, &min_tx_desc, &max_tx_desc);
+ nfp_net_tx_desc_limits(hw_priv, &min_tx_desc, &max_tx_desc);
dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
@@ -1373,11 +1373,13 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
}
int
-nfp_net_common_init(struct rte_pci_device *pci_dev,
+nfp_net_common_init(struct nfp_pf_dev *pf_dev,
struct nfp_net_hw *hw)
{
const int stride = 4;
+ struct rte_pci_device *pci_dev;
+ pci_dev = pf_dev->pci_dev;
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
@@ -1391,11 +1393,7 @@ nfp_net_common_init(struct rte_pci_device *pci_dev,
return -ENODEV;
}
- nfp_net_cfg_read_version(hw);
- if (!nfp_net_is_valid_nfd_version(hw->ver))
- return -EINVAL;
-
- if (nfp_net_check_dma_mask(hw, pci_dev->name) != 0)
+ if (nfp_net_check_dma_mask(pf_dev, pci_dev->name) != 0)
return -ENODEV;
/* Get some of the read-only fields from the config BAR */
@@ -1404,10 +1402,10 @@ nfp_net_common_init(struct rte_pci_device *pci_dev,
hw->max_mtu = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_MTU);
hw->flbufsz = DEFAULT_FLBUF_SIZE;
- nfp_net_meta_init_format(hw);
+ nfp_net_meta_init_format(hw, pf_dev);
/* Read the Rx offset configured from firmware */
- if (hw->ver.major < 2)
+ if (pf_dev->ver.major < 2)
hw->rx_offset = NFP_NET_RX_OFFSET;
else
hw->rx_offset = nn_cfg_readl(&hw->super, NFP_NET_CFG_RX_OFFSET);
@@ -2118,10 +2116,10 @@ nfp_net_set_vxlan_port(struct nfp_net_hw *net_hw,
* than 40 bits.
*/
int
-nfp_net_check_dma_mask(struct nfp_net_hw *hw,
+nfp_net_check_dma_mask(struct nfp_pf_dev *pf_dev,
char *name)
{
- if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3 &&
+ if (pf_dev->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3 &&
rte_mem_check_dma_mask(40) != 0) {
PMD_DRV_LOG(ERR, "Device %s can't be used: restricted dma mask to 40 bits!",
name);
@@ -2165,16 +2163,28 @@ nfp_net_txrwb_free(struct rte_eth_dev *eth_dev)
net_hw->txrwb_mz = NULL;
}
-void
-nfp_net_cfg_read_version(struct nfp_net_hw *hw)
+static void
+nfp_net_cfg_read_version(struct nfp_hw *hw,
+ struct nfp_pf_dev *pf_dev)
{
union {
uint32_t whole;
struct nfp_net_fw_ver split;
} version;
- version.whole = nn_cfg_readl(&hw->super, NFP_NET_CFG_VERSION);
- hw->ver = version.split;
+ version.whole = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
+ pf_dev->ver = version.split;
+}
+
+bool
+nfp_net_version_check(struct nfp_hw *hw,
+ struct nfp_pf_dev *pf_dev)
+{
+ nfp_net_cfg_read_version(hw, pf_dev);
+ if (!nfp_net_is_valid_nfd_version(pf_dev->ver))
+ return false;
+
+ return true;
}
static void
@@ -2249,6 +2259,7 @@ nfp_net_firmware_version_get(struct rte_eth_dev *dev,
size_t fw_size)
{
struct nfp_net_hw *hw;
+ struct nfp_pf_dev *pf_dev;
struct nfp_net_hw_priv *hw_priv;
char app_name[FW_VER_LEN] = {0};
char mip_name[FW_VER_LEN] = {0};
@@ -2260,6 +2271,7 @@ nfp_net_firmware_version_get(struct rte_eth_dev *dev,
hw = nfp_net_get_hw(dev);
hw_priv = dev->process_private;
+ pf_dev = hw_priv->pf_dev;
if (hw->fw_version[0] != 0) {
snprintf(fw_version, FW_VER_LEN, "%s", hw->fw_version);
@@ -2268,8 +2280,8 @@ nfp_net_firmware_version_get(struct rte_eth_dev *dev,
if (!rte_eth_dev_is_repr(dev)) {
snprintf(vnic_version, FW_VER_LEN, "%d.%d.%d.%d",
- hw->ver.extend, hw->ver.class,
- hw->ver.major, hw->ver.minor);
+ pf_dev->ver.extend, pf_dev->ver.class,
+ pf_dev->ver.major, pf_dev->ver.minor);
} else {
snprintf(vnic_version, FW_VER_LEN, "*");
}
@@ -108,6 +108,8 @@ struct nfp_pf_dev {
enum nfp_app_fw_id app_fw_id;
+ struct nfp_net_fw_ver ver;
+
/** Pointer to the app running on the PF */
void *app_fw_priv;
@@ -219,7 +221,6 @@ struct nfp_net_hw {
const struct rte_memzone *txrwb_mz;
/** Info from the firmware */
- struct nfp_net_fw_ver ver;
uint32_t max_mtu;
uint32_t mtu;
uint32_t rx_offset;
@@ -276,8 +277,9 @@ nfp_qcp_queue_offset(const struct nfp_dev_info *dev_info,
/* Prototypes for common NFP functions */
int nfp_net_mbox_reconfig(struct nfp_net_hw *hw, uint32_t mbox_cmd);
int nfp_net_configure(struct rte_eth_dev *dev);
-int nfp_net_common_init(struct rte_pci_device *pci_dev, struct nfp_net_hw *hw);
-void nfp_net_log_device_information(const struct nfp_net_hw *hw);
+int nfp_net_common_init(struct nfp_pf_dev *pf_dev, struct nfp_net_hw *hw);
+void nfp_net_log_device_information(const struct nfp_net_hw *hw,
+ struct nfp_pf_dev *pf_dev);
void nfp_net_enable_queues(struct rte_eth_dev *dev);
void nfp_net_disable_queues(struct rte_eth_dev *dev);
void nfp_net_params_setup(struct nfp_net_hw *hw);
@@ -345,12 +347,10 @@ int nfp_net_set_vxlan_port(struct nfp_net_hw *hw, size_t idx, uint16_t port);
void nfp_net_rx_desc_limits(struct nfp_net_hw_priv *hw_priv,
uint16_t *min_rx_desc,
uint16_t *max_rx_desc);
-void nfp_net_tx_desc_limits(struct nfp_net_hw *hw,
- struct nfp_net_hw_priv *hw_priv,
+void nfp_net_tx_desc_limits(struct nfp_net_hw_priv *hw_priv,
uint16_t *min_tx_desc,
uint16_t *max_tx_desc);
-int nfp_net_check_dma_mask(struct nfp_net_hw *hw, char *name);
-void nfp_net_cfg_read_version(struct nfp_net_hw *hw);
+int nfp_net_check_dma_mask(struct nfp_pf_dev *pf_dev, char *name);
int nfp_net_firmware_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size);
bool nfp_net_is_valid_nfd_version(struct nfp_net_fw_ver version);
struct nfp_net_hw *nfp_net_get_hw(const struct rte_eth_dev *dev);
@@ -377,6 +377,8 @@ uint8_t nfp_function_id_get(const struct nfp_pf_dev *pf_dev,
uint8_t port_id);
int nfp_net_vf_config_app_init(struct nfp_net_hw *net_hw,
struct nfp_pf_dev *pf_dev);
+bool nfp_net_version_check(struct nfp_hw *hw,
+ struct nfp_pf_dev *pf_dev);
#define NFP_PRIV_TO_APP_FW_NIC(app_fw_priv)\
((struct nfp_app_fw_nic *)app_fw_priv)
@@ -269,14 +269,15 @@ nfp_net_meta_parse(struct nfp_net_rx_desc *rxds,
}
void
-nfp_net_meta_init_format(struct nfp_net_hw *hw)
+nfp_net_meta_init_format(struct nfp_net_hw *hw,
+ struct nfp_pf_dev *pf_dev)
{
/*
* ABI 4.x and ctrl vNIC always use chained metadata, in other cases we allow use of
* single metadata if only RSS(v1) is supported by hw capability, and RSS(v2)
* also indicate that we are using chained metadata.
*/
- if (hw->ver.major == 4) {
+ if (pf_dev->ver.major == 4) {
hw->meta_format = NFP_NET_METAFORMAT_CHAINED;
} else if ((hw->super.cap & NFP_NET_CFG_CTRL_CHAIN_META) != 0) {
hw->meta_format = NFP_NET_METAFORMAT_CHAINED;
@@ -89,7 +89,10 @@ struct nfp_net_meta_parsed {
} vlan[NFP_NET_META_MAX_VLANS];
};
-void nfp_net_meta_init_format(struct nfp_net_hw *hw);
+struct nfp_pf_dev;
+
+void nfp_net_meta_init_format(struct nfp_net_hw *hw,
+ struct nfp_pf_dev *pf_dev);
void nfp_net_meta_parse(struct nfp_net_rx_desc *rxds,
struct nfp_net_rxq *rxq,
struct nfp_net_hw *hw,
@@ -816,11 +816,11 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev,
unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
- struct nfp_net_hw *hw;
+ struct nfp_net_hw_priv *hw_priv;
- hw = nfp_net_get_hw(dev);
+ hw_priv = dev->process_private;
- if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
+ if (hw_priv->pf_dev->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
return nfp_net_nfd3_tx_queue_setup(dev, queue_idx,
nb_desc, socket_id, tx_conf);
else
@@ -852,10 +852,10 @@ nfp_net_tx_queue_info_get(struct rte_eth_dev *dev,
struct rte_eth_txq_info *info)
{
struct rte_eth_dev_info dev_info;
- struct nfp_net_hw *hw = nfp_net_get_hw(dev);
+ struct nfp_net_hw_priv *hw_priv = dev->process_private;
struct nfp_net_txq *txq = dev->data->tx_queues[queue_id];
- if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
+ if (hw_priv->pf_dev->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
info->nb_desc = txq->tx_count / NFD3_TX_DESC_PER_PKT;
else
info->nb_desc = txq->tx_count / NFDK_TX_DESC_PER_SIMPLE_PKT;