@@ -48,11 +48,7 @@ nfp_flower_pf_start(struct rte_eth_dev *dev)
nfp_net_params_setup(net_hw);
update |= NFP_NET_CFG_UPDATE_RSS;
-
- if ((hw->cap & NFP_NET_CFG_CTRL_RSS2) != 0)
- new_ctrl |= NFP_NET_CFG_CTRL_RSS2;
- else
- new_ctrl |= NFP_NET_CFG_CTRL_RSS;
+ new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap);
/* Enable device */
new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
@@ -62,8 +58,6 @@ nfp_flower_pf_start(struct rte_eth_dev *dev)
if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0)
new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
- nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
-
/* If an error when reconfig we avoid to change hw state */
ret = nfp_reconfig(hw, new_ctrl, update);
if (ret != 0) {
@@ -88,43 +82,6 @@ nfp_flower_pf_start(struct rte_eth_dev *dev)
return 0;
}
-/* Stop device: disable rx and tx functions to allow for reconfiguring. */
-int
-nfp_flower_pf_stop(struct rte_eth_dev *dev)
-{
- uint16_t i;
- struct nfp_net_hw *hw;
- struct nfp_net_txq *this_tx_q;
- struct nfp_net_rxq *this_rx_q;
- struct nfp_flower_representor *repr;
-
- repr = dev->data->dev_private;
- hw = repr->app_fw_flower->pf_hw;
-
- nfp_net_disable_queues(dev);
-
- /* Clear queues */
- for (i = 0; i < dev->data->nb_tx_queues; i++) {
- this_tx_q = dev->data->tx_queues[i];
- nfp_net_reset_tx_queue(this_tx_q);
- dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
- }
-
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- this_rx_q = dev->data->rx_queues[i];
- nfp_net_reset_rx_queue(this_rx_q);
- dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
- }
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- /* Configure the physical port down */
- nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
- else
- nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 0);
-
- return 0;
-}
-
/* Reset and stop device. The device can not be restarted. */
static int
nfp_flower_pf_close(struct rte_eth_dev *dev)
@@ -188,7 +145,7 @@ static const struct eth_dev_ops nfp_flower_pf_vnic_ops = {
.dev_configure = nfp_net_configure,
.dev_start = nfp_flower_pf_start,
- .dev_stop = nfp_flower_pf_stop,
+ .dev_stop = nfp_net_stop,
.dev_close = nfp_flower_pf_close,
};
@@ -113,7 +113,6 @@ bool nfp_flower_pf_dispatch_pkts(struct nfp_net_hw *hw,
uint16_t nfp_flower_pf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
int nfp_flower_pf_start(struct rte_eth_dev *dev);
-int nfp_flower_pf_stop(struct rte_eth_dev *dev);
uint32_t nfp_flower_pkt_add_metadata(struct nfp_app_fw_flower *app_fw_flower,
struct rte_mbuf *mbuf, uint32_t port_id);
@@ -18,232 +18,23 @@ enum nfp_repr_type {
NFP_REPR_TYPE_MAX, /*<< Number of representor types */
};
-static int
-nfp_pf_repr_rx_queue_setup(struct rte_eth_dev *dev,
- uint16_t queue_idx,
- uint16_t nb_desc,
- unsigned int socket_id,
- const struct rte_eth_rxconf *rx_conf,
- struct rte_mempool *mp)
-{
- struct nfp_net_hw *hw;
- struct nfp_net_rxq *rxq;
- const struct rte_memzone *tz;
- struct nfp_flower_representor *repr;
-
- repr = dev->data->dev_private;
- hw = repr->app_fw_flower->pf_hw;
-
- /* Allocating rx queue data structure */
- rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (rxq == NULL)
- return -ENOMEM;
-
- dev->data->rx_queues[queue_idx] = rxq;
-
- /* Hw queues mapping based on firmware configuration */
- rxq->qidx = queue_idx;
- rxq->fl_qcidx = queue_idx * hw->stride_rx;
- rxq->qcp_fl = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->fl_qcidx);
-
- /*
- * Tracking mbuf size for detecting a potential mbuf overflow due to
- * RX offset.
- */
- rxq->mem_pool = mp;
- rxq->mbuf_size = rxq->mem_pool->elt_size;
- rxq->mbuf_size -= (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
- hw->flbufsz = rxq->mbuf_size;
-
- rxq->rx_count = nb_desc;
- rxq->port_id = dev->data->port_id;
- rxq->rx_free_thresh = rx_conf->rx_free_thresh;
-
- /*
- * Allocate RX ring hardware descriptors. A memzone large enough to
- * handle the maximum ring size is allocated in order to allow for
- * resizing in later calls to the queue setup function.
- */
- tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
- sizeof(struct nfp_net_rx_desc) *
- hw->dev_info->max_qc_size,
- NFP_MEMZONE_ALIGN, socket_id);
- if (tz == NULL) {
- PMD_DRV_LOG(ERR, "Error allocating rx dma");
- nfp_net_rx_queue_release(dev, queue_idx);
- dev->data->rx_queues[queue_idx] = NULL;
- return -ENOMEM;
- }
-
- /* Saving physical and virtual addresses for the RX ring */
- rxq->dma = (uint64_t)tz->iova;
- rxq->rxds = tz->addr;
-
- /* Mbuf pointers array for referencing mbufs linked to RX descriptors */
- rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
- sizeof(*rxq->rxbufs) * nb_desc,
- RTE_CACHE_LINE_SIZE, socket_id);
- if (rxq->rxbufs == NULL) {
- nfp_net_rx_queue_release(dev, queue_idx);
- dev->data->rx_queues[queue_idx] = NULL;
- return -ENOMEM;
- }
-
- nfp_net_reset_rx_queue(rxq);
- rxq->hw = hw;
-
- /*
- * Telling the HW about the physical address of the RX ring and number
- * of descriptors in log2 format.
- */
- nn_cfg_writeq(&hw->super, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
- nn_cfg_writeb(&hw->super, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
-
- return 0;
-}
-
-static int
-nfp_pf_repr_tx_queue_setup(struct rte_eth_dev *dev,
- uint16_t queue_idx,
- uint16_t nb_desc,
- unsigned int socket_id,
- const struct rte_eth_txconf *tx_conf)
-{
- struct nfp_net_hw *hw;
- struct nfp_net_txq *txq;
- uint16_t tx_free_thresh;
- const struct rte_memzone *tz;
- struct nfp_flower_representor *repr;
-
- repr = dev->data->dev_private;
- hw = repr->app_fw_flower->pf_hw;
-
- tx_free_thresh = (tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
- DEFAULT_TX_FREE_THRESH;
- if (tx_free_thresh > nb_desc)
- return -EINVAL;
-
- /* Allocating tx queue data structure */
- txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (txq == NULL) {
- PMD_DRV_LOG(ERR, "Error allocating tx dma");
- return -ENOMEM;
- }
-
- dev->data->tx_queues[queue_idx] = txq;
-
- /*
- * Allocate TX ring hardware descriptors. A memzone large enough to
- * handle the maximum ring size is allocated in order to allow for
- * resizing in later calls to the queue setup function.
- */
- tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
- sizeof(struct nfp_net_nfd3_tx_desc) *
- hw->dev_info->max_qc_size,
- NFP_MEMZONE_ALIGN, socket_id);
- if (tz == NULL) {
- PMD_DRV_LOG(ERR, "Error allocating tx dma");
- nfp_net_tx_queue_release(dev, queue_idx);
- dev->data->tx_queues[queue_idx] = NULL;
- return -ENOMEM;
- }
-
- txq->tx_count = nb_desc;
- txq->tx_free_thresh = tx_free_thresh;
-
- /* Queue mapping based on firmware configuration */
- txq->qidx = queue_idx;
- txq->tx_qcidx = queue_idx * hw->stride_tx;
- txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
-
- txq->port_id = dev->data->port_id;
-
- /* Saving physical and virtual addresses for the TX ring */
- txq->dma = (uint64_t)tz->iova;
- txq->txds = tz->addr;
-
- /* Mbuf pointers array for referencing mbufs linked to TX descriptors */
- txq->txbufs = rte_zmalloc_socket("txq->txbufs",
- sizeof(*txq->txbufs) * nb_desc,
- RTE_CACHE_LINE_SIZE, socket_id);
- if (txq->txbufs == NULL) {
- nfp_net_tx_queue_release(dev, queue_idx);
- dev->data->tx_queues[queue_idx] = NULL;
- return -ENOMEM;
- }
-
- nfp_net_reset_tx_queue(txq);
- txq->hw = hw;
-
- /*
- * Telling the HW about the physical address of the TX ring and number
- * of descriptors in log2 format.
- */
- nn_cfg_writeq(&hw->super, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
- nn_cfg_writeb(&hw->super, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));
-
- return 0;
-}
-
static int
nfp_flower_repr_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
int ret;
- uint32_t i;
uint32_t nn_link_status;
struct nfp_net_hw *pf_hw;
struct rte_eth_link *link;
- struct nfp_eth_table *nfp_eth_table;
struct nfp_flower_representor *repr;
- static const uint32_t ls_to_ethtool[] = {
- [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = RTE_ETH_SPEED_NUM_NONE,
- [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = RTE_ETH_SPEED_NUM_NONE,
- [NFP_NET_CFG_STS_LINK_RATE_1G] = RTE_ETH_SPEED_NUM_1G,
- [NFP_NET_CFG_STS_LINK_RATE_10G] = RTE_ETH_SPEED_NUM_10G,
- [NFP_NET_CFG_STS_LINK_RATE_25G] = RTE_ETH_SPEED_NUM_25G,
- [NFP_NET_CFG_STS_LINK_RATE_40G] = RTE_ETH_SPEED_NUM_40G,
- [NFP_NET_CFG_STS_LINK_RATE_50G] = RTE_ETH_SPEED_NUM_50G,
- [NFP_NET_CFG_STS_LINK_RATE_100G] = RTE_ETH_SPEED_NUM_100G,
- };
-
repr = dev->data->dev_private;
link = &repr->link;
- link->link_speed = RTE_ETH_SPEED_NUM_NONE;
- pf_hw = repr->app_fw_flower->pf_hw;
- if (link->link_status == RTE_ETH_LINK_UP) {
- if (pf_hw->pf_dev != NULL) {
- nfp_eth_table = pf_hw->pf_dev->nfp_eth_table;
- if (nfp_eth_table != NULL) {
- uint32_t speed = nfp_eth_table->ports[pf_hw->idx].speed;
- for (i = 0; i < RTE_DIM(ls_to_ethtool); i++) {
- if (ls_to_ethtool[i] == speed) {
- link->link_speed = speed;
- break;
- }
- }
- }
- } else {
- nn_link_status = nn_cfg_readw(&pf_hw->super, NFP_NET_CFG_STS);
- nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
- NFP_NET_CFG_STS_LINK_RATE_MASK;
-
- if (nn_link_status < RTE_DIM(ls_to_ethtool))
- link->link_speed = ls_to_ethtool[nn_link_status];
- }
- }
+ pf_hw = repr->app_fw_flower->pf_hw;
+ nn_link_status = nn_cfg_readw(&pf_hw->super, NFP_NET_CFG_STS);
- ret = rte_eth_linkstatus_set(dev, link);
- if (ret == 0) {
- if (link->link_status)
- PMD_DRV_LOG(INFO, "NIC Link is Up");
- else
- PMD_DRV_LOG(INFO, "NIC Link is Down");
- }
+ ret = nfp_net_link_update_common(dev, pf_hw, link, nn_link_status);
return ret;
}
@@ -275,30 +66,6 @@ nfp_flower_repr_dev_infos_get(__rte_unused struct rte_eth_dev *dev,
return 0;
}
-static int
-nfp_flower_repr_dev_configure(struct rte_eth_dev *dev)
-{
- struct nfp_net_hw *pf_hw;
- struct rte_eth_conf *dev_conf;
- struct rte_eth_rxmode *rxmode;
- struct nfp_flower_representor *repr;
-
- repr = dev->data->dev_private;
- pf_hw = repr->app_fw_flower->pf_hw;
-
- dev_conf = &dev->data->dev_conf;
- rxmode = &dev_conf->rxmode;
-
- /* Checking MTU set */
- if (rxmode->mtu > pf_hw->flbufsz) {
- PMD_DRV_LOG(INFO, "MTU (%u) larger then current mbufsize (%u) not supported",
- rxmode->mtu, pf_hw->flbufsz);
- return -ERANGE;
- }
-
- return 0;
-}
-
static int
nfp_flower_repr_dev_start(struct rte_eth_dev *dev)
{
@@ -528,11 +295,11 @@ static const struct eth_dev_ops nfp_flower_pf_repr_dev_ops = {
.dev_infos_get = nfp_flower_repr_dev_infos_get,
.dev_start = nfp_flower_pf_start,
- .dev_configure = nfp_flower_repr_dev_configure,
- .dev_stop = nfp_flower_pf_stop,
+ .dev_configure = nfp_net_configure,
+ .dev_stop = nfp_net_stop,
- .rx_queue_setup = nfp_pf_repr_rx_queue_setup,
- .tx_queue_setup = nfp_pf_repr_tx_queue_setup,
+ .rx_queue_setup = nfp_net_rx_queue_setup,
+ .tx_queue_setup = nfp_net_tx_queue_setup,
.link_update = nfp_flower_repr_link_update,
@@ -543,14 +310,14 @@ static const struct eth_dev_ops nfp_flower_pf_repr_dev_ops = {
.promiscuous_disable = nfp_net_promisc_disable,
.mac_addr_set = nfp_flower_repr_mac_addr_set,
- .fw_version_get = nfp_repr_firmware_version_get,
+ .fw_version_get = nfp_net_firmware_version_get,
};
static const struct eth_dev_ops nfp_flower_repr_dev_ops = {
.dev_infos_get = nfp_flower_repr_dev_infos_get,
.dev_start = nfp_flower_repr_dev_start,
- .dev_configure = nfp_flower_repr_dev_configure,
+ .dev_configure = nfp_net_configure,
.dev_stop = nfp_flower_repr_dev_stop,
.rx_queue_setup = nfp_flower_repr_rx_queue_setup,
@@ -565,7 +332,7 @@ static const struct eth_dev_ops nfp_flower_repr_dev_ops = {
.promiscuous_disable = nfp_net_promisc_disable,
.mac_addr_set = nfp_flower_repr_mac_addr_set,
- .fw_version_get = nfp_repr_firmware_version_get,
+ .fw_version_get = nfp_net_firmware_version_get,
.flow_ops_get = nfp_net_flow_ops_get,
.mtr_ops_get = nfp_net_mtr_ops_get,
@@ -377,7 +377,7 @@ nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t tx_free_thresh;
const struct rte_memzone *tz;
- hw = dev->data->dev_private;
+ hw = nfp_net_get_hw(dev);
nfp_net_tx_desc_limits(hw, &min_tx_desc, &max_tx_desc);
@@ -454,7 +454,7 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,
struct nfp_net_txq *txq;
const struct rte_memzone *tz;
- hw = dev->data->dev_private;
+ hw = nfp_net_get_hw(dev);
nfp_net_tx_desc_limits(hw, &min_tx_desc, &max_tx_desc);
@@ -51,7 +51,6 @@ nfp_net_start(struct rte_eth_dev *dev)
uint32_t ctrl_extend = 0;
struct nfp_net_hw *net_hw;
struct nfp_pf_dev *pf_dev;
- struct rte_eth_conf *dev_conf;
struct rte_eth_rxmode *rxmode;
struct nfp_app_fw_nic *app_fw_nic;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
@@ -113,9 +112,7 @@ nfp_net_start(struct rte_eth_dev *dev)
/* Writing configuration parameters in the device */
nfp_net_params_setup(net_hw);
- dev_conf = &dev->data->dev_conf;
- rxmode = &dev_conf->rxmode;
-
+ rxmode = &dev->data->dev_conf.rxmode;
if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) != 0) {
nfp_net_rss_config_default(dev);
update |= NFP_NET_CFG_UPDATE_RSS;
@@ -197,29 +194,6 @@ nfp_net_start(struct rte_eth_dev *dev)
return ret;
}
-/* Stop device: disable rx and tx functions to allow for reconfiguring. */
-static int
-nfp_net_stop(struct rte_eth_dev *dev)
-{
- struct nfp_net_hw *hw;
-
- hw = dev->data->dev_private;
-
- nfp_net_disable_queues(dev);
-
- /* Clear queues */
- nfp_net_stop_tx_queue(dev);
- nfp_net_stop_rx_queue(dev);
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- /* Configure the physical port down */
- nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
- else
- nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 0);
-
- return 0;
-}
-
/* Set the link up. */
static int
nfp_net_set_link_up(struct rte_eth_dev *dev)
@@ -581,43 +581,27 @@ nfp_net_promisc_disable(struct rte_eth_dev *dev)
return 0;
}
-/*
- * Return 0 means link status changed, -1 means not changed
- *
- * Wait to complete is needed as it can take up to 9 seconds to get the Link
- * status.
- */
int
-nfp_net_link_update(struct rte_eth_dev *dev,
- __rte_unused int wait_to_complete)
+nfp_net_link_update_common(struct rte_eth_dev *dev,
+ struct nfp_net_hw *hw,
+ struct rte_eth_link *link,
+ uint32_t link_status)
{
int ret;
uint32_t i;
- struct nfp_net_hw *hw;
uint32_t nn_link_status;
- struct rte_eth_link link;
struct nfp_eth_table *nfp_eth_table;
- hw = nfp_net_get_hw(dev);
-
- memset(&link, 0, sizeof(struct rte_eth_link));
+ link->link_speed = RTE_ETH_SPEED_NUM_NONE;
- /* Read link status */
- nn_link_status = nn_cfg_readw(&hw->super, NFP_NET_CFG_STS);
- if ((nn_link_status & NFP_NET_CFG_STS_LINK) != 0)
- link.link_status = RTE_ETH_LINK_UP;
-
- link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
- link.link_speed = RTE_ETH_SPEED_NUM_NONE;
-
- if (link.link_status == RTE_ETH_LINK_UP) {
+ if (link->link_status == RTE_ETH_LINK_UP) {
if (hw->pf_dev != NULL) {
nfp_eth_table = hw->pf_dev->nfp_eth_table;
if (nfp_eth_table != NULL) {
uint32_t speed = nfp_eth_table->ports[hw->idx].speed;
for (i = 0; i < RTE_DIM(nfp_net_link_speed_nfp2rte); i++) {
if (nfp_net_link_speed_nfp2rte[i] == speed) {
- link.link_speed = speed;
+ link->link_speed = speed;
break;
}
}
@@ -627,21 +611,52 @@ nfp_net_link_update(struct rte_eth_dev *dev,
* Shift and mask nn_link_status so that it is effectively the value
* at offset NFP_NET_CFG_STS_NSP_LINK_RATE.
*/
- nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
+ nn_link_status = (link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
NFP_NET_CFG_STS_LINK_RATE_MASK;
if (nn_link_status < RTE_DIM(nfp_net_link_speed_nfp2rte))
- link.link_speed = nfp_net_link_speed_nfp2rte[nn_link_status];
+ link->link_speed = nfp_net_link_speed_nfp2rte[nn_link_status];
}
}
- ret = rte_eth_linkstatus_set(dev, &link);
+ ret = rte_eth_linkstatus_set(dev, link);
if (ret == 0) {
- if (link.link_status != 0)
+ if (link->link_status != 0)
PMD_DRV_LOG(INFO, "NIC Link is Up");
else
PMD_DRV_LOG(INFO, "NIC Link is Down");
}
+ return ret;
+}
+
+/*
+ * Return 0 means link status changed, -1 means not changed
+ *
+ * Wait to complete is needed as it can take up to 9 seconds to get the Link
+ * status.
+ */
+int
+nfp_net_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ int ret;
+ struct nfp_net_hw *hw;
+ uint32_t nn_link_status;
+ struct rte_eth_link link;
+
+ hw = nfp_net_get_hw(dev);
+
+ memset(&link, 0, sizeof(struct rte_eth_link));
+
+ /* Read link status */
+ nn_link_status = nn_cfg_readw(&hw->super, NFP_NET_CFG_STS);
+ if ((nn_link_status & NFP_NET_CFG_STS_LINK) != 0)
+ link.link_status = RTE_ETH_LINK_UP;
+
+ link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+
+ ret = nfp_net_link_update_common(dev, hw, &link, nn_link_status);
+
/*
* Notify the port to update the speed value in the CTRL BAR from NSP.
* Not applicable for VFs as the associated PF is still attached to the
@@ -1996,11 +2011,15 @@ nfp_net_firmware_version_get(struct rte_eth_dev *dev,
if (fw_size < FW_VER_LEN)
return FW_VER_LEN;
- hw = dev->data->dev_private;
+ hw = nfp_net_get_hw(dev);
- snprintf(vnic_version, FW_VER_LEN, "%d.%d.%d.%d",
+ if ((dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) == 0) {
+ snprintf(vnic_version, FW_VER_LEN, "%d.%d.%d.%d",
hw->ver.extend, hw->ver.class,
hw->ver.major, hw->ver.minor);
+ } else {
+ snprintf(vnic_version, FW_VER_LEN, "*");
+ }
nfp_net_get_nsp_info(hw, nsp_version);
nfp_net_get_mip_name(hw, mip_name);
@@ -2012,33 +2031,6 @@ nfp_net_firmware_version_get(struct rte_eth_dev *dev,
return 0;
}
-int
-nfp_repr_firmware_version_get(struct rte_eth_dev *dev,
- char *fw_version,
- size_t fw_size)
-{
- struct nfp_net_hw *hw;
- char mip_name[FW_VER_LEN];
- char app_name[FW_VER_LEN];
- char nsp_version[FW_VER_LEN];
- struct nfp_flower_representor *repr;
-
- if (fw_size < FW_VER_LEN)
- return FW_VER_LEN;
-
- repr = dev->data->dev_private;
- hw = repr->app_fw_flower->pf_hw;
-
- nfp_net_get_nsp_info(hw, nsp_version);
- nfp_net_get_mip_name(hw, mip_name);
- nfp_net_get_app_name(hw, app_name);
-
- snprintf(fw_version, FW_VER_LEN, "* %s %s %s",
- nsp_version, mip_name, app_name);
-
- return 0;
-}
-
bool
nfp_net_is_valid_nfd_version(struct nfp_net_fw_ver version)
{
@@ -2059,3 +2051,26 @@ nfp_net_is_valid_nfd_version(struct nfp_net_fw_ver version)
return false;
}
+
+/* Disable rx and tx functions to allow for reconfiguring. */
+int
+nfp_net_stop(struct rte_eth_dev *dev)
+{
+ struct nfp_net_hw *hw;
+
+ hw = nfp_net_get_hw(dev);
+
+ nfp_net_disable_queues(dev);
+
+ /* Clear queues */
+ nfp_net_stop_tx_queue(dev);
+ nfp_net_stop_rx_queue(dev);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ /* Configure the physical port down */
+ nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
+ else
+ nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 0);
+
+ return 0;
+}
@@ -178,6 +178,10 @@ int nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
uint32_t nfp_check_offloads(struct rte_eth_dev *dev);
int nfp_net_promisc_enable(struct rte_eth_dev *dev);
int nfp_net_promisc_disable(struct rte_eth_dev *dev);
+int nfp_net_link_update_common(struct rte_eth_dev *dev,
+ struct nfp_net_hw *hw,
+ struct rte_eth_link *link,
+ uint32_t link_status);
int nfp_net_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete);
int nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
@@ -235,9 +239,9 @@ int nfp_net_check_dma_mask(struct nfp_net_hw *hw, char *name);
void nfp_net_init_metadata_format(struct nfp_net_hw *hw);
void nfp_net_cfg_read_version(struct nfp_net_hw *hw);
int nfp_net_firmware_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size);
-int nfp_repr_firmware_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size);
bool nfp_net_is_valid_nfd_version(struct nfp_net_fw_ver version);
struct nfp_net_hw *nfp_net_get_hw(const struct rte_eth_dev *dev);
+int nfp_net_stop(struct rte_eth_dev *dev);
#define NFP_PRIV_TO_APP_FW_NIC(app_fw_priv)\
((struct nfp_app_fw_nic *)app_fw_priv)
@@ -840,7 +840,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
struct nfp_net_rxq *rxq;
const struct rte_memzone *tz;
- hw = dev->data->dev_private;
+ hw = nfp_net_get_hw(dev);
nfp_net_rx_desc_limits(hw, &min_rx_desc, &max_rx_desc);
@@ -1067,7 +1067,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev,
{
struct nfp_net_hw *hw;
- hw = dev->data->dev_private;
+ hw = nfp_net_get_hw(dev);
if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
return nfp_net_nfd3_tx_queue_setup(dev, queue_idx,