[02/17] net/mana: add device configuration and stop
Commit Message
From: Long Li <longli@microsoft.com>
MANA defines its own memory allocation functions to override the IB layer's
default functions when allocating device queues. This patch adds the code for
device configuration and stop.
Signed-off-by: Long Li <longli@microsoft.com>
---
drivers/net/mana/mana.c | 87 ++++++++++++++++++++++++++++++++++++++++-
drivers/net/mana/mana.h | 3 --
2 files changed, 85 insertions(+), 5 deletions(-)
Comments
On Fri, 1 Jul 2022 02:02:32 -0700
longli@linuxonhyperv.com wrote:
> +
> +	if (txmode->offloads & ~BNIC_DEV_TX_OFFLOAD_SUPPORT) {
> +		DRV_LOG(ERR, "Unsupported TX offload: %" PRIx64, txmode->offloads);
> +		return -EINVAL;
> +	}
> +
> +	if (rxmode->offloads & ~BNIC_DEV_RX_OFFLOAD_SUPPORT) {
> +		DRV_LOG(ERR, "Unsupported RX offload: %" PRIx64, rxmode->offloads);
> +		return -EINVAL;
> +	}
> +
If the device reports the correct capabilities in dev_info.tx_offload_capa
and dev_info.rx_offload_capa, then these checks are unnecessary: the flags
are already validated against those capabilities in rte_eth_dev_configure().
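For illustration, a minimal hypothetical sketch (not part of this patch) of how
the driver could advertise those capabilities, reusing the
BNIC_DEV_*_OFFLOAD_SUPPORT masks from this series; with this in place,
rte_eth_dev_configure() rejects unsupported offload flags before
mana_dev_configure() ever runs:

static int
mana_dev_info_get(struct rte_eth_dev *dev __rte_unused,
		  struct rte_eth_dev_info *dev_info)
{
	/* Advertise supported offloads; the ethdev layer validates
	 * requested offloads against these masks at configure time.
	 */
	dev_info->tx_offload_capa = BNIC_DEV_TX_OFFLOAD_SUPPORT;
	dev_info->rx_offload_capa = BNIC_DEV_RX_OFFLOAD_SUPPORT;
	return 0;
}

Hooked up as .dev_infos_get in eth_dev_ops, this would make the explicit
offload checks in mana_dev_configure() redundant.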
@@ -57,7 +57,91 @@ static rte_spinlock_t mana_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
int mana_logtype_driver;
int mana_logtype_init;
+static void *mana_alloc_verbs_buf(size_t size, void *data)
+{
+	void *ret;
+	size_t alignment = rte_mem_page_size();
+	int socket = (int)(uintptr_t)data;
+
+	DRV_LOG(DEBUG, "size=%zu socket=%d", size, socket);
+
+	if (alignment == (size_t)-1) {
+		DRV_LOG(ERR, "Failed to get mem page size");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+
+	ret = rte_zmalloc_socket("mana_verb_buf", size, alignment, socket);
+	if (!ret && size)
+		rte_errno = ENOMEM;
+	return ret;
+}
+
+static void mana_free_verbs_buf(void *ptr, void *data __rte_unused)
+{
+	rte_free(ptr);
+}
+
+static int mana_dev_configure(struct rte_eth_dev *dev)
+{
+	struct mana_priv *priv = dev->data->dev_private;
+	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+	const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode;
+	const struct rte_eth_txmode *txmode = &dev_conf->txmode;
+
+	if (dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+		dev_conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+	if (txmode->offloads & ~BNIC_DEV_TX_OFFLOAD_SUPPORT) {
+		DRV_LOG(ERR, "Unsupported TX offload: %" PRIx64, txmode->offloads);
+		return -EINVAL;
+	}
+
+	if (rxmode->offloads & ~BNIC_DEV_RX_OFFLOAD_SUPPORT) {
+		DRV_LOG(ERR, "Unsupported RX offload: %" PRIx64, rxmode->offloads);
+		return -EINVAL;
+	}
+
+	if (dev->data->nb_rx_queues != dev->data->nb_tx_queues) {
+		DRV_LOG(ERR, "Only support equal number of RX/TX queues");
+		return -EINVAL;
+	}
+
+	if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
+		DRV_LOG(ERR, "Number of RX/TX queues must be a power of 2");
+		return -EINVAL;
+	}
+
+	priv->num_queues = dev->data->nb_rx_queues;
+
+	manadv_set_context_attr(priv->ib_ctx, MANADV_CTX_ATTR_BUF_ALLOCATORS,
+				(void *)((uintptr_t)&(struct manadv_ctx_allocators){
+					.alloc = &mana_alloc_verbs_buf,
+					.free = &mana_free_verbs_buf,
+					.data = 0,
+				}));
+
+	return 0;
+}
+
+static int
+mana_dev_close(struct rte_eth_dev *dev)
+{
+	struct mana_priv *priv = dev->data->dev_private;
+	int ret;
+
+	ret = ibv_close_device(priv->ib_ctx);
+	if (ret) {
+		ret = errno;
+		return ret;
+	}
+
+	return 0;
+}
+
const struct eth_dev_ops mana_dev_ops = {
+	.dev_configure = mana_dev_configure,
+	.dev_close = mana_dev_close,
};
const struct eth_dev_ops mana_dev_sec_ops = {
@@ -652,8 +736,7 @@ static int mana_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
static int mana_dev_uninit(struct rte_eth_dev *dev)
{
-	RTE_SET_USED(dev);
-	return 0;
+	return mana_dev_close(dev);
}
static int mana_pci_remove(struct rte_pci_device *pci_dev)
@@ -177,9 +177,6 @@ uint16_t mana_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts,
uint16_t mana_tx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts,
uint16_t pkts_n);
-void *mana_alloc_verbs_buf(size_t size, void *data);
-void mana_free_verbs_buf(void *ptr, void *data);
-
/** Request timeout for IPC. */
#define MANA_MP_REQ_TIMEOUT_SEC 5
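
For reference, a hypothetical application-side sketch (not part of this patch)
of a port configuration that satisfies the constraints mana_dev_configure()
enforces, namely equal RX/TX queue counts and a power-of-2 number of queues:

#include <rte_ethdev.h>

static int
configure_mana_port(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
	};

	/* 4 RX and 4 TX queues: counts are equal and a power of 2,
	 * so mana_dev_configure() accepts the configuration.
	 */
	return rte_eth_dev_configure(port_id, 4, 4, &conf);
}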