[v2,02/17] net/mana: add device configuration and stop

Message ID 1657067328-18374-3-git-send-email-longli@linuxonhyperv.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Series Introduce Microsoft Azure Network Adapter (MANA) PMD

Checks

Context        Check     Description
ci/checkpatch  success   coding style OK

Commit Message

Long Li July 6, 2022, 12:28 a.m. UTC
  From: Long Li <longli@microsoft.com>

MANA defines its own memory allocation functions to override the IB layer's
default functions for allocating device queues. This patch adds the code for
device configuration and stop. A condensed sketch of the allocator hook-up
follows the diffstat below.

Signed-off-by: Long Li <longli@microsoft.com>
---

Change log:
v2:
Removed validation for offload settings in mana_dev_configure().

 drivers/net/mana/mana.c | 75 +++++++++++++++++++++++++++++++++++++++--
 drivers/net/mana/mana.h |  3 ++
 2 files changed, 76 insertions(+), 2 deletions(-)
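
In outline, the override described in the commit message registers a pair of
DPDK-backed callbacks with the MANA direct-verbs provider, so that device
queue buffers come from rte_zmalloc_socket() rather than the IB layer's
default heap allocation. A minimal sketch of that hook-up, using the manadv
attribute and callback names from the diff below; the header paths and the
standalone register_dpdk_allocators() wrapper are illustrative assumptions,
not part of this patch:

	#include <infiniband/verbs.h>
	#include <infiniband/manadv.h>	/* assumed rdma-core header for manadv */

	/* Allocator callbacks backed by DPDK memory; the real
	 * mana_alloc_verbs_buf()/mana_free_verbs_buf() are in the diff below.
	 */
	extern void *mana_alloc_verbs_buf(size_t size, void *data);
	extern void mana_free_verbs_buf(void *ptr, void *data);

	/* Hypothetical helper: point the provider at the DPDK allocators. */
	static void register_dpdk_allocators(struct ibv_context *ib_ctx)
	{
		struct manadv_ctx_allocators allocators = {
			.alloc = &mana_alloc_verbs_buf,
			.free  = &mana_free_verbs_buf,
			.data  = 0,
		};

		manadv_set_context_attr(ib_ctx, MANADV_CTX_ATTR_BUF_ALLOCATORS,
					&allocators);
	}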
  

Patch

diff --git a/drivers/net/mana/mana.c b/drivers/net/mana/mana.c
index 63ec1f75f0..1ea2cecd37 100644
--- a/drivers/net/mana/mana.c
+++ b/drivers/net/mana/mana.c
@@ -40,7 +40,79 @@  static rte_spinlock_t mana_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
 int mana_logtype_driver;
 int mana_logtype_init;
 
+void *mana_alloc_verbs_buf(size_t size, void *data)
+{
+	void *ret;
+	size_t alignment = rte_mem_page_size();
+	int socket = (int)(uintptr_t)data;
+
+	DRV_LOG(DEBUG, "size=%zu socket=%d", size, socket);
+
+	if (alignment == (size_t)-1) {
+		DRV_LOG(ERR, "Failed to get mem page size");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+
+	ret = rte_zmalloc_socket("mana_verb_buf", size, alignment, socket);
+	if (!ret && size)
+		rte_errno = ENOMEM;
+	return ret;
+}
+
+void mana_free_verbs_buf(void *ptr, void *data __rte_unused)
+{
+	rte_free(ptr);
+}
+
+static int mana_dev_configure(struct rte_eth_dev *dev)
+{
+	struct mana_priv *priv = dev->data->dev_private;
+	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+
+	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+		dev_conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
+	if (dev->data->nb_rx_queues != dev->data->nb_tx_queues) {
+		DRV_LOG(ERR, "Only support equal number of rx/tx queues");
+		return -EINVAL;
+	}
+
+	if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
+		DRV_LOG(ERR, "number of TX/RX queues must be power of 2");
+		return -EINVAL;
+	}
+
+	priv->num_queues = dev->data->nb_rx_queues;
+
+	manadv_set_context_attr(priv->ib_ctx, MANADV_CTX_ATTR_BUF_ALLOCATORS,
+				(void *)((uintptr_t)&(struct manadv_ctx_allocators){
+					.alloc = &mana_alloc_verbs_buf,
+					.free = &mana_free_verbs_buf,
+					.data = 0,
+				}));
+
+	return 0;
+}
+
+static int
+mana_dev_close(struct rte_eth_dev *dev)
+{
+	struct mana_priv *priv = dev->data->dev_private;
+	int ret;
+
+	ret = ibv_close_device(priv->ib_ctx);
+	if (ret) {
+		ret = errno;
+		return ret;
+	}
+
+	return 0;
+}
+
 const struct eth_dev_ops mana_dev_ops = {
+	.dev_configure		= mana_dev_configure,
+	.dev_close		= mana_dev_close,
 };
 
 const struct eth_dev_ops mana_dev_sec_ops = {
@@ -627,8 +699,7 @@  static int mana_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 
 static int mana_dev_uninit(struct rte_eth_dev *dev)
 {
-	RTE_SET_USED(dev);
-	return 0;
+	return mana_dev_close(dev);
 }
 
 static int mana_pci_remove(struct rte_pci_device *pci_dev)
diff --git a/drivers/net/mana/mana.h b/drivers/net/mana/mana.h
index e30c030b4e..66873394b9 100644
--- a/drivers/net/mana/mana.h
+++ b/drivers/net/mana/mana.h
@@ -207,4 +207,7 @@  int mana_mp_req_verbs_cmd_fd(struct rte_eth_dev *dev);
 
 void mana_mp_req_on_rxtx(struct rte_eth_dev *dev, enum mana_mp_req_type type);
 
+void *mana_alloc_verbs_buf(size_t size, void *data);
+void mana_free_verbs_buf(void *ptr, void *data __rte_unused);
+
 #endif
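
As a usage note, mana_dev_configure() above accepts only configurations where
the Rx and Tx queue counts are equal and a power of two, and it turns on the
RSS hash Rx offload whenever RSS multi-queue mode is requested. A minimal
application-side sketch that passes those checks, using the same ethdev macro
names as the patch; the port id and queue count are illustrative assumptions:

	#include <rte_ethdev.h>

	/* Hypothetical configuration that satisfies mana_dev_configure():
	 * RSS multi-queue mode and equal, power-of-two Rx/Tx queue counts.
	 */
	static int configure_mana_port(uint16_t port_id)
	{
		struct rte_eth_conf conf = {
			/* RSS mode; the driver then also sets DEV_RX_OFFLOAD_RSS_HASH. */
			.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
		};
		const uint16_t nb_queues = 4;	/* nb_rx == nb_tx, power of 2 */

		return rte_eth_dev_configure(port_id, nb_queues, nb_queues, &conf);
	}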