get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request body are changed.

put:
Update a patch (full update; use PATCH for a partial update).
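
A minimal sketch of reading this endpoint, assuming the third-party Python "requests" package; the URL is the one recorded below, everything else is illustrative. For a non-browser client like this, Patchwork answers with plain JSON even without the browsable "?format=api" parameter:

import requests

# Fetch the patch shown in the recorded exchange below.
resp = requests.get("http://patchwork.dpdk.org/api/patches/118939/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"])   # "[v7,7/8] net/gve: add support for queue operations"
print(patch["state"])  # "changes-requested"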

GET /api/patches/118939/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 118939,
    "url": "http://patchwork.dpdk.org/api/patches/118939/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20221021091928.2674471-8-junfeng.guo@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221021091928.2674471-8-junfeng.guo@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221021091928.2674471-8-junfeng.guo@intel.com",
    "date": "2022-10-21T09:19:27",
    "name": "[v7,7/8] net/gve: add support for queue operations",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "ac1309fa411d98d02b24cf0a432d325b3a8d0c79",
    "submitter": {
        "id": 1785,
        "url": "http://patchwork.dpdk.org/api/people/1785/?format=api",
        "name": "Junfeng Guo",
        "email": "junfeng.guo@intel.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patchwork.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20221021091928.2674471-8-junfeng.guo@intel.com/mbox/",
    "series": [
        {
            "id": 25366,
            "url": "http://patchwork.dpdk.org/api/series/25366/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=25366",
            "date": "2022-10-21T09:19:20",
            "name": "introduce GVE PMD",
            "version": 7,
            "mbox": "http://patchwork.dpdk.org/series/25366/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/118939/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/118939/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id E7206A0032;\n\tFri, 21 Oct 2022 11:21:46 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 5404042BF3;\n\tFri, 21 Oct 2022 11:21:29 +0200 (CEST)",
            "from mga02.intel.com (mga02.intel.com [134.134.136.20])\n by mails.dpdk.org (Postfix) with ESMTP id 5F26F42BF0\n for <dev@dpdk.org>; Fri, 21 Oct 2022 11:21:22 +0200 (CEST)",
            "from fmsmga003.fm.intel.com ([10.253.24.29])\n by orsmga101.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 21 Oct 2022 02:21:21 -0700",
            "from dpdk-jf-ntb-one.sh.intel.com ([10.67.111.104])\n by FMSMGA003.fm.intel.com with ESMTP; 21 Oct 2022 02:21:18 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1666344082; x=1697880082;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=E1k7dOFIXL8J9SEddRdwU1J4APpqtnyZgnLyRWwHmyI=;\n b=Zi595cfnQI2UH2omHVtBJC9XtqTD0WgBTNYsYSC828ytKyh2GWMG8g4q\n OFpt7zPfx0shCyjdz81S1m3hT9VOQcQAoecfUpYwHik+hGOVeFNmLedS9\n UmFEX25QTl5vdMTjK7B1hLEhEvxHwyzTwTcrriqjztQS09/eI0toGjU7M\n f0lMED2GpE7eqCXxSjmJjU+J/tpMbrCEt4R5NFeRqgD7GPzH5nKVmozZW\n 8lYW82EkYMx+baA5hO+U9U75s9N4NRYkl+apH0vcMwwVMQs7DBozrUQRy\n 337srtSESnRETNpX2G6saInFJWCs13SYA5/cP6vJafWWBw7jUbFWJAJlb A==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10506\"; a=\"294355098\"",
            "E=Sophos;i=\"5.95,200,1661842800\"; d=\"scan'208\";a=\"294355098\"",
            "E=McAfee;i=\"6500,9779,10506\"; a=\"719619932\"",
            "E=Sophos;i=\"5.95,200,1661842800\"; d=\"scan'208\";a=\"719619932\""
        ],
        "X-ExtLoop1": "1",
        "From": "Junfeng Guo <junfeng.guo@intel.com>",
        "To": "qi.z.zhang@intel.com, jingjing.wu@intel.com, ferruh.yigit@xilinx.com,\n beilei.xing@intel.com",
        "Cc": "dev@dpdk.org, xiaoyun.li@intel.com, awogbemila@google.com,\n bruce.richardson@intel.com, hemant.agrawal@nxp.com,\n stephen@networkplumber.org, chenbo.xia@intel.com, helin.zhang@intel.com,\n Junfeng Guo <junfeng.guo@intel.com>",
        "Subject": "[PATCH v7 7/8] net/gve: add support for queue operations",
        "Date": "Fri, 21 Oct 2022 17:19:27 +0800",
        "Message-Id": "<20221021091928.2674471-8-junfeng.guo@intel.com>",
        "X-Mailer": "git-send-email 2.34.1",
        "In-Reply-To": "<20221021091928.2674471-1-junfeng.guo@intel.com>",
        "References": "<20221020103656.1068036-1-junfeng.guo@intel.com>\n <20221021091928.2674471-1-junfeng.guo@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Add support for queue operations:\n- setup rx/tx queue\n- release rx/tx queue\n- start rx/tx queues\n- stop rx/tx queues\n\nSigned-off-by: Xiaoyun Li <xiaoyun.li@intel.com>\nSigned-off-by: Junfeng Guo <junfeng.guo@intel.com>\n---\n drivers/net/gve/gve_ethdev.c | 204 +++++++++++++++++++++++++++++++++\n drivers/net/gve/gve_ethdev.h |  52 +++++++++\n drivers/net/gve/gve_rx.c     | 212 ++++++++++++++++++++++++++++++++++\n drivers/net/gve/gve_tx.c     | 214 +++++++++++++++++++++++++++++++++++\n drivers/net/gve/meson.build  |   2 +\n 5 files changed, 684 insertions(+)\n create mode 100644 drivers/net/gve/gve_rx.c\n create mode 100644 drivers/net/gve/gve_tx.c",
    "diff": "diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c\nindex 7fbe0c78c9..892e7e2e1c 100644\n--- a/drivers/net/gve/gve_ethdev.c\n+++ b/drivers/net/gve/gve_ethdev.c\n@@ -28,6 +28,68 @@ gve_write_version(uint8_t *driver_version_register)\n \twriteb('\\n', driver_version_register);\n }\n \n+static int\n+gve_alloc_queue_page_list(struct gve_priv *priv, uint32_t id, uint32_t pages)\n+{\n+\tchar z_name[RTE_MEMZONE_NAMESIZE];\n+\tstruct gve_queue_page_list *qpl;\n+\tconst struct rte_memzone *mz;\n+\tdma_addr_t page_bus;\n+\tuint32_t i;\n+\n+\tif (priv->num_registered_pages + pages >\n+\t    priv->max_registered_pages) {\n+\t\tPMD_DRV_LOG(ERR, \"Pages %\" PRIu64 \" > max registered pages %\" PRIu64,\n+\t\t\t    priv->num_registered_pages + pages,\n+\t\t\t    priv->max_registered_pages);\n+\t\treturn -EINVAL;\n+\t}\n+\tqpl = &priv->qpl[id];\n+\tsnprintf(z_name, sizeof(z_name), \"gve_%s_qpl%d\", priv->pci_dev->device.name, id);\n+\tmz = rte_memzone_reserve_aligned(z_name, pages * PAGE_SIZE,\n+\t\t\t\t\t rte_socket_id(),\n+\t\t\t\t\t RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);\n+\tif (mz == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to alloc %s.\", z_name);\n+\t\treturn -ENOMEM;\n+\t}\n+\tqpl->page_buses = rte_zmalloc(\"qpl page buses\", pages * sizeof(dma_addr_t), 0);\n+\tif (qpl->page_buses == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to alloc qpl %u page buses\", id);\n+\t\treturn -ENOMEM;\n+\t}\n+\tpage_bus = mz->iova;\n+\tfor (i = 0; i < pages; i++) {\n+\t\tqpl->page_buses[i] = page_bus;\n+\t\tpage_bus += PAGE_SIZE;\n+\t}\n+\tqpl->id = id;\n+\tqpl->mz = mz;\n+\tqpl->num_entries = pages;\n+\n+\tpriv->num_registered_pages += pages;\n+\n+\treturn 0;\n+}\n+\n+static void\n+gve_free_qpls(struct gve_priv *priv)\n+{\n+\tuint16_t nb_txqs = priv->max_nb_txq;\n+\tuint16_t nb_rxqs = priv->max_nb_rxq;\n+\tuint32_t i;\n+\n+\tfor (i = 0; i < nb_txqs + nb_rxqs; i++) {\n+\t\tif (priv->qpl[i].mz != NULL)\n+\t\t\trte_memzone_free(priv->qpl[i].mz);\n+\t\tif (priv->qpl[i].page_buses != NULL)\n+\t\t\trte_free(priv->qpl[i].page_buses);\n+\t}\n+\n+\tif (priv->qpl != NULL)\n+\t\trte_free(priv->qpl);\n+}\n+\n static int\n gve_dev_configure(struct rte_eth_dev *dev)\n {\n@@ -42,6 +104,43 @@ gve_dev_configure(struct rte_eth_dev *dev)\n \treturn 0;\n }\n \n+static int\n+gve_refill_pages(struct gve_rx_queue *rxq)\n+{\n+\tstruct rte_mbuf *nmb;\n+\tuint16_t i;\n+\tint diag;\n+\n+\tdiag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0], rxq->nb_rx_desc);\n+\tif (diag < 0) {\n+\t\tfor (i = 0; i < rxq->nb_rx_desc - 1; i++) {\n+\t\t\tnmb = rte_pktmbuf_alloc(rxq->mpool);\n+\t\t\tif (!nmb)\n+\t\t\t\tbreak;\n+\t\t\trxq->sw_ring[i] = nmb;\n+\t\t}\n+\t\tif (i < rxq->nb_rx_desc - 1)\n+\t\t\treturn -ENOMEM;\n+\t}\n+\trxq->nb_avail = 0;\n+\trxq->next_avail = rxq->nb_rx_desc - 1;\n+\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\tif (rxq->is_gqi_qpl) {\n+\t\t\trxq->rx_data_ring[i].addr = rte_cpu_to_be_64(i * PAGE_SIZE);\n+\t\t} else {\n+\t\t\tif (i == rxq->nb_rx_desc - 1)\n+\t\t\t\tbreak;\n+\t\t\tnmb = rxq->sw_ring[i];\n+\t\t\trxq->rx_data_ring[i].addr = rte_cpu_to_be_64(rte_mbuf_data_iova(nmb));\n+\t\t}\n+\t}\n+\n+\trte_write32(rte_cpu_to_be_32(rxq->next_avail), rxq->qrx_tail);\n+\n+\treturn 0;\n+}\n+\n static int\n gve_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)\n {\n@@ -73,16 +172,70 @@ gve_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)\n static int\n gve_dev_start(struct rte_eth_dev *dev)\n {\n+\tuint16_t num_queues = dev->data->nb_tx_queues;\n+\tstruct 
gve_priv *priv = dev->data->dev_private;\n+\tstruct gve_tx_queue *txq;\n+\tstruct gve_rx_queue *rxq;\n+\tuint16_t i;\n+\tint err;\n+\n+\tpriv->txqs = (struct gve_tx_queue **)dev->data->tx_queues;\n+\terr = gve_adminq_create_tx_queues(priv, num_queues);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"failed to create %u tx queues.\", num_queues);\n+\t\treturn err;\n+\t}\n+\tfor (i = 0; i < num_queues; i++) {\n+\t\ttxq = priv->txqs[i];\n+\t\ttxq->qtx_tail =\n+\t\t&priv->db_bar2[rte_be_to_cpu_32(txq->qres->db_index)];\n+\t\ttxq->qtx_head =\n+\t\t&priv->cnt_array[rte_be_to_cpu_32(txq->qres->counter_index)];\n+\n+\t\trte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), txq->ntfy_addr);\n+\t}\n+\n+\tnum_queues = dev->data->nb_rx_queues;\n+\tpriv->rxqs = (struct gve_rx_queue **)dev->data->rx_queues;\n+\terr = gve_adminq_create_rx_queues(priv, num_queues);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"failed to create %u rx queues.\", num_queues);\n+\t\tgoto err_tx;\n+\t}\n+\tfor (i = 0; i < num_queues; i++) {\n+\t\trxq = priv->rxqs[i];\n+\t\trxq->qrx_tail =\n+\t\t&priv->db_bar2[rte_be_to_cpu_32(rxq->qres->db_index)];\n+\n+\t\trte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), rxq->ntfy_addr);\n+\n+\t\terr = gve_refill_pages(rxq);\n+\t\tif (err) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to refill for RX\");\n+\t\t\tgoto err_rx;\n+\t\t}\n+\t}\n+\n \tdev->data->dev_started = 1;\n \tgve_link_update(dev, 0);\n \n \treturn 0;\n+\n+err_rx:\n+\tgve_stop_rx_queues(dev);\n+err_tx:\n+\tgve_stop_tx_queues(dev);\n+\treturn err;\n }\n \n static int\n gve_dev_stop(struct rte_eth_dev *dev)\n {\n \tdev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;\n+\n+\tgve_stop_tx_queues(dev);\n+\tgve_stop_rx_queues(dev);\n+\n \tdev->data->dev_started = 0;\n \n \treturn 0;\n@@ -91,7 +244,11 @@ gve_dev_stop(struct rte_eth_dev *dev)\n static int\n gve_dev_close(struct rte_eth_dev *dev)\n {\n+\tstruct gve_priv *priv = dev->data->dev_private;\n+\tstruct gve_tx_queue *txq;\n+\tstruct gve_rx_queue *rxq;\n \tint err = 0;\n+\tuint16_t i;\n \n \tif (dev->data->dev_started) {\n \t\terr = gve_dev_stop(dev);\n@@ -99,6 +256,19 @@ gve_dev_close(struct rte_eth_dev *dev)\n \t\t\tPMD_DRV_LOG(ERR, \"Failed to stop dev.\");\n \t}\n \n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n+\t\ttxq = dev->data->tx_queues[i];\n+\t\tgve_tx_queue_release(txq);\n+\t}\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\trxq = dev->data->rx_queues[i];\n+\t\tgve_rx_queue_release(rxq);\n+\t}\n+\n+\tgve_free_qpls(priv);\n+\trte_free(priv->adminq);\n+\n \tdev->data->mac_addrs = NULL;\n \n \treturn err;\n@@ -185,6 +355,8 @@ static const struct eth_dev_ops gve_eth_dev_ops = {\n \t.dev_stop             = gve_dev_stop,\n \t.dev_close            = gve_dev_close,\n \t.dev_infos_get        = gve_dev_info_get,\n+\t.rx_queue_setup       = gve_rx_queue_setup,\n+\t.tx_queue_setup       = gve_tx_queue_setup,\n \t.link_update          = gve_link_update,\n \t.mtu_set              = gve_dev_mtu_set,\n };\n@@ -322,7 +494,9 @@ gve_setup_device_resources(struct gve_priv *priv)\n static int\n gve_init_priv(struct gve_priv *priv, bool skip_describe_device)\n {\n+\tuint16_t pages;\n \tint num_ntfy;\n+\tuint32_t i;\n \tint err;\n \n \t/* Set up the adminq */\n@@ -373,10 +547,40 @@ gve_init_priv(struct gve_priv *priv, bool skip_describe_device)\n \tPMD_DRV_LOG(INFO, \"Max TX queues %d, Max RX queues %d\",\n \t\t    priv->max_nb_txq, priv->max_nb_rxq);\n \n+\t/* In GQI_QPL queue format:\n+\t * Allocate queue page lists according to max queue number\n+\t * tx qpl id should start from 0 while rx qpl id should 
start\n+\t * from priv->max_nb_txq\n+\t */\n+\tif (priv->queue_format == GVE_GQI_QPL_FORMAT) {\n+\t\tpriv->qpl = rte_zmalloc(\"gve_qpl\",\n+\t\t\t\t\t(priv->max_nb_txq + priv->max_nb_rxq) *\n+\t\t\t\t\tsizeof(struct gve_queue_page_list), 0);\n+\t\tif (priv->qpl == NULL) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to alloc qpl.\");\n+\t\t\terr = -ENOMEM;\n+\t\t\tgoto free_adminq;\n+\t\t}\n+\n+\t\tfor (i = 0; i < priv->max_nb_txq + priv->max_nb_rxq; i++) {\n+\t\t\tif (i < priv->max_nb_txq)\n+\t\t\t\tpages = priv->tx_pages_per_qpl;\n+\t\t\telse\n+\t\t\t\tpages = priv->rx_data_slot_cnt;\n+\t\t\terr = gve_alloc_queue_page_list(priv, i, pages);\n+\t\t\tif (err != 0) {\n+\t\t\t\tPMD_DRV_LOG(ERR, \"Failed to alloc qpl %u.\", i);\n+\t\t\t\tgoto err_qpl;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n setup_device:\n \terr = gve_setup_device_resources(priv);\n \tif (!err)\n \t\treturn 0;\n+err_qpl:\n+\tgve_free_qpls(priv);\n free_adminq:\n \tgve_adminq_free(priv);\n \treturn err;\ndiff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h\nindex 57c29374b5..00c69d1b88 100644\n--- a/drivers/net/gve/gve_ethdev.h\n+++ b/drivers/net/gve/gve_ethdev.h\n@@ -37,15 +37,35 @@ union gve_tx_desc {\n \tstruct gve_tx_seg_desc seg; /* subsequent descs for a packet */\n };\n \n+struct gve_tx_iovec {\n+\tuint32_t iov_base; /* offset in fifo */\n+\tuint32_t iov_len;\n+};\n+\n struct gve_tx_queue {\n \tvolatile union gve_tx_desc *tx_desc_ring;\n \tconst struct rte_memzone *mz;\n \tuint64_t tx_ring_phys_addr;\n+\tstruct rte_mbuf **sw_ring;\n+\tvolatile rte_be32_t *qtx_tail;\n+\tvolatile rte_be32_t *qtx_head;\n \n+\tuint32_t tx_tail;\n \tuint16_t nb_tx_desc;\n+\tuint16_t nb_free;\n+\tuint32_t next_to_clean;\n+\tuint16_t free_thresh;\n \n \t/* Only valid for DQO_QPL queue format */\n+\tuint16_t sw_tail;\n+\tuint16_t sw_ntc;\n+\tuint16_t sw_nb_free;\n+\tuint32_t fifo_size;\n+\tuint32_t fifo_head;\n+\tuint32_t fifo_avail;\n+\tuint64_t fifo_base;\n \tstruct gve_queue_page_list *qpl;\n+\tstruct gve_tx_iovec *iov_ring;\n \n \tuint16_t port_id;\n \tuint16_t queue_id;\n@@ -59,6 +79,8 @@ struct gve_tx_queue {\n \n \t/* Only valid for DQO_RDA queue format */\n \tstruct gve_tx_queue *complq;\n+\n+\tuint8_t is_gqi_qpl;\n };\n \n struct gve_rx_queue {\n@@ -67,9 +89,17 @@ struct gve_rx_queue {\n \tconst struct rte_memzone *mz;\n \tconst struct rte_memzone *data_mz;\n \tuint64_t rx_ring_phys_addr;\n+\tstruct rte_mbuf **sw_ring;\n+\tstruct rte_mempool *mpool;\n \n+\tuint16_t rx_tail;\n \tuint16_t nb_rx_desc;\n+\tuint16_t expected_seqno; /* the next expected seqno */\n+\tuint16_t free_thresh;\n+\tuint32_t next_avail;\n+\tuint32_t nb_avail;\n \n+\tvolatile rte_be32_t *qrx_tail;\n \tvolatile rte_be32_t *ntfy_addr;\n \n \t/* only valid for GQI_QPL queue format */\n@@ -86,6 +116,8 @@ struct gve_rx_queue {\n \n \t/* Only valid for DQO_RDA queue format */\n \tstruct gve_rx_queue *bufq;\n+\n+\tuint8_t is_gqi_qpl;\n };\n \n struct gve_priv {\n@@ -225,4 +257,24 @@ gve_clear_device_rings_ok(struct gve_priv *priv)\n \t\t\t\t&priv->state_flags);\n }\n \n+int\n+gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,\n+\t\t   unsigned int socket_id, const struct rte_eth_rxconf *conf,\n+\t\t   struct rte_mempool *pool);\n+int\n+gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,\n+\t\t   unsigned int socket_id, const struct rte_eth_txconf *conf);\n+\n+void\n+gve_tx_queue_release(void *txq);\n+\n+void\n+gve_rx_queue_release(void *rxq);\n+\n+void\n+gve_stop_tx_queues(struct rte_eth_dev 
*dev);\n+\n+void\n+gve_stop_rx_queues(struct rte_eth_dev *dev);\n+\n #endif /* _GVE_ETHDEV_H_ */\ndiff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c\nnew file mode 100644\nindex 0000000000..e64a461253\n--- /dev/null\n+++ b/drivers/net/gve/gve_rx.c\n@@ -0,0 +1,212 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(C) 2022 Intel Corporation\n+ */\n+\n+#include \"gve_ethdev.h\"\n+#include \"base/gve_adminq.h\"\n+\n+static inline void\n+gve_reset_rxq(struct gve_rx_queue *rxq)\n+{\n+\tstruct rte_mbuf **sw_ring = rxq->sw_ring;\n+\tuint32_t size, i;\n+\n+\tif (rxq == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"pointer to rxq is NULL\");\n+\t\treturn;\n+\t}\n+\n+\tsize = rxq->nb_rx_desc * sizeof(struct gve_rx_desc);\n+\tfor (i = 0; i < size; i++)\n+\t\t((volatile char *)rxq->rx_desc_ring)[i] = 0;\n+\n+\tsize = rxq->nb_rx_desc * sizeof(union gve_rx_data_slot);\n+\tfor (i = 0; i < size; i++)\n+\t\t((volatile char *)rxq->rx_data_ring)[i] = 0;\n+\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++)\n+\t\tsw_ring[i] = NULL;\n+\n+\trxq->rx_tail = 0;\n+\trxq->next_avail = 0;\n+\trxq->nb_avail = rxq->nb_rx_desc;\n+\trxq->expected_seqno = 1;\n+}\n+\n+static inline void\n+gve_release_rxq_mbufs(struct gve_rx_queue *rxq)\n+{\n+\tuint16_t i;\n+\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\tif (rxq->sw_ring[i]) {\n+\t\t\trte_pktmbuf_free_seg(rxq->sw_ring[i]);\n+\t\t\trxq->sw_ring[i] = NULL;\n+\t\t}\n+\t}\n+\n+\trxq->nb_avail = rxq->nb_rx_desc;\n+}\n+\n+void\n+gve_rx_queue_release(void *rxq)\n+{\n+\tstruct gve_rx_queue *q = rxq;\n+\n+\tif (!q)\n+\t\treturn;\n+\n+\tif (q->is_gqi_qpl) {\n+\t\tgve_adminq_unregister_page_list(q->hw, q->qpl->id);\n+\t\tq->qpl = NULL;\n+\t}\n+\n+\tgve_release_rxq_mbufs(q);\n+\trte_free(q->sw_ring);\n+\trte_memzone_free(q->data_mz);\n+\trte_memzone_free(q->mz);\n+\trte_memzone_free(q->qres_mz);\n+\tq->qres = NULL;\n+\trte_free(q);\n+}\n+\n+int\n+gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,\n+\t\tuint16_t nb_desc, unsigned int socket_id,\n+\t\tconst struct rte_eth_rxconf *conf, struct rte_mempool *pool)\n+{\n+\tstruct gve_priv *hw = dev->data->dev_private;\n+\tconst struct rte_memzone *mz;\n+\tstruct gve_rx_queue *rxq;\n+\tuint16_t free_thresh;\n+\tint err = 0;\n+\n+\tif (nb_desc != hw->rx_desc_cnt) {\n+\t\tPMD_DRV_LOG(WARNING, \"gve doesn't support nb_desc config, use hw nb_desc %u.\",\n+\t\t\t    hw->rx_desc_cnt);\n+\t}\n+\tnb_desc = hw->rx_desc_cnt;\n+\n+\t/* Free memory if needed. */\n+\tif (dev->data->rx_queues[queue_id]) {\n+\t\tgve_rx_queue_release(dev->data->rx_queues[queue_id]);\n+\t\tdev->data->rx_queues[queue_id] = NULL;\n+\t}\n+\n+\t/* Allocate the RX queue data structure. */\n+\trxq = rte_zmalloc_socket(\"gve rxq\",\n+\t\t\t\t sizeof(struct gve_rx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t socket_id);\n+\tif (!rxq) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate memory for rx queue structure\");\n+\t\terr = -ENOMEM;\n+\t\tgoto err_rxq;\n+\t}\n+\n+\tfree_thresh = conf->rx_free_thresh ? 
conf->rx_free_thresh : GVE_DEFAULT_RX_FREE_THRESH;\n+\tif (free_thresh >= nb_desc) {\n+\t\tPMD_DRV_LOG(ERR, \"rx_free_thresh (%u) must be less than nb_desc (%u) minus 3.\",\n+\t\t\t    free_thresh, rxq->nb_rx_desc);\n+\t\terr = -EINVAL;\n+\t\tgoto err_rxq;\n+\t}\n+\n+\trxq->nb_rx_desc = nb_desc;\n+\trxq->free_thresh = free_thresh;\n+\trxq->queue_id = queue_id;\n+\trxq->port_id = dev->data->port_id;\n+\trxq->ntfy_id = hw->num_ntfy_blks / 2 + queue_id;\n+\trxq->is_gqi_qpl = hw->queue_format == GVE_GQI_QPL_FORMAT;\n+\trxq->mpool = pool;\n+\trxq->hw = hw;\n+\trxq->ntfy_addr = &hw->db_bar2[rte_be_to_cpu_32(hw->irq_dbs[rxq->ntfy_id].id)];\n+\n+\trxq->rx_buf_len = rte_pktmbuf_data_room_size(rxq->mpool) - RTE_PKTMBUF_HEADROOM;\n+\n+\t/* Allocate software ring */\n+\trxq->sw_ring = rte_zmalloc_socket(\"gve rx sw ring\", sizeof(struct rte_mbuf *) * nb_desc,\n+\t\t\t\t\t  RTE_CACHE_LINE_SIZE, socket_id);\n+\tif (!rxq->sw_ring) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate memory for SW RX ring\");\n+\t\terr = -ENOMEM;\n+\t\tgoto err_rxq;\n+\t}\n+\n+\tmz = rte_eth_dma_zone_reserve(dev, \"rx_ring\", queue_id,\n+\t\t\t\t      nb_desc * sizeof(struct gve_rx_desc),\n+\t\t\t\t      PAGE_SIZE, socket_id);\n+\tif (mz == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to reserve DMA memory for RX\");\n+\t\terr = -ENOMEM;\n+\t\tgoto err_sw_ring;\n+\t}\n+\trxq->rx_desc_ring = (struct gve_rx_desc *)mz->addr;\n+\trxq->rx_ring_phys_addr = mz->iova;\n+\trxq->mz = mz;\n+\n+\tmz = rte_eth_dma_zone_reserve(dev, \"gve rx data ring\", queue_id,\n+\t\t\t\t      sizeof(union gve_rx_data_slot) * nb_desc,\n+\t\t\t\t      PAGE_SIZE, socket_id);\n+\tif (mz == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate memory for RX data ring\");\n+\t\terr = -ENOMEM;\n+\t\tgoto err_rx_ring;\n+\t}\n+\trxq->rx_data_ring = (union gve_rx_data_slot *)mz->addr;\n+\trxq->data_mz = mz;\n+\tif (rxq->is_gqi_qpl) {\n+\t\trxq->qpl = &hw->qpl[rxq->ntfy_id];\n+\t\terr = gve_adminq_register_page_list(hw, rxq->qpl);\n+\t\tif (err != 0) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to register qpl %u\", queue_id);\n+\t\t\tgoto err_data_ring;\n+\t\t}\n+\t}\n+\n+\tmz = rte_eth_dma_zone_reserve(dev, \"rxq_res\", queue_id,\n+\t\t\t\t      sizeof(struct gve_queue_resources),\n+\t\t\t\t      PAGE_SIZE, socket_id);\n+\tif (mz == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to reserve DMA memory for RX resource\");\n+\t\terr = -ENOMEM;\n+\t\tgoto err_data_ring;\n+\t}\n+\trxq->qres = (struct gve_queue_resources *)mz->addr;\n+\trxq->qres_mz = mz;\n+\n+\tgve_reset_rxq(rxq);\n+\n+\tdev->data->rx_queues[queue_id] = rxq;\n+\n+\treturn 0;\n+\n+err_data_ring:\n+\trte_memzone_free(rxq->data_mz);\n+err_rx_ring:\n+\trte_memzone_free(rxq->mz);\n+err_sw_ring:\n+\trte_free(rxq->sw_ring);\n+err_rxq:\n+\trte_free(rxq);\n+\treturn err;\n+}\n+\n+void\n+gve_stop_rx_queues(struct rte_eth_dev *dev)\n+{\n+\tstruct gve_priv *hw = dev->data->dev_private;\n+\tstruct gve_rx_queue *rxq;\n+\tuint16_t i;\n+\tint err;\n+\n+\terr = gve_adminq_destroy_rx_queues(hw, dev->data->nb_rx_queues);\n+\tif (err != 0)\n+\t\tPMD_DRV_LOG(WARNING, \"failed to destroy rxqs\");\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\trxq = dev->data->rx_queues[i];\n+\t\tgve_release_rxq_mbufs(rxq);\n+\t\tgve_reset_rxq(rxq);\n+\t}\n+}\ndiff --git a/drivers/net/gve/gve_tx.c b/drivers/net/gve/gve_tx.c\nnew file mode 100644\nindex 0000000000..b706b62e71\n--- /dev/null\n+++ b/drivers/net/gve/gve_tx.c\n@@ -0,0 +1,214 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(C) 2022 Intel Corporation\n+ */\n+\n+#include 
\"gve_ethdev.h\"\n+#include \"base/gve_adminq.h\"\n+\n+static inline void\n+gve_reset_txq(struct gve_tx_queue *txq)\n+{\n+\tstruct rte_mbuf **sw_ring = txq->sw_ring;\n+\tuint32_t size, i;\n+\n+\tif (txq == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"Pointer to txq is NULL\");\n+\t\treturn;\n+\t}\n+\n+\tsize = txq->nb_tx_desc * sizeof(union gve_tx_desc);\n+\tfor (i = 0; i < size; i++)\n+\t\t((volatile char *)txq->tx_desc_ring)[i] = 0;\n+\n+\tfor (i = 0; i < txq->nb_tx_desc; i++) {\n+\t\tsw_ring[i] = NULL;\n+\t\tif (txq->is_gqi_qpl) {\n+\t\t\ttxq->iov_ring[i].iov_base = 0;\n+\t\t\ttxq->iov_ring[i].iov_len = 0;\n+\t\t}\n+\t}\n+\n+\ttxq->tx_tail = 0;\n+\ttxq->nb_free = txq->nb_tx_desc - 1;\n+\ttxq->next_to_clean = 0;\n+\n+\tif (txq->is_gqi_qpl) {\n+\t\ttxq->fifo_size = PAGE_SIZE * txq->hw->tx_pages_per_qpl;\n+\t\ttxq->fifo_avail = txq->fifo_size;\n+\t\ttxq->fifo_head = 0;\n+\t\ttxq->fifo_base = (uint64_t)(txq->qpl->mz->addr);\n+\n+\t\ttxq->sw_tail = 0;\n+\t\ttxq->sw_nb_free = txq->nb_tx_desc - 1;\n+\t\ttxq->sw_ntc = 0;\n+\t}\n+}\n+\n+static inline void\n+gve_release_txq_mbufs(struct gve_tx_queue *txq)\n+{\n+\tuint16_t i;\n+\n+\tfor (i = 0; i < txq->nb_tx_desc; i++) {\n+\t\tif (txq->sw_ring[i]) {\n+\t\t\trte_pktmbuf_free_seg(txq->sw_ring[i]);\n+\t\t\ttxq->sw_ring[i] = NULL;\n+\t\t}\n+\t}\n+}\n+\n+void\n+gve_tx_queue_release(void *txq)\n+{\n+\tstruct gve_tx_queue *q = txq;\n+\n+\tif (!q)\n+\t\treturn;\n+\n+\tif (q->is_gqi_qpl) {\n+\t\tgve_adminq_unregister_page_list(q->hw, q->qpl->id);\n+\t\trte_free(q->iov_ring);\n+\t\tq->qpl = NULL;\n+\t}\n+\n+\tgve_release_txq_mbufs(q);\n+\trte_free(q->sw_ring);\n+\trte_memzone_free(q->mz);\n+\trte_memzone_free(q->qres_mz);\n+\tq->qres = NULL;\n+\trte_free(q);\n+}\n+\n+int\n+gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,\n+\t\t   unsigned int socket_id, const struct rte_eth_txconf *conf)\n+{\n+\tstruct gve_priv *hw = dev->data->dev_private;\n+\tconst struct rte_memzone *mz;\n+\tstruct gve_tx_queue *txq;\n+\tuint16_t free_thresh;\n+\tint err = 0;\n+\n+\tif (nb_desc != hw->tx_desc_cnt) {\n+\t\tPMD_DRV_LOG(WARNING, \"gve doesn't support nb_desc config, use hw nb_desc %u.\",\n+\t\t\t    hw->tx_desc_cnt);\n+\t}\n+\tnb_desc = hw->tx_desc_cnt;\n+\n+\t/* Free memory if needed. */\n+\tif (dev->data->tx_queues[queue_id]) {\n+\t\tgve_tx_queue_release(dev->data->tx_queues[queue_id]);\n+\t\tdev->data->tx_queues[queue_id] = NULL;\n+\t}\n+\n+\t/* Allocate the TX queue data structure. */\n+\ttxq = rte_zmalloc_socket(\"gve txq\", sizeof(struct gve_tx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE, socket_id);\n+\tif (!txq) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate memory for tx queue structure\");\n+\t\terr = -ENOMEM;\n+\t\tgoto err_txq;\n+\t}\n+\n+\tfree_thresh = conf->tx_free_thresh ? 
conf->tx_free_thresh : GVE_DEFAULT_TX_FREE_THRESH;\n+\tif (free_thresh >= nb_desc - 3) {\n+\t\tPMD_DRV_LOG(ERR, \"tx_free_thresh (%u) must be less than nb_desc (%u) minus 3.\",\n+\t\t\t    free_thresh, txq->nb_tx_desc);\n+\t\terr = -EINVAL;\n+\t\tgoto err_txq;\n+\t}\n+\n+\ttxq->nb_tx_desc = nb_desc;\n+\ttxq->free_thresh = free_thresh;\n+\ttxq->queue_id = queue_id;\n+\ttxq->port_id = dev->data->port_id;\n+\ttxq->ntfy_id = queue_id;\n+\ttxq->is_gqi_qpl = hw->queue_format == GVE_GQI_QPL_FORMAT;\n+\ttxq->hw = hw;\n+\ttxq->ntfy_addr = &hw->db_bar2[rte_be_to_cpu_32(hw->irq_dbs[txq->ntfy_id].id)];\n+\n+\t/* Allocate software ring */\n+\ttxq->sw_ring = rte_zmalloc_socket(\"gve tx sw ring\",\n+\t\t\t\t\t  sizeof(struct rte_mbuf *) * nb_desc,\n+\t\t\t\t\t  RTE_CACHE_LINE_SIZE, socket_id);\n+\tif (!txq->sw_ring) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate memory for SW TX ring\");\n+\t\terr = -ENOMEM;\n+\t\tgoto err_txq;\n+\t}\n+\n+\tmz = rte_eth_dma_zone_reserve(dev, \"tx_ring\", queue_id,\n+\t\t\t\t      nb_desc * sizeof(union gve_tx_desc),\n+\t\t\t\t      PAGE_SIZE, socket_id);\n+\tif (mz == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to reserve DMA memory for TX\");\n+\t\terr = -ENOMEM;\n+\t\tgoto err_sw_ring;\n+\t}\n+\ttxq->tx_desc_ring = (union gve_tx_desc *)mz->addr;\n+\ttxq->tx_ring_phys_addr = mz->iova;\n+\ttxq->mz = mz;\n+\n+\tif (txq->is_gqi_qpl) {\n+\t\ttxq->iov_ring = rte_zmalloc_socket(\"gve tx iov ring\",\n+\t\t\t\t\t\t   sizeof(struct gve_tx_iovec) * nb_desc,\n+\t\t\t\t\t\t   RTE_CACHE_LINE_SIZE, socket_id);\n+\t\tif (!txq->iov_ring) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to allocate memory for SW TX ring\");\n+\t\t\terr = -ENOMEM;\n+\t\t\tgoto err_tx_ring;\n+\t\t}\n+\t\ttxq->qpl = &hw->qpl[queue_id];\n+\t\terr = gve_adminq_register_page_list(hw, txq->qpl);\n+\t\tif (err != 0) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to register qpl %u\", queue_id);\n+\t\t\tgoto err_iov_ring;\n+\t\t}\n+\t}\n+\n+\tmz = rte_eth_dma_zone_reserve(dev, \"txq_res\", queue_id, sizeof(struct gve_queue_resources),\n+\t\t\t\t      PAGE_SIZE, socket_id);\n+\tif (mz == NULL) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to reserve DMA memory for TX resource\");\n+\t\terr = -ENOMEM;\n+\t\tgoto err_iov_ring;\n+\t}\n+\ttxq->qres = (struct gve_queue_resources *)mz->addr;\n+\ttxq->qres_mz = mz;\n+\n+\tgve_reset_txq(txq);\n+\n+\tdev->data->tx_queues[queue_id] = txq;\n+\n+\treturn 0;\n+\n+err_iov_ring:\n+\tif (txq->is_gqi_qpl)\n+\t\trte_free(txq->iov_ring);\n+err_tx_ring:\n+\trte_memzone_free(txq->mz);\n+err_sw_ring:\n+\trte_free(txq->sw_ring);\n+err_txq:\n+\trte_free(txq);\n+\treturn err;\n+}\n+\n+void\n+gve_stop_tx_queues(struct rte_eth_dev *dev)\n+{\n+\tstruct gve_priv *hw = dev->data->dev_private;\n+\tstruct gve_tx_queue *txq;\n+\tuint16_t i;\n+\tint err;\n+\n+\terr = gve_adminq_destroy_tx_queues(hw, dev->data->nb_tx_queues);\n+\tif (err != 0)\n+\t\tPMD_DRV_LOG(WARNING, \"failed to destroy txqs\");\n+\n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n+\t\ttxq = dev->data->tx_queues[i];\n+\t\tgve_release_txq_mbufs(txq);\n+\t\tgve_reset_txq(txq);\n+\t}\n+}\ndiff --git a/drivers/net/gve/meson.build b/drivers/net/gve/meson.build\nindex d8ec64b3a3..af0010c01c 100644\n--- a/drivers/net/gve/meson.build\n+++ b/drivers/net/gve/meson.build\n@@ -9,6 +9,8 @@ endif\n \n sources = files(\n         'base/gve_adminq.c',\n+        'gve_rx.c',\n+        'gve_tx.c',\n         'gve_ethdev.c',\n )\n includes += include_directories('base')\n",
    "prefixes": [
        "v7",
        "7/8"
    ]
}
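
Updates go through PUT or PATCH on the same URL and require an authenticated account with maintainer rights on the project. A minimal sketch, again assuming the "requests" package; the API token and target state below are placeholders, not values taken from this page:

import requests

# Partial update: only the fields named in the request body change.
# State slugs follow the same form as the "state" field above.
resp = requests.patch(
    "http://patchwork.dpdk.org/api/patches/118939/",
    headers={"Authorization": "Token 0123456789abcdef"},  # placeholder token
    json={"state": "accepted"},
)
resp.raise_for_status()
print(resp.json()["state"])

The "mbox" URL in the response can be fetched the same way and piped to git am to apply the patch locally.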