get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied change).

put:
Update a patch (full update).
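
These methods follow the usual REST Framework semantics: GET is anonymous, while PUT and PATCH require an authenticated account with sufficient rights on the project. A minimal sketch using Python's requests library, assuming a personal API token; the token value and the target state slug below are placeholders:

import requests

BASE = "http://patchwork.dpdk.org/api"

# Reading a patch needs no credentials.
patch = requests.get(f"{BASE}/patches/118877/").json()
print(patch["name"], "->", patch["state"])

# Updating a patch needs a token (DRF-style token auth header).
# Both the token and the new state below are placeholder values.
headers = {"Authorization": "Token REPLACE-WITH-YOUR-TOKEN"}
resp = requests.patch(f"{BASE}/patches/118877/",
                      headers=headers,
                      json={"state": "accepted"})  # PATCH changes only the fields sent
resp.raise_for_status()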

GET /api/patches/118877/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 118877,
    "url": "http://patchwork.dpdk.org/api/patches/118877/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20221021051821.2164939-6-junfeng.guo@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221021051821.2164939-6-junfeng.guo@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221021051821.2164939-6-junfeng.guo@intel.com",
    "date": "2022-10-21T05:18:12",
    "name": "[v9,05/14] net/idpf: add support for queue start and stop",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "4dc8efa2784c86bc75b6cba6afbd5746525611c4",
    "submitter": {
        "id": 1785,
        "url": "http://patchwork.dpdk.org/api/people/1785/?format=api",
        "name": "Junfeng Guo",
        "email": "junfeng.guo@intel.com"
    },
    "delegate": {
        "id": 3961,
        "url": "http://patchwork.dpdk.org/api/users/3961/?format=api",
        "username": "arybchenko",
        "first_name": "Andrew",
        "last_name": "Rybchenko",
        "email": "andrew.rybchenko@oktetlabs.ru"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20221021051821.2164939-6-junfeng.guo@intel.com/mbox/",
    "series": [
        {
            "id": 25359,
            "url": "http://patchwork.dpdk.org/api/series/25359/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=25359",
            "date": "2022-10-21T05:18:07",
            "name": "add support for idpf PMD in DPDK",
            "version": 9,
            "mbox": "http://patchwork.dpdk.org/series/25359/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/118877/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/118877/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 5D4CEA0552;\n\tFri, 21 Oct 2022 07:20:33 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 16C1342BAC;\n\tFri, 21 Oct 2022 07:20:06 +0200 (CEST)",
            "from mga07.intel.com (mga07.intel.com [134.134.136.100])\n by mails.dpdk.org (Postfix) with ESMTP id 3DC7542B8F\n for <dev@dpdk.org>; Fri, 21 Oct 2022 07:20:01 +0200 (CEST)",
            "from fmsmga001.fm.intel.com ([10.253.24.23])\n by orsmga105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 20 Oct 2022 22:20:00 -0700",
            "from dpdk-jf-ntb-one.sh.intel.com ([10.67.111.104])\n by fmsmga001.fm.intel.com with ESMTP; 20 Oct 2022 22:19:58 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1666329601; x=1697865601;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=TkQoHEqcJW4r30nHuxBFa1rlBPj1At6uP4K9+xv2pOo=;\n b=dPOgALZ97bmgbMk7u0m5d1KNd7vwMo4os8gTmkigi5E1XsEaVyRoGNf1\n CHCby8NJaeiA9FiHNOYwuqjo5H9YCJOr8TMSu+x2Q4yx8C1H5uf+KKUZ7\n AU6gzldLZ8ZbRV9Z7jcvw+zDij9kZr6aY4DVP5bJnLHLxyKPTyJG9++XV\n lSl/gkplwo4gonntMBJqXs8I/6Rm8KxxUyvMLfSp0dUoCy7VAzbBkBb4V\n BNyYFJKcTRE9VlGpZ70ZhSYmMaeOH7r99gC252tNMab3iDNISgnzWqsyA\n KMJSJL0KnNQDsWCreUvmAHO11QIW4t7u/rXmvzSJ9leJwf+kT+u1mNED5 A==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10506\"; a=\"371128568\"",
            "E=Sophos;i=\"5.95,200,1661842800\"; d=\"scan'208\";a=\"371128568\"",
            "E=McAfee;i=\"6500,9779,10506\"; a=\"772826331\"",
            "E=Sophos;i=\"5.95,200,1661842800\"; d=\"scan'208\";a=\"772826331\""
        ],
        "X-ExtLoop1": "1",
        "From": "Junfeng Guo <junfeng.guo@intel.com>",
        "To": "andrew.rybchenko@oktetlabs.ru, qi.z.zhang@intel.com,\n jingjing.wu@intel.com,\n beilei.xing@intel.com",
        "Cc": "dev@dpdk.org, Junfeng Guo <junfeng.guo@intel.com>,\n Xiaoyun Li <xiaoyun.li@intel.com>",
        "Subject": "[PATCH v9 05/14] net/idpf: add support for queue start and stop",
        "Date": "Fri, 21 Oct 2022 13:18:12 +0800",
        "Message-Id": "<20221021051821.2164939-6-junfeng.guo@intel.com>",
        "X-Mailer": "git-send-email 2.34.1",
        "In-Reply-To": "<20221021051821.2164939-1-junfeng.guo@intel.com>",
        "References": "<20221020062951.645121-2-junfeng.guo@intel.com>\n <20221021051821.2164939-1-junfeng.guo@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Add support for queue operations:\n - rx_queue_start\n - rx_queue_stop\n - tx_queue_start\n - tx_queue_stop\n\nSigned-off-by: Beilei Xing <beilei.xing@intel.com>\nSigned-off-by: Xiaoyun Li <xiaoyun.li@intel.com>\nSigned-off-by: Junfeng Guo <junfeng.guo@intel.com>\n---\n doc/guides/nics/features/idpf.ini |   1 +\n drivers/net/idpf/idpf_ethdev.c    |  46 +++++\n drivers/net/idpf/idpf_ethdev.h    |   3 +\n drivers/net/idpf/idpf_rxtx.c      | 309 ++++++++++++++++++++++++++++++\n drivers/net/idpf/idpf_rxtx.h      |   6 +\n drivers/net/idpf/idpf_vchnl.c     | 150 +++++++++++++++\n 6 files changed, 515 insertions(+)",
    "diff": "diff --git a/doc/guides/nics/features/idpf.ini b/doc/guides/nics/features/idpf.ini\nindex 681a908194..597beec5d9 100644\n--- a/doc/guides/nics/features/idpf.ini\n+++ b/doc/guides/nics/features/idpf.ini\n@@ -7,6 +7,7 @@\n ; is selected.\n ;\n [Features]\n+Queue start/stop     = Y\n Runtime Rx queue setup = Y\n Runtime Tx queue setup = Y\n Multiprocess aware   = Y\ndiff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c\nindex f9717cd5ff..c25f222c5e 100644\n--- a/drivers/net/idpf/idpf_ethdev.c\n+++ b/drivers/net/idpf/idpf_ethdev.c\n@@ -57,6 +57,10 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {\n \t.dev_start\t\t\t= idpf_dev_start,\n \t.dev_stop\t\t\t= idpf_dev_stop,\n \t.dev_close\t\t\t= idpf_dev_close,\n+\t.rx_queue_start\t\t\t= idpf_rx_queue_start,\n+\t.rx_queue_stop\t\t\t= idpf_rx_queue_stop,\n+\t.tx_queue_start\t\t\t= idpf_tx_queue_start,\n+\t.tx_queue_stop\t\t\t= idpf_tx_queue_stop,\n \t.rx_queue_setup\t\t\t= idpf_rx_queue_setup,\n \t.rx_queue_release\t\t= idpf_dev_rx_queue_release,\n \t.tx_queue_setup\t\t\t= idpf_tx_queue_setup,\n@@ -211,6 +215,39 @@ idpf_dev_configure(__rte_unused struct rte_eth_dev *dev)\n \treturn 0;\n }\n \n+static int\n+idpf_start_queues(struct rte_eth_dev *dev)\n+{\n+\tstruct idpf_rx_queue *rxq;\n+\tstruct idpf_tx_queue *txq;\n+\tint err = 0;\n+\tint i;\n+\n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n+\t\ttxq = dev->data->tx_queues[i];\n+\t\tif (!txq || txq->tx_deferred_start)\n+\t\t\tcontinue;\n+\t\terr = idpf_tx_queue_start(dev, i);\n+\t\tif (err) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Fail to start Tx queue %u\", i);\n+\t\t\treturn err;\n+\t\t}\n+\t}\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\trxq = dev->data->rx_queues[i];\n+\t\tif (!rxq || rxq->rx_deferred_start)\n+\t\t\tcontinue;\n+\t\terr = idpf_rx_queue_start(dev, i);\n+\t\tif (err) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Fail to start Rx queue %u\", i);\n+\t\t\treturn err;\n+\t\t}\n+\t}\n+\n+\treturn err;\n+}\n+\n static int\n idpf_dev_start(struct rte_eth_dev *dev)\n {\n@@ -220,6 +257,11 @@ idpf_dev_start(struct rte_eth_dev *dev)\n \n \tvport->stopped = 0;\n \n+\tif (idpf_start_queues(dev)) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to start queues\");\n+\t\tgoto err_mtu;\n+\t}\n+\n \tif (idpf_vc_ena_dis_vport(vport, true)) {\n \t\tPMD_DRV_LOG(ERR, \"Failed to enable vport\");\n \t\tgoto err_vport;\n@@ -228,6 +270,8 @@ idpf_dev_start(struct rte_eth_dev *dev)\n \treturn 0;\n \n err_vport:\n+\tidpf_stop_queues(dev);\n+err_mtu:\n \treturn -1;\n }\n \n@@ -244,6 +288,8 @@ idpf_dev_stop(struct rte_eth_dev *dev)\n \tif (idpf_vc_ena_dis_vport(vport, false))\n \t\tPMD_DRV_LOG(ERR, \"disable vport failed\");\n \n+\tidpf_stop_queues(dev);\n+\n \tvport->stopped = 1;\n \tdev->data->dev_started = 0;\n \ndiff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h\nindex 49d71154aa..9608204e4c 100644\n--- a/drivers/net/idpf/idpf_ethdev.h\n+++ b/drivers/net/idpf/idpf_ethdev.h\n@@ -219,6 +219,9 @@ int idpf_vc_config_rxqs(struct idpf_vport *vport);\n int idpf_vc_config_rxq(struct idpf_vport *vport, uint16_t rxq_id);\n int idpf_vc_config_txqs(struct idpf_vport *vport);\n int idpf_vc_config_txq(struct idpf_vport *vport, uint16_t txq_id);\n+int idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,\n+\t\t      bool rx, bool on);\n+int idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable);\n int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable);\n int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,\n \t\t      uint16_t buf_len, uint8_t 
*buf);\ndiff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c\nindex 82cc1d8f05..95193713c4 100644\n--- a/drivers/net/idpf/idpf_rxtx.c\n+++ b/drivers/net/idpf/idpf_rxtx.c\n@@ -929,6 +929,289 @@ idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\t\t\t\t socket_id, tx_conf);\n }\n \n+static int\n+idpf_alloc_single_rxq_mbufs(struct idpf_rx_queue *rxq)\n+{\n+\tvolatile struct virtchnl2_singleq_rx_buf_desc *rxd;\n+\tstruct rte_mbuf *mbuf = NULL;\n+\tuint64_t dma_addr;\n+\tuint16_t i;\n+\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\tmbuf = rte_mbuf_raw_alloc(rxq->mp);\n+\t\tif (unlikely(!mbuf)) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to allocate mbuf for RX\");\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\trte_mbuf_refcnt_set(mbuf, 1);\n+\t\tmbuf->next = NULL;\n+\t\tmbuf->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\tmbuf->nb_segs = 1;\n+\t\tmbuf->port = rxq->port_id;\n+\n+\t\tdma_addr =\n+\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));\n+\n+\t\trxd = &((volatile struct virtchnl2_singleq_rx_buf_desc *)(rxq->rx_ring))[i];\n+\t\trxd->pkt_addr = dma_addr;\n+\t\trxd->hdr_addr = 0;\n+\t\trxd->rsvd1 = 0;\n+\t\trxd->rsvd2 = 0;\n+\t\trxq->sw_ring[i] = mbuf;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+idpf_alloc_split_rxq_mbufs(struct idpf_rx_queue *rxq)\n+{\n+\tvolatile struct virtchnl2_splitq_rx_buf_desc *rxd;\n+\tstruct rte_mbuf *mbuf = NULL;\n+\tuint64_t dma_addr;\n+\tuint16_t i;\n+\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\tmbuf = rte_mbuf_raw_alloc(rxq->mp);\n+\t\tif (unlikely(!mbuf)) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to allocate mbuf for RX\");\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\trte_mbuf_refcnt_set(mbuf, 1);\n+\t\tmbuf->next = NULL;\n+\t\tmbuf->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\tmbuf->nb_segs = 1;\n+\t\tmbuf->port = rxq->port_id;\n+\n+\t\tdma_addr =\n+\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));\n+\n+\t\trxd = &((volatile struct virtchnl2_splitq_rx_buf_desc *)(rxq->rx_ring))[i];\n+\t\trxd->qword0.buf_id = i;\n+\t\trxd->qword0.rsvd0 = 0;\n+\t\trxd->qword0.rsvd1 = 0;\n+\t\trxd->pkt_addr = dma_addr;\n+\t\trxd->hdr_addr = 0;\n+\t\trxd->rsvd2 = 0;\n+\n+\t\trxq->sw_ring[i] = mbuf;\n+\t}\n+\n+\trxq->nb_rx_hold = 0;\n+\trxq->rx_tail = rxq->nb_rx_desc - 1;\n+\n+\treturn 0;\n+}\n+\n+int\n+idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n+{\n+\tstruct idpf_rx_queue *rxq;\n+\tint err;\n+\n+\tif (rx_queue_id >= dev->data->nb_rx_queues)\n+\t\treturn -EINVAL;\n+\n+\trxq = dev->data->rx_queues[rx_queue_id];\n+\n+\tif (!rxq || !rxq->q_set) {\n+\t\tPMD_DRV_LOG(ERR, \"RX queue %u not available or setup\",\n+\t\t\t\t\trx_queue_id);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (!rxq->bufq1) {\n+\t\t/* Single queue */\n+\t\terr = idpf_alloc_single_rxq_mbufs(rxq);\n+\t\tif (err) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to allocate RX queue mbuf\");\n+\t\t\treturn err;\n+\t\t}\n+\n+\t\trte_wmb();\n+\n+\t\t/* Init the RX tail register. */\n+\t\tIDPF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);\n+\t} else {\n+\t\t/* Split queue */\n+\t\terr = idpf_alloc_split_rxq_mbufs(rxq->bufq1);\n+\t\tif (err) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to allocate RX buffer queue mbuf\");\n+\t\t\treturn err;\n+\t\t}\n+\t\terr = idpf_alloc_split_rxq_mbufs(rxq->bufq2);\n+\t\tif (err) {\n+\t\t\tPMD_DRV_LOG(ERR, \"Failed to allocate RX buffer queue mbuf\");\n+\t\t\treturn err;\n+\t\t}\n+\n+\t\trte_wmb();\n+\n+\t\t/* Init the RX tail register. 
*/\n+\t\tIDPF_PCI_REG_WRITE(rxq->bufq1->qrx_tail, rxq->bufq1->rx_tail);\n+\t\tIDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);\n+\t}\n+\n+\treturn err;\n+}\n+\n+int\n+idpf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_rx_queue *rxq =\n+\t\t(struct idpf_rx_queue *)dev->data->rx_queues[rx_queue_id];\n+\tint err = 0;\n+\n+\tPMD_DRV_FUNC_TRACE();\n+\n+\terr = idpf_vc_config_rxq(vport, rx_queue_id);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Fail to configure Rx queue %u\", rx_queue_id);\n+\t\treturn err;\n+\t}\n+\n+\terr = idpf_rx_queue_init(dev, rx_queue_id);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to init RX queue %u\",\n+\t\t\t    rx_queue_id);\n+\t\treturn err;\n+\t}\n+\n+\t/* Ready to switch the queue on */\n+\terr = idpf_switch_queue(vport, rx_queue_id, true, true);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to switch RX queue %u on\",\n+\t\t\t    rx_queue_id);\n+\t} else {\n+\t\trxq->q_started = true;\n+\t\tdev->data->rx_queue_state[rx_queue_id] =\n+\t\t\tRTE_ETH_QUEUE_STATE_STARTED;\n+\t}\n+\n+\treturn err;\n+}\n+\n+int\n+idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n+{\n+\tstruct idpf_tx_queue *txq;\n+\n+\tif (tx_queue_id >= dev->data->nb_tx_queues)\n+\t\treturn -EINVAL;\n+\n+\ttxq = dev->data->tx_queues[tx_queue_id];\n+\n+\t/* Init the RX tail register. */\n+\tIDPF_PCI_REG_WRITE(txq->qtx_tail, 0);\n+\n+\treturn 0;\n+}\n+\n+int\n+idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_tx_queue *txq =\n+\t\t(struct idpf_tx_queue *)dev->data->tx_queues[tx_queue_id];\n+\tint err = 0;\n+\n+\tPMD_DRV_FUNC_TRACE();\n+\n+\terr = idpf_vc_config_txq(vport, tx_queue_id);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Fail to configure Tx queue %u\", tx_queue_id);\n+\t\treturn err;\n+\t}\n+\n+\terr = idpf_tx_queue_init(dev, tx_queue_id);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to init TX queue %u\",\n+\t\t\t    tx_queue_id);\n+\t\treturn err;\n+\t}\n+\n+\t/* Ready to switch the queue on */\n+\terr = idpf_switch_queue(vport, tx_queue_id, false, true);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to switch TX queue %u on\",\n+\t\t\t    tx_queue_id);\n+\t} else {\n+\t\ttxq->q_started = true;\n+\t\tdev->data->tx_queue_state[tx_queue_id] =\n+\t\t\tRTE_ETH_QUEUE_STATE_STARTED;\n+\t}\n+\n+\treturn err;\n+}\n+\n+int\n+idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_rx_queue *rxq;\n+\tint err;\n+\n+\tPMD_DRV_FUNC_TRACE();\n+\n+\tif (rx_queue_id >= dev->data->nb_rx_queues)\n+\t\treturn -EINVAL;\n+\n+\terr = idpf_switch_queue(vport, rx_queue_id, true, false);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to switch RX queue %u off\",\n+\t\t\t    rx_queue_id);\n+\t\treturn err;\n+\t}\n+\n+\trxq = dev->data->rx_queues[rx_queue_id];\n+\tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n+\t\trxq->ops->release_mbufs(rxq);\n+\t\treset_single_rx_queue(rxq);\n+\t} else {\n+\t\trxq->bufq1->ops->release_mbufs(rxq->bufq1);\n+\t\trxq->bufq2->ops->release_mbufs(rxq->bufq2);\n+\t\treset_split_rx_queue(rxq);\n+\t}\n+\tdev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;\n+\n+\treturn 0;\n+}\n+\n+int\n+idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_tx_queue *txq;\n+\tint 
err;\n+\n+\tPMD_DRV_FUNC_TRACE();\n+\n+\tif (tx_queue_id >= dev->data->nb_tx_queues)\n+\t\treturn -EINVAL;\n+\n+\terr = idpf_switch_queue(vport, tx_queue_id, false, false);\n+\tif (err) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to switch TX queue %u off\",\n+\t\t\t    tx_queue_id);\n+\t\treturn err;\n+\t}\n+\n+\ttxq = dev->data->tx_queues[tx_queue_id];\n+\ttxq->ops->release_mbufs(txq);\n+\tif (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n+\t\treset_single_tx_queue(txq);\n+\t} else {\n+\t\treset_split_tx_descq(txq);\n+\t\treset_split_tx_complq(txq->complq);\n+\t}\n+\tdev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;\n+\n+\treturn 0;\n+}\n+\n void\n idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n@@ -940,3 +1223,29 @@ idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)\n {\n \tidpf_tx_queue_release(dev->data->tx_queues[qid]);\n }\n+\n+void\n+idpf_stop_queues(struct rte_eth_dev *dev)\n+{\n+\tstruct idpf_rx_queue *rxq;\n+\tstruct idpf_tx_queue *txq;\n+\tint i;\n+\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\trxq = dev->data->rx_queues[i];\n+\t\tif (!rxq)\n+\t\t\tcontinue;\n+\n+\t\tif (idpf_rx_queue_stop(dev, i))\n+\t\t\tPMD_DRV_LOG(WARNING, \"Fail to stop Rx queue %d\", i);\n+\t}\n+\n+\tfor (i = 0; i < dev->data->nb_tx_queues; i++) {\n+\t\ttxq = dev->data->tx_queues[i];\n+\t\tif (!txq)\n+\t\t\tcontinue;\n+\n+\t\tif (idpf_tx_queue_stop(dev, i))\n+\t\t\tPMD_DRV_LOG(WARNING, \"Fail to stop Tx queue %d\", i);\n+\t}\n+}\ndiff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h\nindex 69a1fa6348..f0427b96c5 100644\n--- a/drivers/net/idpf/idpf_rxtx.h\n+++ b/drivers/net/idpf/idpf_rxtx.h\n@@ -175,12 +175,18 @@ int idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\tconst struct rte_eth_rxconf *rx_conf,\n \t\t\tstruct rte_mempool *mp);\n int idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n+int idpf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n+int idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n void idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n \n int idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\tuint16_t nb_desc, unsigned int socket_id,\n \t\t\tconst struct rte_eth_txconf *tx_conf);\n int idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n+int idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n+int idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n void idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n \n+void idpf_stop_queues(struct rte_eth_dev *dev);\n+\n #endif /* _IDPF_RXTX_H_ */\ndiff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c\nindex 3578346bce..6b6881872b 100644\n--- a/drivers/net/idpf/idpf_vchnl.c\n+++ b/drivers/net/idpf/idpf_vchnl.c\n@@ -831,6 +831,156 @@ idpf_vc_config_txq(struct idpf_vport *vport, uint16_t txq_id)\n \treturn err;\n }\n \n+static int\n+idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,\n+\t\t\t  uint32_t type, bool on)\n+{\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct virtchnl2_del_ena_dis_queues *queue_select;\n+\tstruct virtchnl2_queue_chunk *queue_chunk;\n+\tstruct idpf_cmd_info args;\n+\tint err, len;\n+\n+\tlen = sizeof(struct virtchnl2_del_ena_dis_queues);\n+\tqueue_select = rte_zmalloc(\"queue_select\", len, 0);\n+\tif (!queue_select)\n+\t\treturn -ENOMEM;\n+\n+\tqueue_chunk = 
queue_select->chunks.chunks;\n+\tqueue_select->chunks.num_chunks = 1;\n+\tqueue_select->vport_id = vport->vport_id;\n+\n+\tqueue_chunk->type = type;\n+\tqueue_chunk->start_queue_id = qid;\n+\tqueue_chunk->num_queues = 1;\n+\n+\targs.ops = on ? VIRTCHNL2_OP_ENABLE_QUEUES :\n+\t\tVIRTCHNL2_OP_DISABLE_QUEUES;\n+\targs.in_args = (u8 *)queue_select;\n+\targs.in_args_size = len;\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\tif (err)\n+\t\tPMD_DRV_LOG(ERR, \"Failed to execute command of VIRTCHNL2_OP_%s_QUEUES\",\n+\t\t\t    on ? \"ENABLE\" : \"DISABLE\");\n+\n+\trte_free(queue_select);\n+\treturn err;\n+}\n+\n+int\n+idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,\n+\t\t     bool rx, bool on)\n+{\n+\tuint32_t type;\n+\tint err, queue_id;\n+\n+\t/* switch txq/rxq */\n+\ttype = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;\n+\n+\tif (type == VIRTCHNL2_QUEUE_TYPE_RX)\n+\t\tqueue_id = vport->chunks_info.rx_start_qid + qid;\n+\telse\n+\t\tqueue_id = vport->chunks_info.tx_start_qid + qid;\n+\terr = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);\n+\tif (err)\n+\t\treturn err;\n+\n+\t/* switch tx completion queue */\n+\tif (!rx && vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {\n+\t\ttype = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;\n+\t\tqueue_id = vport->chunks_info.tx_compl_start_qid + qid;\n+\t\terr = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);\n+\t\tif (err)\n+\t\t\treturn err;\n+\t}\n+\n+\t/* switch rx buffer queue */\n+\tif (rx && vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {\n+\t\ttype = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;\n+\t\tqueue_id = vport->chunks_info.rx_buf_start_qid + 2 * qid;\n+\t\terr = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);\n+\t\tif (err)\n+\t\t\treturn err;\n+\t\tqueue_id++;\n+\t\terr = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);\n+\t\tif (err)\n+\t\t\treturn err;\n+\t}\n+\n+\treturn err;\n+}\n+\n+#define IDPF_RXTX_QUEUE_CHUNKS_NUM\t2\n+int\n+idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable)\n+{\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct virtchnl2_del_ena_dis_queues *queue_select;\n+\tstruct virtchnl2_queue_chunk *queue_chunk;\n+\tuint32_t type;\n+\tstruct idpf_cmd_info args;\n+\tuint16_t num_chunks;\n+\tint err, len;\n+\n+\tnum_chunks = IDPF_RXTX_QUEUE_CHUNKS_NUM;\n+\tif (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)\n+\t\tnum_chunks++;\n+\tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)\n+\t\tnum_chunks++;\n+\n+\tlen = sizeof(struct virtchnl2_del_ena_dis_queues) +\n+\t\tsizeof(struct virtchnl2_queue_chunk) * (num_chunks - 1);\n+\tqueue_select = rte_zmalloc(\"queue_select\", len, 0);\n+\tif (queue_select == NULL)\n+\t\treturn -ENOMEM;\n+\n+\tqueue_chunk = queue_select->chunks.chunks;\n+\tqueue_select->chunks.num_chunks = num_chunks;\n+\tqueue_select->vport_id = vport->vport_id;\n+\n+\ttype = VIRTCHNL_QUEUE_TYPE_RX;\n+\tqueue_chunk[type].type = type;\n+\tqueue_chunk[type].start_queue_id = vport->chunks_info.rx_start_qid;\n+\tqueue_chunk[type].num_queues = vport->num_rx_q;\n+\n+\ttype = VIRTCHNL2_QUEUE_TYPE_TX;\n+\tqueue_chunk[type].type = type;\n+\tqueue_chunk[type].start_queue_id = vport->chunks_info.tx_start_qid;\n+\tqueue_chunk[type].num_queues = vport->num_tx_q;\n+\n+\tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {\n+\t\ttype = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;\n+\t\tqueue_chunk[type].type = type;\n+\t\tqueue_chunk[type].start_queue_id 
=\n+\t\t\tvport->chunks_info.rx_buf_start_qid;\n+\t\tqueue_chunk[type].num_queues = vport->num_rx_bufq;\n+\t}\n+\n+\tif (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {\n+\t\ttype = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;\n+\t\tqueue_chunk[type].type = type;\n+\t\tqueue_chunk[type].start_queue_id =\n+\t\t\tvport->chunks_info.tx_compl_start_qid;\n+\t\tqueue_chunk[type].num_queues = vport->num_tx_complq;\n+\t}\n+\n+\targs.ops = enable ? VIRTCHNL2_OP_ENABLE_QUEUES :\n+\t\tVIRTCHNL2_OP_DISABLE_QUEUES;\n+\targs.in_args = (u8 *)queue_select;\n+\targs.in_args_size = len;\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\tif (err)\n+\t\tPMD_DRV_LOG(ERR, \"Failed to execute command of VIRTCHNL2_OP_%s_QUEUES\",\n+\t\t\t    enable ? \"ENABLE\" : \"DISABLE\");\n+\n+\trte_free(queue_select);\n+\treturn err;\n+}\n+\n int\n idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)\n {\n",
    "prefixes": [
        "v9",
        "05/14"
    ]
}
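
Several fields in the response are designed to be scripted against: "mbox" serves the raw patch email, and "list_archive_url_format" in the project object is a URL template keyed on the bare Message-ID. A hedged sketch in Python (the local checkout path is a placeholder):

import requests
import subprocess

patch = requests.get("http://patchwork.dpdk.org/api/patches/118877/").json()

# Fetch the raw mbox and apply it to a local tree with `git am`
# ("/path/to/dpdk" is a placeholder path).
mbox = requests.get(patch["mbox"]).content
subprocess.run(["git", "-C", "/path/to/dpdk", "am"], input=mbox, check=True)

# Rebuild the list-archive link: substitute the Message-ID, minus
# its angle brackets, into the project's URL template.
msgid = patch["msgid"].strip("<>")
print(patch["project"]["list_archive_url_format"].format(msgid))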