get:
Show a patch.

patch:
Update a patch (partial update; only the fields supplied in the request are changed).

put:
Update a patch (full update).
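
All three operations are served from the per-patch URL shown below, and reads need no authentication. A minimal sketch of the GET using Python's requests library (the ?format=api suffix only selects the browsable HTML renderer; format=json is requested here to get the raw JSON shown below):

import requests

# Fetch the patch as JSON; assumes network access to patchwork.dpdk.org.
resp = requests.get("http://patchwork.dpdk.org/api/patches/124693/",
                    params={"format": "json"})
resp.raise_for_status()
patch = resp.json()
# Fields below appear in the response listing that follows.
print(patch["name"])   # "[v9,03/21] net/cpfl: add Rx queue setup"
print(patch["state"])  # "accepted"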

GET /api/patches/124693/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 124693,
    "url": "http://patchwork.dpdk.org/api/patches/124693/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20230302212057.1114863-4-mingxia.liu@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230302212057.1114863-4-mingxia.liu@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230302212057.1114863-4-mingxia.liu@intel.com",
    "date": "2023-03-02T21:20:39",
    "name": "[v9,03/21] net/cpfl: add Rx queue setup",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "ba5c494046488f0a7ba23ddacd351d60051f8b23",
    "submitter": {
        "id": 2514,
        "url": "http://patchwork.dpdk.org/api/people/2514/?format=api",
        "name": "Liu, Mingxia",
        "email": "mingxia.liu@intel.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patchwork.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20230302212057.1114863-4-mingxia.liu@intel.com/mbox/",
    "series": [
        {
            "id": 27228,
            "url": "http://patchwork.dpdk.org/api/series/27228/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=27228",
            "date": "2023-03-02T21:20:36",
            "name": "add support for cpfl PMD in DPDK",
            "version": 9,
            "mbox": "http://patchwork.dpdk.org/series/27228/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/124693/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/124693/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 47D6341DB5;\n\tThu,  2 Mar 2023 14:06:27 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 979C6427F5;\n\tThu,  2 Mar 2023 14:06:11 +0100 (CET)",
            "from mga02.intel.com (mga02.intel.com [134.134.136.20])\n by mails.dpdk.org (Postfix) with ESMTP id 3627A40E09\n for <dev@dpdk.org>; Thu,  2 Mar 2023 14:06:09 +0100 (CET)",
            "from orsmga001.jf.intel.com ([10.7.209.18])\n by orsmga101.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 02 Mar 2023 05:06:08 -0800",
            "from dpdk-mingxial-ice.sh.intel.com ([10.67.110.191])\n by orsmga001.jf.intel.com with ESMTP; 02 Mar 2023 05:06:07 -0800"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1677762369; x=1709298369;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=kvkDvvEj3DNcAXzq7AzI182QPDCLypat1qsNXogIt6c=;\n b=mpjoD8LFNCL8YvhvAPIJdBrpECJ0z8AVQqOE2pi95EIVIkfZRgWx/uAW\n sIm37Mefmtrom2MG1xtgbun7faI8tKP3sq5l4gqraTmLAzFKIn5WP2ffD\n 7zunecA1tgUNOBgedStTtLtAFUl/cRzT2Y7tTd6xZsrKHe5APtLrRxL0j\n oQizCncipNtELjvOe/rrLzRChm+HVimYKla9+8qDjxKY3ckLgjXY1CIgo\n LblUJDQrw4Zwj164UMHrvcUWTRD/9ApGtLFMBzzAri4aV56Kc80uA4v/h\n 1nPQ7TMRSC6WIS50Lst1b6IGx+KhXyJJpkxbDz/0yUsuK5Dz2WwzdMZWZ w==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10636\"; a=\"322988128\"",
            "E=Sophos;i=\"5.98,227,1673942400\"; d=\"scan'208\";a=\"322988128\"",
            "E=McAfee;i=\"6500,9779,10636\"; a=\"707406369\"",
            "E=Sophos;i=\"5.98,227,1673942400\"; d=\"scan'208\";a=\"707406369\""
        ],
        "X-ExtLoop1": "1",
        "From": "Mingxia Liu <mingxia.liu@intel.com>",
        "To": "dev@dpdk.org,\n\tbeilei.xing@intel.com,\n\tyuying.zhang@intel.com",
        "Cc": "Mingxia Liu <mingxia.liu@intel.com>",
        "Subject": "[PATCH v9 03/21] net/cpfl: add Rx queue setup",
        "Date": "Thu,  2 Mar 2023 21:20:39 +0000",
        "Message-Id": "<20230302212057.1114863-4-mingxia.liu@intel.com>",
        "X-Mailer": "git-send-email 2.34.1",
        "In-Reply-To": "<20230302212057.1114863-1-mingxia.liu@intel.com>",
        "References": "<20230302103527.931071-1-mingxia.liu@intel.com>\n <20230302212057.1114863-1-mingxia.liu@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Add support for rx_queue_setup ops.\n\nThere are two queue modes supported, single queue mode and split\nqueue mode for Rx queue.\n\nFor the single queue model, the descriptor RX queue is used by SW\nto post buffer descriptors to HW, and it's also used by HW to post\ncompleted descriptors to SW.\n\nFor the split queue model, \"RX buffer queues\" are used to pass\ndescriptor buffers from SW to HW, while RX queues are used only to\npass the descriptor completions from HW to SW.\n\nSigned-off-by: Mingxia Liu <mingxia.liu@intel.com>\n---\n drivers/net/cpfl/cpfl_ethdev.c |  11 ++\n drivers/net/cpfl/cpfl_rxtx.c   | 232 +++++++++++++++++++++++++++++++++\n drivers/net/cpfl/cpfl_rxtx.h   |   6 +\n 3 files changed, 249 insertions(+)",
    "diff": "diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c\nindex c26ff57730..ae011da76f 100644\n--- a/drivers/net/cpfl/cpfl_ethdev.c\n+++ b/drivers/net/cpfl/cpfl_ethdev.c\n@@ -99,12 +99,22 @@ cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \t\t.tx_rs_thresh = CPFL_DEFAULT_TX_RS_THRESH,\n \t};\n \n+\tdev_info->default_rxconf = (struct rte_eth_rxconf) {\n+\t\t.rx_free_thresh = CPFL_DEFAULT_RX_FREE_THRESH,\n+\t};\n+\n \tdev_info->tx_desc_lim = (struct rte_eth_desc_lim) {\n \t\t.nb_max = CPFL_MAX_RING_DESC,\n \t\t.nb_min = CPFL_MIN_RING_DESC,\n \t\t.nb_align = CPFL_ALIGN_RING_DESC,\n \t};\n \n+\tdev_info->rx_desc_lim = (struct rte_eth_desc_lim) {\n+\t\t.nb_max = CPFL_MAX_RING_DESC,\n+\t\t.nb_min = CPFL_MIN_RING_DESC,\n+\t\t.nb_align = CPFL_ALIGN_RING_DESC,\n+\t};\n+\n \treturn 0;\n }\n \n@@ -191,6 +201,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)\n static const struct eth_dev_ops cpfl_eth_dev_ops = {\n \t.dev_configure\t\t\t= cpfl_dev_configure,\n \t.dev_close\t\t\t= cpfl_dev_close,\n+\t.rx_queue_setup\t\t\t= cpfl_rx_queue_setup,\n \t.tx_queue_setup\t\t\t= cpfl_tx_queue_setup,\n \t.dev_infos_get\t\t\t= cpfl_dev_info_get,\n \t.link_update\t\t\t= cpfl_dev_link_update,\ndiff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c\nindex 737d069ec2..930d725a4a 100644\n--- a/drivers/net/cpfl/cpfl_rxtx.c\n+++ b/drivers/net/cpfl/cpfl_rxtx.c\n@@ -9,6 +9,25 @@\n #include \"cpfl_ethdev.h\"\n #include \"cpfl_rxtx.h\"\n \n+static uint64_t\n+cpfl_rx_offload_convert(uint64_t offload)\n+{\n+\tuint64_t ol = 0;\n+\n+\tif ((offload & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) != 0)\n+\t\tol |= IDPF_RX_OFFLOAD_IPV4_CKSUM;\n+\tif ((offload & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) != 0)\n+\t\tol |= IDPF_RX_OFFLOAD_UDP_CKSUM;\n+\tif ((offload & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) != 0)\n+\t\tol |= IDPF_RX_OFFLOAD_TCP_CKSUM;\n+\tif ((offload & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)\n+\t\tol |= IDPF_RX_OFFLOAD_OUTER_IPV4_CKSUM;\n+\tif ((offload & RTE_ETH_RX_OFFLOAD_TIMESTAMP) != 0)\n+\t\tol |= IDPF_RX_OFFLOAD_TIMESTAMP;\n+\n+\treturn ol;\n+}\n+\n static uint64_t\n cpfl_tx_offload_convert(uint64_t offload)\n {\n@@ -94,6 +113,219 @@ cpfl_dma_zone_release(const struct rte_memzone *mz)\n \trte_memzone_free(mz);\n }\n \n+static int\n+cpfl_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,\n+\t\t\t uint16_t queue_idx, uint16_t rx_free_thresh,\n+\t\t\t uint16_t nb_desc, unsigned int socket_id,\n+\t\t\t struct rte_mempool *mp, uint8_t bufq_id)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *base = vport->adapter;\n+\tstruct idpf_hw *hw = &base->hw;\n+\tconst struct rte_memzone *mz;\n+\tstruct idpf_rx_queue *bufq;\n+\tuint16_t len;\n+\tint ret;\n+\n+\tbufq = rte_zmalloc_socket(\"cpfl bufq\",\n+\t\t\t\t   sizeof(struct idpf_rx_queue),\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (bufq == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx buffer queue.\");\n+\t\tret = -ENOMEM;\n+\t\tgoto err_bufq1_alloc;\n+\t}\n+\n+\tbufq->mp = mp;\n+\tbufq->nb_rx_desc = nb_desc;\n+\tbufq->rx_free_thresh = rx_free_thresh;\n+\tbufq->queue_id = vport->chunks_info.rx_buf_start_qid + queue_idx;\n+\tbufq->port_id = dev->data->port_id;\n+\tbufq->rx_hdr_len = 0;\n+\tbufq->adapter = base;\n+\n+\tlen = rte_pktmbuf_data_room_size(bufq->mp) - RTE_PKTMBUF_HEADROOM;\n+\tbufq->rx_buf_len = len;\n+\n+\t/* Allocate a little more to support bulk allocate. 
*/\n+\tlen = nb_desc + IDPF_RX_MAX_BURST;\n+\n+\tmz = cpfl_dma_zone_reserve(dev, queue_idx, len,\n+\t\t\t\t   VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,\n+\t\t\t\t   socket_id, true);\n+\tif (mz == NULL) {\n+\t\tret = -ENOMEM;\n+\t\tgoto err_mz_reserve;\n+\t}\n+\n+\tbufq->rx_ring_phys_addr = mz->iova;\n+\tbufq->rx_ring = mz->addr;\n+\tbufq->mz = mz;\n+\n+\tbufq->sw_ring =\n+\t\trte_zmalloc_socket(\"cpfl rx bufq sw ring\",\n+\t\t\t\t   sizeof(struct rte_mbuf *) * len,\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   socket_id);\n+\tif (bufq->sw_ring == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW ring\");\n+\t\tret = -ENOMEM;\n+\t\tgoto err_sw_ring_alloc;\n+\t}\n+\n+\tidpf_qc_split_rx_bufq_reset(bufq);\n+\tbufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +\n+\t\t\t queue_idx * vport->chunks_info.rx_buf_qtail_spacing);\n+\tbufq->q_set = true;\n+\n+\tif (bufq_id == IDPF_RX_SPLIT_BUFQ1_ID) {\n+\t\trxq->bufq1 = bufq;\n+\t} else if (bufq_id == IDPF_RX_SPLIT_BUFQ2_ID) {\n+\t\trxq->bufq2 = bufq;\n+\t} else {\n+\t\tPMD_INIT_LOG(ERR, \"Invalid buffer queue index.\");\n+\t\tret = -EINVAL;\n+\t\tgoto err_bufq_id;\n+\t}\n+\n+\treturn 0;\n+\n+err_bufq_id:\n+\trte_free(bufq->sw_ring);\n+err_sw_ring_alloc:\n+\tcpfl_dma_zone_release(mz);\n+err_mz_reserve:\n+\trte_free(bufq);\n+err_bufq1_alloc:\n+\treturn ret;\n+}\n+\n+static void\n+cpfl_rx_split_bufq_release(struct idpf_rx_queue *bufq)\n+{\n+\trte_free(bufq->sw_ring);\n+\tcpfl_dma_zone_release(bufq->mz);\n+\trte_free(bufq);\n+}\n+\n+int\n+cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t    uint16_t nb_desc, unsigned int socket_id,\n+\t\t    const struct rte_eth_rxconf *rx_conf,\n+\t\t    struct rte_mempool *mp)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *base = vport->adapter;\n+\tstruct idpf_hw *hw = &base->hw;\n+\tconst struct rte_memzone *mz;\n+\tstruct idpf_rx_queue *rxq;\n+\tuint16_t rx_free_thresh;\n+\tuint64_t offloads;\n+\tbool is_splitq;\n+\tuint16_t len;\n+\tint ret;\n+\n+\toffloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;\n+\n+\t/* Check free threshold */\n+\trx_free_thresh = (rx_conf->rx_free_thresh == 0) ?\n+\t\tCPFL_DEFAULT_RX_FREE_THRESH :\n+\t\trx_conf->rx_free_thresh;\n+\tif (idpf_qc_rx_thresh_check(nb_desc, rx_free_thresh) != 0)\n+\t\treturn -EINVAL;\n+\n+\t/* Setup Rx queue */\n+\trxq = rte_zmalloc_socket(\"cpfl rxq\",\n+\t\t\t\t sizeof(struct idpf_rx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t socket_id);\n+\tif (rxq == NULL) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx queue data structure\");\n+\t\tret = -ENOMEM;\n+\t\tgoto err_rxq_alloc;\n+\t}\n+\n+\tis_splitq = !!(vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);\n+\n+\trxq->mp = mp;\n+\trxq->nb_rx_desc = nb_desc;\n+\trxq->rx_free_thresh = rx_free_thresh;\n+\trxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;\n+\trxq->port_id = dev->data->port_id;\n+\trxq->rx_deferred_start = rx_conf->rx_deferred_start;\n+\trxq->rx_hdr_len = 0;\n+\trxq->adapter = base;\n+\trxq->offloads = cpfl_rx_offload_convert(offloads);\n+\n+\tlen = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;\n+\trxq->rx_buf_len = len;\n+\n+\t/* Allocate a little more to support bulk allocate. 
*/\n+\tlen = nb_desc + IDPF_RX_MAX_BURST;\n+\tmz = cpfl_dma_zone_reserve(dev, queue_idx, len, VIRTCHNL2_QUEUE_TYPE_RX,\n+\t\t\t\t   socket_id, is_splitq);\n+\tif (mz == NULL) {\n+\t\tret = -ENOMEM;\n+\t\tgoto err_mz_reserve;\n+\t}\n+\trxq->rx_ring_phys_addr = mz->iova;\n+\trxq->rx_ring = mz->addr;\n+\trxq->mz = mz;\n+\n+\tif (!is_splitq) {\n+\t\trxq->sw_ring = rte_zmalloc_socket(\"cpfl rxq sw ring\",\n+\t\t\t\t\t\t  sizeof(struct rte_mbuf *) * len,\n+\t\t\t\t\t\t  RTE_CACHE_LINE_SIZE,\n+\t\t\t\t\t\t  socket_id);\n+\t\tif (rxq->sw_ring == NULL) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW ring\");\n+\t\t\tret = -ENOMEM;\n+\t\t\tgoto err_sw_ring_alloc;\n+\t\t}\n+\n+\t\tidpf_qc_single_rx_queue_reset(rxq);\n+\t\trxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +\n+\t\t\t\tqueue_idx * vport->chunks_info.rx_qtail_spacing);\n+\t} else {\n+\t\tidpf_qc_split_rx_descq_reset(rxq);\n+\n+\t\t/* Setup Rx buffer queues */\n+\t\tret = cpfl_rx_split_bufq_setup(dev, rxq, 2 * queue_idx,\n+\t\t\t\t\t       rx_free_thresh, nb_desc,\n+\t\t\t\t\t       socket_id, mp, 1);\n+\t\tif (ret != 0) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to setup buffer queue 1\");\n+\t\t\tret = -EINVAL;\n+\t\t\tgoto err_bufq1_setup;\n+\t\t}\n+\n+\t\tret = cpfl_rx_split_bufq_setup(dev, rxq, 2 * queue_idx + 1,\n+\t\t\t\t\t       rx_free_thresh, nb_desc,\n+\t\t\t\t\t       socket_id, mp, 2);\n+\t\tif (ret != 0) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to setup buffer queue 2\");\n+\t\t\tret = -EINVAL;\n+\t\t\tgoto err_bufq2_setup;\n+\t\t}\n+\t}\n+\n+\trxq->q_set = true;\n+\tdev->data->rx_queues[queue_idx] = rxq;\n+\n+\treturn 0;\n+\n+err_bufq2_setup:\n+\tcpfl_rx_split_bufq_release(rxq->bufq1);\n+err_bufq1_setup:\n+err_sw_ring_alloc:\n+\tcpfl_dma_zone_release(mz);\n+err_mz_reserve:\n+\trte_free(rxq);\n+err_rxq_alloc:\n+\treturn ret;\n+}\n+\n static int\n cpfl_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,\n \t\t     uint16_t queue_idx, uint16_t nb_desc,\ndiff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h\nindex 232630c5e9..e0221abfa3 100644\n--- a/drivers/net/cpfl/cpfl_rxtx.h\n+++ b/drivers/net/cpfl/cpfl_rxtx.h\n@@ -16,10 +16,16 @@\n /* Base address of the HW descriptor ring should be 128B aligned. */\n #define CPFL_RING_BASE_ALIGN\t128\n \n+#define CPFL_DEFAULT_RX_FREE_THRESH\t32\n+\n #define CPFL_DEFAULT_TX_RS_THRESH\t32\n #define CPFL_DEFAULT_TX_FREE_THRESH\t32\n \n int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n \t\t\tuint16_t nb_desc, unsigned int socket_id,\n \t\t\tconst struct rte_eth_txconf *tx_conf);\n+int cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\tuint16_t nb_desc, unsigned int socket_id,\n+\t\t\tconst struct rte_eth_rxconf *rx_conf,\n+\t\t\tstruct rte_mempool *mp);\n #endif /* _CPFL_RXTX_H_ */\n",
    "prefixes": [
        "v9",
        "03/21"
    ]
}
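
Updates via PATCH or PUT require an authenticated account with sufficient rights on the project; Patchwork accepts an API token in the Authorization header. A minimal sketch of a partial update with Python's requests library, assuming a hypothetical token (the fields written here, state and archived, both appear in the listing above; the set of accepted state names is instance-specific):

import requests

TOKEN = "0123456789abcdef"  # hypothetical placeholder, not a real token

# PATCH changes only the fields named in the request body, matching the
# partial-update semantics above; PUT would perform a full update instead.
resp = requests.patch(
    "http://patchwork.dpdk.org/api/patches/124693/",
    headers={"Authorization": "Token " + TOKEN},
    json={"state": "accepted", "archived": True},
)
resp.raise_for_status()
updated = resp.json()
print(updated["state"], updated["archived"])  # "accepted" True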