get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/104099/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 104099,
    "url": "http://patchwork.dpdk.org/api/patches/104099/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20211110074829.16654-4-apeksha.gupta@nxp.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211110074829.16654-4-apeksha.gupta@nxp.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211110074829.16654-4-apeksha.gupta@nxp.com",
    "date": "2021-11-10T07:48:27",
    "name": "[v9,3/5] net/enetfec: support queue configuration",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "21c2820e67949b767a093ae7713bdc138c60c6e3",
    "submitter": {
        "id": 1570,
        "url": "http://patchwork.dpdk.org/api/people/1570/?format=api",
        "name": "Apeksha Gupta",
        "email": "apeksha.gupta@nxp.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patchwork.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20211110074829.16654-4-apeksha.gupta@nxp.com/mbox/",
    "series": [
        {
            "id": 20445,
            "url": "http://patchwork.dpdk.org/api/series/20445/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=20445",
            "date": "2021-11-10T07:48:24",
            "name": "drivers/net: add NXP ENETFEC driver",
            "version": 9,
            "mbox": "http://patchwork.dpdk.org/series/20445/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/104099/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/104099/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 31D43A034F;\n\tWed, 10 Nov 2021 08:48:50 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 965AC41121;\n\tWed, 10 Nov 2021 08:48:39 +0100 (CET)",
            "from inva020.nxp.com (inva020.nxp.com [92.121.34.13])\n by mails.dpdk.org (Postfix) with ESMTP id 948F0410EF\n for <dev@dpdk.org>; Wed, 10 Nov 2021 08:48:35 +0100 (CET)",
            "from inva020.nxp.com (localhost [127.0.0.1])\n by inva020.eu-rdc02.nxp.com (Postfix) with ESMTP id 709041A0E93;\n Wed, 10 Nov 2021 08:48:35 +0100 (CET)",
            "from aprdc01srsp001v.ap-rdc01.nxp.com\n (aprdc01srsp001v.ap-rdc01.nxp.com [165.114.16.16])\n by inva020.eu-rdc02.nxp.com (Postfix) with ESMTP id 0CE761A0E81;\n Wed, 10 Nov 2021 08:48:35 +0100 (CET)",
            "from lsv03186.swis.in-blr01.nxp.com (lsv03186.swis.in-blr01.nxp.com\n [92.120.146.182])\n by aprdc01srsp001v.ap-rdc01.nxp.com (Postfix) with ESMTP id 07878183ACCB;\n Wed, 10 Nov 2021 15:48:33 +0800 (+08)"
        ],
        "From": "Apeksha Gupta <apeksha.gupta@nxp.com>",
        "To": "ferruh.yigit@intel.com, david.marchand@redhat.com,\n andrew.rybchenko@oktetlabs.ru",
        "Cc": "dev@dpdk.org, sachin.saxena@nxp.com, hemant.agrawal@nxp.com,\n Apeksha Gupta <apeksha.gupta@nxp.com>",
        "Date": "Wed, 10 Nov 2021 13:18:27 +0530",
        "Message-Id": "<20211110074829.16654-4-apeksha.gupta@nxp.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20211110074829.16654-1-apeksha.gupta@nxp.com>",
        "References": "<20211109113432.11876-6-apeksha.gupta@nxp.com>\n <20211110074829.16654-1-apeksha.gupta@nxp.com>",
        "X-Virus-Scanned": "ClamAV using ClamSMTP",
        "Subject": "[dpdk-dev] [PATCH v9 3/5] net/enetfec: support queue configuration",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch adds Rx/Tx queue configuration setup operations.\nOn packet reception the respective BD Ring status bit is set\nwhich is then used for packet processing.\n\nSigned-off-by: Sachin Saxena <sachin.saxena@nxp.com>\nSigned-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>\n---\n drivers/net/enetfec/enet_ethdev.c | 222 +++++++++++++++++++++++++++++-\n drivers/net/enetfec/enet_ethdev.h |  74 ++++++++++\n 2 files changed, 295 insertions(+), 1 deletion(-)",
    "diff": "diff --git a/drivers/net/enetfec/enet_ethdev.c b/drivers/net/enetfec/enet_ethdev.c\nindex fe6b5e539f..f70489ff91 100644\n--- a/drivers/net/enetfec/enet_ethdev.c\n+++ b/drivers/net/enetfec/enet_ethdev.c\n@@ -41,6 +41,11 @@\n \n #define NUM_OF_BD_QUEUES\t\t6\n \n+/* Supported Rx offloads */\n+static uint64_t dev_rx_offloads_sup =\n+\t\tRTE_ETH_RX_OFFLOAD_CHECKSUM |\n+\t\tRTE_ETH_RX_OFFLOAD_VLAN;\n+\n /*\n  * This function is called to start or restart the ENETFEC during a link\n  * change, transmit timeout, or to reconfigure the ENETFEC. The network\n@@ -186,10 +191,225 @@ enetfec_eth_stop(struct rte_eth_dev *dev)\n \treturn 0;\n }\n \n+static int\n+enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,\n+\tstruct rte_eth_dev_info *dev_info)\n+{\n+\tdev_info->max_rx_pktlen = ENETFEC_MAX_RX_PKT_LEN;\n+\tdev_info->max_rx_queues = ENETFEC_MAX_Q;\n+\tdev_info->max_tx_queues = ENETFEC_MAX_Q;\n+\tdev_info->rx_offload_capa = dev_rx_offloads_sup;\n+\treturn 0;\n+}\n+\n+static const unsigned short offset_des_active_rxq[] = {\n+\tENETFEC_RDAR_0, ENETFEC_RDAR_1, ENETFEC_RDAR_2\n+};\n+\n+static const unsigned short offset_des_active_txq[] = {\n+\tENETFEC_TDAR_0, ENETFEC_TDAR_1, ENETFEC_TDAR_2\n+};\n+\n+static int\n+enetfec_tx_queue_setup(struct rte_eth_dev *dev,\n+\t\t\tuint16_t queue_idx,\n+\t\t\tuint16_t nb_desc,\n+\t\t\tunsigned int socket_id __rte_unused,\n+\t\t\tconst struct rte_eth_txconf *tx_conf)\n+{\n+\tstruct enetfec_private *fep = dev->data->dev_private;\n+\tunsigned int i;\n+\tstruct bufdesc *bdp, *bd_base;\n+\tstruct enetfec_priv_tx_q *txq;\n+\tunsigned int size;\n+\tunsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :\n+\t\tsizeof(struct bufdesc);\n+\tunsigned int dsize_log2 = fls64(dsize);\n+\n+\t/* Tx deferred start is not supported */\n+\tif (tx_conf->tx_deferred_start) {\n+\t\tENETFEC_PMD_ERR(\"%p:Tx deferred start not supported\",\n+\t\t\t(void *)dev);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* allocate transmit queue */\n+\ttxq = rte_zmalloc(NULL, sizeof(*txq), RTE_CACHE_LINE_SIZE);\n+\tif (txq == NULL) {\n+\t\tENETFEC_PMD_ERR(\"transmit queue allocation failed\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tif (nb_desc > MAX_TX_BD_RING_SIZE) {\n+\t\tnb_desc = MAX_TX_BD_RING_SIZE;\n+\t\tENETFEC_PMD_WARN(\"modified the nb_desc to MAX_TX_BD_RING_SIZE\");\n+\t}\n+\ttxq->bd.ring_size = nb_desc;\n+\tfep->total_tx_ring_size += txq->bd.ring_size;\n+\tfep->tx_queues[queue_idx] = txq;\n+\n+\trte_write32(rte_cpu_to_le_32(fep->bd_addr_p_t[queue_idx]),\n+\t\t(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TD_START(queue_idx));\n+\n+\t/* Set transmit descriptor base. */\n+\ttxq = fep->tx_queues[queue_idx];\n+\ttxq->fep = fep;\n+\tsize = dsize * txq->bd.ring_size;\n+\tbd_base = (struct bufdesc *)fep->dma_baseaddr_t[queue_idx];\n+\ttxq->bd.queue_id = queue_idx;\n+\ttxq->bd.base = bd_base;\n+\ttxq->bd.cur = bd_base;\n+\ttxq->bd.d_size = dsize;\n+\ttxq->bd.d_size_log2 = dsize_log2;\n+\ttxq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +\n+\t\t\toffset_des_active_txq[queue_idx];\n+\tbd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);\n+\ttxq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);\n+\tbdp = txq->bd.base;\n+\tbdp = txq->bd.cur;\n+\n+\tfor (i = 0; i < txq->bd.ring_size; i++) {\n+\t\t/* Initialize the BD for every fragment in the page. */\n+\t\trte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);\n+\t\tif (txq->tx_mbuf[i] != NULL) {\n+\t\t\trte_pktmbuf_free(txq->tx_mbuf[i]);\n+\t\t\ttxq->tx_mbuf[i] = NULL;\n+\t\t}\n+\t\trte_write32(0, &bdp->bd_bufaddr);\n+\t\tbdp = enet_get_nextdesc(bdp, &txq->bd);\n+\t}\n+\n+\t/* Set the last buffer to wrap */\n+\tbdp = enet_get_prevdesc(bdp, &txq->bd);\n+\trte_write16((rte_cpu_to_le_16(TX_BD_WRAP) |\n+\t\trte_read16(&bdp->bd_sc)), &bdp->bd_sc);\n+\ttxq->dirty_tx = bdp;\n+\tdev->data->tx_queues[queue_idx] = fep->tx_queues[queue_idx];\n+\treturn 0;\n+}\n+\n+static int\n+enetfec_rx_queue_setup(struct rte_eth_dev *dev,\n+\t\t\tuint16_t queue_idx,\n+\t\t\tuint16_t nb_rx_desc,\n+\t\t\tunsigned int socket_id __rte_unused,\n+\t\t\tconst struct rte_eth_rxconf *rx_conf,\n+\t\t\tstruct rte_mempool *mb_pool)\n+{\n+\tstruct enetfec_private *fep = dev->data->dev_private;\n+\tunsigned int i;\n+\tstruct bufdesc *bd_base;\n+\tstruct bufdesc *bdp;\n+\tstruct enetfec_priv_rx_q *rxq;\n+\tunsigned int size;\n+\tunsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :\n+\t\t\tsizeof(struct bufdesc);\n+\tunsigned int dsize_log2 = fls64(dsize);\n+\n+\t/* Rx deferred start is not supported */\n+\tif (rx_conf->rx_deferred_start) {\n+\t\tENETFEC_PMD_ERR(\"%p:Rx deferred start not supported\",\n+\t\t\t(void *)dev);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* allocate receive queue */\n+\trxq = rte_zmalloc(NULL, sizeof(*rxq), RTE_CACHE_LINE_SIZE);\n+\tif (rxq == NULL) {\n+\t\tENETFEC_PMD_ERR(\"receive queue allocation failed\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tif (nb_rx_desc > MAX_RX_BD_RING_SIZE) {\n+\t\tnb_rx_desc = MAX_RX_BD_RING_SIZE;\n+\t\tENETFEC_PMD_WARN(\"modified the nb_desc to MAX_RX_BD_RING_SIZE\");\n+\t}\n+\n+\trxq->bd.ring_size = nb_rx_desc;\n+\tfep->total_rx_ring_size += rxq->bd.ring_size;\n+\tfep->rx_queues[queue_idx] = rxq;\n+\n+\trte_write32(rte_cpu_to_le_32(fep->bd_addr_p_r[queue_idx]),\n+\t\t(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RD_START(queue_idx));\n+\trte_write32(rte_cpu_to_le_32(PKT_MAX_BUF_SIZE),\n+\t\t(uint8_t *)fep->hw_baseaddr_v + ENETFEC_MRB_SIZE(queue_idx));\n+\n+\t/* Set receive descriptor base. */\n+\trxq = fep->rx_queues[queue_idx];\n+\trxq->pool = mb_pool;\n+\tsize = dsize * rxq->bd.ring_size;\n+\tbd_base = (struct bufdesc *)fep->dma_baseaddr_r[queue_idx];\n+\trxq->bd.queue_id = queue_idx;\n+\trxq->bd.base = bd_base;\n+\trxq->bd.cur = bd_base;\n+\trxq->bd.d_size = dsize;\n+\trxq->bd.d_size_log2 = dsize_log2;\n+\trxq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +\n+\t\t\toffset_des_active_rxq[queue_idx];\n+\tbd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);\n+\trxq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);\n+\n+\trxq->fep = fep;\n+\tbdp = rxq->bd.base;\n+\trxq->bd.cur = bdp;\n+\n+\tfor (i = 0; i < nb_rx_desc; i++) {\n+\t\t/* Initialize Rx buffers from pktmbuf pool */\n+\t\tstruct rte_mbuf *mbuf = rte_pktmbuf_alloc(mb_pool);\n+\t\tif (mbuf == NULL) {\n+\t\t\tENETFEC_PMD_ERR(\"mbuf failed\");\n+\t\t\tgoto err_alloc;\n+\t\t}\n+\n+\t\t/* Get the virtual address & physical address */\n+\t\trte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),\n+\t\t\t&bdp->bd_bufaddr);\n+\n+\t\trxq->rx_mbuf[i] = mbuf;\n+\t\trte_write16(rte_cpu_to_le_16(RX_BD_EMPTY), &bdp->bd_sc);\n+\n+\t\tbdp = enet_get_nextdesc(bdp, &rxq->bd);\n+\t}\n+\n+\t/* Initialize the receive buffer descriptors. */\n+\tbdp = rxq->bd.cur;\n+\tfor (i = 0; i < rxq->bd.ring_size; i++) {\n+\t\t/* Initialize the BD for every fragment in the page. */\n+\t\tif (rte_read32(&bdp->bd_bufaddr) > 0)\n+\t\t\trte_write16(rte_cpu_to_le_16(RX_BD_EMPTY),\n+\t\t\t\t&bdp->bd_sc);\n+\t\telse\n+\t\t\trte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);\n+\n+\t\tbdp = enet_get_nextdesc(bdp, &rxq->bd);\n+\t}\n+\n+\t/* Set the last buffer to wrap */\n+\tbdp = enet_get_prevdesc(bdp, &rxq->bd);\n+\trte_write16((rte_cpu_to_le_16(RX_BD_WRAP) |\n+\t\trte_read16(&bdp->bd_sc)),  &bdp->bd_sc);\n+\tdev->data->rx_queues[queue_idx] = fep->rx_queues[queue_idx];\n+\trte_write32(0, fep->rx_queues[queue_idx]->bd.active_reg_desc);\n+\treturn 0;\n+\n+err_alloc:\n+\tfor (i = 0; i < nb_rx_desc; i++) {\n+\t\tif (rxq->rx_mbuf[i] != NULL) {\n+\t\t\trte_pktmbuf_free(rxq->rx_mbuf[i]);\n+\t\t\trxq->rx_mbuf[i] = NULL;\n+\t\t}\n+\t}\n+\trte_free(rxq);\n+\treturn errno;\n+}\n+\n static const struct eth_dev_ops enetfec_ops = {\n \t.dev_configure          = enetfec_eth_configure,\n \t.dev_start              = enetfec_eth_start,\n-\t.dev_stop               = enetfec_eth_stop\n+\t.dev_stop               = enetfec_eth_stop,\n+\t.dev_infos_get          = enetfec_eth_info,\n+\t.rx_queue_setup         = enetfec_rx_queue_setup,\n+\t.tx_queue_setup         = enetfec_tx_queue_setup\n };\n \n static int\ndiff --git a/drivers/net/enetfec/enet_ethdev.h b/drivers/net/enetfec/enet_ethdev.h\nindex 0f0684ab11..babc7190fb 100644\n--- a/drivers/net/enetfec/enet_ethdev.h\n+++ b/drivers/net/enetfec/enet_ethdev.h\n@@ -10,8 +10,13 @@\n /* full duplex */\n #define FULL_DUPLEX             0x00\n \n+#define MAX_TX_BD_RING_SIZE\t512\t/* It should be power of 2 */\n+#define MAX_RX_BD_RING_SIZE\t512\n #define PKT_MAX_BUF_SIZE        1984\n #define OPT_FRAME_SIZE         (PKT_MAX_BUF_SIZE << 16)\n+#define ENETFEC_MAX_RX_PKT_LEN  3000\n+\n+#define __iomem\n \n /*\n  * ENETFEC can support 1 rx and tx queue..\n@@ -22,6 +27,49 @@\n #define writel(v, p) ({*(volatile unsigned int *)(p) = (v); })\n #define readl(p) rte_read32(p)\n \n+struct bufdesc {\n+\tuint16_t\t\tbd_datlen;  /* buffer data length */\n+\tuint16_t\t\tbd_sc;      /* buffer control & status */\n+\tuint32_t\t\tbd_bufaddr; /* buffer address */\n+};\n+\n+struct bufdesc_ex {\n+\tstruct\t\t\tbufdesc desc;\n+\tuint32_t\t\tbd_esc;\n+\tuint32_t\t\tbd_prot;\n+\tuint32_t\t\tbd_bdu;\n+\tuint32_t\t\tts;\n+\tuint16_t\t\tres0[4];\n+};\n+\n+struct bufdesc_prop {\n+\tint\t\t\tqueue_id;\n+\t/* Addresses of Tx and Rx buffers */\n+\tstruct bufdesc\t\t*base;\n+\tstruct bufdesc\t\t*last;\n+\tstruct bufdesc\t\t*cur;\n+\tvoid __iomem\t\t*active_reg_desc;\n+\tuint64_t\t\tdescr_baseaddr_p;\n+\tunsigned short\t\tring_size;\n+\tunsigned char\t\td_size;\n+\tunsigned char\t\td_size_log2;\n+};\n+\n+struct enetfec_priv_tx_q {\n+\tstruct bufdesc_prop\tbd;\n+\tstruct rte_mbuf\t\t*tx_mbuf[MAX_TX_BD_RING_SIZE];\n+\tstruct bufdesc\t\t*dirty_tx;\n+\tstruct rte_mempool\t*pool;\n+\tstruct enetfec_private\t*fep;\n+};\n+\n+struct enetfec_priv_rx_q {\n+\tstruct bufdesc_prop\tbd;\n+\tstruct rte_mbuf\t\t*rx_mbuf[MAX_RX_BD_RING_SIZE];\n+\tstruct rte_mempool\t*pool;\n+\tstruct enetfec_private\t*fep;\n+};\n+\n struct enetfec_private {\n \tstruct rte_eth_dev\t*dev;\n \tstruct rte_eth_stats\tstats;\n@@ -54,4 +102,30 @@ struct enetfec_private {\n \tstruct enetfec_priv_tx_q *tx_queues[ENETFEC_MAX_Q];\n };\n \n+static inline struct\n+bufdesc *enet_get_nextdesc(struct bufdesc *bdp, struct bufdesc_prop *bd)\n+{\n+\treturn (bdp >= bd->last) ? bd->base\n+\t\t: (struct bufdesc *)(((uintptr_t)bdp) + bd->d_size);\n+}\n+\n+static inline int\n+fls64(unsigned long word)\n+{\n+\treturn (64 - __builtin_clzl(word)) - 1;\n+}\n+\n+static inline struct\n+bufdesc *enet_get_prevdesc(struct bufdesc *bdp, struct bufdesc_prop *bd)\n+{\n+\treturn (bdp <= bd->base) ? bd->last\n+\t\t: (struct bufdesc *)(((uintptr_t)bdp) - bd->d_size);\n+}\n+\n+static inline int\n+enet_get_bd_index(struct bufdesc *bdp, struct bufdesc_prop *bd)\n+{\n+\treturn ((const char *)bdp - (const char *)bd->base) >> bd->d_size_log2;\n+}\n+\n #endif /*__ENETFEC_ETHDEV_H__*/\n",
    "prefixes": [
        "v9",
        "3/5"
    ]
}