get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch.

GET /api/patches/95364/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 95364,
    "url": "http://patchwork.dpdk.org/api/patches/95364/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20210706095545.10776-17-jiawenwu@trustnetic.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210706095545.10776-17-jiawenwu@trustnetic.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210706095545.10776-17-jiawenwu@trustnetic.com",
    "date": "2021-07-06T09:55:42",
    "name": "[v7,16/19] net/ngbe: add Rx queue start and stop",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "86d2cb0866057860c8e4d67bb74a4c30c35f941b",
    "submitter": {
        "id": 1932,
        "url": "http://patchwork.dpdk.org/api/people/1932/?format=api",
        "name": "Jiawen Wu",
        "email": "jiawenwu@trustnetic.com"
    },
    "delegate": {
        "id": 3961,
        "url": "http://patchwork.dpdk.org/api/users/3961/?format=api",
        "username": "arybchenko",
        "first_name": "Andrew",
        "last_name": "Rybchenko",
        "email": "andrew.rybchenko@oktetlabs.ru"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20210706095545.10776-17-jiawenwu@trustnetic.com/mbox/",
    "series": [
        {
            "id": 17659,
            "url": "http://patchwork.dpdk.org/api/series/17659/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=17659",
            "date": "2021-07-06T09:55:28",
            "name": "net: ngbe PMD",
            "version": 7,
            "mbox": "http://patchwork.dpdk.org/series/17659/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/95364/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/95364/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 9BCB2A0C47;\n\tTue,  6 Jul 2021 11:59:12 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id C2932413B7;\n\tTue,  6 Jul 2021 11:56:33 +0200 (CEST)",
            "from smtpbguseast1.qq.com (smtpbguseast1.qq.com [54.204.34.129])\n by mails.dpdk.org (Postfix) with ESMTP id 92E79413A0\n for <dev@dpdk.org>; Tue,  6 Jul 2021 11:56:31 +0200 (CEST)",
            "from jiawenwu.trustnetic.com (unknown [183.129.236.74])\n by esmtp6.qq.com (ESMTP) with\n id ; Tue, 06 Jul 2021 17:56:19 +0800 (CST)"
        ],
        "X-QQ-mid": "bizesmtp51t1625565380tgzw1fcb",
        "X-QQ-SSF": "01400000002000D0E000B00A0000000",
        "X-QQ-FEAT": "t98IiFeQWziYQwSa1RMO5reiRe25iE3ytk/EgwO1GwHlCqtlMGWHpp3/2SAbI\n 7wzxYm21JinoOIHyUvtL81WQJh4aC+HhGDdXHmA/J8UCwsvkjqs9CH/hp7mOV07ps5tRT7a\n 4Wr1MS77XDwvvDhXIxPYP/cZMOD6GPTEYbZfYRFkeL81yBZOVGnHVw6ncJ+1T0mdnUjE0ux\n ejGVHs0xjqrPU7TA2lM0COEAw7Ip+/ALKpBvVOrNUaXnhUEpi5FxPIysdrx7sRQ5KbC3Edu\n uWiWtPv2DAJkKCM/tdQwAw523SEjYXUdxtQHMrZK1DIqwe51yUVbEXBVL1dkKFiEW+FkxrI\n uFj3pqhAaUwiJoyOtibWmFxHvygt1TEj7gLqpNc",
        "X-QQ-GoodBg": "2",
        "From": "Jiawen Wu <jiawenwu@trustnetic.com>",
        "To": "dev@dpdk.org",
        "Cc": "Jiawen Wu <jiawenwu@trustnetic.com>",
        "Date": "Tue,  6 Jul 2021 17:55:42 +0800",
        "Message-Id": "<20210706095545.10776-17-jiawenwu@trustnetic.com>",
        "X-Mailer": "git-send-email 2.21.0.windows.1",
        "In-Reply-To": "<20210706095545.10776-1-jiawenwu@trustnetic.com>",
        "References": "<20210706095545.10776-1-jiawenwu@trustnetic.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-QQ-SENDSIZE": "520",
        "Feedback-ID": "bizesmtp:trustnetic.com:qybgforeign:qybgforeign6",
        "X-QQ-Bgrelay": "1",
        "Subject": "[dpdk-dev] [PATCH v7 16/19] net/ngbe: add Rx queue start and stop",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Initializes receive unit, support to start and stop receive unit for\nspecified queues.\n\nSigned-off-by: Jiawen Wu <jiawenwu@trustnetic.com>\n---\n drivers/net/ngbe/base/ngbe_dummy.h |  15 ++\n drivers/net/ngbe/base/ngbe_hw.c    | 105 ++++++++++++++\n drivers/net/ngbe/base/ngbe_hw.h    |   4 +\n drivers/net/ngbe/base/ngbe_type.h  |   4 +\n drivers/net/ngbe/ngbe_ethdev.c     |   5 +\n drivers/net/ngbe/ngbe_ethdev.h     |   6 +\n drivers/net/ngbe/ngbe_rxtx.c       | 215 ++++++++++++++++++++++++++++-\n drivers/net/ngbe/ngbe_rxtx.h       |   8 ++\n 8 files changed, 359 insertions(+), 3 deletions(-)",
    "diff": "diff --git a/drivers/net/ngbe/base/ngbe_dummy.h b/drivers/net/ngbe/base/ngbe_dummy.h\nindex 387bb16aec..8863acef0d 100644\n--- a/drivers/net/ngbe/base/ngbe_dummy.h\n+++ b/drivers/net/ngbe/base/ngbe_dummy.h\n@@ -59,6 +59,18 @@ static inline s32 ngbe_mac_get_mac_addr_dummy(struct ngbe_hw *TUP0, u8 *TUP1)\n {\n \treturn NGBE_ERR_OPS_DUMMY;\n }\n+static inline s32 ngbe_mac_enable_rx_dma_dummy(struct ngbe_hw *TUP0, u32 TUP1)\n+{\n+\treturn NGBE_ERR_OPS_DUMMY;\n+}\n+static inline s32 ngbe_mac_disable_sec_rx_path_dummy(struct ngbe_hw *TUP0)\n+{\n+\treturn NGBE_ERR_OPS_DUMMY;\n+}\n+static inline s32 ngbe_mac_enable_sec_rx_path_dummy(struct ngbe_hw *TUP0)\n+{\n+\treturn NGBE_ERR_OPS_DUMMY;\n+}\n static inline s32 ngbe_mac_acquire_swfw_sync_dummy(struct ngbe_hw *TUP0,\n \t\t\t\t\tu32 TUP1)\n {\n@@ -167,6 +179,9 @@ static inline void ngbe_init_ops_dummy(struct ngbe_hw *hw)\n \thw->mac.start_hw = ngbe_mac_start_hw_dummy;\n \thw->mac.stop_hw = ngbe_mac_stop_hw_dummy;\n \thw->mac.get_mac_addr = ngbe_mac_get_mac_addr_dummy;\n+\thw->mac.enable_rx_dma = ngbe_mac_enable_rx_dma_dummy;\n+\thw->mac.disable_sec_rx_path = ngbe_mac_disable_sec_rx_path_dummy;\n+\thw->mac.enable_sec_rx_path = ngbe_mac_enable_sec_rx_path_dummy;\n \thw->mac.acquire_swfw_sync = ngbe_mac_acquire_swfw_sync_dummy;\n \thw->mac.release_swfw_sync = ngbe_mac_release_swfw_sync_dummy;\n \thw->mac.setup_link = ngbe_mac_setup_link_dummy;\ndiff --git a/drivers/net/ngbe/base/ngbe_hw.c b/drivers/net/ngbe/base/ngbe_hw.c\nindex 87c02a8f1f..6b575fc67b 100644\n--- a/drivers/net/ngbe/base/ngbe_hw.c\n+++ b/drivers/net/ngbe/base/ngbe_hw.c\n@@ -536,6 +536,63 @@ void ngbe_release_swfw_sync(struct ngbe_hw *hw, u32 mask)\n \tngbe_release_eeprom_semaphore(hw);\n }\n \n+/**\n+ *  ngbe_disable_sec_rx_path - Stops the receive data path\n+ *  @hw: pointer to hardware structure\n+ *\n+ *  Stops the receive data path and waits for the HW to internally empty\n+ *  the Rx security block\n+ **/\n+s32 ngbe_disable_sec_rx_path(struct 
ngbe_hw *hw)\n+{\n+#define NGBE_MAX_SECRX_POLL 4000\n+\n+\tint i;\n+\tu32 secrxreg;\n+\n+\tDEBUGFUNC(\"ngbe_disable_sec_rx_path\");\n+\n+\n+\tsecrxreg = rd32(hw, NGBE_SECRXCTL);\n+\tsecrxreg |= NGBE_SECRXCTL_XDSA;\n+\twr32(hw, NGBE_SECRXCTL, secrxreg);\n+\tfor (i = 0; i < NGBE_MAX_SECRX_POLL; i++) {\n+\t\tsecrxreg = rd32(hw, NGBE_SECRXSTAT);\n+\t\tif (!(secrxreg & NGBE_SECRXSTAT_RDY))\n+\t\t\t/* Use interrupt-safe sleep just in case */\n+\t\t\tusec_delay(10);\n+\t\telse\n+\t\t\tbreak;\n+\t}\n+\n+\t/* For informational purposes only */\n+\tif (i >= NGBE_MAX_SECRX_POLL)\n+\t\tDEBUGOUT(\"Rx unit being enabled before security \"\n+\t\t\t \"path fully disabled.  Continuing with init.\\n\");\n+\n+\treturn 0;\n+}\n+\n+/**\n+ *  ngbe_enable_sec_rx_path - Enables the receive data path\n+ *  @hw: pointer to hardware structure\n+ *\n+ *  Enables the receive data path.\n+ **/\n+s32 ngbe_enable_sec_rx_path(struct ngbe_hw *hw)\n+{\n+\tu32 secrxreg;\n+\n+\tDEBUGFUNC(\"ngbe_enable_sec_rx_path\");\n+\n+\tsecrxreg = rd32(hw, NGBE_SECRXCTL);\n+\tsecrxreg &= ~NGBE_SECRXCTL_XDSA;\n+\twr32(hw, NGBE_SECRXCTL, secrxreg);\n+\tngbe_flush(hw);\n+\n+\treturn 0;\n+}\n+\n /**\n  *  ngbe_clear_vmdq - Disassociate a VMDq pool index from a rx address\n  *  @hw: pointer to hardware struct\n@@ -756,6 +813,21 @@ void ngbe_disable_rx(struct ngbe_hw *hw)\n \twr32m(hw, NGBE_MACRXCFG, NGBE_MACRXCFG_ENA, 0);\n }\n \n+void ngbe_enable_rx(struct ngbe_hw *hw)\n+{\n+\tu32 pfdtxgswc;\n+\n+\twr32m(hw, NGBE_MACRXCFG, NGBE_MACRXCFG_ENA, NGBE_MACRXCFG_ENA);\n+\twr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, NGBE_PBRXCTL_ENA);\n+\n+\tif (hw->mac.set_lben) {\n+\t\tpfdtxgswc = rd32(hw, NGBE_PSRCTL);\n+\t\tpfdtxgswc |= NGBE_PSRCTL_LBENA;\n+\t\twr32(hw, NGBE_PSRCTL, pfdtxgswc);\n+\t\thw->mac.set_lben = false;\n+\t}\n+}\n+\n /**\n  *  ngbe_set_mac_type - Sets MAC type\n  *  @hw: pointer to the HW structure\n@@ -802,6 +874,36 @@ s32 ngbe_set_mac_type(struct ngbe_hw *hw)\n \treturn err;\n }\n \n+/**\n+ *  ngbe_enable_rx_dma - 
Enable the Rx DMA unit\n+ *  @hw: pointer to hardware structure\n+ *  @regval: register value to write to RXCTRL\n+ *\n+ *  Enables the Rx DMA unit\n+ **/\n+s32 ngbe_enable_rx_dma(struct ngbe_hw *hw, u32 regval)\n+{\n+\tDEBUGFUNC(\"ngbe_enable_rx_dma\");\n+\n+\t/*\n+\t * Workaround silicon errata when enabling the Rx datapath.\n+\t * If traffic is incoming before we enable the Rx unit, it could hang\n+\t * the Rx DMA unit.  Therefore, make sure the security engine is\n+\t * completely disabled prior to enabling the Rx unit.\n+\t */\n+\n+\thw->mac.disable_sec_rx_path(hw);\n+\n+\tif (regval & NGBE_PBRXCTL_ENA)\n+\t\tngbe_enable_rx(hw);\n+\telse\n+\t\tngbe_disable_rx(hw);\n+\n+\thw->mac.enable_sec_rx_path(hw);\n+\n+\treturn 0;\n+}\n+\n void ngbe_map_device_id(struct ngbe_hw *hw)\n {\n \tu16 oem = hw->sub_system_id & NGBE_OEM_MASK;\n@@ -886,11 +988,14 @@ s32 ngbe_init_ops_pf(struct ngbe_hw *hw)\n \tmac->init_hw = ngbe_init_hw;\n \tmac->reset_hw = ngbe_reset_hw_em;\n \tmac->start_hw = ngbe_start_hw;\n+\tmac->enable_rx_dma = ngbe_enable_rx_dma;\n \tmac->get_mac_addr = ngbe_get_mac_addr;\n \tmac->stop_hw = ngbe_stop_hw;\n \tmac->acquire_swfw_sync = ngbe_acquire_swfw_sync;\n \tmac->release_swfw_sync = ngbe_release_swfw_sync;\n \n+\tmac->disable_sec_rx_path = ngbe_disable_sec_rx_path;\n+\tmac->enable_sec_rx_path = ngbe_enable_sec_rx_path;\n \t/* RAR */\n \tmac->set_rar = ngbe_set_rar;\n \tmac->clear_rar = ngbe_clear_rar;\ndiff --git a/drivers/net/ngbe/base/ngbe_hw.h b/drivers/net/ngbe/base/ngbe_hw.h\nindex 791d15fbec..17a0a03c88 100644\n--- a/drivers/net/ngbe/base/ngbe_hw.h\n+++ b/drivers/net/ngbe/base/ngbe_hw.h\n@@ -34,6 +34,8 @@ s32 ngbe_set_rar(struct ngbe_hw *hw, u32 index, u8 *addr, u32 vmdq,\n \t\t\t  u32 enable_addr);\n s32 ngbe_clear_rar(struct ngbe_hw *hw, u32 index);\n s32 ngbe_init_rx_addrs(struct ngbe_hw *hw);\n+s32 ngbe_disable_sec_rx_path(struct ngbe_hw *hw);\n+s32 ngbe_enable_sec_rx_path(struct ngbe_hw *hw);\n \n s32 ngbe_validate_mac_addr(u8 *mac_addr);\n 
s32 ngbe_acquire_swfw_sync(struct ngbe_hw *hw, u32 mask);\n@@ -46,10 +48,12 @@ s32 ngbe_init_uta_tables(struct ngbe_hw *hw);\n s32 ngbe_init_thermal_sensor_thresh(struct ngbe_hw *hw);\n s32 ngbe_mac_check_overtemp(struct ngbe_hw *hw);\n void ngbe_disable_rx(struct ngbe_hw *hw);\n+void ngbe_enable_rx(struct ngbe_hw *hw);\n s32 ngbe_init_shared_code(struct ngbe_hw *hw);\n s32 ngbe_set_mac_type(struct ngbe_hw *hw);\n s32 ngbe_init_ops_pf(struct ngbe_hw *hw);\n s32 ngbe_init_phy(struct ngbe_hw *hw);\n+s32 ngbe_enable_rx_dma(struct ngbe_hw *hw, u32 regval);\n void ngbe_map_device_id(struct ngbe_hw *hw);\n \n #endif /* _NGBE_HW_H_ */\ndiff --git a/drivers/net/ngbe/base/ngbe_type.h b/drivers/net/ngbe/base/ngbe_type.h\nindex 2846a6a2b6..28540e4ba0 100644\n--- a/drivers/net/ngbe/base/ngbe_type.h\n+++ b/drivers/net/ngbe/base/ngbe_type.h\n@@ -97,6 +97,9 @@ struct ngbe_mac_info {\n \ts32 (*start_hw)(struct ngbe_hw *hw);\n \ts32 (*stop_hw)(struct ngbe_hw *hw);\n \ts32 (*get_mac_addr)(struct ngbe_hw *hw, u8 *mac_addr);\n+\ts32 (*enable_rx_dma)(struct ngbe_hw *hw, u32 regval);\n+\ts32 (*disable_sec_rx_path)(struct ngbe_hw *hw);\n+\ts32 (*enable_sec_rx_path)(struct ngbe_hw *hw);\n \ts32 (*acquire_swfw_sync)(struct ngbe_hw *hw, u32 mask);\n \tvoid (*release_swfw_sync)(struct ngbe_hw *hw, u32 mask);\n \n@@ -190,6 +193,7 @@ struct ngbe_hw {\n \tu16 nb_rx_queues;\n \tu16 nb_tx_queues;\n \n+\tu32 q_rx_regs[8 * 4];\n \tu32 q_tx_regs[8 * 4];\n \tbool is_pf;\n };\ndiff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c\nindex f1911bdcbc..2a40dbd184 100644\n--- a/drivers/net/ngbe/ngbe_ethdev.c\n+++ b/drivers/net/ngbe/ngbe_ethdev.c\n@@ -569,6 +569,8 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \n \tdev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;\n \tdev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;\n+\tdev_info->min_rx_bufsize = 1024;\n+\tdev_info->max_rx_pktlen = 15872;\n \n \tdev_info->default_rxconf = (struct 
rte_eth_rxconf) {\n \t\t.rx_thresh = {\n@@ -598,6 +600,7 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \t\t\t\tETH_LINK_SPEED_10M;\n \n \t/* Driver-preferred Rx/Tx parameters */\n+\tdev_info->default_rxportconf.burst_size = 32;\n \tdev_info->default_txportconf.burst_size = 32;\n \tdev_info->default_rxportconf.nb_queues = 1;\n \tdev_info->default_txportconf.nb_queues = 1;\n@@ -1090,6 +1093,8 @@ static const struct eth_dev_ops ngbe_eth_dev_ops = {\n \t.dev_start                  = ngbe_dev_start,\n \t.dev_stop                   = ngbe_dev_stop,\n \t.link_update                = ngbe_dev_link_update,\n+\t.rx_queue_start\t            = ngbe_dev_rx_queue_start,\n+\t.rx_queue_stop              = ngbe_dev_rx_queue_stop,\n \t.tx_queue_start\t            = ngbe_dev_tx_queue_start,\n \t.tx_queue_stop              = ngbe_dev_tx_queue_stop,\n \t.rx_queue_setup             = ngbe_dev_rx_queue_setup,\ndiff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h\nindex f631d847ad..1d71d28023 100644\n--- a/drivers/net/ngbe/ngbe_ethdev.h\n+++ b/drivers/net/ngbe/ngbe_ethdev.h\n@@ -86,9 +86,15 @@ void ngbe_dev_tx_init(struct rte_eth_dev *dev);\n \n int ngbe_dev_rxtx_start(struct rte_eth_dev *dev);\n \n+void ngbe_dev_save_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id);\n+void ngbe_dev_store_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id);\n void ngbe_dev_save_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id);\n void ngbe_dev_store_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id);\n \n+int ngbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n+\n+int ngbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);\n+\n int ngbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);\n \n int ngbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);\ndiff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c\nindex 54ae1802eb..2622f562c4 100644\n--- 
a/drivers/net/ngbe/ngbe_rxtx.c\n+++ b/drivers/net/ngbe/ngbe_rxtx.c\n@@ -509,15 +509,111 @@ ngbe_dev_clear_queues(struct rte_eth_dev *dev)\n \t}\n }\n \n+static int __rte_cold\n+ngbe_alloc_rx_queue_mbufs(struct ngbe_rx_queue *rxq)\n+{\n+\tstruct ngbe_rx_entry *rxe = rxq->sw_ring;\n+\tuint64_t dma_addr;\n+\tunsigned int i;\n+\n+\t/* Initialize software ring entries */\n+\tfor (i = 0; i < rxq->nb_rx_desc; i++) {\n+\t\t/* the ring can also be modified by hardware */\n+\t\tvolatile struct ngbe_rx_desc *rxd;\n+\t\tstruct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);\n+\n+\t\tif (mbuf == NULL) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Rx mbuf alloc failed queue_id=%u port_id=%u\",\n+\t\t\t\t     (unsigned int)rxq->queue_id,\n+\t\t\t\t     (unsigned int)rxq->port_id);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\tmbuf->data_off = RTE_PKTMBUF_HEADROOM;\n+\t\tmbuf->port = rxq->port_id;\n+\n+\t\tdma_addr =\n+\t\t\trte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));\n+\t\trxd = &rxq->rx_ring[i];\n+\t\tNGBE_RXD_HDRADDR(rxd, 0);\n+\t\tNGBE_RXD_PKTADDR(rxd, dma_addr);\n+\t\trxe[i].mbuf = mbuf;\n+\t}\n+\n+\treturn 0;\n+}\n+\n /*\n  * Initializes Receive Unit.\n  */\n int __rte_cold\n ngbe_dev_rx_init(struct rte_eth_dev *dev)\n {\n-\tRTE_SET_USED(dev);\n+\tstruct ngbe_hw *hw;\n+\tstruct ngbe_rx_queue *rxq;\n+\tuint64_t bus_addr;\n+\tuint32_t fctrl;\n+\tuint32_t hlreg0;\n+\tuint32_t srrctl;\n+\tuint16_t buf_size;\n+\tuint16_t i;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\thw = ngbe_dev_hw(dev);\n+\n+\t/*\n+\t * Make sure receives are disabled while setting\n+\t * up the Rx context (registers, descriptor rings, etc.).\n+\t */\n+\twr32m(hw, NGBE_MACRXCFG, NGBE_MACRXCFG_ENA, 0);\n+\twr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, 0);\n+\n+\t/* Enable receipt of broadcasted frames */\n+\tfctrl = rd32(hw, NGBE_PSRCTL);\n+\tfctrl |= NGBE_PSRCTL_BCA;\n+\twr32(hw, NGBE_PSRCTL, fctrl);\n+\n+\thlreg0 = rd32(hw, NGBE_SECRXCTL);\n+\thlreg0 &= ~NGBE_SECRXCTL_XDSA;\n+\twr32(hw, NGBE_SECRXCTL, hlreg0);\n+\n+\twr32m(hw, 
NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,\n+\t\t\tNGBE_FRMSZ_MAX(NGBE_FRAME_SIZE_DFT));\n \n-\treturn -EINVAL;\n+\t/* Setup Rx queues */\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\trxq = dev->data->rx_queues[i];\n+\n+\t\t/* Setup the Base and Length of the Rx Descriptor Rings */\n+\t\tbus_addr = rxq->rx_ring_phys_addr;\n+\t\twr32(hw, NGBE_RXBAL(rxq->reg_idx),\n+\t\t\t\t(uint32_t)(bus_addr & BIT_MASK32));\n+\t\twr32(hw, NGBE_RXBAH(rxq->reg_idx),\n+\t\t\t\t(uint32_t)(bus_addr >> 32));\n+\t\twr32(hw, NGBE_RXRP(rxq->reg_idx), 0);\n+\t\twr32(hw, NGBE_RXWP(rxq->reg_idx), 0);\n+\n+\t\tsrrctl = NGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);\n+\n+\t\t/* Set if packets are dropped when no descriptors available */\n+\t\tif (rxq->drop_en)\n+\t\t\tsrrctl |= NGBE_RXCFG_DROP;\n+\n+\t\t/*\n+\t\t * Configure the Rx buffer size in the PKTLEN field of\n+\t\t * the RXCFG register of the queue.\n+\t\t * The value is in 1 KB resolution. Valid values can be from\n+\t\t * 1 KB to 16 KB.\n+\t\t */\n+\t\tbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -\n+\t\t\tRTE_PKTMBUF_HEADROOM);\n+\t\tbuf_size = ROUND_DOWN(buf_size, 0x1 << 10);\n+\t\tsrrctl |= NGBE_RXCFG_PKTLEN(buf_size);\n+\n+\t\twr32(hw, NGBE_RXCFG(rxq->reg_idx), srrctl);\n+\t}\n+\n+\treturn 0;\n }\n \n /*\n@@ -562,7 +658,9 @@ ngbe_dev_rxtx_start(struct rte_eth_dev *dev)\n {\n \tstruct ngbe_hw     *hw;\n \tstruct ngbe_tx_queue *txq;\n+\tstruct ngbe_rx_queue *rxq;\n \tuint32_t dmatxctl;\n+\tuint32_t rxctrl;\n \tuint16_t i;\n \tint ret = 0;\n \n@@ -592,7 +690,39 @@ ngbe_dev_rxtx_start(struct rte_eth_dev *dev)\n \t\t}\n \t}\n \n-\treturn -EINVAL;\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\trxq = dev->data->rx_queues[i];\n+\t\tif (rxq->rx_deferred_start == 0) {\n+\t\t\tret = ngbe_dev_rx_queue_start(dev, i);\n+\t\t\tif (ret < 0)\n+\t\t\t\treturn ret;\n+\t\t}\n+\t}\n+\n+\t/* Enable Receive engine */\n+\trxctrl = rd32(hw, NGBE_PBRXCTL);\n+\trxctrl |= NGBE_PBRXCTL_ENA;\n+\thw->mac.enable_rx_dma(hw, 
rxctrl);\n+\n+\treturn 0;\n+}\n+\n+void\n+ngbe_dev_save_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id)\n+{\n+\tu32 *reg = &hw->q_rx_regs[rx_queue_id * 8];\n+\t*(reg++) = rd32(hw, NGBE_RXBAL(rx_queue_id));\n+\t*(reg++) = rd32(hw, NGBE_RXBAH(rx_queue_id));\n+\t*(reg++) = rd32(hw, NGBE_RXCFG(rx_queue_id));\n+}\n+\n+void\n+ngbe_dev_store_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id)\n+{\n+\tu32 *reg = &hw->q_rx_regs[rx_queue_id * 8];\n+\twr32(hw, NGBE_RXBAL(rx_queue_id), *(reg++));\n+\twr32(hw, NGBE_RXBAH(rx_queue_id), *(reg++));\n+\twr32(hw, NGBE_RXCFG(rx_queue_id), *(reg++) & ~NGBE_RXCFG_ENA);\n }\n \n void\n@@ -613,6 +743,85 @@ ngbe_dev_store_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)\n \twr32(hw, NGBE_TXCFG(tx_queue_id), *(reg++) & ~NGBE_TXCFG_ENA);\n }\n \n+/*\n+ * Start Receive Units for specified queue.\n+ */\n+int __rte_cold\n+ngbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n+{\n+\tstruct ngbe_hw *hw = ngbe_dev_hw(dev);\n+\tstruct ngbe_rx_queue *rxq;\n+\tuint32_t rxdctl;\n+\tint poll_ms;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\trxq = dev->data->rx_queues[rx_queue_id];\n+\n+\t/* Allocate buffers for descriptor rings */\n+\tif (ngbe_alloc_rx_queue_mbufs(rxq) != 0) {\n+\t\tPMD_INIT_LOG(ERR, \"Could not alloc mbuf for queue:%d\",\n+\t\t\t     rx_queue_id);\n+\t\treturn -1;\n+\t}\n+\trxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));\n+\trxdctl |= NGBE_RXCFG_ENA;\n+\twr32(hw, NGBE_RXCFG(rxq->reg_idx), rxdctl);\n+\n+\t/* Wait until Rx Enable ready */\n+\tpoll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;\n+\tdo {\n+\t\trte_delay_ms(1);\n+\t\trxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));\n+\t} while (--poll_ms && !(rxdctl & NGBE_RXCFG_ENA));\n+\tif (poll_ms == 0)\n+\t\tPMD_INIT_LOG(ERR, \"Could not enable Rx Queue %d\", rx_queue_id);\n+\trte_wmb();\n+\twr32(hw, NGBE_RXRP(rxq->reg_idx), 0);\n+\twr32(hw, NGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);\n+\tdev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;\n+\n+\treturn 
0;\n+}\n+\n+/*\n+ * Stop Receive Units for specified queue.\n+ */\n+int __rte_cold\n+ngbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)\n+{\n+\tstruct ngbe_hw *hw = ngbe_dev_hw(dev);\n+\tstruct ngbe_adapter *adapter = ngbe_dev_adapter(dev);\n+\tstruct ngbe_rx_queue *rxq;\n+\tuint32_t rxdctl;\n+\tint poll_ms;\n+\n+\tPMD_INIT_FUNC_TRACE();\n+\n+\trxq = dev->data->rx_queues[rx_queue_id];\n+\n+\tngbe_dev_save_rx_queue(hw, rxq->reg_idx);\n+\twr32m(hw, NGBE_RXCFG(rxq->reg_idx), NGBE_RXCFG_ENA, 0);\n+\n+\t/* Wait until Rx Enable bit clear */\n+\tpoll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;\n+\tdo {\n+\t\trte_delay_ms(1);\n+\t\trxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));\n+\t} while (--poll_ms && (rxdctl & NGBE_RXCFG_ENA));\n+\tif (poll_ms == 0)\n+\t\tPMD_INIT_LOG(ERR, \"Could not disable Rx Queue %d\", rx_queue_id);\n+\n+\trte_delay_us(RTE_NGBE_WAIT_100_US);\n+\tngbe_dev_store_rx_queue(hw, rxq->reg_idx);\n+\n+\tngbe_rx_queue_release_mbufs(rxq);\n+\tngbe_reset_rx_queue(adapter, rxq);\n+\tdev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;\n+\n+\treturn 0;\n+}\n+\n /*\n  * Start Transmit Units for specified queue.\n  */\ndiff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h\nindex 58487caa95..668588fbf8 100644\n--- a/drivers/net/ngbe/ngbe_rxtx.h\n+++ b/drivers/net/ngbe/ngbe_rxtx.h\n@@ -43,6 +43,14 @@ struct ngbe_rx_desc {\n \t} qw1; /* also as r.hdr_addr */\n };\n \n+/* @ngbe_rx_desc.qw0 */\n+#define NGBE_RXD_PKTADDR(rxd, v)  \\\n+\t(((volatile __le64 *)(rxd))[0] = cpu_to_le64(v))\n+\n+/* @ngbe_rx_desc.qw1 */\n+#define NGBE_RXD_HDRADDR(rxd, v)  \\\n+\t(((volatile __le64 *)(rxd))[1] = cpu_to_le64(v))\n+\n /*****************************************************************************\n  * Transmit Descriptor\n  *****************************************************************************/\n",
    "prefixes": [
        "v7",
        "16/19"
    ]
}