get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Fully update a patch; all writable fields are replaced.
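
For example, driving this endpoint from Python — a minimal sketch, assuming the third-party requests library (any HTTP client works); the Authorization header follows Patchwork's API token scheme, and YOUR_API_TOKEN is a placeholder:

    import requests

    BASE = "http://patchwork.dpdk.org/api"

    # GET: read a patch; no authentication is needed for reads.
    patch = requests.get(f"{BASE}/patches/102140/").json()
    print(patch["name"], patch["state"])

    # PATCH: partial update; only the fields sent are changed.
    # Writes require a maintainer API token (placeholder below).
    r = requests.patch(
        f"{BASE}/patches/102140/",
        headers={"Authorization": "Token YOUR_API_TOKEN"},
        json={"state": "accepted"},
    )
    r.raise_for_status()

A sample GET exchange follows.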

GET /api/patches/102140/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 102140,
    "url": "http://patchwork.dpdk.org/api/patches/102140/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20211019081738.2165150-6-xuemingl@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211019081738.2165150-6-xuemingl@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211019081738.2165150-6-xuemingl@nvidia.com",
    "date": "2021-10-19T08:17:37",
    "name": "[v9,5/6] app/testpmd: force shared Rx queue polled on same core",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "68c2a1d2a7ad03b1b42b073363557389113720b3",
    "submitter": {
        "id": 1904,
        "url": "http://patchwork.dpdk.org/api/people/1904/?format=api",
        "name": "Xueming Li",
        "email": "xuemingl@nvidia.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patchwork.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20211019081738.2165150-6-xuemingl@nvidia.com/mbox/",
    "series": [
        {
            "id": 19770,
            "url": "http://patchwork.dpdk.org/api/series/19770/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=19770",
            "date": "2021-10-19T08:17:32",
            "name": "ethdev: introduce shared Rx queue",
            "version": 9,
            "mbox": "http://patchwork.dpdk.org/series/19770/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/102140/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/102140/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 92318A0C4E;\n\tTue, 19 Oct 2021 10:18:47 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 7FE0A410EE;\n\tTue, 19 Oct 2021 10:18:47 +0200 (CEST)",
            "from NAM12-MW2-obe.outbound.protection.outlook.com\n (mail-mw2nam12on2057.outbound.protection.outlook.com [40.107.244.57])\n by mails.dpdk.org (Postfix) with ESMTP id 901E1410EA\n for <dev@dpdk.org>; Tue, 19 Oct 2021 10:18:46 +0200 (CEST)",
            "from DM6PR11CA0004.namprd11.prod.outlook.com (2603:10b6:5:190::17)\n by CH2PR12MB4168.namprd12.prod.outlook.com (2603:10b6:610:a8::7) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4608.16; Tue, 19 Oct\n 2021 08:18:44 +0000",
            "from DM6NAM11FT025.eop-nam11.prod.protection.outlook.com\n (2603:10b6:5:190:cafe::58) by DM6PR11CA0004.outlook.office365.com\n (2603:10b6:5:190::17) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4608.15 via Frontend\n Transport; Tue, 19 Oct 2021 08:18:44 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n DM6NAM11FT025.mail.protection.outlook.com (10.13.172.197) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4608.15 via Frontend Transport; Tue, 19 Oct 2021 08:18:43 +0000",
            "from nvidia.com (172.20.187.5) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Tue, 19 Oct\n 2021 08:18:37 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=LaJGE6cLGQRLPj/j1nG4dCidcXWeeFCfXT/TB7gG/BrrYbC4cZ8Z/Y5OWr/yORIDLUgt5u/R+9oVaYsPfeF2rK9e4nNF0rFUdTFnRXfUytMGyvdcyIraGQZfDTRqqbG5Xa+EHBQ01Pk26xtI6js69Zha1Q589TqYksRjEEh4PCkW9W3+k47ZK+2XjL4B82TpbyQqvNADGYKWZ5DJHaUhkkvYcrsUaeT+BhYQXFQ6Idi6YYMjF0zlt9uxFLjqZRWkar97J5G8iztUKAjVRddyoVKAbGsUOzTEDkkr7TJbi318CFV+Y5y9TGQ+z/Nwh047X4FjwITJDz4yGokVMFL03g==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=L6rWHKqE0+MsWJfxIv9xpN3cluU1PHlg7oMBgb/ihoY=;\n b=OS7g+0bYecfqp8gdA0UUiLDozghapA9g2t5hCNf40ev0TvASNxqQUyQouBgdE1ViGy3uwCeZqdMQtx6Cg2Ac8Mk+lpEnyBDiRO7hGq4Xj8O4mQQ45hECYpV3eLtbRkfNsYBnL7Y06K7rB4Jjd1qjKDyjoG19ISYmjrg8dECb50QYxIZn6wtMYgpA8S/lfM9XT1cudPc6Jkgk4Pjf8+fs76pJUZYwosx4IUsoHqhCDhzA7AD8K/46m0SEdvH4ljwDZqGvszJv6nHxFnjj/y1Rhh0iXnEvRIm4F2kVB5rKMcGvT75nk5Rr7GdqVr3tA7S1ftQal0i98fGReEVfO9LrvQ==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=intel.com smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=quarantine pct=100) action=none\n header.from=nvidia.com; dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=L6rWHKqE0+MsWJfxIv9xpN3cluU1PHlg7oMBgb/ihoY=;\n b=a/5P8wZKiyEuyzrYCzCkshOiZUweynwSXHZzpWw0OGc540RzPYVNG7y3aOu4XqOoIayaTdXI6TmID1VQeRaPj7G7Fg8YlpYmmCTTp8Ne8dizlqFp++YuhaIltIqfGxGGZ2NTd5dsdpFhAiNhkwyl7mMulg6Nu5ZW6+4iSqoN8gze7/AG8rxoG+JIOGWeQ75Uas5SjpLezZDAZoh7pLMe/yh4RhIvbVPXsY5TJqAA/Zvk2RPuBRPR6Zs4rkOCZ2Eq2Ol2DDRbZPl46zwaAWDDBCvEdQANmGz79FM80bx8YtMMQyQs9cxpGr4zWWZ/d22Ti78fwbhyQiEaXRzvnSynCQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; intel.com; dkim=none (message not signed)\n header.d=none;intel.com; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Xueming Li <xuemingl@nvidia.com>",
        "To": "<dev@dpdk.org>, Zhang Yuying <yuying.zhang@intel.com>",
        "CC": "<xuemingl@nvidia.com>, Jerin Jacob <jerinjacobk@gmail.com>, Ferruh Yigit\n <ferruh.yigit@intel.com>, Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>, Thomas Monjalon\n <thomas@monjalon.net>, Lior Margalit <lmargalit@nvidia.com>, \"Ananyev\n Konstantin\" <konstantin.ananyev@intel.com>, Ajit Khaparde\n <ajit.khaparde@broadcom.com>, Xiaoyun Li <xiaoyun.li@intel.com>",
        "Date": "Tue, 19 Oct 2021 16:17:37 +0800",
        "Message-ID": "<20211019081738.2165150-6-xuemingl@nvidia.com>",
        "X-Mailer": "git-send-email 2.33.0",
        "In-Reply-To": "<20211019081738.2165150-1-xuemingl@nvidia.com>",
        "References": "<20210727034204.20649-1-xuemingl@nvidia.com>\n <20211019081738.2165150-1-xuemingl@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.5]",
        "X-ClientProxiedBy": "HQMAIL111.nvidia.com (172.20.187.18) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "734b1fbf-53bb-4da8-60d3-08d992d91127",
        "X-MS-TrafficTypeDiagnostic": "CH2PR12MB4168:",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-Microsoft-Antispam-PRVS": "\n <CH2PR12MB4168C1AFD60DEDDA2CC75E62A1BD9@CH2PR12MB4168.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:3631;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n Ff8nSEhZ/ldlTl93VKMgUOgwoNnCAQJQ6MXBqLKVFFKq2ZvPXq4wuce3Nnc8JiS4YiwLDl2MeX4k3XsXkZLEra9nXQstKUnEFsBZjBt1S6RgdnMvp62KkJOjOCTW3f1bAMFOzJeesBjqcAH/pGeKqDU7DtPLOVpTjaZTpnbf+e8EPhvUlaOBtiQ+yBsy44h846ccIz4G1UcDrEnhunmM4pGTc3uiFpVRChfbMi6ispiNAdOpLTJJmBDFTvC2W/xmwU/3FDU1sZxa90bZqBOkeZ57cmy+be/ojAX4wKHfExrKDbPtjs/+bo0DZvV8ybQ1BqjdSeMrYDDj20KjEvm9GPdRPfXVI6fQHFUPD/gZwTpVLdXi0bLWaUzfde7U95HT05IRpolYDXc//kB0gta67jtCz9+kdKCv70Ex3mwLrrCrXAJTciQwp4rfThJymvZhPAFp5Ii/rYfX88Ve7WnP47g9JWWBbxzBBoD1+eQ0XZ0rGSbJ7ZF8urpohS7t4KaeXVzARH33pkr2Qyf6WlcaYAnizh1bHhRiHTntxZjg4ad/Us/oNvmhjwtTOiz2wiEM+cbW8oz+lZuOB+4QWGN20dfYZuouiUFvDe62kTF8R07FXaBsNmzPvYQL1P/OpNUgDQomYErGBIEQcLFMfnIAgsIwY/EfWSNq25SsZJE+dzuLJBjZwXcoMNQoWdjcx19rtBD6PwnHxIYhF9yUDfdVSw==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(46966006)(36840700001)(356005)(7696005)(16526019)(186003)(83380400001)(336012)(6286002)(7636003)(70586007)(508600001)(55016002)(36756003)(2616005)(54906003)(26005)(8676002)(36860700001)(316002)(36906005)(86362001)(2906002)(82310400003)(5660300002)(1076003)(426003)(47076005)(8936002)(110136005)(70206006)(4326008);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "19 Oct 2021 08:18:43.7408 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 734b1fbf-53bb-4da8-60d3-08d992d91127",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT025.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "CH2PR12MB4168",
        "Subject": "[dpdk-dev] [PATCH v9 5/6] app/testpmd: force shared Rx queue polled\n on same core",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Shared Rx queue must be polled on same core. This patch checks and stops\nforwarding if shared RxQ being scheduled on multiple\ncores.\n\nIt's suggested to use same number of Rx queues and polling cores.\n\nSigned-off-by: Xueming Li <xuemingl@nvidia.com>\n---\n app/test-pmd/config.c  | 103 +++++++++++++++++++++++++++++++++++++++++\n app/test-pmd/testpmd.c |   4 +-\n app/test-pmd/testpmd.h |   2 +\n 3 files changed, 108 insertions(+), 1 deletion(-)",
    "diff": "diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c\nindex f8fb8961cae..c4150d77589 100644\n--- a/app/test-pmd/config.c\n+++ b/app/test-pmd/config.c\n@@ -2890,6 +2890,109 @@ port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,\n \t}\n }\n \n+/*\n+ * Check whether a shared rxq scheduled on other lcores.\n+ */\n+static bool\n+fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc,\n+\t\t\t   portid_t src_port, queueid_t src_rxq,\n+\t\t\t   uint32_t share_group, queueid_t share_rxq)\n+{\n+\tstreamid_t sm_id;\n+\tstreamid_t nb_fs_per_lcore;\n+\tlcoreid_t  nb_fc;\n+\tlcoreid_t  lc_id;\n+\tstruct fwd_stream *fs;\n+\tstruct rte_port *port;\n+\tstruct rte_eth_dev_info *dev_info;\n+\tstruct rte_eth_rxconf *rxq_conf;\n+\n+\tnb_fc = cur_fwd_config.nb_fwd_lcores;\n+\t/* Check remaining cores. */\n+\tfor (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) {\n+\t\tsm_id = fwd_lcores[lc_id]->stream_idx;\n+\t\tnb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;\n+\t\tfor (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;\n+\t\t     sm_id++) {\n+\t\t\tfs = fwd_streams[sm_id];\n+\t\t\tport = &ports[fs->rx_port];\n+\t\t\tdev_info = &port->dev_info;\n+\t\t\trxq_conf = &port->rx_conf[fs->rx_queue];\n+\t\t\tif ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)\n+\t\t\t    == 0)\n+\t\t\t\t/* Not shared rxq. */\n+\t\t\t\tcontinue;\n+\t\t\tif (domain_id != port->dev_info.switch_info.domain_id)\n+\t\t\t\tcontinue;\n+\t\t\tif (rxq_conf->share_group != share_group)\n+\t\t\t\tcontinue;\n+\t\t\tif (rxq_conf->share_qid != share_rxq)\n+\t\t\t\tcontinue;\n+\t\t\tprintf(\"Shared Rx queue group %u queue %hu can't be scheduled on different cores:\\n\",\n+\t\t\t       share_group, share_rxq);\n+\t\t\tprintf(\"  lcore %hhu Port %hu queue %hu\\n\",\n+\t\t\t       src_lc, src_port, src_rxq);\n+\t\t\tprintf(\"  lcore %hhu Port %hu queue %hu\\n\",\n+\t\t\t       lc_id, fs->rx_port, fs->rx_queue);\n+\t\t\tprintf(\"Please use --nb-cores=%hu to limit number of forwarding cores\\n\",\n+\t\t\t       nb_rxq);\n+\t\t\treturn true;\n+\t\t}\n+\t}\n+\treturn false;\n+}\n+\n+/*\n+ * Check shared rxq configuration.\n+ *\n+ * Shared group must not being scheduled on different core.\n+ */\n+bool\n+pkt_fwd_shared_rxq_check(void)\n+{\n+\tstreamid_t sm_id;\n+\tstreamid_t nb_fs_per_lcore;\n+\tlcoreid_t  nb_fc;\n+\tlcoreid_t  lc_id;\n+\tstruct fwd_stream *fs;\n+\tuint16_t domain_id;\n+\tstruct rte_port *port;\n+\tstruct rte_eth_dev_info *dev_info;\n+\tstruct rte_eth_rxconf *rxq_conf;\n+\n+\tnb_fc = cur_fwd_config.nb_fwd_lcores;\n+\t/*\n+\t * Check streams on each core, make sure the same switch domain +\n+\t * group + queue doesn't get scheduled on other cores.\n+\t */\n+\tfor (lc_id = 0; lc_id < nb_fc; lc_id++) {\n+\t\tsm_id = fwd_lcores[lc_id]->stream_idx;\n+\t\tnb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;\n+\t\tfor (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;\n+\t\t     sm_id++) {\n+\t\t\tfs = fwd_streams[sm_id];\n+\t\t\t/* Update lcore info stream being scheduled. */\n+\t\t\tfs->lcore = fwd_lcores[lc_id];\n+\t\t\tport = &ports[fs->rx_port];\n+\t\t\tdev_info = &port->dev_info;\n+\t\t\trxq_conf = &port->rx_conf[fs->rx_queue];\n+\t\t\tif ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)\n+\t\t\t    == 0)\n+\t\t\t\t/* Not shared rxq. */\n+\t\t\t\tcontinue;\n+\t\t\t/* Check shared rxq not scheduled on remaining cores. 
*/\n+\t\t\tdomain_id = port->dev_info.switch_info.domain_id;\n+\t\t\tif (fwd_stream_on_other_lcores(domain_id, lc_id,\n+\t\t\t\t\t\t       fs->rx_port,\n+\t\t\t\t\t\t       fs->rx_queue,\n+\t\t\t\t\t\t       rxq_conf->share_group,\n+\t\t\t\t\t\t       rxq_conf->share_qid))\n+\t\t\t\treturn false;\n+\t\t}\n+\t}\n+\treturn true;\n+}\n+\n /*\n  * Setup forwarding configuration for each logical core.\n  */\ndiff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c\nindex 123142ed110..f3f81ef561f 100644\n--- a/app/test-pmd/testpmd.c\n+++ b/app/test-pmd/testpmd.c\n@@ -2236,10 +2236,12 @@ start_packet_forwarding(int with_tx_first)\n \n \tfwd_config_setup();\n \n+\tpkt_fwd_config_display(&cur_fwd_config);\n+\tif (!pkt_fwd_shared_rxq_check())\n+\t\treturn;\n \tif(!no_flush_rx)\n \t\tflush_fwd_rx_queues();\n \n-\tpkt_fwd_config_display(&cur_fwd_config);\n \trxtx_config_display();\n \n \tfwd_stats_reset();\ndiff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h\nindex 3dfaaad94c0..f121a2da90c 100644\n--- a/app/test-pmd/testpmd.h\n+++ b/app/test-pmd/testpmd.h\n@@ -144,6 +144,7 @@ struct fwd_stream {\n \tuint64_t     core_cycles; /**< used for RX and TX processing */\n \tstruct pkt_burst_stats rx_burst_stats;\n \tstruct pkt_burst_stats tx_burst_stats;\n+\tstruct fwd_lcore *lcore; /**< Lcore being scheduled. */\n };\n \n /**\n@@ -795,6 +796,7 @@ void port_summary_header_display(void);\n void rx_queue_infos_display(portid_t port_idi, uint16_t queue_id);\n void tx_queue_infos_display(portid_t port_idi, uint16_t queue_id);\n void fwd_lcores_config_display(void);\n+bool pkt_fwd_shared_rxq_check(void);\n void pkt_fwd_config_display(struct fwd_config *cfg);\n void rxtx_config_display(void);\n void fwd_config_setup(void);\n",
    "prefixes": [
        "v9",
        "5/6"
    ]
}
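
The "mbox" field above links to the raw patch in mbox form. A sketch of pulling it down for local application — again assuming the requests library; the output filename is arbitrary:

    import requests

    patch = requests.get("http://patchwork.dpdk.org/api/patches/102140/").json()

    # Follow the "mbox" link from the JSON to fetch the raw patch.
    mbox = requests.get(patch["mbox"])
    mbox.raise_for_status()
    with open("102140.mbox", "wb") as f:
        f.write(mbox.content)
    # Apply locally with: git am 102140.mbox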