get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
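
The GET method can be driven from a short client script. The following is a minimal sketch, not part of the API itself: it assumes the third-party Python requests package and that read-only GET requests to this endpoint need no authentication, as in the example request shown next.

import requests

# Fetch the patch from the example below (read-only GET, JSON response).
resp = requests.get("http://patchwork.dpdk.org/api/patches/102136/")
resp.raise_for_status()

patch = resp.json()
print(patch["name"])    # "[v9,1/6] ethdev: introduce shared Rx queue"
print(patch["state"])   # "superseded"
print(patch["mbox"])    # raw mbox URL, suitable for `git am`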

GET /api/patches/102136/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 102136,
    "url": "http://patchwork.dpdk.org/api/patches/102136/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20211019081738.2165150-2-xuemingl@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211019081738.2165150-2-xuemingl@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211019081738.2165150-2-xuemingl@nvidia.com",
    "date": "2021-10-19T08:17:33",
    "name": "[v9,1/6] ethdev: introduce shared Rx queue",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "8fef240b2071d8164c5eba8d49748c4fd6b78da7",
    "submitter": {
        "id": 1904,
        "url": "http://patchwork.dpdk.org/api/people/1904/?format=api",
        "name": "Xueming Li",
        "email": "xuemingl@nvidia.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patchwork.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20211019081738.2165150-2-xuemingl@nvidia.com/mbox/",
    "series": [
        {
            "id": 19770,
            "url": "http://patchwork.dpdk.org/api/series/19770/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=19770",
            "date": "2021-10-19T08:17:32",
            "name": "ethdev: introduce shared Rx queue",
            "version": 9,
            "mbox": "http://patchwork.dpdk.org/series/19770/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/102136/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/102136/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id A3193A0C4E;\n\tTue, 19 Oct 2021 10:18:18 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 89DBC410DF;\n\tTue, 19 Oct 2021 10:18:18 +0200 (CEST)",
            "from NAM12-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam12on2062.outbound.protection.outlook.com [40.107.243.62])\n by mails.dpdk.org (Postfix) with ESMTP id C629A410DF\n for <dev@dpdk.org>; Tue, 19 Oct 2021 10:18:16 +0200 (CEST)",
            "from DM5PR07CA0087.namprd07.prod.outlook.com (2603:10b6:4:ae::16) by\n DM6PR12MB4957.namprd12.prod.outlook.com (2603:10b6:5:20d::14) with\n Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.4608.17; Tue, 19 Oct 2021 08:18:15 +0000",
            "from DM6NAM11FT031.eop-nam11.prod.protection.outlook.com\n (2603:10b6:4:ae:cafe::fe) by DM5PR07CA0087.outlook.office365.com\n (2603:10b6:4:ae::16) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4608.14 via Frontend\n Transport; Tue, 19 Oct 2021 08:18:15 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n DM6NAM11FT031.mail.protection.outlook.com (10.13.172.203) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4608.15 via Frontend Transport; Tue, 19 Oct 2021 08:18:13 +0000",
            "from nvidia.com (172.20.187.5) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Tue, 19 Oct\n 2021 08:18:00 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=AKgIXky7s8fcPvskOsY9GJcU0bwN2LHrfrly3zsOj7USjJWCpqOXnTGdLDZz+EIK9d/6s4H5VJSG7iVqQNwvPqHKfdN12MnpBPdDXwCoM+biCwQ7QNT/39dNZmyBlGVJ5S9Sva7G/N7aSUkbUPcY1vE9yvBx3yquWN3la/praSve5kbyVvFNcymGca6CtKlvvdXqFs2nRFnDS3BtBV0scCiow5vHcigbTlK1Td7NS1JTn/04fo9xG3uFvcXx7dM5K84tX5PmxZBV9f1celVCWMEQaGppiQ1Gsou8gzym1xRKQKlEOzFc0qrbtA2eF5pa1JQ+RsLkgbqtpLSCSrqorg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=NuvyKTPkgQ00fMmk813NOX/l5D/ze5yS9w5Pr2g4eb8=;\n b=DDovIEMMRjwzAUSNd8tkPjzinCt0wtAKnexkuJR7J+T3lHR/OGkAC5lQCwt7xB8vGOcUQAF0DbxMkuzKLaPrRkmwa2eZZHwaa12IZ0cQemvVDoBAkvKUVg7lANc6ipnrvitCbJtNxib6bSCyTiWkl7rktFl/JI0NPJTGPeVufCvyCdRZyl0vi9SQqHADmtwGn5z/edCZP+6ln5pJ0ZRWYQtPY2m5vziM3bYXioNGR0r+Vl4BpO2oGSOCBg0ctX+pwfIYx/V7ORFZiuGN55HPgY5mlKVgPwVADTmOSyQn/6gZrJtCmjcBbBRRRcoovhlVwb9Z7MJJRIfF4KTSGL8ibA==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=monjalon.net smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=quarantine pct=100) action=none\n header.from=nvidia.com; dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=NuvyKTPkgQ00fMmk813NOX/l5D/ze5yS9w5Pr2g4eb8=;\n b=FHf1A2u8FKdkRj1LUbJ+Re1MBxCfi0PppYu1hahFEAuBDcRTABeWMvhcxne3cCaakgbFWx6dTLlEek6RGIiuoiwWxqIjTPiKr2OQ51Q7qe9UsO6oZ8XCy1SEH/NALfOUjJZaWKWWyBJqa2vnSBnWYEuXILRpcnJzgtZLyV0Y2xwrwvUVoAKc3VfiY2lLh9dZ5/X02e7DpC20OudHqk0uQXUYVXY3znQrSfKQ/Tisi05Wc3AAryTAlNNp6tMbv4q520cveEYQQT34X4fUC7PxAmUu+sSX3Ms9N+f0AkoYgOxThuYxoPWZae0k7Ryo6XCPEQxZdUtEcbZfrCqXvnlvpw==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; monjalon.net; dkim=none (message not signed)\n header.d=none;monjalon.net; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Xueming Li <xuemingl@nvidia.com>",
        "To": "<dev@dpdk.org>, Zhang Yuying <yuying.zhang@intel.com>",
        "CC": "<xuemingl@nvidia.com>, Jerin Jacob <jerinjacobk@gmail.com>, Ferruh Yigit\n <ferruh.yigit@intel.com>, Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>, Thomas Monjalon\n <thomas@monjalon.net>, Lior Margalit <lmargalit@nvidia.com>, \"Ananyev\n Konstantin\" <konstantin.ananyev@intel.com>, Ajit Khaparde\n <ajit.khaparde@broadcom.com>",
        "Date": "Tue, 19 Oct 2021 16:17:33 +0800",
        "Message-ID": "<20211019081738.2165150-2-xuemingl@nvidia.com>",
        "X-Mailer": "git-send-email 2.33.0",
        "In-Reply-To": "<20211019081738.2165150-1-xuemingl@nvidia.com>",
        "References": "<20210727034204.20649-1-xuemingl@nvidia.com>\n <20211019081738.2165150-1-xuemingl@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=\"UTF-8\"",
        "Content-Transfer-Encoding": "8bit",
        "X-Originating-IP": "[172.20.187.5]",
        "X-ClientProxiedBy": "HQMAIL111.nvidia.com (172.20.187.18) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "827d07d9-f21e-46c0-a436-08d992d8ffc1",
        "X-MS-TrafficTypeDiagnostic": "DM6PR12MB4957:",
        "X-LD-Processed": "43083d15-7273-40c1-b7db-39efd9ccc17a,ExtAddr",
        "X-Microsoft-Antispam-PRVS": "\n <DM6PR12MB49572DB457310FB76D76D1ABA1BD9@DM6PR12MB4957.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:9508;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n JY9GbEqRtBWOwgnW7mpFk0yvzUImSrQanZsVaBNGdxX/2UqbrumZq++aUI3z5y85wQqLR4/bc6Qpnf1Spz2HxOq4OnMGZD87CysSzvN450YAm7Szny6GyXju1+6nQHZe0+oQvUzC7p7mm2OXt4CErb2J0AqYsVUGgE2Jfvi4lIhBdoumXV+AEg3obpVycuiOylUnPhdYvUuAt4mSAd8QXur51UI6Sz1u4eAmuy9q2SUhoH0dnEdDAAFM7+32NtyonrOiBeXQXTi6YFvy84V+EGTSCiSZYD+7qdcACUysb2aH4tY8vzve3ZfoMvp6it1/bcb4+LmXLzhoZCzh7934gYx2jlG87RxatX5F4QDRY2eHROY7G7HvgB1k9DmdWQ4RpzVcmhAAEbBT2vomk+fxDkWFSNlwQyIrvMETrI67lLZQYSKAUIXqOg87RbjlruGEmyhQwfQHZDxsR8FeoYAeG5CaK4FGKkiILrr0zz6ejLNhsV8/xMa7A2r+XT1T37VdMF/h3ERS+Exw22F13EoNtFmAYmm+ryUNRD6zlaiBUZmTGlw2amdLWNRfA37mCywjdjXqkOdqteOMA+V6jOfAchjogj1KwzNchJBm+fHr8LfZnOHOJssYnS3ILETJoONXcGJfy+uPvflXUVdF93SEbKdFJrA6aYDbzbnSkCIlsPed42yenq00e8ZrU5uufGUdLjpWDXdfdQ78kidMp8+TaNgVCopPjRi2BRP7JE1dYDHCrFMplfFFrtYntbNX33w3tQl3Z7NdCo07oESFo9Htb+kAAY4hHkqzD82O5QcgxeA=",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(36840700001)(46966006)(316002)(336012)(82310400003)(83380400001)(508600001)(6286002)(70586007)(4326008)(70206006)(6666004)(7696005)(5660300002)(426003)(2616005)(186003)(26005)(36756003)(7636003)(2906002)(110136005)(8936002)(36860700001)(8676002)(54906003)(1076003)(47076005)(86362001)(16526019)(356005)(55016002);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "19 Oct 2021 08:18:13.4017 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 827d07d9-f21e-46c0-a436-08d992d8ffc1",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT031.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "DM6PR12MB4957",
        "Subject": "[dpdk-dev] [PATCH v9 1/6] ethdev: introduce shared Rx queue",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "In current DPDK framework, each Rx queue is pre-loaded with mbufs to\nsave incoming packets. For some PMDs, when number of representors scale\nout in a switch domain, the memory consumption became significant.\nPolling all ports also leads to high cache miss, high latency and low\nthroughput.\n\nThis patch introduces shared Rx queue. Ports in same Rx domain and\nswitch domain could share Rx queue set by specifying non-zero sharing\ngroup in Rx queue configuration.\n\nShared Rx queue is identified by share_rxq field of Rx queue\nconfiguration. Port A RxQ X can share RxQ with Port B RxQ Y by using\nsame shared Rx queue ID.\n\nNo special API is defined to receive packets from shared Rx queue.\nPolling any member port of a shared Rx queue receives packets of that\nqueue for all member ports, port_id is identified by mbuf->port. PMD is\nresponsible to resolve shared Rx queue from device and queue data.\n\nShared Rx queue must be polled in same thread or core, polling a queue\nID of any member port is essentially same.\n\nMultiple share groups are supported. PMD should support mixed\nconfiguration by allowing multiple share groups and non-shared Rx queue\non one port.\n\nExample grouping and polling model to reflect service priority:\n Group1, 2 shared Rx queues per port: PF, rep0, rep1\n Group2, 1 shared Rx queue per port: rep2, rep3, ... rep127\n Core0: poll PF queue0\n Core1: poll PF queue1\n Core2: poll rep2 queue0\n\nPMD advertise shared Rx queue capability via RTE_ETH_DEV_CAPA_RXQ_SHARE.\n\nPMD is responsible for shared Rx queue consistency checks to avoid\nmember port's configuration contradict each other.\n\nSigned-off-by: Xueming Li <xuemingl@nvidia.com>\nReviewed-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>\n---\n doc/guides/nics/features.rst                  | 13 ++++++++++\n doc/guides/nics/features/default.ini          |  1 +\n .../prog_guide/switch_representation.rst      | 11 +++++++++\n doc/guides/rel_notes/release_21_11.rst        |  6 +++++\n lib/ethdev/rte_ethdev.c                       |  8 +++++++\n lib/ethdev/rte_ethdev.h                       | 24 +++++++++++++++++++\n 6 files changed, 63 insertions(+)",
    "diff": "diff --git a/doc/guides/nics/features.rst b/doc/guides/nics/features.rst\nindex e346018e4b8..89f9accbca1 100644\n--- a/doc/guides/nics/features.rst\n+++ b/doc/guides/nics/features.rst\n@@ -615,6 +615,19 @@ Supports inner packet L4 checksum.\n   ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_OUTER_UDP_CKSUM``.\n \n \n+.. _nic_features_shared_rx_queue:\n+\n+Shared Rx queue\n+---------------\n+\n+Supports shared Rx queue for ports in same Rx domain of a switch domain.\n+\n+* **[uses]     rte_eth_dev_info**: ``dev_capa:RTE_ETH_DEV_CAPA_RXQ_SHARE``.\n+* **[uses]     rte_eth_dev_info,rte_eth_switch_info**: ``rx_domain``, ``domain_id``.\n+* **[uses]     rte_eth_rxconf**: ``share_group``, ``share_qid``.\n+* **[provides] mbuf**: ``mbuf.port``.\n+\n+\n .. _nic_features_packet_type_parsing:\n \n Packet type parsing\ndiff --git a/doc/guides/nics/features/default.ini b/doc/guides/nics/features/default.ini\nindex d473b94091a..93f5d1b46f4 100644\n--- a/doc/guides/nics/features/default.ini\n+++ b/doc/guides/nics/features/default.ini\n@@ -19,6 +19,7 @@ Free Tx mbuf on demand =\n Queue start/stop     =\n Runtime Rx queue setup =\n Runtime Tx queue setup =\n+Shared Rx queue      =\n Burst mode info      =\n Power mgmt address monitor =\n MTU update           =\ndiff --git a/doc/guides/prog_guide/switch_representation.rst b/doc/guides/prog_guide/switch_representation.rst\nindex ff6aa91c806..4f2532a91ea 100644\n--- a/doc/guides/prog_guide/switch_representation.rst\n+++ b/doc/guides/prog_guide/switch_representation.rst\n@@ -123,6 +123,17 @@ thought as a software \"patch panel\" front-end for applications.\n .. [1] `Ethernet switch device driver model (switchdev)\n        <https://www.kernel.org/doc/Documentation/networking/switchdev.txt>`_\n \n+- For some PMDs, memory usage of representors is huge when number of\n+  representor grows, mbufs are allocated for each descriptor of Rx queue.\n+  Polling large number of ports brings more CPU load, cache miss and\n+  latency. Shared Rx queue can be used to share Rx queue between PF and\n+  representors among same Rx domain. ``RTE_ETH_DEV_CAPA_RXQ_SHARE`` in\n+  device info is used to indicate the capability. Setting non-zero share\n+  group in Rx queue configuration to enable share, share_qid is used to\n+  identify the shared Rx queue in group. 
Polling any member port can\n+  receive packets of all member ports in the group, port ID is saved in\n+  ``mbuf.port``.\n+\n Basic SR-IOV\n ------------\n \ndiff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst\nindex d5435a64aa1..b34d9776a15 100644\n--- a/doc/guides/rel_notes/release_21_11.rst\n+++ b/doc/guides/rel_notes/release_21_11.rst\n@@ -75,6 +75,12 @@ New Features\n     operations.\n   * Added multi-process support.\n \n+* **Added ethdev shared Rx queue support.**\n+\n+  * Added new device capability flag and Rx domain field to switch info.\n+  * Added share group and share queue ID to Rx queue configuration.\n+  * Added testpmd support and dedicate forwarding engine.\n+\n * **Added new RSS offload types for IPv4/L4 checksum in RSS flow.**\n \n   Added macros ETH_RSS_IPV4_CHKSUM and ETH_RSS_L4_CHKSUM, now IPv4 and\ndiff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c\nindex 028907bc4b9..bc55f899f72 100644\n--- a/lib/ethdev/rte_ethdev.c\n+++ b/lib/ethdev/rte_ethdev.c\n@@ -2159,6 +2159,14 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,\n \t\treturn -EINVAL;\n \t}\n \n+\tif (local_conf.share_group > 0 &&\n+\t    (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) {\n+\t\tRTE_ETHDEV_LOG(ERR,\n+\t\t\t\"Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\\n\",\n+\t\t\tport_id, rx_queue_id, local_conf.share_group);\n+\t\treturn -EINVAL;\n+\t}\n+\n \t/*\n \t * If LRO is enabled, check that the maximum aggregated packet\n \t * size is supported by the configured device.\ndiff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h\nindex 6d80514ba7a..34acc91273d 100644\n--- a/lib/ethdev/rte_ethdev.h\n+++ b/lib/ethdev/rte_ethdev.h\n@@ -1044,6 +1044,14 @@ struct rte_eth_rxconf {\n \tuint8_t rx_drop_en; /**< Drop packets if no descriptors are available. */\n \tuint8_t rx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */\n \tuint16_t rx_nseg; /**< Number of descriptions in rx_seg array. */\n+\t/**\n+\t * Share group index in Rx domain and switch domain.\n+\t * Non-zero value to enable Rx queue share, zero value disable share.\n+\t * PMD is responsible for Rx queue consistency checks to avoid member\n+\t * port's configuration contradict to each other.\n+\t */\n+\tuint16_t share_group;\n+\tuint16_t share_qid; /**< Shared Rx queue ID in group. */\n \t/**\n \t * Per-queue Rx offloads to be set using DEV_RX_OFFLOAD_* flags.\n \t * Only offloads set on rx_queue_offload_capa or rx_offload_capa\n@@ -1445,6 +1453,16 @@ struct rte_eth_conf {\n #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001\n /** Device supports Tx queue setup after device started. */\n #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002\n+/**\n+ * Device supports shared Rx queue among ports within Rx domain and\n+ * switch domain. Mbufs are consumed by shared Rx queue instead of\n+ * each queue. Multiple groups are supported by share_group of Rx\n+ * queue configuration. Shared Rx queue is identified by PMD using\n+ * share_qid of Rx queue configuration. 
Polling any port in the group\n+ * receive packets of all member ports, source port identified by\n+ * mbuf->port field.\n+ */\n+#define RTE_ETH_DEV_CAPA_RXQ_SHARE              RTE_BIT64(2)\n /**@}*/\n \n /*\n@@ -1488,6 +1506,12 @@ struct rte_eth_switch_info {\n \t * but each driver should explicitly define the mapping of switch\n \t * port identifier to that physical interconnect/switch\n \t */\n+\t/**\n+\t * Shared Rx queue sub-domain boundary. Only ports in same Rx domain\n+\t * and switch domain can share Rx queue. Valid only if device advertised\n+\t * RTE_ETH_DEV_CAPA_RXQ_SHARE capability.\n+\t */\n+\tuint16_t rx_domain;\n };\n \n /**\n",
    "prefixes": [
        "v9",
        "1/6"
    ]
}
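
The PATCH and PUT methods listed above require authentication. The sketch below is illustrative only: it assumes a Patchwork API token with maintainer rights on the project, and it assumes that state and archived are accepted as writable fields in a partial update (the field names are taken from the response above); consult the server's OPTIONS response before relying on the exact field set.

import requests

API_TOKEN = "replace-with-your-token"  # placeholder; generate a real token from your Patchwork profile
URL = "http://patchwork.dpdk.org/api/patches/102136/"

# Partial update: only the fields present in the JSON body are changed.
resp = requests.patch(
    URL,
    headers={"Authorization": f"Token {API_TOKEN}"},
    json={"state": "accepted", "archived": False},
)
resp.raise_for_status()
print(resp.json()["state"])

A PUT to the same URL performs a full update of the writable fields rather than a merge.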