get:
Show a patch.

patch:
Partially update a patch (only the provided fields are modified).

put:
Update a patch.

GET /api/patches/99694/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 99694,
    "url": "http://patchwork.dpdk.org/api/patches/99694/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20210926111904.237736-12-xuemingl@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210926111904.237736-12-xuemingl@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210926111904.237736-12-xuemingl@nvidia.com",
    "date": "2021-09-26T11:19:04",
    "name": "[11/11] net/mlx5: support shared Rx queue",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "67ba29aa81040ea882910d6d8503cb295800412b",
    "submitter": {
        "id": 1904,
        "url": "http://patchwork.dpdk.org/api/people/1904/?format=api",
        "name": "Xueming Li",
        "email": "xuemingl@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patchwork.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20210926111904.237736-12-xuemingl@nvidia.com/mbox/",
    "series": [
        {
            "id": 19166,
            "url": "http://patchwork.dpdk.org/api/series/19166/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=19166",
            "date": "2021-09-26T11:18:53",
            "name": "net/mlx5: support shared Rx queue",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/19166/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/99694/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/99694/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id E658FA0547;\n\tSun, 26 Sep 2021 13:20:43 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 128C740E78;\n\tSun, 26 Sep 2021 13:20:41 +0200 (CEST)",
            "from NAM11-BN8-obe.outbound.protection.outlook.com\n (mail-bn8nam11on2053.outbound.protection.outlook.com [40.107.236.53])\n by mails.dpdk.org (Postfix) with ESMTP id 69A5C4003F\n for <dev@dpdk.org>; Sun, 26 Sep 2021 13:20:39 +0200 (CEST)",
            "from BN6PR1101CA0010.namprd11.prod.outlook.com\n (2603:10b6:405:4a::20) by BYAPR12MB2871.namprd12.prod.outlook.com\n (2603:10b6:a03:13d::25) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4544.15; Sun, 26 Sep\n 2021 11:20:37 +0000",
            "from BN8NAM11FT061.eop-nam11.prod.protection.outlook.com\n (2603:10b6:405:4a:cafe::6b) by BN6PR1101CA0010.outlook.office365.com\n (2603:10b6:405:4a::20) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4544.13 via Frontend\n Transport; Sun, 26 Sep 2021 11:20:37 +0000",
            "from mail.nvidia.com (216.228.112.32) by\n BN8NAM11FT061.mail.protection.outlook.com (10.13.177.144) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4544.13 via Frontend Transport; Sun, 26 Sep 2021 11:20:37 +0000",
            "from DRHQMAIL107.nvidia.com (10.27.9.16) by HQMAIL109.nvidia.com\n (172.20.187.15) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Sun, 26 Sep\n 2021 04:20:36 -0700",
            "from nvidia.com (172.20.187.5) by DRHQMAIL107.nvidia.com\n (10.27.9.16) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Sun, 26 Sep\n 2021 11:20:34 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=YG4yemd6BXj7iuJiB6wDv8Dswto74Znzs0tGBJLF4q/Pwmkx+48W3h5F+x6RwGva2yPNLGGkw9YbhcWvJStrct9kD5XxocrNFXYLBX23umuGdiPZpPsfoCf0k28f3Dv3cfssyZYln1AG1h9YmlTmA+DfVRu27HfpDFclLaQsbZdlVwn9Q1R8bT7jXhhSYB8YN7L/PL/f/qE7vG1cjf9uAP+R5BDzw8qm8FjOjnYo5/xB7Kj61J6MVKBTiV375KABYOnv7YuAhOBuhS2nZ/KvBMr1z+ZzpATyMeNFlao8Vfjo1UtDvaeiRkUlJT6kL/09uzej6y3gQH1LkggtjcJowg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version;\n bh=yPRCQNcYnl5/4SbLy66mAZbW3XX0VcBOK+p/sd40nHw=;\n b=DYJsNp3wb9XNU+e9VsaDVgLcueOlCoejwueUr4llJmcQMTNXMkoqrzcLrWb+Jin791RiLHjMV4AZ7vowNVcC++bdmHQ2xKGiRTYQmzom/s6V0jYEVWubAWWX1Z24lSGSTj+YzY+g3FTsI1DFjnPlYuuOev00ChDwyuYvwlNO23N1JMt8yeiG4FsilPpKRAh563bkabUauKilRu1H/OxkIM1cHu10kXpsj5rWAscMfBHE+w0kjh1B0RwRCwmW2bEWlmJYRflfVff0RO6N+n68Fv68aav4h+BPkG/CRtyPrcTXMGDdJmn7HJrKS7lhYPYukA2k28tMPH/srn9qzGgCaQ==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.32) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=yPRCQNcYnl5/4SbLy66mAZbW3XX0VcBOK+p/sd40nHw=;\n b=Az1toUoJJs9Wu3ayuUKYY+5k2Znahl5jBjdhTUI8L4PXfM4eXhCPE2gUgGcA962nyrjL7ASEYsHXLCgsOnMR0GWrdPTZw2Mi9P5dSOrmbZ31wICPnDbU2F5AOnwJtqjXHNITbFigKagqr7ssyIOdzcXADa9YEnIl/tTuCBMzeP8o5O+KFJdEgqW1fyl+5dAOfQTbrH4k9RUAn+HKVUztuvtj2fyw7Y2Jz+vjAlC3buEx/8jYC+LjLW2zeYbvwEGqN8I0faRLmHOU9wji/rXbHdh7ojRmIrx0+A8CK4JNz1+CEo/yWDJzj/WVal9h3Zbr9dZZpQjDX7ODicoPbFsNnw==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.32)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.32 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.32; helo=mail.nvidia.com;",
        "From": "Xueming Li <xuemingl@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<xuemingl@nvidia.com>, Lior Margalit <lmargalit@nvidia.com>, Matan Azrad\n <matan@nvidia.com>, Viacheslav Ovsiienko <viacheslavo@nvidia.com>",
        "Date": "Sun, 26 Sep 2021 19:19:04 +0800",
        "Message-ID": "<20210926111904.237736-12-xuemingl@nvidia.com>",
        "X-Mailer": "git-send-email 2.33.0",
        "In-Reply-To": "<20210926111904.237736-1-xuemingl@nvidia.com>",
        "References": "<20210926111904.237736-1-xuemingl@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.5]",
        "X-ClientProxiedBy": "HQMAIL107.nvidia.com (172.20.187.13) To\n DRHQMAIL107.nvidia.com (10.27.9.16)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "f72671ed-0068-4843-b062-08d980dfaa63",
        "X-MS-TrafficTypeDiagnostic": "BYAPR12MB2871:",
        "X-Microsoft-Antispam-PRVS": "\n <BYAPR12MB2871E04B1D2F85386ED68C0DA1A69@BYAPR12MB2871.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:94;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n BHzQy4wzdMjbV2aLqYLQSlLetWd9GpO7IOtcn848ZAjKd0Niq8h2a7IU1kJI6QX01iDNh5WKwV7m6kBJCxbWeu/eTMRtf5kNcjq6wfV6TaCddbyFyrGbDjq1takLviiRCRUC7ObZWQSmSBqk47Rt17HyJ5YsxE9QqGePEauj4Fppqu2Xf2KRyRgGBMeCf3oYovFJHe06EMya4D60BOvVxiWKBFQM3bXLqQDw7nOrjVu4u2EScTbroRKSAOQEyuNn4VHY0DWUkRFpFlUAwQCaHuWTMSYGYTtgcdSh+y+k8T6sSU0FXqcrd6Bnf/isTfRI6QQJka4sBD+Mj0zK4i/HG5tOElsrXvMP96633jCgknWbj2GirQgSfK59G+k4kMZ0A9hEWEr/ToixiYEK+nzVDPj/jVmmtk8ArLMgyLtna2cQ+GcSSGzlET9sKjs80Bp8tV5He6Jjtss33iDDiy8rtdZgreKKr0PkDsSlM7yiCMLIserz/K2vcFdN/A4I+rMZifn9Plr3HFflncDfq6Ozeenj0LqfZhDSRqpV3otQQl7SFvdW9TddL/Dhs//jP8aMw9RKPPn3Adg/Fb8HHFuGuGy7UmdJCOlbCbPtT7GBJ2BRzwisonwxAZOzdg6SxAmqLTEoC8CpmWSqscnDIh0T5kshvTn1V+gHCzfmOQ+65pLswmAKaDgxY4goNEt9FalCUySETYUs6hknBlX8zltQrg==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.32; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid01.nvidia.com; CAT:NONE;\n SFS:(4636009)(46966006)(36840700001)(6286002)(36860700001)(36756003)(107886003)(6916009)(26005)(47076005)(6666004)(30864003)(16526019)(1076003)(186003)(83380400001)(7696005)(2616005)(8676002)(316002)(2906002)(86362001)(70586007)(82310400003)(7636003)(336012)(55016002)(5660300002)(8936002)(54906003)(508600001)(70206006)(4326008)(426003)(356005);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "26 Sep 2021 11:20:37.1849 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n f72671ed-0068-4843-b062-08d980dfaa63",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.32];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n BN8NAM11FT061.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BYAPR12MB2871",
        "Subject": "[dpdk-dev] [PATCH 11/11] net/mlx5: support shared Rx queue",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch introduces shared RXQ. All share Rx queues with same group\nand queue id shares same rxq_ctrl. Rxq_ctrl and rxq_data are shared,\nall queues from different member port share same WQ and CQ, essentially\none Rx WQ, mbufs are filled into this singleton WQ.\n\nShared rxq_data is set into device Rx queues of all member ports as\nrxq object, used for receiving packets. Polling queue of any member\nports returns packets of any member, mbuf->port is used to identify\nsource port.\n\nSigned-off-by: Xueming Li <xuemingl@nvidia.com>\n---\n doc/guides/nics/features/mlx5.ini |   1 +\n doc/guides/nics/mlx5.rst          |   6 +\n drivers/net/mlx5/linux/mlx5_os.c  |   2 +\n drivers/net/mlx5/mlx5.h           |   2 +\n drivers/net/mlx5/mlx5_devx.c      |   9 +-\n drivers/net/mlx5/mlx5_rx.h        |   7 +\n drivers/net/mlx5/mlx5_rxq.c       | 208 ++++++++++++++++++++++++++----\n drivers/net/mlx5/mlx5_trigger.c   |  76 ++++++-----\n 8 files changed, 255 insertions(+), 56 deletions(-)",
    "diff": "diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini\nindex f01abd4231f..ff5e669acc1 100644\n--- a/doc/guides/nics/features/mlx5.ini\n+++ b/doc/guides/nics/features/mlx5.ini\n@@ -11,6 +11,7 @@ Removal event        = Y\n Rx interrupt         = Y\n Fast mbuf free       = Y\n Queue start/stop     = Y\n+Shared Rx queue      = Y\n Burst mode info      = Y\n Power mgmt address monitor = Y\n MTU update           = Y\ndiff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst\nindex ca3e7f560da..494ee957c1d 100644\n--- a/doc/guides/nics/mlx5.rst\n+++ b/doc/guides/nics/mlx5.rst\n@@ -113,6 +113,7 @@ Features\n - Connection tracking.\n - Sub-Function representors.\n - Sub-Function.\n+- Shared Rx queue.\n \n \n Limitations\n@@ -464,6 +465,11 @@ Limitations\n   - In order to achieve best insertion rate, application should manage the flows per lcore.\n   - Better to disable memory reclaim by setting ``reclaim_mem_mode`` to 0 to accelerate the flow object allocation and release with cache.\n \n+ Shared Rx queue:\n+\n+  - Counter of received packets and bytes number of devices in same share group are same.\n+  - Counter of received packets and bytes number of queues in same group and queue ID are same.\n+\n Statistics\n ----------\n \ndiff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c\nindex 27233b679c6..b631768b4f9 100644\n--- a/drivers/net/mlx5/linux/mlx5_os.c\n+++ b/drivers/net/mlx5/linux/mlx5_os.c\n@@ -457,6 +457,7 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)\n \t\t\tmlx5_glue->dr_create_flow_action_default_miss();\n \tif (!sh->default_miss_action)\n \t\tDRV_LOG(WARNING, \"Default miss action is not supported.\");\n+\tLIST_INIT(&sh->shared_rxqs);\n \treturn 0;\n error:\n \t/* Rollback the created objects. 
*/\n@@ -531,6 +532,7 @@ mlx5_os_free_shared_dr(struct mlx5_priv *priv)\n \tMLX5_ASSERT(sh && sh->refcnt);\n \tif (sh->refcnt > 1)\n \t\treturn;\n+\tMLX5_ASSERT(LIST_EMPTY(&sh->shared_rxqs));\n #ifdef HAVE_MLX5DV_DR\n \tif (sh->rx_domain) {\n \t\tmlx5_glue->dr_destroy_domain(sh->rx_domain);\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 6a9c99a8826..c671c8a354f 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -1193,6 +1193,7 @@ struct mlx5_dev_ctx_shared {\n \tstruct mlx5_flex_parser_profiles fp[MLX5_FLEX_PARSER_MAX];\n \t/* Flex parser profiles information. */\n \tvoid *devx_rx_uar; /* DevX UAR for Rx. */\n+\tLIST_HEAD(shared_rxqs, mlx5_rxq_ctrl) shared_rxqs; /* Shared RXQs. */\n \tstruct mlx5_aso_age_mng *aso_age_mng;\n \t/* Management data for aging mechanism using ASO Flow Hit. */\n \tstruct mlx5_geneve_tlv_option_resource *geneve_tlv_option_resource;\n@@ -1257,6 +1258,7 @@ struct mlx5_rxq_obj {\n \t\t};\n \t\tstruct mlx5_devx_obj *rq; /* DevX RQ object for hairpin. */\n \t\tstruct {\n+\t\t\tstruct mlx5_devx_rmp devx_rmp; /* RMP for shared RQ. */\n \t\t\tstruct mlx5_devx_cq cq_obj; /* DevX CQ object. */\n \t\t\tvoid *devx_channel;\n \t\t};\ndiff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c\nindex 371ff387c99..01561639038 100644\n--- a/drivers/net/mlx5/mlx5_devx.c\n+++ b/drivers/net/mlx5/mlx5_devx.c\n@@ -170,6 +170,8 @@ mlx5_rxq_devx_obj_release(struct mlx5_rxq_priv *rxq)\n \t\tmemset(&rxq->devx_rq, 0, sizeof(rxq->devx_rq));\n \t\tmlx5_devx_cq_destroy(&rxq_obj->cq_obj);\n \t\tmemset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));\n+\t\tif (!RXQ_CTRL_LAST(rxq))\n+\t\t\treturn;\n \t\tif (rxq_obj->devx_channel) {\n \t\t\tmlx5_os_devx_destroy_event_channel\n \t\t\t\t\t\t\t(rxq_obj->devx_channel);\n@@ -270,6 +272,8 @@ mlx5_rxq_create_devx_rq_resources(struct mlx5_rxq_priv *rxq)\n \trq_attr.wq_attr.pd = priv->sh->pdn;\n \trq_attr.counter_set_id = priv->counter_set_id;\n \t/* Create RQ using DevX API. 
*/\n+\tif (rxq_data->shared)\n+\t\trxq->devx_rq.rmp = &rxq_ctrl->obj->devx_rmp;\n \treturn mlx5_devx_rq_create(priv->sh->ctx, &rxq->devx_rq,\n \t\t\t\t   wqe_size, log_desc_n, &rq_attr,\n \t\t\t\t   rxq_ctrl->socket);\n@@ -495,7 +499,10 @@ mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)\n \tret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);\n \tif (ret)\n \t\tgoto error;\n-\trxq_data->wqes = (void *)(uintptr_t)rxq->devx_rq.wq.umem_buf;\n+\tif (rxq_data->shared)\n+\t\trxq_data->wqes = (void *)(uintptr_t)tmpl->devx_rmp.wq.umem_buf;\n+\telse\n+\t\trxq_data->wqes = (void *)(uintptr_t)rxq->devx_rq.wq.umem_buf;\n \trxq_data->rq_db = (uint32_t *)(uintptr_t)rxq->devx_rq.db_rec;\n \tmlx5_rxq_initialize(rxq_data);\n \tpriv->dev_data->rx_queue_state[rxq->idx] = RTE_ETH_QUEUE_STATE_STARTED;\ndiff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h\nindex 161399c764d..a83fa6e8db1 100644\n--- a/drivers/net/mlx5/mlx5_rx.h\n+++ b/drivers/net/mlx5/mlx5_rx.h\n@@ -26,6 +26,9 @@\n #define RXQ_PORT(rxq_ctrl) LIST_FIRST(&(rxq_ctrl)->owners)->priv\n #define RXQ_DEV(rxq_ctrl) ETH_DEV(RXQ_PORT(rxq_ctrl))\n #define RXQ_PORT_ID(rxq_ctrl) PORT_ID(RXQ_PORT(rxq_ctrl))\n+#define RXQ_CTRL_LAST(rxq) \\\n+\t(LIST_FIRST(&(rxq)->ctrl->owners) == (rxq) && \\\n+\tLIST_NEXT((rxq), owner_entry) == NULL)\n \n struct mlx5_rxq_stats {\n #ifdef MLX5_PMD_SOFT_COUNTERS\n@@ -107,6 +110,7 @@ struct mlx5_rxq_data {\n \tunsigned int lro:1; /* Enable LRO. */\n \tunsigned int dynf_meta:1; /* Dynamic metadata is configured. */\n \tunsigned int mcqe_format:3; /* CQE compression format. */\n+\tunsigned int shared:1; /* Shared RXQ. */\n \tvolatile uint32_t *rq_db;\n \tvolatile uint32_t *cq_db;\n \tuint16_t port_id;\n@@ -169,6 +173,9 @@ struct mlx5_rxq_ctrl {\n \tstruct mlx5_dev_ctx_shared *sh; /* Shared context. */\n \tenum mlx5_rxq_type type; /* Rxq type. */\n \tunsigned int socket; /* CPU socket ID for allocations. */\n+\tLIST_ENTRY(mlx5_rxq_ctrl) share_entry; /* Entry in shared RXQ list. 
*/\n+\tuint32_t share_group; /* Group ID of shared RXQ. */\n+\tunsigned int started:1; /* Whether (shared) RXQ has been started. */\n \tunsigned int irq:1; /* Whether IRQ is enabled. */\n \tuint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */\n \tuint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */\ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex cde01a48022..45f78ad076b 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -28,6 +28,7 @@\n #include \"mlx5_rx.h\"\n #include \"mlx5_utils.h\"\n #include \"mlx5_autoconf.h\"\n+#include \"mlx5_devx.h\"\n \n \n /* Default RSS hash key also used for ConnectX-3. */\n@@ -352,6 +353,9 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)\n \t\toffloads |= DEV_RX_OFFLOAD_VLAN_STRIP;\n \tif (MLX5_LRO_SUPPORTED(dev))\n \t\toffloads |= DEV_RX_OFFLOAD_TCP_LRO;\n+\tif (priv->config.hca_attr.mem_rq_rmp &&\n+\t    priv->obj_ops.rxq_obj_new == devx_obj_ops.rxq_obj_new)\n+\t\toffloads |= RTE_ETH_RX_OFFLOAD_SHARED_RXQ;\n \treturn offloads;\n }\n \n@@ -648,6 +652,114 @@ mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)\n \treturn 0;\n }\n \n+/**\n+ * Get the shared Rx queue object that matches group and queue index.\n+ *\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ * @param group\n+ *   Shared RXQ group.\n+ * @param idx\n+ *   RX queue index.\n+ *\n+ * @return\n+ *   Shared RXQ object that matching, or NULL if not found.\n+ */\n+static struct mlx5_rxq_ctrl *\n+mlx5_shared_rxq_get(struct rte_eth_dev *dev, uint32_t group, uint16_t idx)\n+{\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\n+\tLIST_FOREACH(rxq_ctrl, &priv->sh->shared_rxqs, share_entry) {\n+\t\tif (rxq_ctrl->share_group == group && rxq_ctrl->rxq.idx == idx)\n+\t\t\treturn rxq_ctrl;\n+\t}\n+\treturn NULL;\n+}\n+\n+/**\n+ * Check whether requested Rx queue configuration matches shared RXQ.\n+ *\n+ 
* @param rxq_ctrl\n+ *   Pointer to shared RXQ.\n+ * @param dev\n+ *   Pointer to Ethernet device structure.\n+ * @param idx\n+ *   Queue index.\n+ * @param desc\n+ *   Number of descriptors to configure in queue.\n+ * @param socket\n+ *   NUMA socket on which memory must be allocated.\n+ * @param[in] conf\n+ *   Thresholds parameters.\n+ * @param mp\n+ *   Memory pool for buffer allocations.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static bool\n+mlx5_shared_rxq_match(struct mlx5_rxq_ctrl *rxq_ctrl, struct rte_eth_dev *dev,\n+\t\t      uint16_t idx, uint16_t desc, unsigned int socket,\n+\t\t      const struct rte_eth_rxconf *conf,\n+\t\t      struct rte_mempool *mp)\n+{\n+\tstruct mlx5_priv *spriv = LIST_FIRST(&rxq_ctrl->owners)->priv;\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tunsigned int mprq_stride_nums = priv->config.mprq.stride_num_n ?\n+\t\tpriv->config.mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;\n+\n+\tRTE_SET_USED(conf);\n+\tif (rxq_ctrl->socket != socket) {\n+\t\tDRV_LOG(ERR, \"port %u queue index %u failed to join shared group: socket mismatch\",\n+\t\t\tdev->data->port_id, idx);\n+\t\treturn false;\n+\t}\n+\tif (priv->config.mprq.enabled)\n+\t\tdesc >>= mprq_stride_nums;\n+\tif (rxq_ctrl->rxq.elts_n != log2above(desc)) {\n+\t\tDRV_LOG(ERR, \"port %u queue index %u failed to join shared group: descriptor number mismatch\",\n+\t\t\tdev->data->port_id, idx);\n+\t\treturn false;\n+\t}\n+\tif (priv->mtu != spriv->mtu) {\n+\t\tDRV_LOG(ERR, \"port %u queue index %u failed to join shared group: mtu mismatch\",\n+\t\t\tdev->data->port_id, idx);\n+\t\treturn false;\n+\t}\n+\tif (priv->dev_data->dev_conf.intr_conf.rxq !=\n+\t    spriv->dev_data->dev_conf.intr_conf.rxq) {\n+\t\tDRV_LOG(ERR, \"port %u queue index %u failed to join shared group: interrupt mismatch\",\n+\t\t\tdev->data->port_id, idx);\n+\t\treturn false;\n+\t}\n+\tif (!spriv->config.mprq.enabled && rxq_ctrl->rxq.mp != mp) 
{\n+\t\tDRV_LOG(ERR, \"port %u queue index %u failed to join shared group: mempool mismatch\",\n+\t\t\tdev->data->port_id, idx);\n+\t\treturn false;\n+\t}\n+\tif (priv->config.hw_padding != spriv->config.hw_padding) {\n+\t\tDRV_LOG(ERR, \"port %u queue index %u failed to join shared group: padding mismatch\",\n+\t\t\tdev->data->port_id, idx);\n+\t\treturn false;\n+\t}\n+\tif (memcmp(&priv->config.mprq, &spriv->config.mprq,\n+\t\t   sizeof(priv->config.mprq)) != 0) {\n+\t\tDRV_LOG(ERR, \"port %u queue index %u failed to join shared group: MPRQ mismatch\",\n+\t\t\tdev->data->port_id, idx);\n+\t\treturn false;\n+\t}\n+\tif (priv->config.cqe_comp != spriv->config.cqe_comp ||\n+\t    (priv->config.cqe_comp &&\n+\t     priv->config.cqe_comp_fmt != spriv->config.cqe_comp_fmt)) {\n+\t\tDRV_LOG(ERR, \"port %u queue index %u failed to join shared group: CQE compression mismatch\",\n+\t\t\tdev->data->port_id, idx);\n+\t\treturn false;\n+\t}\n+\treturn true;\n+}\n+\n /**\n  *\n  * @param dev\n@@ -673,12 +785,14 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n {\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_rxq_priv *rxq;\n-\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = NULL;\n \tstruct rte_eth_rxseg_split *rx_seg =\n \t\t\t\t(struct rte_eth_rxseg_split *)conf->rx_seg;\n \tstruct rte_eth_rxseg_split rx_single = {.mp = mp};\n \tuint16_t n_seg = conf->rx_nseg;\n \tint res;\n+\tuint64_t offloads = conf->offloads |\n+\t\t\t    dev->data->dev_conf.rxmode.offloads;\n \n \tif (mp) {\n \t\t/*\n@@ -690,9 +804,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \t\tn_seg = 1;\n \t}\n \tif (n_seg > 1) {\n-\t\tuint64_t offloads = conf->offloads |\n-\t\t\t\t    dev->data->dev_conf.rxmode.offloads;\n-\n \t\t/* The offloads should be checked on rte_eth_dev layer. 
*/\n \t\tMLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);\n \t\tif (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {\n@@ -704,9 +815,32 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \t\t}\n \t\tMLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);\n \t}\n+\tif (offloads & RTE_ETH_RX_OFFLOAD_SHARED_RXQ) {\n+\t\tif (!priv->config.hca_attr.mem_rq_rmp) {\n+\t\t\tDRV_LOG(ERR, \"port %u queue index %u shared Rx queue not supported by fw\",\n+\t\t\t\t     dev->data->port_id, idx);\n+\t\t\trte_errno = EINVAL;\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t\tif (priv->obj_ops.rxq_obj_new != devx_obj_ops.rxq_obj_new) {\n+\t\t\tDRV_LOG(ERR, \"port %u queue index %u shared Rx queue needs DevX api\",\n+\t\t\t\t     dev->data->port_id, idx);\n+\t\t\trte_errno = EINVAL;\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t\t/* Try to reuse shared RXQ. */\n+\t\trxq_ctrl = mlx5_shared_rxq_get(dev, conf->shared_group, idx);\n+\t\tif (rxq_ctrl != NULL &&\n+\t\t    !mlx5_shared_rxq_match(rxq_ctrl, dev, idx, desc, socket,\n+\t\t\t\t\t   conf, mp)) {\n+\t\t\trte_errno = EINVAL;\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t}\n \tres = mlx5_rx_queue_pre_setup(dev, idx, &desc);\n \tif (res)\n \t\treturn res;\n+\t/* Allocate RXQ. */\n \trxq = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, sizeof(*rxq), 0,\n \t\t\t  SOCKET_ID_ANY);\n \tif (!rxq) {\n@@ -718,14 +852,22 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \trxq->priv = priv;\n \trxq->idx = idx;\n \t(*priv->rxq_privs)[idx] = rxq;\n-\trxq_ctrl = mlx5_rxq_new(dev, rxq, desc, socket, conf, rx_seg, n_seg);\n-\tif (!rxq_ctrl) {\n-\t\tDRV_LOG(ERR, \"port %u unable to allocate rx queue index %u\",\n-\t\t\tdev->data->port_id, idx);\n-\t\tmlx5_free(rxq);\n-\t\t(*priv->rxq_privs)[idx] = NULL;\n-\t\trte_errno = ENOMEM;\n-\t\treturn -rte_errno;\n+\tif (rxq_ctrl != NULL) {\n+\t\t/* Join owner list of shared RXQ. 
*/\n+\t\tLIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);\n+\t\trxq->ctrl = rxq_ctrl;\n+\t} else {\n+\t\t/* Create new shared RXQ. */\n+\t\trxq_ctrl = mlx5_rxq_new(dev, rxq, desc, socket, conf, rx_seg,\n+\t\t\t\t\tn_seg);\n+\t\tif (rxq_ctrl == NULL) {\n+\t\t\tDRV_LOG(ERR, \"port %u unable to allocate rx queue index %u\",\n+\t\t\t\tdev->data->port_id, idx);\n+\t\t\tmlx5_free(rxq);\n+\t\t\t(*priv->rxq_privs)[idx] = NULL;\n+\t\t\trte_errno = ENOMEM;\n+\t\t\treturn -rte_errno;\n+\t\t}\n \t}\n \tDRV_LOG(DEBUG, \"port %u adding Rx queue %u to list\",\n \t\tdev->data->port_id, idx);\n@@ -1071,6 +1213,9 @@ mlx5_rxq_obj_verify(struct rte_eth_dev *dev)\n \tstruct mlx5_rxq_obj *rxq_obj;\n \n \tLIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {\n+\t\tif (rxq_obj->rxq_ctrl->rxq.shared &&\n+\t\t    !LIST_EMPTY(&rxq_obj->rxq_ctrl->owners))\n+\t\t\tcontinue;\n \t\tDRV_LOG(DEBUG, \"port %u Rx queue %u still referenced\",\n \t\t\tdev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);\n \t\t++ret;\n@@ -1348,6 +1493,10 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,\n \t\treturn NULL;\n \t}\n \tLIST_INIT(&tmpl->owners);\n+\tif (offloads & RTE_ETH_RX_OFFLOAD_SHARED_RXQ) {\n+\t\ttmpl->rxq.shared = 1;\n+\t\tLIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);\n+\t}\n \trxq->ctrl = tmpl;\n \tLIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);\n \tMLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);\n@@ -1771,6 +1920,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tstruct mlx5_rxq_priv *rxq;\n \tstruct mlx5_rxq_ctrl *rxq_ctrl;\n+\tbool free_ctrl;\n \n \tif (priv->rxq_privs == NULL)\n \t\treturn 0;\n@@ -1780,24 +1930,36 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)\n \tif (mlx5_rxq_deref(dev, idx) > 1)\n \t\treturn 1;\n \trxq_ctrl = rxq->ctrl;\n-\tif (rxq_ctrl->obj != NULL) {\n+\t/* If the last entry in share RXQ. 
*/\n+\tfree_ctrl = RXQ_CTRL_LAST(rxq);\n+\tif (rxq->devx_rq.rq != NULL)\n \t\tpriv->obj_ops.rxq_obj_release(rxq);\n-\t\tLIST_REMOVE(rxq_ctrl->obj, next);\n-\t\tmlx5_free(rxq_ctrl->obj);\n-\t\trxq_ctrl->obj = NULL;\n+\tif (free_ctrl) {\n+\t\tif (rxq_ctrl->obj != NULL) {\n+\t\t\tLIST_REMOVE(rxq_ctrl->obj, next);\n+\t\t\tmlx5_free(rxq_ctrl->obj);\n+\t\t\trxq_ctrl->obj = NULL;\n+\t\t}\n+\t\trxq_ctrl->started = false;\n \t}\n \tif (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {\n-\t\trxq_free_elts(rxq_ctrl);\n+\t\tif (free_ctrl)\n+\t\t\trxq_free_elts(rxq_ctrl);\n \t\tdev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;\n \t}\n \tif (!__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED)) {\n-\t\tif (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {\n-\t\t\tmlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);\n-\t\t\tmlx5_mprq_free_mp(dev, rxq_ctrl);\n-\t\t}\n \t\tLIST_REMOVE(rxq, owner_entry);\n-\t\tLIST_REMOVE(rxq_ctrl, next);\n-\t\tmlx5_free(rxq_ctrl);\n+\t\tif (free_ctrl) {\n+\t\t\tif (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {\n+\t\t\t\tmlx5_mr_btree_free\n+\t\t\t\t\t(&rxq_ctrl->rxq.mr_ctrl.cache_bh);\n+\t\t\t\tmlx5_mprq_free_mp(dev, rxq_ctrl);\n+\t\t\t}\n+\t\t\tif (rxq_ctrl->rxq.shared)\n+\t\t\t\tLIST_REMOVE(rxq_ctrl, share_entry);\n+\t\t\tLIST_REMOVE(rxq_ctrl, next);\n+\t\t\tmlx5_free(rxq_ctrl);\n+\t\t}\n \t\tdev->data->rx_queues[idx] = NULL;\n \t\tmlx5_free(rxq);\n \t\t(*priv->rxq_privs)[idx] = NULL;\ndiff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c\nindex 1e865e74e39..2fd8c70cce5 100644\n--- a/drivers/net/mlx5/mlx5_trigger.c\n+++ b/drivers/net/mlx5/mlx5_trigger.c\n@@ -122,6 +122,46 @@ mlx5_rxq_stop(struct rte_eth_dev *dev)\n \t\tmlx5_rxq_release(dev, i);\n }\n \n+static int\n+mlx5_rxq_ctrl_prepare(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl,\n+\t\t      unsigned int idx)\n+{\n+\tint ret = 0;\n+\n+\tif (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {\n+\t\tif (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {\n+\t\t\t/* 
Allocate/reuse/resize mempool for MPRQ. */\n+\t\t\tif (mlx5_mprq_alloc_mp(dev, rxq_ctrl) < 0)\n+\t\t\t\treturn -rte_errno;\n+\n+\t\t\t/* Pre-register Rx mempools. */\n+\t\t\tmlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,\n+\t\t\t\t\t  rxq_ctrl->rxq.mprq_mp);\n+\t\t} else {\n+\t\t\tuint32_t s;\n+\t\t\tfor (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++)\n+\t\t\t\tmlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,\n+\t\t\t\t\t\t  rxq_ctrl->rxq.rxseg[s].mp);\n+\t\t}\n+\t\tret = rxq_alloc_elts(rxq_ctrl);\n+\t\tif (ret)\n+\t\t\treturn ret;\n+\t}\n+\tMLX5_ASSERT(!rxq_ctrl->obj);\n+\trxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,\n+\t\t\t\t    sizeof(*rxq_ctrl->obj), 0,\n+\t\t\t\t    rxq_ctrl->socket);\n+\tif (!rxq_ctrl->obj) {\n+\t\tDRV_LOG(ERR, \"Port %u Rx queue %u can't allocate resources.\",\n+\t\t\tdev->data->port_id, idx);\n+\t\trte_errno = ENOMEM;\n+\t\treturn -rte_errno;\n+\t}\n+\tDRV_LOG(DEBUG, \"Port %u rxq %u updated with %p.\", dev->data->port_id,\n+\t\tidx, (void *)&rxq_ctrl->obj);\n+\treturn 0;\n+}\n+\n /**\n  * Start traffic on Rx queues.\n  *\n@@ -149,45 +189,17 @@ mlx5_rxq_start(struct rte_eth_dev *dev)\n \t\tif (rxq == NULL)\n \t\t\tcontinue;\n \t\trxq_ctrl = rxq->ctrl;\n-\t\tif (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {\n-\t\t\tif (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {\n-\t\t\t\t/* Allocate/reuse/resize mempool for MPRQ. */\n-\t\t\t\tif (mlx5_mprq_alloc_mp(dev, rxq_ctrl) < 0)\n-\t\t\t\t\tgoto error;\n-\t\t\t\t/* Pre-register Rx mempools. 
*/\n-\t\t\t\tmlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,\n-\t\t\t\t\t\t  rxq_ctrl->rxq.mprq_mp);\n-\t\t\t} else {\n-\t\t\t\tuint32_t s;\n-\n-\t\t\t\tfor (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++)\n-\t\t\t\t\tmlx5_mr_update_mp\n-\t\t\t\t\t\t(dev, &rxq_ctrl->rxq.mr_ctrl,\n-\t\t\t\t\t\trxq_ctrl->rxq.rxseg[s].mp);\n-\t\t\t}\n-\t\t\tret = rxq_alloc_elts(rxq_ctrl);\n-\t\t\tif (ret)\n+\t\tif (!rxq_ctrl->started) {\n+\t\t\tif (mlx5_rxq_ctrl_prepare(dev, rxq_ctrl, i) < 0)\n \t\t\t\tgoto error;\n-\t\t}\n-\t\tMLX5_ASSERT(!rxq_ctrl->obj);\n-\t\trxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,\n-\t\t\t\t\t    sizeof(*rxq_ctrl->obj), 0,\n-\t\t\t\t\t    rxq_ctrl->socket);\n-\t\tif (!rxq_ctrl->obj) {\n-\t\t\tDRV_LOG(ERR,\n-\t\t\t\t\"Port %u Rx queue %u can't allocate resources.\",\n-\t\t\t\tdev->data->port_id, i);\n-\t\t\trte_errno = ENOMEM;\n-\t\t\tgoto error;\n+\t\t\tLIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);\n+\t\t\trxq_ctrl->started = true;\n \t\t}\n \t\tret = priv->obj_ops.rxq_obj_new(rxq);\n \t\tif (ret) {\n \t\t\tmlx5_free(rxq_ctrl->obj);\n \t\t\tgoto error;\n \t\t}\n-\t\tDRV_LOG(DEBUG, \"Port %u rxq %u updated with %p.\",\n-\t\t\tdev->data->port_id, i, (void *)&rxq_ctrl->obj);\n-\t\tLIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);\n \t}\n \treturn 0;\n error:\n",
    "prefixes": [
        "11/11"
    ]
}