get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied in the request are changed).

put:
Update a patch (full update: a complete set of writable fields is expected).
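As a rough illustration, here is a minimal Python sketch of these operations against this endpoint. It assumes the `requests` library; the token value is a placeholder, and the authentication header and writable fields shown (`state`, `archived`) follow common Patchwork/DRF conventions but are assumptions, not confirmed by this page.

import requests

BASE = "http://patchwork.dpdk.org/api"
# Hypothetical API token; write operations require authentication and
# sufficient (maintainer) rights on the project.
HEADERS = {"Authorization": "Token 0123456789abcdef0123456789abcdef"}

# GET: show a patch (reads need no authentication).
patch = requests.get(f"{BASE}/patches/102107/").json()
print(patch["name"], "->", patch["state"])

# PATCH: partial update -- send only the fields being changed.
# A PUT would be the same call via requests.put() with a complete
# set of writable fields in the JSON body.
resp = requests.patch(f"{BASE}/patches/102107/", headers=HEADERS,
                      json={"state": "accepted", "archived": True})
resp.raise_for_status()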

GET /api/patches/102107/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 102107,
    "url": "http://patchwork.dpdk.org/api/patches/102107/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20211018224353.3362537-4-dkozlyuk@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211018224353.3362537-4-dkozlyuk@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211018224353.3362537-4-dkozlyuk@nvidia.com",
    "date": "2021-10-18T22:43:52",
    "name": "[v9,3/4] common/mlx5: add mempool registration facilities",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "c8969f95d678ce1c8fb9c2cc73981c31e450648f",
    "submitter": {
        "id": 2367,
        "url": "http://patchwork.dpdk.org/api/people/2367/?format=api",
        "name": "Dmitry Kozlyuk",
        "email": "dkozlyuk@oss.nvidia.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20211018224353.3362537-4-dkozlyuk@nvidia.com/mbox/",
    "series": [
        {
            "id": 19761,
            "url": "http://patchwork.dpdk.org/api/series/19761/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=19761",
            "date": "2021-10-18T22:43:49",
            "name": "net/mlx5: implicit mempool registration",
            "version": 9,
            "mbox": "http://patchwork.dpdk.org/series/19761/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/102107/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/102107/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 01720A0C45;\n\tTue, 19 Oct 2021 00:44:36 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 8052941101;\n\tTue, 19 Oct 2021 00:44:24 +0200 (CEST)",
            "from AZHDRRW-EX01.nvidia.com (azhdrrw-ex01.nvidia.com\n [20.51.104.162]) by mails.dpdk.org (Postfix) with ESMTP id 161E3410F8\n for <dev@dpdk.org>; Tue, 19 Oct 2021 00:44:20 +0200 (CEST)",
            "from NAM12-BN8-obe.outbound.protection.outlook.com (104.47.55.172)\n by mxs.oss.nvidia.com (10.13.234.36) with Microsoft SMTP Server\n (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.2.858.15; Mon, 18 Oct 2021 15:44:19 -0700",
            "from DM3PR08CA0004.namprd08.prod.outlook.com (2603:10b6:0:52::14) by\n BYAPR12MB3175.namprd12.prod.outlook.com (2603:10b6:a03:13a::11) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4608.18; Mon, 18 Oct\n 2021 22:44:15 +0000",
            "from DM6NAM11FT056.eop-nam11.prod.protection.outlook.com\n (2603:10b6:0:52:cafe::6e) by DM3PR08CA0004.outlook.office365.com\n (2603:10b6:0:52::14) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4608.15 via Frontend\n Transport; Mon, 18 Oct 2021 22:44:15 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n DM6NAM11FT056.mail.protection.outlook.com (10.13.173.99) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4608.15 via Frontend Transport; Mon, 18 Oct 2021 22:44:15 +0000",
            "from nvidia.com (172.20.187.5) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Mon, 18 Oct\n 2021 22:44:12 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=lhtkkMBT+Q2fnmP7Iw1Q6p3hnGJ6YI2Q+xSytt37hFe047Ka8D0T45lO49B1EEsgcP1XBN1fWq8f+uj/Nu1rs0kBcsRurzq9xyEymPDWNsZDaiYHMbs737pT0pdAao5jvfbY972t0fjVH8XXxMs/at0Ksk2RHC0jepT4XDtP9yjJaKYmK97LodHhqdEEj014y7NO/B62OFdidqpIjDfaN4JlAV3y75OoLuWk8DrN6ztHB+Vc1FifziAzuup+fAjQ/cNPBBhRgUYxO5Jxinphnf9DO22+7Zn0zdZg8ocYgqis8eGWSBCojok25NW7oJez+Juj5zB03EaoIb8PYr1EDg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=0ZVl9WA6rVz9NCADmC3Uxqzh/ysKiH5eWJ3/klTNwNc=;\n b=cUZfzHpvuBzyIntfV8deJq/4TUa4dXLWjuIJ3CGqMawXTY6qk5MFVvG8UoRZrr34xYOIAoRUwsB1p1zopXlUJXiQgyHelk7DGFgF6E+6AzdYt5Zx58+8bVgXT2w0dcZltsuoz8O0kopymCMIDbL/YzDD+pNzNu6N51vd/QS6m5Mpu9BjYdSbYP46mTPXV8BTN4TPbNBfBIH/0VtqC2Z+R4KXR3E8ICd6aJJ/f86F23Czx9vENHMzC++7j8cBk1emHACA59bAFJOIuBvrrWRackR1IeGjERHP6K0sM2XNpYRu8s+TvaQSznuK3APMJ3QbhzkoFpAGQxH2rouw/kvmWw==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=intel.com smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=quarantine pct=100) action=none\n header.from=nvidia.com; dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=0ZVl9WA6rVz9NCADmC3Uxqzh/ysKiH5eWJ3/klTNwNc=;\n b=XNnhJz7fz+/xvy78JIp+E6kunDE6kfOoush2Kd/wNrsriHLNkl1KcMO7ItQ2Z+rUP0C4PMeveo1XwKKhMz3vONZD/8MztuNjg/1ch70wfIeq6OyB7wpKNmN6sUZjixsJhD4E4npSDJ+VuijdPDlMJ/IWGs33A2Wdpl9UTRfoYpwGnJAeLZJIei7+w6X9XH8NNTflIdMX618HKnqr+sTRiXVK/QwwLXWE2Zgg86sogTyZi/LEL/587de8IQfiHuGMKTfQ6a+9RMqcxg9cu1sX7cgOPhIA77hghgPyfbIFRODhEUj/QIHWAMqJLnKM+f2xwpa9PJ0z5gfMWh+ZZxf+DA==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; intel.com; dkim=none (message not signed)\n header.d=none;intel.com; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Dmitry Kozlyuk <dkozlyuk@oss.nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "Matan Azrad <matan@oss.nvidia.com>, Viacheslav Ovsiienko\n <viacheslavo@oss.nvidia.com>, Ray Kinsella <mdr@ashroe.eu>, Anatoly Burakov\n <anatoly.burakov@intel.com>",
        "Date": "Tue, 19 Oct 2021 01:43:52 +0300",
        "Message-ID": "<20211018224353.3362537-4-dkozlyuk@nvidia.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20211018224353.3362537-1-dkozlyuk@nvidia.com>",
        "References": "<20211018144059.3303406-1-dkozlyuk@nvidia.com>\n <20211018224353.3362537-1-dkozlyuk@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.5]",
        "X-ClientProxiedBy": "HQMAIL111.nvidia.com (172.20.187.18) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "43eb1d36-007a-400b-5336-08d99288d032",
        "X-MS-TrafficTypeDiagnostic": "BYAPR12MB3175:",
        "X-Microsoft-Antispam-PRVS": "\n <BYAPR12MB3175692EE886A6C49E6280F8B9BC9@BYAPR12MB3175.namprd12.prod.outlook.com>",
        "X-MS-Exchange-Transport-Forked": "True",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:34;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n NIa9PUGBEk2srZllkxFI9v/ePgMjJDQFzRuqSvFurzrD9ECq6wgHYDvVPUQHaZTdNreFEu5B1k6g5pUeeriLyR1Mte9pMF0Q/U1eipq2G/q1nJri4BmyCDvc1FCXWalAVOZyPkF6yMI6zqjd2DbIUWxGg9c1bWJSVddMSuwF+2tBGsjEtRwryl/8ZOTcfcwXxXLTpwoxBpbcFXB1x9C1xDD+w5d/e0ZZDf4WV4yXeETKDw1rFfE2MV0xKy4itYLOwoUt8b/D3OnLq/0IW1mqgANFOA4O7bEftEpoczsT8gx6EPdgKEw1ub1ORsRb9OZiSCbF5KNIszZ/tmTvr2QrsZ1c9zxaggRipVcNJcIHl7LT4JYe48v8cJ9i8gJMiJZaLgTbwBYOBpzAnioQjiVD0dd+5Km+e4xhlUiEFTmIfN41EWcy0dLW3EDr4Kn2QW0Zb4o9Vdv/oJevSn6oiG2pZVBPjqVF9/+5GODanpVZml2RtTkX9uiYQDJc+EtE/Asf3N2aREpHLbkMJ/MPDwBQmzr1dHvz2pCHXIkwwU25bfZVLnSbSQKxlHSCbNE2TiLU6nkDOaPnyjarQlZyGmm/G/H49s1QcJ1cJ/IpTYgmHVPQJTBy7fScKwI41A9CtZt0u1zr7VdDOoVbEs0E7Dh44S2KKFPbP66wcP4M2XeWDwfXI+bGqNtMI4SJGIXsqfCfeeAQrvHrln5kxp2+VSKa1Q==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(36840700001)(46966006)(8936002)(5660300002)(83380400001)(4326008)(356005)(107886003)(36860700001)(336012)(55016002)(7696005)(8676002)(36906005)(47076005)(186003)(86362001)(6916009)(36756003)(6666004)(6286002)(7636003)(316002)(70206006)(1076003)(70586007)(26005)(82310400003)(30864003)(2616005)(426003)(54906003)(2906002)(508600001)(16526019);\n DIR:OUT; SFP:1101;",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "18 Oct 2021 22:44:15.1904 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 43eb1d36-007a-400b-5336-08d99288d032",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT056.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BYAPR12MB3175",
        "Subject": "[dpdk-dev] [PATCH v9 3/4] common/mlx5: add mempool registration\n facilities",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add internal API to register mempools, that is, to create memory\nregions (MR) for their memory and store them in a separate database.\nImplementation deals with multi-process, so that class drivers don't\nneed to. Each protection domain has its own database. Memory regions\ncan be shared within a database if they represent a single hugepage\ncovering one or more mempools entirely.\n\nAdd internal API to lookup an MR key for an address that belongs\nto a known mempool. It is a responsibility of a class driver\nto extract the mempool from an mbuf.\n\nSigned-off-by: Dmitry Kozlyuk <dkozlyuk@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/common/mlx5/mlx5_common_mp.c |  50 +++\n drivers/common/mlx5/mlx5_common_mp.h |  14 +\n drivers/common/mlx5/mlx5_common_mr.c | 580 +++++++++++++++++++++++++++\n drivers/common/mlx5/mlx5_common_mr.h |  17 +\n drivers/common/mlx5/version.map      |   5 +\n 5 files changed, 666 insertions(+)",
    "diff": "diff --git a/drivers/common/mlx5/mlx5_common_mp.c b/drivers/common/mlx5/mlx5_common_mp.c\nindex 673a7c31de..6dfc5535e0 100644\n--- a/drivers/common/mlx5/mlx5_common_mp.c\n+++ b/drivers/common/mlx5/mlx5_common_mp.c\n@@ -54,6 +54,56 @@ mlx5_mp_req_mr_create(struct mlx5_mp_id *mp_id, uintptr_t addr)\n \treturn ret;\n }\n \n+/**\n+ * @param mp_id\n+ *   ID of the MP process.\n+ * @param share_cache\n+ *   Shared MR cache.\n+ * @param pd\n+ *   Protection domain.\n+ * @param mempool\n+ *   Mempool to register or unregister.\n+ * @param reg\n+ *   True to register the mempool, False to unregister.\n+ */\n+int\n+mlx5_mp_req_mempool_reg(struct mlx5_mp_id *mp_id,\n+\t\t\tstruct mlx5_mr_share_cache *share_cache, void *pd,\n+\t\t\tstruct rte_mempool *mempool, bool reg)\n+{\n+\tstruct rte_mp_msg mp_req;\n+\tstruct rte_mp_msg *mp_res;\n+\tstruct rte_mp_reply mp_rep;\n+\tstruct mlx5_mp_param *req = (struct mlx5_mp_param *)mp_req.param;\n+\tstruct mlx5_mp_arg_mempool_reg *arg = &req->args.mempool_reg;\n+\tstruct mlx5_mp_param *res;\n+\tstruct timespec ts = {.tv_sec = MLX5_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};\n+\tenum mlx5_mp_req_type type;\n+\tint ret;\n+\n+\tMLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);\n+\ttype = reg ? MLX5_MP_REQ_MEMPOOL_REGISTER :\n+\t\t     MLX5_MP_REQ_MEMPOOL_UNREGISTER;\n+\tmp_init_msg(mp_id, &mp_req, type);\n+\targ->share_cache = share_cache;\n+\targ->pd = pd;\n+\targ->mempool = mempool;\n+\tret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);\n+\tif (ret) {\n+\t\tDRV_LOG(ERR, \"port %u request to primary process failed\",\n+\t\t\tmp_id->port_id);\n+\t\treturn -rte_errno;\n+\t}\n+\tMLX5_ASSERT(mp_rep.nb_received == 1);\n+\tmp_res = &mp_rep.msgs[0];\n+\tres = (struct mlx5_mp_param *)mp_res->param;\n+\tret = res->result;\n+\tif (ret)\n+\t\trte_errno = -ret;\n+\tmlx5_free(mp_rep.msgs);\n+\treturn ret;\n+}\n+\n /**\n  * Request Verbs queue state modification to the primary process.\n  *\ndiff --git a/drivers/common/mlx5/mlx5_common_mp.h b/drivers/common/mlx5/mlx5_common_mp.h\nindex 6829141fc7..527bf3cad8 100644\n--- a/drivers/common/mlx5/mlx5_common_mp.h\n+++ b/drivers/common/mlx5/mlx5_common_mp.h\n@@ -14,6 +14,8 @@\n enum mlx5_mp_req_type {\n \tMLX5_MP_REQ_VERBS_CMD_FD = 1,\n \tMLX5_MP_REQ_CREATE_MR,\n+\tMLX5_MP_REQ_MEMPOOL_REGISTER,\n+\tMLX5_MP_REQ_MEMPOOL_UNREGISTER,\n \tMLX5_MP_REQ_START_RXTX,\n \tMLX5_MP_REQ_STOP_RXTX,\n \tMLX5_MP_REQ_QUEUE_STATE_MODIFY,\n@@ -33,6 +35,12 @@ struct mlx5_mp_arg_queue_id {\n \tuint16_t queue_id; /* DPDK queue ID. */\n };\n \n+struct mlx5_mp_arg_mempool_reg {\n+\tstruct mlx5_mr_share_cache *share_cache;\n+\tvoid *pd; /* NULL for MLX5_MP_REQ_MEMPOOL_UNREGISTER */\n+\tstruct rte_mempool *mempool;\n+};\n+\n /* Pameters for IPC. 
*/\n struct mlx5_mp_param {\n \tenum mlx5_mp_req_type type;\n@@ -41,6 +49,8 @@ struct mlx5_mp_param {\n \tRTE_STD_C11\n \tunion {\n \t\tuintptr_t addr; /* MLX5_MP_REQ_CREATE_MR */\n+\t\tstruct mlx5_mp_arg_mempool_reg mempool_reg;\n+\t\t/* MLX5_MP_REQ_MEMPOOL_(UN)REGISTER */\n \t\tstruct mlx5_mp_arg_queue_state_modify state_modify;\n \t\t/* MLX5_MP_REQ_QUEUE_STATE_MODIFY */\n \t\tstruct mlx5_mp_arg_queue_id queue_id;\n@@ -91,6 +101,10 @@ void mlx5_mp_uninit_secondary(const char *name);\n __rte_internal\n int mlx5_mp_req_mr_create(struct mlx5_mp_id *mp_id, uintptr_t addr);\n __rte_internal\n+int mlx5_mp_req_mempool_reg(struct mlx5_mp_id *mp_id,\n+\t\t\tstruct mlx5_mr_share_cache *share_cache, void *pd,\n+\t\t\tstruct rte_mempool *mempool, bool reg);\n+__rte_internal\n int mlx5_mp_req_queue_state_modify(struct mlx5_mp_id *mp_id,\n \t\t\t\t   struct mlx5_mp_arg_queue_state_modify *sm);\n __rte_internal\ndiff --git a/drivers/common/mlx5/mlx5_common_mr.c b/drivers/common/mlx5/mlx5_common_mr.c\nindex 98fe8698e2..2e039a4e70 100644\n--- a/drivers/common/mlx5/mlx5_common_mr.c\n+++ b/drivers/common/mlx5/mlx5_common_mr.c\n@@ -2,7 +2,10 @@\n  * Copyright 2016 6WIND S.A.\n  * Copyright 2020 Mellanox Technologies, Ltd\n  */\n+#include <stddef.h>\n+\n #include <rte_eal_memconfig.h>\n+#include <rte_eal_paging.h>\n #include <rte_errno.h>\n #include <rte_mempool.h>\n #include <rte_malloc.h>\n@@ -21,6 +24,29 @@ struct mr_find_contig_memsegs_data {\n \tconst struct rte_memseg_list *msl;\n };\n \n+/* Virtual memory range. */\n+struct mlx5_range {\n+\tuintptr_t start;\n+\tuintptr_t end;\n+};\n+\n+/** Memory region for a mempool. */\n+struct mlx5_mempool_mr {\n+\tstruct mlx5_pmd_mr pmd_mr;\n+\tuint32_t refcnt; /**< Number of mempools sharing this MR. */\n+};\n+\n+/* Mempool registration. */\n+struct mlx5_mempool_reg {\n+\tLIST_ENTRY(mlx5_mempool_reg) next;\n+\t/** Registered mempool, used to designate registrations. */\n+\tstruct rte_mempool *mp;\n+\t/** Memory regions for the address ranges of the mempool. */\n+\tstruct mlx5_mempool_mr *mrs;\n+\t/** Number of memory regions. */\n+\tunsigned int mrs_n;\n+};\n+\n /**\n  * Expand B-tree table to a given size. 
Can't be called with holding\n  * memory_hotplug_lock or share_cache.rwlock due to rte_realloc().\n@@ -1191,3 +1217,557 @@ mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused)\n \trte_rwlock_read_unlock(&share_cache->rwlock);\n #endif\n }\n+\n+static int\n+mlx5_range_compare_start(const void *lhs, const void *rhs)\n+{\n+\tconst struct mlx5_range *r1 = lhs, *r2 = rhs;\n+\n+\tif (r1->start > r2->start)\n+\t\treturn 1;\n+\telse if (r1->start < r2->start)\n+\t\treturn -1;\n+\treturn 0;\n+}\n+\n+static void\n+mlx5_range_from_mempool_chunk(struct rte_mempool *mp, void *opaque,\n+\t\t\t      struct rte_mempool_memhdr *memhdr,\n+\t\t\t      unsigned int idx)\n+{\n+\tstruct mlx5_range *ranges = opaque, *range = &ranges[idx];\n+\tuint64_t page_size = rte_mem_page_size();\n+\n+\tRTE_SET_USED(mp);\n+\trange->start = RTE_ALIGN_FLOOR((uintptr_t)memhdr->addr, page_size);\n+\trange->end = RTE_ALIGN_CEIL(range->start + memhdr->len, page_size);\n+}\n+\n+/**\n+ * Get VA-contiguous ranges of the mempool memory.\n+ * Each range start and end is aligned to the system page size.\n+ *\n+ * @param[in] mp\n+ *   Analyzed mempool.\n+ * @param[out] out\n+ *   Receives the ranges, caller must release it with free().\n+ * @param[out] ount_n\n+ *   Receives the number of @p out elements.\n+ *\n+ * @return\n+ *   0 on success, (-1) on failure.\n+ */\n+static int\n+mlx5_get_mempool_ranges(struct rte_mempool *mp, struct mlx5_range **out,\n+\t\t\tunsigned int *out_n)\n+{\n+\tstruct mlx5_range *chunks;\n+\tunsigned int chunks_n = mp->nb_mem_chunks, contig_n, i;\n+\n+\t/* Collect page-aligned memory ranges of the mempool. */\n+\tchunks = calloc(sizeof(chunks[0]), chunks_n);\n+\tif (chunks == NULL)\n+\t\treturn -1;\n+\trte_mempool_mem_iter(mp, mlx5_range_from_mempool_chunk, chunks);\n+\t/* Merge adjacent chunks and place them at the beginning. */\n+\tqsort(chunks, chunks_n, sizeof(chunks[0]), mlx5_range_compare_start);\n+\tcontig_n = 1;\n+\tfor (i = 1; i < chunks_n; i++)\n+\t\tif (chunks[i - 1].end != chunks[i].start) {\n+\t\t\tchunks[contig_n - 1].end = chunks[i - 1].end;\n+\t\t\tchunks[contig_n] = chunks[i];\n+\t\t\tcontig_n++;\n+\t\t}\n+\t/* Extend the last contiguous chunk to the end of the mempool. */\n+\tchunks[contig_n - 1].end = chunks[i - 1].end;\n+\t*out = chunks;\n+\t*out_n = contig_n;\n+\treturn 0;\n+}\n+\n+/**\n+ * Analyze mempool memory to select memory ranges to register.\n+ *\n+ * @param[in] mp\n+ *   Mempool to analyze.\n+ * @param[out] out\n+ *   Receives memory ranges to register, aligned to the system page size.\n+ *   The caller must release them with free().\n+ * @param[out] out_n\n+ *   Receives the number of @p out items.\n+ * @param[out] share_hugepage\n+ *   Receives True if the entire pool resides within a single hugepage.\n+ *\n+ * @return\n+ *   0 on success, (-1) on failure.\n+ */\n+static int\n+mlx5_mempool_reg_analyze(struct rte_mempool *mp, struct mlx5_range **out,\n+\t\t\t unsigned int *out_n, bool *share_hugepage)\n+{\n+\tstruct mlx5_range *ranges = NULL;\n+\tunsigned int i, ranges_n = 0;\n+\tstruct rte_memseg_list *msl;\n+\n+\tif (mlx5_get_mempool_ranges(mp, &ranges, &ranges_n) < 0) {\n+\t\tDRV_LOG(ERR, \"Cannot get address ranges for mempool %s\",\n+\t\t\tmp->name);\n+\t\treturn -1;\n+\t}\n+\t/* Check if the hugepage of the pool can be shared. */\n+\t*share_hugepage = false;\n+\tmsl = rte_mem_virt2memseg_list((void *)ranges[0].start);\n+\tif (msl != NULL) {\n+\t\tuint64_t hugepage_sz = 0;\n+\n+\t\t/* Check that all ranges are on pages of the same size. 
*/\n+\t\tfor (i = 0; i < ranges_n; i++) {\n+\t\t\tif (hugepage_sz != 0 && hugepage_sz != msl->page_sz)\n+\t\t\t\tbreak;\n+\t\t\thugepage_sz = msl->page_sz;\n+\t\t}\n+\t\tif (i == ranges_n) {\n+\t\t\t/*\n+\t\t\t * If the entire pool is within one hugepage,\n+\t\t\t * combine all ranges into one of the hugepage size.\n+\t\t\t */\n+\t\t\tuintptr_t reg_start = ranges[0].start;\n+\t\t\tuintptr_t reg_end = ranges[ranges_n - 1].end;\n+\t\t\tuintptr_t hugepage_start =\n+\t\t\t\tRTE_ALIGN_FLOOR(reg_start, hugepage_sz);\n+\t\t\tuintptr_t hugepage_end = hugepage_start + hugepage_sz;\n+\t\t\tif (reg_end < hugepage_end) {\n+\t\t\t\tranges[0].start = hugepage_start;\n+\t\t\t\tranges[0].end = hugepage_end;\n+\t\t\t\tranges_n = 1;\n+\t\t\t\t*share_hugepage = true;\n+\t\t\t}\n+\t\t}\n+\t}\n+\t*out = ranges;\n+\t*out_n = ranges_n;\n+\treturn 0;\n+}\n+\n+/** Create a registration object for the mempool. */\n+static struct mlx5_mempool_reg *\n+mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n)\n+{\n+\tstruct mlx5_mempool_reg *mpr = NULL;\n+\n+\tmpr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,\n+\t\t\t  sizeof(*mpr) + mrs_n * sizeof(mpr->mrs[0]),\n+\t\t\t  RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);\n+\tif (mpr == NULL) {\n+\t\tDRV_LOG(ERR, \"Cannot allocate mempool %s registration object\",\n+\t\t\tmp->name);\n+\t\treturn NULL;\n+\t}\n+\tmpr->mp = mp;\n+\tmpr->mrs = (struct mlx5_mempool_mr *)(mpr + 1);\n+\tmpr->mrs_n = mrs_n;\n+\treturn mpr;\n+}\n+\n+/**\n+ * Destroy a mempool registration object.\n+ *\n+ * @param standalone\n+ *   Whether @p mpr owns its MRs excludively, i.e. they are not shared.\n+ */\n+static void\n+mlx5_mempool_reg_destroy(struct mlx5_mr_share_cache *share_cache,\n+\t\t\t struct mlx5_mempool_reg *mpr, bool standalone)\n+{\n+\tif (standalone) {\n+\t\tunsigned int i;\n+\n+\t\tfor (i = 0; i < mpr->mrs_n; i++)\n+\t\t\tshare_cache->dereg_mr_cb(&mpr->mrs[i].pmd_mr);\n+\t}\n+\tmlx5_free(mpr);\n+}\n+\n+/** Find registration object of a mempool. */\n+static struct mlx5_mempool_reg *\n+mlx5_mempool_reg_lookup(struct mlx5_mr_share_cache *share_cache,\n+\t\t\tstruct rte_mempool *mp)\n+{\n+\tstruct mlx5_mempool_reg *mpr;\n+\n+\tLIST_FOREACH(mpr, &share_cache->mempool_reg_list, next)\n+\t\tif (mpr->mp == mp)\n+\t\t\tbreak;\n+\treturn mpr;\n+}\n+\n+/** Increment reference counters of MRs used in the registration. */\n+static void\n+mlx5_mempool_reg_attach(struct mlx5_mempool_reg *mpr)\n+{\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < mpr->mrs_n; i++)\n+\t\t__atomic_add_fetch(&mpr->mrs[i].refcnt, 1, __ATOMIC_RELAXED);\n+}\n+\n+/**\n+ * Decrement reference counters of MRs used in the registration.\n+ *\n+ * @return True if no more references to @p mpr MRs exist, False otherwise.\n+ */\n+static bool\n+mlx5_mempool_reg_detach(struct mlx5_mempool_reg *mpr)\n+{\n+\tunsigned int i;\n+\tbool ret = false;\n+\n+\tfor (i = 0; i < mpr->mrs_n; i++)\n+\t\tret |= __atomic_sub_fetch(&mpr->mrs[i].refcnt, 1,\n+\t\t\t\t\t  __ATOMIC_RELAXED) == 0;\n+\treturn ret;\n+}\n+\n+static int\n+mlx5_mr_mempool_register_primary(struct mlx5_mr_share_cache *share_cache,\n+\t\t\t\t void *pd, struct rte_mempool *mp)\n+{\n+\tstruct mlx5_range *ranges = NULL;\n+\tstruct mlx5_mempool_reg *mpr, *new_mpr;\n+\tunsigned int i, ranges_n;\n+\tbool share_hugepage;\n+\tint ret = -1;\n+\n+\t/* Early check to avoid unnecessary creation of MRs. 
*/\n+\trte_rwlock_read_lock(&share_cache->rwlock);\n+\tmpr = mlx5_mempool_reg_lookup(share_cache, mp);\n+\trte_rwlock_read_unlock(&share_cache->rwlock);\n+\tif (mpr != NULL) {\n+\t\tDRV_LOG(DEBUG, \"Mempool %s is already registered for PD %p\",\n+\t\t\tmp->name, pd);\n+\t\trte_errno = EEXIST;\n+\t\tgoto exit;\n+\t}\n+\tif (mlx5_mempool_reg_analyze(mp, &ranges, &ranges_n,\n+\t\t\t\t     &share_hugepage) < 0) {\n+\t\tDRV_LOG(ERR, \"Cannot get mempool %s memory ranges\", mp->name);\n+\t\trte_errno = ENOMEM;\n+\t\tgoto exit;\n+\t}\n+\tnew_mpr = mlx5_mempool_reg_create(mp, ranges_n);\n+\tif (new_mpr == NULL) {\n+\t\tDRV_LOG(ERR,\n+\t\t\t\"Cannot create a registration object for mempool %s in PD %p\",\n+\t\t\tmp->name, pd);\n+\t\trte_errno = ENOMEM;\n+\t\tgoto exit;\n+\t}\n+\t/*\n+\t * If the entire mempool fits in a single hugepage, the MR for this\n+\t * hugepage can be shared across mempools that also fit in it.\n+\t */\n+\tif (share_hugepage) {\n+\t\trte_rwlock_write_lock(&share_cache->rwlock);\n+\t\tLIST_FOREACH(mpr, &share_cache->mempool_reg_list, next) {\n+\t\t\tif (mpr->mrs[0].pmd_mr.addr == (void *)ranges[0].start)\n+\t\t\t\tbreak;\n+\t\t}\n+\t\tif (mpr != NULL) {\n+\t\t\tnew_mpr->mrs = mpr->mrs;\n+\t\t\tmlx5_mempool_reg_attach(new_mpr);\n+\t\t\tLIST_INSERT_HEAD(&share_cache->mempool_reg_list,\n+\t\t\t\t\t new_mpr, next);\n+\t\t}\n+\t\trte_rwlock_write_unlock(&share_cache->rwlock);\n+\t\tif (mpr != NULL) {\n+\t\t\tDRV_LOG(DEBUG, \"Shared MR %#x in PD %p for mempool %s with mempool %s\",\n+\t\t\t\tmpr->mrs[0].pmd_mr.lkey, pd, mp->name,\n+\t\t\t\tmpr->mp->name);\n+\t\t\tret = 0;\n+\t\t\tgoto exit;\n+\t\t}\n+\t}\n+\tfor (i = 0; i < ranges_n; i++) {\n+\t\tstruct mlx5_mempool_mr *mr = &new_mpr->mrs[i];\n+\t\tconst struct mlx5_range *range = &ranges[i];\n+\t\tsize_t len = range->end - range->start;\n+\n+\t\tif (share_cache->reg_mr_cb(pd, (void *)range->start, len,\n+\t\t    &mr->pmd_mr) < 0) {\n+\t\t\tDRV_LOG(ERR,\n+\t\t\t\t\"Failed to create an MR in PD %p for address range \"\n+\t\t\t\t\"[0x%\" PRIxPTR \", 0x%\" PRIxPTR \"] (%zu bytes) for mempool %s\",\n+\t\t\t\tpd, range->start, range->end, len, mp->name);\n+\t\t\tbreak;\n+\t\t}\n+\t\tDRV_LOG(DEBUG,\n+\t\t\t\"Created a new MR %#x in PD %p for address range \"\n+\t\t\t\"[0x%\" PRIxPTR \", 0x%\" PRIxPTR \"] (%zu bytes) for mempool %s\",\n+\t\t\tmr->pmd_mr.lkey, pd, range->start, range->end, len,\n+\t\t\tmp->name);\n+\t}\n+\tif (i != ranges_n) {\n+\t\tmlx5_mempool_reg_destroy(share_cache, new_mpr, true);\n+\t\trte_errno = EINVAL;\n+\t\tgoto exit;\n+\t}\n+\t/* Concurrent registration is not supposed to happen. 
*/\n+\trte_rwlock_write_lock(&share_cache->rwlock);\n+\tmpr = mlx5_mempool_reg_lookup(share_cache, mp);\n+\tif (mpr == NULL) {\n+\t\tmlx5_mempool_reg_attach(new_mpr);\n+\t\tLIST_INSERT_HEAD(&share_cache->mempool_reg_list,\n+\t\t\t\t new_mpr, next);\n+\t\tret = 0;\n+\t}\n+\trte_rwlock_write_unlock(&share_cache->rwlock);\n+\tif (mpr != NULL) {\n+\t\tDRV_LOG(DEBUG, \"Mempool %s is already registered for PD %p\",\n+\t\t\tmp->name, pd);\n+\t\tmlx5_mempool_reg_destroy(share_cache, new_mpr, true);\n+\t\trte_errno = EEXIST;\n+\t\tgoto exit;\n+\t}\n+exit:\n+\tfree(ranges);\n+\treturn ret;\n+}\n+\n+static int\n+mlx5_mr_mempool_register_secondary(struct mlx5_mr_share_cache *share_cache,\n+\t\t\t\t   void *pd, struct rte_mempool *mp,\n+\t\t\t\t   struct mlx5_mp_id *mp_id)\n+{\n+\tif (mp_id == NULL) {\n+\t\trte_errno = EINVAL;\n+\t\treturn -1;\n+\t}\n+\treturn mlx5_mp_req_mempool_reg(mp_id, share_cache, pd, mp, true);\n+}\n+\n+/**\n+ * Register the memory of a mempool in the protection domain.\n+ *\n+ * @param share_cache\n+ *   Shared MR cache of the protection domain.\n+ * @param pd\n+ *   Protection domain object.\n+ * @param mp\n+ *   Mempool to register.\n+ * @param mp_id\n+ *   Multi-process identifier, may be NULL for the primary process.\n+ *\n+ * @return\n+ *   0 on success, (-1) on failure and rte_errno is set.\n+ */\n+int\n+mlx5_mr_mempool_register(struct mlx5_mr_share_cache *share_cache, void *pd,\n+\t\t\t struct rte_mempool *mp, struct mlx5_mp_id *mp_id)\n+{\n+\tif (mp->flags & MEMPOOL_F_NON_IO)\n+\t\treturn 0;\n+\tswitch (rte_eal_process_type()) {\n+\tcase RTE_PROC_PRIMARY:\n+\t\treturn mlx5_mr_mempool_register_primary(share_cache, pd, mp);\n+\tcase RTE_PROC_SECONDARY:\n+\t\treturn mlx5_mr_mempool_register_secondary(share_cache, pd, mp,\n+\t\t\t\t\t\t\t  mp_id);\n+\tdefault:\n+\t\treturn -1;\n+\t}\n+}\n+\n+static int\n+mlx5_mr_mempool_unregister_primary(struct mlx5_mr_share_cache *share_cache,\n+\t\t\t\t   struct rte_mempool *mp)\n+{\n+\tstruct mlx5_mempool_reg *mpr;\n+\tbool standalone = false;\n+\n+\trte_rwlock_write_lock(&share_cache->rwlock);\n+\tLIST_FOREACH(mpr, &share_cache->mempool_reg_list, next)\n+\t\tif (mpr->mp == mp) {\n+\t\t\tLIST_REMOVE(mpr, next);\n+\t\t\tstandalone = mlx5_mempool_reg_detach(mpr);\n+\t\t\tif (standalone)\n+\t\t\t\t/*\n+\t\t\t\t * The unlock operation below provides a memory\n+\t\t\t\t * barrier due to its store-release semantics.\n+\t\t\t\t */\n+\t\t\t\t++share_cache->dev_gen;\n+\t\t\tbreak;\n+\t\t}\n+\trte_rwlock_write_unlock(&share_cache->rwlock);\n+\tif (mpr == NULL) {\n+\t\trte_errno = ENOENT;\n+\t\treturn -1;\n+\t}\n+\tmlx5_mempool_reg_destroy(share_cache, mpr, standalone);\n+\treturn 0;\n+}\n+\n+static int\n+mlx5_mr_mempool_unregister_secondary(struct mlx5_mr_share_cache *share_cache,\n+\t\t\t\t     struct rte_mempool *mp,\n+\t\t\t\t     struct mlx5_mp_id *mp_id)\n+{\n+\tif (mp_id == NULL) {\n+\t\trte_errno = EINVAL;\n+\t\treturn -1;\n+\t}\n+\treturn mlx5_mp_req_mempool_reg(mp_id, share_cache, NULL, mp, false);\n+}\n+\n+/**\n+ * Unregister the memory of a mempool from the protection domain.\n+ *\n+ * @param share_cache\n+ *   Shared MR cache of the protection domain.\n+ * @param mp\n+ *   Mempool to unregister.\n+ * @param mp_id\n+ *   Multi-process identifier, may be NULL for the primary process.\n+ *\n+ * @return\n+ *   0 on success, (-1) on failure and rte_errno is set.\n+ */\n+int\n+mlx5_mr_mempool_unregister(struct mlx5_mr_share_cache *share_cache,\n+\t\t\t   struct rte_mempool *mp, struct mlx5_mp_id *mp_id)\n+{\n+\tif (mp->flags & 
MEMPOOL_F_NON_IO)\n+\t\treturn 0;\n+\tswitch (rte_eal_process_type()) {\n+\tcase RTE_PROC_PRIMARY:\n+\t\treturn mlx5_mr_mempool_unregister_primary(share_cache, mp);\n+\tcase RTE_PROC_SECONDARY:\n+\t\treturn mlx5_mr_mempool_unregister_secondary(share_cache, mp,\n+\t\t\t\t\t\t\t    mp_id);\n+\tdefault:\n+\t\treturn -1;\n+\t}\n+}\n+\n+/**\n+ * Lookup a MR key by and address in a registered mempool.\n+ *\n+ * @param mpr\n+ *   Mempool registration object.\n+ * @param addr\n+ *   Address within the mempool.\n+ * @param entry\n+ *   Bottom-half cache entry to fill.\n+ *\n+ * @return\n+ *   MR key or UINT32_MAX on failure, which can only happen\n+ *   if the address is not from within the mempool.\n+ */\n+static uint32_t\n+mlx5_mempool_reg_addr2mr(struct mlx5_mempool_reg *mpr, uintptr_t addr,\n+\t\t\t struct mr_cache_entry *entry)\n+{\n+\tuint32_t lkey = UINT32_MAX;\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < mpr->mrs_n; i++) {\n+\t\tconst struct mlx5_pmd_mr *mr = &mpr->mrs[i].pmd_mr;\n+\t\tuintptr_t mr_addr = (uintptr_t)mr->addr;\n+\n+\t\tif (mr_addr <= addr) {\n+\t\t\tlkey = rte_cpu_to_be_32(mr->lkey);\n+\t\t\tentry->start = mr_addr;\n+\t\t\tentry->end = mr_addr + mr->len;\n+\t\t\tentry->lkey = lkey;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\treturn lkey;\n+}\n+\n+/**\n+ * Update bottom-half cache from the list of mempool registrations.\n+ *\n+ * @param share_cache\n+ *   Pointer to a global shared MR cache.\n+ * @param mr_ctrl\n+ *   Per-queue MR control handle.\n+ * @param entry\n+ *   Pointer to an entry in the bottom-half cache to update\n+ *   with the MR lkey looked up.\n+ * @param mp\n+ *   Mempool containing the address.\n+ * @param addr\n+ *   Address to lookup.\n+ * @return\n+ *   MR lkey on success, UINT32_MAX on failure.\n+ */\n+static uint32_t\n+mlx5_lookup_mempool_regs(struct mlx5_mr_share_cache *share_cache,\n+\t\t\t struct mlx5_mr_ctrl *mr_ctrl,\n+\t\t\t struct mr_cache_entry *entry,\n+\t\t\t struct rte_mempool *mp, uintptr_t addr)\n+{\n+\tstruct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;\n+\tstruct mlx5_mempool_reg *mpr;\n+\tuint32_t lkey = UINT32_MAX;\n+\n+\t/* If local cache table is full, try to double it. */\n+\tif (unlikely(bt->len == bt->size))\n+\t\tmr_btree_expand(bt, bt->size << 1);\n+\t/* Look up in mempool registrations. */\n+\trte_rwlock_read_lock(&share_cache->rwlock);\n+\tmpr = mlx5_mempool_reg_lookup(share_cache, mp);\n+\tif (mpr != NULL)\n+\t\tlkey = mlx5_mempool_reg_addr2mr(mpr, addr, entry);\n+\trte_rwlock_read_unlock(&share_cache->rwlock);\n+\t/*\n+\t * Update local cache. Even if it fails, return the found entry\n+\t * to update top-half cache. Next time, this entry will be found\n+\t * in the global cache.\n+\t */\n+\tif (lkey != UINT32_MAX)\n+\t\tmr_btree_insert(bt, entry);\n+\treturn lkey;\n+}\n+\n+/**\n+ * Bottom-half lookup for the address from the mempool.\n+ *\n+ * @param share_cache\n+ *   Pointer to a global shared MR cache.\n+ * @param mr_ctrl\n+ *   Per-queue MR control handle.\n+ * @param mp\n+ *   Mempool containing the address.\n+ * @param addr\n+ *   Address to lookup.\n+ * @return\n+ *   MR lkey on success, UINT32_MAX on failure.\n+ */\n+uint32_t\n+mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,\n+\t\t      struct mlx5_mr_ctrl *mr_ctrl,\n+\t\t      struct rte_mempool *mp, uintptr_t addr)\n+{\n+\tstruct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];\n+\tuint32_t lkey;\n+\tuint16_t bh_idx = 0;\n+\n+\t/* Binary-search MR translation table. */\n+\tlkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);\n+\t/* Update top-half cache. 
*/\n+\tif (likely(lkey != UINT32_MAX)) {\n+\t\t*repl = (*mr_ctrl->cache_bh.table)[bh_idx];\n+\t} else {\n+\t\tlkey = mlx5_lookup_mempool_regs(share_cache, mr_ctrl, repl,\n+\t\t\t\t\t\tmp, addr);\n+\t\t/* Can only fail if the address is not from the mempool. */\n+\t\tif (unlikely(lkey == UINT32_MAX))\n+\t\t\treturn UINT32_MAX;\n+\t}\n+\t/* Update the most recently used entry. */\n+\tmr_ctrl->mru = mr_ctrl->head;\n+\t/* Point to the next victim, the oldest. */\n+\tmr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;\n+\treturn lkey;\n+}\ndiff --git a/drivers/common/mlx5/mlx5_common_mr.h b/drivers/common/mlx5/mlx5_common_mr.h\nindex 6e465a05e9..685ac98e08 100644\n--- a/drivers/common/mlx5/mlx5_common_mr.h\n+++ b/drivers/common/mlx5/mlx5_common_mr.h\n@@ -13,6 +13,7 @@\n \n #include <rte_rwlock.h>\n #include <rte_bitmap.h>\n+#include <rte_mbuf.h>\n #include <rte_memory.h>\n \n #include \"mlx5_glue.h\"\n@@ -75,6 +76,7 @@ struct mlx5_mr_ctrl {\n } __rte_packed;\n \n LIST_HEAD(mlx5_mr_list, mlx5_mr);\n+LIST_HEAD(mlx5_mempool_reg_list, mlx5_mempool_reg);\n \n /* Global per-device MR cache. */\n struct mlx5_mr_share_cache {\n@@ -83,6 +85,7 @@ struct mlx5_mr_share_cache {\n \tstruct mlx5_mr_btree cache; /* Global MR cache table. */\n \tstruct mlx5_mr_list mr_list; /* Registered MR list. */\n \tstruct mlx5_mr_list mr_free_list; /* Freed MR list. */\n+\tstruct mlx5_mempool_reg_list mempool_reg_list; /* Mempool database. */\n \tmlx5_reg_mr_t reg_mr_cb; /* Callback to reg_mr func */\n \tmlx5_dereg_mr_t dereg_mr_cb; /* Callback to dereg_mr func */\n } __rte_packed;\n@@ -136,6 +139,10 @@ uint32_t mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,\n \t\t\t    struct mlx5_mr_ctrl *mr_ctrl,\n \t\t\t    uintptr_t addr, unsigned int mr_ext_memseg_en);\n __rte_internal\n+uint32_t mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,\n+\t\t\t       struct mlx5_mr_ctrl *mr_ctrl,\n+\t\t\t       struct rte_mempool *mp, uintptr_t addr);\n+__rte_internal\n void mlx5_mr_release_cache(struct mlx5_mr_share_cache *mr_cache);\n __rte_internal\n void mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused);\n@@ -179,4 +186,14 @@ mlx5_common_verbs_dereg_mr(struct mlx5_pmd_mr *pmd_mr);\n __rte_internal\n void\n mlx5_mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb);\n+\n+__rte_internal\n+int\n+mlx5_mr_mempool_register(struct mlx5_mr_share_cache *share_cache, void *pd,\n+\t\t\t struct rte_mempool *mp, struct mlx5_mp_id *mp_id);\n+__rte_internal\n+int\n+mlx5_mr_mempool_unregister(struct mlx5_mr_share_cache *share_cache,\n+\t\t\t   struct rte_mempool *mp, struct mlx5_mp_id *mp_id);\n+\n #endif /* RTE_PMD_MLX5_COMMON_MR_H_ */\ndiff --git a/drivers/common/mlx5/version.map b/drivers/common/mlx5/version.map\nindex d3c5040aac..85100d5afb 100644\n--- a/drivers/common/mlx5/version.map\n+++ b/drivers/common/mlx5/version.map\n@@ -152,4 +152,9 @@ INTERNAL {\n \tmlx5_realloc;\n \n \tmlx5_translate_port_name; # WINDOWS_NO_EXPORT\n+\n+\tmlx5_mr_mempool_register;\n+\tmlx5_mr_mempool_unregister;\n+\tmlx5_mp_req_mempool_reg;\n+\tmlx5_mr_mempool2mr_bh;\n };\n",
    "prefixes": [
        "v9",
        "3/4"
    ]
}
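
Since the body above is plain JSON, it is straightforward to consume programmatically. A minimal sketch (assuming the `requests` library; the output file name is illustrative) that reads a few of the fields shown above and downloads the raw patch via its "mbox" URL:

import requests

# Fetch the patch shown above and pull out a few of its fields.
patch = requests.get("http://patchwork.dpdk.org/api/patches/102107/").json()
print("state:", patch["state"])              # "accepted"
print("check:", patch["check"])              # aggregate CI result, "warning" here
print("series:", patch["series"][0]["name"])

# The "mbox" field points at the raw patch email; save it so it can be
# applied to a source tree with `git am`. The file name is illustrative.
mbox = requests.get(patch["mbox"])
mbox.raise_for_status()
with open("v9-3-4-mempool-registration.mbox", "wb") as f:
    f.write(mbox.content)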