get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Update a patch, replacing its writable fields.
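Reads need no authentication, while updates via PUT or PATCH require an API token with maintainer rights on the project. Below is a minimal sketch in Python using the requests library; the token placeholder and the state value are illustrative assumptions, not values taken from this page:

import requests

BASE = "http://patchwork.dpdk.org/api"

# Fetch the patch shown below; no credentials are needed for GET.
patch = requests.get(f"{BASE}/patches/97038/").json()
print(patch["name"], patch["state"])

# Partially update the patch; only the supplied fields change.
resp = requests.patch(
    f"{BASE}/patches/97038/",
    headers={"Authorization": "Token <your-api-token>"},  # placeholder token
    json={"state": "accepted"},  # hypothetical state change
)
resp.raise_for_status()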

GET /api/patches/97038/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 97038,
    "url": "http://patchwork.dpdk.org/api/patches/97038/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20210818090755.2419483-4-dkozlyuk@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210818090755.2419483-4-dkozlyuk@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210818090755.2419483-4-dkozlyuk@nvidia.com",
    "date": "2021-08-18T09:07:54",
    "name": "[3/4] common/mlx5: add mempool registration facilities",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "af74891f16d5c7a83c5f61b8a090ccc7d37fd873",
    "submitter": {
        "id": 2248,
        "url": "http://patchwork.dpdk.org/api/people/2248/?format=api",
        "name": "Dmitry Kozlyuk",
        "email": "dkozlyuk@nvidia.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20210818090755.2419483-4-dkozlyuk@nvidia.com/mbox/",
    "series": [
        {
            "id": 18324,
            "url": "http://patchwork.dpdk.org/api/series/18324/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=18324",
            "date": "2021-08-18T09:07:51",
            "name": "net/mlx5: implicit mempool registration",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/18324/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/97038/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/97038/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 8DB87A0C47;\n\tWed, 18 Aug 2021 11:08:33 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 1782B411D3;\n\tWed, 18 Aug 2021 11:08:25 +0200 (CEST)",
            "from NAM10-MW2-obe.outbound.protection.outlook.com\n (mail-mw2nam10on2042.outbound.protection.outlook.com [40.107.94.42])\n by mails.dpdk.org (Postfix) with ESMTP id 48A4C411CE\n for <dev@dpdk.org>; Wed, 18 Aug 2021 11:08:23 +0200 (CEST)",
            "from DS7PR03CA0067.namprd03.prod.outlook.com (2603:10b6:5:3bb::12)\n by DM5PR12MB1450.namprd12.prod.outlook.com (2603:10b6:4:3::8) with Microsoft\n SMTP Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id\n 15.20.4415.15; Wed, 18 Aug 2021 09:08:21 +0000",
            "from DM6NAM11FT035.eop-nam11.prod.protection.outlook.com\n (2603:10b6:5:3bb:cafe::26) by DS7PR03CA0067.outlook.office365.com\n (2603:10b6:5:3bb::12) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4415.16 via Frontend\n Transport; Wed, 18 Aug 2021 09:08:21 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n DM6NAM11FT035.mail.protection.outlook.com (10.13.172.100) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4415.16 via Frontend Transport; Wed, 18 Aug 2021 09:08:21 +0000",
            "from DRHQMAIL107.nvidia.com (10.27.9.16) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Wed, 18 Aug\n 2021 09:08:20 +0000",
            "from nvidia.com (172.20.187.5) by DRHQMAIL107.nvidia.com\n (10.27.9.16) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Wed, 18 Aug\n 2021 09:08:18 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=amI/o4t7m7xlXBXRGYcFNe/m3gsVRME4CjicLKcDpVVizPWfGeQr2Ls6ciBQ38BLLF9wpdo8sreT27f32NMmwhOYIelLfI8AkSEeKF3CkhqYChRxp8/9PReWj/w7gQZFsEdSPioMtSSWRJq6W0yvmm9nwfPrkTpkwKMRGZVZYpAIO5CTCIQFjxufBmCO3B06d/tQBPEtmq3/xA3NvnkznIOIybltmjqVXQ2yo9jyZt0reiQ405pVUey9CCg/A1g0SNy5k73dEN9bWzw0nucqnKQoqnJ7uMNo3IL6BWi1WmKIUqMiHc1je6lfqlUMy/m8yO2k6m2iIJNyvLGM5Wvi+g==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=7lAHF1vrPPdFqeYNRH4ZfjZnhKJg8WEtHPEbINVGJ48=;\n b=iHbsCg/pCfsqpscqjquQrwYhNCB3Ew2Rnq5yvkdeE443/+wE00aMOjFvXDUoWyXUScSiNlO4wqP6T2UYTooZ/aFP8ryOtmg/VseDzkLu2RFPlKFBB9XxpZy+iKgVNypXEAw7XmFKqr99D1FubUFTpyJdzpF3gCW8XY0Nhia9QiMSf3ldWqG5lr4PEGJMUFfDc998GLRNpD6ZPFOiATiGcMPR2OUXXE7WQda1aN2qqHwGCdKJyF3kfzBSE7L0B7bcu+QAfXkeqatv1OcwR1rEG60foi95DeANPdozELnheoYOXVFB0UgVOw9NsvmHoQkSjO+rQ1vZALKKjlpHKiIdkw==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=none pct=100) action=none header.from=nvidia.com;\n dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=7lAHF1vrPPdFqeYNRH4ZfjZnhKJg8WEtHPEbINVGJ48=;\n b=MwqsWRcyRjxM64H7odLkgpbygPWW4qgZETSnx9wNeSCtVGab5/10ZjKv0lDWrs1MGD5O2/b1qvE+cXpLwgl0BntX1sEuB2Xfz2sfB4S7VoSs56mHhar58Gw83j8In7hnZ50jjIwS3DyGdfHY2zTsb1aN1eFPz4xRZoJkLLYK75dzYStGUDl8E7nGtJZUVEqUFlli3CX0Tr4b1Nz7IKwkvTvyjKPDjmRXV5sXkqJtpYlPj/37ZOVlgTwSE8gK00k7z88YVxqV5DJu6FSYyqT5nLuJOcA7qZLqD574d/krgUkk2WfEuDHqjA0CH/Kx6v1UGMF9wfDUFHS6m8AbMl4FCw==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Dmitry Kozlyuk <dkozlyuk@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "Matan Azrad <matan@nvidia.com>, Shahaf Shuler <shahafs@nvidia.com>,\n Viacheslav Ovsiienko <viacheslavo@nvidia.com>, Ray Kinsella <mdr@ashroe.eu>,\n Anatoly Burakov <anatoly.burakov@intel.com>",
        "Date": "Wed, 18 Aug 2021 12:07:54 +0300",
        "Message-ID": "<20210818090755.2419483-4-dkozlyuk@nvidia.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20210818090755.2419483-1-dkozlyuk@nvidia.com>",
        "References": "<20210818090755.2419483-1-dkozlyuk@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.5]",
        "X-ClientProxiedBy": "HQMAIL111.nvidia.com (172.20.187.18) To\n DRHQMAIL107.nvidia.com (10.27.9.16)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "dae57321-08e8-4a56-ceec-08d96227b9fc",
        "X-MS-TrafficTypeDiagnostic": "DM5PR12MB1450:",
        "X-Microsoft-Antispam-PRVS": "\n <DM5PR12MB145016E7E9CFE58930D4B678B9FF9@DM5PR12MB1450.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:34;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n N2zYIHcwI/Vk38Kxz0OuFkHIHIRIBOst3jRSkG/ZDLGvTIAZ/dPfl3jzCM/LA1+PQiGR00u77NhNbDz0jTIycT/Onk/8JKfsAHDg1zGsNMHPn4ZAXVl5SLkeFdY99lEg5IrHvgUDVnHcHY9p0DsBIpKaktJgEpNcomkvZ7eNxQUitSjC/1wfEKxHnn29nxGQn339PYIoSk71nkdWvrBKuapAqEq2eP9gK6ipwiJUZHhrW1A2JLc+L0C49Wg8tJyQEdn7ZH7aji6GAUKSCGzinJeBQMn6aZVWtDcTxSI4wtK3pcrGU3CBOMZbqSYnZQAD+edeVcd5RwRm8AfjUsXCej8RNFtlvrYY979wTVw0dCTxxOgXMmxh+xmhZ9Xj1zlA1b3lEjobZEAEw3Md/fUOLgLb0MHuH6pVC3uKdSm3p+E+/hGmmnUjxfpLY6jIcQqlbzf01boJmMvKNI7/TfX0o9ThbEZW+7aPFhNzRpkcdq8V27fkHxowhJK0bNMRA2/sXmaJod3e0FxnJoJCEhCH+KeVXILFUgwPlV9+pfFy3Is74+YC21ReJA6o9SF4GVG0i6ONlZpSoMu+CbSw28b+wDZiv8rOP0T488HYMOXz+XhNY3k17TiptVLPAJIlMi0C+E2bDwskUJ//W/MrIHAu8m0/HiZhbAc9hw1JvE1jkb8P603WYVz8y5U/xJp0MqzDyhK/nv657vlWl9wcejUPEg==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(39860400002)(136003)(376002)(346002)(396003)(46966006)(36840700001)(1076003)(55016002)(36906005)(36860700001)(86362001)(82310400003)(7696005)(82740400003)(356005)(5660300002)(30864003)(7636003)(83380400001)(2906002)(26005)(426003)(2616005)(47076005)(186003)(6286002)(6916009)(8936002)(70206006)(54906003)(6666004)(316002)(8676002)(4326008)(478600001)(36756003)(16526019)(70586007)(336012);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "18 Aug 2021 09:08:21.1486 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n dae57321-08e8-4a56-ceec-08d96227b9fc",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT035.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "DM5PR12MB1450",
        "Subject": "[dpdk-dev] [PATCH 3/4] common/mlx5: add mempool registration\n facilities",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add internal API to register mempools, that is, to create memory\nregions (MR) for their memory and store them in a separate database.\nImplementation deals with multi-process, so that class drivers don't\nneed to. Each protection domain has its own database. Memory regions\ncan be shared within a database if they represent a single hugepage\ncovering one or more mempools entirely.\n\nAdd internal API to lookup an MR key for an address that belongs\nto a known mempool. It is a responsibility of a class driver\nto extract the mempool from an mbuf.\n\nSigned-off-by: Dmitry Kozlyuk <dkozlyuk@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/common/mlx5/mlx5_common_mp.c |  50 +++\n drivers/common/mlx5/mlx5_common_mp.h |  14 +\n drivers/common/mlx5/mlx5_common_mr.c | 564 +++++++++++++++++++++++++++\n drivers/common/mlx5/mlx5_common_mr.h |  17 +\n drivers/common/mlx5/version.map      |   5 +\n 5 files changed, 650 insertions(+)",
    "diff": "diff --git a/drivers/common/mlx5/mlx5_common_mp.c b/drivers/common/mlx5/mlx5_common_mp.c\nindex 673a7c31de..6dfc5535e0 100644\n--- a/drivers/common/mlx5/mlx5_common_mp.c\n+++ b/drivers/common/mlx5/mlx5_common_mp.c\n@@ -54,6 +54,56 @@ mlx5_mp_req_mr_create(struct mlx5_mp_id *mp_id, uintptr_t addr)\n \treturn ret;\n }\n \n+/**\n+ * @param mp_id\n+ *   ID of the MP process.\n+ * @param share_cache\n+ *   Shared MR cache.\n+ * @param pd\n+ *   Protection domain.\n+ * @param mempool\n+ *   Mempool to register or unregister.\n+ * @param reg\n+ *   True to register the mempool, False to unregister.\n+ */\n+int\n+mlx5_mp_req_mempool_reg(struct mlx5_mp_id *mp_id,\n+\t\t\tstruct mlx5_mr_share_cache *share_cache, void *pd,\n+\t\t\tstruct rte_mempool *mempool, bool reg)\n+{\n+\tstruct rte_mp_msg mp_req;\n+\tstruct rte_mp_msg *mp_res;\n+\tstruct rte_mp_reply mp_rep;\n+\tstruct mlx5_mp_param *req = (struct mlx5_mp_param *)mp_req.param;\n+\tstruct mlx5_mp_arg_mempool_reg *arg = &req->args.mempool_reg;\n+\tstruct mlx5_mp_param *res;\n+\tstruct timespec ts = {.tv_sec = MLX5_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};\n+\tenum mlx5_mp_req_type type;\n+\tint ret;\n+\n+\tMLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);\n+\ttype = reg ? MLX5_MP_REQ_MEMPOOL_REGISTER :\n+\t\t     MLX5_MP_REQ_MEMPOOL_UNREGISTER;\n+\tmp_init_msg(mp_id, &mp_req, type);\n+\targ->share_cache = share_cache;\n+\targ->pd = pd;\n+\targ->mempool = mempool;\n+\tret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);\n+\tif (ret) {\n+\t\tDRV_LOG(ERR, \"port %u request to primary process failed\",\n+\t\t\tmp_id->port_id);\n+\t\treturn -rte_errno;\n+\t}\n+\tMLX5_ASSERT(mp_rep.nb_received == 1);\n+\tmp_res = &mp_rep.msgs[0];\n+\tres = (struct mlx5_mp_param *)mp_res->param;\n+\tret = res->result;\n+\tif (ret)\n+\t\trte_errno = -ret;\n+\tmlx5_free(mp_rep.msgs);\n+\treturn ret;\n+}\n+\n /**\n  * Request Verbs queue state modification to the primary process.\n  *\ndiff --git a/drivers/common/mlx5/mlx5_common_mp.h b/drivers/common/mlx5/mlx5_common_mp.h\nindex 6829141fc7..527bf3cad8 100644\n--- a/drivers/common/mlx5/mlx5_common_mp.h\n+++ b/drivers/common/mlx5/mlx5_common_mp.h\n@@ -14,6 +14,8 @@\n enum mlx5_mp_req_type {\n \tMLX5_MP_REQ_VERBS_CMD_FD = 1,\n \tMLX5_MP_REQ_CREATE_MR,\n+\tMLX5_MP_REQ_MEMPOOL_REGISTER,\n+\tMLX5_MP_REQ_MEMPOOL_UNREGISTER,\n \tMLX5_MP_REQ_START_RXTX,\n \tMLX5_MP_REQ_STOP_RXTX,\n \tMLX5_MP_REQ_QUEUE_STATE_MODIFY,\n@@ -33,6 +35,12 @@ struct mlx5_mp_arg_queue_id {\n \tuint16_t queue_id; /* DPDK queue ID. */\n };\n \n+struct mlx5_mp_arg_mempool_reg {\n+\tstruct mlx5_mr_share_cache *share_cache;\n+\tvoid *pd; /* NULL for MLX5_MP_REQ_MEMPOOL_UNREGISTER */\n+\tstruct rte_mempool *mempool;\n+};\n+\n /* Pameters for IPC. 
*/\n struct mlx5_mp_param {\n \tenum mlx5_mp_req_type type;\n@@ -41,6 +49,8 @@ struct mlx5_mp_param {\n \tRTE_STD_C11\n \tunion {\n \t\tuintptr_t addr; /* MLX5_MP_REQ_CREATE_MR */\n+\t\tstruct mlx5_mp_arg_mempool_reg mempool_reg;\n+\t\t/* MLX5_MP_REQ_MEMPOOL_(UN)REGISTER */\n \t\tstruct mlx5_mp_arg_queue_state_modify state_modify;\n \t\t/* MLX5_MP_REQ_QUEUE_STATE_MODIFY */\n \t\tstruct mlx5_mp_arg_queue_id queue_id;\n@@ -91,6 +101,10 @@ void mlx5_mp_uninit_secondary(const char *name);\n __rte_internal\n int mlx5_mp_req_mr_create(struct mlx5_mp_id *mp_id, uintptr_t addr);\n __rte_internal\n+int mlx5_mp_req_mempool_reg(struct mlx5_mp_id *mp_id,\n+\t\t\tstruct mlx5_mr_share_cache *share_cache, void *pd,\n+\t\t\tstruct rte_mempool *mempool, bool reg);\n+__rte_internal\n int mlx5_mp_req_queue_state_modify(struct mlx5_mp_id *mp_id,\n \t\t\t\t   struct mlx5_mp_arg_queue_state_modify *sm);\n __rte_internal\ndiff --git a/drivers/common/mlx5/mlx5_common_mr.c b/drivers/common/mlx5/mlx5_common_mr.c\nindex 98fe8698e2..21a83d6e1b 100644\n--- a/drivers/common/mlx5/mlx5_common_mr.c\n+++ b/drivers/common/mlx5/mlx5_common_mr.c\n@@ -2,7 +2,10 @@\n  * Copyright 2016 6WIND S.A.\n  * Copyright 2020 Mellanox Technologies, Ltd\n  */\n+#include <stddef.h>\n+\n #include <rte_eal_memconfig.h>\n+#include <rte_eal_paging.h>\n #include <rte_errno.h>\n #include <rte_mempool.h>\n #include <rte_malloc.h>\n@@ -21,6 +24,29 @@ struct mr_find_contig_memsegs_data {\n \tconst struct rte_memseg_list *msl;\n };\n \n+/* Virtual memory range. */\n+struct mlx5_range {\n+\tuintptr_t start;\n+\tuintptr_t end;\n+};\n+\n+/** Memory region for a mempool. */\n+struct mlx5_mempool_mr {\n+\tstruct mlx5_pmd_mr pmd_mr;\n+\tuint32_t refcnt; /**< Number of mempools sharing this MR. */\n+};\n+\n+/* Mempool registration. */\n+struct mlx5_mempool_reg {\n+\tLIST_ENTRY(mlx5_mempool_reg) next;\n+\t/** Registered mempool, used to designate registrations. */\n+\tstruct rte_mempool *mp;\n+\t/** Memory regions for the address ranges of the mempool. */\n+\tstruct mlx5_mempool_mr *mrs;\n+\t/** Number of memory regions. */\n+\tunsigned int mrs_n;\n+};\n+\n /**\n  * Expand B-tree table to a given size. 
Can't be called with holding\n  * memory_hotplug_lock or share_cache.rwlock due to rte_realloc().\n@@ -1191,3 +1217,541 @@ mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused)\n \trte_rwlock_read_unlock(&share_cache->rwlock);\n #endif\n }\n+\n+static int\n+mlx5_range_compare_start(const void *lhs, const void *rhs)\n+{\n+\tconst struct mlx5_range *r1 = lhs, *r2 = rhs;\n+\n+\tif (r1->start > r2->start)\n+\t\treturn 1;\n+\telse if (r1->start < r2->start)\n+\t\treturn -1;\n+\treturn 0;\n+}\n+\n+static void\n+mlx5_range_from_mempool_chunk(struct rte_mempool *mp, void *opaque,\n+\t\t\t      struct rte_mempool_memhdr *memhdr,\n+\t\t\t      unsigned int idx)\n+{\n+\tstruct mlx5_range *ranges = opaque, *range = &ranges[idx];\n+\tuint64_t page_size = rte_mem_page_size();\n+\n+\tRTE_SET_USED(mp);\n+\trange->start = RTE_ALIGN_FLOOR((uintptr_t)memhdr->addr, page_size);\n+\trange->end = RTE_ALIGN_CEIL(range->start + memhdr->len, page_size);\n+}\n+\n+/**\n+ * Get VA-contiguous ranges of the mempool memory.\n+ * Each range start and end is aligned to the system page size.\n+ *\n+ * @param[in] mp\n+ *   Analyzed mempool.\n+ * @param[out] out\n+ *   Receives the ranges, caller must release it with free().\n+ * @param[out] ount_n\n+ *   Receives the number of @p out elements.\n+ *\n+ * @return\n+ *   0 on success, (-1) on failure.\n+ */\n+static int\n+mlx5_get_mempool_ranges(struct rte_mempool *mp, struct mlx5_range **out,\n+\t\t\tunsigned int *out_n)\n+{\n+\tstruct mlx5_range *chunks;\n+\tunsigned int chunks_n = mp->nb_mem_chunks, contig_n, i;\n+\n+\t/* Collect page-aligned memory ranges of the mempool. */\n+\tchunks = calloc(sizeof(chunks[0]), chunks_n);\n+\tif (chunks == NULL)\n+\t\treturn -1;\n+\trte_mempool_mem_iter(mp, mlx5_range_from_mempool_chunk, chunks);\n+\t/* Merge adjacent chunks and place them at the beginning. */\n+\tqsort(chunks, chunks_n, sizeof(chunks[0]), mlx5_range_compare_start);\n+\tcontig_n = 1;\n+\tfor (i = 1; i < chunks_n; i++)\n+\t\tif (chunks[i - 1].end != chunks[i].start) {\n+\t\t\tchunks[contig_n - 1].end = chunks[i - 1].end;\n+\t\t\tchunks[contig_n] = chunks[i];\n+\t\t\tcontig_n++;\n+\t\t}\n+\t/* Extend the last contiguous chunk to the end of the mempool. */\n+\tchunks[contig_n - 1].end = chunks[i - 1].end;\n+\t*out = chunks;\n+\t*out_n = contig_n;\n+\treturn 0;\n+}\n+\n+/**\n+ * Analyze mempool memory to select memory ranges to register.\n+ *\n+ * @param[in] mp\n+ *   Mempool to analyze.\n+ * @param[out] out\n+ *   Receives memory ranges to register, aligned to the system page size.\n+ *   The caller must release them with free().\n+ * @param[out] out_n\n+ *   Receives the number of @p out items.\n+ * @param[out] share_hugepage\n+ *   Receives True if the entire pool resides within a single hugepage.\n+ *\n+ * @return\n+ *   0 on success, (-1) on failure.\n+ */\n+static int\n+mlx5_mempool_reg_analyze(struct rte_mempool *mp, struct mlx5_range **out,\n+\t\t\t unsigned int *out_n, bool *share_hugepage)\n+{\n+\tstruct mlx5_range *ranges = NULL;\n+\tunsigned int i, ranges_n = 0;\n+\tstruct rte_memseg_list *msl;\n+\n+\tif (mlx5_get_mempool_ranges(mp, &ranges, &ranges_n) < 0) {\n+\t\tDRV_LOG(ERR, \"Cannot get address ranges for mempool %s\",\n+\t\t\tmp->name);\n+\t\treturn -1;\n+\t}\n+\t/* Check if the hugepage of the pool can be shared. */\n+\t*share_hugepage = false;\n+\tmsl = rte_mem_virt2memseg_list((void *)ranges[0].start);\n+\tif (msl != NULL) {\n+\t\tuint64_t hugepage_sz = 0;\n+\n+\t\t/* Check that all ranges are on pages of the same size. 
*/\n+\t\tfor (i = 0; i < ranges_n; i++) {\n+\t\t\tif (hugepage_sz != 0 && hugepage_sz != msl->page_sz)\n+\t\t\t\tbreak;\n+\t\t\thugepage_sz = msl->page_sz;\n+\t\t}\n+\t\tif (i == ranges_n) {\n+\t\t\t/*\n+\t\t\t * If the entire pool is within one hugepage,\n+\t\t\t * combine all ranges into one of the hugepage size.\n+\t\t\t */\n+\t\t\tuintptr_t reg_start = ranges[0].start;\n+\t\t\tuintptr_t reg_end = ranges[ranges_n - 1].end;\n+\t\t\tuintptr_t hugepage_start =\n+\t\t\t\tRTE_ALIGN_FLOOR(reg_start, hugepage_sz);\n+\t\t\tuintptr_t hugepage_end = hugepage_start + hugepage_sz;\n+\t\t\tif (reg_end < hugepage_end) {\n+\t\t\t\tranges[0].start = hugepage_start;\n+\t\t\t\tranges[0].end = hugepage_end;\n+\t\t\t\tranges_n = 1;\n+\t\t\t\t*share_hugepage = true;\n+\t\t\t}\n+\t\t}\n+\t}\n+\t*out = ranges;\n+\t*out_n = ranges_n;\n+\treturn 0;\n+}\n+\n+/** Create a registration object for the mempool. */\n+static struct mlx5_mempool_reg *\n+mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n)\n+{\n+\tstruct mlx5_mempool_reg *mpr = NULL;\n+\n+\tmpr = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,\n+\t\t\t  sizeof(*mpr) + mrs_n * sizeof(mpr->mrs[0]),\n+\t\t\t  RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);\n+\tif (mpr == NULL) {\n+\t\tDRV_LOG(ERR, \"Cannot allocate mempool %s registration object\",\n+\t\t\tmp->name);\n+\t\treturn NULL;\n+\t}\n+\tmpr->mp = mp;\n+\tmpr->mrs = (struct mlx5_mempool_mr *)(mpr + 1);\n+\tmpr->mrs_n = mrs_n;\n+\treturn mpr;\n+}\n+\n+/**\n+ * Destroy a mempool registration object.\n+ *\n+ * @param standalone\n+ *   Whether @p mpr owns its MRs excludively, i.e. they are not shared.\n+ */\n+static void\n+mlx5_mempool_reg_destroy(struct mlx5_mr_share_cache *share_cache,\n+\t\t\t struct mlx5_mempool_reg *mpr, bool standalone)\n+{\n+\tif (standalone) {\n+\t\tunsigned int i;\n+\n+\t\tfor (i = 0; i < mpr->mrs_n; i++)\n+\t\t\tshare_cache->dereg_mr_cb(&mpr->mrs[i].pmd_mr);\n+\t}\n+\tmlx5_free(mpr);\n+}\n+\n+/** Find registration object of a mempool. */\n+static struct mlx5_mempool_reg *\n+mlx5_mempool_reg_lookup(struct mlx5_mr_share_cache *share_cache,\n+\t\t\tstruct rte_mempool *mp)\n+{\n+\tstruct mlx5_mempool_reg *mpr;\n+\n+\tLIST_FOREACH(mpr, &share_cache->mempool_reg_list, next)\n+\t\tif (mpr->mp == mp)\n+\t\t\tbreak;\n+\treturn mpr;\n+}\n+\n+/** Increment reference counters of MRs used in the registration. 
*/\n+static void\n+mlx5_mempool_reg_attach(struct mlx5_mempool_reg *mpr)\n+{\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < mpr->mrs_n; i++)\n+\t\t__atomic_add_fetch(&mpr->mrs[i].refcnt, 1, __ATOMIC_RELAXED);\n+}\n+\n+/**\n+ * Decrement reference counters of MRs used in the registration.\n+ *\n+ * @return True if no more references to @p mpr MRs exist, False otherwise.\n+ */\n+static bool\n+mlx5_mempool_reg_detach(struct mlx5_mempool_reg *mpr)\n+{\n+\tunsigned int i;\n+\tbool ret = false;\n+\n+\tfor (i = 0; i < mpr->mrs_n; i++)\n+\t\tret |= __atomic_sub_fetch(&mpr->mrs[i].refcnt, 1,\n+\t\t\t\t\t  __ATOMIC_RELAXED) == 0;\n+\treturn ret;\n+}\n+\n+static int\n+mlx5_mr_mempool_register_primary(struct mlx5_mr_share_cache *share_cache,\n+\t\t\t\t void *pd, struct rte_mempool *mp)\n+{\n+\tstruct mlx5_range *ranges = NULL;\n+\tstruct mlx5_mempool_reg *mpr, *new_mpr;\n+\tunsigned int i, ranges_n;\n+\tbool share_hugepage;\n+\tint ret = -1;\n+\n+\tif (mlx5_mempool_reg_analyze(mp, &ranges, &ranges_n,\n+\t\t\t\t     &share_hugepage) < 0) {\n+\t\tDRV_LOG(ERR, \"Cannot get mempool %s memory ranges\", mp->name);\n+\t\trte_errno = ENOMEM;\n+\t\tgoto exit;\n+\t}\n+\tnew_mpr = mlx5_mempool_reg_create(mp, ranges_n);\n+\tif (new_mpr == NULL) {\n+\t\tDRV_LOG(ERR,\n+\t\t\t\"Cannot create a registration object for mempool %s in PD %p\",\n+\t\t\tmp->name, pd);\n+\t\trte_errno = ENOMEM;\n+\t\tgoto exit;\n+\t}\n+\t/*\n+\t * If the entire mempool fits in a single hugepage, the MR for this\n+\t * hugepage can be shared across mempools that also fit in it.\n+\t */\n+\tif (share_hugepage) {\n+\t\trte_rwlock_write_lock(&share_cache->rwlock);\n+\t\tLIST_FOREACH(mpr, &share_cache->mempool_reg_list, next) {\n+\t\t\tif (mpr->mrs[0].pmd_mr.addr == (void *)ranges[0].start)\n+\t\t\t\tbreak;\n+\t\t}\n+\t\tif (mpr != NULL) {\n+\t\t\tnew_mpr->mrs = mpr->mrs;\n+\t\t\tmlx5_mempool_reg_attach(new_mpr);\n+\t\t\tLIST_INSERT_HEAD(&share_cache->mempool_reg_list,\n+\t\t\t\t\t new_mpr, next);\n+\t\t}\n+\t\trte_rwlock_write_unlock(&share_cache->rwlock);\n+\t\tif (mpr != NULL) {\n+\t\t\tDRV_LOG(DEBUG, \"Shared MR %#x in PD %p for mempool %s with mempool %s\",\n+\t\t\t\tmpr->mrs[0].pmd_mr.lkey, pd, mp->name,\n+\t\t\t\tmpr->mp->name);\n+\t\t\tret = 0;\n+\t\t\tgoto exit;\n+\t\t}\n+\t}\n+\tfor (i = 0; i < ranges_n; i++) {\n+\t\tstruct mlx5_mempool_mr *mr = &new_mpr->mrs[i];\n+\t\tconst struct mlx5_range *range = &ranges[i];\n+\t\tsize_t len = range->end - range->start;\n+\n+\t\tif (share_cache->reg_mr_cb(pd, (void *)range->start, len,\n+\t\t    &mr->pmd_mr) < 0) {\n+\t\t\tDRV_LOG(ERR,\n+\t\t\t\t\"Failed to create an MR in PD %p for address range \"\n+\t\t\t\t\"[0x%\" PRIxPTR \", 0x%\" PRIxPTR \"] (%zu bytes) for mempool %s\",\n+\t\t\t\tpd, range->start, range->end, len, mp->name);\n+\t\t\tbreak;\n+\t\t}\n+\t\tDRV_LOG(DEBUG,\n+\t\t\t\"Created a new MR %#x in PD %p for address range \"\n+\t\t\t\"[0x%\" PRIxPTR \", 0x%\" PRIxPTR \"] (%zu bytes) for mempool %s\",\n+\t\t\tmr->pmd_mr.lkey, pd, range->start, range->end, len,\n+\t\t\tmp->name);\n+\t}\n+\tif (i != ranges_n) {\n+\t\tmlx5_mempool_reg_destroy(share_cache, new_mpr, true);\n+\t\trte_errno = EINVAL;\n+\t\tgoto exit;\n+\t}\n+\t/* Concurrent registration is not supposed to happen. 
*/\n+\trte_rwlock_write_lock(&share_cache->rwlock);\n+\tmpr =  mlx5_mempool_reg_lookup(share_cache, mp);\n+\tif (mpr == NULL) {\n+\t\tmlx5_mempool_reg_attach(new_mpr);\n+\t\tLIST_INSERT_HEAD(&share_cache->mempool_reg_list,\n+\t\t\t\t new_mpr, next);\n+\t\tret = 0;\n+\t}\n+\trte_rwlock_write_unlock(&share_cache->rwlock);\n+\tif (mpr != NULL) {\n+\t\tDRV_LOG(ERR, \"Mempool %s is already registered for PD %p\",\n+\t\t\tmp->name, pd);\n+\t\tmlx5_mempool_reg_destroy(share_cache, new_mpr, true);\n+\t\trte_errno = EEXIST;\n+\t\tgoto exit;\n+\t}\n+exit:\n+\tfree(ranges);\n+\treturn ret;\n+}\n+\n+static int\n+mlx5_mr_mempool_register_secondary(struct mlx5_mr_share_cache *share_cache,\n+\t\t\t\t   void *pd, struct rte_mempool *mp,\n+\t\t\t\t   struct mlx5_mp_id *mp_id)\n+{\n+\tif (mp_id == NULL) {\n+\t\trte_errno = EINVAL;\n+\t\treturn -1;\n+\t}\n+\treturn mlx5_mp_req_mempool_reg(mp_id, share_cache, pd, mp, true);\n+}\n+\n+/**\n+ * Register the memory of a mempool in the protection domain.\n+ *\n+ * @param share_cache\n+ *   Shared MR cache of the protection domain.\n+ * @param pd\n+ *   Protection domain object.\n+ * @param mp\n+ *   Mempool to register.\n+ * @param mp_id\n+ *   Multi-process identifier, may be NULL for the primary process.\n+ *\n+ * @return\n+ *   0 on success, (-1) on failure and rte_errno is set.\n+ */\n+int\n+mlx5_mr_mempool_register(struct mlx5_mr_share_cache *share_cache, void *pd,\n+\t\t\t struct rte_mempool *mp, struct mlx5_mp_id *mp_id)\n+{\n+\tif (mp->flags & MEMPOOL_F_NON_IO)\n+\t\treturn 0;\n+\tswitch (rte_eal_process_type()) {\n+\tcase RTE_PROC_PRIMARY:\n+\t\treturn mlx5_mr_mempool_register_primary(share_cache, pd, mp);\n+\tcase RTE_PROC_SECONDARY:\n+\t\treturn mlx5_mr_mempool_register_secondary(share_cache, pd, mp,\n+\t\t\t\t\t\t\t  mp_id);\n+\tdefault:\n+\t\treturn -1;\n+\t}\n+}\n+\n+static int\n+mlx5_mr_mempool_unregister_primary(struct mlx5_mr_share_cache *share_cache,\n+\t\t\t\t   struct rte_mempool *mp)\n+{\n+\tstruct mlx5_mempool_reg *mpr;\n+\tbool standalone = false;\n+\n+\trte_rwlock_write_lock(&share_cache->rwlock);\n+\tLIST_FOREACH(mpr, &share_cache->mempool_reg_list, next)\n+\t\tif (mpr->mp == mp) {\n+\t\t\tstandalone = mlx5_mempool_reg_detach(mpr);\n+\t\t\tLIST_REMOVE(mpr, next);\n+\t\t\tbreak;\n+\t\t}\n+\trte_rwlock_write_unlock(&share_cache->rwlock);\n+\tif (mpr == NULL) {\n+\t\trte_errno = ENOENT;\n+\t\treturn -1;\n+\t}\n+\tmlx5_mempool_reg_destroy(share_cache, mpr, standalone);\n+\treturn 0;\n+}\n+\n+static int\n+mlx5_mr_mempool_unregister_secondary(struct mlx5_mr_share_cache *share_cache,\n+\t\t\t\t     struct rte_mempool *mp,\n+\t\t\t\t     struct mlx5_mp_id *mp_id)\n+{\n+\tif (mp_id == NULL) {\n+\t\trte_errno = EINVAL;\n+\t\treturn -1;\n+\t}\n+\treturn mlx5_mp_req_mempool_reg(mp_id, share_cache, NULL, mp, false);\n+}\n+\n+/**\n+ * Unregister the memory of a mempool from the protection domain.\n+ *\n+ * @param share_cache\n+ *   Shared MR cache of the protection domain.\n+ * @param mp\n+ *   Mempool to unregister.\n+ * @param mp_id\n+ *   Multi-process identifier, may be NULL for the primary process.\n+ *\n+ * @return\n+ *   0 on success, (-1) on failure and rte_errno is set.\n+ */\n+int\n+mlx5_mr_mempool_unregister(struct mlx5_mr_share_cache *share_cache,\n+\t\t\t   struct rte_mempool *mp, struct mlx5_mp_id *mp_id)\n+{\n+\tif (mp->flags & MEMPOOL_F_NON_IO)\n+\t\treturn 0;\n+\tswitch (rte_eal_process_type()) {\n+\tcase RTE_PROC_PRIMARY:\n+\t\treturn mlx5_mr_mempool_unregister_primary(share_cache, mp);\n+\tcase RTE_PROC_SECONDARY:\n+\t\treturn 
mlx5_mr_mempool_unregister_secondary(share_cache, mp,\n+\t\t\t\t\t\t\t    mp_id);\n+\tdefault:\n+\t\treturn -1;\n+\t}\n+}\n+\n+/**\n+ * Lookup a MR key by and address in a registered mempool.\n+ *\n+ * @param mpr\n+ *   Mempool registration object.\n+ * @param addr\n+ *   Address within the mempool.\n+ * @param entry\n+ *   Bottom-half cache entry to fill.\n+ *\n+ * @return\n+ *   MR key or UINT32_MAX on failure, which can only happen\n+ *   if the address is not from within the mempool.\n+ */\n+static uint32_t\n+mlx5_mempool_reg_addr2mr(struct mlx5_mempool_reg *mpr, uintptr_t addr,\n+\t\t\t struct mr_cache_entry *entry)\n+{\n+\tuint32_t lkey = UINT32_MAX;\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < mpr->mrs_n; i++) {\n+\t\tconst struct mlx5_pmd_mr *mr = &mpr->mrs[i].pmd_mr;\n+\t\tuintptr_t mr_addr = (uintptr_t)mr->addr;\n+\n+\t\tif (mr_addr <= addr) {\n+\t\t\tlkey = rte_cpu_to_be_32(mr->lkey);\n+\t\t\tentry->start = mr_addr;\n+\t\t\tentry->end = mr_addr + mr->len;\n+\t\t\tentry->lkey = lkey;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\treturn lkey;\n+}\n+\n+/**\n+ * Update bottom-half cache from the list of mempool registrations.\n+ *\n+ * @param share_cache\n+ *   Pointer to a global shared MR cache.\n+ * @param mr_ctrl\n+ *   Per-queue MR control handle.\n+ * @param entry\n+ *   Pointer to an entry in the bottom-half cache to update\n+ *   with the MR lkey looked up.\n+ * @param mp\n+ *   Mempool containing the address.\n+ * @param addr\n+ *   Address to lookup.\n+ * @return\n+ *   MR lkey on success, UINT32_MAX on failure.\n+ */\n+static uint32_t\n+mlx5_lookup_mempool_regs(struct mlx5_mr_share_cache *share_cache,\n+\t\t\t struct mlx5_mr_ctrl *mr_ctrl,\n+\t\t\t struct mr_cache_entry *entry,\n+\t\t\t struct rte_mempool *mp, uintptr_t addr)\n+{\n+\tstruct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;\n+\tstruct mlx5_mempool_reg *mpr;\n+\tuint32_t lkey = UINT32_MAX;\n+\n+\t/* If local cache table is full, try to double it. */\n+\tif (unlikely(bt->len == bt->size))\n+\t\tmr_btree_expand(bt, bt->size << 1);\n+\t/* Look up in mempool registrations. */\n+\trte_rwlock_read_lock(&share_cache->rwlock);\n+\tmpr = mlx5_mempool_reg_lookup(share_cache, mp);\n+\tif (mpr != NULL)\n+\t\tlkey = mlx5_mempool_reg_addr2mr(mpr, addr, entry);\n+\trte_rwlock_read_unlock(&share_cache->rwlock);\n+\t/*\n+\t * Update local cache. Even if it fails, return the found entry\n+\t * to update top-half cache. Next time, this entry will be found\n+\t * in the global cache.\n+\t */\n+\tif (lkey != UINT32_MAX)\n+\t\tmr_btree_insert(bt, entry);\n+\treturn lkey;\n+}\n+\n+/**\n+ * Bottom-half lookup for the address from the mempool.\n+ *\n+ * @param share_cache\n+ *   Pointer to a global shared MR cache.\n+ * @param mr_ctrl\n+ *   Per-queue MR control handle.\n+ * @param mp\n+ *   Mempool containing the address.\n+ * @param addr\n+ *   Address to lookup.\n+ * @return\n+ *   MR lkey on success, UINT32_MAX on failure.\n+ */\n+uint32_t\n+mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,\n+\t\t      struct mlx5_mr_ctrl *mr_ctrl,\n+\t\t      struct rte_mempool *mp, uintptr_t addr)\n+{\n+\tstruct mr_cache_entry *repl = &mr_ctrl->cache[mr_ctrl->head];\n+\tuint32_t lkey;\n+\tuint16_t bh_idx = 0;\n+\n+\t/* Binary-search MR translation table. */\n+\tlkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);\n+\t/* Update top-half cache. 
*/\n+\tif (likely(lkey != UINT32_MAX)) {\n+\t\t*repl = (*mr_ctrl->cache_bh.table)[bh_idx];\n+\t} else {\n+\t\tlkey = mlx5_lookup_mempool_regs(share_cache, mr_ctrl, repl,\n+\t\t\t\t\t\tmp, addr);\n+\t\t/* Can only fail if the address is not from the mempool. */\n+\t\tif (unlikely(lkey == UINT32_MAX))\n+\t\t\treturn UINT32_MAX;\n+\t}\n+\t/* Update the most recently used entry. */\n+\tmr_ctrl->mru = mr_ctrl->head;\n+\t/* Point to the next victim, the oldest. */\n+\tmr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;\n+\treturn lkey;\n+}\ndiff --git a/drivers/common/mlx5/mlx5_common_mr.h b/drivers/common/mlx5/mlx5_common_mr.h\nindex 6e465a05e9..685ac98e08 100644\n--- a/drivers/common/mlx5/mlx5_common_mr.h\n+++ b/drivers/common/mlx5/mlx5_common_mr.h\n@@ -13,6 +13,7 @@\n \n #include <rte_rwlock.h>\n #include <rte_bitmap.h>\n+#include <rte_mbuf.h>\n #include <rte_memory.h>\n \n #include \"mlx5_glue.h\"\n@@ -75,6 +76,7 @@ struct mlx5_mr_ctrl {\n } __rte_packed;\n \n LIST_HEAD(mlx5_mr_list, mlx5_mr);\n+LIST_HEAD(mlx5_mempool_reg_list, mlx5_mempool_reg);\n \n /* Global per-device MR cache. */\n struct mlx5_mr_share_cache {\n@@ -83,6 +85,7 @@ struct mlx5_mr_share_cache {\n \tstruct mlx5_mr_btree cache; /* Global MR cache table. */\n \tstruct mlx5_mr_list mr_list; /* Registered MR list. */\n \tstruct mlx5_mr_list mr_free_list; /* Freed MR list. */\n+\tstruct mlx5_mempool_reg_list mempool_reg_list; /* Mempool database. */\n \tmlx5_reg_mr_t reg_mr_cb; /* Callback to reg_mr func */\n \tmlx5_dereg_mr_t dereg_mr_cb; /* Callback to dereg_mr func */\n } __rte_packed;\n@@ -136,6 +139,10 @@ uint32_t mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,\n \t\t\t    struct mlx5_mr_ctrl *mr_ctrl,\n \t\t\t    uintptr_t addr, unsigned int mr_ext_memseg_en);\n __rte_internal\n+uint32_t mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,\n+\t\t\t       struct mlx5_mr_ctrl *mr_ctrl,\n+\t\t\t       struct rte_mempool *mp, uintptr_t addr);\n+__rte_internal\n void mlx5_mr_release_cache(struct mlx5_mr_share_cache *mr_cache);\n __rte_internal\n void mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused);\n@@ -179,4 +186,14 @@ mlx5_common_verbs_dereg_mr(struct mlx5_pmd_mr *pmd_mr);\n __rte_internal\n void\n mlx5_mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb);\n+\n+__rte_internal\n+int\n+mlx5_mr_mempool_register(struct mlx5_mr_share_cache *share_cache, void *pd,\n+\t\t\t struct rte_mempool *mp, struct mlx5_mp_id *mp_id);\n+__rte_internal\n+int\n+mlx5_mr_mempool_unregister(struct mlx5_mr_share_cache *share_cache,\n+\t\t\t   struct rte_mempool *mp, struct mlx5_mp_id *mp_id);\n+\n #endif /* RTE_PMD_MLX5_COMMON_MR_H_ */\ndiff --git a/drivers/common/mlx5/version.map b/drivers/common/mlx5/version.map\nindex e5cb6b7060..d5e9635a14 100644\n--- a/drivers/common/mlx5/version.map\n+++ b/drivers/common/mlx5/version.map\n@@ -149,4 +149,9 @@ INTERNAL {\n \tmlx5_realloc;\n \n \tmlx5_translate_port_name; # WINDOWS_NO_EXPORT\n+\n+\tmlx5_mr_mempool_register;\n+\tmlx5_mr_mempool_unregister;\n+\tmlx5_mp_req_mempool_reg;\n+\tmlx5_mr_mempool2mr_bh;\n };\n",
    "prefixes": [
        "3/4"
    ]
}
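The mbox field above is a stable URL for the raw patch email, so a response like this can drive a fetch-and-apply workflow. A minimal sketch, continuing the requests-based approach; it assumes git is on PATH and the current directory is a clean DPDK checkout:

import subprocess

import requests

patch = requests.get("http://patchwork.dpdk.org/api/patches/97038/").json()
mbox = requests.get(patch["mbox"]).content  # raw patch email as bytes

# git am reads the mbox from stdin and applies it as a commit.
subprocess.run(["git", "am"], input=mbox, check=True)

The series entry carries its own mbox URL (http://patchwork.dpdk.org/series/18324/mbox/), which bundles the whole series and may be more convenient to apply than a single patch.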