get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/104490/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 104490,
    "url": "http://patchwork.dpdk.org/api/patches/104490/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20211117184936.2581314-1-dkozlyuk@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211117184936.2581314-1-dkozlyuk@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211117184936.2581314-1-dkozlyuk@nvidia.com",
    "date": "2021-11-17T18:49:36",
    "name": "common/mlx5: fix mempool registration",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "d7c6c39707f96f491f8900847f6e7fd350fbd41b",
    "submitter": {
        "id": 2248,
        "url": "http://patchwork.dpdk.org/api/people/2248/?format=api",
        "name": "Dmitry Kozlyuk",
        "email": "dkozlyuk@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patchwork.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20211117184936.2581314-1-dkozlyuk@nvidia.com/mbox/",
    "series": [
        {
            "id": 20633,
            "url": "http://patchwork.dpdk.org/api/series/20633/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=20633",
            "date": "2021-11-17T18:49:36",
            "name": "common/mlx5: fix mempool registration",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/20633/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/104490/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/104490/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id AB712A0C41;\n\tWed, 17 Nov 2021 19:50:04 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 3A49640687;\n\tWed, 17 Nov 2021 19:50:04 +0100 (CET)",
            "from NAM10-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam10on2075.outbound.protection.outlook.com [40.107.93.75])\n by mails.dpdk.org (Postfix) with ESMTP id 300AE40040\n for <dev@dpdk.org>; Wed, 17 Nov 2021 19:50:02 +0100 (CET)",
            "from MW4PR04CA0043.namprd04.prod.outlook.com (2603:10b6:303:6a::18)\n by BL1PR12MB5221.namprd12.prod.outlook.com (2603:10b6:208:30b::9)\n with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4713.19; Wed, 17 Nov\n 2021 18:49:59 +0000",
            "from CO1NAM11FT022.eop-nam11.prod.protection.outlook.com\n (2603:10b6:303:6a:cafe::1f) by MW4PR04CA0043.outlook.office365.com\n (2603:10b6:303:6a::18) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4669.11 via Frontend\n Transport; Wed, 17 Nov 2021 18:49:59 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n CO1NAM11FT022.mail.protection.outlook.com (10.13.175.199) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4713.20 via Frontend Transport; Wed, 17 Nov 2021 18:49:58 +0000",
            "from nvidia.com (172.20.187.6) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Wed, 17 Nov\n 2021 18:49:49 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=Y0cR11WdmjP0bYKnhewBHskYoEe/991i504Y+lwsQh1oHd3wd9fhQHfLsVuF4NZY59XPCKOx6iRctz7TivCNwYGbR7oVmj9Ml3MmkROwUpcVqsESMwZr+z8FeH8f9OAtkLH5bhgWJy4KYgPnP1I+E1qsRcHZtpbTjLSvzsSTak916vF2Mo4OTV58ffq9B8Eugpjvf+MBLWOGklK+wqdmfKu2UH36TPuHZdggmoIl+v691+y5w95s7lKpAzax0g9toEMwAstbFQ78e7dc4YzONXkqIqgzIJuFAtPZW3lf1D9oNNQkKmBZ8y9amN8c6lzSOl0SSGS8S8wcZCvNYtla2w==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=88F1QoB8P5T+kvS3OofkIlDgP1HmXVWeneB8W19v43I=;\n b=ML4MM4Uiyr8vwyWaIrgTID7m4CtcVHd02GfmFIBgPduigbFz6lzl8LmP1itBnt9Y+qGpQrJyhihzROHhbi0Y5WPZmnV1Hd8gAwXjFkoSoDasZsVBDsfpCni/Qnj+8pE7KStZXg+QGSyexEYiD4dY4wKJh2wBUYJ6PNWq12pRPlH8aLw4r99Tk+dZ+tpmXSISgihSQXgwCCV5eY5ZnfCBoM88AIjZd5gKjqeRh9Np8qjlR2Z/UtIvTTK615Rxd/blzLATlcr480Tfcbs+b4PejGRtCc3Xob2DCsfnIFEm5pHmXAUovcqAaW8Xu5OeL/AGVc5X5vDmHmN98LQwu8xb9Q==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=quarantine pct=100) action=none\n header.from=nvidia.com; dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=88F1QoB8P5T+kvS3OofkIlDgP1HmXVWeneB8W19v43I=;\n b=OctkHTPMVyFESYDTYh7X/x1+UscT7oNuR3nAWxMyVFBMmG/lu4ynMJsezkLPWa3TrmJVYuHnjIkvQ2Y9AotcD59c+CnfhRVkB5HOyyfxhS0h9Pmf5nhbk9W7CSbOLMNWh8MiVevPW5d6AuJIuDZvviooYLXX8V7DbRWykGnKnWwHg1ClFQ76MVhbdQl4VuLN3WQ3CQsLkQIomyIEECmfruCdYN7Dyvc/HDkqiG8RcqG9GwTmN7ECMIYYMYAQei9VQAzj/9jzaH73OchzCv6FvxmBYgk4iVJeFHlm7w5H+ucpybtlJ/AGSNyycyhJnYvTKkqv4oCY+1bxTfwvoGfpGQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "Dmitry Kozlyuk <dkozlyuk@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "Raslan Darawsheh <rasland@nvidia.com>, Viacheslav Ovsiienko\n <viacheslavo@nvidia.com>, Michael Baum <michaelba@nvidia.com>, Matan Azrad\n <matan@nvidia.com>",
        "Subject": "[PATCH] common/mlx5: fix mempool registration",
        "Date": "Wed, 17 Nov 2021 20:49:36 +0200",
        "Message-ID": "<20211117184936.2581314-1-dkozlyuk@nvidia.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.6]",
        "X-ClientProxiedBy": "HQMAIL105.nvidia.com (172.20.187.12) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "118da9ff-a8f5-4957-c3e1-08d9a9fb0dee",
        "X-MS-TrafficTypeDiagnostic": "BL1PR12MB5221:",
        "X-Microsoft-Antispam-PRVS": "\n <BL1PR12MB522152FB8AB9CB812ECE679BB99A9@BL1PR12MB5221.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:2331;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n vE8qdS03ncKHjPudATJe1Xh1UWYSLzH88FKKKsggPMApjVMiUGniMB3XRCnj4m4Xrtsz6jr522C9vOsqldclkg6PVUfwY2fsmgABP7Et8Exgwk7TUpW+K7GHlrCXhozkjB7Uaf6W1syNioHGKQyRbwiLYEU2PR4omoCmLNsSo0LCtaoXTKhuoJfrgaLehDkP0IU7Y5wSHlw6eKLPyG6dl2F0t9KOaoNhM/Ls4dWZo8HsHKKzc6bXEHWHpERoVQmIdKU6XMDB4+fwGn3XZ5/rnzqbXcKE6uhaspDTZbtAxUElq4HQOU4HUJEyfSGqkJx7CDL8eGpXl/UwGABS/zf+21CL/8DvR2/AWa9+S64ympGLlbmxF1sbHbwUM+2M+mWahUSiCDM+ILBXkk9pkgVqS1A11WJZLRdiksDFoHhqP2Nrj7232SHDY6w5E3fP+QfynfmgV9GZ76SuxcGqsss/OOEchCz4WaQXollHvDm8eiao7CzrE00DlJENrhEkm5fa7Djbpm/+X5OZCp4hkb4e69zLfoi/9UZHatpmb7tjAvsN+2QXhMUZ4Zt+xL6ychc5EI76uvbJruovlaAsPZ9QRPNpgKW/+JNpl5Pr2ufR8xCRDdNTN58pghB7ZVACqDuxe5QZbj3ZWdbLJU+30+xBdqcpdrntULCsNbF1+okyBl3om2c+SKHHCkbO5lXPNvS1an42AQ7Xx+gndrINcb6TFg==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(36840700001)(46966006)(316002)(26005)(36756003)(54906003)(508600001)(47076005)(86362001)(55016002)(336012)(7636003)(70586007)(2616005)(70206006)(8936002)(107886003)(30864003)(426003)(6286002)(83380400001)(186003)(4326008)(36860700001)(82310400003)(16526019)(8676002)(6666004)(7696005)(1076003)(6916009)(356005)(2906002)(5660300002);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "17 Nov 2021 18:49:58.4253 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 118da9ff-a8f5-4957-c3e1-08d9a9fb0dee",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n CO1NAM11FT022.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BL1PR12MB5221",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Mempool registration was not correctly processing\nmempools with RTE_PKTMBUF_F_PINEND_EXT_BUF flag set\n(\"pinned mempools\" for short), because it is not known\nat registration time whether the mempool is a pktmbuf one,\nand its elements may not yet be initialized to analyze them.\nAttempts had been made to recognize such pools,\nbut there was no robust solution, only the owner of a mempool\n(the application or a device) knows its type.\nThis patch extends common/mlx5 registration code\nto accept a hint that the mempool is a pinned one\nand uses this capability from net/mlx5 driver.\n\n1. Remove all code assuming pktmbuf pool type\n   or trying to recognize the type of a pool.\n2. Register pinned mempools used for Rx\n   and their external memory on port start.\n3. Change Tx slow path logic as follows:\n   3.1. Search the mempool database for a memory region (MR)\n        by the mbuf pool and its buffer address.\n   3.2. If not MR for the address is found for the mempool,\n\tand the mempool contains only pinned external buffers,\n\tperform the mempool registration of the mempool\n\tand its external pinned memory.\n   3.3. 
Fall back to using page-based MRs in other cases\n\t(for example, a buffer with externally attached memory,\n\tbut not from a pinned mempool).\n\nFixes: 690b2a88c2f7 (\"common/mlx5: add mempool registration facilities\")\nFixes: fec28ca0e3a9 (\"net/mlx5: support mempool registration\")\n\nSigned-off-by: Dmitry Kozlyuk <dkozlyuk@nvidia.com>\nReviewed-by: Matan Azrad <matan@nvidia.com>\n---\nApplies to next-net-mlx.\n\n drivers/common/mlx5/mlx5_common.c    |  11 +--\n drivers/common/mlx5/mlx5_common_mp.c |   4 +-\n drivers/common/mlx5/mlx5_common_mp.h |  10 ++-\n drivers/common/mlx5/mlx5_common_mr.c | 109 ++++++++++++++++-----------\n drivers/common/mlx5/mlx5_common_mr.h |  12 +--\n drivers/net/mlx5/linux/mlx5_mp_os.c  |   3 +-\n drivers/net/mlx5/mlx5_rxq.c          |   2 +-\n drivers/net/mlx5/mlx5_trigger.c      |   7 +-\n 8 files changed, 88 insertions(+), 70 deletions(-)",
    "diff": "diff --git a/drivers/common/mlx5/mlx5_common.c b/drivers/common/mlx5/mlx5_common.c\nindex 66c2c08b7d..f1650f94c6 100644\n--- a/drivers/common/mlx5/mlx5_common.c\n+++ b/drivers/common/mlx5/mlx5_common.c\n@@ -317,9 +317,9 @@ mlx5_dev_to_pci_str(const struct rte_device *dev, char *addr, size_t size)\n  */\n static int\n mlx5_dev_mempool_register(struct mlx5_common_device *cdev,\n-\t\t\t  struct rte_mempool *mp)\n+\t\t\t  struct rte_mempool *mp, bool is_extmem)\n {\n-\treturn mlx5_mr_mempool_register(cdev, mp);\n+\treturn mlx5_mr_mempool_register(cdev, mp, is_extmem);\n }\n \n /**\n@@ -353,7 +353,7 @@ mlx5_dev_mempool_register_cb(struct rte_mempool *mp, void *arg)\n \tstruct mlx5_common_device *cdev = arg;\n \tint ret;\n \n-\tret = mlx5_dev_mempool_register(cdev, mp);\n+\tret = mlx5_dev_mempool_register(cdev, mp, false);\n \tif (ret < 0 && rte_errno != EEXIST)\n \t\tDRV_LOG(ERR,\n \t\t\t\"Failed to register existing mempool %s for PD %p: %s\",\n@@ -390,13 +390,10 @@ mlx5_dev_mempool_event_cb(enum rte_mempool_event event, struct rte_mempool *mp,\n \t\t\t  void *arg)\n {\n \tstruct mlx5_common_device *cdev = arg;\n-\tbool extmem = mlx5_mempool_is_extmem(mp);\n \n \tswitch (event) {\n \tcase RTE_MEMPOOL_EVENT_READY:\n-\t\tif (extmem)\n-\t\t\tbreak;\n-\t\tif (mlx5_dev_mempool_register(cdev, mp) < 0)\n+\t\tif (mlx5_dev_mempool_register(cdev, mp, false) < 0)\n \t\t\tDRV_LOG(ERR,\n \t\t\t\t\"Failed to register new mempool %s for PD %p: %s\",\n \t\t\t\tmp->name, cdev->pd, rte_strerror(rte_errno));\ndiff --git a/drivers/common/mlx5/mlx5_common_mp.c b/drivers/common/mlx5/mlx5_common_mp.c\nindex 536d61f66c..a7a671b7c5 100644\n--- a/drivers/common/mlx5/mlx5_common_mp.c\n+++ b/drivers/common/mlx5/mlx5_common_mp.c\n@@ -65,7 +65,8 @@ mlx5_mp_req_mr_create(struct mlx5_common_device *cdev, uintptr_t addr)\n  */\n int\n mlx5_mp_req_mempool_reg(struct mlx5_common_device *cdev,\n-\t\t\tstruct rte_mempool *mempool, bool reg)\n+\t\t\tstruct rte_mempool *mempool, bool 
reg,\n+\t\t\tbool is_extmem)\n {\n \tstruct rte_mp_msg mp_req;\n \tstruct rte_mp_msg *mp_res;\n@@ -82,6 +83,7 @@ mlx5_mp_req_mempool_reg(struct mlx5_common_device *cdev,\n \t\t     MLX5_MP_REQ_MEMPOOL_UNREGISTER;\n \tmp_init_port_agnostic_msg(&mp_req, type);\n \targ->mempool = mempool;\n+\targ->is_extmem = is_extmem;\n \targ->cdev = cdev;\n \tret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);\n \tif (ret) {\ndiff --git a/drivers/common/mlx5/mlx5_common_mp.h b/drivers/common/mlx5/mlx5_common_mp.h\nindex b1e3a41a20..4599ba8f92 100644\n--- a/drivers/common/mlx5/mlx5_common_mp.h\n+++ b/drivers/common/mlx5/mlx5_common_mp.h\n@@ -37,9 +37,12 @@ struct mlx5_mp_arg_queue_id {\n \n struct mlx5_mp_arg_mr_manage {\n \tstruct mlx5_common_device *cdev;\n+\tRTE_STD_C11\n \tunion {\n-\t\tstruct rte_mempool *mempool;\n-\t\t/* MLX5_MP_REQ_MEMPOOL_(UN)REGISTER */\n+\t\tstruct {\n+\t\t\tstruct rte_mempool *mempool;\n+\t\t\tbool is_extmem;\n+\t\t}; /* MLX5_MP_REQ_MEMPOOL_(UN)REGISTER */\n \t\tuintptr_t addr; /* MLX5_MP_REQ_CREATE_MR */\n \t};\n };\n@@ -134,7 +137,8 @@ __rte_internal\n int mlx5_mp_req_mr_create(struct mlx5_common_device *cdev, uintptr_t addr);\n __rte_internal\n int mlx5_mp_req_mempool_reg(struct mlx5_common_device *cdev,\n-\t\t\t    struct rte_mempool *mempool, bool reg);\n+\t\t\t    struct rte_mempool *mempool, bool reg,\n+\t\t\t    bool is_extmem);\n __rte_internal\n int mlx5_mp_req_queue_state_modify(struct mlx5_mp_id *mp_id,\n \t\t\t\t   struct mlx5_mp_arg_queue_state_modify *sm);\ndiff --git a/drivers/common/mlx5/mlx5_common_mr.c b/drivers/common/mlx5/mlx5_common_mr.c\nindex a7a499f6f9..498fa7513f 100644\n--- a/drivers/common/mlx5/mlx5_common_mr.c\n+++ b/drivers/common/mlx5/mlx5_common_mr.c\n@@ -47,6 +47,8 @@ struct mlx5_mempool_reg {\n \tstruct mlx5_mempool_mr *mrs;\n \t/** Number of memory regions. */\n \tunsigned int mrs_n;\n+\t/** Whether the MR were created for external pinned memory. 
*/\n+\tbool is_extmem;\n };\n \n void\n@@ -1400,6 +1402,8 @@ mlx5_mempool_get_extmem(struct rte_mempool *mp, struct mlx5_range **out,\n  *\n  * @param[in] mp\n  *   Analyzed mempool.\n+ * @param[in] is_extmem\n+ *   Whether the pool is contains only external pinned buffers.\n  * @param[out] out\n  *   Receives the ranges, caller must release it with free().\n  * @param[out] ount_n\n@@ -1409,17 +1413,16 @@ mlx5_mempool_get_extmem(struct rte_mempool *mp, struct mlx5_range **out,\n  *   0 on success, (-1) on failure.\n  */\n static int\n-mlx5_get_mempool_ranges(struct rte_mempool *mp, struct mlx5_range **out,\n-\t\t\tunsigned int *out_n)\n+mlx5_get_mempool_ranges(struct rte_mempool *mp, bool is_extmem,\n+\t\t\tstruct mlx5_range **out, unsigned int *out_n)\n {\n \tstruct mlx5_range *chunks;\n \tunsigned int chunks_n, contig_n, i;\n \tint ret;\n \n \t/* Collect the pool underlying memory. */\n-\tret = mlx5_mempool_is_extmem(mp) ?\n-\t      mlx5_mempool_get_extmem(mp, &chunks, &chunks_n) :\n-\t      mlx5_mempool_get_chunks(mp, &chunks, &chunks_n);\n+\tret = is_extmem ? mlx5_mempool_get_extmem(mp, &chunks, &chunks_n) :\n+\t\t\t  mlx5_mempool_get_chunks(mp, &chunks, &chunks_n);\n \tif (ret < 0)\n \t\treturn ret;\n \t/* Merge adjacent chunks and place them at the beginning. 
*/\n@@ -1443,6 +1446,8 @@ mlx5_get_mempool_ranges(struct rte_mempool *mp, struct mlx5_range **out,\n  *\n  * @param[in] mp\n  *   Mempool to analyze.\n+ * @param[in] is_extmem\n+ *   Whether the pool is contains only external pinned buffers.\n  * @param[out] out\n  *   Receives memory ranges to register, aligned to the system page size.\n  *   The caller must release them with free().\n@@ -1455,14 +1460,15 @@ mlx5_get_mempool_ranges(struct rte_mempool *mp, struct mlx5_range **out,\n  *   0 on success, (-1) on failure.\n  */\n static int\n-mlx5_mempool_reg_analyze(struct rte_mempool *mp, struct mlx5_range **out,\n-\t\t\t unsigned int *out_n, bool *share_hugepage)\n+mlx5_mempool_reg_analyze(struct rte_mempool *mp, bool is_extmem,\n+\t\t\t struct mlx5_range **out, unsigned int *out_n,\n+\t\t\t bool *share_hugepage)\n {\n \tstruct mlx5_range *ranges = NULL;\n \tunsigned int i, ranges_n = 0;\n \tstruct rte_memseg_list *msl;\n \n-\tif (mlx5_get_mempool_ranges(mp, &ranges, &ranges_n) < 0) {\n+\tif (mlx5_get_mempool_ranges(mp, is_extmem, &ranges, &ranges_n) < 0) {\n \t\tDRV_LOG(ERR, \"Cannot get address ranges for mempool %s\",\n \t\t\tmp->name);\n \t\treturn -1;\n@@ -1504,7 +1510,8 @@ mlx5_mempool_reg_analyze(struct rte_mempool *mp, struct mlx5_range **out,\n \n /** Create a registration object for the mempool. 
*/\n static struct mlx5_mempool_reg *\n-mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n)\n+mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n,\n+\t\t\tbool is_extmem)\n {\n \tstruct mlx5_mempool_reg *mpr = NULL;\n \n@@ -1519,6 +1526,7 @@ mlx5_mempool_reg_create(struct rte_mempool *mp, unsigned int mrs_n)\n \tmpr->mp = mp;\n \tmpr->mrs = (struct mlx5_mempool_mr *)(mpr + 1);\n \tmpr->mrs_n = mrs_n;\n+\tmpr->is_extmem = is_extmem;\n \treturn mpr;\n }\n \n@@ -1583,31 +1591,32 @@ mlx5_mempool_reg_detach(struct mlx5_mempool_reg *mpr)\n \n static int\n mlx5_mr_mempool_register_primary(struct mlx5_mr_share_cache *share_cache,\n-\t\t\t\t void *pd, struct rte_mempool *mp)\n+\t\t\t\t void *pd, struct rte_mempool *mp,\n+\t\t\t\t bool is_extmem)\n {\n \tstruct mlx5_range *ranges = NULL;\n-\tstruct mlx5_mempool_reg *mpr, *new_mpr;\n+\tstruct mlx5_mempool_reg *mpr, *old_mpr, *new_mpr;\n \tunsigned int i, ranges_n;\n-\tbool share_hugepage;\n+\tbool share_hugepage, standalone = false;\n \tint ret = -1;\n \n \t/* Early check to avoid unnecessary creation of MRs. 
*/\n \trte_rwlock_read_lock(&share_cache->rwlock);\n-\tmpr = mlx5_mempool_reg_lookup(share_cache, mp);\n+\told_mpr = mlx5_mempool_reg_lookup(share_cache, mp);\n \trte_rwlock_read_unlock(&share_cache->rwlock);\n-\tif (mpr != NULL) {\n+\tif (old_mpr != NULL && (!is_extmem || old_mpr->is_extmem)) {\n \t\tDRV_LOG(DEBUG, \"Mempool %s is already registered for PD %p\",\n \t\t\tmp->name, pd);\n \t\trte_errno = EEXIST;\n \t\tgoto exit;\n \t}\n-\tif (mlx5_mempool_reg_analyze(mp, &ranges, &ranges_n,\n+\tif (mlx5_mempool_reg_analyze(mp, is_extmem, &ranges, &ranges_n,\n \t\t\t\t     &share_hugepage) < 0) {\n \t\tDRV_LOG(ERR, \"Cannot get mempool %s memory ranges\", mp->name);\n \t\trte_errno = ENOMEM;\n \t\tgoto exit;\n \t}\n-\tnew_mpr = mlx5_mempool_reg_create(mp, ranges_n);\n+\tnew_mpr = mlx5_mempool_reg_create(mp, ranges_n, is_extmem);\n \tif (new_mpr == NULL) {\n \t\tDRV_LOG(ERR,\n \t\t\t\"Cannot create a registration object for mempool %s in PD %p\",\n@@ -1667,6 +1676,12 @@ mlx5_mr_mempool_register_primary(struct mlx5_mr_share_cache *share_cache,\n \t/* Concurrent registration is not supposed to happen. */\n \trte_rwlock_write_lock(&share_cache->rwlock);\n \tmpr = mlx5_mempool_reg_lookup(share_cache, mp);\n+\tif (mpr == old_mpr && old_mpr != NULL) {\n+\t\tLIST_REMOVE(old_mpr, next);\n+\t\tstandalone = mlx5_mempool_reg_detach(mpr);\n+\t\t/* No need to flush the cache: old MRs cannot be in use. 
*/\n+\t\tmpr = NULL;\n+\t}\n \tif (mpr == NULL) {\n \t\tmlx5_mempool_reg_attach(new_mpr);\n \t\tLIST_INSERT_HEAD(&share_cache->mempool_reg_list, new_mpr, next);\n@@ -1679,6 +1694,10 @@ mlx5_mr_mempool_register_primary(struct mlx5_mr_share_cache *share_cache,\n \t\tmlx5_mempool_reg_destroy(share_cache, new_mpr, true);\n \t\trte_errno = EEXIST;\n \t\tgoto exit;\n+\t} else if (old_mpr != NULL) {\n+\t\tDRV_LOG(DEBUG, \"Mempool %s registration for PD %p updated for external memory\",\n+\t\t\tmp->name, pd);\n+\t\tmlx5_mempool_reg_destroy(share_cache, old_mpr, standalone);\n \t}\n exit:\n \tfree(ranges);\n@@ -1687,9 +1706,9 @@ mlx5_mr_mempool_register_primary(struct mlx5_mr_share_cache *share_cache,\n \n static int\n mlx5_mr_mempool_register_secondary(struct mlx5_common_device *cdev,\n-\t\t\t\t   struct rte_mempool *mp)\n+\t\t\t\t   struct rte_mempool *mp, bool is_extmem)\n {\n-\treturn mlx5_mp_req_mempool_reg(cdev, mp, true);\n+\treturn mlx5_mp_req_mempool_reg(cdev, mp, true, is_extmem);\n }\n \n /**\n@@ -1705,16 +1724,17 @@ mlx5_mr_mempool_register_secondary(struct mlx5_common_device *cdev,\n  */\n int\n mlx5_mr_mempool_register(struct mlx5_common_device *cdev,\n-\t\t\t struct rte_mempool *mp)\n+\t\t\t struct rte_mempool *mp, bool is_extmem)\n {\n \tif (mp->flags & RTE_MEMPOOL_F_NON_IO)\n \t\treturn 0;\n \tswitch (rte_eal_process_type()) {\n \tcase RTE_PROC_PRIMARY:\n \t\treturn mlx5_mr_mempool_register_primary(&cdev->mr_scache,\n-\t\t\t\t\t\t\tcdev->pd, mp);\n+\t\t\t\t\t\t\tcdev->pd, mp,\n+\t\t\t\t\t\t\tis_extmem);\n \tcase RTE_PROC_SECONDARY:\n-\t\treturn mlx5_mr_mempool_register_secondary(cdev, mp);\n+\t\treturn mlx5_mr_mempool_register_secondary(cdev, mp, is_extmem);\n \tdefault:\n \t\treturn -1;\n \t}\n@@ -1753,7 +1773,7 @@ static int\n mlx5_mr_mempool_unregister_secondary(struct mlx5_common_device *cdev,\n \t\t\t\t     struct rte_mempool *mp)\n {\n-\treturn mlx5_mp_req_mempool_reg(cdev, mp, false);\n+\treturn mlx5_mp_req_mempool_reg(cdev, mp, false, false /* 
is_extmem */);\n }\n \n /**\n@@ -1910,32 +1930,33 @@ mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,\n uint32_t\n mlx5_mr_mb2mr_bh(struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mb)\n {\n+\tstruct mlx5_mprq_buf *buf;\n \tuint32_t lkey;\n \tuintptr_t addr = (uintptr_t)mb->buf_addr;\n \tstruct mlx5_common_device *cdev = mr_ctrl->cdev;\n+\tstruct rte_mempool *mp;\n \n-\tif (cdev->config.mr_mempool_reg_en) {\n-\t\tstruct rte_mempool *mp = NULL;\n-\t\tstruct mlx5_mprq_buf *buf;\n-\n-\t\tif (!RTE_MBUF_HAS_EXTBUF(mb)) {\n-\t\t\tmp = mlx5_mb2mp(mb);\n-\t\t} else if (mb->shinfo->free_cb == mlx5_mprq_buf_free_cb) {\n-\t\t\t/* Recover MPRQ mempool. */\n-\t\t\tbuf = mb->shinfo->fcb_opaque;\n-\t\t\tmp = buf->mp;\n-\t\t}\n-\t\tif (mp != NULL) {\n-\t\t\tlkey = mlx5_mr_mempool2mr_bh(&cdev->mr_scache,\n-\t\t\t\t\t\t     mr_ctrl, mp, addr);\n-\t\t\t/*\n-\t\t\t * Lookup can only fail on invalid input, e.g. \"addr\"\n-\t\t\t * is not from \"mp\" or \"mp\" has MEMPOOL_F_NON_IO set.\n-\t\t\t */\n-\t\t\tif (lkey != UINT32_MAX)\n-\t\t\t\treturn lkey;\n-\t\t}\n-\t\t/* Fallback for generic mechanism in corner cases. */\n+\t/* Recover MPRQ mempool. */\n+\tif (RTE_MBUF_HAS_EXTBUF(mb) &&\n+\t    mb->shinfo->free_cb == mlx5_mprq_buf_free_cb) {\n+\t\tbuf = mb->shinfo->fcb_opaque;\n+\t\tmp = buf->mp;\n+\t} else {\n+\t\tmp = mlx5_mb2mp(mb);\n+\t}\n+\tlkey = mlx5_mr_mempool2mr_bh(&cdev->mr_scache,\n+\t\t\t\t     mr_ctrl, mp, addr);\n+\tif (lkey != UINT32_MAX)\n+\t\treturn lkey;\n+\t/* Register pinned external memory if the mempool is not used for Rx. */\n+\tif (cdev->config.mr_mempool_reg_en &&\n+\t    (rte_pktmbuf_priv_flags(mp) & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)) {\n+\t\tif (mlx5_mr_mempool_register(mr_ctrl->cdev, mp, true) < 0)\n+\t\t\treturn UINT32_MAX;\n+\t\tlkey = mlx5_mr_mempool2mr_bh(&cdev->mr_scache,\n+\t\t\t\t\t     mr_ctrl, mp, addr);\n+\t\tMLX5_ASSERT(lkey != UINT32_MAX);\n \t}\n+\t/* Fallback to generic mechanism in corner cases. 
*/\n \treturn mlx5_mr_addr2mr_bh(mr_ctrl, addr);\n }\ndiff --git a/drivers/common/mlx5/mlx5_common_mr.h b/drivers/common/mlx5/mlx5_common_mr.h\nindex 442b9d4694..08035f48ee 100644\n--- a/drivers/common/mlx5/mlx5_common_mr.h\n+++ b/drivers/common/mlx5/mlx5_common_mr.h\n@@ -257,20 +257,10 @@ mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb, mlx5_dereg_mr_t *dereg_mr_cb);\n __rte_internal\n int\n mlx5_mr_mempool_register(struct mlx5_common_device *cdev,\n-\t\t\t struct rte_mempool *mp);\n+\t\t\t struct rte_mempool *mp, bool is_extmem);\n __rte_internal\n int\n mlx5_mr_mempool_unregister(struct mlx5_common_device *cdev,\n \t\t\t   struct rte_mempool *mp);\n \n-/** Check if @p mp has buffers pinned in external memory. */\n-static inline bool\n-mlx5_mempool_is_extmem(struct rte_mempool *mp)\n-{\n-\treturn (mp->private_data_size ==\n-\t\tsizeof(struct rte_pktmbuf_pool_private)) &&\n-\t       (mp->elt_size >= sizeof(struct rte_mbuf)) &&\n-\t       (rte_pktmbuf_priv_flags(mp) & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF);\n-}\n-\n #endif /* RTE_PMD_MLX5_COMMON_MR_H_ */\ndiff --git a/drivers/net/mlx5/linux/mlx5_mp_os.c b/drivers/net/mlx5/linux/mlx5_mp_os.c\nindex edc5203dd6..c448a3e9eb 100644\n--- a/drivers/net/mlx5/linux/mlx5_mp_os.c\n+++ b/drivers/net/mlx5/linux/mlx5_mp_os.c\n@@ -48,7 +48,8 @@ mlx5_mp_os_handle_port_agnostic(const struct rte_mp_msg *mp_msg,\n \t\treturn rte_mp_reply(&mp_res, peer);\n \tcase MLX5_MP_REQ_MEMPOOL_REGISTER:\n \t\tmp_init_port_agnostic_msg(&mp_res, param->type);\n-\t\tres->result = mlx5_mr_mempool_register(mng->cdev, mng->mempool);\n+\t\tres->result = mlx5_mr_mempool_register(mng->cdev, mng->mempool,\n+\t\t\t\t\t\t       mng->is_extmem);\n \t\treturn rte_mp_reply(&mp_res, peer);\n \tcase MLX5_MP_REQ_MEMPOOL_UNREGISTER:\n \t\tmp_init_port_agnostic_msg(&mp_res, param->type);\ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex d5a7155392..a8ef21c6f1 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ 
-1458,7 +1458,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)\n \t\trte_errno = ENOMEM;\n \t\treturn -rte_errno;\n \t}\n-\tret = mlx5_mr_mempool_register(priv->sh->cdev, mp);\n+\tret = mlx5_mr_mempool_register(priv->sh->cdev, mp, false);\n \tif (ret < 0 && rte_errno != EEXIST) {\n \t\tret = rte_errno;\n \t\tDRV_LOG(ERR, \"port %u failed to register a mempool for Multi-Packet RQ\",\ndiff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c\nindex e2bfde19c7..bafb41d9cf 100644\n--- a/drivers/net/mlx5/mlx5_trigger.c\n+++ b/drivers/net/mlx5/mlx5_trigger.c\n@@ -147,14 +147,17 @@ mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)\n \t}\n \tfor (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {\n \t\tuint32_t flags;\n+\t\tbool is_extmem;\n \n \t\tmp = rxq_ctrl->rxq.rxseg[s].mp;\n \t\tflags = mp != rxq_ctrl->rxq.mprq_mp ?\n \t\t\trte_pktmbuf_priv_flags(mp) : 0;\n-\t\tret = mlx5_mr_mempool_register(rxq_ctrl->sh->cdev, mp);\n+\t\tis_extmem = (flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) != 0;\n+\t\tret = mlx5_mr_mempool_register(rxq_ctrl->sh->cdev, mp,\n+\t\t\t\t\t       is_extmem);\n \t\tif (ret < 0 && rte_errno != EEXIST)\n \t\t\treturn ret;\n-\t\tif ((flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) == 0)\n+\t\tif (!is_extmem)\n \t\t\trte_mempool_mem_iter(mp, mlx5_rxq_mempool_register_cb,\n \t\t\t\t\t     &rxq_ctrl->rxq);\n \t}\n",
    "prefixes": []
}