get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/105531/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 105531,
    "url": "http://patchwork.dpdk.org/api/patches/105531/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20211230143744.3550098-3-dkozlyuk@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211230143744.3550098-3-dkozlyuk@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211230143744.3550098-3-dkozlyuk@nvidia.com",
    "date": "2021-12-30T14:37:40",
    "name": "[RFC,2/6] mem: add dirty malloc element support",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "79e8f5f06dced9368ef8cc82b70fd40a1ca82783",
    "submitter": {
        "id": 2248,
        "url": "http://patchwork.dpdk.org/api/people/2248/?format=api",
        "name": "Dmitry Kozlyuk",
        "email": "dkozlyuk@nvidia.com"
    },
    "delegate": {
        "id": 24651,
        "url": "http://patchwork.dpdk.org/api/users/24651/?format=api",
        "username": "dmarchand",
        "first_name": "David",
        "last_name": "Marchand",
        "email": "david.marchand@redhat.com"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20211230143744.3550098-3-dkozlyuk@nvidia.com/mbox/",
    "series": [
        {
            "id": 21042,
            "url": "http://patchwork.dpdk.org/api/series/21042/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=21042",
            "date": "2021-12-30T14:37:38",
            "name": "Fast restart with many hugepages",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/21042/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/105531/comments/",
    "check": "warning",
    "checks": "http://patchwork.dpdk.org/api/patches/105531/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 04900A04A5;\n\tThu, 30 Dec 2021 15:38:23 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id E3A3941155;\n\tThu, 30 Dec 2021 15:38:14 +0100 (CET)",
            "from NAM04-MW2-obe.outbound.protection.outlook.com\n (mail-mw2nam08on2044.outbound.protection.outlook.com [40.107.101.44])\n by mails.dpdk.org (Postfix) with ESMTP id CE0DA4114B\n for <dev@dpdk.org>; Thu, 30 Dec 2021 15:38:11 +0100 (CET)",
            "from CO2PR18CA0049.namprd18.prod.outlook.com (2603:10b6:104:2::17)\n by BN8PR12MB2865.namprd12.prod.outlook.com (2603:10b6:408:96::13) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4823.18; Thu, 30 Dec\n 2021 14:38:09 +0000",
            "from CO1NAM11FT053.eop-nam11.prod.protection.outlook.com\n (2603:10b6:104:2:cafe::2a) by CO2PR18CA0049.outlook.office365.com\n (2603:10b6:104:2::17) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4844.14 via Frontend\n Transport; Thu, 30 Dec 2021 14:38:09 +0000",
            "from mail.nvidia.com (12.22.5.236) by\n CO1NAM11FT053.mail.protection.outlook.com (10.13.175.63) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4844.14 via Frontend Transport; Thu, 30 Dec 2021 14:38:09 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by DRHQMAIL109.nvidia.com\n (10.27.9.19) with Microsoft SMTP Server (TLS) id 15.0.1497.18;\n Thu, 30 Dec 2021 14:38:07 +0000",
            "from nvidia.com (172.20.187.6) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id 15.2.986.9; Thu, 30 Dec 2021\n 06:38:03 -0800"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=cYvhmC2wap3YhYud1m0BbHEvVFc7KjpDvQQPQ+g6Q11Y2QbooDqvwVwHx5zJMfqDs+Vhhw6nCLMRSOB5pJeGH4toq7hnDVxqqqDff6g0XVrKZdRwBwB+LeoGnYqtKmIHvt2dqfA0phw5JgjWGL2GRb5SAD2vRogJeGHVFbao70eyF/v0IsLjhlvy71b6zR2tUWHAeZEp/5zXlns9ZcG+Nv4CkK+l25S2ilTLQR8My1MgnKFLvCBDJTiVTE+WPEC0AQrcFRxlRLKjjZ59yO9oaTdVtpLtXRa4BJkAuM0GOqNfBjk10Vt2pXWt9+U2tJ8oAw5RLHHtGChtMVOD5OUmdw==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=8eCp5kln7VpcTxH7TvnKPCtOGgMUQyayCZHVigMpvTU=;\n b=AIpGtzvL4TYN24tPIISgjrQldechMYLkcRzj3sFRfDL2f3K74ow1hFDiLXFevKS187jK/qcOkF6Rsi30vVhzDXjwukW//d0x6ekHIvOfsf+B1N6BiJLey/UksD06SQJ9hT2AAn6BMZ87kJrjTeRrD94alcy6a4PC9ol7/lwPxLisvUEmEkq5q28hdoFP7Mz5N/0GYY0SvOufaVLHyRnEjnTMHYFRIQD8CgGUsBVM9ypecIRuRKB/cAPseBNBdoxlJcxOwvLkEqqvz96oHgdCzNTYIJ1ll4+B54D8cCL4gVNW6tq+wmwMMfUO+ia4mxB9Ngp+ATGdmaylq+DT31/Esw==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 12.22.5.236) smtp.rcpttodomain=intel.com smtp.mailfrom=nvidia.com; dmarc=pass\n (p=reject sp=reject pct=100) action=none header.from=nvidia.com; dkim=none\n (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=8eCp5kln7VpcTxH7TvnKPCtOGgMUQyayCZHVigMpvTU=;\n b=WWMRqBXiulx+Vt6mTLSss5/I2NT1avJp6Yi0pnKzjI+362o2ErNkjlELbBH7qktMrSDuSXwmtXmmVD54xHG5i5eWvUsONhZZTCNcHdXT/XQ0KvG5WPHZiv+Fu9wKQizsqvRsQ5kW9HLtqn+Sp8KzFPh31mF2ngVSDdpSv3ky1/yhCMTJUIOzckAMvV70R1/tjNsQQgtJURVmD5FnmZNlsuly+dv21rkaE0y1/7XBrU13k74Wya9Jdn27SZexAjN/1onnX9NbFSXXb9xO3QwuTP9ckTOK5eQAgxeKgrtF24pJpXWzQNZi4uEtVykR2iYm9qyoyaN/T73nxDUOapYDXQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 12.22.5.236)\n smtp.mailfrom=nvidia.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 12.22.5.236 as permitted sender) receiver=protection.outlook.com;\n client-ip=12.22.5.236; helo=mail.nvidia.com;",
        "From": "Dmitry Kozlyuk <dkozlyuk@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "Anatoly Burakov <anatoly.burakov@intel.com>",
        "Subject": "[RFC PATCH 2/6] mem: add dirty malloc element support",
        "Date": "Thu, 30 Dec 2021 16:37:40 +0200",
        "Message-ID": "<20211230143744.3550098-3-dkozlyuk@nvidia.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20211230143744.3550098-1-dkozlyuk@nvidia.com>",
        "References": "<20211230143744.3550098-1-dkozlyuk@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.6]",
        "X-ClientProxiedBy": "HQMAIL105.nvidia.com (172.20.187.12) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "39215f94-b823-4b21-f8d0-08d9cba20004",
        "X-MS-TrafficTypeDiagnostic": "BN8PR12MB2865:EE_",
        "X-Microsoft-Antispam-PRVS": "\n <BN8PR12MB286591C5C747595ACEA5CFB4B9459@BN8PR12MB2865.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:9508;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n KAgZ80A41HjvA5KVj0CpPCyCbfy5gXNA2o14TgkU3fK3rbRrC2rklXFExC8hOCUsgkSeG8usBm9etPXrFL7I8WkvMZA9HvKDnahFHMQ6jUI/uuGH2xuIOLkXqsFnhQzbfFbEEZ1p7t36o5vZtWystFtDOPXsf624QWCM4utb9Nnq4aiABq1GpKniiv4PP115l+sRmCHMwUckNp5jK4b8koYChEpbLyWex/utre0YYHmEnF5d4gK/j7lDGbSw/uISo8WlNm1nCzmwdFeRD9m+m+78FCeeHcBRnV5Xu27bfwm5GNSnIH/ZffSIETBnMTHs9QWFryBJme8A9TBHdovvkaAcvyryb9PRoM61pimSMBG1I6RELXWh+Rvo9GzOtakr1KSD+6d/GTQUN4WMIqL5A1njuityjiCyTd+Qiq/wpSjCVLMd3JT0OwQG/uLtkslU2KMQMiJBU6PlqOl9o7A4POHe2WnCns353FMMwyTKfbFwkyJQFPsHHDCLnpy/IdxX7QAGG3ikiAlI3sG1HQeFUGelxCXI0VZpE/EcRVFwLhEb4QaXM6SfeUQmSXmGDWkKfdUkXWXPdgEt9zdWiWR8IUPbyuBxdBq2NSYrVtIguqNATHcPpVQlDiGMVIUshdz9/dgrtydiDZph5rD5bUyYluStJuyXaTFpMUnVOPYcGMEysgrx2K9/xuA7uT9IYE6ONbScSjuHZzVoNZSJZxTVjLd3IXPJTOW2iTwDwUEISB+W5ooMuuUai7gCoCnZBwvlN6Z7pruNsgSqcdO+ixtt/97Mfbs+NHNXAsYmJIjEZ+/ne0/Nmpuf3Xi6jxieHitayJSNm4m/z52UfqQ/VBbzsA==",
        "X-Forefront-Antispam-Report": "CIP:12.22.5.236; CTRY:US; LANG:en; SCL:1; SRV:;\n IPV:CAL; SFV:NSPM; H:mail.nvidia.com; PTR:InfoNoRecords; CAT:NONE;\n SFS:(4636009)(40470700002)(36840700001)(46966006)(70206006)(81166007)(7696005)(2616005)(26005)(4326008)(316002)(6916009)(508600001)(83380400001)(47076005)(1076003)(6666004)(70586007)(16526019)(6286002)(86362001)(36756003)(2906002)(5660300002)(186003)(40460700001)(356005)(8676002)(426003)(336012)(8936002)(82310400004)(36860700001)(55016003)(14143004)(36900700001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "30 Dec 2021 14:38:09.3852 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 39215f94-b823-4b21-f8d0-08d9cba20004",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[12.22.5.236];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n CO1NAM11FT053.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BN8PR12MB2865",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "EAL malloc layer assumed all free elements content\nis filled with zeros (\"clean\"), as opposed to uninitialized (\"dirty\").\nThis assumption was ensured in two ways:\n1. EAL memalloc layer always returned clean memory.\n2. Freed memory was cleared before returning into the heap.\n\nClearing the memory can be as slow as around 14 GiB/s.\nTo save doing so, memalloc layer is allowed to return dirty memory.\nSuch segments being marked with RTE_MEMSEG_FLAG_DIRTY.\nThe allocator tracks elements that contain dirty memory\nusing the new flag in the element header.\nWhen clean memory is requested via rte_zmalloc*()\nand the suitable element is dirty, it is cleared on allocation.\nWhen memory is deallocated, the freed element is joined\nwith adjacent free elements, and the dirty flag is updated:\n\n    dirty + freed + dirty = dirty  =>  no need to clean\n            freed + dirty = dirty      the freed memory\n\n    clean + freed + clean = clean  =>  freed memory\n    clean + freed         = clean      must be cleared\n            freed + clean = clean\n            freed         = clean\n\nAs a result, memory is either cleared on free, as before,\nor it will be cleared on allocation if need be, but never twice.\n\nSigned-off-by: Dmitry Kozlyuk <dkozlyuk@nvidia.com>\n---\n lib/eal/common/malloc_elem.c | 22 +++++++++++++++++++---\n lib/eal/common/malloc_elem.h | 11 +++++++++--\n lib/eal/common/malloc_heap.c | 18 ++++++++++++------\n lib/eal/common/rte_malloc.c  | 21 ++++++++++++++-------\n lib/eal/include/rte_memory.h |  8 ++++++--\n 5 files changed, 60 insertions(+), 20 deletions(-)",
    "diff": "diff --git a/lib/eal/common/malloc_elem.c b/lib/eal/common/malloc_elem.c\nindex bdd20a162e..e04e0890fb 100644\n--- a/lib/eal/common/malloc_elem.c\n+++ b/lib/eal/common/malloc_elem.c\n@@ -129,7 +129,7 @@ malloc_elem_find_max_iova_contig(struct malloc_elem *elem, size_t align)\n void\n malloc_elem_init(struct malloc_elem *elem, struct malloc_heap *heap,\n \t\tstruct rte_memseg_list *msl, size_t size,\n-\t\tstruct malloc_elem *orig_elem, size_t orig_size)\n+\t\tstruct malloc_elem *orig_elem, size_t orig_size, bool dirty)\n {\n \telem->heap = heap;\n \telem->msl = msl;\n@@ -137,6 +137,7 @@ malloc_elem_init(struct malloc_elem *elem, struct malloc_heap *heap,\n \telem->next = NULL;\n \tmemset(&elem->free_list, 0, sizeof(elem->free_list));\n \telem->state = ELEM_FREE;\n+\telem->dirty = dirty;\n \telem->size = size;\n \telem->pad = 0;\n \telem->orig_elem = orig_elem;\n@@ -300,7 +301,7 @@ split_elem(struct malloc_elem *elem, struct malloc_elem *split_pt)\n \tconst size_t new_elem_size = elem->size - old_elem_size;\n \n \tmalloc_elem_init(split_pt, elem->heap, elem->msl, new_elem_size,\n-\t\t\t elem->orig_elem, elem->orig_size);\n+\t\t\telem->orig_elem, elem->orig_size, elem->dirty);\n \tsplit_pt->prev = elem;\n \tsplit_pt->next = next_elem;\n \tif (next_elem)\n@@ -506,6 +507,7 @@ join_elem(struct malloc_elem *elem1, struct malloc_elem *elem2)\n \telse\n \t\telem1->heap->last = elem1;\n \telem1->next = next;\n+\telem1->dirty |= elem2->dirty;\n \tif (elem1->pad) {\n \t\tstruct malloc_elem *inner = RTE_PTR_ADD(elem1, elem1->pad);\n \t\tinner->size = elem1->size - elem1->pad;\n@@ -579,6 +581,14 @@ malloc_elem_free(struct malloc_elem *elem)\n \tptr = RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN);\n \tdata_len = elem->size - MALLOC_ELEM_OVERHEAD;\n \n+\t/*\n+\t * Consider the element clean for the purposes of joining.\n+\t * If both neighbors are clean or non-existent,\n+\t * the joint element will be clean,\n+\t * which means the memory should be cleared.\n+\t * There 
is no need to clear the memory if the joint element is dirty.\n+\t */\n+\telem->dirty = false;\n \telem = malloc_elem_join_adjacent_free(elem);\n \n \tmalloc_elem_free_list_insert(elem);\n@@ -588,8 +598,14 @@ malloc_elem_free(struct malloc_elem *elem)\n \t/* decrease heap's count of allocated elements */\n \telem->heap->alloc_count--;\n \n-\t/* poison memory */\n+#ifndef RTE_MALLOC_DEBUG\n+\t/* Normally clear the memory when needed. */\n+\tif (!elem->dirty)\n+\t\tmemset(ptr, 0, data_len);\n+#else\n+\t/* Always poison the memory in debug mode. */\n \tmemset(ptr, MALLOC_POISON, data_len);\n+#endif\n \n \treturn elem;\n }\ndiff --git a/lib/eal/common/malloc_elem.h b/lib/eal/common/malloc_elem.h\nindex 15d8ba7af2..f2aa98821b 100644\n--- a/lib/eal/common/malloc_elem.h\n+++ b/lib/eal/common/malloc_elem.h\n@@ -27,7 +27,13 @@ struct malloc_elem {\n \tLIST_ENTRY(malloc_elem) free_list;\n \t/**< list of free elements in heap */\n \tstruct rte_memseg_list *msl;\n-\tvolatile enum elem_state state;\n+\t/** Element state, @c dirty and @c pad validity depends on it. */\n+\t/* An extra bit is needed to represent enum elem_state as signed int. */\n+\tenum elem_state state : 3;\n+\t/** If state == ELEM_FREE: the memory is not filled with zeroes. */\n+\tuint32_t dirty : 1;\n+\t/** Reserved for future use. 
*/\n+\tuint32_t reserved : 28;\n \tuint32_t pad;\n \tsize_t size;\n \tstruct malloc_elem *orig_elem;\n@@ -320,7 +326,8 @@ malloc_elem_init(struct malloc_elem *elem,\n \t\tstruct rte_memseg_list *msl,\n \t\tsize_t size,\n \t\tstruct malloc_elem *orig_elem,\n-\t\tsize_t orig_size);\n+\t\tsize_t orig_size,\n+\t\tbool dirty);\n \n void\n malloc_elem_insert(struct malloc_elem *elem);\ndiff --git a/lib/eal/common/malloc_heap.c b/lib/eal/common/malloc_heap.c\nindex 55aad2711b..24080fc473 100644\n--- a/lib/eal/common/malloc_heap.c\n+++ b/lib/eal/common/malloc_heap.c\n@@ -93,11 +93,11 @@ malloc_socket_to_heap_id(unsigned int socket_id)\n  */\n static struct malloc_elem *\n malloc_heap_add_memory(struct malloc_heap *heap, struct rte_memseg_list *msl,\n-\t\tvoid *start, size_t len)\n+\t\tvoid *start, size_t len, bool dirty)\n {\n \tstruct malloc_elem *elem = start;\n \n-\tmalloc_elem_init(elem, heap, msl, len, elem, len);\n+\tmalloc_elem_init(elem, heap, msl, len, elem, len, dirty);\n \n \tmalloc_elem_insert(elem);\n \n@@ -135,7 +135,8 @@ malloc_add_seg(const struct rte_memseg_list *msl,\n \n \tfound_msl = &mcfg->memsegs[msl_idx];\n \n-\tmalloc_heap_add_memory(heap, found_msl, ms->addr, len);\n+\tmalloc_heap_add_memory(heap, found_msl, ms->addr, len,\n+\t\t\tms->flags & RTE_MEMSEG_FLAG_DIRTY);\n \n \theap->total_size += len;\n \n@@ -303,7 +304,8 @@ alloc_pages_on_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,\n \tstruct rte_memseg_list *msl;\n \tstruct malloc_elem *elem = NULL;\n \tsize_t alloc_sz;\n-\tint allocd_pages;\n+\tint allocd_pages, i;\n+\tbool dirty = false;\n \tvoid *ret, *map_addr;\n \n \talloc_sz = (size_t)pg_sz * n_segs;\n@@ -372,8 +374,12 @@ alloc_pages_on_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,\n \t\tgoto fail;\n \t}\n \n+\t/* Element is dirty if it contains at least one dirty page. 
*/\n+\tfor (i = 0; i < allocd_pages; i++)\n+\t\tdirty |= ms[i]->flags & RTE_MEMSEG_FLAG_DIRTY;\n+\n \t/* add newly minted memsegs to malloc heap */\n-\telem = malloc_heap_add_memory(heap, msl, map_addr, alloc_sz);\n+\telem = malloc_heap_add_memory(heap, msl, map_addr, alloc_sz, dirty);\n \n \t/* try once more, as now we have allocated new memory */\n \tret = find_suitable_element(heap, elt_size, flags, align, bound,\n@@ -1260,7 +1266,7 @@ malloc_heap_add_external_memory(struct malloc_heap *heap,\n \tmemset(msl->base_va, 0, msl->len);\n \n \t/* now, add newly minted memory to the malloc heap */\n-\tmalloc_heap_add_memory(heap, msl, msl->base_va, msl->len);\n+\tmalloc_heap_add_memory(heap, msl, msl->base_va, msl->len, false);\n \n \theap->total_size += msl->len;\n \ndiff --git a/lib/eal/common/rte_malloc.c b/lib/eal/common/rte_malloc.c\nindex d0bec26920..71a3f7ecb4 100644\n--- a/lib/eal/common/rte_malloc.c\n+++ b/lib/eal/common/rte_malloc.c\n@@ -115,15 +115,22 @@ rte_zmalloc_socket(const char *type, size_t size, unsigned align, int socket)\n {\n \tvoid *ptr = rte_malloc_socket(type, size, align, socket);\n \n+\tif (ptr != NULL) {\n+\t\tstruct malloc_elem *elem = malloc_elem_from_data(ptr);\n+\n+\t\tif (elem->dirty) {\n+\t\t\tmemset(ptr, 0, size);\n+\t\t} else {\n #ifdef RTE_MALLOC_DEBUG\n-\t/*\n-\t * If DEBUG is enabled, then freed memory is marked with poison\n-\t * value and set to zero on allocation.\n-\t * If DEBUG is not enabled then  memory is already zeroed.\n-\t */\n-\tif (ptr != NULL)\n-\t\tmemset(ptr, 0, size);\n+\t\t\t/*\n+\t\t\t * If DEBUG is enabled, then freed memory is marked\n+\t\t\t * with a poison value and set to zero on allocation.\n+\t\t\t * If DEBUG is disabled then memory is already zeroed.\n+\t\t\t */\n+\t\t\tmemset(ptr, 0, size);\n #endif\n+\t\t}\n+\t}\n \n \trte_eal_trace_mem_zmalloc(type, size, align, socket, ptr);\n \treturn ptr;\ndiff --git a/lib/eal/include/rte_memory.h b/lib/eal/include/rte_memory.h\nindex 6d018629ae..d76e7ba780 
100644\n--- a/lib/eal/include/rte_memory.h\n+++ b/lib/eal/include/rte_memory.h\n@@ -19,6 +19,7 @@\n extern \"C\" {\n #endif\n \n+#include <rte_bitops.h>\n #include <rte_common.h>\n #include <rte_compat.h>\n #include <rte_config.h>\n@@ -37,11 +38,14 @@ extern \"C\" {\n \n #define SOCKET_ID_ANY -1                    /**< Any NUMA socket. */\n \n+/** Prevent this segment from being freed back to the OS. */\n+#define RTE_MEMSEG_FLAG_DO_NOT_FREE RTE_BIT32(0)\n+/** This segment is not fileld with zeros. */\n+#define RTE_MEMSEG_FLAG_DIRTY RTE_BIT32(1)\n+\n /**\n  * Physical memory segment descriptor.\n  */\n-#define RTE_MEMSEG_FLAG_DO_NOT_FREE (1 << 0)\n-/**< Prevent this segment from being freed back to the OS. */\n struct rte_memseg {\n \trte_iova_t iova;            /**< Start IO address. */\n \tRTE_STD_C11\n",
    "prefixes": [
        "RFC",
        "2/6"
    ]
}