get:
Show a patch.

patch:
Partially update a patch.

put:
Fully update a patch.

GET /api/patches/62112/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 62112,
    "url": "http://patchwork.dpdk.org/api/patches/62112/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/20191028140122.9592-6-olivier.matz@6wind.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20191028140122.9592-6-olivier.matz@6wind.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20191028140122.9592-6-olivier.matz@6wind.com",
    "date": "2019-10-28T14:01:22",
    "name": "[5/5] mempool: prevent objects from being across pages",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "a105e13e000079fb784081f56d4f40fedd62aafb",
    "submitter": {
        "id": 8,
        "url": "http://patchwork.dpdk.org/api/people/8/?format=api",
        "name": "Olivier Matz",
        "email": "olivier.matz@6wind.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/20191028140122.9592-6-olivier.matz@6wind.com/mbox/",
    "series": [
        {
            "id": 7108,
            "url": "http://patchwork.dpdk.org/api/series/7108/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=7108",
            "date": "2019-10-28T14:01:17",
            "name": "mempool: avoid objects allocations across pages",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/7108/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/62112/comments/",
    "check": "fail",
    "checks": "http://patchwork.dpdk.org/api/patches/62112/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 0A7431BF4D;\n\tMon, 28 Oct 2019 15:02:08 +0100 (CET)",
            "from proxy.6wind.com (host.76.145.23.62.rev.coltfrance.com\n\t[62.23.145.76]) by dpdk.org (Postfix) with ESMTP id 805DC1BEF1\n\tfor <dev@dpdk.org>; Mon, 28 Oct 2019 15:01:51 +0100 (CET)",
            "from glumotte.dev.6wind.com. (unknown [10.16.0.195])\n\tby proxy.6wind.com (Postfix) with ESMTP id 64E383373B6;\n\tMon, 28 Oct 2019 15:01:51 +0100 (CET)"
        ],
        "From": "Olivier Matz <olivier.matz@6wind.com>",
        "To": "dev@dpdk.org",
        "Cc": "Anatoly Burakov <anatoly.burakov@intel.com>,\n\tAndrew Rybchenko <arybchenko@solarflare.com>,\n\tFerruh Yigit <ferruh.yigit@linux.intel.com>,\n\t\"Giridharan, Ganesan\" <ggiridharan@rbbn.com>,\n\tJerin Jacob Kollanukkaran <jerinj@marvell.com>,\n\t\"Kiran Kumar Kokkilagadda\" <kirankumark@marvell.com>,\n\tStephen Hemminger <sthemmin@microsoft.com>,\n\tThomas Monjalon <thomas@monjalon.net>,\n\tVamsi Krishna Attunuru <vattunuru@marvell.com>",
        "Date": "Mon, 28 Oct 2019 15:01:22 +0100",
        "Message-Id": "<20191028140122.9592-6-olivier.matz@6wind.com>",
        "X-Mailer": "git-send-email 2.20.1",
        "In-Reply-To": "<20191028140122.9592-1-olivier.matz@6wind.com>",
        "References": "<20190719133845.32432-1-olivier.matz@6wind.com>\n\t<20191028140122.9592-1-olivier.matz@6wind.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Subject": "[dpdk-dev] [PATCH 5/5] mempool: prevent objects from being across\n\tpages",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "When populating a mempool, ensure that objects are not located across\nseveral pages, except if user did not request iova contiguous objects.\n\nSigned-off-by: Vamsi Krishna Attunuru <vattunuru@marvell.com>\nSigned-off-by: Olivier Matz <olivier.matz@6wind.com>\n---\n lib/librte_mempool/rte_mempool.c             | 23 +++++-----------\n lib/librte_mempool/rte_mempool_ops_default.c | 29 ++++++++++++++++++--\n 2 files changed, 33 insertions(+), 19 deletions(-)",
    "diff": "diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c\nindex 7664764e5..b23fd1b06 100644\n--- a/lib/librte_mempool/rte_mempool.c\n+++ b/lib/librte_mempool/rte_mempool.c\n@@ -428,8 +428,6 @@ rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz)\n \n \tif (!need_iova_contig_obj)\n \t\t*pg_sz = 0;\n-\telse if (!alloc_in_ext_mem && rte_eal_iova_mode() == RTE_IOVA_VA)\n-\t\t*pg_sz = 0;\n \telse if (rte_eal_has_hugepages() || alloc_in_ext_mem)\n \t\t*pg_sz = get_min_page_size(mp->socket_id);\n \telse\n@@ -478,17 +476,15 @@ rte_mempool_populate_default(struct rte_mempool *mp)\n \t * then just set page shift and page size to 0, because the user has\n \t * indicated that there's no need to care about anything.\n \t *\n-\t * if we do need contiguous objects, there is also an option to reserve\n-\t * the entire mempool memory as one contiguous block of memory, in\n-\t * which case the page shift and alignment wouldn't matter as well.\n+\t * if we do need contiguous objects (if a mempool driver has its\n+\t * own calc_size() method returning min_chunk_size = mem_size),\n+\t * there is also an option to reserve the entire mempool memory\n+\t * as one contiguous block of memory.\n \t *\n \t * if we require contiguous objects, but not necessarily the entire\n-\t * mempool reserved space to be contiguous, then there are two options.\n-\t *\n-\t * if our IO addresses are virtual, not actual physical (IOVA as VA\n-\t * case), then no page shift needed - our memory allocation will give us\n-\t * contiguous IO memory as far as the hardware is concerned, so\n-\t * act as if we're getting contiguous memory.\n+\t * mempool reserved space to be contiguous, pg_sz will be != 0,\n+\t * and the default ops->populate() will take care of not placing\n+\t * objects across pages.\n \t *\n \t * if our IO addresses are physical, we may get memory from bigger\n \t * pages, or we might get memory from smaller pages, and how much of it\n@@ -501,11 
+497,6 @@ rte_mempool_populate_default(struct rte_mempool *mp)\n \t *\n \t * If we fail to get enough contiguous memory, then we'll go and\n \t * reserve space in smaller chunks.\n-\t *\n-\t * We also have to take into account the fact that memory that we're\n-\t * going to allocate from can belong to an externally allocated memory\n-\t * area, in which case the assumption of IOVA as VA mode being\n-\t * synonymous with IOVA contiguousness will not hold.\n \t */\n \n \tneed_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);\ndiff --git a/lib/librte_mempool/rte_mempool_ops_default.c b/lib/librte_mempool/rte_mempool_ops_default.c\nindex f6aea7662..dd09a0a32 100644\n--- a/lib/librte_mempool/rte_mempool_ops_default.c\n+++ b/lib/librte_mempool/rte_mempool_ops_default.c\n@@ -61,21 +61,44 @@ rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,\n \treturn mem_size;\n }\n \n+/* Returns -1 if object crosses a page boundary, else returns 0 */\n+static int\n+check_obj_bounds(char *obj, size_t pg_sz, size_t elt_sz)\n+{\n+\tif (pg_sz == 0)\n+\t\treturn 0;\n+\tif (elt_sz > pg_sz)\n+\t\treturn 0;\n+\tif (RTE_PTR_ALIGN(obj, pg_sz) != RTE_PTR_ALIGN(obj + elt_sz - 1, pg_sz))\n+\t\treturn -1;\n+\treturn 0;\n+}\n+\n int\n rte_mempool_op_populate_default(struct rte_mempool *mp, unsigned int max_objs,\n \t\tvoid *vaddr, rte_iova_t iova, size_t len,\n \t\trte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)\n {\n-\tsize_t total_elt_sz;\n+\tchar *va = vaddr;\n+\tsize_t total_elt_sz, pg_sz;\n \tsize_t off;\n \tunsigned int i;\n \tvoid *obj;\n \n+\trte_mempool_get_page_size(mp, &pg_sz);\n+\n \ttotal_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;\n \n-\tfor (off = 0, i = 0; off + total_elt_sz <= len && i < max_objs; i++) {\n+\tfor (off = 0, i = 0; i < max_objs; i++) {\n+\t\t/* align offset to next page start if required */\n+\t\tif (check_obj_bounds(va + off, pg_sz, total_elt_sz) < 0)\n+\t\t\toff += RTE_PTR_ALIGN_CEIL(va + off, pg_sz) - (va + 
off);\n+\n+\t\tif (off + total_elt_sz > len)\n+\t\t\tbreak;\n+\n \t\toff += mp->header_size;\n-\t\tobj = (char *)vaddr + off;\n+\t\tobj = va + off;\n \t\tobj_cb(mp, obj_cb_arg, obj,\n \t\t       (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off));\n \t\trte_mempool_ops_enqueue_bulk(mp, &obj, 1);\n",
    "prefixes": [
        "5/5"
    ]
}