get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch.
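
As a quick, hedged illustration (not part of the endpoint description above): the read path can be exercised with any HTTP client. The sketch below assumes Python with the requests package and anonymous read access, which public Patchwork instances allow for GET; dropping the ?format=api suffix yields the plain JSON shown in the sample exchange that follows.

import requests

# Fetch one patch as JSON; GET needs no authentication on a
# public Patchwork instance such as patchwork.dpdk.org.
resp = requests.get("http://patchwork.dpdk.org/api/patches/11275/")
resp.raise_for_status()
patch = resp.json()

print(patch["name"])   # the patch subject
print(patch["state"])  # e.g. "changes-requested"
print(patch["mbox"])   # URL of the raw mbox, suitable for git-am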

GET /api/patches/11275/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 11275,
    "url": "http://patchwork.dpdk.org/api/patches/11275/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/1457517037-71693-5-git-send-email-david.hunt@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1457517037-71693-5-git-send-email-david.hunt@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1457517037-71693-5-git-send-email-david.hunt@intel.com",
    "date": "2016-03-09T09:50:37",
    "name": "[dpdk-dev,v3,4/4] mempool: add in the RTE_NEXT_ABI for ABI breakages",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "1422ce858d148a279109eb94e8bf75250f769104",
    "submitter": {
        "id": 342,
        "url": "http://patchwork.dpdk.org/api/people/342/?format=api",
        "name": "Hunt, David",
        "email": "david.hunt@intel.com"
    },
    "delegate": null,
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/1457517037-71693-5-git-send-email-david.hunt@intel.com/mbox/",
    "series": [],
    "comments": "http://patchwork.dpdk.org/api/patches/11275/comments/",
    "check": "pending",
    "checks": "http://patchwork.dpdk.org/api/patches/11275/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id B641B2BC2;\n\tWed,  9 Mar 2016 10:51:18 +0100 (CET)",
            "from mga09.intel.com (mga09.intel.com [134.134.136.24])\n\tby dpdk.org (Postfix) with ESMTP id 418F0378E\n\tfor <dev@dpdk.org>; Wed,  9 Mar 2016 10:51:16 +0100 (CET)",
            "from fmsmga001.fm.intel.com ([10.253.24.23])\n\tby orsmga102.jf.intel.com with ESMTP; 09 Mar 2016 01:51:15 -0800",
            "from sie-lab-214-251.ir.intel.com (HELO silpixa373510.ir.intel.com)\n\t([10.237.214.251])\n\tby fmsmga001.fm.intel.com with ESMTP; 09 Mar 2016 01:51:14 -0800"
        ],
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.24,310,1455004800\"; d=\"scan'208\";a=\"920053051\"",
        "From": "David Hunt <david.hunt@intel.com>",
        "To": "dev@dpdk.org",
        "Date": "Wed,  9 Mar 2016 09:50:37 +0000",
        "Message-Id": "<1457517037-71693-5-git-send-email-david.hunt@intel.com>",
        "X-Mailer": "git-send-email 2.5.0",
        "In-Reply-To": "<1457517037-71693-1-git-send-email-david.hunt@intel.com>",
        "References": "<1455634095-4183-1-git-send-email-david.hunt@intel.com>\n\t<1457517037-71693-1-git-send-email-david.hunt@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v3 4/4] mempool: add in the RTE_NEXT_ABI for ABI\n\tbreakages",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch is for those people who want to be easily able to switch\nbetween the new mempool layout and the old. Change the value of\nRTE_NEXT_ABI in common_base config file\n\nv3: Updated to take re-work of file layouts into consideration\n\nv2: Kept all the NEXT_ABI defs to this patch so as to make the\nprevious patches easier to read, and also to imake it clear what\ncode is necessary to keep ABI compatibility when NEXT_ABI is\ndisabled.\n\nSigned-off-by: David Hunt <david.hunt@intel.com>\n---\n app/test/Makefile                |   2 +\n app/test/test_mempool_perf.c     |   3 +\n lib/librte_mbuf/rte_mbuf.c       |   7 ++\n lib/librte_mempool/Makefile      |   2 +\n lib/librte_mempool/rte_mempool.c | 245 ++++++++++++++++++++++++++++++++++++++-\n lib/librte_mempool/rte_mempool.h |  59 +++++++++-\n 6 files changed, 315 insertions(+), 3 deletions(-)",
    "diff": "diff --git a/app/test/Makefile b/app/test/Makefile\nindex 9a2f75f..8fcf0c2 100644\n--- a/app/test/Makefile\n+++ b/app/test/Makefile\n@@ -74,7 +74,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_TIMER) += test_timer_perf.c\n SRCS-$(CONFIG_RTE_LIBRTE_TIMER) += test_timer_racecond.c\n \n SRCS-y += test_mempool.c\n+ifeq ($(CONFIG_RTE_NEXT_ABI),y)\n SRCS-y += test_ext_mempool.c\n+endif\n SRCS-y += test_mempool_perf.c\n \n SRCS-y += test_mbuf.c\ndiff --git a/app/test/test_mempool_perf.c b/app/test/test_mempool_perf.c\nindex 091c1df..ca69e49 100644\n--- a/app/test/test_mempool_perf.c\n+++ b/app/test/test_mempool_perf.c\n@@ -161,6 +161,9 @@ per_lcore_mempool_test(__attribute__((unused)) void *arg)\n \t\t\t\t\t\t\t   n_get_bulk);\n \t\t\t\tif (unlikely(ret < 0)) {\n \t\t\t\t\trte_mempool_dump(stdout, mp);\n+#ifndef RTE_NEXT_ABI\n+\t\t\t\t\trte_ring_dump(stdout, mp->ring);\n+#endif\n \t\t\t\t\t/* in this case, objects are lost... */\n \t\t\t\t\treturn -1;\n \t\t\t\t}\ndiff --git a/lib/librte_mbuf/rte_mbuf.c b/lib/librte_mbuf/rte_mbuf.c\nindex 42b0cd1..967d987 100644\n--- a/lib/librte_mbuf/rte_mbuf.c\n+++ b/lib/librte_mbuf/rte_mbuf.c\n@@ -167,6 +167,7 @@ rte_pktmbuf_pool_create(const char *name, unsigned n,\n \tmbp_priv.mbuf_data_room_size = data_room_size;\n \tmbp_priv.mbuf_priv_size = priv_size;\n \n+#ifdef RTE_NEXT_ABI\n #ifdef RTE_MEMPOOL_HANDLER_EXT\n \treturn rte_mempool_create_ext(name, n, elt_size,\n \t\tcache_size, sizeof(struct rte_pktmbuf_pool_private),\n@@ -179,6 +180,12 @@ rte_pktmbuf_pool_create(const char *name, unsigned n,\n \t\trte_pktmbuf_pool_init, &mbp_priv, rte_pktmbuf_init, NULL,\n \t\tsocket_id, 0);\n #endif\n+#else\n+\treturn rte_mempool_create(name, n, elt_size,\n+\t\tcache_size, sizeof(struct rte_pktmbuf_pool_private),\n+\t\trte_pktmbuf_pool_init, &mbp_priv, rte_pktmbuf_init, NULL,\n+\t\tsocket_id, 0);\n+#endif\n }\n \n /* do some sanity checks on a mbuf: panic if it fails */\ndiff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile\nindex a32c89e..a27eef9 100644\n--- a/lib/librte_mempool/Makefile\n+++ b/lib/librte_mempool/Makefile\n@@ -42,8 +42,10 @@ LIBABIVER := 1\n \n # all source are stored in SRCS-y\n SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool.c\n+ifeq ($(CONFIG_RTE_NEXT_ABI),y)\n SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_handler.c\n SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_default.c\n+endif\n \n ifeq ($(CONFIG_RTE_LIBRTE_XEN_DOM0),y)\n SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_dom0_mempool.c\ndiff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c\nindex 7342a7f..e77ef47 100644\n--- a/lib/librte_mempool/rte_mempool.c\n+++ b/lib/librte_mempool/rte_mempool.c\n@@ -59,7 +59,10 @@\n #include <rte_spinlock.h>\n \n #include \"rte_mempool.h\"\n+#ifdef RTE_NEXT_ABI\n #include \"rte_mempool_handler.h\"\n+#endif\n+\n \n TAILQ_HEAD(rte_mempool_list, rte_tailq_entry);\n \n@@ -150,7 +153,11 @@ mempool_add_elem(struct rte_mempool *mp, void *obj, uint32_t obj_idx,\n \t\tobj_init(mp, obj_init_arg, obj, obj_idx);\n \n \t/* enqueue in ring */\n+#ifdef RTE_NEXT_ABI\n \trte_mempool_ext_put_bulk(mp, &obj, 1);\n+#else\n+\trte_ring_mp_enqueue_bulk(mp->ring, &obj, 1);\n+#endif\n }\n \n uint32_t\n@@ -420,6 +427,7 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,\n \t\t\t\t\t       MEMPOOL_PG_SHIFT_MAX);\n }\n \n+#ifdef RTE_NEXT_ABI\n /*\n  * Common mempool create function.\n  * Create the mempool over already allocated chunk of memory.\n@@ -711,6 +719,229 @@ rte_mempool_xmem_create(const char *name, unsigned n, 
unsigned elt_size,\n \n \treturn mp;\n }\n+#else\n+/*\n+ * Create the mempool over already allocated chunk of memory.\n+ * That external memory buffer can consists of physically disjoint pages.\n+ * Setting vaddr to NULL, makes mempool to fallback to original behaviour\n+ * and allocate space for mempool and it's elements as one big chunk of\n+ * physically continuos memory.\n+ * */\n+struct rte_mempool *\n+rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,\n+\t\tunsigned cache_size, unsigned private_data_size,\n+\t\trte_mempool_ctor_t *mp_init, void *mp_init_arg,\n+\t\trte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,\n+\t\tint socket_id, unsigned flags, void *vaddr,\n+\t\tconst phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift)\n+{\n+\tchar mz_name[RTE_MEMZONE_NAMESIZE];\n+\tchar rg_name[RTE_RING_NAMESIZE];\n+\tstruct rte_mempool_list *mempool_list;\n+\tstruct rte_mempool *mp = NULL;\n+\tstruct rte_tailq_entry *te;\n+\tstruct rte_ring *r;\n+\tconst struct rte_memzone *mz;\n+\tsize_t mempool_size;\n+\tint mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;\n+\tint rg_flags = 0;\n+\tvoid *obj;\n+\tstruct rte_mempool_objsz objsz;\n+\tvoid *startaddr;\n+\tint page_size = getpagesize();\n+\n+\t/* compilation-time checks */\n+\tRTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &\n+\t\t\t  RTE_CACHE_LINE_MASK) != 0);\n+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0\n+\tRTE_BUILD_BUG_ON((sizeof(struct rte_mempool_cache) &\n+\t\t\t  RTE_CACHE_LINE_MASK) != 0);\n+\tRTE_BUILD_BUG_ON((offsetof(struct rte_mempool, local_cache) &\n+\t\t\t  RTE_CACHE_LINE_MASK) != 0);\n+#endif\n+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG\n+\tRTE_BUILD_BUG_ON((sizeof(struct rte_mempool_debug_stats) &\n+\t\t\t  RTE_CACHE_LINE_MASK) != 0);\n+\tRTE_BUILD_BUG_ON((offsetof(struct rte_mempool, stats) &\n+\t\t\t  RTE_CACHE_LINE_MASK) != 0);\n+#endif\n+\n+\tmempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);\n+\n+\t/* asked cache too big */\n+\tif (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE ||\n+\t    CALC_CACHE_FLUSHTHRESH(cache_size) > n) {\n+\t\trte_errno = EINVAL;\n+\t\treturn NULL;\n+\t}\n+\n+\t/* check that we have both VA and PA */\n+\tif (vaddr != NULL && paddr == NULL) {\n+\t\trte_errno = EINVAL;\n+\t\treturn NULL;\n+\t}\n+\n+\t/* Check that pg_num and pg_shift parameters are valid. */\n+\tif (pg_num < RTE_DIM(mp->elt_pa) || pg_shift > MEMPOOL_PG_SHIFT_MAX) {\n+\t\trte_errno = EINVAL;\n+\t\treturn NULL;\n+\t}\n+\n+\t/* \"no cache align\" imply \"no spread\" */\n+\tif (flags & MEMPOOL_F_NO_CACHE_ALIGN)\n+\t\tflags |= MEMPOOL_F_NO_SPREAD;\n+\n+\t/* ring flags */\n+\tif (flags & MEMPOOL_F_SP_PUT)\n+\t\trg_flags |= RING_F_SP_ENQ;\n+\tif (flags & MEMPOOL_F_SC_GET)\n+\t\trg_flags |= RING_F_SC_DEQ;\n+\n+\t/* calculate mempool object sizes. 
*/\n+\tif (!rte_mempool_calc_obj_size(elt_size, flags, &objsz)) {\n+\t\trte_errno = EINVAL;\n+\t\treturn NULL;\n+\t}\n+\n+\trte_rwlock_write_lock(RTE_EAL_MEMPOOL_RWLOCK);\n+\n+\t/* allocate the ring that will be used to store objects */\n+\t/* Ring functions will return appropriate errors if we are\n+\t * running as a secondary process etc., so no checks made\n+\t * in this function for that condition */\n+\tsnprintf(rg_name, sizeof(rg_name), RTE_MEMPOOL_MZ_FORMAT, name);\n+\tr = rte_ring_create(rg_name, rte_align32pow2(n+1), socket_id, rg_flags);\n+\tif (r == NULL)\n+\t\tgoto exit;\n+\n+\t/*\n+\t * reserve a memory zone for this mempool: private data is\n+\t * cache-aligned\n+\t */\n+\tprivate_data_size = (private_data_size +\n+\t\tRTE_MEMPOOL_ALIGN_MASK) & (~RTE_MEMPOOL_ALIGN_MASK);\n+\n+\tif (!rte_eal_has_hugepages()) {\n+\t\t/*\n+\t\t * expand private data size to a whole page, so that the\n+\t\t * first pool element will start on a new standard page\n+\t\t */\n+\t\tint head = sizeof(struct rte_mempool);\n+\t\tint new_size = (private_data_size + head) % page_size;\n+\n+\t\tif (new_size)\n+\t\t\tprivate_data_size += page_size - new_size;\n+\t}\n+\n+\t/* try to allocate tailq entry */\n+\tte = rte_zmalloc(\"MEMPOOL_TAILQ_ENTRY\", sizeof(*te), 0);\n+\tif (te == NULL) {\n+\t\tRTE_LOG(ERR, MEMPOOL, \"Cannot allocate tailq entry!\\n\");\n+\t\tgoto exit;\n+\t}\n+\n+\t/*\n+\t * If user provided an external memory buffer, then use it to\n+\t * store mempool objects. Otherwise reserve a memzone that is large\n+\t * enough to hold mempool header and metadata plus mempool objects.\n+\t */\n+\tmempool_size = MEMPOOL_HEADER_SIZE(mp, pg_num) + private_data_size;\n+\tmempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);\n+\tif (vaddr == NULL)\n+\t\tmempool_size += (size_t)objsz.total_size * n;\n+\n+\tif (!rte_eal_has_hugepages()) {\n+\t\t/*\n+\t\t * we want the memory pool to start on a page boundary,\n+\t\t * because pool elements crossing page boundaries would\n+\t\t * result in discontiguous physical addresses\n+\t\t */\n+\t\tmempool_size += page_size;\n+\t}\n+\n+\tsnprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT, name);\n+\n+\tmz = rte_memzone_reserve(mz_name, mempool_size, socket_id, mz_flags);\n+\n+\t/*\n+\t * no more memory: in this case we loose previously reserved\n+\t * space for the ring as we cannot free it\n+\t */\n+\tif (mz == NULL) {\n+\t\trte_free(te);\n+\t\tgoto exit;\n+\t}\n+\n+\tif (rte_eal_has_hugepages()) {\n+\t\tstartaddr = (void *)mz->addr;\n+\t} else {\n+\t\t/* align memory pool start address on a page boundary */\n+\t\tunsigned long addr = (unsigned long)mz->addr;\n+\n+\t\tif (addr & (page_size - 1)) {\n+\t\t\taddr += page_size;\n+\t\t\taddr &= ~(page_size - 1);\n+\t\t}\n+\t\tstartaddr = (void *)addr;\n+\t}\n+\n+\t/* init the mempool structure */\n+\tmp = startaddr;\n+\tmemset(mp, 0, sizeof(*mp));\n+\tsnprintf(mp->name, sizeof(mp->name), \"%s\", name);\n+\tmp->phys_addr = mz->phys_addr;\n+\tmp->ring = r;\n+\tmp->size = n;\n+\tmp->flags = flags;\n+\tmp->elt_size = objsz.elt_size;\n+\tmp->header_size = objsz.header_size;\n+\tmp->trailer_size = objsz.trailer_size;\n+\tmp->cache_size = cache_size;\n+\tmp->cache_flushthresh = CALC_CACHE_FLUSHTHRESH(cache_size);\n+\tmp->private_data_size = private_data_size;\n+\n+\t/* calculate address of the first element for continuous mempool. */\n+\tobj = (char *)mp + MEMPOOL_HEADER_SIZE(mp, pg_num) +\n+\t\tprivate_data_size;\n+\tobj = RTE_PTR_ALIGN_CEIL(obj, RTE_MEMPOOL_ALIGN);\n+\n+\t/* populate address translation fields. 
*/\n+\tmp->pg_num = pg_num;\n+\tmp->pg_shift = pg_shift;\n+\tmp->pg_mask = RTE_LEN2MASK(mp->pg_shift, typeof(mp->pg_mask));\n+\n+\t/* mempool elements allocated together with mempool */\n+\tif (vaddr == NULL) {\n+\t\tmp->elt_va_start = (uintptr_t)obj;\n+\t\tmp->elt_pa[0] = mp->phys_addr +\n+\t\t\t(mp->elt_va_start - (uintptr_t)mp);\n+\n+\t/* mempool elements in a separate chunk of memory. */\n+\t} else {\n+\t\tmp->elt_va_start = (uintptr_t)vaddr;\n+\t\tmemcpy(mp->elt_pa, paddr, sizeof(mp->elt_pa[0]) * pg_num);\n+\t}\n+\n+\tmp->elt_va_end = mp->elt_va_start;\n+\n+\t/* call the initializer */\n+\tif (mp_init)\n+\t\tmp_init(mp, mp_init_arg);\n+\n+\tmempool_populate(mp, n, 1, obj_init, obj_init_arg);\n+\n+\tte->data = (void *) mp;\n+\n+\trte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);\n+\tTAILQ_INSERT_TAIL(mempool_list, te, next);\n+\trte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);\n+\n+exit:\n+\trte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK);\n+\n+\treturn mp;\n+}\n+#endif\n \n /* Return the number of entries in the mempool */\n unsigned\n@@ -718,7 +949,11 @@ rte_mempool_count(const struct rte_mempool *mp)\n {\n \tunsigned count;\n \n+#ifdef RTE_NEXT_ABI\n \tcount = rte_mempool_ext_get_count(mp);\n+#else\n+\tcount = rte_ring_count(mp->ring);\n+#endif\n \n #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0\n \t{\n@@ -874,6 +1109,9 @@ rte_mempool_dump(FILE *f, const struct rte_mempool *mp)\n \n \tfprintf(f, \"mempool <%s>@%p\\n\", mp->name, mp);\n \tfprintf(f, \"  flags=%x\\n\", mp->flags);\n+#ifndef RTE_NEXT_ABI\n+\tfprintf(f, \"  ring=<%s>@%p\\n\", mp->ring->name, mp->ring);\n+#endif\n \tfprintf(f, \"  phys_addr=0x%\" PRIx64 \"\\n\", mp->phys_addr);\n \tfprintf(f, \"  size=%\"PRIu32\"\\n\", mp->size);\n \tfprintf(f, \"  header_size=%\"PRIu32\"\\n\", mp->header_size);\n@@ -896,7 +1134,11 @@ rte_mempool_dump(FILE *f, const struct rte_mempool *mp)\n \t\t\tmp->size);\n \n \tcache_count = rte_mempool_dump_cache(f, mp);\n+#ifdef RTE_NEXT_ABI\n \tcommon_count = rte_mempool_ext_get_count(mp);\n+#else\n+\tcommon_count = rte_ring_count(mp->ring);\n+#endif\n \tif ((cache_count + common_count) > mp->size)\n \t\tcommon_count = mp->size - cache_count;\n \tfprintf(f, \"  common_pool_count=%u\\n\", common_count);\n@@ -991,7 +1233,7 @@ void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *),\n \trte_rwlock_read_unlock(RTE_EAL_MEMPOOL_RWLOCK);\n }\n \n-\n+#ifdef RTE_NEXT_ABI\n /* create the mempool using an external mempool manager */\n struct rte_mempool *\n rte_mempool_create_ext(const char *name, unsigned n, unsigned elt_size,\n@@ -1017,3 +1259,4 @@ rte_mempool_create_ext(const char *name, unsigned n, unsigned elt_size,\n \n \n }\n+#endif\ndiff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h\nindex f987d8a..4b14b80 100644\n--- a/lib/librte_mempool/rte_mempool.h\n+++ b/lib/librte_mempool/rte_mempool.h\n@@ -175,6 +175,7 @@ struct rte_mempool_objtlr {\n #endif\n };\n \n+#ifdef RTE_NEXT_ABI\n /* Handler functions for external mempool support */\n typedef void *(*rte_mempool_alloc_t)(struct rte_mempool *mp,\n \t\tconst char *name, unsigned n, int socket_id, unsigned flags);\n@@ -256,12 +257,16 @@ rte_mempool_ext_get_count(const struct rte_mempool *mp);\n  */\n int\n rte_mempool_ext_free(struct rte_mempool *mp);\n+#endif\n \n /**\n  * The RTE mempool structure.\n  */\n struct rte_mempool {\n \tchar name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */\n+#ifndef RTE_NEXT_ABI\n+\tstruct rte_ring *ring;           /**< Ring to store objects. 
*/\n+#endif\n \tphys_addr_t phys_addr;           /**< Phys. addr. of mempool struct. */\n \tint flags;                       /**< Flags of the mempool. */\n \tuint32_t size;                   /**< Size of the mempool. */\n@@ -275,6 +280,7 @@ struct rte_mempool {\n \n \tunsigned private_data_size;      /**< Size of private data. */\n \n+#ifdef RTE_NEXT_ABI\n \t/* Common pool data structure pointer */\n \tvoid *pool;\n \n@@ -286,6 +292,7 @@ struct rte_mempool {\n \t * directly would not be valid for secondary processes.\n \t */\n \tint16_t handler_idx;\n+#endif\n \n #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0\n \t/** Per-lcore local cache. */\n@@ -316,8 +323,9 @@ struct rte_mempool {\n #define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/\n #define MEMPOOL_F_SP_PUT         0x0004 /**< Default put is \"single-producer\".*/\n #define MEMPOOL_F_SC_GET         0x0008 /**< Default get is \"single-consumer\".*/\n+#ifdef RTE_NEXT_ABI\n #define MEMPOOL_F_INT_HANDLER    0x0020 /**< Using internal mempool handler */\n-\n+#endif\n \n /**\n  * @internal When debug is enabled, store some statistics.\n@@ -847,7 +855,12 @@ void rte_mempool_dump(FILE *f, const struct rte_mempool *mp);\n  */\n static inline void __attribute__((always_inline))\n __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,\n-\t\t    unsigned n, __rte_unused int is_mp)\n+#ifdef RTE_NEXT_ABI\n+\t\tunsigned n, __rte_unused int is_mp)\n+#else\n+\t\tunsigned n, int is_mp)\n+#endif\n+\n {\n #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0\n \tstruct rte_mempool_cache *cache;\n@@ -887,9 +900,15 @@ __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,\n \n \tcache->len += n;\n \n+#ifdef RTE_NEXT_ABI\n \tif (unlikely(cache->len >= flushthresh)) {\n \t\trte_mempool_ext_put_bulk(mp, &cache->objs[cache_size],\n \t\t\t\tcache->len - cache_size);\n+#else\n+\tif (cache->len >= flushthresh) {\n+\t\trte_ring_mp_enqueue_bulk(mp->ring, &cache->objs[cache_size],\n+\t\t\t\tcache->len - cache_size);\n+#endif\n \t\tcache->len = cache_size;\n \t\t/*\n \t\t * Increment stats counter to tell us how many pool puts\n@@ -903,10 +922,28 @@ __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,\n ring_enqueue:\n #endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */\n \n+#ifdef RTE_NEXT_ABI\n \t/* Increment stats counter to tell us how many pool puts happened */\n \t__MEMPOOL_STAT_ADD(mp, put_pool, n);\n \n \trte_mempool_ext_put_bulk(mp, obj_table, n);\n+#else\n+\t/* push remaining objects in ring */\n+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG\n+\tif (is_mp) {\n+\t\tif (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0)\n+\t\t\trte_panic(\"cannot put objects in mempool\\n\");\n+\t} else {\n+\t\tif (rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n) < 0)\n+\t\t\trte_panic(\"cannot put objects in mempool\\n\");\n+\t}\n+#else\n+\tif (is_mp)\n+\t\trte_ring_mp_enqueue_bulk(mp->ring, obj_table, n);\n+\telse\n+\t\trte_ring_sp_enqueue_bulk(mp->ring, obj_table, n);\n+#endif\n+#endif\n }\n \n \n@@ -1030,7 +1067,11 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)\n  */\n static inline int __attribute__((always_inline))\n __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,\n+#ifdef RTE_NEXT_ABI\n \t\t   unsigned n, __attribute__((unused))int is_mc)\n+#else\n+\t\t   unsigned n, int is_mc)\n+#endif\n {\n \tint ret;\n #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0\n@@ -1054,8 +1095,13 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,\n \t\tuint32_t req = n + (cache_size - cache->len);\n \n \t\t/* How many do we require i.e. 
number to fill the cache + the request */\n+#ifdef RTE_NEXT_ABI\n \t\tret = rte_mempool_ext_get_bulk(mp,\n \t\t\t&cache->objs[cache->len], req);\n+#else\n+\t\tret = rte_ring_mc_dequeue_bulk(mp->ring,\n+\t\t\t&cache->objs[cache->len], req);\n+#endif\n \t\tif (unlikely(ret < 0)) {\n \t\t\t/*\n \t\t\t * In the offchance that we are buffer constrained,\n@@ -1083,7 +1129,14 @@ ring_dequeue:\n #endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */\n \n \t/* get remaining objects from ring */\n+#ifdef RTE_NEXT_ABI\n \tret = rte_mempool_ext_get_bulk(mp, obj_table, n);\n+#else\n+\tif (is_mc)\n+\t\tret = rte_ring_mc_dequeue_bulk(mp->ring, obj_table, n);\n+\telse\n+\t\tret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n);\n+#endif\n \n \tif (ret < 0)\n \t\t__MEMPOOL_STAT_ADD(mp, get_fail, n);\n@@ -1485,6 +1538,7 @@ ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,\n  */\n void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *arg),\n \t\t      void *arg);\n+#ifdef RTE_NEXT_ABI\n \n /**\n  * Function to get the name of a mempool handler\n@@ -1559,6 +1613,7 @@ rte_mempool_create_ext(const char *name, unsigned n, unsigned elt_size,\n \t\trte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,\n \t\tint socket_id, unsigned flags,\n \t\tconst char *handler_name);\n+#endif\n \n #ifdef __cplusplus\n }\n",
    "prefixes": [
        "dpdk-dev",
        "v3",
        "4/4"
    ]
}
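
For the write methods advertised by the Allow header (PUT, PATCH), a hedged sketch: updates require an authenticated account with maintainer rights on the project, and Patchwork instances accept Django REST Framework token authentication via the Authorization header. The token below is a placeholder, not a real credential, and the maintainer permission it assumes is not granted to ordinary accounts. PATCH changes only the fields supplied.

import requests

# Placeholder token; real values come from the Patchwork user profile.
headers = {"Authorization": "Token 0123456789abcdef0123456789abcdef"}

# Partial update via PATCH: only "state" and "archived" are modified.
resp = requests.patch(
    "http://patchwork.dpdk.org/api/patches/11275/",
    headers=headers,
    json={"state": "superseded", "archived": True},
)
resp.raise_for_status()
print(resp.json()["state"])  # the updated state, if the request succeeded

A PUT to the same URL performs a full rather than partial update of the writable fields.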