From patchwork Thu Apr 16 15:07:19 2020
X-Patchwork-Submitter: Vladimir Medvedkin
X-Patchwork-Id: 68700
X-Patchwork-Delegate: thomas@monjalon.net
From: Vladimir Medvedkin
To: dev@dpdk.org
Cc: yipeng1.wang@intel.com, sameh.gobriel@intel.com, bruce.richardson@intel.com
Date: Thu, 16 Apr 2020 16:07:19 +0100
Message-Id: <1587049639-127242-1-git-send-email-vladimir.medvedkin@intel.com>
In-Reply-To: <1587049211-126654-1-git-send-email-vladimir.medvedkin@intel.com>
References: <1587049211-126654-1-git-send-email-vladimir.medvedkin@intel.com>
Subject: [dpdk-dev] [PATCH v5] hash: add hash bulk lookup with hash signatures array

Implement rte_hash_lookup_with_hash_bulk_data() and
rte_hash_lookup_with_hash_bulk() - bulk lookup functions that take an
array of precomputed hash signatures. Add both functions to the
performance tests.
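
The intended usage pattern is sketched below (illustrative only, not part
of the patch): signatures are computed once with rte_hash_hash() and the
same array is passed to the bulk lookup, so the hash is not recomputed on
every call. The helper name lookup_burst_with_sigs() is hypothetical, and
the sketch assumes a table created with rte_hash_create() and populated
with rte_hash_add_key_data().

/* Illustrative sketch only: look up a burst of keys using signatures
 * precomputed with rte_hash_hash().
 */
#include <stdio.h>
#include <stdint.h>
#include <rte_hash.h>

#define N_KEYS 32	/* must not exceed RTE_HASH_LOOKUP_BULK_MAX (64) */

static int
lookup_burst_with_sigs(const struct rte_hash *h, const void *keys[N_KEYS])
{
	hash_sig_t sigs[N_KEYS];
	void *data[N_KEYS];
	uint64_t hit_mask = 0;
	unsigned int i;
	int hits;

	/* Signatures can be computed once and reused for later bursts. */
	for (i = 0; i < N_KEYS; i++)
		sigs[i] = rte_hash_hash(h, keys[i]);

	hits = rte_hash_lookup_with_hash_bulk_data(h, keys, sigs, N_KEYS,
			&hit_mask, data);
	if (hits < 0)
		return hits;	/* -EINVAL on invalid parameters */

	for (i = 0; i < N_KEYS; i++) {
		if ((hit_mask & (1ULL << i)) == 0)
			continue;	/* key i was not found */
		printf("key %u -> data %p\n", i, data[i]);
	}
	return hits;
}
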
Signed-off-by: Vladimir Medvedkin Acked-by: Yipeng Wang --- app/test/test_hash_perf.c | 61 ++++++- lib/librte_hash/rte_cuckoo_hash.c | 310 ++++++++++++++++++++++++----------- lib/librte_hash/rte_hash.h | 55 +++++++ lib/librte_hash/rte_hash_version.map | 2 + 4 files changed, 326 insertions(+), 102 deletions(-) diff --git a/app/test/test_hash_perf.c b/app/test/test_hash_perf.c index a438eae..76cdac5 100644 --- a/app/test/test_hash_perf.c +++ b/app/test/test_hash_perf.c @@ -391,8 +391,8 @@ timed_lookups(unsigned int with_hash, unsigned int with_data, } static int -timed_lookups_multi(unsigned int with_data, unsigned int table_index, - unsigned int ext) +timed_lookups_multi(unsigned int with_hash, unsigned int with_data, + unsigned int table_index, unsigned int ext) { unsigned i, j, k; int32_t positions_burst[BURST_SIZE]; @@ -417,7 +417,7 @@ timed_lookups_multi(unsigned int with_data, unsigned int table_index, for (j = 0; j < keys_to_add/BURST_SIZE; j++) { for (k = 0; k < BURST_SIZE; k++) keys_burst[k] = keys[j * BURST_SIZE + k]; - if (with_data) { + if (!with_hash && with_data) { ret = rte_hash_lookup_bulk_data(h[table_index], (const void **) keys_burst, BURST_SIZE, @@ -442,6 +442,55 @@ timed_lookups_multi(unsigned int with_data, unsigned int table_index, return -1; } } + } else if (with_hash && with_data) { + ret = rte_hash_lookup_with_hash_bulk_data( + h[table_index], + (const void **)keys_burst, + &signatures[j * BURST_SIZE], + BURST_SIZE, &hit_mask, ret_data); + if (ret != BURST_SIZE) { + printf("Expect to find %u keys," + " but found %d\n", + BURST_SIZE, ret); + return -1; + } + for (k = 0; k < BURST_SIZE; k++) { + if ((hit_mask & (1ULL << k)) == 0) { + printf("Key number %u" + " not found\n", + j * BURST_SIZE + k); + return -1; + } + expected_data[k] = + (void *)((uintptr_t)signatures[ + j * BURST_SIZE + k]); + if (ret_data[k] != expected_data[k]) { + printf("Data returned for key" + " number %u is %p," + " but should be %p\n", + j * BURST_SIZE + k, + ret_data[k], + expected_data[k]); + return -1; + } + } + } else if (with_hash && !with_data) { + ret = rte_hash_lookup_with_hash_bulk( + h[table_index], + (const void **)keys_burst, + &signatures[j * BURST_SIZE], + BURST_SIZE, positions_burst); + for (k = 0; k < BURST_SIZE; k++) { + if (positions_burst[k] != + positions[j * + BURST_SIZE + k]) { + printf("Key looked up in %d, should be in %d\n", + positions_burst[k], + positions[j * + BURST_SIZE + k]); + return -1; + } + } } else { rte_hash_lookup_bulk(h[table_index], (const void **) keys_burst, @@ -462,7 +511,8 @@ timed_lookups_multi(unsigned int with_data, unsigned int table_index, const uint64_t end_tsc = rte_rdtsc(); const uint64_t time_taken = end_tsc - start_tsc; - cycles[table_index][LOOKUP_MULTI][0][with_data] = time_taken/num_lookups; + cycles[table_index][LOOKUP_MULTI][with_hash][with_data] = + time_taken/num_lookups; return 0; } @@ -543,7 +593,8 @@ run_all_tbl_perf_tests(unsigned int with_pushes, unsigned int with_locks, if (timed_lookups(with_hash, with_data, i, ext) < 0) return -1; - if (timed_lookups_multi(with_data, i, ext) < 0) + if (timed_lookups_multi(with_hash, with_data, + i, ext) < 0) return -1; if (timed_deletes(with_hash, with_data, i, ext) < 0) diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c index 6af8ca4..38767a8 100644 --- a/lib/librte_hash/rte_cuckoo_hash.c +++ b/lib/librte_hash/rte_cuckoo_hash.c @@ -1711,64 +1711,20 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches, } } -#define PREFETCH_OFFSET 4 
static inline void -__rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys, - int32_t num_keys, int32_t *positions, - uint64_t *hit_mask, void *data[]) +__bulk_lookup_l(const struct rte_hash *h, const void **keys, + const struct rte_hash_bucket **primary_bkt, + const struct rte_hash_bucket **secondary_bkt, + uint16_t *sig, int32_t num_keys, int32_t *positions, + uint64_t *hit_mask, void *data[]) { uint64_t hits = 0; int32_t i; int32_t ret; - uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX]; - uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX]; - uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX]; - uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX]; - const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX]; - const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX]; uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0}; uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0}; struct rte_hash_bucket *cur_bkt, *next_bkt; - /* Prefetch first keys */ - for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++) - rte_prefetch0(keys[i]); - - /* - * Prefetch rest of the keys, calculate primary and - * secondary bucket and prefetch them - */ - for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) { - rte_prefetch0(keys[i + PREFETCH_OFFSET]); - - prim_hash[i] = rte_hash_hash(h, keys[i]); - - sig[i] = get_short_sig(prim_hash[i]); - prim_index[i] = get_prim_bucket_index(h, prim_hash[i]); - sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]); - - primary_bkt[i] = &h->buckets[prim_index[i]]; - secondary_bkt[i] = &h->buckets[sec_index[i]]; - - rte_prefetch0(primary_bkt[i]); - rte_prefetch0(secondary_bkt[i]); - } - - /* Calculate and prefetch rest of the buckets */ - for (; i < num_keys; i++) { - prim_hash[i] = rte_hash_hash(h, keys[i]); - - sig[i] = get_short_sig(prim_hash[i]); - prim_index[i] = get_prim_bucket_index(h, prim_hash[i]); - sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]); - - primary_bkt[i] = &h->buckets[prim_index[i]]; - secondary_bkt[i] = &h->buckets[sec_index[i]]; - - rte_prefetch0(primary_bkt[i]); - rte_prefetch0(secondary_bkt[i]); - } - __hash_rw_reader_lock(h); /* Compare signatures and prefetch key slot of first hit */ @@ -1903,63 +1859,20 @@ __rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys, } static inline void -__rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys, - int32_t num_keys, int32_t *positions, - uint64_t *hit_mask, void *data[]) +__bulk_lookup_lf(const struct rte_hash *h, const void **keys, + const struct rte_hash_bucket **primary_bkt, + const struct rte_hash_bucket **secondary_bkt, + uint16_t *sig, int32_t num_keys, int32_t *positions, + uint64_t *hit_mask, void *data[]) { uint64_t hits = 0; int32_t i; int32_t ret; - uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX]; - uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX]; - uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX]; - uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX]; - const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX]; - const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX]; uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0}; uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0}; struct rte_hash_bucket *cur_bkt, *next_bkt; uint32_t cnt_b, cnt_a; - /* Prefetch first keys */ - for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++) - rte_prefetch0(keys[i]); - - /* - * Prefetch rest of the keys, calculate primary and - * secondary bucket and prefetch them - */ - for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) { - rte_prefetch0(keys[i + 
PREFETCH_OFFSET]); - - prim_hash[i] = rte_hash_hash(h, keys[i]); - - sig[i] = get_short_sig(prim_hash[i]); - prim_index[i] = get_prim_bucket_index(h, prim_hash[i]); - sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]); - - primary_bkt[i] = &h->buckets[prim_index[i]]; - secondary_bkt[i] = &h->buckets[sec_index[i]]; - - rte_prefetch0(primary_bkt[i]); - rte_prefetch0(secondary_bkt[i]); - } - - /* Calculate and prefetch rest of the buckets */ - for (; i < num_keys; i++) { - prim_hash[i] = rte_hash_hash(h, keys[i]); - - sig[i] = get_short_sig(prim_hash[i]); - prim_index[i] = get_prim_bucket_index(h, prim_hash[i]); - sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]); - - primary_bkt[i] = &h->buckets[prim_index[i]]; - secondary_bkt[i] = &h->buckets[sec_index[i]]; - - rte_prefetch0(primary_bkt[i]); - rte_prefetch0(secondary_bkt[i]); - } - for (i = 0; i < num_keys; i++) positions[i] = -ENOENT; @@ -2124,6 +2037,92 @@ __rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys, *hit_mask = hits; } +#define PREFETCH_OFFSET 4 +static inline void +__bulk_lookup_prefetching_loop(const struct rte_hash *h, + const void **keys, int32_t num_keys, + uint16_t *sig, + const struct rte_hash_bucket **primary_bkt, + const struct rte_hash_bucket **secondary_bkt) +{ + int32_t i; + uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX]; + uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX]; + uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX]; + + /* Prefetch first keys */ + for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++) + rte_prefetch0(keys[i]); + + /* + * Prefetch rest of the keys, calculate primary and + * secondary bucket and prefetch them + */ + for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) { + rte_prefetch0(keys[i + PREFETCH_OFFSET]); + + prim_hash[i] = rte_hash_hash(h, keys[i]); + + sig[i] = get_short_sig(prim_hash[i]); + prim_index[i] = get_prim_bucket_index(h, prim_hash[i]); + sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]); + + primary_bkt[i] = &h->buckets[prim_index[i]]; + secondary_bkt[i] = &h->buckets[sec_index[i]]; + + rte_prefetch0(primary_bkt[i]); + rte_prefetch0(secondary_bkt[i]); + } + + /* Calculate and prefetch rest of the buckets */ + for (; i < num_keys; i++) { + prim_hash[i] = rte_hash_hash(h, keys[i]); + + sig[i] = get_short_sig(prim_hash[i]); + prim_index[i] = get_prim_bucket_index(h, prim_hash[i]); + sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]); + + primary_bkt[i] = &h->buckets[prim_index[i]]; + secondary_bkt[i] = &h->buckets[sec_index[i]]; + + rte_prefetch0(primary_bkt[i]); + rte_prefetch0(secondary_bkt[i]); + } +} + + +static inline void +__rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys, + int32_t num_keys, int32_t *positions, + uint64_t *hit_mask, void *data[]) +{ + uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX]; + const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX]; + const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX]; + + __bulk_lookup_prefetching_loop(h, keys, num_keys, sig, + primary_bkt, secondary_bkt); + + __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys, + positions, hit_mask, data); +} + +static inline void +__rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys, + int32_t num_keys, int32_t *positions, + uint64_t *hit_mask, void *data[]) +{ + uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX]; + const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX]; + const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX]; + + 
__bulk_lookup_prefetching_loop(h, keys, num_keys, sig, + primary_bkt, secondary_bkt); + + __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys, + positions, hit_mask, data); +} + static inline void __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys, int32_t num_keys, int32_t *positions, @@ -2165,6 +2164,123 @@ rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys, return __builtin_popcountl(*hit_mask); } + +static inline void +__rte_hash_lookup_with_hash_bulk_l(const struct rte_hash *h, + const void **keys, hash_sig_t *prim_hash, + int32_t num_keys, int32_t *positions, + uint64_t *hit_mask, void *data[]) +{ + int32_t i; + uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX]; + uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX]; + uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX]; + const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX]; + const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX]; + + /* + * Prefetch keys, calculate primary and + * secondary bucket and prefetch them + */ + for (i = 0; i < num_keys; i++) { + rte_prefetch0(keys[i]); + + sig[i] = get_short_sig(prim_hash[i]); + prim_index[i] = get_prim_bucket_index(h, prim_hash[i]); + sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]); + + primary_bkt[i] = &h->buckets[prim_index[i]]; + secondary_bkt[i] = &h->buckets[sec_index[i]]; + + rte_prefetch0(primary_bkt[i]); + rte_prefetch0(secondary_bkt[i]); + } + + __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys, + positions, hit_mask, data); +} + +static inline void +__rte_hash_lookup_with_hash_bulk_lf(const struct rte_hash *h, + const void **keys, hash_sig_t *prim_hash, + int32_t num_keys, int32_t *positions, + uint64_t *hit_mask, void *data[]) +{ + int32_t i; + uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX]; + uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX]; + uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX]; + const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX]; + const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX]; + + /* + * Prefetch keys, calculate primary and + * secondary bucket and prefetch them + */ + for (i = 0; i < num_keys; i++) { + rte_prefetch0(keys[i]); + + sig[i] = get_short_sig(prim_hash[i]); + prim_index[i] = get_prim_bucket_index(h, prim_hash[i]); + sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]); + + primary_bkt[i] = &h->buckets[prim_index[i]]; + secondary_bkt[i] = &h->buckets[sec_index[i]]; + + rte_prefetch0(primary_bkt[i]); + rte_prefetch0(secondary_bkt[i]); + } + + __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys, + positions, hit_mask, data); +} + +static inline void +__rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys, + hash_sig_t *prim_hash, int32_t num_keys, + int32_t *positions, uint64_t *hit_mask, void *data[]) +{ + if (h->readwrite_concur_lf_support) + __rte_hash_lookup_with_hash_bulk_lf(h, keys, prim_hash, + num_keys, positions, hit_mask, data); + else + __rte_hash_lookup_with_hash_bulk_l(h, keys, prim_hash, + num_keys, positions, hit_mask, data); +} + +int +rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys, + hash_sig_t *sig, uint32_t num_keys, int32_t *positions) +{ + RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || + (sig == NULL) || (num_keys == 0) || + (num_keys > RTE_HASH_LOOKUP_BULK_MAX) || + (positions == NULL)), -EINVAL); + + __rte_hash_lookup_with_hash_bulk(h, keys, sig, num_keys, + positions, NULL, NULL); + return 0; +} + +int 
+rte_hash_lookup_with_hash_bulk_data(const struct rte_hash *h, + const void **keys, hash_sig_t *sig, + uint32_t num_keys, uint64_t *hit_mask, void *data[]) +{ + RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || + (sig == NULL) || (num_keys == 0) || + (num_keys > RTE_HASH_LOOKUP_BULK_MAX) || + (hit_mask == NULL)), -EINVAL); + + int32_t positions[num_keys]; + + __rte_hash_lookup_with_hash_bulk(h, keys, sig, num_keys, + positions, hit_mask, data); + + /* Return number of hits */ + return __builtin_popcountl(*hit_mask); +} + int32_t rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next) { diff --git a/lib/librte_hash/rte_hash.h b/lib/librte_hash/rte_hash.h index ed0673b..13099d7 100644 --- a/lib/librte_hash/rte_hash.h +++ b/lib/librte_hash/rte_hash.h @@ -519,6 +519,61 @@ rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys, uint32_t num_keys, uint64_t *hit_mask, void *data[]); /** + * Find multiple keys in the hash table with precomputed hash value array. + * This operation is multi-thread safe with regarding to other lookup threads. + * Read-write concurrency can be enabled by setting flag during + * table creation. + * + * @param h + * Hash table to look in. + * @param keys + * A pointer to a list of keys to look for. + * @param sig + * A pointer to a list of precomputed hash values for keys. + * @param num_keys + * How many keys are in the keys list (less than RTE_HASH_LOOKUP_BULK_MAX). + * @param positions + * Output containing a list of values, corresponding to the list of keys that + * can be used by the caller as an offset into an array of user data. These + * values are unique for each key, and are the same values that were returned + * when each key was added. If a key in the list was not found, then -ENOENT + * will be the value. + * @return + * -EINVAL if there's an error, otherwise 0. + */ +__rte_experimental +int +rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys, + hash_sig_t *sig, uint32_t num_keys, int32_t *positions); + +/** + * Find multiple keys in the hash table with precomputed hash value array. + * This operation is multi-thread safe with regarding to other lookup threads. + * Read-write concurrency can be enabled by setting flag during + * table creation. + * + * @param h + * Hash table to look in. + * @param keys + * A pointer to a list of keys to look for. + * @param sig + * A pointer to a list of precomputed hash values for keys. + * @param num_keys + * How many keys are in the keys list (less than RTE_HASH_LOOKUP_BULK_MAX). + * @param hit_mask + * Output containing a bitmask with all successful lookups. + * @param data + * Output containing array of data returned from all the successful lookups. + * @return + * -EINVAL if there's an error, otherwise number of successful lookups. + */ +__rte_experimental +int +rte_hash_lookup_with_hash_bulk_data(const struct rte_hash *h, + const void **keys, hash_sig_t *sig, + uint32_t num_keys, uint64_t *hit_mask, void *data[]); + +/** * Find multiple keys in the hash table. * This operation is multi-thread safe with regarding to other lookup threads. 
* Read-write concurrency can be enabled by setting flag during diff --git a/lib/librte_hash/rte_hash_version.map b/lib/librte_hash/rte_hash_version.map index a8fbbc3..c2a9094 100644 --- a/lib/librte_hash/rte_hash_version.map +++ b/lib/librte_hash/rte_hash_version.map @@ -33,6 +33,8 @@ EXPERIMENTAL { global: rte_hash_free_key_with_position; + rte_hash_lookup_with_hash_bulk; + rte_hash_lookup_with_hash_bulk_data; rte_hash_max_key_id; };
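
For completeness, a sketch of the positions-based variant (again
illustrative only, not part of the patch). Each position matches the value
returned by rte_hash_add_key() when the key was inserted, so it can index a
user-data array owned by the caller; -ENOENT marks keys that were not
found. The helper name lookup_positions_with_sigs() and the user_data array
are hypothetical.

/* Illustrative sketch only: positions-based bulk lookup with
 * precomputed signatures. "n" must not exceed RTE_HASH_LOOKUP_BULK_MAX.
 */
#include <errno.h>
#include <stdint.h>
#include <rte_hash.h>

static int
lookup_positions_with_sigs(const struct rte_hash *h, const void **keys,
		hash_sig_t *sigs, uint32_t n, void *user_data[], void *out[])
{
	int32_t positions[RTE_HASH_LOOKUP_BULK_MAX];
	uint32_t i;
	int ret;

	ret = rte_hash_lookup_with_hash_bulk(h, keys, sigs, n, positions);
	if (ret != 0)
		return ret;	/* -EINVAL on invalid parameters */

	for (i = 0; i < n; i++)
		out[i] = (positions[i] == -ENOENT) ?
				NULL : user_data[positions[i]];
	return 0;
}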