[v4,2/2] hash table: add an iterator over conflicting entries

Message ID 20181009192907.85650-2-qiaobinf@bu.edu
State New
Delegated to: Thomas Monjalon
Headers show
Series
  • [v4,1/2] hash table: fix a bug in rte_hash_iterate()
Related show

Checks

Context Check Description
ci/Intel-compilation success Compilation OK
ci/checkpatch success coding style OK

Commit Message

Fu, Qiaobin Oct. 9, 2018, 7:29 p.m.
Function rte_hash_iterate_conflict_entries_with_hash() iterates
over the entries that conflict with an incoming entry.

Iterating over conflicting entries enables one to decide
if the incoming entry is more valuable than the entries already
in the hash table. This is particularly useful after
an insertion failure.

v4:
* Fix the style issue

* Follow the ABI updates

v3:
* Make the rte_hash_iterate() API similar to
  rte_hash_iterate_conflict_entries()

v2:
* Fix the style issue

* Make the API more universal

Signed-off-by: Qiaobin Fu <qiaobinf@bu.edu>
Reviewed-by: Cody Doucette <doucette@bu.edu>
Reviewed-by: Michel Machado <michel@digirati.com.br>
Reviewed-by: Keith Wiles <keith.wiles@intel.com>
Reviewed-by: Yipeng Wang <yipeng1.wang@intel.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Gaëtan Rivet <gaetan.rivet@6wind.com>
---
 MAINTAINERS                          |   2 +-
 lib/librte_hash/rte_cuckoo_hash.c    | 134 ++++++++++++++++++++++++++-
 lib/librte_hash/rte_cuckoo_hash.h    |  11 +++
 lib/librte_hash/rte_hash.h           |  71 +++++++++++++-
 lib/librte_hash/rte_hash_version.map |  14 +++
 test/test/test_hash.c                |   6 +-
 test/test/test_hash_multiwriter.c    |   8 +-
 test/test/test_hash_readwrite.c      |  14 ++-
 8 files changed, 246 insertions(+), 14 deletions(-)

Comments

Wang, Yipeng1 Oct. 10, 2018, 2:54 a.m. | #1
Hi, Qiaobin,

Could you try to rebase on this patch set? (http://patchwork.dpdk.org/cover/46106/) 

I checked your patch and I think there won't be many functional issues merging our patches. Let's coordinate to make sure the final merge is easier. 

Other comments inlined:

>-----Original Message-----
>From: Qiaobin Fu [mailto:qiaobinf@bu.edu]
>diff --git a/MAINTAINERS b/MAINTAINERS
>index 9fd258fad..e8c81656f 100644
>--- a/MAINTAINERS
>+++ b/MAINTAINERS
>@@ -1055,7 +1055,7 @@ F: test/test/test_efd*
> F: examples/server_node_efd/
> F: doc/guides/sample_app_ug/server_node_efd.rst
>
>-Hashes
>+Hashes - EXPERIMENTAL
[Wang, Yipeng] I don't think we need to mark the whole library as experimental, do we?

> M: Bruce Richardson <bruce.richardson@intel.com>
> M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
> F: lib/librte_hash/
>diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
>index a3e76684d..439251a7f 100644
>--- a/lib/librte_hash/rte_cuckoo_hash.c
>+++ b/lib/librte_hash/rte_cuckoo_hash.c
>@@ -1301,7 +1301,10 @@ rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
> }
>
> int32_t
>-rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
>+rte_hash_iterate_v1808(const struct rte_hash *h,
>+	const void **key,
>+	void **data,
>+	uint32_t *next)
> {
> 	uint32_t bucket_idx, idx, position;
> 	struct rte_hash_key *next_key;
>@@ -1344,3 +1347,132 @@ rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32
>
> 	return position - 1;
> }
>+VERSION_SYMBOL(rte_hash_iterate, _v1808, 18.08);
>+
>+int32_t
>+rte_hash_iterate_v1811(const struct rte_hash *h,
[Wang, Yipeng] With the ext table patch set, this function considers the ext bucket as well. For a rebase,
you just need to add that part of the code.
>+	const void **key,
>+	void **data,
>+	struct rte_hash_iterator_state *state)
>+{
>+	struct rte_hash_iterator_priv *it;
>+	uint32_t bucket_idx, idx, position;
>+	struct rte_hash_key *next_key;
>+
>+	RETURN_IF_TRUE(((h == NULL) || (key == NULL) ||
>+		(data == NULL) || (state == NULL)), -EINVAL);
>+
>+	RTE_BUILD_BUG_ON(sizeof(struct rte_hash_iterator_priv) >
>+		sizeof(struct rte_hash_iterator_state));
>+
>+	it = (struct rte_hash_iterator_priv *)state;
>+	if (it->next == 0)
>+		it->total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
>+
>+	/* Out of bounds */
>+	if (it->next >= it->total_entries)
>+		return -ENOENT;
>+
>+	/* Calculate bucket and index of current iterator */
>+	bucket_idx = it->next / RTE_HASH_BUCKET_ENTRIES;
>+	idx = it->next % RTE_HASH_BUCKET_ENTRIES;
>+
>+	__hash_rw_reader_lock(h);
>+	/* If current position is empty, go to the next one */
>+	while (h->buckets[bucket_idx].key_idx[idx] == EMPTY_SLOT) {
>+		it->next++;
>+		/* End of table */
>+		if (it->next == it->total_entries) {
>+			__hash_rw_reader_unlock(h);
>+			return -ENOENT;
>+		}
>+		bucket_idx = it->next / RTE_HASH_BUCKET_ENTRIES;
>+		idx = it->next % RTE_HASH_BUCKET_ENTRIES;
>+	}
>+	/* Get position of entry in key table */
>+	position = h->buckets[bucket_idx].key_idx[idx];
>+	next_key = (struct rte_hash_key *) ((char *)h->key_store +
>+				position * h->key_entry_size);
>+	/* Return key and data */
>+	*key = next_key->key;
>+	*data = next_key->pdata;
>+
>+	__hash_rw_reader_unlock(h);
>+
>+	/* Increment iterator */
>+	it->next++;
>+
>+	return position - 1;
>+}
>+MAP_STATIC_SYMBOL(int32_t rte_hash_iterate(const struct rte_hash *h,
>+	const void **key, void **data, struct rte_hash_iterator_state *state),
>+	rte_hash_iterate_v1811);
>+
>+int32_t __rte_experimental
>+rte_hash_iterate_conflict_entries_with_hash(struct rte_hash *h,
>+	const void **key,
>+	void **data,
>+	hash_sig_t sig,
>+	struct rte_hash_iterator_state *state)
[Wang, Yipeng] With the ext bucket feature, the conflicting entries can include the entries in the
linked list. You can keep the current behavior for simplicity, or iterate the linked list
as well if the extendable bucket is turned on. When iterating the linked list, vnext
can grow larger than RTE_HASH_BUCKET_ENTRIES. It is up to you, but if you do not
iterate the linked list, please indicate that in the API comment.
>+{
>+	struct rte_hash_iterator_conflict_entries_priv *it;
>+
>+	RETURN_IF_TRUE(((h == NULL) || (key == NULL) ||
>+		(data == NULL) || (state == NULL)), -EINVAL);
>+
>+	RTE_BUILD_BUG_ON(sizeof(
>+		struct rte_hash_iterator_conflict_entries_priv) >
>+		sizeof(struct rte_hash_iterator_state));
>+
>+	it = (struct rte_hash_iterator_conflict_entries_priv *)state;
>+	if (it->vnext == 0) {
>+		/*
>+		 * Get the primary bucket index given
>+		 * the precomputed hash value.
>+		 */
>+		it->primary_bidx = sig & h->bucket_bitmask;
>+		/*
>+		 * Get the secondary bucket index given
>+		 * the precomputed hash value.
>+		 */
>+		it->secondary_bidx =
>+			rte_hash_secondary_hash(sig) & h->bucket_bitmask;
>+	}
>+
>+	while (it->vnext < RTE_HASH_BUCKET_ENTRIES * 2) {
>+		uint32_t bidx = it->vnext < RTE_HASH_BUCKET_ENTRIES ?
>+			it->primary_bidx : it->secondary_bidx;
>+		uint32_t next = it->vnext & (RTE_HASH_BUCKET_ENTRIES - 1);
>+		uint32_t position;
>+		struct rte_hash_key *next_key;
>+
>+		RTE_BUILD_BUG_ON(!RTE_IS_POWER_OF_2(RTE_HASH_BUCKET_ENTRIES));
[Wang, Yipeng] I think we already checked this in rte_cuckoo_hash.h

>+		__hash_rw_reader_lock(h);
>+		position = h->buckets[bidx].key_idx[next];
>+
>+		/* Increment iterator. */
>+		it->vnext++;
>+
>+		/*
>+		 * The test below is unlikely because this iterator is meant
>+		 * to be used after a failed insert.
>+		 */
>+		if (unlikely(position == EMPTY_SLOT)) {
>+			__hash_rw_reader_unlock(h);
 [Wang, Yipeng] Should you just lock the outer while loop, so that if there are many empty entries the lock function won't
be called multiple times? It is more coarse-grained, but it saves the cost of locking.

Patch

diff --git a/MAINTAINERS b/MAINTAINERS
index 9fd258fad..e8c81656f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1055,7 +1055,7 @@  F: test/test/test_efd*
 F: examples/server_node_efd/
 F: doc/guides/sample_app_ug/server_node_efd.rst
 
-Hashes
+Hashes - EXPERIMENTAL
 M: Bruce Richardson <bruce.richardson@intel.com>
 M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
 F: lib/librte_hash/
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index a3e76684d..439251a7f 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1301,7 +1301,10 @@  rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
 }
 
 int32_t
-rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
+rte_hash_iterate_v1808(const struct rte_hash *h,
+	const void **key,
+	void **data,
+	uint32_t *next)
 {
 	uint32_t bucket_idx, idx, position;
 	struct rte_hash_key *next_key;
@@ -1344,3 +1347,132 @@  rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32
 
 	return position - 1;
 }
+VERSION_SYMBOL(rte_hash_iterate, _v1808, 18.08);
+
+int32_t
+rte_hash_iterate_v1811(const struct rte_hash *h,
+	const void **key,
+	void **data,
+	struct rte_hash_iterator_state *state)
+{
+	struct rte_hash_iterator_priv *it;
+	uint32_t bucket_idx, idx, position;
+	struct rte_hash_key *next_key;
+
+	RETURN_IF_TRUE(((h == NULL) || (key == NULL) ||
+		(data == NULL) || (state == NULL)), -EINVAL);
+
+	RTE_BUILD_BUG_ON(sizeof(struct rte_hash_iterator_priv) >
+		sizeof(struct rte_hash_iterator_state));
+
+	it = (struct rte_hash_iterator_priv *)state;
+	if (it->next == 0)
+		it->total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
+
+	/* Out of bounds */
+	if (it->next >= it->total_entries)
+		return -ENOENT;
+
+	/* Calculate bucket and index of current iterator */
+	bucket_idx = it->next / RTE_HASH_BUCKET_ENTRIES;
+	idx = it->next % RTE_HASH_BUCKET_ENTRIES;
+
+	__hash_rw_reader_lock(h);
+	/* If current position is empty, go to the next one */
+	while (h->buckets[bucket_idx].key_idx[idx] == EMPTY_SLOT) {
+		it->next++;
+		/* End of table */
+		if (it->next == it->total_entries) {
+			__hash_rw_reader_unlock(h);
+			return -ENOENT;
+		}
+		bucket_idx = it->next / RTE_HASH_BUCKET_ENTRIES;
+		idx = it->next % RTE_HASH_BUCKET_ENTRIES;
+	}
+	/* Get position of entry in key table */
+	position = h->buckets[bucket_idx].key_idx[idx];
+	next_key = (struct rte_hash_key *) ((char *)h->key_store +
+				position * h->key_entry_size);
+	/* Return key and data */
+	*key = next_key->key;
+	*data = next_key->pdata;
+
+	__hash_rw_reader_unlock(h);
+
+	/* Increment iterator */
+	it->next++;
+
+	return position - 1;
+}
+MAP_STATIC_SYMBOL(int32_t rte_hash_iterate(const struct rte_hash *h,
+	const void **key, void **data, struct rte_hash_iterator_state *state),
+	rte_hash_iterate_v1811);
+
+int32_t __rte_experimental
+rte_hash_iterate_conflict_entries_with_hash(struct rte_hash *h,
+	const void **key,
+	void **data,
+	hash_sig_t sig,
+	struct rte_hash_iterator_state *state)
+{
+	struct rte_hash_iterator_conflict_entries_priv *it;
+
+	RETURN_IF_TRUE(((h == NULL) || (key == NULL) ||
+		(data == NULL) || (state == NULL)), -EINVAL);
+
+	RTE_BUILD_BUG_ON(sizeof(
+		struct rte_hash_iterator_conflict_entries_priv) >
+		sizeof(struct rte_hash_iterator_state));
+
+	it = (struct rte_hash_iterator_conflict_entries_priv *)state;
+	if (it->vnext == 0) {
+		/*
+		 * Get the primary bucket index given
+		 * the precomputed hash value.
+		 */
+		it->primary_bidx = sig & h->bucket_bitmask;
+		/*
+		 * Get the secondary bucket index given
+		 * the precomputed hash value.
+		 */
+		it->secondary_bidx =
+			rte_hash_secondary_hash(sig) & h->bucket_bitmask;
+	}
+
+	while (it->vnext < RTE_HASH_BUCKET_ENTRIES * 2) {
+		uint32_t bidx = it->vnext < RTE_HASH_BUCKET_ENTRIES ?
+			it->primary_bidx : it->secondary_bidx;
+		uint32_t next = it->vnext & (RTE_HASH_BUCKET_ENTRIES - 1);
+		uint32_t position;
+		struct rte_hash_key *next_key;
+
+		RTE_BUILD_BUG_ON(!RTE_IS_POWER_OF_2(RTE_HASH_BUCKET_ENTRIES));
+		__hash_rw_reader_lock(h);
+		position = h->buckets[bidx].key_idx[next];
+
+		/* Increment iterator. */
+		it->vnext++;
+
+		/*
+		 * The test below is unlikely because this iterator is meant
+		 * to be used after a failed insert.
+		 */
+		if (unlikely(position == EMPTY_SLOT)) {
+			__hash_rw_reader_unlock(h);
+			continue;
+		}
+
+		/* Get the entry in key table. */
+		next_key = (struct rte_hash_key *) ((char *)h->key_store +
+				position * h->key_entry_size);
+		/* Return key and data. */
+		*key = next_key->key;
+		*data = next_key->pdata;
+
+		__hash_rw_reader_unlock(h);
+
+		return position - 1;
+	}
+
+	return -ENOENT;
+}
diff --git a/lib/librte_hash/rte_cuckoo_hash.h b/lib/librte_hash/rte_cuckoo_hash.h
index b43f467d5..70297b16d 100644
--- a/lib/librte_hash/rte_cuckoo_hash.h
+++ b/lib/librte_hash/rte_cuckoo_hash.h
@@ -195,4 +195,15 @@  struct queue_node {
 	int prev_slot;               /* Parent(slot) in search path */
 };
 
+struct rte_hash_iterator_priv {
+	uint32_t next;
+	uint32_t total_entries;
+};
+
+struct rte_hash_iterator_conflict_entries_priv {
+	uint32_t   vnext;
+	uint32_t   primary_bidx;
+	uint32_t   secondary_bidx;
+};
+
 #endif
diff --git a/lib/librte_hash/rte_hash.h b/lib/librte_hash/rte_hash.h
index 9e7d9315f..43f6d8b88 100644
--- a/lib/librte_hash/rte_hash.h
+++ b/lib/librte_hash/rte_hash.h
@@ -14,6 +14,8 @@ 
 #include <stdint.h>
 #include <stddef.h>
 
+#include <rte_compat.h>
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -37,6 +39,9 @@  extern "C" {
 /** Flag to support reader writer concurrency */
 #define RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY 0x04
 
+/** Size of the hash table iterator state structure */
+#define RTE_HASH_ITERATOR_STATE_SIZE 64
+
 /** Signature of key that is stored internally. */
 typedef uint32_t hash_sig_t;
 
@@ -64,6 +69,16 @@  struct rte_hash_parameters {
 /** @internal A hash table structure. */
 struct rte_hash;
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @internal A hash table iterator state structure.
+ */
+struct rte_hash_iterator_state {
+	uint8_t space[RTE_HASH_ITERATOR_STATE_SIZE];
+} __rte_cache_aligned;
+
 /**
  * Create a new hash table.
  *
@@ -443,6 +458,9 @@  rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
 		      uint32_t num_keys, int32_t *positions);
 
 /**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
  * Iterate through the hash table, returning key-value pairs.
  *
  * @param h
@@ -453,16 +471,61 @@  rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
  * @param data
  *   Output containing the data associated with key.
  *   Returns NULL if data was not stored.
- * @param next
- *   Pointer to iterator. Should be 0 to start iterating the hash table.
- *   Iterator is incremented after each call of this function.
+ * @param state
+ *   Pointer to the iterator state.
  * @return
  *   Position where key was stored, if successful.
  *   - -EINVAL if the parameters are invalid.
  *   - -ENOENT if end of the hash table.
  */
 int32_t
-rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next);
+rte_hash_iterate(const struct rte_hash *h,
+	const void **key,
+	void **data,
+	struct rte_hash_iterator_state *state);
+
+int32_t
+rte_hash_iterate_v1808(const struct rte_hash *h,
+	const void **key,
+	void **data,
+	uint32_t *next);
+
+int32_t
+rte_hash_iterate_v1811(const struct rte_hash *h,
+	const void **key,
+	void **data,
+	struct rte_hash_iterator_state *state);
+BIND_DEFAULT_SYMBOL(rte_hash_iterate, _v1811, 18.11);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Iterate over entries that conflict with a given hash.
+ *
+ * @param h
+ *   Hash table to iterate.
+ * @param key
+ *   Output containing the key at where the iterator is currently pointing.
+ * @param data
+ *   Output containing the data associated with key.
+ *   Returns NULL if data was not stored.
+ * @param sig
+ *   Precomputed hash value for the conflict entry.
+ * @param state
+ *   Pointer to the iterator state.
+ * @return
+ *   Position where key was stored, if successful.
+ *   - -EINVAL if the parameters are invalid.
+ *   - -ENOENT if there are no more conflicting entries.
+ */
+int32_t __rte_experimental
+rte_hash_iterate_conflict_entries_with_hash(struct rte_hash *h,
+	const void **key,
+	void **data,
+	hash_sig_t sig,
+	struct rte_hash_iterator_state *state);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/librte_hash/rte_hash_version.map b/lib/librte_hash/rte_hash_version.map
index e216ac8e2..b1bb5cb02 100644
--- a/lib/librte_hash/rte_hash_version.map
+++ b/lib/librte_hash/rte_hash_version.map
@@ -53,3 +53,17 @@  DPDK_18.08 {
 	rte_hash_count;
 
 } DPDK_16.07;
+
+DPDK_18.11 {
+	global:
+
+	rte_hash_iterate;
+
+} DPDK_18.08;
+
+EXPERIMENTAL {
+	global:
+
+	rte_hash_iterate_conflict_entries_with_hash;
+
+};
diff --git a/test/test/test_hash.c b/test/test/test_hash.c
index b3db9fd10..a9691e5d5 100644
--- a/test/test/test_hash.c
+++ b/test/test/test_hash.c
@@ -1170,8 +1170,8 @@  static int test_hash_iteration(void)
 	void *next_data;
 	void *data[NUM_ENTRIES];
 	unsigned added_keys;
-	uint32_t iter = 0;
 	int ret = 0;
+	struct rte_hash_iterator_state state;
 
 	ut_params.entries = NUM_ENTRIES;
 	ut_params.name = "test_hash_iteration";
@@ -1190,8 +1190,10 @@  static int test_hash_iteration(void)
 			break;
 	}
 
+	memset(&state, 0, sizeof(state));
+
 	/* Iterate through the hash table */
-	while (rte_hash_iterate(handle, &next_key, &next_data, &iter) >= 0) {
+	while (rte_hash_iterate(handle, &next_key, &next_data, &state) >= 0) {
 		/* Search for the key in the list of keys added */
 		for (i = 0; i < NUM_ENTRIES; i++) {
 			if (memcmp(next_key, keys[i], ut_params.key_len) == 0) {
diff --git a/test/test/test_hash_multiwriter.c b/test/test/test_hash_multiwriter.c
index 6a3eb10bd..63c0cd8d0 100644
--- a/test/test/test_hash_multiwriter.c
+++ b/test/test/test_hash_multiwriter.c
@@ -4,6 +4,7 @@ 
 
 #include <inttypes.h>
 #include <locale.h>
+#include <string.h>
 
 #include <rte_cycles.h>
 #include <rte_hash.h>
@@ -125,12 +126,15 @@  test_hash_multiwriter(void)
 
 	const void *next_key;
 	void *next_data;
-	uint32_t iter = 0;
 
 	uint32_t duplicated_keys = 0;
 	uint32_t lost_keys = 0;
 	uint32_t count;
 
+	struct rte_hash_iterator_state state;
+
+	memset(&state, 0, sizeof(state));
+
 	snprintf(name, 32, "test%u", calledCount++);
 	hash_params.name = name;
 
@@ -203,7 +207,7 @@  test_hash_multiwriter(void)
 		goto err3;
 	}
 
-	while (rte_hash_iterate(handle, &next_key, &next_data, &iter) >= 0) {
+	while (rte_hash_iterate(handle, &next_key, &next_data, &state) >= 0) {
 		/* Search for the key in the list of keys added .*/
 		i = *(const uint32_t *)next_key;
 		tbl_multiwriter_test_params.found[i]++;
diff --git a/test/test/test_hash_readwrite.c b/test/test/test_hash_readwrite.c
index 55ae33d80..f9279e21e 100644
--- a/test/test/test_hash_readwrite.c
+++ b/test/test/test_hash_readwrite.c
@@ -166,18 +166,21 @@  test_hash_readwrite_functional(int use_htm)
 	unsigned int i;
 	const void *next_key;
 	void *next_data;
-	uint32_t iter = 0;
 
 	uint32_t duplicated_keys = 0;
 	uint32_t lost_keys = 0;
 	int use_jhash = 1;
 
+	struct rte_hash_iterator_state state;
+
 	rte_atomic64_init(&gcycles);
 	rte_atomic64_clear(&gcycles);
 
 	rte_atomic64_init(&ginsertions);
 	rte_atomic64_clear(&ginsertions);
 
+	memset(&state, 0, sizeof(state));
+
 	if (init_params(use_htm, use_jhash) != 0)
 		goto err;
 
@@ -196,7 +199,7 @@  test_hash_readwrite_functional(int use_htm)
 	rte_eal_mp_wait_lcore();
 
 	while (rte_hash_iterate(tbl_rw_test_param.h, &next_key,
-			&next_data, &iter) >= 0) {
+			&next_data, &state) >= 0) {
 		/* Search for the key in the list of keys added .*/
 		i = *(const uint32_t *)next_key;
 		tbl_rw_test_param.found[i]++;
@@ -315,9 +318,10 @@  test_hash_readwrite_perf(struct perf *perf_results, int use_htm,
 
 	const void *next_key;
 	void *next_data;
-	uint32_t iter = 0;
 	int use_jhash = 0;
 
+	struct rte_hash_iterator_state state;
+
 	uint32_t duplicated_keys = 0;
 	uint32_t lost_keys = 0;
 
@@ -333,6 +337,8 @@  test_hash_readwrite_perf(struct perf *perf_results, int use_htm,
 	rte_atomic64_init(&gwrite_cycles);
 	rte_atomic64_clear(&gwrite_cycles);
 
+	memset(&state, 0, sizeof(state));
+
 	if (init_params(use_htm, use_jhash) != 0)
 		goto err;
 
@@ -485,7 +491,7 @@  test_hash_readwrite_perf(struct perf *perf_results, int use_htm,
 		rte_eal_mp_wait_lcore();
 
 		while (rte_hash_iterate(tbl_rw_test_param.h,
-				&next_key, &next_data, &iter) >= 0) {
+				&next_key, &next_data, &state) >= 0) {
 			/* Search for the key in the list of keys added .*/
 			i = *(const uint32_t *)next_key;
 			tbl_rw_test_param.found[i]++;