From patchwork Fri Jun 30 17:09:26 2023
X-Patchwork-Submitter: Vladimir Medvedkin
X-Patchwork-Id: 129167
X-Patchwork-Delegate: thomas@monjalon.net
From: Vladimir Medvedkin
To: dev@dpdk.org
Cc: konstantin.v.ananyev@yandex.ru, stable@dpdk.org
Subject: [PATCH v2 1/2] hash: fix reading unaligned bits implementation
Date: Fri, 30 Jun 2023 17:09:26 +0000
Message-Id: <20230630170927.32829-1-vladimir.medvedkin@intel.com>
In-Reply-To: <20230628191219.78753-1-vladimir.medvedkin@intel.com>
References: <20230628191219.78753-1-vladimir.medvedkin@intel.com>

Fixes: 28ebff11c2dc ("hash: add predictable RSS")
Cc: stable@dpdk.org

Acked-by: Konstantin Ananyev
Tested-by: Konstantin Ananyev
Signed-off-by: Vladimir Medvedkin
---
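Note for reviewers (illustration only, not applied by the patch): the standalone
sketch below mirrors read_unaligned_byte() and read_unaligned_bits() as they
behave after this fix, so the bit extraction can be exercised outside the
library. The byte-gathering lines that the patch leaves untouched are
reproduced from rte_thash.c for completeness, and the RTE_MAX/RTE_MIN clamping
of len is omitted.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Mirror of read_unaligned_byte() after the fix: return the byte that
 * starts at bit 'offset', counting bits MSB-first within each byte.
 */
static uint8_t
sketch_read_byte(const uint8_t *ptr, unsigned int offset)
{
	uint8_t ret = ptr[offset / CHAR_BIT];

	if (offset % CHAR_BIT) {
		ret <<= (offset % CHAR_BIT);
		ret |= ptr[(offset / CHAR_BIT) + 1] >>
			(CHAR_BIT - (offset % CHAR_BIT));
	}
	return ret;
}

/*
 * Mirror of read_unaligned_bits() after the fix: gather whole bytes,
 * then drop the surplus low-order bits once at the end when 'len' is
 * not a multiple of CHAR_BIT.
 */
static uint32_t
sketch_read_bits(const uint8_t *ptr, int len, int offset)
{
	uint32_t ret = 0;
	int shift;

	while (len > 0) {
		ret <<= CHAR_BIT;
		ret |= sketch_read_byte(ptr, offset);
		offset += CHAR_BIT;
		len -= CHAR_BIT;
	}
	/* after the loop, -len is the number of extra bits gathered */
	shift = (len == 0) ? 0 : (CHAR_BIT - ((len + CHAR_BIT) % CHAR_BIT));
	return ret >> shift;
}

int
main(void)
{
	const uint8_t data[] = { 0xab, 0xcd, 0xef };

	/* 12 bits starting at bit 4: 0xb from the first byte, then 0xcd */
	printf("%#x\n", sketch_read_bits(data, 12, 4));	/* prints 0xbcd */
	return 0;
}
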
 lib/hash/rte_thash.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/lib/hash/rte_thash.c b/lib/hash/rte_thash.c
index 0249883b8d..2228af576b 100644
--- a/lib/hash/rte_thash.c
+++ b/lib/hash/rte_thash.c
@@ -670,7 +670,7 @@ rte_thash_get_gfni_matrices(struct rte_thash_ctx *ctx)
 }
 
 static inline uint8_t
-read_unaligned_byte(uint8_t *ptr, unsigned int len, unsigned int offset)
+read_unaligned_byte(uint8_t *ptr, unsigned int offset)
 {
 	uint8_t ret = 0;
 
@@ -681,13 +681,14 @@ read_unaligned_byte(uint8_t *ptr, unsigned int len, unsigned int offset)
 			(CHAR_BIT - (offset % CHAR_BIT));
 	}
 
-	return ret >> (CHAR_BIT - len);
+	return ret;
 }
 
 static inline uint32_t
 read_unaligned_bits(uint8_t *ptr, int len, int offset)
 {
 	uint32_t ret = 0;
+	int shift;
 
 	len = RTE_MAX(len, 0);
 	len = RTE_MIN(len, (int)(sizeof(uint32_t) * CHAR_BIT));
@@ -695,13 +696,14 @@ read_unaligned_bits(uint8_t *ptr, int len, int offset)
 	while (len > 0) {
 		ret <<= CHAR_BIT;
 
-		ret |= read_unaligned_byte(ptr, RTE_MIN(len, CHAR_BIT),
-			offset);
+		ret |= read_unaligned_byte(ptr, offset);
 		offset += CHAR_BIT;
 		len -= CHAR_BIT;
 	}
 
-	return ret;
+	shift = (len == 0) ? 0 :
+		(CHAR_BIT - ((len + CHAR_BIT) % CHAR_BIT));
+	return ret >> shift;
 }
 
 /* returns mask for len bits with given offset inside byte */

From patchwork Fri Jun 30 17:09:27 2023
X-Patchwork-Submitter: Vladimir Medvedkin
X-Patchwork-Id: 129168
X-Patchwork-Delegate: thomas@monjalon.net
From: Vladimir Medvedkin
To: dev@dpdk.org
Cc: konstantin.v.ananyev@yandex.ru
Subject: [PATCH v2 2/2] test: add additional tests for thash library
Date: Fri, 30 Jun 2023 17:09:27 +0000
Message-Id: <20230630170927.32829-2-vladimir.medvedkin@intel.com>
In-Reply-To: <20230630170927.32829-1-vladimir.medvedkin@intel.com>
References: <20230628191219.78753-1-vladimir.medvedkin@intel.com>
 <20230630170927.32829-1-vladimir.medvedkin@intel.com>

Adds tests comparing the results of applying the output of
rte_thash_get_complement() to the tuple with the result of calling
rte_thash_adjust_tuple().

Suggested-by: Konstantin Ananyev
Signed-off-by: Konstantin Ananyev
Signed-off-by: Vladimir Medvedkin
---
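Note for reviewers (illustration only, not applied by the patch): the sketch
below condenses what the new cases verify, using the public thash API. XOR-ing
the value returned by rte_thash_get_complement() into the helper's part of the
tuple (method #1 in the test) makes the low reta_sz bits of the Toeplitz hash
equal the desired value, and rte_thash_adjust_tuple() (method #2) performs the
same adjustment in one call. The EX_* names and constants are invented for the
example; EAL arguments and error handling are left out.

#include <limits.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_eal.h>
#include <rte_thash.h>

#define EX_TUPLE_SZ	(RTE_THASH_V4_L4_LEN * 4)	/* IPv4 + L4 ports, 12 bytes */
#define EX_RETA_SZ	8				/* log2 of the RETA size */
#define EX_HASH_MSK	((1U << EX_RETA_SZ) - 1)

/* Toeplitz hash of a tuple kept as big-endian bytes, as in the test. */
static uint32_t
tuple_hash(const uint8_t tuple[EX_TUPLE_SZ], const uint8_t *key)
{
	uint32_t i, tmp[EX_TUPLE_SZ / sizeof(uint32_t)];

	for (i = 0; i < RTE_DIM(tmp); i++) {
		memcpy(&tmp[i], &tuple[i * sizeof(uint32_t)], sizeof(tmp[i]));
		tmp[i] = rte_be_to_cpu_32(tmp[i]);
	}
	return rte_softrss(tmp, RTE_DIM(tmp), key);
}

int
main(int argc, char **argv)
{
	struct rte_thash_ctx *ctx;
	struct rte_thash_subtuple_helper *h;
	uint8_t tuple[EX_TUPLE_SZ];
	const uint8_t *key;
	uint32_t hash, adj, desired = 0x2a;	/* any value < 2^EX_RETA_SZ */
	uint16_t port;
	const size_t dport_off = offsetof(union rte_thash_tuple, v4.dport);

	if (rte_eal_init(argc, argv) < 0)
		return 1;

	ctx = rte_thash_init_ctx("example", 40, EX_RETA_SZ, NULL, 0);
	/* helper spanning the 16-bit destination port of an IPv4 tuple */
	rte_thash_add_helper(ctx, "dport", sizeof(uint16_t) * CHAR_BIT,
		dport_off * CHAR_BIT);
	h = rte_thash_get_helper(ctx, "dport");
	key = rte_thash_get_key(ctx);

	memset(tuple, 0xab, sizeof(tuple));
	hash = tuple_hash(tuple, key);
	adj = rte_thash_get_complement(h, hash, desired);

	/* method #1 from the test: fold the complement into the port bytes */
	memcpy(&port, &tuple[dport_off], sizeof(port));
	port ^= rte_cpu_to_be_16((uint16_t)adj);
	memcpy(&tuple[dport_off], &port, sizeof(port));

	hash = tuple_hash(tuple, key);
	printf("hash lsb %#x, desired %#x\n", hash & EX_HASH_MSK, desired);

	/* method #2: rte_thash_adjust_tuple() would do the same in one call */

	rte_thash_free_ctx(ctx);
	return 0;
}
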
 app/test/test_thash.c | 132 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 132 insertions(+)

diff --git a/app/test/test_thash.c b/app/test/test_thash.c
index 62ba4a9528..53d9611e18 100644
--- a/app/test/test_thash.c
+++ b/app/test/test_thash.c
@@ -804,6 +804,137 @@ test_adjust_tuple(void)
 	return TEST_SUCCESS;
 }
 
+static uint32_t
+calc_tuple_hash(const uint8_t tuple[TUPLE_SZ], const uint8_t *key)
+{
+	uint32_t i, hash;
+	uint32_t tmp[TUPLE_SZ / sizeof(uint32_t)];
+
+	for (i = 0; i < RTE_DIM(tmp); i++)
+		tmp[i] = rte_be_to_cpu_32(
+			*(const uint32_t *)&tuple[i * sizeof(uint32_t)]);
+
+	hash = rte_softrss(tmp, RTE_DIM(tmp), key);
+	return hash;
+}
+
+static int
+check_adj_tuple(const uint8_t tuple[TUPLE_SZ], const uint8_t *key,
+	uint32_t dhv, uint32_t ohv, uint32_t adjust, uint32_t reta_sz,
+	const char *prefix)
+{
+	uint32_t hash, hashlsb;
+
+	hash = calc_tuple_hash(tuple, key);
+	hashlsb = hash & HASH_MSK(reta_sz);
+
+	printf("%s(%s) for tuple:\n", __func__, prefix);
+	rte_memdump(stdout, NULL, tuple, TUPLE_SZ);
+	printf("\treta_sz: %u,\n"
+		"\torig hash: %#x,\n"
+		"\tdesired: %#x,\n"
+		"\tadjust: %#x,\n"
+		"\tactual: %#x,\n",
+		reta_sz, ohv, dhv, adjust, hashlsb);
+
+	if (dhv == hashlsb) {
+		printf("\t***Succeeded\n");
+		return 0;
+	}
+
+	printf("\t***Failed\n");
+	return -1;
+}
+
+static int
+test_adjust_tuple_mb(uint32_t reta_sz, uint32_t bofs)
+{
+	struct rte_thash_ctx *ctx;
+	struct rte_thash_subtuple_helper *h;
+	const int key_len = 40;
+	const uint8_t *new_key;
+	uint8_t orig_tuple[TUPLE_SZ];
+	uint8_t tuple_1[TUPLE_SZ];
+	uint8_t tuple_2[TUPLE_SZ];
+	uint32_t orig_hash;
+	int rc, ret;
+	uint32_t adj_bits;
+	unsigned int random = rte_rand();
+	unsigned int desired_value = random & HASH_MSK(reta_sz);
+
+	const uint32_t h_offset = offsetof(union rte_thash_tuple, v4.dport) * CHAR_BIT;
+	const uint32_t h_size = sizeof(uint16_t) * CHAR_BIT - bofs;
+
+	printf("===%s(reta_sz=%u,bofs=%u)===\n", __func__, reta_sz, bofs);
+
+	memset(orig_tuple, 0xab, sizeof(orig_tuple));
+
+	ctx = rte_thash_init_ctx("test", key_len, reta_sz, NULL, 0);
+	RTE_TEST_ASSERT(ctx != NULL, "can not create thash ctx\n");
+
+	ret = rte_thash_add_helper(ctx, "test", h_size, h_offset);
+	RTE_TEST_ASSERT(ret == 0, "can not add helper, ret %d\n", ret);
+
+	new_key = rte_thash_get_key(ctx);
+
+	h = rte_thash_get_helper(ctx, "test");
+
+	orig_hash = calc_tuple_hash(orig_tuple, new_key);
+
+	adj_bits = rte_thash_get_complement(h, orig_hash, desired_value);
+
+	/* use method #1, update tuple manually */
+	memcpy(tuple_1, orig_tuple, sizeof(tuple_1));
+	{
+		uint16_t nv, ov, *p;
+
+		p = (uint16_t *)(tuple_1 + h_offset / CHAR_BIT);
+		ov = p[0];
+		nv = ov ^ rte_cpu_to_be_16(adj_bits << bofs);
+		printf("%s#%d: ov=%#hx, nv=%#hx, adj=%#x;\n",
+			__func__, __LINE__, ov, nv, adj_bits);
+		p[0] = nv;
+	}
+
+	rc = check_adj_tuple(tuple_1, new_key, desired_value, orig_hash,
+		adj_bits, reta_sz, "method #1");
+	if (h_offset % CHAR_BIT == 0)
+		ret |= rc;
+
+	/* use method #2, use library function to adjust tuple */
+	memcpy(tuple_2, orig_tuple, sizeof(tuple_2));
+
+	rte_thash_adjust_tuple(ctx, h, tuple_2, sizeof(tuple_2),
+		desired_value, 1, NULL, NULL);
+	ret |= check_adj_tuple(tuple_2, new_key, desired_value, orig_hash,
+		adj_bits, reta_sz, "method #2");
+
+	rte_thash_free_ctx(ctx);
+
+	ret |= memcmp(tuple_1, tuple_2, sizeof(tuple_1));
+
+	printf("%s EXIT=======\n", __func__);
+	return ret;
+}
+
+static int
+test_adjust_tuple_mult_reta(void)
+{
+	uint32_t i, j, np, nt;
+
+	nt = 0, np = 0;
+	for (i = 0; i < CHAR_BIT; i++) {
+		for (j = 6; j <= RTE_THASH_RETA_SZ_MAX - i; j++) {
+			np += (test_adjust_tuple_mb(j, i) == 0);
+			nt++;
+		}
+	}
+
+	printf("%s: tests executed: %u, test passed: %u\n", __func__, nt, np);
+	RTE_TEST_ASSERT(nt == np, "%u subtests failed", nt - np);
+	return TEST_SUCCESS;
+}
+
 static struct unit_test_suite thash_tests = {
 	.suite_name = "thash autotest",
 	.setup = NULL,
@@ -824,6 +955,7 @@ static struct unit_test_suite thash_tests = {
 		TEST_CASE(test_predictable_rss_min_seq),
 		TEST_CASE(test_predictable_rss_multirange),
 		TEST_CASE(test_adjust_tuple),
+		TEST_CASE(test_adjust_tuple_mult_reta),
 		TEST_CASES_END()
 	}
 };
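
The new case is registered in the existing "thash autotest" suite above, so it
runs together with the other thash cases; from the dpdk-test application it can
be invoked with the thash_autotest command.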