From patchwork Tue Oct 27 15:11:24 2020
X-Patchwork-Submitter: Vladimir Medvedkin
X-Patchwork-Id: 82343
X-Patchwork-Delegate: david.marchand@redhat.com
From: Vladimir Medvedkin
To: dev@dpdk.org
Cc: david.marchand@redhat.com, jerinj@marvell.com, mdr@ashroe.eu, thomas@monjalon.net, konstantin.ananyev@intel.com, bruce.richardson@intel.com, ciara.power@intel.com
Date: Tue, 27 Oct 2020 15:11:24 +0000
Subject: [dpdk-dev] [PATCH v15 1/8] fib: make lookup function type configurable

Add a type argument to dir24_8_get_lookup_fn(). It now supports three different lookup implementations:
RTE_FIB_LOOKUP_DIR24_8_SCALAR_MACRO
RTE_FIB_LOOKUP_DIR24_8_SCALAR_INLINE
RTE_FIB_LOOKUP_DIR24_8_SCALAR_UNI

Add the new rte_fib_select_lookup() so that the user can change the lookup function type at runtime.
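
For reference, a minimal usage sketch of the API introduced here (illustrative only, not part of the patch): the FIB name, table sizes and the 192.0.2.0/24 route are arbitrary choices, and rte_eal_init() is assumed to have been called already.

#include <inttypes.h>
#include <stdio.h>

#include <rte_ip.h>
#include <rte_fib.h>

static int
fib_lookup_demo(void)
{
	/* Illustrative DIR24_8 configuration; sizes are arbitrary. */
	struct rte_fib_conf conf = {
		.type = RTE_FIB_DIR24_8,
		.default_nh = 0,
		.max_routes = 1 << 16,
		.dir24_8 = {
			.nh_sz = RTE_FIB_DIR24_8_4B,
			.num_tbl8 = 1 << 10,
		},
	};
	uint32_t ips[1] = { RTE_IPV4(192, 0, 2, 1) };
	uint64_t next_hops[1];
	struct rte_fib *fib;

	fib = rte_fib_create("demo_fib", -1 /* any socket */, &conf);
	if (fib == NULL)
		return -1;

	/* 192.0.2.0/24 -> next hop 100 */
	rte_fib_add(fib, RTE_IPV4(192, 0, 2, 0), 24, 100);

	/* Switch from the default (macro based) scalar lookup at runtime. */
	if (rte_fib_select_lookup(fib, RTE_FIB_LOOKUP_DIR24_8_SCALAR_UNI) < 0)
		printf("requested lookup type not supported, keeping current one\n");

	rte_fib_lookup_bulk(fib, ips, next_hops, 1);
	printf("next hop: %" PRIu64 "\n", next_hops[0]);

	rte_fib_free(fib);
	return 0;
}

If the requested type is not available for the FIB's dataplane structure, rte_fib_select_lookup() returns -EINVAL and the previously selected lookup function stays in place.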
Signed-off-by: Vladimir Medvedkin Acked-by: Konstantin Ananyev --- lib/librte_fib/dir24_8.c | 84 ++++++++++++++++++++++++++++------------------ lib/librte_fib/dir24_8.h | 2 +- lib/librte_fib/rte_fib.c | 21 +++++++++++- lib/librte_fib/rte_fib.h | 32 ++++++++++++++++++ lib/librte_fib/version.map | 1 + 5 files changed, 106 insertions(+), 34 deletions(-) diff --git a/lib/librte_fib/dir24_8.c b/lib/librte_fib/dir24_8.c index c9dce3c..ab5a1b2 100644 --- a/lib/librte_fib/dir24_8.c +++ b/lib/librte_fib/dir24_8.c @@ -45,13 +45,6 @@ struct dir24_8_tbl { #define ROUNDUP(x, y) RTE_ALIGN_CEIL(x, (1 << (32 - y))) -enum lookup_type { - MACRO, - INLINE, - UNI -}; -enum lookup_type test_lookup = MACRO; - static inline void * get_tbl24_p(struct dir24_8_tbl *dp, uint32_t ip, uint8_t nh_sz) { @@ -252,35 +245,62 @@ dir24_8_lookup_bulk_uni(void *p, const uint32_t *ips, } } +static inline rte_fib_lookup_fn_t +get_scalar_fn(enum rte_fib_dir24_8_nh_sz nh_sz) +{ + switch (nh_sz) { + case RTE_FIB_DIR24_8_1B: + return dir24_8_lookup_bulk_1b; + case RTE_FIB_DIR24_8_2B: + return dir24_8_lookup_bulk_2b; + case RTE_FIB_DIR24_8_4B: + return dir24_8_lookup_bulk_4b; + case RTE_FIB_DIR24_8_8B: + return dir24_8_lookup_bulk_8b; + default: + return NULL; + } +} + +static inline rte_fib_lookup_fn_t +get_scalar_fn_inlined(enum rte_fib_dir24_8_nh_sz nh_sz) +{ + switch (nh_sz) { + case RTE_FIB_DIR24_8_1B: + return dir24_8_lookup_bulk_0; + case RTE_FIB_DIR24_8_2B: + return dir24_8_lookup_bulk_1; + case RTE_FIB_DIR24_8_4B: + return dir24_8_lookup_bulk_2; + case RTE_FIB_DIR24_8_8B: + return dir24_8_lookup_bulk_3; + default: + return NULL; + } +} + rte_fib_lookup_fn_t -dir24_8_get_lookup_fn(struct rte_fib_conf *fib_conf) +dir24_8_get_lookup_fn(void *p, enum rte_fib_lookup_type type) { - enum rte_fib_dir24_8_nh_sz nh_sz = fib_conf->dir24_8.nh_sz; + enum rte_fib_dir24_8_nh_sz nh_sz; + struct dir24_8_tbl *dp = p; - if (test_lookup == MACRO) { - switch (nh_sz) { - case RTE_FIB_DIR24_8_1B: - return dir24_8_lookup_bulk_1b; - case RTE_FIB_DIR24_8_2B: - return dir24_8_lookup_bulk_2b; - case RTE_FIB_DIR24_8_4B: - return dir24_8_lookup_bulk_4b; - case RTE_FIB_DIR24_8_8B: - return dir24_8_lookup_bulk_8b; - } - } else if (test_lookup == INLINE) { - switch (nh_sz) { - case RTE_FIB_DIR24_8_1B: - return dir24_8_lookup_bulk_0; - case RTE_FIB_DIR24_8_2B: - return dir24_8_lookup_bulk_1; - case RTE_FIB_DIR24_8_4B: - return dir24_8_lookup_bulk_2; - case RTE_FIB_DIR24_8_8B: - return dir24_8_lookup_bulk_3; - } - } else + if (dp == NULL) + return NULL; + + nh_sz = dp->nh_sz; + + switch (type) { + case RTE_FIB_LOOKUP_DIR24_8_SCALAR_MACRO: + return get_scalar_fn(nh_sz); + case RTE_FIB_LOOKUP_DIR24_8_SCALAR_INLINE: + return get_scalar_fn_inlined(nh_sz); + case RTE_FIB_LOOKUP_DIR24_8_SCALAR_UNI: return dir24_8_lookup_bulk_uni; + default: + return NULL; + } + return NULL; } diff --git a/lib/librte_fib/dir24_8.h b/lib/librte_fib/dir24_8.h index 1ec437c..6c43f67 100644 --- a/lib/librte_fib/dir24_8.h +++ b/lib/librte_fib/dir24_8.h @@ -22,7 +22,7 @@ void dir24_8_free(void *p); rte_fib_lookup_fn_t -dir24_8_get_lookup_fn(struct rte_fib_conf *conf); +dir24_8_get_lookup_fn(void *p, enum rte_fib_lookup_type type); int dir24_8_modify(struct rte_fib *fib, uint32_t ip, uint8_t depth, diff --git a/lib/librte_fib/rte_fib.c b/lib/librte_fib/rte_fib.c index e090808..2b5fdf5 100644 --- a/lib/librte_fib/rte_fib.c +++ b/lib/librte_fib/rte_fib.c @@ -107,7 +107,8 @@ init_dataplane(struct rte_fib *fib, __rte_unused int socket_id, fib->dp = dir24_8_create(dp_name, socket_id, conf); if 
(fib->dp == NULL) return -rte_errno; - fib->lookup = dir24_8_get_lookup_fn(conf); + fib->lookup = dir24_8_get_lookup_fn(fib->dp, + RTE_FIB_LOOKUP_DIR24_8_SCALAR_MACRO); fib->modify = dir24_8_modify; return 0; default: @@ -317,3 +318,21 @@ rte_fib_get_rib(struct rte_fib *fib) { return (fib == NULL) ? NULL : fib->rib; } + +int +rte_fib_select_lookup(struct rte_fib *fib, + enum rte_fib_lookup_type type) +{ + rte_fib_lookup_fn_t fn; + + switch (fib->type) { + case RTE_FIB_DIR24_8: + fn = dir24_8_get_lookup_fn(fib->dp, type); + if (fn == NULL) + return -EINVAL; + fib->lookup = fn; + return 0; + default: + return -EINVAL; + } +} diff --git a/lib/librte_fib/rte_fib.h b/lib/librte_fib/rte_fib.h index 84ee774..d46fedc 100644 --- a/lib/librte_fib/rte_fib.h +++ b/lib/librte_fib/rte_fib.h @@ -58,6 +58,21 @@ enum rte_fib_dir24_8_nh_sz { RTE_FIB_DIR24_8_8B }; +/** Type of lookup function implementation */ +enum rte_fib_lookup_type { + RTE_FIB_LOOKUP_DIR24_8_SCALAR_MACRO, + /**< Macro based lookup function */ + RTE_FIB_LOOKUP_DIR24_8_SCALAR_INLINE, + /**< + * Lookup implementation using inlined functions + * for different next hop sizes + */ + RTE_FIB_LOOKUP_DIR24_8_SCALAR_UNI + /**< + * Unified lookup function for all next hop sizes + */ +}; + /** FIB configuration structure */ struct rte_fib_conf { enum rte_fib_type type; /**< Type of FIB struct */ @@ -196,6 +211,23 @@ __rte_experimental struct rte_rib * rte_fib_get_rib(struct rte_fib *fib); +/** + * Set lookup function based on type + * + * @param fib + * FIB object handle + * @param type + * type of lookup function + * + * @return + * -EINVAL on failure + * 0 on success + */ +__rte_experimental +int +rte_fib_select_lookup(struct rte_fib *fib, + enum rte_fib_lookup_type type); + #ifdef __cplusplus } #endif diff --git a/lib/librte_fib/version.map b/lib/librte_fib/version.map index 9527417..5fd792a 100644 --- a/lib/librte_fib/version.map +++ b/lib/librte_fib/version.map @@ -9,6 +9,7 @@ EXPERIMENTAL { rte_fib_lookup_bulk; rte_fib_get_dp; rte_fib_get_rib; + rte_fib_select_lookup; rte_fib6_add; rte_fib6_create; From patchwork Tue Oct 27 15:11:25 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: Vladimir Medvedkin X-Patchwork-Id: 82344 X-Patchwork-Delegate: david.marchand@redhat.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 448E3A04B5; Tue, 27 Oct 2020 16:12:28 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 06DBE6CA1; Tue, 27 Oct 2020 16:11:52 +0100 (CET) Received: from mga09.intel.com (mga09.intel.com [134.134.136.24]) by dpdk.org (Postfix) with ESMTP id 06498594B for ; Tue, 27 Oct 2020 16:11:39 +0100 (CET) IronPort-SDR: oLeA0u0D9p7ipiwviM22bIttzp0mOdunLcmMyNDBAye1oK9b3gpJhaMUcXRLxPJytl4E82jyCj 0tn7tt/3Uiyw== X-IronPort-AV: E=McAfee;i="6000,8403,9786"; a="168225862" X-IronPort-AV: E=Sophos;i="5.77,424,1596524400"; d="scan'208";a="168225862" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by orsmga102.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 27 Oct 2020 08:11:39 -0700 IronPort-SDR: Qe5iIHP4cDFSIdIiasavBFLlmDuCrJLHdfQIbsTvJWbow6kI3+upQcL80U2fYFI9T9siNETZgB tPQ3Eg76RILw== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.77,424,1596524400"; d="scan'208";a="322963815" Received: from 
silpixa00400072.ir.intel.com ([10.237.222.213]) by orsmga006.jf.intel.com with ESMTP; 27 Oct 2020 08:11:37 -0700 From: Vladimir Medvedkin To: dev@dpdk.org Cc: david.marchand@redhat.com, jerinj@marvell.com, mdr@ashroe.eu, thomas@monjalon.net, konstantin.ananyev@intel.com, bruce.richardson@intel.com, ciara.power@intel.com Date: Tue, 27 Oct 2020 15:11:25 +0000 Message-Id: <463a051b13a7f873b909cd9147177fde17fb3af3.1603811281.git.vladimir.medvedkin@intel.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: References: In-Reply-To: References: MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v15 2/8] fib: move lookup definition into the header file X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Move dir24_8 table layout and lookup definition into the private header file. This is necessary for implementing a vectorized lookup function in a separate .с file. Signed-off-by: Vladimir Medvedkin Acked-by: Konstantin Ananyev --- lib/librte_fib/dir24_8.c | 225 +---------------------------------------------- lib/librte_fib/dir24_8.h | 224 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 225 insertions(+), 224 deletions(-) diff --git a/lib/librte_fib/dir24_8.c b/lib/librte_fib/dir24_8.c index ab5a1b2..87400fc 100644 --- a/lib/librte_fib/dir24_8.c +++ b/lib/librte_fib/dir24_8.c @@ -11,240 +11,17 @@ #include #include -#include #include #include -#include -#include #include +#include #include "dir24_8.h" #define DIR24_8_NAMESIZE 64 -#define DIR24_8_TBL24_NUM_ENT (1 << 24) -#define DIR24_8_TBL8_GRP_NUM_ENT 256U -#define DIR24_8_EXT_ENT 1 -#define DIR24_8_TBL24_MASK 0xffffff00 - -#define BITMAP_SLAB_BIT_SIZE_LOG2 6 -#define BITMAP_SLAB_BIT_SIZE (1 << BITMAP_SLAB_BIT_SIZE_LOG2) -#define BITMAP_SLAB_BITMASK (BITMAP_SLAB_BIT_SIZE - 1) - -struct dir24_8_tbl { - uint32_t number_tbl8s; /**< Total number of tbl8s */ - uint32_t rsvd_tbl8s; /**< Number of reserved tbl8s */ - uint32_t cur_tbl8s; /**< Current number of tbl8s */ - enum rte_fib_dir24_8_nh_sz nh_sz; /**< Size of nexthop entry */ - uint64_t def_nh; /**< Default next hop */ - uint64_t *tbl8; /**< tbl8 table. */ - uint64_t *tbl8_idxes; /**< bitmap containing free tbl8 idxes*/ - /* tbl24 table. 
*/ - __extension__ uint64_t tbl24[0] __rte_cache_aligned; -}; - #define ROUNDUP(x, y) RTE_ALIGN_CEIL(x, (1 << (32 - y))) -static inline void * -get_tbl24_p(struct dir24_8_tbl *dp, uint32_t ip, uint8_t nh_sz) -{ - return (void *)&((uint8_t *)dp->tbl24)[(ip & - DIR24_8_TBL24_MASK) >> (8 - nh_sz)]; -} - -static inline uint8_t -bits_in_nh(uint8_t nh_sz) -{ - return 8 * (1 << nh_sz); -} - -static inline uint64_t -get_max_nh(uint8_t nh_sz) -{ - return ((1ULL << (bits_in_nh(nh_sz) - 1)) - 1); -} - -static inline uint32_t -get_tbl24_idx(uint32_t ip) -{ - return ip >> 8; -} - -static inline uint32_t -get_tbl8_idx(uint32_t res, uint32_t ip) -{ - return (res >> 1) * DIR24_8_TBL8_GRP_NUM_ENT + (uint8_t)ip; -} - -static inline uint64_t -lookup_msk(uint8_t nh_sz) -{ - return ((1ULL << ((1 << (nh_sz + 3)) - 1)) << 1) - 1; -} - -static inline uint8_t -get_psd_idx(uint32_t val, uint8_t nh_sz) -{ - return val & ((1 << (3 - nh_sz)) - 1); -} - -static inline uint32_t -get_tbl_idx(uint32_t val, uint8_t nh_sz) -{ - return val >> (3 - nh_sz); -} - -static inline uint64_t -get_tbl24(struct dir24_8_tbl *dp, uint32_t ip, uint8_t nh_sz) -{ - return ((dp->tbl24[get_tbl_idx(get_tbl24_idx(ip), nh_sz)] >> - (get_psd_idx(get_tbl24_idx(ip), nh_sz) * - bits_in_nh(nh_sz))) & lookup_msk(nh_sz)); -} - -static inline uint64_t -get_tbl8(struct dir24_8_tbl *dp, uint32_t res, uint32_t ip, uint8_t nh_sz) -{ - return ((dp->tbl8[get_tbl_idx(get_tbl8_idx(res, ip), nh_sz)] >> - (get_psd_idx(get_tbl8_idx(res, ip), nh_sz) * - bits_in_nh(nh_sz))) & lookup_msk(nh_sz)); -} - -static inline int -is_entry_extended(uint64_t ent) -{ - return (ent & DIR24_8_EXT_ENT) == DIR24_8_EXT_ENT; -} - -#define LOOKUP_FUNC(suffix, type, bulk_prefetch, nh_sz) \ -static void dir24_8_lookup_bulk_##suffix(void *p, const uint32_t *ips, \ - uint64_t *next_hops, const unsigned int n) \ -{ \ - struct dir24_8_tbl *dp = (struct dir24_8_tbl *)p; \ - uint64_t tmp; \ - uint32_t i; \ - uint32_t prefetch_offset = \ - RTE_MIN((unsigned int)bulk_prefetch, n); \ - \ - for (i = 0; i < prefetch_offset; i++) \ - rte_prefetch0(get_tbl24_p(dp, ips[i], nh_sz)); \ - for (i = 0; i < (n - prefetch_offset); i++) { \ - rte_prefetch0(get_tbl24_p(dp, \ - ips[i + prefetch_offset], nh_sz)); \ - tmp = ((type *)dp->tbl24)[ips[i] >> 8]; \ - if (unlikely(is_entry_extended(tmp))) \ - tmp = ((type *)dp->tbl8)[(uint8_t)ips[i] + \ - ((tmp >> 1) * DIR24_8_TBL8_GRP_NUM_ENT)]; \ - next_hops[i] = tmp >> 1; \ - } \ - for (; i < n; i++) { \ - tmp = ((type *)dp->tbl24)[ips[i] >> 8]; \ - if (unlikely(is_entry_extended(tmp))) \ - tmp = ((type *)dp->tbl8)[(uint8_t)ips[i] + \ - ((tmp >> 1) * DIR24_8_TBL8_GRP_NUM_ENT)]; \ - next_hops[i] = tmp >> 1; \ - } \ -} \ - -LOOKUP_FUNC(1b, uint8_t, 5, 0) -LOOKUP_FUNC(2b, uint16_t, 6, 1) -LOOKUP_FUNC(4b, uint32_t, 15, 2) -LOOKUP_FUNC(8b, uint64_t, 12, 3) - -static inline void -dir24_8_lookup_bulk(struct dir24_8_tbl *dp, const uint32_t *ips, - uint64_t *next_hops, const unsigned int n, uint8_t nh_sz) -{ - uint64_t tmp; - uint32_t i; - uint32_t prefetch_offset = RTE_MIN(15U, n); - - for (i = 0; i < prefetch_offset; i++) - rte_prefetch0(get_tbl24_p(dp, ips[i], nh_sz)); - for (i = 0; i < (n - prefetch_offset); i++) { - rte_prefetch0(get_tbl24_p(dp, ips[i + prefetch_offset], - nh_sz)); - tmp = get_tbl24(dp, ips[i], nh_sz); - if (unlikely(is_entry_extended(tmp))) - tmp = get_tbl8(dp, tmp, ips[i], nh_sz); - - next_hops[i] = tmp >> 1; - } - for (; i < n; i++) { - tmp = get_tbl24(dp, ips[i], nh_sz); - if (unlikely(is_entry_extended(tmp))) - tmp = get_tbl8(dp, tmp, ips[i], 
nh_sz); - - next_hops[i] = tmp >> 1; - } -} - -static void -dir24_8_lookup_bulk_0(void *p, const uint32_t *ips, - uint64_t *next_hops, const unsigned int n) -{ - struct dir24_8_tbl *dp = (struct dir24_8_tbl *)p; - - dir24_8_lookup_bulk(dp, ips, next_hops, n, 0); -} - -static void -dir24_8_lookup_bulk_1(void *p, const uint32_t *ips, - uint64_t *next_hops, const unsigned int n) -{ - struct dir24_8_tbl *dp = (struct dir24_8_tbl *)p; - - dir24_8_lookup_bulk(dp, ips, next_hops, n, 1); -} - -static void -dir24_8_lookup_bulk_2(void *p, const uint32_t *ips, - uint64_t *next_hops, const unsigned int n) -{ - struct dir24_8_tbl *dp = (struct dir24_8_tbl *)p; - - dir24_8_lookup_bulk(dp, ips, next_hops, n, 2); -} - -static void -dir24_8_lookup_bulk_3(void *p, const uint32_t *ips, - uint64_t *next_hops, const unsigned int n) -{ - struct dir24_8_tbl *dp = (struct dir24_8_tbl *)p; - - dir24_8_lookup_bulk(dp, ips, next_hops, n, 3); -} - -static void -dir24_8_lookup_bulk_uni(void *p, const uint32_t *ips, - uint64_t *next_hops, const unsigned int n) -{ - struct dir24_8_tbl *dp = (struct dir24_8_tbl *)p; - uint64_t tmp; - uint32_t i; - uint32_t prefetch_offset = RTE_MIN(15U, n); - uint8_t nh_sz = dp->nh_sz; - - for (i = 0; i < prefetch_offset; i++) - rte_prefetch0(get_tbl24_p(dp, ips[i], nh_sz)); - for (i = 0; i < (n - prefetch_offset); i++) { - rte_prefetch0(get_tbl24_p(dp, ips[i + prefetch_offset], - nh_sz)); - tmp = get_tbl24(dp, ips[i], nh_sz); - if (unlikely(is_entry_extended(tmp))) - tmp = get_tbl8(dp, tmp, ips[i], nh_sz); - - next_hops[i] = tmp >> 1; - } - for (; i < n; i++) { - tmp = get_tbl24(dp, ips[i], nh_sz); - if (unlikely(is_entry_extended(tmp))) - tmp = get_tbl8(dp, tmp, ips[i], nh_sz); - - next_hops[i] = tmp >> 1; - } -} - static inline rte_fib_lookup_fn_t get_scalar_fn(enum rte_fib_dir24_8_nh_sz nh_sz) { diff --git a/lib/librte_fib/dir24_8.h b/lib/librte_fib/dir24_8.h index 6c43f67..bac65ee 100644 --- a/lib/librte_fib/dir24_8.h +++ b/lib/librte_fib/dir24_8.h @@ -6,6 +6,9 @@ #ifndef _DIR24_8_H_ #define _DIR24_8_H_ +#include +#include + /** * @file * DIR24_8 algorithm @@ -15,6 +18,227 @@ extern "C" { #endif +#define DIR24_8_TBL24_NUM_ENT (1 << 24) +#define DIR24_8_TBL8_GRP_NUM_ENT 256U +#define DIR24_8_EXT_ENT 1 +#define DIR24_8_TBL24_MASK 0xffffff00 + +#define BITMAP_SLAB_BIT_SIZE_LOG2 6 +#define BITMAP_SLAB_BIT_SIZE (1 << BITMAP_SLAB_BIT_SIZE_LOG2) +#define BITMAP_SLAB_BITMASK (BITMAP_SLAB_BIT_SIZE - 1) + +struct dir24_8_tbl { + uint32_t number_tbl8s; /**< Total number of tbl8s */ + uint32_t rsvd_tbl8s; /**< Number of reserved tbl8s */ + uint32_t cur_tbl8s; /**< Current number of tbl8s */ + enum rte_fib_dir24_8_nh_sz nh_sz; /**< Size of nexthop entry */ + uint64_t def_nh; /**< Default next hop */ + uint64_t *tbl8; /**< tbl8 table. */ + uint64_t *tbl8_idxes; /**< bitmap containing free tbl8 idxes*/ + /* tbl24 table. 
*/ + __extension__ uint64_t tbl24[0] __rte_cache_aligned; +}; + +static inline void * +get_tbl24_p(struct dir24_8_tbl *dp, uint32_t ip, uint8_t nh_sz) +{ + return (void *)&((uint8_t *)dp->tbl24)[(ip & + DIR24_8_TBL24_MASK) >> (8 - nh_sz)]; +} + +static inline uint8_t +bits_in_nh(uint8_t nh_sz) +{ + return 8 * (1 << nh_sz); +} + +static inline uint64_t +get_max_nh(uint8_t nh_sz) +{ + return ((1ULL << (bits_in_nh(nh_sz) - 1)) - 1); +} + +static inline uint32_t +get_tbl24_idx(uint32_t ip) +{ + return ip >> 8; +} + +static inline uint32_t +get_tbl8_idx(uint32_t res, uint32_t ip) +{ + return (res >> 1) * DIR24_8_TBL8_GRP_NUM_ENT + (uint8_t)ip; +} + +static inline uint64_t +lookup_msk(uint8_t nh_sz) +{ + return ((1ULL << ((1 << (nh_sz + 3)) - 1)) << 1) - 1; +} + +static inline uint8_t +get_psd_idx(uint32_t val, uint8_t nh_sz) +{ + return val & ((1 << (3 - nh_sz)) - 1); +} + +static inline uint32_t +get_tbl_idx(uint32_t val, uint8_t nh_sz) +{ + return val >> (3 - nh_sz); +} + +static inline uint64_t +get_tbl24(struct dir24_8_tbl *dp, uint32_t ip, uint8_t nh_sz) +{ + return ((dp->tbl24[get_tbl_idx(get_tbl24_idx(ip), nh_sz)] >> + (get_psd_idx(get_tbl24_idx(ip), nh_sz) * + bits_in_nh(nh_sz))) & lookup_msk(nh_sz)); +} + +static inline uint64_t +get_tbl8(struct dir24_8_tbl *dp, uint32_t res, uint32_t ip, uint8_t nh_sz) +{ + return ((dp->tbl8[get_tbl_idx(get_tbl8_idx(res, ip), nh_sz)] >> + (get_psd_idx(get_tbl8_idx(res, ip), nh_sz) * + bits_in_nh(nh_sz))) & lookup_msk(nh_sz)); +} + +static inline int +is_entry_extended(uint64_t ent) +{ + return (ent & DIR24_8_EXT_ENT) == DIR24_8_EXT_ENT; +} + +#define LOOKUP_FUNC(suffix, type, bulk_prefetch, nh_sz) \ +static inline void dir24_8_lookup_bulk_##suffix(void *p, const uint32_t *ips, \ + uint64_t *next_hops, const unsigned int n) \ +{ \ + struct dir24_8_tbl *dp = (struct dir24_8_tbl *)p; \ + uint64_t tmp; \ + uint32_t i; \ + uint32_t prefetch_offset = \ + RTE_MIN((unsigned int)bulk_prefetch, n); \ + \ + for (i = 0; i < prefetch_offset; i++) \ + rte_prefetch0(get_tbl24_p(dp, ips[i], nh_sz)); \ + for (i = 0; i < (n - prefetch_offset); i++) { \ + rte_prefetch0(get_tbl24_p(dp, \ + ips[i + prefetch_offset], nh_sz)); \ + tmp = ((type *)dp->tbl24)[ips[i] >> 8]; \ + if (unlikely(is_entry_extended(tmp))) \ + tmp = ((type *)dp->tbl8)[(uint8_t)ips[i] + \ + ((tmp >> 1) * DIR24_8_TBL8_GRP_NUM_ENT)]; \ + next_hops[i] = tmp >> 1; \ + } \ + for (; i < n; i++) { \ + tmp = ((type *)dp->tbl24)[ips[i] >> 8]; \ + if (unlikely(is_entry_extended(tmp))) \ + tmp = ((type *)dp->tbl8)[(uint8_t)ips[i] + \ + ((tmp >> 1) * DIR24_8_TBL8_GRP_NUM_ENT)]; \ + next_hops[i] = tmp >> 1; \ + } \ +} \ + +LOOKUP_FUNC(1b, uint8_t, 5, 0) +LOOKUP_FUNC(2b, uint16_t, 6, 1) +LOOKUP_FUNC(4b, uint32_t, 15, 2) +LOOKUP_FUNC(8b, uint64_t, 12, 3) + +static inline void +dir24_8_lookup_bulk(struct dir24_8_tbl *dp, const uint32_t *ips, + uint64_t *next_hops, const unsigned int n, uint8_t nh_sz) +{ + uint64_t tmp; + uint32_t i; + uint32_t prefetch_offset = RTE_MIN(15U, n); + + for (i = 0; i < prefetch_offset; i++) + rte_prefetch0(get_tbl24_p(dp, ips[i], nh_sz)); + for (i = 0; i < (n - prefetch_offset); i++) { + rte_prefetch0(get_tbl24_p(dp, ips[i + prefetch_offset], + nh_sz)); + tmp = get_tbl24(dp, ips[i], nh_sz); + if (unlikely(is_entry_extended(tmp))) + tmp = get_tbl8(dp, tmp, ips[i], nh_sz); + + next_hops[i] = tmp >> 1; + } + for (; i < n; i++) { + tmp = get_tbl24(dp, ips[i], nh_sz); + if (unlikely(is_entry_extended(tmp))) + tmp = get_tbl8(dp, tmp, ips[i], nh_sz); + + next_hops[i] = tmp >> 1; + } +} + +static 
inline void +dir24_8_lookup_bulk_0(void *p, const uint32_t *ips, + uint64_t *next_hops, const unsigned int n) +{ + struct dir24_8_tbl *dp = (struct dir24_8_tbl *)p; + + dir24_8_lookup_bulk(dp, ips, next_hops, n, 0); +} + +static inline void +dir24_8_lookup_bulk_1(void *p, const uint32_t *ips, + uint64_t *next_hops, const unsigned int n) +{ + struct dir24_8_tbl *dp = (struct dir24_8_tbl *)p; + + dir24_8_lookup_bulk(dp, ips, next_hops, n, 1); +} + +static inline void +dir24_8_lookup_bulk_2(void *p, const uint32_t *ips, + uint64_t *next_hops, const unsigned int n) +{ + struct dir24_8_tbl *dp = (struct dir24_8_tbl *)p; + + dir24_8_lookup_bulk(dp, ips, next_hops, n, 2); +} + +static inline void +dir24_8_lookup_bulk_3(void *p, const uint32_t *ips, + uint64_t *next_hops, const unsigned int n) +{ + struct dir24_8_tbl *dp = (struct dir24_8_tbl *)p; + + dir24_8_lookup_bulk(dp, ips, next_hops, n, 3); +} + +static inline void +dir24_8_lookup_bulk_uni(void *p, const uint32_t *ips, + uint64_t *next_hops, const unsigned int n) +{ + struct dir24_8_tbl *dp = (struct dir24_8_tbl *)p; + uint64_t tmp; + uint32_t i; + uint32_t prefetch_offset = RTE_MIN(15U, n); + uint8_t nh_sz = dp->nh_sz; + + for (i = 0; i < prefetch_offset; i++) + rte_prefetch0(get_tbl24_p(dp, ips[i], nh_sz)); + for (i = 0; i < (n - prefetch_offset); i++) { + rte_prefetch0(get_tbl24_p(dp, ips[i + prefetch_offset], + nh_sz)); + tmp = get_tbl24(dp, ips[i], nh_sz); + if (unlikely(is_entry_extended(tmp))) + tmp = get_tbl8(dp, tmp, ips[i], nh_sz); + + next_hops[i] = tmp >> 1; + } + for (; i < n; i++) { + tmp = get_tbl24(dp, ips[i], nh_sz); + if (unlikely(is_entry_extended(tmp))) + tmp = get_tbl8(dp, tmp, ips[i], nh_sz); + + next_hops[i] = tmp >> 1; + } +} + void * dir24_8_create(const char *name, int socket_id, struct rte_fib_conf *conf); From patchwork Tue Oct 27 15:11:26 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Vladimir Medvedkin X-Patchwork-Id: 82345 X-Patchwork-Delegate: david.marchand@redhat.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id D4A7DA04B5; Tue, 27 Oct 2020 16:12:56 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id A889268C0; Tue, 27 Oct 2020 16:12:05 +0100 (CET) Received: from mga03.intel.com (mga03.intel.com [134.134.136.65]) by dpdk.org (Postfix) with ESMTP id 26F485ABB for ; Tue, 27 Oct 2020 16:11:47 +0100 (CET) IronPort-SDR: 5PddwBDnK5+PAhbjQILwxZy3Tg5czC5fboyPmO9hssthoikuG8YpRnhsK/nSXyErKmon5WDGZ7 Xrd/SeYXCn9g== X-IronPort-AV: E=McAfee;i="6000,8403,9786"; a="168197262" X-IronPort-AV: E=Sophos;i="5.77,424,1596524400"; d="scan'208";a="168197262" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by orsmga103.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 27 Oct 2020 08:11:45 -0700 IronPort-SDR: zP2m0GH58uhvLln1zaYxkpafv3Jf18jcbZv/h1VSZ+CqI0JmwIkwMPFN2gc3JXr3xIHBl1ecCT guULAWebiLPw== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.77,424,1596524400"; d="scan'208";a="322963833" Received: from silpixa00400072.ir.intel.com ([10.237.222.213]) by orsmga006.jf.intel.com with ESMTP; 27 Oct 2020 08:11:39 -0700 From: Vladimir Medvedkin To: dev@dpdk.org Cc: david.marchand@redhat.com, jerinj@marvell.com, mdr@ashroe.eu, thomas@monjalon.net, konstantin.ananyev@intel.com, 
bruce.richardson@intel.com, ciara.power@intel.com Date: Tue, 27 Oct 2020 15:11:26 +0000 Message-Id: X-Mailer: git-send-email 2.7.4 In-Reply-To: References: In-Reply-To: References: Subject: [dpdk-dev] [PATCH v15 3/8] fib: introduce AVX512 lookup X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add new lookup implementation for DIR24_8 algorithm using AVX512 instruction set Signed-off-by: Vladimir Medvedkin Acked-by: Konstantin Ananyev --- doc/guides/rel_notes/release_20_11.rst | 3 + lib/librte_fib/dir24_8.c | 39 ++++++++ lib/librte_fib/dir24_8_avx512.c | 165 +++++++++++++++++++++++++++++++++ lib/librte_fib/dir24_8_avx512.h | 24 +++++ lib/librte_fib/meson.build | 34 +++++++ lib/librte_fib/rte_fib.c | 2 +- lib/librte_fib/rte_fib.h | 6 +- 7 files changed, 271 insertions(+), 2 deletions(-) create mode 100644 lib/librte_fib/dir24_8_avx512.c create mode 100644 lib/librte_fib/dir24_8_avx512.h diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst index dca8d41..c430e8e 100644 --- a/doc/guides/rel_notes/release_20_11.rst +++ b/doc/guides/rel_notes/release_20_11.rst @@ -345,6 +345,9 @@ New Features * Replaced ``--scalar`` command-line option with ``--alg=``, to allow the user to select the desired classify method. +* **Added AVX512 lookup implementation for FIB.** + + Added a AVX512 lookup functions implementation into FIB library. Removed Items ------------- diff --git a/lib/librte_fib/dir24_8.c b/lib/librte_fib/dir24_8.c index 87400fc..c97ae02 100644 --- a/lib/librte_fib/dir24_8.c +++ b/lib/librte_fib/dir24_8.c @@ -13,11 +13,18 @@ #include #include #include +#include #include #include #include "dir24_8.h" +#ifdef CC_DIR24_8_AVX512_SUPPORT + +#include "dir24_8_avx512.h" + +#endif /* CC_DIR24_8_AVX512_SUPPORT */ + #define DIR24_8_NAMESIZE 64 #define ROUNDUP(x, y) RTE_ALIGN_CEIL(x, (1 << (32 - y))) @@ -56,11 +63,38 @@ get_scalar_fn_inlined(enum rte_fib_dir24_8_nh_sz nh_sz) } } +static inline rte_fib_lookup_fn_t +get_vector_fn(enum rte_fib_dir24_8_nh_sz nh_sz) +{ +#ifdef CC_DIR24_8_AVX512_SUPPORT + if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) <= 0) || + (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512)) + return NULL; + + switch (nh_sz) { + case RTE_FIB_DIR24_8_1B: + return rte_dir24_8_vec_lookup_bulk_1b; + case RTE_FIB_DIR24_8_2B: + return rte_dir24_8_vec_lookup_bulk_2b; + case RTE_FIB_DIR24_8_4B: + return rte_dir24_8_vec_lookup_bulk_4b; + case RTE_FIB_DIR24_8_8B: + return rte_dir24_8_vec_lookup_bulk_8b; + default: + return NULL; + } +#else + RTE_SET_USED(nh_sz); +#endif + return NULL; +} + rte_fib_lookup_fn_t dir24_8_get_lookup_fn(void *p, enum rte_fib_lookup_type type) { enum rte_fib_dir24_8_nh_sz nh_sz; struct dir24_8_tbl *dp = p; + rte_fib_lookup_fn_t ret_fn = NULL; if (dp == NULL) return NULL; @@ -74,6 +108,11 @@ dir24_8_get_lookup_fn(void *p, enum rte_fib_lookup_type type) return get_scalar_fn_inlined(nh_sz); case RTE_FIB_LOOKUP_DIR24_8_SCALAR_UNI: return dir24_8_lookup_bulk_uni; + case RTE_FIB_LOOKUP_DIR24_8_VECTOR_AVX512: + return get_vector_fn(nh_sz); + case RTE_FIB_LOOKUP_DEFAULT: + ret_fn = get_vector_fn(nh_sz); + return (ret_fn) ? 
ret_fn : get_scalar_fn(nh_sz); default: return NULL; } diff --git a/lib/librte_fib/dir24_8_avx512.c b/lib/librte_fib/dir24_8_avx512.c new file mode 100644 index 0000000..43dba28 --- /dev/null +++ b/lib/librte_fib/dir24_8_avx512.c @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2020 Intel Corporation + */ + +#include +#include + +#include "dir24_8.h" +#include "dir24_8_avx512.h" + +static __rte_always_inline void +dir24_8_vec_lookup_x16(void *p, const uint32_t *ips, + uint64_t *next_hops, int size) +{ + struct dir24_8_tbl *dp = (struct dir24_8_tbl *)p; + __mmask16 msk_ext; + __mmask16 exp_msk = 0x5555; + __m512i ip_vec, idxes, res, bytes; + const __m512i zero = _mm512_set1_epi32(0); + const __m512i lsb = _mm512_set1_epi32(1); + const __m512i lsbyte_msk = _mm512_set1_epi32(0xff); + __m512i tmp1, tmp2, res_msk; + __m256i tmp256; + /* used to mask gather values if size is 1/2 (8/16 bit next hops) */ + if (size == sizeof(uint8_t)) + res_msk = _mm512_set1_epi32(UINT8_MAX); + else if (size == sizeof(uint16_t)) + res_msk = _mm512_set1_epi32(UINT16_MAX); + + ip_vec = _mm512_loadu_si512(ips); + /* mask 24 most significant bits */ + idxes = _mm512_srli_epi32(ip_vec, 8); + + /** + * lookup in tbl24 + * Put it inside branch to make compiler happy with -O0 + */ + if (size == sizeof(uint8_t)) { + res = _mm512_i32gather_epi32(idxes, (const int *)dp->tbl24, 1); + res = _mm512_and_epi32(res, res_msk); + } else if (size == sizeof(uint16_t)) { + res = _mm512_i32gather_epi32(idxes, (const int *)dp->tbl24, 2); + res = _mm512_and_epi32(res, res_msk); + } else + res = _mm512_i32gather_epi32(idxes, (const int *)dp->tbl24, 4); + + /* get extended entries indexes */ + msk_ext = _mm512_test_epi32_mask(res, lsb); + + if (msk_ext != 0) { + idxes = _mm512_srli_epi32(res, 1); + idxes = _mm512_slli_epi32(idxes, 8); + bytes = _mm512_and_epi32(ip_vec, lsbyte_msk); + idxes = _mm512_maskz_add_epi32(msk_ext, idxes, bytes); + if (size == sizeof(uint8_t)) { + idxes = _mm512_mask_i32gather_epi32(zero, msk_ext, + idxes, (const int *)dp->tbl8, 1); + idxes = _mm512_and_epi32(idxes, res_msk); + } else if (size == sizeof(uint16_t)) { + idxes = _mm512_mask_i32gather_epi32(zero, msk_ext, + idxes, (const int *)dp->tbl8, 2); + idxes = _mm512_and_epi32(idxes, res_msk); + } else + idxes = _mm512_mask_i32gather_epi32(zero, msk_ext, + idxes, (const int *)dp->tbl8, 4); + + res = _mm512_mask_blend_epi32(msk_ext, res, idxes); + } + + res = _mm512_srli_epi32(res, 1); + tmp1 = _mm512_maskz_expand_epi32(exp_msk, res); + tmp256 = _mm512_extracti32x8_epi32(res, 1); + tmp2 = _mm512_maskz_expand_epi32(exp_msk, + _mm512_castsi256_si512(tmp256)); + _mm512_storeu_si512(next_hops, tmp1); + _mm512_storeu_si512(next_hops + 8, tmp2); +} + +static __rte_always_inline void +dir24_8_vec_lookup_x8_8b(void *p, const uint32_t *ips, + uint64_t *next_hops) +{ + struct dir24_8_tbl *dp = (struct dir24_8_tbl *)p; + const __m512i zero = _mm512_set1_epi32(0); + const __m512i lsbyte_msk = _mm512_set1_epi64(0xff); + const __m512i lsb = _mm512_set1_epi64(1); + __m512i res, idxes, bytes; + __m256i idxes_256, ip_vec; + __mmask8 msk_ext; + + ip_vec = _mm256_loadu_si256((const void *)ips); + /* mask 24 most significant bits */ + idxes_256 = _mm256_srli_epi32(ip_vec, 8); + + /* lookup in tbl24 */ + res = _mm512_i32gather_epi64(idxes_256, (const void *)dp->tbl24, 8); + + /* get extended entries indexes */ + msk_ext = _mm512_test_epi64_mask(res, lsb); + + if (msk_ext != 0) { + bytes = _mm512_cvtepi32_epi64(ip_vec); + idxes = _mm512_srli_epi64(res, 1); + 
idxes = _mm512_slli_epi64(idxes, 8); + bytes = _mm512_and_epi64(bytes, lsbyte_msk); + idxes = _mm512_maskz_add_epi64(msk_ext, idxes, bytes); + idxes = _mm512_mask_i64gather_epi64(zero, msk_ext, idxes, + (const void *)dp->tbl8, 8); + + res = _mm512_mask_blend_epi64(msk_ext, res, idxes); + } + + res = _mm512_srli_epi64(res, 1); + _mm512_storeu_si512(next_hops, res); +} + +void +rte_dir24_8_vec_lookup_bulk_1b(void *p, const uint32_t *ips, + uint64_t *next_hops, const unsigned int n) +{ + uint32_t i; + for (i = 0; i < (n / 16); i++) + dir24_8_vec_lookup_x16(p, ips + i * 16, next_hops + i * 16, + sizeof(uint8_t)); + + dir24_8_lookup_bulk_1b(p, ips + i * 16, next_hops + i * 16, + n - i * 16); +} + +void +rte_dir24_8_vec_lookup_bulk_2b(void *p, const uint32_t *ips, + uint64_t *next_hops, const unsigned int n) +{ + uint32_t i; + for (i = 0; i < (n / 16); i++) + dir24_8_vec_lookup_x16(p, ips + i * 16, next_hops + i * 16, + sizeof(uint16_t)); + + dir24_8_lookup_bulk_2b(p, ips + i * 16, next_hops + i * 16, + n - i * 16); +} + +void +rte_dir24_8_vec_lookup_bulk_4b(void *p, const uint32_t *ips, + uint64_t *next_hops, const unsigned int n) +{ + uint32_t i; + for (i = 0; i < (n / 16); i++) + dir24_8_vec_lookup_x16(p, ips + i * 16, next_hops + i * 16, + sizeof(uint32_t)); + + dir24_8_lookup_bulk_4b(p, ips + i * 16, next_hops + i * 16, + n - i * 16); +} + +void +rte_dir24_8_vec_lookup_bulk_8b(void *p, const uint32_t *ips, + uint64_t *next_hops, const unsigned int n) +{ + uint32_t i; + for (i = 0; i < (n / 8); i++) + dir24_8_vec_lookup_x8_8b(p, ips + i * 8, next_hops + i * 8); + + dir24_8_lookup_bulk_8b(p, ips + i * 8, next_hops + i * 8, n - i * 8); +} diff --git a/lib/librte_fib/dir24_8_avx512.h b/lib/librte_fib/dir24_8_avx512.h new file mode 100644 index 0000000..1d3c2b9 --- /dev/null +++ b/lib/librte_fib/dir24_8_avx512.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2020 Intel Corporation + */ + +#ifndef _DIR248_AVX512_H_ +#define _DIR248_AVX512_H_ + +void +rte_dir24_8_vec_lookup_bulk_1b(void *p, const uint32_t *ips, + uint64_t *next_hops, const unsigned int n); + +void +rte_dir24_8_vec_lookup_bulk_2b(void *p, const uint32_t *ips, + uint64_t *next_hops, const unsigned int n); + +void +rte_dir24_8_vec_lookup_bulk_4b(void *p, const uint32_t *ips, + uint64_t *next_hops, const unsigned int n); + +void +rte_dir24_8_vec_lookup_bulk_8b(void *p, const uint32_t *ips, + uint64_t *next_hops, const unsigned int n); + +#endif /* _DIR248_AVX512_H_ */ diff --git a/lib/librte_fib/meson.build b/lib/librte_fib/meson.build index 771828f..0a8adef 100644 --- a/lib/librte_fib/meson.build +++ b/lib/librte_fib/meson.build @@ -5,3 +5,37 @@ sources = files('rte_fib.c', 'rte_fib6.c', 'dir24_8.c', 'trie.c') headers = files('rte_fib.h', 'rte_fib6.h') deps += ['rib'] + +# compile AVX512 version if: +# we are building 64-bit binary AND binutils can generate proper code +if dpdk_conf.has('RTE_ARCH_X86_64') and binutils_ok.returncode() == 0 + # compile AVX512 version if either: + # a. we have AVX512F supported in minimum instruction set baseline + # b. it's not minimum instruction set, but supported by compiler + # + # in former case, just add avx512 C file to files list + # in latter case, compile c file to static lib, using correct + # compiler flags, and then have the .o file from static lib + # linked into main lib. + + # check if all required flags already enabled (variant a). 
+ acl_avx512_flags = ['__AVX512F__','__AVX512DQ__'] + acl_avx512_on = true + foreach f:acl_avx512_flags + if cc.get_define(f, args: machine_args) == '' + acl_avx512_on = false + endif + endforeach + + if acl_avx512_on == true + cflags += ['-DCC_DIR24_8_AVX512_SUPPORT'] + sources += files('dir24_8_avx512.c') + elif cc.has_multi_arguments('-mavx512f', '-mavx512dq') + dir24_8_avx512_tmp = static_library('dir24_8_avx512_tmp', + 'dir24_8_avx512.c', + dependencies: static_rte_eal, + c_args: cflags + ['-mavx512f', '-mavx512dq']) + objs += dir24_8_avx512_tmp.extract_objects('dir24_8_avx512.c') + cflags += '-DCC_DIR24_8_AVX512_SUPPORT' + endif +endif diff --git a/lib/librte_fib/rte_fib.c b/lib/librte_fib/rte_fib.c index 2b5fdf5..398dbf9 100644 --- a/lib/librte_fib/rte_fib.c +++ b/lib/librte_fib/rte_fib.c @@ -108,7 +108,7 @@ init_dataplane(struct rte_fib *fib, __rte_unused int socket_id, if (fib->dp == NULL) return -rte_errno; fib->lookup = dir24_8_get_lookup_fn(fib->dp, - RTE_FIB_LOOKUP_DIR24_8_SCALAR_MACRO); + RTE_FIB_LOOKUP_DEFAULT); fib->modify = dir24_8_modify; return 0; default: diff --git a/lib/librte_fib/rte_fib.h b/lib/librte_fib/rte_fib.h index d46fedc..8688c93 100644 --- a/lib/librte_fib/rte_fib.h +++ b/lib/librte_fib/rte_fib.h @@ -60,6 +60,8 @@ enum rte_fib_dir24_8_nh_sz { /** Type of lookup function implementation */ enum rte_fib_lookup_type { + RTE_FIB_LOOKUP_DEFAULT, + /**< Selects the best implementation based on the max simd bitwidth */ RTE_FIB_LOOKUP_DIR24_8_SCALAR_MACRO, /**< Macro based lookup function */ RTE_FIB_LOOKUP_DIR24_8_SCALAR_INLINE, @@ -67,10 +69,12 @@ enum rte_fib_lookup_type { * Lookup implementation using inlined functions * for different next hop sizes */ - RTE_FIB_LOOKUP_DIR24_8_SCALAR_UNI + RTE_FIB_LOOKUP_DIR24_8_SCALAR_UNI, /**< * Unified lookup function for all next hop sizes */ + RTE_FIB_LOOKUP_DIR24_8_VECTOR_AVX512 + /**< Vector implementation using AVX512 */ }; /** FIB configuration structure */ From patchwork Tue Oct 27 15:11:27 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Vladimir Medvedkin X-Patchwork-Id: 82346 X-Patchwork-Delegate: david.marchand@redhat.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 4C654A04B5; Tue, 27 Oct 2020 16:13:17 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 0691472EF; Tue, 27 Oct 2020 16:12:07 +0100 (CET) Received: from mga03.intel.com (mga03.intel.com [134.134.136.65]) by dpdk.org (Postfix) with ESMTP id EAE1D5ABB for ; Tue, 27 Oct 2020 16:11:47 +0100 (CET) IronPort-SDR: iANeDJtpQmtZFQvJPzKrplZSuP+ZA69weIqlltSsHrJ9ZYMI2xZnydiAju5BBk/iNXWPDxnrbJ US6O19TXEKKQ== X-IronPort-AV: E=McAfee;i="6000,8403,9786"; a="168197272" X-IronPort-AV: E=Sophos;i="5.77,424,1596524400"; d="scan'208";a="168197272" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by orsmga103.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 27 Oct 2020 08:11:47 -0700 IronPort-SDR: 2eM3IthXvAu/ewiSl2l3M097xLEl+IpWT21ul1Z6ywdJ4Em4fOkjRkDfKO95XEvsBE7RjnOM7q V0G5W62om49g== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.77,424,1596524400"; d="scan'208";a="322963841" Received: from silpixa00400072.ir.intel.com ([10.237.222.213]) by orsmga006.jf.intel.com with ESMTP; 27 Oct 2020 08:11:45 -0700 From: Vladimir Medvedkin To: 
dev@dpdk.org Cc: david.marchand@redhat.com, jerinj@marvell.com, mdr@ashroe.eu, thomas@monjalon.net, konstantin.ananyev@intel.com, bruce.richardson@intel.com, ciara.power@intel.com Date: Tue, 27 Oct 2020 15:11:27 +0000 Message-Id: X-Mailer: git-send-email 2.7.4 In-Reply-To: References: In-Reply-To: References: Subject: [dpdk-dev] [PATCH v15 4/8] fib6: make lookup function type configurable X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add type argument to trie_get_lookup_fn() Now it only supports RTE_FIB6_LOOKUP_TRIE_SCALAR Add new rte_fib6_set_lookup_fn() - user can change lookup function type runtime. Signed-off-by: Vladimir Medvedkin Acked-by: Konstantin Ananyev --- lib/librte_fib/rte_fib6.c | 20 +++++++++++++++++++- lib/librte_fib/rte_fib6.h | 23 +++++++++++++++++++++++ lib/librte_fib/trie.c | 47 +++++++++++++++++++++++++++------------------- lib/librte_fib/trie.h | 2 +- lib/librte_fib/version.map | 1 + 5 files changed, 72 insertions(+), 21 deletions(-) diff --git a/lib/librte_fib/rte_fib6.c b/lib/librte_fib/rte_fib6.c index a1f0db8..45792bd 100644 --- a/lib/librte_fib/rte_fib6.c +++ b/lib/librte_fib/rte_fib6.c @@ -107,7 +107,7 @@ init_dataplane(struct rte_fib6 *fib, __rte_unused int socket_id, fib->dp = trie_create(dp_name, socket_id, conf); if (fib->dp == NULL) return -rte_errno; - fib->lookup = rte_trie_get_lookup_fn(conf); + fib->lookup = trie_get_lookup_fn(fib->dp, RTE_FIB6_LOOKUP_TRIE_SCALAR); fib->modify = trie_modify; return 0; default: @@ -319,3 +319,21 @@ rte_fib6_get_rib(struct rte_fib6 *fib) { return (fib == NULL) ? NULL : fib->rib; } + +int +rte_fib6_select_lookup(struct rte_fib6 *fib, + enum rte_fib6_lookup_type type) +{ + rte_fib6_lookup_fn_t fn; + + switch (fib->type) { + case RTE_FIB6_TRIE: + fn = trie_get_lookup_fn(fib->dp, type); + if (fn == NULL) + return -EINVAL; + fib->lookup = fn; + return 0; + default: + return -EINVAL; + } +} diff --git a/lib/librte_fib/rte_fib6.h b/lib/librte_fib/rte_fib6.h index bbfcf23..8086f03 100644 --- a/lib/librte_fib/rte_fib6.h +++ b/lib/librte_fib/rte_fib6.h @@ -53,12 +53,18 @@ enum rte_fib6_op { RTE_FIB6_DEL, }; +/** Size of nexthop (1 << nh_sz) bits for TRIE based FIB */ enum rte_fib_trie_nh_sz { RTE_FIB6_TRIE_2B = 1, RTE_FIB6_TRIE_4B, RTE_FIB6_TRIE_8B }; +/** Type of lookup function implementation */ +enum rte_fib6_lookup_type { + RTE_FIB6_LOOKUP_TRIE_SCALAR /**< Scalar lookup function implementation*/ +}; + /** FIB configuration structure */ struct rte_fib6_conf { enum rte_fib6_type type; /**< Type of FIB struct */ @@ -201,6 +207,23 @@ __rte_experimental struct rte_rib6 * rte_fib6_get_rib(struct rte_fib6 *fib); +/** + * Set lookup function based on type + * + * @param fib + * FIB object handle + * @param type + * type of lookup function + * + * @return + * -EINVAL on failure + * 0 on success + */ +__rte_experimental +int +rte_fib6_select_lookup(struct rte_fib6 *fib, + enum rte_fib6_lookup_type type); + #ifdef __cplusplus } #endif diff --git a/lib/librte_fib/trie.c b/lib/librte_fib/trie.c index 2ae2add..11a7ca2 100644 --- a/lib/librte_fib/trie.c +++ b/lib/librte_fib/trie.c @@ -59,13 +59,6 @@ enum edge { REDGE }; -enum lookup_type { - MACRO, - INLINE, - UNI -}; -static enum lookup_type test_lookup = MACRO; - static inline uint32_t get_tbl24_idx(const uint8_t *ip) { @@ -153,22 +146,38 @@ LOOKUP_FUNC(2b, uint16_t, 1) LOOKUP_FUNC(4b, uint32_t, 2) 
LOOKUP_FUNC(8b, uint64_t, 3) +static inline rte_fib6_lookup_fn_t +get_scalar_fn(enum rte_fib_trie_nh_sz nh_sz) +{ + switch (nh_sz) { + case RTE_FIB6_TRIE_2B: + return rte_trie_lookup_bulk_2b; + case RTE_FIB6_TRIE_4B: + return rte_trie_lookup_bulk_4b; + case RTE_FIB6_TRIE_8B: + return rte_trie_lookup_bulk_8b; + default: + return NULL; + } +} + rte_fib6_lookup_fn_t -rte_trie_get_lookup_fn(struct rte_fib6_conf *conf) +trie_get_lookup_fn(void *p, enum rte_fib6_lookup_type type) { - enum rte_fib_trie_nh_sz nh_sz = conf->trie.nh_sz; + enum rte_fib_trie_nh_sz nh_sz; + struct rte_trie_tbl *dp = p; - if (test_lookup == MACRO) { - switch (nh_sz) { - case RTE_FIB6_TRIE_2B: - return rte_trie_lookup_bulk_2b; - case RTE_FIB6_TRIE_4B: - return rte_trie_lookup_bulk_4b; - case RTE_FIB6_TRIE_8B: - return rte_trie_lookup_bulk_8b; - } + if (dp == NULL) + return NULL; + + nh_sz = dp->nh_sz; + + switch (type) { + case RTE_FIB6_LOOKUP_TRIE_SCALAR: + return get_scalar_fn(nh_sz); + default: + return NULL; } - return NULL; } diff --git a/lib/librte_fib/trie.h b/lib/librte_fib/trie.h index bb750c5..e328bef 100644 --- a/lib/librte_fib/trie.h +++ b/lib/librte_fib/trie.h @@ -22,7 +22,7 @@ void trie_free(void *p); rte_fib6_lookup_fn_t -rte_trie_get_lookup_fn(struct rte_fib6_conf *fib_conf); +trie_get_lookup_fn(void *p, enum rte_fib6_lookup_type type); int trie_modify(struct rte_fib6 *fib, const uint8_t ip[RTE_FIB6_IPV6_ADDR_SIZE], diff --git a/lib/librte_fib/version.map b/lib/librte_fib/version.map index 5fd792a..be975ea 100644 --- a/lib/librte_fib/version.map +++ b/lib/librte_fib/version.map @@ -19,6 +19,7 @@ EXPERIMENTAL { rte_fib6_lookup_bulk; rte_fib6_get_dp; rte_fib6_get_rib; + rte_fib6_select_lookup; local: *; }; From patchwork Tue Oct 27 15:11:28 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: Vladimir Medvedkin X-Patchwork-Id: 82347 X-Patchwork-Delegate: david.marchand@redhat.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 8951AA04B5; Tue, 27 Oct 2020 16:13:45 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id AF705A9A0; Tue, 27 Oct 2020 16:12:08 +0100 (CET) Received: from mga03.intel.com (mga03.intel.com [134.134.136.65]) by dpdk.org (Postfix) with ESMTP id 6091E6A1A for ; Tue, 27 Oct 2020 16:11:51 +0100 (CET) IronPort-SDR: b1n5qB0Wz4M+Tli5JYqxjaHgSId/6SgFlBXSPeRIREvV3HR0QAkiE8/pLguDK1eCsX+3N2SDw6 fHE0RbXP5THQ== X-IronPort-AV: E=McAfee;i="6000,8403,9786"; a="168197279" X-IronPort-AV: E=Sophos;i="5.77,424,1596524400"; d="scan'208";a="168197279" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by orsmga103.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 27 Oct 2020 08:11:49 -0700 IronPort-SDR: 5odc0KwgQOFABKJWETVOpkQWMhc0zuzCMwex7mro6Gy+4yTy8mfac9dV8DCUxbqKwN5/4/41ck 0kImViOqzqMA== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.77,424,1596524400"; d="scan'208";a="322963848" Received: from silpixa00400072.ir.intel.com ([10.237.222.213]) by orsmga006.jf.intel.com with ESMTP; 27 Oct 2020 08:11:47 -0700 From: Vladimir Medvedkin To: dev@dpdk.org Cc: david.marchand@redhat.com, jerinj@marvell.com, mdr@ashroe.eu, thomas@monjalon.net, konstantin.ananyev@intel.com, bruce.richardson@intel.com, ciara.power@intel.com Date: Tue, 27 Oct 2020 15:11:28 +0000 Message-Id: 
<46cfc592b989e3fb9e3409f922e44b155c6c11a5.1603811281.git.vladimir.medvedkin@intel.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: References: In-Reply-To: References: MIME-Version: 1.0 Subject: [dpdk-dev] [PATCH v15 5/8] fib6: move lookup definition into the header file X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Move trie table layout and lookup definition into the private header file. This is necessary for implementing a vectorized lookup function in a separate .с file. Signed-off-by: Vladimir Medvedkin Acked-by: Konstantin Ananyev --- lib/librte_fib/trie.c | 121 -------------------------------------------------- lib/librte_fib/trie.h | 117 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 117 insertions(+), 121 deletions(-) diff --git a/lib/librte_fib/trie.c b/lib/librte_fib/trie.c index 11a7ca2..08a03ab 100644 --- a/lib/librte_fib/trie.c +++ b/lib/librte_fib/trie.c @@ -11,141 +11,20 @@ #include #include -#include #include #include -#include #include #include #include "trie.h" -/* @internal Total number of tbl24 entries. */ -#define TRIE_TBL24_NUM_ENT (1 << 24) - -/* Maximum depth value possible for IPv6 LPM. */ -#define TRIE_MAX_DEPTH 128 - -/* @internal Number of entries in a tbl8 group. */ -#define TRIE_TBL8_GRP_NUM_ENT 256ULL - -/* @internal Total number of tbl8 groups in the tbl8. */ -#define TRIE_TBL8_NUM_GROUPS 65536 - -/* @internal bitmask with valid and valid_group fields set */ -#define TRIE_EXT_ENT 1 - #define TRIE_NAMESIZE 64 -#define BITMAP_SLAB_BIT_SIZE_LOG2 6 -#define BITMAP_SLAB_BIT_SIZE (1ULL << BITMAP_SLAB_BIT_SIZE_LOG2) -#define BITMAP_SLAB_BITMASK (BITMAP_SLAB_BIT_SIZE - 1) - -struct rte_trie_tbl { - uint32_t number_tbl8s; /**< Total number of tbl8s */ - uint32_t rsvd_tbl8s; /**< Number of reserved tbl8s */ - uint32_t cur_tbl8s; /**< Current cumber of tbl8s */ - uint64_t def_nh; /**< Default next hop */ - enum rte_fib_trie_nh_sz nh_sz; /**< Size of nexthop entry */ - uint64_t *tbl8; /**< tbl8 table. */ - uint32_t *tbl8_pool; /**< bitmap containing free tbl8 idxes*/ - uint32_t tbl8_pool_pos; - /* tbl24 table. 
*/ - __extension__ uint64_t tbl24[0] __rte_cache_aligned; -}; - enum edge { LEDGE, REDGE }; -static inline uint32_t -get_tbl24_idx(const uint8_t *ip) -{ - return ip[0] << 16|ip[1] << 8|ip[2]; -} - -static inline void * -get_tbl24_p(struct rte_trie_tbl *dp, const uint8_t *ip, uint8_t nh_sz) -{ - uint32_t tbl24_idx; - - tbl24_idx = get_tbl24_idx(ip); - return (void *)&((uint8_t *)dp->tbl24)[tbl24_idx << nh_sz]; -} - -static inline uint8_t -bits_in_nh(uint8_t nh_sz) -{ - return 8 * (1 << nh_sz); -} - -static inline uint64_t -get_max_nh(uint8_t nh_sz) -{ - return ((1ULL << (bits_in_nh(nh_sz) - 1)) - 1); -} - -static inline uint64_t -lookup_msk(uint8_t nh_sz) -{ - return ((1ULL << ((1 << (nh_sz + 3)) - 1)) << 1) - 1; -} - -static inline uint8_t -get_psd_idx(uint32_t val, uint8_t nh_sz) -{ - return val & ((1 << (3 - nh_sz)) - 1); -} - -static inline uint32_t -get_tbl_pos(uint32_t val, uint8_t nh_sz) -{ - return val >> (3 - nh_sz); -} - -static inline uint64_t -get_tbl_val_by_idx(uint64_t *tbl, uint32_t idx, uint8_t nh_sz) -{ - return ((tbl[get_tbl_pos(idx, nh_sz)] >> (get_psd_idx(idx, nh_sz) * - bits_in_nh(nh_sz))) & lookup_msk(nh_sz)); -} - -static inline void * -get_tbl_p_by_idx(uint64_t *tbl, uint64_t idx, uint8_t nh_sz) -{ - return (uint8_t *)tbl + (idx << nh_sz); -} - -static inline int -is_entry_extended(uint64_t ent) -{ - return (ent & TRIE_EXT_ENT) == TRIE_EXT_ENT; -} - -#define LOOKUP_FUNC(suffix, type, nh_sz) \ -static void rte_trie_lookup_bulk_##suffix(void *p, \ - uint8_t ips[][RTE_FIB6_IPV6_ADDR_SIZE], \ - uint64_t *next_hops, const unsigned int n) \ -{ \ - struct rte_trie_tbl *dp = (struct rte_trie_tbl *)p; \ - uint64_t tmp; \ - uint32_t i, j; \ - \ - for (i = 0; i < n; i++) { \ - tmp = ((type *)dp->tbl24)[get_tbl24_idx(&ips[i][0])]; \ - j = 3; \ - while (is_entry_extended(tmp)) { \ - tmp = ((type *)dp->tbl8)[ips[i][j++] + \ - ((tmp >> 1) * TRIE_TBL8_GRP_NUM_ENT)]; \ - } \ - next_hops[i] = tmp >> 1; \ - } \ -} -LOOKUP_FUNC(2b, uint16_t, 1) -LOOKUP_FUNC(4b, uint32_t, 2) -LOOKUP_FUNC(8b, uint64_t, 3) - static inline rte_fib6_lookup_fn_t get_scalar_fn(enum rte_fib_trie_nh_sz nh_sz) { diff --git a/lib/librte_fib/trie.h b/lib/librte_fib/trie.h index e328bef..a4f429c 100644 --- a/lib/librte_fib/trie.h +++ b/lib/librte_fib/trie.h @@ -10,11 +10,128 @@ * @file * RTE IPv6 Longest Prefix Match (LPM) */ +#include +#include #ifdef __cplusplus extern "C" { #endif +/* @internal Total number of tbl24 entries. */ +#define TRIE_TBL24_NUM_ENT (1 << 24) +/* Maximum depth value possible for IPv6 LPM. */ +#define TRIE_MAX_DEPTH 128 +/* @internal Number of entries in a tbl8 group. */ +#define TRIE_TBL8_GRP_NUM_ENT 256ULL +/* @internal Total number of tbl8 groups in the tbl8. */ +#define TRIE_TBL8_NUM_GROUPS 65536 +/* @internal bitmask with valid and valid_group fields set */ +#define TRIE_EXT_ENT 1 + +#define BITMAP_SLAB_BIT_SIZE_LOG2 6 +#define BITMAP_SLAB_BIT_SIZE (1ULL << BITMAP_SLAB_BIT_SIZE_LOG2) +#define BITMAP_SLAB_BITMASK (BITMAP_SLAB_BIT_SIZE - 1) + +struct rte_trie_tbl { + uint32_t number_tbl8s; /**< Total number of tbl8s */ + uint32_t rsvd_tbl8s; /**< Number of reserved tbl8s */ + uint32_t cur_tbl8s; /**< Current cumber of tbl8s */ + uint64_t def_nh; /**< Default next hop */ + enum rte_fib_trie_nh_sz nh_sz; /**< Size of nexthop entry */ + uint64_t *tbl8; /**< tbl8 table. */ + uint32_t *tbl8_pool; /**< bitmap containing free tbl8 idxes*/ + uint32_t tbl8_pool_pos; + /* tbl24 table. 
*/ + __extension__ uint64_t tbl24[0] __rte_cache_aligned; +}; + +static inline uint32_t +get_tbl24_idx(const uint8_t *ip) +{ + return ip[0] << 16|ip[1] << 8|ip[2]; +} + +static inline void * +get_tbl24_p(struct rte_trie_tbl *dp, const uint8_t *ip, uint8_t nh_sz) +{ + uint32_t tbl24_idx; + + tbl24_idx = get_tbl24_idx(ip); + return (void *)&((uint8_t *)dp->tbl24)[tbl24_idx << nh_sz]; +} + +static inline uint8_t +bits_in_nh(uint8_t nh_sz) +{ + return 8 * (1 << nh_sz); +} + +static inline uint64_t +get_max_nh(uint8_t nh_sz) +{ + return ((1ULL << (bits_in_nh(nh_sz) - 1)) - 1); +} + +static inline uint64_t +lookup_msk(uint8_t nh_sz) +{ + return ((1ULL << ((1 << (nh_sz + 3)) - 1)) << 1) - 1; +} + +static inline uint8_t +get_psd_idx(uint32_t val, uint8_t nh_sz) +{ + return val & ((1 << (3 - nh_sz)) - 1); +} + +static inline uint32_t +get_tbl_pos(uint32_t val, uint8_t nh_sz) +{ + return val >> (3 - nh_sz); +} + +static inline uint64_t +get_tbl_val_by_idx(uint64_t *tbl, uint32_t idx, uint8_t nh_sz) +{ + return ((tbl[get_tbl_pos(idx, nh_sz)] >> (get_psd_idx(idx, nh_sz) * + bits_in_nh(nh_sz))) & lookup_msk(nh_sz)); +} + +static inline void * +get_tbl_p_by_idx(uint64_t *tbl, uint64_t idx, uint8_t nh_sz) +{ + return (uint8_t *)tbl + (idx << nh_sz); +} + +static inline int +is_entry_extended(uint64_t ent) +{ + return (ent & TRIE_EXT_ENT) == TRIE_EXT_ENT; +} + +#define LOOKUP_FUNC(suffix, type, nh_sz) \ +static inline void rte_trie_lookup_bulk_##suffix(void *p, \ + uint8_t ips[][RTE_FIB6_IPV6_ADDR_SIZE], \ + uint64_t *next_hops, const unsigned int n) \ +{ \ + struct rte_trie_tbl *dp = (struct rte_trie_tbl *)p; \ + uint64_t tmp; \ + uint32_t i, j; \ + \ + for (i = 0; i < n; i++) { \ + tmp = ((type *)dp->tbl24)[get_tbl24_idx(&ips[i][0])]; \ + j = 3; \ + while (is_entry_extended(tmp)) { \ + tmp = ((type *)dp->tbl8)[ips[i][j++] + \ + ((tmp >> 1) * TRIE_TBL8_GRP_NUM_ENT)]; \ + } \ + next_hops[i] = tmp >> 1; \ + } \ +} +LOOKUP_FUNC(2b, uint16_t, 1) +LOOKUP_FUNC(4b, uint32_t, 2) +LOOKUP_FUNC(8b, uint64_t, 3) + void * trie_create(const char *name, int socket_id, struct rte_fib6_conf *conf); From patchwork Tue Oct 27 15:11:29 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Vladimir Medvedkin X-Patchwork-Id: 82348 X-Patchwork-Delegate: david.marchand@redhat.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id A73DCA04B5; Tue, 27 Oct 2020 16:14:04 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 40EF6AC8F; Tue, 27 Oct 2020 16:12:10 +0100 (CET) Received: from mga03.intel.com (mga03.intel.com [134.134.136.65]) by dpdk.org (Postfix) with ESMTP id C36486A44 for ; Tue, 27 Oct 2020 16:11:51 +0100 (CET) IronPort-SDR: CCAGGkXuuatv2kxSAGEIe+ZOld85xXqOWAsvh1D4QrfVUYu3KSNYb4ZllV8Ua1XcbIlKAihWvK 1YybbN86SAtw== X-IronPort-AV: E=McAfee;i="6000,8403,9786"; a="168197285" X-IronPort-AV: E=Sophos;i="5.77,424,1596524400"; d="scan'208";a="168197285" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by orsmga103.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 27 Oct 2020 08:11:51 -0700 IronPort-SDR: sLecS4GrsHEGUSrvrt2uTCRsnB2bUhMIlb76aKCRH1+Nhkqcp70z1XzxFb/oSGx9w0bLQWnh1T hmDmMnlrb7ew== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.77,424,1596524400"; d="scan'208";a="322963856" Received: from 
silpixa00400072.ir.intel.com ([10.237.222.213]) by orsmga006.jf.intel.com with ESMTP; 27 Oct 2020 08:11:49 -0700 From: Vladimir Medvedkin To: dev@dpdk.org Cc: david.marchand@redhat.com, jerinj@marvell.com, mdr@ashroe.eu, thomas@monjalon.net, konstantin.ananyev@intel.com, bruce.richardson@intel.com, ciara.power@intel.com Date: Tue, 27 Oct 2020 15:11:29 +0000 Message-Id: <31bc5dd2234d19e9964df6a6ac68591d549ff103.1603811281.git.vladimir.medvedkin@intel.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: References: In-Reply-To: References: Subject: [dpdk-dev] [PATCH v15 6/8] fib6: introduce AVX512 lookup X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add new lookup implementation for FIB6 trie algorithm using AVX512 instruction set Signed-off-by: Vladimir Medvedkin Acked-by: Konstantin Ananyev --- doc/guides/rel_notes/release_20_11.rst | 2 +- lib/librte_fib/meson.build | 17 +++ lib/librte_fib/rte_fib6.c | 2 +- lib/librte_fib/rte_fib6.h | 5 +- lib/librte_fib/trie.c | 36 +++++ lib/librte_fib/trie_avx512.c | 269 +++++++++++++++++++++++++++++++++ lib/librte_fib/trie_avx512.h | 20 +++ 7 files changed, 348 insertions(+), 3 deletions(-) create mode 100644 lib/librte_fib/trie_avx512.c create mode 100644 lib/librte_fib/trie_avx512.h diff --git a/doc/guides/rel_notes/release_20_11.rst b/doc/guides/rel_notes/release_20_11.rst index c430e8e..2bb3408 100644 --- a/doc/guides/rel_notes/release_20_11.rst +++ b/doc/guides/rel_notes/release_20_11.rst @@ -347,7 +347,7 @@ New Features * **Added AVX512 lookup implementation for FIB.** - Added a AVX512 lookup functions implementation into FIB library. + Added a AVX512 lookup functions implementation into FIB and FIB6 libraries. 
Removed Items ------------- diff --git a/lib/librte_fib/meson.build b/lib/librte_fib/meson.build index 0a8adef..5d93de9 100644 --- a/lib/librte_fib/meson.build +++ b/lib/librte_fib/meson.build @@ -30,6 +30,12 @@ if dpdk_conf.has('RTE_ARCH_X86_64') and binutils_ok.returncode() == 0 if acl_avx512_on == true cflags += ['-DCC_DIR24_8_AVX512_SUPPORT'] sources += files('dir24_8_avx512.c') + # TRIE AVX512 implementation uses avx512bw intrinsics along with + # avx512f and avx512dq + if cc.get_define('__AVX512BW__', args: machine_args) != '' + cflags += ['-DCC_TRIE_AVX512_SUPPORT'] + sources += files('trie_avx512.c') + endif elif cc.has_multi_arguments('-mavx512f', '-mavx512dq') dir24_8_avx512_tmp = static_library('dir24_8_avx512_tmp', 'dir24_8_avx512.c', @@ -37,5 +43,16 @@ if dpdk_conf.has('RTE_ARCH_X86_64') and binutils_ok.returncode() == 0 c_args: cflags + ['-mavx512f', '-mavx512dq']) objs += dir24_8_avx512_tmp.extract_objects('dir24_8_avx512.c') cflags += '-DCC_DIR24_8_AVX512_SUPPORT' + # TRIE AVX512 implementation uses avx512bw intrinsics along with + # avx512f and avx512dq + if cc.has_argument('-mavx512bw') + trie_avx512_tmp = static_library('trie_avx512_tmp', + 'trie_avx512.c', + dependencies: static_rte_eal, + c_args: cflags + ['-mavx512f', \ + '-mavx512dq', '-mavx512bw']) + objs += trie_avx512_tmp.extract_objects('trie_avx512.c') + cflags += '-DCC_TRIE_AVX512_SUPPORT' + endif endif endif diff --git a/lib/librte_fib/rte_fib6.c b/lib/librte_fib/rte_fib6.c index 45792bd..1f5af0f 100644 --- a/lib/librte_fib/rte_fib6.c +++ b/lib/librte_fib/rte_fib6.c @@ -107,7 +107,7 @@ init_dataplane(struct rte_fib6 *fib, __rte_unused int socket_id, fib->dp = trie_create(dp_name, socket_id, conf); if (fib->dp == NULL) return -rte_errno; - fib->lookup = trie_get_lookup_fn(fib->dp, RTE_FIB6_LOOKUP_TRIE_SCALAR); + fib->lookup = trie_get_lookup_fn(fib->dp, RTE_FIB6_LOOKUP_DEFAULT); fib->modify = trie_modify; return 0; default: diff --git a/lib/librte_fib/rte_fib6.h b/lib/librte_fib/rte_fib6.h index 8086f03..887de7b 100644 --- a/lib/librte_fib/rte_fib6.h +++ b/lib/librte_fib/rte_fib6.h @@ -62,7 +62,10 @@ enum rte_fib_trie_nh_sz { /** Type of lookup function implementation */ enum rte_fib6_lookup_type { - RTE_FIB6_LOOKUP_TRIE_SCALAR /**< Scalar lookup function implementation*/ + RTE_FIB6_LOOKUP_DEFAULT, + /**< Selects the best implementation based on the max simd bitwidth */ + RTE_FIB6_LOOKUP_TRIE_SCALAR, /**< Scalar lookup function implementation*/ + RTE_FIB6_LOOKUP_TRIE_VECTOR_AVX512 /**< Vector implementation using AVX512 */ }; /** FIB configuration structure */ diff --git a/lib/librte_fib/trie.c b/lib/librte_fib/trie.c index 08a03ab..5242c08 100644 --- a/lib/librte_fib/trie.c +++ b/lib/librte_fib/trie.c @@ -13,11 +13,18 @@ #include #include #include +#include #include #include #include "trie.h" +#ifdef CC_TRIE_AVX512_SUPPORT + +#include "trie_avx512.h" + +#endif /* CC_TRIE_AVX512_SUPPORT */ + #define TRIE_NAMESIZE 64 enum edge { @@ -40,11 +47,35 @@ get_scalar_fn(enum rte_fib_trie_nh_sz nh_sz) } } +static inline rte_fib6_lookup_fn_t +get_vector_fn(enum rte_fib_trie_nh_sz nh_sz) +{ +#ifdef CC_TRIE_AVX512_SUPPORT + if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) <= 0) || + (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512)) + return NULL; + switch (nh_sz) { + case RTE_FIB6_TRIE_2B: + return rte_trie_vec_lookup_bulk_2b; + case RTE_FIB6_TRIE_4B: + return rte_trie_vec_lookup_bulk_4b; + case RTE_FIB6_TRIE_8B: + return rte_trie_vec_lookup_bulk_8b; + default: + return NULL; + } +#else + RTE_SET_USED(nh_sz); 
+#endif + return NULL; +} + rte_fib6_lookup_fn_t trie_get_lookup_fn(void *p, enum rte_fib6_lookup_type type) { enum rte_fib_trie_nh_sz nh_sz; struct rte_trie_tbl *dp = p; + rte_fib6_lookup_fn_t ret_fn = NULL; if (dp == NULL) return NULL; @@ -54,6 +85,11 @@ trie_get_lookup_fn(void *p, enum rte_fib6_lookup_type type) switch (type) { case RTE_FIB6_LOOKUP_TRIE_SCALAR: return get_scalar_fn(nh_sz); + case RTE_FIB6_LOOKUP_TRIE_VECTOR_AVX512: + return get_vector_fn(nh_sz); + case RTE_FIB6_LOOKUP_DEFAULT: + ret_fn = get_vector_fn(nh_sz); + return (ret_fn) ? ret_fn : get_scalar_fn(nh_sz); default: return NULL; } diff --git a/lib/librte_fib/trie_avx512.c b/lib/librte_fib/trie_avx512.c new file mode 100644 index 0000000..b1c9e4e --- /dev/null +++ b/lib/librte_fib/trie_avx512.c @@ -0,0 +1,269 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2020 Intel Corporation + */ + +#include +#include + +#include "trie.h" +#include "trie_avx512.h" + +static __rte_always_inline void +transpose_x16(uint8_t ips[16][RTE_FIB6_IPV6_ADDR_SIZE], + __m512i *first, __m512i *second, __m512i *third, __m512i *fourth) +{ + __m512i tmp1, tmp2, tmp3, tmp4; + __m512i tmp5, tmp6, tmp7, tmp8; + const __rte_x86_zmm_t perm_idxes = { + .u32 = { 0, 4, 8, 12, 2, 6, 10, 14, + 1, 5, 9, 13, 3, 7, 11, 15 + }, + }; + + /* load all ip addresses */ + tmp1 = _mm512_loadu_si512(&ips[0][0]); + tmp2 = _mm512_loadu_si512(&ips[4][0]); + tmp3 = _mm512_loadu_si512(&ips[8][0]); + tmp4 = _mm512_loadu_si512(&ips[12][0]); + + /* transpose 4 byte chunks of 16 ips */ + tmp5 = _mm512_unpacklo_epi32(tmp1, tmp2); + tmp7 = _mm512_unpackhi_epi32(tmp1, tmp2); + tmp6 = _mm512_unpacklo_epi32(tmp3, tmp4); + tmp8 = _mm512_unpackhi_epi32(tmp3, tmp4); + + tmp1 = _mm512_unpacklo_epi32(tmp5, tmp6); + tmp3 = _mm512_unpackhi_epi32(tmp5, tmp6); + tmp2 = _mm512_unpacklo_epi32(tmp7, tmp8); + tmp4 = _mm512_unpackhi_epi32(tmp7, tmp8); + + /* first 4-byte chunks of ips[] */ + *first = _mm512_permutexvar_epi32(perm_idxes.z, tmp1); + /* second 4-byte chunks of ips[] */ + *second = _mm512_permutexvar_epi32(perm_idxes.z, tmp3); + /* third 4-byte chunks of ips[] */ + *third = _mm512_permutexvar_epi32(perm_idxes.z, tmp2); + /* fourth 4-byte chunks of ips[] */ + *fourth = _mm512_permutexvar_epi32(perm_idxes.z, tmp4); +} + +static __rte_always_inline void +transpose_x8(uint8_t ips[8][RTE_FIB6_IPV6_ADDR_SIZE], + __m512i *first, __m512i *second) +{ + __m512i tmp1, tmp2, tmp3, tmp4; + const __rte_x86_zmm_t perm_idxes = { + .u64 = { 0, 2, 4, 6, 1, 3, 5, 7 + }, + }; + + tmp1 = _mm512_loadu_si512(&ips[0][0]); + tmp2 = _mm512_loadu_si512(&ips[4][0]); + + tmp3 = _mm512_unpacklo_epi64(tmp1, tmp2); + *first = _mm512_permutexvar_epi64(perm_idxes.z, tmp3); + tmp4 = _mm512_unpackhi_epi64(tmp1, tmp2); + *second = _mm512_permutexvar_epi64(perm_idxes.z, tmp4); +} + +static __rte_always_inline void +trie_vec_lookup_x16(void *p, uint8_t ips[16][RTE_FIB6_IPV6_ADDR_SIZE], + uint64_t *next_hops, int size) +{ + struct rte_trie_tbl *dp = (struct rte_trie_tbl *)p; + const __m512i zero = _mm512_set1_epi32(0); + const __m512i lsb = _mm512_set1_epi32(1); + const __m512i two_lsb = _mm512_set1_epi32(3); + __m512i first, second, third, fourth; /*< IPv6 four byte chunks */ + __m512i idxes, res, shuf_idxes; + __m512i tmp, tmp2, bytes, byte_chunk, base_idxes; + /* used to mask gather values if size is 2 (16 bit next hops) */ + const __m512i res_msk = _mm512_set1_epi32(UINT16_MAX); + const __rte_x86_zmm_t bswap = { + .u8 = { 2, 1, 0, 255, 6, 5, 4, 255, + 10, 9, 8, 255, 14, 13, 12, 255, + 2, 1, 0, 255, 6, 5, 
4, 255, + 10, 9, 8, 255, 14, 13, 12, 255, + 2, 1, 0, 255, 6, 5, 4, 255, + 10, 9, 8, 255, 14, 13, 12, 255, + 2, 1, 0, 255, 6, 5, 4, 255, + 10, 9, 8, 255, 14, 13, 12, 255 + }, + }; + const __mmask64 k = 0x1111111111111111; + int i = 3; + __mmask16 msk_ext, new_msk; + __mmask16 exp_msk = 0x5555; + + transpose_x16(ips, &first, &second, &third, &fourth); + + /* get_tbl24_idx() for every 4 byte chunk */ + idxes = _mm512_shuffle_epi8(first, bswap.z); + + /** + * lookup in tbl24 + * Put it inside a branch to make the compiler happy with -O0 + */ + if (size == sizeof(uint16_t)) { + res = _mm512_i32gather_epi32(idxes, (const int *)dp->tbl24, 2); + res = _mm512_and_epi32(res, res_msk); + } else + res = _mm512_i32gather_epi32(idxes, (const int *)dp->tbl24, 4); + + + /* get extended entries indexes */ + msk_ext = _mm512_test_epi32_mask(res, lsb); + + tmp = _mm512_srli_epi32(res, 1); + + /* idxes to retrieve bytes */ + shuf_idxes = _mm512_setr_epi32(3, 7, 11, 15, + 19, 23, 27, 31, + 35, 39, 43, 47, + 51, 55, 59, 63); + + base_idxes = _mm512_setr_epi32(0, 4, 8, 12, + 16, 20, 24, 28, + 32, 36, 40, 44, + 48, 52, 56, 60); + + /* traverse down the trie */ + while (msk_ext) { + idxes = _mm512_maskz_slli_epi32(msk_ext, tmp, 8); + byte_chunk = (i < 8) ? + ((i >= 4) ? second : first) : + ((i >= 12) ? fourth : third); + bytes = _mm512_maskz_shuffle_epi8(k, byte_chunk, shuf_idxes); + idxes = _mm512_maskz_add_epi32(msk_ext, idxes, bytes); + if (size == sizeof(uint16_t)) { + tmp = _mm512_mask_i32gather_epi32(zero, msk_ext, + idxes, (const int *)dp->tbl8, 2); + tmp = _mm512_and_epi32(tmp, res_msk); + } else + tmp = _mm512_mask_i32gather_epi32(zero, msk_ext, + idxes, (const int *)dp->tbl8, 4); + new_msk = _mm512_test_epi32_mask(tmp, lsb); + res = _mm512_mask_blend_epi32(msk_ext ^ new_msk, res, tmp); + tmp = _mm512_srli_epi32(tmp, 1); + msk_ext = new_msk; + + shuf_idxes = _mm512_maskz_add_epi8(k, shuf_idxes, lsb); + shuf_idxes = _mm512_and_epi32(shuf_idxes, two_lsb); + shuf_idxes = _mm512_maskz_add_epi8(k, shuf_idxes, base_idxes); + i++; + } + + res = _mm512_srli_epi32(res, 1); + tmp = _mm512_maskz_expand_epi32(exp_msk, res); + __m256i tmp256; + tmp256 = _mm512_extracti32x8_epi32(res, 1); + tmp2 = _mm512_maskz_expand_epi32(exp_msk, + _mm512_castsi256_si512(tmp256)); + _mm512_storeu_si512(next_hops, tmp); + _mm512_storeu_si512(next_hops + 8, tmp2); +} + +static void +trie_vec_lookup_x8_8b(void *p, uint8_t ips[8][RTE_FIB6_IPV6_ADDR_SIZE], + uint64_t *next_hops) +{ + struct rte_trie_tbl *dp = (struct rte_trie_tbl *)p; + const __m512i zero = _mm512_set1_epi32(0); + const __m512i lsb = _mm512_set1_epi32(1); + const __m512i three_lsb = _mm512_set1_epi32(7); + __m512i first, second; /*< IPv6 eight byte chunks */ + __m512i idxes, res, shuf_idxes; + __m512i tmp, bytes, byte_chunk, base_idxes; + const __rte_x86_zmm_t bswap = { + .u8 = { 2, 1, 0, 255, 255, 255, 255, 255, + 10, 9, 8, 255, 255, 255, 255, 255, + 2, 1, 0, 255, 255, 255, 255, 255, + 10, 9, 8, 255, 255, 255, 255, 255, + 2, 1, 0, 255, 255, 255, 255, 255, + 10, 9, 8, 255, 255, 255, 255, 255, + 2, 1, 0, 255, 255, 255, 255, 255, + 10, 9, 8, 255, 255, 255, 255, 255 + }, + }; + const __mmask64 k = 0x101010101010101; + int i = 3; + __mmask8 msk_ext, new_msk; + + transpose_x8(ips, &first, &second); + + /* get_tbl24_idx() for every 4 byte chunk */ + idxes = _mm512_shuffle_epi8(first, bswap.z); + + /* lookup in tbl24 */ + res = _mm512_i64gather_epi64(idxes, (const void *)dp->tbl24, 8); + /* get extended entries indexes */ + msk_ext = _mm512_test_epi64_mask(res, lsb); + + tmp =
_mm512_srli_epi64(res, 1); + + /* idxes to retrieve bytes */ + shuf_idxes = _mm512_setr_epi64(3, 11, 19, 27, 35, 43, 51, 59); + + base_idxes = _mm512_setr_epi64(0, 8, 16, 24, 32, 40, 48, 56); + + /* traverse down the trie */ + while (msk_ext) { + idxes = _mm512_maskz_slli_epi64(msk_ext, tmp, 8); + byte_chunk = (i < 8) ? first : second; + bytes = _mm512_maskz_shuffle_epi8(k, byte_chunk, shuf_idxes); + idxes = _mm512_maskz_add_epi64(msk_ext, idxes, bytes); + tmp = _mm512_mask_i64gather_epi64(zero, msk_ext, + idxes, (const void *)dp->tbl8, 8); + new_msk = _mm512_test_epi64_mask(tmp, lsb); + res = _mm512_mask_blend_epi64(msk_ext ^ new_msk, res, tmp); + tmp = _mm512_srli_epi64(tmp, 1); + msk_ext = new_msk; + + shuf_idxes = _mm512_maskz_add_epi8(k, shuf_idxes, lsb); + shuf_idxes = _mm512_and_epi64(shuf_idxes, three_lsb); + shuf_idxes = _mm512_maskz_add_epi8(k, shuf_idxes, base_idxes); + i++; + } + + res = _mm512_srli_epi64(res, 1); + _mm512_storeu_si512(next_hops, res); +} + +void +rte_trie_vec_lookup_bulk_2b(void *p, uint8_t ips[][RTE_FIB6_IPV6_ADDR_SIZE], + uint64_t *next_hops, const unsigned int n) +{ + uint32_t i; + for (i = 0; i < (n / 16); i++) { + trie_vec_lookup_x16(p, (uint8_t (*)[16])&ips[i * 16][0], + next_hops + i * 16, sizeof(uint16_t)); + } + rte_trie_lookup_bulk_2b(p, (uint8_t (*)[16])&ips[i * 16][0], + next_hops + i * 16, n - i * 16); +} + +void +rte_trie_vec_lookup_bulk_4b(void *p, uint8_t ips[][RTE_FIB6_IPV6_ADDR_SIZE], + uint64_t *next_hops, const unsigned int n) +{ + uint32_t i; + for (i = 0; i < (n / 16); i++) { + trie_vec_lookup_x16(p, (uint8_t (*)[16])&ips[i * 16][0], + next_hops + i * 16, sizeof(uint32_t)); + } + rte_trie_lookup_bulk_4b(p, (uint8_t (*)[16])&ips[i * 16][0], + next_hops + i * 16, n - i * 16); +} + +void +rte_trie_vec_lookup_bulk_8b(void *p, uint8_t ips[][RTE_FIB6_IPV6_ADDR_SIZE], + uint64_t *next_hops, const unsigned int n) +{ + uint32_t i; + for (i = 0; i < (n / 8); i++) { + trie_vec_lookup_x8_8b(p, (uint8_t (*)[16])&ips[i * 8][0], + next_hops + i * 8); + } + rte_trie_lookup_bulk_8b(p, (uint8_t (*)[16])&ips[i * 8][0], + next_hops + i * 8, n - i * 8); +} diff --git a/lib/librte_fib/trie_avx512.h b/lib/librte_fib/trie_avx512.h new file mode 100644 index 0000000..ef8c7f0 --- /dev/null +++ b/lib/librte_fib/trie_avx512.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2020 Intel Corporation + */ + +#ifndef _TRIE_AVX512_H_ +#define _TRIE_AVX512_H_ + +void +rte_trie_vec_lookup_bulk_2b(void *p, uint8_t ips[][RTE_FIB6_IPV6_ADDR_SIZE], + uint64_t *next_hops, const unsigned int n); + +void +rte_trie_vec_lookup_bulk_4b(void *p, uint8_t ips[][RTE_FIB6_IPV6_ADDR_SIZE], + uint64_t *next_hops, const unsigned int n); + +void +rte_trie_vec_lookup_bulk_8b(void *p, uint8_t ips[][RTE_FIB6_IPV6_ADDR_SIZE], + uint64_t *next_hops, const unsigned int n); + +#endif /* _TRIE_AVX512_H_ */ From patchwork Tue Oct 27 15:11:30 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Vladimir Medvedkin X-Patchwork-Id: 82349 X-Patchwork-Delegate: david.marchand@redhat.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 05B53A04B5; Tue, 27 Oct 2020 16:14:29 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id C120EBBB4; Tue, 27 Oct 2020 16:12:11 +0100 (CET) Received: from mga03.intel.com (mga03.intel.com 
[134.134.136.65]) by dpdk.org (Postfix) with ESMTP id 4ADE572DE for ; Tue, 27 Oct 2020 16:11:55 +0100 (CET) IronPort-SDR: 3uCNiN/WI03okNsxj8jgNZeiCrn/uG+MOlOavj+KpNrrkCC/HNC9KqzeVtOLxvjUpS8PCZsST8 jAjiLqt30DvA== X-IronPort-AV: E=McAfee;i="6000,8403,9786"; a="168197290" X-IronPort-AV: E=Sophos;i="5.77,424,1596524400"; d="scan'208";a="168197290" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by orsmga103.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 27 Oct 2020 08:11:53 -0700 IronPort-SDR: zzQaZmSHTf9wjNUxWplska7pw7TTswrm3jQuRMXrRL1oSSbBc4qJtm7Jx4x00OFLpZrTJjmHop 3O4ThokB5KnA== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.77,424,1596524400"; d="scan'208";a="322963861" Received: from silpixa00400072.ir.intel.com ([10.237.222.213]) by orsmga006.jf.intel.com with ESMTP; 27 Oct 2020 08:11:51 -0700 From: Vladimir Medvedkin To: dev@dpdk.org Cc: david.marchand@redhat.com, jerinj@marvell.com, mdr@ashroe.eu, thomas@monjalon.net, konstantin.ananyev@intel.com, bruce.richardson@intel.com, ciara.power@intel.com Date: Tue, 27 Oct 2020 15:11:30 +0000 Message-Id: <3f7ebc36e813627639b7353447d74d59c4d8aa0d.1603811281.git.vladimir.medvedkin@intel.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: References: In-Reply-To: References: Subject: [dpdk-dev] [PATCH v15 7/8] app/testfib: add support for different lookup functions X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Added -v option to switch between different lookup implementations to measure their performance and correctness. Signed-off-by: Vladimir Medvedkin --- app/test-fib/main.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 62 insertions(+), 3 deletions(-) diff --git a/app/test-fib/main.c b/app/test-fib/main.c index 9cf01b1..9e6a4f2 100644 --- a/app/test-fib/main.c +++ b/app/test-fib/main.c @@ -99,6 +99,7 @@ static struct { uint8_t ent_sz; uint8_t rnd_lookup_ips_ratio; uint8_t print_fract; + uint8_t lookup_fn; } config = { .routes_file = NULL, .lookup_ips_file = NULL, @@ -110,7 +111,8 @@ static struct { .tbl8 = DEFAULT_LPM_TBL8, .ent_sz = 4, .rnd_lookup_ips_ratio = 0, - .print_fract = 10 + .print_fract = 10, + .lookup_fn = 0 }; struct rt_rule_4 { @@ -638,7 +640,11 @@ print_usage(void) "1/2/4/8 (default 4)>]\n" "[-g ]\n" "[-w ]\n" - "[-u ]\n", + "[-u ]\n" + "[-v ]\n", config.prgname); } @@ -681,7 +687,7 @@ parse_opts(int argc, char **argv) int opt; char *endptr; - while ((opt = getopt(argc, argv, "f:t:n:d:l:r:c6ab:e:g:w:u:s")) != + while ((opt = getopt(argc, argv, "f:t:n:d:l:r:c6ab:e:g:w:u:sv:")) != -1) { switch (opt) { case 'f': @@ -769,6 +775,23 @@ parse_opts(int argc, char **argv) rte_exit(-EINVAL, "Invalid option -g\n"); } break; + case 'v': + if ((strcmp(optarg, "s1") == 0) || + (strcmp(optarg, "s") == 0)) { + config.lookup_fn = 1; + break; + } else if (strcmp(optarg, "v") == 0) { + config.lookup_fn = 2; + break; + } else if (strcmp(optarg, "s2") == 0) { + config.lookup_fn = 3; + break; + } else if (strcmp(optarg, "s3") == 0) { + config.lookup_fn = 4; + break; + } + print_usage(); + rte_exit(-EINVAL, "Invalid option -v %s\n", optarg); default: print_usage(); rte_exit(-EINVAL, "Invalid options\n"); @@ -846,6 +869,27 @@ run_v4(void) return -rte_errno; } + if (config.lookup_fn != 0) { + if (config.lookup_fn == 1) + ret = rte_fib_select_lookup(fib, + 
RTE_FIB_LOOKUP_DIR24_8_SCALAR_MACRO); + else if (config.lookup_fn == 2) + ret = rte_fib_select_lookup(fib, + RTE_FIB_LOOKUP_DIR24_8_VECTOR_AVX512); + else if (config.lookup_fn == 3) + ret = rte_fib_select_lookup(fib, + RTE_FIB_LOOKUP_DIR24_8_SCALAR_INLINE); + else if (config.lookup_fn == 4) + ret = rte_fib_select_lookup(fib, + RTE_FIB_LOOKUP_DIR24_8_SCALAR_UNI); + else + ret = -EINVAL; + if (ret != 0) { + printf("Can not init lookup function\n"); + return ret; + } + } + for (k = config.print_fract, i = 0; k > 0; k--) { start = rte_rdtsc_precise(); for (j = 0; j < (config.nb_routes - i) / k; j++) { @@ -1025,6 +1069,21 @@ run_v6(void) return -rte_errno; } + if (config.lookup_fn != 0) { + if (config.lookup_fn == 1) + ret = rte_fib6_select_lookup(fib, + RTE_FIB6_LOOKUP_TRIE_SCALAR); + else if (config.lookup_fn == 2) + ret = rte_fib6_select_lookup(fib, + RTE_FIB6_LOOKUP_TRIE_VECTOR_AVX512); + else + ret = -EINVAL; + if (ret != 0) { + printf("Can not init lookup function\n"); + return ret; + } + } + for (k = config.print_fract, i = 0; k > 0; k--) { start = rte_rdtsc_precise(); for (j = 0; j < (config.nb_routes - i) / k; j++) { From patchwork Tue Oct 27 15:11:31 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Vladimir Medvedkin X-Patchwork-Id: 82350 X-Patchwork-Delegate: david.marchand@redhat.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id D9907A04B5; Tue, 27 Oct 2020 16:14:58 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 564F1BC8C; Tue, 27 Oct 2020 16:12:13 +0100 (CET) Received: from mga03.intel.com (mga03.intel.com [134.134.136.65]) by dpdk.org (Postfix) with ESMTP id 0A10D6883 for ; Tue, 27 Oct 2020 16:11:55 +0100 (CET) IronPort-SDR: zBOK9VcaQnkRbbwxqvYX7wwDJIgigUylFoaRhDvid0H594Ej0BiGBxCXhG4B6kqjFLK/vuwXNX xi71RYCVxhQw== X-IronPort-AV: E=McAfee;i="6000,8403,9786"; a="168197299" X-IronPort-AV: E=Sophos;i="5.77,424,1596524400"; d="scan'208";a="168197299" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by orsmga103.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 27 Oct 2020 08:11:55 -0700 IronPort-SDR: ySHmNN9uqtbHoOlIcjKNFiRgZivopf4h2235WLYVCWZCKuTvWpYw414IJRS2TwaPiWmu4qoWbJ azQK5feCKJjw== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.77,424,1596524400"; d="scan'208";a="322963874" Received: from silpixa00400072.ir.intel.com ([10.237.222.213]) by orsmga006.jf.intel.com with ESMTP; 27 Oct 2020 08:11:53 -0700 From: Vladimir Medvedkin To: dev@dpdk.org Cc: david.marchand@redhat.com, jerinj@marvell.com, mdr@ashroe.eu, thomas@monjalon.net, konstantin.ananyev@intel.com, bruce.richardson@intel.com, ciara.power@intel.com Date: Tue, 27 Oct 2020 15:11:31 +0000 Message-Id: X-Mailer: git-send-email 2.7.4 In-Reply-To: References: In-Reply-To: References: Subject: [dpdk-dev] [PATCH v15 8/8] fib: remove unnecessary type of fib X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" FIB type RTE_FIB_TYPE_MAX is used only for sanity checks; remove it to prevent applications from starting to use it. The same applies to FIB6's RTE_FIB6_TYPE_MAX.
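With the RTE_FIB_TYPE_MAX and RTE_FIB6_TYPE_MAX sentinels gone, an application selects a concrete FIB type and relies on rte_fib_create()/rte_fib6_create() rejecting an out-of-range value with rte_errno set to EINVAL, which is exactly what the updated unit tests below exercise with RTE_FIB_DIR24_8 + 1. The following is a minimal application-side sketch of that pattern; the helper name and sizing values are illustrative assumptions, not part of this patch.

#include <stdio.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_fib.h>

/* Hypothetical helper: pick a concrete FIB type rather than probing a
 * *_TYPE_MAX sentinel; an invalid type is rejected by rte_fib_create(). */
static struct rte_fib *
create_ipv4_fib(void)
{
	struct rte_fib_conf conf = {
		.type = RTE_FIB_DIR24_8,	/* concrete type, no RTE_FIB_TYPE_MAX */
		.max_routes = 1 << 16,		/* illustrative sizing */
		.dir24_8 = {
			.nh_sz = RTE_FIB_DIR24_8_4B,
			.num_tbl8 = 1 << 15,
		},
	};
	struct rte_fib *fib;

	fib = rte_fib_create("example_fib", SOCKET_ID_ANY, &conf);
	if (fib == NULL)
		printf("rte_fib_create: %s\n", rte_strerror(rte_errno));
	return fib;
}
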
Signed-off-by: Vladimir Medvedkin --- app/test/test_fib.c | 2 +- app/test/test_fib6.c | 2 +- lib/librte_fib/rte_fib.c | 2 +- lib/librte_fib/rte_fib.h | 3 +-- lib/librte_fib/rte_fib6.c | 2 +- lib/librte_fib/rte_fib6.h | 3 +-- 6 files changed, 6 insertions(+), 8 deletions(-) diff --git a/app/test/test_fib.c b/app/test/test_fib.c index ca80a5d..e46b993 100644 --- a/app/test/test_fib.c +++ b/app/test/test_fib.c @@ -61,7 +61,7 @@ test_create_invalid(void) "Call succeeded with invalid parameters\n"); config.max_routes = MAX_ROUTES; - config.type = RTE_FIB_TYPE_MAX; + config.type = RTE_FIB_DIR24_8 + 1; fib = rte_fib_create(__func__, SOCKET_ID_ANY, &config); RTE_TEST_ASSERT(fib == NULL, "Call succeeded with invalid parameters\n"); diff --git a/app/test/test_fib6.c b/app/test/test_fib6.c index af589fe..74abfc7 100644 --- a/app/test/test_fib6.c +++ b/app/test/test_fib6.c @@ -63,7 +63,7 @@ test_create_invalid(void) "Call succeeded with invalid parameters\n"); config.max_routes = MAX_ROUTES; - config.type = RTE_FIB6_TYPE_MAX; + config.type = RTE_FIB6_TRIE + 1; fib = rte_fib6_create(__func__, SOCKET_ID_ANY, &config); RTE_TEST_ASSERT(fib == NULL, "Call succeeded with invalid parameters\n"); diff --git a/lib/librte_fib/rte_fib.c b/lib/librte_fib/rte_fib.c index 398dbf9..b354d4b 100644 --- a/lib/librte_fib/rte_fib.c +++ b/lib/librte_fib/rte_fib.c @@ -159,7 +159,7 @@ rte_fib_create(const char *name, int socket_id, struct rte_fib_conf *conf) /* Check user arguments. */ if ((name == NULL) || (conf == NULL) || (conf->max_routes < 0) || - (conf->type >= RTE_FIB_TYPE_MAX)) { + (conf->type > RTE_FIB_DIR24_8)) { rte_errno = EINVAL; return NULL; } diff --git a/lib/librte_fib/rte_fib.h b/lib/librte_fib/rte_fib.h index 8688c93..9a49313 100644 --- a/lib/librte_fib/rte_fib.h +++ b/lib/librte_fib/rte_fib.h @@ -34,8 +34,7 @@ struct rte_rib; /** Type of FIB struct */ enum rte_fib_type { RTE_FIB_DUMMY, /**< RIB tree based FIB */ - RTE_FIB_DIR24_8, /**< DIR24_8 based FIB */ - RTE_FIB_TYPE_MAX + RTE_FIB_DIR24_8 /**< DIR24_8 based FIB */ }; /** Modify FIB function */ diff --git a/lib/librte_fib/rte_fib6.c b/lib/librte_fib/rte_fib6.c index 1f5af0f..44cc0c9 100644 --- a/lib/librte_fib/rte_fib6.c +++ b/lib/librte_fib/rte_fib6.c @@ -160,7 +160,7 @@ rte_fib6_create(const char *name, int socket_id, struct rte_fib6_conf *conf) /* Check user arguments. */ if ((name == NULL) || (conf == NULL) || (conf->max_routes < 0) || - (conf->type >= RTE_FIB6_TYPE_MAX)) { + (conf->type > RTE_FIB6_TRIE)) { rte_errno = EINVAL; return NULL; } diff --git a/lib/librte_fib/rte_fib6.h b/lib/librte_fib/rte_fib6.h index 887de7b..adb5005 100644 --- a/lib/librte_fib/rte_fib6.h +++ b/lib/librte_fib/rte_fib6.h @@ -35,8 +35,7 @@ struct rte_rib6; /** Type of FIB struct */ enum rte_fib6_type { RTE_FIB6_DUMMY, /**< RIB6 tree based FIB */ - RTE_FIB6_TRIE, /**< TRIE based fib */ - RTE_FIB6_TYPE_MAX + RTE_FIB6_TRIE /**< TRIE based fib */ }; /** Modify FIB function */
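
Taken together, the series lets an application choose a lookup implementation at run time. Below is a minimal end-to-end sketch built against these patches: rte_fib6_select_lookup() and the RTE_FIB6_LOOKUP_* types come from this series (patch 7/8 uses the same selector in app/test-fib), the rte_fib6_conf layout is as assumed from rte_fib6.h at this point in the series, and the route, sizing values, and next hop are purely illustrative.

#include <stdio.h>
#include <inttypes.h>
#include <rte_eal.h>
#include <rte_memory.h>
#include <rte_fib6.h>

int
main(int argc, char **argv)
{
	struct rte_fib6_conf conf = {
		.type = RTE_FIB6_TRIE,
		.max_routes = 1 << 16,		/* illustrative sizing */
		.trie = {
			.nh_sz = RTE_FIB6_TRIE_4B,
			.num_tbl8 = 1 << 15,
		},
	};
	/* 2001:db8:: with the remaining bytes zeroed (illustrative address) */
	uint8_t dst[1][RTE_FIB6_IPV6_ADDR_SIZE] = { { 0x20, 0x01, 0x0d, 0xb8 } };
	uint64_t next_hop = 0;
	struct rte_fib6 *fib;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	fib = rte_fib6_create("example_fib6", SOCKET_ID_ANY, &conf);
	if (fib == NULL)
		return -1;

	/* 2001:db8::/32 -> next hop 100 (illustrative route) */
	if (rte_fib6_add(fib, dst[0], 32, 100) < 0)
		return -1;

	/* Prefer the AVX512 trie lookup added in 6/8; the selector is expected
	 * to fail when the CPU or the configured max SIMD bitwidth does not
	 * allow it, so fall back to the scalar implementation. */
	if (rte_fib6_select_lookup(fib, RTE_FIB6_LOOKUP_TRIE_VECTOR_AVX512) != 0)
		rte_fib6_select_lookup(fib, RTE_FIB6_LOOKUP_TRIE_SCALAR);

	rte_fib6_lookup_bulk(fib, dst, &next_hop, 1);
	printf("next hop: %" PRIu64 "\n", next_hop);

	rte_fib6_free(fib);
	return 0;
}

Selecting RTE_FIB6_LOOKUP_DEFAULT from patch 6/8 would achieve the same fallback inside the library: trie_get_lookup_fn() tries the vector function first and returns the scalar one when AVX512 or the max SIMD bitwidth reported by rte_vect_get_max_simd_bitwidth() rules it out.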