From patchwork Wed Jan 29 14:06:08 2020
X-Patchwork-Submitter: Vladimir Medvedkin
X-Patchwork-Id: 65342
X-Patchwork-Delegate: gakhil@marvell.com
From: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
To: dev@dpdk.org
Cc: konstantin.ananyev@intel.com, akhil.goyal@nxp.com
Date: Wed, 29 Jan 2020 14:06:08 +0000
Message-Id: <1580306768-110555-7-git-send-email-vladimir.medvedkin@intel.com>
In-Reply-To: <1580306768-110555-1-git-send-email-vladimir.medvedkin@intel.com>
References: <1580306768-110555-1-git-send-email-vladimir.medvedkin@intel.com>
Subject: [dpdk-dev] [PATCH v5 6/6] examples/ipsec-secgw: add SAD cache

Introduce an SAD cache that keeps the most recently used SA for each SPI
in a per-lcore cache. The cache is a flat array of SA pointers indexed
by SPI.
Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 examples/ipsec-secgw/ipsec-secgw.c |  30 +++++++++-
 examples/ipsec-secgw/ipsec.h       |   1 +
 examples/ipsec-secgw/sa.c          |  32 +----------
 examples/ipsec-secgw/sad.c         |  40 ++++++++++++++
 examples/ipsec-secgw/sad.h         | 109 ++++++++++++++++++++++++++++++++++---
 5 files changed, 171 insertions(+), 41 deletions(-)

diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 3e5f82e..32ecd26 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -46,6 +46,7 @@
 
 #include "ipsec.h"
 #include "parser.h"
+#include "sad.h"
 
 #define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
 
@@ -192,7 +193,10 @@ static uint32_t mtu_size = RTE_ETHER_MTU;
 static uint64_t frag_ttl_ns = MAX_FRAG_TTL_NS;
 
 /* application wide librte_ipsec/SA parameters */
-struct app_sa_prm app_sa_prm = {.enable = 0};
+struct app_sa_prm app_sa_prm = {
+            .enable = 0,
+            .cache_sz = SA_CACHE_SZ
+        };
 static const char *cfgfile;
 
 struct lcore_rx_queue {
@@ -1102,7 +1106,7 @@ main_loop(__attribute__((unused)) void *dummy)
     uint16_t portid;
     uint8_t queueid;
     struct lcore_conf *qconf;
-    int32_t socket_id;
+    int32_t rc, socket_id;
     const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
             / US_PER_S * BURST_TX_DRAIN_US;
     struct lcore_rx_queue *rxql;
@@ -1132,6 +1136,14 @@ main_loop(__attribute__((unused)) void *dummy)
     qconf->frag.pool_dir = socket_ctx[socket_id].mbuf_pool;
     qconf->frag.pool_indir = socket_ctx[socket_id].mbuf_pool_indir;
 
+    rc = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
+    if (rc != 0) {
+        RTE_LOG(ERR, IPSEC,
+            "SAD cache init on lcore %u, failed with code: %d\n",
+            lcore_id, rc);
+        return rc;
+    }
+
     if (qconf->nb_rx_queue == 0) {
         RTE_LOG(DEBUG, IPSEC,
             "lcore %u has nothing to do\n", lcore_id);
@@ -1271,6 +1283,7 @@ print_usage(const char *prgname)
         " [-w REPLAY_WINDOW_SIZE]"
         " [-e]"
         " [-a]"
+        " [-c]"
         " -f CONFIG_FILE"
         " --config (port,queue,lcore)[,(port,queue,lcore)]"
         " [--single-sa SAIDX]"
@@ -1290,6 +1303,8 @@ print_usage(const char *prgname)
         "     size for each SA\n"
         "  -e enables ESN\n"
         "  -a enables SA SQN atomic behaviour\n"
+        "  -c specifies inbound SAD cache size,\n"
+        "     zero value disables the cache (default value: 128)\n"
         "  -f CONFIG_FILE: Configuration file\n"
         "  --config (port,queue,lcore): Rx queue configuration\n"
         "  --single-sa SAIDX: Use single SA index for outbound traffic,\n"
@@ -1442,7 +1457,7 @@ parse_args(int32_t argc, char **argv)
 
     argvopt = argv;
 
-    while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:",
+    while ((opt = getopt_long(argc, argvopt, "aelp:Pu:f:j:w:c:",
                 lgopts, &option_index)) != EOF) {
 
         switch (opt) {
@@ -1501,6 +1516,15 @@ parse_args(int32_t argc, char **argv)
             app_sa_prm.enable = 1;
             app_sa_prm.flags |= RTE_IPSEC_SAFLAG_SQN_ATOM;
             break;
+        case 'c':
+            ret = parse_decimal(optarg);
+            if (ret < 0) {
+                printf("Invalid SA cache size: %s\n", optarg);
+                print_usage(prgname);
+                return -1;
+            }
+            app_sa_prm.cache_sz = ret;
+            break;
         case CMD_LINE_OPT_CONFIG_NUM:
             ret = parse_config(optarg);
             if (ret) {
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index 3c77232..4f2fd61 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -81,6 +81,7 @@ struct app_sa_prm {
     uint32_t enable; /* use librte_ipsec API for ipsec pkt processing */
     uint32_t window_size; /* replay window size */
     uint32_t enable_esn;  /* enable/disable ESN support */
+    uint32_t cache_sz; /* per lcore SA cache size */
     uint64_t flags; /* rte_ipsec_sa_prm.flags */
 };
 
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index b3b83e3..099a11b 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -839,7 +839,7 @@ sa_create(const char *name, int32_t socket_id, uint32_t nb_sa)
         return NULL;
     }
 
-    sa_ctx = rte_malloc(NULL, sizeof(struct sa_ctx) +
+    sa_ctx = rte_zmalloc(NULL, sizeof(struct sa_ctx) +
         sizeof(struct ipsec_sa) * nb_sa, RTE_CACHE_LINE_SIZE);
 
     if (sa_ctx == NULL) {
@@ -1451,9 +1451,6 @@ inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
         void *sa_arr[], uint16_t nb_pkts)
 {
     uint32_t i;
-    struct ip *ip;
-    uint32_t *src4_addr;
-    uint8_t *src6_addr;
     void *result_sa;
     struct ipsec_sa *sa;
 
@@ -1479,32 +1476,7 @@ inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
             intsa |= IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
             result_sa = (void *)intsa;
         }
-
-        ip = rte_pktmbuf_mtod(pkts[i], struct ip *);
-        switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
-        case IP4_TUNNEL:
-            src4_addr = RTE_PTR_ADD(ip,
-                offsetof(struct ip, ip_src));
-            if ((ip->ip_v == IPVERSION) &&
-                    (sa->src.ip.ip4 == *src4_addr) &&
-                    (sa->dst.ip.ip4 == *(src4_addr + 1)))
-                sa_arr[i] = result_sa;
-            else
-                sa_arr[i] = NULL;
-            break;
-        case IP6_TUNNEL:
-            src6_addr = RTE_PTR_ADD(ip,
-                offsetof(struct ip6_hdr, ip6_src));
-            if ((ip->ip_v == IP6_VERSION) &&
-                    !memcmp(&sa->src.ip.ip6.ip6, src6_addr, 16) &&
-                    !memcmp(&sa->dst.ip.ip6.ip6, src6_addr + 16, 16))
-                sa_arr[i] = result_sa;
-            else
-                sa_arr[i] = NULL;
-            break;
-        case TRANSPORT:
-            sa_arr[i] = result_sa;
-        }
+        sa_arr[i] = result_sa;
     }
 }
 
diff --git a/examples/ipsec-secgw/sad.c b/examples/ipsec-secgw/sad.c
index fd31101..5b2c0e6 100644
--- a/examples/ipsec-secgw/sad.c
+++ b/examples/ipsec-secgw/sad.c
@@ -3,10 +3,17 @@
  */
 
 #include <rte_errno.h>
+#include <rte_malloc.h>
 
 #include "ipsec.h"
 #include "sad.h"
 
+RTE_DEFINE_PER_LCORE(struct ipsec_sad_cache, sad_cache) = {
+    .v4 = NULL,
+    .v6 = NULL,
+    .mask = 0,
+};
+
 int
 ipsec_sad_add(struct ipsec_sad *sad, struct ipsec_sa *sa)
 {
@@ -65,6 +72,39 @@ ipsec_sad_add(struct ipsec_sad *sad, struct ipsec_sa *sa)
     return 0;
 }
 
+/*
+ * Init per lcore SAD cache.
+ * Must be called by every processing lcore.
+ */
+int
+ipsec_sad_lcore_cache_init(uint32_t nb_cache_ent)
+{
+    uint32_t cache_elem;
+    size_t cache_mem_sz;
+    struct ipsec_sad_cache *cache;
+
+    cache = &RTE_PER_LCORE(sad_cache);
+
+    cache_elem = rte_align32pow2(nb_cache_ent);
+    cache_mem_sz = sizeof(struct ipsec_sa *) * cache_elem;
+
+    if (cache_mem_sz != 0) {
+        cache->v4 = rte_zmalloc_socket(NULL, cache_mem_sz,
+            RTE_CACHE_LINE_SIZE, rte_socket_id());
+        if (cache->v4 == NULL)
+            return -rte_errno;
+
+        cache->v6 = rte_zmalloc_socket(NULL, cache_mem_sz,
+            RTE_CACHE_LINE_SIZE, rte_socket_id());
+        if (cache->v6 == NULL)
+            return -rte_errno;
+
+        cache->mask = cache_elem - 1;
+    }
+
+    return 0;
+}
+
 int
 ipsec_sad_create(const char *name, struct ipsec_sad *sad,
     int socket_id, struct ipsec_sa_cnt *sa_cnt)
diff --git a/examples/ipsec-secgw/sad.h b/examples/ipsec-secgw/sad.h
index 29ed0f8..81a6ff2 100644
--- a/examples/ipsec-secgw/sad.h
+++ b/examples/ipsec-secgw/sad.h
@@ -7,6 +7,17 @@
 
 #include <rte_ipsec_sad.h>
 
+#define SA_CACHE_SZ 128
+#define SPI2IDX(spi, mask) ((spi) & (mask))
+
+struct ipsec_sad_cache {
+    struct ipsec_sa **v4;
+    struct ipsec_sa **v6;
+    uint32_t mask;
+};
+
+RTE_DECLARE_PER_LCORE(struct ipsec_sad_cache, sad_cache);
+
 struct ipsec_sad {
     struct rte_ipsec_sad *sad_v4;
     struct rte_ipsec_sad *sad_v6;
@@ -17,8 +28,42 @@ int ipsec_sad_create(const char *name, struct ipsec_sad *sad,
 
 int ipsec_sad_add(struct ipsec_sad *sad, struct ipsec_sa *sa);
 
+int ipsec_sad_lcore_cache_init(uint32_t nb_cache_ent);
+
+static inline int
+cmp_sa_key(struct ipsec_sa *sa, int is_v4, struct rte_ipv4_hdr *ipv4,
+    struct rte_ipv6_hdr *ipv6)
+{
+    int sa_type = WITHOUT_TRANSPORT_VERSION(sa->flags);
+    if ((sa_type == TRANSPORT) ||
+            /* IPv4 check */
+            (is_v4 && (sa_type == IP4_TUNNEL) &&
+            (sa->src.ip.ip4 == ipv4->src_addr) &&
+            (sa->dst.ip.ip4 == ipv4->dst_addr)) ||
+            /* IPv6 check */
+            (!is_v4 && (sa_type == IP6_TUNNEL) &&
+            (!memcmp(sa->src.ip.ip6.ip6, ipv6->src_addr, 16)) &&
+            (!memcmp(sa->dst.ip.ip6.ip6, ipv6->dst_addr, 16))))
+        return 1;
+
+    return 0;
+}
+
 static inline void
-sad_lookup(const struct ipsec_sad *sad, struct rte_mbuf *pkts[],
+sa_cache_update(struct ipsec_sa **sa_cache, struct ipsec_sa *sa, uint32_t mask)
+{
+    uint32_t cache_idx;
+
+    /* SAD cache is disabled */
+    if (mask == 0)
+        return;
+
+    cache_idx = SPI2IDX(sa->spi, mask);
+    sa_cache[cache_idx] = sa;
+}
+
+static inline void
+sad_lookup(struct ipsec_sad *sad, struct rte_mbuf *pkts[],
     void *sa[], uint16_t nb_pkts)
 {
     uint32_t i;
@@ -34,13 +79,39 @@ sad_lookup(const struct ipsec_sad *sad, struct rte_mbuf *pkts[],
     const union rte_ipsec_sad_key *keys_v6[nb_pkts];
     void *v4_res[nb_pkts];
     void *v6_res[nb_pkts];
+    uint32_t spi, cache_idx;
+    struct ipsec_sad_cache *cache;
+    struct ipsec_sa *cached_sa;
+    int is_ipv4;
+
+    cache = &RTE_PER_LCORE(sad_cache);
 
     /* split received packets by address family into two arrays */
     for (i = 0; i < nb_pkts; i++) {
         ipv4 = rte_pktmbuf_mtod(pkts[i], struct rte_ipv4_hdr *);
+        ipv6 = rte_pktmbuf_mtod(pkts[i], struct rte_ipv6_hdr *);
         esp = rte_pktmbuf_mtod_offset(pkts[i], struct rte_esp_hdr *,
                 pkts[i]->l3_len);
-        if ((ipv4->version_ihl >> 4) == IPVERSION) {
+
+        is_ipv4 = ((ipv4->version_ihl >> 4) == IPVERSION);
+        spi = rte_be_to_cpu_32(esp->spi);
+        cache_idx = SPI2IDX(spi, cache->mask);
+
+        if (is_ipv4) {
+            cached_sa = (cache->mask != 0) ?
+                cache->v4[cache_idx] : NULL;
+            /* check SAD cache entry */
+            if ((cached_sa != NULL) && (cached_sa->spi == spi)) {
+                if (cmp_sa_key(cached_sa, 1, ipv4, ipv6)) {
+                    /* cache hit */
+                    sa[i] = cached_sa;
+                    continue;
+                }
+            }
+            /*
+             * cache miss
+             * preparing sad key to proceed with sad lookup
+             */
             v4[nb_v4].spi = esp->spi;
             v4[nb_v4].dip = ipv4->dst_addr;
             v4[nb_v4].sip = ipv4->src_addr;
@@ -48,7 +119,14 @@ sad_lookup(const struct ipsec_sad *sad, struct rte_mbuf *pkts[],
                 &v4[nb_v4];
             v4_idxes[nb_v4++] = i;
         } else {
-            ipv6 = rte_pktmbuf_mtod(pkts[i], struct rte_ipv6_hdr *);
+            cached_sa = (cache->mask != 0) ?
+                cache->v6[cache_idx] : NULL;
+            if ((cached_sa != NULL) && (cached_sa->spi == spi)) {
+                if (cmp_sa_key(cached_sa, 0, ipv4, ipv6)) {
+                    sa[i] = cached_sa;
+                    continue;
+                }
+            }
             v6[nb_v6].spi = esp->spi;
             memcpy(v6[nb_v6].dip, ipv6->dst_addr,
                 sizeof(ipv6->dst_addr));
@@ -65,11 +143,26 @@ sad_lookup(const struct ipsec_sad *sad, struct rte_mbuf *pkts[],
     if (nb_v6 != 0)
         rte_ipsec_sad_lookup(sad->sad_v6, keys_v6, v6_res, nb_v6);
 
-    for (i = 0; i < nb_v4; i++)
-        sa[v4_idxes[i]] = v4_res[i];
-
-    for (i = 0; i < nb_v6; i++)
-        sa[v6_idxes[i]] = v6_res[i];
+    for (i = 0; i < nb_v4; i++) {
+        ipv4 = rte_pktmbuf_mtod(pkts[v4_idxes[i]],
+            struct rte_ipv4_hdr *);
+        if (cmp_sa_key(v4_res[i], 1, ipv4, NULL)) {
+            sa[v4_idxes[i]] = v4_res[i];
+            sa_cache_update(cache->v4, (struct ipsec_sa *)v4_res[i],
+                cache->mask);
+        } else
+            sa[v4_idxes[i]] = NULL;
+    }
+    for (i = 0; i < nb_v6; i++) {
+        ipv6 = rte_pktmbuf_mtod(pkts[v6_idxes[i]],
+            struct rte_ipv6_hdr *);
+        if (cmp_sa_key(v6_res[i], 0, NULL, ipv6)) {
+            sa[v6_idxes[i]] = v6_res[i];
+            sa_cache_update(cache->v6, (struct ipsec_sa *)v6_res[i],
+                cache->mask);
+        } else
+            sa[v6_idxes[i]] = NULL;
+    }
 }
 
 #endif /* __SAD_H__ */
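
For readers skimming the diff: below is a minimal, self-contained sketch (not
part of the patch) of the per-lcore, SPI-indexed flat-array cache that sad.h
implements above. The toy_* names are made up for illustration, and plain
calloc() stands in for the rte_zmalloc_socket()/rte_align32pow2() calls used
by the real code. As in the patch, the requested size is rounded up to a
power of two, a zero size disables the cache, and a lookup only hits when the
cached entry's SPI matches exactly; a miss falls back to the full
rte_ipsec_sad lookup.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_sa {
	uint32_t spi;
	/* ... the rest of the SA state would live here ... */
};

struct toy_cache {
	struct toy_sa **slot;	/* flat array of SA pointers, indexed by SPI */
	uint32_t mask;		/* cache size - 1 (size is a power of two) */
};

/* spi -> slot index; mirrors the SPI2IDX() macro added in sad.h */
static inline uint32_t
toy_spi2idx(uint32_t spi, uint32_t mask)
{
	return spi & mask;
}

/* Round the requested size up to a power of two (the patch uses
 * rte_align32pow2()); a zero size leaves the cache disabled. */
static int
toy_cache_init(struct toy_cache *c, uint32_t nb_ent)
{
	uint32_t n = 1;

	c->slot = NULL;
	c->mask = 0;
	if (nb_ent == 0)
		return 0;
	while (n < nb_ent)
		n <<= 1;
	c->slot = calloc(n, sizeof(*c->slot));
	if (c->slot == NULL)
		return -1;
	c->mask = n - 1;
	return 0;
}

/* Hit only when the cached entry's SPI matches exactly; different SPIs
 * may alias into the same slot, and a miss falls back to the full SAD. */
static struct toy_sa *
toy_cache_lookup(const struct toy_cache *c, uint32_t spi)
{
	struct toy_sa *sa;

	if (c->mask == 0)	/* cache disabled */
		return NULL;
	sa = c->slot[toy_spi2idx(spi, c->mask)];
	if (sa != NULL && sa->spi == spi)
		return sa;
	return NULL;
}

/* Refresh the slot after a successful full lookup, as sa_cache_update()
 * in sad.h does. */
static void
toy_cache_update(struct toy_cache *c, struct toy_sa *sa)
{
	if (c->mask == 0)
		return;
	c->slot[toy_spi2idx(sa->spi, c->mask)] = sa;
}

int
main(void)
{
	struct toy_cache c;
	struct toy_sa sa = { .spi = 1000 };

	toy_cache_init(&c, 128);
	printf("lookup before update: %p\n", (void *)toy_cache_lookup(&c, 1000));
	toy_cache_update(&c, &sa);
	printf("lookup after update:  %p\n", (void *)toy_cache_lookup(&c, 1000));
	free(c.slot);
	return 0;
}

In the application itself the cache size is set with the new '-c' option:
zero disables the cache and every packet goes through the full SAD lookup,
while the default of 128 (SA_CACHE_SZ) keeps one recently used SA per slot.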