From patchwork Thu Apr 7 16:12:05 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tianli Lai X-Patchwork-Id: 109412 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 60FF0A050B; Thu, 7 Apr 2022 10:12:26 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 09D6B40689; Thu, 7 Apr 2022 10:12:26 +0200 (CEST) Received: from smtp.tom.com (smtprz14.163.net [106.3.154.247]) by mails.dpdk.org (Postfix) with ESMTP id EBD854014F for ; Thu, 7 Apr 2022 10:12:23 +0200 (CEST) Received: from my-app01.tom.com (my-app01.tom.com [127.0.0.1]) by freemail01.tom.com (Postfix) with ESMTP id 0B8C51EA0060 for ; Thu, 7 Apr 2022 16:12:21 +0800 (CST) Received: from my-app01.tom.com (HELO smtp.tom.com) ([127.0.0.1]) by my-app01 (TOM SMTP Server) with SMTP ID -1134844129 for ; Thu, 07 Apr 2022 16:12:21 +0800 (CST) Received: from antispam3.tom.com (unknown [172.25.16.54]) by freemail01.tom.com (Postfix) with ESMTP id 015DE1EA005F for ; Thu, 7 Apr 2022 16:12:20 +0800 (CST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=tom.com; s=201807; t=1649319141; bh=J7uTcEqY7MDCrqXgCSYFEhaqR3KRgXydw6tf5HuE50g=; h=From:To:Subject:Date:From; b=NPvnkVF1Ky3TRPJaM3nO4DgEswAVlntvoeB7W9pIm5QgvyWfDLhXfrRTaDBxFHLNp YHUTZdCD4B4y8RV7i3GxcoIbJVvvjhevi3IngHIvxU6xFdyN9pgFpBYvVftJNBaqVw okP3aQUMQ0aewj1hO+datsq8PQYdTcBWlnT3tuX4= Received: from antispam3.tom.com (antispam3.tom.com [127.0.0.1]) by antispam3.tom.com (Postfix) with ESMTP id B88579C1BF2 for ; Thu, 7 Apr 2022 16:12:20 +0800 (CST) X-Virus-Scanned: Debian amavisd-new at antispam3.tom.com Received: from antispam3.tom.com ([127.0.0.1]) by antispam3.tom.com (antispam3.tom.com [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id fbpuy7pEeOpX for 
; Thu, 7 Apr 2022 16:12:19 +0800 (CST) Received: from localhost.localdomain (unknown [119.123.241.123]) by antispam3.tom.com (Postfix) with ESMTPA id 712EC9C1BF1 for ; Thu, 7 Apr 2022 16:12:19 +0800 (CST) X-K: live From: Tianli Lai To: dev@dpdk.org Subject: [PATCH] examples/kni: add interrupt mode to receive packets Date: Fri, 8 Apr 2022 00:12:05 +0800 Message-Id: <20220407161205.8633-1-laitianli@tom.com> X-Mailer: git-send-email 2.27.0 MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org The kni application has two main-loop threads whose CPU utilization is up to 100 percent; these two threads are the writing thread and the reading thread. I think setting interrupt mode in the reading thread would reduce that thread's CPU utilization. Signed-off-by: Tianli Lai --- examples/kni/main.c | 107 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 105 insertions(+), 2 deletions(-) diff --git a/examples/kni/main.c b/examples/kni/main.c index e99ef5c38a..4e2d2df348 100644 --- a/examples/kni/main.c +++ b/examples/kni/main.c @@ -98,6 +98,8 @@ static struct rte_eth_conf port_conf = { }, }; +/* per-port spinlocks guarding rx interrupt enable/disable */ +static rte_spinlock_t locks[RTE_MAX_ETHPORTS]; /* Mempool for mbufs */ static struct rte_mempool * pktmbuf_pool = NULL; @@ -107,6 +109,8 @@ static uint32_t ports_mask = 0; static int promiscuous_on = 0; /* Monitor link status continually. off by default. */ static int monitor_links; +/* rx set in interrupt mode off by default. */ +static int intr_rx_en; /* Structure type for recording kni interface specific stats */ struct kni_interface_stats { @@ -277,6 +281,87 @@ kni_egress(struct kni_port_params *p) } } +/** + * force polling thread sleep until one-shot rx interrupt triggers + * @param num + * Maximum number of epoll rx events to wait for. + * @param lcore + * Lcore id of the calling thread. 
+ * @return + * 0 on success + */ +static int +sleep_until_rx_interrupt(int num, int lcore) +{ + /* + * we want to track when we are woken up by traffic so that we can go + * back to sleep again without log spamming. Avoid cache line sharing + * to prevent threads stepping on each others' toes. + */ + static struct { + bool wakeup; + } __rte_cache_aligned status[RTE_MAX_LCORE]; + struct rte_epoll_event event[num]; + int n, i; + uint16_t port_id; + uint8_t queue_id; + void *data; + + if (status[lcore].wakeup) { + RTE_LOG(INFO, APP, + "lcore %u sleeps until interrupt triggers\n", + rte_lcore_id()); + } + + n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, num, 10); + for (i = 0; i < n; i++) { + data = event[i].epdata.data; + port_id = ((uintptr_t)data) >> CHAR_BIT; + queue_id = ((uintptr_t)data) & + RTE_LEN2MASK(CHAR_BIT, uint8_t); + RTE_LOG(INFO, APP, + "lcore %u is waked up from rx interrupt on" + " port %d queue %d\n", + rte_lcore_id(), port_id, queue_id); + } + status[lcore].wakeup = n != 0; + + return 0; +} + +static void +turn_on_off_intr(uint16_t port_id, uint16_t queue_id, bool on) +{ + rte_spinlock_lock(&(locks[port_id])); + if (on) + rte_eth_dev_rx_intr_enable(port_id, queue_id); + else + rte_eth_dev_rx_intr_disable(port_id, queue_id); + rte_spinlock_unlock(&(locks[port_id])); +} + +static int event_register(void) +{ + uint8_t queueid; + uint16_t portid; + uint32_t data; + int ret; + + portid = 0; + queueid = 0; + data = portid << CHAR_BIT | queueid; + + ret = rte_eth_dev_rx_intr_ctl_q(portid, queueid, + RTE_EPOLL_PER_THREAD, + RTE_INTR_EVENT_ADD, + (void *)((uintptr_t)data)); + if (ret) + return ret; + + + return 0; +} + static int main_loop(__rte_unused void *arg) { @@ -291,12 +376,19 @@ main_loop(__rte_unused void *arg) LCORE_MAX }; enum lcore_rxtx flag = LCORE_NONE; + int intr_en = 0; RTE_ETH_FOREACH_DEV(i) { if (!kni_port_params_array[i]) continue; + /* initialize spinlock for each port */ + rte_spinlock_init(&(locks[i])); if 
(kni_port_params_array[i]->lcore_rx == (uint8_t)lcore_id) { flag = LCORE_RX; + if (intr_rx_en && !event_register()) + intr_en = 1; + else + RTE_LOG(INFO, APP, "RX interrupt won't enable.\n"); break; } else if (kni_port_params_array[i]->lcore_tx == (uint8_t)lcore_id) { @@ -317,6 +409,11 @@ main_loop(__rte_unused void *arg) if (f_pause) continue; kni_ingress(kni_port_params_array[i]); + if (unlikely(intr_en)) { + turn_on_off_intr(i, 0, 1); + sleep_until_rx_interrupt(1, lcore_id); + turn_on_off_intr(i, 0, 0); + } } } else if (flag == LCORE_TX) { RTE_LOG(INFO, APP, "Lcore %u is writing to port %d\n", @@ -341,12 +438,13 @@ main_loop(__rte_unused void *arg) static void print_usage(const char *prgname) { - RTE_LOG(INFO, APP, "\nUsage: %s [EAL options] -- -p PORTMASK -P -m " + RTE_LOG(INFO, APP, "\nUsage: %s [EAL options] -- -p PORTMASK -P -m -I " "[--config (port,lcore_rx,lcore_tx,lcore_kthread...)" "[,(port,lcore_rx,lcore_tx,lcore_kthread...)]]\n" " -p PORTMASK: hex bitmask of ports to use\n" " -P : enable promiscuous mode\n" " -m : enable monitoring of port carrier state\n" + " -I : enable rx interrupt mode\n" " --config (port,lcore_rx,lcore_tx,lcore_kthread...): " "port and lcore configurations\n", prgname); @@ -527,7 +625,7 @@ parse_args(int argc, char **argv) opterr = 0; /* Parse command line */ - while ((opt = getopt_long(argc, argv, "p:Pm", longopts, + while ((opt = getopt_long(argc, argv, "p:PmI", longopts, &longindex)) != EOF) { switch (opt) { case 'p': @@ -539,6 +637,9 @@ parse_args(int argc, char **argv) case 'm': monitor_links = 1; break; + case 'I': + intr_rx_en = 1; + break; case 0: if (!strncmp(longopts[longindex].name, CMDLINE_OPT_CONFIG, @@ -610,6 +711,8 @@ init_port(uint16_t port) if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; + if (intr_rx_en) + local_port_conf.intr_conf.rxq = 1; ret = rte_eth_dev_configure(port, 1, 1, &local_port_conf); if (ret < 0) 
rte_exit(EXIT_FAILURE, "Could not configure port%u (%d)\n",