From patchwork Mon May 16 17:35:49 2022
X-Patchwork-Submitter: Shijith Thotton
X-Patchwork-Id: 111191
X-Patchwork-Delegate: jerinj@marvell.com
From: Shijith Thotton
Subject: [PATCH v4 3/5] test/event: test cases to test runtime queue attribute
Date: Mon, 16 May 2022 23:05:49 +0530
Message-ID: <7f2bd0734a71e6b46496933379df1bac51cc8b0a.1652722314.git.sthotton@marvell.com>
X-Mailer: git-send-email 2.25.1
List-Id: DPDK patches and discussions

Added test cases to verify that the queue QoS attributes priority, weight
and affinity can be changed at runtime.
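For context, a minimal usage sketch (not part of this patch) of the runtime
queue attribute API that these tests exercise; dev_id and queue_id are
placeholder values, and error handling is reduced to the same -ENOTSUP-style
check the tests use:

	uint8_t dev_id = 0, queue_id = 0;
	uint32_t prio;
	struct rte_event_dev_info info;
	int ret;

	/* Runtime changes are only valid on devices advertising the capability. */
	rte_event_dev_info_get(dev_id, &info);
	if (info.event_dev_cap & RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR) {
		/* Raise the queue priority while the device is running. */
		ret = rte_event_queue_attr_set(dev_id, queue_id,
					       RTE_EVENT_QUEUE_ATTR_PRIORITY,
					       RTE_EVENT_DEV_PRIORITY_HIGHEST);
		if (ret == 0)
			/* Read the attribute back to confirm the new value. */
			rte_event_queue_attr_get(dev_id, queue_id,
						 RTE_EVENT_QUEUE_ATTR_PRIORITY,
						 &prio);
	}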
Signed-off-by: Shijith Thotton
---
 app/test/test_eventdev.c | 201 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 201 insertions(+)

diff --git a/app/test/test_eventdev.c b/app/test/test_eventdev.c
index 4f51042bda..336529038e 100644
--- a/app/test/test_eventdev.c
+++ b/app/test/test_eventdev.c
@@ -385,6 +385,201 @@ test_eventdev_queue_attr_priority(void)
 	return TEST_SUCCESS;
 }
 
+static int
+test_eventdev_queue_attr_priority_runtime(void)
+{
+	uint32_t queue_count, queue_req, prio, deq_cnt;
+	struct rte_event_queue_conf qconf;
+	struct rte_event_port_conf pconf;
+	struct rte_event_dev_info info;
+	struct rte_event event = {
+		.op = RTE_EVENT_OP_NEW,
+		.event_type = RTE_EVENT_TYPE_CPU,
+		.sched_type = RTE_SCHED_TYPE_ATOMIC,
+		.u64 = 0xbadbadba,
+	};
+	int i, ret;
+
+	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+	if (!(info.event_dev_cap & RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR))
+		return TEST_SKIPPED;
+
+	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(
+				    TEST_DEV_ID, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+				    &queue_count),
+			    "Queue count get failed");
+
+	/* Need at least 2 queues to test LOW and HIGH priority. */
+	TEST_ASSERT(queue_count > 1, "Not enough event queues, needed 2");
+	queue_req = 2;
+
+	for (i = 0; i < (int)queue_count; i++) {
+		ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i, &qconf);
+		TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
+		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
+		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
+	}
+
+	ret = rte_event_queue_attr_set(TEST_DEV_ID, 0,
+				       RTE_EVENT_QUEUE_ATTR_PRIORITY,
+				       RTE_EVENT_DEV_PRIORITY_LOWEST);
+	if (ret == -ENOTSUP)
+		return TEST_SKIPPED;
+	TEST_ASSERT_SUCCESS(ret, "Queue0 priority set failed");
+
+	ret = rte_event_queue_attr_set(TEST_DEV_ID, 1,
+				       RTE_EVENT_QUEUE_ATTR_PRIORITY,
+				       RTE_EVENT_DEV_PRIORITY_HIGHEST);
+	if (ret == -ENOTSUP)
+		return TEST_SKIPPED;
+	TEST_ASSERT_SUCCESS(ret, "Queue1 priority set failed");
+
+	/* Setup event port 0 */
+	ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
+	ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
+	TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
+	ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
+	TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
+		    TEST_DEV_ID);
+
+	ret = rte_event_dev_start(TEST_DEV_ID);
+	TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
+
+	for (i = 0; i < (int)queue_req; i++) {
+		event.queue_id = i;
+		while (rte_event_enqueue_burst(TEST_DEV_ID, 0, &event, 1) != 1)
+			rte_pause();
+	}
+
+	prio = RTE_EVENT_DEV_PRIORITY_HIGHEST;
+	deq_cnt = 0;
+	while (deq_cnt < queue_req) {
+		uint32_t queue_prio;
+
+		if (rte_event_dequeue_burst(TEST_DEV_ID, 0, &event, 1, 0) == 0)
+			continue;
+
+		ret = rte_event_queue_attr_get(TEST_DEV_ID, event.queue_id,
+					       RTE_EVENT_QUEUE_ATTR_PRIORITY,
+					       &queue_prio);
+		if (ret == -ENOTSUP)
+			return TEST_SKIPPED;
+
+		TEST_ASSERT_SUCCESS(ret, "Queue priority get failed");
+		TEST_ASSERT(queue_prio >= prio,
+			    "Received event from a lower priority queue first");
+		prio = queue_prio;
+		deq_cnt++;
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_eventdev_queue_attr_weight_runtime(void)
+{
+	struct rte_event_queue_conf qconf;
+	struct rte_event_dev_info info;
+	uint32_t queue_count;
+	int i, ret;
+
+	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+	if (!(info.event_dev_cap & RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR))
+		return TEST_SKIPPED;
+
+	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(
+				    TEST_DEV_ID, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+				    &queue_count),
+			    "Queue count get failed");
+
+	for (i = 0; i < (int)queue_count; i++) {
+		ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i, &qconf);
+		TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
+		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
+		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
+	}
+
+	for (i = 0; i < (int)queue_count; i++) {
+		uint32_t get_val;
+		uint64_t set_val;
+
+		set_val = i % RTE_EVENT_QUEUE_WEIGHT_HIGHEST;
+		ret = rte_event_queue_attr_set(
+			TEST_DEV_ID, i, RTE_EVENT_QUEUE_ATTR_WEIGHT, set_val);
+		if (ret == -ENOTSUP)
+			return TEST_SKIPPED;
+
+		TEST_ASSERT_SUCCESS(ret, "Queue weight set failed");
+
+		ret = rte_event_queue_attr_get(
+			TEST_DEV_ID, i, RTE_EVENT_QUEUE_ATTR_WEIGHT, &get_val);
+		if (ret == -ENOTSUP)
+			return TEST_SKIPPED;
+
+		TEST_ASSERT_SUCCESS(ret, "Queue weight get failed");
+		TEST_ASSERT_EQUAL(get_val, set_val,
+				  "Wrong weight value for queue%d", i);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_eventdev_queue_attr_affinity_runtime(void)
+{
+	struct rte_event_queue_conf qconf;
+	struct rte_event_dev_info info;
+	uint32_t queue_count;
+	int i, ret;
+
+	ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+	if (!(info.event_dev_cap & RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR))
+		return TEST_SKIPPED;
+
+	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(
+				    TEST_DEV_ID, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+				    &queue_count),
+			    "Queue count get failed");
+
+	for (i = 0; i < (int)queue_count; i++) {
+		ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i, &qconf);
+		TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
+		ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
+		TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
+	}
+
+	for (i = 0; i < (int)queue_count; i++) {
+		uint32_t get_val;
+		uint64_t set_val;
+
+		set_val = i % RTE_EVENT_QUEUE_AFFINITY_HIGHEST;
+		ret = rte_event_queue_attr_set(
+			TEST_DEV_ID, i, RTE_EVENT_QUEUE_ATTR_AFFINITY, set_val);
+		if (ret == -ENOTSUP)
+			return TEST_SKIPPED;
+
+		TEST_ASSERT_SUCCESS(ret, "Queue affinity set failed");
+
+		ret = rte_event_queue_attr_get(
+			TEST_DEV_ID, i, RTE_EVENT_QUEUE_ATTR_AFFINITY, &get_val);
+		if (ret == -ENOTSUP)
+			return TEST_SKIPPED;
+
+		TEST_ASSERT_SUCCESS(ret, "Queue affinity get failed");
+		TEST_ASSERT_EQUAL(get_val, set_val,
+				  "Wrong affinity value for queue%d", i);
+	}
+
+	return TEST_SUCCESS;
+}
+
 static int
 test_eventdev_queue_attr_nb_atomic_flows(void)
 {
@@ -964,6 +1159,12 @@ static struct unit_test_suite eventdev_common_testsuite = {
 			test_eventdev_queue_count),
 		TEST_CASE_ST(eventdev_configure_setup, NULL,
 			test_eventdev_queue_attr_priority),
+		TEST_CASE_ST(eventdev_configure_setup, eventdev_stop_device,
+			test_eventdev_queue_attr_priority_runtime),
+		TEST_CASE_ST(eventdev_configure_setup, NULL,
+			test_eventdev_queue_attr_weight_runtime),
+		TEST_CASE_ST(eventdev_configure_setup, NULL,
+			test_eventdev_queue_attr_affinity_runtime),
 		TEST_CASE_ST(eventdev_configure_setup, NULL,
 			test_eventdev_queue_attr_nb_atomic_flows),
 		TEST_CASE_ST(eventdev_configure_setup, NULL,