From patchwork Tue Aug 2 08:23:14 2022
X-Patchwork-Submitter: "Ling, WeiX"
X-Patchwork-Id: 114525
From: Wei Ling
To: dts@dpdk.org
Cc: Wei Ling
Subject: [dts][PATCH V1 2/2] tests/virtio_event_idx_interrupt_cbdma: modify testsuite to test virtio dequeue
Date: Tue, 2 Aug 2022 04:23:14 -0400
Message-Id: <20220802082314.1103885-1-weix.ling@intel.com>
X-Mailer: git-send-email 2.25.1

Since DPDK 22.07, virtio supports async dequeue for the split and packed
ring paths, so modify the virtio_event_idx_interrupt_cbdma testsuite to
test the split and packed ring async dequeue feature.
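
In the reworked test cases, the async dequeue path is exercised by assigning a
CBDMA channel to both the txq and the rxq of a vhost queue (dmas=[txq0;rxq0])
instead of the txq alone. Below is a minimal illustrative sketch of how the
single-queue cases in this patch compose the vhost-user testpmd parameters; the
lcore id and CBDMA PCI address are placeholders, not values taken from this
patch:

    # Placeholder stand-ins for the testsuite's vhost_core_list[1] and cbdma_list[0].
    vhost_core = 2                # lcore reserved for the vhost PMD worker
    cbdma_dev = "0000:00:04.0"    # example CBDMA channel PCI address
    lcore_dma = "lcore%s@%s" % (vhost_core, cbdma_dev)
    vhost_param = "--nb-cores=1 --txd=1024 --rxd=1024 --lcore-dma=[%s]" % lcore_dma
    # dmas=[txq0;rxq0] requests DMA acceleration for both enqueue and dequeue
    # on queue 0; the previous code listed only txq0 (enqueue).
    vhost_eal_param = "--vdev 'net_vhost,iface=vhost-net,queues=1,dmas=[txq0;rxq0]'"
    print(vhost_eal_param, vhost_param)
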
Signed-off-by: Wei Ling
Acked-by: Xingguang He
Tested-by: Chenyu Huang
Acked-by: Lijuan Tu
---
 ...tSuite_virtio_event_idx_interrupt_cbdma.py | 211 ++++++++++++------
 1 file changed, 140 insertions(+), 71 deletions(-)

diff --git a/tests/TestSuite_virtio_event_idx_interrupt_cbdma.py b/tests/TestSuite_virtio_event_idx_interrupt_cbdma.py
index 4a65c381..c5d7af18 100644
--- a/tests/TestSuite_virtio_event_idx_interrupt_cbdma.py
+++ b/tests/TestSuite_virtio_event_idx_interrupt_cbdma.py
@@ -28,7 +28,7 @@ class TestVirtioIdxInterruptCbdma(TestCase):
         self.verify(len(self.dut_ports) >= 1, "Insufficient ports for testing")
         self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])
         self.core_list = self.dut.get_core_list("all", socket=self.ports_socket)
-        self.core_list_vhost = self.core_list[0:17]
+        self.vhost_core_list = self.core_list[0:17]
         self.cores_num = len(
             [n for n in self.dut.cores if int(n["socket"]) == self.ports_socket]
         )
@@ -55,7 +55,6 @@ class TestVirtioIdxInterruptCbdma(TestCase):
         self.dut.send_expect("killall -s INT %s" % self.testpmd_name, "#")
         self.dut.send_expect("killall -s INT qemu-system-x86_64", "#")
         self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#")
-        self.vhost = self.dut.new_session(suite="vhost")

     def get_core_mask(self):
         self.core_config = "1S/%dC/1T" % (self.nb_cores + 1)
@@ -254,22 +253,20 @@ class TestVirtioIdxInterruptCbdma(TestCase):
         close all vms
         """
         self.vm.stop()
-        self.vhost.send_expect("quit", "#", 20)
+        self.vhost_pmd.quit()

-    def test_perf_split_ring_virito_pci_driver_reload_with_cbdma_enabled(self):
+    def test_perf_split_ring_virito_pci_driver_reload_test_with_cbdma_enable(self):
         """
-        Test Case1: Split ring virtio-pci driver reload test with CBDMA enabled
+        Test Case1: Split ring virtio-pci driver reload test with CBDMA enable
         """
-        self.get_cbdma_ports_info_and_bind_to_dpdk(1)
-        lcore_dma = "[lcore{}@{}]".format(self.core_list_vhost[1], self.cbdma_list[0])
-        vhost_param = "--nb-cores=1 --txd=1024 --rxd=1024 --lcore-dma={}".format(
-            lcore_dma
-        )
-        vhost_eal_param = "--vdev 'net_vhost,iface=vhost-net,queues=1,dmas=[txq0]'"
+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=1)
+        lcore_dma = "lcore%s@%s" % (self.vhost_core_list[1], self.cbdma_list[0])
+        vhost_param = "--nb-cores=1 --txd=1024 --rxd=1024 --lcore-dma=[%s]" % lcore_dma
+        vhost_eal_param = "--vdev 'net_vhost,iface=vhost-net,queues=1,dmas=[txq0;rxq0]'"
         ports = self.cbdma_list
         ports.append(self.dut.ports_info[0]["pci"])
         self.vhost_pmd.start_testpmd(
-            cores=self.core_list_vhost,
+            cores=self.vhost_core_list,
             ports=ports,
             prefix="vhost",
             eal_param=vhost_eal_param,
@@ -279,44 +276,78 @@ class TestVirtioIdxInterruptCbdma(TestCase):
         self.queues = 1
         self.start_vms(packed=False)
         self.config_virito_net_in_vm()
-        res = self.check_packets_after_reload_virtio_device(reload_times=100)
+        res = self.check_packets_after_reload_virtio_device(reload_times=10)
         self.verify(res is True, "Should increase the wait times of ixia")
         self.stop_all_apps()

-    def test_perf_wake_up_split_ring_virtio_net_cores_with_event_idx_interrupt_mode_and_cbdma_enabled_16queue(
+    def test_perf_split_ring_16_queues_virtio_net_event_idx_interrupt_mode_test_with_cbdma_enable(
         self,
     ):
         """
-        Test Case2: Wake up split ring virtio-net cores with event idx interrupt mode and cbdma enabled 16 queues test
+        Test Case2: Split ring 16 queues virtio-net event idx interrupt mode test with cbdma enable
         """
-        self.get_cbdma_ports_info_and_bind_to_dpdk(16, allow_diff_socket=True)
+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)
         lcore_dma = (
-            f"[lcore{self.core_list_vhost[1]}@{self.cbdma_list[0]},"
-            f"lcore{self.core_list[2]}@{self.cbdma_list[0]},"
-            f"lcore{self.core_list[3]}@{self.cbdma_list[1]},"
-            f"lcore{self.core_list[4]}@{self.cbdma_list[2]},"
-            f"lcore{self.core_list[5]}@{self.cbdma_list[3]},"
-            f"lcore{self.core_list[6]}@{self.cbdma_list[4]},"
-            f"lcore{self.core_list[7]}@{self.cbdma_list[5]},"
-            f"lcore{self.core_list[8]}@{self.cbdma_list[6]},"
-            f"lcore{self.core_list[9]}@{self.cbdma_list[7]},"
-            f"lcore{self.core_list[10]}@{self.cbdma_list[8]},"
-            f"lcore{self.core_list[11]}@{self.cbdma_list[9]},"
-            f"lcore{self.core_list[12]}@{self.cbdma_list[10]},"
-            f"lcore{self.core_list[13]}@{self.cbdma_list[11]},"
-            f"lcore{self.core_list[14]}@{self.cbdma_list[12]},"
-            f"lcore{self.core_list[15]}@{self.cbdma_list[13]},"
-            f"lcore{self.core_list[16]}@{self.cbdma_list[14]},"
-            f"lcore{self.core_list[17]}@{self.cbdma_list[15]}]"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s"
+            % (
+                self.vhost_core_list[1],
+                self.cbdma_list[0],
+                self.vhost_core_list[2],
+                self.cbdma_list[1],
+                self.vhost_core_list[3],
+                self.cbdma_list[2],
+                self.vhost_core_list[4],
+                self.cbdma_list[3],
+                self.vhost_core_list[5],
+                self.cbdma_list[4],
+                self.vhost_core_list[6],
+                self.cbdma_list[5],
+                self.vhost_core_list[7],
+                self.cbdma_list[6],
+                self.vhost_core_list[8],
+                self.cbdma_list[7],
+                self.vhost_core_list[9],
+                self.cbdma_list[8],
+                self.vhost_core_list[10],
+                self.cbdma_list[9],
+                self.vhost_core_list[11],
+                self.cbdma_list[10],
+                self.vhost_core_list[12],
+                self.cbdma_list[11],
+                self.vhost_core_list[13],
+                self.cbdma_list[12],
+                self.vhost_core_list[14],
+                self.cbdma_list[13],
+                self.vhost_core_list[15],
+                self.cbdma_list[14],
+                self.vhost_core_list[16],
+                self.cbdma_list[15],
+            )
         )
-        vhost_param = "--nb-cores=16 --txd=1024 --rxd=1024 --rxq=16 --txq=16 --lcore-dma={}".format(
-            lcore_dma
+        vhost_param = (
+            "--nb-cores=16 --txd=1024 --rxd=1024 --rxq=16 --txq=16 --lcore-dma=[%s]"
+            % lcore_dma
         )
-        vhost_eal_param = "--vdev 'net_vhost,iface=vhost-net,queues=16,client=1,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7;txq8;txq9;txq10;txq11;txq12;txq13;txq14;txq15]'"
+        vhost_eal_param = "--vdev 'net_vhost,iface=vhost-net,queues=16,client=1,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7;txq8;txq9;txq10;txq11;txq12;txq13;txq14;txq15;rxq0;rxq1;rxq2;rxq3;rxq4;rxq5;rxq6;rxq7;rxq8;rxq9;rxq10;rxq11;rxq12;rxq13;rxq14;rxq15]'"
         ports = self.cbdma_list
         ports.append(self.dut.ports_info[0]["pci"])
         self.vhost_pmd.start_testpmd(
-            cores=self.core_list_vhost,
+            cores=self.vhost_core_list,
             ports=ports,
             prefix="vhost",
             eal_param=vhost_eal_param,
@@ -328,22 +359,23 @@ class TestVirtioIdxInterruptCbdma(TestCase):
         self.config_virito_net_in_vm()
         self.start_to_send_packets(delay=15)
         self.check_each_queue_has_packets_info_on_vhost()
+        self.vhost_pmd.execute_cmd("start")
+        self.start_to_send_packets(delay=15)
+        self.check_each_queue_has_packets_info_on_vhost()
         self.stop_all_apps()

-    def test_perf_packed_ring_virito_pci_driver_reload_with_cbdma_enabled(self):
+    def test_perf_packed_ring_virito_pci_driver_reload_test_with_cbdma_enable(self):
         """
-        Test Case3: Packed ring virtio-pci driver reload test with CBDMA enabled
+        Test Case3: Packed ring virtio-pci driver reload test with CBDMA enable
         """
-        self.get_cbdma_ports_info_and_bind_to_dpdk(1)
-        lcore_dma = "[lcore{}@{}]".format(self.core_list_vhost[1], self.cbdma_list[0])
-        vhost_param = "--nb-cores=1 --txd=1024 --rxd=1024 --lcore-dma={}".format(
-            lcore_dma
-        )
-        vhost_eal_param = "--vdev 'net_vhost,iface=vhost-net,queues=1,dmas=[txq0]'"
+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=1)
+        lcore_dma = "lcore%s@%s" % (self.vhost_core_list[1], self.cbdma_list[0])
+        vhost_param = "--nb-cores=1 --txd=1024 --rxd=1024 --lcore-dma=[%s]" % lcore_dma
+        vhost_eal_param = "--vdev 'net_vhost,iface=vhost-net,queues=1,dmas=[txq0;rxq0]'"
         ports = self.cbdma_list
         ports.append(self.dut.ports_info[0]["pci"])
         self.vhost_pmd.start_testpmd(
-            cores=self.core_list_vhost,
+            cores=self.vhost_core_list,
             ports=ports,
             prefix="vhost",
             eal_param=vhost_eal_param,
@@ -353,44 +385,78 @@ class TestVirtioIdxInterruptCbdma(TestCase):
         self.queues = 1
         self.start_vms(packed=True)
         self.config_virito_net_in_vm()
-        res = self.check_packets_after_reload_virtio_device(reload_times=100)
+        res = self.check_packets_after_reload_virtio_device(reload_times=10)
         self.verify(res is True, "Should increase the wait times of ixia")
         self.stop_all_apps()

-    def test_perf_wake_up_packed_ring_virtio_net_cores_with_event_idx_interrupt_mode_and_cbdma_enabled_16queue(
+    def test_perf_packed_ring_16_queues_virtio_net_event_idx_interrupt_mode_test_with_cbdma_enable(
         self,
     ):
         """
-        Test Case4: Wake up packed ring virtio-net cores with event idx interrupt mode and cbdma enabled 16 queues test
+        Test Case4: Packed ring 16 queues virtio-net event idx interrupt mode test with cbdma enable
         """
         self.get_cbdma_ports_info_and_bind_to_dpdk(16, allow_diff_socket=True)
         lcore_dma = (
-            f"[lcore{self.core_list_vhost[1]}@{self.cbdma_list[0]},"
-            f"lcore{self.core_list[2]}@{self.cbdma_list[0]},"
-            f"lcore{self.core_list[3]}@{self.cbdma_list[1]},"
-            f"lcore{self.core_list[4]}@{self.cbdma_list[2]},"
-            f"lcore{self.core_list[5]}@{self.cbdma_list[3]},"
-            f"lcore{self.core_list[6]}@{self.cbdma_list[4]},"
-            f"lcore{self.core_list[7]}@{self.cbdma_list[5]},"
-            f"lcore{self.core_list[8]}@{self.cbdma_list[6]},"
-            f"lcore{self.core_list[9]}@{self.cbdma_list[7]},"
-            f"lcore{self.core_list[10]}@{self.cbdma_list[8]},"
-            f"lcore{self.core_list[11]}@{self.cbdma_list[9]},"
-            f"lcore{self.core_list[12]}@{self.cbdma_list[10]},"
-            f"lcore{self.core_list[13]}@{self.cbdma_list[11]},"
-            f"lcore{self.core_list[14]}@{self.cbdma_list[12]},"
-            f"lcore{self.core_list[15]}@{self.cbdma_list[13]},"
-            f"lcore{self.core_list[16]}@{self.cbdma_list[14]},"
-            f"lcore{self.core_list[17]}@{self.cbdma_list[15]}]"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s,"
+            "lcore%s@%s"
+            % (
+                self.vhost_core_list[1],
+                self.cbdma_list[0],
+                self.vhost_core_list[2],
+                self.cbdma_list[1],
+                self.vhost_core_list[3],
+                self.cbdma_list[2],
+                self.vhost_core_list[4],
+                self.cbdma_list[3],
+                self.vhost_core_list[5],
+                self.cbdma_list[4],
+                self.vhost_core_list[6],
+                self.cbdma_list[5],
+                self.vhost_core_list[7],
+                self.cbdma_list[6],
+                self.vhost_core_list[8],
+                self.cbdma_list[7],
+                self.vhost_core_list[9],
+                self.cbdma_list[8],
+                self.vhost_core_list[10],
+                self.cbdma_list[9],
+                self.vhost_core_list[11],
+                self.cbdma_list[10],
+                self.vhost_core_list[12],
+                self.cbdma_list[11],
+                self.vhost_core_list[13],
+                self.cbdma_list[12],
+                self.vhost_core_list[14],
+                self.cbdma_list[13],
+                self.vhost_core_list[15],
+                self.cbdma_list[14],
+                self.vhost_core_list[16],
+                self.cbdma_list[15],
+            )
         )
-        vhost_param = "--nb-cores=16 --txd=1024 --rxd=1024 --rxq=16 --txq=16 --lcore-dma={}".format(
-            lcore_dma
+        vhost_param = (
+            "--nb-cores=16 --txd=1024 --rxd=1024 --rxq=16 --txq=16 --lcore-dma=[%s]"
+            % lcore_dma
        )
-        vhost_eal_param = "--vdev 'net_vhost,iface=vhost-net,queues=16,client=1,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7;txq8;txq9;txq10;txq11;txq12;txq13;txq14;txq15]'"
+        vhost_eal_param = "--vdev 'net_vhost,iface=vhost-net,queues=16,client=1,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7;txq8;txq9;txq10;txq11;txq12;txq13;txq14;txq15;rxq0;rxq1;rxq2;rxq3;rxq4;rxq5;rxq6;rxq7;rxq8;rxq9;rxq10;rxq11;rxq12;rxq13;rxq14;rxq15]'"
         ports = self.cbdma_list
         ports.append(self.dut.ports_info[0]["pci"])
         self.vhost_pmd.start_testpmd(
-            cores=self.core_list_vhost,
+            cores=self.vhost_core_list,
             ports=ports,
             prefix="vhost",
             eal_param=vhost_eal_param,
@@ -402,6 +468,9 @@ class TestVirtioIdxInterruptCbdma(TestCase):
         self.config_virito_net_in_vm()
         self.start_to_send_packets(delay=15)
         self.check_each_queue_has_packets_info_on_vhost()
+        self.vhost_pmd.execute_cmd("start")
+        self.start_to_send_packets(delay=15)
+        self.check_each_queue_has_packets_info_on_vhost()
         self.stop_all_apps()

     def tear_down(self):
@@ -416,4 +485,4 @@ class TestVirtioIdxInterruptCbdma(TestCase):
         """
         Run after each test suite.
         """
-        self.dut.close_session(self.vhost)
+        self.dut.close_session(self.vhost_user)
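
For the 16-queue cases, the --lcore-dma argument pairs sixteen vhost lcores
with sixteen CBDMA channels one to one. As an illustrative sketch only (the
lcore ids and PCI addresses below are placeholders standing in for the
testsuite's vhost_core_list[1:17] and cbdma_list, not values from this patch),
the same mapping can be expressed as:

    # Build "lcoreN@BDF" entries, one per queue, and join them for --lcore-dma.
    vhost_cores = list(range(2, 18))
    cbdma_devs = ["0000:00:04.%d" % i for i in range(8)] + [
        "0000:80:04.%d" % i for i in range(8)
    ]
    lcore_dma = ",".join(
        "lcore%s@%s" % (core, dev) for core, dev in zip(vhost_cores, cbdma_devs)
    )
    vhost_param = (
        "--nb-cores=16 --txd=1024 --rxd=1024 --rxq=16 --txq=16 --lcore-dma=[%s]"
        % lcore_dma
    )
    print(vhost_param)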