[V4,2/2] tests/vswitch_sample_cbdma: modify testsuite to test virtio dequeue

Message ID 20220816075926.3419441-1-weix.ling@intel.com (mailing list archive)
State Superseded
Headers
Series modify vswitch_sample_cbdma to test virtio dequeue |

Checks

Context Check Description
ci/Intel-dts-doc-test fail Testing issues
ci/Intel-dts-format-test success Testing OK
ci/Intel-dts-pylama-test success Testing OK

Commit Message

Ling, WeiX Aug. 16, 2022, 7:59 a.m. UTC
  Since DPDK 22.07, virtio supports async dequeue for the split and packed
ring paths, so modify the vswitch_sample_cbdma testsuite to test the split
and packed ring async dequeue feature.
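
As an illustration of the new dequeue coverage, the reworked helpers take an
explicit --dmas string, so a test case can map both the enqueue (txdN) and the
dequeue (rxdN) virtqueue to CBDMA devices. A minimal sketch of the expected call
sequence, using the helper names from the diff below (the number of CBDMA
devices and their PCI addresses depend on the DUT):

    # hypothetical usage inside a test case; assumes at least two CBDMA
    # devices are present and bound to DPDK by the helper below
    self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
    # one DMA channel for the enqueue (txd0) and one for the dequeue (rxd0) path
    dmas_info = "txd0@%s,rxd0@%s" % (self.cbdma_list[0], self.cbdma_list[1])
    self.start_vhost_app(
        cbdma_num=2, socket_num=1, dmas_info=dmas_info, client_mode=True
    )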

Signed-off-by: Wei Ling <weix.ling@intel.com>
---
 tests/TestSuite_vswitch_sample_cbdma.py | 699 ++++++++++--------------
 1 file changed, 296 insertions(+), 403 deletions(-)
  

Patch

diff --git a/tests/TestSuite_vswitch_sample_cbdma.py b/tests/TestSuite_vswitch_sample_cbdma.py
index 9fb6150b..52553a15 100644
--- a/tests/TestSuite_vswitch_sample_cbdma.py
+++ b/tests/TestSuite_vswitch_sample_cbdma.py
@@ -33,14 +33,13 @@  class TestVswitchSampleCBDMA(TestCase):
         self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])
         self.cores = self.dut.get_core_list("all", socket=self.ports_socket)
         self.vhost_core_list = self.cores[0:2]
+        self.vhost_core_range = "%s-%s" % (
+            self.vhost_core_list[0],
+            self.vhost_core_list[-1],
+        )
         self.vuser0_core_list = self.cores[2:4]
         self.vuser1_core_list = self.cores[4:6]
-        self.vhost_core_mask = utils.create_mask(self.vhost_core_list)
         self.mem_channels = self.dut.get_memory_channels()
-        # get cbdma device
-        self.cbdma_dev_infos = []
-        self.dmas_info = None
-        self.device_str = None
         self.out_path = "/tmp"
         out = self.tester.send_expect("ls -d %s" % self.out_path, "# ")
         if "No such file or directory" in out:
@@ -61,11 +60,6 @@  class TestVswitchSampleCBDMA(TestCase):
         self.virtio_user1 = self.dut.new_session(suite="virtio-user1")
         self.virtio_user0_pmd = PmdOutput(self.dut, self.virtio_user0)
         self.virtio_user1_pmd = PmdOutput(self.dut, self.virtio_user1)
-        self.mrg_rxbuf = 0
-        self.in_order = 0
-        self.vectorized = 0
-        self.packed_vq = 0
-        self.server = 0
         self.random_string = string.ascii_letters + string.digits
         self.virtio_ip0 = "1.1.1.2"
         self.virtio_ip1 = "1.1.1.3"
@@ -75,6 +69,8 @@  class TestVswitchSampleCBDMA(TestCase):
         """
         Run before each test case.
         """
+        self.table_header = ["Frame Size(Byte)", "Mode", "Throughput(Mpps)"]
+        self.result_table_create(self.table_header)
         self.dut.send_expect("rm -rf %s/vhost-net*" % self.base_dir, "#")
         self.dut.send_expect("killall -I dpdk-vhost", "#", 20)
         self.dut.send_expect("killall -I dpdk-testpmd", "#", 20)
@@ -93,9 +89,7 @@  class TestVswitchSampleCBDMA(TestCase):
         )
         return True if out == "2048" else False
 
-    def start_vhost_app(
-        self, with_cbdma=True, cbdma_num=1, socket_num=1, client_mode=False
-    ):
+    def start_vhost_app(self, cbdma_num, socket_num, dmas_info, client_mode=False):
         """
         launch the vhost app on vhost side
         """
@@ -105,87 +99,53 @@  class TestVswitchSampleCBDMA(TestCase):
             socket_file_param += "--socket-file ./vhost-net{} ".format(item)
         allow_pci = [self.dut.ports_info[0]["pci"]]
         for item in range(cbdma_num):
-            allow_pci.append(self.cbdma_dev_infos[item])
+            allow_pci.append(self.cbdma_list[item])
         allow_option = ""
         for item in allow_pci:
             allow_option += " -a {}".format(item)
-        if with_cbdma:
-            if client_mode:
-                params = (
-                    " -c {} -n {} {} -- -p 0x1 --mergeable 1 --vm2vm 1 --stats 1 "
-                    + socket_file_param
-                    + "--dmas [{}] --client --total-num-mbufs 600000"
-                ).format(
-                    self.vhost_core_mask,
-                    self.mem_channels,
-                    allow_option,
-                    self.dmas_info,
-                )
-            else:
-                params = (
-                    " -c {} -n {} {} -- -p 0x1 --mergeable 1 --vm2vm 1 --stats 1 "
-                    + socket_file_param
-                    + "--dmas [{}] --total-num-mbufs 600000"
-                ).format(
-                    self.vhost_core_mask,
-                    self.mem_channels,
-                    allow_option,
-                    self.dmas_info,
-                )
-        else:
-            params = (
-                " -c {} -n {} {} -- -p 0x1 --mergeable 1 --vm2vm 1 --stats 1 "
-                + socket_file_param
-                + "--total-num-mbufs 600000"
-            ).format(self.vhost_core_mask, self.mem_channels, allow_option)
+        params = (
+            " -l {} -n {} {} -- -p 0x1 --mergeable 1 --vm2vm 1 --stats 1 "
+            + socket_file_param
+            + "--dmas [{}] --total-num-mbufs 600000"
+        ).format(
+            self.vhost_core_range,
+            self.mem_channels,
+            allow_option,
+            dmas_info,
+        )
+        if client_mode:
+            params = params + " --client"
         self.command_line = self.app_path + params
         self.vhost_user.send_command(self.command_line)
-        # After started dpdk-vhost app, wait 3 seconds
         time.sleep(3)
 
-    def start_virtio_testpmd(
-        self,
-        pmd_session,
-        dev_mac,
-        dev_id,
-        cores,
-        prefix,
-        enable_queues=1,
-        nb_cores=1,
-        used_queues=1,
-        force_max_simd_bitwidth=False,
-        power2=False,
-    ):
+    def start_virtio_testpmd_with_vhost_net0(self, eal_param="", param=""):
         """
         launch the testpmd as virtio with vhost_net0
         """
-        txd_rxd = 1024
-        eal_params = " --vdev=net_virtio_user0,mac={},path=./vhost-net{},queues={},mrg_rxbuf={},in_order={}".format(
-            dev_mac, dev_id, enable_queues, self.mrg_rxbuf, self.in_order
-        )
-        if self.vectorized == 1:
-            eal_params += ",vectorized=1"
-        if self.packed_vq == 1:
-            eal_params += ",packed_vq=1"
-        if self.server:
-            eal_params += ",server=1"
-        if power2:
-            txd_rxd += 1
-            eal_params += ",queue_size={}".format(txd_rxd)
         if self.check_2M_env:
-            eal_params += " --single-file-segments"
-        if force_max_simd_bitwidth:
-            eal_params += " --force-max-simd-bitwidth=512"
-        params = "--rxq={} --txq={} --txd={} --rxd={} --nb-cores={}".format(
-            used_queues, used_queues, txd_rxd, txd_rxd, nb_cores
+            eal_param += " --single-file-segments"
+        self.virtio_user0_pmd.start_testpmd(
+            cores=self.vuser0_core_list,
+            eal_param=eal_param,
+            param=param,
+            no_pci=True,
+            prefix="virtio-user0",
+            fixed_prefix=True,
         )
-        pmd_session.start_testpmd(
-            cores=cores,
-            param=params,
-            eal_param=eal_params,
+
+    def start_virtio_testpmd_with_vhost_net1(self, eal_param="", param=""):
+        """
+        launch the testpmd as virtio with vhost_net1
+        """
+        if self.check_2M_env:
+            eal_param += " --single-file-segments"
+        self.virtio_user1_pmd.start_testpmd(
+            cores=self.vuser1_core_list,
+            eal_param=eal_param,
+            param=param,
             no_pci=True,
-            ports=[],
-            prefix=prefix,
+            prefix="virtio-user1",
             fixed_prefix=True,
         )
 
@@ -202,20 +162,21 @@  class TestVswitchSampleCBDMA(TestCase):
         start two VM, each VM has one virtio device
         """
         mergeable = "on" if mergeable else "off"
-        setting_args = "disable-modern=true,mrg_rxbuf={0},csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on".format(
-            mergeable
+        disable_modern = "false" if packed else "true"
+        packed_path = ",packed=on" if packed else ""
+        setting_args = (
+            "disable-modern=%s,mrg_rxbuf=%s,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on%s"
+            % (disable_modern, mergeable, packed_path)
         )
-        if packed:
-            setting_args = setting_args + ",packed=on"
         for i in range(self.vm_num):
             vm_dut = None
             vm_info = VM(self.dut, "vm%d" % i, "vhost_sample")
             vm_params = {}
             vm_params["driver"] = "vhost-user"
             if server_mode:
-                vm_params["opt_path"] = self.base_dir + "/vhost-net%d" % i + ",server"
+                vm_params["opt_path"] = "./vhost-net%d" % i + ",server"
             else:
-                vm_params["opt_path"] = self.base_dir + "/vhost-net%d" % i
+                vm_params["opt_path"] = "./vhost-net%d" % i
             vm_params["opt_mac"] = "52:54:00:00:00:0%d" % (i + 1)
             if vm_diff_param and i > 0:
                 vm_params["opt_settings"] = setting_args + ",packed=on"
@@ -249,10 +210,13 @@  class TestVswitchSampleCBDMA(TestCase):
             dut.bind_interfaces_linux(driver="vfio-pci")
             i += 1
 
-    def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num):
+    def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num, allow_diff_socket=False):
         """
-        get all cbdma ports
+        get and bind CBDMA ports to the DPDK driver
         """
+        self.all_cbdma_list = []
+        self.cbdma_list = []
+        self.cbdma_str = ""
         out = self.dut.send_expect(
             "./usertools/dpdk-devbind.py --status-dev dma", "# ", 30
         )
@@ -267,23 +231,19 @@  class TestVswitchSampleCBDMA(TestCase):
                     cur_socket = 1
                 else:
                     cur_socket = 0
-                if self.ports_socket == cur_socket:
-                    self.cbdma_dev_infos.append(pci_info.group(1))
+                if allow_diff_socket:
+                    self.all_cbdma_list.append(pci_info.group(1))
+                else:
+                    if self.ports_socket == cur_socket:
+                        self.all_cbdma_list.append(pci_info.group(1))
         self.verify(
-            len(self.cbdma_dev_infos) >= cbdma_num,
-            "There no enough cbdma device to run this suite",
+            len(self.all_cbdma_list) >= cbdma_num, "There are not enough CBDMA devices"
         )
-        used_cbdma = self.cbdma_dev_infos[0:cbdma_num]
-        dmas_info = ""
-        for dmas in used_cbdma:
-            number = used_cbdma.index(dmas)
-            dmas = "txd{}@{},".format(number, dmas)
-            dmas_info += dmas
-        self.dmas_info = dmas_info[:-1]
-        self.device_str = " ".join(used_cbdma)
+        self.cbdma_list = self.all_cbdma_list[0:cbdma_num]
+        self.cbdma_str = " ".join(self.cbdma_list)
         self.dut.send_expect(
             "./usertools/dpdk-devbind.py --force --bind=%s %s"
-            % (self.drivername, self.device_str),
+            % (self.drivername, self.cbdma_str),
             "# ",
             60,
         )
@@ -306,14 +266,14 @@  class TestVswitchSampleCBDMA(TestCase):
         )
 
     def bind_cbdma_device_to_kernel(self):
-        if self.device_str is not None:
+        if self.cbdma_str is not None:
             self.dut.send_expect("modprobe ioatdma", "# ")
             self.dut.send_expect(
-                "./usertools/dpdk-devbind.py -u %s" % self.device_str, "# ", 30
+                "./usertools/dpdk-devbind.py -u %s" % self.cbdma_str, "# ", 30
             )
             self.dut.send_expect(
                 "./usertools/dpdk-devbind.py --force --bind=ioatdma  %s"
-                % self.device_str,
+                % self.cbdma_str,
                 "# ",
                 60,
             )
@@ -335,10 +295,6 @@  class TestVswitchSampleCBDMA(TestCase):
         return tgen_input
 
     def perf_test(self, frame_sizes, dst_mac_list):
-        # Create test results table
-        table_header = ["Frame Size(Byte)", "Throughput(Mpps)"]
-        self.result_table_create(table_header)
-        # Begin test perf
         test_result = {}
         for frame_size in frame_sizes:
             self.logger.info(
@@ -358,118 +314,64 @@  class TestVswitchSampleCBDMA(TestCase):
             )
             throughput = pps / 1000000.0
             test_result[frame_size] = throughput
-            self.result_table_add([frame_size, throughput])
-        self.result_table_print()
         return test_result
 
     def pvp_test_with_cbdma(self):
         frame_sizes = [64, 128, 256, 512, 1024, 1280, 1518]
-        self.virtio_user0_pmd.execute_cmd("set fwd mac")
-        self.virtio_user0_pmd.execute_cmd("start tx_first")
-        self.virtio_user0_pmd.execute_cmd("stop")
-        self.virtio_user0_pmd.execute_cmd("set fwd mac")
-        self.virtio_user0_pmd.execute_cmd("start")
         dst_mac_list = [self.virtio_dst_mac0]
         perf_result = self.perf_test(frame_sizes, dst_mac_list)
         return perf_result
 
-    def test_perf_pvp_check_with_cbdma_channel_using_vhost_async_driver(self):
+    def let_vswitch_know_mac(self, virtio_pmd, relaunch=False):
+        if not relaunch:
+            virtio_pmd.execute_cmd("set fwd mac")
+            virtio_pmd.execute_cmd("start tx_first")
+        else:
+            virtio_pmd.execute_cmd("stop")
+            virtio_pmd.execute_cmd("start tx_first")
+
+    def test_perf_pvp_performance_check_with_cbdma_channel_using_vhost_async_driver(
+        self,
+    ):
         """
         Test Case1: PVP performance check with CBDMA channel using vhost async driver
         """
         perf_result = []
-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=1)
-
+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
+        dmas_info = "txd0@%s,rxd0@%s" % (self.cbdma_list[0], self.cbdma_list[1])
         self.start_vhost_app(
-            with_cbdma=True, cbdma_num=1, socket_num=1, client_mode=True
-        )
-        # packed ring
-        self.mrg_rxbuf = 0
-        self.in_order = 1
-        self.vectorized = 1
-        self.packed_vq = 1
-        self.server = 1
-        self.start_virtio_testpmd(
-            pmd_session=self.virtio_user0_pmd,
-            dev_mac=self.virtio_dst_mac0,
-            dev_id=0,
-            cores=self.vuser0_core_list,
-            prefix="testpmd0",
-            nb_cores=1,
-            used_queues=1,
-            force_max_simd_bitwidth=True,
-            power2=False,
+            cbdma_num=2, socket_num=1, dmas_info=dmas_info, client_mode=True
         )
-        packed_ring_result = self.pvp_test_with_cbdma()
 
-        # packed ring of power2
-        self.virtio_user0_pmd.execute_cmd("quit", "# ")
-        self.mrg_rxbuf = 0
-        self.in_order = 1
-        self.vectorized = 1
-        self.packed_vq = 1
-        self.server = 1
-
-        self.start_virtio_testpmd(
-            pmd_session=self.virtio_user0_pmd,
-            dev_mac=self.virtio_dst_mac0,
-            dev_id=0,
-            cores=self.vuser0_core_list,
-            prefix="testpmd0",
-            nb_cores=1,
-            used_queues=1,
-            force_max_simd_bitwidth=True,
-            power2=True,
+        # packed ring path
+        virtio0_eal_param = "--force-max-simd-bitwidth=512 --vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=1,mrg_rxbuf=0,in_order=1,vectorized=1,packed_vq=1,server=1"
+        virtio0_param = "--rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1"
+        self.start_virtio_testpmd_with_vhost_net0(
+            eal_param=virtio0_eal_param, param=virtio0_param
         )
-        packed_ring_power2_result = self.pvp_test_with_cbdma()
+        self.let_vswitch_know_mac(virtio_pmd=self.virtio_user0_pmd, relaunch=False)
+        packed_ring_result = self.pvp_test_with_cbdma()
 
-        # split ring
+        # split ring path
         self.virtio_user0_pmd.execute_cmd("quit", "# ")
-        self.mrg_rxbuf = 0
-        self.in_order = 1
-        self.vectorized = 1
-        self.packed_vq = 0
-        self.server = 1
-
-        self.start_virtio_testpmd(
-            pmd_session=self.virtio_user0_pmd,
-            dev_mac=self.virtio_dst_mac0,
-            dev_id=0,
-            cores=self.vuser0_core_list,
-            prefix="testpmd0",
-            nb_cores=1,
-            used_queues=1,
-            force_max_simd_bitwidth=False,
-            power2=False,
+        virtio0_eal_param = "--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=1,mrg_rxbuf=0,in_order=1,vectorized=1,server=1"
+        virtio0_param = "--rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1"
+        self.start_virtio_testpmd_with_vhost_net0(
+            eal_param=virtio0_eal_param, param=virtio0_param
         )
+        self.let_vswitch_know_mac(virtio_pmd=self.virtio_user0_pmd)
         split_ring_reult = self.pvp_test_with_cbdma()
 
-        self.table_header = ["Frame Size(Byte)", "Mode", "Throughput(Mpps)"]
-        self.result_table_create(self.table_header)
         for key in packed_ring_result.keys():
             perf_result.append([key, "packed_ring", packed_ring_result[key]])
-        for key in packed_ring_power2_result.keys():
-            perf_result.append(
-                [key, "packed_ring_power2", packed_ring_power2_result[key]]
-            )
+
         for key in split_ring_reult.keys():
             perf_result.append([key, "split_ring", split_ring_reult[key]])
+
         for table_row in perf_result:
             self.result_table_add(table_row)
+
         self.result_table_print()
-        for key in packed_ring_result.keys():
-            self.verify(
-                packed_ring_result[key] > 1, "The perf test result is lower than 1 Mpps"
-            )
-        for key in packed_ring_power2_result.keys():
-            self.verify(
-                packed_ring_power2_result[key] > 1,
-                "The perf test result is lower than 1 Mpps",
-            )
-        for key in split_ring_reult.keys():
-            self.verify(
-                split_ring_reult[key] > 1, "The perf test result is lower than 1 Mpps"
-            )
 
     def config_stream_imix(self, frame_sizes, dst_mac_list):
         tgen_input = []
@@ -496,10 +398,6 @@  class TestVswitchSampleCBDMA(TestCase):
         return tgen_input
 
     def perf_test_imix(self, frame_sizes, dst_mac_list):
-        # Create test results table
-        table_header = ["Frame Size(Byte)", "Throughput(Mpps)"]
-        self.result_table_create(table_header)
-        # Begin test perf
         test_result = {}
         tgenInput = self.config_stream_imix(frame_sizes, dst_mac_list)
         fields_config = {
@@ -520,83 +418,54 @@  class TestVswitchSampleCBDMA(TestCase):
         )
         throughput = pps / 1000000.0
         test_result["imix"] = throughput
-        self.result_table_add(["imix", throughput])
-        self.result_table_print()
         return test_result
 
-    def pvp_test_with_multi_cbdma(self, relaunch=False):
+    def pvp_test_with_multi_cbdma(self):
         frame_sizes = [64, 128, 256, 512, 1024, 1280, 1518]
-        if relaunch:
-            self.virtio_user0_pmd.execute_cmd("stop")
-            self.virtio_user1_pmd.execute_cmd("stop")
-            self.virtio_user0_pmd.execute_cmd("clear port stats all")
-            self.virtio_user1_pmd.execute_cmd("clear port stats all")
-        self.virtio_user0_pmd.execute_cmd("set fwd mac")
-        self.virtio_user1_pmd.execute_cmd("set fwd mac")
-        self.virtio_user0_pmd.execute_cmd("start tx_first")
-        self.virtio_user1_pmd.execute_cmd("start tx_first")
         dst_mac_list = [self.virtio_dst_mac0, self.virtio_dst_mac1]
         perf_result = self.perf_test_imix(frame_sizes, dst_mac_list)
-        out0 = self.virtio_user0_pmd.execute_cmd("show port stats all")
-        out1 = self.virtio_user1_pmd.execute_cmd("show port stats all")
-        rx_num0 = re.compile("RX-packets: (.*?)\s+?").findall(out0, re.S)
-        rx_num1 = re.compile("RX-packets: (.*?)\s+?").findall(out1, re.S)
-        self.verify(int(rx_num0[0]) > 32, "virtio-user0 not receive pkts from tester")
-        self.verify(int(rx_num1[0]) > 32, "virtio-user1 not receive pkts from tester")
         return perf_result
 
-    def test_perf_pvp_test_with_two_vm_and_two_cbdma_channels_using_vhost_async_driver(
-        self,
-    ):
+    def test_perf_pvp_test_with_2_vms_using_vhost_async_driver(self):
         """
-        Test Case2: PVP test with two VM and two CBDMA channels using vhost async driver
+        Test Case 2: PVP test with two VMs using vhost async driver
         """
         perf_result = []
-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
-
-        self.logger.info("Launch vhost app perf test")
+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
+        dmas_info = "txd0@%s,rxd0@%s,txd1@%s,rxd1@%s" % (
+            self.cbdma_list[0],
+            self.cbdma_list[1],
+            self.cbdma_list[2],
+            self.cbdma_list[3],
+        )
         self.start_vhost_app(
-            with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True
+            cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True
         )
-        self.mrg_rxbuf = 1
-        self.in_order = 0
-        self.vectorized = 0
-        self.packed_vq = 1
-        self.server = 1
-        self.start_virtio_testpmd(
-            pmd_session=self.virtio_user0_pmd,
-            dev_mac=self.virtio_dst_mac0,
-            dev_id=0,
-            cores=self.vuser0_core_list,
-            prefix="testpmd0",
-            nb_cores=1,
-            used_queues=1,
+        virtio0_eal_param = "--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=1,server=1,mrg_rxbuf=1,in_order=0,packed_vq=1"
+        virtio0_param = "--rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1"
+        self.start_virtio_testpmd_with_vhost_net0(
+            eal_param=virtio0_eal_param, param=virtio0_param
         )
-        self.mrg_rxbuf = 1
-        self.in_order = 1
-        self.vectorized = 1
-        self.packed_vq = 0
-        self.server = 1
-        self.start_virtio_testpmd(
-            pmd_session=self.virtio_user1_pmd,
-            dev_mac=self.virtio_dst_mac1,
-            dev_id=1,
-            cores=self.vuser1_core_list,
-            prefix="testpmd1",
-            nb_cores=1,
-            used_queues=1,
+
+        virtio1_eal_param = "--vdev=net_virtio_user0,mac=00:11:22:33:44:11,path=./vhost-net1,queues=1,server=1,mrg_rxbuf=1,in_order=1,vectorized=1"
+        virtio1_param = "--rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1"
+        self.start_virtio_testpmd_with_vhost_net1(
+            eal_param=virtio1_eal_param, param=virtio1_param
         )
+        self.let_vswitch_know_mac(virtio_pmd=self.virtio_user0_pmd, relaunch=False)
+        self.let_vswitch_know_mac(virtio_pmd=self.virtio_user1_pmd, relaunch=False)
+
         before_relunch = self.pvp_test_with_multi_cbdma()
 
-        self.logger.info("Relaunch vhost app perf test")
         self.vhost_user.send_expect("^C", "# ", 20)
+        dmas_info = "txd0@%s,rxd1@%s" % (self.cbdma_list[0], self.cbdma_list[1])
         self.start_vhost_app(
-            with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True
+            cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True
         )
-        after_relunch = self.pvp_test_with_multi_cbdma(relaunch=True)
+        self.let_vswitch_know_mac(virtio_pmd=self.virtio_user0_pmd, relaunch=True)
+        self.let_vswitch_know_mac(virtio_pmd=self.virtio_user1_pmd, relaunch=True)
+        after_relunch = self.pvp_test_with_multi_cbdma()
 
-        self.table_header = ["Frame Size(Byte)", "Mode", "Throughput(Mpps)"]
-        self.result_table_create(self.table_header)
         for key in before_relunch.keys():
             perf_result.append(["imix", "Before Re-launch vhost", before_relunch[key]])
         for key in after_relunch.keys():
@@ -604,14 +473,15 @@  class TestVswitchSampleCBDMA(TestCase):
         for table_row in perf_result:
             self.result_table_add(table_row)
         self.result_table_print()
-        for key in before_relunch.keys():
-            self.verify(
-                before_relunch[key] > 1, "The perf test result is lower than 1 Mpps"
-            )
-        for key in after_relunch.keys():
-            self.verify(
-                after_relunch[key] > 1, "The perf test result is lower than 1 Mpps"
+
+        self.verify(
+            (
+                abs(before_relunch["imix"] - after_relunch["imix"])
+                / before_relunch["imix"]
             )
+            < 0.5,
+            "perf data have drop after re-launch vhost",
+        )
 
     def get_receive_throughput(self, pmd_session, count=10):
         i = 0
@@ -636,7 +506,6 @@  class TestVswitchSampleCBDMA(TestCase):
         pmd_session.execute_cmd("set eth-peer 0 %s" % eth_peer_mac)
 
     def send_pkts_from_testpmd1(self, pmd_session, pkt_len):
-        pmd_session.execute_cmd("stop")
         if pkt_len in [64, 2000]:
             pmd_session.execute_cmd("set txpkts %s" % pkt_len)
         elif pkt_len == 8000:
@@ -645,15 +514,8 @@  class TestVswitchSampleCBDMA(TestCase):
             pmd_session.execute_cmd("set txpkts 64,256,2000,64,256,2000")
         pmd_session.execute_cmd("start tx_first")
 
-    def vm2vm_check_with_two_cbdma(self, relaunch=False):
+    def vm2vm_check_with_two_cbdma(self):
         frame_sizes = [64, 2000, 8000, "imix"]
-        if relaunch:
-            self.virtio_user0_pmd.execute_cmd("stop")
-            self.virtio_user1_pmd.execute_cmd("stop")
-            self.virtio_user0_pmd.execute_cmd("clear port stats all")
-            self.virtio_user1_pmd.execute_cmd("clear port stats all")
-            self.virtio_user0_pmd.execute_cmd("show port stats all")
-            self.virtio_user1_pmd.execute_cmd("show port stats all")
         self.set_testpmd0_param(self.virtio_user0_pmd, self.virtio_dst_mac1)
         self.set_testpmd1_param(self.virtio_user1_pmd, self.virtio_dst_mac0)
 
@@ -662,65 +524,46 @@  class TestVswitchSampleCBDMA(TestCase):
             self.send_pkts_from_testpmd1(
                 pmd_session=self.virtio_user1_pmd, pkt_len=frame_size
             )
-            # Create test results table
-            table_header = ["Frame Size(Byte)", "Throughput(Mpps)"]
-            self.result_table_create(table_header)
             rx_pps = self.get_receive_throughput(pmd_session=self.virtio_user1_pmd)
-            self.result_table_add([frame_size, rx_pps])
             rx_throughput[frame_size] = rx_pps
-            self.result_table_print()
         return rx_throughput
 
-    def test_vm2vm_fwd_test_with_two_cbdma_channels(self):
+    def test_vm2vm_virtio_user_forwarding_test_using_vhost_async_driver(self):
         """
-        Test Case3: VM2VM forwarding test with two CBDMA channels
+        Test Case 3: VM2VM virtio-user forwarding test using vhost async driver
         """
         perf_result = []
-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
-
-        self.logger.info("Launch vhost app perf test")
+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
+        dmas_info = "txd0@%s,rxd0@%s,txd1@%s,rxd1@%s" % (
+            self.cbdma_list[0],
+            self.cbdma_list[1],
+            self.cbdma_list[2],
+            self.cbdma_list[3],
+        )
         self.start_vhost_app(
-            with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True
+            cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True
         )
-        self.mrg_rxbuf = 1
-        self.in_order = 0
-        self.vectorized = 0
-        self.packed_vq = 1
-        self.server = 1
-        self.start_virtio_testpmd(
-            pmd_session=self.virtio_user0_pmd,
-            dev_mac=self.virtio_dst_mac0,
-            dev_id=0,
-            cores=self.vuser0_core_list,
-            prefix="testpmd0",
-            nb_cores=1,
-            used_queues=1,
+        virtio0_eal_param = "--vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=1,server=1,mrg_rxbuf=1,in_order=0,packed_vq=1"
+        virtio0_param = "--rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1"
+        self.start_virtio_testpmd_with_vhost_net0(
+            eal_param=virtio0_eal_param, param=virtio0_param
         )
-        self.mrg_rxbuf = 1
-        self.in_order = 1
-        self.vectorized = 1
-        self.packed_vq = 0
-        self.server = 1
-        self.start_virtio_testpmd(
-            pmd_session=self.virtio_user1_pmd,
-            dev_mac=self.virtio_dst_mac1,
-            dev_id=1,
-            cores=self.vuser1_core_list,
-            prefix="testpmd1",
-            nb_cores=1,
-            used_queues=1,
+
+        virtio1_eal_param = "--vdev=net_virtio_user0,mac=00:11:22:33:44:11,path=./vhost-net1,queues=1,server=1,mrg_rxbuf=1,in_order=1,vectorized=1"
+        virtio1_param = "--rxq=1 --txq=1 --txd=1024 --rxd=1024 --nb-cores=1"
+        self.start_virtio_testpmd_with_vhost_net1(
+            eal_param=virtio1_eal_param, param=virtio1_param
         )
         before_relunch_result = self.vm2vm_check_with_two_cbdma()
 
-        self.logger.info("Relaunch vhost app perf test")
         self.vhost_user.send_expect("^C", "# ", 20)
+        dmas_info = "txd0@%s,rxd1@%s" % (self.cbdma_list[0], self.cbdma_list[1])
         self.start_vhost_app(
-            with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True
+            cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True
         )
-        after_relunch_result = self.vm2vm_check_with_two_cbdma(relaunch=True)
+        self.virtio_user0_pmd.execute_cmd("stop")
+        after_relunch_result = self.vm2vm_check_with_two_cbdma()
 
-        self.table_header = ["Frame Size(Byte)", "Mode", "Throughput(Mpps)"]
-        self.result_table_create(self.table_header)
         for key in before_relunch_result.keys():
             perf_result.append(
                 [key, "Before Re-launch vhost", before_relunch_result[key]]
@@ -732,36 +575,21 @@  class TestVswitchSampleCBDMA(TestCase):
         for table_row in perf_result:
             self.result_table_add(table_row)
         self.result_table_print()
-        for key in before_relunch_result.keys():
-            self.verify(
-                before_relunch_result[key] > 0.1,
-                "The perf test result is lower than 0.1 Mpps",
-            )
-        for key in after_relunch_result.keys():
-            self.verify(
-                after_relunch_result[key] > 0.1,
-                "The perf test result is lower than 0.1 Mpps",
-            )
 
     def vm2vm_check_with_two_vhost_device(self):
         rx_throughput = {}
         self.frame_sizes = [64, 2000, 8000, "imix"]
         for frame_size in self.frame_sizes:
             self.send_pkts_from_testpmd1(pmd_session=self.vm1_pmd, pkt_len=frame_size)
-            # Create test results table
-            table_header = ["Frame Size(Byte)", "Throughput(Mpps)"]
-            self.result_table_create(table_header)
             rx_pps = self.get_receive_throughput(pmd_session=self.vm1_pmd)
-            self.result_table_add([frame_size, rx_pps])
             rx_throughput[frame_size] = rx_pps
-            self.result_table_print()
         return rx_throughput
 
-    def start_vms_testpmd_and_test(self, need_start_vm=True):
+    def start_vms_testpmd_and_test(self, need_start_vm=True, packed=False):
         if need_start_vm:
             self.start_vms(
                 mergeable=True,
-                packed=False,
+                packed=packed,
                 server_mode=True,
                 set_target=True,
                 bind_dev=True,
@@ -778,42 +606,46 @@  class TestVswitchSampleCBDMA(TestCase):
         self.vm1_pmd.quit()
         return perf_result
 
-    def test_vm2vm_test_with_cbdma_channels_register_or_unregister_stable_check(self):
+    def test_vm2vm_virtio_pmd_split_ring_test_with_cbdma_channels_register_and_unregister_stable_check(
+        self,
+    ):
         """
-        Test Case4: VM2VM test with cbdma channels register/unregister stable check
+        Test Case 4: VM2VM virtio-pmd split ring test with cbdma channels register/unregister stable check
         """
         perf_result = []
-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
-
-        self.logger.info("Before rebind VM Driver perf test")
+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
+        dmas_info = "txd0@%s,rxd0@%s,txd1@%s,rxd1@%s" % (
+            self.cbdma_list[0],
+            self.cbdma_list[1],
+            self.cbdma_list[2],
+            self.cbdma_list[3],
+        )
         self.start_vhost_app(
-            with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True
+            cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True
+        )
+        before_rebind = self.start_vms_testpmd_and_test(
+            need_start_vm=True, packed=False
         )
-        before_rebind = self.start_vms_testpmd_and_test(need_start_vm=True)
 
-        self.logger.info("After rebind VM Driver perf test")
         # repeat bind 50 time from virtio-pci to vfio-pci
         self.repeat_bind_driver(dut=self.vm_dut[0], repeat_times=50)
         self.repeat_bind_driver(dut=self.vm_dut[1], repeat_times=50)
+
         self.vhost_user.send_expect("^C", "# ", 20)
+        dmas_info = "txd0@%s,rxd1@%s" % (self.cbdma_list[0], self.cbdma_list[3])
         self.start_vhost_app(
-            with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True
+            cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True
+        )
+        after_rebind = self.start_vms_testpmd_and_test(
+            need_start_vm=False, packed=False
         )
-        after_bind = self.start_vms_testpmd_and_test(need_start_vm=False)
-        # repeat bind 50 time from virtio-pci to vfio-pci
-        self.repeat_bind_driver(dut=self.vm_dut[0], repeat_times=50)
-        self.repeat_bind_driver(dut=self.vm_dut[1], repeat_times=50)
 
-        self.table_header = [
-            "Frame Size(Byte)",
-            "Before/After Bind VM Driver",
-            "Throughput(Mpps)",
-        ]
-        self.result_table_create(self.table_header)
         for key in before_rebind.keys():
             perf_result.append([key, "Before rebind driver", before_rebind[key]])
-        for key in after_bind.keys():
-            perf_result.append([key, "After rebind driver", after_bind[key]])
+
+        for key in after_rebind.keys():
+            perf_result.append([key, "After rebind driver", after_rebind[key]])
+
         for table_row in perf_result:
             self.result_table_add(table_row)
         self.result_table_print()
@@ -851,8 +683,6 @@  class TestVswitchSampleCBDMA(TestCase):
         """
         get the iperf test result
         """
-        self.table_header = ["Mode", "[M|G]bits/sec"]
-        self.result_table_create(self.table_header)
         self.vm_dut[0].send_expect("pkill iperf", "# ")
         self.vm_dut[1].session.copy_file_from("%s/iperf_client.log" % self.dut.base_dir)
         fp = open("./iperf_client.log")
@@ -871,13 +701,6 @@  class TestVswitchSampleCBDMA(TestCase):
             and float(iperfdata[-1].split()[0]) >= 1,
             "the throughput must be above 1Gbits/sec",
         )
-
-        # put the result to table
-        results_row = ["vm2vm", iperfdata[-1]]
-        self.result_table_add(results_row)
-
-        # print iperf resut
-        self.result_table_print()
         # rm the iperf log file in vm
         self.vm_dut[0].send_expect("rm iperf_server.log", "#", 10)
         self.vm_dut[1].send_expect("rm iperf_client.log", "#", 10)
@@ -889,7 +712,7 @@  class TestVswitchSampleCBDMA(TestCase):
         """
         # default file_size=1024K
         data = ""
-        for char in range(file_size * 1024):
+        for _ in range(file_size * 1024):
             data += random.choice(self.random_string)
         self.vm_dut[0].send_expect('echo "%s" > /tmp/payload' % data, "# ")
         # scp this file to vm1
@@ -927,74 +750,144 @@  class TestVswitchSampleCBDMA(TestCase):
         iperfdata = self.get_iperf_result()
         return iperfdata
 
-    def test_vm2vm_split_ring_test_with_iperf_and_reconnect_stable_check(self):
+    def test_vm2vm_virtio_pmd_packed_ring_test_with_cbdma_channels_register_unregister_stable_check(
+        self,
+    ):
         """
-        Test Case5: VM2VM split ring test with iperf and reconnect stable check
+        Test Case 5: VM2VM virtio-pmd packed ring test with cbdma channels register/unregister stable check
         """
         perf_result = []
-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
+        dmas_info = "txd0@%s,rxd0@%s,txd1@%s,rxd1@%s" % (
+            self.cbdma_list[0],
+            self.cbdma_list[1],
+            self.cbdma_list[2],
+            self.cbdma_list[3],
+        )
+        self.start_vhost_app(
+            cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True
+        )
+        before_rebind = self.start_vms_testpmd_and_test(need_start_vm=True, packed=True)
+
+        # repeat bind 50 times from virtio-pci to vfio-pci
+        self.repeat_bind_driver(dut=self.vm_dut[0], repeat_times=50)
+        self.repeat_bind_driver(dut=self.vm_dut[1], repeat_times=50)
+
+        after_rebind = self.start_vms_testpmd_and_test(need_start_vm=False, packed=True)
 
-        self.logger.info("launch vhost")
+        for key in before_rebind.keys():
+            perf_result.append([key, "Before rebind driver", before_rebind[key]])
+
+        for key in after_rebind.keys():
+            perf_result.append([key, "After rebind driver", after_rebind[key]])
+
+        for table_row in perf_result:
+            self.result_table_add(table_row)
+        self.result_table_print()
+
+    def test_vm2vm_virtio_net_split_ring_test_with_4_cbdma_channels_and_iperf_stable_check(
+        self,
+    ):
+        """
+        Test Case 6: VM2VM virtio-net split ring test with 4 cbdma channels and iperf stable check
+        """
+        perf_result = []
+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
+        dmas_info = "txd0@%s,rxd0@%s,txd1@%s,rxd1@%s" % (
+            self.cbdma_list[0],
+            self.cbdma_list[1],
+            self.cbdma_list[2],
+            self.cbdma_list[3],
+        )
         self.start_vhost_app(
-            with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True
+            cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True
         )
-        before_rerun = self.start_iperf_and_scp_test_in_vms(
+        before_relaunch = self.start_iperf_and_scp_test_in_vms(
             need_start_vm=True, mergeable=False, packed=False, server_mode=True
         )
+        perf_result.append(["split ring", "Before relaunch test", before_relaunch])
 
-        self.logger.info("relaunch vhost")
         self.vhost_user.send_expect("^C", "# ", 20)
         self.start_vhost_app(
-            with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=True
+            cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True
         )
-        self.logger.info("rerun scp and iperf test")
-        rerun_test_1 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)
-        rerun_test_2 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)
-        rerun_test_3 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)
-        rerun_test_4 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)
-        rerun_test_5 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)
-
-        self.table_header = ["Path", "Before/After rerun scp/iperf", "Throughput(Mpps)"]
-        self.result_table_create(self.table_header)
-        perf_result.append(["split ring", "Before rerun", before_rerun])
-        perf_result.append(["split ring", "rerun test 1", rerun_test_1])
-        perf_result.append(["split ring", "rerun test 2", rerun_test_2])
-        perf_result.append(["split ring", "rerun test 3", rerun_test_3])
-        perf_result.append(["split ring", "rerun test 4", rerun_test_4])
-        perf_result.append(["split ring", "rerun test 5", rerun_test_5])
+
+        for _ in range(5):
+            rerun_result = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)
+            perf_result.append(["split ring", "After  rerun test", rerun_result])
+
+        self.vhost_user.send_expect("^C", "# ", 20)
+        dmas_info = "txd0@%s,rxd1@%s" % (self.cbdma_list[0], self.cbdma_list[1])
+        self.start_vhost_app(
+            cbdma_num=2, socket_num=2, dmas_info=dmas_info, client_mode=True
+        )
+
+        after_relaunch = self.start_iperf_and_scp_test_in_vms(
+            need_start_vm=False, mergeable=False, packed=False, server_mode=True
+        )
+
+        perf_result.append(["split ring", "After relaunch test", after_relaunch])
+
         for table_row in perf_result:
             self.result_table_add(table_row)
         self.result_table_print()
 
-    def test_vm2vm_packed_ring_test_with_iperf_and_reconnect_stable_check(self):
+    def test_vm2vm_virtio_net_packed_ring_test_with_4_cbdma_channels_and_iperf_stable_check(
+        self,
+    ):
         """
-        Test Case6: VM2VM packed ring test with iperf and reconnect stable test
+        Test Case 7: VM2VM virtio-net packed ring test with 4 cbdma channels and iperf stable check
         """
         perf_result = []
-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=4)
+        dmas_info = "txd0@%s,rxd0@%s,txd1@%s,rxd1@%s" % (
+            self.cbdma_list[0],
+            self.cbdma_list[1],
+            self.cbdma_list[2],
+            self.cbdma_list[3],
+        )
+        self.start_vhost_app(
+            cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True
+        )
+        before_relaunch = self.start_iperf_and_scp_test_in_vms(
+            need_start_vm=True, mergeable=False, packed=True, server_mode=True
+        )
+
+        perf_result.append(["packed ring", "Before rerun test", before_relaunch])
 
+        self.vhost_user.send_expect("^C", "# ", 20)
         self.start_vhost_app(
-            with_cbdma=True, cbdma_num=2, socket_num=2, client_mode=False
+            cbdma_num=4, socket_num=2, dmas_info=dmas_info, client_mode=True
         )
-        before_rerun = self.start_iperf_and_scp_test_in_vms(
+        for _ in range(5):
+            rerun_result = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)
+            perf_result.append(["packed ring", "After  rerun test", rerun_result])
+
+        for table_row in perf_result:
+            self.result_table_add(table_row)
+        self.result_table_print()
+
+    def test_vm2vm_virtio_net_packed_ring_test_with_2_cbdma_channels_and_iperf_stable_check(
+        self,
+    ):
+        """
+        Test Case 8: VM2VM virtio-net packed ring test with 2 cbdma channels and iperf stable check
+        """
+        perf_result = []
+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)
+        dmas_info = "txd0@%s,rxd1@%s" % (self.cbdma_list[0], self.cbdma_list[1])
+        self.start_vhost_app(
+            cbdma_num=2, socket_num=2, dmas_info=dmas_info, client_mode=False
+        )
+        before_relaunch = self.start_iperf_and_scp_test_in_vms(
             need_start_vm=True, mergeable=False, packed=True, server_mode=False
         )
+        perf_result.append(["packed ring", "Before rerun test", before_relaunch])
 
-        self.logger.info("rerun scp and iperf test")
-        rerun_test_1 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)
-        rerun_test_2 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)
-        rerun_test_3 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)
-        rerun_test_4 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)
-        rerun_test_5 = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)
+        for _ in range(5):
+            rerun_result = self.start_iperf_and_scp_test_in_vms(need_start_vm=False)
+            perf_result.append(["packed ring", "After  rerun test", rerun_result])
 
-        self.table_header = ["Path", "Before/After rerun scp/iperf", "Throughput(Mpps)"]
-        self.result_table_create(self.table_header)
-        perf_result.append(["packed ring", "Before rerun test", before_rerun])
-        perf_result.append(["packed ring", "rerun test 1", rerun_test_1])
-        perf_result.append(["packed ring", "rerun test 2", rerun_test_2])
-        perf_result.append(["packed ring", "rerun test 3", rerun_test_3])
-        perf_result.append(["packed ring", "rerun test 4", rerun_test_4])
-        perf_result.append(["packed ring", "rerun test 5", rerun_test_5])
         for table_row in perf_result:
             self.result_table_add(table_row)
         self.result_table_print()
@@ -1011,10 +904,10 @@  class TestVswitchSampleCBDMA(TestCase):
         """
         Run after each test case.
         """
-        self.dut.kill_all()
         for i in range(len(self.vm)):
             self.vm[i].stop()
         self.vhost_user.send_expect("^C", "# ", 20)
+        self.dut.kill_all()
         self.bind_cbdma_device_to_kernel()
 
     def tear_down_all(self):