get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
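As a quick illustration, the sketch below drives this endpoint with Python's `requests` library. It is a minimal sketch, not part of the API response: the token value is a hypothetical placeholder, and write access assumes an account with maintainer rights on the project (Patchwork's REST API uses DRF token authentication for writes).

```python
import requests

BASE = "http://patchwork.dpdk.org/api"
PATCH_ID = 109174
PATCHWORK_TOKEN = "..."  # hypothetical placeholder; use a real API token

# GET: show a patch (reads need no authentication).
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# PATCH: partially update a patch, e.g. change its state.
resp = requests.patch(
    f"{BASE}/patches/{PATCH_ID}/",
    headers={"Authorization": f"Token {PATCHWORK_TOKEN}"},
    json={"state": "superseded"},
)
resp.raise_for_status()
```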

GET /api/patches/109174/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 109174,
    "url": "http://patchwork.dpdk.org/api/patches/109174/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dts/patch/20220406082126.25324-1-weix.ling@intel.com/",
    "project": {
        "id": 3,
        "url": "http://patchwork.dpdk.org/api/projects/3/?format=api",
        "name": "DTS",
        "link_name": "dts",
        "list_id": "dts.dpdk.org",
        "list_email": "dts@dpdk.org",
        "web_url": "",
        "scm_url": "git://dpdk.org/tools/dts",
        "webscm_url": "http://git.dpdk.org/tools/dts/",
        "list_archive_url": "https://inbox.dpdk.org/dts",
        "list_archive_url_format": "https://inbox.dpdk.org/dts/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220406082126.25324-1-weix.ling@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dts/20220406082126.25324-1-weix.ling@intel.com",
    "date": "2022-04-06T08:21:26",
    "name": "[V1,2/5] tests/vm2vm_virtio_net_perf: delete CBDMA test case",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "85084ebaa079e7678e945120a9a5784cd56fd9d2",
    "submitter": {
        "id": 1828,
        "url": "http://patchwork.dpdk.org/api/people/1828/?format=api",
        "name": "Ling, WeiX",
        "email": "weix.ling@intel.com"
    },
    "delegate": null,
    "mbox": "http://patchwork.dpdk.org/project/dts/patch/20220406082126.25324-1-weix.ling@intel.com/mbox/",
    "series": [
        {
            "id": 22361,
            "url": "http://patchwork.dpdk.org/api/series/22361/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dts/list/?series=22361",
            "date": "2022-04-06T08:20:59",
            "name": "migrate cbdma case in new testsuite",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/22361/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/109174/comments/",
    "check": "pending",
    "checks": "http://patchwork.dpdk.org/api/patches/109174/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dts-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 15CCBA0509;\n\tWed,  6 Apr 2022 10:21:35 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 106794284B;\n\tWed,  6 Apr 2022 10:21:35 +0200 (CEST)",
            "from mga07.intel.com (mga07.intel.com [134.134.136.100])\n by mails.dpdk.org (Postfix) with ESMTP id 447BA40689\n for <dts@dpdk.org>; Wed,  6 Apr 2022 10:21:33 +0200 (CEST)",
            "from fmsmga001.fm.intel.com ([10.253.24.23])\n by orsmga105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 06 Apr 2022 01:21:32 -0700",
            "from unknown (HELO localhost.localdomain) ([10.239.251.222])\n by fmsmga001-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 06 Apr 2022 01:21:30 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1649233293; x=1680769293;\n h=from:to:cc:subject:date:message-id:mime-version:\n content-transfer-encoding;\n bh=pKlsJEGktI3u+xVnxt2BNiwtAzhITJR5BF641VSJJq8=;\n b=iIkOYnMCnS7hB9OcwOjJGQKafyRYjOkKTUKBi9q1tJCTaIaZeAInNblk\n 3MkJjyVzj/HwwoZrlvYbCcKcT4fX3FweCOfKv6Hw898y6ZhfuosiF3lPB\n aKqoNDUS2+opFO1Ail26zqVuXRnxZYmg3oY8/UIvzb3K13yxfzuyanrnd\n X1cIlM/NSm/f9oFyvy4JFWyVCR7ERq19SqYX/tb5jvx78msq0E6ubt3He\n nlE3Cjh8SEuZbt3X9h+gLVRwGDJABAQZ+qhBdW9ll/VmwOxYD83z+9x79\n +mWIGB0v9RU+AhOJ7blngC0UyulcRwf+hxNbtn2JpEZIuz/JDSD+LnyO8 g==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6200,9189,10308\"; a=\"324155295\"",
            "E=Sophos;i=\"5.90,239,1643702400\"; d=\"scan'208\";a=\"324155295\"",
            "E=Sophos;i=\"5.90,239,1643702400\"; d=\"scan'208\";a=\"697282900\""
        ],
        "From": "Wei Ling <weix.ling@intel.com>",
        "To": "dts@dpdk.org",
        "Cc": "Wei Ling <weix.ling@intel.com>",
        "Subject": "[dts][PATCH V1 2/5] tests/vm2vm_virtio_net_perf: delete CBDMA test\n case",
        "Date": "Wed,  6 Apr 2022 16:21:26 +0800",
        "Message-Id": "<20220406082126.25324-1-weix.ling@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dts@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "test suite reviews and discussions <dts.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dts>,\n <mailto:dts-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dts/>",
        "List-Post": "<mailto:dts@dpdk.org>",
        "List-Help": "<mailto:dts-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dts>,\n <mailto:dts-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dts-bounces@dpdk.org"
    },
    "content": "As commit 53d3f4778c(vhost: integrate dmadev in asynchronous data-path),\ndelete cbdma related cases form tests/vm2vm_virtio_net_perf.\n\nSigned-off-by: Wei Ling <weix.ling@intel.com>\n---\n tests/TestSuite_vm2vm_virtio_net_perf.py | 793 ++---------------------\n 1 file changed, 44 insertions(+), 749 deletions(-)",
    "diff": "diff --git a/tests/TestSuite_vm2vm_virtio_net_perf.py b/tests/TestSuite_vm2vm_virtio_net_perf.py\nindex 486f1acf..8c234c24 100644\n--- a/tests/TestSuite_vm2vm_virtio_net_perf.py\n+++ b/tests/TestSuite_vm2vm_virtio_net_perf.py\n@@ -38,7 +38,6 @@ vm2vm split ring and packed ring vhost-user/virtio-net check the payload of larg\n mergeable and non-mergeable dequeue zero copy.\n please use qemu version greater 4.1.94 which support packed feathur to test this suite.\n \"\"\"\n-import random\n import re\n import string\n import time\n@@ -71,12 +70,6 @@ class TestVM2VMVirtioNetPerf(TestCase):\n         self.vhost = self.dut.new_session(suite=\"vhost\")\n         self.pmd_vhost = PmdOutput(self.dut, self.vhost)\n         self.app_testpmd_path = self.dut.apps_name[\"test-pmd\"]\n-        # get cbdma device\n-        self.cbdma_dev_infos = []\n-        self.dmas_info = None\n-        self.device_str = None\n-        self.checked_vm = False\n-        self.dut.restore_interfaces()\n \n     def set_up(self):\n         \"\"\"\n@@ -86,158 +79,29 @@ class TestVM2VMVirtioNetPerf(TestCase):\n         self.vm_dut = []\n         self.vm = []\n \n-    def get_cbdma_ports_info_and_bind_to_dpdk(\n-        self, cbdma_num=2, allow_diff_socket=False\n-    ):\n-        \"\"\"\n-        get all cbdma ports\n-        \"\"\"\n-        out = self.dut.send_expect(\n-            \"./usertools/dpdk-devbind.py --status-dev dma\", \"# \", 30\n-        )\n-        device_info = out.split(\"\\n\")\n-        for device in device_info:\n-            pci_info = re.search(\"\\s*(0000:\\S*:\\d*.\\d*)\", device)\n-            if pci_info is not None:\n-                dev_info = pci_info.group(1)\n-                # the numa id of ioat dev, only add the device which on same socket with nic dev\n-                bus = int(dev_info[5:7], base=16)\n-                if bus >= 128:\n-                    cur_socket = 1\n-                else:\n-                    cur_socket = 0\n-                if allow_diff_socket:\n-                    self.cbdma_dev_infos.append(pci_info.group(1))\n-                else:\n-                    if self.ports_socket == cur_socket:\n-                        self.cbdma_dev_infos.append(pci_info.group(1))\n-        self.verify(\n-            len(self.cbdma_dev_infos) >= cbdma_num,\n-            \"There no enough cbdma device to run this suite\",\n-        )\n-        used_cbdma = self.cbdma_dev_infos[0:cbdma_num]\n-        dmas_info = \"\"\n-        for dmas in used_cbdma[0 : int(cbdma_num / 2)]:\n-            number = used_cbdma[0 : int(cbdma_num / 2)].index(dmas)\n-            dmas = \"txq{}@{},\".format(number, dmas)\n-            dmas_info += dmas\n-        for dmas in used_cbdma[int(cbdma_num / 2) :]:\n-            number = used_cbdma[int(cbdma_num / 2) :].index(dmas)\n-            dmas = \"txq{}@{},\".format(number, dmas)\n-            dmas_info += dmas\n-        self.dmas_info = dmas_info[:-1]\n-        self.device_str = \" \".join(used_cbdma)\n-        self.dut.send_expect(\n-            \"./usertools/dpdk-devbind.py --force --bind=%s %s\"\n-            % (self.drivername, self.device_str),\n-            \"# \",\n-            60,\n-        )\n-\n-    def bind_cbdma_device_to_kernel(self):\n-        if self.device_str is not None:\n-            self.dut.send_expect(\"modprobe ioatdma\", \"# \")\n-            self.dut.send_expect(\n-                \"./usertools/dpdk-devbind.py -u %s\" % self.device_str, \"# \", 30\n-            )\n-            self.dut.send_expect(\n-                
\"./usertools/dpdk-devbind.py --force --bind=ioatdma  %s\"\n-                % self.device_str,\n-                \"# \",\n-                60,\n-            )\n-\n-    @property\n-    def check_2m_env(self):\n-        out = self.dut.send_expect(\n-            \"cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'\", \"# \"\n-        )\n-        return True if out == \"2048\" else False\n-\n     def start_vhost_testpmd(\n         self,\n-        cbdma=False,\n         no_pci=True,\n-        client_mode=False,\n-        enable_queues=1,\n-        nb_cores=2,\n-        rxq_txq=None,\n-        exchange_cbdma=False,\n-        iova_mode=\"\",\n     ):\n         \"\"\"\n         launch the testpmd with different parameters\n         \"\"\"\n-        if cbdma is True:\n-            dmas_info_list = self.dmas_info.split(\",\")\n-            cbdma_arg_0_list = []\n-            cbdma_arg_1_list = []\n-            for item in dmas_info_list:\n-                if dmas_info_list.index(item) < int(len(dmas_info_list) / 2):\n-                    cbdma_arg_0_list.append(item)\n-                else:\n-                    cbdma_arg_1_list.append(item)\n-            cbdma_arg_0 = \",dmas=[{}]\".format(\";\".join(cbdma_arg_0_list))\n-            cbdma_arg_1 = \",dmas=[{}]\".format(\";\".join(cbdma_arg_1_list))\n-        else:\n-            cbdma_arg_0 = \"\"\n-            cbdma_arg_1 = \"\"\n         testcmd = self.app_testpmd_path + \" \"\n-        if not client_mode:\n-            vdev1 = \"--vdev 'net_vhost0,iface=%s/vhost-net0,queues=%d%s' \" % (\n-                self.base_dir,\n-                enable_queues,\n-                cbdma_arg_0,\n-            )\n-            vdev2 = \"--vdev 'net_vhost1,iface=%s/vhost-net1,queues=%d%s' \" % (\n-                self.base_dir,\n-                enable_queues,\n-                cbdma_arg_1,\n-            )\n-        else:\n-            vdev1 = \"--vdev 'net_vhost0,iface=%s/vhost-net0,client=1,queues=%d%s' \" % (\n-                self.base_dir,\n-                enable_queues,\n-                cbdma_arg_0,\n-            )\n-            vdev2 = \"--vdev 'net_vhost1,iface=%s/vhost-net1,client=1,queues=%d%s' \" % (\n-                self.base_dir,\n-                enable_queues,\n-                cbdma_arg_1,\n-            )\n-        if exchange_cbdma:\n-            vdev1 = \"--vdev 'net_vhost0,iface=%s/vhost-net0,client=1,queues=%d%s' \" % (\n-                self.base_dir,\n-                enable_queues,\n-                cbdma_arg_1,\n-            )\n-            vdev2 = \"--vdev 'net_vhost1,iface=%s/vhost-net1,client=1,queues=%d%s' \" % (\n-                self.base_dir,\n-                enable_queues,\n-                cbdma_arg_0,\n-            )\n-\n+        vdev1 = \"--vdev 'net_vhost0,iface=%s/vhost-net0,queues=1' \" % (\n+            self.base_dir\n+        )\n+        vdev2 = \"--vdev 'net_vhost1,iface=%s/vhost-net1,queues=1' \" % (\n+            self.base_dir\n+        )\n         eal_params = self.dut.create_eal_parameters(\n             cores=self.cores_list, prefix=\"vhost\", no_pci=no_pci\n         )\n-        if rxq_txq is None:\n-            params = \" -- -i --nb-cores=%d --txd=1024 --rxd=1024\" % nb_cores\n-        else:\n-            params = \" -- -i --nb-cores=%d --txd=1024 --rxd=1024 --rxq=%d --txq=%d\" % (\n-                nb_cores,\n-                rxq_txq,\n-                rxq_txq,\n-            )\n-        if iova_mode:\n-            iova_parm = \" --iova=\" + iova_mode\n-        else:\n-            iova_parm = \"\"\n-        
self.command_line = testcmd + eal_params + vdev1 + vdev2 + iova_parm + params\n+        params = \" -- -i --nb-cores=2 --txd=1024 --rxd=1024\"\n+        self.command_line = testcmd + eal_params + vdev1 + vdev2 + params\n         self.pmd_vhost.execute_cmd(self.command_line, timeout=30)\n-        self.pmd_vhost.execute_cmd(\"vhost enable tx all\", timeout=30)\n         self.pmd_vhost.execute_cmd(\"start\", timeout=30)\n \n-    def start_vms(self, server_mode=False, opt_queue=None, vm_config=\"vhost_sample\"):\n+    def start_vms(self, vm_config=\"vhost_sample\"):\n         \"\"\"\n         start two VM, each VM has one virtio device\n         \"\"\"\n@@ -246,12 +110,7 @@ class TestVM2VMVirtioNetPerf(TestCase):\n             vm_info = VM(self.dut, \"vm%d\" % i, vm_config)\n             vm_params = {}\n             vm_params[\"driver\"] = \"vhost-user\"\n-            if not server_mode:\n-                vm_params[\"opt_path\"] = self.base_dir + \"/vhost-net%d\" % i\n-            else:\n-                vm_params[\"opt_path\"] = self.base_dir + \"/vhost-net%d\" % i + \",server\"\n-            if opt_queue is not None:\n-                vm_params[\"opt_queue\"] = opt_queue\n+            vm_params[\"opt_path\"] = self.base_dir + \"/vhost-net%d\" % i\n             vm_params[\"opt_mac\"] = \"52:54:00:00:00:0%d\" % (i + 1)\n             vm_params[\"opt_settings\"] = self.vm_args\n             vm_info.set_vm_device(**vm_params)\n@@ -265,23 +124,15 @@ class TestVM2VMVirtioNetPerf(TestCase):\n             self.vm_dut.append(vm_dut)\n             self.vm.append(vm_info)\n \n-    def config_vm_env(self, combined=False, rxq_txq=1):\n+    def config_vm_env(self):\n         \"\"\"\n         set virtio device IP and run arp protocal\n         \"\"\"\n         vm1_intf = self.vm_dut[0].ports_info[0][\"intf\"]\n         vm2_intf = self.vm_dut[1].ports_info[0][\"intf\"]\n-        if combined:\n-            self.vm_dut[0].send_expect(\n-                \"ethtool -L %s combined %d\" % (vm1_intf, rxq_txq), \"#\", 10\n-            )\n         self.vm_dut[0].send_expect(\n             \"ifconfig %s %s\" % (vm1_intf, self.virtio_ip1), \"#\", 10\n         )\n-        if combined:\n-            self.vm_dut[1].send_expect(\n-                \"ethtool -L %s combined %d\" % (vm2_intf, rxq_txq), \"#\", 10\n-            )\n         self.vm_dut[1].send_expect(\n             \"ifconfig %s %s\" % (vm2_intf, self.virtio_ip2), \"#\", 10\n         )\n@@ -292,87 +143,22 @@ class TestVM2VMVirtioNetPerf(TestCase):\n             \"arp -s %s %s\" % (self.virtio_ip1, self.virtio_mac1), \"#\", 10\n         )\n \n-    def prepare_test_env(\n-        self,\n-        cbdma=False,\n-        no_pci=True,\n-        client_mode=False,\n-        enable_queues=1,\n-        nb_cores=2,\n-        server_mode=False,\n-        opt_queue=None,\n-        combined=False,\n-        rxq_txq=None,\n-        iova_mode=\"\",\n-    ):\n-        \"\"\"\n-        start vhost testpmd and qemu, and config the vm env\n-        \"\"\"\n-        self.start_vhost_testpmd(\n-            cbdma=cbdma,\n-            no_pci=no_pci,\n-            client_mode=client_mode,\n-            enable_queues=enable_queues,\n-            nb_cores=nb_cores,\n-            rxq_txq=rxq_txq,\n-            iova_mode=iova_mode,\n-        )\n-        self.start_vms(server_mode=server_mode, opt_queue=opt_queue)\n-        self.config_vm_env(combined=combined, rxq_txq=rxq_txq)\n-\n     def start_iperf(self, iperf_mode=\"tso\"):\n         \"\"\"\n         run perf command between to vms\n     
    \"\"\"\n         # clear the port xstats before iperf\n         self.vhost.send_expect(\"clear port xstats all\", \"testpmd> \", 10)\n-\n-        # add -f g param, use Gbits/sec report teste result\n         if iperf_mode == \"tso\":\n-            iperf_server = \"iperf -s -i 1\"\n-            iperf_client = \"iperf -c 1.1.1.2 -i 1 -t 60\"\n+            server = \"iperf -s -i 1\"\n+            client = \"iperf -c 1.1.1.2 -i 1 -t 60\"\n         elif iperf_mode == \"ufo\":\n-            iperf_server = \"iperf -s -u -i 1\"\n-            iperf_client = \"iperf -c 1.1.1.2 -i 1 -t 30 -P 4 -u -b 1G -l 9000\"\n-        self.vm_dut[0].send_expect(\"%s > iperf_server.log &\" % iperf_server, \"\", 10)\n-        self.vm_dut[1].send_expect(\"%s > iperf_client.log &\" % iperf_client, \"\", 60)\n+            server = \"iperf -s -u -i 1\"\n+            client = \"iperf -c 1.1.1.2 -i 1 -t 60 -P 4 -u -b 1G -l 9000\"\n+        self.vm_dut[0].send_expect(\"%s > iperf_server.log &\" % server, \"\", 10)\n+        self.vm_dut[1].send_expect(\"%s > iperf_client.log &\" % client, \"\", 10)\n         time.sleep(90)\n \n-    def get_perf_result(self):\n-        \"\"\"\n-        get the iperf test result\n-        \"\"\"\n-        self.table_header = [\"Mode\", \"[M|G]bits/sec\"]\n-        self.result_table_create(self.table_header)\n-        self.vm_dut[0].send_expect(\"pkill iperf\", \"# \")\n-        self.vm_dut[1].session.copy_file_from(\"%s/iperf_client.log\" % self.dut.base_dir)\n-        fp = open(\"./iperf_client.log\")\n-        fmsg = fp.read()\n-        fp.close()\n-        # remove the server report info from msg\n-        index = fmsg.find(\"Server Report\")\n-        if index != -1:\n-            fmsg = fmsg[:index]\n-        iperfdata = re.compile(\"\\S*\\s*[M|G]bits/sec\").findall(fmsg)\n-        # the last data of iperf is the ave data from 0-30 sec\n-        self.verify(len(iperfdata) != 0, \"The iperf data between to vms is 0\")\n-        self.verify(\n-            (iperfdata[-1]).split()[1] == \"Gbits/sec\",\n-            \"The iperf data is %s,Can't reach Gbits/sec\" % iperfdata[-1],\n-        )\n-        self.logger.info(\"The iperf data between vms is %s\" % iperfdata[-1])\n-\n-        # put the result to table\n-        results_row = [\"vm2vm\", iperfdata[-1]]\n-        self.result_table_add(results_row)\n-\n-        # print iperf resut\n-        self.result_table_print()\n-        # rm the iperf log file in vm\n-        self.vm_dut[0].send_expect(\"rm iperf_server.log\", \"#\", 10)\n-        self.vm_dut[1].send_expect(\"rm iperf_client.log\", \"#\", 10)\n-        return float(iperfdata[-1].split()[0])\n-\n     def verify_xstats_info_on_vhost(self):\n         \"\"\"\n         check both 2VMs can receive and send big packets to each other\n@@ -390,16 +176,6 @@ class TestVM2VMVirtioNetPerf(TestCase):\n             int(tx_info.group(1)) > 0, \"Port 0 not forward packet greater than 1522\"\n         )\n \n-    def start_iperf_and_verify_vhost_xstats_info(self, iperf_mode=\"tso\"):\n-        \"\"\"\n-        start to send packets and verify vm can received data of iperf\n-        and verify the vhost can received big pkts in testpmd\n-        \"\"\"\n-        self.start_iperf(iperf_mode)\n-        iperfdata = self.get_perf_result()\n-        self.verify_xstats_info_on_vhost()\n-        return iperfdata\n-\n     def stop_all_apps(self):\n         for i in range(len(self.vm)):\n             self.vm[i].stop()\n@@ -434,557 +210,76 @@ class TestVM2VMVirtioNetPerf(TestCase):\n             
\"tx-tcp6-segmentation in vm not right\",\n         )\n \n-    def check_scp_file_valid_between_vms(self, file_size=1024):\n-        \"\"\"\n-        scp file form VM1 to VM2, check the data is valid\n-        \"\"\"\n-        # default file_size=1024K\n-        data = \"\"\n-        for char in range(file_size * 1024):\n-            data += random.choice(self.random_string)\n-        self.vm_dut[0].send_expect('echo \"%s\" > /tmp/payload' % data, \"# \")\n-        # scp this file to vm1\n-        out = self.vm_dut[1].send_command(\n-            \"scp root@%s:/tmp/payload /root\" % self.virtio_ip1, timeout=5\n-        )\n-        if \"Are you sure you want to continue connecting\" in out:\n-            self.vm_dut[1].send_command(\"yes\", timeout=3)\n-        self.vm_dut[1].send_command(self.vm[0].password, timeout=3)\n-        # get the file info in vm1, and check it valid\n-        md5_send = self.vm_dut[0].send_expect(\"md5sum /tmp/payload\", \"# \")\n-        md5_revd = self.vm_dut[1].send_expect(\"md5sum /root/payload\", \"# \")\n-        md5_send = md5_send[: md5_send.find(\" \")]\n-        md5_revd = md5_revd[: md5_revd.find(\" \")]\n-        self.verify(\n-            md5_send == md5_revd, \"the received file is different with send file\"\n-        )\n-\n     def test_vm2vm_split_ring_iperf_with_tso(self):\n         \"\"\"\n         TestCase1: VM2VM split ring vhost-user/virtio-net test with tcp traffic\n         \"\"\"\n         self.vm_args = \"disable-modern=false,mrg_rxbuf=off,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on\"\n-        self.prepare_test_env(\n-            cbdma=False,\n-            no_pci=True,\n-            client_mode=False,\n-            enable_queues=1,\n-            nb_cores=2,\n-            server_mode=False,\n-            opt_queue=1,\n-            combined=False,\n-            rxq_txq=None,\n-        )\n-        self.start_iperf_and_verify_vhost_xstats_info(iperf_mode=\"tso\")\n-\n-    def test_vm2vm_split_ring_with_tso_and_cbdma_enable(self):\n-        \"\"\"\n-        TestCase2: VM2VM split ring vhost-user/virtio-net CBDMA enable test with tcp traffic\n-        \"\"\"\n-        self.vm_args = \"disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on\"\n-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)\n-        self.prepare_test_env(\n-            cbdma=True,\n-            no_pci=False,\n-            client_mode=False,\n-            enable_queues=1,\n-            nb_cores=2,\n-            server_mode=False,\n-            opt_queue=1,\n-            combined=False,\n-            rxq_txq=None,\n-        )\n-        cbdma_value = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode=\"tso\")\n-        expect_value = self.get_suite_cfg()[\"expected_throughput\"][\n-            \"test_vm2vm_split_ring_iperf_with_tso\"\n-        ]\n-        self.verify(\n-            cbdma_value > expect_value,\n-            \"CBDMA enable performance: %s is lower than CBDMA disable: %s.\"\n-            % (cbdma_value, expect_value),\n-        )\n+        self.start_vhost_testpmd()\n+        self.start_vms()\n+        self.config_vm_env()\n+        self.start_iperf(iperf_mode='tso')\n+        self.verify_xstats_info_on_vhost()\n \n     def test_vm2vm_split_ring_iperf_with_ufo(self):\n         \"\"\"\n-        TestCase3: VM2VM split ring vhost-user/virtio-net test with udp traffic\n+        TestCase2: VM2VM split ring vhost-user/virtio-net test with udp traffic\n         \"\"\"\n         self.vm_args = 
\"disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on\"\n-        self.prepare_test_env(\n-            cbdma=False,\n-            no_pci=True,\n-            client_mode=False,\n-            enable_queues=1,\n-            nb_cores=1,\n-            server_mode=False,\n-            opt_queue=1,\n-            combined=False,\n-            rxq_txq=None,\n-        )\n-        self.start_iperf_and_verify_vhost_xstats_info(iperf_mode=\"ufo\")\n+        self.start_vhost_testpmd()\n+        self.start_vms()\n+        self.config_vm_env()\n+        self.start_iperf(iperf_mode='ufo')\n+        self.verify_xstats_info_on_vhost()\n \n     def test_vm2vm_split_ring_device_capbility(self):\n         \"\"\"\n-        TestCase4: Check split ring virtio-net device capability\n+        TestCase3: Check split ring virtio-net device capability\n         \"\"\"\n         self.vm_args = \"disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on\"\n-        self.start_vhost_testpmd(\n-            cbdma=False,\n-            no_pci=True,\n-            client_mode=False,\n-            enable_queues=1,\n-            nb_cores=2,\n-            rxq_txq=None,\n-        )\n+        self.start_vhost_testpmd()\n         self.start_vms()\n         self.offload_capbility_check(self.vm_dut[0])\n         self.offload_capbility_check(self.vm_dut[1])\n \n-    def test_vm2vm_split_ring_with_mergeable_path_check_large_packet_and_cbdma_enable_8queue(\n-        self,\n-    ):\n-        \"\"\"\n-        TestCase5: VM2VM virtio-net split ring mergeable CBDMA enable test with large packet payload valid check\n-        \"\"\"\n-        ipef_result = []\n-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)\n-\n-        self.logger.info(\"Launch vhost with CBDMA and with 8 queue with VA mode\")\n-        self.vm_args = \"disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on\"\n-        self.prepare_test_env(\n-            cbdma=True,\n-            no_pci=False,\n-            client_mode=True,\n-            enable_queues=8,\n-            nb_cores=4,\n-            server_mode=True,\n-            opt_queue=8,\n-            combined=True,\n-            rxq_txq=8,\n-            iova_mode=\"va\",\n-        )\n-        self.check_scp_file_valid_between_vms()\n-        iperf_data_cbdma_enable_8_queue = self.start_iperf_and_verify_vhost_xstats_info(\n-            iperf_mode=\"tso\"\n-        )\n-        ipef_result.append(\n-            [\n-                \"Enable\",\n-                \"mergeable path with VA mode\",\n-                8,\n-                iperf_data_cbdma_enable_8_queue,\n-            ]\n-        )\n-\n-        self.logger.info(\"Re-launch and exchange CBDMA and with 8 queue with VA mode\")\n-        self.vhost.send_expect(\"quit\", \"# \", 30)\n-        self.start_vhost_testpmd(\n-            cbdma=True,\n-            no_pci=False,\n-            client_mode=True,\n-            enable_queues=8,\n-            nb_cores=4,\n-            rxq_txq=8,\n-            exchange_cbdma=True,\n-            iova_mode=\"va\",\n-        )\n-        self.check_scp_file_valid_between_vms()\n-        iperf_data_cbdma_enable_8_queue_exchange = (\n-            self.start_iperf_and_verify_vhost_xstats_info(iperf_mode=\"tso\")\n-        )\n-        ipef_result.append(\n-            [\n-                
\"Disable\",\n-                \"mergeable path exchange CBDMA with VA mode\",\n-                8,\n-                iperf_data_cbdma_enable_8_queue_exchange,\n-            ]\n-        )\n-\n-        # This test step need to test on 1G guest hugepage ENV.\n-        if not self.check_2m_env:\n-            self.logger.info(\n-                \"Re-launch and exchange CBDMA and with 8 queue with PA mode\"\n-            )\n-            self.vhost.send_expect(\"quit\", \"# \", 30)\n-            self.start_vhost_testpmd(\n-                cbdma=True,\n-                no_pci=False,\n-                client_mode=True,\n-                enable_queues=8,\n-                nb_cores=4,\n-                rxq_txq=8,\n-                exchange_cbdma=True,\n-                iova_mode=\"pa\",\n-            )\n-            self.check_scp_file_valid_between_vms()\n-            iperf_data_cbdma_enable_8_queue_exchange_pa = (\n-                self.start_iperf_and_verify_vhost_xstats_info(iperf_mode=\"tso\")\n-            )\n-            ipef_result.append(\n-                [\n-                    \"Disable\",\n-                    \"mergeable path exchange CBDMA with PA mode\",\n-                    8,\n-                    iperf_data_cbdma_enable_8_queue_exchange_pa,\n-                ]\n-            )\n-\n-        self.logger.info(\"Re-launch without CBDMA and with 4 queue\")\n-        self.vhost.send_expect(\"quit\", \"# \", 30)\n-        self.start_vhost_testpmd(\n-            cbdma=False,\n-            no_pci=False,\n-            client_mode=True,\n-            enable_queues=4,\n-            nb_cores=4,\n-            rxq_txq=4,\n-        )\n-        self.config_vm_env(combined=True, rxq_txq=4)\n-        self.check_scp_file_valid_between_vms()\n-        iperf_data_cbdma_disable_4_queue = (\n-            self.start_iperf_and_verify_vhost_xstats_info(iperf_mode=\"tso\")\n-        )\n-        ipef_result.append(\n-            [\n-                \"Disable\",\n-                \"mergeable path without CBDMA with 4 queue\",\n-                4,\n-                iperf_data_cbdma_disable_4_queue,\n-            ]\n-        )\n-\n-        self.logger.info(\"Re-launch without CBDMA and with 1 queue\")\n-        self.vhost.send_expect(\"quit\", \"# \", 30)\n-        self.start_vhost_testpmd(\n-            cbdma=False,\n-            no_pci=False,\n-            client_mode=True,\n-            enable_queues=4,\n-            nb_cores=4,\n-            rxq_txq=1,\n-        )\n-        self.config_vm_env(combined=True, rxq_txq=1)\n-        self.check_scp_file_valid_between_vms()\n-        iperf_data_cbdma_disable_1_queue = (\n-            self.start_iperf_and_verify_vhost_xstats_info(iperf_mode=\"tso\")\n-        )\n-        ipef_result.append(\n-            [\n-                \"Disable\",\n-                \"mergeable path without CBDMA with 1 queue\",\n-                1,\n-                iperf_data_cbdma_disable_1_queue,\n-            ]\n-        )\n-\n-        self.table_header = [\"CBDMA Enable/Disable\", \"Mode\", \"rxq/txq\", \"Gbits/sec\"]\n-        self.result_table_create(self.table_header)\n-        for table_row in ipef_result:\n-            self.result_table_add(table_row)\n-        self.result_table_print()\n-        self.verify(\n-            iperf_data_cbdma_enable_8_queue > iperf_data_cbdma_disable_4_queue,\n-            \"CMDMA enable: %s is lower than CBDMA disable: %s\"\n-            % (iperf_data_cbdma_enable_8_queue, iperf_data_cbdma_disable_4_queue),\n-        )\n-\n-    def 
test_vm2vm_split_ring_with_no_mergeable_path_check_large_packet_and_cbdma_enable_8queue(\n-        self,\n-    ):\n-        \"\"\"\n-        TestCase6: VM2VM virtio-net split ring non-mergeable CBDMA enable test with large packet payload valid check\n-        \"\"\"\n-        ipef_result = []\n-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)\n-\n-        self.logger.info(\"Launch vhost-testpmd with CBDMA and used 8 queue\")\n-        self.vm_args = \"disable-modern=false,mrg_rxbuf=off,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on\"\n-        self.prepare_test_env(\n-            cbdma=True,\n-            no_pci=False,\n-            client_mode=True,\n-            enable_queues=8,\n-            nb_cores=4,\n-            server_mode=True,\n-            opt_queue=8,\n-            combined=True,\n-            rxq_txq=8,\n-        )\n-        self.check_scp_file_valid_between_vms()\n-        iperf_data_cbdma_enable_8_queue = self.start_iperf_and_verify_vhost_xstats_info(\n-            iperf_mode=\"tso\"\n-        )\n-        ipef_result.append(\n-            [\"Enable\", \"no-mergeable path\", 8, iperf_data_cbdma_enable_8_queue]\n-        )\n-\n-        self.logger.info(\"Re-launch without CBDMA and used 8 queue\")\n-        self.vhost.send_expect(\"quit\", \"# \", 30)\n-        self.start_vhost_testpmd(\n-            cbdma=False,\n-            no_pci=False,\n-            client_mode=True,\n-            enable_queues=8,\n-            nb_cores=4,\n-            rxq_txq=8,\n-        )\n-        self.check_scp_file_valid_between_vms()\n-        iperf_data_cbdma_disable_8_queue = (\n-            self.start_iperf_and_verify_vhost_xstats_info(iperf_mode=\"tso\")\n-        )\n-        ipef_result.append(\n-            [\"Disable\", \"no-mergeable path\", 8, iperf_data_cbdma_disable_8_queue]\n-        )\n-\n-        self.logger.info(\"Re-launch without CBDMA and used 1 queue\")\n-        self.vhost.send_expect(\"quit\", \"# \", 30)\n-        self.start_vhost_testpmd(\n-            cbdma=False,\n-            no_pci=False,\n-            client_mode=True,\n-            enable_queues=8,\n-            nb_cores=4,\n-            rxq_txq=1,\n-        )\n-        self.config_vm_env(combined=True, rxq_txq=1)\n-        self.check_scp_file_valid_between_vms()\n-        iperf_data_cbdma_disable_1_queue = (\n-            self.start_iperf_and_verify_vhost_xstats_info(iperf_mode=\"tso\")\n-        )\n-        ipef_result.append(\n-            [\"Disable\", \"no-mergeable path\", 1, iperf_data_cbdma_disable_1_queue]\n-        )\n-\n-        self.table_header = [\"CBDMA Enable/Disable\", \"Mode\", \"rxq/txq\", \"Gbits/sec\"]\n-        self.result_table_create(self.table_header)\n-        for table_row in ipef_result:\n-            self.result_table_add(table_row)\n-        self.result_table_print()\n-        self.verify(\n-            iperf_data_cbdma_enable_8_queue > iperf_data_cbdma_disable_8_queue,\n-            \"CMDMA enable: %s is lower than CBDMA disable: %s\"\n-            % (iperf_data_cbdma_enable_8_queue, iperf_data_cbdma_disable_8_queue),\n-        )\n-\n     def test_vm2vm_packed_ring_iperf_with_tso(self):\n         \"\"\"\n-        TestCase7: VM2VM packed ring vhost-user/virtio-net test with tcp traffic\n-        \"\"\"\n-        self.vm_args = \"disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on\"\n-        self.prepare_test_env(\n-            
cbdma=False,\n-            no_pci=True,\n-            client_mode=False,\n-            enable_queues=1,\n-            nb_cores=2,\n-            server_mode=False,\n-            opt_queue=1,\n-            combined=False,\n-            rxq_txq=None,\n-        )\n-        self.start_iperf_and_verify_vhost_xstats_info(iperf_mode=\"tso\")\n-\n-    def test_vm2vm_packed_ring_iperf_with_tso_and_cbdma_enable(self):\n-        \"\"\"\n-        TestCase8: VM2VM packed ring vhost-user/virtio-net CBDMA enable test with tcp traffic\n+        TestCase4: VM2VM packed ring vhost-user/virtio-net test with tcp traffic\n         \"\"\"\n-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)\n         self.vm_args = \"disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on\"\n-        self.prepare_test_env(\n-            cbdma=True,\n-            no_pci=False,\n-            client_mode=False,\n-            enable_queues=1,\n-            nb_cores=2,\n-            server_mode=False,\n-            opt_queue=None,\n-            combined=False,\n-            rxq_txq=None,\n-        )\n-        self.start_iperf_and_verify_vhost_xstats_info(iperf_mode=\"tso\")\n+        self.start_vhost_testpmd()\n+        self.start_vms()\n+        self.config_vm_env()\n+        self.start_iperf(iperf_mode='tso')\n+        self.verify_xstats_info_on_vhost()\n \n     def test_vm2vm_packed_ring_iperf_with_ufo(self):\n         \"\"\"\n-        Test Case 9: VM2VM packed ring vhost-user/virtio-net test with udp trafficc\n+        Test Case 5: VM2VM packed ring vhost-user/virtio-net test with udp trafficc\n         \"\"\"\n         self.vm_args = \"disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on\"\n-        self.prepare_test_env(\n-            cbdma=False,\n-            no_pci=True,\n-            client_mode=False,\n-            enable_queues=1,\n-            nb_cores=2,\n-            server_mode=False,\n-            opt_queue=None,\n-            combined=False,\n-            rxq_txq=None,\n-        )\n-        self.start_iperf_and_verify_vhost_xstats_info(iperf_mode=\"ufo\")\n+        self.start_vhost_testpmd()\n+        self.start_vms()\n+        self.config_vm_env()\n+        self.start_iperf(iperf_mode='ufo')\n+        self.verify_xstats_info_on_vhost()\n \n     def test_vm2vm_packed_ring_device_capbility(self):\n         \"\"\"\n-        Test Case 10: Check packed ring virtio-net device capability\n+        Test Case 6: Check packed ring virtio-net device capability\n         \"\"\"\n         self.vm_args = \"disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on\"\n-        self.start_vhost_testpmd(\n-            cbdma=False,\n-            no_pci=True,\n-            client_mode=False,\n-            enable_queues=1,\n-            nb_cores=2,\n-            rxq_txq=None,\n-        )\n+        self.start_vhost_testpmd()\n         self.start_vms()\n         self.offload_capbility_check(self.vm_dut[0])\n         self.offload_capbility_check(self.vm_dut[1])\n \n-    def test_vm2vm_packed_ring_with_mergeable_path_check_large_packet_and_cbdma_enable_8queue(\n-        self,\n-    ):\n-        \"\"\"\n-        Test Case 11: VM2VM virtio-net packed ring mergeable 8 queues CBDMA enable test with large packet payload valid check\n-        \"\"\"\n-        ipef_result = []\n-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)\n-\n-        
self.logger.info(\"Launch vhost-testpmd with CBDMA and used 8 queue\")\n-        self.vm_args = \"disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on\"\n-        self.prepare_test_env(\n-            cbdma=True,\n-            no_pci=False,\n-            client_mode=False,\n-            enable_queues=8,\n-            nb_cores=4,\n-            server_mode=False,\n-            opt_queue=8,\n-            combined=True,\n-            rxq_txq=8,\n-        )\n-        for i in range(0, 5):\n-            self.check_scp_file_valid_between_vms()\n-            iperf_data_cbdma_enable_8_queue = (\n-                self.start_iperf_and_verify_vhost_xstats_info(iperf_mode=\"tso\")\n-            )\n-            ipef_result.append(\n-                [\"Enable_%d\" % i, \"mergeable path\", 8, iperf_data_cbdma_enable_8_queue]\n-            )\n-            self.table_header = [\"CBDMA Enable/Disable\", \"Mode\", \"rxq/txq\", \"Gbits/sec\"]\n-            self.result_table_create(self.table_header)\n-        for table_row in ipef_result:\n-            self.result_table_add(table_row)\n-        self.result_table_print()\n-\n-    def test_vm2vm_packed_ring_with_no_mergeable_path_check_large_packet_and_cbdma_enable_8queue(\n-        self,\n-    ):\n-        \"\"\"\n-        Test Case 12: VM2VM virtio-net packed ring non-mergeable 8 queues CBDMA enable test with large packet payload valid check\n-        \"\"\"\n-        ipef_result = []\n-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)\n-\n-        self.logger.info(\"Launch vhost-testpmd with CBDMA and used 8 queue\")\n-        self.vm_args = \"disable-modern=false,mrg_rxbuf=off,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on\"\n-        self.prepare_test_env(\n-            cbdma=True,\n-            no_pci=False,\n-            client_mode=False,\n-            enable_queues=8,\n-            nb_cores=4,\n-            server_mode=False,\n-            opt_queue=8,\n-            combined=True,\n-            rxq_txq=8,\n-        )\n-        for i in range(0, 5):\n-            self.check_scp_file_valid_between_vms()\n-            iperf_data_cbdma_enable_8_queue = (\n-                self.start_iperf_and_verify_vhost_xstats_info(iperf_mode=\"tso\")\n-            )\n-            ipef_result.append(\n-                [\"Enable\", \"mergeable path\", 8, iperf_data_cbdma_enable_8_queue]\n-            )\n-            self.table_header = [\"CBDMA Enable/Disable\", \"Mode\", \"rxq/txq\", \"Gbits/sec\"]\n-            self.result_table_create(self.table_header)\n-        for table_row in ipef_result:\n-            self.result_table_add(table_row)\n-        self.result_table_print()\n-\n-    def test_vm2vm_packed_ring_with_tso_and_cbdma_enable_iova_pa(self):\n-        \"\"\"\n-        Test Case 13: VM2VM packed ring vhost-user/virtio-net CBDMA enable test with tcp traffic when set iova=pa\n-        \"\"\"\n-        # This test case need to test on 1G guest hugepage ENV.\n-        self.vm_args = \"disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,packed=on\"\n-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)\n-        self.prepare_test_env(\n-            cbdma=True,\n-            no_pci=False,\n-            client_mode=False,\n-            enable_queues=1,\n-            nb_cores=2,\n-            server_mode=False,\n-        
    opt_queue=1,\n-            combined=False,\n-            rxq_txq=None,\n-            iova_mode=\"pa\",\n-        )\n-        self.check_scp_file_valid_between_vms()\n-        cbdma_value = self.start_iperf_and_verify_vhost_xstats_info(iperf_mode=\"tso\")\n-        expect_value = self.get_suite_cfg()[\"expected_throughput\"][\n-            \"test_vm2vm_split_ring_iperf_with_tso\"\n-        ]\n-        self.verify(\n-            cbdma_value > expect_value,\n-            \"CBDMA enable performance: %s is lower than CBDMA disable: %s.\"\n-            % (cbdma_value, expect_value),\n-        )\n-\n-    def test_vm2vm_packed_ring_with_mergeable_path_check_large_packet_and_cbdma_enable_8queue_iova_pa(\n-        self,\n-    ):\n-        \"\"\"\n-        Test Case 14: VM2VM virtio-net packed ring mergeable 8 queues CBDMA enable and PA mode test with large packet payload valid check\n-        \"\"\"\n-        # This test case need to test on 1G guest hugepage ENV.\n-        ipef_result = []\n-        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)\n-\n-        self.logger.info(\"Launch vhost-testpmd with CBDMA and used 8 queue\")\n-        self.vm_args = \"disable-modern=false,mrg_rxbuf=on,mq=on,vectors=40,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on,guest_ufo=on,host_ufo=on,packed=on\"\n-        self.prepare_test_env(\n-            cbdma=True,\n-            no_pci=False,\n-            client_mode=False,\n-            enable_queues=8,\n-            nb_cores=4,\n-            server_mode=False,\n-            opt_queue=8,\n-            combined=True,\n-            rxq_txq=8,\n-            iova_mode=\"pa\",\n-        )\n-        for i in range(0, 5):\n-            self.check_scp_file_valid_between_vms()\n-            iperf_data_cbdma_enable_8_queue = (\n-                self.start_iperf_and_verify_vhost_xstats_info(iperf_mode=\"tso\")\n-            )\n-            ipef_result.append(\n-                [\"Enable_%d\" % i, \"mergeable path\", 8, iperf_data_cbdma_enable_8_queue]\n-            )\n-            self.table_header = [\"CBDMA Enable/Disable\", \"Mode\", \"rxq/txq\", \"Gbits/sec\"]\n-            self.result_table_create(self.table_header)\n-        for table_row in ipef_result:\n-            self.result_table_add(table_row)\n-        self.result_table_print()\n-\n     def tear_down(self):\n         \"\"\"\n         run after each test case.\n         \"\"\"\n         self.stop_all_apps()\n         self.dut.kill_all()\n-        self.bind_cbdma_device_to_kernel()\n \n     def tear_down_all(self):\n         \"\"\"\n",
    "prefixes": [
        "V1",
        "2/5"
    ]
}
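A hedged sketch of consuming the response above: several fields (`mbox`, `series[].mbox`, `comments`, `checks`) are themselves URLs that can be followed with plain GETs. The file name `patch.mbox` is an arbitrary choice for illustration.

```python
import requests

patch = requests.get("http://patchwork.dpdk.org/api/patches/109174/").json()

# `mbox` is an mbox rendering of the patch, suitable for `git am`.
mbox = requests.get(patch["mbox"]).text
with open("patch.mbox", "w") as f:
    f.write(mbox)

# Each entry in `series` links back to the containing series; its own
# `mbox` URL bundles every patch in that series.
for series in patch["series"]:
    print(series["version"], series["name"], series["mbox"])

# `comments` and `checks` are API URLs and can be fetched the same way.
checks = requests.get(patch["checks"]).json()
print(len(checks), "checks recorded")
```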