get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
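
A usage sketch (not part of the captured response): the endpoint can be driven from Python with the third-party requests library. The API token and the new state below are placeholders; Patchwork accepts PUT/PATCH only from an authenticated user with maintainer rights on the project.

import requests

BASE_URL = "http://patchwork.dpdk.org/api"
PATCH_ID = 115185
API_TOKEN = "0123456789abcdef"  # placeholder; tokens are generated in the Patchwork web UI

# get: show a patch (read access needs no authentication)
resp = requests.get(f"{BASE_URL}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# patch: partially update a patch -- send only the fields being changed
resp = requests.patch(
    f"{BASE_URL}/patches/{PATCH_ID}/",
    headers={"Authorization": f"Token {API_TOKEN}"},
    json={"state": "accepted"},  # placeholder state transition
)
print(resp.status_code, resp.reason)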

GET /api/patches/115185/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 115185,
    "url": "http://patchwork.dpdk.org/api/patches/115185/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dts/patch/20220817020050.4016027-1-weix.ling@intel.com/",
    "project": {
        "id": 3,
        "url": "http://patchwork.dpdk.org/api/projects/3/?format=api",
        "name": "DTS",
        "link_name": "dts",
        "list_id": "dts.dpdk.org",
        "list_email": "dts@dpdk.org",
        "web_url": "",
        "scm_url": "git://dpdk.org/tools/dts",
        "webscm_url": "http://git.dpdk.org/tools/dts/",
        "list_archive_url": "https://inbox.dpdk.org/dts",
        "list_archive_url_format": "https://inbox.dpdk.org/dts/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220817020050.4016027-1-weix.ling@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dts/20220817020050.4016027-1-weix.ling@intel.com",
    "date": "2022-08-17T02:00:50",
    "name": "[V3,2/3] tests/basic_4k_pages_cbdma: modify testsuite to test virtio dequeue",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "cca1ca053430ffa4158272bc52f5ea1a17d4c608",
    "submitter": {
        "id": 1828,
        "url": "http://patchwork.dpdk.org/api/people/1828/?format=api",
        "name": "Ling, WeiX",
        "email": "weix.ling@intel.com"
    },
    "delegate": null,
    "mbox": "http://patchwork.dpdk.org/project/dts/patch/20220817020050.4016027-1-weix.ling@intel.com/mbox/",
    "series": [
        {
            "id": 24329,
            "url": "http://patchwork.dpdk.org/api/series/24329/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dts/list/?series=24329",
            "date": "2022-08-17T02:00:18",
            "name": "modify basic_4k_pages_cbdma to test virtio dequeue",
            "version": 3,
            "mbox": "http://patchwork.dpdk.org/series/24329/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/115185/comments/",
    "check": "pending",
    "checks": "http://patchwork.dpdk.org/api/patches/115185/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dts-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 25BA5A0032;\n\tWed, 17 Aug 2022 04:05:09 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 1B14B40DDC;\n\tWed, 17 Aug 2022 04:05:09 +0200 (CEST)",
            "from mga17.intel.com (mga17.intel.com [192.55.52.151])\n by mails.dpdk.org (Postfix) with ESMTP id 22326400D6\n for <dts@dpdk.org>; Wed, 17 Aug 2022 04:05:06 +0200 (CEST)",
            "from orsmga006.jf.intel.com ([10.7.209.51])\n by fmsmga107.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 16 Aug 2022 19:05:06 -0700",
            "from unknown (HELO localhost.localdomain) ([10.239.252.222])\n by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 16 Aug 2022 19:05:03 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1660701907; x=1692237907;\n h=from:to:cc:subject:date:message-id:mime-version:\n content-transfer-encoding;\n bh=r+E+oY6VmnQGkkK6taENRRDmxHm9dUrFm7/nB34tnEI=;\n b=SgkrByGQJHKJS4prKHvgYvu97ZEZ6XyiBBbg54fjs5nXaRE8BxOXE/JO\n K2cb2WuJQxrBl/N7woPjNqRQ86eauZZhH3qcGjpD3c8FkCJolNEhU12qC\n SBbObKr7CA98ITmRtzrEGTfM6owzV/Z8cm/moJyW3AaCNjWUNy8hPig95\n HnyvB6LuXn8ju9BWbk3Ua9goRUXCOQqkzFAJXi5NzQg29lv+q8bFjrds5\n dP43bmd2lBO7k3qkW4GyvkydjIz/PWmifm0f0WFUiei+4dlXRfogkkQjR\n XmujqI7MLGBAc+xafbfktLkLaBzgDcmXVCMsli0XVt92HSJXjLgo0nTdx A==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6400,9594,10441\"; a=\"272771462\"",
            "E=Sophos;i=\"5.93,242,1654585200\";\n d=\"scan'208,223\";a=\"272771462\"",
            "E=Sophos;i=\"5.93,242,1654585200\";\n d=\"scan'208,223\";a=\"583563033\""
        ],
        "From": "Wei Ling <weix.ling@intel.com>",
        "To": "dts@dpdk.org",
        "Cc": "Wei Ling <weix.ling@intel.com>",
        "Subject": "[dts][PATCH V3 2/3] tests/basic_4k_pages_cbdma: modify testsuite to\n test virtio dequeue",
        "Date": "Tue, 16 Aug 2022 22:00:50 -0400",
        "Message-Id": "<20220817020050.4016027-1-weix.ling@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dts@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "test suite reviews and discussions <dts.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dts>,\n <mailto:dts-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dts/>",
        "List-Post": "<mailto:dts@dpdk.org>",
        "List-Help": "<mailto:dts-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dts>,\n <mailto:dts-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dts-bounces@dpdk.org"
    },
    "content": "From DPDK-22.07, virtio support async dequeue for split and packed ring\npath, so modify basic_4k_pages_cbdma testsuite to test the split \nand packed ring async dequeue feature.\n\nSigned-off-by: Wei Ling <weix.ling@intel.com>\n---\n tests/TestSuite_basic_4k_pages_cbdma.py | 976 +++++++++++++++++++++---\n 1 file changed, 886 insertions(+), 90 deletions(-)",
    "diff": "diff --git a/tests/TestSuite_basic_4k_pages_cbdma.py b/tests/TestSuite_basic_4k_pages_cbdma.py\nindex 45e78f1e..e69a3d4f 100644\n--- a/tests/TestSuite_basic_4k_pages_cbdma.py\n+++ b/tests/TestSuite_basic_4k_pages_cbdma.py\n@@ -7,18 +7,28 @@ DPDK Test suite.\n vhost/virtio-user pvp with 4K pages.\n \"\"\"\n \n+import os\n+import random\n import re\n+import string\n import time\n \n-import framework.utils as utils\n+from framework.config import VirtConf\n from framework.packet import Packet\n from framework.pktgen import PacketGeneratorHelper\n from framework.pmd_output import PmdOutput\n+from framework.qemu_kvm import QEMUKvm\n+from framework.settings import CONFIG_ROOT_PATH\n from framework.test_case import TestCase\n-from framework.virt_common import VM\n \n \n class TestBasic4kPagesCbdma(TestCase):\n+    def get_virt_config(self, vm_name):\n+        conf = VirtConf(CONFIG_ROOT_PATH + os.sep + self.suite_name + \".cfg\")\n+        conf.load_virt_config(vm_name)\n+        virt_conf = conf.get_virt_config()\n+        return virt_conf\n+\n     def set_up_all(self):\n         \"\"\"\n         Run at the start of each test suite.\n@@ -56,13 +66,51 @@ class TestBasic4kPagesCbdma(TestCase):\n         self.virtio_mac1 = \"52:54:00:00:00:01\"\n         self.virtio_mac2 = \"52:54:00:00:00:02\"\n         self.base_dir = self.dut.base_dir.replace(\"~\", \"/root\")\n+        self.random_string = string.ascii_letters + string.digits\n+\n+        self.vm0_virt_conf = self.get_virt_config(vm_name=\"vm0\")\n+        for param in self.vm0_virt_conf:\n+            if \"cpu\" in param.keys():\n+                self.vm0_cpupin = param[\"cpu\"][0][\"cpupin\"]\n+                self.vm0_lcore = \",\".join(list(self.vm0_cpupin.split()))\n+                self.vm0_lcore_smp = len(list(self.vm0_cpupin.split()))\n+            if \"qemu\" in param.keys():\n+                self.vm0_qemu_path = param[\"qemu\"][0][\"path\"]\n+            if \"mem\" in param.keys():\n+                self.vm0_mem_size = param[\"mem\"][0][\"size\"]\n+            if \"disk\" in param.keys():\n+                self.vm0_image_path = param[\"disk\"][0][\"file\"]\n+            if \"vnc\" in param.keys():\n+                self.vm0_vnc = param[\"vnc\"][0][\"displayNum\"]\n+            if \"login\" in param.keys():\n+                self.vm0_user = param[\"login\"][0][\"user\"]\n+                self.vm0_passwd = param[\"login\"][0][\"password\"]\n+\n+        self.vm1_virt_conf = self.get_virt_config(vm_name=\"vm1\")\n+        for param in self.vm1_virt_conf:\n+            if \"cpu\" in param.keys():\n+                self.vm1_cpupin = param[\"cpu\"][0][\"cpupin\"]\n+                self.vm1_lcore = \",\".join(list(self.vm1_cpupin.split()))\n+                self.vm1_lcore_smp = len(list(self.vm1_cpupin.split()))\n+            if \"qemu\" in param.keys():\n+                self.vm1_qemu_path = param[\"qemu\"][0][\"path\"]\n+            if \"mem\" in param.keys():\n+                self.vm1_mem_size = param[\"mem\"][0][\"size\"]\n+            if \"disk\" in param.keys():\n+                self.vm1_image_path = param[\"disk\"][0][\"file\"]\n+            if \"vnc\" in param.keys():\n+                self.vm1_vnc = param[\"vnc\"][0][\"displayNum\"]\n+            if \"login\" in param.keys():\n+                self.vm1_user = param[\"login\"][0][\"user\"]\n+                self.vm1_passwd = param[\"login\"][0][\"password\"]\n \n     def set_up(self):\n         \"\"\"\n         Run before each test case.\n         \"\"\"\n-        
self.dut.send_expect(\"rm -rf /tmp/vhost-net*\", \"# \")\n         self.dut.send_expect(\"killall -s INT %s\" % self.testpmd_name, \"# \")\n+        self.dut.send_expect(\"killall -s INT qemu-system-x86_64\", \"#\")\n+        self.dut.send_expect(\"rm -rf /tmp/vhost-net*\", \"# \")\n         self.umount_tmpfs_for_4k()\n         # Prepare the result table\n         self.table_header = [\"Frame\"]\n@@ -73,6 +121,123 @@ class TestBasic4kPagesCbdma(TestCase):\n         self.result_table_create(self.table_header)\n         self.vm_dut = []\n         self.vm = []\n+        self.packed = False\n+\n+    def start_vm(self, packed=False, queues=1, server=False):\n+        if packed:\n+            packed_param = \",packed=on\"\n+        else:\n+            packed_param = \"\"\n+\n+        if server:\n+            server = \",server\"\n+        else:\n+            server = \"\"\n+\n+        self.qemu_cmd0 = (\n+            f\"taskset -c {self.vm0_lcore} {self.vm0_qemu_path} -name vm0 -enable-kvm \"\n+            f\"-pidfile /tmp/.vm0.pid -daemonize -monitor unix:/tmp/vm0_monitor.sock,server,nowait \"\n+            f\"-netdev user,id=nttsip1,hostfwd=tcp:%s:6000-:22 -device e1000,netdev=nttsip1  \"\n+            f\"-chardev socket,id=char0,path=/root/dpdk/vhost-net0{server} \"\n+            f\"-netdev type=vhost-user,id=netdev0,chardev=char0,vhostforce,queues={queues} \"\n+            f\"-device virtio-net-pci,netdev=netdev0,mac=%s,\"\n+            f\"disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on{packed_param} \"\n+            f\"-cpu host -smp {self.vm0_lcore_smp} -m {self.vm0_mem_size} -object memory-backend-file,id=mem,size={self.vm0_mem_size}M,mem-path=/mnt/tmpfs_nohuge0,share=on \"\n+            f\"-numa node,memdev=mem -mem-prealloc -drive file={self.vm0_image_path} \"\n+            f\"-chardev socket,path=/tmp/vm0_qga0.sock,server,nowait,id=vm0_qga0 -device virtio-serial \"\n+            f\"-device virtserialport,chardev=vm0_qga0,name=org.qemu.guest_agent.0 -vnc :{self.vm0_vnc} \"\n+        )\n+\n+        self.qemu_cmd1 = (\n+            f\"taskset -c {self.vm1_lcore} {self.vm1_qemu_path} -name vm1 -enable-kvm \"\n+            f\"-pidfile /tmp/.vm1.pid -daemonize -monitor unix:/tmp/vm1_monitor.sock,server,nowait \"\n+            f\"-netdev user,id=nttsip1,hostfwd=tcp:%s:6001-:22 -device e1000,netdev=nttsip1  \"\n+            f\"-chardev socket,id=char0,path=/root/dpdk/vhost-net1{server} \"\n+            f\"-netdev type=vhost-user,id=netdev0,chardev=char0,vhostforce,queues={queues} \"\n+            f\"-device virtio-net-pci,netdev=netdev0,mac=%s,\"\n+            f\"disable-modern=false,mrg_rxbuf=on,csum=on,guest_csum=on,host_tso4=on,guest_tso4=on,guest_ecn=on{packed_param} \"\n+            f\"-cpu host -smp {self.vm1_lcore_smp} -m {self.vm1_mem_size} -object memory-backend-file,id=mem,size={self.vm1_mem_size}M,mem-path=/mnt/tmpfs_nohuge1,share=on \"\n+            f\"-numa node,memdev=mem -mem-prealloc -drive file={self.vm1_image_path} \"\n+            f\"-chardev socket,path=/tmp/vm1_qga0.sock,server,nowait,id=vm1_qga0 -device virtio-serial \"\n+            f\"-device virtserialport,chardev=vm1_qga0,name=org.qemu.guest_agent.0 -vnc :{self.vm1_vnc} \"\n+        )\n+\n+        self.vm0_session = self.dut.new_session(suite=\"vm0_session\")\n+        cmd0 = self.qemu_cmd0 % (\n+            self.dut.get_ip_address(),\n+            self.virtio_mac1,\n+        )\n+        self.vm0_session.send_expect(cmd0, \"# \")\n+        time.sleep(10)\n+        
self.vm0_dut = self.connect_vm0()\n+        self.verify(self.vm0_dut is not None, \"vm start fail\")\n+        self.vm_session = self.vm0_dut.new_session(suite=\"vm_session\")\n+\n+        self.vm1_session = self.dut.new_session(suite=\"vm1_session\")\n+        cmd1 = self.qemu_cmd1 % (\n+            self.dut.get_ip_address(),\n+            self.virtio_mac2,\n+        )\n+        self.vm1_session.send_expect(cmd1, \"# \")\n+        time.sleep(10)\n+        self.vm1_dut = self.connect_vm1()\n+        self.verify(self.vm1_dut is not None, \"vm start fail\")\n+        self.vm_session = self.vm1_dut.new_session(suite=\"vm_session\")\n+\n+    def connect_vm0(self):\n+        self.vm0 = QEMUKvm(self.dut, \"vm0\", self.suite_name)\n+        self.vm0.net_type = \"hostfwd\"\n+        self.vm0.hostfwd_addr = \"%s:6000\" % self.dut.get_ip_address()\n+        self.vm0.def_driver = \"vfio-pci\"\n+        self.vm0.driver_mode = \"noiommu\"\n+        self.wait_vm_net_ready(vm_index=0)\n+        vm_dut = self.vm0.instantiate_vm_dut(autodetect_topo=False, bind_dev=False)\n+        if vm_dut:\n+            return vm_dut\n+        else:\n+            return None\n+\n+    def connect_vm1(self):\n+        self.vm1 = QEMUKvm(self.dut, \"vm1\", \"vm_hotplug\")\n+        self.vm1.net_type = \"hostfwd\"\n+        self.vm1.hostfwd_addr = \"%s:6001\" % self.dut.get_ip_address()\n+        self.vm1.def_driver = \"vfio-pci\"\n+        self.vm1.driver_mode = \"noiommu\"\n+        self.wait_vm_net_ready(vm_index=1)\n+        vm_dut = self.vm1.instantiate_vm_dut(autodetect_topo=False, bind_dev=False)\n+        if vm_dut:\n+            return vm_dut\n+        else:\n+            return None\n+\n+    def wait_vm_net_ready(self, vm_index=0):\n+        self.vm_net_session = self.dut.new_session(suite=\"vm_net_session\")\n+        self.start_time = time.time()\n+        cur_time = time.time()\n+        time_diff = cur_time - self.start_time\n+        while time_diff < 120:\n+            try:\n+                out = self.vm_net_session.send_expect(\n+                    \"~/QMP/qemu-ga-client --address=/tmp/vm%s_qga0.sock ifconfig\"\n+                    % vm_index,\n+                    \"#\",\n+                )\n+            except Exception as EnvironmentError:\n+                pass\n+            if \"10.0.2\" in out:\n+                pos = self.vm0.hostfwd_addr.find(\":\")\n+                ssh_key = (\n+                    \"[\"\n+                    + self.vm0.hostfwd_addr[:pos]\n+                    + \"]\"\n+                    + self.vm0.hostfwd_addr[pos:]\n+                )\n+                os.system(\"ssh-keygen -R %s\" % ssh_key)\n+                break\n+            time.sleep(1)\n+            cur_time = time.time()\n+            time_diff = cur_time - self.start_time\n+        self.dut.close_session(self.vm_net_session)\n \n     def get_cbdma_ports_info_and_bind_to_dpdk(self, cbdma_num, allow_diff_socket=False):\n         \"\"\"\n@@ -179,59 +344,22 @@ class TestBasic4kPagesCbdma(TestCase):\n             fixed_prefix=True,\n         )\n \n-    def start_vms(\n-        self,\n-        setting_args=\"\",\n-        server_mode=False,\n-        opt_queue=None,\n-        vm_config=\"vhost_sample\",\n-    ):\n-        \"\"\"\n-        start one VM, each VM has one virtio device\n-        \"\"\"\n-        vm_params = {}\n-        if opt_queue is not None:\n-            vm_params[\"opt_queue\"] = opt_queue\n-\n-        for i in range(self.vm_num):\n-            vm_dut = None\n-            vm_info = VM(self.dut, 
\"vm%d\" % i, vm_config)\n-\n-            vm_params[\"driver\"] = \"vhost-user\"\n-            if not server_mode:\n-                vm_params[\"opt_path\"] = self.base_dir + \"/vhost-net%d\" % i\n-            else:\n-                vm_params[\"opt_path\"] = self.base_dir + \"/vhost-net%d\" % i + \",server\"\n-            vm_params[\"opt_mac\"] = \"52:54:00:00:00:0%d\" % (i + 1)\n-            vm_params[\"opt_settings\"] = setting_args\n-            vm_info.set_vm_device(**vm_params)\n-            time.sleep(3)\n-            try:\n-                vm_dut = vm_info.start(set_target=False)\n-                if vm_dut is None:\n-                    raise Exception(\"Set up VM ENV failed\")\n-            except Exception as e:\n-                print((utils.RED(\"Failure for %s\" % str(e))))\n-                raise e\n-            self.vm_dut.append(vm_dut)\n-            self.vm.append(vm_info)\n-\n     def config_vm_ip(self):\n         \"\"\"\n         set virtio device IP and run arp protocal\n         \"\"\"\n-        vm1_intf = self.vm_dut[0].ports_info[0][\"intf\"]\n-        vm2_intf = self.vm_dut[1].ports_info[0][\"intf\"]\n-        self.vm_dut[0].send_expect(\n+        vm1_intf = self.vm0_dut.ports_info[0][\"intf\"]\n+        vm2_intf = self.vm1_dut.ports_info[0][\"intf\"]\n+        self.vm0_dut.send_expect(\n             \"ifconfig %s %s\" % (vm1_intf, self.virtio_ip1), \"#\", 10\n         )\n-        self.vm_dut[1].send_expect(\n+        self.vm1_dut.send_expect(\n             \"ifconfig %s %s\" % (vm2_intf, self.virtio_ip2), \"#\", 10\n         )\n-        self.vm_dut[0].send_expect(\n+        self.vm0_dut.send_expect(\n             \"arp -s %s %s\" % (self.virtio_ip2, self.virtio_mac2), \"#\", 10\n         )\n-        self.vm_dut[1].send_expect(\n+        self.vm1_dut.send_expect(\n             \"arp -s %s %s\" % (self.virtio_ip1, self.virtio_mac1), \"#\", 10\n         )\n \n@@ -239,27 +367,54 @@ class TestBasic4kPagesCbdma(TestCase):\n         \"\"\"\n         set virtio device combined\n         \"\"\"\n-        vm1_intf = self.vm_dut[0].ports_info[0][\"intf\"]\n-        vm2_intf = self.vm_dut[1].ports_info[0][\"intf\"]\n-        self.vm_dut[0].send_expect(\n+        vm1_intf = self.vm0_dut.ports_info[0][\"intf\"]\n+        vm2_intf = self.vm1_dut.ports_info[0][\"intf\"]\n+        self.vm0_dut.send_expect(\n             \"ethtool -L %s combined %d\" % (vm1_intf, combined), \"#\", 10\n         )\n-        self.vm_dut[1].send_expect(\n+        self.vm1_dut.send_expect(\n             \"ethtool -L %s combined %d\" % (vm2_intf, combined), \"#\", 10\n         )\n \n+    def check_ping_between_vms(self):\n+        ping_out = self.vm0_dut.send_expect(\n+            \"ping {} -c 4\".format(self.virtio_ip2), \"#\", 20\n+        )\n+        self.logger.info(ping_out)\n+\n+    def check_scp_file_valid_between_vms(self, file_size=1024):\n+        \"\"\"\n+        scp file form VM1 to VM2, check the data is valid\n+        \"\"\"\n+        # default file_size=1024K\n+        data = \"\"\n+        for _ in range(file_size * 1024):\n+            data += random.choice(self.random_string)\n+        self.vm0_dut.send_expect('echo \"%s\" > /tmp/payload' % data, \"# \")\n+        # scp this file to vm1\n+        out = self.vm1_dut.send_command(\n+            \"scp root@%s:/tmp/payload /root\" % self.virtio_ip1, timeout=5\n+        )\n+        if \"Are you sure you want to continue connecting\" in out:\n+            self.vm1_dut.send_command(\"yes\", timeout=3)\n+        
self.vm1_dut.send_command(self.vm0_passwd, timeout=3)\n+        # get the file info in vm1, and check it valid\n+        md5_send = self.vm0_dut.send_expect(\"md5sum /tmp/payload\", \"# \")\n+        md5_revd = self.vm1_dut.send_expect(\"md5sum /root/payload\", \"# \")\n+        md5_send = md5_send[: md5_send.find(\" \")]\n+        md5_revd = md5_revd[: md5_revd.find(\" \")]\n+        self.verify(\n+            md5_send == md5_revd, \"the received file is different with send file\"\n+        )\n+\n     def start_iperf(self):\n         \"\"\"\n         run perf command between to vms\n         \"\"\"\n         iperf_server = \"iperf -s -i 1\"\n         iperf_client = \"iperf -c {} -i 1 -t 60\".format(self.virtio_ip1)\n-        self.vm_dut[0].send_expect(\n-            \"{} > iperf_server.log &\".format(iperf_server), \"\", 10\n-        )\n-        self.vm_dut[1].send_expect(\n-            \"{} > iperf_client.log &\".format(iperf_client), \"\", 60\n-        )\n+        self.vm0_dut.send_expect(\"{} > iperf_server.log &\".format(iperf_server), \"\", 10)\n+        self.vm1_dut.send_expect(\"{} > iperf_client.log &\".format(iperf_client), \"\", 60)\n         time.sleep(60)\n \n     def get_iperf_result(self):\n@@ -268,8 +423,8 @@ class TestBasic4kPagesCbdma(TestCase):\n         \"\"\"\n         self.table_header = [\"Mode\", \"[M|G]bits/sec\"]\n         self.result_table_create(self.table_header)\n-        self.vm_dut[0].send_expect(\"pkill iperf\", \"# \")\n-        self.vm_dut[1].session.copy_file_from(\"%s/iperf_client.log\" % self.dut.base_dir)\n+        self.vm0_dut.send_expect(\"pkill iperf\", \"# \")\n+        self.vm1_dut.session.copy_file_from(\"%s/iperf_client.log\" % self.dut.base_dir)\n         fp = open(\"./iperf_client.log\")\n         fmsg = fp.read()\n         fp.close()\n@@ -289,19 +444,18 @@ class TestBasic4kPagesCbdma(TestCase):\n         # print iperf resut\n         self.result_table_print()\n         # rm the iperf log file in vm\n-        self.vm_dut[0].send_expect(\"rm iperf_server.log\", \"#\", 10)\n-        self.vm_dut[1].send_expect(\"rm iperf_client.log\", \"#\", 10)\n+        self.vm0_dut.send_expect(\"rm iperf_server.log\", \"#\", 10)\n+        self.vm1_dut.send_expect(\"rm iperf_client.log\", \"#\", 10)\n \n     def verify_xstats_info_on_vhost(self):\n         \"\"\"\n         check both 2VMs can receive and send big packets to each other\n         \"\"\"\n-        self.vhost_user_pmd.execute_cmd(\"show port stats all\")\n         out_tx = self.vhost_user_pmd.execute_cmd(\"show port xstats 0\")\n         out_rx = self.vhost_user_pmd.execute_cmd(\"show port xstats 1\")\n \n-        tx_info = re.search(\"tx_size_1523_to_max_packets:\\s*(\\d*)\", out_tx)\n-        rx_info = re.search(\"rx_size_1523_to_max_packets:\\s*(\\d*)\", out_rx)\n+        tx_info = re.search(\"tx_q0_size_1519_max_packets:\\s*(\\d*)\", out_tx)\n+        rx_info = re.search(\"rx_q0_size_1519_max_packets:\\s*(\\d*)\", out_rx)\n \n         self.verify(\n             int(rx_info.group(1)) > 0, \"Port 1 not receive packet greater than 1522\"\n@@ -327,34 +481,32 @@ class TestBasic4kPagesCbdma(TestCase):\n         out = self.dut.send_expect(\n             \"mount |grep 'mnt/tmpfs' |awk -F ' ' {'print $3'}\", \"#\"\n         )\n-        mount_infos = out.replace(\"\\r\", \"\").split(\"\\n\")\n-        if len(mount_infos) != 0:\n-            for mount_info in mount_infos:\n+        if out != \"\":\n+            mount_points = out.replace(\"\\r\", \"\").split(\"\\n\")\n+        else:\n+            
mount_points = []\n+        if len(mount_points) != 0:\n+            for mount_info in mount_points:\n                 self.dut.send_expect(\"umount {}\".format(mount_info), \"# \")\n \n-    def umount_huge_pages(self):\n-        self.dut.send_expect(\"mount |grep '/mnt/huge' |awk -F ' ' {'print $3'}\", \"#\")\n-        self.dut.send_expect(\"umount /mnt/huge\", \"# \")\n-\n-    def mount_huge_pages(self):\n-        self.dut.send_expect(\"mkdir -p /mnt/huge\", \"# \")\n-        self.dut.send_expect(\"mount -t hugetlbfs nodev /mnt/huge\", \"# \")\n-\n-    def test_perf_pvp_virtio_user_split_ring_with_4K_pages_and_cbdma_enable(self):\n+    def test_perf_pvp_split_ring_vhost_async_operation_using_4K_pages_and_cbdma_enable(\n+        self,\n+    ):\n         \"\"\"\n-        Test Case 1: Basic test vhost/virtio-user split ring with 4K-pages and cbdma enable\n+        Test Case 1: Basic test vhost-user/virtio-user split ring vhost async operation using 4K-pages and cbdma enable\n         \"\"\"\n-        self.get_cbdma_ports_info_and_bind_to_dpdk(1)\n-        lcore_dma = f\"lcore{self.vhost_core_list[1]}@{self.cbdma_list[0]}\"\n-        vhost_eal_param = \"--no-huge -m 1024 --vdev 'net_vhost0,iface=./vhost-net,queues=1,dmas=[txq0]'\"\n-        vhost_param = \" --no-numa --socket-num={} --lcore-dma=[{}]\".format(\n-            self.ports_socket, lcore_dma\n+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=1)\n+        lcore_dma = \"lcore%s@%s,\" % (self.vhost_core_list[1], self.cbdma_list[0])\n+        vhost_eal_param = \"--no-huge -m 1024 --vdev 'net_vhost0,iface=./vhost-net,queues=1,dmas=[txq0;rxq0]'\"\n+        vhost_param = \" --no-numa --socket-num=%s --lcore-dma=[%s]\" % (\n+            self.ports_socket,\n+            lcore_dma,\n         )\n         ports = [self.dut.ports_info[0][\"pci\"]]\n         for i in self.cbdma_list:\n             ports.append(i)\n         self.start_vhost_user_testpmd(\n-            cores=self.vhost_core_list[0:2],\n+            cores=self.vhost_core_list,\n             eal_param=vhost_eal_param,\n             param=vhost_param,\n             ports=ports,\n@@ -370,21 +522,24 @@ class TestBasic4kPagesCbdma(TestCase):\n         self.send_and_verify()\n         self.result_table_print()\n \n-    def test_perf_pvp_virtio_user_packed_ring_with_4K_pages_and_cbdma_enable(self):\n+    def test_perf_pvp_packed_ring_vhost_async_operation_using_4K_pages_and_cbdma_enable(\n+        self,\n+    ):\n         \"\"\"\n-        Test Case 2: Basic test vhost/virtio-user packed ring with 4K-pages and cbdma enable\n+        Test Case 2: Basic test vhost-user/virtio-user packed ring vhost async operation using 4K-pages and cbdma enable\n         \"\"\"\n-        self.get_cbdma_ports_info_and_bind_to_dpdk(1)\n-        lcore_dma = f\"lcore{self.vhost_core_list[1]}@{self.cbdma_list[0]}\"\n-        vhost_eal_param = \"--no-huge -m 1024 --vdev 'net_vhost0,iface=./vhost-net,queues=1,dmas=[txq0]'\"\n-        vhost_param = \" --no-numa --socket-num={} --lcore-dma=[{}]\".format(\n-            self.ports_socket, lcore_dma\n+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=1)\n+        lcore_dma = \"lcore%s@%s,\" % (self.vhost_core_list[1], self.cbdma_list[0])\n+        vhost_eal_param = \"--no-huge -m 1024 --vdev 'net_vhost0,iface=./vhost-net,queues=1,dmas=[txq0;rxq0]'\"\n+        vhost_param = \" --no-numa --socket-num=%s --lcore-dma=[%s]\" % (\n+            self.ports_socket,\n+            lcore_dma,\n         )\n         ports = [self.dut.ports_info[0][\"pci\"]]\n  
       for i in self.cbdma_list:\n             ports.append(i)\n         self.start_vhost_user_testpmd(\n-            cores=self.vhost_core_list[0:2],\n+            cores=self.vhost_core_list,\n             eal_param=vhost_eal_param,\n             param=vhost_param,\n             ports=ports,\n@@ -400,6 +555,645 @@ class TestBasic4kPagesCbdma(TestCase):\n         self.send_and_verify()\n         self.result_table_print()\n \n+    def test_vm2vm_split_ring_vhost_async_operaiton_test_with_tcp_traffic_using_4k_pages_and_cbdma_enable(\n+        self,\n+    ):\n+        \"\"\"\n+        Test Case 3: VM2VM vhost-user/virtio-net split ring vhost async operation test with tcp traffic using 4K-pages and cbdma enable\n+        \"\"\"\n+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)\n+        lcore_dma = \"lcore%s@%s,\" \"lcore%s@%s\" % (\n+            self.vhost_core_list[1],\n+            self.cbdma_list[0],\n+            self.vhost_core_list[2],\n+            self.cbdma_list[1],\n+        )\n+        vhost_eal_param = (\n+            \"--no-huge -m 1024 \"\n+            + \"--vdev 'net_vhost0,iface=./vhost-net0,queues=1,tso=1,dmas=[txq0;rxq0],dma_ring_size=2048'\"\n+            + \" --vdev 'net_vhost1,iface=./vhost-net1,queues=1,tso=1,dmas=[txq0;rxq0],dma_ring_size=2048'\"\n+        )\n+        vhost_param = \" --nb-cores=2 --txd=1024 --rxd=1024 --lcore-dma=[%s]\" % lcore_dma\n+        self.start_vhost_user_testpmd(\n+            cores=self.vhost_core_list,\n+            eal_param=vhost_eal_param,\n+            param=vhost_param,\n+            ports=self.cbdma_list,\n+        )\n+        self.vhost_user_pmd.execute_cmd(\"start\")\n+        self.mount_tmpfs_for_4k(number=2)\n+\n+        self.start_vm(packed=False, queues=1, server=False)\n+        self.config_vm_ip()\n+        self.check_ping_between_vms()\n+        self.start_iperf()\n+        self.get_iperf_result()\n+        self.verify_xstats_info_on_vhost()\n+\n+        self.vm0.stop()\n+        self.vm1.stop()\n+        self.vhost_user_pmd.quit()\n+\n+    def test_vm2vm_packed_ring_vhost_async_operaiton_test_with_tcp_traffic_using_4k_pages_and_cbdma_enable(\n+        self,\n+    ):\n+        \"\"\"\n+        Test Case 4: VM2VM vhost-user/virtio-net packed ring vhost async operation test with tcp traffic using 4K-pages and cbdma enable\n+        \"\"\"\n+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=2)\n+        lcore_dma = \"lcore%s@%s,\" \"lcore%s@%s\" % (\n+            self.vhost_core_list[1],\n+            self.cbdma_list[0],\n+            self.vhost_core_list[2],\n+            self.cbdma_list[1],\n+        )\n+        vhost_eal_param = (\n+            \"--no-huge -m 1024 \"\n+            + \"--vdev 'net_vhost0,iface=./vhost-net0,queues=1,tso=1,dmas=[txq0;rxq0],dma_ring_size=2048'\"\n+            + \" --vdev 'net_vhost1,iface=./vhost-net1,queues=1,tso=1,dmas=[txq0;rxq0],dma_ring_size=2048'\"\n+        )\n+        vhost_param = \" --nb-cores=2 --txd=1024 --rxd=1024 --lcore-dma=[%s]\" % lcore_dma\n+        self.start_vhost_user_testpmd(\n+            cores=self.vhost_core_list,\n+            eal_param=vhost_eal_param,\n+            param=vhost_param,\n+            ports=self.cbdma_list,\n+        )\n+        self.vhost_user_pmd.execute_cmd(\"start\")\n+        self.mount_tmpfs_for_4k(number=2)\n+\n+        self.start_vm(packed=True, queues=1, server=False)\n+        self.config_vm_ip()\n+        self.check_ping_between_vms()\n+        self.start_iperf()\n+        self.get_iperf_result()\n+        
self.verify_xstats_info_on_vhost()\n+\n+        self.vm0.stop()\n+        self.vm1.stop()\n+        self.vhost_user_pmd.quit()\n+\n+    def test_vm2vm_split_ring_multi_queues_using_4k_pages_and_cbdma_enable(self):\n+        \"\"\"\n+        Test Case 5: vm2vm vhost/virtio-net split ring multi queues using 4K-pages and cbdma enable\n+        \"\"\"\n+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)\n+        lcore_dma = (\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s\"\n+            % (\n+                self.vhost_core_list[1],\n+                self.cbdma_list[0],\n+                self.vhost_core_list[1],\n+                self.cbdma_list[1],\n+                self.vhost_core_list[1],\n+                self.cbdma_list[2],\n+                self.vhost_core_list[1],\n+                self.cbdma_list[3],\n+                self.vhost_core_list[1],\n+                self.cbdma_list[4],\n+                self.vhost_core_list[1],\n+                self.cbdma_list[5],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[6],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[7],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[8],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[9],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[10],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[11],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[12],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[13],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[14],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[15],\n+            )\n+        )\n+        vhost_eal_param = (\n+            \"--no-huge -m 1024 \"\n+            + \"--vdev 'net_vhost0,iface=./vhost-net0,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7]'\"\n+            + \" --vdev 'net_vhost1,iface=./vhost-net1,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7]'\"\n+        )\n+        vhost_param = (\n+            \" --nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8 --lcore-dma=[%s]\"\n+            % lcore_dma\n+        )\n+        self.start_vhost_user_testpmd(\n+            cores=self.vhost_core_list,\n+            eal_param=vhost_eal_param,\n+            param=vhost_param,\n+            ports=self.cbdma_list,\n+        )\n+        self.vhost_user_pmd.execute_cmd(\"start\")\n+        self.mount_tmpfs_for_4k(number=2)\n+\n+        self.start_vm(packed=False, queues=8, server=True)\n+        self.config_vm_ip()\n+        self.config_vm_combined(combined=8)\n+        self.check_scp_file_valid_between_vms()\n+        self.start_iperf()\n+        self.get_iperf_result()\n+\n+        self.vhost_user_pmd.quit()\n+        lcore_dma = (\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            
\"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s\"\n+            % (\n+                self.vhost_core_list[1],\n+                self.cbdma_list[0],\n+                self.vhost_core_list[1],\n+                self.cbdma_list[1],\n+                self.vhost_core_list[1],\n+                self.cbdma_list[2],\n+                self.vhost_core_list[1],\n+                self.cbdma_list[3],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[0],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[2],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[4],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[5],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[6],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[7],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[1],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[3],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[8],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[9],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[10],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[11],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[12],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[13],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[14],\n+                self.vhost_core_list[4],\n+                self.cbdma_list[15],\n+            )\n+        )\n+        vhost_eal_param = (\n+            \"--no-huge -m 1024 \"\n+            + \"--vdev 'net_vhost0,iface=./vhost-net0,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7]'\"\n+            + \" --vdev 'net_vhost1,iface=./vhost-net1,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7]'\"\n+        )\n+        vhost_param = (\n+            \" --nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8 --lcore-dma=[%s]\"\n+            % lcore_dma\n+        )\n+        self.start_vhost_user_testpmd(\n+            cores=self.vhost_core_list,\n+            eal_param=vhost_eal_param,\n+            param=vhost_param,\n+            ports=self.cbdma_list,\n+        )\n+        self.vhost_user_pmd.execute_cmd(\"start\")\n+        self.check_ping_between_vms()\n+        self.check_scp_file_valid_between_vms()\n+        self.start_iperf()\n+        self.get_iperf_result()\n+\n+        self.vhost_user_pmd.quit()\n+        vhost_eal_param = (\n+            \"--no-huge -m 1024 \"\n+            + \"--vdev 'net_vhost0,iface=./vhost-net0,client=1,queues=4'\"\n+            + \" --vdev 'net_vhost1,iface=./vhost-net1,client=1,queues=4'\"\n+        )\n+        vhost_param = \" --nb-cores=4 --txd=1024 --rxd=1024 --rxq=4 --txq=4\"\n+        self.start_vhost_user_testpmd(\n+            cores=self.vhost_core_list,\n+            eal_param=vhost_eal_param,\n+            param=vhost_param,\n+            ports=self.cbdma_list,\n+       
 )\n+        self.vhost_user_pmd.execute_cmd(\"start\")\n+        self.config_vm_combined(combined=4)\n+        self.check_ping_between_vms()\n+        self.check_scp_file_valid_between_vms()\n+        self.start_iperf()\n+        self.get_iperf_result()\n+\n+        self.vhost_user_pmd.quit()\n+        vhost_eal_param = (\n+            \"--no-huge -m 1024 \"\n+            + \"--vdev 'net_vhost0,iface=./vhost-net0,client=1,queues=4'\"\n+            + \" --vdev 'net_vhost1,iface=./vhost-net1,client=1,queues=4'\"\n+        )\n+        vhost_param = \" --nb-cores=4 --txd=1024 --rxd=1024 --rxq=1 --txq=1\"\n+        self.start_vhost_user_testpmd(\n+            cores=self.vhost_core_list,\n+            eal_param=vhost_eal_param,\n+            param=vhost_param,\n+            ports=self.cbdma_list,\n+        )\n+        self.vhost_user_pmd.execute_cmd(\"start\")\n+        self.config_vm_combined(combined=1)\n+        self.check_ping_between_vms()\n+        self.check_scp_file_valid_between_vms()\n+        self.start_iperf()\n+        self.get_iperf_result()\n+\n+        self.vm0.stop()\n+        self.vm1.stop()\n+        self.vhost_user_pmd.quit()\n+\n+    def test_vm2vm_packed_ring_multi_queues_using_4k_pages_and_cbdma_enable(self):\n+        \"\"\"\n+        Test Case 6: vm2vm vhost/virtio-net packed ring multi queues using 4K-pages and cbdma enable\n+        \"\"\"\n+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)\n+        lcore_dma = (\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s\"\n+            % (\n+                self.vhost_core_list[1],\n+                self.cbdma_list[0],\n+                self.vhost_core_list[1],\n+                self.cbdma_list[1],\n+                self.vhost_core_list[1],\n+                self.cbdma_list[2],\n+                self.vhost_core_list[1],\n+                self.cbdma_list[3],\n+                self.vhost_core_list[1],\n+                self.cbdma_list[4],\n+                self.vhost_core_list[1],\n+                self.cbdma_list[5],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[6],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[7],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[8],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[9],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[10],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[11],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[12],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[13],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[14],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[15],\n+            )\n+        )\n+        vhost_eal_param = (\n+            \"--no-huge -m 1024 \"\n+            + \"--vdev 'net_vhost0,iface=./vhost-net0,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7]'\"\n+            + \" --vdev 
'net_vhost1,iface=./vhost-net1,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7]'\"\n+        )\n+        vhost_param = (\n+            \" --nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8 --lcore-dma=[%s]\"\n+            % lcore_dma\n+        )\n+        self.start_vhost_user_testpmd(\n+            cores=self.vhost_core_list,\n+            eal_param=vhost_eal_param,\n+            param=vhost_param,\n+            ports=self.cbdma_list,\n+        )\n+        self.vhost_user_pmd.execute_cmd(\"start\")\n+        self.mount_tmpfs_for_4k(number=2)\n+\n+        self.start_vm(packed=True, queues=8, server=True)\n+        self.config_vm_ip()\n+        self.config_vm_combined(combined=8)\n+        self.check_ping_between_vms()\n+        self.check_scp_file_valid_between_vms()\n+        self.start_iperf()\n+        self.get_iperf_result()\n+\n+        self.vm0.stop()\n+        self.vm1.stop()\n+        self.vhost_user_pmd.quit()\n+\n+    def test_vm2vm_split_ring_multi_queues_using_1G_and_4k_pages_and_cbdma_enable(self):\n+        \"\"\"\n+        Test Case 7: vm2vm vhost/virtio-net split ring multi queues using 1G/4k-pages and cbdma enable\n+        \"\"\"\n+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)\n+        lcore_dma = (\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s\"\n+            % (\n+                self.vhost_core_list[1],\n+                self.cbdma_list[0],\n+                self.vhost_core_list[1],\n+                self.cbdma_list[1],\n+                self.vhost_core_list[1],\n+                self.cbdma_list[2],\n+                self.vhost_core_list[1],\n+                self.cbdma_list[3],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[4],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[5],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[6],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[7],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[8],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[9],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[10],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[11],\n+                self.vhost_core_list[4],\n+                self.cbdma_list[12],\n+                self.vhost_core_list[4],\n+                self.cbdma_list[13],\n+                self.vhost_core_list[4],\n+                self.cbdma_list[14],\n+                self.vhost_core_list[4],\n+                self.cbdma_list[15],\n+            )\n+        )\n+        vhost_eal_param = (\n+            \"-m 1024 \"\n+            + \"--vdev 'net_vhost0,iface=./vhost-net0,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;rxq2;rxq3;rxq4;rxq5;rxq6;rxq7]'\"\n+            + \" --vdev 'net_vhost1,iface=./vhost-net1,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;rxq2;rxq3;rxq4;rxq5;rxq6;rxq7]'\"\n+        )\n+        vhost_param = (\n+            \" --nb-cores=4 --txd=1024 
--rxd=1024 --rxq=8 --txq=8 --lcore-dma=[%s]\"\n+            % lcore_dma\n+        )\n+        self.start_vhost_user_testpmd(\n+            cores=self.vhost_core_list,\n+            eal_param=vhost_eal_param,\n+            param=vhost_param,\n+            ports=self.cbdma_list,\n+        )\n+        self.vhost_user_pmd.execute_cmd(\"start\")\n+        self.mount_tmpfs_for_4k(number=2)\n+\n+        self.start_vm(packed=False, queues=8, server=True)\n+        self.config_vm_ip()\n+        self.config_vm_combined(combined=8)\n+        self.check_ping_between_vms()\n+        self.check_scp_file_valid_between_vms()\n+        self.start_iperf()\n+        self.get_iperf_result()\n+\n+        self.vhost_user_pmd.quit()\n+        lcore_dma = (\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s\"\n+            % (\n+                self.vhost_core_list[1],\n+                self.cbdma_list[0],\n+                self.vhost_core_list[1],\n+                self.cbdma_list[1],\n+                self.vhost_core_list[1],\n+                self.cbdma_list[2],\n+                self.vhost_core_list[1],\n+                self.cbdma_list[3],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[0],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[2],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[4],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[5],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[6],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[7],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[1],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[3],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[8],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[9],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[10],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[11],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[12],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[13],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[14],\n+                self.vhost_core_list[4],\n+                self.cbdma_list[15],\n+            )\n+        )\n+        vhost_eal_param = (\n+            \"--no-huge -m 1024 \"\n+            + \"--vdev 'net_vhost0,iface=./vhost-net0,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7;rxq0;rxq1;rxq2;rxq3;rxq4;rxq5;rxq6;rxq7]'\"\n+            + \" --vdev 'net_vhost1,iface=./vhost-net1,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;txq6;txq7;rxq0;rxq1;rxq2;rxq3;rxq4;rxq5;rxq6;rxq7]'\"\n+        )\n+        vhost_param = (\n+            \" --nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8 --lcore-dma=[%s]\"\n+            % 
lcore_dma\n+        )\n+        self.start_vhost_user_testpmd(\n+            cores=self.vhost_core_list,\n+            eal_param=vhost_eal_param,\n+            param=vhost_param,\n+            ports=self.cbdma_list,\n+        )\n+        self.vhost_user_pmd.execute_cmd(\"start\")\n+        self.check_ping_between_vms()\n+        self.check_scp_file_valid_between_vms()\n+        self.start_iperf()\n+        self.get_iperf_result()\n+\n+        self.vm0.stop()\n+        self.vm1.stop()\n+        self.vhost_user_pmd.quit()\n+\n+    def test_vm2vm_packed_ring_multi_queues_using_1G_and_4k_pages_and_cbdma_enable(\n+        self,\n+    ):\n+        \"\"\"\n+        Test Case 8: vm2vm vhost/virtio-net split packed ring multi queues with 1G/4k-pages and cbdma enable\n+        \"\"\"\n+        self.get_cbdma_ports_info_and_bind_to_dpdk(cbdma_num=16, allow_diff_socket=True)\n+        lcore_dma = (\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s,\"\n+            \"lcore%s@%s\"\n+            % (\n+                self.vhost_core_list[1],\n+                self.cbdma_list[0],\n+                self.vhost_core_list[1],\n+                self.cbdma_list[1],\n+                self.vhost_core_list[1],\n+                self.cbdma_list[2],\n+                self.vhost_core_list[1],\n+                self.cbdma_list[3],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[4],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[5],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[6],\n+                self.vhost_core_list[2],\n+                self.cbdma_list[7],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[8],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[9],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[10],\n+                self.vhost_core_list[3],\n+                self.cbdma_list[11],\n+                self.vhost_core_list[4],\n+                self.cbdma_list[12],\n+                self.vhost_core_list[4],\n+                self.cbdma_list[13],\n+                self.vhost_core_list[4],\n+                self.cbdma_list[14],\n+                self.vhost_core_list[4],\n+                self.cbdma_list[15],\n+            )\n+        )\n+        vhost_eal_param = (\n+            \"-m 1024 \"\n+            + \"--vdev 'net_vhost0,iface=./vhost-net0,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;rxq2;rxq3;rxq4;rxq5;rxq6;rxq7]'\"\n+            + \" --vdev 'net_vhost1,iface=./vhost-net1,client=1,queues=8,dmas=[txq0;txq1;txq2;txq3;txq4;txq5;rxq2;rxq3;rxq4;rxq5;rxq6;rxq7]'\"\n+        )\n+        vhost_param = (\n+            \" --nb-cores=4 --txd=1024 --rxd=1024 --rxq=8 --txq=8 --lcore-dma=[%s]\"\n+            % lcore_dma\n+        )\n+        self.start_vhost_user_testpmd(\n+            cores=self.vhost_core_list,\n+            eal_param=vhost_eal_param,\n+            param=vhost_param,\n+            ports=self.cbdma_list,\n+        )\n+        self.vhost_user_pmd.execute_cmd(\"start\")\n+        self.mount_tmpfs_for_4k(number=2)\n+\n+        
self.start_vm(packed=True, queues=8, server=True)\n+        self.config_vm_ip()\n+        self.config_vm_combined(combined=8)\n+        self.check_ping_between_vms()\n+        self.check_scp_file_valid_between_vms()\n+        self.start_iperf()\n+        self.get_iperf_result()\n+\n+        self.vm0.stop()\n+        self.vm1.stop()\n+        self.vhost_user_pmd.quit()\n+\n     def tear_down(self):\n         \"\"\"\n         Run after each test case.\n@@ -407,6 +1201,8 @@ class TestBasic4kPagesCbdma(TestCase):\n         self.virtio_user0_pmd.quit()\n         self.vhost_user_pmd.quit()\n         self.dut.send_expect(\"killall -s INT %s\" % self.testpmd_name, \"# \")\n+        self.dut.send_expect(\"killall -s INT qemu-system-x86_64\", \"#\")\n+        self.dut.send_expect(\"rm -rf /tmp/vhost-net*\", \"# \")\n         self.bind_cbdma_device_to_kernel()\n         self.umount_tmpfs_for_4k()\n \n",
    "prefixes": [
        "V3",
        "2/3"
    ]
}
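
The "mbox", "comments", and "checks" values in the response are themselves fetchable URLs. A minimal follow-up sketch (same requests assumption as above) that downloads the series mbox referenced by this patch so it can be applied locally:

import requests

# URL taken verbatim from the "series" entry in the response above
SERIES_MBOX_URL = "http://patchwork.dpdk.org/series/24329/mbox/"

resp = requests.get(SERIES_MBOX_URL)
resp.raise_for_status()
with open("series-24329.mbox", "wb") as f:
    f.write(resp.content)
# apply the whole series with: git am series-24329.mbox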