Patch Detail

GET: Show a patch.
PATCH: Update a patch.
PUT: Update a patch.
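The record below can also be retrieved programmatically. A minimal sketch using the Python requests package (an assumption — any HTTP client works) against the read-only GET endpoint shown next; the field names used here ("name", "state", "check", "mbox") all appear in the JSON response that follows:

import requests

BASE = "http://patchwork.dpdk.org/api"

def get_patch(patch_id: int) -> dict:
    # GET /api/patches/{id}/ returns the JSON document reproduced below
    resp = requests.get(f"{BASE}/patches/{patch_id}/", timeout=30)
    resp.raise_for_status()
    return resp.json()

if __name__ == "__main__":
    patch = get_patch(106048)
    print(patch["name"])   # "[V1,3/3] tests/vhost_cbdma:modify test suite ..."
    print(patch["state"])  # e.g. "superseded"
    print(patch["check"])  # aggregate CI result, e.g. "fail"
    print(patch["mbox"])   # mbox URL suitable for applying with git am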
GET /api/patches/106048/?format=api

{ "id": 106048, "url": "
http://patchwork.dpdk.org/api/patches/106048/?format=api", "web_url": "http://patchwork.dpdk.org/project/dts/patch/20220119025852.898224-1-weix.ling@intel.com/", "project": { "id": 3, "url": "http://patchwork.dpdk.org/api/projects/3/?format=api", "name": "DTS", "link_name": "dts", "list_id": "dts.dpdk.org", "list_email": "dts@dpdk.org", "web_url": "", "scm_url": "git://dpdk.org/tools/dts", "webscm_url": "http://git.dpdk.org/tools/dts/", "list_archive_url": "https://inbox.dpdk.org/dts", "list_archive_url_format": "https://inbox.dpdk.org/dts/{}", "commit_url_format": "" }, "msgid": "<20220119025852.898224-1-weix.ling@intel.com>", "list_archive_url": "https://inbox.dpdk.org/dts/20220119025852.898224-1-weix.ling@intel.com", "date": "2022-01-19T02:58:52", "name": "[V1,3/3] tests/vhost_cbdma:modify test suite sync with test plan change", "commit_ref": null, "pull_url": null, "state": "superseded", "archived": false, "hash": "47391df76f7bb154a03fb4c0e3bcba36d39bc372", "submitter": { "id": 1828, "url": "http://patchwork.dpdk.org/api/people/1828/?format=api", "name": "Ling, WeiX", "email": "weix.ling@intel.com" }, "delegate": null, "mbox": "http://patchwork.dpdk.org/project/dts/patch/20220119025852.898224-1-weix.ling@intel.com/mbox/", "series": [ { "id": 21241, "url": "http://patchwork.dpdk.org/api/series/21241/?format=api", "web_url": "http://patchwork.dpdk.org/project/dts/list/?series=21241", "date": "2022-01-19T02:58:20", "name": "modify test plan to coverage more test point", "version": 1, "mbox": "http://patchwork.dpdk.org/series/21241/mbox/" } ], "comments": "http://patchwork.dpdk.org/api/patches/106048/comments/", "check": "fail", "checks": "http://patchwork.dpdk.org/api/patches/106048/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<dts-bounces@dpdk.org>", "X-Original-To": "patchwork@inbox.dpdk.org", "Delivered-To": "patchwork@inbox.dpdk.org", "Received": [ "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 18401A0350;\n\tWed, 19 Jan 2022 03:59:02 +0100 (CET)", "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 0C90B40DDB;\n\tWed, 19 Jan 2022 03:59:02 +0100 (CET)", "from mga04.intel.com (mga04.intel.com [192.55.52.120])\n by mails.dpdk.org (Postfix) with ESMTP id A5EE24013F\n for <dts@dpdk.org>; Wed, 19 Jan 2022 03:58:59 +0100 (CET)", "from orsmga004.jf.intel.com ([10.7.209.38])\n by fmsmga104.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 18 Jan 2022 18:58:58 -0800", "from unknown (HELO localhost.localdomain) ([10.239.251.222])\n by orsmga004-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 18 Jan 2022 18:58:56 -0800" ], "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1642561139; x=1674097139;\n h=from:to:cc:subject:date:message-id:mime-version:\n content-transfer-encoding;\n bh=LBb1G49yLFmshOGehpPh2tLrDfc8ltBO/pfdG1uueJc=;\n b=n059A1/cbtJiAsE4acYZ1Jh7La4k0oW1cR8U+EAUfM5suTjG2TkYVKTF\n AzSJNQ164VTlDg2xLPIyrwh4K0Itda5jWGLMPIaniMcpPH44C+Pv37QUN\n POvz98n7Vbfps38RjdXm5xJ+xXVP0+HFhRpKXv1xdGL/nJBV+tgbK/5EH\n xs7I+2fnwEAQCN0PA+Eri/kpyf78exFQcoT7HUJXiBU/Nen4zWoXJFnvc\n 5M3/bnBBICoRiXbyWQaweyJV4y4vwo2NxCbde3iwgueyRX+WcHyen3gop\n DDk4Wm6l2CGTn5IuKjgmpzlNbd+ZlZoBuLCm7hgndPIc3RfYiwH/1N01/ g==;", "X-IronPort-AV": [ "E=McAfee;i=\"6200,9189,10231\"; a=\"243791100\"", "E=Sophos;i=\"5.88,298,1635231600\"; d=\"scan'208\";a=\"243791100\"", "E=Sophos;i=\"5.88,298,1635231600\"; d=\"scan'208\";a=\"625726340\"" ], 
"From": "Wei Ling <weix.ling@intel.com>", "To": "dts@dpdk.org", "Cc": "Wei Ling <weix.ling@intel.com>", "Subject": "[dts][PATCH V1 3/3] tests/vhost_cbdma:modify test suite sync with\n test plan change", "Date": "Wed, 19 Jan 2022 10:58:52 +0800", "Message-Id": "<20220119025852.898224-1-weix.ling@intel.com>", "X-Mailer": "git-send-email 2.25.1", "MIME-Version": "1.0", "Content-Transfer-Encoding": "8bit", "X-BeenThere": "dts@dpdk.org", "X-Mailman-Version": "2.1.29", "Precedence": "list", "List-Id": "test suite reviews and discussions <dts.dpdk.org>", "List-Unsubscribe": "<https://mails.dpdk.org/options/dts>,\n <mailto:dts-request@dpdk.org?subject=unsubscribe>", "List-Archive": "<http://mails.dpdk.org/archives/dts/>", "List-Post": "<mailto:dts@dpdk.org>", "List-Help": "<mailto:dts-request@dpdk.org?subject=help>", "List-Subscribe": "<https://mails.dpdk.org/listinfo/dts>,\n <mailto:dts-request@dpdk.org?subject=subscribe>", "Errors-To": "dts-bounces@dpdk.org" }, "content": "Modify and add test suite sync with test plan change.\n\nSigned-off-by: Wei Ling <weix.ling@intel.com>\n---\n tests/TestSuite_vhost_cbdma.py | 488 ++++++++++++++++++---------------\n 1 file changed, 273 insertions(+), 215 deletions(-)", "diff": "diff --git a/tests/TestSuite_vhost_cbdma.py b/tests/TestSuite_vhost_cbdma.py\nindex c0cc772d..f5ae1289 100644\n--- a/tests/TestSuite_vhost_cbdma.py\n+++ b/tests/TestSuite_vhost_cbdma.py\n@@ -167,32 +167,29 @@ class TestVirTioVhostCbdma(TestCase):\n self.verify(int(self.result_first[0]) > 1 and int(self.result_secondary[0]) > 1, \"forward packets no correctly\")\n \n @property\n- def check_2m_env(self):\n+ def check_2M_env(self):\n out = self.dut.send_expect(\"cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'\", \"# \")\n return True if out == '2048' else False\n \n- def launch_testpmd_as_vhost_user(self, command, cores=\"Default\", dev=\"\", ports = \"\"):\n- self.pmdout_vhost_user.start_testpmd(cores=cores, param=command, vdevs=[dev], ports=ports, prefix=\"vhost\")\n- self.vhost_user.send_expect('set fwd mac', 'testpmd> ', 120)\n- self.vhost_user.send_expect('start', 'testpmd> ', 120)\n-\n- def launch_testpmd_as_virtio_user1(self, command, cores=\"Default\", dev=\"\"):\n- eal_params = \"\"\n- if self.check_2m_env:\n- eal_params += \" --single-file-segments\"\n- self.pmdout_virtio_user1.start_testpmd(cores, command, vdevs=[dev], no_pci=True, prefix=\"virtio1\", eal_param=eal_params)\n- self.virtio_user1.send_expect('set fwd mac', 'testpmd> ', 30)\n- self.virtio_user1.send_expect('start', 'testpmd> ', 30)\n- self.virtio_user1.send_expect('show port info all', 'testpmd> ', 30)\n-\n- def launch_testpmd_as_virtio_user(self, command, cores=\"Default\", dev=\"\"):\n+ def launch_testpmd_as_vhost_user(self, command, cores=\"Default\", dev=\"\", ports = \"\", iova_mode='pa', set_pmd_param=True):\n+ if iova_mode:\n+ iova_parm = \"--iova=\" + iova_mode\n+ else:\n+ iova_parm = \"\"\n+ self.pmdout_vhost_user.start_testpmd(cores=cores, param=command, vdevs=[dev], ports=ports, prefix=\"vhost\", eal_param=iova_parm)\n+ if set_pmd_param:\n+ self.vhost_user.send_expect('set fwd mac', 'testpmd> ', 30)\n+ self.vhost_user.send_expect('start', 'testpmd> ', 30)\n+\n+ def launch_testpmd_as_virtio_user(self, command, cores=\"Default\", dev=\"\", set_pmd_param=True):\n eal_params = \"\"\n- if self.check_2m_env:\n+ if self.check_2M_env:\n eal_params += \" --single-file-segments\"\n- self.pmdout_virtio_user.start_testpmd(cores, command, vdevs=[dev],no_pci=True, prefix=\"virtio\", eal_param=eal_params)\n- 
self.virtio_user.send_expect('set fwd mac', 'testpmd> ', 120)\n- self.virtio_user.send_expect('start', 'testpmd> ', 120)\n- self.virtio_user.send_expect('show port info all', 'testpmd> ', 30)\n+ self.pmdout_virtio_user.start_testpmd(cores, command, vdevs=[dev], no_pci=True, prefix=\"virtio\", eal_param=eal_params)\n+ if set_pmd_param:\n+ self.virtio_user.send_expect('set fwd mac', 'testpmd> ', 30)\n+ self.virtio_user.send_expect('start', 'testpmd> ', 30)\n+ self.virtio_user.send_expect('show port info all', 'testpmd> ', 30)\n \n def diff_param_launch_send_and_verify(self, mode, params, dev, cores, is_quit=True, launch_virtio=True):\n if launch_virtio:\n@@ -202,35 +199,37 @@ class TestVirTioVhostCbdma(TestCase):\n self.virtio_user.send_expect(\"quit\", \"# \")\n time.sleep(3)\n \n- def test_perf_pvp_spilt_all_path_with_cbdma_vhost_enqueue_operations(self):\n+ def test_perf_pvp_spilt_ring_all_path_vhost_enqueue_operations_with_cbdma(self):\n \"\"\"\n- Test Case 1: PVP Split all path with DMA-accelerated vhost enqueue\n+ Test Case 1: PVP split ring all path vhost enqueue operations with cbdma\n \"\"\"\n self.test_target = self.running_case\n self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]\n txd_rxd = 1024\n- eal_tx_rxd = ' --nb-cores=%d --txd=%d --rxd=%d'\n- queue = 1\n- used_cbdma_num = 1\n- self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)\n+ vhost_param = ' --nb-cores=%d --txd=%d --rxd=%d'\n+ nb_cores = 1\n+ queues = 1\n+ self.get_cbdma_ports_info_and_bind_to_dpdk(1)\n vhost_vdevs = f\"'net_vhost0,iface=/tmp/s0,queues=%d,dmas=[txq0@{self.device_str}]'\"\n- dev_path_mode_mapper = {\n- \"inorder_mergeable_path\": 'mrg_rxbuf=1,in_order=1',\n- \"mergeable_path\": 'mrg_rxbuf=1,in_order=0',\n- \"inorder_non_mergeable_path\": 'mrg_rxbuf=0,in_order=1',\n- \"non_mergeable_path\": 'mrg_rxbuf=0,in_order=0',\n- \"vector_rx_path\": 'mrg_rxbuf=0,in_order=0',\n+ virtio_path_dict_VA = {\n+ \"inorder_mergeable_path_VA\": 'mrg_rxbuf=1,in_order=1',\n+ \"mergeable_path_VA\": 'mrg_rxbuf=1,in_order=0',\n+ \"inorder_non_mergeable_path_VA\": 'mrg_rxbuf=0,in_order=1',\n+ \"non_mergeable_path_VA\": 'mrg_rxbuf=0,in_order=0',\n+ \"vector_rx_path_VA\": 'mrg_rxbuf=0,in_order=0,vectorized=1',\n }\n- pvp_split_all_path_virtio_params = \"--tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=%d --txd=%d --rxd=%d\" % (queue, txd_rxd, txd_rxd)\n allow_pci = [self.dut.ports_info[0]['pci']]\n- for index in range(used_cbdma_num):\n+ for index in range(1):\n allow_pci.append(self.cbdma_dev_infos[index])\n- self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2], dev=vhost_vdevs % (queue), ports=allow_pci)\n- for key, path_mode in dev_path_mode_mapper.items():\n- if key == \"vector_rx_path\":\n- pvp_split_all_path_virtio_params = eal_tx_rxd % (queue, txd_rxd, txd_rxd)\n- vdevs = f\"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'\" % queue\n- self.diff_param_launch_send_and_verify(key, pvp_split_all_path_virtio_params, vdevs, self.cores[2:4], is_quit=False)\n+ self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores, txd_rxd, txd_rxd), self.cores[0:2],\n+ dev=vhost_vdevs % (nb_cores), ports=allow_pci, iova_mode='va')\n+ for key, path_mode in virtio_path_dict_VA.items():\n+ if key == \"non_mergeable_path_VA\":\n+ virtio_param = \" --enable-hw-vlan-strip --nb-cores=%d --txd=%d --rxd=%d\" % (nb_cores, txd_rxd, txd_rxd)\n+ else:\n+ virtio_param = \" --nb-cores=%d --txd=%d --rxd=%d\" % (nb_cores, txd_rxd, txd_rxd)\n+ vdevs = 
f\"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'\" % nb_cores\n+ self.diff_param_launch_send_and_verify(key, virtio_param, vdevs, self.cores[2:4], is_quit=False, launch_virtio=True)\n self.mode_list.append(key)\n # step3 restart vhost port, then check throughput again\n key += \"_RestartVhost\"\n@@ -239,99 +238,133 @@ class TestVirTioVhostCbdma(TestCase):\n self.vhost_user.send_expect('start', 'testpmd> ', 10)\n self.vhost_user.send_expect('show port info all', 'testpmd> ', 30)\n self.vhost_user.send_expect('show port stats all', 'testpmd> ', 10)\n- self.diff_param_launch_send_and_verify(key, pvp_split_all_path_virtio_params, vdevs,\n- self.cores[2:4], launch_virtio=False)\n+ self.diff_param_launch_send_and_verify(key, virtio_param, vdevs, self.cores[2:4], is_quit=True, launch_virtio=False)\n self.mode_list.append(key)\n+ # step9 quit all testpma and re-run with PA mode\n+ virtio_path_dict_PA = {\n+ \"inorder_mergeable_path_PA\": 'mrg_rxbuf=1,in_order=1',\n+ \"mergeable_path_PA\": 'mrg_rxbuf=1,in_order=0',\n+ \"inorder_non_mergeable_path_PA\": 'mrg_rxbuf=0,in_order=1',\n+ \"non_mergeable_path_PA\": 'mrg_rxbuf=0,in_order=0',\n+ \"vector_rx_path_PA\": 'mrg_rxbuf=0,in_order=0,vectorized=1',\n+ }\n self.vhost_user.send_expect(\"quit\", \"# \")\n+ self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores, txd_rxd, txd_rxd), self.cores[0:2],\n+ dev=vhost_vdevs % (nb_cores), ports=allow_pci, iova_mode='pa')\n+ for key, path_mode in virtio_path_dict_PA.items():\n+ if key == \"non_mergeable_path_PA\":\n+ virtio_param = \" --enable-hw-vlan-strip --nb-cores=%d --txd=%d --rxd=%d\" % (nb_cores, txd_rxd, txd_rxd)\n+ else:\n+ virtio_param = \" --nb-cores=%d --txd=%d --rxd=%d\" % (nb_cores, txd_rxd, txd_rxd)\n+ vdevs = f\"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'\" % queues\n+ self.diff_param_launch_send_and_verify(key, virtio_param, vdevs, self.cores[2:4], is_quit=False,\n+ launch_virtio=True)\n+ self.mode_list.append(key)\n+ # step3 restart vhost port, then check throughput again\n+ key += \"_RestartVhost\"\n+ self.vhost_user.send_expect('show port stats all', 'testpmd> ', 10)\n+ self.vhost_user.send_expect('stop', 'testpmd> ', 10)\n+ self.vhost_user.send_expect('start', 'testpmd> ', 10)\n+ self.vhost_user.send_expect('show port info all', 'testpmd> ', 30)\n+ self.vhost_user.send_expect('show port stats all', 'testpmd> ', 10)\n+ self.diff_param_launch_send_and_verify(key, virtio_param, vdevs, self.cores[2:4], is_quit=True,\n+ launch_virtio=False)\n+ self.mode_list.append(key)\n+\n self.result_table_print()\n self.handle_expected(mode_list=self.mode_list)\n self.handle_results(mode_list=self.mode_list)\n \n- def test_perf_dynamic_queue_number_cbdma_vhost_enqueue_operations(self):\n+ def test_perf_pvp_spilt_ring_all_dynamic_queue_number_vhost_enqueue_operations_with_cbdma(self):\n \"\"\"\n- Test Case2: Split ring dynamic queue number test for DMA-accelerated vhost Tx operations\n+ Test Case2: PVP split ring dynamic queue number vhost enqueue operations with cbdma\n \"\"\"\n self.test_target = self.running_case\n self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]\n- used_cbdma_num = 8\n- queue = 8\n- txd_rxd = 1024\n nb_cores = 1\n+ txd_rxd = 1024\n+ queues = 8\n virtio_path = \"/tmp/s0\"\n path_mode = 'mrg_rxbuf=1,in_order=1'\n- self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)\n- eal_params = \" --nb-cores=1 --txd=1024 --rxd=1024 --txq=%d --rxq=%d \"\n- dynamic_queue_number_cbdma_virtio_params = 
f\" --tx-offloads=0x0 --enable-hw-vlan-strip {eal_params % (queue,queue)}\"\n- virtio_dev = f\"net_virtio_user0,mac={self.virtio_mac},path={virtio_path},{path_mode},queues={queue},server=1\"\n+ self.get_cbdma_ports_info_and_bind_to_dpdk(8)\n+ vhost_param = \" --nb-cores=%d --txd=%d --rxd=%d --txq=%d --rxq=%d \"\n+ virtio_param = \" --nb-cores=%d --txd=%d --rxd=%d --txq=%d --rxq=%d \"\n vhost_dev = f\"'net_vhost0,iface={virtio_path},queues=%d,client=1,%s'\"\n- # launch vhost testpmd\n+ virtio_dev = f\"net_virtio_user0,mac={self.virtio_mac},path={virtio_path},{path_mode},queues={queues},server=1\"\n allow_pci = [self.dut.ports_info[0]['pci']]\n- for index in range(used_cbdma_num):\n+ for index in range(8):\n allow_pci.append(self.cbdma_dev_infos[index])\n-\n- # no cbdma to launch vhost\n- self.launch_testpmd_as_vhost_user(eal_params % (queue,queue), self.cores[0:2], dev=vhost_dev % (queue,''), ports=[allow_pci[0]])\n- mode = \"no_cbdma\"\n- self.mode_list.append(mode)\n- self.launch_testpmd_as_virtio_user(dynamic_queue_number_cbdma_virtio_params, self.cores[2:4], dev=virtio_dev)\n- self.send_and_verify(mode, queue_list=range(queue))\n+ # without cbdma to launch vhost\n+ self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores, txd_rxd, txd_rxd, queues, queues), self.cores[0:2],\n+ dev=vhost_dev % (queues,''), ports=[allow_pci[0]], iova_mode='va')\n+ self.mode_list.append(\"with_0_cbdma\")\n+ self.launch_testpmd_as_virtio_user(virtio_param % (nb_cores, txd_rxd, txd_rxd, queues, queues), self.cores[2:4],\n+ dev=virtio_dev)\n+ self.send_and_verify(\"with_0_cbdma\", queue_list=range(queues))\n+\n+ # with 4 cbdma and 4 queue and VA mode to launch vhost\n self.vhost_user.send_expect(\"quit\", \"#\")\n+ vhost_dmas = f\"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]}]\"\n+ self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores, txd_rxd, txd_rxd, queues, queues), self.cores[0:2],\n+ dev=vhost_dev % (queues, vhost_dmas), ports=allow_pci[:5], iova_mode='va')\n+ self.mode_list.append(\"with_4_cbdma\")\n+ self.send_and_verify(\"with_4_cbdma\", queue_list=range(int(queues/2)))\n \n- # used 4 cbdma_num and 4 queue to launch vhost\n+ #with 8 cbdma and 8 queue and VA mode to launch vhost\n+ self.vhost_user.send_expect(\"quit\", \"#\")\n+ vhost_dmas = f\"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]};txq4@{self.used_cbdma[4]};txq5@{self.used_cbdma[5]};txq6@{self.used_cbdma[6]};txq7@{self.used_cbdma[7]}]\"\n+ self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores, txd_rxd, txd_rxd, queues, queues), self.cores[0:2],\n+ dev=vhost_dev % (queues, vhost_dmas), ports=allow_pci, iova_mode='va')\n+ self.mode_list.append(\"with_8_cbdma\")\n+ self.send_and_verify(\"with_8_cbdma\", queue_list=range(queues))\n \n- vhost_dmas = f\"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]}]\"\n- self.launch_testpmd_as_vhost_user(eal_params % (queue/2,queue/2), self.cores[0:2], dev=vhost_dev % (int(queue/2),vhost_dmas), ports=allow_pci[:5])\n- self.send_and_verify(\"used_4_cbdma_num\", queue_list=range(int(queue/2)))\n- self.mode_list.append(\"used_4_cbdma_num\")\n+ # with 6 cbdma and 2 without cbdma and PA mode to launch vhost\n self.vhost_user.send_expect(\"quit\", \"#\")\n+ vhost_dmas = 
f\"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]};txq4@{self.used_cbdma[4]};txq5@{self.used_cbdma[5]}]\"\n+ self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores, txd_rxd, txd_rxd, queues, queues), self.cores[0:2],\n+ dev=vhost_dev % (queues, vhost_dmas), ports=allow_pci, iova_mode='pa')\n+ self.mode_list.append(\"with_6_cbdma\")\n+ self.send_and_verify(\"with_6_cbdma\", queue_list=range(queues))\n \n- #used 8 cbdma_num to launch vhost\n- vhost_dmas = f\"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]};txq4@{self.used_cbdma[4]};txq5@{self.used_cbdma[5]};txq6@{self.used_cbdma[6]};txq7@{self.used_cbdma[7]}]\"\n- self.launch_testpmd_as_vhost_user(eal_params % (queue, queue), self.cores[0:2],\n- dev=vhost_dev % (queue,vhost_dmas), ports=allow_pci)\n- self.send_and_verify(\"used_8_cbdma_num\", queue_list=range(queue))\n- self.mode_list.append(\"used_8_cbdma_num\")\n- self.send_and_verify(\"used_8_cbdma_num_1\", queue_list=range(queue))\n- self.mode_list.append(\"used_8_cbdma_num_1\")\n- self.virtio_user.send_expect(\"stop\", \"testpmd> \", 60)\n- time.sleep(5)\n self.virtio_user.send_expect(\"quit\", \"# \")\n self.vhost_user.send_expect(\"quit\", \"# \")\n self.result_table_print()\n- # result_rows = [[], [64, 'dynamic_queue2', 7.4959375, 12.593175], [1518, 'dynamic_queue2', 1.91900225, 59.028509209999996]]\n- result_rows = self.result_table_getrows() #\n self.handle_expected(mode_list=self.mode_list)\n self.handle_results(mode_list=self.mode_list)\n \n- def test_perf_pvp_packed_all_path_with_cbdma_vhost_enqueue_operations(self):\n+ def test_perf_pvp_packed_ring_all_path_vhost_enqueue_operations_with_cbdma(self):\n \"\"\"\n- Test Case 3: PVP packed ring all path with DMA-accelerated vhost enqueue\n+ Test Case 3: PVP packed ring all path vhost enqueue operations with cbdma\n \"\"\"\n self.test_target = self.running_case\n self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]\n txd_rxd = 1024\n- eal_tx_rxd = ' --nb-cores=%d --txd=%d --rxd=%d'\n- queue = 1\n- used_cbdma_num = 1\n- self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)\n+ vhost_param = ' --nb-cores=%d --txd=%d --rxd=%d'\n+ nb_cores = 1\n+ queues = 1\n+ self.get_cbdma_ports_info_and_bind_to_dpdk(1)\n vhost_vdevs = f\"'net_vhost0,iface=/tmp/s0,queues=%d,dmas=[txq0@{self.device_str}]'\"\n- dev_path_mode_mapper = {\n- \"inorder_mergeable_path\": 'mrg_rxbuf=1,in_order=1,packed_vq=1',\n- \"mergeable_path\": 'mrg_rxbuf=1,in_order=0,packed_vq=1',\n- \"inorder_non_mergeable_path\": 'mrg_rxbuf=0,in_order=1,packed_vq=1',\n- \"non_mergeable_path\": 'mrg_rxbuf=0,in_order=0,packed_vq=1',\n- \"vector_rx_path\": 'mrg_rxbuf=0,in_order=0,packed_vq=1',\n+ virtio_path_dict_VA = {\n+ \"inorder_mergeable_path_VA\": 'mrg_rxbuf=1,in_order=1,packed_vq=1',\n+ \"mergeable_path_VA\": 'mrg_rxbuf=1,in_order=0,packed_vq=1',\n+ \"inorder_non_mergeable_path_VA\": 'mrg_rxbuf=0,in_order=1,packed_vq=1',\n+ \"non_mergeable_path_VA\": 'mrg_rxbuf=0,in_order=0,packed_vq=1',\n+ \"vector_rx_path_VA\": 'mrg_rxbuf=0,in_order=1,vectorized=1,packed_vq=1',\n+ \"vector_rx_path_not_power_of_2_VA\": 'mrg_rxbuf=0,in_order=1,vectorized=1,packed_vq=1,queue_size=1025'\n }\n- pvp_split_all_path_virtio_params = \"--tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=%d --txd=%d --rxd=%d\" % (queue, txd_rxd, txd_rxd)\n allow_pci = [self.dut.ports_info[0]['pci']]\n- for index in range(used_cbdma_num):\n+ for index in 
range(1):\n allow_pci.append(self.cbdma_dev_infos[index])\n- self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2], dev=vhost_vdevs % (queue), ports=allow_pci)\n- for key, path_mode in dev_path_mode_mapper.items():\n- if key == \"vector_rx_path\":\n- pvp_split_all_path_virtio_params = eal_tx_rxd % (queue, txd_rxd, txd_rxd)\n- vdevs = f\"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'\" % queue\n- self.diff_param_launch_send_and_verify(key, pvp_split_all_path_virtio_params, vdevs, self.cores[2:4], is_quit=False)\n+ self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores, txd_rxd, txd_rxd), self.cores[0:2],\n+ dev=vhost_vdevs % (nb_cores), ports=allow_pci, iova_mode='va')\n+ for key, path_mode in virtio_path_dict_VA.items():\n+ if key == \"vector_rx_path_not_power_of_2_VA\":\n+ virtio_param = \" --nb-cores=%d --txd=%d --rxd=%d\" % (nb_cores, (txd_rxd + 1), (txd_rxd + 1))\n+ else:\n+ virtio_param = \" --nb-cores=%d --txd=%d --rxd=%d\" % (nb_cores, txd_rxd, txd_rxd)\n+ vdevs = f\"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'\" % queues\n+ self.diff_param_launch_send_and_verify(key, virtio_param, vdevs, self.cores[2:4], is_quit=False,\n+ launch_virtio=True)\n self.mode_list.append(key)\n # step3 restart vhost port, then check throughput again\n key += \"_RestartVhost\"\n@@ -340,153 +373,180 @@ class TestVirTioVhostCbdma(TestCase):\n self.vhost_user.send_expect('start', 'testpmd> ', 10)\n self.vhost_user.send_expect('show port info all', 'testpmd> ', 30)\n self.vhost_user.send_expect('show port stats all', 'testpmd> ', 10)\n- self.diff_param_launch_send_and_verify(key, pvp_split_all_path_virtio_params, vdevs,\n- self.cores[2:4], launch_virtio=False)\n+ self.diff_param_launch_send_and_verify(key, virtio_param, vdevs, self.cores[2:4], is_quit=True,\n+ launch_virtio=False)\n self.mode_list.append(key)\n+ # step9 quit all testpma and re-run with PA mode\n+ virtio_path_dict_PA = {\n+ \"inorder_mergeable_path_PA\": 'mrg_rxbuf=1,in_order=1,packed_vq=1',\n+ \"mergeable_path_PA\": 'mrg_rxbuf=1,in_order=0,packed_vq=1',\n+ \"inorder_non_mergeable_path_PA\": 'mrg_rxbuf=0,in_order=1,packed_vq=1',\n+ \"non_mergeable_path_PA\": 'mrg_rxbuf=0,in_order=0,packed_vq=1',\n+ \"vector_rx_path_PA\": 'mrg_rxbuf=0,in_order=0,vectorized=1,packed_vq=1',\n+ \"vector_rx_path_not_power_of_2_PA\": 'mrg_rxbuf=0,in_order=0,vectorized=1,packed_vq=1,queue_size=1025'\n+ }\n self.vhost_user.send_expect(\"quit\", \"# \")\n+ self.launch_testpmd_as_vhost_user(vhost_param % (queues, txd_rxd, txd_rxd), self.cores[0:2],\n+ dev=vhost_vdevs % (queues), ports=allow_pci, iova_mode='pa')\n+ for key, path_mode in virtio_path_dict_PA.items():\n+ if key == \"vector_rx_path_not_power_of_2_VA\":\n+ virtio_param = \" --nb-cores=%d --txd=%d --rxd=%d\" % (nb_cores, (txd_rxd + 1), (txd_rxd + 1))\n+ else:\n+ virtio_param = \" --nb-cores=%d --txd=%d --rxd=%d\" % (nb_cores, txd_rxd, txd_rxd)\n+ vdevs = f\"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'\" % queues\n+ self.diff_param_launch_send_and_verify(key, virtio_param, vdevs, self.cores[2:4], is_quit=False,\n+ launch_virtio=True)\n+ self.mode_list.append(key)\n+ # step3 restart vhost port, then check throughput again\n+ key += \"_RestartVhost\"\n+ self.vhost_user.send_expect('show port stats all', 'testpmd> ', 10)\n+ self.vhost_user.send_expect('stop', 'testpmd> ', 10)\n+ self.vhost_user.send_expect('start', 'testpmd> ', 10)\n+ self.vhost_user.send_expect('show port info all', 
'testpmd> ', 30)\n+ self.vhost_user.send_expect('show port stats all', 'testpmd> ', 10)\n+ self.diff_param_launch_send_and_verify(key, virtio_param, vdevs, self.cores[2:4], is_quit=True,\n+ launch_virtio=False)\n+ self.mode_list.append(key)\n+\n self.result_table_print()\n self.handle_expected(mode_list=self.mode_list)\n self.handle_results(mode_list=self.mode_list)\n \n- def test_perf_packed_dynamic_queue_number_cbdma_vhost_enqueue_operations(self):\n+ def test_perf_pvp_packed_ring_all_dynamic_queue_number_vhost_enqueue_operations_with_cbdma(self):\n \"\"\"\n- Test Case4: Packed ring dynamic queue number test for DMA-accelerated vhost Tx operations\n+ Test Case 4: PVP packed ring dynamic queue number vhost enqueue operations with cbdma\n \"\"\"\n self.test_target = self.running_case\n self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]\n- used_cbdma_num = 8\n- queue = 8\n- txd_rxd = 1024\n nb_cores = 1\n+ txd_rxd = 1024\n+ queues = 8\n virtio_path = \"/tmp/s0\"\n path_mode = 'mrg_rxbuf=1,in_order=1,packed_vq=1'\n- self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)\n- vhost_dmas = f\"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]}]\"\n- eal_params = \" --nb-cores=1 --txd=1024 --rxd=1024 --txq=%d --rxq=%d \"\n- dynamic_queue_number_cbdma_virtio_params = f\" --tx-offloads=0x0 --enable-hw-vlan-strip {eal_params % (queue, queue)}\"\n- virtio_dev = f\"net_virtio_user0,mac={self.virtio_mac},path={virtio_path},{path_mode},queues={queue},server=1\"\n- vhost_dev = f\"'net_vhost0,iface={virtio_path},queues=%s,client=1,%s'\"\n- # launch vhost testpmd\n+ self.get_cbdma_ports_info_and_bind_to_dpdk(8)\n+ vhost_param = \" --nb-cores=%d --txd=%d --rxd=%d --txq=%d --rxq=%d \"\n+ virtio_param = \" --nb-cores=%d --txd=%d --rxd=%d --txq=%d --rxq=%d \"\n+ vhost_dev = f\"'net_vhost0,iface={virtio_path},queues=%d,client=1,%s'\"\n+ virtio_dev = f\"net_virtio_user0,mac={self.virtio_mac},path={virtio_path},{path_mode},queues={queues},server=1\"\n allow_pci = [self.dut.ports_info[0]['pci']]\n- for index in range(used_cbdma_num):\n+ for index in range(8):\n allow_pci.append(self.cbdma_dev_infos[index])\n+ # without cbdma to launch vhost\n+ self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores, txd_rxd, txd_rxd, queues, queues), self.cores[0:2],\n+ dev=vhost_dev % (queues, ''), ports=[allow_pci[0]], iova_mode='va')\n+ self.mode_list.append(\"with_0_cbdma\")\n+ self.launch_testpmd_as_virtio_user(virtio_param % (nb_cores, txd_rxd, txd_rxd, queues, queues), self.cores[2:4],\n+ dev=virtio_dev)\n+ self.send_and_verify(\"with_0_cbdma\", queue_list=range(queues))\n+\n+ # with 4 cbdma and 4 queue and VA mode to launch vhost\n+ self.vhost_user.send_expect(\"quit\", \"#\")\n+ vhost_dmas = f\"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]}]\"\n+ self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores, txd_rxd, txd_rxd, queues, queues), self.cores[0:2],\n+ dev=vhost_dev % (queues, vhost_dmas), ports=allow_pci[:5], iova_mode='va')\n+ self.mode_list.append(\"with_4_cbdma\")\n+ self.send_and_verify(\"with_4_cbdma\", queue_list=range(int(queues / 2)))\n \n- # no cbdma to launch vhost\n- self.launch_testpmd_as_vhost_user(eal_params % (queue,queue), self.cores[0:2], dev=vhost_dev % (queue,''), ports= [allow_pci[0]])\n- mode = \"no_cbdma\"\n- self.mode_list.append(mode)\n- self.launch_testpmd_as_virtio_user(dynamic_queue_number_cbdma_virtio_params, self.cores[2:4], dev=virtio_dev)\n- self.send_and_verify(mode, 
queue_list=range(queue))\n+ # with 8 cbdma and 8 queue and VA mode to launch vhost\n self.vhost_user.send_expect(\"quit\", \"#\")\n+ vhost_dmas = f\"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]};txq4@{self.used_cbdma[4]};txq5@{self.used_cbdma[5]};txq6@{self.used_cbdma[6]};txq7@{self.used_cbdma[7]}]\"\n+ self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores, txd_rxd, txd_rxd, queues, queues), self.cores[0:2],\n+ dev=vhost_dev % (queues, vhost_dmas), ports=allow_pci, iova_mode='va')\n+ self.mode_list.append(\"with_8_cbdma\")\n+ self.send_and_verify(\"with_8_cbdma\", queue_list=range(queues))\n \n- # used 4 cbdma_num and 4 queue to launch vhost\n- vhost_dmas = f\"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]}]\"\n- self.launch_testpmd_as_vhost_user(eal_params % (queue/2,queue/2), self.cores[0:2],\n- dev=vhost_dev % (int(queue/2),vhost_dmas), ports=allow_pci[:5])\n- self.send_and_verify(\"used_4_cbdma_num\", queue_list=range(int(queue/2)))\n- self.mode_list.append(\"used_4_cbdma_num\")\n+ # with 6 cbdma and 2 without cbdma and PA mode to launch vhost\n self.vhost_user.send_expect(\"quit\", \"#\")\n+ vhost_dmas = f\"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]};txq4@{self.used_cbdma[4]};txq5@{self.used_cbdma[5]}]\"\n+ self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores, txd_rxd, txd_rxd, queues, queues), self.cores[0:2],\n+ dev=vhost_dev % (queues, vhost_dmas), ports=allow_pci, iova_mode='pa')\n+ self.mode_list.append(\"with_6_cbdma\")\n+ self.send_and_verify(\"with_6_cbdma\", queue_list=range(queues))\n \n- #used 8 cbdma_num to launch vhost\n- vhost_dmas = f\"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]};txq4@{self.used_cbdma[4]};txq5@{self.used_cbdma[5]};txq6@{self.used_cbdma[6]};txq7@{self.used_cbdma[7]}]\"\n- self.launch_testpmd_as_vhost_user(eal_params % (queue, queue), self.cores[0:2],\n- dev=vhost_dev % (queue,vhost_dmas), ports=allow_pci)\n- self.send_and_verify(\"used_8_cbdma_num\", queue_list=range(queue))\n- self.mode_list.append(\"used_8_cbdma_num\")\n- self.send_and_verify(\"used_8_cbdma_num_1\", queue_list=range(queue))\n- self.mode_list.append(\"used_8_cbdma_num_1\")\n- self.virtio_user.send_expect(\"stop\", \"testpmd> \", 60)\n- time.sleep(5)\n self.virtio_user.send_expect(\"quit\", \"# \")\n self.vhost_user.send_expect(\"quit\", \"# \")\n self.result_table_print()\n- # result_rows = [[], [64, 'dynamic_queue2', 7.4959375, 12.593175], [1518, 'dynamic_queue2', 1.91900225, 59.028509209999996]]\n- result_rows = self.result_table_getrows() #\n self.handle_expected(mode_list=self.mode_list)\n self.handle_results(mode_list=self.mode_list)\n \n+ def send_chain_packets_and_verify(self):\n+ self.pmdout_virtio_user.execute_cmd(\"clear port stats all\")\n+ self.pmdout_virtio_user.execute_cmd(\"start\")\n+ self.pmdout_vhost_user.execute_cmd(\"vhost enable tx all\")\n+ self.pmdout_vhost_user.execute_cmd(\"set txpkts 65535,65535,65535,65535,65535\")\n+ self.pmdout_vhost_user.execute_cmd(\"start tx_first 32\")\n+ self.pmdout_vhost_user.execute_cmd(\"show port stats all\")\n+ out = self.pmdout_virtio_user.execute_cmd(\"show port stats all\")\n+ rx_pkts = int(re.search(\"RX-packets: (\\d+)\", out).group(1))\n+ self.verify(rx_pkts > 0, \"virtio-user can not received packets\")\n \n \n- def 
test_perf_compare_pvp_split_ring_performance(self):\n+ def test_loopback_split_ring_large_chain_packets_stress_test_with_cbdma_enqueue(self):\n \"\"\"\n- Test Case5: Compare PVP split ring performance between CPU copy, CBDMA copy and Sync copy\n+ Test Case5: loopback split ring large chain packets stress test with cbdma enqueue\n \"\"\"\n- used_cbdma_num = 1\n- queue = 1\n- txd_rxd = 1024\n- eal_tx_rxd = ' --nb-cores=%d --txd=%d --rxd=%d'\n- path_mode = 'mrg_rxbuf=1,in_order=1,server=1'\n- allow_pci = [self.dut.ports_info[0]['pci']]\n- self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)\n- for index in range(used_cbdma_num):\n+ nb_cores = 1\n+ queues = 1\n+ txd_rxd = 2048\n+ txq_rxq = 1\n+ virtio_path = \"/tmp/s0\"\n+ path_mode = 'mrg_rxbuf=1,in_order=0,vectorized=1,queue_size=2048'\n+ self.get_cbdma_ports_info_and_bind_to_dpdk(1)\n+ vhost_param = \" --nb-cores=%d --mbuf-size=65535\"\n+ virtio_param = \" --nb-cores=%d --txd=%d --rxd=%d --txq=%d --rxq=%d \"\n+ virtio_dev = f\"net_virtio_user0,mac={self.virtio_mac},path={virtio_path},{path_mode},queues=%d\"\n+ vhost_vdevs = f\"'net_vhost0,iface=/tmp/s0,queues=%d,dmas=[txq0@{self.device_str}]'\"\n+ allow_pci = []\n+ for index in range(1):\n allow_pci.append(self.cbdma_dev_infos[index])\n- path_mode = 'mrg_rxbuf=1,in_order=1'\n- vhost_vdevs = f\"'net_vhost0,iface=/tmp/s0,queues=%d,client=1,dmas=[txq0@{self.device_str}]'\"\n- compare_pvp_split_ring_performance = \"--tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=%d --txd=%d --rxd=%d\" % (queue, txd_rxd, txd_rxd)\n- dev_path_mode_mapper = {\n- \"sync_cbdma\": '',\n- \"cpu\": '',\n- }\n- for key in dev_path_mode_mapper.items():\n- if key == \"cpu\":\n- vhost_vdevs = f\"'net_vhost0,iface=/tmp/s0,queues=1'\"\n- self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2], dev=vhost_vdevs, ports=[allow_pci[0]])\n- vdevs = f\"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'\" % queue\n- self.launch_testpmd_as_virtio_user(compare_pvp_split_ring_performance, self.cores[2:4], dev=vdevs)\n- mode = \"cpu_copy_64\"\n- self.mode_list.append(mode)\n- self.send_and_verify(mode, frame_sizes=[64], pkt_length_mode='fixed')\n- perf_cpu_copy_64 = self.throughput[mode][64][self.nb_desc]\n- self.virtio_user.send_expect('show port stats all', 'testpmd> ', 10)\n- self.virtio_user.send_expect(\"quit\", \"# \")\n- self.vhost_user.send_expect(\"quit\", \"# \")\n- else:\n- self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2],dev=vhost_vdevs % (queue), ports=allow_pci)\n- vdevs = f\"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d,server=1'\" % queue\n- self.launch_testpmd_as_virtio_user(compare_pvp_split_ring_performance, self.cores[2:4],dev=vdevs)\n- mode = \"sync_copy_64\"\n- self.mode_list.append(mode)\n- self.send_and_verify(mode,frame_sizes=[64],pkt_length_mode='fixed')\n- perf_sync_copy_64 = self.throughput[mode][64][self.nb_desc]\n- mode = \"cbdma_copy_1518\"\n- self.mode_list.append(mode)\n- self.send_and_verify(mode,frame_sizes=[1518],pkt_length_mode='fixed')\n- perf_cbdma_copy_1518 = self.throughput[mode][1518][self.nb_desc]\n- self.virtio_user.send_expect('show port stats all', 'testpmd> ', 10)\n- self.vhost_user.send_expect(\"quit\", \"# \")\n- time.sleep(3)\n- self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2],dev=vhost_vdevs % (queue), ports=allow_pci)\n- mode = \"sync_copy_1518\"\n- self.mode_list.append(mode)\n- 
self.send_and_verify(mode,frame_sizes=[1518],pkt_length_mode='fixed')\n- perf_sync_copy_1518 = self.throughput[mode][1518][self.nb_desc]\n- self.check_port_stats_result(self.virtio_user)\n- self.virtio_user.send_expect(\"quit\", \"# \")\n- self.vhost_user.send_expect(\"quit\", \"# \")\n- self.result_table_print()\n- self.verify(abs(perf_sync_copy_64 - perf_cpu_copy_64)/perf_sync_copy_64 < 0.1, \"sync_copy_64 vs. cpu_copy_64 delta > 10%\" )\n- self.verify(abs(perf_cbdma_copy_1518 - perf_sync_copy_1518)/perf_sync_copy_1518 > 0.05,\"cbdma_copy_1518 vs sync_copy_1518 delta < 5%\")\n-\n- @staticmethod\n- def vhost_or_virtio_set_one_queue(session):\n- session.send_expect('stop', 'testpmd> ', 120)\n- session.send_expect('port stop all', 'testpmd> ', 120)\n- session.send_expect('port config all rxq 1', 'testpmd> ', 120)\n- session.send_expect('port config all txq 1', 'testpmd> ', 120)\n- session.send_expect('port start all', 'testpmd> ', 120)\n- session.send_expect('start', 'testpmd> ', 120)\n- session.send_expect('show port info all', 'testpmd> ', 30)\n- session.send_expect('show port stats all', 'testpmd> ', 120)\n- time.sleep(5)\n+ self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores), self.cores[0:2],\n+ dev=vhost_vdevs % (queues), ports=allow_pci, iova_mode='va', set_pmd_param=False)\n+ self.launch_testpmd_as_virtio_user(virtio_param % (nb_cores, txd_rxd, txd_rxd, txq_rxq, txq_rxq), self.cores[2:4],\n+ dev=virtio_dev % (queues), set_pmd_param=False)\n+ self.send_chain_packets_and_verify()\n+\n+ self.logger.info(\"Quit and relaunch vhost with PA mode\")\n+ self.pmdout_virtio_user.execute_cmd(\"quit\", \"#\")\n+ self.pmdout_vhost_user.execute_cmd(\"quit\", \"#\")\n+ self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores), self.cores[0:2],\n+ dev=vhost_vdevs % (queues), ports=allow_pci, iova_mode='pa', set_pmd_param=False)\n+ self.launch_testpmd_as_virtio_user(virtio_param % (nb_cores, txd_rxd, txd_rxd, txq_rxq, txq_rxq),self.cores[2:4],\n+ dev=virtio_dev % (queues), set_pmd_param=False)\n+ self.send_chain_packets_and_verify()\n+\n+ def test_loopback_packed_ring_large_chain_packets_stress_test_with_cbdma_enqueue(self):\n+ \"\"\"\n+ Test Case6: loopback packed ring large chain packets stress test with cbdma enqueue\n+ \"\"\"\n+ nb_cores = 1\n+ queues = 1\n+ txd_rxd = 2048\n+ txq_rxq = 1\n+ virtio_path = \"/tmp/s0\"\n+ path_mode = 'mrg_rxbuf=1,in_order=0,vectorized=1,packed_vq=1,queue_size=2048'\n+ self.get_cbdma_ports_info_and_bind_to_dpdk(1)\n+ vhost_param = \" --nb-cores=%d --mbuf-size=65535\"\n+ virtio_param = \" --nb-cores=%d --txd=%d --rxd=%d --txq=%d --rxq=%d \"\n+ virtio_dev = f\"net_virtio_user0,mac={self.virtio_mac},path={virtio_path},{path_mode},queues=%d\"\n+ vhost_vdevs = f\"'net_vhost0,iface=/tmp/s0,queues=%d,dmas=[txq0@{self.device_str}]'\"\n+ allow_pci = []\n+ for index in range(1):\n+ allow_pci.append(self.cbdma_dev_infos[index])\n+ self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores), self.cores[0:2],\n+ dev=vhost_vdevs % (queues), ports=allow_pci, iova_mode='va', set_pmd_param=False)\n+ self.launch_testpmd_as_virtio_user(virtio_param % (nb_cores, txd_rxd, txd_rxd, txq_rxq, txq_rxq), self.cores[2:4],\n+ dev=virtio_dev % (queues), set_pmd_param=False)\n+ self.send_chain_packets_and_verify()\n+\n+ self.logger.info(\"Quit and relaunch vhost with PA mode\")\n+ self.pmdout_virtio_user.execute_cmd(\"quit\", \"#\")\n+ self.pmdout_vhost_user.execute_cmd(\"quit\", \"#\")\n+ self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores), self.cores[0:2],\n+ dev=vhost_vdevs % 
(queues), ports=allow_pci, iova_mode='pa', set_pmd_param=False)\n+ self.launch_testpmd_as_virtio_user(virtio_param % (nb_cores, txd_rxd, txd_rxd, txq_rxq, txq_rxq),self.cores[2:4],\n+ dev=virtio_dev % (queues), set_pmd_param=False)\n+ self.send_chain_packets_and_verify()\n \n- @property\n- def check_value(self):\n- check_dict = dict.fromkeys(self.frame_sizes)\n- linerate = {64: 0.085, 128: 0.12, 256: 0.20, 512: 0.35, 1024: 0.50, 1280: 0.55, 1518: 0.60}\n- for size in self.frame_sizes:\n- speed = self.wirespeed(self.nic, size, self.number_of_ports)\n- check_dict[size] = round(speed * linerate[size], 2)\n- return check_dict\n \n def send_imix_and_verify(self, mode, multiple_queue=True, queue_list=[]):\n \"\"\"\n@@ -708,8 +768,6 @@ class TestVirTioVhostCbdma(TestCase):\n \"\"\"\n self.dut.send_expect(\"killall -I %s\" % self.testpmd_name, '#', 20)\n self.bind_cbdma_device_to_kernel()\n- if self.running_case == 'test_check_threshold_value_with_cbdma':\n- self.bind_nic_driver(self.dut_ports, self.drivername)\n \n def tear_down_all(self):\n \"\"\"\n", "prefixes": [ "V1", "3/3" ] }
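For context on one helper the diff touches: the renamed check_2M_env property reads the hugepage size on the DUT (via "cat /proc/meminfo |grep Hugepagesize") and, when it is 2048 kB, the suite appends --single-file-segments to the virtio-user testpmd EAL parameters. A rough standalone approximation of that logic is sketched below; the real suite issues the command over the DTS SSH session, so the local file read here is only an assumption for illustration:

def hugepage_size_kb() -> int:
    # /proc/meminfo reports e.g. "Hugepagesize:       2048 kB"
    with open("/proc/meminfo") as f:
        for line in f:
            if line.startswith("Hugepagesize:"):
                return int(line.split()[1])
    raise RuntimeError("Hugepagesize not found in /proc/meminfo")

def virtio_user_eal_extra_params() -> str:
    # Mirrors check_2M_env: 2 MB hugepages need --single-file-segments
    return " --single-file-segments" if hugepage_size_kb() == 2048 else ""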