From patchwork Wed Jan 19 02:58:29 2022
X-Patchwork-Submitter: "Ling, WeiX"
X-Patchwork-Id: 106046
From: Wei Ling
To: dts@dpdk.org
Cc: Wei Ling
Subject: [dts][PATCH V1 1/3] conf/vhost_cbdma: modify config to sync with test plan change
Date: Wed, 19 Jan 2022 10:58:29 +0800
Message-Id: <20220119025829.898108-1-weix.ling@intel.com>
List-Id: test suite reviews and discussions

Modify the config to sync with the test plan changes, such as the renamed case names.
Signed-off-by: Wei Ling
---
 conf/vhost_cbdma.cfg | 63 +++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 60 insertions(+), 3 deletions(-)

diff --git a/conf/vhost_cbdma.cfg b/conf/vhost_cbdma.cfg
index 66981031..dcb1504b 100644
--- a/conf/vhost_cbdma.cfg
+++ b/conf/vhost_cbdma.cfg
@@ -1,6 +1,63 @@
 [suite]
 update_expected = True
-test_parameters = {64: [1024], 1518: [1024]}
-test_duration = 60
+test_parameters = {'imix': [1024],}
+test_duration = 20
 accepted_tolerance = 2
-expected_throughput = {'test_perf_dynamic_queue_number_cbdma_vhost_enqueue_operations': {'dynamic_queue2': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'virtio_user_dynamic_queue2_change_to_1': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'Relaunch_dynamic_queue2': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'vhost_userRelaunch_dynamic_queue2_change_to_1': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'Relaunch_vhost_2_cbdma': {64: {1024: 0.00}, 1518: {1024: 0.00}}}, 'test_perf_pvp_spilt_all_path_with_cbdma_vhost_enqueue_operations': {'inorder_mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'inorder_mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'inorder_non_mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'inorder_non_mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'non_mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'non_mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'vector_rx_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'vector_rx_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}},'test_perf_packed_dynamic_queue_number_cbdma_vhost_enqueue_operations': {'dynamic_queue2': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'virtio_user_dynamic_queue2_change_to_1': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'Relaunch_dynamic_queue2': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'vhost_userRelaunch_dynamic_queue2_change_to_1': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'Relaunch_vhost_2_cbdma': {64: {1024: 0.00}, 1518: {1024: 0.00}}},'test_perf_pvp_packed_all_path_with_cbdma_vhost_enqueue_operations': {'inorder_mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'inorder_mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'inorder_non_mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'inorder_non_mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'non_mergeable_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'non_mergeable_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'vector_rx_path': {64: {1024: 0.00}, 1518: {1024: 0.00}}, 'vector_rx_path_RestartVhost': {64: {1024: 0.00}, 1518: {1024: 0.00}}},}
+expected_throughput = {
+    'test_perf_pvp_spilt_ring_all_path_vhost_enqueue_operations_with_cbdma':{
+        'inorder_mergeable_path_VA': {'imix': {1024: 0.0},},
+        'inorder_mergeable_path_VA_RestartVhost': {'imix': {1024: 0.0},},
+        'inorder_non_mergeable_path_VA': {'imix': {1024: 0.0},},
+        'inorder_non_mergeable_path_VA_RestartVhost': {'imix': {1024: 0.0},},
+        'mergeable_path_VA': {'imix': {1024: 0.0},},
+        'mergeable_path_VA_RestartVhost': {'imix': {1024: 0.0},},
+        'non_mergeable_path_VA': {'imix': {1024: 0.0},},
+        'non_mergeable_path_VA_RestartVhost': {'imix': {1024: 0.0},},
+        'vector_rx_path_VA': {'imix': {1024: 0.0},},
+        'vector_rx_path_VA_RestartVhost': {'imix': {1024: 0.0},},
+        'inorder_mergeable_path_PA': {'imix': {1024: 0.0},},
+        'inorder_mergeable_path_PA_RestartVhost': {'imix': {1024: 0.0},},
+        'inorder_non_mergeable_path_PA': {'imix': {1024: 0.0},},
+        'inorder_non_mergeable_path_PA_RestartVhost': {'imix': {1024: 0.0},},
+        'mergeable_path_PA': {'imix': {1024: 0.0},},
+        'mergeable_path_PA_RestartVhost': {'imix': {1024: 0.0},},
+        'non_mergeable_path_PA': {'imix': {1024: 0.0},},
+        'non_mergeable_path_PA_RestartVhost': {'imix': {1024: 0.0},},
+        'vector_rx_path_PA': {'imix': {1024: 0.0},},
+        'vector_rx_path_PA_RestartVhost': {'imix': {1024: 0.0},}},
+    'test_perf_pvp_spilt_ring_all_dynamic_queue_number_vhost_enqueue_operations_with_cbdma':{
+        'with_0_cbdma': {'imix': {1024: 0.0},},
+        'with_4_cbdma': {'imix': {1024: 0.0},},
+        'with_8_cbdma': {'imix': {1024: 0.0},},
+        'with_6_cbdma': {'imix': {1024: 0.0},}},
+    'test_perf_pvp_packed_ring_all_path_vhost_enqueue_operations_with_cbdma':{
+        'inorder_mergeable_path_VA': {'imix': {1024: 0.0},},
+        'inorder_mergeable_path_VA_RestartVhost': {'imix': {1024: 0.0},},
+        'inorder_non_mergeable_path_VA': {'imix': {1024: 0.0},},
+        'inorder_non_mergeable_path_VA_RestartVhost': {'imix': {1024: 0.0},},
+        'mergeable_path_VA': {'imix': {1024: 0.0},},
+        'mergeable_path_VA_RestartVhost': {'imix': {1024: 0.0},},
+        'non_mergeable_path_VA': {'imix': {1024: 0.0},},
+        'non_mergeable_path_VA_RestartVhost': {'imix': {1024: 0.0},},
+        'vector_rx_path_VA': {'imix': {1024: 0.0},},
+        'vector_rx_path_VA_RestartVhost': {'imix': {1024: 0.0},},
+        'vector_rx_path_not_power_of_2_VA':{'imix': {1024: 0.0},},
+        'vector_rx_path_not_power_of_2_VA_RestartVhost':{'imix': {1024: 0.0},},
+        'inorder_mergeable_path_PA': {'imix': {1024: 0.0},},
+        'inorder_mergeable_path_PA_RestartVhost': {'imix': {1024: 0.0},},
+        'inorder_non_mergeable_path_PA': {'imix': {1024: 0.0},},
+        'inorder_non_mergeable_path_PA_RestartVhost': {'imix': {1024: 0.0},},
+        'mergeable_path_PA': {'imix': {1024: 0.0},},
+        'mergeable_path_PA_RestartVhost': {'imix': {1024: 0.0},},
+        'non_mergeable_path_PA': {'imix': {1024: 0.0},},
+        'non_mergeable_path_PA_RestartVhost': {'imix': {1024: 0.0},},
+        'vector_rx_path_PA': {'imix': {1024: 0.0},},
+        'vector_rx_path_PA_RestartVhost': {'imix': {1024: 0.0},},
+        'vector_rx_path_not_power_of_2_PA':{'imix': {1024: 0.0},},
+        'vector_rx_path_not_power_of_2_PA_RestartVhost':{'imix': {1024: 0.0},}},
+    'test_perf_pvp_packed_ring_all_dynamic_queue_number_vhost_enqueue_operations_with_cbdma':{
+        'with_0_cbdma': {'imix': {1024: 0.0},},
+        'with_4_cbdma': {'imix': {1024: 0.0},},
+        'with_8_cbdma': {'imix': {1024: 0.0},},
+        'with_6_cbdma': {'imix': {1024: 0.0},}},}
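For reference, each leaf value above is an expected throughput in Mpps, keyed as test case name -> sub-case label -> frame mode -> ring size; with update_expected = True a baseline run fills the zeros in. A minimal sketch of the lookup the suite performs, mirroring get_suite_cfg() usage in patch 3/3 (variable names here are illustrative)::

    # Illustrative only: how a case reads its expectations from this file.
    cfg = self.get_suite_cfg()                 # parses the [suite] section of conf/vhost_cbdma.cfg
    expected = cfg['expected_throughput']
    case = expected['test_perf_pvp_spilt_ring_all_path_vhost_enqueue_operations_with_cbdma']
    # 'imix' is the frame mode from test_parameters; 1024 is the ring-size key.
    mpps = case['inorder_mergeable_path_VA']['imix'][1024]   # 0.0 until a baseline is recorded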
From patchwork Wed Jan 19 02:58:40 2022
X-Patchwork-Submitter: "Ling, WeiX"
X-Patchwork-Id: 106047
From: Wei Ling
To: dts@dpdk.org
Cc: Wei Ling
Subject: [dts][PATCH V1 2/3] test_plans/vhost_cbdma_test_plan: modify test plan to cover more test points
Date: Wed, 19 Jan 2022 10:58:40 +0800
Message-Id: <20220119025840.898166-1-weix.ling@intel.com>
List-Id: test suite reviews and discussions

Modify the test plan to cover more test points, such as adding VA and PA mode.

Signed-off-by: Wei Ling
---
 test_plans/vhost_cbdma_test_plan.rst | 280 +++++++++++++++++----------
 1 file changed, 176 insertions(+), 104 deletions(-)

diff --git a/test_plans/vhost_cbdma_test_plan.rst b/test_plans/vhost_cbdma_test_plan.rst
index 3d0e518a..234498a4 100644
--- a/test_plans/vhost_cbdma_test_plan.rst
+++ b/test_plans/vhost_cbdma_test_plan.rst
@@ -38,40 +38,43 @@ Overview
 --------
 
 This feature supports to offload large data movement in vhost enqueue operations
-from the CPU to the I/OAT device for every queue. Note that I/OAT acceleration
-is just enabled for split rings now. In addition, a queue can only use one I/OAT
-device, and I/OAT devices cannot be shared among vhost ports and queues. That is,
-an I/OAT device can only be used by one queue at a time. DMA devices used by
-queues are assigned by users; for a queue without assigning a DMA device, the
-PMD will leverages librte_vhost to perform vhost enqueue operations. Moreover,
-users cannot enable I/OAT acceleration for live-migration. Large copies are
-offloaded from the CPU to the DMA engine in an asynchronous manner. The CPU just
-submits copy jobs to the DMA engine and without waiting for DMA copy completion;
+from the CPU to the I/OAT (a DMA engine in Intel processors) device for every queue.
+In addition, a queue can only use one I/OAT device, and I/OAT devices cannot be shared
+among vhost ports and queues. That is, an I/OAT device can only be used by one queue at
+a time. DMA devices (e.g., CBDMA) used by queues are assigned by users; for a queue
+without an assigned DMA device, the PMD will leverage librte_vhost to perform vhost
+enqueue operations. Moreover, users cannot enable I/OAT acceleration for live migration.
+Large copies are offloaded from the CPU to the DMA engine in an asynchronous manner. The
+CPU just submits copy jobs to the DMA engine without waiting for DMA copy completion;
 there is no CPU intervention during DMA data transfer. By overlapping CPU
 computation and DMA copy, we can save precious CPU cycles and improve the overall
 throughput for vhost-user PMD based applications, like OVS. Due to startup overheads
 associated with DMA engines, small copies are performed by the CPU.
+DPDK 21.11 adds vfio support for DMA devices in vhost. When DMA devices are bound to
+the vfio driver, VA mode is the default and recommended. For PA mode, page-by-page
+mapping may exceed the IOMMU's max capability, so it is better to use 1G guest hugepages.
 
 We introduce a new vdev parameter to enable DMA acceleration for Tx operations of queues:
-
 - dmas: This parameter is used to specify the assigned DMA device of a queue.
 
 Here is an example:
-    $ ./dpdk-testpmd -c f -n 4 \
-    --vdev 'net_vhost0,iface=/tmp/s0,queues=1,dmas=[txq0@80:04.0]'
+    $ ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c f -n 4 \
+    --vdev 'net_vhost0,iface=/tmp/s0,queues=1,dmas=[txq0@0000:00:04.0]' --iova=va -- -i
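Later cases assign several queues to consecutive CBDMA channels through this parameter; a small helper like the following (hypothetical, not part of the suite) shows how such a dmas list is assembled from a set of channel BDFs, matching the f-strings used in patch 3/3::

    # Hypothetical helper: join per-queue DMA assignments into the vdev 'dmas' value,
    # e.g. ['0000:00:04.0', '0000:00:04.1'] -> '[txq0@0000:00:04.0;txq1@0000:00:04.1]'
    def build_dmas(cbdma_bdfs):
        return '[' + ';'.join(f'txq{i}@{bdf}' for i, bdf in enumerate(cbdma_bdfs)) + ']'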
-Test Case 1: PVP Split all path with DMA-accelerated vhost enqueue
-==================================================================
+Test Case 1: PVP split ring all path vhost enqueue operations with cbdma
+========================================================================
 
 Packet pipeline:
 ================
 TG --> NIC --> Vhost --> Virtio--> Vhost --> NIC --> TG
 
-1. Bind one cbdma port and one nic port to vfio-pci, then launch vhost by below command::
+1. Bind 1 CBDMA port and 1 NIC port to vfio-pci, then launch vhost by below command::
 
-    ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 2-3 --file-prefix=vhost --vdev 'net_vhost0,iface=/tmp/s0,queues=1,dmas=[txq0@80:04.0]' \
+    ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 2-3 --file-prefix=vhost \
+    --vdev 'net_vhost0,iface=/tmp/s0,queues=1,dmas=[txq0@0000:00:04.0]' \
+    --iova=va \
    -- -i --nb-cores=1 --txd=1024 --rxd=1024
    >set fwd mac
    >start
 
@@ -80,11 +83,11 @@ TG --> NIC --> Vhost --> Virtio--> Vhost --> NIC --> TG
 
    ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 5-6 --no-pci --file-prefix=virtio \
    --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=/tmp/s0,mrg_rxbuf=1,in_order=1,queues=1 \
-   -- -i --tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=1 --txd=1024 --rxd=1024
+   -- -i --nb-cores=1 --txd=1024 --rxd=1024
    >set fwd mac
    >start
 
-3. Send imix packets [64,1518] from packet generator, check the throughput can get expected data, restart vhost port, then check throughput again::
+3. Send imix packets [64,1518] from packet generator, check the throughput can get expected data, restart vhost port and send imix packets again, then check the throughput is the same::
 
    testpmd>show port stats all
    testpmd>stop
    testpmd>start
    testpmd>show port stats all
 
@@ -95,7 +98,7 @@ TG --> NIC --> Vhost --> Virtio--> Vhost --> NIC --> TG
 
    ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 5-6 --no-pci --file-prefix=virtio \
    --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=/tmp/s0,mrg_rxbuf=1,in_order=0,queues=1 \
-   -- -i --tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=1 --txd=1024 --rxd=1024
+   -- -i --nb-cores=1 --txd=1024 --rxd=1024
    >set fwd mac
    >start
 
@@ -103,7 +106,7 @@ TG --> NIC --> Vhost --> Virtio--> Vhost --> NIC --> TG
 
    ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 5-6 --no-pci --file-prefix=virtio \
    --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=/tmp/s0,mrg_rxbuf=0,in_order=1,queues=1 \
-   -- -i --tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=1 --txd=1024 --rxd=1024
+   -- -i --nb-cores=1 --txd=1024 --rxd=1024
    >set fwd mac
    >start
 
@@ -111,26 +114,37 @@ TG --> NIC --> Vhost --> Virtio--> Vhost --> NIC --> TG
 
    ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 5-6 --no-pci --file-prefix=virtio \
    --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=/tmp/s0,mrg_rxbuf=0,in_order=0,queues=1 \
-   -- -i --tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=1 --txd=1024 --rxd=1024
+   -- -i --enable-hw-vlan-strip --nb-cores=1 --txd=1024 --rxd=1024
    >set fwd mac
    >start
 
 7. Relaunch virtio-user with vector_rx path, then repeat step 3::
 
-   ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 5-6 \
-   --no-pci --file-prefix=virtio \
-   --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=/tmp/s0,mrg_rxbuf=0,in_order=0,queues=1 \
+   ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 5-6 --no-pci --file-prefix=virtio \
+   --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=/tmp/s0,mrg_rxbuf=0,in_order=0,vectorized=1,queues=1 \
    -- -i --nb-cores=1 --txd=1024 --rxd=1024
    >set fwd mac
    >start
 
-Test Case 2: Split ring dynamic queue number test for DMA-accelerated vhost Tx operations
-=========================================================================================
+8. Quit all testpmd and relaunch vhost with iova=pa by below command::
 
-1. Bind 8 cbdma channels and one nic port to vfio-pci, then launch vhost by below command::
+   ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 2-3 --file-prefix=vhost \
+   --vdev 'net_vhost0,iface=/tmp/s0,queues=1,dmas=[txq0@0000:00:04.0]' \
+   --iova=pa \
+   -- -i --nb-cores=1 --txd=1024 --rxd=1024
+   >set fwd mac
+   >start
+
+9. Rerun steps 2-7.
+
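Step 3 above compares the throughput measured before and after the vhost restart against the numbers stored in conf/vhost_cbdma.cfg; a rough sketch of such a check, assuming accepted_tolerance is applied as an allowed gap (the exact semantics live in DTS's result handling, so this is illustrative only)::

    # Rough sketch, not the suite's exact code: measured Mpps must reach the
    # expected value within the configured tolerance from conf/vhost_cbdma.cfg.
    def meets_expectation(measured_mpps, expected_mpps, accepted_tolerance=2.0):
        return measured_mpps >= expected_mpps - accepted_tolerance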
+Test Case 2: PVP split ring dynamic queue number vhost enqueue operations with cbdma
+=====================================================================================
+
+1. Bind 8 CBDMA ports and 1 NIC port to vfio-pci, then launch vhost by below command::
 
    ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 28-29 \
    --file-prefix=vhost --vdev 'net_vhost0,iface=/tmp/s0,queues=8,client=1' \
+   --iova=va \
    -- -i --nb-cores=1 --txd=1024 --rxd=1024 --txq=8 --rxq=8
    >set fwd mac
    >start
 
@@ -139,48 +153,64 @@ Test Case 2: Split ring dynamic queue number test for DMA-accelerated vhost Tx o
 
    ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 30-31 --no-pci --file-prefix=virtio \
    --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=/tmp/s0,mrg_rxbuf=1,in_order=1,queues=8,server=1 \
-   -- -i --tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=1 --txd=1024 --rxd=1024 --txq=8 --rxq=8
+   -- -i --nb-cores=1 --txd=1024 --rxd=1024 --txq=8 --rxq=8
    >set fwd mac
    >start
 
-3. Send imix packets from packet generator with random ip, check perforamnce can get target.
+3. Send imix packets [64,1518] from packet generator with random ip, check performance can get target.
 
 4. Stop vhost port, check vhost RX and TX direction both exist packtes in 8 queues from vhost log.
 
-5. Quit vhost port and relaunch vhost with 4 queues w/ cbdma::
+5. Quit and relaunch vhost with 4 queues w/ cbdma and 4 queues w/o cbdma::
 
    ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 28-29 \
-   --file-prefix=vhost --vdev 'net_vhost0,iface=/tmp/s0,queues=4,client=1,dmas=[txq0@00:04.0;txq1@00:04.1;txq2@00:04.2;txq3@00:04.3]' \
-   -- -i --nb-cores=1 --txd=1024 --rxd=1024 --txq=4 --rxq=4
+   --file-prefix=vhost --vdev 'net_vhost0,iface=/tmp/s0,queues=8,client=1,dmas=[txq0@0000:00:04.0;txq1@0000:00:04.1;txq2@0000:00:04.2;txq3@0000:00:04.3]' \
+   --iova=va \
+   -- -i --nb-cores=1 --txd=1024 --rxd=1024 --txq=8 --rxq=8
    >set fwd mac
    >start
 
-6. Send imix packets from packet generator with random ip, check perforamnce can get target.
+6. Send imix packets [64,1518] from packet generator with random ip, check performance can get target.
 
-7. Stop vhost port, check vhost RX and TX direction both exist packtes in 4 queues from vhost log.
+7. Stop vhost port, check vhost RX and TX direction both exist packets in 8 queues from vhost log.
 
-8. Quit vhost port and relaunch vhost with 8 queues w/ cbdma::
+8. Quit and relaunch vhost with 8 queues w/ cbdma::
 
    ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 28-29 \
-   --file-prefix=vhost --vdev 'net_vhost0,iface=/tmp/s0,queues=8,client=1,dmas=[txq0@80:04.0;txq1@80:04.1;txq2@80:04.2;txq3@80:04.3;txq4@80:04.4;txq5@80:04.5;txq6@80:04.6;txq7@80:04.7]' \
+   --file-prefix=vhost --vdev 'net_vhost0,iface=/tmp/s0,queues=8,client=1,dmas=[txq0@0000:00:04.0;txq1@0000:00:04.1;txq2@0000:00:04.2;txq3@0000:00:04.3;txq4@0000:00:04.4;txq5@0000:00:04.5;txq6@0000:00:04.6;txq7@0000:00:04.7]' \
+   --iova=va \
+   -- -i --nb-cores=1 --txd=1024 --rxd=1024 --txq=8 --rxq=8
+   >set fwd mac
+   >start
+
+9. Send imix packets [64,1518] from packet generator with random ip, check performance can get target.
+
+10. Stop vhost port, check vhost RX and TX direction both exist packets in 8 queues from vhost log.
+
+11. Quit and relaunch vhost with iova=pa, 6 queues w/ cbdma and 2 queues w/o cbdma::
+
+   ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 28-29 \
+   --file-prefix=vhost --vdev 'net_vhost0,iface=/tmp/s0,queues=8,client=1,dmas=[txq0@0000:00:04.0;txq1@0000:00:04.1;txq2@0000:00:04.2;txq3@0000:00:04.3;txq4@0000:00:04.4;txq5@0000:00:04.5]' \
+   --iova=pa \
    -- -i --nb-cores=1 --txd=1024 --rxd=1024 --txq=8 --rxq=8
    >set fwd mac
    >start
 
-9. Send imix packets from packet generator with random ip, check perforamnce can get target.
+12. Send imix packets [64,1518] from packet generator with random ip, check performance can get target.
 
-10. Stop vhost port, check vhost RX and TX direction both exist packtes in 8 queues from vhost log.
+13. Stop vhost port, check vhost RX and TX direction both exist packets in 8 queues from vhost log.
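The recurring "check vhost RX and TX direction both exist packets in N queues" steps can be automated by parsing the per-queue forward stats that testpmd prints on `stop`; a rough sketch (the regex and function names are assumptions, not suite API)::

    import re

    # Rough sketch: collect the queue ids that actually forwarded traffic from the
    # 'Forward Stats for RX Port= 0/Queue= 0 -> TX Port= 0/Queue= 0' lines of 'stop'.
    def queues_with_traffic(stop_output):
        return {int(q) for q in re.findall(r'RX Port=\s*\d+/Queue=\s*(\d+)', stop_output)}

    # e.g. verify all 8 queues carried packets:
    # assert queues_with_traffic(out) >= set(range(8))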
-Test Case 3: PVP packed ring all path with DMA-accelerated vhost enqueue
-========================================================================
+Test Case 3: PVP packed ring all path vhost enqueue operations with cbdma
+=========================================================================
 
 Packet pipeline:
 ================
 TG --> NIC --> Vhost --> Virtio--> Vhost --> NIC --> TG
 
-1. Bind one cbdma port and one nic port to vfio-pci, then launch vhost by below command::
+1. Bind 1 CBDMA port and 1 NIC port to vfio-pci, then launch vhost by below command::
 
-    ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 2-3 --file-prefix=vhost --vdev 'net_vhost0,iface=/tmp/s0,queues=1,dmas=[txq0@80:04.0]' \
+    ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 2-3 --file-prefix=vhost --vdev 'net_vhost0,iface=/tmp/s0,queues=1,dmas=[txq0@0000:80:04.0]' \
+    --iova=va \
    -- -i --nb-cores=1 --txd=1024 --rxd=1024
    >set fwd mac
    >start
 
@@ -189,11 +219,11 @@ TG --> NIC --> Vhost --> Virtio--> Vhost --> NIC --> TG
 
    ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 5-6 --no-pci --file-prefix=virtio \
    --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=/tmp/s0,mrg_rxbuf=1,in_order=1,queues=1,packed_vq=1 \
-   -- -i --tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=1 --txd=1024 --rxd=1024
+   -- -i --nb-cores=1 --txd=1024 --rxd=1024
    >set fwd mac
    >start
 
-3. Send imix packets [64,1518] from packet generator, check the throughput can get expected data, restart vhost port, then check throughput again::
+3. Send imix packets [64,1518] from packet generator, check the throughput can get expected data, restart vhost port and send imix packets again, then check the throughput is the same::
 
    testpmd>show port stats all
    testpmd>stop
    testpmd>start
    testpmd>show port stats all
 
@@ -204,7 +234,7 @@ TG --> NIC --> Vhost --> Virtio--> Vhost --> NIC --> TG
 
    ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 5-6 --no-pci --file-prefix=virtio \
    --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=/tmp/s0,mrg_rxbuf=1,in_order=0,queues=1,packed_vq=1 \
-   -- -i --tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=1 --txd=1024 --rxd=1024
+   -- -i --nb-cores=1 --txd=1024 --rxd=1024
    >set fwd mac
    >start
 
@@ -212,7 +242,7 @@ TG --> NIC --> Vhost --> Virtio--> Vhost --> NIC --> TG
 
    ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 5-6 --no-pci --file-prefix=virtio \
    --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=/tmp/s0,mrg_rxbuf=0,in_order=1,queues=1,packed_vq=1 \
-   -- -i --tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=1 --txd=1024 --rxd=1024
+   -- -i --nb-cores=1 --txd=1024 --rxd=1024
    >set fwd mac
    >start
 
@@ -220,35 +250,45 @@ TG --> NIC --> Vhost --> Virtio--> Vhost --> NIC --> TG
 
    ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 5-6 --no-pci --file-prefix=virtio \
    --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=/tmp/s0,mrg_rxbuf=0,in_order=0,queues=1,packed_vq=1 \
-   -- -i --tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=1 --txd=1024 --rxd=1024
+   -- -i --nb-cores=1 --txd=1024 --rxd=1024
    >set fwd mac
    >start
 
 7. Relaunch virtio-user with vectorized path, then repeat step 3::
 
-   ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 5-6 \
-   --no-pci --file-prefix=virtio \
-   --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=/tmp/s0,mrg_rxbuf=0,in_order=1,queues=1,packed_vq=1 \
+   ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 5-6 --no-pci --file-prefix=virtio --force-max-simd-bitwidth=512 \
+   --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=/tmp/s0,mrg_rxbuf=0,in_order=1,packed_vq=1,vectorized=1,queues=1 \
    -- -i --nb-cores=1 --txd=1024 --rxd=1024
    >set fwd mac
    >start
 
-8. Relaunch virtio-user with vector_rx path, then repeat step 3::
+8. Relaunch virtio-user with vectorized path and a ring size that is not a power of 2, then repeat step 3::
 
-   ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 5-6 \
-   --no-pci --file-prefix=virtio \
-   --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=/tmp/s0,mrg_rxbuf=0,in_order=1,queues=1,packed_vq=1 \
-   -- -i --enable-hw-vlan-strip --nb-cores=1 --txd=1024 --rxd=1024
+   ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 5-6 --no-pci --file-prefix=virtio --force-max-simd-bitwidth=512 \
+   --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=/tmp/s0,mrg_rxbuf=0,in_order=1,packed_vq=1,vectorized=1,queues=1,queue_size=1025 \
+   -- -i --nb-cores=1 --txd=1025 --rxd=1025
+   >set fwd mac
+   >start
+
+9. Quit all testpmd and relaunch vhost with iova=pa by below command::
+
+   ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 2-3 --file-prefix=vhost \
+   --vdev 'net_vhost0,iface=/tmp/s0,queues=1,dmas=[txq0@0000:80:04.0]' \
+   --iova=pa \
+   -- -i --nb-cores=1 --txd=1024 --rxd=1024
    >set fwd mac
    >start
 
-Test Case 4: Packed ring dynamic queue number test for DMA-accelerated vhost Tx operations
-==========================================================================================
+10. Rerun steps 2-8.
+
+Test Case 4: PVP packed ring dynamic queue number vhost enqueue operations with cbdma
+=====================================================================================
 
-1. Bind 8 cbdma channels and one nic port to vfio-pci, then launch vhost by below command::
+1. Bind 8 CBDMA ports and 1 NIC port to vfio-pci, then launch vhost by below command::
 
    ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 28-29 \
    --file-prefix=vhost --vdev 'net_vhost0,iface=/tmp/s0,queues=8,client=1' \
+   --iova=va \
    -- -i --nb-cores=1 --txd=1024 --rxd=1024 --txq=8 --rxq=8
    >set fwd mac
    >start
 
@@ -256,8 +296,8 @@ Test Case 4: Packed ring dynamic queue number test for DMA-accelerated vhost Tx
 2. Launch virtio-user by below command::
 
    ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 30-31 --no-pci --file-prefix=virtio \
-   --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=/tmp/s0,mrg_rxbuf=1,in_order=1,queues=8,server=1,packed_vq=1 \
-   -- -i --tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=1 --txd=1024 --rxd=1024 --txq=8 --rxq=8
+   --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=/tmp/s0,mrg_rxbuf=1,in_order=0,queues=8,server=1,packed_vq=1 \
+   -- -i --nb-cores=1 --txd=1024 --rxd=1024 --txq=8 --rxq=8
    >set fwd mac
    >start
 
@@ -265,11 +305,12 @@ Test Case 4: Packed ring dynamic queue number test for DMA-accelerated vhost Tx
 
 4. Stop vhost port, check vhost RX and TX direction both exist packtes in 8 queues from vhost log.
 
-5. Quit vhost port and relaunch vhost with 4 queues w/ cbdma::
+5. Quit and relaunch vhost with 4 queues w/ cbdma and 4 queues w/o cbdma::
 
    ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 28-29 \
-   --file-prefix=vhost --vdev 'net_vhost0,iface=/tmp/s0,queues=4,client=1,dmas=[txq0@80:04.0;txq1@80:04.1;txq2@80:04.2;txq3@80:04.3]' \
-   -- -i --nb-cores=1 --txd=1024 --rxd=1024 --txq=4 --rxq=4
+   --file-prefix=vhost --vdev 'net_vhost0,iface=/tmp/s0,queues=8,client=1,dmas=[txq0@0000:80:04.0;txq1@0000:80:04.1;txq2@0000:80:04.2;txq3@0000:80:04.3]' \
+   --iova=va \
+   -- -i --nb-cores=1 --txd=1024 --rxd=1024 --txq=8 --rxq=8
    >set fwd mac
    >start
 
@@ -277,71 +318,102 @@ Test Case 4: Packed ring dynamic queue number test for DMA-accelerated vhost Tx
 
 7. Stop vhost port, check vhost RX and TX direction both exist packtes in 4 queues from vhost log.
 
-8. Quit vhost port and relaunch vhost with 8 queues w/ cbdma::
+8. Quit and relaunch vhost with 8 queues w/ cbdma::
 
    ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 28-29 \
-   --file-prefix=vhost --vdev 'net_vhost0,iface=/tmp/s0,queues=8,client=1,dmas=[txq0@80:04.0;txq1@80:04.1;txq2@80:04.2;txq3@80:04.3;txq4@80:04.4;txq5@80:04.5;txq6@80:04.6;txq7@80:04.7]' \
-   -- -i --nb-cores=1 --txd=1024 --rxd=1024 --txq=8 --rxq=8
-   >set fwd mac
-   >start
+   --file-prefix=vhost --vdev 'net_vhost0,iface=/tmp/s0,queues=8,client=1,dmas=[txq0@0000:80:04.0;txq1@0000:80:04.1;txq2@0000:80:04.2;txq3@0000:80:04.3;txq4@0000:80:04.4;txq5@0000:80:04.5;txq6@0000:80:04.6;txq7@0000:80:04.7]' \
+   --iova=va \
+   -- -i --nb-cores=1 --txd=1024 --rxd=1024 --txq=8 --rxq=8
+   >set fwd mac
+   >start
 
 9. Send imix packets from packet generator with random ip, check perforamnce can get target.
 
 10. Stop vhost port, check vhost RX and TX direction both exist packtes in 8 queues from vhost log.
 
-Test Case 5: Compare PVP split ring performance between CPU copy, CBDMA copy and Sync copy
-==========================================================================================
+11. Quit and relaunch vhost with iova=pa, 6 queues w/ cbdma and 2 queues w/o cbdma::
 
-1. Bind one cbdma port and one nic port which on same numa to vfio-pci, then launch vhost by below command::
+   ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 28-29 \
+   --file-prefix=vhost --vdev 'net_vhost0,iface=/tmp/s0,queues=8,client=1,dmas=[txq0@0000:80:04.0;txq1@0000:80:04.1;txq2@0000:80:04.2;txq3@0000:80:04.3;txq4@0000:80:04.4;txq5@0000:80:04.5]' \
+   --iova=pa \
+   -- -i --nb-cores=1 --txd=1024 --rxd=1024 --txq=8 --rxq=8
+   >set fwd mac
+   >start
 
-   ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 2-3 --file-prefix=vhost --vdev 'net_vhost0,iface=/tmp/s0,queues=1,client=1,dmas=[txq0@00:01.0]' \
-   -- -i --nb-cores=1 --txd=1024 --rxd=1024
-   >set fwd mac
-   >start
+12. Send imix packets from packet generator with random ip, check performance can get target.
 
-2. Launch virtio-user with inorder mergeable path::
+13. Stop vhost port, check vhost RX and TX direction both exist packets in 8 queues from vhost log.
 
-   ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 5-6 --no-pci --file-prefix=virtio \
-   --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=/tmp/s0,mrg_rxbuf=1,in_order=1,queues=1,server=1 \
-   -- -i --tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=1 --txd=1024 --rxd=1024
-   >set fwd mac
-   >start
+Test Case 5: Loopback split ring large chain packets stress test with cbdma enqueue
+===================================================================================
 
-3. Send packets with 64b and 1518b seperately from packet generator, record the throughput as sync copy throughput for 64b and cbdma copy for 1518b::
+Packet pipeline:
+================
+Vhost <--> Virtio
 
-   testpmd>show port stats all
+1. Bind 1 CBDMA channel to vfio-pci and launch vhost::
 
-4.Quit vhost side, relaunch with below cmd::
+   ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 2-3 -n 4 \
+   --vdev 'eth_vhost0,iface=vhost-net0,queues=1,dmas=[txq0@0000:00:04.0]' \
+   --iova=va \
+   -- -i --nb-cores=1 --mbuf-size=65535
 
-   ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 2-3 --file-prefix=vhost --vdev 'net_vhost0,iface=/tmp/s0,queues=1,client=1,dmas=[txq0@00:01.0]' \
-   -- -i --nb-cores=1 --txd=1024 --rxd=1024
-   >set fwd mac
+2. Launch virtio and start testpmd::
 
+   ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 30,31 -n 4 --file-prefix=testpmd0 --no-pci \
+   --vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=1,mrg_rxbuf=1,in_order=0,vectorized=1,queue_size=2048 \
+   -- -i --rxq=1 --txq=1 --txd=2048 --rxd=2048 --nb-cores=1
   >start
 
-5. Send packets with 1518b from packet generator, record the throughput as sync copy throughput for 1518b::
+3. Send large packets from vhost, check virtio can receive packets::
 
-   testpmd>show port stats all
+   testpmd> vhost enable tx all
+   testpmd> set txpkts 65535,65535,65535,65535,65535
+   testpmd> start tx_first 32
+   testpmd> show port stats all
 
-6. Quit two testpmd, relaunch vhost by below command::
+4. Quit all testpmd and relaunch vhost with iova=pa::
 
-   ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 2-3 --file-prefix=vhost --vdev 'net_vhost0,iface=/tmp/s0,queues=1' \
-   -- -i --nb-cores=1 --txd=1024 --rxd=1024
-   >set fwd mac
-   >start
+   ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 2-3 -n 4 \
+   --vdev 'eth_vhost0,iface=vhost-net0,queues=1,dmas=[txq0@0000:00:04.0]' \
+   --iova=pa \
+   -- -i --nb-cores=1 --mbuf-size=65535
 
-7. Launch virtio-user with inorder mergeable path::
+5. Rerun steps 2-3.
 
-   ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -n 4 -l 5-6 --no-pci --file-prefix=virtio \
-   --vdev=net_virtio_user0,mac=00:01:02:03:04:05,path=/tmp/s0,mrg_rxbuf=1,in_order=1,queues=1 \
-   -- -i --tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=1 --txd=1024 --rxd=1024
-   >set fwd mac
+Test Case 6: Loopback packed ring large chain packets stress test with cbdma enqueue
+====================================================================================
 
-8. Send packets with 64b from packet generator, record the throughput as cpu copy for 64b::
+Packet pipeline:
+================
+Vhost <--> Virtio
 
-   testpmd>show port stats all
+1. Bind 1 CBDMA channel to vfio-pci and launch vhost::
 
-9. Check performance can meet below requirement::
+   ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 2-3 -n 4 \
+   --vdev 'eth_vhost0,iface=vhost-net0,queues=1,dmas=[txq0@0000:00:04.0]' \
+   --iova=va \
+   -- -i --nb-cores=1 --mbuf-size=65535
 
-   (1)CPU copy vs. sync copy delta < 10% for 64B packet size
-   (2)CBDMA copy vs sync copy delta > 5% for 1518 packet size
+2. Launch virtio and start testpmd::
+
+   ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 30,31 -n 4 --file-prefix=testpmd0 --no-pci \
+   --vdev=net_virtio_user0,mac=00:11:22:33:44:10,path=./vhost-net0,queues=1,mrg_rxbuf=1,in_order=0,vectorized=1,packed_vq=1,queue_size=2048 \
+   -- -i --rxq=1 --txq=1 --txd=2048 --rxd=2048 --nb-cores=1
+   >start
+
+3. Send large packets from vhost, check virtio can receive packets::
+
+   testpmd> vhost enable tx all
+   testpmd> set txpkts 65535,65535,65535,65535,65535
+   testpmd> start tx_first 32
+   testpmd> show port stats all
+
+4. Quit all testpmd and relaunch vhost with iova=pa::
+
+   ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -l 2-3 -n 4 \
+   --vdev 'eth_vhost0,iface=vhost-net0,queues=1,dmas=[txq0@0000:00:04.0]' --iova=pa -- -i --nb-cores=1 --mbuf-size=65535
+
+5. Rerun steps 2-3.
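Every case above is exercised once with --iova=va and once with --iova=pa; patch 3/3 below threads this through a single vhost launch helper, roughly of this shape (simplified from the actual diff)::

    # Simplified shape of launch_testpmd_as_vhost_user() in patch 3/3:
    # the iova mode simply becomes an extra EAL parameter.
    def launch_vhost(pmdout, param, vdev, ports, iova_mode='va'):
        eal_param = '--iova=' + iova_mode if iova_mode else ''
        pmdout.start_testpmd(cores='Default', param=param, vdevs=[vdev],
                             ports=ports, prefix='vhost', eal_param=eal_param)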
From patchwork Wed Jan 19 02:58:52 2022
X-Patchwork-Submitter: "Ling, WeiX"
X-Patchwork-Id: 106048
From: Wei Ling
To: dts@dpdk.org
Cc: Wei Ling
Subject: [dts][PATCH V1 3/3] tests/vhost_cbdma: modify test suite to sync with test plan change
Date: Wed, 19 Jan 2022 10:58:52 +0800
Message-Id: <20220119025852.898224-1-weix.ling@intel.com>
List-Id: test suite reviews and discussions

Modify and add test cases to sync with the test plan changes.
Signed-off-by: Wei Ling
Tested-by: Wei Ling
---
 tests/TestSuite_vhost_cbdma.py | 488 ++++++++++++++++++---------------
 1 file changed, 273 insertions(+), 215 deletions(-)

diff --git a/tests/TestSuite_vhost_cbdma.py b/tests/TestSuite_vhost_cbdma.py
index c0cc772d..f5ae1289 100644
--- a/tests/TestSuite_vhost_cbdma.py
+++ b/tests/TestSuite_vhost_cbdma.py
@@ -167,32 +167,29 @@ class TestVirTioVhostCbdma(TestCase):
         self.verify(int(self.result_first[0]) > 1 and int(self.result_secondary[0]) > 1, "forward packets no correctly")
 
     @property
-    def check_2m_env(self):
+    def check_2M_env(self):
         out = self.dut.send_expect("cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# ")
         return True if out == '2048' else False
 
-    def launch_testpmd_as_vhost_user(self, command, cores="Default", dev="", ports = ""):
-        self.pmdout_vhost_user.start_testpmd(cores=cores, param=command, vdevs=[dev], ports=ports, prefix="vhost")
-        self.vhost_user.send_expect('set fwd mac', 'testpmd> ', 120)
-        self.vhost_user.send_expect('start', 'testpmd> ', 120)
-
-    def launch_testpmd_as_virtio_user1(self, command, cores="Default", dev=""):
-        eal_params = ""
-        if self.check_2m_env:
-            eal_params += " --single-file-segments"
-        self.pmdout_virtio_user1.start_testpmd(cores, command, vdevs=[dev], no_pci=True, prefix="virtio1", eal_param=eal_params)
-        self.virtio_user1.send_expect('set fwd mac', 'testpmd> ', 30)
-        self.virtio_user1.send_expect('start', 'testpmd> ', 30)
-        self.virtio_user1.send_expect('show port info all', 'testpmd> ', 30)
-
-    def launch_testpmd_as_virtio_user(self, command, cores="Default", dev=""):
+    def launch_testpmd_as_vhost_user(self, command, cores="Default", dev="", ports = "", iova_mode='pa', set_pmd_param=True):
+        if iova_mode:
+            iova_parm = "--iova=" + iova_mode
+        else:
+            iova_parm = ""
+        self.pmdout_vhost_user.start_testpmd(cores=cores, param=command, vdevs=[dev], ports=ports, prefix="vhost", eal_param=iova_parm)
+        if set_pmd_param:
+            self.vhost_user.send_expect('set fwd mac', 'testpmd> ', 30)
+            self.vhost_user.send_expect('start', 'testpmd> ', 30)
+
+    def launch_testpmd_as_virtio_user(self, command, cores="Default", dev="", set_pmd_param=True):
         eal_params = ""
-        if self.check_2m_env:
+        if self.check_2M_env:
             eal_params += " --single-file-segments"
-        self.pmdout_virtio_user.start_testpmd(cores, command, vdevs=[dev],no_pci=True, prefix="virtio", eal_param=eal_params)
-        self.virtio_user.send_expect('set fwd mac', 'testpmd> ', 120)
-        self.virtio_user.send_expect('start', 'testpmd> ', 120)
-        self.virtio_user.send_expect('show port info all', 'testpmd> ', 30)
+        self.pmdout_virtio_user.start_testpmd(cores, command, vdevs=[dev], no_pci=True, prefix="virtio", eal_param=eal_params)
+        if set_pmd_param:
+            self.virtio_user.send_expect('set fwd mac', 'testpmd> ', 30)
+            self.virtio_user.send_expect('start', 'testpmd> ', 30)
+            self.virtio_user.send_expect('show port info all', 'testpmd> ', 30)
 
     def diff_param_launch_send_and_verify(self, mode, params, dev, cores, is_quit=True, launch_virtio=True):
         if launch_virtio:
@@ -202,35 +199,37 @@ class TestVirTioVhostCbdma(TestCase):
             self.virtio_user.send_expect("quit", "# ")
             time.sleep(3)
 
-    def test_perf_pvp_spilt_all_path_with_cbdma_vhost_enqueue_operations(self):
+    def test_perf_pvp_spilt_ring_all_path_vhost_enqueue_operations_with_cbdma(self):
         """
-        Test Case 1: PVP Split all path with DMA-accelerated vhost enqueue
+        Test Case 1: PVP split ring all path vhost enqueue operations with cbdma
         """
         self.test_target = self.running_case
         self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]
         txd_rxd = 1024
-        eal_tx_rxd = ' --nb-cores=%d --txd=%d --rxd=%d'
-        queue = 1
-        used_cbdma_num = 1
-        self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
+        vhost_param = ' --nb-cores=%d --txd=%d --rxd=%d'
+        nb_cores = 1
+        queues = 1
+        self.get_cbdma_ports_info_and_bind_to_dpdk(1)
         vhost_vdevs = f"'net_vhost0,iface=/tmp/s0,queues=%d,dmas=[txq0@{self.device_str}]'"
-        dev_path_mode_mapper = {
-            "inorder_mergeable_path": 'mrg_rxbuf=1,in_order=1',
-            "mergeable_path": 'mrg_rxbuf=1,in_order=0',
-            "inorder_non_mergeable_path": 'mrg_rxbuf=0,in_order=1',
-            "non_mergeable_path": 'mrg_rxbuf=0,in_order=0',
-            "vector_rx_path": 'mrg_rxbuf=0,in_order=0',
+        virtio_path_dict_VA = {
+            "inorder_mergeable_path_VA": 'mrg_rxbuf=1,in_order=1',
+            "mergeable_path_VA": 'mrg_rxbuf=1,in_order=0',
+            "inorder_non_mergeable_path_VA": 'mrg_rxbuf=0,in_order=1',
+            "non_mergeable_path_VA": 'mrg_rxbuf=0,in_order=0',
+            "vector_rx_path_VA": 'mrg_rxbuf=0,in_order=0,vectorized=1',
         }
-        pvp_split_all_path_virtio_params = "--tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=%d --txd=%d --rxd=%d" % (queue, txd_rxd, txd_rxd)
         allow_pci = [self.dut.ports_info[0]['pci']]
-        for index in range(used_cbdma_num):
+        for index in range(1):
             allow_pci.append(self.cbdma_dev_infos[index])
-        self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2], dev=vhost_vdevs % (queue), ports=allow_pci)
-        for key, path_mode in dev_path_mode_mapper.items():
-            if key == "vector_rx_path":
-                pvp_split_all_path_virtio_params = eal_tx_rxd % (queue, txd_rxd, txd_rxd)
-            vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'" % queue
-            self.diff_param_launch_send_and_verify(key, pvp_split_all_path_virtio_params, vdevs, self.cores[2:4], is_quit=False)
+        self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores, txd_rxd, txd_rxd), self.cores[0:2],
+                                          dev=vhost_vdevs % (nb_cores), ports=allow_pci, iova_mode='va')
+        for key, path_mode in virtio_path_dict_VA.items():
+            if key == "non_mergeable_path_VA":
+                virtio_param = " --enable-hw-vlan-strip --nb-cores=%d --txd=%d --rxd=%d" % (nb_cores, txd_rxd, txd_rxd)
+            else:
+                virtio_param = " --nb-cores=%d --txd=%d --rxd=%d" % (nb_cores, txd_rxd, txd_rxd)
+            vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'" % nb_cores
+            self.diff_param_launch_send_and_verify(key, virtio_param, vdevs, self.cores[2:4], is_quit=False, launch_virtio=True)
             self.mode_list.append(key)
             # step3 restart vhost port, then check throughput again
             key += "_RestartVhost"
@@ -239,99 +238,133 @@ class TestVirTioVhostCbdma(TestCase):
             self.vhost_user.send_expect('start', 'testpmd> ', 10)
             self.vhost_user.send_expect('show port info all', 'testpmd> ', 30)
             self.vhost_user.send_expect('show port stats all', 'testpmd> ', 10)
-            self.diff_param_launch_send_and_verify(key, pvp_split_all_path_virtio_params, vdevs,
-                                                   self.cores[2:4], launch_virtio=False)
+            self.diff_param_launch_send_and_verify(key, virtio_param, vdevs, self.cores[2:4], is_quit=True, launch_virtio=False)
             self.mode_list.append(key)
+        # step9 quit all testpmd and re-run with PA mode
+        virtio_path_dict_PA = {
+            "inorder_mergeable_path_PA": 'mrg_rxbuf=1,in_order=1',
+            "mergeable_path_PA": 'mrg_rxbuf=1,in_order=0',
+            "inorder_non_mergeable_path_PA": 'mrg_rxbuf=0,in_order=1',
+            "non_mergeable_path_PA": 'mrg_rxbuf=0,in_order=0',
+            "vector_rx_path_PA": 'mrg_rxbuf=0,in_order=0,vectorized=1',
+        }
         self.vhost_user.send_expect("quit", "# ")
+        self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores, txd_rxd, txd_rxd), self.cores[0:2],
+                                          dev=vhost_vdevs % (nb_cores), ports=allow_pci, iova_mode='pa')
+        for key, path_mode in virtio_path_dict_PA.items():
+            if key == "non_mergeable_path_PA":
+                virtio_param = " --enable-hw-vlan-strip --nb-cores=%d --txd=%d --rxd=%d" % (nb_cores, txd_rxd, txd_rxd)
+            else:
+                virtio_param = " --nb-cores=%d --txd=%d --rxd=%d" % (nb_cores, txd_rxd, txd_rxd)
+            vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'" % queues
+            self.diff_param_launch_send_and_verify(key, virtio_param, vdevs, self.cores[2:4], is_quit=False,
+                                                   launch_virtio=True)
+            self.mode_list.append(key)
+            # step3 restart vhost port, then check throughput again
+            key += "_RestartVhost"
+            self.vhost_user.send_expect('show port stats all', 'testpmd> ', 10)
+            self.vhost_user.send_expect('stop', 'testpmd> ', 10)
+            self.vhost_user.send_expect('start', 'testpmd> ', 10)
+            self.vhost_user.send_expect('show port info all', 'testpmd> ', 30)
+            self.vhost_user.send_expect('show port stats all', 'testpmd> ', 10)
+            self.diff_param_launch_send_and_verify(key, virtio_param, vdevs, self.cores[2:4], is_quit=True,
+                                                   launch_virtio=False)
+            self.mode_list.append(key)
+
         self.result_table_print()
         self.handle_expected(mode_list=self.mode_list)
         self.handle_results(mode_list=self.mode_list)
 
-    def test_perf_dynamic_queue_number_cbdma_vhost_enqueue_operations(self):
+    def test_perf_pvp_spilt_ring_all_dynamic_queue_number_vhost_enqueue_operations_with_cbdma(self):
         """
-        Test Case2: Split ring dynamic queue number test for DMA-accelerated vhost Tx operations
+        Test Case 2: PVP split ring dynamic queue number vhost enqueue operations with cbdma
         """
         self.test_target = self.running_case
         self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]
-        used_cbdma_num = 8
-        queue = 8
-        txd_rxd = 1024
         nb_cores = 1
+        txd_rxd = 1024
+        queues = 8
         virtio_path = "/tmp/s0"
         path_mode = 'mrg_rxbuf=1,in_order=1'
-        self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
-        eal_params = " --nb-cores=1 --txd=1024 --rxd=1024 --txq=%d --rxq=%d "
-        dynamic_queue_number_cbdma_virtio_params = f" --tx-offloads=0x0 --enable-hw-vlan-strip {eal_params % (queue,queue)}"
-        virtio_dev = f"net_virtio_user0,mac={self.virtio_mac},path={virtio_path},{path_mode},queues={queue},server=1"
+        self.get_cbdma_ports_info_and_bind_to_dpdk(8)
+        vhost_param = " --nb-cores=%d --txd=%d --rxd=%d --txq=%d --rxq=%d "
+        virtio_param = " --nb-cores=%d --txd=%d --rxd=%d --txq=%d --rxq=%d "
         vhost_dev = f"'net_vhost0,iface={virtio_path},queues=%d,client=1,%s'"
-        # launch vhost testpmd
+        virtio_dev = f"net_virtio_user0,mac={self.virtio_mac},path={virtio_path},{path_mode},queues={queues},server=1"
         allow_pci = [self.dut.ports_info[0]['pci']]
-        for index in range(used_cbdma_num):
+        for index in range(8):
             allow_pci.append(self.cbdma_dev_infos[index])
-
-        # no cbdma to launch vhost
-        self.launch_testpmd_as_vhost_user(eal_params % (queue,queue), self.cores[0:2], dev=vhost_dev % (queue,''), ports=[allow_pci[0]])
-        mode = "no_cbdma"
-        self.mode_list.append(mode)
-        self.launch_testpmd_as_virtio_user(dynamic_queue_number_cbdma_virtio_params, self.cores[2:4], dev=virtio_dev)
-        self.send_and_verify(mode, queue_list=range(queue))
+        # without cbdma to launch vhost
+        self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores, txd_rxd, txd_rxd, queues, queues), self.cores[0:2],
+                                          dev=vhost_dev % (queues,''), ports=[allow_pci[0]], iova_mode='va')
self.mode_list.append("with_0_cbdma") + self.launch_testpmd_as_virtio_user(virtio_param % (nb_cores, txd_rxd, txd_rxd, queues, queues), self.cores[2:4], + dev=virtio_dev) + self.send_and_verify("with_0_cbdma", queue_list=range(queues)) + + # with 4 cbdma and 4 queue and VA mode to launch vhost self.vhost_user.send_expect("quit", "#") + vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]}]" + self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores, txd_rxd, txd_rxd, queues, queues), self.cores[0:2], + dev=vhost_dev % (queues, vhost_dmas), ports=allow_pci[:5], iova_mode='va') + self.mode_list.append("with_4_cbdma") + self.send_and_verify("with_4_cbdma", queue_list=range(int(queues/2))) - # used 4 cbdma_num and 4 queue to launch vhost + #with 8 cbdma and 8 queue and VA mode to launch vhost + self.vhost_user.send_expect("quit", "#") + vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]};txq4@{self.used_cbdma[4]};txq5@{self.used_cbdma[5]};txq6@{self.used_cbdma[6]};txq7@{self.used_cbdma[7]}]" + self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores, txd_rxd, txd_rxd, queues, queues), self.cores[0:2], + dev=vhost_dev % (queues, vhost_dmas), ports=allow_pci, iova_mode='va') + self.mode_list.append("with_8_cbdma") + self.send_and_verify("with_8_cbdma", queue_list=range(queues)) - vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]}]" - self.launch_testpmd_as_vhost_user(eal_params % (queue/2,queue/2), self.cores[0:2], dev=vhost_dev % (int(queue/2),vhost_dmas), ports=allow_pci[:5]) - self.send_and_verify("used_4_cbdma_num", queue_list=range(int(queue/2))) - self.mode_list.append("used_4_cbdma_num") + # with 6 cbdma and 2 without cbdma and PA mode to launch vhost self.vhost_user.send_expect("quit", "#") + vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]};txq4@{self.used_cbdma[4]};txq5@{self.used_cbdma[5]}]" + self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores, txd_rxd, txd_rxd, queues, queues), self.cores[0:2], + dev=vhost_dev % (queues, vhost_dmas), ports=allow_pci, iova_mode='pa') + self.mode_list.append("with_6_cbdma") + self.send_and_verify("with_6_cbdma", queue_list=range(queues)) - #used 8 cbdma_num to launch vhost - vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]};txq4@{self.used_cbdma[4]};txq5@{self.used_cbdma[5]};txq6@{self.used_cbdma[6]};txq7@{self.used_cbdma[7]}]" - self.launch_testpmd_as_vhost_user(eal_params % (queue, queue), self.cores[0:2], - dev=vhost_dev % (queue,vhost_dmas), ports=allow_pci) - self.send_and_verify("used_8_cbdma_num", queue_list=range(queue)) - self.mode_list.append("used_8_cbdma_num") - self.send_and_verify("used_8_cbdma_num_1", queue_list=range(queue)) - self.mode_list.append("used_8_cbdma_num_1") - self.virtio_user.send_expect("stop", "testpmd> ", 60) - time.sleep(5) self.virtio_user.send_expect("quit", "# ") self.vhost_user.send_expect("quit", "# ") self.result_table_print() - # result_rows = [[], [64, 'dynamic_queue2', 7.4959375, 12.593175], [1518, 'dynamic_queue2', 1.91900225, 59.028509209999996]] - result_rows = self.result_table_getrows() # self.handle_expected(mode_list=self.mode_list) self.handle_results(mode_list=self.mode_list) - def 
-    def test_perf_pvp_packed_all_path_with_cbdma_vhost_enqueue_operations(self):
+    def test_perf_pvp_packed_ring_all_path_vhost_enqueue_operations_with_cbdma(self):
         """
-        Test Case 3: PVP packed ring all path with DMA-accelerated vhost enqueue
+        Test Case 3: PVP packed ring all path vhost enqueue operations with cbdma
         """
         self.test_target = self.running_case
         self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]
         txd_rxd = 1024
-        eal_tx_rxd = ' --nb-cores=%d --txd=%d --rxd=%d'
-        queue = 1
-        used_cbdma_num = 1
-        self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
+        vhost_param = ' --nb-cores=%d --txd=%d --rxd=%d'
+        nb_cores = 1
+        queues = 1
+        self.get_cbdma_ports_info_and_bind_to_dpdk(1)
         vhost_vdevs = f"'net_vhost0,iface=/tmp/s0,queues=%d,dmas=[txq0@{self.device_str}]'"
-        dev_path_mode_mapper = {
-            "inorder_mergeable_path": 'mrg_rxbuf=1,in_order=1,packed_vq=1',
-            "mergeable_path": 'mrg_rxbuf=1,in_order=0,packed_vq=1',
-            "inorder_non_mergeable_path": 'mrg_rxbuf=0,in_order=1,packed_vq=1',
-            "non_mergeable_path": 'mrg_rxbuf=0,in_order=0,packed_vq=1',
-            "vector_rx_path": 'mrg_rxbuf=0,in_order=0,packed_vq=1',
+        virtio_path_dict_VA = {
+            "inorder_mergeable_path_VA": 'mrg_rxbuf=1,in_order=1,packed_vq=1',
+            "mergeable_path_VA": 'mrg_rxbuf=1,in_order=0,packed_vq=1',
+            "inorder_non_mergeable_path_VA": 'mrg_rxbuf=0,in_order=1,packed_vq=1',
+            "non_mergeable_path_VA": 'mrg_rxbuf=0,in_order=0,packed_vq=1',
+            "vector_rx_path_VA": 'mrg_rxbuf=0,in_order=1,vectorized=1,packed_vq=1',
+            "vector_rx_path_not_power_of_2_VA": 'mrg_rxbuf=0,in_order=1,vectorized=1,packed_vq=1,queue_size=1025'
         }
-        pvp_split_all_path_virtio_params = "--tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=%d --txd=%d --rxd=%d" % (queue, txd_rxd, txd_rxd)
         allow_pci = [self.dut.ports_info[0]['pci']]
-        for index in range(used_cbdma_num):
+        for index in range(1):
             allow_pci.append(self.cbdma_dev_infos[index])
-        self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2], dev=vhost_vdevs % (queue), ports=allow_pci)
-        for key, path_mode in dev_path_mode_mapper.items():
-            if key == "vector_rx_path":
-                pvp_split_all_path_virtio_params = eal_tx_rxd % (queue, txd_rxd, txd_rxd)
-            vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'" % queue
-            self.diff_param_launch_send_and_verify(key, pvp_split_all_path_virtio_params, vdevs, self.cores[2:4], is_quit=False)
+        self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores, txd_rxd, txd_rxd), self.cores[0:2],
+                                          dev=vhost_vdevs % (nb_cores), ports=allow_pci, iova_mode='va')
+        for key, path_mode in virtio_path_dict_VA.items():
+            if key == "vector_rx_path_not_power_of_2_VA":
+                virtio_param = " --nb-cores=%d --txd=%d --rxd=%d" % (nb_cores, (txd_rxd + 1), (txd_rxd + 1))
+            else:
+                virtio_param = " --nb-cores=%d --txd=%d --rxd=%d" % (nb_cores, txd_rxd, txd_rxd)
+            vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'" % queues
+            self.diff_param_launch_send_and_verify(key, virtio_param, vdevs, self.cores[2:4], is_quit=False,
+                                                   launch_virtio=True)
             self.mode_list.append(key)
             # step3 restart vhost port, then check throughput again
             key += "_RestartVhost"
@@ -340,153 +373,180 @@ class TestVirTioVhostCbdma(TestCase):
             self.vhost_user.send_expect('start', 'testpmd> ', 10)
             self.vhost_user.send_expect('show port info all', 'testpmd> ', 30)
             self.vhost_user.send_expect('show port stats all', 'testpmd> ', 10)
-            self.diff_param_launch_send_and_verify(key, pvp_split_all_path_virtio_params, vdevs,
-                                                   self.cores[2:4], launch_virtio=False)
+            self.diff_param_launch_send_and_verify(key, virtio_param, vdevs, self.cores[2:4], is_quit=True,
+                                                   launch_virtio=False)
             self.mode_list.append(key)
+        # step9 quit all testpmd and re-run with PA mode
+        virtio_path_dict_PA = {
+            "inorder_mergeable_path_PA": 'mrg_rxbuf=1,in_order=1,packed_vq=1',
+            "mergeable_path_PA": 'mrg_rxbuf=1,in_order=0,packed_vq=1',
+            "inorder_non_mergeable_path_PA": 'mrg_rxbuf=0,in_order=1,packed_vq=1',
+            "non_mergeable_path_PA": 'mrg_rxbuf=0,in_order=0,packed_vq=1',
+            "vector_rx_path_PA": 'mrg_rxbuf=0,in_order=0,vectorized=1,packed_vq=1',
+            "vector_rx_path_not_power_of_2_PA": 'mrg_rxbuf=0,in_order=0,vectorized=1,packed_vq=1,queue_size=1025'
+        }
         self.vhost_user.send_expect("quit", "# ")
+        self.launch_testpmd_as_vhost_user(vhost_param % (queues, txd_rxd, txd_rxd), self.cores[0:2],
+                                          dev=vhost_vdevs % (queues), ports=allow_pci, iova_mode='pa')
+        for key, path_mode in virtio_path_dict_PA.items():
+            if key == "vector_rx_path_not_power_of_2_PA":
+                virtio_param = " --nb-cores=%d --txd=%d --rxd=%d" % (nb_cores, (txd_rxd + 1), (txd_rxd + 1))
+            else:
+                virtio_param = " --nb-cores=%d --txd=%d --rxd=%d" % (nb_cores, txd_rxd, txd_rxd)
+            vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'" % queues
+            self.diff_param_launch_send_and_verify(key, virtio_param, vdevs, self.cores[2:4], is_quit=False,
+                                                   launch_virtio=True)
+            self.mode_list.append(key)
+            # step3 restart vhost port, then check throughput again
+            key += "_RestartVhost"
+            self.vhost_user.send_expect('show port stats all', 'testpmd> ', 10)
+            self.vhost_user.send_expect('stop', 'testpmd> ', 10)
+            self.vhost_user.send_expect('start', 'testpmd> ', 10)
+            self.vhost_user.send_expect('show port info all', 'testpmd> ', 30)
+            self.vhost_user.send_expect('show port stats all', 'testpmd> ', 10)
+            self.diff_param_launch_send_and_verify(key, virtio_param, vdevs, self.cores[2:4], is_quit=True,
+                                                   launch_virtio=False)
+            self.mode_list.append(key)
+
         self.result_table_print()
         self.handle_expected(mode_list=self.mode_list)
         self.handle_results(mode_list=self.mode_list)
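handle_expected() and handle_results() are what tie these cases back to conf/vhost_cbdma.cfg: with update_expected = True the measured values are written back as the new baseline, otherwise each sub-case label is compared against the stored number. Roughly (illustrative, not the suite's exact code)::

    # Illustrative: per sub-case label, measured Mpps meets the stored expectation;
    # the gap is judged against accepted_tolerance from the suite config.
    for mode in self.mode_list:
        measured = self.throughput[mode]['imix'][1024]       # from the traffic generator
        expected = self.expected_throughput[mode]['imix'][1024]
        gap = measured - expected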
f"'net_vhost0,iface={virtio_path},queues=%d,client=1,%s'" + virtio_dev = f"net_virtio_user0,mac={self.virtio_mac},path={virtio_path},{path_mode},queues={queues},server=1" allow_pci = [self.dut.ports_info[0]['pci']] - for index in range(used_cbdma_num): + for index in range(8): allow_pci.append(self.cbdma_dev_infos[index]) + # without cbdma to launch vhost + self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores, txd_rxd, txd_rxd, queues, queues), self.cores[0:2], + dev=vhost_dev % (queues, ''), ports=[allow_pci[0]], iova_mode='va') + self.mode_list.append("with_0_cbdma") + self.launch_testpmd_as_virtio_user(virtio_param % (nb_cores, txd_rxd, txd_rxd, queues, queues), self.cores[2:4], + dev=virtio_dev) + self.send_and_verify("with_0_cbdma", queue_list=range(queues)) + + # with 4 cbdma and 4 queue and VA mode to launch vhost + self.vhost_user.send_expect("quit", "#") + vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]}]" + self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores, txd_rxd, txd_rxd, queues, queues), self.cores[0:2], + dev=vhost_dev % (queues, vhost_dmas), ports=allow_pci[:5], iova_mode='va') + self.mode_list.append("with_4_cbdma") + self.send_and_verify("with_4_cbdma", queue_list=range(int(queues / 2))) - # no cbdma to launch vhost - self.launch_testpmd_as_vhost_user(eal_params % (queue,queue), self.cores[0:2], dev=vhost_dev % (queue,''), ports= [allow_pci[0]]) - mode = "no_cbdma" - self.mode_list.append(mode) - self.launch_testpmd_as_virtio_user(dynamic_queue_number_cbdma_virtio_params, self.cores[2:4], dev=virtio_dev) - self.send_and_verify(mode, queue_list=range(queue)) + # with 8 cbdma and 8 queue and VA mode to launch vhost self.vhost_user.send_expect("quit", "#") + vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]};txq4@{self.used_cbdma[4]};txq5@{self.used_cbdma[5]};txq6@{self.used_cbdma[6]};txq7@{self.used_cbdma[7]}]" + self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores, txd_rxd, txd_rxd, queues, queues), self.cores[0:2], + dev=vhost_dev % (queues, vhost_dmas), ports=allow_pci, iova_mode='va') + self.mode_list.append("with_8_cbdma") + self.send_and_verify("with_8_cbdma", queue_list=range(queues)) - # used 4 cbdma_num and 4 queue to launch vhost - vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]}]" - self.launch_testpmd_as_vhost_user(eal_params % (queue/2,queue/2), self.cores[0:2], - dev=vhost_dev % (int(queue/2),vhost_dmas), ports=allow_pci[:5]) - self.send_and_verify("used_4_cbdma_num", queue_list=range(int(queue/2))) - self.mode_list.append("used_4_cbdma_num") + # with 6 cbdma and 2 without cbdma and PA mode to launch vhost self.vhost_user.send_expect("quit", "#") + vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]};txq4@{self.used_cbdma[4]};txq5@{self.used_cbdma[5]}]" + self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores, txd_rxd, txd_rxd, queues, queues), self.cores[0:2], + dev=vhost_dev % (queues, vhost_dmas), ports=allow_pci, iova_mode='pa') + self.mode_list.append("with_6_cbdma") + self.send_and_verify("with_6_cbdma", queue_list=range(queues)) - #used 8 cbdma_num to launch vhost - vhost_dmas = 
f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]};txq4@{self.used_cbdma[4]};txq5@{self.used_cbdma[5]};txq6@{self.used_cbdma[6]};txq7@{self.used_cbdma[7]}]" - self.launch_testpmd_as_vhost_user(eal_params % (queue, queue), self.cores[0:2], - dev=vhost_dev % (queue,vhost_dmas), ports=allow_pci) - self.send_and_verify("used_8_cbdma_num", queue_list=range(queue)) - self.mode_list.append("used_8_cbdma_num") - self.send_and_verify("used_8_cbdma_num_1", queue_list=range(queue)) - self.mode_list.append("used_8_cbdma_num_1") - self.virtio_user.send_expect("stop", "testpmd> ", 60) - time.sleep(5) self.virtio_user.send_expect("quit", "# ") self.vhost_user.send_expect("quit", "# ") self.result_table_print() - # result_rows = [[], [64, 'dynamic_queue2', 7.4959375, 12.593175], [1518, 'dynamic_queue2', 1.91900225, 59.028509209999996]] - result_rows = self.result_table_getrows() # self.handle_expected(mode_list=self.mode_list) self.handle_results(mode_list=self.mode_list) + def send_chain_packets_and_verify(self): + self.pmdout_virtio_user.execute_cmd("clear port stats all") + self.pmdout_virtio_user.execute_cmd("start") + self.pmdout_vhost_user.execute_cmd("vhost enable tx all") + self.pmdout_vhost_user.execute_cmd("set txpkts 65535,65535,65535,65535,65535") + self.pmdout_vhost_user.execute_cmd("start tx_first 32") + self.pmdout_vhost_user.execute_cmd("show port stats all") + out = self.pmdout_virtio_user.execute_cmd("show port stats all") + rx_pkts = int(re.search("RX-packets: (\d+)", out).group(1)) + self.verify(rx_pkts > 0, "virtio-user can not received packets") - def test_perf_compare_pvp_split_ring_performance(self): + def test_loopback_split_ring_large_chain_packets_stress_test_with_cbdma_enqueue(self): """ - Test Case5: Compare PVP split ring performance between CPU copy, CBDMA copy and Sync copy + Test Case5: loopback split ring large chain packets stress test with cbdma enqueue """ - used_cbdma_num = 1 - queue = 1 - txd_rxd = 1024 - eal_tx_rxd = ' --nb-cores=%d --txd=%d --rxd=%d' - path_mode = 'mrg_rxbuf=1,in_order=1,server=1' - allow_pci = [self.dut.ports_info[0]['pci']] - self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num) - for index in range(used_cbdma_num): + nb_cores = 1 + queues = 1 + txd_rxd = 2048 + txq_rxq = 1 + virtio_path = "/tmp/s0" + path_mode = 'mrg_rxbuf=1,in_order=0,vectorized=1,queue_size=2048' + self.get_cbdma_ports_info_and_bind_to_dpdk(1) + vhost_param = " --nb-cores=%d --mbuf-size=65535" + virtio_param = " --nb-cores=%d --txd=%d --rxd=%d --txq=%d --rxq=%d " + virtio_dev = f"net_virtio_user0,mac={self.virtio_mac},path={virtio_path},{path_mode},queues=%d" + vhost_vdevs = f"'net_vhost0,iface=/tmp/s0,queues=%d,dmas=[txq0@{self.device_str}]'" + allow_pci = [] + for index in range(1): allow_pci.append(self.cbdma_dev_infos[index]) - path_mode = 'mrg_rxbuf=1,in_order=1' - vhost_vdevs = f"'net_vhost0,iface=/tmp/s0,queues=%d,client=1,dmas=[txq0@{self.device_str}]'" - compare_pvp_split_ring_performance = "--tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=%d --txd=%d --rxd=%d" % (queue, txd_rxd, txd_rxd) - dev_path_mode_mapper = { - "sync_cbdma": '', - "cpu": '', - } - for key in dev_path_mode_mapper.items(): - if key == "cpu": - vhost_vdevs = f"'net_vhost0,iface=/tmp/s0,queues=1'" - self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2], dev=vhost_vdevs, ports=[allow_pci[0]]) - vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'" % queue - 
+
-    def test_perf_compare_pvp_split_ring_performance(self):
+    def test_loopback_split_ring_large_chain_packets_stress_test_with_cbdma_enqueue(self):
         """
-        Test Case5: Compare PVP split ring performance between CPU copy, CBDMA copy and Sync copy
+        Test Case 5: loopback split ring large chain packets stress test with cbdma enqueue
         """
-        used_cbdma_num = 1
-        queue = 1
-        txd_rxd = 1024
-        eal_tx_rxd = ' --nb-cores=%d --txd=%d --rxd=%d'
-        path_mode = 'mrg_rxbuf=1,in_order=1,server=1'
-        allow_pci = [self.dut.ports_info[0]['pci']]
-        self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
-        for index in range(used_cbdma_num):
+        nb_cores = 1
+        queues = 1
+        txd_rxd = 2048
+        txq_rxq = 1
+        virtio_path = "/tmp/s0"
+        path_mode = 'mrg_rxbuf=1,in_order=0,vectorized=1,queue_size=2048'
+        self.get_cbdma_ports_info_and_bind_to_dpdk(1)
+        vhost_param = " --nb-cores=%d --mbuf-size=65535"
+        virtio_param = " --nb-cores=%d --txd=%d --rxd=%d --txq=%d --rxq=%d "
+        virtio_dev = f"net_virtio_user0,mac={self.virtio_mac},path={virtio_path},{path_mode},queues=%d"
+        vhost_vdevs = f"'net_vhost0,iface=/tmp/s0,queues=%d,dmas=[txq0@{self.device_str}]'"
+        allow_pci = []
+        for index in range(1):
             allow_pci.append(self.cbdma_dev_infos[index])
-        path_mode = 'mrg_rxbuf=1,in_order=1'
-        vhost_vdevs = f"'net_vhost0,iface=/tmp/s0,queues=%d,client=1,dmas=[txq0@{self.device_str}]'"
-        compare_pvp_split_ring_performance = "--tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=%d --txd=%d --rxd=%d" % (queue, txd_rxd, txd_rxd)
-        dev_path_mode_mapper = {
-            "sync_cbdma": '',
-            "cpu": '',
-        }
-        for key in dev_path_mode_mapper.items():
-            if key == "cpu":
-                vhost_vdevs = f"'net_vhost0,iface=/tmp/s0,queues=1'"
-                self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2], dev=vhost_vdevs, ports=[allow_pci[0]])
-                vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'" % queue
-                self.launch_testpmd_as_virtio_user(compare_pvp_split_ring_performance, self.cores[2:4], dev=vdevs)
-                mode = "cpu_copy_64"
-                self.mode_list.append(mode)
-                self.send_and_verify(mode, frame_sizes=[64], pkt_length_mode='fixed')
-                perf_cpu_copy_64 = self.throughput[mode][64][self.nb_desc]
-                self.virtio_user.send_expect('show port stats all', 'testpmd> ', 10)
-                self.virtio_user.send_expect("quit", "# ")
-                self.vhost_user.send_expect("quit", "# ")
-            else:
-                self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2],dev=vhost_vdevs % (queue), ports=allow_pci)
-                vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d,server=1'" % queue
-                self.launch_testpmd_as_virtio_user(compare_pvp_split_ring_performance, self.cores[2:4],dev=vdevs)
-                mode = "sync_copy_64"
-                self.mode_list.append(mode)
-                self.send_and_verify(mode,frame_sizes=[64],pkt_length_mode='fixed')
-                perf_sync_copy_64 = self.throughput[mode][64][self.nb_desc]
-                mode = "cbdma_copy_1518"
-                self.mode_list.append(mode)
-                self.send_and_verify(mode,frame_sizes=[1518],pkt_length_mode='fixed')
-                perf_cbdma_copy_1518 = self.throughput[mode][1518][self.nb_desc]
-                self.virtio_user.send_expect('show port stats all', 'testpmd> ', 10)
-                self.vhost_user.send_expect("quit", "# ")
-                time.sleep(3)
-                self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2],dev=vhost_vdevs % (queue), ports=allow_pci)
-                mode = "sync_copy_1518"
-                self.mode_list.append(mode)
-                self.send_and_verify(mode,frame_sizes=[1518],pkt_length_mode='fixed')
-                perf_sync_copy_1518 = self.throughput[mode][1518][self.nb_desc]
-                self.check_port_stats_result(self.virtio_user)
-                self.virtio_user.send_expect("quit", "# ")
-                self.vhost_user.send_expect("quit", "# ")
-        self.result_table_print()
-        self.verify(abs(perf_sync_copy_64 - perf_cpu_copy_64)/perf_sync_copy_64 < 0.1, "sync_copy_64 vs. cpu_copy_64 delta > 10%" )
-        self.verify(abs(perf_cbdma_copy_1518 - perf_sync_copy_1518)/perf_sync_copy_1518 > 0.05,"cbdma_copy_1518 vs sync_copy_1518 delta < 5%")
-
-    @staticmethod
-    def vhost_or_virtio_set_one_queue(session):
-        session.send_expect('stop', 'testpmd> ', 120)
-        session.send_expect('port stop all', 'testpmd> ', 120)
-        session.send_expect('port config all rxq 1', 'testpmd> ', 120)
-        session.send_expect('port config all txq 1', 'testpmd> ', 120)
-        session.send_expect('port start all', 'testpmd> ', 120)
-        session.send_expect('start', 'testpmd> ', 120)
-        session.send_expect('show port info all', 'testpmd> ', 30)
-        session.send_expect('show port stats all', 'testpmd> ', 120)
-        time.sleep(5)
+        self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores), self.cores[0:2],
+                                          dev=vhost_vdevs % (queues), ports=allow_pci, iova_mode='va', set_pmd_param=False)
+        self.launch_testpmd_as_virtio_user(virtio_param % (nb_cores, txd_rxd, txd_rxd, txq_rxq, txq_rxq), self.cores[2:4],
+                                           dev=virtio_dev % (queues), set_pmd_param=False)
+        self.send_chain_packets_and_verify()
+
+        self.logger.info("Quit and relaunch vhost with PA mode")
+        self.pmdout_virtio_user.execute_cmd("quit", "#")
+        self.pmdout_vhost_user.execute_cmd("quit", "#")
+        self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores), self.cores[0:2],
+                                          dev=vhost_vdevs % (queues), ports=allow_pci, iova_mode='pa', set_pmd_param=False)
+        self.launch_testpmd_as_virtio_user(virtio_param % (nb_cores, txd_rxd, txd_rxd, txq_rxq, txq_rxq), self.cores[2:4],
+                                           dev=virtio_dev % (queues), set_pmd_param=False)
+        self.send_chain_packets_and_verify()
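The stress angle of this case is packet chaining: each entry of 'set txpkts' becomes one mbuf-sized segment, so the five 65535-byte entries chain into packets of roughly 320 KB. Not part of the patch, just the arithmetic:

    # Sketch only: size of the chained packets this case generates.
    mbuf_size = 65535            # vhost side runs with --mbuf-size=65535
    txpkts = [65535] * 5         # 'set txpkts 65535,65535,65535,65535,65535'
    assert all(seg <= mbuf_size for seg in txpkts)
    print(sum(txpkts))           # 327675 bytes across five chained segments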
+
+    def test_loopback_packed_ring_large_chain_packets_stress_test_with_cbdma_enqueue(self):
+        """
+        Test Case 6: loopback packed ring large chain packets stress test with cbdma enqueue
+        """
+        nb_cores = 1
+        queues = 1
+        txd_rxd = 2048
+        txq_rxq = 1
+        virtio_path = "/tmp/s0"
+        path_mode = 'mrg_rxbuf=1,in_order=0,vectorized=1,packed_vq=1,queue_size=2048'
+        self.get_cbdma_ports_info_and_bind_to_dpdk(1)
+        vhost_param = " --nb-cores=%d --mbuf-size=65535"
+        virtio_param = " --nb-cores=%d --txd=%d --rxd=%d --txq=%d --rxq=%d "
+        virtio_dev = f"net_virtio_user0,mac={self.virtio_mac},path={virtio_path},{path_mode},queues=%d"
+        vhost_vdevs = f"'net_vhost0,iface=/tmp/s0,queues=%d,dmas=[txq0@{self.device_str}]'"
+        allow_pci = []
+        for index in range(1):
+            allow_pci.append(self.cbdma_dev_infos[index])
+        self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores), self.cores[0:2],
+                                          dev=vhost_vdevs % (queues), ports=allow_pci, iova_mode='va', set_pmd_param=False)
+        self.launch_testpmd_as_virtio_user(virtio_param % (nb_cores, txd_rxd, txd_rxd, txq_rxq, txq_rxq), self.cores[2:4],
+                                           dev=virtio_dev % (queues), set_pmd_param=False)
+        self.send_chain_packets_and_verify()
+
+        self.logger.info("Quit and relaunch vhost with PA mode")
+        self.pmdout_virtio_user.execute_cmd("quit", "#")
+        self.pmdout_vhost_user.execute_cmd("quit", "#")
+        self.launch_testpmd_as_vhost_user(vhost_param % (nb_cores), self.cores[0:2],
+                                          dev=vhost_vdevs % (queues), ports=allow_pci, iova_mode='pa', set_pmd_param=False)
+        self.launch_testpmd_as_virtio_user(virtio_param % (nb_cores, txd_rxd, txd_rxd, txq_rxq, txq_rxq), self.cores[2:4],
+                                           dev=virtio_dev % (queues), set_pmd_param=False)
+        self.send_chain_packets_and_verify()
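Test cases 5 and 6 are deliberately symmetric; assuming nothing else differs, the packed-ring variant only adds packed_vq=1 to the virtio-user path flags. Not part of the patch, but the delta isolated:

    split_mode = 'mrg_rxbuf=1,in_order=0,vectorized=1,queue_size=2048'
    packed_mode = 'mrg_rxbuf=1,in_order=0,vectorized=1,packed_vq=1,queue_size=2048'
    print(set(packed_mode.split(',')) - set(split_mode.split(',')))
    # {'packed_vq=1'}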
-
-    @property
-    def check_value(self):
-        check_dict = dict.fromkeys(self.frame_sizes)
-        linerate = {64: 0.085, 128: 0.12, 256: 0.20, 512: 0.35, 1024: 0.50, 1280: 0.55, 1518: 0.60}
-        for size in self.frame_sizes:
-            speed = self.wirespeed(self.nic, size, self.number_of_ports)
-            check_dict[size] = round(speed * linerate[size], 2)
-        return check_dict
 
     def send_imix_and_verify(self, mode, multiple_queue=True, queue_list=[]):
         """
@@ -708,8 +768,6 @@ class TestVirTioVhostCbdma(TestCase):
         """
         self.dut.send_expect("killall -I %s" % self.testpmd_name, '#', 20)
         self.bind_cbdma_device_to_kernel()
-        if self.running_case == 'test_check_threshold_value_with_cbdma':
-            self.bind_nic_driver(self.dut_ports, self.drivername)
 
     def tear_down_all(self):
         """