@@ -57,15 +57,12 @@ from copy import deepcopy
class TestVirTioVhostCbdma(TestCase):
def set_up_all(self):
- # Get and verify the ports
self.dut_ports = self.dut.get_ports()
self.number_of_ports = 1
- self.vhost_user = self.dut.new_session(suite="vhost-user")
- self.virtio_user = self.dut.new_session(suite="virtio-user")
- self.virtio_user1 = self.dut.new_session(suite="virtio-user1")
- self.pmdout_vhost_user = PmdOutput(self.dut, self.vhost_user)
- self.pmdout_virtio_user = PmdOutput(self.dut, self.virtio_user)
- self.pmdout_virtio_user1 = PmdOutput(self.dut, self.virtio_user1)
+ self.vhost = self.dut.new_session(suite="vhost")
+ self.vuser0 = self.dut.new_session(suite="vuser0")
+ self.vhost_pmd = PmdOutput(self.dut, self.vhost)
+ self.vuser0_pmd = PmdOutput(self.dut, self.vuser0)
self.frame_sizes = [64, 1518]
self.virtio_mac = "00:01:02:03:04:05"
self.headers_size = HEADER_SIZE['eth'] + HEADER_SIZE['ip']
@@ -95,14 +92,9 @@ class TestVirTioVhostCbdma(TestCase):
self.table_header.append("Mpps")
self.table_header.append("% linerate")
self.result_table_create(self.table_header)
- # test parameters include: frames size, descriptor numbers
self.test_parameters = self.get_suite_cfg()['test_parameters']
- # traffic duraion in second
self.test_duration = self.get_suite_cfg()['test_duration']
- # initialize throughput attribution
- # {'TestCase':{ 'Mode': {'$framesize':{"$nb_desc": 'throughput'}}}
self.throughput = {}
- # Accepted tolerance in Mpps
self.gap = self.get_suite_cfg()['accepted_tolerance']
self.test_result = {}
self.nb_desc = self.test_parameters.get(list(self.test_parameters.keys())[0])[0]
@@ -130,11 +122,10 @@ class TestVirTioVhostCbdma(TestCase):
out = self.dut.send_expect('./usertools/dpdk-devbind.py --status-dev misc', '# ', 30)
device_info = out.split('\n')
for device in device_info:
- pci_info = re.search('\s*(0000:\d*:\d*.\d*)', device)
+ pci_info = re.search(r'\s*(0000:\S*:\d*\.\d*)', device)
if pci_info is not None:
dev_info = pci_info.group(1)
- # the numa id of ioat dev, only add the device which
- # on same socket with nic dev
+ # NUMA id of the IOAT device: only add devices on the same socket as the NIC
+ # (on two-socket systems, PCI bus numbers >= 0x80 belong to socket 1)
bus = int(dev_info[5:7], base=16)
if bus >= 128:
cur_socket = 1
@@ -157,7 +148,7 @@ class TestVirTioVhostCbdma(TestCase):
"""
check each queue has receive packets
"""
- out = self.vhost_user.send_expect("stop", "testpmd> ", 60)
+ out = self.vhost_pmd.execute_cmd("stop")
for queue_index in queue_list:
queue = "Queue= %d" % queue_index
index = out.find(queue)
@@ -165,59 +156,46 @@ class TestVirTioVhostCbdma(TestCase):
tx = re.search("TX-packets:\s*(\d*)", out[index:])
rx_packets = int(rx.group(1))
tx_packets = int(tx.group(1))
- self.verify(rx_packets > 0 and tx_packets > 0,
- "The queue %d rx-packets or tx-packets is 0 about " %
- queue_index + \
- "rx-packets:%d, tx-packets:%d" %
- (rx_packets, tx_packets))
- self.vhost_user.send_expect("clear port stats all", "testpmd> ", 30)
- self.vhost_user.send_expect("start", "testpmd> ", 30)
-
- def check_port_stats_result(self, session):
- out = session.send_expect("show port stats all", "testpmd> ", 30)
- self.result_first = re.findall(r'RX-packets: (\w+)', out)
- self.result_secondary = re.findall(r'TX-packets: (\w+)', out)
- self.verify(int(self.result_first[0]) > 1 and int(self.result_secondary[0]) > 1, "forward packets no correctly")
+ self.verify(rx_packets > 0 and tx_packets > 0,
+ "queue %d rx-packets or tx-packets is 0: rx-packets: %d, tx-packets: %d" %
+ (queue_index, rx_packets, tx_packets))
+ self.vhost_pmd.execute_cmd("clear port stats all")
+ self.vhost_pmd.execute_cmd("start")
+
+ def check_port_stats_result(self, pmd):
+ out = pmd.execute_cmd("show port stats all")
+ rx_packets_num = re.findall(r'RX-packets: (\w+)', out)[0]
+ tx_packets_num = re.findall(r'TX-packets: (\w+)', out)[0]
+ self.verify(int(rx_packets_num) >= 1 and int(tx_packets_num) >= 1, "packets were not forwarded correctly")
@property
def check_2m_env(self):
out = self.dut.send_expect("cat /proc/meminfo |grep Hugepagesize|awk '{print($2)}'", "# ")
return True if out == '2048' else False
- def launch_testpmd_as_vhost_user(self, command, cores="Default", dev="", ports = ""):
- self.pmdout_vhost_user.start_testpmd(cores=cores, param=command, vdevs=[dev], ports=ports, prefix="vhost")
- self.vhost_user.send_expect('set fwd mac', 'testpmd> ', 120)
- self.vhost_user.send_expect('start', 'testpmd> ', 120)
+ def launch_vhost_testpmd(self, command, cores="Default", dev="", ports = ""):
+ self.vhost_pmd.start_testpmd(cores=cores, param=command, vdevs=[dev], ports=ports, prefix="vhost")
+ self.vhost_pmd.execute_cmd('set fwd mac')
+ self.vhost_pmd.execute_cmd('start')
- def launch_testpmd_as_virtio_user1(self, command, cores="Default", dev=""):
- eal_params = ""
+ def launch_vuser0_testpmd(self, command, cores="Default", dev="", eal_params=""):
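+ # 2MB hugepages create one memseg file per page; --single-file-segments keeps
+ # the number of memory regions passed to the vhost-user backend within its limit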
if self.check_2m_env:
eal_params += " --single-file-segments"
- self.pmdout_virtio_user1.start_testpmd(cores, command, vdevs=[dev], no_pci=True, prefix="virtio1", eal_param=eal_params)
- self.virtio_user1.send_expect('set fwd mac', 'testpmd> ', 30)
- self.virtio_user1.send_expect('start', 'testpmd> ', 30)
- self.virtio_user1.send_expect('show port info all', 'testpmd> ', 30)
+ self.vuser0_pmd.start_testpmd(cores, command, vdevs=[dev], no_pci=True, prefix="vuser0", eal_param=eal_params)
+ self.vuser0_pmd.execute_cmd('set fwd mac')
+ self.vuser0_pmd.execute_cmd('start')
+ self.vuser0_pmd.execute_cmd('show port info all')
- def launch_testpmd_as_virtio_user(self, command, cores="Default", dev=""):
- eal_params = ""
- if self.check_2m_env:
- eal_params += " --single-file-segments"
- self.pmdout_virtio_user.start_testpmd(cores, command, vdevs=[dev],no_pci=True, prefix="virtio", eal_param=eal_params)
- self.virtio_user.send_expect('set fwd mac', 'testpmd> ', 120)
- self.virtio_user.send_expect('start', 'testpmd> ', 120)
- self.virtio_user.send_expect('show port info all', 'testpmd> ', 30)
+ def diff_param_launch_send_and_verify(self, mode, params, dev, cores, launch_vuser0=True, quit_vuser0=False, eal_params=""):
+ if launch_vuser0:
+ self.launch_vuser0_testpmd(command=params, cores=cores, dev=dev, eal_params=eal_params)
+ self.send_and_verify(mode=mode)
+ if quit_vuser0:
+ self.vuser0_pmd.quit()
+ time.sleep(3)
- def diff_param_launch_send_and_verify(self, mode, params, dev, cores, is_quit=True, launch_virtio=True):
- if launch_virtio:
- self.launch_testpmd_as_virtio_user(params, cores, dev=dev)
- self.send_and_verify(mode)
- if is_quit:
- self.virtio_user.send_expect("quit", "# ")
- time.sleep(3)
-
- def test_perf_pvp_spilt_all_path_with_cbdma_vhost_enqueue_operations(self):
+ def test_perf_pvp_split_all_path_with_DMA_accelerated_vhost_enqueue(self):
"""
- Test Case 1: PVP Split all path with DMA-accelerated vhost enqueue
+ Test Case 1: PVP split all path with DMA-accelerated vhost enqueue
"""
self.test_target = self.running_case
self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]
@@ -227,259 +205,287 @@ class TestVirTioVhostCbdma(TestCase):
queue = 1
used_cbdma_num = 1
self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
- vhost_vdevs = f"'net_vhost0,iface=/tmp/s0,queues=%d,dmas=[txq0@{self.device_str}],dmathr=%d'"
+ vhost_vdevs = f"'net_vhost0,iface=/tmp/s0,queues=%d,dmas=[txq0@{self.device_str}],dmathr=%d,client=1'"
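+ # dmathr is the DMA copy threshold: enqueued packets longer than this value are
+ # offloaded to the CBDMA engine, shorter ones stay on the CPU copy path;
+ # client=1 lets vhost reconnect to the virtio-user server port after a restart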
dev_path_mode_mapper = {
"inorder_mergeable_path": 'mrg_rxbuf=1,in_order=1',
"mergeable_path": 'mrg_rxbuf=1,in_order=0',
"inorder_non_mergeable_path": 'mrg_rxbuf=0,in_order=1',
"non_mergeable_path": 'mrg_rxbuf=0,in_order=0',
- "vector_rx_path": 'mrg_rxbuf=0,in_order=0',
+ "vector_rx_path": 'mrg_rxbuf=0,in_order=0,vectorized=1',
}
- pvp_split_all_path_virtio_params = "--tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=%d --txd=%d --rxd=%d" % (queue, txd_rxd, txd_rxd)
+ virtio_user_params = "--tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=%d --txd=%d --rxd=%d" % (queue, txd_rxd, txd_rxd)
allow_pci = [self.dut.ports_info[0]['pci']]
for index in range(used_cbdma_num):
allow_pci.append(self.cbdma_dev_infos[index])
- self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2], dev=vhost_vdevs % (queue, dmathr), ports=allow_pci)
- for key, path_mode in dev_path_mode_mapper.items():
+ self.launch_vhost_testpmd(command=eal_tx_rxd % (queue, txd_rxd, txd_rxd), cores=self.cores[0:2],
+ dev=vhost_vdevs % (queue, dmathr), ports=allow_pci)
+ for key, value in dev_path_mode_mapper.items():
if key == "vector_rx_path":
- pvp_split_all_path_virtio_params = eal_tx_rxd % (queue, txd_rxd, txd_rxd)
- vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'" % queue
- self.diff_param_launch_send_and_verify(key, pvp_split_all_path_virtio_params, vdevs, self.cores[2:4], is_quit=False)
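+ # the vectorized Rx path is not taken when VLAN-strip offload is enabled,
+ # so drop the offload flags for this path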
+ virtio_user_params = eal_tx_rxd % (queue, txd_rxd, txd_rxd)
+ virtio_vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{value},queues=%d,server=1'" % queue
+ self.diff_param_launch_send_and_verify(mode=key, params=virtio_user_params, dev=virtio_vdevs, cores=self.cores[2:4], launch_vuser0=True, quit_vuser0=False)
self.mode_list.append(key)
# step3 restart vhost port, then check throughput again
key += "_RestartVhost"
- self.vhost_user.send_expect('show port stats all', 'testpmd> ', 10)
- self.vhost_user.send_expect('stop', 'testpmd> ', 10)
- self.vhost_user.send_expect('start', 'testpmd> ', 10)
- self.vhost_user.send_expect('show port info all', 'testpmd> ', 30)
- self.vhost_user.send_expect('show port stats all', 'testpmd> ', 10)
- self.diff_param_launch_send_and_verify(key, pvp_split_all_path_virtio_params, vdevs,
- self.cores[2:4], launch_virtio=False)
+ self.vhost_pmd.execute_cmd('show port stats all')
+ self.vhost_pmd.execute_cmd('stop')
+ self.vhost_pmd.execute_cmd('start')
+ self.vhost_pmd.execute_cmd('show port stats all')
+ self.diff_param_launch_send_and_verify(mode=key, params=virtio_user_params, dev=virtio_vdevs, cores=self.cores[2:4], launch_vuser0=False, quit_vuser0=True)
self.mode_list.append(key)
- self.vhost_user.send_expect("quit", "# ")
+ self.vhost_pmd.quit()
self.result_table_print()
self.handle_expected(mode_list=self.mode_list)
self.handle_results(mode_list=self.mode_list)
- def test_perf_dynamic_queue_number_cbdma_vhost_enqueue_operations(self):
+ def test_perf_split_ring_dynamic_queue_number_test_for_DMA_accelerated_vhost_Tx_operations(self):
"""
- Test Case2: Split ring dynamic queue number test for DMA-accelerated vhost Tx operations
+ Test Case 2: PVP split ring dynamic queue number test for DMA-accelerated vhost Tx operations
"""
self.test_target = self.running_case
self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]
used_cbdma_num = 8
queue = 8
txd_rxd = 1024
- dmathr = 1024
nb_cores = 1
virtio_path = "/tmp/s0"
path_mode = 'mrg_rxbuf=1,in_order=1'
self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
- eal_params = " --nb-cores=1 --txd=1024 --rxd=1024 --txq=%d --rxq=%d "
- dynamic_queue_number_cbdma_virtio_params = f" --tx-offloads=0x0 --enable-hw-vlan-strip {eal_params % (queue,queue)}"
+ eal_params = " --nb-cores=%d --txd=%d --rxd=%d --txq=%d --rxq=%d "
+ virtio_user_params = f" --tx-offloads=0x0 --enable-hw-vlan-strip {eal_params % (nb_cores, txd_rxd, txd_rxd, queue, queue)}"
+ vhost_dev = f"'net_vhost0,iface=/tmp/s0,queues=%d,client=1%s'"
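+ # vhost runs as client (client=1) and virtio-user as server (server=1), so vhost
+ # can be relaunched with a different queue count while virtio-user stays up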
virtio_dev = f"net_virtio_user0,mac={self.virtio_mac},path={virtio_path},{path_mode},queues={queue},server=1"
- vhost_dev = f"'net_vhost0,iface={virtio_path},queues=%d,client=1,%s'"
# launch vhost testpmd
allow_pci = [self.dut.ports_info[0]['pci']]
for index in range(used_cbdma_num):
allow_pci.append(self.cbdma_dev_infos[index])
# no cbdma to launch vhost
- self.launch_testpmd_as_vhost_user(eal_params % (queue,queue), self.cores[0:2], dev=vhost_dev % (queue,''), ports=[allow_pci[0]])
+ self.launch_vhost_testpmd(command=eal_params % (nb_cores, txd_rxd, txd_rxd, queue, queue),
+ cores=self.cores[0:2], dev=vhost_dev % (queue, ''), ports=[allow_pci[0]])
mode = "no_cbdma"
self.mode_list.append(mode)
- self.launch_testpmd_as_virtio_user(dynamic_queue_number_cbdma_virtio_params, self.cores[2:4], dev=virtio_dev)
+ self.launch_vuser0_testpmd(command=virtio_user_params, cores=self.cores[2:4], dev=virtio_dev)
self.send_and_verify(mode, queue_list=range(queue))
- self.vhost_user.send_expect("quit", "#")
+ self.vhost_pmd.quit()
# used 4 cbdma_num and 4 queue to launch vhost
-
+ queue = 4
+ txd_rxd = 256
+ dmathr = 1024
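+ # bind each of the 4 Tx queues to its own CBDMA channel (txqN@<pci>)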
vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]}],dmathr={dmathr}"
- self.launch_testpmd_as_vhost_user(eal_params % (queue/2,queue/2), self.cores[0:2], dev=vhost_dev % (int(queue/2),vhost_dmas), ports=allow_pci[:5])
- self.send_and_verify("used_4_cbdma_num", queue_list=range(int(queue/2)))
+ self.launch_vhost_testpmd(command=eal_params % (nb_cores, txd_rxd, txd_rxd, queue, queue),
+ cores=self.cores[0:2], dev=vhost_dev % (queue, (',' + vhost_dmas)), ports=allow_pci[:5])
+ self.send_and_verify("used_4_cbdma_num", queue_list=range(queue))
self.mode_list.append("used_4_cbdma_num")
- self.vhost_user.send_expect("quit", "#")
+ self.vhost_pmd.quit()
- #used 8 cbdma_num to launch vhost
+ # used 8 cbdma_num to launch vhost
+ queue = 8
+ txd_rxd = 1024
+ dmathr = 1024
vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]};txq4@{self.used_cbdma[4]};txq5@{self.used_cbdma[5]};txq6@{self.used_cbdma[6]};txq7@{self.used_cbdma[7]}],dmathr={dmathr}"
- self.launch_testpmd_as_vhost_user(eal_params % (queue, queue), self.cores[0:2],
- dev=vhost_dev % (queue,vhost_dmas), ports=allow_pci)
+ self.launch_vhost_testpmd(command=eal_params % (nb_cores, txd_rxd, txd_rxd, queue, queue),
+ cores=self.cores[0:2], dev=vhost_dev % (queue, (',' + vhost_dmas)), ports=allow_pci)
self.send_and_verify("used_8_cbdma_num", queue_list=range(queue))
self.mode_list.append("used_8_cbdma_num")
- self.send_and_verify("used_8_cbdma_num_1", queue_list=range(queue))
- self.mode_list.append("used_8_cbdma_num_1")
- self.virtio_user.send_expect("stop", "testpmd> ", 60)
+ self.vuser0_pmd.execute_cmd('stop')
time.sleep(5)
- self.virtio_user.send_expect("quit", "# ")
- self.vhost_user.send_expect("quit", "# ")
+ self.vuser0_pmd.quit()
+ self.vhost_pmd.quit()
self.result_table_print()
- # result_rows = [[], [64, 'dynamic_queue2', 7.4959375, 12.593175], [1518, 'dynamic_queue2', 1.91900225, 59.028509209999996]]
- result_rows = self.result_table_getrows() #
self.handle_expected(mode_list=self.mode_list)
self.handle_results(mode_list=self.mode_list)
- def test_check_threshold_value_with_cbdma(self):
- """
- Test Case3: CBDMA threshold value check
+ def test_perf_pvp_packed_all_path_with_DMA_accelerated_vhost_enqueue(self):
"""
- used_cbdma_num = 4
- params = '--nb-cores=1 --rxq=2 --txq=2'
- dmathr = [512, 4096]
- vid_dict = {}
- self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
- self.dut.restore_interfaces()
- # launch vhost, Check the cbdma threshold value for each vhost port can be config correct from vhost log
- vhost_vdev = [f"'eth_vhost0,iface=vhost-net0,queues=2,client=1,dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]}],dmathr={dmathr[0]}'", \
- f"'eth_vhost1,iface=vhost-net1,queues=2,client=1,dmas=[txq0@{self.used_cbdma[2]};txq1@{self.used_cbdma[3]}],dmathr={dmathr[1]}'"]
- virtio_dev0 = f"net_virtio_user0,mac=00:01:02:03:04:05,path=./vhost-net0,queues=2,server=1,packed_vq=0,mrg_rxbuf=1,in_order=0,queue_size=4096"
- virtio_dev1 = f"net_virtio_user1,mac=00:01:02:03:04:05,path=./vhost-net1,queues=2,server=1,packed_vq=0,mrg_rxbuf=1,in_order=0,queue_size=4096"
- vdev_params = '{} --vdev {}'.format(vhost_vdev[0], vhost_vdev[1])
- allow_pci = []
- for index in range(used_cbdma_num):
- allow_pci.append(self.cbdma_dev_infos[index])
- self.pmdout_vhost_user.start_testpmd(cores=self.cores[0:2], param=params, vdevs=[vdev_params], ports=allow_pci, prefix="vhost", fixed_prefix=True)
- self.vhost_user.send_expect('start', 'testpmd> ', 120)
- # vid0,qid0,dma2,threshold:4096
- self.launch_testpmd_as_virtio_user1(params, self.cores[2:4], dev=virtio_dev1)
- vid_dict[dmathr[1]] = 0
- # vid1,qid0,dma0,threshold:512
- self.launch_testpmd_as_virtio_user(params, self.cores[4:6], dev=virtio_dev0)
- vid_dict[dmathr[0]] = 1
- # Check the cbdma threshold value for each vhost port can be config correct from vhost log
- out = self.vhost_user.get_session_before(timeout=2)
- self.vhost_user.send_expect("quit", "# ")
- self.virtio_user.send_expect("quit", "# ")
- self.virtio_user1.send_expect("quit", "# ")
- pattern = re.compile(r'dma parameters: vid\S+,qid\d+,dma\d+,threshold:\d+')
- return_param = re.findall(pattern, out)
- self.logger.info("Actual Info:" + str(return_param))
- check_value = 0
- for dma in dmathr:
- check_value += len(re.findall('vid{},\S+threshold:{}'.format(vid_dict[dma], dma), str(return_param)))
- self.verify(check_value == used_cbdma_num, "Check failed: Actual value:{}".format(return_param))
-
- def test_perf_pvp_packed_all_path_with_cbdma_vhost_enqueue_operations(self):
- """
- Test Case 4: PVP packed ring all path with DMA-accelerated vhost enqueue
+ Test Case 3: PVP packed ring all path with DMA-accelerated vhost enqueue
"""
self.test_target = self.running_case
self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]
txd_rxd = 1024
- dmathr = 1024
+ dmathr = 0
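+ # dmathr=0: every packet exceeds the threshold, so all enqueues go through the CBDMA engine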
eal_tx_rxd = ' --nb-cores=%d --txd=%d --rxd=%d'
queue = 1
used_cbdma_num = 1
self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
- vhost_vdevs = f"'net_vhost0,iface=/tmp/s0,queues=%d,dmas=[txq0@{self.device_str}],dmathr=%d'"
+ vhost_vdevs = f"'net_vhost0,iface=/tmp/s0,queues=%d,dmas=[txq0@{self.device_str}],dmathr=%d,client=1'"
dev_path_mode_mapper = {
"inorder_mergeable_path": 'mrg_rxbuf=1,in_order=1,packed_vq=1',
"mergeable_path": 'mrg_rxbuf=1,in_order=0,packed_vq=1',
"inorder_non_mergeable_path": 'mrg_rxbuf=0,in_order=1,packed_vq=1',
"non_mergeable_path": 'mrg_rxbuf=0,in_order=0,packed_vq=1',
- "vector_rx_path": 'mrg_rxbuf=0,in_order=0,packed_vq=1',
+ "vector_rx_path": 'mrg_rxbuf=0,in_order=1,vectorized=1,packed_vq=1',
+ "vector_rx_path_no_power_of_2": 'mrg_rxbuf=0,in_order=1,vectorized=1,packed_vq=1,queue_size=%d' % (txd_rxd + 1),
}
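+ # the queue_size entry above requests a 1025-entry ring so the vectorized path
+ # is also exercised with a non-power-of-2 packed ring size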
- pvp_split_all_path_virtio_params = "--tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=%d --txd=%d --rxd=%d" % (queue, txd_rxd, txd_rxd)
+ virtio_user_params = "--tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=%d --txd=%d --rxd=%d" % (queue, txd_rxd, txd_rxd)
allow_pci = [self.dut.ports_info[0]['pci']]
for index in range(used_cbdma_num):
allow_pci.append(self.cbdma_dev_infos[index])
- self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2], dev=vhost_vdevs % (queue, dmathr), ports=allow_pci)
- for key, path_mode in dev_path_mode_mapper.items():
+ self.launch_vhost_testpmd(command=eal_tx_rxd % (queue, txd_rxd, txd_rxd), cores=self.cores[0:2],
+ dev=vhost_vdevs % (queue, dmathr), ports=allow_pci)
+
+ for key, value in dev_path_mode_mapper.items():
+ eal_params = ""
if key == "vector_rx_path":
- pvp_split_all_path_virtio_params = eal_tx_rxd % (queue, txd_rxd, txd_rxd)
- vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'" % queue
- self.diff_param_launch_send_and_verify(key, pvp_split_all_path_virtio_params, vdevs, self.cores[2:4], is_quit=False)
+ virtio_user_params = eal_tx_rxd % (queue, txd_rxd, txd_rxd)
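+ # the vectorized packed-ring datapath needs AVX512, which is only selected
+ # when the EAL allows a 512-bit max SIMD bitwidth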
+ eal_params += " --force-max-simd-bitwidth=512"
+ if key == "vector_rx_path_no_power_of_2":
+ txd_rxd = txd_rxd + 1
+ virtio_user_params = eal_tx_rxd % (queue, txd_rxd, txd_rxd)
+ eal_params += " --force-max-simd-bitwidth=512"
+ virtio_vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{value},queues=%d,server=1'" % queue
+ self.diff_param_launch_send_and_verify(mode=key, params=virtio_user_params, dev=virtio_vdevs,
+ cores=self.cores[2:4], launch_vuser0=True, quit_vuser0=False, eal_params=eal_params)
self.mode_list.append(key)
# step3 restart vhost port, then check throughput again
key += "_RestartVhost"
- self.vhost_user.send_expect('show port stats all', 'testpmd> ', 10)
- self.vhost_user.send_expect('stop', 'testpmd> ', 10)
- self.vhost_user.send_expect('start', 'testpmd> ', 10)
- self.vhost_user.send_expect('show port info all', 'testpmd> ', 30)
- self.vhost_user.send_expect('show port stats all', 'testpmd> ', 10)
- self.diff_param_launch_send_and_verify(key, pvp_split_all_path_virtio_params, vdevs,
- self.cores[2:4], launch_virtio=False)
+ self.vhost_pmd.execute_cmd('show port stats all')
+ self.vhost_pmd.execute_cmd('stop')
+ self.vhost_pmd.execute_cmd('start')
+ self.vhost_pmd.execute_cmd('show port stats all')
+ self.diff_param_launch_send_and_verify(mode=key, params=virtio_user_params, dev=virtio_vdevs,
+ cores=self.cores[2:4], launch_vuser0=False, quit_vuser0=True, eal_params=eal_params)
self.mode_list.append(key)
- self.vhost_user.send_expect("quit", "# ")
+ self.vhost_pmd.quit()
self.result_table_print()
self.handle_expected(mode_list=self.mode_list)
self.handle_results(mode_list=self.mode_list)
- def test_perf_packed_dynamic_queue_number_cbdma_vhost_enqueue_operations(self):
+ def test_perf_packed_ring_dynamic_queue_number_test_for_DMA_accelerated_vhost_Tx_operations(self):
"""
- Test Case5: Packed ring dynamic queue number test for DMA-accelerated vhost Tx operations
+ Test Case 4: PVP packed ring dynamic queue number test for DMA-accelerated vhost Tx operations
"""
self.test_target = self.running_case
self.expected_throughput = self.get_suite_cfg()['expected_throughput'][self.test_target]
used_cbdma_num = 8
queue = 8
txd_rxd = 1024
- dmathr = 1024
nb_cores = 1
virtio_path = "/tmp/s0"
- path_mode = 'mrg_rxbuf=1,in_order=1,packed_vq=1'
+ path_mode = 'mrg_rxbuf=1,in_order=0,packed_vq=1'
self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
- vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]}],dmathr={dmathr}"
- eal_params = " --nb-cores=1 --txd=1024 --rxd=1024 --txq=%d --rxq=%d "
- dynamic_queue_number_cbdma_virtio_params = f" --tx-offloads=0x0 --enable-hw-vlan-strip {eal_params % (queue, queue)}"
+ eal_params = " --nb-cores=%d --txd=%d --rxd=%d --txq=%d --rxq=%d "
+ virtio_user_params = f" --tx-offloads=0x0 --enable-hw-vlan-strip {eal_params % (nb_cores, txd_rxd, txd_rxd, queue, queue)}"
+ vhost_dev = f"'net_vhost0,iface=/tmp/s0,queues=%d,client=1%s'"
virtio_dev = f"net_virtio_user0,mac={self.virtio_mac},path={virtio_path},{path_mode},queues={queue},server=1"
- vhost_dev = f"'net_vhost0,iface={virtio_path},queues=%s,client=1,%s'"
# launch vhost testpmd
allow_pci = [self.dut.ports_info[0]['pci']]
for index in range(used_cbdma_num):
allow_pci.append(self.cbdma_dev_infos[index])
# no cbdma to launch vhost
- self.launch_testpmd_as_vhost_user(eal_params % (queue,queue), self.cores[0:2], dev=vhost_dev % (queue,''), ports= [allow_pci[0]])
+ self.launch_vhost_testpmd(command=eal_params % (nb_cores, txd_rxd, txd_rxd, queue, queue),
+ cores=self.cores[0:2], dev=vhost_dev % (queue, ''), ports=[allow_pci[0]])
mode = "no_cbdma"
self.mode_list.append(mode)
- self.launch_testpmd_as_virtio_user(dynamic_queue_number_cbdma_virtio_params, self.cores[2:4], dev=virtio_dev)
+ self.launch_vuser0_testpmd(command=virtio_user_params, cores=self.cores[2:4], dev=virtio_dev)
self.send_and_verify(mode, queue_list=range(queue))
- self.vhost_user.send_expect("quit", "#")
+ self.vhost_pmd.quit()
# used 4 cbdma_num and 4 queue to launch vhost
+ queue = 4
+ txd_rxd = 256
+ dmathr = 1024
vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]}],dmathr={dmathr}"
- self.launch_testpmd_as_vhost_user(eal_params % (queue/2,queue/2), self.cores[0:2],
- dev=vhost_dev % (int(queue/2),vhost_dmas), ports=allow_pci[:5])
- self.send_and_verify("used_4_cbdma_num", queue_list=range(int(queue/2)))
+ self.launch_vhost_testpmd(command=eal_params % (nb_cores, txd_rxd, txd_rxd, queue, queue),
+ cores=self.cores[0:2], dev=vhost_dev % (queue, (',' + vhost_dmas)),
+ ports=allow_pci[:5])
+ self.send_and_verify("used_4_cbdma_num", queue_list=range(queue))
self.mode_list.append("used_4_cbdma_num")
- self.vhost_user.send_expect("quit", "#")
+ self.vhost_pmd.quit()
- #used 8 cbdma_num to launch vhost
+ # used 8 cbdma_num to launch vhost
+ queue = 8
+ txd_rxd = 1024
+ dmathr = 1024
vhost_dmas = f"dmas=[txq0@{self.used_cbdma[0]};txq1@{self.used_cbdma[1]};txq2@{self.used_cbdma[2]};txq3@{self.used_cbdma[3]};txq4@{self.used_cbdma[4]};txq5@{self.used_cbdma[5]};txq6@{self.used_cbdma[6]};txq7@{self.used_cbdma[7]}],dmathr={dmathr}"
- self.launch_testpmd_as_vhost_user(eal_params % (queue, queue), self.cores[0:2],
- dev=vhost_dev % (queue,vhost_dmas), ports=allow_pci)
+ self.launch_vhost_testpmd(command=eal_params % (nb_cores, txd_rxd, txd_rxd, queue, queue),
+ cores=self.cores[0:2], dev=vhost_dev % (queue, (',' + vhost_dmas)), ports=allow_pci)
self.send_and_verify("used_8_cbdma_num", queue_list=range(queue))
self.mode_list.append("used_8_cbdma_num")
- self.send_and_verify("used_8_cbdma_num_1", queue_list=range(queue))
- self.mode_list.append("used_8_cbdma_num_1")
- self.virtio_user.send_expect("stop", "testpmd> ", 60)
+ self.vuser0_pmd.execute_cmd('stop')
time.sleep(5)
- self.virtio_user.send_expect("quit", "# ")
- self.vhost_user.send_expect("quit", "# ")
+ self.vuser0_pmd.quit()
+ self.vhost_pmd.quit()
self.result_table_print()
- # result_rows = [[], [64, 'dynamic_queue2', 7.4959375, 12.593175], [1518, 'dynamic_queue2', 1.91900225, 59.028509209999996]]
- result_rows = self.result_table_getrows() #
self.handle_expected(mode_list=self.mode_list)
self.handle_results(mode_list=self.mode_list)
+ def test_perf_split_ring_performance_comparison_between_CPU_copy_CBDMA_copy_and_Sync_copy(self):
+ """
+ Test Case 5: PVP split ring performance comparison between CPU copy, CBDMA copy and Sync copy
+ """
+ used_cbdma_num = 1
+ queue = 1
+ txd_rxd = 1024
+ eal_tx_rxd = ' --nb-cores=%d --txd=%d --rxd=%d'
+ path_mode = 'mrg_rxbuf=1,in_order=1'
+ allow_pci = [self.dut.ports_info[0]['pci']]
+ self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
+ for index in range(used_cbdma_num):
+ allow_pci.append(self.cbdma_dev_infos[index])
+ vhost_vdevs = f"'net_vhost0,iface=/tmp/s0,queues=%d,client=1,dmas=[txq0@{self.device_str}],%s'"
+ virtio_user_params = "--tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=%d --txd=%d --rxd=%d" % (queue, txd_rxd, txd_rxd)
+ dev_path_mode_mapper = {
+ "sync_cbdma": ['dmathr=1024', 'dmathr=2000'],
+ "cpu": 'dmathr=0',
+ }
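+ # dmathr=1024 sends 1518-byte frames through CBDMA, while dmathr=2000 keeps them
+ # on the synchronous CPU path, giving the CBDMA-vs-sync baseline for comparison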
+ for key, dma_mode in dev_path_mode_mapper.items():
+ if key == "cpu":
+ vhost_vdevs = f"'net_vhost0,iface=/tmp/s0,queues=1'"
+ self.launch_vhost_testpmd(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2], dev=vhost_vdevs, ports=[allow_pci[0]])
+ vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'" % queue
+ self.launch_vuser0_testpmd(virtio_user_params, self.cores[2:4], dev=vdevs)
+ mode = "cpu_copy_64"
+ self.mode_list.append(mode)
+ self.send_and_verify(mode, frame_sizes=[64], pkt_length_mode='fixed')
+ perf_cpu_copy_64 = self.throughput[mode][64][self.nb_desc]
+ self.vuser0_pmd.execute_cmd('show port stats all')
+ self.vuser0_pmd.quit()
+ self.vhost_pmd.quit()
+ else:
+ self.launch_vhost_testpmd(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2], dev=vhost_vdevs % (queue, dma_mode[0]), ports=allow_pci)
+ vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d,server=1'" % queue
+ self.launch_vuser0_testpmd(virtio_user_params, self.cores[2:4], dev=vdevs)
+ mode = "sync_copy_64"
+ self.mode_list.append(mode)
+ self.send_and_verify(mode, frame_sizes=[64], pkt_length_mode='fixed')
+ perf_sync_copy_64 = self.throughput[mode][64][self.nb_desc]
+ mode = "cbdma_copy_1518"
+ self.mode_list.append(mode)
+ self.send_and_verify(mode, frame_sizes=[1518], pkt_length_mode='fixed')
+ perf_cbdma_copy_1518 = self.throughput[mode][1518][self.nb_desc]
+ self.vuser0_pmd.execute_cmd('show port stats all')
+ self.vhost_pmd.quit()
+ time.sleep(3)
+ self.launch_vhost_testpmd(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2], dev=vhost_vdevs % (queue, dma_mode[1]), ports=allow_pci)
+ mode = "sync_copy_1518"
+ self.mode_list.append(mode)
+ self.send_and_verify(mode, frame_sizes=[1518], pkt_length_mode='fixed')
+ perf_sync_copy_1518 = self.throughput[mode][1518][self.nb_desc]
+ self.check_port_stats_result(self.vuser0_pmd)
+ self.vuser0_pmd.quit()
+ self.vhost_pmd.quit()
+ self.result_table_print()
+ self.verify(abs(perf_sync_copy_64 - perf_cpu_copy_64) / perf_sync_copy_64 < 0.1, "sync_copy_64 vs. cpu_copy_64 delta > 10%")
+ self.verify(abs(perf_cbdma_copy_1518 - perf_sync_copy_1518) / perf_sync_copy_1518 > 0.05, "cbdma_copy_1518 vs. sync_copy_1518 delta < 5%")
-
- def test_perf_compare_pvp_split_ring_performance(self):
+ def test_perf_packed_ring_performance_comparison_between_CPU_copy_CBDMA_copy_and_Sync_copy(self):
"""
- Test Case6: Compare PVP split ring performance between CPU copy, CBDMA copy and Sync copy
+ Test Case 6: PVP packed ring performance comparison between CPU copy, CBDMA copy and Sync copy
"""
used_cbdma_num = 1
queue = 1
txd_rxd = 1024
eal_tx_rxd = ' --nb-cores=%d --txd=%d --rxd=%d'
- path_mode = 'mrg_rxbuf=1,in_order=1,server=1'
+ path_mode = 'mrg_rxbuf=1,in_order=1,packed_vq=1'
allow_pci = [self.dut.ports_info[0]['pci']]
self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
for index in range(used_cbdma_num):
allow_pci.append(self.cbdma_dev_infos[index])
- path_mode = 'mrg_rxbuf=1,in_order=1'
vhost_vdevs = f"'net_vhost0,iface=/tmp/s0,queues=%d,client=1,dmas=[txq0@{self.device_str}],%s'"
- compare_pvp_split_ring_performance = "--tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=%d --txd=%d --rxd=%d" % (queue, txd_rxd, txd_rxd)
+ virtio_user_params = "--tx-offloads=0x0 --enable-hw-vlan-strip --nb-cores=%d --txd=%d --rxd=%d" % (queue, txd_rxd, txd_rxd)
dev_path_mode_mapper = {
"sync_cbdma": ['dmathr=1024', 'dmathr=2000'],
"cpu": 'dmathr=0',
@@ -487,20 +493,20 @@ class TestVirTioVhostCbdma(TestCase):
for key,dma_mode in dev_path_mode_mapper.items():
if key == "cpu":
vhost_vdevs = f"'net_vhost0,iface=/tmp/s0,queues=1'"
- self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2], dev=vhost_vdevs, ports=[allow_pci[0]])
+ self.launch_vhost_testpmd(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2], dev=vhost_vdevs, ports=[allow_pci[0]])
vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'" % queue
- self.launch_testpmd_as_virtio_user(compare_pvp_split_ring_performance, self.cores[2:4], dev=vdevs)
+ self.launch_vuser0_testpmd(virtio_user_params, self.cores[2:4], dev=vdevs)
mode = "cpu_copy_64"
self.mode_list.append(mode)
self.send_and_verify(mode, frame_sizes=[64], pkt_length_mode='fixed')
perf_cpu_copy_64 = self.throughput[mode][64][self.nb_desc]
- self.virtio_user.send_expect('show port stats all', 'testpmd> ', 10)
- self.virtio_user.send_expect("quit", "# ")
- self.vhost_user.send_expect("quit", "# ")
+ self.vuser0_pmd.execute_cmd('show port stats all')
+ self.vuser0_pmd.quit()
+ self.vhost_pmd.quit()
else:
- self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2],dev=vhost_vdevs % (queue, dma_mode[0]), ports=allow_pci)
+ self.launch_vhost_testpmd(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2], dev=vhost_vdevs % (queue, dma_mode[0]), ports=allow_pci)
vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d,server=1'" % queue
- self.launch_testpmd_as_virtio_user(compare_pvp_split_ring_performance, self.cores[2:4],dev=vdevs)
+ self.launch_vuser0_testpmd(virtio_user_params, self.cores[2:4], dev=vdevs)
mode = "sync_copy_64"
self.mode_list.append(mode)
self.send_and_verify(mode,frame_sizes=[64],pkt_length_mode='fixed')
@@ -509,48 +515,66 @@ class TestVirTioVhostCbdma(TestCase):
self.mode_list.append(mode)
self.send_and_verify(mode,frame_sizes=[1518],pkt_length_mode='fixed')
perf_cbdma_copy_1518 = self.throughput[mode][1518][self.nb_desc]
- self.virtio_user.send_expect('show port stats all', 'testpmd> ', 10)
- self.vhost_user.send_expect("quit", "# ")
+ self.vuser0_pmd.execute_cmd('show port stats all')
+ self.vhost_pmd.quit()
time.sleep(3)
- self.launch_testpmd_as_vhost_user(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2],dev=vhost_vdevs % (queue, dma_mode[1]), ports=allow_pci)
+ self.launch_vhost_testpmd(eal_tx_rxd % (queue, txd_rxd, txd_rxd), self.cores[0:2], dev=vhost_vdevs % (queue, dma_mode[1]), ports=allow_pci)
mode = "sync_copy_1518"
self.mode_list.append(mode)
self.send_and_verify(mode,frame_sizes=[1518],pkt_length_mode='fixed')
perf_sync_copy_1518 = self.throughput[mode][1518][self.nb_desc]
- self.check_port_stats_result(self.virtio_user)
- self.virtio_user.send_expect("quit", "# ")
- self.vhost_user.send_expect("quit", "# ")
+ self.check_port_stats_result(self.vuser0_pmd)
+ self.vuser0_pmd.quit()
+ self.vhost_pmd.quit()
self.result_table_print()
self.verify(abs(perf_sync_copy_64 - perf_cpu_copy_64)/perf_sync_copy_64 < 0.1, "sync_copy_64 vs. cpu_copy_64 delta > 10%" )
self.verify(abs(perf_cbdma_copy_1518 - perf_sync_copy_1518)/perf_sync_copy_1518 > 0.05,"cbdma_copy_1518 vs sync_copy_1518 delta < 5%")
- @staticmethod
- def vhost_or_virtio_set_one_queue(session):
- session.send_expect('stop', 'testpmd> ', 120)
- session.send_expect('port stop all', 'testpmd> ', 120)
- session.send_expect('port config all rxq 1', 'testpmd> ', 120)
- session.send_expect('port config all txq 1', 'testpmd> ', 120)
- session.send_expect('port start all', 'testpmd> ', 120)
- session.send_expect('start', 'testpmd> ', 120)
- session.send_expect('show port info all', 'testpmd> ', 30)
- session.send_expect('show port stats all', 'testpmd> ', 120)
- time.sleep(5)
+ def get_receive_throughput(self, pmd_session, count=10):
+ # poll the stats several times so the Rx-pps counter is refreshed before sampling
+ for _ in range(count):
+ pmd_session.execute_cmd('show port stats all')
+ out = pmd_session.execute_cmd('show port stats all')
+ pmd_session.execute_cmd('stop')
+ rx_throughput = re.compile(r'Rx-pps:\s+(\d+)').findall(out)
+ return float(rx_throughput[0]) / 1000000.0
- @property
- def check_value(self):
- check_dict = dict.fromkeys(self.frame_sizes)
- linerate = {64: 0.085, 128: 0.12, 256: 0.20, 512: 0.35, 1024: 0.50, 1280: 0.55, 1518: 0.60}
- for size in self.frame_sizes:
- speed = self.wirespeed(self.nic, size, self.number_of_ports)
- check_dict[size] = round(speed * linerate[size], 2)
- return check_dict
+ def test_loopback_with_cbdma_enqueue_large_chain_packets_stress_test(self):
+ """
+ Test Case 7: loopback with cbdma enqueue large chain packets stress test
+ """
+ used_cbdma_num = 1
+ queue = 1
+ mbuf_size = 65535
+ dmathr = 0
+ txd_rxd = 2048
+ txq_rxq = 1
+ eal_tx_rxd = ' --nb-cores=%d --mbuf-size=%d'
+ path_mode = 'mrg_rxbuf=1,in_order=0,vectorized=1,packed_vq=1,queue_size=2048'
+ self.get_cbdma_ports_info_and_bind_to_dpdk(used_cbdma_num)
+ vhost_vdevs = f"'net_vhost0,iface=/tmp/s0,queues=%d,dmas=[txq0@{self.device_str}],dmathr=%d'"
+ virtio_vdevs = f"'net_virtio_user0,mac={self.virtio_mac},path=/tmp/s0,{path_mode},queues=%d'" % queue
+ virtio_user_params = "--rxq=%d --txq=%d --txd=%d --rxd=%d --nb-cores=%d " % (txq_rxq, txq_rxq, txd_rxd, txd_rxd, queue)
+ self.launch_vhost_testpmd(command=eal_tx_rxd % (queue, mbuf_size), cores=self.cores[0:2], dev=vhost_vdevs % (queue, dmathr), ports=[self.device_str])
+ self.launch_vuser0_testpmd(command=virtio_user_params, cores=self.cores[2:4], dev=virtio_vdevs)
+ self.vhost_pmd.execute_cmd("vhost enable tx all")
+ self.vhost_pmd.execute_cmd("set txpkts 65535,65535,65535,65535,65535")
+ self.vhost_pmd.execute_cmd("start tx_first 32")
+ frame_size = '65535 * 5'
+ throughput = self.get_receive_throughput(pmd_session=self.vuser0_pmd)
+ table_header = ['Frame Size(Byte)', 'Throughput(Mpps)']
+ self.result_table_create(table_header)
+ self.result_table_add([frame_size, throughput])
+ self.result_table_print()
+ self.verify(throughput > 0, 'vuser0 cannot receive packets')
def send_imix_and_verify(self, mode, multiple_queue=True, queue_list=[]):
"""
Send imix packet with packet generator and verify
"""
- frame_sizes = [
- 64, 128, 256, 512, 1024, 1280, 1518, ]
+ frame_sizes = [64, 128, 256, 512, 1024, 1280, 1518]
tgenInput = []
for frame_size in frame_sizes:
payload_size = frame_size - self.headers_size
@@ -562,17 +586,15 @@ class TestVirTioVhostCbdma(TestCase):
pkt.assign_layers(['ether', 'ipv4', 'raw'])
pkt.config_layers([('ether', {'dst': '%s' % self.virtio_mac}), ('ipv4', {'src': '1.1.1.1'}),
('raw', {'payload': ['01'] * int('%d' % payload_size)})])
- pkt.save_pcapfile(self.tester, "%s/multiqueuerandomip_%s.pcap" % (self.out_path, frame_size))
- tgenInput.append((port, port, "%s/multiqueuerandomip_%s.pcap" % (self.out_path, frame_size)))
-
+ pkt.save_pcapfile(self.tester, "%s/vhost_cbdma_imix_%s.pcap" % (self.out_path, frame_size))
+ tgenInput.append((port, port, "%s/vhost_cbdma_imix_%s.pcap" % (self.out_path, frame_size)))
self.tester.pktgen.clear_streams()
streams = self.pktgen_helper.prepare_stream_from_tginput(tgenInput, 100, fields_config, self.tester.pktgen)
trans_options = {'delay': 5, 'duration': self.test_duration}
bps, pps = self.tester.pktgen.measure_throughput(stream_ids=streams, options=trans_options)
Mpps = pps / 1000000.0
Mbps = bps / 1000000.0
- self.verify(Mbps > 0,
- f"{self.running_case} can not receive packets of frame size {frame_sizes}")
+ self.verify(Mbps > 0, f"{self.running_case} cannot receive packets of frame size {frame_sizes}")
bps_linerate = self.wirespeed(self.nic, 64, 1) * 8 * (64 + 20)
throughput = Mbps * 100 / float(bps_linerate)
self.throughput[mode] = {
@@ -594,6 +616,7 @@ class TestVirTioVhostCbdma(TestCase):
Send packet with packet generator and verify
"""
if pkt_length_mode == 'imix':
+ self.logger.info("Send imix packets from pktgen")
self.send_imix_and_verify(mode, multiple_queue, queue_list)
return
@@ -610,8 +633,8 @@ class TestVirTioVhostCbdma(TestCase):
pkt1.assign_layers(['ether', 'ipv4', 'raw'])
pkt1.config_layers([('ether', {'dst': '%s' % self.virtio_mac}), ('ipv4', {'src': '1.1.1.1'}),
('raw', {'payload': ['01'] * int('%d' % payload_size)})])
- pkt1.save_pcapfile(self.tester, "%s/multiqueuerandomip_%s.pcap" % (self.out_path, frame_size))
- tgenInput.append((port, port, "%s/multiqueuerandomip_%s.pcap" % (self.out_path, frame_size)))
+ pkt1.save_pcapfile(self.tester, "%s/vhost_cbdma_%s.pcap" % (self.out_path, frame_size))
+ tgenInput.append((port, port, "%s/vhost_cbdma_%s.pcap" % (self.out_path, frame_size)))
self.tester.pktgen.clear_streams()
streams = self.pktgen_helper.prepare_stream_from_tginput(tgenInput, 100, fields_config, self.tester.pktgen)
trans_options = {'delay': 5, 'duration': 20}
@@ -761,18 +784,14 @@ class TestVirTioVhostCbdma(TestCase):
def tear_down(self):
"""
Run after each test case.
- Clear qemu and testpmd to avoid blocking the following TCs
"""
self.dut.send_expect("killall -I %s" % self.testpmd_name, '#', 20)
self.bind_cbdma_device_to_kernel()
- if self.running_case == 'test_check_threshold_value_with_cbdma':
- self.bind_nic_driver(self.dut_ports, self.drivername)
def tear_down_all(self):
"""
Run after each test suite.
"""
- self.dut.close_session(self.vhost_user)
- self.dut.close_session(self.virtio_user)
- self.dut.close_session(self.virtio_user1)
+ self.dut.close_session(self.vhost)
+ self.dut.close_session(self.vuser0)
self.dut.kill_all()