From patchwork Thu Sep 1 17:12:57 2022
X-Patchwork-Submitter: "Huang, ZhiminX"
X-Patchwork-Id: 115717
From: Zhimin Huang
To: dts@dpdk.org
Cc: Zhimin Huang
Subject: [dts][PATCH V2 3/3] tests/ice_enable_basic_hqos_on_pf: add new test suite
Date: Fri, 2 Sep 2022 01:12:57 +0800
Message-Id: <20220901171257.12492-3-zhiminx.huang@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20220901171257.12492-1-zhiminx.huang@intel.com>
References: <20220901171257.12492-1-zhiminx.huang@intel.com>
List-Id: test suite reviews and discussions

Add a new test suite for the DPDK 22.07 feature that enables basic
HQoS on the ICE PF driver.

Signed-off-by: Zhimin Huang
---
 .../TestSuite_ice_enable_basic_hqos_on_pf.py  | 652 ++++++++++++++++++
 1 file changed, 652 insertions(+)
 create mode 100644 tests/TestSuite_ice_enable_basic_hqos_on_pf.py

diff --git a/tests/TestSuite_ice_enable_basic_hqos_on_pf.py b/tests/TestSuite_ice_enable_basic_hqos_on_pf.py
new file mode 100644
index 00000000..a35f06fb
--- /dev/null
+++ b/tests/TestSuite_ice_enable_basic_hqos_on_pf.py
@@ -0,0 +1,652 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2022 Intel Corporation
+#
+"""
+DPDK Test suite.
+ICE Enable basic HQoS on PF driver.
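+
+The suite requires a two-port topology: port 0 is an ICE_100G-E810C_QSFP
+NIC and port 1 is an ICE_25G-E810_XXV_SFP NIC.  Each case builds a
+traffic manager (TM) hierarchy on port 1 through testpmd
+"add port tm ..." commands, transmits VLAN-priority-tagged streams of
+several frame sizes, and checks the measured per-queue and
+per-queue-group TX throughput ratios against the configured scheduling
+policy (RR, SP or WFQ, with or without shaper rate limits).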
+"""
+
+import re
+from copy import deepcopy
+from pprint import pformat
+
+from framework.packet import Packet
+from framework.pktgen import TRANSMIT_CONT
+from framework.pmd_output import PmdOutput
+from framework.settings import HEADER_SIZE, get_nic_name
+from framework.test_case import TestCase
+
+# frame size and VLAN priority for each of the eight transmitted streams
+PKT_LEN = [64, 128, 256, 512, 1024, 1518, 512, 1024]
+STREAM_UP_CONFIG = [0, 1, 2, 0, 0, 0, 0, 0]
+LINERATE = 100
+
+
+class TestIceEnableBasicHqosOnPF(TestCase):
+    def set_up_all(self):
+        self.dut_ports = self.dut.get_ports()
+        self.verify(len(self.dut_ports) >= 2, "Insufficient ports")
+        # the test topology requires port 0 to be a 100G E810-C and
+        # port 1 to be a 25G E810
+        self.skip_case(
+            self.check_require_nic_for_test(),
+            "Topology is ICE_100G-E810C_QSFP and ICE_25G-E810_XXV_SFP",
+        )
+        self.cores = "1S/9C/1T"
+        # check core num
+        core_list = self.dut.get_core_list(self.cores)
+        self.verify(len(core_list) >= 9, "Insufficient cores for testing")
+        self.tester_port0 = self.tester.get_local_port(self.dut_ports[0])
+        self.tester_port1 = self.tester.get_local_port(self.dut_ports[1])
+        self.dut_port0_mac = self.dut.get_mac_address(self.dut_ports[0])
+
+        self.pmd_output = PmdOutput(self.dut)
+
+    def get_nic_info_from_ports_cfg(self):
+        """
+        get nic type/intf/pci list from the ports config
+        :return: port config nic list
+        """
+        nic_list = []
+        for id in self.dut.get_ports():
+            nic_dict = {}
+            for info in ["type", "intf", "pci"]:
+                nic_dict[info] = self.dut.ports_info[id][info]
+                if info == "type":
+                    nic_dict["name"] = get_nic_name(nic_dict[info])
+            nic_list.append(nic_dict)
+        return nic_list
+
+    def check_require_nic_for_test(self):
+        """
+        check that port 0 is E810_100G and port 1 is E810_25G
+        :return: check status, True or False
+        """
+        for id, _nic in enumerate(self.get_nic_info_from_ports_cfg()):
+            if _nic["name"] not in ["ICE_100G-E810C_QSFP", "ICE_25G-E810_XXV_SFP"]:
+                return False
+            if id == 0 and _nic["name"] != "ICE_100G-E810C_QSFP":
+                return False
+        return True
+
+    def launch_testpmd(self, param=""):
+        """
+        start testpmd and check testpmd link status
+        :param param: rxq/txq
+        """
+        self.pmd_output.start_testpmd(self.cores, param=param)
+        res = self.pmd_output.wait_link_status_up("all", timeout=15)
+        self.verify(res is True, "not all port links are up")
+        self.testpmd_flag = True
+
+    def close_testpmd(self):
+        """
+        close testpmd
+        """
+        if not self.testpmd_flag:
+            return
+        try:
+            self.pmd_output.quit()
+        except Exception as e:
+            self.logger.error("failed to quit testpmd: %s" % e)
+        self.testpmd_flag = False
+
+    def get_queue_packets_stats(self, port=1):
+        """
+        get testpmd tx pkts stats
+        :param port: tx port
+        :return: pkts list
+        """
+        output = self.pmd_output.execute_cmd("stop")
+        self.pmd_output.execute_cmd("start")
+        p = re.compile("TX Port= %d/Queue=.*\n.*TX-packets: ([0-9]+)\s" % port)
+        tx_pkts = list(map(int, p.findall(output)))
+        return tx_pkts
+
+    def add_stream_to_pktgen(self, txport, rxport, send_pkts, option):
+        """
+        add streams to pktgen and return stream ids
+        """
+        stream_ids = []
+        for pkt in send_pkts:
+            _option = deepcopy(option)
+            # each stream carries its own pcap; the pcap set here
+            # overrides the placeholder passed to add_stream()
+            _option["pcap"] = pkt
+            stream_id = self.tester.pktgen.add_stream(txport, rxport, send_pkts[0])
+            self.tester.pktgen.config_stream(stream_id, _option)
+            stream_ids.append(stream_id)
+        return stream_ids
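+
+    # One VLAN_UDP stream is built per entry of STREAM_UP_CONFIG; each
+    # entry is used as both the VLAN ID and the VLAN user priority,
+    # which is what steers the streams to different queues and queue
+    # groups, while PKT_LEN supplies the frame size for each stream.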
+    def config_stream(self, fields, frame_size=64):
+        """
+        config stream and return pkt
+        """
+        pri = fields
+        pkt_config = {
+            "type": "VLAN_UDP",
+            "pkt_layers": {
+                "ether": {"dst": self.dut_port0_mac},
+                "vlan": {"vlan": pri, "prio": pri},
+                "raw": {"payload": ["58"] * self.get_pkt_len(frame_size)},
+            },
+        }
+        pkt_type = pkt_config.get("type")
+        pkt_layers = pkt_config.get("pkt_layers")
+        pkt = Packet(pkt_type=pkt_type)
+        for layer in list(pkt_layers.keys()):
+            pkt.config_layer(layer, pkt_layers[layer])
+        return pkt.pktgen.pkt
+
+    def testpmd_query_stats(self):
+        """
+        traffic callback function, return port 1 stats
+        """
+        output = self.pmd_output.execute_cmd("show port stats 1")
+        if not output:
+            return
+        port_pat = ".*NIC statistics for (port \d+) .*"
+        rx_pat = ".*Rx-pps:\s+(\d+)\s+Rx-bps:\s+(\d+).*"
+        tx_pat = ".*Tx-pps:\s+(\d+)\s+Tx-bps:\s+(\d+).*"
+        port = re.findall(port_pat, output, re.M)
+        rx = re.findall(rx_pat, output, re.M)
+        tx = re.findall(tx_pat, output, re.M)
+        if not port or not rx or not tx:
+            return
+        stat = {}
+        for port_id, (rx_pps, rx_bps), (tx_pps, tx_bps) in zip(port, rx, tx):
+            stat[port_id] = {
+                "rx_pps": float(rx_pps),
+                "rx_bps": float(rx_bps),
+                "tx_pps": float(tx_pps),
+                "tx_bps": float(tx_bps),
+            }
+        self.pmd_stat = stat
+
+    def get_pkt_len(self, frame_size):
+        HEADER_SIZE["vlan"] = 4
+        headers_size = sum([HEADER_SIZE[x] for x in ["eth", "ip", "vlan", "udp"]])
+        pktlen = frame_size - headers_size
+        return pktlen
+
+    def start_traffic(self, send_pkts):
+        """
+        send streams and return results
+        """
+        self.tester.pktgen.clear_streams()
+        duration = 20
+        s_option = {
+            "stream_config": {
+                "txmode": {},
+                "transmit_mode": TRANSMIT_CONT,
+                "rate": LINERATE,
+            },
+            "fields_config": {
+                "ip": {
+                    "src": {
+                        "start": "198.18.0.0",
+                        "end": "198.18.0.255",
+                        "step": 1,
+                        "action": "random",
+                    },
+                },
+            },
+        }
+        stream_ids = self.add_stream_to_pktgen(
+            self.tester_port0, self.tester_port0, send_pkts, s_option
+        )
+        traffic_opt = {
+            "method": "throughput",
+            "duration": duration,
+            "interval": duration - 5,
+            "callback": self.testpmd_query_stats,
+        }
+        result = self.tester.pktgen.measure(stream_ids, traffic_opt)
+        return result
+
+    def get_traffic_results(self):
+        """
+        get traffic results: pktgen results, port stats and queue stats
+        """
+        stream = []
+        results = []
+        for id in range(len(STREAM_UP_CONFIG)):
+            pkt = self.config_stream(STREAM_UP_CONFIG[id], frame_size=PKT_LEN[id])
+            stream.append(pkt)
+        result = self.start_traffic(stream)
+        queue_stats = self.get_queue_packets_stats()
+        results.append([result, self.pmd_stat, queue_stats])
+        return results
+
+    def check_traffic_throughput(self, expect_results, rel_results):
+        """
+        compare traffic throughput with the expected results
+        """
+        status = False
+        for traffic_task, _result in zip(expect_results, rel_results):
+            _expected, unit, port = traffic_task
+            result, pmd_stat, _ = _result
+            real_stat = pmd_stat.get(f"port {port}", {})
+            real_bps = real_stat.get("tx_bps")
+            # allow a 10% bias between measured and expected throughput
+            bias = 10
+            if unit == "MBps":
+                status = ((real_bps / 8 / 1e6 - _expected) * 100 / _expected) < bias
+            elif unit == "-MBps":
+                # upper bound only: throughput must stay below the expected value
+                status = real_bps / 8 / 1e6 < _expected
+            elif unit in ["Gbps", "rGbps"]:
+                status = abs(((real_bps / 1e9 - _expected) * 100 / _expected)) < bias
+            msg = (
+                f"{pformat(traffic_task)}"
+                " did not reach the expected throughput, real is: "
+                f"{pformat(pmd_stat)}"
+            )
+            self.verify(status, msg)
+
+    def check_queue_pkts_ratio(self, expected, results):
+        """
+        check that the TX packet ratio between queues matches the expected ratio
+        """
+        for result in results:
+            queue_group0 = result[-1][:4]
+            queue_group1 = result[-1][4:]
+            queue_group2 = []
+            if len(expected) == 3:
+                queue_group1 = result[-1][4:8]
+                queue_group2 = result[-1][8:]
+            queue_stats = [queue_group0, queue_group1, queue_group2]
+            for id, ex in enumerate(expected):
+                total_pkts = sum(queue_stats[id])
+                total_ratio = sum(ex)
+                if not ex:
+                    # an empty expectation means this queue group should
+                    # be starved and transmit nothing
+                    self.verify(
+                        not total_pkts,
+                        "Queue group %d should have no TX throughput" % id,
+                    )
+                    return
+                ratio = []
+                for idx, queue_stat in enumerate(queue_stats[id]):
+                    percentage = queue_stat / total_pkts * 100
+                    ratio.append(percentage)
+                bias = 10
+                for idx, percentage in enumerate(ex):
+                    percentage = percentage / total_ratio * 100
+                    _bias = abs(ratio[idx] - percentage) / percentage * 100
+                    self.logger.info(
+                        "ratio and percentage:{}".format((ratio[idx], percentage))
+                    )
+                    if _bias < bias:
+                        continue
+                    else:
+                        msg = "can not get the expected queue ratio"
+                        self.verify(False, msg)
+
+    def check_queue_group_throughput(self, expected, results):
+        """
+        check that the TX ratio between queue groups matches the expected ratio
+        """
+        for result in results:
+            queue_group0 = result[-1][:4]
+            queue_group1 = result[-1][4:]
+            queue_group2 = []
+            if len(expected) == 3:
+                queue_group1 = result[-1][4:8]
+                queue_group2 = result[-1][8:]
+            # aggregate the per-queue counters into per-group totals
+            queue_stats = [sum(queue_group0), sum(queue_group1), sum(queue_group2)]
+            total_pkts = sum(queue_stats)
+            total_ratio = sum(expected)
+            ratio = []
+            for idx, queue_stat in enumerate(queue_stats):
+                percentage = queue_stat / total_pkts * 100
+                ratio.append(percentage)
+            bias = 10
+            for idx, percentage in enumerate(expected):
+                percentage = percentage / total_ratio * 100
+                _bias = abs(ratio[idx] - percentage) / percentage * 100
+                self.logger.info(
+                    "ratio and percentage:{}".format((ratio[idx], percentage))
+                )
+                if _bias < bias:
+                    continue
+                else:
+                    msg = "can not get the expected queue group ratio"
+                    self.verify(False, msg)
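+
+    # All positive cases below follow the same pattern.  Node 1000000
+    # is the port-level root, 900000 and 800000 are the intermediate
+    # levels, and 700000/600000 (plus 500000 in the 16-queue cases) are
+    # the queue-group nodes that the leaf nodes (TX queues) attach to.
+    # Roughly: a non-zero priority argument selects strict priority
+    # (0 = highest), a weight above 1 sets the WFQ share, and a shaper
+    # profile id other than -1 rate-limits the node; see the testpmd
+    # traffic management documentation for the exact argument order.
+    # Each case then asserts the expected per-queue and per-group TX
+    # ratios from the testpmd queue counters.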
+    def test_perf_queuegroup_RR_queue_WFQ_RR_nolimit(self):
+
+        self.launch_testpmd(param="--rxq=8 --txq=8")
+        cmds = [
+            "add port tm node shaper profile 1 1 100000000 0 100000000 0 0 0",
+            "add port tm nonleaf node 1 1000000 -1 0 1 0 -1 1 0 0",
+            "add port tm nonleaf node 1 900000 1000000 0 1 1 -1 1 0 0",
+            "add port tm nonleaf node 1 800000 900000 0 1 2 -1 1 0 0",
+            "add port tm nonleaf node 1 700000 800000 0 1 3 -1 1 0 0",
+            "add port tm nonleaf node 1 600000 800000 0 1 3 -1 1 0 0",
+            "add port tm leaf node 1 0 700000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 1 700000 0 2 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 2 700000 0 3 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 3 700000 0 4 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 4 600000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 5 600000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 6 600000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 7 600000 0 1 4 -1 0 0xffffffff 0 0",
+            "port tm hierarchy commit 1 no",
+            "start",
+        ]
+        for cmd in cmds:
+            self.pmd_output.execute_cmd(cmd)
+
+        traffic_tasks = [
+            [8.25, "Gbps", 1],
+        ]
+        results = self.get_traffic_results()
+        self.check_traffic_throughput(traffic_tasks, results)
+        # queues 0-3 are weighted 1:2:3:4 (WFQ), queues 4-7 equally (RR)
+        expected = [[1, 2, 3, 4], [1, 1, 1, 1]]
+        self.check_queue_pkts_ratio(expected, results)
+        # the two queue groups share the bandwidth equally (RR)
+        expected = [1, 1]
+        self.check_queue_group_throughput(expected, results)
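+
+    # Same hierarchy as the previous case, except that queue group
+    # 600000 is created with priority 1 while group 700000 keeps
+    # priority 0 (highest): group scheduling becomes strict priority,
+    # so queues 4-7 are expected to be completely starved.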
+    def test_perf_queuegroup_SP_queue_WFQ_RR_nolimit(self):
+
+        self.launch_testpmd(param="--rxq=8 --txq=8")
+        cmds = [
+            "add port tm node shaper profile 1 1 100000000 0 100000000 0 0 0",
+            "add port tm nonleaf node 1 1000000 -1 0 1 0 -1 1 0 0",
+            "add port tm nonleaf node 1 900000 1000000 0 1 1 -1 1 0 0",
+            "add port tm nonleaf node 1 800000 900000 0 1 2 -1 1 0 0",
+            "add port tm nonleaf node 1 700000 800000 0 1 3 -1 1 0 0",
+            "add port tm nonleaf node 1 600000 800000 1 1 3 -1 1 0 0",
+            "add port tm leaf node 1 0 700000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 1 700000 0 2 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 2 700000 0 3 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 3 700000 0 4 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 4 600000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 5 600000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 6 600000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 7 600000 0 1 4 -1 0 0xffffffff 0 0",
+            "port tm hierarchy commit 1 no",
+            "start",
+        ]
+        for cmd in cmds:
+            self.pmd_output.execute_cmd(cmd)
+
+        traffic_tasks = [
+            [8.25, "Gbps", 1],
+        ]
+        results = self.get_traffic_results()
+        self.check_traffic_throughput(traffic_tasks, results)
+        expected = [[1, 2, 3, 4], []]
+        self.check_queue_pkts_ratio(expected, results)
+
+    def test_perf_queuegroup_RR_queue_WFQ_RR(self):
+
+        self.launch_testpmd(param="--rxq=8 --txq=8")
+        cmds = [
+            "add port tm node shaper profile 1 1 300000000 0 300000000 0 0 0",
+            "add port tm nonleaf node 1 1000000 -1 0 1 0 -1 1 0 0",
+            "add port tm nonleaf node 1 900000 1000000 0 1 1 -1 1 0 0",
+            "add port tm nonleaf node 1 800000 900000 0 1 2 -1 1 0 0",
+            "add port tm nonleaf node 1 700000 800000 0 1 3 -1 1 0 0",
+            "add port tm nonleaf node 1 600000 800000 0 1 3 1 1 0 0",
+            "add port tm leaf node 1 0 700000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 1 700000 0 2 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 2 700000 0 3 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 3 700000 0 4 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 4 600000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 5 600000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 6 600000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 7 600000 0 1 4 -1 0 0xffffffff 0 0",
+            "port tm hierarchy commit 1 no",
+            "start",
+        ]
+        for cmd in cmds:
+            self.pmd_output.execute_cmd(cmd)
+
+        traffic_tasks = [
+            [8.25, "Gbps", 1],
+        ]
+        results = self.get_traffic_results()
+        self.check_traffic_throughput(traffic_tasks, results)
+        expected = [[1, 2, 3, 4], [8, 8, 80, 16, 300, 400, 1, 10]]
+        self.check_queue_pkts_ratio(expected, results)
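+
+    # The cases below attach shaper profiles to individual queues and
+    # queue groups.  Going by the testpmd TM command layout, the
+    # arguments after the profile id appear to be the committed and
+    # peak token-bucket rates (bytes per second) and bucket sizes, so a
+    # peak rate of 100000000 corresponds to roughly 800 Mbit/s.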
+    def test_perf_queuegroup_SP_queue_WFQ_SP(self):
+
+        self.launch_testpmd(param="--rxq=12 --txq=12")
+        cmds = [
+            "add port tm node shaper profile 1 1 300 0 300000000 0 0 0",
+            "add port tm node shaper profile 1 2 300 0 100000000 0 0 0",
+            "add port tm node shaper profile 1 3 300 0 10000000 0 0 0",
+            "add port tm node shaper profile 1 4 300 0 20000000 0 0 0",
+            "add port tm node shaper profile 1 5 200 0 400000000 0 0 0",
+            "add port tm nonleaf node 1 1000000 -1 0 1 0 -1 1 0 0",
+            "add port tm nonleaf node 1 900000 1000000 0 1 1 -1 1 0 0",
+            "add port tm nonleaf node 1 800000 900000 0 1 2 -1 1 0 0",
+            "add port tm nonleaf node 1 700000 800000 0 1 3 1 1 0 0",
+            "add port tm nonleaf node 1 600000 800000 7 1 3 -1 1 0 0",
+            "add port tm leaf node 1 0 700000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 1 700000 0 2 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 2 700000 0 3 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 3 700000 0 4 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 4 600000 0 1 4 3 0 0xffffffff 0 0",
+            "add port tm leaf node 1 5 600000 2 1 4 3 0 0xffffffff 0 0",
+            "add port tm leaf node 1 6 600000 1 1 4 2 0 0xffffffff 0 0",
+            "add port tm leaf node 1 7 600000 2 1 4 4 0 0xffffffff 0 0",
+            "add port tm leaf node 1 8 600000 3 1 4 1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 9 600000 3 1 4 5 0 0xffffffff 0 0",
+            "add port tm leaf node 1 10 600000 5 1 4 3 0 0xffffffff 0 0",
+            "add port tm leaf node 1 11 600000 7 1 4 3 0 0xffffffff 0 0",
+            "port tm hierarchy commit 1 no",
+            "start",
+        ]
+        for cmd in cmds:
+            self.pmd_output.execute_cmd(cmd)
+
+        traffic_tasks = [
+            [8.25, "Gbps", 1],
+        ]
+        results = self.get_traffic_results()
+        self.check_traffic_throughput(traffic_tasks, results)
+        expected = [[1, 2, 3, 4], [8, 8, 80, 16, 240, 240, 1, 1]]
+        self.check_queue_pkts_ratio(expected, results)
+        expected = [2, 1]
+        self.check_queue_group_throughput(expected, results)
+
+    def test_perf_queuegroup_RR_queue_RR_SP_WFQ(self):
+
+        self.launch_testpmd(param="--rxq=16 --txq=16")
+        cmds = [
+            "add port tm node shaper profile 1 1 300 0 300000000 0 0 0",
+            "add port tm node shaper profile 1 2 100 0 100000000 0 0 0",
+            "add port tm nonleaf node 1 1000000 -1 0 1 0 -1 1 0 0",
+            "add port tm nonleaf node 1 900000 1000000 0 1 1 -1 1 0 0",
+            "add port tm nonleaf node 1 800000 900000 0 1 2 -1 1 0 0",
+            "add port tm nonleaf node 1 700000 800000 0 1 3 -1 1 0 0",
+            "add port tm nonleaf node 1 600000 800000 0 1 3 -1 1 0 0",
+            "add port tm nonleaf node 1 500000 800000 0 1 3 -1 1 0 0",
+            "add port tm leaf node 1 0 700000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 1 700000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 2 700000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 3 700000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 4 600000 0 1 4 2 0 0xffffffff 0 0",
+            "add port tm leaf node 1 5 600000 4 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 6 600000 1 1 4 1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 7 600000 7 1 4 2 0 0xffffffff 0 0",
+            "add port tm leaf node 1 8 500000 0 4 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 9 500000 0 2 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 10 500000 0 2 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 11 500000 0 100 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 12 500000 0 3 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 13 500000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 14 500000 0 5 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 15 500000 0 7 4 -1 0 0xffffffff 0 0",
+            "port tm hierarchy commit 1 no",
+            "start",
+        ]
+        for cmd in cmds:
+            self.pmd_output.execute_cmd(cmd)
+
+        traffic_tasks = [
+            [8.25, "Gbps", 1],
+        ]
+        results = self.get_traffic_results()
+        self.check_traffic_throughput(traffic_tasks, results)
+        expected = [[1, 1, 1, 1], [8, 1, 20, 1], [4, 2, 2, 100, 3, 1, 5, 7]]
+        self.check_queue_pkts_ratio(expected, results)
+        expected = [1, 1, 1]
+        self.check_queue_group_throughput(expected, results)
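+
+    # Same 16-queue layout as above, but the three queue groups are now
+    # strict priority (priorities 0, 1 and 2) and rate limited by
+    # shaper profiles (100, 100 and 300 MB/s peak), so the group
+    # throughput is expected to split 1:1:3 instead of evenly.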
+    def test_perf_queuegroup_SP_queue_RR_SP_WFQ(self):
+
+        self.launch_testpmd(param="--rxq=16 --txq=16")
+        cmds = [
+            "add port tm node shaper profile 1 1 300 0 300000000 0 0 0",
+            "add port tm node shaper profile 1 2 100 0 100000000 0 0 0",
+            "add port tm nonleaf node 1 1000000 -1 0 1 0 -1 1 0 0",
+            "add port tm nonleaf node 1 900000 1000000 0 1 1 -1 1 0 0",
+            "add port tm nonleaf node 1 800000 900000 0 1 2 -1 1 0 0",
+            "add port tm nonleaf node 1 700000 800000 0 1 3 2 1 0 0",
+            "add port tm nonleaf node 1 600000 800000 1 1 3 2 1 0 0",
+            "add port tm nonleaf node 1 500000 800000 2 1 3 1 1 0 0",
+            "add port tm leaf node 1 0 700000 0 1 4 1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 1 700000 0 1 4 1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 2 700000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 3 700000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 4 600000 0 1 4 1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 5 600000 4 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 6 600000 1 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 7 600000 7 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 8 500000 0 4 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 9 500000 0 2 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 10 500000 0 2 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 11 500000 0 100 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 12 500000 0 3 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 13 500000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 14 500000 0 5 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 15 500000 0 7 4 -1 0 0xffffffff 0 0",
+            "port tm hierarchy commit 1 no",
+            "start",
+        ]
+        for cmd in cmds:
+            self.pmd_output.execute_cmd(cmd)
+
+        traffic_tasks = [
+            [4, "Gbps", 1],
+        ]
+        results = self.get_traffic_results()
+        self.check_traffic_throughput(traffic_tasks, results)
+        expected = [[1, 1, 1, 1], [800, 1, 1, 1], [4, 2, 2, 100, 3, 1, 5, 7]]
+        self.check_queue_pkts_ratio(expected, results)
+        expected = [1, 1, 3]
+        self.check_queue_group_throughput(expected, results)
+
+    def test_perf_queuegroup_RR_queue_WFQ_WFQ(self):
+
+        self.launch_testpmd(param="--rxq=8 --txq=8")
+        cmds = [
+            "add port tm node shaper profile 1 1 10000000 0 10000000 0 0 0",
+            "add port tm node shaper profile 1 2 20000000 0 20000000 0 0 0",
+            "add port tm node shaper profile 1 3 30000000 0 30000000 0 0 0",
+            "add port tm node shaper profile 1 4 40000000 0 40000000 0 0 0",
+            "add port tm nonleaf node 1 1000000 -1 0 1 0 -1 1 0 0",
+            "add port tm nonleaf node 1 900000 1000000 0 1 1 -1 1 0 0",
+            "add port tm nonleaf node 1 800000 900000 0 1 2 -1 1 0 0",
+            "add port tm nonleaf node 1 700000 800000 0 1 3 -1 1 0 0",
+            "add port tm nonleaf node 1 600000 800000 0 1 3 -1 1 0 0",
+            "add port tm leaf node 1 0 700000 0 1 4 1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 1 700000 0 2 4 1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 2 700000 0 3 4 4 0 0xffffffff 0 0",
+            "add port tm leaf node 1 3 700000 0 4 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 4 600000 0 1 4 4 0 0xffffffff 0 0",
+            "add port tm leaf node 1 5 600000 0 2 4 3 0 0xffffffff 0 0",
+            "add port tm leaf node 1 6 600000 0 3 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 7 600000 0 4 4 -1 0 0xffffffff 0 0",
+            "port tm hierarchy commit 1 no",
+            "start",
+        ]
+        for cmd in cmds:
+            self.pmd_output.execute_cmd(cmd)
+
+        traffic_tasks = [
+            [8.25, "Gbps", 1],
+        ]
+        results = self.get_traffic_results()
+        self.check_traffic_throughput(traffic_tasks, results)
+        expected = [[4, 3, 170, 220], [1, 1, 1, 300]]
+        self.check_queue_pkts_ratio(expected, results)
+        expected = [1, 1]
+        self.check_queue_group_throughput(expected, results)
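+
+    # Negative case: each command block feeds one invalid TM parameter
+    # (weight != 1 on a level 3 node, leaf weight above 200, leaf
+    # priority above 7, and finally a hierarchy whose commit fails) and
+    # verifies that the PMD prints the expected error message.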
+    def test_perf_negative_case(self):
+
+        self.launch_testpmd(param="--rxq=16 --txq=16")
+        cmd1 = [
+            "add port tm node shaper profile 1 1 100000000 0 100000000 0 0 0",
+            "add port tm nonleaf node 1 1000000 -1 0 1 0 -1 1 0 0",
+            "add port tm nonleaf node 1 900000 1000000 0 1 1 -1 1 0 0",
+            "add port tm nonleaf node 1 800000 900000 0 1 2 -1 1 0 0",
+            "add port tm nonleaf node 1 700000 800000 0 1 3 -1 1 0 0",
+            "add port tm nonleaf node 1 600000 800000 0 2 3 -1 1 0 0",
+        ]
+        output = ""
+        for cmd in cmd1:
+            output += self.pmd_output.execute_cmd(cmd)
+        check_msg = "ice_tm_node_add(): weight != 1 not supported in level 3"
+        self.verify(
+            check_msg in output,
+            "Configuring invalid parameters should report the expected error.",
+        )
+        cmd2 = [
+            "port stop 1",
+            "del port tm node 1 600000",
+            "add port tm nonleaf node 1 600000 800000 0 1 3 -1 1 0 0",
+            "port start 1",
+            "add port tm leaf node 1 0 700000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 1 700000 0 2 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 2 700000 0 3 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 3 700000 0 201 4 -1 0 0xffffffff 0 0",
+        ]
+        output = ""
+        for cmd in cmd2:
+            output += self.pmd_output.execute_cmd(cmd)
+        check_msg = "node weight: weight must be between 1 and 200 (error 21)"
+        self.verify(
+            check_msg in output,
+            "Configuring invalid parameters should report the expected error.",
+        )
+        cmd3 = [
+            "add port tm leaf node 1 3 700000 0 200 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 4 600000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 5 600000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 6 600000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 7 600000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 8 600000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 9 600000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 10 600000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 11 600000 8 1 4 -1 0 0xffffffff 0 0",
+        ]
+        output = ""
+        for cmd in cmd3:
+            output += self.pmd_output.execute_cmd(cmd)
+        check_msg = "node priority: priority should be less than 8 (error 20)"
+        self.verify(
+            check_msg in output,
+            "Configuring invalid parameters should report the expected error.",
+        )
+        cmd4 = [
+            "add port tm leaf node 1 11 600000 7 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 12 600000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 13 600000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 14 600000 0 1 4 -1 0 0xffffffff 0 0",
+            "add port tm leaf node 1 15 600000 0 1 4 -1 0 0xffffffff 0 0",
+            "port tm hierarchy commit 1 no",
+        ]
+        output = ""
+        for cmd in cmd4:
+            output += self.pmd_output.execute_cmd(cmd)
+        check_msg = "ice_move_recfg_lan_txq(): move lan queue 12 failed\r\nice_hierarchy_commit(): move queue 12 failed\r\ncause unspecified: (no stated reason) (error 1)"
+        self.verify(
+            check_msg in output,
+            "Configuring invalid parameters should report the expected error.",
+        )
+
+    def tear_down(self):
+        """
+        Run after each test case.
+        """
+        self.close_testpmd()
+        self.dut.kill_all()
+
+    def tear_down_all(self):
+        """
+        Run after the test suite.
+        """
+        self.dut.kill_all()