From patchwork Mon Nov 28 01:54:15 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jin Ling X-Patchwork-Id: 120170 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id CF0C7A0093; Mon, 28 Nov 2022 02:54:34 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id CA18742BAC; Mon, 28 Nov 2022 02:54:34 +0100 (CET) Received: from mga02.intel.com (mga02.intel.com [134.134.136.20]) by mails.dpdk.org (Postfix) with ESMTP id 460E140156 for ; Mon, 28 Nov 2022 02:54:33 +0100 (CET) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1669600473; x=1701136473; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=bwH9YoSvS9R6xuGUzsXfY2Fw1/9v90B554Mg3iJ46nM=; b=SOt/CaJjcBG29OdFWgCJQBFCF/NB+deF+gBFmqpSnBJnMQLqZks7j3Rj SQGcxKnrurf3l+XY6/FunMSOfxDMRHV6pfMI8Uloy+3fStDUl7V0+utM+ 1Kg59wSwNuweNmZTe6F7+ucp3KsIu77y2Yt8PoyX7htuU6DXVV+R9aqwm j4I8kVpriuaWWG3eJVh/DW/Yjf3A2Co1qV0qalKDJBq0FqdECuCKV/Hz6 fAXQG+F3K/EbPiLLo2BK+wpTsyaR9Gq7pTs+1WVFVuY/Gfa6tXHzlD37K o5b0BtPI5VGap1NRe9MuWIxI/YhJdZWFWYuWd/FWo3VYWy8dDuUZbz2wz Q==; X-IronPort-AV: E=McAfee;i="6500,9779,10544"; a="302304095" X-IronPort-AV: E=Sophos;i="5.96,199,1665471600"; d="scan'208";a="302304095" Received: from orsmga005.jf.intel.com ([10.7.209.41]) by orsmga101.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 27 Nov 2022 17:54:32 -0800 X-ExtLoop1: 1 X-IronPort-AV: E=McAfee;i="6500,9779,10544"; a="817652972" X-IronPort-AV: E=Sophos;i="5.96,199,1665471600"; d="scan'208";a="817652972" Received: from dpdk-lingjin.sh.intel.com ([10.67.115.8]) by orsmga005.jf.intel.com with ESMTP; 27 Nov 2022 17:54:30 -0800 From: jinling To: dts@dpdk.org Cc: lijuan.tu@intel.com, jinling 
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2020 Intel Corporation
#


import re
import time

from framework.packet import Packet
from framework.pmd_output import PmdOutput
from framework.test_case import TestCase, check_supported_nic
from framework.utils import BLUE, GREEN, RED

# Flow rules that the ice switch engine can NOT offload.  They would normally
# fall back to the ACL engine; with the ACL engine disabled (devarg acl=off)
# testpmd must reject every one of them with the error message below.
rule_switch_unsupported = {
    "message": "ice_flow_create(): Failed to create flow",
    "ipv4": [
        "flow create 0 ingress pattern eth src spec 00:11:22:33:44:55 src mask 00:ff:ff:ff:ff:ff / ipv4 / end actions drop / end",
        "flow create 0 ingress pattern eth src spec 00:11:22:33:44:55 src mask ff:ff:ff:ff:ff:00 dst spec 33:00:00:00:00:02 dst mask ff:ff:ff:ff:ff:fe / ipv4 src spec 192.168.0.1 src mask 255.255.255.0 dst spec 192.168.0.2 dst mask 255.255.0.255 / end actions drop / end",
    ],
    "ipv4_tcp": [
        "flow create 0 ingress pattern eth src spec 00:11:22:33:44:55 src mask ff:ff:ff:ff:ff:fe / ipv4 / tcp / end actions drop / end",
        "flow create 0 ingress pattern eth src spec 00:11:22:33:44:55 src mask ff:ff:ff:ff:ff:00 dst spec 00:01:23:45:67:89 dst mask ff:ff:ff:ff:00:ff / ipv4 src spec 192.168.0.1 src mask 255.255.255.0 dst spec 192.168.0.2 dst mask 255.255.0.255 / tcp src spec 8010 src mask 65520 dst spec 8017 dst mask 65520 / end actions drop / end",
    ],
    # NOTE(review): key was misspelled "ipvv4_udp" in the submitted patch;
    # renamed here.  Safe: consumers only iterate .values() and read "message".
    "ipv4_udp": [
        "flow create 0 ingress pattern eth src spec 00:11:22:33:44:55 src mask ff:ff:ff:ff:ff:fe / ipv4 / udp / end actions drop / end",
        "flow create 0 ingress pattern eth src spec 00:11:22:33:44:55 src mask ff:ff:ff:ff:ff:00 dst spec 00:01:23:45:67:89 dst mask ff:ff:ff:ff:00:ff / ipv4 src spec 192.168.0.1 src mask 255.255.255.0 dst spec 192.168.0.2 dst mask 255.255.0.255 / udp src spec 8010 src mask 65520 dst spec 8017 dst mask 65520 / end actions drop / end",
    ],
    "ipv4_sctp": [
        "flow create 0 ingress pattern eth src spec 00:11:22:33:44:55 src mask ff:ff:ff:ff:ff:fe / ipv4 / sctp / end actions drop / end",
        "flow create 0 ingress pattern eth dst spec 00:11:22:33:44:55 dst mask ff:ff:ff:ff:ff:00 / ipv4 / sctp / end actions drop / end",
        "flow create 0 ingress pattern eth / ipv4 src spec 192.168.0.1 src mask 255.255.255.254 / sctp / end actions drop / end",
        "flow create 0 ingress pattern eth / ipv4 dst spec 192.168.0.2 dst mask 255.255.255.243 / sctp / end actions drop / end",
        "flow create 0 ingress pattern eth / ipv4 / sctp src spec 8010 src mask 65520 / end actions drop / end",
        "flow create 0 ingress pattern eth / ipv4 / sctp dst spec 8010 dst mask 65520 / end actions drop / end",
        "flow create 0 ingress pattern eth / ipv4 src spec 192.168.0.1 src mask 255.255.255.0 dst spec 192.168.0.2 dst mask 255.255.0.255 / sctp src spec 8010 src mask 65520 dst spec 8017 dst mask 65520 / end actions drop / end",
        "flow create 0 ingress pattern eth src spec 00:11:22:33:44:55 src mask ff:ff:ff:ff:ff:00 dst spec 00:01:23:45:67:89 dst mask ff:ff:ff:ff:00:ff / ipv4 src spec 192.168.0.1 src mask 255.255.255.0 dst spec 192.168.0.2 dst mask 255.255.0.255 / sctp src spec 8010 src mask 65520 dst spec 8017 dst mask 65520 / end actions drop / end",
    ],
}

# Flow rules that the switch engine CAN offload even when ACL is disabled;
# testpmd is expected to report success (group 2 = switch) for each of them.
rule_switch_supported = {
    "message": "Succeeded to create (2) flow",
    "ipv4": [
        "flow create 0 ingress pattern eth dst spec 00:11:22:33:44:55 dst mask ff:ff:ff:ff:ff:ff / ipv4 / end actions drop / end",
        "flow create 0 ingress pattern eth / ipv4 src spec 192.168.0.1 src mask 255.255.255.0 / end actions drop / end",
        "flow create 0 ingress pattern eth / ipv4 dst spec 192.168.0.2 dst mask 255.255.255.0 / end actions drop / end",
        "flow create 0 ingress pattern eth / ipv4 src spec 192.168.0.1 src mask 255.255.255.0 dst spec 192.168.0.2 dst mask 255.255.0.255 / end actions drop / end",
    ],
    "ipv4_tcp": [
        "flow create 0 ingress pattern eth dst spec 00:11:22:33:44:55 dst mask ff:ff:ff:ff:ff:00 / ipv4 / tcp / end actions drop / end",
        "flow create 0 ingress pattern eth / ipv4 src spec 192.168.0.1 src mask 255.255.255.254 / tcp / end actions drop / end",
        "flow create 0 ingress pattern eth / ipv4 dst spec 192.168.0.2 dst mask 255.255.255.243 / tcp / end actions drop / end",
        "flow create 0 ingress pattern eth / ipv4 / tcp src spec 8010 src mask 65520 / end actions drop / end",
        "flow create 0 ingress pattern eth / ipv4 / tcp dst spec 8010 dst mask 65520 / end actions drop / end",
        "flow create 0 ingress pattern eth / ipv4 src spec 192.168.0.1 src mask 255.255.255.0 dst spec 192.168.0.2 dst mask 255.255.0.255 / tcp src spec 8010 src mask 65520 dst spec 8017 dst mask 65520 / end actions drop / end",
    ],
    # NOTE(review): key renamed from the misspelled "ipvv4_udp" (see above).
    "ipv4_udp": [
        "flow create 0 ingress pattern eth dst spec 00:11:22:33:44:55 dst mask ff:ff:ff:ff:ff:00 / ipv4 / udp / end actions drop / end",
        "flow create 0 ingress pattern eth / ipv4 src spec 192.168.0.1 src mask 255.255.255.254 / udp / end actions drop / end",
        "flow create 0 ingress pattern eth / ipv4 dst spec 192.168.0.2 dst mask 255.255.255.243 / udp / end actions drop / end",
        "flow create 0 ingress pattern eth / ipv4 / udp src spec 8010 src mask 65520 / end actions drop / end",
        "flow create 0 ingress pattern eth / ipv4 / udp dst spec 8010 dst mask 65520 / end actions drop / end",
        "flow create 0 ingress pattern eth / ipv4 src spec 192.168.0.1 src mask 255.255.255.0 dst spec 192.168.0.2 dst mask 255.255.0.255 / udp src spec 8010 src mask 65520 dst spec 8017 dst mask 65520 / end actions drop / end",
    ],
}

# Sub-case table shared by test_disable_acl: "case" lists the rule sets to
# run, "result" accumulates per-sub-case pass/fail flags at run time.
subcases = {
    "case": [rule_switch_supported, rule_switch_unsupported],
    "result": [],
}


class ICEDCFACLDisableFilterTest(TestCase):
    """
    Verify the ice DCF devarg that disables the ACL engine (acl=off):
    startup should be noticeably faster, and rules that only the ACL engine
    could create must be rejected while switch-engine rules still work.
    """

    supported_nic = ["ICE_25G-E810C_SFP", "ICE_25G-E810_XXV_SFP", "ICE_100G-E810C_QSFP"]

    @check_supported_nic(supported_nic)
    def set_up_all(self):
        """
        Run at the start of each test suite.
        """
        self.dut_ports = self.dut.get_ports(self.nic)
        # Verify that enough ports are available
        self.verify(len(self.dut_ports) >= 2, "Insufficient ports")
        localPort0 = self.tester.get_local_port(self.dut_ports[0])
        self.tester_iface0 = self.tester.get_interface(localPort0)
        self.pf0_intf = self.dut.ports_info[self.dut_ports[0]]["intf"]
        self.dut.send_expect("ifconfig %s up" % self.tester_iface0, "# ")
        self.pkt = Packet()
        self.pmd_output = PmdOutput(self.dut)
        self.testpmd_status = "close"
        # bind pf to kernel
        self.dut.bind_interfaces_linux("ice")
        # set vf driver
        self.vf_driver = "vfio-pci"
        self.dut.send_expect("modprobe vfio-pci", "# ")
        self.path = self.dut.apps_name["test-pmd"]
        self.setup_1pf_1vf_env()
        # re-raise the tester interface in case VF creation toggled the link
        self.dut.send_expect("ifconfig %s up" % self.tester_iface0, "# ", 15)

    def setup_1pf_1vf_env(self, pf_port=0, driver="default"):
        """
        Create one trusted VF on the given PF and bind it to vfio-pci.

        :param pf_port: index into self.dut_ports of the PF to use.
        :param driver: driver argument forwarded to generate_sriov_vfs_by_port.
        """
        self.used_dut_port_0 = self.dut_ports[pf_port]
        # query PF driver info (output not needed, kept for its side effect
        # of confirming the interface exists)
        self.dut.send_expect("ethtool -i %s" % self.pf0_intf, "#")
        # generate 1 VF on PF
        self.dut.generate_sriov_vfs_by_port(self.used_dut_port_0, 1, driver=driver)
        self.sriov_vfs_port_0 = self.dut.ports_info[self.used_dut_port_0]["vfs_port"]
        self.vf0_pci = self.sriov_vfs_port_0[0].pci
        # DCF requires a trusted VF
        self.dut.send_expect("ip link set %s vf 0 trust on" % self.pf0_intf, "#")
        # bind drivers
        for port in self.sriov_vfs_port_0:
            port.bind_driver(self.vf_driver)
        # give the kernel time to finish VF setup before testpmd starts
        time.sleep(5)

    def set_up(self):
        """
        Run before each test case.
        """
        self.setup_1pf_1vf_env()

    def create_testpmd_command(self, param, acl_status):
        """
        Build the full testpmd command line for the DCF VF.

        :param param: extra testpmd application parameters (appended after -i).
        :param acl_status: "off"/"on" to pass acl=<status> as a devarg;
            empty string omits the acl devarg entirely (driver default).
        :return: the complete shell command string.
        """
        if acl_status != "":
            all_eal_param = self.dut.create_eal_parameters(
                cores="1S/4C/1T",
                ports=[self.vf0_pci],
                port_options={self.vf0_pci: "cap=dcf,acl=" + acl_status},
            )
        else:
            all_eal_param = self.dut.create_eal_parameters(
                cores="1S/4C/1T",
                ports=[self.vf0_pci],
                port_options={self.vf0_pci: "cap=dcf"},
            )
        command = self.path + all_eal_param + "--log-level='ice,7'" + " -- -i" + param
        return command

    def launch_testpmd(self, param="", acl_status="off"):
        """
        Launch testpmd and wait for the interactive prompt.

        :param param: extra testpmd parameters, see create_testpmd_command.
        :param acl_status: acl devarg value, "off" by default.
        :return: testpmd startup output.
        """
        time.sleep(5)
        command = self.create_testpmd_command(param, acl_status)
        out = self.dut.send_expect(command, "testpmd> ", 20)
        return out

    def create_acl_filter_rule(self, rules, session_name="", check_stats=False):
        """
        Send every flow rule in *rules* and compare testpmd output against
        rules["message"].

        :param rules: dict with a "message" key (expected output substring)
            and lists of flow-create commands as the other values.
        :param session_name: session used to send commands; defaults to self.dut.
        :param check_stats: True verifies no rule produced the message;
            False (default) verifies every rule produced the message.
        """
        if session_name == "":
            session_name = self.dut

        # rule -> True means the expected message was NOT seen for that rule
        rule_list = {}
        for item in rules.values():
            if isinstance(item, list):
                for rule in item:
                    out = session_name.send_expect(rule, "testpmd> ")
                    if rules["message"] in out:
                        rule_list.update({rule: False})
                    else:
                        rule_list.update({rule: True})

        if check_stats:
            self.verify(
                all(list(rule_list.values())),
                "some rules created successfully, result %s, rule %s"
                % (rule_list, rules),
            )
        else:
            self.verify(
                not any(list(rule_list.values())),
                "all rules should create failed, result {}".format(rule_list),
            )

    def test_startup_time(self):
        """
        Measure average testpmd startup time with and without acl=off and
        verify that disabling ACL saves at least 6 seconds.
        """
        repeat_time = 5
        start_up_time_acl_off = []
        start_up_time_acl_on = []
        # "time -p" prints e.g. "real 6.45"; capture the seconds value.
        # Raw string avoids invalid-escape warnings for \d.
        regex = re.compile(r".*real (\d+\.\d{2}).*")
        # acl = off
        command_acl_off = self.create_testpmd_command(param="", acl_status="off")
        # no acl devarg -> ACL engine enabled (driver default)
        command_acl_on = self.create_testpmd_command(param="", acl_status="")
        # record startup time
        for i in range(repeat_time):
            out_acl_off = self.dut.send_expect(
                "echo quit | time -p ./%s" % (command_acl_off),
                "#",
                120,
            )
            out_acl_on = self.dut.send_expect(
                "echo quit | time -p ./%s" % (command_acl_on),
                "#",
                120,
            )
            time_acl_on = regex.findall(out_acl_on)[0]
            time_acl_off = regex.findall(out_acl_off)[0]
            if time_acl_on != "" and time_acl_off != "":
                # float() instead of eval(): the value comes straight from
                # console output and must never be executed as code.
                start_up_time_acl_off.append(float(time_acl_off))
                start_up_time_acl_on.append(float(time_acl_on))
        # get the average
        avg_start_up_time_acl_on = sum(start_up_time_acl_on) / repeat_time
        avg_start_up_time_acl_off = sum(start_up_time_acl_off) / repeat_time
        self.verify(
            avg_start_up_time_acl_on >= avg_start_up_time_acl_off + 6,
            "disable acl to reduce startup time failed!!!",
        )
        self.testpmd_status = "close"

    def test_disable_acl(self):
        """
        With acl=off, switch-supported rules must succeed and ACL-only rules
        must fail to create.
        """
        # launch testpmd with the ACL engine disabled
        self.launch_testpmd(acl_status="off")

        self.dut.send_expect("flow flush 0", "testpmd> ", 120)

        # reset the shared module-level result list so a rerun of this case
        # does not see failures accumulated by a previous run
        subcases["result"] = []
        for subcase in subcases["case"]:
            try:
                self.create_acl_filter_rule(subcase)
            except Exception as e:
                print(RED(e))
                subcases["result"].append(False)
        self.verify(all(subcases["result"]), "test disable failed.")

        self.testpmd_status = "open"

    def quit_testpmd(self):
        """
        quit testpmd
        """
        if self.testpmd_status != "close":
            # destroy all flow rules on DCF
            self.dut.send_expect("flow flush 0", "testpmd> ", 15)
            self.dut.send_expect("clear port stats all", "testpmd> ", 15)
            self.dut.send_expect("quit", "#", 30)
            # kill all DPDK application
            self.dut.kill_all()

    def tear_down(self):
        """
        Run after each test case.
        """
        self.quit_testpmd()

    def tear_down_all(self):
        """
        Run after each test suite.
        """
        self.dut.kill_all()