From patchwork Thu Jan 13 15:07:50 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Huang, ZhiminX" X-Patchwork-Id: 105763 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 328A6A00C4; Thu, 13 Jan 2022 07:38:42 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 1D8E742720; Thu, 13 Jan 2022 07:38:42 +0100 (CET) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id 141B44271D for ; Thu, 13 Jan 2022 07:38:39 +0100 (CET) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1642055920; x=1673591920; h=from:to:cc:subject:date:message-id:in-reply-to: references; bh=zmCuVZ8psUg2611gQBonvJeOKT1dDbcbrMF0o+Qg+uY=; b=En5Ur33LgC9c/O42o61pP6agUZNGHUj6yTR9RmXjP+4c+dsqRoEJMgEg 3Lt2KY0RDr7gxVeHMf9odcVyaZLEDuDU00s6feyeGNx8WQeBFpfTFXC+X jOpyyuOd8kuF/bbdnfMzAZMR/IrMqvju+selwCgabTFX6C4cY78tTr/Jd U23fau3J9+80PhlpghdQqsYI78sR075fC7lmVQbFL4IO6cmU83O8kXkLs y1Fyr7Yy81lxopX+Hv4z6yjdzIRQsXqjo8onI2dQv4nrTbcILZBHzp/oy hUlmJDYyY66fKCyGPcTyEq/UsDc1V6Vnng0Q79DZXlAVhrrAu8fTiVUtO w==; X-IronPort-AV: E=McAfee;i="6200,9189,10225"; a="241501752" X-IronPort-AV: E=Sophos;i="5.88,284,1635231600"; d="scan'208";a="241501752" Received: from fmsmga001.fm.intel.com ([10.253.24.23]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Jan 2022 22:38:39 -0800 X-IronPort-AV: E=Sophos;i="5.88,284,1635231600"; d="scan'208";a="670419129" Received: from unknown (HELO localhost.localdomain) ([10.239.251.93]) by fmsmga001-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Jan 2022 22:38:38 -0800 From: Zhimin Huang To: dts@dpdk.org Cc: Zhimin Huang Subject: [dts][PATCH V2 1/4] tests/cvl_advanced_iavf_rss:add vf 
rss test case Date: Thu, 13 Jan 2022 23:07:50 +0800 Message-Id: <20220113150753.27031-2-zhiminx.huang@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20220113150753.27031-1-zhiminx.huang@intel.com> References: <20220113150753.27031-1-zhiminx.huang@intel.com> X-BeenThere: dts@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: test suite reviews and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dts-bounces@dpdk.org add vf rss test case with checksum as inputset. Signed-off-by: Zhimin Huang --- tests/TestSuite_cvl_advanced_iavf_rss.py | 416 ++++++++++++++++++++++- 1 file changed, 408 insertions(+), 8 deletions(-) diff --git a/tests/TestSuite_cvl_advanced_iavf_rss.py b/tests/TestSuite_cvl_advanced_iavf_rss.py index 1848baa2..0e63a242 100755 --- a/tests/TestSuite_cvl_advanced_iavf_rss.py +++ b/tests/TestSuite_cvl_advanced_iavf_rss.py @@ -37,7 +37,7 @@ from framework.packet import Packet from framework.pmd_output import PmdOutput from framework.test_case import TestCase -from .rte_flow_common import RssProcessing +from .rte_flow_common import RssProcessing, FdirProcessing, check_mark vf0_mac = "00:11:22:33:44:55" @@ -505,6 +505,29 @@ mac_ipv4_all = { ], } +mac_ipv4_ipv4_chksum = { + 'sub_casename': 'mac_ipv4_ipv4_chksum', + 'port_id': 0, + 'rule': 'flow create 0 ingress pattern eth / ipv4 / end actions rss types ipv4-chksum end queues end / end', + 'test': [ + { + 'send_packet': eval(str(mac_ipv4_toeplitz_basic_pkt['ipv4-nonfrag']).replace('src="192.168.0.2"', + 'src="192.168.0.2", chksum=0x1')), + 'action': 'save_hash', + }, + { + 'send_packet': eval(str(mac_ipv4_toeplitz_basic_pkt['ipv4-nonfrag']).replace('src="192.168.0.2"', + 'src="192.168.0.2", chksum=0xffff')), + 'action': 'check_hash_different', + }, + { + 'send_packet': eval(str(mac_ipv4_toeplitz_basic_pkt['ipv4-nonfrag']).replace('dst="192.168.0.1", src="192.168.0.2"', + 'dst="192.168.1.1", src="192.168.1.2", chksum=0x1')), + 'action': 
'check_hash_same', + }, + ], +} + # mac ipv4_udp mac_ipv4_udp_l2_src = { 'sub_casename': 'mac_ipv4_udp_l2_src', @@ -902,6 +925,52 @@ mac_ipv4_udp_all = { ], } +mac_ipv4_udp_l4_chksum = { + 'sub_casename': 'mac_ipv4_udp_l4_chksum', + 'port_id': 0, + 'rule': 'flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types l4-chksum end queues end / end', + 'test': [ + { + 'send_packet': eval(str(mac_ipv4_udp_toeplitz_basic_pkt['ipv4-udp']).replace("dport=23", "dport=23,chksum=0xffff")), + 'action': 'save_hash', + }, + { + 'send_packet': eval(str(mac_ipv4_udp_toeplitz_basic_pkt['ipv4-udp']).replace("dport=23", "dport=23,chksum=0xfffe")), + 'action': 'check_hash_different', + }, + { + 'send_packet': eval(str(mac_ipv4_udp_toeplitz_basic_pkt['ipv4-udp']).replace('dst="192.168.0.1", src="192.168.0.2"', + 'dst="192.168.1.1", src="192.168.1.2", chksum=0x3') + .replace('sport=22,dport=23', 'sport=32,dport=33,chksum=0xffff')), + 'action': 'check_hash_same', + }, + ], +} + +mac_ipv4_udp_ipv4_chksum = { + 'sub_casename': 'mac_ipv4_udp_ipv4_chksum', + 'port_id': 0, + 'rule': 'flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-chksum end queues end / end', + 'test': [ + { + 'send_packet': eval(str(mac_ipv4_udp_toeplitz_basic_pkt['ipv4-udp']).replace('src="192.168.0.2"', 'src="192.168.0.2",chksum=0xffff')), + 'action': 'save_hash', + }, + { + 'send_packet': eval(str(mac_ipv4_udp_toeplitz_basic_pkt['ipv4-udp']).replace('src="192.168.0.2"', 'src="192.168.0.2",chksum=0xfffe')), + 'action': 'check_hash_different', + }, + { + 'send_packet': eval(str(mac_ipv4_udp_toeplitz_basic_pkt['ipv4-udp']).replace('dst="192.168.0.1", src="192.168.0.2"', + 'dst="192.168.1.1", src="192.168.1.2", chksum=0xffff') + .replace('sport=22,dport=23', 'sport=32,dport=33,chksum=0xffff')), + 'action': 'check_hash_same', + }, + ], +} + +mac_ipv4_udp_chksum = [mac_ipv4_udp_l4_chksum, mac_ipv4_udp_ipv4_chksum] + # mac ipv4_tcp mac_ipv4_tcp_l2_src = { 'sub_casename': 
'mac_ipv4_tcp_l2_src', @@ -1299,6 +1368,11 @@ mac_ipv4_tcp_all = { ], } +mac_ipv4_tcp_chksum = [eval(str(element).replace("mac_ipv4_udp", "mac_ipv4_tcp") + .replace("ipv4 / udp", "ipv4 / tcp") + .replace("/UDP(sport=", "/TCP(sport=")) + for element in mac_ipv4_udp_chksum] + # mac ipv4_sctp mac_ipv4_sctp_l2_src = { 'sub_casename': 'mac_ipv4_sctp_l2_src', @@ -1696,6 +1770,11 @@ mac_ipv4_sctp_all = { ], } +mac_ipv4_sctp_chksum = [eval(str(element).replace("mac_ipv4_udp", "mac_ipv4_sctp") + .replace("/UDP(sport=", "/SCTP(sport=") + .replace("ipv4 / udp", "ipv4 / sctp")) + for element in mac_ipv4_udp_chksum] + # mac_ipv6 mac_ipv6_l2_src = { 'sub_casename': 'mac_ipv6_l2_src', @@ -2436,6 +2515,29 @@ mac_ipv6_udp_all = { # }, ], } + +mac_ipv6_udp_l4_chksum = { + 'sub_casename': 'mac_ipv6_udp_l4_chksum', + 'port_id': 0, + 'rule': 'flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types l4-chksum end queues end / end', + 'test': [ + { + 'send_packet': eval(str(mac_ipv6_toeplitz_basic_pkt['ipv6-udp']).replace("dport=23", "dport=23, chksum=0x1")), + 'action': 'save_hash', + }, + { + 'send_packet': eval(str(mac_ipv6_toeplitz_basic_pkt['ipv6-udp']).replace("dport=23", "dport=23, chksum=0x2")), + 'action': 'check_hash_different', + }, + { + 'send_packet': eval(str(mac_ipv6_toeplitz_basic_pkt['ipv6-udp']).replace("sport=22,dport=23", "sport=22,dport=23,chksum=0x1") + .replace("1800:2929", "1800:3939") + .replace("2020", "3030")), + 'action': 'check_hash_same', + }, + ], +} + # mac_ipv6_tcp mac_ipv6_tcp_l2_src = { 'sub_casename': 'mac_ipv6_tcp_l2_src', @@ -2800,6 +2902,11 @@ mac_ipv6_tcp_all = { # }, ], } + +mac_ipv6_tcp_l4_chksum = eval(str(mac_ipv6_udp_l4_chksum).replace("mac_ipv6_udp", "mac_ipv6_tcp") + .replace("ipv6 / udp", "ipv6 / tcp") + .replace("/UDP(sport=", "/TCP(sport=")) + # mac_ipv6_sctp mac_ipv6_sctp_l2_src = { 'sub_casename': 'mac_ipv6_sctp_l2_src', @@ -3164,41 +3271,47 @@ mac_ipv6_sctp_all = { # }, ], } + +mac_ipv6_sctp_l4_chksum = 
eval(str(mac_ipv6_udp_l4_chksum).replace("mac_ipv6_udp", "mac_ipv6_sctp") + .replace("/SCTP(sport=", "/TCP(sport=") + .replace("ipv6 / udp", "ipv6 / sctp") + .replace("/UDP(sport=", "/SCTP(sport=")) + # toeplitz related data end -mac_ipv4 = [mac_ipv4_l2_src, mac_ipv4_l2_dst, mac_ipv4_l2src_l2dst, mac_ipv4_l3_src, mac_ipv4_l3_dst, mac_ipv4_all] +mac_ipv4 = [mac_ipv4_l2_src, mac_ipv4_l2_dst, mac_ipv4_l2src_l2dst, mac_ipv4_l3_src, mac_ipv4_l3_dst, mac_ipv4_all, mac_ipv4_ipv4_chksum] mac_ipv4_udp = [mac_ipv4_udp_l2_src, mac_ipv4_udp_l2_dst, mac_ipv4_udp_l2src_l2dst, mac_ipv4_udp_l3_src, mac_ipv4_udp_l3_dst, mac_ipv4_udp_l3src_l4src, mac_ipv4_udp_l3src_l4dst, mac_ipv4_udp_l3dst_l4src, mac_ipv4_udp_l3dst_l4dst, - mac_ipv4_udp_l4_src, mac_ipv4_udp_l4_dst, mac_ipv4_udp_all] + mac_ipv4_udp_l4_src, mac_ipv4_udp_l4_dst, mac_ipv4_udp_all] + mac_ipv4_udp_chksum mac_ipv4_tcp = [mac_ipv4_tcp_l2_src, mac_ipv4_tcp_l2_dst, mac_ipv4_tcp_l2src_l2dst, mac_ipv4_tcp_l3_src, mac_ipv4_tcp_l3_dst, mac_ipv4_tcp_l3src_l4src, mac_ipv4_tcp_l3src_l4dst, mac_ipv4_tcp_l3dst_l4src, mac_ipv4_tcp_l3dst_l4dst, - mac_ipv4_tcp_l4_src, mac_ipv4_tcp_l4_dst, mac_ipv4_tcp_all] + mac_ipv4_tcp_l4_src, mac_ipv4_tcp_l4_dst, mac_ipv4_tcp_all] + mac_ipv4_tcp_chksum mac_ipv4_sctp = [mac_ipv4_sctp_l2_src, mac_ipv4_sctp_l2_dst, mac_ipv4_sctp_l2src_l2dst, mac_ipv4_sctp_l3_src, mac_ipv4_sctp_l3_dst, mac_ipv4_sctp_l3src_l4src, mac_ipv4_sctp_l3src_l4dst, mac_ipv4_sctp_l3dst_l4src, mac_ipv4_sctp_l3dst_l4dst, - mac_ipv4_sctp_l4_src, mac_ipv4_sctp_l4_dst, mac_ipv4_sctp_all] + mac_ipv4_sctp_l4_src, mac_ipv4_sctp_l4_dst, mac_ipv4_sctp_all] + mac_ipv4_sctp_chksum mac_ipv6 = [mac_ipv6_l2_src, mac_ipv6_l2_dst, mac_ipv6_l2src_l2dst, mac_ipv6_l3_src, mac_ipv6_l3_dst, mac_ipv6_all] mac_ipv6_udp = [mac_ipv6_udp_l2_src, mac_ipv6_udp_l2_dst, mac_ipv6_udp_l2src_l2dst, mac_ipv6_udp_l3_src, mac_ipv6_udp_l3_dst, mac_ipv6_udp_l3src_l4src, mac_ipv6_udp_l3src_l4dst, mac_ipv6_udp_l3dst_l4src, mac_ipv6_udp_l3dst_l4dst, - mac_ipv6_udp_l4_src, 
mac_ipv6_udp_l4_dst, mac_ipv6_udp_all] + mac_ipv6_udp_l4_src, mac_ipv6_udp_l4_dst, mac_ipv6_udp_all, mac_ipv6_udp_l4_chksum] mac_ipv6_tcp = [mac_ipv6_tcp_l2_src, mac_ipv6_tcp_l2_dst, mac_ipv6_tcp_l2src_l2dst, mac_ipv6_tcp_l3_src, mac_ipv6_tcp_l3_dst, mac_ipv6_tcp_l3src_l4src, mac_ipv6_tcp_l3src_l4dst, mac_ipv6_tcp_l3dst_l4src, mac_ipv6_tcp_l3dst_l4dst, - mac_ipv6_tcp_l4_src, mac_ipv6_tcp_l4_dst, mac_ipv6_tcp_all] + mac_ipv6_tcp_l4_src, mac_ipv6_tcp_l4_dst, mac_ipv6_tcp_all, mac_ipv6_tcp_l4_chksum] mac_ipv6_sctp = [mac_ipv6_sctp_l2_src, mac_ipv6_sctp_l2_dst, mac_ipv6_sctp_l2src_l2dst, mac_ipv6_sctp_l3_src, mac_ipv6_sctp_l3_dst, mac_ipv6_sctp_l3src_l4src, mac_ipv6_sctp_l3src_l4dst, mac_ipv6_sctp_l3dst_l4src, mac_ipv6_sctp_l3dst_l4dst, - mac_ipv6_sctp_l4_src, mac_ipv6_sctp_l4_dst, mac_ipv6_sctp_all] + mac_ipv6_sctp_l4_src, mac_ipv6_sctp_l4_dst, mac_ipv6_sctp_all, mac_ipv6_sctp_l4_chksum] # symmetric related data start mac_ipv4_symmetric = { @@ -5212,6 +5325,14 @@ class AdvancedIavfRSSTest(TestCase): out = self.pmd_output.execute_cmd(i, timeout=1) self.verify('iavf_flow_create(): Failed to create flow' in out, "rule %s create successfully" % i) + rules_chksum = [ + 'flow create 0 ingress pattern eth / ipv4 / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv6 / end actions rss types ipv6-chksum end queues end / end' + ] + for i in rules_chksum: + out = self.pmd_output.execute_cmd(i) + self.verify('Invalid argument' in out or 'Bad arguments' in out, "negative rules not support to create") + def test_multirules(self): # Subcase 1: two rules with same pattern but different hash input set, not hit default profile self.logger.info('===================Test sub case: multirules subcase 1 ================') @@ -5450,6 +5571,285 @@ class AdvancedIavfRSSTest(TestCase): def test_symmetric_mac_ipv6_gre_ipv6_udp(self): self.rssprocess.handle_rss_distribute_cases(cases_info=mac_ipv6_gre_ipv6_udp_symmetric) + def 
test_checksum_for_different_payload_length(self): + self.rssprocess.error_msgs = [] + self.pmd_output.quit() + self.pmd_output.start_testpmd(cores="1S/4C/1T", param="--rxq={0} --txq={0}".format(self.rxq), + ports=[self.sriov_vfs_port[0].pci], socket=self.ports_socket) + self.pmd_output.execute_cmd("set verbose 1") + self.pmd_output.execute_cmd("set fwd rxonly") + self.pmd_output.execute_cmd("start") + pkt_list = [ + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP()/("X"*48)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP()/("X"*64)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP()/UDP()/("X"*48)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP()/UDP()/("X"*64)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP()/TCP()/("X"*48)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP()/TCP()/("X"*64)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP()/SCTP()/("X"*48)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP()/SCTP()/("X"*64)', + ] + rule_list = [ + 'flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end', + 'flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp end queues end / end', + 'flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp end queues end / end', + 'flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types l4-chksum end queues end / end' + ] + test_temp = { + 'send_packet': '', + 'action': '', + } + pre_test = [] + for i in range(len(pkt_list)): + if i == 0: + tests = eval(str(test_temp).replace("'send_packet': ''", "'send_packet': '{}'".format(pkt_list[i])) + .replace("'action': ''", "'action': 'save_hash'")) + 
else: + tests = eval(str(test_temp).replace("'send_packet': ''", "'send_packet': '{}'".format(pkt_list[i])) + .replace("'action': ''", "'action': 'check_hash_same'")) + pre_test.append(tests) + self.rssprocess.handle_tests(pre_test) + test_5_tuple = [] + rules = self.rssprocess.create_rule(rule_list[0:3]) + self.rssprocess.check_rule(rule_list=rules) + for i in range(len(pkt_list)): + if i % 2 == 0: + tests = eval(str(test_temp).replace("'send_packet': ''", "'send_packet': '{}'".format(pkt_list[i])) + .replace("'action': ''", "'action': 'save_hash'")) + else: + tests = eval(str(test_temp).replace("'send_packet': ''", "'send_packet': '{}'".format(pkt_list[i])) + .replace("'action': ''", "'action': 'check_hash_same'")) + test_5_tuple.append(tests) + self.rssprocess.handle_tests(test_5_tuple) + test_l4_chksum = [] + rules = self.rssprocess.create_rule(rule_list[3:]) + self.rssprocess.check_rule(rule_list=rules) + for i in range(2, len(pkt_list)): + if i % 2 == 0: + tests = eval(str(test_temp).replace("'send_packet': ''", "'send_packet': '{}'".format(pkt_list[i])) + .replace("'action': ''", "'action': 'save_hash'")) + else: + tests = eval(str(test_temp).replace("'send_packet': ''", "'send_packet': '{}'".format(pkt_list[i])) + .replace("'action': ''", "'action': 'check_hash_different'")) + test_l4_chksum.append(tests) + self.rssprocess.handle_tests(test_l4_chksum) + test_ipv4_chksum = [] + ipv4_chksum_rule = eval(str(rule_list).replace("l4-chksum", "ipv4-chksum")) + rules = self.rssprocess.create_rule(ipv4_chksum_rule[3:] + ["flow create 0 ingress pattern eth / ipv4 / end actions rss types ipv4-chksum end queues end / end"]) + self.rssprocess.check_rule(rule_list=rules) + for i in range(len(pkt_list)): + if i % 2 == 0: + tests = eval(str(test_temp).replace("'send_packet': ''", "'send_packet': '{}'".format(pkt_list[i])) + .replace("'action': ''", "'action': 'save_hash'")) + else: + tests = eval(str(test_temp).replace("'send_packet': ''", "'send_packet': 
'{}'".format(pkt_list[i])) + .replace("'action': ''", "'action': 'check_hash_different'")) + test_ipv4_chksum.append(tests) + self.rssprocess.handle_tests(test_ipv4_chksum) + self.verify(not self.rssprocess.error_msgs, 'some subcases failed') + + def validate_packet_checksum(self, pkts, expect_pkts): + expect_chksum = dict() + checksum_pattern = re.compile("chksum.*=.*(0x[0-9a-z]+)") + self.tester.send_expect("scapy", ">>> ") + sniff_src = self.dut.get_mac_address(self.dut_ports[0]) + for pkt in expect_pkts: + self.tester.send_expect("p = %s" % expect_pkts[pkt], ">>>") + out = self.tester.send_expect("p.show2()", ">>>") + chksums = checksum_pattern.findall(out) + expect_chksum[pkt] = chksums + self.logger.info(expect_chksum) + self.tester.send_expect("exit()", "#") + for pkt in pkts: + inst = self.tester.tcpdump_sniff_packets(intf=self.tester_iface0, count=len(pkts), filters=[{'layer': 'ether', 'config': {'src': vf0_mac}}]) + out = self.rssprocess.send_pkt_get_output(pkts=pkts[pkt]) + rece_pkt = self.tester.load_tcpdump_sniff_packets(inst) + rece_chksum = rece_pkt[0].sprintf("%IP.chksum%;%TCP.chksum%;%UDP.chksum%;%SCTP.chksum%").split(";") + self.logger.info(rece_chksum) + test_chksum = [] + [test_chksum.append(i) for i in rece_chksum if i != '??'] + self.logger.info("expect_chksum:{} test_chksum:{}".format(expect_chksum[pkt], test_chksum)) + self.verify(expect_chksum[pkt] == test_chksum, 'tx checksum is incorrect') + + def test_flow_rule_not_impact_rx_tx_chksum(self): + self.pmd_output.quit() + self.pmd_output.start_testpmd(cores="1S/4C/1T", param="--rxq={0} --txq={0}".format(self.rxq), + ports=[self.sriov_vfs_port[0].pci], socket=self.ports_socket) + self.pmd_output.execute_cmd("set verbose 1") + self.pmd_output.execute_cmd("port stop all") + self.pmd_output.execute_cmd("set fwd csum") + self.pmd_output.execute_cmd("csum set ip hw 0") + self.pmd_output.execute_cmd("csum set udp hw 0") + self.pmd_output.execute_cmd("csum set tcp hw 0") + 
self.pmd_output.execute_cmd("csum set sctp hw 0") + self.pmd_output.execute_cmd("port start all") + self.pmd_output.execute_cmd("start") + self.tester.check_interfaces_link(self.tester_iface0) + pkt_list = { + 'IP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1", chksum=0xfff3)/("X"*48)', + 'IP/TCP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1")/TCP(sport=22, chksum=0xfff3)/("X"*48)', + 'IP/UDP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1")/UDP(sport=22, chksum=0x1)/("X"*48)', + 'IP/SCTP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1")/SCTP(sport=22, chksum=0x0)/("X"*48)', + 'IPv6/TCP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IPv6()/TCP(sport=22, chksum=0xe38)/("X"*48)', + 'IPv6/UDP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IPv6()/UDP(sport=22, chksum=0xe38)/("X"*48)', + 'IPv6/SCTP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IPv6()/SCTP(sport=22, chksum=0x0)/("X"*48)', + } + expect_pkt = { + 'IP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1")/("X"*48)', + 'IP/TCP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1")/TCP(sport=22)/("X"*48)', + 'IP/UDP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1")/UDP(sport=22)/("X"*48)', + 'IP/SCTP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1")/SCTP(sport=22)/("X"*48)', + 'IPv6/TCP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IPv6()/TCP(sport=22)/("X"*48)', + 'IPv6/UDP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IPv6()/UDP(sport=22)/("X"*48)', + 'IPv6/SCTP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IPv6()/SCTP(sport=22)/("X"*48)', + } + rule_list = [ + 'flow create 0 ingress pattern eth / ipv4 / end actions rss types ipv4-chksum end queues end / end', + 'flow create 0 ingress pattern 
eth / ipv4 / tcp / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types l4-chksum end queues end / end', + ] + self.validate_packet_checksum(pkt_list, expect_pkt) + rss_test = { + 'sub_casename': 'rss_test', + 'port_id': 0, + 'rule': rule_list, + 'pre-test': [ + { + 'send_packet': pkt_list['IP'], + 'action': {'save_hash': 'IP'}, + }, + { + 'send_packet': pkt_list['IP/TCP'], + 'action': {'save_hash': 'IP/TCP'}, + }, + { + 'send_packet': pkt_list['IP/UDP'], + 'action': {'save_hash': 'IP/UDP'}, + }, + { + 'send_packet': pkt_list['IP/SCTP'], + 'action': {'save_hash': 'IP/SCTP'}, + }, + { + 'send_packet': pkt_list['IPv6/TCP'], + 'action': {'save_hash': 'IPv6/TCP'}, + }, + { + 'send_packet': pkt_list['IPv6/UDP'], + 'action': {'save_hash': 'IPv6/UDP'}, + }, + { + 'send_packet': pkt_list['IPv6/SCTP'], + 'action': {'save_hash': 'IPv6/SCTP'}, + }, + ], + 'test': [ + { + 'send_packet': pkt_list['IP'], + 'action': {'check_hash_different': 'IP'}, + }, + { + 'send_packet': pkt_list['IP/TCP'], + 'action': {'check_hash_different': 'IP/TCP'}, + }, + { + 'send_packet': pkt_list['IP/UDP'], + 'action': {'check_hash_different': 'IP/UDP'}, + }, + { + 'send_packet': pkt_list['IP/SCTP'], + 'action': {'check_hash_different': 'IP/SCTP'}, + }, + { + 'send_packet': pkt_list['IPv6/TCP'], + 'action': {'check_hash_different': 'IPv6/TCP'}, + }, + { + 'send_packet': pkt_list['IPv6/UDP'], + 'action': {'check_hash_different': 'IPv6/UDP'}, + }, + { + 'send_packet': pkt_list['IPv6/SCTP'], + 'action': 
{'check_hash_different': 'IPv6/SCTP'}, + }, + ], + } + self.rssprocess.handle_rss_distribute_cases(rss_test) + self.validate_packet_checksum(pkt_list, expect_pkt) + + def test_combined_case_with_fdir_queue_group(self): + fdirprocess = FdirProcessing(self, self.pmd_output, [self.tester_iface0, self.tester_iface1], self.rxq) + hash_and_queue_list = [] + queue_group = re.compile("end actions rss queues (\d+)\s(\d+)") + self.pmd_output.quit() + self.pmd_output.start_testpmd(cores="1S/4C/1T", param="--rxq={0} --txq={0}".format(self.rxq), + ports=[self.sriov_vfs_port[0].pci], socket=self.ports_socket) + self.pmd_output.execute_cmd("set verbose 1") + self.pmd_output.execute_cmd("set fwd rxonly") + self.pmd_output.execute_cmd("start") + pkt_list = [ + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1", chksum=0xfff3)/("X"*48)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1")/TCP(sport=22, chksum=0xfff3)/("X"*48)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1")/UDP(sport=22, chksum=0x1)/("X"*48)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1")/SCTP(sport=22, chksum=0x1)/("X"*48)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IPv6()/TCP(sport=22, chksum=0xe38)/("X"*48)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IPv6()/UDP(sport=22, chksum=0xe38)/("X"*48)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IPv6(src="ABAB:910A:2222:5498:8475:1111:3900:1010")/SCTP(sport=22, chksum=0xf)/("X"*48)', + ] + rss_rule_list = [ + 'flow create 0 ingress pattern eth / ipv4 / end actions rss types ipv4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types 
l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types l4-chksum end queues end / end', + ] + fdir_rule_list = [ + 'flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss queues 4 5 end / mark / end', + 'flow create 0 ingress pattern eth / ipv4 / udp / end actions rss queues 6 7 end / mark / end', + 'flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 / sctp / end actions rss queues 8 9 end / mark / end', + 'flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss queues 10 11 end / mark / end', + 'flow create 0 ingress pattern eth / ipv6 / udp / end actions rss queues 12 13 end / mark / end', + 'flow create 0 ingress pattern eth / ipv6 src is ABAB:910A:2222:5498:8475:1111:3900:1010 / sctp / end actions rss queues 14 15 end / mark / end', + ] + fdirprocess.create_rule(fdir_rule_list) + fdir_rule_list.insert(0, "") + for i in range(len(pkt_list)): + out = fdirprocess.send_pkt_get_output(pkt_list[i]) + hash_and_queue_tuple = self.rssprocess.get_hash_and_queues(out) + if i == 0: + check_mark(out, pkt_num=1, check_param={"port_id": 0, "rss": True, 'rxq': self.rxq}) + else: + queue_list = list(map(int, queue_group.search(fdir_rule_list[i]).groups())) + check_mark(out, pkt_num=1, check_param={"port_id": 0, 'rxq': self.rxq, "queue": queue_list, "mark_id": 0}) + hash_and_queue_list.append(hash_and_queue_tuple) + self.rssprocess.create_rule(rss_rule_list) + for i in range(len(pkt_list)): + out = fdirprocess.send_pkt_get_output(pkt_list[i]) + hashes, queues = self.rssprocess.get_hash_and_queues(out) + if i == 0: + check_mark(out, pkt_num=1, check_param={"port_id": 0, "rss": True, 'rxq': self.rxq}) + hashes_0 = hashes + else: + queue_list = list(map(int, 
queue_group.search(fdir_rule_list[i]).groups())) + check_mark(out, pkt_num=1, check_param={"port_id": 0, 'rxq': self.rxq, "queue": queue_list, "mark_id": 0}) + self.logger.info("pre_hash: {} test_hash: {}".format(hash_and_queue_list[i][0], hashes)) + self.verify(hash_and_queue_list[i][0] != hashes, "expect hash values changed") + self.rssprocess.destroy_rule(rule_id=[0, 1, 2]) + self.rssprocess.create_rule("flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 / end actions rss queues 0 1 2 3 end / end") + out = fdirprocess.send_pkt_get_output(pkt_list[0]) + hashes, queues = self.rssprocess.get_hash_and_queues(out) + check_mark(out, pkt_num=1, check_param={"port_id": 0, "queue": [0, 1, 2, 3]}) + self.logger.info("test_hash: {} post_hash: {}".format(hashes_0, hashes)) + self.verify(hashes == hashes_0, "expect hash values not changed") + def tear_down(self): # destroy all flow rule on port 0 self.dut.send_command("flow flush 0", timeout=1) From patchwork Thu Jan 13 15:07:51 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Huang, ZhiminX" X-Patchwork-Id: 105764 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 4FDD7A00C4; Thu, 13 Jan 2022 07:38:44 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 49EBD4271B; Thu, 13 Jan 2022 07:38:44 +0100 (CET) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id AB6DD40150 for ; Thu, 13 Jan 2022 07:38:41 +0100 (CET) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1642055921; x=1673591921; h=from:to:cc:subject:date:message-id:in-reply-to: references; bh=++mjNOO+TZK/AIkZ228KhnC79ZfAp23ABrr+PtMWUI8=; 
b=LHQzHNSvwDptzOwWlquRcqCRmmqueXYDV+bTacqVqO7Eeu6xEEJZKEuz 42PBB3XnpmGsPKu20NfjmgrHQ7dAquRS6W9QRxDEjshzvDDXA5mr6jZl3 iAFjuCNLKcdTsJRKDaVwlqJ/WQd9EC41MJIcJdbhVTJOBMYShlu7DFwVa 3K1Et/mbT9Z9h0bZAUGmQ0mOCO34Sl6gWzFF6or3tHZePo+9TujD6zwQL 28BP4XXBYTlGTQ6VRk/vG56JbTHnbGeIIpeL9ZUK+2UWOJEFhY1AfvPKF /VZYhsM47ELDAj4kT9U09/7eXDETsFMCcFRvRhkSgzLCJAHsIkQWyuc3A w==; X-IronPort-AV: E=McAfee;i="6200,9189,10225"; a="241501755" X-IronPort-AV: E=Sophos;i="5.88,284,1635231600"; d="scan'208";a="241501755" Received: from fmsmga001.fm.intel.com ([10.253.24.23]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Jan 2022 22:38:41 -0800 X-IronPort-AV: E=Sophos;i="5.88,284,1635231600"; d="scan'208";a="670419135" Received: from unknown (HELO localhost.localdomain) ([10.239.251.93]) by fmsmga001-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Jan 2022 22:38:39 -0800 From: Zhimin Huang To: dts@dpdk.org Cc: Zhimin Huang Subject: [dts][PATCH V2 2/4] tests/cvl_advanced_rss:add pf rss cases Date: Thu, 13 Jan 2022 23:07:51 +0800 Message-Id: <20220113150753.27031-3-zhiminx.huang@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20220113150753.27031-1-zhiminx.huang@intel.com> References: <20220113150753.27031-1-zhiminx.huang@intel.com> X-BeenThere: dts@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: test suite reviews and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dts-bounces@dpdk.org add pf rss test case with checksum as inputset. 
Signed-off-by: Zhimin Huang --- tests/TestSuite_cvl_advanced_rss.py | 454 +++++++++++++++++++++++++++- 1 file changed, 446 insertions(+), 8 deletions(-) diff --git a/tests/TestSuite_cvl_advanced_rss.py b/tests/TestSuite_cvl_advanced_rss.py index d84ef395..0ab61f74 100644 --- a/tests/TestSuite_cvl_advanced_rss.py +++ b/tests/TestSuite_cvl_advanced_rss.py @@ -37,7 +37,7 @@ from framework.packet import Packet from framework.pmd_output import PmdOutput from framework.test_case import TestCase -from .rte_flow_common import RssProcessing +from .rte_flow_common import RssProcessing, FdirProcessing, check_mark # toeplitz related data start mac_ipv4_toeplitz_basic_pkt = { @@ -683,6 +683,40 @@ mac_ipv4_all = { ], } +mac_ipv4_ipv4_chksum = { + 'sub_casename': 'mac_ipv4_ipv4_chksum', + 'port_id': 0, + 'rule': 'flow create 0 ingress pattern eth / ipv4 / end actions rss types ipv4-chksum end queues end / end', + 'test': [ + { + 'send_packet': eval(str(mac_ipv4_toeplitz_basic_pkt['ipv4-nonfrag']).replace('src="192.168.0.2"', + 'src="192.168.0.2", chksum=0x1')), + 'action': 'save_hash', + }, + { + 'send_packet': eval(str(mac_ipv4_toeplitz_basic_pkt['ipv4-nonfrag']).replace('src="192.168.0.2"', + 'src="192.168.0.2", chksum=0xffff')), + 'action': 'check_hash_different', + }, + { + 'send_packet': eval(str(mac_ipv4_toeplitz_basic_pkt['ipv4-nonfrag']).replace('dst="192.168.0.1", src="192.168.0.2"', + 'dst="192.168.1.1", src="192.168.1.2", chksum=0x1')), + 'action': 'check_hash_same', + }, + { + 'send_packet': mac_ipv6_toeplitz_basic_pkt['ipv6-nonfrag'], + 'action': 'check_no_hash', + }, + ], + 'post-test': [ + { + 'send_packet': eval(str(mac_ipv4_toeplitz_basic_pkt['ipv4-nonfrag']).replace('src="192.168.0.2"', + 'src="192.168.0.2", chksum=0x1')), + 'action': 'check_no_hash', + }, + ], +} + #mac ipv4_udp mac_ipv4_udp_l2_src = { 'sub_casename': 'mac_ipv4_udp_l2_src', @@ -1146,6 +1180,72 @@ mac_ipv4_udp_ipv4 = { ], } +mac_ipv4_udp_l4_chksum = { + 'sub_casename': 'mac_ipv4_udp_l4_chksum', 
+ 'port_id': 0, + 'rule': 'flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types l4-chksum end queues end / end', + 'test': [ + { + 'send_packet': eval(str(mac_ipv4_udp_toeplitz_basic_pkt['ipv4-udp']).replace("dport=23", "dport=23,chksum=0xffff")), + 'action': 'save_hash', + }, + { + 'send_packet': eval(str(mac_ipv4_udp_toeplitz_basic_pkt['ipv4-udp']).replace("dport=23", "dport=23,chksum=0xfffe")), + 'action': 'check_hash_different', + }, + { + 'send_packet': eval(str(mac_ipv4_udp_toeplitz_basic_pkt['ipv4-udp']).replace('dst="192.168.0.1", src="192.168.0.2"', + 'dst="192.168.1.1", src="192.168.1.2", chksum=0x3') + .replace('sport=22,dport=23', 'sport=32,dport=33,chksum=0xffff')), + 'action': 'check_hash_same', + }, + { + 'send_packet': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1",dst="192.168.0.2")/SCTP(sport=22, dport=23,chksum=0xffff)/("X"*48)', + 'action': 'check_no_hash', + }, + ], + 'post-test': [ + { + 'send_packet': eval(str(mac_ipv4_udp_toeplitz_basic_pkt['ipv4-udp']).replace("dport=23", "dport=23,chksum=0xffff")), + 'action': 'check_no_hash', + }, + ], +} + +mac_ipv4_udp_ipv4_chksum = { + 'sub_casename': 'mac_ipv4_udp_ipv4_chksum', + 'port_id': 0, + 'rule': 'flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types ipv4-chksum end queues end / end', + 'test': [ + { + 'send_packet': eval(str(mac_ipv4_udp_toeplitz_basic_pkt['ipv4-udp']).replace('src="192.168.0.2"', 'src="192.168.0.2",chksum=0xffff')), + 'action': 'save_hash', + }, + { + 'send_packet': eval(str(mac_ipv4_udp_toeplitz_basic_pkt['ipv4-udp']).replace('src="192.168.0.2"', 'src="192.168.0.2",chksum=0xfffe')), + 'action': 'check_hash_different', + }, + { + 'send_packet': eval(str(mac_ipv4_udp_toeplitz_basic_pkt['ipv4-udp']).replace('dst="192.168.0.1", src="192.168.0.2"', + 'dst="192.168.1.1", src="192.168.1.2", chksum=0xffff') + .replace('sport=22,dport=23', 'sport=32,dport=33,chksum=0xffff')), + 'action': 'check_hash_same', + }, + { + 
'send_packet': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1",dst="192.168.0.2")/SCTP(sport=22, dport=23,chksum=0xffff)/("X"*48)', + 'action': 'check_no_hash', + }, + ], + 'post-test': [ + { + 'send_packet': eval(str(mac_ipv4_udp_toeplitz_basic_pkt['ipv4-udp']).replace('src="192.168.0.2"', 'src="192.168.0.2",chksum=0xffff')), + 'action': 'check_no_hash', + }, + ], +} + +mac_ipv4_udp_chksum = [mac_ipv4_udp_l4_chksum, mac_ipv4_udp_ipv4_chksum] + #mac ipv4_tcp mac_ipv4_tcp_l2_src = { 'sub_casename': 'mac_ipv4_tcp_l2_src', @@ -1609,6 +1709,11 @@ mac_ipv4_tcp_ipv4 = { ], } +mac_ipv4_tcp_chksum = [eval(str(element).replace("mac_ipv4_udp", "mac_ipv4_tcp") + .replace("ipv4 / udp", "ipv4 / tcp") + .replace("/UDP(sport=", "/TCP(sport=")) + for element in mac_ipv4_udp_chksum] + #mac ipv4_sctp mac_ipv4_sctp_l2_src = { 'sub_casename': 'mac_ipv4_sctp_l2_src', @@ -2072,6 +2177,12 @@ mac_ipv4_sctp_ipv4 = { ], } +mac_ipv4_sctp_chksum = [eval(str(element).replace("mac_ipv4_udp", "mac_ipv4_sctp") + .replace("SCTP(sport=", "TCP(sport=") + .replace("ipv4 / udp", "ipv4 / sctp") + .replace("/UDP(sport=", "/SCTP(sport=")) + for element in mac_ipv4_udp_chksum] + #mac_ipv6 mac_ipv6_l2_src = { 'sub_casename': 'mac_ipv6_l2_src', @@ -2997,6 +3108,38 @@ mac_ipv6_udp_ipv6 = { ], } +mac_ipv6_udp_l4_chksum = { + 'sub_casename': 'mac_ipv6_udp_l4_chksum', + 'port_id': 0, + 'rule': 'flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types l4-chksum end queues end / end', + 'test': [ + { + 'send_packet': eval(str(mac_ipv6_toeplitz_basic_pkt['ipv6-udp']).replace("dport=23", "dport=23, chksum=0x1")), + 'action': 'save_hash', + }, + { + 'send_packet': eval(str(mac_ipv6_toeplitz_basic_pkt['ipv6-udp']).replace("dport=23", "dport=23, chksum=0x2")), + 'action': 'check_hash_different', + }, + { + 'send_packet': eval(str(mac_ipv6_toeplitz_basic_pkt['ipv6-udp']).replace("sport=22,dport=23", "sport=22,dport=23,chksum=0x1") + .replace("1800:2929", "1800:3939") + 
.replace("2020", "3030")), + 'action': 'check_hash_same', + }, + { + 'send_packet': eval(str(mac_ipv6_toeplitz_basic_pkt['ipv6-udp']).replace("/UDP(sport=22,dport=23)", "/SCTP(sport=22,dport=23,chksum=0x1)")), + 'action': 'check_no_hash', + }, + ], + 'post-test': [ + { + 'send_packet': eval(str(mac_ipv6_toeplitz_basic_pkt['ipv6-udp']).replace("dport=23", "dport=23, chksum=0x1")), + 'action': 'check_no_hash', + }, + ], +} + #mac_ipv6_tcp mac_ipv6_tcp_l2_src = { 'sub_casename': 'mac_ipv6_tcp_l2_src', @@ -3444,6 +3587,10 @@ mac_ipv6_tcp_ipv6 = { ], } +mac_ipv6_tcp_l4_chksum = eval(str(mac_ipv6_udp_l4_chksum).replace("mac_ipv6_udp", "mac_ipv6_tcp") + .replace("ipv6 / udp", "ipv6 / tcp") + .replace("/UDP(sport=", "/TCP(sport=")) + #mac_ipv6_sctp mac_ipv6_sctp_l2_src = { 'sub_casename': 'mac_ipv6_sctp_l2_src', @@ -3891,42 +4038,47 @@ mac_ipv6_sctp_ipv6 = { ], } +mac_ipv6_sctp_l4_chksum = eval(str(mac_ipv6_udp_l4_chksum).replace("mac_ipv6_udp", "mac_ipv6_sctp") + .replace("/SCTP(sport=", "/TCP(sport=") + .replace("ipv6 / udp", "ipv6 / sctp") + .replace("/UDP(sport=", "/SCTP(sport=")) + # toeplitz related data end -mac_ipv4_1 = [mac_ipv4_l2_src, mac_ipv4_l2_dst, mac_ipv4_l2src_l2dst] +mac_ipv4_1 = [mac_ipv4_l2_src, mac_ipv4_l2_dst, mac_ipv4_l2src_l2dst, mac_ipv4_ipv4_chksum] mac_ipv4_2 = [mac_ipv4_l3_src, mac_ipv4_l3_dst, mac_ipv4_all] mac_ipv4_udp = [mac_ipv4_udp_l2_src, mac_ipv4_udp_l2_dst, mac_ipv4_udp_l2src_l2dst, mac_ipv4_udp_l3_src, mac_ipv4_udp_l3_dst, mac_ipv4_udp_l3src_l4src, mac_ipv4_udp_l3src_l4dst, mac_ipv4_udp_l3dst_l4src, mac_ipv4_udp_l3dst_l4dst, - mac_ipv4_udp_l4_src, mac_ipv4_udp_l4_dst, mac_ipv4_udp_all, mac_ipv4_udp_ipv4] + mac_ipv4_udp_l4_src, mac_ipv4_udp_l4_dst, mac_ipv4_udp_all, mac_ipv4_udp_ipv4] + mac_ipv4_udp_chksum mac_ipv4_tcp = [mac_ipv4_tcp_l2_src, mac_ipv4_tcp_l2_dst, mac_ipv4_tcp_l2src_l2dst, mac_ipv4_tcp_l3_src, mac_ipv4_tcp_l3_dst, mac_ipv4_tcp_l3src_l4src, mac_ipv4_tcp_l3src_l4dst, mac_ipv4_tcp_l3dst_l4src, mac_ipv4_tcp_l3dst_l4dst, - 
mac_ipv4_tcp_l4_src, mac_ipv4_tcp_l4_dst, mac_ipv4_tcp_all, mac_ipv4_tcp_ipv4] + mac_ipv4_tcp_l4_src, mac_ipv4_tcp_l4_dst, mac_ipv4_tcp_all, mac_ipv4_tcp_ipv4] + mac_ipv4_tcp_chksum mac_ipv4_sctp = [mac_ipv4_sctp_l2_src, mac_ipv4_sctp_l2_dst, mac_ipv4_sctp_l2src_l2dst, mac_ipv4_sctp_l3_src, mac_ipv4_sctp_l3_dst, mac_ipv4_sctp_l3src_l4src, mac_ipv4_sctp_l3src_l4dst, mac_ipv4_sctp_l3dst_l4src, mac_ipv4_sctp_l3dst_l4dst, - mac_ipv4_sctp_l4_src, mac_ipv4_sctp_l4_dst, mac_ipv4_sctp_all, mac_ipv4_sctp_ipv4] + mac_ipv4_sctp_l4_src, mac_ipv4_sctp_l4_dst, mac_ipv4_sctp_all, mac_ipv4_sctp_ipv4] + mac_ipv4_sctp_chksum mac_ipv6 = [mac_ipv6_l2_src, mac_ipv6_l2_dst, mac_ipv6_l2src_l2dst, mac_ipv6_l3_src, mac_ipv6_l3_dst, mac_ipv6_all] mac_ipv6_udp = [mac_ipv6_udp_l2_src, mac_ipv6_udp_l2_dst, mac_ipv6_udp_l2src_l2dst, mac_ipv6_udp_l3_src, mac_ipv6_udp_l3_dst, mac_ipv6_udp_l3src_l4src, mac_ipv6_udp_l3src_l4dst, mac_ipv6_udp_l3dst_l4src, mac_ipv6_udp_l3dst_l4dst, - mac_ipv6_udp_l4_src, mac_ipv6_udp_l4_dst, mac_ipv6_udp_all, mac_ipv6_udp_ipv6] + mac_ipv6_udp_l4_src, mac_ipv6_udp_l4_dst, mac_ipv6_udp_all, mac_ipv6_udp_ipv6, mac_ipv6_udp_l4_chksum] mac_ipv6_tcp = [mac_ipv6_tcp_l2_src, mac_ipv6_tcp_l2_dst, mac_ipv6_tcp_l2src_l2dst, mac_ipv6_tcp_l3_src, mac_ipv6_tcp_l3_dst, mac_ipv6_tcp_l3src_l4src, mac_ipv6_tcp_l3src_l4dst, mac_ipv6_tcp_l3dst_l4src, mac_ipv6_tcp_l3dst_l4dst, - mac_ipv6_tcp_l4_src, mac_ipv6_tcp_l4_dst, mac_ipv6_tcp_all, mac_ipv6_tcp_ipv6] + mac_ipv6_tcp_l4_src, mac_ipv6_tcp_l4_dst, mac_ipv6_tcp_all, mac_ipv6_tcp_ipv6, mac_ipv6_tcp_l4_chksum] mac_ipv6_sctp = [mac_ipv6_sctp_l2_src, mac_ipv6_sctp_l2_dst, mac_ipv6_sctp_l2src_l2dst, mac_ipv6_sctp_l3_src, mac_ipv6_sctp_l3_dst, mac_ipv6_sctp_l3src_l4src, mac_ipv6_sctp_l3src_l4dst, mac_ipv6_sctp_l3dst_l4src, mac_ipv6_sctp_l3dst_l4dst, - mac_ipv6_sctp_l4_src, mac_ipv6_sctp_l4_dst, mac_ipv6_sctp_all, mac_ipv6_sctp_ipv6] + mac_ipv6_sctp_l4_src, mac_ipv6_sctp_l4_dst, mac_ipv6_sctp_all, mac_ipv6_sctp_ipv6, mac_ipv6_sctp_l4_chksum] # 
symmetric related data start mac_ipv4_symmetric = { @@ -5061,6 +5213,14 @@ class AdvancedRSSTest(TestCase): for i in rules_val: out = self.pmd_output.execute_cmd(i, timeout=1) self.verify('Invalid argument' in out, "rule %s validate successfully" % i) + + rules_chksum = [ + 'flow create 0 ingress pattern eth / ipv4 / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv6 / end actions rss types ipv6-chksum end queues end / end' + ] + for i in rules_chksum: + out = self.pmd_output.execute_cmd(i) + self.verify('Invalid argument' in out or 'Bad arguments' in out, "negative rules not support to create") def test_multirules(self): self.switch_testpmd(symmetric=True) @@ -5236,6 +5396,284 @@ class AdvancedRSSTest(TestCase): self.rssprocess.destroy_rule(port_id=0, rule_id=rule_id_0) self.verify(not self.rssprocess.error_msgs, 'some subcases failed') + def test_checksum_for_different_payload_length(self): + self.rssprocess.error_msgs = [] + self.pmd_output.quit() + self.pmd_output.start_testpmd(cores="1S/4C/1T", param="--rxq={0} --txq={0}".format(self.rxq), + eal_param=f"-a {self.pci0}", socket=self.ports_socket) + self.pmd_output.execute_cmd("set verbose 1") + self.pmd_output.execute_cmd("set fwd rxonly") + self.pmd_output.execute_cmd("start") + pkt_list = [ + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP()/("X"*48)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP()/("X"*64)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP()/UDP()/("X"*48)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP()/UDP()/("X"*64)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP()/TCP()/("X"*48)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP()/TCP()/("X"*64)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP()/SCTP()/("X"*48)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP()/SCTP()/("X"*64)', + ] + rule_list = [ + 'flow create 0 ingress 
pattern eth / ipv4 / udp / end actions rss types ipv4-udp end queues end / end', + 'flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types ipv4-tcp end queues end / end', + 'flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types ipv4-sctp end queues end / end', + 'flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types l4-chksum end queues end / end' + ] + test_temp = { + 'send_packet': '', + 'action': '', + } + pre_test = [] + for i in range(len(pkt_list)): + if i == 0: + tests = eval(str(test_temp).replace("'send_packet': ''", "'send_packet': '{}'".format(pkt_list[i])) + .replace("'action': ''", "'action': 'save_hash'")) + else: + tests = eval(str(test_temp).replace("'send_packet': ''", "'send_packet': '{}'".format(pkt_list[i])) + .replace("'action': ''", "'action': 'check_hash_same'")) + pre_test.append(tests) + self.rssprocess.handle_tests(pre_test) + test_5_tuple = [] + rules = self.rssprocess.create_rule(rule_list[0:3]) + self.rssprocess.check_rule(rule_list=rules) + for i in range(len(pkt_list)): + if i % 2 == 0: + tests = eval(str(test_temp).replace("'send_packet': ''", "'send_packet': '{}'".format(pkt_list[i])) + .replace("'action': ''", "'action': 'save_hash'")) + else: + tests = eval(str(test_temp).replace("'send_packet': ''", "'send_packet': '{}'".format(pkt_list[i])) + .replace("'action': ''", "'action': 'check_hash_same'")) + test_5_tuple.append(tests) + self.rssprocess.handle_tests(test_5_tuple) + test_l4_chksum = [] + rules = self.rssprocess.create_rule(rule_list[3:]) + self.rssprocess.check_rule(rule_list=rules) + for i in range(2, len(pkt_list)): + if i % 2 == 0: + tests = eval(str(test_temp).replace("'send_packet': ''", "'send_packet': '{}'".format(pkt_list[i])) + .replace("'action': ''", 
"'action': 'save_hash'")) + else: + tests = eval(str(test_temp).replace("'send_packet': ''", "'send_packet': '{}'".format(pkt_list[i])) + .replace("'action': ''", "'action': 'check_hash_different'")) + test_l4_chksum.append(tests) + self.rssprocess.handle_tests(test_l4_chksum) + test_ipv4_chksum = [] + ipv4_chksum_rule = eval(str(rule_list).replace("l4-chksum", "ipv4-chksum")) + rules = self.rssprocess.create_rule(ipv4_chksum_rule[3:] + ["flow create 0 ingress pattern eth / ipv4 / end actions rss types ipv4-chksum end queues end / end"]) + self.rssprocess.check_rule(rule_list=rules) + for i in range(len(pkt_list)): + if i % 2 == 0: + tests = eval(str(test_temp).replace("'send_packet': ''", "'send_packet': '{}'".format(pkt_list[i])) + .replace("'action': ''", "'action': 'save_hash'")) + else: + tests = eval(str(test_temp).replace("'send_packet': ''", "'send_packet': '{}'".format(pkt_list[i])) + .replace("'action': ''", "'action': 'check_hash_different'")) + test_ipv4_chksum.append(tests) + self.rssprocess.handle_tests(test_ipv4_chksum) + self.verify(not self.rssprocess.error_msgs, 'some subcases failed') + + def validate_packet_checksum(self, pkts, expect_pkts): + expect_chksum = dict() + checksum_pattern = re.compile("chksum.*=.*(0x[0-9a-z]+)") + self.tester.send_expect("scapy", ">>> ") + sniff_src = self.dut.get_mac_address(self.dut_ports[0]) + for pkt in expect_pkts: + self.tester.send_expect("p = %s" % expect_pkts[pkt], ">>>") + out = self.tester.send_expect("p.show2()", ">>>") + chksums = checksum_pattern.findall(out) + expect_chksum[pkt] = chksums + self.logger.info(expect_chksum) + self.tester.send_expect("exit()", "#") + for pkt in pkts: + inst = self.tester.tcpdump_sniff_packets(intf=self.tester_iface0, count=len(pkts), filters=[{'layer': 'ether', 'config': {'src': sniff_src}}]) + out = self.rssprocess.send_pkt_get_output(pkts=pkts[pkt]) + rece_pkt = self.tester.load_tcpdump_sniff_packets(inst) + rece_chksum = 
rece_pkt[0].sprintf("%IP.chksum%;%TCP.chksum%;%UDP.chksum%;%SCTP.chksum%").split(";") + self.logger.info(rece_chksum) + test_chksum = [] + [test_chksum.append(i) for i in rece_chksum if i != '??'] + self.logger.info("expect_chksum:{} test_chksum:{}".format(expect_chksum[pkt], test_chksum)) + self.verify(expect_chksum[pkt] == test_chksum, 'tx checksum is incorrect') + + def test_flow_rule_not_impact_rx_tx_chksum(self): + self.pmd_output.quit() + self.pmd_output.start_testpmd(cores="1S/4C/1T", param="--rxq={0} --txq={0}".format(self.rxq), + eal_param=f"-a {self.pci0}", socket=self.ports_socket) + self.pmd_output.execute_cmd("set verbose 1") + self.pmd_output.execute_cmd("port stop all") + self.pmd_output.execute_cmd("set fwd csum") + self.pmd_output.execute_cmd("csum set ip hw 0") + self.pmd_output.execute_cmd("csum set udp hw 0") + self.pmd_output.execute_cmd("csum set tcp hw 0") + self.pmd_output.execute_cmd("csum set sctp hw 0") + self.pmd_output.execute_cmd("port start all") + self.pmd_output.execute_cmd("start") + self.tester.check_interfaces_link(self.tester_iface0) + pkt_list = { + 'IP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1", chksum=0xfff3)/("X"*48)', + 'IP/TCP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1")/TCP(sport=22, chksum=0xfff3)/("X"*48)', + 'IP/UDP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1")/UDP(sport=22, chksum=0x1)/("X"*48)', + 'IP/SCTP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1")/SCTP(sport=22, chksum=0x0)/("X"*48)', + 'IPv6/TCP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IPv6()/TCP(sport=22, chksum=0xe38)/("X"*48)', + 'IPv6/UDP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IPv6()/UDP(sport=22, chksum=0xe38)/("X"*48)', + 'IPv6/SCTP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IPv6()/SCTP(sport=22, chksum=0x0)/("X"*48)', + } + expect_pkt = { + 'IP': 
'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1")/("X"*48)', + 'IP/TCP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1")/TCP(sport=22)/("X"*48)', + 'IP/UDP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1")/UDP(sport=22)/("X"*48)', + 'IP/SCTP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1")/SCTP(sport=22)/("X"*48)', + 'IPv6/TCP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IPv6()/TCP(sport=22)/("X"*48)', + 'IPv6/UDP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IPv6()/UDP(sport=22)/("X"*48)', + 'IPv6/SCTP': 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IPv6()/SCTP(sport=22)/("X"*48)', + } + rule_list = [ + 'flow create 0 ingress pattern eth / ipv4 / end actions rss types ipv4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types l4-chksum end queues end / end', + ] + self.validate_packet_checksum(pkt_list, expect_pkt) + rss_test = { + 'sub_casename': 'rss_test', + 'port_id': 0, + 'rule': rule_list, + 'pre-test': [ + { + 'send_packet': pkt_list['IP'], + 'action': {'save_hash': 'IP'}, + }, + { + 'send_packet': pkt_list['IP/TCP'], + 'action': {'save_hash': 'IP/TCP'}, + }, + { + 'send_packet': pkt_list['IP/UDP'], + 'action': {'save_hash': 'IP/UDP'}, + }, + { + 'send_packet': pkt_list['IP/SCTP'], + 'action': {'save_hash': 'IP/SCTP'}, + }, 
+ { + 'send_packet': pkt_list['IPv6/TCP'], + 'action': {'save_hash': 'IPv6/TCP'}, + }, + { + 'send_packet': pkt_list['IPv6/UDP'], + 'action': {'save_hash': 'IPv6/UDP'}, + }, + { + 'send_packet': pkt_list['IPv6/SCTP'], + 'action': {'save_hash': 'IPv6/SCTP'}, + }, + ], + 'test': [ + { + 'send_packet': pkt_list['IP'], + 'action': {'check_hash_different': 'IP'}, + }, + { + 'send_packet': pkt_list['IP/TCP'], + 'action': {'check_hash_different': 'IP/TCP'}, + }, + { + 'send_packet': pkt_list['IP/UDP'], + 'action': {'check_hash_different': 'IP/UDP'}, + }, + { + 'send_packet': pkt_list['IP/SCTP'], + 'action': {'check_hash_different': 'IP/SCTP'}, + }, + { + 'send_packet': pkt_list['IPv6/TCP'], + 'action': {'check_hash_different': 'IPv6/TCP'}, + }, + { + 'send_packet': pkt_list['IPv6/UDP'], + 'action': {'check_hash_different': 'IPv6/UDP'}, + }, + { + 'send_packet': pkt_list['IPv6/SCTP'], + 'action': {'check_hash_different': 'IPv6/SCTP'}, + }, + ], + } + self.rssprocess.handle_rss_distribute_cases(rss_test) + self.validate_packet_checksum(pkt_list, expect_pkt) + + def test_combined_case_with_fdir_queue_group(self): + fdirprocess = FdirProcessing(self, self.pmd_output, [self.tester_iface0, self.tester_iface1], self.rxq) + hash_and_queue_list = [] + queue_group = re.compile("end actions rss queues (\d+)\s(\d+)") + self.pmd_output.quit() + self.pmd_output.start_testpmd(cores="1S/4C/1T", param="--rxq={0} --txq={0}".format(self.rxq), + eal_param=f"-a {self.pci0}", socket=self.ports_socket) + self.pmd_output.execute_cmd("set verbose 1") + self.pmd_output.execute_cmd("set fwd rxonly") + self.pmd_output.execute_cmd("start") + pkt_list = [ + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1", chksum=0xfff3)/("X"*48)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1")/TCP(sport=22, chksum=0xfff3)/("X"*48)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1")/UDP(sport=22, chksum=0x1)/("X"*48)', + 
'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1")/SCTP(sport=22, chksum=0x1)/("X"*48)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IPv6()/TCP(sport=22, chksum=0xe38)/("X"*48)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IPv6()/UDP(sport=22, chksum=0xe38)/("X"*48)', + 'Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IPv6(src="ABAB:910A:2222:5498:8475:1111:3900:1010")/SCTP(sport=22, chksum=0xf)/("X"*48)', + ] + rss_rule_list = [ + 'flow create 0 ingress pattern eth / ipv4 / end actions rss types ipv4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv4 / tcp / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv4 / udp / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv4 / sctp / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv6 / tcp / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types l4-chksum end queues end / end', + 'flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types l4-chksum end queues end / end', + ] + fdir_rule_list = [ + 'flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv4 / tcp / end actions rss queues 4 5 end / mark / end', + 'flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv4 / udp / end actions rss queues 6 7 end / mark / end', + 'flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv4 / sctp / end actions rss queues 8 9 end / mark / end', + 'flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv6 / tcp / end actions rss queues 10 11 end / mark / end', + 'flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv6 / udp / end actions rss queues 12 13 end / mark / end', + 'flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv6 / sctp / end actions rss queues 14 15 
end / mark / end', + ] + fdirprocess.create_rule(fdir_rule_list) + fdir_rule_list.insert(0, "") + for i in range(len(pkt_list)): + out = fdirprocess.send_pkt_get_output(pkt_list[i]) + hash_and_queue_tuple = self.rssprocess.get_hash_and_queues(out) + if i == 0: + check_mark(out, pkt_num=1, check_param={"port_id": 0, "rss": True}) + else: + queue_list = list(map(int, queue_group.search(fdir_rule_list[i]).groups())) + check_mark(out, pkt_num=1, check_param={"port_id": 0, "queue": queue_list, "mark_id": 0}) + hash_and_queue_list.append(hash_and_queue_tuple) + self.rssprocess.create_rule(rss_rule_list) + for i in range(len(pkt_list)): + out = fdirprocess.send_pkt_get_output(pkt_list[i]) + hashes, queues = self.rssprocess.get_hash_and_queues(out) + if i == 0: + check_mark(out, pkt_num=1, check_param={"port_id": 0, "rss": True}) + hashes_0 = hashes + else: + queue_list = list(map(int, queue_group.search(fdir_rule_list[i]).groups())) + check_mark(out, pkt_num=1, check_param={"port_id": 0, "queue": queue_list, "mark_id": 0}) + self.logger.info("pre_hash: {} test_hash: {}".format(hash_and_queue_list[i][0], hashes)) + self.verify(hash_and_queue_list[i][0] != hashes, "expect hash values changed") + self.rssprocess.create_rule("flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv4 / end actions rss queues 0 1 2 3 end / end") + out = fdirprocess.send_pkt_get_output(pkt_list[0]) + hashes, queues = self.rssprocess.get_hash_and_queues(out) + check_mark(out, pkt_num=1, check_param={"port_id": 0, "queue": [1, 2, 3, 4]}) + self.logger.info("test_hash: {} post_hash: {}".format(hashes_0, hashes)) + self.verify(hashes == hashes_0, "expect hash values not changed") + def tear_down(self): # destroy all flow rule on port 0 self.dut.send_command("flow flush 0", timeout=1) From patchwork Thu Jan 13 15:07:52 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: "Huang, ZhiminX" X-Patchwork-Id: 105765 Return-Path: 
X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 77763A00C5; Thu, 13 Jan 2022 07:38:45 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 7310342727; Thu, 13 Jan 2022 07:38:45 +0100 (CET) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id 1FAE440150 for ; Thu, 13 Jan 2022 07:38:42 +0100 (CET) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1642055923; x=1673591923; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=zF972p9MZVwA77z9mCc/t9zHlgqg9JSzXQB9BkcMVes=; b=nAWVsSLRWhFqtF0edQapVZoFr8w1PHTsuvegsCJWZvTT+plijOZC3u5I hMmf9KjurI/bMe8LVGA81KEqn78GiT8DEVzhptSNrmbEnsm6xzdG1QAx7 PKqCBbMHdvXN22FXZbBDn8GE8V/Qd7ztymo6KgL1UOC0AtqsdrrYeQKCM 6Pg1JwPPxhivwANWe1byf3n+T6UzaIwYQl7AB+mHbfidA1HVHDG89icic P6Bl991ZbYlplz66RdqTqfWVSgeK0g4GoiVZ+7406QpfwKK8YZx4h1MnS V6p3VyJuz4/taq0HLSivFGwaPOllUmSmHEmL3BCkt53GBYl3leOa7Ha/N A==; X-IronPort-AV: E=McAfee;i="6200,9189,10225"; a="241501758" X-IronPort-AV: E=Sophos;i="5.88,284,1635231600"; d="scan'208";a="241501758" Received: from fmsmga001.fm.intel.com ([10.253.24.23]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Jan 2022 22:38:42 -0800 X-IronPort-AV: E=Sophos;i="5.88,284,1635231600"; d="scan'208";a="670419138" Received: from unknown (HELO localhost.localdomain) ([10.239.251.93]) by fmsmga001-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Jan 2022 22:38:41 -0800 From: Zhimin Huang To: dts@dpdk.org Cc: Zhimin Huang Subject: [dts][PATCH V2 3/4] test_plans/cvl_advanced_rss_test_plan:modify testplan Date: Thu, 13 Jan 2022 23:07:52 +0800 Message-Id: <20220113150753.27031-4-zhiminx.huang@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: 
<20220113150753.27031-1-zhiminx.huang@intel.com> References: <20220113150753.27031-1-zhiminx.huang@intel.com> MIME-Version: 1.0 X-BeenThere: dts@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: test suite reviews and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dts-bounces@dpdk.org 1.modify about tx checksum test steps 2.add mark to support fdir rule. Signed-off-by: Zhimin Huang --- test_plans/cvl_advanced_rss_test_plan.rst | 34 ++++++++++++++--------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/test_plans/cvl_advanced_rss_test_plan.rst b/test_plans/cvl_advanced_rss_test_plan.rst index 83ebf66d..4e119422 100644 --- a/test_plans/cvl_advanced_rss_test_plan.rst +++ b/test_plans/cvl_advanced_rss_test_plan.rst @@ -2376,8 +2376,8 @@ Test case: Checksum for different payload length 8. Send packets of step 2. Check the IPV4/UDP/TCP/SCTP packets with different payload length have different hash value. -Test case: Set HW csum, flow rule doesn’t impact RX checksum and TX checksum -============================================================================ +Test case: Set HW csum, flow rule does not impact RX checksum and TX checksum +============================================================================= 1. launch testpmd without "--disable-rss":: ./x86_64-native-linuxapp-gcc/app/dpdk-testpmd -c f -n 6 -- -i --rxq=16 --txq=16 @@ -2394,9 +2394,14 @@ Test case: Set HW csum, flow rule doesn’t impact RX checksum and TX checksum set verbose 1 start -3. Capture the tx packet at tester port:: +3. 
Capture the tx packet at tester port and check checksum values same as expect pkts:: - tcpdump -i enp216s0f0 -Q in -e -n -v -x + take a IPV4 for example: + p = Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1")/("X"*48) + p.show2() + expect pkts checksum value: chksum= 0x3b0f + + tcpdump -i ens7 'ether src 40:a6:b7:0b:76:28 and ether[12:2] != 0x88cc' -Q in -w /tmp/tester/sniff_ens7.pcap -c 7 4. Send packets:: @@ -2408,7 +2413,10 @@ Test case: Set HW csum, flow rule doesn’t impact RX checksum and TX checksum p6=Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IPv6()/UDP(sport=22, chksum=0xe38)/("X"*48) p7=Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IPv6()/SCTP(sport=22, chksum=0xf)/("X"*48) - Check rx checksum good or bad, check if the tx checksum correct. + take a IPV4 for example: + get sniff_ens7.pcap checksum value: 0x3b0f + + check the tx checksum correct. 5. Create rss rules with chsum as inputset:: @@ -2420,7 +2428,7 @@ Test case: Set HW csum, flow rule doesn’t impact RX checksum and TX checksum flow create 0 ingress pattern eth / ipv6 / udp / end actions rss types l4-chksum end queues end / end flow create 0 ingress pattern eth / ipv6 / sctp / end actions rss types l4-chksum end queues end / end -6. Send the same packets, check the hash value changed, check rx and tx checksum, get the same result. +6. repeat 3-4 steps, send the same packets, check the hash value changed, check rx and tx checksum, get the same result. Test case: Combined case with fdir queue group ============================================== @@ -2433,12 +2441,12 @@ Test case: Combined case with fdir queue group 2. 
Create fdir rules to queue group:: - flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv4 / tcp / end actions rss queues 4 5 end / end - flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv4 / udp / end actions rss queues 6 7 end / end - flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv4 / sctp / end actions rss queues 8 9 end / end - flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv6 / tcp / end actions rss queues 10 11 end / end - flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv6 / udp / end actions rss queues 12 13 end / end - flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv6 / sctp / end actions rss queues 14 15 end / end + flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv4 / tcp / end actions rss queues 4 5 end / mark / end + flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv4 / udp / end actions rss queues 6 7 end / mark / end + flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv4 / sctp / end actions rss queues 8 9 end / mark / end + flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv6 / tcp / end actions rss queues 10 11 end / mark / end + flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv6 / udp / end actions rss queues 12 13 end / mark / end + flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / ipv6 / sctp / end actions rss queues 14 15 end / mark / end 3. Send packets:: @@ -2450,7 +2458,7 @@ Test case: Combined case with fdir queue group p6=Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IPv6()/UDP(sport=22, chksum=0xe38)/("X"*48) p7=Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IPv6(src="ABAB:910A:2222:5498:8475:1111:3900:1010")/SCTP(sport=22, chksum=0xf)/("X"*48) - Check p2-p7 are distributed to specified queue group, + Check p2-p7 are distributed to specified queue group and mark id, p1 is distributed by RSS hash value. 4. 
Create rss rule with inputset checksum:: From patchwork Thu Jan 13 15:07:53 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: "Huang, ZhiminX" X-Patchwork-Id: 105766 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 9867BA00C4; Thu, 13 Jan 2022 07:38:46 +0100 (CET) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 933C94272D; Thu, 13 Jan 2022 07:38:46 +0100 (CET) Received: from mga11.intel.com (mga11.intel.com [192.55.52.93]) by mails.dpdk.org (Postfix) with ESMTP id 94E5642727 for ; Thu, 13 Jan 2022 07:38:44 +0100 (CET) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1642055924; x=1673591924; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=2lp5UVd0a5d16uDUcrubFlb6Pj0Lr2NbvtH5cXBywF0=; b=M90WbIe9+krsFgW9C0iwSW9Vxn3FccJBxeu/lFBNEei/VxR3vvfMzcox xpUZ5NB/kCDB3738ACfMsTpnv8f9WScby6G7Ug6Q988fxPXfPmWEXlPdO n4NZlQZqm9F2wiVyMoDnwCbXfIajiWHczgjbs+pqOHMLOtv7BBUG5M98w jcQR/gjjCmgq8MTwODtuEjR8EdhoXFTLjcQWlXfVDPhRlQ0P5xY+yQ1Xg CQB/t40ihtmppjrLDxtsNYZsvnN0+ML0L3qUxKZMhhyyGpPvpRiU5pnni 9yB27t79lggu22ppTS8hfStsPu9xkyOEZ7YefuT/+exiZzseyz37rrUjt Q==; X-IronPort-AV: E=McAfee;i="6200,9189,10225"; a="241501763" X-IronPort-AV: E=Sophos;i="5.88,284,1635231600"; d="scan'208";a="241501763" Received: from fmsmga001.fm.intel.com ([10.253.24.23]) by fmsmga102.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Jan 2022 22:38:44 -0800 X-IronPort-AV: E=Sophos;i="5.88,284,1635231600"; d="scan'208";a="670419143" Received: from unknown (HELO localhost.localdomain) ([10.239.251.93]) by fmsmga001-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Jan 2022 22:38:42 -0800 From: Zhimin Huang To: dts@dpdk.org Cc: 
Zhimin Huang Subject: [dts][PATCH V2 4/4] test_plans/cvl_advanced_iavf_rss_test_plan:modify test plan Date: Thu, 13 Jan 2022 23:07:53 +0800 Message-Id: <20220113150753.27031-5-zhiminx.huang@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20220113150753.27031-1-zhiminx.huang@intel.com> References: <20220113150753.27031-1-zhiminx.huang@intel.com> MIME-Version: 1.0 X-BeenThere: dts@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: test suite reviews and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dts-bounces@dpdk.org 1.remove no hit pattern test steps. 2.modify about tx checksum test steps. 3.add mark to support fdir rule. Signed-off-by: Zhimin Huang --- .../cvl_advanced_iavf_rss_test_plan.rst | 58 +++++++------------ 1 file changed, 21 insertions(+), 37 deletions(-) diff --git a/test_plans/cvl_advanced_iavf_rss_test_plan.rst b/test_plans/cvl_advanced_iavf_rss_test_plan.rst index 59f63fbd..f991aaf7 100755 --- a/test_plans/cvl_advanced_iavf_rss_test_plan.rst +++ b/test_plans/cvl_advanced_iavf_rss_test_plan.rst @@ -2783,10 +2783,6 @@ Test case: MAC_IPV4_IPV4_CHKSUM p3=Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:01")/IP(src="192.168.1.1",dst="192.168.1.2",chksum=0x1)/ ("X"*48) -5. not hit pattern:: - - p4=Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="ABAB:910A:2222:5498:8475:1111:3900:1010")/("X"*48) - Test case: MAC_IPV4_UDP_CHKSUM ============================== Subcase 1: MAC_IPV4_UDP_L4_CHKSUM @@ -2807,10 +2803,6 @@ Subcase 1: MAC_IPV4_UDP_L4_CHKSUM p3=Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:01")/IP(src="192.168.1.1", dst="192.168.1.2",chksum=0x3)/UDP(sport=32, dport=33,chksum=0xffff)/("X"*48) -5. 
not hit pattern:: - - p4=Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1",dst="192.168.0.2")/SCTP(sport=22, dport=23,chksum=0xffff)/("X"*48) - Subcase 2: MAC_IPV4_UDP_IPV4_CHKSUM ----------------------------------- 1. create rss rule:: @@ -2829,10 +2821,6 @@ Subcase 2: MAC_IPV4_UDP_IPV4_CHKSUM p3=Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:01")/IP(src="192.168.1.1", dst="192.168.1.2",chksum=0xffff)/UDP(sport=32, dport=33,chksum=0xffff)/("X"*48) -5. not hit pattern:: - - p4=Ether(dst="00:11:22:33:44:55", src="52:00:00:00:00:00")/IP(src="192.168.0.1",dst="192.168.0.2",chksum=0xffff)/SCTP(sport=22, dport=23)/("X"*48) - Test case: MAC_IPV4_TCP_CHKSUM ============================== The rules and packets in this test case is similar to "Test case: MAC_IPV4_UDP_CHKSUM", @@ -2885,10 +2873,6 @@ Test case: MAC_IPV6_UDP_L4_CHKSUM p3 = Ether(src="52:00:00:00:00:01", dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2021", src="ABAB:910A:2222:5498:8475:1111:3900:1011")/UDP(sport=32, dport=33, chksum=0x1)/("X"*48) -5. not hit pattern:: - - p4 = Ether(src="52:00:00:00:00:00", dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="ABAB:910A:2222:5498:8475:1111:3900:1010")/TCP(sport=22, dport=23, chksum=0x1)/("X"*49) - Test case: MAC_IPV6_TCP_L4_CHKSUM ================================= 1. create rss rule:: @@ -2907,10 +2891,6 @@ Test case: MAC_IPV6_TCP_L4_CHKSUM p3 = Ether(src="52:00:00:00:00:01", dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2021", src="ABAB:910A:2222:5498:8475:1111:3900:1011")/TCP(sport=32, dport=33, chksum=0x1)/("X"*48) -5. not hit pattern:: - - p4 = Ether(src="52:00:00:00:00:00", dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="ABAB:910A:2222:5498:8475:1111:3900:1010")/SCTP(sport=22, dport=23, chksum=0x1)/("X"*49) - Test case: MAC_IPV6_SCTP_L4_CHKSUM ================================== 1. 
+3. Capture the tx packet at the tester port and check that the checksum values match the expected packets::
- check that the tx checksum is correct.
+ flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 / sctp / end actions rss queues 8 9 end / mark / end