From patchwork Wed Sep 14 05:38:55 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Weiyuan Li X-Patchwork-Id: 116260 Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 1E983A0032; Wed, 14 Sep 2022 07:38:49 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id F1D5440151; Wed, 14 Sep 2022 07:38:48 +0200 (CEST) Received: from mga06.intel.com (mga06b.intel.com [134.134.136.31]) by mails.dpdk.org (Postfix) with ESMTP id A9FA640141 for ; Wed, 14 Sep 2022 07:38:46 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1663133926; x=1694669926; h=from:to:cc:subject:date:message-id:mime-version: content-transfer-encoding; bh=QCo1jo8S8law7RUUgOinZ9luuUZSHTK1gR3U/m0gxyQ=; b=jE/7SyNUVIjAw28bnC8kIG6mVDXls+OZnH5potOk3G9gBDZ4ECW1o9Ix a03m11NVHsG/0mXqcslZqTrxv5oaTnZg3+lPvFFt4SjthOcUz0+8Cz2q6 +3+XnExSdsO4CsBQw+mx7mB44sc3VzqXoKm+ZI0YkKEF+ZSaylene6el1 wEzeg3J4S2yGJVwDX+edWcmSLKFwrMlm8CEvzxwfWAgCCrxkNjX6GHMSL Mn5KVEFbUpbSbsN8N1S427SMXn9E+k9gF/rjvrH0LftbRKYYQcI8VJZD7 /Ek431vz/HDWMDlOhJ1F/8kLWhfq900WBFCJgsYJDP8RpB37E/w0fHYij Q==; X-IronPort-AV: E=McAfee;i="6500,9779,10469"; a="360080773" X-IronPort-AV: E=Sophos;i="5.93,313,1654585200"; d="scan'208";a="360080773" Received: from orsmga006.jf.intel.com ([10.7.209.51]) by orsmga104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Sep 2022 22:38:45 -0700 X-IronPort-AV: E=Sophos;i="5.93,313,1654585200"; d="scan'208";a="594245975" Received: from unknown (HELO localhost.localdomain) ([10.239.252.248]) by orsmga006-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 13 Sep 2022 22:38:42 -0700 From: Weiyuan Li To: dts@dpdk.org Cc: Weiyuan Li , Jiale Song , Peng Yuan Subject: [dts][PATCH V3 1/4] 
tests/ice_iavf_fdir: add multicast new test case Date: Wed, 14 Sep 2022 13:38:55 +0800 Message-Id: <20220914053858.11506-1-weiyuanx.li@intel.com> X-Mailer: git-send-email 2.27.0 MIME-Version: 1.0 X-BeenThere: dts@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: test suite reviews and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dts-bounces@dpdk.org Add ipv4/ipv6 multicast new case for dpdk22.07 new feature. Signed-off-by: Weiyuan Li --- Tested-by: Jiale Song < songx.jiale@intel.com> Acked-by: Peng Yuan v2: -modify the ice_iavf_fdir script tear_down method and destroy environment first. V3: -modify script and test plan disable promisc and trust mode. -add multicast detailed description reference. -modify script and test plan configure multicast address. tests/TestSuite_ice_iavf_fdir.py | 286 ++++++++++++++++++++++++++++++- 1 file changed, 285 insertions(+), 1 deletion(-) diff --git a/tests/TestSuite_ice_iavf_fdir.py b/tests/TestSuite_ice_iavf_fdir.py index 1cc71f67..d6ca832a 100644 --- a/tests/TestSuite_ice_iavf_fdir.py +++ b/tests/TestSuite_ice_iavf_fdir.py @@ -29,6 +29,20 @@ MAC_IPV4_PAY = { ], } +MAC_IPV4_PAY_MULTICAST = { + "match": [ + 'Ether(dst="11:22:33:44:55:66")/IP(src="192.168.0.20",dst="224.0.0.1", proto=255, ttl=2, tos=4) / Raw("X" * 80)', + 'Ether(dst="11:22:33:44:55:66")/IP(src="192.168.0.20",dst="224.0.0.1", proto=255, ttl=2, tos=4)/UDP(sport=22,dport=23)/Raw("x" * 80)', + ], + "mismatch": [ + 'Ether(dst="11:22:33:44:55:66")/IP(src="192.168.0.22",dst="224.0.0.1", proto=255, ttl=2, tos=4) / Raw("X" * 80)', + 'Ether(dst="11:22:33:44:55:66")/IP(src="192.168.0.20",dst="224.0.0.2", proto=255, ttl=2, tos=4) / Raw("X" * 80)', + 'Ether(dst="11:22:33:44:55:66")/IP(src="192.168.0.20",dst="224.0.0.1", proto=1, ttl=2, tos=4) / Raw("X" * 80)', + 'Ether(dst="11:22:33:44:55:66")/IP(src="192.168.0.20",dst="224.0.0.1", proto=255, ttl=3, tos=4) / Raw("X" * 80)', + 
'Ether(dst="11:22:33:44:55:66")/IP(src="192.168.0.20",dst="224.0.0.1", proto=255, ttl=2, tos=9) / Raw("X" * 80)', + ], +} + MAC_IPV4_PAY_protocol = { "match": [ 'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", proto=1)/Raw("x" * 80)', @@ -46,6 +60,23 @@ MAC_IPV4_PAY_protocol = { ], } +MAC_IPV4_PAY_multicast_protocol = { + "match": [ + 'Ether(dst="11:22:33:44:55:66")/IP(src="192.168.0.20",dst="224.0.0.1", proto=1)/Raw("x" * 80)', + 'Ether(dst="11:22:33:44:55:66")/IP(src="192.168.0.19",dst="224.0.0.1", proto=1)/Raw("x" * 80)', + 'Ether(dst="11:22:33:44:55:66")/IP(src="192.168.0.20",dst="224.0.0.1", ttl=2, tos=4)/UDP(sport=22,dport=23)/Raw("x" * 80)', + 'Ether(dst="11:22:33:44:55:66")/IP(src="192.168.0.20",dst="224.0.0.1", proto=17)/TCP(sport=22,dport=23)/Raw("x" * 80)', + 'Ether(dst="11:22:33:44:55:66")/IP(src="192.168.0.20",dst="224.0.0.1", proto=17, ttl=2, tos=4)/Raw("x" * 80)', + 'Ether(dst="11:22:33:44:55:66")/IP(src="192.168.0.20",dst="224.0.0.1", proto=17)/Raw("x" * 80)', + ], + "mismatch": [ + 'Ether(dst="11:22:33:44:55:66")/IP(src="192.168.0.20",dst="224.0.0.2", proto=1)/Raw("x" * 80)', + 'Ether(dst="11:22:33:44:55:66")/IP(src="192.168.0.20",dst="224.0.0.1", proto=6)/UDP(sport=22,dport=23)/Raw("x" * 80)', + 'Ether(dst="11:22:33:44:55:66")/IP(src="192.168.0.20",dst="224.0.0.1")/TCP(sport=22,dport=23)/Raw("x" * 80)', + 'Ether(dst="11:22:33:44:55:66")/IP(src="192.168.0.20",dst="224.0.0.1", ttl=2, tos=4)/SCTP()/Raw("x" * 80)', + ], +} + MAC_IPV4_UDP = { "match": [ 'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", ttl=2, tos=4) /UDP(sport=22,dport=23)/Raw("x" * 80)' @@ -61,6 +92,20 @@ MAC_IPV4_UDP = { ], } +MAC_IPV6_PAY_MULTICAST = { + "match": [ + 'Ether(dst="11:22:33:44:55:66")/IPv6(dst="ff01::2", src="2001::2", nh=0, tc=1, hlim=2)/("X"*480)', + 'Ether(dst="11:22:33:44:55:66")/IPv6(dst="ff01::2", src="2001::2", nh=0, tc=1, hlim=2)/UDP(sport=22,dport=23)/("X"*480)', + ], + "mismatch": [ + 
'Ether(dst="11:22:33:44:55:66")/IPv6(dst="ff01::1", src="2001::2", nh=0, tc=1, hlim=2)/("X"*480)', + 'Ether(dst="11:22:33:44:55:66")/IPv6(dst="ff01::2", src="2001::1", nh=0, tc=1, hlim=2)/("X"*480)', + 'Ether(dst="11:22:33:44:55:66")/IPv6(dst="ff01::2", src="2001::2", nh=2, tc=1, hlim=2)/("X"*480)', + 'Ether(dst="11:22:33:44:55:66")/IPv6(dst="ff01::2", src="2001::2", nh=0, tc=2, hlim=2)/("X"*480)', + 'Ether(dst="11:22:33:44:55:66")/IPv6(dst="ff01::2", src="2001::2", nh=0, tc=1, hlim=5)/("X"*480)', + ], +} + MAC_IPV4_TCP = { "match": [ 'Ether(dst="00:11:22:33:44:55")/IP(src="192.168.0.20",dst="192.168.0.21", ttl=2, tos=4) /TCP(sport=22,dport=23)/Raw("x" * 80)' @@ -121,6 +166,22 @@ MAC_IPV6_PAY_protocol = { ], } +MAC_IPV6_PAY_multicast_protocol = { + "match": [ + 'Ether(dst="11:22:33:44:55:66")/IPv6(dst="ff01::2", src="2001::2", nh=17, tc=1, hlim=2)/("X"*480)', + 'Ether(dst="11:22:33:44:55:66")/IPv6(dst="ff01::2")/UDP(sport=22,dport=23)/("X"*480)', + 'Ether(dst="11:22:33:44:55:66")/IPv6(dst="ff01::2", nh=17)/TCP(sport=22,dport=23)/("X"*480)', + 'Ether(dst="11:22:33:44:55:66")/IPv6(dst="ff01::2")/UDP(sport=22,dport=23)/TCP(sport=22,dport=23)/("X"*480)', + 'Ether(dst="11:22:33:44:55:66")/IPv6(dst="ff01::2", nh=6)/("X"*480)', + 'Ether(dst="11:22:33:44:55:66")/IPv6(dst="ff01::2")/TCP(sport=22,dport=23)/("X"*480)', + ], + "mismatch": [ + 'Ether(dst="11:22:33:44:55:66")/IPv6(dst="ff01::3", nh=1)/("X"*480)', + 'Ether(dst="11:22:33:44:55:66")/IPv6(dst="ff01::2")/SCTP()/("X"*480)', + 'Ether(dst="11:22:33:44:55:66")/IPv6(dst="ff01::2", nh=1)/TCP(sport=22,dport=23)/("X"*480)', + ], +} + MAC_IPV6_UDP = { "match": [ 'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2",tc=1, hlim=2)/UDP(sport=22,dport=23)/("X"*480)' @@ -136,6 +197,94 @@ MAC_IPV6_UDP = { ], } +tv_mac_ipv4_pay_queue_index_multicast = { + "name": "test_mac_ipv4_pay_queue_index_multicast", + "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 
224.0.0.1 proto is 255 ttl is 2 tos is 4 / end actions queue index 1 / mark / end", + "scapy_str": MAC_IPV4_PAY_MULTICAST, + "check_param": {"port_id": 0, "queue": 1, "mark_id": 0}, +} + +tv_mac_ipv4_pay_rss_multicast = { + "name": "test_mac_ipv4_pay_rss_multicast", + "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 224.0.0.1 proto is 255 ttl is 2 tos is 4 / end actions rss queues 2 3 end / mark / end", + "scapy_str": MAC_IPV4_PAY_MULTICAST, + "check_param": {"port_id": 0, "queue": [2, 3], "mark_id": 0}, +} + +tv_mac_ipv4_pay_passthru_multicast = { + "name": "test_mac_ipv4_pay_passthru_multicast", + "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 224.0.0.1 proto is 255 ttl is 2 tos is 4 / end actions passthru / mark / end", + "scapy_str": MAC_IPV4_PAY_MULTICAST, + "check_param": {"port_id": 0, "passthru": 1, "mark_id": 0}, +} + +tv_mac_ipv4_pay_drop_multicast = { + "name": "test_mac_ipv4_pay_drop_multicast", + "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 224.0.0.1 proto is 255 ttl is 2 tos is 4 / end actions drop / mark / end", + "scapy_str": MAC_IPV4_PAY_MULTICAST, + "check_param": {"port_id": 0, "drop": 1, "mark_id": 0}, +} + +tv_mac_ipv4_pay_mark_rss_multicast = { + "name": "test_mac_ipv4_pay_mark_rss_multicast", + "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 224.0.0.1 proto is 255 ttl is 2 tos is 4 / end actions mark / rss / end", + "scapy_str": MAC_IPV4_PAY_MULTICAST, + "check_param": {"port_id": 0, "passthru": 1, "mark_id": 0}, +} + +tv_mac_ipv4_pay_mark_multicast = { + "name": "test_mac_ipv4_pay_mark_multicast", + "rule": "flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 dst is 224.0.0.1 proto is 255 ttl is 2 tos is 4 / end actions mark / end", + "scapy_str": MAC_IPV4_PAY_MULTICAST, + "check_param": {"port_id": 0, "mark_id": 0}, +} + +tv_mac_ipv6_pay_queue_index_multicast = { + "name": "test_mac_ipv6_pay_queue_index_multicast", 
+ "rule": "flow create 0 ingress pattern eth / ipv6 dst is ff01::2 src is 2001::2 proto is 0 hop is 2 tc is 1 / end actions queue index 15 / mark / end", + "scapy_str": MAC_IPV6_PAY_MULTICAST, + "check_param": {"port_id": 0, "queue": 15, "mark_id": 0}, +} + +tv_mac_ipv6_pay_rss_multicast = { + "name": "test_mac_ipv6_pay_rss_multicast", + "rule": "flow create 0 ingress pattern eth / ipv6 dst is ff01::2 src is 2001::2 proto is 0 hop is 2 tc is 1 / end actions rss queues 8 9 10 11 12 13 14 15 end / mark / end", + "scapy_str": MAC_IPV6_PAY_MULTICAST, + "check_param": { + "port_id": 0, + "queue": [8, 9, 10, 11, 12, 13, 14, 15], + "mark_id": 0, + }, +} + +tv_mac_ipv6_pay_passthru_multicast = { + "name": "test_mac_ipv6_pay_passthru_multicast", + "rule": "flow create 0 ingress pattern eth / ipv6 dst is ff01::2 src is 2001::2 proto is 0 hop is 2 tc is 1 / end actions passthru / mark / end", + "scapy_str": MAC_IPV6_PAY_MULTICAST, + "check_param": {"port_id": 0, "passthru": 1, "mark_id": 0}, +} + +tv_mac_ipv6_pay_drop_multicast = { + "name": "test_mac_ipv6_pay_drop_multicast", + "rule": "flow create 0 ingress pattern eth / ipv6 dst is ff01::2 src is 2001::2 proto is 0 hop is 2 tc is 1 / end actions drop / mark / end", + "scapy_str": MAC_IPV6_PAY_MULTICAST, + "check_param": {"port_id": 0, "drop": 1, "mark_id": 0}, +} + +tv_mac_ipv6_pay_mark_rss_multicast = { + "name": "test_mac_ipv6_pay_mark_rss_multicast", + "rule": "flow create 0 ingress pattern eth / ipv6 dst is ff01::2 src is 2001::2 proto is 0 hop is 2 tc is 1 / end actions mark / rss / end", + "scapy_str": MAC_IPV6_PAY_MULTICAST, + "check_param": {"port_id": 0, "passthru": 1, "mark_id": 0}, +} + +tv_mac_ipv6_pay_mark_multicast = { + "name": "test_mac_ipv6_pay_mark_multicast", + "rule": "flow create 0 ingress pattern eth / ipv6 dst is ff01::2 src is 2001::2 proto is 0 hop is 2 tc is 1 / end actions mark / end", + "scapy_str": MAC_IPV6_PAY_MULTICAST, + "check_param": {"port_id": 0, "mark_id": 0}, +} + MAC_IPV6_TCP = { 
"match": [ 'Ether(dst="00:11:22:33:44:55")/IPv6(dst="CDCD:910A:2222:5498:8475:1111:3900:2020", src="2001::2",tc=1, hlim=2)/TCP(sport=22,dport=23)/("X"*480)' @@ -7223,6 +7372,15 @@ vectors_ipv4_pay = [ tv_mac_ipv4_pay_mark, ] +vectors_ipv4_pay_multicast = [ + tv_mac_ipv4_pay_queue_index_multicast, + tv_mac_ipv4_pay_rss_multicast, + tv_mac_ipv4_pay_passthru_multicast, + tv_mac_ipv4_pay_drop_multicast, + tv_mac_ipv4_pay_mark_rss_multicast, + tv_mac_ipv4_pay_mark_multicast, +] + vectors_ipv4_udp = [ tv_mac_ipv4_udp_drop, tv_mac_ipv4_udp_queue_group, @@ -7232,6 +7390,15 @@ vectors_ipv4_udp = [ tv_mac_ipv4_udp_mark, ] +vectors_ipv6_pay_multicast = [ + tv_mac_ipv6_pay_queue_index_multicast, + tv_mac_ipv6_pay_rss_multicast, + tv_mac_ipv6_pay_passthru_multicast, + tv_mac_ipv6_pay_drop_multicast, + tv_mac_ipv6_pay_mark_rss_multicast, + tv_mac_ipv6_pay_mark_multicast, +] + vectors_ipv4_tcp = [ tv_mac_ipv4_tcp_drop, tv_mac_ipv4_tcp_queue_group, @@ -8554,6 +8721,16 @@ class TestICEIAVFFdir(TestCase): self.pmd_output.execute_cmd( "port config 1 rss-hash-key ipv4 1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd" ) + if self.running_case in [ + "test_mac_ipv4_pay_multicast", + "test_mac_ipv4_multicast_protocol", + "test_mac_ipv6_pay_multicast", + "test_mac_ipv6_multicast_protocol", + ]: + self.pmd_output.execute_cmd("set promisc all off") + self.pmd_output.execute_cmd("set allmulti all on") + # add multicast mac address to pmd + self.pmd_output.execute_cmd("mcast_addr add 0 11:22:33:44:55:66") res = self.pmd_output.wait_link_status_up("all", timeout=15) self.verify(res is True, "there have port link is down") self.pmd_output.execute_cmd("start") @@ -8700,6 +8877,9 @@ class TestICEIAVFFdir(TestCase): def test_mac_ipv4_pay(self): self.rte_flow_process(vectors_ipv4_pay) + def test_mac_ipv4_pay_multicast(self): + self.rte_flow_process(vectors_ipv4_pay_multicast) + def test_mac_ipv4_udp(self): 
self.rte_flow_process(vectors_ipv4_udp) @@ -8712,6 +8892,9 @@ class TestICEIAVFFdir(TestCase): def test_mac_ipv6_pay(self): self.rte_flow_process(vectors_ipv6_pay) + def test_mac_ipv6_pay_multicast(self): + self.rte_flow_process(vectors_ipv6_pay_multicast) + def test_mac_ipv6_udp(self): self.rte_flow_process(vectors_ipv6_udp) @@ -8828,6 +9011,53 @@ class TestICEIAVFFdir(TestCase): out4, pkt_num=6, check_param={"port_id": 0, "passthru": 1}, stats=False ) + def test_mac_ipv4_multicast_protocol(self): + rules = [ + "flow create 0 ingress pattern eth / ipv4 dst is 224.0.0.1 proto is 1 / end actions queue index 1 / mark id 1 / end", + "flow create 0 ingress pattern eth / ipv4 dst is 224.0.0.1 proto is 17 / end actions passthru / mark id 3 / end", + ] + + # validate rules + self.validate_fdir_rule(rules, check_stats=True) + self.check_fdir_rule(port_id=0, stats=False) + + # create rules + rule_li = self.create_fdir_rule(rules, check_stats=True) + self.check_fdir_rule(port_id=0, rule_list=rule_li) + + # pkt1 and pkt2 in "match" match rule 0, pkt3-6 match rule 1. + out1 = self.send_pkts_getouput(MAC_IPV4_PAY_multicast_protocol["match"][0:2]) + rfc.check_iavf_fdir_mark( + out1, + pkt_num=2, + check_param={"port_id": 0, "mark_id": 1, "queue": 1}, + stats=True, + ) + + out2 = self.send_pkts_getouput(MAC_IPV4_PAY_multicast_protocol["match"][2:6]) + rfc.check_iavf_fdir_mark( + out2, + pkt_num=4, + check_param={"port_id": 0, "mark_id": 3, "passthru": 1}, + stats=True, + ) + + # send mismatched packets: + out3 = self.send_pkts_getouput(MAC_IPV4_PAY_multicast_protocol["mismatch"]) + rfc.check_iavf_fdir_mark( + out3, pkt_num=4, check_param={"port_id": 0, "passthru": 1}, stats=False + ) + + # destroy the rules and check there is no rule listed. 
+ self.destroy_fdir_rule(rule_id=rule_li, port_id=0) + self.check_fdir_rule(port_id=0, stats=False) + + # send matched packet + out4 = self.send_pkts_getouput(MAC_IPV4_PAY_multicast_protocol["match"]) + rfc.check_iavf_fdir_mark( + out4, pkt_num=6, check_param={"port_id": 0, "passthru": 1}, stats=False + ) + def test_mac_ipv6_protocol(self): rules = [ "flow create 0 ingress pattern eth / ipv6 dst is CDCD:910A:2222:5498:8475:1111:3900:2020 proto is 17 / end actions rss queues 5 6 end / mark id 0 / end", @@ -8875,6 +9105,53 @@ class TestICEIAVFFdir(TestCase): out4, pkt_num=6, check_param={"port_id": 0, "passthru": 1}, stats=False ) + def test_mac_ipv6_multicast_protocol(self): + rules = [ + "flow create 0 ingress pattern eth / ipv6 dst is ff01::2 proto is 17 / end actions rss queues 5 6 end / mark id 0 / end", + "flow create 0 ingress pattern eth / ipv6 dst is ff01::2 proto is 6 / end actions mark id 2 / rss / end", + ] + + # validate rules + self.validate_fdir_rule(rules, check_stats=True) + self.check_fdir_rule(port_id=0, stats=False) + + # create rules + rule_li = self.create_fdir_rule(rules, check_stats=True) + self.check_fdir_rule(port_id=0, rule_list=rule_li) + + # pkt1-4 in "match" match rule 0, pkt5-6 match rule 1. + out1 = self.send_pkts_getouput(MAC_IPV6_PAY_multicast_protocol["match"][0:4]) + rfc.check_iavf_fdir_mark( + out1, + pkt_num=4, + check_param={"port_id": 0, "mark_id": 0, "queue": [5, 6]}, + stats=True, + ) + + out2 = self.send_pkts_getouput(MAC_IPV6_PAY_multicast_protocol["match"][4:6]) + rfc.check_iavf_fdir_mark( + out2, + pkt_num=2, + check_param={"port_id": 0, "mark_id": 2, "passthru": 1}, + stats=True, + ) + + # send mismatched packets: + out3 = self.send_pkts_getouput(MAC_IPV6_PAY_multicast_protocol["mismatch"]) + rfc.check_iavf_fdir_mark( + out3, pkt_num=3, check_param={"port_id": 0, "passthru": 1}, stats=False + ) + + # destroy the rules and check there is no rule listed. 
+ self.destroy_fdir_rule(rule_id=rule_li, port_id=0) + self.check_fdir_rule(port_id=0, stats=False) + + # send matched packet + out4 = self.send_pkts_getouput(MAC_IPV6_PAY_multicast_protocol["match"]) + rfc.check_iavf_fdir_mark( + out4, pkt_num=6, check_param={"port_id": 0, "passthru": 1}, stats=False + ) + @skip_unsupported_pkg("os default") def test_mac_ipv4_gtpu_eh_without_teid(self): rules = [ @@ -11648,8 +11925,15 @@ class TestICEIAVFFdir(TestCase): def tear_down(self): # destroy all flow rule on port 0 - self.dut.kill_all() + if self.running_case in [ + "test_mac_ipv4_pay_multicast", + "test_mac_ipv4_multicast_protocol", + "test_mac_ipv6_pay_multicast", + "test_mac_ipv6_multicast_protocol", + ]: + self.pmd_output.execute_cmd("mcast_addr remove 0 11:22:33:44:55:66") self.destroy_env() + self.dut.kill_all() if getattr(self, "session_secondary", None): self.dut.close_session(self.session_secondary) if getattr(self, "session_third", None):