new file mode 100644
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#include <stdlib.h>
+#include <rte_flow.h>
+
+#include "jump_flow.h"
+
+
+struct rte_flow *
+create_jump_flow(uint16_t port_id, uint16_t group_id, struct rte_flow_error *error)
+{
+ struct rte_flow_action actions[2] = {0};
+ struct rte_flow_item patterns[2] = {0};
+ struct rte_flow *flow = NULL;
+
+ struct rte_flow_attr flow_attr = {
+ .ingress = 1,
+ .group = 0,
+ };
+
+ struct rte_flow_action_jump jump = {
+ .group = group_id,
+ };
+
+ /* Set up jump action to target group */
+ actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
+ actions[0].conf = &jump;
+ actions[1].type = RTE_FLOW_ACTION_TYPE_END;
+
+ /* match on ethernet */
+ patterns[0].type = RTE_FLOW_ITEM_TYPE_ETH;
+ patterns[1].type = RTE_FLOW_ITEM_TYPE_END;
+
+ /* Validate the rule and create it. */
+ if (rte_flow_validate(port_id, &flow_attr, patterns, actions, error) == 0)
+ flow = rte_flow_create(port_id, &flow_attr, patterns, actions, error);
+ return flow;
+}
new file mode 100644
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#ifndef JUMP_FLOW_H
+#define JUMP_FLOW_H
+
+
+struct rte_flow *
+create_jump_flow(uint16_t port_id, uint16_t group_id, struct rte_flow_error *error);
+
+
+#endif /* JUMP_FLOW_H */
@@ -12,11 +12,15 @@ deps += ['argparse']
sources = files(
        'main.c',
        'flow_skeleton.c',
        'jump_flow.c',
        'snippets/snippet_match_ipv4.c',
        'snippets/snippet_match_gre.c',
        'snippets/snippet_match_mpls.c',
        'snippets/snippet_match_nsh.c',
        'snippets/snippet_match_port_affinity.c',
        'snippets/snippet_match_roce_ib_bth.c',
        'snippets/snippet_re_route_to_kernel.c',
        'snippets/snippet_switch_granularity.c',
)
# The code snippets are not utilized.
new file mode 100644
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#include <stdlib.h>
+#include <rte_flow.h>
+
+#include "../common.h"
+#include "../jump_flow.h"
+#include "snippet_match_nsh.h"
+
/* Configure the global flow attributes for the NSH snippet:
 * a transfer (E-Switch) rule placed in group 1 at priority 0.
 * Group 1 is reached via the jump flow created in the actions callback.
 */
static void
snippet_init_nsh(void)
{
	flow_attr.transfer = 1;
	flow_attr.group = 1;
	flow_attr.priority = 0;
}
+
+static void
+snippet_match_nsh_create_actions(uint16_t port_id, struct rte_flow_action *action)
+{
+ /* jump to group 1 */
+ struct rte_flow_error error;
+ create_jump_flow(port_id, 1, &error);
+
+ struct rte_flow_action_port_id *portid = calloc(1, sizeof(struct rte_flow_action_port_id));
+ if (portid == NULL)
+ fprintf(stderr, "Failed to allocate memory for port_id\n");
+
+ /* To match on NSH to port_id 1. */
+ portid->id = 1;
+
+ action[0].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
+ action[0].conf = portid;
+ action[1].type = RTE_FLOW_ACTION_TYPE_END;
+}
+
+static void
+snippet_match_nsh_create_patterns(struct rte_flow_item *pattern)
+{
+ struct rte_flow_item_udp *spec;
+ struct rte_flow_item_udp *mask;
+
+ spec = calloc(1, sizeof(struct rte_flow_item_udp));
+ if (spec == NULL)
+ fprintf(stderr, "Failed to allocate memory for spec\n");
+
+ mask = calloc(1, sizeof(struct rte_flow_item_udp));
+ if (mask == NULL)
+ fprintf(stderr, "Failed to allocate memory for mask\n");
+
+ /* Set the patterns. */
+ pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
+ pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
+
+ pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
+ spec->hdr.dst_port = RTE_BE16(250);
+ mask->hdr.dst_port = RTE_BE16(0xffff);
+ pattern[2].spec = spec;
+ pattern[2].mask = mask;
+
+ pattern[3].type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE;
+ pattern[4].type = RTE_FLOW_ITEM_TYPE_NSH;
+ pattern[5].type = RTE_FLOW_ITEM_TYPE_ETH;
+ pattern[6].type = RTE_FLOW_ITEM_TYPE_END;
+}
+
/* Template-table hook required by the skeleton; this snippet uses only the
 * synchronous rte_flow API, so no table is created and NULL is returned.
 */
static struct rte_flow_template_table *
snippet_nsh_flow_create_table(__rte_unused uint16_t port_id,
			      __rte_unused struct rte_flow_error *error)
{
	return NULL;
}
new file mode 100644
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
#ifndef SNIPPET_MATCH_NSH_H
#define SNIPPET_MATCH_NSH_H

/* Network Service Header (NSH)
 * provides a mechanism for metadata exchange along the instantiated service paths.
 * The NSH is the Service Function Chaining (SFC) encapsulation required to support the
 * SFC architecture.
 * NSH, a data-plane protocol, can be matched using the existing item: RTE_FLOW_ITEM_TYPE_NSH.
 * Currently this is supported ONLY when NSH follows VXLAN-GPE,
 * and the "l3_vxlan_en=1" and "dv_flow_en=1" (default) device arguments are set.
 */

#define MAX_PATTERN_NUM 7 /* Maximal number of patterns for this example. */
#define MAX_ACTION_NUM 2 /* Maximal number of actions for this example. */

/* Configure flow_attr: transfer rule in group 1, priority 0. */
static void
snippet_init_nsh(void);
#define snippet_init snippet_init_nsh

/* Build the action list (PORT_ID to port 1) and the group-0 jump rule. */
static void
snippet_match_nsh_create_actions(uint16_t port_id, struct rte_flow_action *actions);
#define snippet_skeleton_flow_create_actions snippet_match_nsh_create_actions

/* Build the ETH/IPV6/UDP/VXLAN_GPE/NSH/ETH match pattern. */
static void
snippet_match_nsh_create_patterns(struct rte_flow_item *pattern);
#define snippet_skeleton_flow_create_patterns snippet_match_nsh_create_patterns

/* Template-table hook; unused by this snippet (returns NULL). */
static struct rte_flow_template_table *
snippet_nsh_flow_create_table(uint16_t port_id, struct rte_flow_error *error);
#define snippet_skeleton_flow_create_table snippet_nsh_flow_create_table

#endif /* SNIPPET_MATCH_NSH_H */
new file mode 100644
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#include <stdlib.h>
+#include <rte_flow.h>
+
+#include "../common.h"
+#include "../jump_flow.h"
+#include "snippet_match_roce_ib_bth.h"
+
/* Configure the global flow attributes for the RoCE IB BTH snippet:
 * an ingress rule placed in group 1 at priority 1.
 * Group 1 is reached via the jump flow created in the actions callback.
 */
static void
snippet_init_roce_ib_bth(void)
{
	flow_attr.ingress = 1;
	flow_attr.group = 1;
	flow_attr.priority = 1;
}
+
+static void
+snippet_match_roce_ib_bth_create_actions(uint16_t port_id, struct rte_flow_action *action)
+{
+ /* jump to group 1 */
+ struct rte_flow_error error;
+ create_jump_flow(port_id, 1, &error);
+
+ /* Create one action that moves the packet to the selected queue. */
+ struct rte_flow_action_queue *queue = calloc(1, sizeof(struct rte_flow_action_queue));
+ if (queue == NULL)
+ fprintf(stderr, "Failed to allocate memory for queue\n");
+
+ /* Set the selected queue. */
+ queue->index = 1;
+
+ /* Set the action move packet to the selected queue. */
+ action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
+ action[0].conf = queue;
+ action[1].type = RTE_FLOW_ACTION_TYPE_END;
+}
+
+static void
+snippet_match_roce_ib_bth_create_patterns(struct rte_flow_item *pattern)
+{
+ struct rte_flow_item_ib_bth *bth;
+
+ bth = calloc(1, sizeof(struct rte_flow_item_ib_bth));
+ if (bth == NULL)
+ fprintf(stderr, "Failed to allocate memory for bth\n");
+
+ bth->hdr.opcode = 0x81;
+ bth->hdr.dst_qp[0] = 0x0;
+ bth->hdr.dst_qp[1] = 0xab;
+ bth->hdr.dst_qp[2] = 0xd4;
+
+ /* Set the patterns. */
+ pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
+ pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
+ pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
+ pattern[3].type = RTE_FLOW_ITEM_TYPE_IB_BTH;
+ pattern[3].spec = bth;
+ pattern[4].type = RTE_FLOW_ITEM_TYPE_END;
+}
+
/* Template-table hook required by the skeleton; this snippet uses only the
 * synchronous rte_flow API, so no table is created and NULL is returned.
 */
static struct rte_flow_template_table *
snippet_match_roce_ib_bth_create_table(__rte_unused uint16_t port_id,
				       __rte_unused struct rte_flow_error *error)
{
	return NULL;
}
new file mode 100644
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#ifndef SNIPPET_MATCH_ROCE_IB_BTH_H
+#define SNIPPET_MATCH_ROCE_IB_BTH_H
+
+/* Matching RoCE IB BTH opcode/dest_qp
+ * IB BTH fields (opcode, and dst_qp) can be matched now using the new IB BTH item:
+ * RTE_FLOW_ITEM_TYPE_IB_BTH.
+ * Currently, this item is supported on group > 1, and supports only the RoCEv2 packet.
+ * The input BTH match item is defaulted to match one RoCEv2 packet.
+ */
+
+#define MAX_PATTERN_NUM 5 /* Maximal number of patterns for this example. */
+#define MAX_ACTION_NUM 2 /* Maximal number of actions for this example. */
+
+static void
+snippet_init_roce_ib_bth(void);
+#define snippet_init snippet_init_roce_ib_bth
+
+static void
+snippet_match_roce_ib_bth_create_actions(uint16_t port_id, struct rte_flow_action *action);
+#define snippet_skeleton_flow_create_actions snippet_match_roce_ib_bth_create_actions
+
+static void
+snippet_match_roce_ib_bth_create_patterns(struct rte_flow_item *pattern);
+#define snippet_skeleton_flow_create_patterns snippet_match_roce_ib_bth_create_patterns
+
+static struct rte_flow_template_table *
+snippet_match_roce_ib_bth_create_table(__rte_unused uint16_t port_id,
+__rte_unused struct rte_flow_error *error);
+#define snippet_skeleton_flow_create_table snippet_match_roce_ib_bth_create_table
+
+#endif /* SNIPPET_MATCH_ROCE_IB_BTH_H */
new file mode 100644
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#include <stdlib.h>
+#include <rte_flow.h>
+
+#include "../common.h"
+#include "../jump_flow.h"
+#include "snippet_switch_granularity.h"
+
/* Configure the global flow attributes for the switch-granularity snippet:
 * a transfer (E-Switch) rule in group 1 at priority 1; ingress is
 * explicitly cleared since transfer rules are not per-direction.
 */
static void
snippet_init_switch_granularity(void)
{
	flow_attr.ingress = 0;
	flow_attr.transfer = 1;
	flow_attr.group = 1;
	flow_attr.priority = 1;
}
+
+static void
+snippet_match_switch_granularity_create_actions(uint16_t port_id, struct rte_flow_action *action)
+{
+ /* jump to group 1 */
+ struct rte_flow_error error;
+ create_jump_flow(port_id, 1, &error);
+
+ struct rte_flow_action_ethdev *represented_port = calloc(1,
+ sizeof(struct rte_flow_action_ethdev));
+ if (represented_port == NULL)
+ fprintf(stderr, "Failed to allocate memory for represented_port\n");
+
+ represented_port->port_id = 0;
+ action[0].type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT;
+ action[0].conf = represented_port;
+ action[1].type = RTE_FLOW_ACTION_TYPE_END;
+}
+
+static void
+snippet_match_switch_granularity_create_patterns(struct rte_flow_item *pattern)
+{
+ /* Set the patterns. */
+ pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
+ pattern[1].type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT;
+ pattern[1].spec = NULL;
+ pattern[1].mask = NULL;
+ pattern[2].type = RTE_FLOW_ITEM_TYPE_END;
+}
+
/* Template-table hook required by the skeleton; this snippet uses only the
 * synchronous rte_flow API, so no table is created and NULL is returned.
 */
static struct rte_flow_template_table *
create_table_switch_granularity(__rte_unused uint16_t port_id,
				__rte_unused struct rte_flow_error *error)
{
	return NULL;
}
new file mode 100644
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
#ifndef SNIPPET_SWITCH_GRANULARITY_H
#define SNIPPET_SWITCH_GRANULARITY_H

/* Switch Granularity Rule Matching
 * supports the represented_port item in pattern.
 * If the spec and the mask are both set to NULL, the source vPort
 * will not be added to the matcher, it will match patterns for all
 * vPort to reduce rule count and memory consumption.
 * When testpmd starts with a PF, a VF-rep0 and a VF-rep1,
 * the snippets will redirect packets from VF0 and VF1 to the wire.
 */

#define MAX_PATTERN_NUM 3 /* Maximal number of patterns for this example. */
#define MAX_ACTION_NUM 2 /* Maximal number of actions for this example. */

/* Configure flow_attr: transfer rule in group 1, priority 1. */
static void
snippet_init_switch_granularity(void);
#define snippet_init snippet_init_switch_granularity

/* Build the action list (REPRESENTED_PORT to ethdev 0) and the jump rule. */
static void
snippet_match_switch_granularity_create_actions(uint16_t port_id, struct rte_flow_action *action);
#define snippet_skeleton_flow_create_actions snippet_match_switch_granularity_create_actions

/* Build the ETH / REPRESENTED_PORT (NULL spec/mask) match pattern. */
static void
snippet_match_switch_granularity_create_patterns(struct rte_flow_item *pattern);
#define snippet_skeleton_flow_create_patterns snippet_match_switch_granularity_create_patterns

/* Template-table hook; unused by this snippet (returns NULL). */
static struct rte_flow_template_table *
create_table_switch_granularity(uint16_t port_id, struct rte_flow_error *error);
#define snippet_skeleton_flow_create_table create_table_switch_granularity

#endif /* SNIPPET_SWITCH_GRANULARITY_H */