@@ -388,6 +388,14 @@ to determine the L2 header to be written to the packet before sending
the packet out to a particular ethdev_tx node.
``rte_node_ip4_rewrite_add()`` is control path API to add next-hop info.
+ip4_reassembly
+~~~~~~~~~~~~~~
+This node is an intermediate node that reassembles IPv4 fragmented packets,
+while non-fragmented packets pass through it unaffected. The node rewrites
+its stream and moves it to the next node.
+The fragment table and death row table should be setup via the
+``rte_node_ip4_reassembly_configure`` API.
+
ip6_lookup
~~~~~~~~~~
This node is an intermediate node that does LPM lookup for the received
@@ -218,6 +218,7 @@ static struct rte_node_register ethdev_rx_node_base = {
/* Default pkt classification node */
[ETHDEV_RX_NEXT_PKT_CLS] = "pkt_cls",
[ETHDEV_RX_NEXT_IP4_LOOKUP] = "ip4_lookup",
+ [ETHDEV_RX_NEXT_IP4_REASSEMBLY] = "ip4_reassembly",
},
};
@@ -39,6 +39,7 @@ struct ethdev_rx_node_elem {
enum ethdev_rx_next_nodes {
	ETHDEV_RX_NEXT_IP4_LOOKUP,
	ETHDEV_RX_NEXT_PKT_CLS,
+	/* Appended before MAX so existing enumerator values stay stable. */
+	ETHDEV_RX_NEXT_IP4_REASSEMBLY,
	ETHDEV_RX_NEXT_MAX,
};
new file mode 100644
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Marvell.
+ */
+
+#include <arpa/inet.h>
+#include <stdlib.h>
+#include <sys/socket.h>
+
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_graph.h>
+#include <rte_graph_worker.h>
+#include <rte_ip.h>
+#include <rte_ip_frag.h>
+#include <rte_mbuf.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+
+#include "rte_node_ip4_api.h"
+
+#include "ip4_reassembly_priv.h"
+#include "node_private.h"
+
+/* Linked-list element carrying the per-node reassembly configuration from
+ * rte_node_ip4_reassembly_configure() until the node's init callback
+ * consumes it (see ip4_reassembly_node_init()).
+ */
+struct ip4_reassembly_elem {
+	struct ip4_reassembly_elem *next; /* Next element; list is LIFO. */
+	struct ip4_reassembly_ctx ctx;    /* tbl/dr pair for one node instance. */
+	rte_node_t node_id;               /* Graph node id this config targets. */
+};
+
+/* IP4 reassembly global data struct */
+struct ip4_reassembly_node_main {
+	struct ip4_reassembly_elem *head; /* Head of the config list; entries are prepended. */
+};
+
+typedef struct ip4_reassembly_ctx ip4_reassembly_ctx_t;
+typedef struct ip4_reassembly_elem ip4_reassembly_elem_t;
+
+/* Process-wide configuration list. No locking — presumably written only from
+ * the control path before graph creation; TODO confirm.
+ */
+static struct ip4_reassembly_node_main ip4_reassembly_main;
+
+/**
+ * @internal
+ *
+ * Run one mbuf through the reassembly table. A non-fragmented packet is
+ * returned unchanged; a fragment is absorbed into @p tbl and NULL is
+ * returned until the final fragment completes the packet, at which point
+ * the reassembled mbuf chain is returned.
+ */
+static __rte_always_inline struct rte_mbuf *
+ip4_reassemble_mbuf(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
+		    struct rte_mbuf *mbuf)
+{
+	struct rte_ipv4_hdr *ipv4_hdr;
+
+	ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
+					   sizeof(struct rte_ether_hdr));
+	if (!rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr))
+		return mbuf;
+
+	/* prepare mbuf: setup l2_len/l3_len. */
+	mbuf->l2_len = sizeof(struct rte_ether_hdr);
+	mbuf->l3_len = sizeof(struct rte_ipv4_hdr);
+
+	return rte_ipv4_frag_reassemble_packet(tbl, dr, mbuf, rte_rdtsc(), ipv4_hdr);
+}
+
+/* Process callback: reassemble IPv4 fragments in the incoming stream and
+ * move completed/non-fragmented packets to next edge 0. Packets whose
+ * reassembly is still pending are held in the fragment table and dropped
+ * from the stream for now.
+ */
+static uint16_t
+ip4_reassembly_node_process(struct rte_graph *graph, struct rte_node *node, void **objs,
+			    uint16_t nb_objs)
+{
+#define PREFETCH_OFFSET 4
+	struct rte_mbuf *mbuf, *mbuf_out;
+	struct rte_ip_frag_death_row *dr;
+	struct ip4_reassembly_ctx *ctx;
+	struct rte_ip_frag_tbl *tbl;
+	uint16_t idx = 0;
+	void **to_next;
+	int i;
+
+	ctx = (struct ip4_reassembly_ctx *)node->ctx;
+
+	/* Get core specific reassembly tbl */
+	tbl = ctx->tbl;
+	dr = ctx->dr;
+
+	/* Warm up the prefetch window with the first few IPv4 headers. */
+	for (i = 0; i < PREFETCH_OFFSET && i < nb_objs; i++) {
+		rte_prefetch0(rte_pktmbuf_mtod_offset((struct rte_mbuf *)objs[i], void *,
+						      sizeof(struct rte_ether_hdr)));
+	}
+
+	/* Results are written back in place; idx never exceeds i, so the
+	 * in-place rewrite of node->objs is safe.
+	 */
+	to_next = node->objs;
+	for (i = 0; i < nb_objs - PREFETCH_OFFSET; i++) {
+#if RTE_GRAPH_BURST_SIZE > 64
+		/* Prefetch next-next mbufs */
+		if (likely(i + 8 < nb_objs))
+			rte_prefetch0(objs[i + 8]);
+#endif
+		rte_prefetch0(rte_pktmbuf_mtod_offset((struct rte_mbuf *)objs[i + PREFETCH_OFFSET],
+						      void *, sizeof(struct rte_ether_hdr)));
+		mbuf = (struct rte_mbuf *)objs[i];
+
+		mbuf_out = ip4_reassemble_mbuf(tbl, dr, mbuf);
+		if (mbuf_out)
+			to_next[idx++] = (void *)mbuf_out;
+	}
+
+	/* Tail of the burst: no look-ahead left to prefetch. */
+	for (; i < nb_objs; i++) {
+		mbuf = (struct rte_mbuf *)objs[i];
+
+		mbuf_out = ip4_reassemble_mbuf(tbl, dr, mbuf);
+		if (mbuf_out)
+			to_next[idx++] = (void *)mbuf_out;
+	}
+
+	node->idx = idx;
+	/* NOTE(review): the node registers with nb_edges ==
+	 * RTE_NODE_IP4_REASSEMBLY_NEXT_MAX (currently 0) yet moves its stream
+	 * to edge 0 here — confirm edges are populated at graph setup time.
+	 */
+	rte_node_next_stream_move(graph, node, 0);
+	/* Free up to PREFETCH_OFFSET death-row entries per burst (was a bare
+	 * magic number 4 with the same value).
+	 */
+	rte_ip_frag_free_death_row(dr, PREFETCH_OFFSET);
+
+	return idx;
+}
+
+/* Control-path API: record a (fragment table, death row) pair for each
+ * target node id. The node's init callback later copies the matching pair
+ * into its context. Entries are heap-allocated and intentionally kept for
+ * the process lifetime.
+ */
+int
+rte_node_ip4_reassembly_configure(struct rte_node_ip4_reassembly_cfg *cfg, uint16_t cnt)
+{
+	ip4_reassembly_elem_t *elem;
+	int i;
+
+	for (i = 0; i < cnt; i++) {
+		elem = malloc(sizeof(ip4_reassembly_elem_t));
+		if (elem == NULL)
+			return -ENOMEM; /* NOTE(review): entries registered so far are kept. */
+		elem->ctx.dr = cfg[i].dr;
+		elem->ctx.tbl = cfg[i].tbl;
+		elem->node_id = cfg[i].node_id;
+		/* Prepend to the global list consumed by ip4_reassembly_node_init(). */
+		elem->next = ip4_reassembly_main.head;
+		ip4_reassembly_main.head = elem;
+	}
+
+	return 0;
+}
+
+/* Node init callback: copy the tbl/dr configuration registered for this
+ * node id into the node's private context. Runs once per node instance at
+ * graph creation.
+ */
+static int
+ip4_reassembly_node_init(const struct rte_graph *graph, struct rte_node *node)
+{
+	ip4_reassembly_ctx_t *ctx = (ip4_reassembly_ctx_t *)node->ctx;
+	ip4_reassembly_elem_t *elem = ip4_reassembly_main.head;
+
+	RTE_SET_USED(graph);
+	while (elem) {
+		if (elem->node_id == node->id) {
+			/* Update node specific context */
+			memcpy(ctx, &elem->ctx, sizeof(ip4_reassembly_ctx_t));
+			break;
+		}
+		elem = elem->next;
+	}
+
+	/* Returns 0 even when no configuration matched; the node then runs
+	 * with a zeroed ctx — presumably configure() must be called before
+	 * graph create. TODO confirm.
+	 */
+	return 0;
+}
+
+static struct rte_node_register ip4_reassembly_node = {
+	.process = ip4_reassembly_node_process,
+	.name = "ip4_reassembly",
+
+	.init = ip4_reassembly_node_init,
+
+	/* NOTE(review): RTE_NODE_IP4_REASSEMBLY_NEXT_MAX is currently 0, so
+	 * the node registers with no next edges while its process function
+	 * moves the stream to edge 0 — confirm edges are added at graph
+	 * setup, or consider a pkt_drop edge here.
+	 */
+	.nb_edges = RTE_NODE_IP4_REASSEMBLY_NEXT_MAX,
+};
+
+/* Accessor so other compilation units can fetch this node's registration. */
+struct rte_node_register *
+ip4_reassembly_node_get(void)
+{
+	return &ip4_reassembly_node;
+}
+
+RTE_NODE_REGISTER(ip4_reassembly_node);
new file mode 100644
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Marvell International Ltd.
+ */
+
+#ifndef __INCLUDE_IP4_REASSEMBLY_PRIV_H__
+#define __INCLUDE_IP4_REASSEMBLY_PRIV_H__
+
+/**
+ * @internal
+ *
+ * Ip4_reassembly context structure.
+ */
+struct ip4_reassembly_ctx {
+	struct rte_ip_frag_tbl *tbl;      /* Fragment table used for reassembly. */
+	struct rte_ip_frag_death_row *dr; /* Death row holding expired fragments to free. */
+};
+
+/**
+ * @internal
+ *
+ * Get the IP4 reassembly node
+ *
+ * @return
+ * Pointer to the IP4 reassembly node.
+ */
+struct rte_node_register *ip4_reassembly_node_get(void);
+
+#endif /* __INCLUDE_IP4_REASSEMBLY_PRIV_H__ */
@@ -19,8 +19,9 @@ sources = files(
'null.c',
'pkt_cls.c',
'pkt_drop.c',
+        'ip4_reassembly.c',
)
headers = files('rte_node_ip4_api.h', 'rte_node_ip6_api.h', 'rte_node_eth_api.h')
# Strict-aliasing rules are violated by uint8_t[] to context size casts.
cflags += '-fno-strict-aliasing'
-deps += ['graph', 'mbuf', 'lpm', 'ethdev', 'mempool', 'cryptodev']
+deps += ['graph', 'mbuf', 'lpm', 'ethdev', 'mempool', 'cryptodev', 'ip_frag']
@@ -35,6 +35,27 @@ enum rte_node_ip4_lookup_next {
/**< Number of next nodes of lookup node. */
};
+/**
+ * IP4 reassembly next nodes.
+ */
+enum rte_node_ip4_reassembly_next {
+	/* NOTE(review): no next edges are defined (MAX == 0) although the node
+	 * moves its stream to edge 0 at runtime — TODO confirm edges are set
+	 * up externally, or add e.g. a pkt_drop edge before MAX.
+	 */
+	RTE_NODE_IP4_REASSEMBLY_NEXT_MAX,
+	/**< Number of next nodes of reassembly node. */
+};
+
+/**
+ * Reassembly configure structure.
+ * @see rte_node_ip4_reassembly_configure
+ */
+struct rte_node_ip4_reassembly_cfg {
+	struct rte_ip_frag_tbl *tbl;
+	/**< Reassembly fragmentation table. */
+	struct rte_ip_frag_death_row *dr;
+	/**< Reassembly deathrow table. */
+	rte_node_t node_id;
+	/**< Node identifier to configure. */
+};
+
/**
* Add ipv4 route to lookup table.
*
@@ -73,6 +94,20 @@ __rte_experimental
int rte_node_ip4_rewrite_add(uint16_t next_hop, uint8_t *rewrite_data,
uint8_t rewrite_len, uint16_t dst_port);
+/**
+ * Add reassembly node configuration data.
+ *
+ * @param cfg
+ * Pointer to the configuration structure.
+ * @param cnt
+ * Number of configuration structures passed.
+ *
+ * @return
+ * 0 on success, negative otherwise.
+ */
+__rte_experimental
+int rte_node_ip4_reassembly_configure(struct rte_node_ip4_reassembly_cfg *cfg, uint16_t cnt);
+
#ifdef __cplusplus
}
#endif
@@ -4,6 +4,7 @@ EXPERIMENTAL {
rte_node_eth_config;
rte_node_ip4_route_add;
rte_node_ip4_rewrite_add;
+ rte_node_ip4_reassembly_configure;
rte_node_ip6_rewrite_add;
rte_node_ip6_route_add;
rte_node_logtype;