On 6/8/2023 2:31 PM, Mike Pattrick wrote:
> Previously the noisy neighbour vnf simulation would only operate in io
> mode, forwarding packets as is. However, this limited the usefulness of
> noisy neighbour simulation.
>
> This feature has now been expanded to support mac, macswap, and
> 5tswap modes. To facilitate adding this support, some new header files
> were added.
>
> Signed-off-by: Mike Pattrick <mkp@redhat.com>
>
Reviewed-by: Ferruh Yigit <ferruh.yigit@amd.com>
Applied to dpdk-next-net/main, thanks.
> @@ -4052,9 +4052,16 @@ rxtx_config_display(void)
> {
> portid_t pid;
> queueid_t qid;
> + char buf[32];
> +
> + if (cur_fwd_eng->status)
> + snprintf(buf, sizeof(buf), " (%s)", cur_fwd_eng->status);
> + else
> + buf[0] = '\0';
>
> - printf(" %s packet forwarding%s packets/burst=%d\n",
> + printf(" %s%s packet forwarding%s packets/burst=%d\n",
> cur_fwd_eng->fwd_mode_name,
> + buf,
> retry_enabled == 0 ? "" : " with retry",
> nb_pkt_per_burst);
>
> @@ -4816,10 +4823,17 @@ pkt_fwd_config_display(struct fwd_config *cfg)
> struct fwd_stream *fs;
> lcoreid_t lc_id;
> streamid_t sm_id;
> + char buf[32];
> +
> + if (cfg->fwd_eng->status)
> + snprintf(buf, sizeof(buf), " (%s)", cfg->fwd_eng->status);
> + else
> + buf[0] = '\0';
>
> - printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
> + printf("%s%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
> "NUMA support %s, MP allocation mode: %s\n",
> cfg->fwd_eng->fwd_mode_name,
> + buf,
> retry_enabled == 0 ? "" : " with retry",
> cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
> numa_support == 1 ? "enabled" : "disabled",
Removed interim 'buf' variable (and 'cfg->fwd_eng->status' specific
check), and updated as follows while merging:
```
- printf(" %s packet forwarding%s packets/burst=%d\n",
+ printf(" %s%s%s packet forwarding%s packets/burst=%d\n",
cur_fwd_eng->fwd_mode_name,
+ cur_fwd_eng->status ? "-" : "",
+ cur_fwd_eng->status ? cur_fwd_eng->status : "",
```
@@ -17,64 +17,8 @@
#include <rte_ip.h>
#include <rte_flow.h>
-#include "macswap_common.h"
#include "testpmd.h"
-
-
-static inline void
-swap_mac(struct rte_ether_hdr *eth_hdr)
-{
- struct rte_ether_addr addr;
-
- /* Swap dest and src mac addresses. */
- rte_ether_addr_copy(&eth_hdr->dst_addr, &addr);
- rte_ether_addr_copy(&eth_hdr->src_addr, &eth_hdr->dst_addr);
- rte_ether_addr_copy(&addr, &eth_hdr->src_addr);
-}
-
-static inline void
-swap_ipv4(struct rte_ipv4_hdr *ipv4_hdr)
-{
- rte_be32_t addr;
-
- /* Swap dest and src ipv4 addresses. */
- addr = ipv4_hdr->src_addr;
- ipv4_hdr->src_addr = ipv4_hdr->dst_addr;
- ipv4_hdr->dst_addr = addr;
-}
-
-static inline void
-swap_ipv6(struct rte_ipv6_hdr *ipv6_hdr)
-{
- uint8_t addr[16];
-
- /* Swap dest and src ipv6 addresses. */
- memcpy(&addr, &ipv6_hdr->src_addr, 16);
- memcpy(&ipv6_hdr->src_addr, &ipv6_hdr->dst_addr, 16);
- memcpy(&ipv6_hdr->dst_addr, &addr, 16);
-}
-
-static inline void
-swap_tcp(struct rte_tcp_hdr *tcp_hdr)
-{
- rte_be16_t port;
-
- /* Swap dest and src tcp port. */
- port = tcp_hdr->src_port;
- tcp_hdr->src_port = tcp_hdr->dst_port;
- tcp_hdr->dst_port = port;
-}
-
-static inline void
-swap_udp(struct rte_udp_hdr *udp_hdr)
-{
- rte_be16_t port;
-
- /* Swap dest and src udp port */
- port = udp_hdr->src_port;
- udp_hdr->src_port = udp_hdr->dst_port;
- udp_hdr->dst_port = port;
-}
+#include "5tswap.h"
/*
* 5 tuple swap forwarding mode: Swap the source and the destination of layers
@@ -85,22 +29,7 @@ static bool
pkt_burst_5tuple_swap(struct fwd_stream *fs)
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
- struct rte_port *txp;
- struct rte_mbuf *mb;
- uint16_t next_proto;
- uint64_t ol_flags;
- uint16_t proto;
uint16_t nb_rx;
- int i;
- union {
- struct rte_ether_hdr *eth;
- struct rte_vlan_hdr *vlan;
- struct rte_ipv4_hdr *ipv4;
- struct rte_ipv6_hdr *ipv6;
- struct rte_tcp_hdr *tcp;
- struct rte_udp_hdr *udp;
- uint8_t *byte;
- } h;
/*
* Receive a burst of packets and forward them.
@@ -109,49 +38,8 @@ pkt_burst_5tuple_swap(struct fwd_stream *fs)
if (unlikely(nb_rx == 0))
return false;
- txp = &ports[fs->tx_port];
- ol_flags = ol_flags_init(txp->dev_conf.txmode.offloads);
- vlan_qinq_set(pkts_burst, nb_rx, ol_flags,
- txp->tx_vlan_id, txp->tx_vlan_id_outer);
- for (i = 0; i < nb_rx; i++) {
- if (likely(i < nb_rx - 1))
- rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i+1],
- void *));
- mb = pkts_burst[i];
- h.eth = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *);
- proto = h.eth->ether_type;
- swap_mac(h.eth);
- mb->l2_len = sizeof(struct rte_ether_hdr);
- h.eth++;
- while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
- proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
- proto = h.vlan->eth_proto;
- h.vlan++;
- mb->l2_len += sizeof(struct rte_vlan_hdr);
- }
- if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
- swap_ipv4(h.ipv4);
- next_proto = h.ipv4->next_proto_id;
- mb->l3_len = rte_ipv4_hdr_len(h.ipv4);
- h.byte += mb->l3_len;
- } else if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV6)) {
- swap_ipv6(h.ipv6);
- next_proto = h.ipv6->proto;
- h.ipv6++;
- mb->l3_len = sizeof(struct rte_ipv6_hdr);
- } else {
- mbuf_field_set(mb, ol_flags);
- continue;
- }
- if (next_proto == IPPROTO_UDP) {
- swap_udp(h.udp);
- mb->l4_len = sizeof(struct rte_udp_hdr);
- } else if (next_proto == IPPROTO_TCP) {
- swap_tcp(h.tcp);
- mb->l4_len = (h.tcp->data_off & 0xf0) >> 2;
- }
- mbuf_field_set(mb, ol_flags);
- }
+ do_5tswap(pkts_burst, nb_rx, fs);
+
common_fwd_stream_transmit(fs, pkts_burst, nb_rx);
return true;
new file mode 100644
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2014-2020 Mellanox Technologies, Ltd
+ */
+
+#ifndef _5TSWAP_H_
+#define _5TSWAP_H_
+
+#include "macswap_common.h"
+
+static inline void
+swap_mac(struct rte_ether_hdr *eth_hdr)
+{
+ struct rte_ether_addr addr;
+
+ /* Swap dest and src mac addresses. */
+ rte_ether_addr_copy(&eth_hdr->dst_addr, &addr);
+ rte_ether_addr_copy(&eth_hdr->src_addr, &eth_hdr->dst_addr);
+ rte_ether_addr_copy(&addr, &eth_hdr->src_addr);
+}
+
+static inline void
+swap_ipv4(struct rte_ipv4_hdr *ipv4_hdr)
+{
+ rte_be32_t addr;
+
+ /* Swap dest and src ipv4 addresses. */
+ addr = ipv4_hdr->src_addr;
+ ipv4_hdr->src_addr = ipv4_hdr->dst_addr;
+ ipv4_hdr->dst_addr = addr;
+}
+
+static inline void
+swap_ipv6(struct rte_ipv6_hdr *ipv6_hdr)
+{
+ uint8_t addr[16];
+
+ /* Swap dest and src ipv6 addresses. */
+ memcpy(&addr, &ipv6_hdr->src_addr, 16);
+ memcpy(&ipv6_hdr->src_addr, &ipv6_hdr->dst_addr, 16);
+ memcpy(&ipv6_hdr->dst_addr, &addr, 16);
+}
+
+static inline void
+swap_tcp(struct rte_tcp_hdr *tcp_hdr)
+{
+ rte_be16_t port;
+
+ /* Swap dest and src tcp port. */
+ port = tcp_hdr->src_port;
+ tcp_hdr->src_port = tcp_hdr->dst_port;
+ tcp_hdr->dst_port = port;
+}
+
+static inline void
+swap_udp(struct rte_udp_hdr *udp_hdr)
+{
+ rte_be16_t port;
+
+ /* Swap dest and src udp port */
+ port = udp_hdr->src_port;
+ udp_hdr->src_port = udp_hdr->dst_port;
+ udp_hdr->dst_port = port;
+}
+
+static inline void
+do_5tswap(struct rte_mbuf *pkts_burst[], uint16_t nb_rx,
+ struct fwd_stream *fs)
+{
+ struct rte_port *txp;
+ struct rte_mbuf *mb;
+ uint16_t next_proto;
+ uint64_t ol_flags;
+ uint16_t proto;
+ int i;
+ union {
+ struct rte_ether_hdr *eth;
+ struct rte_vlan_hdr *vlan;
+ struct rte_ipv4_hdr *ipv4;
+ struct rte_ipv6_hdr *ipv6;
+ struct rte_tcp_hdr *tcp;
+ struct rte_udp_hdr *udp;
+ uint8_t *byte;
+ } h;
+
+ txp = &ports[fs->tx_port];
+ ol_flags = ol_flags_init(txp->dev_conf.txmode.offloads);
+ vlan_qinq_set(pkts_burst, nb_rx, ol_flags,
+ txp->tx_vlan_id, txp->tx_vlan_id_outer);
+ for (i = 0; i < nb_rx; i++) {
+ if (likely(i < nb_rx - 1))
+ rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i+1],
+ void *));
+ mb = pkts_burst[i];
+ h.eth = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *);
+ proto = h.eth->ether_type;
+ swap_mac(h.eth);
+ mb->l2_len = sizeof(struct rte_ether_hdr);
+ h.eth++;
+ while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
+ proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
+ proto = h.vlan->eth_proto;
+ h.vlan++;
+ mb->l2_len += sizeof(struct rte_vlan_hdr);
+ }
+ if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
+ swap_ipv4(h.ipv4);
+ next_proto = h.ipv4->next_proto_id;
+ mb->l3_len = rte_ipv4_hdr_len(h.ipv4);
+ h.byte += mb->l3_len;
+ } else if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV6)) {
+ swap_ipv6(h.ipv6);
+ next_proto = h.ipv6->proto;
+ h.ipv6++;
+ mb->l3_len = sizeof(struct rte_ipv6_hdr);
+ } else {
+ mbuf_field_set(mb, ol_flags);
+ continue;
+ }
+ if (next_proto == IPPROTO_UDP) {
+ swap_udp(h.udp);
+ mb->l4_len = sizeof(struct rte_udp_hdr);
+ } else if (next_proto == IPPROTO_TCP) {
+ swap_tcp(h.tcp);
+ mb->l4_len = (h.tcp->data_off & 0xf0) >> 2;
+ }
+ mbuf_field_set(mb, ol_flags);
+ }
+}
+
+#endif /* _5TSWAP_H_ */
@@ -4052,9 +4052,16 @@ rxtx_config_display(void)
{
portid_t pid;
queueid_t qid;
+ char buf[32];
+
+ if (cur_fwd_eng->status)
+ snprintf(buf, sizeof(buf), " (%s)", cur_fwd_eng->status);
+ else
+ buf[0] = '\0';
- printf(" %s packet forwarding%s packets/burst=%d\n",
+ printf(" %s%s packet forwarding%s packets/burst=%d\n",
cur_fwd_eng->fwd_mode_name,
+ buf,
retry_enabled == 0 ? "" : " with retry",
nb_pkt_per_burst);
@@ -4816,10 +4823,17 @@ pkt_fwd_config_display(struct fwd_config *cfg)
struct fwd_stream *fs;
lcoreid_t lc_id;
streamid_t sm_id;
+ char buf[32];
+
+ if (cfg->fwd_eng->status)
+ snprintf(buf, sizeof(buf), " (%s)", cfg->fwd_eng->status);
+ else
+ buf[0] = '\0';
- printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
+ printf("%s%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
"NUMA support %s, MP allocation mode: %s\n",
cfg->fwd_eng->fwd_mode_name,
+ buf,
retry_enabled == 0 ? "" : " with retry",
cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
numa_support == 1 ? "enabled" : "disabled",
@@ -35,6 +35,7 @@
#include <rte_flow.h>
#include "testpmd.h"
+#include "macfwd.h"
/*
* Forwarding of packets in MAC mode.
@@ -45,13 +46,7 @@ static bool
pkt_burst_mac_forward(struct fwd_stream *fs)
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
- struct rte_port *txp;
- struct rte_mbuf *mb;
- struct rte_ether_hdr *eth_hdr;
uint16_t nb_rx;
- uint16_t i;
- uint64_t ol_flags = 0;
- uint64_t tx_offloads;
/*
* Receive a burst of packets and forward them.
@@ -60,31 +55,7 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
if (unlikely(nb_rx == 0))
return false;
- txp = &ports[fs->tx_port];
- tx_offloads = txp->dev_conf.txmode.offloads;
- if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
- ol_flags = RTE_MBUF_F_TX_VLAN;
- if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
- ol_flags |= RTE_MBUF_F_TX_QINQ;
- if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
- ol_flags |= RTE_MBUF_F_TX_MACSEC;
- for (i = 0; i < nb_rx; i++) {
- if (likely(i < nb_rx - 1))
- rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
- void *));
- mb = pkts_burst[i];
- eth_hdr = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *);
- rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
- &eth_hdr->dst_addr);
- rte_ether_addr_copy(&ports[fs->tx_port].eth_addr,
- &eth_hdr->src_addr);
- mb->ol_flags &= RTE_MBUF_F_INDIRECT | RTE_MBUF_F_EXTERNAL;
- mb->ol_flags |= ol_flags;
- mb->l2_len = sizeof(struct rte_ether_hdr);
- mb->l3_len = sizeof(struct rte_ipv4_hdr);
- mb->vlan_tci = txp->tx_vlan_id;
- mb->vlan_tci_outer = txp->tx_vlan_id_outer;
- }
+ do_macfwd(pkts_burst, nb_rx, fs);
common_fwd_stream_transmit(fs, pkts_burst, nb_rx);
new file mode 100644
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _MACFWD_H_
+#define _MACFWD_H_
+
+static inline void
+do_macfwd(struct rte_mbuf *pkts_burst[], uint16_t nb_rx,
+ struct fwd_stream *fs)
+{
+ struct rte_ether_hdr *eth_hdr;
+ uint64_t ol_flags = 0;
+ uint64_t tx_offloads;
+ struct rte_mbuf *mb;
+ struct rte_port *txp = &ports[fs->tx_port];
+ uint16_t i;
+
+ tx_offloads = txp->dev_conf.txmode.offloads;
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
+ ol_flags = RTE_MBUF_F_TX_VLAN;
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
+ ol_flags |= RTE_MBUF_F_TX_QINQ;
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
+ ol_flags |= RTE_MBUF_F_TX_MACSEC;
+ for (i = 0; i < nb_rx; i++) {
+ if (likely(i < nb_rx - 1))
+ rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
+ void *));
+ mb = pkts_burst[i];
+ eth_hdr = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *);
+ rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
+ &eth_hdr->dst_addr);
+ rte_ether_addr_copy(&ports[fs->tx_port].eth_addr,
+ &eth_hdr->src_addr);
+ mb->ol_flags &= RTE_MBUF_F_INDIRECT | RTE_MBUF_F_EXTERNAL;
+ mb->ol_flags |= ol_flags;
+ mb->l2_len = sizeof(struct rte_ether_hdr);
+ mb->l3_len = sizeof(struct rte_ipv4_hdr);
+ mb->vlan_tci = txp->tx_vlan_id;
+ mb->vlan_tci_outer = txp->tx_vlan_id_outer;
+ }
+}
+
+#endif /* _MACFWD_H_ */
@@ -32,6 +32,18 @@
#include <rte_malloc.h>
#include "testpmd.h"
+#include "5tswap.h"
+#include "macfwd.h"
+#if defined(RTE_ARCH_X86)
+#include "macswap_sse.h"
+#elif defined(__ARM_NEON)
+#include "macswap_neon.h"
+#else
+#include "macswap.h"
+#endif
+
+#define NOISY_STRSIZE 256
+#define NOISY_RING "noisy_ring_%d\n"
struct noisy_config {
struct rte_ring *f;
@@ -80,9 +92,6 @@ sim_memory_lookups(struct noisy_config *ncf, uint16_t nb_pkts)
{
uint16_t i, j;
- if (!ncf->do_sim)
- return;
-
for (i = 0; i < nb_pkts; i++) {
for (j = 0; j < noisy_lkup_num_writes; j++)
do_write(ncf->vnf_mem);
@@ -110,15 +119,13 @@ sim_memory_lookups(struct noisy_config *ncf, uint16_t nb_pkts)
* out of the FIFO
* 4. Cases 2 and 3 combined
*/
-static bool
-pkt_burst_noisy_vnf(struct fwd_stream *fs)
+static uint16_t
+noisy_eth_tx_burst(struct fwd_stream *fs, uint16_t nb_rx, struct rte_mbuf **pkts_burst)
{
const uint64_t freq_khz = rte_get_timer_hz() / 1000;
struct noisy_config *ncf = noisy_cfg[fs->rx_port];
- struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
struct rte_mbuf *tmp_pkts[MAX_PKT_BURST];
uint16_t nb_deqd = 0;
- uint16_t nb_rx = 0;
uint16_t nb_tx = 0;
uint16_t nb_enqd;
unsigned int fifo_free;
@@ -126,12 +133,16 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
bool needs_flush = false;
uint64_t now;
- nb_rx = common_fwd_stream_receive(fs, pkts_burst, nb_pkt_per_burst);
- if (unlikely(nb_rx == 0))
- goto flush;
+ if (unlikely(nb_rx == 0)) {
+ if (!ncf->do_buffering)
+ goto end;
+ else
+ goto flush;
+ }
if (!ncf->do_buffering) {
- sim_memory_lookups(ncf, nb_rx);
+ if (ncf->do_sim)
+ sim_memory_lookups(ncf, nb_rx);
nb_tx = common_fwd_stream_transmit(fs, pkts_burst, nb_rx);
goto end;
}
@@ -150,7 +161,8 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
nb_tx = common_fwd_stream_transmit(fs, tmp_pkts, nb_deqd);
}
- sim_memory_lookups(ncf, nb_enqd);
+ if (ncf->do_sim)
+ sim_memory_lookups(ncf, nb_enqd);
flush:
if (ncf->do_flush) {
@@ -169,11 +181,66 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
ncf->prev_time = rte_get_timer_cycles();
}
end:
+ return nb_tx;
+}
+
+static bool
+pkt_burst_io(struct fwd_stream *fs)
+{
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ uint16_t nb_rx;
+ uint16_t nb_tx;
+
+ nb_rx = common_fwd_stream_receive(fs, pkts_burst, nb_pkt_per_burst);
+ nb_tx = noisy_eth_tx_burst(fs, nb_rx, pkts_burst);
+
return nb_rx > 0 || nb_tx > 0;
}
-#define NOISY_STRSIZE 256
-#define NOISY_RING "noisy_ring_%d\n"
+static bool
+pkt_burst_mac(struct fwd_stream *fs)
+{
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ uint16_t nb_rx;
+ uint16_t nb_tx;
+
+ nb_rx = common_fwd_stream_receive(fs, pkts_burst, nb_pkt_per_burst);
+ if (likely(nb_rx != 0))
+ do_macfwd(pkts_burst, nb_rx, fs);
+ nb_tx = noisy_eth_tx_burst(fs, nb_rx, pkts_burst);
+
+ return nb_rx > 0 || nb_tx > 0;
+}
+
+static bool
+pkt_burst_macswap(struct fwd_stream *fs)
+{
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ uint16_t nb_rx;
+ uint16_t nb_tx;
+
+ nb_rx = common_fwd_stream_receive(fs, pkts_burst, nb_pkt_per_burst);
+ if (likely(nb_rx != 0))
+ do_macswap(pkts_burst, nb_rx, &ports[fs->tx_port]);
+ nb_tx = noisy_eth_tx_burst(fs, nb_rx, pkts_burst);
+
+ return nb_rx > 0 || nb_tx > 0;
+}
+
+static bool
+pkt_burst_5tswap(struct fwd_stream *fs)
+{
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ uint16_t nb_rx;
+ uint16_t nb_tx;
+
+ nb_rx = common_fwd_stream_receive(fs, pkts_burst, nb_pkt_per_burst);
+ if (likely(nb_rx != 0))
+ do_5tswap(pkts_burst, nb_rx, fs);
+ nb_tx = noisy_eth_tx_burst(fs, nb_rx, pkts_burst);
+
+ return nb_rx > 0 || nb_tx > 0;
+}
static void
noisy_fwd_end(portid_t pi)
@@ -226,6 +293,20 @@ noisy_fwd_begin(portid_t pi)
"--noisy-lkup-memory-size must be > 0\n");
}
+ if (noisy_fwd_mode == NOISY_FWD_MODE_IO)
+ noisy_vnf_engine.packet_fwd = pkt_burst_io;
+ else if (noisy_fwd_mode == NOISY_FWD_MODE_MAC)
+ noisy_vnf_engine.packet_fwd = pkt_burst_mac;
+ else if (noisy_fwd_mode == NOISY_FWD_MODE_MACSWAP)
+ noisy_vnf_engine.packet_fwd = pkt_burst_macswap;
+ else if (noisy_fwd_mode == NOISY_FWD_MODE_5TSWAP)
+ noisy_vnf_engine.packet_fwd = pkt_burst_5tswap;
+ else
+ rte_exit(EXIT_FAILURE,
+ " Invalid noisy_fwd_mode specified\n");
+
+ noisy_vnf_engine.status = noisy_fwd_mode_desc[noisy_fwd_mode];
+
return 0;
}
@@ -233,6 +314,6 @@ struct fwd_engine noisy_vnf_engine = {
.fwd_mode_name = "noisy",
.port_fwd_begin = noisy_fwd_begin,
.port_fwd_end = noisy_fwd_end,
- .stream_init = common_fwd_stream_init,
- .packet_fwd = pkt_burst_noisy_vnf,
+ .stream_init = common_fwd_stream_init,
+ .packet_fwd = pkt_burst_io,
};
@@ -190,6 +190,7 @@ usage(char* progname)
" anon: use regular DPDK memory to create and anonymous memory to populate mempool\n"
" xmem: use anonymous memory to create and populate mempool\n"
" xmemhuge: use anonymous hugepage memory to create and populate mempool\n");
+ printf(" --noisy-forward-mode=<io|mac|macswap|5tswap>: set the sub-fwd mode, defaults to io\n");
printf(" --noisy-tx-sw-buffer-size=N: size of FIFO buffer\n");
printf(" --noisy-tx-sw-buffer-flushtime=N: flush FIFO after N ms\n");
printf(" --noisy-lkup-memory=N: allocate N MB of VNF memory\n");
@@ -698,6 +699,7 @@ launch_args_parse(int argc, char** argv)
{ "mp-alloc", 1, 0, 0 },
{ "tx-ip", 1, 0, 0 },
{ "tx-udp", 1, 0, 0 },
+ { "noisy-forward-mode", 1, 0, 0 },
{ "noisy-tx-sw-buffer-size", 1, 0, 0 },
{ "noisy-tx-sw-buffer-flushtime", 1, 0, 0 },
{ "noisy-lkup-memory", 1, 0, 0 },
@@ -1444,6 +1446,19 @@ launch_args_parse(int argc, char** argv)
rte_exit(EXIT_FAILURE,
"noisy-lkup-num-reads-writes must be >= 0\n");
}
+ if (!strcmp(lgopts[opt_idx].name,
+ "noisy-forward-mode")) {
+ int i;
+ for (i = 0; i < NOISY_FWD_MODE_MAX; i++)
+ if (!strcmp(optarg, noisy_fwd_mode_desc[i])) {
+ noisy_fwd_mode = i;
+ break;
+ }
+ if (i == NOISY_FWD_MODE_MAX)
+ rte_exit(EXIT_FAILURE, "noisy-forward-mode %s invalid,"
+ " must be a valid noisy-forward-mode value\n",
+ optarg);
+ }
if (!strcmp(lgopts[opt_idx].name, "no-iova-contig"))
mempool_flags = RTE_MEMPOOL_F_NO_IOVA_CONTIG;
@@ -330,6 +330,20 @@ int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
*/
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
+/*
+ * Configurable sub-forwarding mode for the noisy_vnf forwarding mode.
+ */
+enum noisy_fwd_mode noisy_fwd_mode;
+
+/* String version of enum noisy_fwd_mode */
+const char * const noisy_fwd_mode_desc[] = {
+ [NOISY_FWD_MODE_IO] = "io",
+ [NOISY_FWD_MODE_MAC] = "mac",
+ [NOISY_FWD_MODE_MACSWAP] = "macswap",
+ [NOISY_FWD_MODE_5TSWAP] = "5tswap",
+ [NOISY_FWD_MODE_MAX] = NULL,
+};
+
/*
* Configurable value of buffered packets before sending.
*/
@@ -116,6 +116,14 @@ enum {
QUEUE_JOB_TYPE_ACTION_QUERY,
};
+enum noisy_fwd_mode {
+ NOISY_FWD_MODE_IO,
+ NOISY_FWD_MODE_MAC,
+ NOISY_FWD_MODE_MACSWAP,
+ NOISY_FWD_MODE_5TSWAP,
+ NOISY_FWD_MODE_MAX,
+};
+
/**
* The data structure associated with RX and TX packet burst statistics
* that are recorded for each forwarding stream.
@@ -391,6 +399,7 @@ struct fwd_engine {
port_fwd_end_t port_fwd_end; /**< NULL if nothing special to do. */
stream_init_t stream_init; /**< NULL if nothing special to do. */
packet_fwd_t packet_fwd; /**< Mandatory. */
+ const char *status; /**< NULL if nothing to display. */
};
void common_fwd_stream_init(struct fwd_stream *fs);
@@ -555,6 +564,8 @@ extern int8_t rx_drop_en;
extern int16_t tx_free_thresh;
extern int16_t tx_rs_thresh;
+extern enum noisy_fwd_mode noisy_fwd_mode;
+extern const char * const noisy_fwd_mode_desc[];
extern uint16_t noisy_tx_sw_bufsz;
extern uint16_t noisy_tx_sw_buf_flush_time;
extern uint64_t noisy_lkup_mem_sz;
@@ -487,6 +487,15 @@ The command line options are:
* xmemhuge: create and populate mempool using externally and anonymously
allocated hugepage area
+* ``--noisy-forward-mode=mode``
+
+ Set the noisy vnf forwarding mode where ``mode`` is one of the following::
+
+ io (the default)
+ mac
+ macswap
+ 5tswap
+
* ``--noisy-tx-sw-buffer-size``
Set the number of maximum elements of the FIFO queue to be created