@@ -91,9 +91,6 @@ pkt_burst_5tuple_swap(struct fwd_stream *fs)
uint64_t ol_flags;
uint16_t proto;
uint16_t nb_rx;
- uint16_t nb_tx;
- uint32_t retry;
-
int i;
union {
struct rte_ether_hdr *eth;
@@ -155,24 +152,7 @@ pkt_burst_5tuple_swap(struct fwd_stream *fs)
}
mbuf_field_set(mb, ol_flags);
}
- nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
- /*
- * Retry if necessary
- */
- if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
- retry = 0;
- while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
- rte_delay_us(burst_tx_delay_time);
- nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
- &pkts_burst[nb_tx], nb_rx - nb_tx);
- }
- }
- fs->tx_packets += nb_tx;
- inc_tx_burst_stats(fs, nb_tx);
- if (unlikely(nb_tx < nb_rx)) {
- fs->fwd_dropped += (nb_rx - nb_tx);
- rte_pktmbuf_free_bulk(&pkts_burst[nb_tx], nb_rx - nb_tx);
- }
+ common_fwd_stream_transmit(fs, pkts_burst, nb_rx);
return true;
}
@@ -847,12 +847,10 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
uint8_t gro_enable;
#endif
uint16_t nb_rx;
- uint16_t nb_tx;
uint16_t nb_prep;
uint16_t i;
uint64_t rx_ol_flags, tx_ol_flags;
uint64_t tx_offloads;
- uint32_t retry;
uint32_t rx_bad_ip_csum;
uint32_t rx_bad_l4_csum;
uint32_t rx_bad_outer_l4_csum;
@@ -1169,32 +1167,13 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
rte_pktmbuf_free_bulk(&tx_pkts_burst[nb_prep], nb_rx - nb_prep);
}
- nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, tx_pkts_burst,
- nb_prep);
+ common_fwd_stream_transmit(fs, tx_pkts_burst, nb_prep);
- /*
- * Retry if necessary
- */
- if (unlikely(nb_tx < nb_prep) && fs->retry_enabled) {
- retry = 0;
- while (nb_tx < nb_prep && retry++ < burst_tx_retry_num) {
- rte_delay_us(burst_tx_delay_time);
- nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
- &tx_pkts_burst[nb_tx], nb_prep - nb_tx);
- }
- }
- fs->tx_packets += nb_tx;
fs->rx_bad_ip_csum += rx_bad_ip_csum;
fs->rx_bad_l4_csum += rx_bad_l4_csum;
fs->rx_bad_outer_l4_csum += rx_bad_outer_l4_csum;
fs->rx_bad_outer_ip_csum += rx_bad_outer_ip_csum;
- inc_tx_burst_stats(fs, nb_tx);
- if (unlikely(nb_tx < nb_prep)) {
- fs->fwd_dropped += (nb_prep - nb_tx);
- rte_pktmbuf_free_bulk(&tx_pkts_burst[nb_tx], nb_prep - nb_tx);
- }
-
return true;
}
@@ -75,7 +75,6 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
uint16_t nb_dropped;
uint16_t nb_pkt;
uint16_t nb_clones = nb_pkt_flowgen_clones;
- uint32_t retry;
uint64_t tx_offloads;
int next_flow = RTE_PER_LCORE(_next_flow);
@@ -158,30 +157,13 @@ pkt_burst_flow_gen(struct fwd_stream *fs)
next_flow = 0;
}
- nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
- /*
- * Retry if necessary
- */
- if (unlikely(nb_tx < nb_pkt) && fs->retry_enabled) {
- retry = 0;
- while (nb_tx < nb_pkt && retry++ < burst_tx_retry_num) {
- rte_delay_us(burst_tx_delay_time);
- nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
- &pkts_burst[nb_tx], nb_pkt - nb_tx);
- }
- }
- fs->tx_packets += nb_tx;
-
- inc_tx_burst_stats(fs, nb_tx);
+ nb_tx = common_fwd_stream_transmit(fs, pkts_burst, nb_pkt);
nb_dropped = nb_pkt - nb_tx;
if (unlikely(nb_dropped > 0)) {
/* Back out the flow counter. */
next_flow -= nb_dropped;
while (next_flow < 0)
next_flow += nb_flows_flowgen;
-
- fs->fwd_dropped += nb_dropped;
- rte_pktmbuf_free_bulk(&pkts_burst[nb_tx], nb_pkt - nb_tx);
}
RTE_PER_LCORE(_next_flow) = next_flow;
@@ -280,10 +280,8 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
struct rte_ipv4_hdr *ip_h;
struct rte_icmp_hdr *icmp_h;
struct rte_ether_addr eth_addr;
- uint32_t retry;
uint32_t ip_addr;
uint16_t nb_rx;
- uint16_t nb_tx;
uint16_t nb_replies;
uint16_t eth_type;
uint16_t vlan_id;
@@ -476,30 +474,8 @@ reply_to_icmp_echo_rqsts(struct fwd_stream *fs)
}
/* Send back ICMP echo replies, if any. */
- if (nb_replies > 0) {
- nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst,
- nb_replies);
- /*
- * Retry if necessary
- */
- if (unlikely(nb_tx < nb_replies) && fs->retry_enabled) {
- retry = 0;
- while (nb_tx < nb_replies &&
- retry++ < burst_tx_retry_num) {
- rte_delay_us(burst_tx_delay_time);
- nb_tx += rte_eth_tx_burst(fs->tx_port,
- fs->tx_queue,
- &pkts_burst[nb_tx],
- nb_replies - nb_tx);
- }
- }
- fs->tx_packets += nb_tx;
- inc_tx_burst_stats(fs, nb_tx);
- if (unlikely(nb_tx < nb_replies)) {
- fs->fwd_dropped += (nb_replies - nb_tx);
- rte_pktmbuf_free_bulk(&pkts_burst[nb_tx], nb_replies - nb_tx);
- }
- }
+ if (nb_replies > 0)
+ common_fwd_stream_transmit(fs, pkts_burst, nb_replies);
return true;
}
@@ -182,13 +182,10 @@ ieee1588_packet_fwd(struct fwd_stream *fs)
/* Forward PTP packet with hardware TX timestamp */
mb->ol_flags |= RTE_MBUF_F_TX_IEEE1588_TMST;
- if (rte_eth_tx_burst(fs->tx_port, fs->tx_queue, &mb, 1) == 0) {
+ if (common_fwd_stream_transmit(fs, &mb, 1) == 0) {
printf("Port %u sent PTP packet dropped\n", fs->tx_port);
- fs->fwd_dropped += 1;
- rte_pktmbuf_free(mb);
return false;
}
- fs->tx_packets += 1;
/*
* Check the TX timestamp.
@@ -46,8 +46,6 @@ pkt_burst_io_forward(struct fwd_stream *fs)
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
uint16_t nb_rx;
- uint16_t nb_tx;
- uint32_t retry;
/*
* Receive a burst of packets and forward them.
@@ -56,25 +54,7 @@ pkt_burst_io_forward(struct fwd_stream *fs)
if (unlikely(nb_rx == 0))
return false;
- nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
- pkts_burst, nb_rx);
- /*
- * Retry if necessary
- */
- if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
- retry = 0;
- while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
- rte_delay_us(burst_tx_delay_time);
- nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
- &pkts_burst[nb_tx], nb_rx - nb_tx);
- }
- }
- fs->tx_packets += nb_tx;
- inc_tx_burst_stats(fs, nb_tx);
- if (unlikely(nb_tx < nb_rx)) {
- fs->fwd_dropped += (nb_rx - nb_tx);
- rte_pktmbuf_free_bulk(&pkts_burst[nb_tx], nb_rx - nb_tx);
- }
+ common_fwd_stream_transmit(fs, pkts_burst, nb_rx);
return true;
}
@@ -48,9 +48,7 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
struct rte_port *txp;
struct rte_mbuf *mb;
struct rte_ether_hdr *eth_hdr;
- uint32_t retry;
uint16_t nb_rx;
- uint16_t nb_tx;
uint16_t i;
uint64_t ol_flags = 0;
uint64_t tx_offloads;
@@ -87,25 +85,8 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
mb->vlan_tci = txp->tx_vlan_id;
mb->vlan_tci_outer = txp->tx_vlan_id_outer;
}
- nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
- /*
- * Retry if necessary
- */
- if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
- retry = 0;
- while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
- rte_delay_us(burst_tx_delay_time);
- nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
- &pkts_burst[nb_tx], nb_rx - nb_tx);
- }
- }
- fs->tx_packets += nb_tx;
- inc_tx_burst_stats(fs, nb_tx);
- if (unlikely(nb_tx < nb_rx)) {
- fs->fwd_dropped += (nb_rx - nb_tx);
- rte_pktmbuf_free_bulk(&pkts_burst[nb_tx], nb_rx - nb_tx);
- }
+ common_fwd_stream_transmit(fs, pkts_burst, nb_rx);
return true;
}
@@ -51,10 +51,7 @@ static bool
pkt_burst_mac_swap(struct fwd_stream *fs)
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
- struct rte_port *txp;
uint16_t nb_rx;
- uint16_t nb_tx;
- uint32_t retry;
/*
* Receive a burst of packets and forward them.
@@ -63,28 +60,8 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
if (unlikely(nb_rx == 0))
return false;
- txp = &ports[fs->tx_port];
-
- do_macswap(pkts_burst, nb_rx, txp);
-
- nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
- /*
- * Retry if necessary
- */
- if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
- retry = 0;
- while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
- rte_delay_us(burst_tx_delay_time);
- nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
- &pkts_burst[nb_tx], nb_rx - nb_tx);
- }
- }
- fs->tx_packets += nb_tx;
- inc_tx_burst_stats(fs, nb_tx);
- if (unlikely(nb_tx < nb_rx)) {
- fs->fwd_dropped += (nb_rx - nb_tx);
- rte_pktmbuf_free_bulk(&pkts_burst[nb_tx], nb_rx - nb_tx);
- }
+ do_macswap(pkts_burst, nb_rx, &ports[fs->tx_port]);
+ common_fwd_stream_transmit(fs, pkts_burst, nb_rx);
return true;
}
@@ -93,30 +93,6 @@ sim_memory_lookups(struct noisy_config *ncf, uint16_t nb_pkts)
}
}
-static uint16_t
-do_retry(uint16_t nb_rx, uint16_t nb_tx, struct rte_mbuf **pkts,
- struct fwd_stream *fs)
-{
- uint32_t retry = 0;
-
- while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
- rte_delay_us(burst_tx_delay_time);
- nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
- &pkts[nb_tx], nb_rx - nb_tx);
- }
-
- return nb_tx;
-}
-
-static uint32_t
-drop_pkts(struct rte_mbuf **pkts, uint16_t nb_rx, uint16_t nb_tx)
-{
- if (nb_tx < nb_rx)
- rte_pktmbuf_free_bulk(&pkts[nb_tx], nb_rx - nb_tx);
-
- return nb_rx - nb_tx;
-}
-
/*
* Forwarding of packets in noisy VNF mode. Forward packets but perform
* memory operations first as specified on cmdline.
@@ -156,37 +132,22 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
if (!ncf->do_buffering) {
sim_memory_lookups(ncf, nb_rx);
- nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
- pkts_burst, nb_rx);
- if (unlikely(nb_tx < nb_rx) && fs->retry_enabled)
- nb_tx += do_retry(nb_rx, nb_tx, pkts_burst, fs);
- inc_tx_burst_stats(fs, nb_tx);
- fs->tx_packets += nb_tx;
- fs->fwd_dropped += drop_pkts(pkts_burst, nb_rx, nb_tx);
+ nb_tx = common_fwd_stream_transmit(fs, pkts_burst, nb_rx);
goto end;
}
fifo_free = rte_ring_free_count(ncf->f);
if (fifo_free >= nb_rx) {
- nb_enqd = rte_ring_enqueue_burst(ncf->f,
- (void **) pkts_burst, nb_rx, NULL);
- if (nb_enqd < nb_rx)
- fs->fwd_dropped += drop_pkts(pkts_burst,
- nb_rx, nb_enqd);
- } else {
- nb_deqd = rte_ring_dequeue_burst(ncf->f,
- (void **) tmp_pkts, nb_rx, NULL);
- nb_enqd = rte_ring_enqueue_burst(ncf->f,
- (void **) pkts_burst, nb_deqd, NULL);
- if (nb_deqd > 0) {
- nb_tx = rte_eth_tx_burst(fs->tx_port,
- fs->tx_queue, tmp_pkts,
- nb_deqd);
- if (unlikely(nb_tx < nb_rx) && fs->retry_enabled)
- nb_tx += do_retry(nb_rx, nb_tx, tmp_pkts, fs);
- inc_tx_burst_stats(fs, nb_tx);
- fs->fwd_dropped += drop_pkts(tmp_pkts, nb_deqd, nb_tx);
+ nb_enqd = rte_ring_enqueue_burst(ncf->f, (void **) pkts_burst, nb_rx, NULL);
+ if (nb_enqd < nb_rx) {
+ fs->fwd_dropped += nb_rx - nb_enqd;
+ rte_pktmbuf_free_bulk(&pkts_burst[nb_enqd], nb_rx - nb_enqd);
}
+ } else {
+ nb_deqd = rte_ring_dequeue_burst(ncf->f, (void **) tmp_pkts, nb_rx, NULL);
+ nb_enqd = rte_ring_enqueue_burst(ncf->f, (void **) pkts_burst, nb_deqd, NULL);
+ if (nb_deqd > 0)
+ nb_tx = common_fwd_stream_transmit(fs, tmp_pkts, nb_deqd);
}
sim_memory_lookups(ncf, nb_enqd);
@@ -202,16 +163,9 @@ pkt_burst_noisy_vnf(struct fwd_stream *fs)
noisy_tx_sw_buf_flush_time > 0 && !nb_tx;
}
while (needs_flush && !rte_ring_empty(ncf->f)) {
- unsigned int sent;
nb_deqd = rte_ring_dequeue_burst(ncf->f, (void **)tmp_pkts,
MAX_PKT_BURST, NULL);
- sent = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
- tmp_pkts, nb_deqd);
- if (unlikely(sent < nb_deqd) && fs->retry_enabled)
- sent += do_retry(nb_deqd, sent, tmp_pkts, fs);
- inc_tx_burst_stats(fs, sent);
- fs->fwd_dropped += drop_pkts(tmp_pkts, nb_deqd, sent);
- nb_tx += sent;
+ nb_tx += common_fwd_stream_transmit(fs, tmp_pkts, nb_deqd);
ncf->prev_time = rte_get_timer_cycles();
}
end:
@@ -852,11 +852,34 @@ common_fwd_stream_receive(struct fwd_stream *fs, struct rte_mbuf **burst,
return nb_rx;
}
-static inline void
-inc_tx_burst_stats(struct fwd_stream *fs, uint16_t nb_tx)
+/*
+ * Transmit a burst of packets on the stream's Tx port/queue.
+ *
+ * Centralizes the Tx tail previously duplicated in every fwd engine:
+ * - one rte_eth_tx_burst() attempt, then up to burst_tx_retry_num retries
+ *   (each preceded by a burst_tx_delay_time pause) when fs->retry_enabled;
+ * - adds the sent count to fs->tx_packets and records the per-burst
+ *   spread statistics when record_burst_stats is set;
+ * - frees the unsent mbufs and accounts them in fs->fwd_dropped.
+ *
+ * Returns the number of packets actually transmitted so callers can
+ * detect drops (nb_tx < nb_pkts) for engine-specific bookkeeping.
+ *
+ * NOTE(review): nb_pkts is 'unsigned int' while rte_eth_tx_burst() takes
+ * uint16_t; callers are presumably bounded by MAX_PKT_BURST — confirm.
+ */
+static inline uint16_t
+common_fwd_stream_transmit(struct fwd_stream *fs, struct rte_mbuf **burst,
+ unsigned int nb_pkts)
{
+ uint16_t nb_tx;
+ uint32_t retry;
+
+ nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, burst, nb_pkts);
+ /*
+ * Retry if necessary
+ */
+ if (unlikely(nb_tx < nb_pkts) && fs->retry_enabled) {
+ retry = 0;
+ while (nb_tx < nb_pkts && retry++ < burst_tx_retry_num) {
+ rte_delay_us(burst_tx_delay_time);
+ nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
+ &burst[nb_tx], nb_pkts - nb_tx);
+ }
+ }
+ fs->tx_packets += nb_tx;
if (record_burst_stats)
fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
+ /* Unsent packets are dropped here, on behalf of all callers. */
+ if (unlikely(nb_tx < nb_pkts)) {
+ fs->fwd_dropped += (nb_pkts - nb_tx);
+ rte_pktmbuf_free_bulk(&burst[nb_tx], nb_pkts - nb_tx);
+ }
+
+ return nb_tx;
}
/* Prototypes */
@@ -334,7 +334,6 @@ pkt_burst_transmit(struct fwd_stream *fs)
uint16_t nb_tx;
uint16_t nb_pkt;
uint16_t vlan_tci, vlan_tci_outer;
- uint32_t retry;
uint64_t ol_flags = 0;
uint64_t tx_offloads;
@@ -391,25 +390,16 @@ pkt_burst_transmit(struct fwd_stream *fs)
 	if (nb_pkt == 0)
 		return false;
 
-	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
-
-	/*
-	 * Retry if necessary
-	 */
-	if (unlikely(nb_tx < nb_pkt) && fs->retry_enabled) {
-		retry = 0;
-		while (nb_tx < nb_pkt && retry++ < burst_tx_retry_num) {
-			rte_delay_us(burst_tx_delay_time);
-			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
-					&pkts_burst[nb_tx], nb_pkt - nb_tx);
-		}
-	}
-	fs->tx_packets += nb_tx;
+	/* Tx, retry, stats and drop accounting are done by the helper. */
+	nb_tx = common_fwd_stream_transmit(fs, pkts_burst, nb_pkt);
 
 	if (txonly_multi_flow)
 		RTE_PER_LCORE(_ip_var) -= nb_pkt - nb_tx;
 
-	inc_tx_burst_stats(fs, nb_tx);
 	if (unlikely(nb_tx < nb_pkt)) {
-		if (verbose_level > 0 && fs->fwd_dropped == 0)
+		/*
+		 * common_fwd_stream_transmit() already added this burst's
+		 * drops to fs->fwd_dropped, so the "first drop ever" one-shot
+		 * diagnostic must compare against this burst's drop count,
+		 * not against 0 (which would now never match).
+		 */
+		if (verbose_level > 0 &&
+		    fs->fwd_dropped == (uint64_t)(nb_pkt - nb_tx))
 			printf("port %d tx_queue %d - drop "
@@ -417,8 +402,6 @@ pkt_burst_transmit(struct fwd_stream *fs)
fs->tx_port, fs->tx_queue,
(unsigned) nb_pkt, (unsigned) nb_tx,
(unsigned) (nb_pkt - nb_tx));
- fs->fwd_dropped += (nb_pkt - nb_tx);
- rte_pktmbuf_free_bulk(&pkts_burst[nb_tx], nb_pkt - nb_tx);
}
return true;