@@ -129,6 +129,15 @@ extern struct rte_mempool *dpaa2_tx_sg_pool;
#define DPAA2_POOL_SIZE 2048
/* SG pool cache size */
#define DPAA2_POOL_CACHE_SIZE 256
+/* Structure to record external and indirect
+ * buffers that the driver must free in software.
+ */
+struct sw_buf_free {
+ /* To which packet this segment belongs */
+ uint16_t pkt_id;
+ /* The actual segment */
+ struct rte_mbuf *seg;
+};

/* enable timestamp in mbuf */
extern bool dpaa2_enable_ts[];
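The struct above drives a record-then-free scheme: while frame descriptors are built, segments that the hardware cannot return to a buffer pool are noted in a table, and only the entries belonging to packets actually accepted by hardware are freed afterwards. A minimal self-contained sketch of the pattern; record_sw_free() and flush_sw_free() are hypothetical helper names, the patch open-codes both halves:

#include <stdint.h>
#include <rte_mbuf.h>

struct sw_buf_free {
	uint16_t pkt_id;      /* index of the packet within the burst */
	struct rte_mbuf *seg; /* segment to free in software */
};

/* Record one segment while building frame descriptors. */
static inline void
record_sw_free(struct sw_buf_free *tbl, uint32_t *count,
	       uint16_t pkt_id, struct rte_mbuf *seg)
{
	tbl[*count].pkt_id = pkt_id;
	tbl[*count].seg = seg;
	(*count)++;
}

/* After enqueue: free only segments of packets that were sent. */
static inline void
flush_sw_free(const struct sw_buf_free *tbl, uint32_t count,
	      uint16_t num_tx)
{
	uint32_t i;

	for (i = 0; i < count; i++)
		if (tbl[i].pkt_id < num_tx)
			rte_pktmbuf_free_seg(tbl[i].seg);
}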
@@ -403,9 +403,12 @@ eth_fd_to_mbuf(const struct qbman_fd *fd,
static int __rte_noinline __rte_hot
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
struct qbman_fd *fd,
+ struct sw_buf_free *free_buf,
+ uint32_t *free_count,
+ uint32_t pkt_id,
uint16_t bpid)
{
- struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
+ struct rte_mbuf *cur_seg = mbuf, *mi, *temp;
struct qbman_sge *sgt, *sge = NULL;
int i, offset = 0;
@@ -486,10 +489,11 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
#endif
}
}
- cur_seg = cur_seg->next;
} else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
+ free_buf[*free_count].seg = cur_seg;
+ free_buf[*free_count].pkt_id = pkt_id;
+ ++*free_count;
DPAA2_SET_FLE_IVP(sge);
- cur_seg = cur_seg->next;
} else {
/* Get owner MBUF from indirect buffer */
mi = rte_mbuf_from_indirect(cur_seg);
@@ -503,11 +507,11 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
mempool_to_bpid(mi->pool));
rte_mbuf_refcnt_update(mi, 1);
}
- prev_seg = cur_seg;
- cur_seg = cur_seg->next;
- prev_seg->next = NULL;
- rte_pktmbuf_free(prev_seg);
+ free_buf[*free_count].seg = cur_seg;
+ free_buf[*free_count].pkt_id = pkt_id;
+ ++*free_count;
}
+ cur_seg = cur_seg->next;
}
DPAA2_SG_SET_FINAL(sge, true);
return 0;
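Deferring the free also lets the segment walk stay uniform: cur_seg = cur_seg->next is hoisted below the three ownership branches instead of each branch advancing on its own. Previously each indirect clone was unlinked from the chain and freed during descriptor construction, so if the enqueue later failed, the packet handed back to the caller had already been partially freed; recording the segment keeps the chain intact until the outcome of the burst is known. A simplified restatement of the loop, using the hypothetical record_sw_free() from the sketch above (only the RTE_MBUF_* macros are real DPDK API):

static void
classify_segments(struct rte_mbuf *mbuf, struct sw_buf_free *free_buf,
		  uint32_t *free_count, uint16_t pkt_id)
{
	struct rte_mbuf *cur_seg;

	for (cur_seg = mbuf; cur_seg != NULL; cur_seg = cur_seg->next) {
		if (RTE_MBUF_DIRECT(cur_seg)) {
			/* pool buffer: hardware frees it via the bpid */
		} else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
			/* external data: no bpid, free in software */
			record_sw_free(free_buf, free_count, pkt_id, cur_seg);
		} else {
			/* indirect: owner kept alive via refcount,
			 * the clone itself is freed in software */
			record_sw_free(free_buf, free_count, pkt_id, cur_seg);
		}
	}
}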
@@ -515,11 +519,19 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
- struct qbman_fd *fd, uint16_t bpid) __rte_unused;
+ struct qbman_fd *fd,
+ struct sw_buf_free *buf_to_free,
+ uint32_t *free_count,
+ uint32_t pkt_id,
+ uint16_t bpid) __rte_unused;
static void __rte_noinline __rte_hot
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
- struct qbman_fd *fd, uint16_t bpid)
+ struct qbman_fd *fd,
+ struct sw_buf_free *buf_to_free,
+ uint32_t *free_count,
+ uint32_t pkt_id,
+ uint16_t bpid)
{
DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
@@ -540,6 +552,9 @@ eth_mbuf_to_fd(struct rte_mbuf *mbuf,
(void **)&mbuf, 1, 0);
#endif
} else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
+ buf_to_free[*free_count].seg = mbuf;
+ buf_to_free[*free_count].pkt_id = pkt_id;
+ ++*free_count;
DPAA2_SET_FD_IVP(fd);
} else {
struct rte_mbuf *mi;
@@ -549,7 +564,10 @@ eth_mbuf_to_fd(struct rte_mbuf *mbuf,
DPAA2_SET_FD_IVP(fd);
else
rte_mbuf_refcnt_update(mi, 1);
- rte_pktmbuf_free(mbuf);
+
+ buf_to_free[*free_count].seg = mbuf;
+ buf_to_free[*free_count].pkt_id = pkt_id;
+ ++*free_count;
}
}
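For reference, the two non-direct mbuf flavours these branches handle can be produced with standard DPDK APIs. A hedged sketch (the pool, buffer, and length arguments are hypothetical; the rte_pktmbuf_* calls are real API):

#include <rte_mbuf.h>

static void ext_free_cb(void *addr, void *opaque)
{
	/* application-owned buffer: reclaim it here */
	(void)addr; (void)opaque;
}

static void
make_nondirect(struct rte_mempool *mp, void *ext_buf,
	       rte_iova_t ext_iova, uint16_t ext_len)
{
	struct rte_mbuf *direct = rte_pktmbuf_alloc(mp);
	struct rte_mbuf *clone, *ext;
	struct rte_mbuf_ext_shared_info *shinfo;

	/* indirect: 'clone' borrows direct's data buffer; the driver
	 * keeps the owner alive via refcount and frees the clone */
	clone = rte_pktmbuf_alloc(mp);
	rte_pktmbuf_attach(clone, direct);

	/* external: data lives outside any mempool, so no bpid exists */
	ext = rte_pktmbuf_alloc(mp);
	shinfo = rte_pktmbuf_ext_shinfo_init_helper(ext_buf, &ext_len,
						    ext_free_cb, NULL);
	rte_pktmbuf_attach_extbuf(ext, ext_buf, ext_iova, ext_len, shinfo);
}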
@@ -1226,7 +1244,8 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
struct dpaa2_dev_priv *priv = eth_data->dev_private;
uint32_t flags[MAX_TX_RING_SLOTS] = {0};
- struct rte_mbuf **orig_bufs = bufs;
+ struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
+ uint32_t free_count = 0;
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
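The table is sized for the worst case in one burst: up to dpaa2_dqrr_size frames, each scatter-gather with up to DPAA2_MAX_SGS segments, every segment needing a software free. Since dpaa2_dqrr_size is set at probe time, this is a C variable-length array on the stack. Restated with illustrative figures (the real values live in the dpaa2 headers):

/* Hypothetical figures: with DPAA2_MAX_SGS == 128 and
 * dpaa2_dqrr_size == 16 the table holds 2048 entries, i.e.
 * 2048 * sizeof(struct sw_buf_free) bytes of stack per burst. */
struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
uint32_t free_count = 0;	/* entries recorded so far in this burst */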
@@ -1324,11 +1343,17 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
mp = (*bufs)->pool;
if (eth_mbuf_to_sg_fd(*bufs,
&fd_arr[loop],
+ buf_to_free,
+ &free_count,
+ loop,
mempool_to_bpid(mp)))
goto send_n_return;
} else {
eth_mbuf_to_fd(*bufs,
- &fd_arr[loop], 0);
+ &fd_arr[loop],
+ buf_to_free,
+ &free_count,
+ loop, 0);
}
bufs++;
#ifdef RTE_LIBRTE_IEEE1588
@@ -1373,11 +1398,17 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
if (unlikely((*bufs)->nb_segs > 1)) {
if (eth_mbuf_to_sg_fd(*bufs,
&fd_arr[loop],
+ buf_to_free,
+ &free_count,
+ loop,
bpid))
goto send_n_return;
} else {
eth_mbuf_to_fd(*bufs,
- &fd_arr[loop], bpid);
+ &fd_arr[loop],
+ buf_to_free,
+ &free_count,
+ loop, bpid);
}
}
#ifdef RTE_LIBRTE_IEEE1588
@@ -1410,12 +1441,9 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
dpaa2_q->tx_pkts += num_tx;
- loop = 0;
- while (loop < num_tx) {
- if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
- rte_pktmbuf_free(*orig_bufs);
- orig_bufs++;
- loop++;
+ for (loop = 0; loop < free_count; loop++) {
+ if (buf_to_free[loop].pkt_id < num_tx)
+ rte_pktmbuf_free_seg(buf_to_free[loop].seg);
}
return num_tx;
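The pkt_id filter is what makes the deferral safe: entries are recorded while descriptors are built, before it is known how many frames QBMan will accept. A worked example with illustrative values:

/* Suppose a burst of 4 packets where packet 1 had an indirect middle
 * segment and packet 3 used an external buffer, and hardware accepted
 * only num_tx = 2 frames.  The table then holds:
 *
 *   { .pkt_id = 1, .seg = <clone seg of pkt 1>   }  -> freed (1 < 2)
 *   { .pkt_id = 3, .seg = <extbuf mbuf of pkt 3> }  -> kept  (3 >= 2)
 *
 * Packet 3 was never enqueued, so its mbufs still belong to the
 * caller and must not be freed here. */
for (loop = 0; loop < free_count; loop++) {
	if (buf_to_free[loop].pkt_id < num_tx)
		rte_pktmbuf_free_seg(buf_to_free[loop].seg);
}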
@@ -1445,12 +1473,9 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
skip_tx:
dpaa2_q->tx_pkts += num_tx;
- loop = 0;
- while (loop < num_tx) {
- if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
- rte_pktmbuf_free(*orig_bufs);
- orig_bufs++;
- loop++;
+ for (loop = 0; loop < free_count; loop++) {
+ if (buf_to_free[loop].pkt_id < num_tx)
+ rte_pktmbuf_free_seg(buf_to_free[loop].seg);
}
return num_tx;
@@ -1523,7 +1548,7 @@ dpaa2_dev_tx_multi_txq_ordered(void **queue,
struct rte_mbuf **bufs, uint16_t nb_pkts)
{
/* Function to transmit the frames to multiple Tx queues. */
- uint32_t loop, retry_count;
+ uint32_t loop, i, retry_count;
int32_t ret;
struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
uint32_t frames_to_send, num_free_eq_desc = 0;
@@ -1536,6 +1561,8 @@ dpaa2_dev_tx_multi_txq_ordered(void **queue,
struct rte_eth_dev_data *eth_data;
struct dpaa2_dev_priv *priv;
struct dpaa2_queue *order_sendq;
+ struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
+ uint32_t free_count = 0;
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
@@ -1647,11 +1674,17 @@ dpaa2_dev_tx_multi_txq_ordered(void **queue,
if (unlikely((*bufs)->nb_segs > 1)) {
if (eth_mbuf_to_sg_fd(*bufs,
&fd_arr[loop],
+ buf_to_free,
+ &free_count,
+ loop,
bpid))
goto send_frames;
} else {
eth_mbuf_to_fd(*bufs,
- &fd_arr[loop], bpid);
+ &fd_arr[loop],
+ buf_to_free,
+ &free_count,
+ loop, bpid);
}
}
@@ -1676,6 +1709,10 @@ dpaa2_dev_tx_multi_txq_ordered(void **queue,
}
}
+ for (i = 0; i < free_count; i++) {
+ if (buf_to_free[i].pkt_id < loop)
+ rte_pktmbuf_free_seg(buf_to_free[i].seg);
+ }
return loop;
}
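The multi-queue ordered path runs the same sweep, but bounded by loop rather than num_tx. Restated for illustration:

/* 'loop' is what this function returns as the number of frames sent
 * (see 'return loop;' above), so it plays the role num_tx plays in
 * the single-queue path: entries with pkt_id >= loop belong to
 * frames that were never handed to QBMan. */
for (i = 0; i < free_count; i++)
	if (buf_to_free[i].pkt_id < loop)
		rte_pktmbuf_free_seg(buf_to_free[i].seg);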
@@ -1698,6 +1735,8 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
int32_t ret;
uint16_t num_tx = 0;
uint16_t bpid;
+ struct sw_buf_free buf_to_free[DPAA2_MAX_SGS * dpaa2_dqrr_size];
+ uint32_t free_count = 0;
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
@@ -1810,11 +1849,17 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
if (unlikely((*bufs)->nb_segs > 1)) {
if (eth_mbuf_to_sg_fd(*bufs,
&fd_arr[loop],
+ buf_to_free,
+ &free_count,
+ loop,
bpid))
goto send_n_return;
} else {
eth_mbuf_to_fd(*bufs,
- &fd_arr[loop], bpid);
+ &fd_arr[loop],
+ buf_to_free,
+ &free_count,
+ loop, bpid);
}
}
bufs++;
@@ -1843,6 +1888,11 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
nb_pkts -= loop;
}
dpaa2_q->tx_pkts += num_tx;
+ for (loop = 0; loop < free_count; loop++) {
+ if (buf_to_free[loop].pkt_id < num_tx)
+ rte_pktmbuf_free_seg(buf_to_free[loop].seg);
+ }
+
return num_tx;
send_n_return:
@@ -1867,6 +1917,11 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
skip_tx:
dpaa2_q->tx_pkts += num_tx;
+ for (loop = 0; loop < free_count; loop++) {
+ if (buf_to_free[loop].pkt_id < num_tx)
+ rte_pktmbuf_free_seg(buf_to_free[loop].seg);
+ }
+
return num_tx;
}