On 9/28/2022 6:25 AM, Gagandeep Singh wrote:
> From: brick <brick.yang@nxp.com>
>
> Check if there exists free enqueue descriptors before enqueuing Tx
> packet. Also try to free enqueue descriptors in case they are not
> free.
>
> Fixes: ed1cdbed6a15 ("net/dpaa2: support multiple Tx queues enqueue for ordered")
> Cc: stable@dpdk.org
>
> Signed-off-by: brick <brick.yang@nxp.com>
Can you please use the name tag in the form "Name Surname <email in lower case>", e.g.:
Signed-off-by: Brick Yang <brick.yang@nxp.com>
<...>
> + DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
> + eth_data, dpaa2_q[loop]->fqid);
> +
> + /*Check if the queue is congested*/
Syntax: it is more common to put a space after the opening '/*' and before the closing '*/', i.e. "/* Check if the queue is congested */".
> + retry_count = 0;
> + while (qbman_result_SCN_state(dpaa2_q[loop]->cscn)) {
> + retry_count++;
> + /* Retry for some time before giving up */
> + if (retry_count > CONG_RETRY_COUNT)
> + goto send_frames;
> + }
> +
> + /*Prepare enqueue descriptor*/
ditto
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2017,2019-2021 NXP
+ * Copyright 2017,2019-2022 NXP
*/
#include <assert.h>
@@ -175,7 +175,7 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
if (retry_count > DPAA2_EV_TX_RETRY_COUNT) {
num_tx += loop;
nb_events -= loop;
- return num_tx + loop;
+ return num_tx;
}
} else {
loop += ret;
@@ -1015,9 +1015,7 @@ dpaa2_eventdev_txa_enqueue(void *port,
txq[i] = rte_eth_devices[m[i]->port].data->tx_queues[qid];
}
- dpaa2_dev_tx_multi_txq_ordered(txq, m, nb_events);
-
- return nb_events;
+ return dpaa2_dev_tx_multi_txq_ordered(txq, m, nb_events);
}
static struct eventdev_ops dpaa2_eventdev_ops = {
@@ -1525,7 +1525,7 @@ dpaa2_dev_tx_multi_txq_ordered(void **queue,
uint32_t loop, retry_count;
int32_t ret;
struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
- uint32_t frames_to_send;
+ uint32_t frames_to_send, num_free_eq_desc = 0;
struct rte_mempool *mp;
struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
struct dpaa2_queue *dpaa2_q[MAX_TX_RING_SLOTS];
@@ -1547,16 +1547,44 @@ dpaa2_dev_tx_multi_txq_ordered(void **queue,
}
swp = DPAA2_PER_LCORE_PORTAL;
- for (loop = 0; loop < nb_pkts; loop++) {
+ frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
+ dpaa2_eqcr_size : nb_pkts;
+
+ for (loop = 0; loop < frames_to_send; loop++) {
dpaa2_q[loop] = (struct dpaa2_queue *)queue[loop];
eth_data = dpaa2_q[loop]->eth_data;
priv = eth_data->dev_private;
+ if (!priv->en_loose_ordered) {
+ if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
+ if (!num_free_eq_desc) {
+ num_free_eq_desc = dpaa2_free_eq_descriptors();
+ if (!num_free_eq_desc)
+ goto send_frames;
+ }
+ num_free_eq_desc--;
+ }
+ }
+
+ DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
+ eth_data, dpaa2_q[loop]->fqid);
+
+ /*Check if the queue is congested*/
+ retry_count = 0;
+ while (qbman_result_SCN_state(dpaa2_q[loop]->cscn)) {
+ retry_count++;
+ /* Retry for some time before giving up */
+ if (retry_count > CONG_RETRY_COUNT)
+ goto send_frames;
+ }
+
+ /*Prepare enqueue descriptor*/
qbman_eq_desc_clear(&eqdesc[loop]);
+
if (*dpaa2_seqn(*bufs) && priv->en_ordered) {
order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
dpaa2_set_enqueue_descriptor(order_sendq,
- (*bufs),
- &eqdesc[loop]);
+ (*bufs),
+ &eqdesc[loop]);
} else {
qbman_eq_desc_set_no_orp(&eqdesc[loop],
DPAA2_EQ_RESP_ERR_FQ);
@@ -1564,14 +1592,6 @@ dpaa2_dev_tx_multi_txq_ordered(void **queue,
dpaa2_q[loop]->fqid);
}
- retry_count = 0;
- while (qbman_result_SCN_state(dpaa2_q[loop]->cscn)) {
- retry_count++;
- /* Retry for some time before giving up */
- if (retry_count > CONG_RETRY_COUNT)
- goto send_frames;
- }
-
if (likely(RTE_MBUF_DIRECT(*bufs))) {
mp = (*bufs)->pool;
/* Check the basic scenario and set
@@ -1591,7 +1611,6 @@ dpaa2_dev_tx_multi_txq_ordered(void **queue,
&fd_arr[loop],
mempool_to_bpid(mp));
bufs++;
- dpaa2_q[loop]++;
continue;
}
} else {
@@ -1637,18 +1656,19 @@ dpaa2_dev_tx_multi_txq_ordered(void **queue,
}
bufs++;
- dpaa2_q[loop]++;
}
send_frames:
frames_to_send = loop;
loop = 0;
+ retry_count = 0;
while (loop < frames_to_send) {
ret = qbman_swp_enqueue_multiple_desc(swp, &eqdesc[loop],
&fd_arr[loop],
frames_to_send - loop);
if (likely(ret > 0)) {
loop += ret;
+ retry_count = 0;
} else {
retry_count++;
if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
@@ -1834,7 +1854,7 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
retry_count = 0;
while (i < loop) {
ret = qbman_swp_enqueue_multiple_desc(swp,
- &eqdesc[loop], &fd_arr[i], loop - i);
+ &eqdesc[i], &fd_arr[i], loop - i);
if (unlikely(ret < 0)) {
retry_count++;
if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)