@@ -589,7 +589,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
txq->mz = mz;
txq->sw_ring = rte_zmalloc_socket("cpfl tx sw ring",
- sizeof(struct idpf_tx_entry) * len,
+ sizeof(struct ci_tx_entry) * len,
RTE_CACHE_LINE_SIZE, socket_id);
if (txq->sw_ring == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
@@ -220,7 +220,7 @@ RTE_EXPORT_INTERNAL_SYMBOL(idpf_qc_split_tx_descq_reset)
void
idpf_qc_split_tx_descq_reset(struct idpf_tx_queue *txq)
{
- struct idpf_tx_entry *txe;
+ struct ci_tx_entry *txe;
uint32_t i, size;
uint16_t prev;
@@ -278,7 +278,7 @@ RTE_EXPORT_INTERNAL_SYMBOL(idpf_qc_single_tx_queue_reset)
void
idpf_qc_single_tx_queue_reset(struct idpf_tx_queue *txq)
{
- struct idpf_tx_entry *txe;
+ struct ci_tx_entry *txe;
uint32_t i, size;
uint16_t prev;
@@ -773,7 +773,7 @@ idpf_split_tx_free(struct idpf_tx_queue *cq)
volatile struct idpf_splitq_tx_compl_desc *compl_ring = cq->compl_ring;
volatile struct idpf_splitq_tx_compl_desc *txd;
uint16_t next = cq->tx_tail;
- struct idpf_tx_entry *txe;
+ struct ci_tx_entry *txe;
struct idpf_tx_queue *txq;
uint16_t gen, qid, q_head;
uint16_t nb_desc_clean;
@@ -882,9 +882,9 @@ idpf_dp_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
struct idpf_tx_queue *txq = (struct idpf_tx_queue *)tx_queue;
volatile struct idpf_flex_tx_sched_desc *txr;
volatile struct idpf_flex_tx_sched_desc *txd;
- struct idpf_tx_entry *sw_ring;
+ struct ci_tx_entry *sw_ring;
union idpf_tx_offload tx_offload = {0};
- struct idpf_tx_entry *txe, *txn;
+ struct ci_tx_entry *txe, *txn;
uint16_t nb_used, tx_id, sw_id;
struct rte_mbuf *tx_pkt;
uint16_t nb_to_clean;
@@ -1326,7 +1326,7 @@ static inline int
idpf_xmit_cleanup(struct idpf_tx_queue *txq)
{
uint16_t last_desc_cleaned = txq->last_desc_cleaned;
- struct idpf_tx_entry *sw_ring = txq->sw_ring;
+ struct ci_tx_entry *sw_ring = txq->sw_ring;
uint16_t nb_tx_desc = txq->nb_tx_desc;
uint16_t desc_to_clean_to;
uint16_t nb_tx_to_clean;
@@ -1371,8 +1371,8 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
volatile struct idpf_base_tx_desc *txd;
volatile struct idpf_base_tx_desc *txr;
union idpf_tx_offload tx_offload = {0};
- struct idpf_tx_entry *txe, *txn;
- struct idpf_tx_entry *sw_ring;
+ struct ci_tx_entry *txe, *txn;
+ struct ci_tx_entry *sw_ring;
struct idpf_tx_queue *txq;
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
@@ -10,6 +10,7 @@
#include <rte_mbuf_core.h>
#include "idpf_common_device.h"
+#include "../common/tx.h"
#define IDPF_RX_MAX_BURST 32
@@ -148,12 +149,6 @@ struct idpf_rx_queue {
uint32_t hw_register_set;
};
-struct idpf_tx_entry {
- struct rte_mbuf *mbuf;
- uint16_t next_id;
- uint16_t last_id;
-};
-
/* Structure associated with each TX queue. */
struct idpf_tx_queue {
const struct rte_memzone *mz; /* memzone for Tx ring */
@@ -163,7 +158,7 @@ struct idpf_tx_queue {
struct idpf_splitq_tx_compl_desc *compl_ring;
};
rte_iova_t tx_ring_dma; /* Tx ring DMA address */
- struct idpf_tx_entry *sw_ring; /* address array of SW ring */
+ struct ci_tx_entry *sw_ring; /* address array of SW ring */
uint16_t nb_tx_desc; /* ring length */
uint16_t tx_tail; /* current value of tail */
@@ -209,10 +204,6 @@ union idpf_tx_offload {
};
};
-struct idpf_tx_vec_entry {
- struct rte_mbuf *mbuf;
-};
-
union idpf_tx_desc {
struct idpf_base_tx_desc *tx_ring;
struct idpf_flex_tx_sched_desc *desc_ring;
@@ -480,20 +480,11 @@ idpf_dp_singleq_recv_pkts_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, uint16
{
return _idpf_singleq_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts);
}
-static __rte_always_inline void
-idpf_tx_backlog_entry(struct idpf_tx_entry *txep,
- struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
- int i;
-
- for (i = 0; i < (int)nb_pkts; ++i)
- txep[i].mbuf = tx_pkts[i];
-}
static __rte_always_inline int
idpf_singleq_tx_free_bufs_vec(struct idpf_tx_queue *txq)
{
- struct idpf_tx_entry *txep;
+ struct ci_tx_entry *txep;
uint32_t n;
uint32_t i;
int nb_free = 0;
@@ -623,7 +614,7 @@ idpf_singleq_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts
{
struct idpf_tx_queue *txq = (struct idpf_tx_queue *)tx_queue;
volatile struct idpf_base_tx_desc *txdp;
- struct idpf_tx_entry *txep;
+ struct ci_tx_entry_vec *txep;
uint16_t n, nb_commit, tx_id;
uint64_t flags = IDPF_TX_DESC_CMD_EOP;
uint64_t rs = IDPF_TX_DESC_CMD_RS | flags;
@@ -640,13 +631,13 @@ idpf_singleq_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts
tx_id = txq->tx_tail;
txdp = &txq->idpf_tx_ring[tx_id];
- txep = &txq->sw_ring[tx_id];
+ txep = (struct ci_tx_entry_vec *)&txq->sw_ring[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
n = (uint16_t)(txq->nb_tx_desc - tx_id);
if (nb_commit >= n) {
- idpf_tx_backlog_entry(txep, tx_pkts, n);
+ ci_tx_backlog_entry_vec(txep, tx_pkts, n);
idpf_singleq_vtx(txdp, tx_pkts, n - 1, flags);
tx_pkts += (n - 1);
@@ -661,10 +652,10 @@ idpf_singleq_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts
/* avoid reach the end of ring */
txdp = &txq->idpf_tx_ring[tx_id];
- txep = &txq->sw_ring[tx_id];
+ txep = (struct ci_tx_entry_vec *)&txq->sw_ring[tx_id];
}
- idpf_tx_backlog_entry(txep, tx_pkts, nb_commit);
+ ci_tx_backlog_entry_vec(txep, tx_pkts, nb_commit);
idpf_singleq_vtx(txdp, tx_pkts, nb_commit, flags);
@@ -1001,7 +1001,7 @@ idpf_dp_splitq_recv_pkts_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
static __rte_always_inline int
idpf_tx_singleq_free_bufs_avx512(struct idpf_tx_queue *txq)
{
- struct idpf_tx_vec_entry *txep;
+ struct ci_tx_entry_vec *txep;
uint32_t n;
uint32_t i;
int nb_free = 0;
@@ -1114,16 +1114,6 @@ idpf_tx_singleq_free_bufs_avx512(struct idpf_tx_queue *txq)
return txq->tx_rs_thresh;
}
-static __rte_always_inline void
-tx_backlog_entry_avx512(struct idpf_tx_vec_entry *txep,
- struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
- int i;
-
- for (i = 0; i < (int)nb_pkts; ++i)
- txep[i].mbuf = tx_pkts[i];
-}
-
static __rte_always_inline void
idpf_singleq_vtx1(volatile struct idpf_base_tx_desc *txdp,
struct rte_mbuf *pkt, uint64_t flags)
@@ -1198,7 +1188,7 @@ idpf_singleq_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pk
{
struct idpf_tx_queue *txq = tx_queue;
volatile struct idpf_base_tx_desc *txdp;
- struct idpf_tx_vec_entry *txep;
+ struct ci_tx_entry_vec *txep;
uint16_t n, nb_commit, tx_id;
uint64_t flags = IDPF_TX_DESC_CMD_EOP;
uint64_t rs = IDPF_TX_DESC_CMD_RS | flags;
@@ -1223,7 +1213,7 @@ idpf_singleq_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pk
n = (uint16_t)(txq->nb_tx_desc - tx_id);
if (nb_commit >= n) {
- tx_backlog_entry_avx512(txep, tx_pkts, n);
+ ci_tx_backlog_entry_vec(txep, tx_pkts, n);
idpf_singleq_vtx(txdp, tx_pkts, n - 1, flags);
tx_pkts += (n - 1);
@@ -1242,7 +1232,7 @@ idpf_singleq_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pk
txep += tx_id;
}
- tx_backlog_entry_avx512(txep, tx_pkts, nb_commit);
+ ci_tx_backlog_entry_vec(txep, tx_pkts, nb_commit);
idpf_singleq_vtx(txdp, tx_pkts, nb_commit, flags);
@@ -1327,7 +1317,7 @@ idpf_splitq_scan_cq_ring(struct idpf_tx_queue *cq)
static __rte_always_inline int
idpf_tx_splitq_free_bufs_avx512(struct idpf_tx_queue *txq)
{
- struct idpf_tx_vec_entry *txep;
+ struct ci_tx_entry_vec *txep;
uint32_t n;
uint32_t i;
int nb_free = 0;
@@ -1502,7 +1492,7 @@ idpf_splitq_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkt
{
struct idpf_tx_queue *txq = (struct idpf_tx_queue *)tx_queue;
volatile struct idpf_flex_tx_sched_desc *txdp;
- struct idpf_tx_vec_entry *txep;
+ struct ci_tx_entry_vec *txep;
uint16_t n, nb_commit, tx_id;
/* bit2 is reserved and must be set to 1 according to Spec */
uint64_t cmd_dtype = IDPF_TXD_FLEX_FLOW_CMD_EOP;
@@ -1525,7 +1515,7 @@ idpf_splitq_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkt
n = (uint16_t)(txq->nb_tx_desc - tx_id);
if (nb_commit >= n) {
- tx_backlog_entry_avx512(txep, tx_pkts, n);
+ ci_tx_backlog_entry_vec(txep, tx_pkts, n);
idpf_splitq_vtx(txdp, tx_pkts, n - 1, cmd_dtype);
tx_pkts += (n - 1);
@@ -1544,7 +1534,7 @@ idpf_splitq_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkt
txep += tx_id;
}
- tx_backlog_entry_avx512(txep, tx_pkts, nb_commit);
+ ci_tx_backlog_entry_vec(txep, tx_pkts, nb_commit);
idpf_splitq_vtx(txdp, tx_pkts, nb_commit, cmd_dtype);
@@ -1601,7 +1591,7 @@ idpf_tx_release_mbufs_avx512(struct idpf_tx_queue *txq)
{
unsigned int i;
const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
- struct idpf_tx_vec_entry *swr = (void *)txq->sw_ring;
+ struct ci_tx_entry_vec *swr = (void *)txq->sw_ring;
if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc)
return;
@@ -13,6 +13,7 @@
#include "idpf_ethdev.h"
#include "idpf_rxtx.h"
+#include "../common/tx.h"
#define IDPF_TX_SINGLE_Q "tx_single"
#define IDPF_RX_SINGLE_Q "rx_single"
@@ -462,7 +462,7 @@ idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
txq->mz = mz;
txq->sw_ring = rte_zmalloc_socket("idpf tx sw ring",
- sizeof(struct idpf_tx_entry) * len,
+ sizeof(struct ci_tx_entry) * len,
RTE_CACHE_LINE_SIZE, socket_id);
if (txq->sw_ring == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
@@ -7,6 +7,7 @@
#include <idpf_common_rxtx.h>
#include "idpf_ethdev.h"
+#include "../common/tx.h"
/* In QLEN must be whole number of 32 descriptors. */
#define IDPF_ALIGN_RING_DESC 32
@@ -10,6 +10,7 @@
#include "idpf_ethdev.h"
#include "idpf_rxtx.h"
+#include "../common/rx.h"
#define IDPF_SCALAR_PATH 0
#define IDPF_VECTOR_PATH 1
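
Note: the idpf/cpfl-private SW ring entry types and backlog helpers removed above are replaced by the shared definitions pulled in via "../common/tx.h". As a rough, hedged sketch (not the verbatim header contents -- see drivers/net/intel/common/tx.h for the authoritative definitions), the drop-in replacements are expected to be layout-compatible along these lines:

/* Sketch of the shared Intel Tx SW ring entries this patch switches to.
 * Field layout is assumed to match the removed idpf_tx_entry /
 * idpf_tx_vec_entry structures, which is what makes the casts in the
 * AVX2 path (e.g. "(struct ci_tx_entry_vec *)&txq->sw_ring[tx_id]") safe. */
struct ci_tx_entry {
	struct rte_mbuf *mbuf;   /* mbuf associated with the Tx descriptor */
	uint16_t next_id;        /* index of the next descriptor in the ring */
	uint16_t last_id;        /* index of the last scattered descriptor */
};

struct ci_tx_entry_vec {
	struct rte_mbuf *mbuf;   /* vector paths only track the mbuf */
};

/* Shared replacement for the removed idpf_tx_backlog_entry() and
 * tx_backlog_entry_avx512() helpers: record the mbufs about to be
 * transmitted in the SW ring so they can be freed on completion. */
static __rte_always_inline void
ci_tx_backlog_entry_vec(struct ci_tx_entry_vec *txep,
			struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	uint16_t i;

	for (i = 0; i < nb_pkts; ++i)
		txep[i].mbuf = tx_pkts[i];
}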