@@ -166,6 +166,97 @@ gdma_post_work_request(struct mana_gdma_queue *queue,
return 0;
}
+#ifdef RTE_ARCH_32
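+/*
+ * On 32-bit platforms a 64-bit doorbell value cannot be written
+ * atomically, so the hardware's 32-bit "short" doorbell format is
+ * used instead. A short doorbell carries a 16-bit tail increment
+ * rather than an absolute tail pointer.
+ */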
+union gdma_short_doorbell_entry {
+ uint32_t as_uint32;
+
+ struct {
+ uint32_t tail_ptr_incr : 16; /* Number of CQEs */
+ uint32_t id : 12;
+ uint32_t reserved : 3;
+ uint32_t arm : 1;
+ } cq;
+
+ struct {
+ uint32_t tail_ptr_incr : 16; /* In number of bytes */
+ uint32_t id : 12;
+ uint32_t reserved : 4;
+ } rq;
+
+ struct {
+ uint32_t tail_ptr_incr : 16; /* In number of bytes */
+ uint32_t id : 12;
+ uint32_t reserved : 4;
+ } sq;
+
+ struct {
+ uint32_t tail_ptr_incr : 16; /* Number of EQEs */
+ uint32_t id : 12;
+ uint32_t reserved : 3;
+ uint32_t arm : 1;
+ } eq;
+}; /* HW DATA */
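+/*
+ * Illustrative encoding, assuming the little-endian bitfield layout
+ * of the supported targets: an RQ short doorbell for queue id 5 with
+ * a 64-byte tail increment is (5 << 16) | 64 = 0x00050040.
+ */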
+
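+/* Offsets of the short doorbell registers within the doorbell page */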
+enum {
+ DOORBELL_SHORT_OFFSET_SQ = 0x10,
+ DOORBELL_SHORT_OFFSET_RQ = 0x410,
+ DOORBELL_SHORT_OFFSET_CQ = 0x810,
+ DOORBELL_SHORT_OFFSET_EQ = 0xFF0,
+};
+
+/*
+ * Write a 32-bit short doorbell to notify the hardware of new activity.
+ */
+int
+mana_ring_short_doorbell(void *db_page, enum gdma_queue_types queue_type,
+ uint32_t queue_id, uint32_t tail_incr, uint8_t arm)
+{
+ uint8_t *addr = db_page;
+ union gdma_short_doorbell_entry e = {};
+
+ if ((queue_id & ~GDMA_SHORT_DB_QID_MASK) ||
+ (tail_incr & ~GDMA_SHORT_DB_INC_MASK)) {
+ DP_LOG(ERR, "%s: queue_id %u or "
+ "tail_incr %u overflowed, queue type %d",
+ __func__, queue_id, tail_incr, queue_type);
+ return -EINVAL;
+ }
+
+ switch (queue_type) {
+ case GDMA_QUEUE_SEND:
+ e.sq.id = queue_id;
+ e.sq.tail_ptr_incr = tail_incr;
+ addr += DOORBELL_SHORT_OFFSET_SQ;
+ break;
+
+ case GDMA_QUEUE_RECEIVE:
+ e.rq.id = queue_id;
+ e.rq.tail_ptr_incr = tail_incr;
+ addr += DOORBELL_SHORT_OFFSET_RQ;
+ break;
+
+ case GDMA_QUEUE_COMPLETION:
+ e.cq.id = queue_id;
+ e.cq.tail_ptr_incr = tail_incr;
+ e.cq.arm = arm;
+ addr += DOORBELL_SHORT_OFFSET_CQ;
+ break;
+
+ default:
+ DP_LOG(ERR, "Unsupported queue type %d", queue_type);
+ return -EINVAL;
+ }
+
+ /* Ensure all writes are done before ringing doorbell */
+ rte_wmb();
+
+ DP_LOG(DEBUG, "db_page %p addr %p queue_id %u type %u tail %u arm %u",
+ db_page, addr, queue_id, queue_type, tail_incr, arm);
+
+ rte_write32(e.as_uint32, addr);
+ return 0;
+}
+#else
union gdma_doorbell_entry {
uint64_t as_uint64;
@@ -248,6 +339,7 @@ mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type,
rte_write64(e.as_uint64, addr);
return 0;
}
+#endif
/*
* Poll completion queue for completions.
@@ -50,6 +50,21 @@ struct mana_shared_data {
#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256
+/* Short doorbell definitions, used on 32-bit builds only */
+#ifdef RTE_ARCH_32
+#define GDMA_SHORT_DB_INC_MASK 0xffff
+#define GDMA_SHORT_DB_QID_MASK 0xfff
+
+#define GDMA_SHORT_DB_MAX_WQE (0x10000 / GDMA_WQE_ALIGNMENT_UNIT_SIZE)
+
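+/* The short doorbell tail increment is a 16-bit byte count, so the
+ * accumulated increment must stay below 0x10000 bytes, i.e. below
+ * GDMA_SHORT_DB_MAX_WQE basic WQE units. The thresholds below trigger
+ * a ring once less than one maximum-size WQE of headroom remains.
+ */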
+#define TX_WQE_SHORT_DB_THRESHOLD \
+ (GDMA_SHORT_DB_MAX_WQE - \
+ (MAX_TX_WQE_SIZE / GDMA_WQE_ALIGNMENT_UNIT_SIZE))
+#define RX_WQE_SHORT_DB_THRESHOLD \
+ (GDMA_SHORT_DB_MAX_WQE - \
+ (MAX_RX_WQE_SIZE / GDMA_WQE_ALIGNMENT_UNIT_SIZE))
+#endif
+
/* Values from the GDMA specification document, WQE format description */
#define INLINE_OOB_SMALL_SIZE_IN_BYTES 8
#define INLINE_OOB_LARGE_SIZE_IN_BYTES 24
@@ -425,6 +440,11 @@ struct mana_rxq {
*/
uint32_t desc_ring_head, desc_ring_tail;
+#ifdef RTE_ARCH_32
+ /* WQE increment count accumulated between short doorbell rings */
+ uint32_t wqe_cnt_to_short_db;
+#endif
+
struct mana_gdma_queue gdma_rq;
struct mana_gdma_queue gdma_cq;
struct gdma_comp *gdma_comp_buf;
@@ -455,8 +475,14 @@ extern int mana_logtype_init;
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#ifdef RTE_ARCH_32
+int mana_ring_short_doorbell(void *db_page, enum gdma_queue_types queue_type,
+ uint32_t queue_id, uint32_t tail_incr,
+ uint8_t arm);
+#else
int mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type,
uint32_t queue_id, uint32_t tail, uint8_t arm);
+#endif
int mana_rq_ring_doorbell(struct mana_rxq *rxq);
int gdma_post_work_request(struct mana_gdma_queue *queue,
@@ -39,10 +39,18 @@ mana_rq_ring_doorbell(struct mana_rxq *rxq)
/* Hardware Spec specifies that software client should set 0 for
* wqe_cnt for Receive Queues.
*/
+#ifdef RTE_ARCH_32
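+ /* The short doorbell takes the byte increment since the last
+ * ring rather than the absolute queue head.
+ */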
+ ret = mana_ring_short_doorbell(db_page, GDMA_QUEUE_RECEIVE,
+ rxq->gdma_rq.id,
+ rxq->wqe_cnt_to_short_db *
+ GDMA_WQE_ALIGNMENT_UNIT_SIZE,
+ 0);
+#else
ret = mana_ring_doorbell(db_page, GDMA_QUEUE_RECEIVE,
rxq->gdma_rq.id,
rxq->gdma_rq.head * GDMA_WQE_ALIGNMENT_UNIT_SIZE,
0);
+#endif
if (ret)
DP_LOG(ERR, "failed to ring RX doorbell ret %d", ret);
@@ -97,6 +105,9 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq)
/* update queue for tracking pending packets */
desc->pkt = mbuf;
desc->wqe_size_in_bu = wqe_size_in_bu;
+#ifdef RTE_ARCH_32
+ rxq->wqe_cnt_to_short_db += wqe_size_in_bu;
+#endif
rxq->desc_ring_head = (rxq->desc_ring_head + 1) % rxq->num_desc;
} else {
DP_LOG(DEBUG, "failed to post recv ret %d", ret);
@@ -115,12 +126,22 @@ mana_alloc_and_post_rx_wqes(struct mana_rxq *rxq)
int ret;
uint32_t i;
+#ifdef RTE_ARCH_32
+ rxq->wqe_cnt_to_short_db = 0;
+#endif
for (i = 0; i < rxq->num_desc; i++) {
ret = mana_alloc_and_post_rx_wqe(rxq);
if (ret) {
DP_LOG(ERR, "failed to post RX ret = %d", ret);
return ret;
}
+
+#ifdef RTE_ARCH_32
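+ /* Ring a short doorbell before the accumulated count
+ * outgrows the 16-bit tail increment field.
+ */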
+ if (rxq->wqe_cnt_to_short_db > RX_WQE_SHORT_DB_THRESHOLD) {
+ mana_rq_ring_doorbell(rxq);
+ rxq->wqe_cnt_to_short_db = 0;
+ }
+#endif
}
mana_rq_ring_doorbell(rxq);
@@ -397,6 +418,10 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
uint32_t i;
int polled = 0;
+#ifdef RTE_ARCH_32
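+ /* Short doorbells report an increment, not an absolute tail,
+ * so WQEs reposted during this burst are counted from zero.
+ */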
+ rxq->wqe_cnt_to_short_db = 0;
+#endif
+
repoll:
/* Polling on new completions if we have no backlog */
if (rxq->comp_buf_idx == rxq->comp_buf_len) {
@@ -505,6 +530,16 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
wqe_posted++;
if (pkt_received == pkts_n)
break;
+
+#ifdef RTE_ARCH_32
+ /* Ring short doorbell if approaching the wqe increment
+ * limit.
+ */
+ if (rxq->wqe_cnt_to_short_db > RX_WQE_SHORT_DB_THRESHOLD) {
+ mana_rq_ring_doorbell(rxq);
+ rxq->wqe_cnt_to_short_db = 0;
+ }
+#endif
}
rxq->backlog_idx = pkt_idx;
@@ -525,6 +560,15 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
return pkt_received;
}
+#ifdef RTE_ARCH_32
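+/* CQ arming is not implemented for the short doorbell path, so Rx
+ * interrupts are unavailable on 32-bit builds.
+ */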
+static int
+mana_arm_cq(struct mana_rxq *rxq __rte_unused, uint8_t arm __rte_unused)
+{
+ DP_LOG(ERR, "Do not support in 32 bit");
+
+ return -ENODEV;
+}
+#else
static int
mana_arm_cq(struct mana_rxq *rxq, uint8_t arm)
{
@@ -538,6 +582,7 @@ mana_arm_cq(struct mana_rxq *rxq, uint8_t arm)
return mana_ring_doorbell(priv->db_page, GDMA_QUEUE_COMPLETION,
rxq->gdma_cq.id, head, arm);
}
+#endif
int
mana_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
@@ -176,6 +176,9 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
void *db_page;
uint16_t pkt_sent = 0;
uint32_t num_comp, i;
+#ifdef RTE_ARCH_32
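+ /* WQE units posted since the last short doorbell ring */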
+ uint32_t wqe_count = 0;
+#endif
/* Process send completions from GDMA */
num_comp = gdma_poll_completion_queue(&txq->gdma_cq,
@@ -418,6 +421,20 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
DP_LOG(DEBUG, "nb_pkts %u pkt[%d] sent",
nb_pkts, pkt_idx);
+#ifdef RTE_ARCH_32
+ wqe_count += wqe_size_in_bu;
+ if (wqe_count > TX_WQE_SHORT_DB_THRESHOLD) {
+ /* wqe_count is approaching the short doorbell
+ * increment limit. Stop processing more packets
+ * and ring the short doorbell now.
+ */
+ DP_LOG(DEBUG, "wqe_count %u reaching limit, pkt_sent %d",
+ wqe_count, pkt_sent);
+ break;
+ }
+#endif
} else {
DP_LOG(DEBUG, "pkt[%d] failed to post send ret %d",
pkt_idx, ret);
@@ -436,11 +453,19 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
if (pkt_sent) {
+#ifdef RTE_ARCH_32
+ ret = mana_ring_short_doorbell(db_page, GDMA_QUEUE_SEND,
+ txq->gdma_sq.id,
+ wqe_count *
+ GDMA_WQE_ALIGNMENT_UNIT_SIZE,
+ 0);
+#else
ret = mana_ring_doorbell(db_page, GDMA_QUEUE_SEND,
txq->gdma_sq.id,
txq->gdma_sq.head *
GDMA_WQE_ALIGNMENT_UNIT_SIZE,
0);
+#endif
if (ret)
DP_LOG(ERR, "mana_ring_doorbell failed ret %d", ret);
}