From: Long Li <longli@microsoft.com>
With all the RX queues created, MANA can use those queues to receive
packets.
Signed-off-by: Long Li <longli@microsoft.com>
---
Change log:
v2:
Add mana_ to all function names.
Rename a camel-case identifier.
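
Once applied, this receive path is driven through the standard ethdev
burst API. A minimal sketch (not part of the patch; assumes a port
already configured and started with the queues set up by the earlier
patches in this series):

	#include <rte_ethdev.h>
	#include <rte_mbuf.h>

	struct rte_mbuf *bufs[32];
	uint16_t i, nb;

	/* rte_eth_rx_burst() on a MANA port dispatches to mana_rx_burst() */
	nb = rte_eth_rx_burst(0 /* port */, 0 /* queue */, bufs, RTE_DIM(bufs));
	for (i = 0; i < nb; i++)
		rte_pktmbuf_free(bufs[i]);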
 doc/guides/nics/features/mana.ini |   2 +
 drivers/net/mana/mana.c           |   2 +
 drivers/net/mana/mana.h           |  37 +++++++++++
 drivers/net/mana/mp.c             |   2 +
 drivers/net/mana/rx.c             | 107 +++++++++++++++++++++++++++++++
 5 files changed, 150 insertions(+)
--- a/doc/guides/nics/features/mana.ini
+++ b/doc/guides/nics/features/mana.ini
@@ -6,6 +6,8 @@
[Features]
Link status = P
Linux = Y
+L3 checksum offload = Y
+L4 checksum offload = Y
Multiprocess aware = Y
Queue start/stop = Y
Removal event = Y
--- a/drivers/net/mana/mana.c
+++ b/drivers/net/mana/mana.c
@@ -950,6 +950,8 @@ static int mana_pci_probe_mac(struct rte_pci_driver *pci_drv __rte_unused,
/* fd is not used after mapping doorbell */
close(fd);
+	eth_dev->rx_pkt_burst = mana_rx_burst;
+
rte_spinlock_lock(&mana_shared_data->lock);
mana_shared_data->secondary_cnt++;
mana_local_data.secondary_cnt++;
--- a/drivers/net/mana/mana.h
+++ b/drivers/net/mana/mana.h
@@ -178,6 +178,11 @@ struct gdma_work_request {
enum mana_cqe_type {
CQE_INVALID = 0,
+
+	CQE_RX_OKAY = 1,
+	CQE_RX_COALESCED_4 = 2,
+	CQE_RX_OBJECT_FENCE = 3,
+	CQE_RX_TRUNCATED = 4,
};
struct mana_cqe_header {
@@ -203,6 +208,35 @@ struct mana_cqe_header {
(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \
NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
+struct mana_rx_comp_per_packet_info {
+	uint32_t packet_length : 16;
+	uint32_t reserved0 : 16;
+	uint32_t reserved1;
+	uint32_t packet_hash;
+}; /* HW DATA */
+#define RX_COM_OOB_NUM_PACKETINFO_SEGMENTS 4
+
+struct mana_rx_comp_oob {
+	struct mana_cqe_header cqe_hdr;
+
+	uint32_t rx_vlan_id : 12;
+	uint32_t rx_vlan_tag_present : 1;
+	uint32_t rx_outer_ip_header_checksum_succeeded : 1;
+	uint32_t rx_outer_ip_header_checksum_failed : 1;
+	uint32_t reserved : 1;
+	uint32_t rx_hash_type : 9;
+	uint32_t rx_ip_header_checksum_succeeded : 1;
+	uint32_t rx_ip_header_checksum_failed : 1;
+	uint32_t rx_tcp_checksum_succeeded : 1;
+	uint32_t rx_tcp_checksum_failed : 1;
+	uint32_t rx_udp_checksum_succeeded : 1;
+	uint32_t rx_udp_checksum_failed : 1;
+	uint32_t reserved1 : 1;
+	struct mana_rx_comp_per_packet_info
+		packet_info[RX_COM_OOB_NUM_PACKETINFO_SEGMENTS];
+	uint32_t received_wqe_offset;
+}; /* HW DATA */
+
struct gdma_wqe_dma_oob {
uint32_t reserved:24;
uint32_t last_v_bytes:8;
@@ -371,6 +405,9 @@ int gdma_post_work_request(struct mana_gdma_queue *queue,
struct gdma_posted_wqe_info *wqe_info);
uint8_t *gdma_get_wqe_pointer(struct mana_gdma_queue *queue);
+uint16_t mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **rx_pkts,
+		       uint16_t pkts_n);
+
uint16_t mana_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts,
uint16_t pkts_n);
--- a/drivers/net/mana/mp.c
+++ b/drivers/net/mana/mp.c
@@ -138,6 +138,8 @@ static int mana_mp_secondary_handle(const struct rte_mp_msg *mp_msg,
case MANA_MP_REQ_START_RXTX:
DRV_LOG(INFO, "Port %u starting datapath", dev->data->port_id);
+		dev->rx_pkt_burst = mana_rx_burst;
+
rte_mb();
res->result = 0;
--- a/drivers/net/mana/rx.c
+++ b/drivers/net/mana/rx.c
@@ -344,3 +344,110 @@ int mana_start_rx_queues(struct rte_eth_dev *dev)
mana_stop_rx_queues(dev);
return ret;
}
+
+uint16_t
+mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+	uint16_t pkt_received = 0, cqe_processed = 0;
+	struct mana_rxq *rxq = dpdk_rxq;
+	struct mana_priv *priv = rxq->priv;
+	struct gdma_comp comp;
+	struct rte_mbuf *mbuf;
+	int ret;
+
+	while (pkt_received < pkts_n &&
+	       gdma_poll_completion_queue(&rxq->gdma_cq, &comp) == 1) {
+		struct mana_rxq_desc *desc;
+		struct mana_rx_comp_oob *oob =
+			(struct mana_rx_comp_oob *)&comp.completion_data[0];
+
+		if (comp.work_queue_number != rxq->gdma_rq.id) {
+			DRV_LOG(ERR, "rxq comp id mismatch wqid=0x%x rcid=0x%x",
+				comp.work_queue_number, rxq->gdma_rq.id);
+			rxq->stats.errors++;
+			break;
+		}
+
+		desc = &rxq->desc_ring[rxq->desc_ring_tail];
+		rxq->gdma_rq.tail += desc->wqe_size_in_bu;
+		mbuf = desc->pkt;
+
+		switch (oob->cqe_hdr.cqe_type) {
+		case CQE_RX_OKAY:
+			/* Proceed to process mbuf */
+			break;
+
+		case CQE_RX_TRUNCATED:
+			DRV_LOG(ERR, "Drop a truncated packet");
+			rxq->stats.errors++;
+			rte_pktmbuf_free(mbuf);
+			goto drop;
+
+		case CQE_RX_COALESCED_4:
+			DRV_LOG(ERR, "RX coalescing is not supported");
+			rte_pktmbuf_free(mbuf);
+			goto drop;
+
+		default:
+			DRV_LOG(ERR, "Unknown RX CQE type %d",
+				oob->cqe_hdr.cqe_type);
+			rte_pktmbuf_free(mbuf);
+			goto drop;
+		}
+
+		DRV_LOG(DEBUG, "mana_rx_comp_oob CQE_RX_OKAY rxq %p", rxq);
+
+		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+		mbuf->nb_segs = 1;
+		mbuf->next = NULL;
+		mbuf->pkt_len = oob->packet_info[0].packet_length;
+		mbuf->data_len = oob->packet_info[0].packet_length;
+		mbuf->port = priv->port_id;
+
+		if (oob->rx_ip_header_checksum_succeeded)
+			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+
+		if (oob->rx_ip_header_checksum_failed)
+			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+
+		if (oob->rx_outer_ip_header_checksum_failed)
+			mbuf->ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
+
+		if (oob->rx_tcp_checksum_succeeded ||
+		    oob->rx_udp_checksum_succeeded)
+			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+
+		if (oob->rx_tcp_checksum_failed ||
+		    oob->rx_udp_checksum_failed)
+			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
+
+		if (oob->rx_hash_type == MANA_HASH_L3 ||
+		    oob->rx_hash_type == MANA_HASH_L4) {
+			mbuf->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
+			mbuf->hash.rss = oob->packet_info[0].packet_hash;
+		}
+
+		pkts[pkt_received++] = mbuf;
+		rxq->stats.packets++;
+		rxq->stats.bytes += mbuf->data_len;
+
+drop:
+		rxq->desc_ring_tail++;
+		if (rxq->desc_ring_tail >= rxq->num_desc)
+			rxq->desc_ring_tail = 0;
+
+		cqe_processed++;
+
+		/* Post another request */
+		ret = mana_alloc_and_post_rx_wqe(rxq);
+		if (ret) {
+			DRV_LOG(ERR, "failed to post rx wqe ret=%d", ret);
+			break;
+		}
+	}
+
+	if (cqe_processed)
+		mana_rq_ring_doorbell(rxq);
+
+	return pkt_received;
+}
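
The checksum results set above reach applications through mbuf ol_flags.
A minimal sketch (not part of the patch; hypothetical handling) of
consuming the L4 status on the application side:

	/* RTE_MBUF_F_RX_L4_CKSUM_MASK isolates the GOOD/BAD/UNKNOWN/NONE state */
	if ((mbuf->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) ==
	    RTE_MBUF_F_RX_L4_CKSUM_BAD)
		rte_pktmbuf_free(mbuf); /* e.g. drop, or re-verify in software */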