[v2,2/2] net/cnxk: dump Rx descriptor info to file
Checks
Commit Message
Add support for eth_rx_descriptor_dump for cn9k and cn10k.
This patch dumps the contents of the received packet descriptor from the
CQ to a file for debugging.
Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
drivers/net/cnxk/cn10k_ethdev.c | 67 +++++++++++++++++++++++++++++++++
drivers/net/cnxk/cn9k_ethdev.c | 53 ++++++++++++++++++++++++++
2 files changed, 120 insertions(+)
Comments
On Thu, Dec 7, 2023 at 9:13 PM Rakesh Kudurumalla
<rkudurumalla@marvell.com> wrote:
>
> Add support for eth_rx_descriptor_dump for cn9k and cn10k.
> This patch dumps the contents of the received packet descriptor from the
> CQ to a file for debugging.
>
> Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
Series applied to dpdk-next-net-mrvl/for-main. Thanks
@@ -656,6 +656,72 @@ cn10k_nix_reassembly_conf_set(struct rte_eth_dev *eth_dev,
return rc;
}
+/* Return the number of CQ entries (received descriptors) currently
+ * pending on @rxq, or 0 on a CQ_OP_STATUS error.
+ */
+static int
+cn10k_nix_rx_avail_get(struct cn10k_eth_rxq *rxq)
+{
+	uint32_t cq_head, cq_tail;
+	uint64_t sts;
+
+	/* LDADDA-based read of CQ_OP_STATUS to avoid reordering */
+	sts = roc_atomic64_add_sync(rxq->wdata, rxq->cq_status);
+	/* Bail out if the CQ_OP_STATUS operation reported an error */
+	if (sts & (BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) |
+		   BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR)))
+		return 0;
+
+	/* Status word carries tail in bits [19:0], head in bits [39:20] */
+	cq_tail = sts & 0xFFFFF;
+	cq_head = (sts >> 20) & 0xFFFFF;
+
+	/* Ring distance from head to tail, accounting for wrap-around */
+	if (cq_tail >= cq_head)
+		return cq_tail - cq_head;
+	return cq_tail - cq_head + rxq->qmask + 1;
+}
+
+/* rte_eth_dev_ops::eth_rx_descriptor_dump callback for cn10k.
+ * Dumps @num CQ descriptors starting @offset entries past the current
+ * head of Rx queue @qid to @file. Returns 0 on success, -EINVAL when
+ * the requested range exceeds the descriptors currently available.
+ */
+static int
+cn10k_rx_descriptor_dump(const struct rte_eth_dev *eth_dev, uint16_t qid,
+			 uint16_t offset, uint16_t num, FILE *file)
+{
+	struct cn10k_eth_rxq *rxq = eth_dev->data->rx_queues[qid];
+	const uint64_t data_off = rxq->data_off;
+	const uint32_t qmask = rxq->qmask;
+	const uintptr_t desc = rxq->desc;
+	struct cpt_parse_hdr_s *cpth;
+	uint32_t head = rxq->head;
+	struct nix_cqe_hdr_s *cq;
+	uint16_t count = 0;
+	int available_pkts;
+	uint64_t cq_w1;
+
+	available_pkts = cn10k_nix_rx_avail_get(rxq);
+
+	/* Reject requests that reach past the CQEs currently available */
+	if ((offset + num - 1) >= available_pkts) {
+		plt_err("Invalid BD num=%u\n", num);
+		return -EINVAL;
+	}
+
+	while (count < num) {
+		/* Advance in CQE-sized units and wrap on the ring mask.
+		 * (Previously count/offset were added as raw bytes and
+		 * head was never advanced, so the wrong CQEs were read.)
+		 */
+		cq = (struct nix_cqe_hdr_s *)(desc +
+			CQE_SZ((head + count + offset) & qmask));
+		cq_w1 = *((const uint64_t *)cq + 1);
+		/* NOTE(review): W1 bit 11 is taken as the security-offload
+		 * indicator; such CQEs carry a CPT parse header reachable
+		 * through the mbuf instead of a plain NIX completion.
+		 */
+		if (cq_w1 & BIT(11)) {
+			rte_iova_t buff = *((rte_iova_t *)((uint64_t *)cq + 9));
+			struct rte_mbuf *mbuf =
+				(struct rte_mbuf *)(buff - data_off);
+			cpth = (struct cpt_parse_hdr_s *)
+			       ((uintptr_t)mbuf + (uint16_t)data_off);
+			roc_cpt_parse_hdr_dump(file, cpth);
+		} else {
+			roc_nix_cqe_dump(file, cq);
+		}
+
+		count++;
+	}
+	return 0;
+}
+
static int
cn10k_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
int mark_yellow, int mark_red,
@@ -794,6 +860,7 @@ nix_eth_dev_ops_override(void)
cn10k_nix_reassembly_capability_get;
cnxk_eth_dev_ops.ip_reassembly_conf_get = cn10k_nix_reassembly_conf_get;
cnxk_eth_dev_ops.ip_reassembly_conf_set = cn10k_nix_reassembly_conf_set;
+ cnxk_eth_dev_ops.eth_rx_descriptor_dump = cn10k_rx_descriptor_dump;
}
/* Update platform specific tm ops */
@@ -664,6 +664,58 @@ cn9k_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
return rc;
}
+/* Return the number of CQ entries (received descriptors) currently
+ * pending on @rxq, or 0 on a CQ_OP_STATUS error.
+ */
+static int
+cn9k_nix_rx_avail_get(struct cn9k_eth_rxq *rxq)
+{
+	uint32_t cq_head, cq_tail;
+	uint64_t sts;
+
+	/* LDADDA-based read of CQ_OP_STATUS to avoid reordering */
+	sts = roc_atomic64_add_sync(rxq->wdata, rxq->cq_status);
+	/* Bail out if the CQ_OP_STATUS operation reported an error */
+	if (sts & (BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) |
+		   BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR)))
+		return 0;
+
+	/* Status word carries tail in bits [19:0], head in bits [39:20] */
+	cq_tail = sts & 0xFFFFF;
+	cq_head = (sts >> 20) & 0xFFFFF;
+
+	/* Ring distance from head to tail, accounting for wrap-around */
+	if (cq_tail >= cq_head)
+		return cq_tail - cq_head;
+	return cq_tail - cq_head + rxq->qmask + 1;
+}
+
+/* rte_eth_dev_ops::eth_rx_descriptor_dump callback for cn9k.
+ * Dumps @num CQ descriptors starting @offset entries past the current
+ * head of Rx queue @qid to @file. Returns 0 on success, -EINVAL when
+ * the requested range exceeds the descriptors currently available.
+ */
+static int
+cn9k_rx_descriptor_dump(const struct rte_eth_dev *eth_dev, uint16_t qid,
+			uint16_t offset, uint16_t num, FILE *file)
+{
+	struct cn9k_eth_rxq *rxq = eth_dev->data->rx_queues[qid];
+	const uint32_t qmask = rxq->qmask;
+	const uintptr_t desc = rxq->desc;
+	uint32_t head = rxq->head;
+	struct nix_cqe_hdr_s *cq;
+	uint16_t count = 0;
+	int available_pkts;
+
+	available_pkts = cn9k_nix_rx_avail_get(rxq);
+
+	/* Reject requests that reach past the CQEs currently available */
+	if ((offset + num - 1) >= available_pkts) {
+		plt_err("Invalid BD num=%u\n", num);
+		return -EINVAL;
+	}
+
+	while (count < num) {
+		/* Advance in CQE-sized units and wrap on the ring mask.
+		 * (Previously count/offset were added as raw bytes and
+		 * head was never advanced, so the wrong CQEs were read.)
+		 */
+		cq = (struct nix_cqe_hdr_s *)(desc +
+			CQE_SZ((head + count + offset) & qmask));
+		roc_nix_cqe_dump(file, cq);
+		count++;
+	}
+	return 0;
+}
+
/* Update platform specific eth dev ops */
static void
nix_eth_dev_ops_override(void)
@@ -687,6 +739,7 @@ nix_eth_dev_ops_override(void)
cnxk_eth_dev_ops.mtr_ops_get = NULL;
cnxk_eth_dev_ops.timesync_read_tx_timestamp =
cn9k_nix_timesync_read_tx_timestamp;
+ cnxk_eth_dev_ops.eth_rx_descriptor_dump = cn9k_rx_descriptor_dump;
}
/* Update platform specific eth dev ops */