From patchwork Thu Dec 7 13:46:51 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Rakesh Kudurumalla X-Patchwork-Id: 134928 X-Patchwork-Delegate: jerinj@marvell.com Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 62D9B43699; Thu, 7 Dec 2023 14:47:06 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 5177942EF9; Thu, 7 Dec 2023 14:47:06 +0100 (CET) Received: from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com [67.231.156.173]) by mails.dpdk.org (Postfix) with ESMTP id 54E4F42EF8 for ; Thu, 7 Dec 2023 14:47:04 +0100 (CET) Received: from pps.filterd (m0045851.ppops.net [127.0.0.1]) by mx0b-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id 3B79PGow032068 for ; Thu, 7 Dec 2023 05:47:03 -0800 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h=from : to : cc : subject : date : message-id : in-reply-to : references : mime-version : content-transfer-encoding : content-type; s=pfpt0220; bh=+6yJNfhRvkwoHCCnwJB/v3CH676t+5aADXOinQDUyFo=; b=IQiI3I/UGxKvuLnta4K6kH41razjCfQDd4WB14mAD3FL4o7O66qZT4ZDChbljLjYCeMC pAl0fw0KbcK1V3oeM/t4Kp3VKZzvv9w5J2qLfC62noKM9s9DnQs9D4qvKZ4yt+H+4hzm 3nV4bo+tEEaitg7gR/8Oo7wWl9VbG/Vz/K9DhSHlViYy2TNb5Gn2mnhZe6g3Ks+BX7Qp ew91/pM9p4DmgzqeQ9K/cBXjIeElggR2r+J55giUvDxqqOJ/MZDpDySSxkN57IPt5fE2 K2X4c+LjTRu7kQRqpvG1Sn1qEmVy/EYxWyW7tMVHrGtuGw5yYNUCrlEoPcl0/ZgTJm5o cw== Received: from dc5-exch02.marvell.com ([199.233.59.182]) by mx0b-0016f401.pphosted.com (PPS) with ESMTPS id 3uubdd8sqv-1 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT) for ; Thu, 07 Dec 2023 05:47:03 -0800 Received: from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH02.marvell.com (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.48; Thu, 7 Dec 2023 
05:47:00 -0800 Received: from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.48 via Frontend Transport; Thu, 7 Dec 2023 05:47:00 -0800 Received: from localhost.localdomain (unknown [10.28.36.154]) by maili.marvell.com (Postfix) with ESMTP id DA6D03F7051; Thu, 7 Dec 2023 05:46:57 -0800 (PST) From: Rakesh Kudurumalla To: Nithin Dabilpuram , Kiran Kumar K , Sunil Kumar Kori , Satha Rao CC: , , Rakesh Kudurumalla Subject: [PATCH v2 2/2] net/cnxk: dump Rx descriptor info to file Date: Thu, 7 Dec 2023 19:16:51 +0530 Message-ID: <20231207134651.1718457-2-rkudurumalla@marvell.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20231207134651.1718457-1-rkudurumalla@marvell.com> References: <20231205094539.1377142-1-rkudurumalla@marvell.com> <20231207134651.1718457-1-rkudurumalla@marvell.com> MIME-Version: 1.0 X-Proofpoint-ORIG-GUID: yC0u5Pt_8HW_zylBukVp_QUSgdaBS7HE X-Proofpoint-GUID: yC0u5Pt_8HW_zylBukVp_QUSgdaBS7HE X-Proofpoint-Virus-Version: vendor=baseguard engine=ICAP:2.0.272,Aquarius:18.0.997,Hydra:6.0.619,FMLib:17.11.176.26 definitions=2023-12-07_11,2023-12-07_01,2023-05-22_02 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Add support for eth_rx_descriptor_dump for cn9k and cn10k. 
This patch dumps contents of received packet descriptor from CQ
for debug to file

Signed-off-by: Rakesh Kudurumalla
---
 drivers/net/cnxk/cn10k_ethdev.c | 67 +++++++++++++++++++++++++++++++++
 drivers/net/cnxk/cn9k_ethdev.c  | 53 ++++++++++++++++++++++++++
 2 files changed, 120 insertions(+)

diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 4a4e97287c..a2e943a3d0 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -656,6 +656,72 @@ cn10k_nix_reassembly_conf_set(struct rte_eth_dev *eth_dev,
 	return rc;
 }
 
+static int
+cn10k_nix_rx_avail_get(struct cn10k_eth_rxq *rxq)
+{
+	uint32_t qmask = rxq->qmask;
+	uint64_t reg, head, tail;
+	int available;
+
+	/* Use LDADDA version to avoid reorder */
+	reg = roc_atomic64_add_sync(rxq->wdata, rxq->cq_status);
+	/* CQ_OP_STATUS operation error */
+	if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
+	    reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
+		return 0;
+	tail = reg & 0xFFFFF;
+	head = (reg >> 20) & 0xFFFFF;
+	if (tail < head)
+		available = tail - head + qmask + 1;
+	else
+		available = tail - head;
+
+	return available;
+}
+
+static int
+cn10k_rx_descriptor_dump(const struct rte_eth_dev *eth_dev, uint16_t qid,
+			 uint16_t offset, uint16_t num, FILE *file)
+{
+	struct cn10k_eth_rxq *rxq = eth_dev->data->rx_queues[qid];
+	const uint64_t data_off = rxq->data_off;
+	const uint32_t qmask = rxq->qmask;
+	const uintptr_t desc = rxq->desc;
+	struct cpt_parse_hdr_s *cpth;
+	uint32_t head = rxq->head;
+	struct nix_cqe_hdr_s *cq;
+	uint16_t count = 0;
+	int available_pkts;
+	uint64_t cq_w1;
+
+	available_pkts = cn10k_nix_rx_avail_get(rxq);
+
+	if ((offset + num - 1) >= available_pkts) {
+		plt_err("Invalid BD num=%u", num);
+		return -EINVAL;
+	}
+
+	/* Dump CQ ring entries head+offset .. head+offset+num-1, with wrap */
+	while (count < num) {
+		cq = (struct nix_cqe_hdr_s *)(desc +
+				CQE_SZ((head + count + offset) & qmask));
+		cq_w1 = *((const uint64_t *)cq + 1);
+		if (cq_w1 & BIT(11)) {
+			rte_iova_t buff = *((rte_iova_t *)((uint64_t *)cq + 9));
+			struct rte_mbuf *mbuf =
+				(struct rte_mbuf *)(buff - data_off);
+			cpth = (struct cpt_parse_hdr_s *)
+			       ((uintptr_t)mbuf + (uint16_t)data_off);
+			roc_cpt_parse_hdr_dump(file, cpth);
+		} else {
+			roc_nix_cqe_dump(file, cq);
+		}
+
+		count++;
+	}
+	return 0;
+}
+
 static int
 cn10k_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
 			   int mark_yellow, int mark_red,
@@ -794,6 +860,7 @@ nix_eth_dev_ops_override(void)
 		cn10k_nix_reassembly_capability_get;
 	cnxk_eth_dev_ops.ip_reassembly_conf_get = cn10k_nix_reassembly_conf_get;
 	cnxk_eth_dev_ops.ip_reassembly_conf_set = cn10k_nix_reassembly_conf_set;
+	cnxk_eth_dev_ops.eth_rx_descriptor_dump = cn10k_rx_descriptor_dump;
 }
 
 /* Update platform specific tm ops */
diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index bae4dda5e2..67f21a9c7f 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -664,6 +664,58 @@ cn9k_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
 	return rc;
 }
 
+static int
+cn9k_nix_rx_avail_get(struct cn9k_eth_rxq *rxq)
+{
+	uint32_t qmask = rxq->qmask;
+	uint64_t reg, head, tail;
+	int available;
+
+	/* Use LDADDA version to avoid reorder */
+	reg = roc_atomic64_add_sync(rxq->wdata, rxq->cq_status);
+	/* CQ_OP_STATUS operation error */
+	if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
+	    reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
+		return 0;
+	tail = reg & 0xFFFFF;
+	head = (reg >> 20) & 0xFFFFF;
+	if (tail < head)
+		available = tail - head + qmask + 1;
+	else
+		available = tail - head;
+
+	return available;
+}
+
+static int
+cn9k_rx_descriptor_dump(const struct rte_eth_dev *eth_dev, uint16_t qid,
+			uint16_t offset, uint16_t num, FILE *file)
+{
+	struct cn9k_eth_rxq *rxq = eth_dev->data->rx_queues[qid];
+	const uint32_t qmask = rxq->qmask;
+	const uintptr_t desc = rxq->desc;
+	uint32_t head = rxq->head;
+	struct nix_cqe_hdr_s *cq;
+	uint16_t count = 0;
+	int available_pkts;
+
+	available_pkts = cn9k_nix_rx_avail_get(rxq);
+
+	if ((offset + num - 1) >= available_pkts) {
+		plt_err("Invalid BD num=%u", num);
+		return -EINVAL;
+	}
+
+	/* Dump CQ ring entries head+offset .. head+offset+num-1, with wrap */
+	while (count < num) {
+		cq = (struct nix_cqe_hdr_s *)(desc +
+				CQE_SZ((head + count + offset) & qmask));
+		roc_nix_cqe_dump(file, cq);
+		count++;
+	}
+	return 0;
+}
+
 /* Update platform specific eth dev ops */
 static void
 nix_eth_dev_ops_override(void)
@@ -687,6 +739,7 @@ nix_eth_dev_ops_override(void)
 	cnxk_eth_dev_ops.mtr_ops_get = NULL;
 	cnxk_eth_dev_ops.timesync_read_tx_timestamp =
 		cn9k_nix_timesync_read_tx_timestamp;
+	cnxk_eth_dev_ops.eth_rx_descriptor_dump = cn9k_rx_descriptor_dump;
 }
 
 /* Update platform specific eth dev ops */