[v4,5/5] dma/dpaa: support statistics

Message ID 20211109043910.4016824-6-g.singh@nxp.com (mailing list archive)
State Accepted, archived
Delegated to: Thomas Monjalon
Headers
Series Introduce DPAA DMA driver |

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/github-robot: build success github build: passed
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS
ci/iol-spell-check-testing warning Testing issues
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-aarch64-unit-testing success Testing PASS
ci/iol-aarch64-compile-testing success Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-intel-Performance success Performance Testing PASS

Commit Message

Gagandeep Singh Nov. 9, 2021, 4:39 a.m. UTC
  This patch supports the DMA statistics read and reset
operations.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 doc/guides/dmadevs/dpaa.rst  |  1 +
 drivers/dma/dpaa/dpaa_qdma.c | 51 +++++++++++++++++++++++++++++++++++-
 drivers/dma/dpaa/dpaa_qdma.h |  1 +
 3 files changed, 52 insertions(+), 1 deletion(-)
  

Patch

diff --git a/doc/guides/dmadevs/dpaa.rst b/doc/guides/dmadevs/dpaa.rst
index 4fbd8a25fb..7d51c8c4cd 100644
--- a/doc/guides/dmadevs/dpaa.rst
+++ b/doc/guides/dmadevs/dpaa.rst
@@ -57,6 +57,7 @@  The DPAA DMA implements following features in the dmadev API:
 - Supports DMA silent mode.
 - Supports issuing DMA of data within memory without hogging CPU while
   performing DMA operation.
+- Supports statistics.
 
 Platform Requirement
 ~~~~~~~~~~~~~~~~~~~~
diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index ebe6211f08..cb272c700f 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -319,7 +319,7 @@  static struct fsl_qdma_queue
 			queue_temp->count = 0;
 			queue_temp->pending = 0;
 			queue_temp->virt_head = queue_temp->cq;
-
+			queue_temp->stats = (struct rte_dma_stats){0};
 		}
 	}
 	return queue_head;
@@ -619,6 +619,9 @@  fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
 		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
 		reg |= FSL_QDMA_BCQMR_EI_BE;
 		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
+		fsl_queue->stats.submitted++;
+	} else {
+		fsl_queue->pending++;
 	}
 	return fsl_comp->index;
 }
@@ -754,6 +757,7 @@  dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 		reg |= FSL_QDMA_BCQMR_EI_BE;
 		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
 		fsl_queue->pending--;
+		fsl_queue->stats.submitted++;
 	}
 
 	return 0;
@@ -793,6 +797,9 @@  dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	void *block;
 	int intr;
 	void *status = fsl_qdma->status_base;
+	struct fsl_qdma_chan *fsl_chan =
+		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
+	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
 
 	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
 	if (intr) {
@@ -812,6 +819,7 @@  dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 		qdma_writel(0xffffffff,
 			    status + FSL_QDMA_DEDR);
 		intr = qdma_readl(status + FSL_QDMA_DEDR);
+		fsl_queue->stats.errors++;
 	}
 
 	block = fsl_qdma->block_base +
@@ -819,6 +827,7 @@  dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 
 	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
 						last_idx, st);
+	fsl_queue->stats.completed += intr;
 
 	return intr;
 }
@@ -834,6 +843,9 @@  dpaa_qdma_dequeue(void *dev_private,
 	void *block;
 	int intr;
 	void *status = fsl_qdma->status_base;
+	struct fsl_qdma_chan *fsl_chan =
+		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
+	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
 
 	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
 	if (intr) {
@@ -854,6 +866,7 @@  dpaa_qdma_dequeue(void *dev_private,
 			    status + FSL_QDMA_DEDR);
 		intr = qdma_readl(status + FSL_QDMA_DEDR);
 		*has_error = true;
+		fsl_queue->stats.errors++;
 	}
 
 	block = fsl_qdma->block_base +
@@ -861,16 +874,52 @@  dpaa_qdma_dequeue(void *dev_private,
 
 	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
 						last_idx, NULL);
+	fsl_queue->stats.completed += intr;
 
 	return intr;
 }
 
+static int
+dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan,
+		    struct rte_dma_stats *rte_stats, uint32_t size)
+{
+	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
+	struct fsl_qdma_chan *fsl_chan =
+		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
+	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct rte_dma_stats *stats = &fsl_queue->stats;
+
+	/* Compare with the struct size, not sizeof(pointer). */
+	if (rte_stats == NULL)
+		return -EINVAL;
+	if (size < sizeof(*rte_stats))
+		return -EINVAL;
+	*rte_stats = *stats;
+
+	return 0;
+}
+
+static int
+dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
+{
+	struct fsl_qdma_engine *eng = dmadev->data->dev_private;
+	struct fsl_qdma_queue *queue;
+
+	/* Zero the per-queue counters backing this virtual channel. */
+	queue = eng->chans[eng->vchan_map[vchan]].queue;
+	queue->stats = (struct rte_dma_stats){0};
+
+	return 0;
+}
+
 static struct rte_dma_dev_ops dpaa_qdma_ops = {
 	.dev_info_get		  = dpaa_info_get,
 	.dev_configure            = dpaa_qdma_configure,
 	.dev_start                = dpaa_qdma_start,
 	.dev_close                = dpaa_qdma_close,
 	.vchan_setup		  = dpaa_qdma_queue_setup,
+	.stats_get		  = dpaa_qdma_stats_get,
+	.stats_reset		  = dpaa_qdma_stats_reset,
 };
 
 static int
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 6d0ac58317..bf49b2d5d9 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -210,6 +210,7 @@  struct fsl_qdma_queue {
 	u32			pending;
 	struct fsl_qdma_format	*cq;
 	void			*block_base;
+	struct rte_dma_stats	stats;
 };
 
 struct fsl_qdma_comp {