[v7,08/11] app/mldev: enable support for queue pairs and size

Message ID 20230316211434.13409-9-syalavarthi@marvell.com (mailing list archive)
State Accepted, archived
Delegated to: Thomas Monjalon
Series Implementation of mldev test application

Checks

Context         Check     Description
ci/checkpatch   warning   coding style issues

Commit Message

Srikanth Yalavarthi March 16, 2023, 9:14 p.m. UTC
  Added support to create multiple queue-pairs per device for
enqueuing and dequeuing inference requests. The number of queue
pairs to create can be specified through the "--queue_pairs"
option, and the number of descriptors per queue pair can be
controlled through the "--queue_size" option. Inference requests
for a model are distributed across all available queue-pairs.
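
For reference, a minimal C sketch of the per-queue-pair setup loop
this patch adds to ml_inference_mldev_setup() (the helper name and
the reduced error handling are illustrative; the device call and
qp_conf fields match the diff below):

    /* Illustrative helper, not part of the patch: sets up one queue
     * pair per qp_id with "queue_size" descriptors each, mirroring
     * the loop added to ml_inference_mldev_setup(). */
    #include <rte_mldev.h>

    static int
    setup_queue_pairs(int16_t dev_id, uint16_t queue_pairs,
                      uint16_t queue_size, int socket_id)
    {
            struct rte_ml_dev_qp_conf qp_conf;
            uint16_t qp_id;
            int ret;

            for (qp_id = 0; qp_id < queue_pairs; qp_id++) {
                    /* --queue_size sets descriptors per queue pair */
                    qp_conf.nb_desc = queue_size;
                    /* no completion callback */
                    qp_conf.cb = NULL;

                    ret = rte_ml_dev_queue_pair_setup(dev_id, qp_id,
                                                      &qp_conf, socket_id);
                    if (ret != 0)
                            return ret;
            }

            return 0;
    }

With --queue_pairs 4 and --queue_size 16, the loop above sets up
queue pairs 0..3 with 16 descriptors each; each queue pair is then
served by one enqueue lcore and one dequeue lcore.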

Signed-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>
Acked-by: Anup Prabhu <aprabhu@marvell.com>
---
 app/test-mldev/ml_options.c            | 31 +++++++++-
 app/test-mldev/ml_options.h            |  4 ++
 app/test-mldev/test_common.c           |  2 +-
 app/test-mldev/test_inference_common.c | 79 +++++++++++++++++++++-----
 app/test-mldev/test_inference_common.h |  1 +
 doc/guides/tools/testmldev.rst         | 44 +++++++++++++-
 6 files changed, 140 insertions(+), 21 deletions(-)
  

Patch

diff --git a/app/test-mldev/ml_options.c b/app/test-mldev/ml_options.c
index 19f2e1279e..901adaed33 100644
--- a/app/test-mldev/ml_options.c
+++ b/app/test-mldev/ml_options.c
@@ -25,6 +25,8 @@  ml_options_default(struct ml_options *opt)
 	opt->nb_filelist = 0;
 	opt->repetitions = 1;
 	opt->burst_size = 1;
+	opt->queue_pairs = 1;
+	opt->queue_size = 1;
 	opt->debug = false;
 }
 
@@ -152,11 +154,30 @@  ml_parse_burst_size(struct ml_options *opt, const char *arg)
 	return parser_read_uint16(&opt->burst_size, arg);
 }
 
+static int
+ml_parse_queue_pairs(struct ml_options *opt, const char *arg)
+{
+	int ret;
+
+	ret = parser_read_uint16(&opt->queue_pairs, arg);
+
+	return ret;
+}
+
+static int
+ml_parse_queue_size(struct ml_options *opt, const char *arg)
+{
+	return parser_read_uint16(&opt->queue_size, arg);
+}
+
 static void
 ml_dump_test_options(const char *testname)
 {
-	if (strcmp(testname, "device_ops") == 0)
+	if (strcmp(testname, "device_ops") == 0) {
+		printf("\t\t--queue_pairs      : number of queue pairs to create\n"
+		       "\t\t--queue_size       : size fo queue-pair\n");
 		printf("\n");
+	}
 
 	if (strcmp(testname, "model_ops") == 0) {
 		printf("\t\t--models           : comma separated list of models\n");
@@ -167,7 +188,9 @@  ml_dump_test_options(const char *testname)
 	    (strcmp(testname, "inference_interleave") == 0)) {
 		printf("\t\t--filelist         : comma separated list of model, input and output\n"
 		       "\t\t--repetitions      : number of inference repetitions\n"
-		       "\t\t--burst_size       : inference burst size\n");
+		       "\t\t--burst_size       : inference burst size\n"
+		       "\t\t--queue_pairs      : number of queue pairs to create\n"
+		       "\t\t--queue_size       : size fo queue-pair\n");
 		printf("\n");
 	}
 }
@@ -195,6 +218,8 @@  static struct option lgopts[] = {
 	{ML_FILELIST, 1, 0, 0},
 	{ML_REPETITIONS, 1, 0, 0},
 	{ML_BURST_SIZE, 1, 0, 0},
+	{ML_QUEUE_PAIRS, 1, 0, 0},
+	{ML_QUEUE_SIZE, 1, 0, 0},
 	{ML_DEBUG, 0, 0, 0},
 	{ML_HELP, 0, 0, 0},
 	{NULL, 0, 0, 0}};
@@ -212,6 +237,8 @@  ml_opts_parse_long(int opt_idx, struct ml_options *opt)
 		{ML_FILELIST, ml_parse_filelist},
 		{ML_REPETITIONS, ml_parse_repetitions},
 		{ML_BURST_SIZE, ml_parse_burst_size},
+		{ML_QUEUE_PAIRS, ml_parse_queue_pairs},
+		{ML_QUEUE_SIZE, ml_parse_queue_size},
 	};
 
 	for (i = 0; i < RTE_DIM(parsermap); i++) {
diff --git a/app/test-mldev/ml_options.h b/app/test-mldev/ml_options.h
index 00342d8a0c..c4018ee9d1 100644
--- a/app/test-mldev/ml_options.h
+++ b/app/test-mldev/ml_options.h
@@ -19,6 +19,8 @@ 
 #define ML_FILELIST    ("filelist")
 #define ML_REPETITIONS ("repetitions")
 #define ML_BURST_SIZE  ("burst_size")
+#define ML_QUEUE_PAIRS ("queue_pairs")
+#define ML_QUEUE_SIZE  ("queue_size")
 #define ML_DEBUG       ("debug")
 #define ML_HELP	       ("help")
 
@@ -36,6 +38,8 @@  struct ml_options {
 	uint8_t nb_filelist;
 	uint64_t repetitions;
 	uint16_t burst_size;
+	uint16_t queue_pairs;
+	uint16_t queue_size;
 	bool debug;
 };
 
diff --git a/app/test-mldev/test_common.c b/app/test-mldev/test_common.c
index 8c4da4609a..016b31c6ba 100644
--- a/app/test-mldev/test_common.c
+++ b/app/test-mldev/test_common.c
@@ -75,7 +75,7 @@  ml_test_device_configure(struct ml_test *test, struct ml_options *opt)
 	/* configure device */
 	dev_config.socket_id = opt->socket_id;
 	dev_config.nb_models = t->dev_info.max_models;
-	dev_config.nb_queue_pairs = t->dev_info.max_queue_pairs;
+	dev_config.nb_queue_pairs = opt->queue_pairs;
 	ret = rte_ml_dev_configure(opt->dev_id, &dev_config);
 	if (ret != 0) {
 		ml_err("Failed to configure ml device, dev_id = %d\n", opt->dev_id);
diff --git a/app/test-mldev/test_inference_common.c b/app/test-mldev/test_inference_common.c
index 35323306de..b4ad3c4b72 100644
--- a/app/test-mldev/test_inference_common.c
+++ b/app/test-mldev/test_inference_common.c
@@ -66,7 +66,7 @@  ml_enqueue_single(void *arg)
 	req->fid = fid;
 
 enqueue_req:
-	burst_enq = rte_ml_enqueue_burst(t->cmn.opt->dev_id, 0, &op, 1);
+	burst_enq = rte_ml_enqueue_burst(t->cmn.opt->dev_id, args->qp_id, &op, 1);
 	if (burst_enq == 0)
 		goto enqueue_req;
 
@@ -103,7 +103,7 @@  ml_dequeue_single(void *arg)
 		return 0;
 
 dequeue_req:
-	burst_deq = rte_ml_dequeue_burst(t->cmn.opt->dev_id, 0, &op, 1);
+	burst_deq = rte_ml_dequeue_burst(t->cmn.opt->dev_id, args->qp_id, &op, 1);
 
 	if (likely(burst_deq == 1)) {
 		total_deq += burst_deq;
@@ -183,7 +183,8 @@  ml_enqueue_burst(void *arg)
 	pending = ops_count;
 
 enqueue_reqs:
-	burst_enq = rte_ml_enqueue_burst(t->cmn.opt->dev_id, 0, &args->enq_ops[idx], pending);
+	burst_enq =
+		rte_ml_enqueue_burst(t->cmn.opt->dev_id, args->qp_id, &args->enq_ops[idx], pending);
 	pending = pending - burst_enq;
 
 	if (pending > 0) {
@@ -224,8 +225,8 @@  ml_dequeue_burst(void *arg)
 		return 0;
 
 dequeue_burst:
-	burst_deq =
-		rte_ml_dequeue_burst(t->cmn.opt->dev_id, 0, args->deq_ops, t->cmn.opt->burst_size);
+	burst_deq = rte_ml_dequeue_burst(t->cmn.opt->dev_id, args->qp_id, args->deq_ops,
+					 t->cmn.opt->burst_size);
 
 	if (likely(burst_deq > 0)) {
 		total_deq += burst_deq;
@@ -259,6 +260,19 @@  test_inference_cap_check(struct ml_options *opt)
 		return false;
 
 	rte_ml_dev_info_get(opt->dev_id, &dev_info);
+
+	if (opt->queue_pairs > dev_info.max_queue_pairs) {
+		ml_err("Insufficient capabilities: queue_pairs = %u, max_queue_pairs = %u",
+		       opt->queue_pairs, dev_info.max_queue_pairs);
+		return false;
+	}
+
+	if (opt->queue_size > dev_info.max_desc) {
+		ml_err("Insufficient capabilities: queue_size = %u, max_desc = %u", opt->queue_size,
+		       dev_info.max_desc);
+		return false;
+	}
+
 	if (opt->nb_filelist > dev_info.max_models) {
 		ml_err("Insufficient capabilities:  Filelist count exceeded device limit, count = %u (max limit = %u)",
 		       opt->nb_filelist, dev_info.max_models);
@@ -310,10 +324,21 @@  test_inference_opt_check(struct ml_options *opt)
 		return -EINVAL;
 	}
 
+	if (opt->queue_pairs == 0) {
+		ml_err("Invalid option, queue_pairs = %u\n", opt->queue_pairs);
+		return -EINVAL;
+	}
+
+	if (opt->queue_size == 0) {
+		ml_err("Invalid option, queue_size = %u\n", opt->queue_size);
+		return -EINVAL;
+	}
+
 	/* check number of available lcores. */
-	if (rte_lcore_count() < 3) {
+	if (rte_lcore_count() < (uint32_t)(opt->queue_pairs * 2 + 1)) {
 		ml_err("Insufficient lcores = %u\n", rte_lcore_count());
-		ml_err("Minimum lcores required to create %u queue-pairs = %u\n", 1, 3);
+		ml_err("Minimum lcores required to create %u queue-pairs = %u\n", opt->queue_pairs,
+		       (opt->queue_pairs * 2 + 1));
 		return -EINVAL;
 	}
 
@@ -331,6 +356,8 @@  test_inference_opt_dump(struct ml_options *opt)
 	/* dump test opts */
 	ml_dump("repetitions", "%" PRIu64, opt->repetitions);
 	ml_dump("burst_size", "%u", opt->burst_size);
+	ml_dump("queue_pairs", "%u", opt->queue_pairs);
+	ml_dump("queue_size", "%u", opt->queue_size);
 
 	ml_dump_begin("filelist");
 	for (i = 0; i < opt->nb_filelist; i++) {
@@ -422,23 +449,31 @@  ml_inference_mldev_setup(struct ml_test *test, struct ml_options *opt)
 {
 	struct rte_ml_dev_qp_conf qp_conf;
 	struct test_inference *t;
+	uint16_t qp_id;
 	int ret;
 
 	t = ml_test_priv(test);
 
+	RTE_SET_USED(t);
+
 	ret = ml_test_device_configure(test, opt);
 	if (ret != 0)
 		return ret;
 
 	/* setup queue pairs */
-	qp_conf.nb_desc = t->cmn.dev_info.max_desc;
+	qp_conf.nb_desc = opt->queue_size;
 	qp_conf.cb = NULL;
 
-	ret = rte_ml_dev_queue_pair_setup(opt->dev_id, 0, &qp_conf, opt->socket_id);
-	if (ret != 0) {
-		ml_err("Failed to setup ml device queue-pair, dev_id = %d, qp_id = %u\n",
-		       opt->dev_id, 0);
-		goto error;
+	for (qp_id = 0; qp_id < opt->queue_pairs; qp_id++) {
+		qp_conf.nb_desc = opt->queue_size;
+		qp_conf.cb = NULL;
+
+		ret = rte_ml_dev_queue_pair_setup(opt->dev_id, qp_id, &qp_conf, opt->socket_id);
+		if (ret != 0) {
+			ml_err("Failed to setup ml device queue-pair, dev_id = %d, qp_id = %u\n",
+			       opt->dev_id, qp_id);
+			return ret;
+		}
 	}
 
 	ret = ml_test_device_start(test, opt);
@@ -700,14 +735,28 @@  ml_inference_launch_cores(struct ml_test *test, struct ml_options *opt, uint16_t
 {
 	struct test_inference *t = ml_test_priv(test);
 	uint32_t lcore_id;
+	uint32_t nb_reqs;
 	uint32_t id = 0;
+	uint32_t qp_id;
+
+	nb_reqs = opt->repetitions / opt->queue_pairs;
 
 	RTE_LCORE_FOREACH_WORKER(lcore_id)
 	{
-		if (id == 2)
+		if (id >= opt->queue_pairs * 2)
 			break;
 
-		t->args[lcore_id].nb_reqs = opt->repetitions;
+		qp_id = id / 2;
+		t->args[lcore_id].qp_id = qp_id;
+		t->args[lcore_id].nb_reqs = nb_reqs;
+		if (qp_id == 0)
+			t->args[lcore_id].nb_reqs += opt->repetitions - nb_reqs * opt->queue_pairs;
+
+		if (t->args[lcore_id].nb_reqs == 0) {
+			id++;
+			break;
+		}
+
 		t->args[lcore_id].start_fid = start_fid;
 		t->args[lcore_id].end_fid = end_fid;
 
diff --git a/app/test-mldev/test_inference_common.h b/app/test-mldev/test_inference_common.h
index da800f2bd4..81d9b07d41 100644
--- a/app/test-mldev/test_inference_common.h
+++ b/app/test-mldev/test_inference_common.h
@@ -22,6 +22,7 @@  struct ml_core_args {
 	uint64_t nb_reqs;
 	uint16_t start_fid;
 	uint16_t end_fid;
+	uint32_t qp_id;
 
 	struct rte_ml_op **enq_ops;
 	struct rte_ml_op **deq_ops;
diff --git a/doc/guides/tools/testmldev.rst b/doc/guides/tools/testmldev.rst
index eb9081723b..17ee2cf7fb 100644
--- a/doc/guides/tools/testmldev.rst
+++ b/doc/guides/tools/testmldev.rst
@@ -95,6 +95,17 @@  The following are the command-line options supported by the test application.
 
         Set the burst size to be used when enqueuing / dequeuing inferences. Default value is `1`.
 
+* ``--queue_pairs <n>``
+
+        Set the number of queue-pairs to be used for inference enqueue and dequeue operations.
+        Default value is `1`.
+
+* ``--queue_size <n>``
+
+        Set the size of the queue-pairs created for inference enqueue / dequeue operations.
+        The queue size translates to the `rte_ml_dev_qp_conf::nb_desc` field during queue-pair
+        creation. Default value is `1`.
+
 * ``--debug``
 
         Enable the tests to run in debug mode.
@@ -120,12 +131,17 @@  Supported command line options for the `device_ops` test are following::
         --test
         --dev_id
         --socket_id
+        --queue_pairs
+        --queue_size
 
 
 DEVICE_OPS Test
 ~~~~~~~~~~~~~~~
 
-Device ops test validates the device configuration and reconfiguration.
+Device ops test validates the device configuration and reconfiguration support. The test configures
+the ML device based on the ``--queue_pairs`` and ``--queue_size`` options specified by the user, and
+later reconfigures the ML device with the number of queue pairs and the queue size based on the
+maximum values reported by the device info.
 
 
 Example
@@ -139,6 +155,14 @@  Command to run device_ops test:
         --test=device_ops
 
 
+Command to run device_ops test with user options:
+
+.. code-block:: console
+
+    sudo <build_dir>/app/dpdk-test-mldev -c 0xf -a <PCI_ID> -- \
+        --test=device_ops --queue_pairs <M> --queue_size <N>
+
+
 ML Model Tests
 -------------------------
 
@@ -241,6 +265,8 @@  Supported command line options for inference tests are following::
         --filelist
         --repetitions
         --burst_size
+        --queue_pairs
+        --queue_size
 
 
 List of files to be used for the inference tests can be specified through the option
@@ -252,6 +278,9 @@  the test, one entry per model. Maximum number of file entries supported by the t
 When ``--burst_size <num>`` option is specified for the test, enqueue and dequeue burst would
 try to enqueue or dequeue ``num`` number of inferences per each call respectively.
 
+In the inference tests, a pair of lcores is mapped to each queue pair: one for enqueue and one for
+dequeue. The minimum number of lcores required for the tests is ``(queue_pairs * 2 + 1)``.
+
 .. Note::
 
     * The ``--filelist <file_list>`` is a mandatory option for running inference tests.
@@ -296,6 +325,14 @@  Example command to run inference_ordered test with a specific burst size:
         --test=inference_ordered --filelist model.bin,input.bin,output.bin \
         --burst_size 12
 
+Example command to run inference_ordered test with multiple queue-pairs and queue size:
+
+.. code-block:: console
+
+    sudo <build_dir>/app/dpdk-test-mldev -c 0xf -a <PCI_ID> -- \
+        --test=inference_ordered --filelist model.bin,input.bin,output.bin \
+        --queue_pairs 4 --queue_size 16
+
 
 INFERENCE_INTERLEAVE Test
 ~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -336,13 +373,14 @@  Example command to run inference_interleave test with multiple models:
         --test=inference_interleave --filelist model_A.bin,input_A.bin,output_A.bin \
         --filelist model_B.bin,input_B.bin,output_B.bin
 
-Example command to run inference_interleave test with a specific burst size:
+Example command to run inference_interleave test with a specific burst size, multiple queue-pairs
+and queue size:
 
 .. code-block:: console
 
     sudo <build_dir>/app/dpdk-test-mldev -c 0xf -a <PCI_ID> -- \
         --test=inference_interleave --filelist model.bin,input.bin,output.bin \
-        --burst_size 16
+        --queue_pairs 8 --queue_size 12 --burst_size 16
 
 
 Debug mode