[v7,09/11] app/mldev: enable support for inference batches

Message ID 20230316211434.13409-10-syalavarthi@marvell.com (mailing list archive)
State Accepted, archived
Delegated to: Thomas Monjalon
Series: Implementation of mldev test application

Checks

Context        Check     Description
ci/checkpatch  warning   coding style issues

Commit Message

Srikanth Yalavarthi March 16, 2023, 9:14 p.m. UTC
  Enabled support for executing multiple batches of inferences
per enqueue request. Input and reference data for the test
should be provided appropriately for a multi-batch run. The
number of batches can be specified through the "--batches"
option.
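
For example, a multi-batch run could be launched as follows (the
EAL arguments, test name and file names below are illustrative
placeholders only):

    ./dpdk-test-mldev -c 0xf -a <PCI_ID> -- \
         --test=inference_ordered \
         --filelist model.bin,input.bin,output.bin \
         --batches 2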

Signed-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>
Acked-by: Anup Prabhu <aprabhu@marvell.com>
---
 app/test-mldev/ml_options.c            | 12 +++++++++++-
 app/test-mldev/ml_options.h            |  2 ++
 app/test-mldev/test_inference_common.c | 22 +++++++++++++---------
 app/test-mldev/test_model_common.c     |  6 ++++++
 app/test-mldev/test_model_common.h     |  1 +
 doc/guides/tools/testmldev.rst         |  6 ++++++
 6 files changed, 39 insertions(+), 10 deletions(-)
  

Patch

diff --git a/app/test-mldev/ml_options.c b/app/test-mldev/ml_options.c
index 901adaed33..44df44991b 100644
--- a/app/test-mldev/ml_options.c
+++ b/app/test-mldev/ml_options.c
@@ -27,6 +27,7 @@  ml_options_default(struct ml_options *opt)
 	opt->burst_size = 1;
 	opt->queue_pairs = 1;
 	opt->queue_size = 1;
+	opt->batches = 0;
 	opt->debug = false;
 }
 
@@ -170,6 +171,12 @@  ml_parse_queue_size(struct ml_options *opt, const char *arg)
 	return parser_read_uint16(&opt->queue_size, arg);
 }
 
+static int
+ml_parse_batches(struct ml_options *opt, const char *arg)
+{
+	return parser_read_uint16(&opt->batches, arg);
+}
+
 static void
 ml_dump_test_options(const char *testname)
 {
@@ -190,7 +197,8 @@  ml_dump_test_options(const char *testname)
 		       "\t\t--repetitions      : number of inference repetitions\n"
 		       "\t\t--burst_size       : inference burst size\n"
 		       "\t\t--queue_pairs      : number of queue pairs to create\n"
-		       "\t\t--queue_size       : size fo queue-pair\n");
+		       "\t\t--queue_size       : size of queue-pair\n"
+		       "\t\t--batches          : number of batches of input\n");
 		printf("\n");
 	}
 }
@@ -220,6 +228,7 @@  static struct option lgopts[] = {
 	{ML_BURST_SIZE, 1, 0, 0},
 	{ML_QUEUE_PAIRS, 1, 0, 0},
 	{ML_QUEUE_SIZE, 1, 0, 0},
+	{ML_BATCHES, 1, 0, 0},
 	{ML_DEBUG, 0, 0, 0},
 	{ML_HELP, 0, 0, 0},
 	{NULL, 0, 0, 0}};
@@ -239,6 +248,7 @@  ml_opts_parse_long(int opt_idx, struct ml_options *opt)
 		{ML_BURST_SIZE, ml_parse_burst_size},
 		{ML_QUEUE_PAIRS, ml_parse_queue_pairs},
 		{ML_QUEUE_SIZE, ml_parse_queue_size},
+		{ML_BATCHES, ml_parse_batches},
 	};
 
 	for (i = 0; i < RTE_DIM(parsermap); i++) {
diff --git a/app/test-mldev/ml_options.h b/app/test-mldev/ml_options.h
index c4018ee9d1..48fe064150 100644
--- a/app/test-mldev/ml_options.h
+++ b/app/test-mldev/ml_options.h
@@ -21,6 +21,7 @@ 
 #define ML_BURST_SIZE  ("burst_size")
 #define ML_QUEUE_PAIRS ("queue_pairs")
 #define ML_QUEUE_SIZE  ("queue_size")
+#define ML_BATCHES     ("batches")
 #define ML_DEBUG       ("debug")
 #define ML_HELP	       ("help")
 
@@ -40,6 +41,7 @@  struct ml_options {
 	uint16_t burst_size;
 	uint16_t queue_pairs;
 	uint16_t queue_size;
+	uint16_t batches;
 	bool debug;
 };
 
diff --git a/app/test-mldev/test_inference_common.c b/app/test-mldev/test_inference_common.c
index b4ad3c4b72..0f281aed6c 100644
--- a/app/test-mldev/test_inference_common.c
+++ b/app/test-mldev/test_inference_common.c
@@ -50,7 +50,7 @@  ml_enqueue_single(void *arg)
 		goto retry;
 
 	op->model_id = t->model[fid].id;
-	op->nb_batches = t->model[fid].info.batch_size;
+	op->nb_batches = t->model[fid].nb_batches;
 	op->mempool = t->op_pool;
 
 	op->input.addr = req->input;
@@ -163,7 +163,7 @@  ml_enqueue_burst(void *arg)
 
 	for (i = 0; i < ops_count; i++) {
 		args->enq_ops[i]->model_id = t->model[fid].id;
-		args->enq_ops[i]->nb_batches = t->model[fid].info.batch_size;
+		args->enq_ops[i]->nb_batches = t->model[fid].nb_batches;
 		args->enq_ops[i]->mempool = t->op_pool;
 
 		args->enq_ops[i]->input.addr = args->reqs[i]->input;
@@ -359,6 +359,11 @@  test_inference_opt_dump(struct ml_options *opt)
 	ml_dump("queue_pairs", "%u", opt->queue_pairs);
 	ml_dump("queue_size", "%u", opt->queue_size);
 
+	if (opt->batches == 0)
+		ml_dump("batches", "%u (default)", opt->batches);
+	else
+		ml_dump("batches", "%u", opt->batches);
+
 	ml_dump_begin("filelist");
 	for (i = 0; i < opt->nb_filelist; i++) {
 		ml_dump_list("model", i, opt->filelist[i].model);
@@ -528,8 +533,8 @@  ml_request_initialize(struct rte_mempool *mp, void *opaque, void *obj, unsigned
 	req->niters = 0;
 
 	/* quantize data */
-	rte_ml_io_quantize(t->cmn.opt->dev_id, t->model[t->fid].id,
-			   t->model[t->fid].info.batch_size, t->model[t->fid].input, req->input);
+	rte_ml_io_quantize(t->cmn.opt->dev_id, t->model[t->fid].id, t->model[t->fid].nb_batches,
+			   t->model[t->fid].input, req->input);
 }
 
 int
@@ -547,7 +552,7 @@  ml_inference_iomem_setup(struct ml_test *test, struct ml_options *opt, uint16_t
 	int ret;
 
 	/* get input buffer size */
-	ret = rte_ml_io_input_size_get(opt->dev_id, t->model[fid].id, t->model[fid].info.batch_size,
+	ret = rte_ml_io_input_size_get(opt->dev_id, t->model[fid].id, t->model[fid].nb_batches,
 				       &t->model[fid].inp_qsize, &t->model[fid].inp_dsize);
 	if (ret != 0) {
 		ml_err("Failed to get input size, model : %s\n", opt->filelist[fid].model);
@@ -555,9 +560,8 @@  ml_inference_iomem_setup(struct ml_test *test, struct ml_options *opt, uint16_t
 	}
 
 	/* get output buffer size */
-	ret = rte_ml_io_output_size_get(opt->dev_id, t->model[fid].id,
-					t->model[fid].info.batch_size, &t->model[fid].out_qsize,
-					&t->model[fid].out_dsize);
+	ret = rte_ml_io_output_size_get(opt->dev_id, t->model[fid].id, t->model[fid].nb_batches,
+					&t->model[fid].out_qsize, &t->model[fid].out_dsize);
 	if (ret != 0) {
 		ml_err("Failed to get input size, model : %s\n", opt->filelist[fid].model);
 		return ret;
@@ -702,7 +706,7 @@  ml_request_finish(struct rte_mempool *mp, void *opaque, void *obj, unsigned int
 		return;
 
 	t->nb_used++;
-	rte_ml_io_dequantize(t->cmn.opt->dev_id, model->id, t->model[req->fid].info.batch_size,
+	rte_ml_io_dequantize(t->cmn.opt->dev_id, model->id, t->model[req->fid].nb_batches,
 			     req->output, model->output);
 }
 
diff --git a/app/test-mldev/test_model_common.c b/app/test-mldev/test_model_common.c
index b94d46154d..c28e452f29 100644
--- a/app/test-mldev/test_model_common.c
+++ b/app/test-mldev/test_model_common.c
@@ -71,6 +71,12 @@  ml_model_load(struct ml_test *test, struct ml_options *opt, struct ml_model *mod
 		return ret;
 	}
 
+	/* Update number of batches */
+	if (opt->batches == 0)
+		model->nb_batches = model->info.batch_size;
+	else
+		model->nb_batches = opt->batches;
+
 	model->state = MODEL_LOADED;
 
 	return 0;
diff --git a/app/test-mldev/test_model_common.h b/app/test-mldev/test_model_common.h
index 5ee975109d..19429ce142 100644
--- a/app/test-mldev/test_model_common.h
+++ b/app/test-mldev/test_model_common.h
@@ -30,6 +30,7 @@  struct ml_model {
 	uint8_t *output;
 
 	struct rte_mempool *io_pool;
+	uint32_t nb_batches;
 };
 
 int ml_model_load(struct ml_test *test, struct ml_options *opt, struct ml_model *model,
diff --git a/doc/guides/tools/testmldev.rst b/doc/guides/tools/testmldev.rst
index 17ee2cf7fb..6632025fdf 100644
--- a/doc/guides/tools/testmldev.rst
+++ b/doc/guides/tools/testmldev.rst
@@ -106,6 +106,11 @@  The following are the command-line options supported by the test application.
         Queue size would translate into `rte_ml_dev_qp_conf::nb_desc` field during queue-pair
         creation. Default value is `1`.
 
+* ``--batches <n>``
+
+        Set the number of batches in the input file provided for the inference run. When not
+        specified, the number of batches is assumed to be equal to the batch size of the model.
+
 * ``--debug``
 
         Enable the tests to run in debug mode.
@@ -267,6 +272,7 @@  Supported command line options for inference tests are following::
         --burst_size
         --queue_pairs
         --queue_size
+        --batches
 
 
 List of files to be used for the inference tests can be specified through the option
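
For reference, below is a minimal sketch of how the batch count resolved by
this patch drives a single enqueue, using only the fields and calls that
appear in the diff above. The helper name "enqueue_one", its parameters and
the surrounding buffer/queue-pair setup are hypothetical; error handling is
omitted.

    #include <rte_mldev.h>

    #include "ml_options.h"
    #include "test_model_common.h"

    /* Hypothetical helper: quantize one input and enqueue a single op,
     * using the batch count resolved in ml_model_load().
     */
    static void
    enqueue_one(struct ml_options *opt, struct ml_model *model,
                struct rte_ml_op *op, struct rte_mempool *op_pool,
                void *dbuffer, void *qbuffer, uint16_t qp_id)
    {
            /* model->nb_batches is opt->batches when "--batches" is given,
             * otherwise the model's native batch size (info.batch_size).
             */
            uint32_t nb_batches = model->nb_batches;

            /* Quantize nb_batches worth of input data. */
            rte_ml_io_quantize(opt->dev_id, model->id, nb_batches,
                               dbuffer, qbuffer);

            op->model_id = model->id;
            op->nb_batches = nb_batches;
            op->mempool = op_pool;
            op->input.addr = qbuffer;

            rte_ml_enqueue_burst(opt->dev_id, qp_id, &op, 1);
    }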