[v2,06/12] app/mldev: add test case to interleave inferences

Message ID: 20221129082109.6809-6-syalavarthi@marvell.com
State: Superseded, archived
Delegated to: Thomas Monjalon
Series: [v2,01/12] app/mldev: implement test framework for mldev

Checks

Context         Check     Description
ci/checkpatch   success   coding style OK

Commit Message

Srikanth Yalavarthi Nov. 29, 2022, 8:21 a.m. UTC
Added a test case to interleave inference requests from multiple
models. The interleave test loads and starts all models and launches
inference requests for the models using the available queue-pairs.

Operation sequence when testing with N models and R repetitions:

(load + start) x N -> (enqueue + dequeue) x N x R ...
	-> (stop + unload) x N
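
For reference, the per-worker enqueue side of this pattern can be
sketched with the mldev burst API roughly as below. This is a minimal
sketch only, not the exact worker from test_inference_common.c; the
names nb_models, reps and the prepared ops[] array are assumptions,
and dequeue, op refill and error handling are omitted.

    #include <stdint.h>
    #include <rte_mldev.h>

    /* Sketch: enqueue one op per model in round-robin order so that
     * requests from all N models interleave on a single queue-pair.
     */
    static void
    interleave_enqueue(int16_t dev_id, uint16_t qp_id,
                       struct rte_ml_op **ops, int16_t nb_models,
                       uint64_t reps)
    {
            uint64_t total_enq = 0;
            int16_t fid = 0;

            while (total_enq < (uint64_t)nb_models * reps) {
                    /* ops[fid] is an op prepared for model fid */
                    total_enq += rte_ml_enqueue_burst(dev_id, qp_id,
                                                      &ops[fid], 1);
                    /* advance to the next model */
                    fid = (fid + 1 == nb_models) ? 0 : fid + 1;
            }
    }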

The test can be executed by selecting the "inference_interleave" test.
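
For example (a hypothetical invocation; the EAL coremask and file
names are placeholders, and --filelist is repeated once per model):

    ./dpdk-test-mldev -c 0xf -- --test=inference_interleave \
        --filelist model_a.bin,input_a.bin,output_a.bin \
        --filelist model_b.bin,input_b.bin,output_b.bin \
        --repetitions=100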

Signed-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>
---
 app/test-mldev/meson.build                 |   1 +
 app/test-mldev/ml_options.c                |   3 +-
 app/test-mldev/test_inference_common.c     |  12 +--
 app/test-mldev/test_inference_common.h     |   4 +-
 app/test-mldev/test_inference_interleave.c | 118 +++++++++++++++++++++
 5 files changed, 129 insertions(+), 9 deletions(-)
 create mode 100644 app/test-mldev/test_inference_interleave.c

Patch

diff --git a/app/test-mldev/meson.build b/app/test-mldev/meson.build
index 475d76d126..41d22fb22c 100644
--- a/app/test-mldev/meson.build
+++ b/app/test-mldev/meson.build
@@ -18,6 +18,7 @@ sources = files(
         'test_model_ops.c',
         'test_inference_common.c',
         'test_inference_ordered.c',
+        'test_inference_interleave.c',
 )
 
 deps += ['mldev']
diff --git a/app/test-mldev/ml_options.c b/app/test-mldev/ml_options.c
index 10dad18fff..01ea050ee7 100644
--- a/app/test-mldev/ml_options.c
+++ b/app/test-mldev/ml_options.c
@@ -162,7 +162,8 @@ ml_dump_test_options(const char *testname)
 		printf("\n");
 	}
 
-	if (strcmp(testname, "inference_ordered") == 0) {
+	if ((strcmp(testname, "inference_ordered") == 0) ||
+	    (strcmp(testname, "inference_interleave") == 0)) {
 		printf("\t\t--filelist         : comma separated list of model, input and output\n"
 		       "\t\t--repetitions      : number of inference repetitions\n");
 		printf("\n");
diff --git a/app/test-mldev/test_inference_common.c b/app/test-mldev/test_inference_common.c
index 8b5dc89346..f0b15861a0 100644
--- a/app/test-mldev/test_inference_common.c
+++ b/app/test-mldev/test_inference_common.c
@@ -115,7 +115,7 @@ ml_dequeue_single(void *arg)
 		total_deq += burst_deq;
 		if (unlikely(op->status == RTE_ML_OP_STATUS_ERROR)) {
 			rte_ml_op_error_get(t->cmn.opt->dev_id, op, &error);
-			ml_err("error_code = 0x%016lx, error_message = %s\n", error.errcode,
+			ml_err("error_code = 0x%" PRIx64 ", error_message = %s\n", error.errcode,
 			       error.message);
 		}
 		req = (struct ml_request *)op->user_ptr;
@@ -334,10 +334,10 @@ ml_request_initialize(struct rte_mempool *mp, void *opaque, void *obj, unsigned
 	RTE_SET_USED(mp);
 	RTE_SET_USED(obj_idx);
 
-	req->input = RTE_PTR_ADD(
-		obj, RTE_ALIGN_CEIL(sizeof(struct ml_request), t->cmn.dev_info.min_align_size));
-	req->output = RTE_PTR_ADD(req->input, RTE_ALIGN_CEIL(t->model[t->fid].inp_qsize,
-							     t->cmn.dev_info.min_align_size));
+	req->input = (uint8_t *)obj +
+		     RTE_ALIGN_CEIL(sizeof(struct ml_request), t->cmn.dev_info.min_align_size);
+	req->output = req->input +
+		      RTE_ALIGN_CEIL(t->model[t->fid].inp_qsize, t->cmn.dev_info.min_align_size);
 	req->niters = 0;
 
 	/* quantize data */
@@ -387,7 +387,7 @@ ml_inference_iomem_setup(struct ml_test *test, struct ml_options *opt, int16_t f
 	}
 
 	t->model[fid].input = mz->addr;
-	t->model[fid].output = RTE_PTR_ADD(t->model[fid].input, t->model[fid].inp_dsize);
+	t->model[fid].output = t->model[fid].input + t->model[fid].inp_dsize;
 
 	/* load input file */
 	fp = fopen(opt->filelist[fid].input, "r");
diff --git a/app/test-mldev/test_inference_common.h b/app/test-mldev/test_inference_common.h
index 91007954b4..b058abada4 100644
--- a/app/test-mldev/test_inference_common.h
+++ b/app/test-mldev/test_inference_common.h
@@ -17,8 +17,8 @@
 #include "test_model_common.h"
 
 struct ml_request {
-	void *input;
-	void *output;
+	uint8_t *input;
+	uint8_t *output;
 	int16_t fid;
 	uint64_t niters;
 };
diff --git a/app/test-mldev/test_inference_interleave.c b/app/test-mldev/test_inference_interleave.c
new file mode 100644
index 0000000000..74ad0c597f
--- /dev/null
+++ b/app/test-mldev/test_inference_interleave.c
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 Marvell.
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_launch.h>
+
+#include "ml_common.h"
+#include "ml_test.h"
+#include "test_inference_common.h"
+#include "test_model_common.h"
+
+static int
+test_inference_interleave_driver(struct ml_test *test, struct ml_options *opt)
+{
+	struct test_inference *t;
+	int16_t fid = 0;
+	int ret = 0;
+
+	t = ml_test_priv(test);
+
+	ret = ml_inference_mldev_setup(test, opt);
+	if (ret != 0)
+		return ret;
+
+	ret = ml_inference_mem_setup(test, opt);
+	if (ret != 0)
+		return ret;
+
+	/* load and start all models */
+	for (fid = 0; fid < opt->nb_filelist; fid++) {
+		ret = ml_model_load(test, opt, &t->model[fid], fid);
+		if (ret != 0)
+			goto error;
+
+		ret = ml_model_start(test, opt, &t->model[fid], fid);
+		if (ret != 0)
+			goto error;
+
+		ret = ml_inference_iomem_setup(test, opt, fid);
+		if (ret != 0)
+			goto error;
+	}
+
+	/* launch inference requests */
+	ret = ml_inference_launch_cores(test, opt, 0, opt->nb_filelist - 1);
+	if (ret != 0) {
+		ml_err("failed to launch cores");
+		goto error;
+	}
+
+	rte_eal_mp_wait_lcore();
+
+	/* stop and unload all models */
+	for (fid = 0; fid < opt->nb_filelist; fid++) {
+		ret = ml_inference_result(test, opt, fid);
+		if (ret != ML_TEST_SUCCESS)
+			goto error;
+
+		ml_inference_iomem_destroy(test, opt, fid);
+
+		ret = ml_model_stop(test, opt, &t->model[fid], fid);
+		if (ret != 0)
+			goto error;
+
+		ret = ml_model_unload(test, opt, &t->model[fid], fid);
+		if (ret != 0)
+			goto error;
+	}
+
+	ml_inference_mem_destroy(test, opt);
+
+	ret = ml_inference_mldev_destroy(test, opt);
+	if (ret != 0)
+		return ret;
+
+	t->cmn.result = ML_TEST_SUCCESS;
+
+	return 0;
+
+error:
+	ml_inference_mem_destroy(test, opt);
+	for (fid = 0; fid < opt->nb_filelist; fid++) {
+		ml_inference_iomem_destroy(test, opt, fid);
+		ml_model_stop(test, opt, &t->model[fid], fid);
+		ml_model_unload(test, opt, &t->model[fid], fid);
+	}
+
+	t->cmn.result = ML_TEST_FAILED;
+
+	return ret;
+}
+
+static int
+test_inference_interleave_result(struct ml_test *test, struct ml_options *opt)
+{
+	struct test_inference *t;
+
+	RTE_SET_USED(opt);
+
+	t = ml_test_priv(test);
+
+	return t->cmn.result;
+}
+
+static const struct ml_test_ops inference_interleave = {
+	.cap_check = test_inference_cap_check,
+	.opt_check = test_inference_opt_check,
+	.opt_dump = test_inference_opt_dump,
+	.test_setup = test_inference_setup,
+	.test_destroy = test_inference_destroy,
+	.test_driver = test_inference_interleave_driver,
+	.test_result = test_inference_interleave_result,
+};
+
+ML_TEST_REGISTER(inference_interleave);