@@ -2,6 +2,7 @@
* Copyright (c) 2022 Marvell.
*/
+#include <rte_errno.h>
#include <rte_log.h>
#include <rte_mldev.h>
#include <rte_mldev_pmd.h>
@@ -107,6 +108,9 @@ rte_ml_dev_pmd_allocate(const char *name, uint8_t socket_id)
ml_dev_globals.nb_devs++;
}
+ dev->enqueue_burst = NULL;
+ dev->dequeue_burst = NULL;
+
return dev;
}
@@ -628,4 +632,76 @@ rte_ml_op_pool_free(struct rte_mempool *mempool)
rte_mempool_free(mempool);
}
+uint16_t
+rte_ml_enqueue_burst(int16_t dev_id, uint16_t qp_id, struct rte_ml_op **ops, uint16_t nb_ops)
+{
+ struct rte_ml_dev *dev;
+
+#ifdef RTE_LIBRTE_ML_DEV_DEBUG
+ if (!rte_ml_dev_is_valid_dev(dev_id)) {
+ RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ dev = rte_ml_dev_pmd_get_dev(dev_id);
+ if (*dev->enqueue_burst == NULL) {
+ rte_errno = -ENOTSUP;
+ return 0;
+ }
+
+ if (ops == NULL) {
+ RTE_MLDEV_LOG(ERR, "Dev %d, ops cannot be NULL\n", dev_id);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ if (qp_id >= dev->data->nb_queue_pairs) {
+ RTE_MLDEV_LOG(ERR, "Invalid qp_id %u\n", qp_id);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+#else
+ dev = rte_ml_dev_pmd_get_dev(dev_id);
+#endif
+
+ return (*dev->enqueue_burst)(dev, qp_id, ops, nb_ops);
+}
+
+uint16_t
+rte_ml_dequeue_burst(int16_t dev_id, uint16_t qp_id, struct rte_ml_op **ops, uint16_t nb_ops)
+{
+ struct rte_ml_dev *dev;
+
+#ifdef RTE_LIBRTE_ML_DEV_DEBUG
+ if (!rte_ml_dev_is_valid_dev(dev_id)) {
+ RTE_MLDEV_LOG(ERR, "Invalid dev_id = %d\n", dev_id);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ dev = rte_ml_dev_pmd_get_dev(dev_id);
+ if (*dev->dequeue_burst == NULL) {
+ rte_errno = -ENOTSUP;
+ return 0;
+ }
+
+ if (ops == NULL) {
+ RTE_MLDEV_LOG(ERR, "Dev %d, ops cannot be NULL\n", dev_id);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ if (qp_id >= dev->data->nb_queue_pairs) {
+ RTE_MLDEV_LOG(ERR, "Invalid qp_id %u\n", qp_id);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+#else
+ dev = rte_ml_dev_pmd_get_dev(dev_id);
+#endif
+
+ return (*dev->dequeue_burst)(dev, qp_id, ops, nb_ops);
+}
+
RTE_LOG_REGISTER_DEFAULT(rte_ml_dev_logtype, INFO);
@@ -33,6 +33,46 @@ extern "C" {
struct rte_ml_dev;
+/**
+ * @internal
+ *
+ * Enqueue a burst of inference requests to a queue on ML device.
+ *
+ * @param dev
+ * ML device pointer.
+ * @param qp_id
+ * Queue-pair ID.
+ * @param ops
+ * Array of ML ops to be enqueued.
+ * @param nb_ops
+ * Number of ops to enqueue.
+ *
+ * @return
+ * - Number of ops enqueued.
+ */
+typedef uint16_t (*mldev_enqueue_t)(struct rte_ml_dev *dev, uint16_t qp_id, struct rte_ml_op **ops,
+ uint16_t nb_ops);
+
+/**
+ * @internal
+ *
+ * Dequeue a burst of inference requests from a queue on ML device.
+ *
+ * @param dev
+ * ML device pointer.
+ * @param qp_id
+ * Queue-pair ID.
+ * @param ops
+ *	Array of ML ops to be dequeued.
+ * @param nb_ops
+ * Number of ops to dequeue.
+ *
+ * @return
+ * - Number of ops dequeued.
+ */
+typedef uint16_t (*mldev_dequeue_t)(struct rte_ml_dev *dev, uint16_t qp_id, struct rte_ml_op **ops,
+ uint16_t nb_ops);
+
/**
* Definitions of all functions exported by a driver through the generic structure of type
* *ml_dev_ops* supplied in the *rte_ml_dev* structure associated with a device.
@@ -451,6 +491,12 @@ struct rte_ml_dev_data {
* The data structure associated with each ML device.
*/
struct rte_ml_dev {
+ /** Pointer to PMD enqueue function. */
+ mldev_enqueue_t enqueue_burst;
+
+ /** Pointer to PMD dequeue function. */
+ mldev_dequeue_t dequeue_burst;
+
/** Pointer to device data. */
struct rte_ml_dev_data *data;
@@ -40,6 +40,8 @@ struct rte_ml_dev_pmd_init_params {
uint64_t private_data_size;
};
+struct rte_ml_dev;
+
/**
* @internal
*
@@ -1,6 +1,7 @@
EXPERIMENTAL {
global:
+ rte_ml_dequeue_burst;
rte_ml_dev_close;
rte_ml_dev_configure;
rte_ml_dev_count;
@@ -11,6 +12,7 @@ EXPERIMENTAL {
rte_ml_dev_socket_id;
rte_ml_dev_start;
rte_ml_dev_stop;
+ rte_ml_enqueue_burst;
rte_ml_io_dequantize;
rte_ml_io_input_size_get;
rte_ml_io_output_size_get;