@@ -921,6 +921,22 @@ dummy_burst_capacity(__rte_unused const void *dev_private,
return 0;
}
+static uint16_t
+dummy_enqueue(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
+ __rte_unused struct rte_dma_op **ops, __rte_unused uint16_t nb_ops)
+{
+ RTE_DMA_LOG(ERR, "Enqueue not configured or not supported.");
+ return 0;
+}
+
+static uint16_t
+dummy_dequeue(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
+ __rte_unused struct rte_dma_op **ops, __rte_unused uint16_t nb_ops)
+{
+ RTE_DMA_LOG(ERR, "Dequeue not configured or not supported.");
+ return 0;
+}
+
static void
dma_fp_object_dummy(struct rte_dma_fp_object *obj)
{
@@ -932,6 +948,8 @@ dma_fp_object_dummy(struct rte_dma_fp_object *obj)
obj->completed = dummy_completed;
obj->completed_status = dummy_completed_status;
obj->burst_capacity = dummy_burst_capacity;
+ obj->enqueue = dummy_enqueue;
+ obj->dequeue = dummy_dequeue;
}
static int
@@ -265,6 +265,11 @@ int16_t rte_dma_next_dev(int16_t start_dev_id);
* known from 'nb_priorities' field in struct rte_dma_info.
*/
#define RTE_DMA_CAPA_PRI_POLICY_SP RTE_BIT64(8)
+/** Support enqueue and dequeue operations.
+ *
+ * @see struct rte_dma_op
+ */
+#define RTE_DMA_CAPA_OPS_ENQ_DEQ RTE_BIT64(9)
/** Support copy operation.
* This capability starts from index 32, so that it can leave a gap between
@@ -351,6 +356,15 @@ struct rte_dma_conf {
* Lowest value indicates higher priority and vice-versa.
*/
uint16_t priority;
+ /** Indicates whether to use enqueue/dequeue operations based on
+ * struct rte_dma_op. false - default mode, true - enqueue/dequeue mode.
+ * This value can be set to true only when the RTE_DMA_CAPA_OPS_ENQ_DEQ
+ * capability is supported. When enabled, only calls to
+ * rte_dma_enqueue_ops() and rte_dma_dequeue_ops() are valid
+ * (a configuration sketch follows this structure).
+ *
+ * @see RTE_DMA_CAPA_OPS_ENQ_DEQ
+ */
+ bool enable_enq_deq;
};
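+
+/* Configuration sketch (illustrative only): enable enqueue/dequeue mode when
+ * the capability bit is reported; "dev_id" is assumed to identify a valid
+ * dmadev.
+ *
+ *   struct rte_dma_info info;
+ *   struct rte_dma_conf conf = { .nb_vchans = 1 };
+ *
+ *   rte_dma_info_get(dev_id, &info);
+ *   if ((info.dev_capa & RTE_DMA_CAPA_OPS_ENQ_DEQ) == 0)
+ *       return -ENOTSUP;
+ *   conf.enable_enq_deq = true;
+ *   rte_dma_configure(dev_id, &conf);
+ */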
/**
@@ -791,6 +805,63 @@ struct rte_dma_sge {
uint32_t length; /**< The DMA operation length. */
};
+/**
+ * A structure used to hold an event-based DMA operation entry. All the
+ * information required for a DMA transfer must be populated in a
+ * "struct rte_dma_op" instance (a population sketch follows the structure).
+ */
+struct rte_dma_op {
+ uint64_t flags;
+ /**< Flags related to the operation.
+ * @see RTE_DMA_OP_FLAG_*
+ */
+ struct rte_mempool *op_mp;
+ /**< Mempool from which op is allocated. */
+ enum rte_dma_status_code status;
+ /**< Status code for this operation. */
+ uint32_t rsvd;
+ /**< Reserved for future use. */
+ uint64_t impl_opaque[2];
+ /**< Implementation-specific opaque data.
+ * A DMA device implementation may use this field to hold
+ * implementation-specific values shared between the enqueue and dequeue
+ * operations.
+ * The application should not modify this field.
+ */
+ uint64_t user_meta;
+ /**< Memory to store user specific metadata.
+ * The dma device implementation should not modify this area.
+ */
+ uint64_t event_meta;
+ /**< Event metadata of DMA completion event.
+ * Used when RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND is not
+ * supported in OP_NEW mode.
+ * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_NEW
+ * @see RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND
+ *
+ * Used when RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD is not
+ * supported in OP_FWD mode.
+ * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_FORWARD
+ * @see RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD
+ *
+ * @see struct rte_event::event
+ */
+ int16_t dma_dev_id;
+ /**< DMA device ID to be used with OP_FORWARD mode.
+ * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_FORWARD
+ */
+ uint16_t vchan;
+ /**< DMA vchan ID to be used with OP_FORWARD mode.
+ * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_FORWARD
+ */
+ uint16_t nb_src;
+ /**< Number of source segments. */
+ uint16_t nb_dst;
+ /**< Number of destination segments. */
+ struct rte_dma_sge src_dst_seg[0];
+ /**< Source and destination segments. */
+};
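+
+/* Population sketch (illustrative only): "op_mp" is an application-created
+ * mempool whose element size covers the trailing src_dst_seg[] array; the
+ * layout below assumes source segments precede destination segments.
+ *
+ *   struct rte_dma_op *op;
+ *
+ *   if (rte_mempool_get(op_mp, (void **)&op) != 0)
+ *       return -ENOMEM;
+ *   op->op_mp = op_mp;
+ *   op->flags = 0;
+ *   op->nb_src = 1;
+ *   op->nb_dst = 1;
+ *   op->src_dst_seg[0].addr = src_iova;
+ *   op->src_dst_seg[0].length = len;
+ *   op->src_dst_seg[1].addr = dst_iova;
+ *   op->src_dst_seg[1].length = len;
+ */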
+
#ifdef __cplusplus
}
#endif
@@ -1154,6 +1225,80 @@ rte_dma_burst_capacity(int16_t dev_id, uint16_t vchan)
return ret;
}
+/**
+ * Enqueue rte_dma_op ops to the DMA device. Can only be used when the
+ * underlying device supports RTE_DMA_CAPA_OPS_ENQ_DEQ and
+ * rte_dma_conf::enable_enq_deq was enabled in rte_dma_configure().
+ * Enqueued ops are immediately submitted to the DMA device.
+ * Enqueue must be coupled with dequeue to retrieve completed ops; in this
+ * mode, calls to rte_dma_submit(), rte_dma_completed() and
+ * rte_dma_completed_status() are not valid.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param vchan
+ * The identifier of virtual DMA channel.
+ * @param ops
+ * Pointer to rte_dma_op array.
+ * @param nb_ops
+ * Number of rte_dma_op entries in the ops array.
+ * @return
+ * - Number of successfully submitted ops.
+ */
+static inline uint16_t
+rte_dma_enqueue_ops(int16_t dev_id, uint16_t vchan, struct rte_dma_op **ops, uint16_t nb_ops)
+{
+ struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+ uint16_t ret;
+
+#ifdef RTE_DMADEV_DEBUG
+ if (!rte_dma_is_valid(dev_id))
+ return 0;
+ if (*obj->enqueue == NULL)
+ return 0;
+#endif
+
+ ret = (*obj->enqueue)(obj->dev_private, vchan, ops, nb_ops);
+ rte_dma_trace_enqueue_ops(dev_id, vchan, (void **)ops, nb_ops);
+
+ return ret;
+}
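+
+/* Enqueue sketch (illustrative only): "ops" is an array of populated
+ * rte_dma_op pointers.
+ *
+ *   uint16_t nb_enq = rte_dma_enqueue_ops(dev_id, vchan, ops, nb_ops);
+ *
+ * Ops that were not accepted (nb_enq < nb_ops) presumably remain owned by the
+ * application and can be retried on a later call.
+ */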
+
+/**
+ * Dequeue completed rte_dma_op ops previously submitted to the DMA device.
+ * Can only be used when the underlying device supports
+ * RTE_DMA_CAPA_OPS_ENQ_DEQ and rte_dma_conf::enable_enq_deq was enabled in
+ * rte_dma_configure().
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param vchan
+ * The identifier of virtual DMA channel.
+ * @param ops
+ * Pointer to rte_dma_op array.
+ * @param nb_ops
+ * Size of rte_dma_op array.
+ * @return
+ * - Number of successfully completed ops. Will be less than or equal to nb_ops.
+ */
+static inline uint16_t
+rte_dma_dequeue_ops(int16_t dev_id, uint16_t vchan, struct rte_dma_op **ops, uint16_t nb_ops)
+{
+ struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+ uint16_t ret;
+
+#ifdef RTE_DMADEV_DEBUG
+ if (!rte_dma_is_valid(dev_id))
+ return 0;
+ if (*obj->dequeue == NULL)
+ return 0;
+#endif
+
+ ret = (*obj->dequeue)(obj->dev_private, vchan, ops, nb_ops);
+ rte_dma_trace_dequeue_ops(dev_id, vchan, (void **)ops, nb_ops);
+
+ return ret;
+}
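+
+/* Completion-poll sketch (illustrative only): dequeue completed ops, check
+ * their status and return them to their mempool. "BURST" and "handle_error"
+ * are hypothetical application-level names.
+ *
+ *   struct rte_dma_op *done[BURST];
+ *   uint16_t i, nb_deq;
+ *
+ *   nb_deq = rte_dma_dequeue_ops(dev_id, vchan, done, BURST);
+ *   for (i = 0; i < nb_deq; i++) {
+ *       if (done[i]->status != RTE_DMA_STATUS_SUCCESSFUL)
+ *           handle_error(done[i]);
+ *       rte_mempool_put(done[i]->op_mp, done[i]);
+ *   }
+ */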
+
#ifdef __cplusplus
}
#endif
@@ -50,6 +50,14 @@ typedef uint16_t (*rte_dma_completed_status_t)(void *dev_private,
/** @internal Used to check the remaining space in descriptor ring. */
typedef uint16_t (*rte_dma_burst_capacity_t)(const void *dev_private, uint16_t vchan);
+/** @internal Used to enqueue rte_dma_op entries to the DMA engine. */
+typedef uint16_t (*rte_dma_enqueue_ops_t)(void *dev_private, uint16_t vchan,
+ struct rte_dma_op **ops, uint16_t nb_ops);
+
+/** @internal Used to dequeue completed rte_dma_op entries from the DMA engine. */
+typedef uint16_t (*rte_dma_dequeue_ops_t)(void *dev_private, uint16_t vchan,
+ struct rte_dma_op **ops, uint16_t nb_ops);
+
/**
* @internal
* Fast-path dmadev functions and related data are hold in a flat array.
@@ -73,6 +81,8 @@ struct __rte_cache_aligned rte_dma_fp_object {
rte_dma_completed_t completed;
rte_dma_completed_status_t completed_status;
rte_dma_burst_capacity_t burst_capacity;
+ rte_dma_enqueue_ops_t enqueue;
+ rte_dma_dequeue_ops_t dequeue;
};
extern struct rte_dma_fp_object *rte_dma_fp_objs;
@@ -143,6 +143,26 @@ RTE_TRACE_POINT_FP(
rte_trace_point_emit_u16(ret);
)
+RTE_TRACE_POINT_FP(
+ rte_dma_trace_enqueue_ops,
+ RTE_TRACE_POINT_ARGS(int16_t dev_id, uint16_t vchan, void **ops,
+ uint16_t nb_ops),
+ rte_trace_point_emit_i16(dev_id);
+ rte_trace_point_emit_u16(vchan);
+ rte_trace_point_emit_ptr(ops);
+ rte_trace_point_emit_u16(nb_ops);
+)
+
+RTE_TRACE_POINT_FP(
+ rte_dma_trace_dequeue_ops,
+ RTE_TRACE_POINT_ARGS(int16_t dev_id, uint16_t vchan, void **ops,
+ uint16_t nb_ops),
+ rte_trace_point_emit_i16(dev_id);
+ rte_trace_point_emit_u16(vchan);
+ rte_trace_point_emit_ptr(ops);
+ rte_trace_point_emit_u16(nb_ops);
+)
+
#ifdef __cplusplus
}
#endif
@@ -56,3 +56,9 @@ RTE_TRACE_POINT_REGISTER(rte_dma_trace_completed_status,
RTE_TRACE_POINT_REGISTER(rte_dma_trace_burst_capacity,
lib.dmadev.burst_capacity)
+
+RTE_TRACE_POINT_REGISTER(rte_dma_trace_enqueue_ops,
+ lib.dmadev.enqueue_ops)
+
+RTE_TRACE_POINT_REGISTER(rte_dma_trace_dequeue_ops,
+ lib.dmadev.dequeue_ops)