diff mbox series

[2/2] eventdev: add function to enq new events to the same queue

Message ID 20220627095702.8047-2-pbhagavatula@marvell.com (mailing list archive)
State New
Delegated to: Jerin Jacob
Headers show
Series [1/2] doc: add enqueue depth for new event type | expand

Checks

Context Check Description
ci/iol-abi-testing warning Testing issues
ci/iol-aarch64-compile-testing success Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-intel-Performance fail Performance Testing issues
ci/github-robot: build fail github build: failed
ci/iol-aarch64-unit-testing success Testing PASS
ci/intel-Testing success Testing PASS
ci/Intel-compilation success Compilation OK
ci/checkpatch success coding style OK

Commit Message

Pavan Nikhilesh Bhagavatula June 27, 2022, 9:57 a.m. UTC
From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Introduce a new fastpath function to enqueue events with type *OP_NEW*
to the same destination event queue.
This function can be used as a hint to the PMD to use an optimized
enqueue sequence.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 lib/eventdev/eventdev_pmd.h      |  5 +-
 lib/eventdev/eventdev_private.c  | 13 ++++++
 lib/eventdev/rte_eventdev.h      | 80 +++++++++++++++++++++++++++++++-
 lib/eventdev/rte_eventdev_core.h | 11 ++++-
 4 files changed, 105 insertions(+), 4 deletions(-)
diff mbox series

Patch

diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 69402668d8..f0bb97fb89 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -178,7 +178,10 @@  struct rte_eventdev {
 	/**< Pointer to PMD eth Tx adapter enqueue function. */
 	event_crypto_adapter_enqueue_t ca_enqueue;
 
-	uint64_t reserved_64s[4]; /**< Reserved for future fields */
+	event_enqueue_queue_burst_t enqueue_new_same_dest;
+	/**< PMD function to enqueue a burst of new events to the same destination queue. */
+
+	uint64_t reserved_64s[3]; /**< Reserved for future fields */
 	void *reserved_ptrs[3];	  /**< Reserved for future fields */
 } __rte_cache_aligned;
 
diff --git a/lib/eventdev/eventdev_private.c b/lib/eventdev/eventdev_private.c
index 1d3d9d357e..53d1db281b 100644
--- a/lib/eventdev/eventdev_private.c
+++ b/lib/eventdev/eventdev_private.c
@@ -24,6 +24,17 @@  dummy_event_enqueue_burst(__rte_unused void *port,
 	return 0;
 }
 
+static uint16_t
+dummy_event_enqueue_queue_burst(__rte_unused void *port,
+				__rte_unused uint8_t queue,
+				__rte_unused const struct rte_event ev[],
+				__rte_unused uint16_t nb_events)
+{
+	RTE_EDEV_LOG_ERR(
+		"event enqueue burst requested for unconfigured event device");
+	return 0;
+}
+
 static uint16_t
 dummy_event_dequeue(__rte_unused void *port, __rte_unused struct rte_event *ev,
 		    __rte_unused uint64_t timeout_ticks)
@@ -90,6 +101,7 @@  event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op)
 		.enqueue_burst = dummy_event_enqueue_burst,
 		.enqueue_new_burst = dummy_event_enqueue_burst,
 		.enqueue_forward_burst = dummy_event_enqueue_burst,
+		.enqueue_new_same_dest = dummy_event_enqueue_queue_burst,
 		.dequeue = dummy_event_dequeue,
 		.dequeue_burst = dummy_event_dequeue_burst,
 		.maintain = dummy_event_maintain,
@@ -111,6 +123,7 @@  event_dev_fp_ops_set(struct rte_event_fp_ops *fp_op,
 	fp_op->enqueue_burst = dev->enqueue_burst;
 	fp_op->enqueue_new_burst = dev->enqueue_new_burst;
 	fp_op->enqueue_forward_burst = dev->enqueue_forward_burst;
+	fp_op->enqueue_new_same_dest = dev->enqueue_new_same_dest;
 	fp_op->dequeue = dev->dequeue;
 	fp_op->dequeue_burst = dev->dequeue_burst;
 	fp_op->maintain = dev->maintain;
diff --git a/lib/eventdev/rte_eventdev.h b/lib/eventdev/rte_eventdev.h
index 6a6f6ea4c1..2aa563740b 100644
--- a/lib/eventdev/rte_eventdev.h
+++ b/lib/eventdev/rte_eventdev.h
@@ -425,8 +425,9 @@  struct rte_event_dev_info {
 	 * A device that does not support bulk dequeue will set this as 1.
 	 */
 	uint32_t max_event_port_enqueue_depth;
-	/**< Maximum number of events can be enqueued at a time from an
-	 * event port by this device.
+	/**< Maximum number of events that can be enqueued at a time to an
+	 * event port by this device, applicable when rte_event::op is either
+	 * *RTE_EVENT_OP_FORWARD* or *RTE_EVENT_OP_RELEASE*.
 	 * A device that does not support bulk enqueue will set this as 1.
 	 */
 	uint8_t max_event_port_links;
@@ -446,6 +447,12 @@  struct rte_event_dev_info {
 	 * device. These ports and queues are not accounted for in
 	 * max_event_ports or max_event_queues.
 	 */
+	int16_t max_event_port_enqueue_new_burst;
+	/**< Maximum number of events that can be enqueued at a time to an
+	 * event port by this device, applicable when rte_event::op is set to
+	 * *RTE_EVENT_OP_NEW*.
+	 * A device with no limits will set this value to -1.
+	 */
 };
 
 /**
@@ -2082,6 +2089,75 @@  rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
 					 fp_ops->enqueue_forward_burst);
 }
 
+/**
+ * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_NEW* on
+ * an event device designated by its *dev_id* through the event port specified
+ * by *port_id* to the same queue specified by *queue_id*.
+ *
+ * Provides the same functionality as rte_event_enqueue_burst(), except that
+ * the application can use this API when all the objects in the burst contain
+ * the enqueue operation of the type *RTE_EVENT_OP_NEW* and are destined to the
+ * same queue. This specialized function provides an additional hint to the
+ * PMD, allowing it to optimize the enqueue sequence if possible.
+ *
+ * The rte_event_enqueue_new_queue_burst() result is undefined if the enqueue
+ * burst has an event object of operation type != RTE_EVENT_OP_NEW.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param port_id
+ *   The identifier of the event port.
+ * @param queue_id
+ *   The identifier of the event queue.
+ * @param ev
+ *   Points to an array of *nb_events* objects of type *rte_event* structure
+ *   which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ *   The number of event objects to enqueue, typically number of
+ *   rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...)
+ *   available for this port.
+ *
+ * @return
+ *   The number of event objects actually enqueued on the event device. The
+ *   return value can be less than the value of the *nb_events* parameter when
+ *   the event devices queue is full or if invalid parameters are specified in a
+ *   *rte_event*. If the return value is less than *nb_events*, the remaining
+ *   events at the end of ev[] are not consumed and the caller has to take care
+ *   of them, and rte_errno is set accordingly. Possible errno values include:
+ *   - EINVAL   The port ID is invalid, device ID is invalid, an event's queue
+ *              ID is invalid, or an event's sched type doesn't match the
+ *              capabilities of the destination queue.
+ *   - ENOSPC   The event port was backpressured and unable to enqueue
+ *              one or more events. This error code is only applicable to
+ *              closed systems.
+ * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH
+ * @see rte_event_enqueue_burst()
+ */
+static inline uint16_t
+rte_event_enqueue_new_queue_burst(uint8_t dev_id, uint8_t port_id,
+				  uint8_t queue_id, const struct rte_event ev[],
+				  uint16_t nb_events)
+{
+	const struct rte_event_fp_ops *fp_ops;
+	void *port;
+
+	fp_ops = &rte_event_fp_ops[dev_id];
+	port = fp_ops->data[port_id];
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+	if (dev_id >= RTE_EVENT_MAX_DEVS ||
+	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+
+	if (port == NULL) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+#endif
+	return fp_ops->enqueue_new_same_dest(port, queue_id, ev, nb_events);
+}
+
 /**
  * Dequeue a burst of events objects or an event object from the event port
  * designated by its *event_port_id*, on an event device designated
diff --git a/lib/eventdev/rte_eventdev_core.h b/lib/eventdev/rte_eventdev_core.h
index c328bdbc82..4d7d27e82d 100644
--- a/lib/eventdev/rte_eventdev_core.h
+++ b/lib/eventdev/rte_eventdev_core.h
@@ -20,6 +20,13 @@  typedef uint16_t (*event_enqueue_burst_t)(void *port,
 					  uint16_t nb_events);
 /**< @internal Enqueue burst of events on port of a device */
 
+typedef uint16_t (*event_enqueue_queue_burst_t)(void *port, uint8_t queue_id,
+						const struct rte_event ev[],
+						uint16_t nb_events);
+/**< @internal Enqueue burst of events on port of a device to a specific
+ * event queue.
+ */
+
 typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
 				    uint64_t timeout_ticks);
 /**< @internal Dequeue event from port of a device */
@@ -65,7 +72,9 @@  struct rte_event_fp_ops {
 	/**< PMD Tx adapter enqueue same destination function. */
 	event_crypto_adapter_enqueue_t ca_enqueue;
 	/**< PMD Crypto adapter enqueue function. */
-	uintptr_t reserved[6];
+	event_enqueue_queue_burst_t enqueue_new_same_dest;
+	/**< PMD function to enqueue a burst of new events to the same destination queue. */
+	uintptr_t reserved[5];
 } __rte_cache_aligned;
 
 extern struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];