eventdev: optimize Rx adapter event enqueue

Message ID: 1559005504-9695-1-git-send-email-nikhil.rao@intel.com
State: Accepted, archived
Delegated to: Jerin Jacob
Series: eventdev: optimize Rx adapter event enqueue

Checks

Context               Check    Description
ci/checkpatch         success  coding style OK
ci/Intel-compilation  fail     apply issues

Commit Message

Rao, Nikhil May 28, 2019, 1:05 a.m. UTC
  Set up the event when the Rx queue is added to the
adapter, instead of generating it each time a packet is
enqueued to the event device.

Signed-off-by: Nikhil Rao <nikhil.rao@intel.com>
---
 lib/librte_eventdev/rte_event_eth_rx_adapter.c | 39 ++++++++++----------------
 1 file changed, 15 insertions(+), 24 deletions(-)

This patch depends on
http://patchwork.dpdk.org/patch/53614/
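
For reference (not part of the patch): the idea is to bake all invariant
rte_event fields (op, event type, queue id, sched type, priority, and the
optional app flow id) into a single 64-bit word once at queue-add time, so
the per-packet fast path reduces to one 64-bit store plus a masked flow-id
merge. Below is a minimal, self-contained sketch of that technique. The
struct is a simplified stand-in for struct rte_event (the real union layout
is in rte_eventdev.h), and queue_event_setup()/event_fill() are illustrative
names, not adapter APIs.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct rte_event: all event metadata
 * aliases one 64-bit word, as in the real rte_eventdev.h union. */
struct ev {
	union {
		uint64_t event;
		struct {
			uint32_t flow_id;
			uint8_t queue_id;
			uint8_t sched_type;
			uint8_t priority;
			uint8_t op;
		};
	};
	void *mbuf;	/* packet carried by the event */
};

/* Run once when the Rx queue is added to the adapter: bake the
 * constant fields into one word (mirrors the qi_ev setup below). */
static uint64_t
queue_event_setup(uint8_t queue_id, uint8_t sched_type, uint8_t priority,
		  uint32_t app_flow_id)
{
	struct ev qi_ev = { .event = 0 };

	qi_ev.queue_id = queue_id;
	qi_ev.sched_type = sched_type;
	qi_ev.priority = priority;
	qi_ev.flow_id = app_flow_id;	/* 0 when no app flow id given */
	return qi_ev.event;
}

/* Per-packet fast path (mirrors the enqueue-buffer loop): one
 * 64-bit store, then take the RSS hash as flow id unless the app
 * provided one (flow_id_mask is ~0 if it did, else 0). */
static void
event_fill(struct ev *e, uint64_t qi_event, uint32_t flow_id_mask,
	   uint32_t rss, void *mbuf)
{
	e->event = qi_event;
	e->flow_id = (rss & ~flow_id_mask) | (e->flow_id & flow_id_mask);
	e->mbuf = mbuf;
}

int
main(void)
{
	uint64_t qi_event = queue_event_setup(3, 1, 0, 0);
	struct ev e;

	/* No app flow id: mask is 0, so the RSS hash wins. */
	event_fill(&e, qi_event, 0, 0x5ca1ab1e, NULL);
	printf("queue %u flow_id 0x%x\n",
	       (unsigned)e.queue_id, (unsigned)e.flow_id);
	return 0;
}
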
  

Patch

diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.c b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
index a201668..c51b9f3 100644
--- a/lib/librte_eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
@@ -198,11 +198,8 @@  struct eth_rx_queue_info {
 	int queue_enabled;	/* True if added */
 	int intr_enabled;
 	uint16_t wt;		/* Polling weight */
-	uint8_t event_queue_id;	/* Event queue to enqueue packets to */
-	uint8_t sched_type;	/* Sched type for events */
-	uint8_t priority;	/* Event priority */
-	uint32_t flow_id;	/* App provided flow identifier */
 	uint32_t flow_id_mask;	/* Set to ~0 if app provides flow id else 0 */
+	uint64_t event;
 };
 
 static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;
@@ -758,10 +755,8 @@  static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
 	struct rte_eth_event_enqueue_buffer *buf =
 					&rx_adapter->event_enqueue_buffer;
 	struct rte_event *ev = &buf->events[buf->count];
-	int32_t qid = eth_rx_queue_info->event_queue_id;
-	uint8_t sched_type = eth_rx_queue_info->sched_type;
-	uint8_t priority = eth_rx_queue_info->priority;
-	uint32_t flow_id;
+	uint64_t event = eth_rx_queue_info->event;
+	uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask;
 	struct rte_mbuf *m = mbufs[0];
 	uint32_t rss_mask;
 	uint32_t rss;
@@ -803,17 +798,9 @@  static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
 		rss = do_rss ?
 			rxa_do_softrss(m, rx_adapter->rss_key_be) :
 			m->hash.rss;
-		flow_id =
-		    eth_rx_queue_info->flow_id &
-				eth_rx_queue_info->flow_id_mask;
-		flow_id |= rss & ~eth_rx_queue_info->flow_id_mask;
-		ev->flow_id = flow_id;
-		ev->op = RTE_EVENT_OP_NEW;
-		ev->sched_type = sched_type;
-		ev->queue_id = qid;
-		ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER;
-		ev->sub_event_type = 0;
-		ev->priority = priority;
+		ev->event = event;
+		ev->flow_id = (rss & ~flow_id_mask) |
+				(ev->flow_id & flow_id_mask);
 		ev->mbuf = m;
 		ev++;
 	}
@@ -1705,6 +1692,7 @@  static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
 	int pollq;
 	int intrq;
 	int sintrq;
+	struct rte_event *qi_ev;
 
 	if (rx_queue_id == -1) {
 		uint16_t nb_rx_queues;
@@ -1721,16 +1709,19 @@  static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
 	sintrq = rxa_shared_intr(dev_info, rx_queue_id);
 
 	queue_info = &dev_info->rx_queue[rx_queue_id];
-	queue_info->event_queue_id = ev->queue_id;
-	queue_info->sched_type = ev->sched_type;
-	queue_info->priority = ev->priority;
 	queue_info->wt = conf->servicing_weight;
 
+	qi_ev = (struct rte_event *)&queue_info->event;
+	qi_ev->event = ev->event;
+	qi_ev->op = RTE_EVENT_OP_NEW;
+	qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER;
+	qi_ev->sub_event_type = 0;
+
 	if (conf->rx_queue_flags &
 			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID) {
-		queue_info->flow_id = ev->flow_id;
 		queue_info->flow_id_mask = ~0;
-	}
+	} else
+		qi_ev->flow_id = 0;
 
 	rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1);
 	if (rxa_polled_queue(dev_info, rx_queue_id)) {
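
A note on the flow-id logic above: the selection is branchless on the fast
path. flow_id_mask is ~0 when the queue was added with
RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID (keep the app flow id already
baked into the event word) and 0 otherwise (take the RSS hash). A quick
standalone check of both cases, using the same merge expression
(merge_flow_id is an illustrative helper, not adapter code):

#include <assert.h>
#include <stdint.h>

/* Same expression as the patched fast path. */
static uint32_t
merge_flow_id(uint32_t rss, uint32_t baked_flow_id, uint32_t mask)
{
	return (rss & ~mask) | (baked_flow_id & mask);
}

int
main(void)
{
	/* App supplied a flow id at queue add: mask is ~0, RSS ignored. */
	assert(merge_flow_id(0xabcdef, 42, ~0u) == 42);
	/* No app flow id: mask is 0, the RSS hash becomes the flow id. */
	assert(merge_flow_id(0xabcdef, 42, 0) == 0xabcdef);
	return 0;
}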