[RFC,v5,6/6] event/dsw: optimize serving port logic
Commit Message
To reduce flow migration overhead, replace the single 64-bit mask used
to track which set of ports is bound to a particular queue with a
multi-word bitset.
Signed-off-by: Mattias Rönnblom <mattias.ronnblom@ericsson.com>
---
drivers/event/dsw/dsw_evdev.c | 19 +++++++------------
drivers/event/dsw/dsw_evdev.h | 3 ++-
drivers/event/dsw/dsw_event.c | 7 ++++---
3 files changed, 13 insertions(+), 16 deletions(-)
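For context on the API switch the hunks below make: the driver goes from
open-coded bit operations on a single uint64_t to the rte_bitset calls
introduced earlier in this series. The following is an illustrative sketch
only, not part of the patch; the names example_queue, EXAMPLE_MAX_PORTS and
the helper functions are hypothetical, and only calls that actually appear
in the diff are used.

#include <stdbool.h>
#include <stdint.h>

#include <rte_bitset.h>

#define EXAMPLE_MAX_PORTS 64 /* stand-in for DSW_MAX_PORTS */

struct example_queue {
	/* one bit per serving port, sized at compile time */
	RTE_BITSET_DECLARE(serving_ports, EXAMPLE_MAX_PORTS);
	uint16_t num_serving_ports;
};

static void
example_queue_setup(struct example_queue *queue)
{
	/* clear all bits before any port is linked */
	rte_bitset_init(queue->serving_ports, EXAMPLE_MAX_PORTS);
	queue->num_serving_ports = 0;
}

static void
example_queue_add_port(struct example_queue *queue, uint16_t port_id)
{
	rte_bitset_set(queue->serving_ports, port_id);
	queue->num_serving_ports++;
}

static bool
example_queue_remove_port(struct example_queue *queue, uint16_t port_id)
{
	if (!rte_bitset_test(queue->serving_ports, port_id))
		return false;

	rte_bitset_clear(queue->serving_ports, port_id);
	queue->num_serving_ports--;

	return true;
}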
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -118,6 +118,7 @@ dsw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
queue->schedule_type = conf->schedule_type;
}
+ rte_bitset_init(queue->serving_ports, DSW_MAX_PORTS);
queue->num_serving_ports = 0;
return 0;
@@ -144,20 +145,16 @@ dsw_queue_release(struct rte_eventdev *dev __rte_unused,
static void
queue_add_port(struct dsw_queue *queue, uint16_t port_id)
{
- uint64_t port_mask = UINT64_C(1) << port_id;
-
- queue->serving_ports |= port_mask;
+ rte_bitset_set(queue->serving_ports, port_id);
queue->num_serving_ports++;
}
static bool
queue_remove_port(struct dsw_queue *queue, uint16_t port_id)
{
- uint64_t port_mask = UINT64_C(1) << port_id;
-
- if (queue->serving_ports & port_mask) {
+ if (rte_bitset_test(queue->serving_ports, port_id)) {
queue->num_serving_ports--;
- queue->serving_ports ^= port_mask;
+ rte_bitset_clear(queue->serving_ports, port_id);
return true;
}
@@ -257,14 +254,12 @@ initial_flow_to_port_assignment(struct dsw_evdev *dsw)
struct dsw_queue *queue = &dsw->queues[queue_id];
uint16_t flow_hash;
for (flow_hash = 0; flow_hash < DSW_MAX_FLOWS; flow_hash++) {
- uint8_t skip =
- rte_rand_max(queue->num_serving_ports);
+ uint8_t skip = rte_rand_max(queue->num_serving_ports);
uint8_t port_id;
for (port_id = 0;; port_id++) {
- uint64_t port_mask = UINT64_C(1) << port_id;
-
- if (queue->serving_ports & port_mask) {
+ if (rte_bitset_test(queue->serving_ports,
+     port_id)) {
if (skip == 0)
break;
skip--;
--- a/drivers/event/dsw/dsw_evdev.h
+++ b/drivers/event/dsw/dsw_evdev.h
@@ -7,6 +7,7 @@
#include <eventdev_pmd.h>
+#include <rte_bitset.h>
#include <rte_event_ring.h>
#include <rte_eventdev.h>
@@ -234,7 +235,7 @@ struct __rte_cache_aligned dsw_port {
struct dsw_queue {
uint8_t schedule_type;
- uint64_t serving_ports;
+ RTE_BITSET_DECLARE(serving_ports, DSW_MAX_PORTS);
uint16_t num_serving_ports;
alignas(RTE_CACHE_LINE_SIZE) uint8_t flow_to_port_map[DSW_MAX_FLOWS];
--- a/drivers/event/dsw/dsw_event.c
+++ b/drivers/event/dsw/dsw_event.c
@@ -447,9 +447,8 @@ static bool
dsw_is_serving_port(struct dsw_evdev *dsw, uint8_t port_id, uint8_t queue_id)
{
struct dsw_queue *queue = &dsw->queues[queue_id];
- uint64_t port_mask = UINT64_C(1) << port_id;
- return queue->serving_ports & port_mask;
+ return rte_bitset_test(queue->serving_ports, port_id);
}
static bool
@@ -571,7 +570,9 @@ dsw_schedule(struct dsw_evdev *dsw, uint8_t queue_id, uint16_t flow_hash)
/* A single-link queue, or atomic/ordered/parallel but
* with just a single serving port.
*/
- port_id = rte_bsf64(queue->serving_ports);
+ port_id = (uint8_t)rte_bitset_find_first_set(
+     queue->serving_ports, DSW_MAX_PORTS
+ );
DSW_LOG_DP(DEBUG, "Event with queue_id %d flow_hash %d is scheduled "
"to port %d.\n", queue_id, flow_hash, port_id);