@@ -297,6 +297,7 @@ endforeach
dpdk_conf.set('RTE_MAX_ETHPORTS', get_option('max_ethports'))
dpdk_conf.set('RTE_LIBEAL_USE_HPET', get_option('use_hpet'))
dpdk_conf.set('RTE_ENABLE_TRACE_FP', get_option('enable_trace_fp'))
+dpdk_conf.set('RTE_LCORE_POLL_BUSYNESS', get_option('enable_lcore_poll_busyness'))
# values which have defaults which may be overridden
dpdk_conf.set('RTE_MAX_VFIO_GROUPS', 64)
dpdk_conf.set('RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB', 64)
@@ -39,6 +39,7 @@
#define RTE_LOG_DP_LEVEL RTE_LOG_INFO
#define RTE_BACKTRACE 1
#define RTE_MAX_VFIO_CONTAINERS 64
+#define RTE_LCORE_POLL_BUSYNESS_PERIOD_MS 2
/* bsd module defines */
#define RTE_CONTIGMEM_MAX_NUM_BUFS 64
@@ -28,6 +28,7 @@ extern "C" {
#include <stdbool.h>
#include <rte_cpuflags.h>
+#include <rte_lcore.h>
#include "rte_bbdev_op.h"
@@ -599,7 +600,9 @@ rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id,
{
struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
- return dev->dequeue_enc_ops(q_data, ops, num_ops);
+ const uint16_t nb_ops = dev->dequeue_enc_ops(q_data, ops, num_ops);
+ RTE_LCORE_TELEMETRY_TIMESTAMP(nb_ops);
+ return nb_ops;
}
/**
@@ -631,7 +634,9 @@ rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id,
{
struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
- return dev->dequeue_dec_ops(q_data, ops, num_ops);
+ const uint16_t nb_ops = dev->dequeue_dec_ops(q_data, ops, num_ops);
+ RTE_LCORE_TELEMETRY_TIMESTAMP(nb_ops);
+ return nb_ops;
}
@@ -662,7 +667,9 @@ rte_bbdev_dequeue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
{
struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
- return dev->dequeue_ldpc_enc_ops(q_data, ops, num_ops);
+ const uint16_t nb_ops = dev->dequeue_ldpc_enc_ops(q_data, ops, num_ops);
+ RTE_LCORE_TELEMETRY_TIMESTAMP(nb_ops);
+ return nb_ops;
}
/**
@@ -692,7 +699,9 @@ rte_bbdev_dequeue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
{
struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
- return dev->dequeue_ldpc_dec_ops(q_data, ops, num_ops);
+ const uint16_t nb_ops = dev->dequeue_ldpc_dec_ops(q_data, ops, num_ops);
+ RTE_LCORE_TELEMETRY_TIMESTAMP(nb_ops);
+ return nb_ops;
}
/** Definitions of device event types */
@@ -580,6 +580,8 @@ rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
nb_ops = (*dev->dequeue_burst)
(dev->data->queue_pairs[qp_id], ops, nb_ops);
+ RTE_LCORE_TELEMETRY_TIMESTAMP(nb_ops);
+
return nb_ops;
}
@@ -1915,6 +1915,8 @@ rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
rte_rcu_qsbr_thread_offline(list->qsbr, 0);
}
#endif
+
+ RTE_LCORE_TELEMETRY_TIMESTAMP(nb_ops);
return nb_ops;
}
@@ -56,6 +56,8 @@ rte_distributor_request_pkt(struct rte_distributor *d,
while (rte_rdtsc() < t)
rte_pause();
+ /* this was an empty poll */
+ RTE_LCORE_TELEMETRY_TIMESTAMP(0);
}
/*
@@ -134,24 +136,29 @@ rte_distributor_get_pkt(struct rte_distributor *d,
if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
if (return_count <= 1) {
+ uint16_t cnt;
pkts[0] = rte_distributor_get_pkt_single(d->d_single,
- worker_id, return_count ? oldpkt[0] : NULL);
- return (pkts[0]) ? 1 : 0;
- } else
- return -EINVAL;
+ worker_id,
+ return_count ? oldpkt[0] : NULL);
+ cnt = (pkts[0] != NULL) ? 1 : 0;
+ RTE_LCORE_TELEMETRY_TIMESTAMP(cnt);
+ return cnt;
+ }
+ return -EINVAL;
}
rte_distributor_request_pkt(d, worker_id, oldpkt, return_count);
- count = rte_distributor_poll_pkt(d, worker_id, pkts);
- while (count == -1) {
+ while ((count = rte_distributor_poll_pkt(d, worker_id, pkts)) == -1) {
uint64_t t = rte_rdtsc() + 100;
while (rte_rdtsc() < t)
rte_pause();
- count = rte_distributor_poll_pkt(d, worker_id, pkts);
+ /* this was an empty poll */
+ RTE_LCORE_TELEMETRY_TIMESTAMP(0);
}
+ RTE_LCORE_TELEMETRY_TIMESTAMP(count);
return count;
}
@@ -31,8 +31,13 @@ rte_distributor_request_pkt_single(struct rte_distributor_single *d,
union rte_distributor_buffer_single *buf = &d->bufs[worker_id];
int64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
| RTE_DISTRIB_GET_BUF;
-	RTE_WAIT_UNTIL_MASKED(&buf->bufptr64, RTE_DISTRIB_FLAGS_MASK,
-		==, 0, __ATOMIC_RELAXED);
+
+	while ((__atomic_load_n(&buf->bufptr64, __ATOMIC_RELAXED)
+			& RTE_DISTRIB_FLAGS_MASK) != 0) {
+		rte_pause();
+		/* this was an empty poll */
+		RTE_LCORE_TELEMETRY_TIMESTAMP(0);
+	}
/* Sync with distributor on GET_BUF flag. */
__atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE);
@@ -59,8 +64,11 @@ rte_distributor_get_pkt_single(struct rte_distributor_single *d,
{
struct rte_mbuf *ret;
rte_distributor_request_pkt_single(d, worker_id, oldpkt);
- while ((ret = rte_distributor_poll_pkt_single(d, worker_id)) == NULL)
+ while ((ret = rte_distributor_poll_pkt_single(d, worker_id)) == NULL) {
rte_pause();
+ /* this was an empty poll */
+ RTE_LCORE_TELEMETRY_TIMESTAMP(0);
+ }
return ret;
}
@@ -149,6 +149,7 @@
#include <rte_bitops.h>
#include <rte_common.h>
#include <rte_compat.h>
+#include <rte_lcore.h>
#ifdef __cplusplus
extern "C" {
@@ -1027,7 +1028,7 @@ rte_dma_completed(int16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
uint16_t *last_idx, bool *has_error)
{
struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
- uint16_t idx;
+ uint16_t idx, nb_ops;
bool err;
#ifdef RTE_DMADEV_DEBUG
@@ -1050,8 +1051,10 @@ rte_dma_completed(int16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
has_error = &err;
*has_error = false;
- return (*obj->completed)(obj->dev_private, vchan, nb_cpls, last_idx,
- has_error);
+ nb_ops = (*obj->completed)(obj->dev_private, vchan, nb_cpls, last_idx,
+ has_error);
+ RTE_LCORE_TELEMETRY_TIMESTAMP(nb_ops);
+ return nb_ops;
}
/**
@@ -1090,7 +1093,7 @@ rte_dma_completed_status(int16_t dev_id, uint16_t vchan,
enum rte_dma_status_code *status)
{
struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
- uint16_t idx;
+ uint16_t idx, nb_ops;
#ifdef RTE_DMADEV_DEBUG
if (!rte_dma_is_valid(dev_id) || nb_cpls == 0 || status == NULL)
@@ -1101,8 +1104,10 @@ rte_dma_completed_status(int16_t dev_id, uint16_t vchan,
if (last_idx == NULL)
last_idx = &idx;
- return (*obj->completed_status)(obj->dev_private, vchan, nb_cpls,
+ nb_ops = (*obj->completed_status)(obj->dev_private, vchan, nb_cpls,
last_idx, status);
+ RTE_LCORE_TELEMETRY_TIMESTAMP(nb_ops);
+ return nb_ops;
}
/**
new file mode 100644
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#include <unistd.h>
+#include <limits.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+
+#ifdef RTE_LCORE_POLL_BUSYNESS
+#include <rte_telemetry.h>
+#endif
+
+int __rte_lcore_telemetry_enabled;
+
+#ifdef RTE_LCORE_POLL_BUSYNESS
+
+/*
+ * Per-lcore poll busyness state. Written from the fast path by
+ * __rte_lcore_telemetry_timestamp() (presumably only ever by the owning
+ * lcore -- confirm no cross-lcore writes exist); read cross-thread by
+ * rte_lcore_poll_busyness(). Cache-aligned to avoid false sharing
+ * between adjacent lcores' entries.
+ */
+struct lcore_telemetry {
+ int busyness;
+ /**< Calculated busyness (gets set/returned by the API) */
+ int raw_busyness;
+ /**< Calculated busyness times 100. */
+ uint64_t interval_ts;
+ /**< when previous telemetry interval started */
+ uint64_t empty_cycles;
+ /**< empty cycle count since last interval */
+ uint64_t last_poll_ts;
+ /**< last poll timestamp */
+ bool last_empty;
+ /**< if last poll was empty */
+ unsigned int contig_poll_cnt;
+ /**< contiguous (always empty/non empty) poll counter */
+} __rte_cache_aligned;
+
+static struct lcore_telemetry *telemetry_data;
+
+#define LCORE_POLL_BUSYNESS_MAX 100
+#define LCORE_POLL_BUSYNESS_NOT_SET -1
+#define LCORE_POLL_BUSYNESS_MIN 0
+
+#define SMOOTH_COEFF 5
+#define STATE_CHANGE_OPT 32
+
+/*
+ * Allocate (on first call) and reset the per-lcore telemetry state.
+ * Called from the init constructor and again every time busyness
+ * collection is disabled, to discard stale statistics.
+ */
+static void lcore_config_init(void)
+{
+	int lcore_id;
+
+	/*
+	 * Allocate only once: this function is re-invoked on every disable,
+	 * and unconditionally calling calloc() here would leak the previous
+	 * array. Subsequent calls only reset the statistics.
+	 */
+	if (telemetry_data == NULL) {
+		telemetry_data = calloc(RTE_MAX_LCORE, sizeof(telemetry_data[0]));
+		if (telemetry_data == NULL)
+			rte_panic("Could not init lcore telemetry data: Out of memory\n");
+	}
+
+	RTE_LCORE_FOREACH(lcore_id) {
+		struct lcore_telemetry *td = &telemetry_data[lcore_id];
+
+		td->interval_ts = 0;
+		td->last_poll_ts = 0;
+		td->empty_cycles = 0;
+		td->last_empty = true;
+		td->contig_poll_cnt = 0;
+		td->busyness = LCORE_POLL_BUSYNESS_NOT_SET;
+		td->raw_busyness = 0;
+	}
+}
+
+/*
+ * Report the most recently computed poll busyness (0..100) for lcore_id,
+ * LCORE_POLL_BUSYNESS_NOT_SET (-1) if the lcore has not been polling
+ * recently, or -EINVAL for an out-of-range lcore id.
+ */
+int rte_lcore_poll_busyness(unsigned int lcore_id)
+{
+ /*
+ * NOTE(review): hz * PERIOD_MS equals (hz / MS_PER_S) * PERIOD_MS * 1000
+ * cycles, i.e. 1000 busyness periods -- confirm the implicit factor of
+ * 1000 relative to the interval length used in the timestamp function
+ * is intentional.
+ */
+ const uint64_t active_thresh = rte_get_tsc_hz() * RTE_LCORE_POLL_BUSYNESS_PERIOD_MS;
+ struct lcore_telemetry *tdata;
+
+ if (lcore_id >= RTE_MAX_LCORE)
+ return -EINVAL;
+ tdata = &telemetry_data[lcore_id];
+
+ /* if the lcore is not active */
+ if (tdata->interval_ts == 0)
+ return LCORE_POLL_BUSYNESS_NOT_SET;
+ /* if the core hasn't been active in a while */
+ else if ((rte_rdtsc() - tdata->interval_ts) > active_thresh)
+ return LCORE_POLL_BUSYNESS_NOT_SET;
+
+ /* this core is active, report its busyness */
+ return telemetry_data[lcore_id].busyness;
+}
+
+/* Report whether poll busyness collection is currently enabled (1/0). */
+int rte_lcore_poll_busyness_enabled(void)
+{
+ return __rte_lcore_telemetry_enabled;
+}
+
+/*
+ * Enable or disable poll busyness collection. Disabling also resets the
+ * per-lcore statistics (via lcore_config_init()) so that a subsequent
+ * enable starts from a clean state.
+ */
+void rte_lcore_poll_busyness_enabled_set(int enable)
+{
+ __rte_lcore_telemetry_enabled = !!enable;
+
+ if (!enable)
+ lcore_config_init();
+}
+
+/*
+ * Compute the new raw busyness (scaled x100, so 0..10000) from the empty
+ * and total cycle counts observed over the last interval, exponentially
+ * smoothed against the previous value with weight 1/SMOOTH_COEFF.
+ */
+static inline int calc_raw_busyness(const struct lcore_telemetry *tdata,
+ const uint64_t empty, const uint64_t total)
+{
+ /*
+ * we don't want to use floating point math here, but we want for our
+ * busyness to react smoothly to sudden changes, while still keeping the
+ * accuracy and making sure that over time the average follows busyness
+ * as measured just-in-time. therefore, we will calculate the average
+ * busyness using integer math, but shift the decimal point two places
+ * to the right, so that 100.0 becomes 10000. this allows us to report
+ * integer values (0..100) while still allowing ourselves to follow the
+ * just-in-time measurements when we calculate our averages.
+ */
+ const int max_raw_idle = LCORE_POLL_BUSYNESS_MAX * 100;
+
+ /*
+ * at upper end of the busyness scale, going up from 90->100 will take
+ * longer than going from 10->20 because of the averaging. to address
+ * this, we invert the scale when doing calculations: that is, we
+ * effectively calculate average *idle* cycle percentage, not average
+ * *busy* cycle percentage. this means that the scale is naturally
+ * biased towards fast scaling up, and slow scaling down.
+ */
+ const int prev_raw_idle = max_raw_idle - tdata->raw_busyness;
+
+ /* calculate rate of idle cycles, times 100 */
+ const int cur_raw_idle = (int)((empty * max_raw_idle) / total);
+
+ /* smoothen the idleness */
+ const int smoothened_idle =
+ (cur_raw_idle + prev_raw_idle * (SMOOTH_COEFF - 1)) / SMOOTH_COEFF;
+
+ /* convert idleness back to busyness */
+ return max_raw_idle - smoothened_idle;
+}
+
+/*
+ * Fast-path hook called after each poll with the number of buffers the
+ * poll returned. Accumulates cycles spent in empty polls and, once per
+ * RTE_LCORE_POLL_BUSYNESS_PERIOD_MS interval, folds them into the
+ * smoothed busyness value via calc_raw_busyness(). Each lcore updates
+ * only its own telemetry_data[] slot, so no synchronization is used.
+ */
+void __rte_lcore_telemetry_timestamp(uint16_t nb_rx)
+{
+ const unsigned int lcore_id = rte_lcore_id();
+ uint64_t interval_ts, empty_cycles, cur_tsc, last_poll_ts;
+ struct lcore_telemetry *tdata;
+ const bool empty = nb_rx == 0;
+ uint64_t diff_int, diff_last;
+ bool last_empty;
+
+ /* This telemetry is not supported for unregistered non-EAL threads */
+ if (lcore_id >= RTE_MAX_LCORE)
+ return;
+
+ tdata = &telemetry_data[lcore_id];
+ last_empty = tdata->last_empty;
+
+ /* optimization: don't do anything if status hasn't changed */
+ if (last_empty == empty && tdata->contig_poll_cnt++ < STATE_CHANGE_OPT)
+ return;
+ /* status changed or we're waiting for too long, reset counter */
+ tdata->contig_poll_cnt = 0;
+
+ cur_tsc = rte_rdtsc();
+
+ interval_ts = tdata->interval_ts;
+ empty_cycles = tdata->empty_cycles;
+ last_poll_ts = tdata->last_poll_ts;
+
+ diff_int = cur_tsc - interval_ts;
+ diff_last = cur_tsc - last_poll_ts;
+
+ /* is this the first time we're here? */
+ if (interval_ts == 0) {
+ tdata->busyness = LCORE_POLL_BUSYNESS_MIN;
+ tdata->raw_busyness = 0;
+ tdata->interval_ts = cur_tsc;
+ tdata->empty_cycles = 0;
+ tdata->contig_poll_cnt = 0;
+ goto end;
+ }
+
+ /* update the empty counter if we got an empty poll earlier */
+ if (last_empty)
+ empty_cycles += diff_last;
+
+ /* have we passed the interval? */
+ uint64_t interval = ((rte_get_tsc_hz() / MS_PER_S) * RTE_LCORE_POLL_BUSYNESS_PERIOD_MS);
+ if (diff_int > interval) {
+ int raw_busyness;
+
+ /* get updated busyness value */
+ raw_busyness = calc_raw_busyness(tdata, empty_cycles, diff_int);
+
+ /* set a new interval, reset empty counter */
+ tdata->interval_ts = cur_tsc;
+ tdata->empty_cycles = 0;
+ tdata->raw_busyness = raw_busyness;
+ /* bring busyness back to 0..100 range, biased to round up */
+ tdata->busyness = (raw_busyness + 50) / 100;
+ } else
+ /* we may have updated empty counter */
+ tdata->empty_cycles = empty_cycles;
+
+end:
+ /* update status for next poll */
+ tdata->last_poll_ts = cur_tsc;
+ tdata->last_empty = empty;
+}
+
+/* Telemetry callback for /eal/lcore/busyness_enable: turn collection on. */
+static int
+lcore_poll_busyness_enable(const char *cmd __rte_unused,
+ const char *params __rte_unused,
+ struct rte_tel_data *d)
+{
+ rte_lcore_poll_busyness_enabled_set(1);
+
+ rte_tel_data_start_dict(d);
+
+ rte_tel_data_add_dict_int(d, "poll_busyness_enabled", 1);
+
+ return 0;
+}
+
+/*
+ * Telemetry callback for /eal/lcore/busyness_disable: turn collection off.
+ *
+ * The per-lcore data is deliberately NOT freed here:
+ * rte_lcore_poll_busyness_enabled_set(0) already resets it, and freeing
+ * it would leave a dangling pointer behind -- a later re-enable (which
+ * does not re-allocate) would then dereference freed memory.
+ */
+static int
+lcore_poll_busyness_disable(const char *cmd __rte_unused,
+		const char *params __rte_unused,
+		struct rte_tel_data *d)
+{
+	rte_lcore_poll_busyness_enabled_set(0);
+
+	rte_tel_data_start_dict(d);
+
+	rte_tel_data_add_dict_int(d, "poll_busyness_enabled", 0);
+
+	return 0;
+}
+
+/*
+ * Telemetry callback for /eal/lcore/busyness: report the current poll
+ * busyness of every enabled lcore as a dict of "lcore id" -> value.
+ */
+static int
+lcore_handle_poll_busyness(const char *cmd __rte_unused,
+		const char *params __rte_unused, struct rte_tel_data *d)
+{
+	char corenum[64];
+	int i;
+
+	rte_tel_data_start_dict(d);
+
+	/* RTE_LCORE_FOREACH() already skips disabled lcores */
+	RTE_LCORE_FOREACH(i) {
+		snprintf(corenum, sizeof(corenum), "%d", i);
+		rte_tel_data_add_dict_int(d, corenum, rte_lcore_poll_busyness(i));
+	}
+
+	return 0;
+}
+
+/*
+ * Constructor: busyness collection defaults to enabled; allocate/reset
+ * the per-lcore state and register the telemetry endpoints.
+ */
+RTE_INIT(lcore_init_telemetry)
+{
+ __rte_lcore_telemetry_enabled = true;
+
+ lcore_config_init();
+
+ rte_telemetry_register_cmd("/eal/lcore/busyness", lcore_handle_poll_busyness,
+ "return percentage poll busyness of cores");
+
+ rte_telemetry_register_cmd("/eal/lcore/busyness_enable", lcore_poll_busyness_enable,
+ "enable lcore poll busyness measurement");
+
+ rte_telemetry_register_cmd("/eal/lcore/busyness_disable", lcore_poll_busyness_disable,
+ "disable lcore poll busyness measurement");
+}
+
+#else
+
+/* Stubs compiled when RTE_LCORE_POLL_BUSYNESS is off: report -ENOTSUP. */
+int rte_lcore_poll_busyness(unsigned int lcore_id __rte_unused)
+{
+ return -ENOTSUP;
+}
+
+int rte_lcore_poll_busyness_enabled(void)
+{
+ return -ENOTSUP;
+}
+
+void rte_lcore_poll_busyness_enabled_set(int enable __rte_unused)
+{
+}
+
+void __rte_lcore_telemetry_timestamp(uint16_t nb_rx __rte_unused)
+{
+}
+
+#endif
@@ -17,6 +17,7 @@ sources += files(
'eal_common_hexdump.c',
'eal_common_interrupts.c',
'eal_common_launch.c',
+ 'eal_common_lcore_telemetry.c',
'eal_common_lcore.c',
'eal_common_log.c',
'eal_common_mcfg.c',
@@ -415,6 +415,86 @@ rte_ctrl_thread_create(pthread_t *thread, const char *name,
const pthread_attr_t *attr,
void *(*start_routine)(void *), void *arg);
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Read poll busyness value corresponding to an lcore.
+ *
+ * @param lcore_id
+ * Lcore to read poll busyness value for.
+ * @return
+ * - value between 0 and 100 on success
+ * - -1 if lcore is not active
+ * - -EINVAL if lcore is invalid
+ * - -ENOMEM if not enough memory available
+ * - -ENOTSUP if not supported
+ */
+__rte_experimental
+int
+rte_lcore_poll_busyness(unsigned int lcore_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Check if lcore poll busyness telemetry is enabled.
+ *
+ * @return
+ * - 1 if lcore telemetry is enabled
+ * - 0 if lcore telemetry is disabled
+ * - -ENOTSUP if not lcore telemetry supported
+ */
+__rte_experimental
+int
+rte_lcore_poll_busyness_enabled(void);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enable or disable poll busyness telemetry.
+ *
+ * @param enable
+ * 1 to enable, 0 to disable
+ */
+__rte_experimental
+void
+rte_lcore_poll_busyness_enabled_set(int enable);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Lcore telemetry timestamping function.
+ *
+ * @param nb_rx
+ * Number of buffers processed by lcore.
+ */
+__rte_experimental
+void
+__rte_lcore_telemetry_timestamp(uint16_t nb_rx);
+
+/** @internal lcore telemetry enabled status */
+extern int __rte_lcore_telemetry_enabled;
+
+/**
+ * Call lcore telemetry timestamp function.
+ *
+ * @param nb_rx
+ * Number of buffers processed by lcore.
+ */
+#ifdef RTE_LCORE_POLL_BUSYNESS
+#define RTE_LCORE_TELEMETRY_TIMESTAMP(nb_rx) \
+	do { \
+		if (__rte_lcore_telemetry_enabled) \
+			__rte_lcore_telemetry_timestamp(nb_rx); \
+	} while (0)
+#else
+/* compiled out: keep the conventional no-op statement form */
+#define RTE_LCORE_TELEMETRY_TIMESTAMP(nb_rx) \
+	do { } while (0)
+#endif
+
#ifdef __cplusplus
}
#endif
@@ -25,6 +25,9 @@ subdir(arch_subdir)
deps += ['kvargs']
if not is_windows
deps += ['telemetry']
+else
+    # lcore poll busyness telemetry depends on the telemetry library
+    dpdk_conf.set('RTE_LCORE_POLL_BUSYNESS', false)
endif
if dpdk_conf.has('RTE_USE_LIBBSD')
ext_deps += libbsd
@@ -424,6 +424,13 @@ EXPERIMENTAL {
rte_thread_self;
rte_thread_set_affinity_by_id;
rte_thread_set_priority;
+
+ # added in 22.11
+ __rte_lcore_telemetry_timestamp;
+ __rte_lcore_telemetry_enabled;
+ rte_lcore_poll_busyness;
+ rte_lcore_poll_busyness_enabled;
+ rte_lcore_poll_busyness_enabled_set;
};
INTERNAL {
@@ -5675,6 +5675,8 @@ rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
#endif
rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
+
+ RTE_LCORE_TELEMETRY_TIMESTAMP(nb_rx);
return nb_rx;
}
@@ -2153,6 +2153,7 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
uint16_t nb_events, uint64_t timeout_ticks)
{
const struct rte_event_fp_ops *fp_ops;
+ uint16_t nb_evts;
void *port;
fp_ops = &rte_event_fp_ops[dev_id];
@@ -2175,10 +2176,13 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
* requests nb_events as const one
*/
if (nb_events == 1)
- return (fp_ops->dequeue)(port, ev, timeout_ticks);
+ nb_evts = (fp_ops->dequeue)(port, ev, timeout_ticks);
else
- return (fp_ops->dequeue_burst)(port, ev, nb_events,
- timeout_ticks);
+ nb_evts = (fp_ops->dequeue_burst)(port, ev, nb_events,
+ timeout_ticks);
+
+ RTE_LCORE_TELEMETRY_TIMESTAMP(nb_evts);
+ return nb_evts;
}
#define RTE_EVENT_DEV_MAINT_OP_FLUSH (1 << 0)
@@ -226,12 +226,15 @@ rte_rawdev_dequeue_buffers(uint16_t dev_id,
rte_rawdev_obj_t context)
{
struct rte_rawdev *dev;
+ int nb_ops;
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
dev = &rte_rawdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dequeue_bufs, -ENOTSUP);
- return (*dev->dev_ops->dequeue_bufs)(dev, buffers, count, context);
+ nb_ops = (*dev->dev_ops->dequeue_bufs)(dev, buffers, count, context);
+ RTE_LCORE_TELEMETRY_TIMESTAMP(nb_ops);
+ return nb_ops;
}
int
@@ -1530,6 +1530,7 @@ rte_regexdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
struct rte_regex_ops **ops, uint16_t nb_ops)
{
struct rte_regexdev *dev = &rte_regex_devices[dev_id];
+ uint16_t deq_ops;
#ifdef RTE_LIBRTE_REGEXDEV_DEBUG
RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
RTE_FUNC_PTR_OR_ERR_RET(*dev->dequeue, -ENOTSUP);
@@ -1538,7 +1539,9 @@ rte_regexdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
return -EINVAL;
}
#endif
- return (*dev->dequeue)(dev, qp_id, ops, nb_ops);
+ deq_ops = (*dev->dequeue)(dev, qp_id, ops, nb_ops);
+ RTE_LCORE_TELEMETRY_TIMESTAMP(deq_ops);
+ return deq_ops;
}
#ifdef __cplusplus
@@ -379,6 +379,7 @@ __rte_ring_do_dequeue_elem(struct rte_ring *r, void *obj_table,
end:
if (available != NULL)
*available = entries - n;
+ RTE_LCORE_TELEMETRY_TIMESTAMP(n);
return n;
}
@@ -20,6 +20,8 @@ option('enable_driver_sdk', type: 'boolean', value: false, description:
'Install headers to build drivers.')
option('enable_kmods', type: 'boolean', value: false, description:
'build kernel modules')
+option('enable_lcore_poll_busyness', type: 'boolean', value: true, description:
+ 'enable collection of lcore poll busyness telemetry')
option('examples', type: 'string', value: '', description:
'Comma-separated list of examples to build by default')
option('flexran_sdk', type: 'string', value: '', description: