From: Konstantin Ananyev <konstantin.ananyev@huawei.com>
1) ./lib/eal/linux/eal_interrupts.c:1073:16: warning: ISO C90 forbids variable length array ‘events’
eal_intr_handle_interrupts() is called by eal_intr_thread_main(), so it
seems fine to simply allocate space for events from the heap and reuse
the same buffer for the lifetime of the thread.
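For illustration only, a minimal self-contained sketch of the
grow-and-reuse idiom; the grow_events() helper and its names are
hypothetical and not part of this patch:

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/epoll.h>

    /* Return a buffer able to hold at least "needed" epoll events,
     * growing it with realloc() only when the current capacity is
     * exceeded; on allocation failure the old (smaller) buffer is
     * kept and returned, so the caller keeps working with *cap. */
    static struct epoll_event *
    grow_events(struct epoll_event *buf, uint32_t *cap, uint32_t needed)
    {
        struct epoll_event *p;

        if (needed <= *cap)
            return buf;
        p = realloc(buf, needed * sizeof(*p));
        if (p == NULL)
            return buf;
        *cap = needed;
        return p;
    }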
2) ./lib/eal/linux/eal_interrupts.c:1319:16: warning: ISO C90 forbids variable length array ‘evs’
Make eal_epoll_wait() use a fixed-size array and reuse it over multiple
iterations to process up to @maxevents events.
Note that technically this is not a one-to-one replacement, as the
number of events returned by the first call to
epoll_wait(..., timeout) might now be reduced.
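For illustration, a minimal sketch of the chunked draining loop using
plain epoll_wait() only; wait_chunked() and CHUNK are hypothetical
stand-ins, and the actual patch additionally translates each chunk via
eal_epoll_process_event():

    #include <sys/epoll.h>

    #define CHUNK 16    /* stands in for MAX_ITER_EVNUM */

    /* Collect up to maxevents ready events: block once with the
     * caller's timeout, then drain whatever else is already pending
     * with timeout 0, so the caller can still receive more than
     * CHUNK events in total. */
    static int
    wait_chunked(int epfd, struct epoll_event *out, int maxevents,
        int timeout)
    {
        struct epoll_event evs[CHUNK];
        int i = 0, j, n, rc;

        n = maxevents < CHUNK ? maxevents : CHUNK;
        rc = epoll_wait(epfd, evs, n, timeout);
        while (rc > 0) {
            /* copy this chunk into the caller's array */
            for (j = 0; j < rc; j++)
                out[i++] = evs[j];
            n = maxevents - i < CHUNK ? maxevents - i : CHUNK;
            rc = (n == 0) ? 0 : epoll_wait(epfd, evs, n, 0);
        }
        return rc < 0 ? rc : i;
    }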
Signed-off-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
---
lib/eal/linux/eal_interrupts.c | 59 +++++++++++++++++++++++++++++++++++++-----
1 file changed, 52 insertions(+), 7 deletions(-)
@@ -34,6 +34,8 @@
#define EAL_INTR_EPOLL_WAIT_FOREVER (-1)
#define NB_OTHER_INTR 1
+#define MAX_ITER_EVNUM RTE_EVENT_ETH_INTR_RING_SIZE
+
static RTE_DEFINE_PER_LCORE(int, _epfd) = -1; /**< epoll fd per thread */
/**
@@ -1068,9 +1070,9 @@ struct rte_intr_source {
* void
*/
static void
-eal_intr_handle_interrupts(int pfd, unsigned totalfds)
+eal_intr_handle_interrupts(int pfd, struct epoll_event events[],
+ unsigned int totalfds)
{
- struct epoll_event events[totalfds];
int nfds = 0;
for(;;) {
@@ -1106,6 +1108,12 @@ struct rte_intr_source {
static __rte_noreturn uint32_t
eal_intr_thread_main(__rte_unused void *arg)
{
+ uint32_t n, nb_event;
+ struct epoll_event *events, *p;
+
+ nb_event = 0;
+ events = NULL;
+
/* host thread, never break out */
for (;;) {
/* build up the epoll fd with all descriptors we are to
@@ -1159,8 +1167,23 @@ struct rte_intr_source {
numfds++;
}
rte_spinlock_unlock(&intr_lock);
+
+ /* alloc space for events, when necessary */
+ if (numfds > nb_event) {
+ n = numfds + MAX_ITER_EVNUM;
+ p = realloc(events, n * sizeof(events[0]));
+ if (p == NULL) {
+ EAL_LOG(ERR, "failed to allocate %u events",
+ numfds);
+ numfds = nb_event;
+ } else {
+ nb_event = n;
+ events = p;
+ }
+ }
+
/* serve the interrupt */
- eal_intr_handle_interrupts(pfd, numfds);
+ eal_intr_handle_interrupts(pfd, events, numfds);
/**
* when we return, we need to rebuild the
@@ -1168,6 +1191,8 @@ struct rte_intr_source {
*/
close(pfd);
}
+
+ free(events);
}
int
@@ -1316,8 +1341,9 @@ struct rte_intr_source {
eal_epoll_wait(int epfd, struct rte_epoll_event *events,
int maxevents, int timeout, bool interruptible)
{
- struct epoll_event evs[maxevents];
int rc;
+ uint32_t i, k, n, num;
+ struct epoll_event evs[MAX_ITER_EVNUM];
if (!events) {
EAL_LOG(ERR, "rte_epoll_event can't be NULL");
@@ -1328,12 +1354,31 @@ struct rte_intr_source {
if (epfd == RTE_EPOLL_PER_THREAD)
epfd = rte_intr_tls_epfd();
+ num = maxevents;
+ n = RTE_MIN(RTE_DIM(evs), num);
+
+ /* Process events in chunks of MAX_ITER_EVNUM */
+
while (1) {
- rc = epoll_wait(epfd, evs, maxevents, timeout);
+ rc = epoll_wait(epfd, evs, n, timeout);
if (likely(rc > 0)) {
+
/* epoll_wait has at least one fd ready to read */
- rc = eal_epoll_process_event(evs, rc, events);
- break;
+ for (i = 0, k = 0; rc > 0;) {
+ k += rc;
+ rc = eal_epoll_process_event(evs, rc,
+ events + i);
+ i += rc;
+
+ /*
+ * try to read more events that are already
+ * available (up to maxevents in total).
+ */
+ n = RTE_MIN(RTE_DIM(evs), num - k);
+ rc = (n == 0) ? 0 : epoll_wait(epfd, evs, n, 0);
+ }
+ return i;
+
} else if (rc < 0) {
if (errno == EINTR) {
if (interruptible)