[v2] eventdev/timer: fix overflow issue
Commit Message
The software timer adapter converts event timer timeout ticks to a
number of CPU cycles at which an rte_timer should expire. The
computation uses integer operations that can result in overflow.
Use floating point operations instead to perform the computation, and
convert the final result back to an integer type when returning. Also
move the logic that checks the timeout range into the function that
performs the above computation.
Fixes: 6750b21bd6af ("eventdev: add default software timer adapter")
Cc: stable@dpdk.org
Signed-off-by: Erik Gabriel Carrillo <erik.g.carrillo@intel.com>
---
v2:
* Fix implicit int to float conversion build warning on Clang
lib/eventdev/rte_event_timer_adapter.c | 71 ++++++++++++--------------
1 file changed, 34 insertions(+), 37 deletions(-)
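To see the overflow concretely (an illustrative sketch, not part of the patch): the old code computes timeout_ns * rte_get_timer_hz() before dividing by NSECPERSEC, so the intermediate product exceeds UINT64_MAX once timeout_ns grows past about 2^64 / timer_hz, roughly 6.1 seconds for an assumed 3 GHz timer:

/* Illustrative only; the 3 GHz timer frequency is an assumption. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t timer_hz = 3000000000ULL;     /* assumed rte_get_timer_hz() */
	uint64_t nsecpersec = 1000000000ULL;
	uint64_t timeout_ns = 10 * nsecpersec; /* a 10 second timeout */

	/* Old computation: 10e9 * 3e9 = 3e19 > 2^64, so the product wraps
	 * and the resulting cycle count is wrong.
	 */
	uint64_t wrapped = timeout_ns * timer_hz / nsecpersec;

	/* Expected value, computed here in a way that avoids the wrap for
	 * this whole-second example (the patch itself uses double math).
	 */
	uint64_t expected = timeout_ns / nsecpersec * timer_hz;

	printf("wrapped=%" PRIu64 " expected=%" PRIu64 "\n", wrapped, expected);
	return 0;
}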
Comments
On Wed, Jan 25, 2023 at 2:16 AM Erik Gabriel Carrillo
<erik.g.carrillo@intel.com> wrote:
>
> The software timer adapter converts event timer timeout ticks to a
> number of CPU cycles at which an rte_timer should expire. The
> computation uses integer operations that can result in overflow.
>
> Use floating point operations instead to perform the computation, and
> convert the final result back to an integer type when returning. Also
> move the logic that checks the timeout range into the function that
> performs the above computation.
>
> Fixes: 6750b21bd6af ("eventdev: add default software timer adapter")
> Cc: stable@dpdk.org
>
> Signed-off-by: Erik Gabriel Carrillo <erik.g.carrillo@intel.com>
> ---
> v2:
> * Fix implicit int to float conversion build warning on Clang
The comments from @Stephen Hemminger on using rte_reciprocal have not
been addressed yet. Also check what computation can be moved to the
slow path.
Marking as "Changes requested"; looking for the next version.
>
> lib/eventdev/rte_event_timer_adapter.c | 71 ++++++++++++--------------
> 1 file changed, 34 insertions(+), 37 deletions(-)
>
> diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
> index d357f7403a..2efeb73bce 100644
> --- a/lib/eventdev/rte_event_timer_adapter.c
> +++ b/lib/eventdev/rte_event_timer_adapter.c
> @@ -719,13 +719,28 @@ swtim_callback(struct rte_timer *tim)
> }
> }
>
> -static __rte_always_inline uint64_t
> +static __rte_always_inline int
> get_timeout_cycles(struct rte_event_timer *evtim,
> - const struct rte_event_timer_adapter *adapter)
> + const struct rte_event_timer_adapter *adapter,
> + uint64_t *timeout_cycles)
> {
> struct swtim *sw = swtim_pmd_priv(adapter);
> - uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns;
> - return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
> + uint64_t timeout_nsecs;
> + double cycles_per_nsec;
> +
> + cycles_per_nsec = (double)rte_get_timer_hz() / NSECPERSEC;
> +
> + timeout_nsecs = evtim->timeout_ticks * sw->timer_tick_ns;
> + if (timeout_nsecs > sw->max_tmo_ns)
> + return -1;
> + if (timeout_nsecs < sw->timer_tick_ns)
> + return -2;
> +
> + if (timeout_cycles)
> + *timeout_cycles = (uint64_t)ceil(timeout_nsecs *
> + cycles_per_nsec);
> +
> + return 0;
> }
>
> /* This function returns true if one or more (adapter) ticks have occurred since
> @@ -759,23 +774,6 @@ swtim_did_tick(struct swtim *sw)
> return false;
> }
>
> -/* Check that event timer timeout value is in range */
> -static __rte_always_inline int
> -check_timeout(struct rte_event_timer *evtim,
> - const struct rte_event_timer_adapter *adapter)
> -{
> - uint64_t tmo_nsec;
> - struct swtim *sw = swtim_pmd_priv(adapter);
> -
> - tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns;
> - if (tmo_nsec > sw->max_tmo_ns)
> - return -1;
> - if (tmo_nsec < sw->timer_tick_ns)
> - return -2;
> -
> - return 0;
> -}
> -
> /* Check that event timer event queue sched type matches destination event queue
> * sched type
> */
> @@ -1195,21 +1193,6 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
> break;
> }
>
> - ret = check_timeout(evtims[i], adapter);
> - if (unlikely(ret == -1)) {
> - __atomic_store_n(&evtims[i]->state,
> - RTE_EVENT_TIMER_ERROR_TOOLATE,
> - __ATOMIC_RELAXED);
> - rte_errno = EINVAL;
> - break;
> - } else if (unlikely(ret == -2)) {
> - __atomic_store_n(&evtims[i]->state,
> - RTE_EVENT_TIMER_ERROR_TOOEARLY,
> - __ATOMIC_RELAXED);
> - rte_errno = EINVAL;
> - break;
> - }
> -
> if (unlikely(check_destination_event_queue(evtims[i],
> adapter) < 0)) {
> __atomic_store_n(&evtims[i]->state,
> @@ -1225,7 +1208,21 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
> evtims[i]->impl_opaque[0] = (uintptr_t)tim;
> evtims[i]->impl_opaque[1] = (uintptr_t)adapter;
>
> - cycles = get_timeout_cycles(evtims[i], adapter);
> + ret = get_timeout_cycles(evtims[i], adapter, &cycles);
> + if (unlikely(ret == -1)) {
> + __atomic_store_n(&evtims[i]->state,
> + RTE_EVENT_TIMER_ERROR_TOOLATE,
> + __ATOMIC_RELAXED);
> + rte_errno = EINVAL;
> + break;
> + } else if (unlikely(ret == -2)) {
> + __atomic_store_n(&evtims[i]->state,
> + RTE_EVENT_TIMER_ERROR_TOOEARLY,
> + __ATOMIC_RELAXED);
> + rte_errno = EINVAL;
> + break;
> + }
> +
> ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
> type, lcore_id, NULL, evtims[i]);
> if (ret < 0) {
> --
> 2.23.0
>
> -----Original Message-----
> From: Jerin Jacob <jerinjacobk@gmail.com>
> Sent: Tuesday, February 7, 2023 12:01 AM
> To: Carrillo, Erik G <erik.g.carrillo@intel.com>; Stephen Hemminger
> <stephen@networkplumber.org>
> Cc: jerinj@marvell.com; dev@dpdk.org; stable@dpdk.org
> Subject: Re: [PATCH v2] eventdev/timer: fix overflow issue
>
> On Wed, Jan 25, 2023 at 2:16 AM Erik Gabriel Carrillo
> <erik.g.carrillo@intel.com> wrote:
> >
> > The software timer adapter converts event timer timeout ticks to a
> > number of CPU cycles at which an rte_timer should expire. The
> > computation uses integer operations that can result in overflow.
> >
> > Use floating point operations instead to perform the computation, and
> > convert the final result back to an integer type when returning. Also
> > move the logic that checks the timeout range into the function that
> > performs the above computation.
> >
> > Fixes: 6750b21bd6af ("eventdev: add default software timer adapter")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Erik Gabriel Carrillo <erik.g.carrillo@intel.com>
> > ---
> > v2:
> > * Fix implicit int to float conversion build warning on Clang
>
> The comments from @Stephen Hemminger on using rte_reciprocal have not
> been addressed yet. Also check what computation can be moved to the
> slow path.
> Marking as "Changes requested"; looking for the next version.
>
>
Ok - thanks for the pointer to rte_reciprocal. I'll rework and submit a new version.
Regards,
Erik
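For reference, a rough sketch of the rte_reciprocal direction suggested above. This is not the submitted rework; the helper names, the cached timer_hz, and the 1000000000ULL stand-in for NSECPERSEC are assumptions about where the slow-path setup could live. Splitting off whole seconds first keeps the remaining remainder * timer_hz product inside 64 bits, and rte_reciprocal_divide_u64() replaces the runtime division on the fast path:

#include <stdint.h>
#include <rte_cycles.h>
#include <rte_reciprocal.h>

static uint64_t timer_hz;                      /* cached rte_get_timer_hz() */
static struct rte_reciprocal_u64 nsec_inverse; /* reciprocal of 1e9 */

/* Slow path: run once, e.g. at adapter init. */
static void
timeout_conv_init(void)
{
	timer_hz = rte_get_timer_hz();
	nsec_inverse = rte_reciprocal_value_u64(1000000000ULL);
}

/* Fast path: convert a (range-checked) timeout in nanoseconds to timer
 * cycles without overflowing uint64_t and without floating point.
 */
static uint64_t
timeout_nsecs_to_cycles(uint64_t timeout_nsecs)
{
	uint64_t secs, secs_cycles = 0;

	if (timeout_nsecs >= 1000000000ULL) {
		secs = rte_reciprocal_divide_u64(timeout_nsecs, &nsec_inverse);
		secs_cycles = secs * timer_hz;
		timeout_nsecs -= secs * 1000000000ULL;
	}

	/* timeout_nsecs < 1e9 here, so the product stays within 64 bits
	 * for any realistic timer frequency.
	 */
	return secs_cycles + rte_reciprocal_divide_u64(timeout_nsecs * timer_hz,
						       &nsec_inverse);
}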