[v2] event/cnxk: flush event queues over multiple passes
Checks
Commit Message
If an event queue flush does not complete after a fixed number of tries,
remaining queues are flushed before retrying the one with incomplete
flush.
Signed-off-by: Shijith Thotton <sthotton@marvell.com>
---
drivers/event/cnxk/cn10k_eventdev.c | 10 +++++++++-
drivers/event/cnxk/cn9k_eventdev.c | 10 +++++++++-
drivers/event/cnxk/cnxk_eventdev.c | 31 ++++++++++++++++++++++-------
drivers/event/cnxk/cnxk_eventdev.h | 5 +++--
4 files changed, 45 insertions(+), 11 deletions(-)
Comments
On Mon, May 16, 2022 at 9:52 PM Shijith Thotton <sthotton@marvell.com> wrote:
>
> If an event queue flush does not complete after a fixed number of tries,
> remaining queues are flushed before retrying the one with incomplete
> flush.
>
> Signed-off-by: Shijith Thotton <sthotton@marvell.com>
Applied to dpdk-next-net-eventdev/for-main. Thanks
> ---
> drivers/event/cnxk/cn10k_eventdev.c | 10 +++++++++-
> drivers/event/cnxk/cn9k_eventdev.c | 10 +++++++++-
> drivers/event/cnxk/cnxk_eventdev.c | 31 ++++++++++++++++++++++-------
> drivers/event/cnxk/cnxk_eventdev.h | 5 +++--
> 4 files changed, 45 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
> index 94829e789c..f70c4c58ce 100644
> --- a/drivers/event/cnxk/cn10k_eventdev.c
> +++ b/drivers/event/cnxk/cn10k_eventdev.c
> @@ -108,10 +108,11 @@ cn10k_sso_hws_release(void *arg, void *hws)
> memset(ws, 0, sizeof(*ws));
> }
>
> -static void
> +static int
> cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
> cnxk_handle_event_t fn, void *arg)
> {
> + uint64_t retry = CNXK_SSO_FLUSH_RETRY_MAX;
> struct cn10k_sso_hws *ws = hws;
> uint64_t cq_ds_cnt = 1;
> uint64_t aq_cnt = 1;
> @@ -141,6 +142,8 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
> fn(arg, ev);
> if (ev.sched_type != SSO_TT_EMPTY)
> cnxk_sso_hws_swtag_flush(ws->base);
> + else if (retry-- == 0)
> + break;
> do {
> val = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
> } while (val & BIT_ULL(56));
> @@ -151,8 +154,13 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
> cq_ds_cnt &= 0x3FFF3FFF0000;
> }
>
> + if (aq_cnt || cq_ds_cnt || ds_cnt)
> + return -EAGAIN;
> +
> plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
> rte_mb();
> +
> + return 0;
> }
>
> static void
> diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
> index 987888d3db..dce975bba2 100644
> --- a/drivers/event/cnxk/cn9k_eventdev.c
> +++ b/drivers/event/cnxk/cn9k_eventdev.c
> @@ -117,11 +117,12 @@ cn9k_sso_hws_release(void *arg, void *hws)
> }
> }
>
> -static void
> +static int
> cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
> cnxk_handle_event_t fn, void *arg)
> {
> struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
> + uint64_t retry = CNXK_SSO_FLUSH_RETRY_MAX;
> struct cnxk_timesync_info *tstamp;
> struct cn9k_sso_hws_dual *dws;
> struct cn9k_sso_hws *ws;
> @@ -164,6 +165,8 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
> fn(arg, ev);
> if (ev.sched_type != SSO_TT_EMPTY)
> cnxk_sso_hws_swtag_flush(ws_base);
> + else if (retry-- == 0)
> + break;
> do {
> val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
> } while (val & BIT_ULL(56));
> @@ -174,7 +177,12 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
> cq_ds_cnt &= 0x3FFF3FFF0000;
> }
>
> + if (aq_cnt || cq_ds_cnt || ds_cnt)
> + return -EAGAIN;
> +
> plt_write64(0, ws_base + SSOW_LF_GWS_OP_GWC_INVAL);
> +
> + return 0;
> }
>
> static void
> diff --git a/drivers/event/cnxk/cnxk_eventdev.c b/drivers/event/cnxk/cnxk_eventdev.c
> index be021d86c9..91235ed9f3 100644
> --- a/drivers/event/cnxk/cnxk_eventdev.c
> +++ b/drivers/event/cnxk/cnxk_eventdev.c
> @@ -385,9 +385,10 @@ static void
> cnxk_sso_cleanup(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
> cnxk_sso_hws_flush_t flush_fn, uint8_t enable)
> {
> + uint8_t pend_list[RTE_EVENT_MAX_QUEUES_PER_DEV], pend_cnt, new_pcnt;
> struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
> uintptr_t hwgrp_base;
> - uint16_t i;
> + uint8_t queue_id, i;
> void *ws;
>
> for (i = 0; i < dev->nb_event_ports; i++) {
> @@ -396,14 +397,30 @@ cnxk_sso_cleanup(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
> }
>
> rte_mb();
> +
> + /* Consume all the events through HWS0 */
> ws = event_dev->data->ports[0];
>
> - for (i = 0; i < dev->nb_event_queues; i++) {
> - /* Consume all the events through HWS0 */
> - hwgrp_base = roc_sso_hwgrp_base_get(&dev->sso, i);
> - flush_fn(ws, i, hwgrp_base, cnxk_handle_event, event_dev);
> - /* Enable/Disable SSO GGRP */
> - plt_write64(enable, hwgrp_base + SSO_LF_GGRP_QCTL);
> + /* Starting list of queues to flush */
> + pend_cnt = dev->nb_event_queues;
> + for (i = 0; i < dev->nb_event_queues; i++)
> + pend_list[i] = i;
> +
> + while (pend_cnt) {
> + new_pcnt = 0;
> + for (i = 0; i < pend_cnt; i++) {
> + queue_id = pend_list[i];
> + hwgrp_base =
> + roc_sso_hwgrp_base_get(&dev->sso, queue_id);
> + if (flush_fn(ws, queue_id, hwgrp_base,
> + cnxk_handle_event, event_dev)) {
> + pend_list[new_pcnt++] = queue_id;
> + continue;
> + }
> + /* Enable/Disable SSO GGRP */
> + plt_write64(enable, hwgrp_base + SSO_LF_GGRP_QCTL);
> + }
> + pend_cnt = new_pcnt;
> }
> }
>
> diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
> index 5564746e6d..a490829a8a 100644
> --- a/drivers/event/cnxk/cnxk_eventdev.h
> +++ b/drivers/event/cnxk/cnxk_eventdev.h
> @@ -54,6 +54,7 @@
> #define CN10K_GW_MODE_PREF 1
> #define CN10K_GW_MODE_PREF_WFE 2
>
> +#define CNXK_SSO_FLUSH_RETRY_MAX 0xfff
> #define CNXK_VALID_DEV_OR_ERR_RET(dev, drv_name) \
> do { \
> if (strncmp(dev->driver->name, drv_name, strlen(drv_name))) \
> @@ -69,8 +70,8 @@ typedef int (*cnxk_sso_unlink_t)(void *dev, void *ws, uint16_t *map,
> uint16_t nb_link);
> typedef void (*cnxk_handle_event_t)(void *arg, struct rte_event ev);
> typedef void (*cnxk_sso_hws_reset_t)(void *arg, void *ws);
> -typedef void (*cnxk_sso_hws_flush_t)(void *ws, uint8_t queue_id, uintptr_t base,
> - cnxk_handle_event_t fn, void *arg);
> +typedef int (*cnxk_sso_hws_flush_t)(void *ws, uint8_t queue_id, uintptr_t base,
> + cnxk_handle_event_t fn, void *arg);
>
> struct cnxk_sso_qos {
> uint16_t queue;
> --
> 2.25.1
>
@@ -108,10 +108,11 @@ cn10k_sso_hws_release(void *arg, void *hws)
memset(ws, 0, sizeof(*ws));
}
-static void
+static int
cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
cnxk_handle_event_t fn, void *arg)
{
+ uint64_t retry = CNXK_SSO_FLUSH_RETRY_MAX;
struct cn10k_sso_hws *ws = hws;
uint64_t cq_ds_cnt = 1;
uint64_t aq_cnt = 1;
@@ -141,6 +142,8 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
fn(arg, ev);
if (ev.sched_type != SSO_TT_EMPTY)
cnxk_sso_hws_swtag_flush(ws->base);
+ else if (retry-- == 0)
+ break;
do {
val = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
} while (val & BIT_ULL(56));
@@ -151,8 +154,13 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
cq_ds_cnt &= 0x3FFF3FFF0000;
}
+ if (aq_cnt || cq_ds_cnt || ds_cnt)
+ return -EAGAIN;
+
plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
rte_mb();
+
+ return 0;
}
static void
@@ -117,11 +117,12 @@ cn9k_sso_hws_release(void *arg, void *hws)
}
}
-static void
+static int
cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
cnxk_handle_event_t fn, void *arg)
{
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
+ uint64_t retry = CNXK_SSO_FLUSH_RETRY_MAX;
struct cnxk_timesync_info *tstamp;
struct cn9k_sso_hws_dual *dws;
struct cn9k_sso_hws *ws;
@@ -164,6 +165,8 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
fn(arg, ev);
if (ev.sched_type != SSO_TT_EMPTY)
cnxk_sso_hws_swtag_flush(ws_base);
+ else if (retry-- == 0)
+ break;
do {
val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
} while (val & BIT_ULL(56));
@@ -174,7 +177,12 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
cq_ds_cnt &= 0x3FFF3FFF0000;
}
+ if (aq_cnt || cq_ds_cnt || ds_cnt)
+ return -EAGAIN;
+
plt_write64(0, ws_base + SSOW_LF_GWS_OP_GWC_INVAL);
+
+ return 0;
}
static void
@@ -385,9 +385,10 @@ static void
cnxk_sso_cleanup(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
cnxk_sso_hws_flush_t flush_fn, uint8_t enable)
{
+ uint8_t pend_list[RTE_EVENT_MAX_QUEUES_PER_DEV], pend_cnt, new_pcnt;
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
uintptr_t hwgrp_base;
- uint16_t i;
+ uint8_t queue_id, i;
void *ws;
for (i = 0; i < dev->nb_event_ports; i++) {
@@ -396,14 +397,30 @@ cnxk_sso_cleanup(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
}
rte_mb();
+
+ /* Consume all the events through HWS0 */
ws = event_dev->data->ports[0];
- for (i = 0; i < dev->nb_event_queues; i++) {
- /* Consume all the events through HWS0 */
- hwgrp_base = roc_sso_hwgrp_base_get(&dev->sso, i);
- flush_fn(ws, i, hwgrp_base, cnxk_handle_event, event_dev);
- /* Enable/Disable SSO GGRP */
- plt_write64(enable, hwgrp_base + SSO_LF_GGRP_QCTL);
+ /* Starting list of queues to flush */
+ pend_cnt = dev->nb_event_queues;
+ for (i = 0; i < dev->nb_event_queues; i++)
+ pend_list[i] = i;
+
+ while (pend_cnt) {
+ new_pcnt = 0;
+ for (i = 0; i < pend_cnt; i++) {
+ queue_id = pend_list[i];
+ hwgrp_base =
+ roc_sso_hwgrp_base_get(&dev->sso, queue_id);
+ if (flush_fn(ws, queue_id, hwgrp_base,
+ cnxk_handle_event, event_dev)) {
+ pend_list[new_pcnt++] = queue_id;
+ continue;
+ }
+ /* Enable/Disable SSO GGRP */
+ plt_write64(enable, hwgrp_base + SSO_LF_GGRP_QCTL);
+ }
+ pend_cnt = new_pcnt;
}
}
@@ -54,6 +54,7 @@
#define CN10K_GW_MODE_PREF 1
#define CN10K_GW_MODE_PREF_WFE 2
+#define CNXK_SSO_FLUSH_RETRY_MAX 0xfff
#define CNXK_VALID_DEV_OR_ERR_RET(dev, drv_name) \
do { \
if (strncmp(dev->driver->name, drv_name, strlen(drv_name))) \
@@ -69,8 +70,8 @@ typedef int (*cnxk_sso_unlink_t)(void *dev, void *ws, uint16_t *map,
uint16_t nb_link);
typedef void (*cnxk_handle_event_t)(void *arg, struct rte_event ev);
typedef void (*cnxk_sso_hws_reset_t)(void *arg, void *ws);
-typedef void (*cnxk_sso_hws_flush_t)(void *ws, uint8_t queue_id, uintptr_t base,
- cnxk_handle_event_t fn, void *arg);
+typedef int (*cnxk_sso_hws_flush_t)(void *ws, uint8_t queue_id, uintptr_t base,
+ cnxk_handle_event_t fn, void *arg);
struct cnxk_sso_qos {
uint16_t queue;