[v2,18/22] event/dlb2: add PMD's token pop public interface
Checks
Commit Message
The PMD uses a public interface to allow applications to
control the token pop mode. Supported token pop modes are
as follows, and they impact core scheduling affinity for
ldb ports.
AUTO_POP: Pop the CQ tokens immediately after dequeueing.
DELAYED_POP: Pop CQ tokens after (dequeue_depth - 1) events
are released. Supported on load-balanced ports
only.
DEFERRED_POP: Pop the CQ tokens during next dequeue operation.
Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
---
doc/api/doxy-api-index.md | 1 +
drivers/event/dlb2/dlb2.c | 53 ++++++++++++++++--
drivers/event/dlb2/dlb2_priv.h | 3 +
drivers/event/dlb2/meson.build | 5 +-
drivers/event/dlb2/rte_pmd_dlb2.c | 39 +++++++++++++
drivers/event/dlb2/rte_pmd_dlb2.h | 68 +++++++++++++++++++++++
drivers/event/dlb2/rte_pmd_dlb2_event_version.map | 6 ++
7 files changed, 168 insertions(+), 7 deletions(-)
create mode 100644 drivers/event/dlb2/rte_pmd_dlb2.c
create mode 100644 drivers/event/dlb2/rte_pmd_dlb2.h
Comments
On Sat, Oct 17, 2020 at 11:57 PM Timothy McDaniel
<timothy.mcdaniel@intel.com> wrote:
>
> The PMD uses a public interface to allow applications to
> control the token pop mode. Supported token pop modes are
> as follows, and they impact core scheduling affinity for
> ldb ports.
>
> AUTO_POP: Pop the CQ tokens immediately after dequeueing.
> DELAYED_POP: Pop CQ tokens after (dequeue_depth - 1) events
> are released. Supported on load-balanced ports
> only.
> DEFERRED_POP: Pop the CQ tokens during next dequeue operation.
>
> Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
> ---
> doc/api/doxy-api-index.md | 1 +
> drivers/event/dlb2/dlb2.c | 53 ++++++++++++++++--
> drivers/event/dlb2/dlb2_priv.h | 3 +
> drivers/event/dlb2/meson.build | 5 +-
> drivers/event/dlb2/rte_pmd_dlb2.c | 39 +++++++++++++
> drivers/event/dlb2/rte_pmd_dlb2.h | 68 +++++++++++++++++++++++
> drivers/event/dlb2/rte_pmd_dlb2_event_version.map | 6 ++
> 7 files changed, 168 insertions(+), 7 deletions(-)
> create mode 100644 drivers/event/dlb2/rte_pmd_dlb2.c
> create mode 100644 drivers/event/dlb2/rte_pmd_dlb2.h
>
> diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
> index b855a8f..2b2020c 100644
> --- a/doc/api/doxy-api-index.md
> +++ b/doc/api/doxy-api-index.md
> @@ -26,6 +26,7 @@ The public API headers are grouped by topics:
> [event_eth_tx_adapter] (@ref rte_event_eth_tx_adapter.h),
> [event_timer_adapter] (@ref rte_event_timer_adapter.h),
> [event_crypto_adapter] (@ref rte_event_crypto_adapter.h),
> + [dlb2] (@ref rte_pmd_dlb2.h)
move this under the already existing "- **device specific**:" section.
On 17/10/2020 19:21, Timothy McDaniel wrote:
> The PMD uses a public interface to allow applications to
> control the token pop mode. Supported token pop modes are
> as follows, and they impact core scheduling affinity for
> ldb ports.
>
> AUTO_POP: Pop the CQ tokens immediately after dequeueing.
> DELAYED_POP: Pop CQ tokens after (dequeue_depth - 1) events
> are released. Supported on load-balanced ports
> only.
> DEFERRED_POP: Pop the CQ tokens during next dequeue operation.
>
> Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
> ---
> doc/api/doxy-api-index.md | 1 +
> drivers/event/dlb2/dlb2.c | 53 ++++++++++++++++--
> drivers/event/dlb2/dlb2_priv.h | 3 +
> drivers/event/dlb2/meson.build | 5 +-
> drivers/event/dlb2/rte_pmd_dlb2.c | 39 +++++++++++++
> drivers/event/dlb2/rte_pmd_dlb2.h | 68 +++++++++++++++++++++++
> drivers/event/dlb2/rte_pmd_dlb2_event_version.map | 6 ++
> 7 files changed, 168 insertions(+), 7 deletions(-)
> create mode 100644 drivers/event/dlb2/rte_pmd_dlb2.c
> create mode 100644 drivers/event/dlb2/rte_pmd_dlb2.h
>
> diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
> index b855a8f..2b2020c 100644
> --- a/doc/api/doxy-api-index.md
> +++ b/doc/api/doxy-api-index.md
> @@ -26,6 +26,7 @@ The public API headers are grouped by topics:
> [event_eth_tx_adapter] (@ref rte_event_eth_tx_adapter.h),
> [event_timer_adapter] (@ref rte_event_timer_adapter.h),
> [event_crypto_adapter] (@ref rte_event_crypto_adapter.h),
> + [dlb2] (@ref rte_pmd_dlb2.h)
> [rawdev] (@ref rte_rawdev.h),
> [metrics] (@ref rte_metrics.h),
> [bitrate] (@ref rte_bitrate.h),
> diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
> index 483659e..9a41418 100644
> --- a/drivers/event/dlb2/dlb2.c
> +++ b/drivers/event/dlb2/dlb2.c
> @@ -1200,7 +1200,7 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
> qm_port->qid_mappings = &dlb2->qm_ldb_to_ev_queue_id[0];
>
> qm_port->dequeue_depth = dequeue_depth;
> -
> + qm_port->token_pop_thresh = dequeue_depth;
> qm_port->owed_tokens = 0;
> qm_port->issued_releases = 0;
>
> @@ -1368,6 +1368,8 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
>
> qm_port->dequeue_depth = dequeue_depth;
>
> + /* Directed ports are auto-pop, by default. */
> + qm_port->token_pop_mode = AUTO_POP;
> qm_port->owed_tokens = 0;
> qm_port->issued_releases = 0;
>
> @@ -2615,6 +2617,14 @@ dlb2_event_enqueue_burst(void *event_port,
> dlb2_event_build_hcws(qm_port, &events[i], j,
> sched_types, queue_ids);
>
> + if (qm_port->token_pop_mode == DELAYED_POP && j < 4 &&
> + qm_port->issued_releases >= qm_port->token_pop_thresh - 1) {
> + dlb2_construct_token_pop_qe(qm_port, j);
> +
> + /* Reset the releases counter for the next QE batch */
> + qm_port->issued_releases -= qm_port->token_pop_thresh;
> + }
> +
> dlb2_hw_do_enqueue(qm_port, i == 0, port_data);
>
> cnt += j;
> @@ -2623,6 +2633,11 @@ dlb2_event_enqueue_burst(void *event_port,
> break;
> }
>
> + if (qm_port->token_pop_mode == DELAYED_POP &&
> + qm_port->issued_releases >= qm_port->token_pop_thresh - 1) {
> + dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
> + qm_port->issued_releases -= qm_port->token_pop_thresh;
> + }
> return cnt;
> }
>
> @@ -3105,11 +3120,25 @@ dlb2_event_release(struct dlb2_eventdev *dlb2,
> if (j == 0)
> break;
>
> + if (qm_port->token_pop_mode == DELAYED_POP && j < 4 &&
> + qm_port->issued_releases >= qm_port->token_pop_thresh - 1) {
> + dlb2_construct_token_pop_qe(qm_port, j);
> +
> + /* Reset the releases counter for the next QE batch */
> + qm_port->issued_releases -= qm_port->token_pop_thresh;
> + }
> +
> dlb2_hw_do_enqueue(qm_port, i == 0, port_data);
>
> cnt += j;
> }
>
> + if (qm_port->token_pop_mode == DELAYED_POP &&
> + qm_port->issued_releases >= qm_port->token_pop_thresh - 1) {
> + dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
> + qm_port->issued_releases -= qm_port->token_pop_thresh;
> + }
> +
> sw_credit_update:
> /* each release returns one credit */
> if (!ev_port->outstanding_releases) {
> @@ -3193,8 +3222,8 @@ dlb2_hw_dequeue_sparse(struct dlb2_eventdev *dlb2,
> qm_port->owed_tokens += num;
>
> if (num) {
> -
> - dlb2_consume_qe_immediate(qm_port, num);
> + if (qm_port->token_pop_mode == AUTO_POP)
> + dlb2_consume_qe_immediate(qm_port, num);
>
> ev_port->outstanding_releases += num;
>
> @@ -3320,8 +3349,8 @@ dlb2_hw_dequeue(struct dlb2_eventdev *dlb2,
> qm_port->owed_tokens += num;
>
> if (num) {
> -
> - dlb2_consume_qe_immediate(qm_port, num);
> + if (qm_port->token_pop_mode == AUTO_POP)
> + dlb2_consume_qe_immediate(qm_port, num);
>
> ev_port->outstanding_releases += num;
>
> @@ -3336,6 +3365,7 @@ dlb2_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
> uint64_t wait)
> {
> struct dlb2_eventdev_port *ev_port = event_port;
> + struct dlb2_port *qm_port = &ev_port->qm_port;
> struct dlb2_eventdev *dlb2 = ev_port->dlb2;
> uint16_t cnt;
>
> @@ -3351,6 +3381,9 @@ dlb2_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
> DLB2_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
> }
>
> + if (qm_port->token_pop_mode == DEFERRED_POP && qm_port->owed_tokens)
> + dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
> +
> cnt = dlb2_hw_dequeue(dlb2, ev_port, ev, num, wait);
>
> DLB2_INC_STAT(ev_port->stats.traffic.total_polls, 1);
> @@ -3370,6 +3403,7 @@ dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
> uint16_t num, uint64_t wait)
> {
> struct dlb2_eventdev_port *ev_port = event_port;
> + struct dlb2_port *qm_port = &ev_port->qm_port;
> struct dlb2_eventdev *dlb2 = ev_port->dlb2;
> uint16_t cnt;
>
> @@ -3385,6 +3419,9 @@ dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
> DLB2_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
> }
>
> + if (qm_port->token_pop_mode == DEFERRED_POP && qm_port->owed_tokens)
> + dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
> +
> cnt = dlb2_hw_dequeue_sparse(dlb2, ev_port, ev, num, wait);
>
> DLB2_INC_STAT(ev_port->stats.traffic.total_polls, 1);
> @@ -3689,7 +3726,7 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
> struct dlb2_devargs *dlb2_args)
> {
> struct dlb2_eventdev *dlb2;
> - int err;
> + int err, i;
>
> dlb2 = dev->data->dev_private;
>
> @@ -3739,6 +3776,10 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
> return err;
> }
>
> + /* Initialize each port's token pop mode */
> + for (i = 0; i < DLB2_MAX_NUM_PORTS; i++)
> + dlb2->ev_ports[i].qm_port.token_pop_mode = AUTO_POP;
> +
> rte_spinlock_init(&dlb2->qm_instance.resource_lock);
>
> dlb2_iface_low_level_io_init();
> diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h
> index 61567a6..b73cf3f 100644
> --- a/drivers/event/dlb2/dlb2_priv.h
> +++ b/drivers/event/dlb2/dlb2_priv.h
> @@ -12,6 +12,7 @@
> #include <rte_config.h>
> #include "dlb2_user.h"
> #include "dlb2_log.h"
> +#include "rte_pmd_dlb2.h"
>
> #ifndef RTE_LIBRTE_PMD_DLB2_QUELL_STATS
> #define DLB2_INC_STAT(_stat, _incr_val) ((_stat) += _incr_val)
> @@ -290,6 +291,7 @@ struct dlb2_port {
> bool gen_bit;
> uint16_t dir_credits;
> uint32_t dequeue_depth;
> + enum dlb2_token_pop_mode token_pop_mode;
> union dlb2_port_config cfg;
> uint32_t *credit_pool[DLB2_NUM_QUEUE_TYPES]; /* use __atomic builtins */
> uint16_t cached_ldb_credits;
> @@ -298,6 +300,7 @@ struct dlb2_port {
> bool int_armed;
> uint16_t owed_tokens;
> int16_t issued_releases;
> + int16_t token_pop_thresh;
> int cq_depth;
> uint16_t cq_idx;
> uint16_t cq_idx_unmasked;
> diff --git a/drivers/event/dlb2/meson.build b/drivers/event/dlb2/meson.build
> index 6bf8adf..539773c 100644
> --- a/drivers/event/dlb2/meson.build
> +++ b/drivers/event/dlb2/meson.build
> @@ -1,3 +1,4 @@
> +
> # SPDX-License-Identifier: BSD-3-Clause
> # Copyright(c) 2019-2020 Intel Corporation
>
> @@ -6,7 +7,9 @@ sources = files('dlb2.c',
> 'dlb2_xstats.c',
> 'pf/dlb2_main.c',
> 'pf/dlb2_pf.c',
> - 'pf/base/dlb2_resource.c'
> + 'pf/base/dlb2_resource.c',
> + 'rte_pmd_dlb2.c'
> )
>
> deps += ['mbuf', 'mempool', 'ring', 'pci', 'bus_pci']
> +install_headers('rte_pmd_dlb2.h')
> diff --git a/drivers/event/dlb2/rte_pmd_dlb2.c b/drivers/event/dlb2/rte_pmd_dlb2.c
> new file mode 100644
> index 0000000..b09b585
> --- /dev/null
> +++ b/drivers/event/dlb2/rte_pmd_dlb2.c
> @@ -0,0 +1,39 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2020 Intel Corporation
> + */
> +
> +#include <rte_eventdev.h>
> +#include <rte_eventdev_pmd.h>
> +
> +#include "rte_pmd_dlb2.h"
> +#include "dlb2_priv.h"
> +#include "dlb2_inline_fns.h"
> +
> +int
> +rte_pmd_dlb2_set_token_pop_mode(uint8_t dev_id,
> + uint8_t port_id,
> + enum dlb2_token_pop_mode mode)
> +{
> + struct dlb2_eventdev *dlb2;
> + struct rte_eventdev *dev;
> +
> + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> + dev = &rte_eventdevs[dev_id];
> +
> + dlb2 = dlb2_pmd_priv(dev);
> +
> + if (mode >= NUM_TOKEN_POP_MODES)
> + return -EINVAL;
> +
> + /* The event device must be configured, but not yet started */
> + if (!dlb2->configured || dlb2->run_state != DLB2_RUN_STATE_STOPPED)
> + return -EINVAL;
> +
> + /* The token pop mode must be set before configuring the port */
> + if (port_id >= dlb2->num_ports || dlb2->ev_ports[port_id].setup_done)
> + return -EINVAL;
> +
> + dlb2->ev_ports[port_id].qm_port.token_pop_mode = mode;
> +
> + return 0;
> +}
> diff --git a/drivers/event/dlb2/rte_pmd_dlb2.h b/drivers/event/dlb2/rte_pmd_dlb2.h
> new file mode 100644
> index 0000000..489693f
> --- /dev/null
> +++ b/drivers/event/dlb2/rte_pmd_dlb2.h
> @@ -0,0 +1,68 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2020 Intel Corporation
> + */
> +
> +/*!
> + * @file rte_pmd_dlb2.h
> + *
> + * @brief DLB PMD-specific functions
> + *
> + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> + */
> +
> +#ifndef _RTE_PMD_DLB2_H_
> +#define _RTE_PMD_DLB2_H_
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +#include <stdint.h>
> +
> +/**
> + * Selects the token pop mode for a DLB2 port.
> + */
> +enum dlb2_token_pop_mode {
> + /* Pop the CQ tokens immediately after dequeueing. */
> + AUTO_POP,
> + /* Pop CQ tokens after (dequeue_depth - 1) events are released.
> + * Supported on load-balanced ports only.
> + */
> + DELAYED_POP,
> + /* Pop the CQ tokens during next dequeue operation. */
> + DEFERRED_POP,
> +
> + /* NUM_TOKEN_POP_MODES must be last */
> + NUM_TOKEN_POP_MODES
> +};
> +
The experimental-API banner is missing from this function's Doxygen comment; add the following before it:
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> +/*!
> + * Configure the token pop mode for a DLB2 port. By default, all ports use
> + * AUTO_POP. This function must be called before calling rte_event_port_setup()
> + * for the port, but after calling rte_event_dev_configure().
> + *
> + * @param dev_id
> + * The identifier of the event device.
> + * @param port_id
> + * The identifier of the event port.
> + * @param mode
> + * The token pop mode.
> + *
> + * @return
> + * - 0: Success
> + * - EINVAL: Invalid dev_id, port_id, or mode
> + * - EINVAL: The DLB2 is not configured, is already running, or the port is
> + * already setup
> + */
> +
> +__rte_experimental
> +int
> +rte_pmd_dlb2_set_token_pop_mode(uint8_t dev_id,
> + uint8_t port_id,
> + enum dlb2_token_pop_mode mode);
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* _RTE_PMD_DLB2_H_ */
> diff --git a/drivers/event/dlb2/rte_pmd_dlb2_event_version.map b/drivers/event/dlb2/rte_pmd_dlb2_event_version.map
> index 299ae63..84b81a4 100644
> --- a/drivers/event/dlb2/rte_pmd_dlb2_event_version.map
> +++ b/drivers/event/dlb2/rte_pmd_dlb2_event_version.map
> @@ -1,3 +1,9 @@
> DPDK_21.0 {
> local: *;
> };
> +
> +EXPERIMENTAL {
> + global:
> +
> + rte_pmd_dlb2_set_token_pop_mode;
> +};
>
> -----Original Message-----
> From: McDaniel, Timothy <timothy.mcdaniel@intel.com>
> Sent: Saturday, October 17, 2020 1:21 PM
> To: Mcnamara, John <john.mcnamara@intel.com>; Kovacevic, Marko
> <marko.kovacevic@intel.com>; Ray Kinsella <mdr@ashroe.eu>; Neil Horman
> <nhorman@tuxdriver.com>
> Cc: dev@dpdk.org; Carrillo, Erik G <erik.g.carrillo@intel.com>; Eads, Gage
> <gage.eads@intel.com>; Van Haaren, Harry <harry.van.haaren@intel.com>;
> jerinj@marvell.com
> Subject: [PATCH v2 18/22] event/dlb2: add PMD's token pop public interface
>
> The PMD uses a public interface to allow applications to
> control the token pop mode. Supported token pop modes are
> as follows, and they impact core scheduling affinity for
> ldb ports.
>
> AUTO_POP: Pop the CQ tokens immediately after dequeueing.
> DELAYED_POP: Pop CQ tokens after (dequeue_depth - 1) events
> are released. Supported on load-balanced ports
> only.
> DEFERRED_POP: Pop the CQ tokens during next dequeue operation.
>
> Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
With Jerin and Ray's issues addressed:
Reviewed-by: Gage Eads <gage.eads@intel.com>
Thanks,
Gage
> -----Original Message-----
> From: Jerin Jacob <jerinjacobk@gmail.com>
> Sent: Sunday, October 18, 2020 4:14 AM
> To: McDaniel, Timothy <timothy.mcdaniel@intel.com>
> Cc: Mcnamara, John <john.mcnamara@intel.com>; Kovacevic, Marko
> <Marko.Kovacevic@intel.com>; Ray Kinsella <mdr@ashroe.eu>; Neil Horman
> <nhorman@tuxdriver.com>; dpdk-dev <dev@dpdk.org>; Carrillo, Erik G
> <Erik.G.Carrillo@intel.com>; Eads, Gage <gage.eads@intel.com>; Van Haaren,
> Harry <harry.van.haaren@intel.com>; Jerin Jacob <jerinj@marvell.com>
> Subject: Re: [dpdk-dev] [PATCH v2 18/22] event/dlb2: add PMD's token pop
> public interface
>
> On Sat, Oct 17, 2020 at 11:57 PM Timothy McDaniel
> <timothy.mcdaniel@intel.com> wrote:
> >
> > The PMD uses a public interface to allow applications to
> > control the token pop mode. Supported token pop modes are
> > as follows, and they impact core scheduling affinity for
> > ldb ports.
> >
> > AUTO_POP: Pop the CQ tokens immediately after dequeueing.
> > DELAYED_POP: Pop CQ tokens after (dequeue_depth - 1) events
> > are released. Supported on load-balanced ports
> > only.
> > DEFERRED_POP: Pop the CQ tokens during next dequeue operation.
> >
> > Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
> > ---
> > doc/api/doxy-api-index.md | 1 +
> > drivers/event/dlb2/dlb2.c | 53 ++++++++++++++++--
> > drivers/event/dlb2/dlb2_priv.h | 3 +
> > drivers/event/dlb2/meson.build | 5 +-
> > drivers/event/dlb2/rte_pmd_dlb2.c | 39 +++++++++++++
> > drivers/event/dlb2/rte_pmd_dlb2.h | 68 +++++++++++++++++++++++
> > drivers/event/dlb2/rte_pmd_dlb2_event_version.map | 6 ++
> > 7 files changed, 168 insertions(+), 7 deletions(-)
> > create mode 100644 drivers/event/dlb2/rte_pmd_dlb2.c
> > create mode 100644 drivers/event/dlb2/rte_pmd_dlb2.h
> >
> > diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
> > index b855a8f..2b2020c 100644
> > --- a/doc/api/doxy-api-index.md
> > +++ b/doc/api/doxy-api-index.md
> > @@ -26,6 +26,7 @@ The public API headers are grouped by topics:
> > [event_eth_tx_adapter] (@ref rte_event_eth_tx_adapter.h),
> > [event_timer_adapter] (@ref rte_event_timer_adapter.h),
> > [event_crypto_adapter] (@ref rte_event_crypto_adapter.h),
> > + [dlb2] (@ref rte_pmd_dlb2.h)
>
> move this under the already existing "- **device specific**:" section.
okay, will do.
@@ -26,6 +26,7 @@ The public API headers are grouped by topics:
[event_eth_tx_adapter] (@ref rte_event_eth_tx_adapter.h),
[event_timer_adapter] (@ref rte_event_timer_adapter.h),
[event_crypto_adapter] (@ref rte_event_crypto_adapter.h),
+ [dlb2] (@ref rte_pmd_dlb2.h)
[rawdev] (@ref rte_rawdev.h),
[metrics] (@ref rte_metrics.h),
[bitrate] (@ref rte_bitrate.h),
@@ -1200,7 +1200,7 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
qm_port->qid_mappings = &dlb2->qm_ldb_to_ev_queue_id[0];
qm_port->dequeue_depth = dequeue_depth;
-
+ qm_port->token_pop_thresh = dequeue_depth;
qm_port->owed_tokens = 0;
qm_port->issued_releases = 0;
@@ -1368,6 +1368,8 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
qm_port->dequeue_depth = dequeue_depth;
+ /* Directed ports are auto-pop, by default. */
+ qm_port->token_pop_mode = AUTO_POP;
qm_port->owed_tokens = 0;
qm_port->issued_releases = 0;
@@ -2615,6 +2617,14 @@ dlb2_event_enqueue_burst(void *event_port,
dlb2_event_build_hcws(qm_port, &events[i], j,
sched_types, queue_ids);
+ if (qm_port->token_pop_mode == DELAYED_POP && j < 4 &&
+ qm_port->issued_releases >= qm_port->token_pop_thresh - 1) {
+ dlb2_construct_token_pop_qe(qm_port, j);
+
+ /* Reset the releases counter for the next QE batch */
+ qm_port->issued_releases -= qm_port->token_pop_thresh;
+ }
+
dlb2_hw_do_enqueue(qm_port, i == 0, port_data);
cnt += j;
@@ -2623,6 +2633,11 @@ dlb2_event_enqueue_burst(void *event_port,
break;
}
+ if (qm_port->token_pop_mode == DELAYED_POP &&
+ qm_port->issued_releases >= qm_port->token_pop_thresh - 1) {
+ dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
+ qm_port->issued_releases -= qm_port->token_pop_thresh;
+ }
return cnt;
}
@@ -3105,11 +3120,25 @@ dlb2_event_release(struct dlb2_eventdev *dlb2,
if (j == 0)
break;
+ if (qm_port->token_pop_mode == DELAYED_POP && j < 4 &&
+ qm_port->issued_releases >= qm_port->token_pop_thresh - 1) {
+ dlb2_construct_token_pop_qe(qm_port, j);
+
+ /* Reset the releases counter for the next QE batch */
+ qm_port->issued_releases -= qm_port->token_pop_thresh;
+ }
+
dlb2_hw_do_enqueue(qm_port, i == 0, port_data);
cnt += j;
}
+ if (qm_port->token_pop_mode == DELAYED_POP &&
+ qm_port->issued_releases >= qm_port->token_pop_thresh - 1) {
+ dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
+ qm_port->issued_releases -= qm_port->token_pop_thresh;
+ }
+
sw_credit_update:
/* each release returns one credit */
if (!ev_port->outstanding_releases) {
@@ -3193,8 +3222,8 @@ dlb2_hw_dequeue_sparse(struct dlb2_eventdev *dlb2,
qm_port->owed_tokens += num;
if (num) {
-
- dlb2_consume_qe_immediate(qm_port, num);
+ if (qm_port->token_pop_mode == AUTO_POP)
+ dlb2_consume_qe_immediate(qm_port, num);
ev_port->outstanding_releases += num;
@@ -3320,8 +3349,8 @@ dlb2_hw_dequeue(struct dlb2_eventdev *dlb2,
qm_port->owed_tokens += num;
if (num) {
-
- dlb2_consume_qe_immediate(qm_port, num);
+ if (qm_port->token_pop_mode == AUTO_POP)
+ dlb2_consume_qe_immediate(qm_port, num);
ev_port->outstanding_releases += num;
@@ -3336,6 +3365,7 @@ dlb2_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
uint64_t wait)
{
struct dlb2_eventdev_port *ev_port = event_port;
+ struct dlb2_port *qm_port = &ev_port->qm_port;
struct dlb2_eventdev *dlb2 = ev_port->dlb2;
uint16_t cnt;
@@ -3351,6 +3381,9 @@ dlb2_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
DLB2_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
}
+ if (qm_port->token_pop_mode == DEFERRED_POP && qm_port->owed_tokens)
+ dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
+
cnt = dlb2_hw_dequeue(dlb2, ev_port, ev, num, wait);
DLB2_INC_STAT(ev_port->stats.traffic.total_polls, 1);
@@ -3370,6 +3403,7 @@ dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
uint16_t num, uint64_t wait)
{
struct dlb2_eventdev_port *ev_port = event_port;
+ struct dlb2_port *qm_port = &ev_port->qm_port;
struct dlb2_eventdev *dlb2 = ev_port->dlb2;
uint16_t cnt;
@@ -3385,6 +3419,9 @@ dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
DLB2_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
}
+ if (qm_port->token_pop_mode == DEFERRED_POP && qm_port->owed_tokens)
+ dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
+
cnt = dlb2_hw_dequeue_sparse(dlb2, ev_port, ev, num, wait);
DLB2_INC_STAT(ev_port->stats.traffic.total_polls, 1);
@@ -3689,7 +3726,7 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
struct dlb2_devargs *dlb2_args)
{
struct dlb2_eventdev *dlb2;
- int err;
+ int err, i;
dlb2 = dev->data->dev_private;
@@ -3739,6 +3776,10 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
return err;
}
+ /* Initialize each port's token pop mode */
+ for (i = 0; i < DLB2_MAX_NUM_PORTS; i++)
+ dlb2->ev_ports[i].qm_port.token_pop_mode = AUTO_POP;
+
rte_spinlock_init(&dlb2->qm_instance.resource_lock);
dlb2_iface_low_level_io_init();
@@ -12,6 +12,7 @@
#include <rte_config.h>
#include "dlb2_user.h"
#include "dlb2_log.h"
+#include "rte_pmd_dlb2.h"
#ifndef RTE_LIBRTE_PMD_DLB2_QUELL_STATS
#define DLB2_INC_STAT(_stat, _incr_val) ((_stat) += _incr_val)
@@ -290,6 +291,7 @@ struct dlb2_port {
bool gen_bit;
uint16_t dir_credits;
uint32_t dequeue_depth;
+ enum dlb2_token_pop_mode token_pop_mode;
union dlb2_port_config cfg;
uint32_t *credit_pool[DLB2_NUM_QUEUE_TYPES]; /* use __atomic builtins */
uint16_t cached_ldb_credits;
@@ -298,6 +300,7 @@ struct dlb2_port {
bool int_armed;
uint16_t owed_tokens;
int16_t issued_releases;
+ int16_t token_pop_thresh;
int cq_depth;
uint16_t cq_idx;
uint16_t cq_idx_unmasked;
@@ -1,3 +1,4 @@
+
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2019-2020 Intel Corporation
@@ -6,7 +7,9 @@ sources = files('dlb2.c',
'dlb2_xstats.c',
'pf/dlb2_main.c',
'pf/dlb2_pf.c',
- 'pf/base/dlb2_resource.c'
+ 'pf/base/dlb2_resource.c',
+ 'rte_pmd_dlb2.c'
)
deps += ['mbuf', 'mempool', 'ring', 'pci', 'bus_pci']
+install_headers('rte_pmd_dlb2.h')
new file mode 100644
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <rte_eventdev.h>
+#include <rte_eventdev_pmd.h>
+
+#include "rte_pmd_dlb2.h"
+#include "dlb2_priv.h"
+#include "dlb2_inline_fns.h"
+
+int
+rte_pmd_dlb2_set_token_pop_mode(uint8_t dev_id,
+ uint8_t port_id,
+ enum dlb2_token_pop_mode mode)
+{
+ struct dlb2_eventdev *dlb2;
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ dlb2 = dlb2_pmd_priv(dev);
+
+ if (mode >= NUM_TOKEN_POP_MODES)
+ return -EINVAL;
+
+ /* The event device must be configured, but not yet started */
+ if (!dlb2->configured || dlb2->run_state != DLB2_RUN_STATE_STOPPED)
+ return -EINVAL;
+
+ /* The token pop mode must be set before configuring the port */
+ if (port_id >= dlb2->num_ports || dlb2->ev_ports[port_id].setup_done)
+ return -EINVAL;
+
+ dlb2->ev_ports[port_id].qm_port.token_pop_mode = mode;
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+/*!
+ * @file rte_pmd_dlb2.h
+ *
+ * @brief DLB PMD-specific functions
+ *
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ */
+
+#ifndef _RTE_PMD_DLB2_H_
+#define _RTE_PMD_DLB2_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/**
+ * Selects the token pop mode for a DLB2 port.
+ */
+enum dlb2_token_pop_mode {
+ /* Pop the CQ tokens immediately after dequeueing. */
+ AUTO_POP,
+ /* Pop CQ tokens after (dequeue_depth - 1) events are released.
+ * Supported on load-balanced ports only.
+ */
+ DELAYED_POP,
+ /* Pop the CQ tokens during next dequeue operation. */
+ DEFERRED_POP,
+
+ /* NUM_TOKEN_POP_MODES must be last */
+ NUM_TOKEN_POP_MODES
+};
+
+/*!
+ * Configure the token pop mode for a DLB2 port. By default, all ports use
+ * AUTO_POP. This function must be called before calling rte_event_port_setup()
+ * for the port, but after calling rte_event_dev_configure().
+ *
+ * @param dev_id
+ * The identifier of the event device.
+ * @param port_id
+ * The identifier of the event port.
+ * @param mode
+ * The token pop mode.
+ *
+ * @return
+ * - 0: Success
+ * - EINVAL: Invalid dev_id, port_id, or mode
+ * - EINVAL: The DLB2 is not configured, is already running, or the port is
+ * already setup
+ */
+
+__rte_experimental
+int
+rte_pmd_dlb2_set_token_pop_mode(uint8_t dev_id,
+ uint8_t port_id,
+ enum dlb2_token_pop_mode mode);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PMD_DLB2_H_ */
@@ -1,3 +1,9 @@
DPDK_21.0 {
local: *;
};
+
+EXPERIMENTAL {
+ global:
+
+ rte_pmd_dlb2_set_token_pop_mode;
+};