[dpdk-dev,v3] eventdev: fix links map initialization for SW PMD
Checks
Commit Message
This patch initializes the links_map array entries to
EVENT_QUEUE_SERVICE_PRIORITY_INVALID, as expected by
rte_event_port_links_get(). This is necessary for the sw eventdev PMD,
which does not initialize links_map when rte_event_port_setup() calls
rte_event_port_unlink().
Fixes: e36bf651b3ca2 ("eventdev: implement the northbound APIs")
Signed-off-by: Gage Eads <gage.eads@intel.com>
Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
---
v2: Refined commit message's description of patch
v3: Fixed check-git-log.sh errors, added Jerin's Acked-by
lib/librte_eventdev/rte_eventdev.c | 17 ++++++++++++-----
1 file changed, 12 insertions(+), 5 deletions(-)
Comments
On Tue, Mar 07, 2017 at 10:25:00AM -0600, Gage Eads wrote:
> This patch initializes the links_map array entries to
> EVENT_QUEUE_SERVICE_PRIORITY_INVALID, as expected by
> rte_event_port_links_get(). This is necessary for the sw eventdev PMD,
> which does not initialize links_map when rte_event_port_setup() calls
> rte_event_port_unlink().
>
> Fixes: e36bf651b3ca2 ("eventdev: implement the northbound APIs")
>
> Signed-off-by: Gage Eads <gage.eads@intel.com>
> Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Applied to dpdk-next-eventdev/master after fixing the check-git-log.sh warning:
http://dpdk.org/ml/archives/test-report/2017-March/013029.html
Thanks.
> ---
> v2: Refined commit message's description of patch
> v3: Fixed check-git-log.sh errors, added Jerin's Acked-by
>
> lib/librte_eventdev/rte_eventdev.c | 17 ++++++++++++-----
> 1 file changed, 12 insertions(+), 5 deletions(-)
>
> diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c
> index 68bfc3b..b8cd92b 100644
> --- a/lib/librte_eventdev/rte_eventdev.c
> +++ b/lib/librte_eventdev/rte_eventdev.c
> @@ -190,6 +190,8 @@ rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
> return 0;
> }
>
> +#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
> +
> static inline int
> rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
> {
> @@ -251,6 +253,9 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
> "nb_ports %u", nb_ports);
> return -(ENOMEM);
> }
> + for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
> + dev->data->links_map[i] =
> + EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
> } else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
> RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
>
> @@ -305,6 +310,10 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
>
> if (nb_ports > old_nb_ports) {
> uint8_t new_ps = nb_ports - old_nb_ports;
> + unsigned int old_links_map_end =
> + old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
> + unsigned int links_map_end =
> + nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
>
> memset(ports + old_nb_ports, 0,
> sizeof(ports[0]) * new_ps);
> @@ -312,9 +321,9 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
> sizeof(ports_dequeue_depth[0]) * new_ps);
> memset(ports_enqueue_depth + old_nb_ports, 0,
> sizeof(ports_enqueue_depth[0]) * new_ps);
> - memset(links_map +
> - (old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV),
> - 0, sizeof(ports_enqueue_depth[0]) * new_ps);
> + for (i = old_links_map_end; i < links_map_end; i++)
> + links_map[i] =
> + EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
> }
>
> dev->data->ports = ports;
> @@ -815,8 +824,6 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id,
> return diag;
> }
>
> -#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
> -
> int
> rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
> uint8_t queues[], uint16_t nb_unlinks)
> --
> 2.7.4
>
@@ -190,6 +190,8 @@ rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
return 0;
}
+#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
+
static inline int
rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
@@ -251,6 +253,9 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
"nb_ports %u", nb_ports);
return -(ENOMEM);
}
+ for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
+ dev->data->links_map[i] =
+ EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
} else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
@@ -305,6 +310,10 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
if (nb_ports > old_nb_ports) {
uint8_t new_ps = nb_ports - old_nb_ports;
+ unsigned int old_links_map_end =
+ old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
+ unsigned int links_map_end =
+ nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
memset(ports + old_nb_ports, 0,
sizeof(ports[0]) * new_ps);
@@ -312,9 +321,9 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
sizeof(ports_dequeue_depth[0]) * new_ps);
memset(ports_enqueue_depth + old_nb_ports, 0,
sizeof(ports_enqueue_depth[0]) * new_ps);
- memset(links_map +
- (old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV),
- 0, sizeof(ports_enqueue_depth[0]) * new_ps);
+ for (i = old_links_map_end; i < links_map_end; i++)
+ links_map[i] =
+ EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
}
dev->data->ports = ports;
@@ -815,8 +824,6 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id,
return diag;
}
-#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
-
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
uint8_t queues[], uint16_t nb_unlinks)