[5/6] event/cnxk: support to set runtime queue attributes

Message ID cf558ad65e4e2c848f05dce9c6a762819391032f.1648549553.git.sthotton@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: Jerin Jacob
Series Extend and set event queue attributes at runtime

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Shijith Thotton March 29, 2022, 1:11 p.m. UTC
  Added an API to set queue attributes at runtime and an API to get the
weight and affinity of a queue.

Signed-off-by: Shijith Thotton <sthotton@marvell.com>
---
 doc/guides/eventdevs/features/cnxk.ini |  1 +
 drivers/event/cnxk/cn10k_eventdev.c    |  4 ++
 drivers/event/cnxk/cn9k_eventdev.c     |  4 ++
 drivers/event/cnxk/cnxk_eventdev.c     | 81 ++++++++++++++++++++++++--
 drivers/event/cnxk/cnxk_eventdev.h     | 16 +++++
 5 files changed, 100 insertions(+), 6 deletions(-)
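For context, a minimal application-level sketch of how the runtime queue
attribute support added here could be exercised. rte_event_queue_attr_set(),
RTE_EVENT_QUEUE_ATTR_WEIGHT and RTE_EVENT_QUEUE_WEIGHT_HIGHEST are the
additions proposed earlier in this series, so treat this as an illustrative
sketch rather than final API usage:

#include <rte_eventdev.h>

/* Illustrative only: raise the weight of one event queue at runtime and read
 * it back, assuming the queue attribute set/get APIs from this series.
 */
static int
raise_queue_weight(uint8_t dev_id, uint8_t queue_id)
{
	uint32_t weight = 0;
	int ret;

	ret = rte_event_queue_attr_set(dev_id, queue_id,
				       RTE_EVENT_QUEUE_ATTR_WEIGHT,
				       RTE_EVENT_QUEUE_WEIGHT_HIGHEST);
	if (ret < 0)
		return ret;

	/* Read the weight back through the existing get() path. */
	return rte_event_queue_attr_get(dev_id, queue_id,
					RTE_EVENT_QUEUE_ATTR_WEIGHT, &weight);
}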
  

Comments

Van Haaren, Harry March 30, 2022, 11:05 a.m. UTC | #1
> -----Original Message-----
> From: Shijith Thotton <sthotton@marvell.com>
> Sent: Tuesday, March 29, 2022 2:11 PM
> To: dev@dpdk.org; jerinj@marvell.com
> Cc: Shijith Thotton <sthotton@marvell.com>; pbhagavatula@marvell.com
> Subject: [PATCH 5/6] event/cnxk: support to set runtime queue attributes

<snip>

> +int
> +cnxk_sso_queue_attribute_get(struct rte_eventdev *event_dev, uint8_t queue_id,
> +			     uint32_t attr_id, uint32_t *attr_value)
> +{
> +	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
> +
> +	*attr_value = attr_id == RTE_EVENT_QUEUE_ATTR_WEIGHT ?
> +			      dev->mlt_prio[queue_id].weight :
> +			      dev->mlt_prio[queue_id].affinity;

This is prone to future bugs: when a new Eventdev attr is added, this will
silently return .affinity instead of the attr that was actually requested.

Prefer a switch(attr_id) that explicitly handles each attr_id, with a default
case returning -1, so the PMD tells the caller that it refuses to handle the
requested attr.

On reviewing the code below, the set() does this perfectly... except the return value?
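For the get(), something like the below (untested sketch, reusing the names
from the quoted code; exact error value to be agreed) would keep every attr
explicit and reject unknown ones:

int
cnxk_sso_queue_attribute_get(struct rte_eventdev *event_dev, uint8_t queue_id,
			     uint32_t attr_id, uint32_t *attr_value)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	switch (attr_id) {
	case RTE_EVENT_QUEUE_ATTR_WEIGHT:
		*attr_value = dev->mlt_prio[queue_id].weight;
		break;
	case RTE_EVENT_QUEUE_ATTR_AFFINITY:
		*attr_value = dev->mlt_prio[queue_id].affinity;
		break;
	default:
		/* Attr not handled here; make the refusal visible to the caller. */
		return -EINVAL;
	}

	return 0;
}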

> +
> +	return 0;
> +}
> +
> +int
> +cnxk_sso_queue_attribute_set(struct rte_eventdev *event_dev, uint8_t queue_id,
> +			     uint32_t attr_id, uint32_t attr_value)
> +{
> +	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
> +	uint8_t priority, weight, affinity;
> +	struct rte_event_queue_conf *conf;
> +
> +	conf = &event_dev->data->queues_cfg[queue_id];
> +
> +	switch (attr_id) {
> +	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
> +		conf->priority = attr_value;
> +		break;
> +	case RTE_EVENT_QUEUE_ATTR_WEIGHT:
> +		dev->mlt_prio[queue_id].weight = attr_value;
> +		break;
> +	case RTE_EVENT_QUEUE_ATTR_AFFINITY:
> +		dev->mlt_prio[queue_id].affinity = attr_value;
> +		break;
> +	default:
> +		plt_sso_dbg("Ignored setting attribute id %u", attr_id);
> +		return 0;
> +	}

Why return 0 here? This is a failure, the PMD did *not* set the attribute.
Make the user aware of that fact: return -1 or -EINVAL or similar.

Document the explicit return values at Eventdev header level, so all PMDs can
align on the return values, providing consistency to the application.
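For the header, the return values could be documented along these lines
(wording illustrative, not final):

 * @return
 *   - 0: Successfully set the queue attribute.
 *   - -EINVAL: Invalid device id, queue id or attribute id, or the attribute
 *     cannot be changed at runtime.
 *   - -ENOTSUP: The device does not support runtime queue attribute updates.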

<snip>
  
Shijith Thotton April 4, 2022, 7:59 a.m. UTC | #2
><snip>
>
>> +int
>> +cnxk_sso_queue_attribute_get(struct rte_eventdev *event_dev, uint8_t queue_id,
>> +			     uint32_t attr_id, uint32_t *attr_value)
>> +{
>> +	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
>> +
>> +	*attr_value = attr_id == RTE_EVENT_QUEUE_ATTR_WEIGHT ?
>> +			      dev->mlt_prio[queue_id].weight :
>> +			      dev->mlt_prio[queue_id].affinity;
>
>This is prone to future bugs: when a new Eventdev attr is added, this will
>silently return .affinity instead of the attr that was actually requested.
>
>Prefer a switch(attr_id) that explicitly handles each attr_id, with a default
>case returning -1, so the PMD tells the caller that it refuses to handle the
>requested attr.
>
 
Will change it to a switch, similar to set().

>On reviewing the code below, the set() does this perfectly... except the return value?
>
>> +
>> +	return 0;
>> +}
>> +
>> +int
>> +cnxk_sso_queue_attribute_set(struct rte_eventdev *event_dev, uint8_t queue_id,
>> +			     uint32_t attr_id, uint32_t attr_value)
>> +{
>> +	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
>> +	uint8_t priority, weight, affinity;
>> +	struct rte_event_queue_conf *conf;
>> +
>> +	conf = &event_dev->data->queues_cfg[queue_id];
>> +
>> +	switch (attr_id) {
>> +	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
>> +		conf->priority = attr_value;
>> +		break;
>> +	case RTE_EVENT_QUEUE_ATTR_WEIGHT:
>> +		dev->mlt_prio[queue_id].weight = attr_value;
>> +		break;
>> +	case RTE_EVENT_QUEUE_ATTR_AFFINITY:
>> +		dev->mlt_prio[queue_id].affinity = attr_value;
>> +		break;
>> +	default:
>> +		plt_sso_dbg("Ignored setting attribute id %u", attr_id);
>> +		return 0;
>> +	}
>
>Why return 0 here? This is a failure, the PMD did *not* set the attribute.
>Make the user aware of that fact: return -1 or -EINVAL or similar.
>
>Document the explicit return values at Eventdev header level, so all PMDs can
>align on the return values, providing consistency to the application.
>

Will update the PMD and the library to return proper error numbers.

><snip>
  

Patch

diff --git a/doc/guides/eventdevs/features/cnxk.ini b/doc/guides/eventdevs/features/cnxk.ini
index 7633c6e3a2..bee69bf8f4 100644
--- a/doc/guides/eventdevs/features/cnxk.ini
+++ b/doc/guides/eventdevs/features/cnxk.ini
@@ -12,6 +12,7 @@  runtime_port_link          = Y
 multiple_queue_port        = Y
 carry_flow_id              = Y
 maintenance_free           = Y
+runtime_queue_attr         = Y
 
 [Eth Rx adapter Features]
 internal_port              = Y
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 9b4d2895ec..f6973bb691 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -845,9 +845,13 @@  cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
 static struct eventdev_ops cn10k_sso_dev_ops = {
 	.dev_infos_get = cn10k_sso_info_get,
 	.dev_configure = cn10k_sso_dev_configure,
+
 	.queue_def_conf = cnxk_sso_queue_def_conf,
 	.queue_setup = cnxk_sso_queue_setup,
 	.queue_release = cnxk_sso_queue_release,
+	.queue_attr_get = cnxk_sso_queue_attribute_get,
+	.queue_attr_set = cnxk_sso_queue_attribute_set,
+
 	.port_def_conf = cnxk_sso_port_def_conf,
 	.port_setup = cn10k_sso_port_setup,
 	.port_release = cn10k_sso_port_release,
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 4bba477dd1..7cb59bbbfa 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -1079,9 +1079,13 @@  cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
 static struct eventdev_ops cn9k_sso_dev_ops = {
 	.dev_infos_get = cn9k_sso_info_get,
 	.dev_configure = cn9k_sso_dev_configure,
+
 	.queue_def_conf = cnxk_sso_queue_def_conf,
 	.queue_setup = cnxk_sso_queue_setup,
 	.queue_release = cnxk_sso_queue_release,
+	.queue_attr_get = cnxk_sso_queue_attribute_get,
+	.queue_attr_set = cnxk_sso_queue_attribute_set,
+
 	.port_def_conf = cnxk_sso_port_def_conf,
 	.port_setup = cn9k_sso_port_setup,
 	.port_release = cn9k_sso_port_release,
diff --git a/drivers/event/cnxk/cnxk_eventdev.c b/drivers/event/cnxk/cnxk_eventdev.c
index be021d86c9..73f1029779 100644
--- a/drivers/event/cnxk/cnxk_eventdev.c
+++ b/drivers/event/cnxk/cnxk_eventdev.c
@@ -120,7 +120,8 @@  cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
 				  RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
 				  RTE_EVENT_DEV_CAP_NONSEQ_MODE |
 				  RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
-				  RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
+				  RTE_EVENT_DEV_CAP_MAINTENANCE_FREE |
+				  RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR;
 }
 
 int
@@ -300,11 +301,27 @@  cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
 		     const struct rte_event_queue_conf *queue_conf)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
-
-	plt_sso_dbg("Queue=%d prio=%d", queue_id, queue_conf->priority);
-	/* Normalize <0-255> to <0-7> */
-	return roc_sso_hwgrp_set_priority(&dev->sso, queue_id, 0xFF, 0xFF,
-					  queue_conf->priority / 32);
+	uint8_t priority, weight, affinity;
+
+	/* Default weight and affinity */
+	dev->mlt_prio[queue_id].weight = RTE_EVENT_QUEUE_WEIGHT_HIGHEST;
+	dev->mlt_prio[queue_id].affinity = RTE_EVENT_QUEUE_AFFINITY_HIGHEST;
+
+	priority = CNXK_QOS_NORMALIZE(queue_conf->priority,
+				      RTE_EVENT_DEV_PRIORITY_LOWEST,
+				      CNXK_SSO_PRIORITY_CNT);
+	weight = CNXK_QOS_NORMALIZE(dev->mlt_prio[queue_id].weight,
+				    RTE_EVENT_QUEUE_WEIGHT_HIGHEST,
+				    CNXK_SSO_WEIGHT_CNT);
+	affinity = CNXK_QOS_NORMALIZE(dev->mlt_prio[queue_id].affinity,
+				      RTE_EVENT_QUEUE_AFFINITY_HIGHEST,
+				      CNXK_SSO_AFFINITY_CNT);
+
+	plt_sso_dbg("Queue=%u prio=%u weight=%u affinity=%u", queue_id,
+		    priority, weight, affinity);
+
+	return roc_sso_hwgrp_set_priority(&dev->sso, queue_id, weight, affinity,
+					  priority);
 }
 
 void
@@ -314,6 +331,58 @@  cnxk_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
 	RTE_SET_USED(queue_id);
 }
 
+int
+cnxk_sso_queue_attribute_get(struct rte_eventdev *event_dev, uint8_t queue_id,
+			     uint32_t attr_id, uint32_t *attr_value)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+
+	*attr_value = attr_id == RTE_EVENT_QUEUE_ATTR_WEIGHT ?
+			      dev->mlt_prio[queue_id].weight :
+			      dev->mlt_prio[queue_id].affinity;
+
+	return 0;
+}
+
+int
+cnxk_sso_queue_attribute_set(struct rte_eventdev *event_dev, uint8_t queue_id,
+			     uint32_t attr_id, uint32_t attr_value)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	uint8_t priority, weight, affinity;
+	struct rte_event_queue_conf *conf;
+
+	conf = &event_dev->data->queues_cfg[queue_id];
+
+	switch (attr_id) {
+	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
+		conf->priority = attr_value;
+		break;
+	case RTE_EVENT_QUEUE_ATTR_WEIGHT:
+		dev->mlt_prio[queue_id].weight = attr_value;
+		break;
+	case RTE_EVENT_QUEUE_ATTR_AFFINITY:
+		dev->mlt_prio[queue_id].affinity = attr_value;
+		break;
+	default:
+		plt_sso_dbg("Ignored setting attribute id %u", attr_id);
+		return 0;
+	}
+
+	priority = CNXK_QOS_NORMALIZE(conf->priority,
+				      RTE_EVENT_DEV_PRIORITY_LOWEST,
+				      CNXK_SSO_PRIORITY_CNT);
+	weight = CNXK_QOS_NORMALIZE(dev->mlt_prio[queue_id].weight,
+				    RTE_EVENT_QUEUE_WEIGHT_HIGHEST,
+				    CNXK_SSO_WEIGHT_CNT);
+	affinity = CNXK_QOS_NORMALIZE(dev->mlt_prio[queue_id].affinity,
+				      RTE_EVENT_QUEUE_AFFINITY_HIGHEST,
+				      CNXK_SSO_AFFINITY_CNT);
+
+	return roc_sso_hwgrp_set_priority(&dev->sso, queue_id, weight, affinity,
+					  priority);
+}
+
 void
 cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
 		       struct rte_event_port_conf *port_conf)
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index 5564746e6d..8037cbbb3b 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -38,6 +38,9 @@ 
 #define CNXK_SSO_XAQ_CACHE_CNT (0x7)
 #define CNXK_SSO_XAQ_SLACK     (8)
 #define CNXK_SSO_WQE_SG_PTR    (9)
+#define CNXK_SSO_PRIORITY_CNT  (8)
+#define CNXK_SSO_WEIGHT_CNT    (64)
+#define CNXK_SSO_AFFINITY_CNT  (16)
 
 #define CNXK_TT_FROM_TAG(x)	    (((x) >> 32) & SSO_TT_EMPTY)
 #define CNXK_TT_FROM_EVENT(x)	    (((x) >> 38) & SSO_TT_EMPTY)
@@ -54,6 +57,7 @@ 
 #define CN10K_GW_MODE_PREF     1
 #define CN10K_GW_MODE_PREF_WFE 2
 
+#define CNXK_QOS_NORMALIZE(val, max, cnt) (val / ((max + cnt - 1) / cnt))
 #define CNXK_VALID_DEV_OR_ERR_RET(dev, drv_name)                               \
 	do {                                                                   \
 		if (strncmp(dev->driver->name, drv_name, strlen(drv_name)))    \
@@ -79,6 +83,11 @@  struct cnxk_sso_qos {
 	uint16_t iaq_prcnt;
 };
 
+struct cnxk_sso_mlt_prio {
+	uint8_t weight;
+	uint8_t affinity;
+};
+
 struct cnxk_sso_evdev {
 	struct roc_sso sso;
 	uint8_t max_event_queues;
@@ -108,6 +117,7 @@  struct cnxk_sso_evdev {
 	uint64_t *timer_adptr_sz;
 	uint16_t vec_pool_cnt;
 	uint64_t *vec_pools;
+	struct cnxk_sso_mlt_prio mlt_prio[RTE_EVENT_MAX_QUEUES_PER_DEV];
 	/* Dev args */
 	uint32_t xae_cnt;
 	uint8_t qos_queue_cnt;
@@ -234,6 +244,12 @@  void cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
 int cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
 			 const struct rte_event_queue_conf *queue_conf);
 void cnxk_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id);
+int cnxk_sso_queue_attribute_get(struct rte_eventdev *event_dev,
+				 uint8_t queue_id, uint32_t attr_id,
+				 uint32_t *attr_value);
+int cnxk_sso_queue_attribute_set(struct rte_eventdev *event_dev,
+				 uint8_t queue_id, uint32_t attr_id,
+				 uint32_t attr_value);
 void cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
 			    struct rte_event_port_conf *port_conf);
 int cnxk_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,