[1/2] mempool/octeontx2: add devargs to lock ctx in cache
Checks
Commit Message
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Add device arguments to lock NPA aura and pool contexts in NDC cache.
The device args take a hexadecimal bitmask where each bit represents the
corresponding aura/pool id.
Example:
-w 0002:02:00.0,npa_lock_mask=0xf // Lock first 4 aura/pool ctx
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
doc/guides/eventdevs/octeontx2.rst | 9 +++
doc/guides/mempool/octeontx2.rst | 9 +++
doc/guides/nics/octeontx2.rst | 9 +++
drivers/common/octeontx2/Makefile | 2 +-
drivers/common/octeontx2/meson.build | 2 +-
drivers/common/octeontx2/otx2_common.c | 35 ++++++++++
drivers/common/octeontx2/otx2_common.h | 3 +
.../rte_common_octeontx2_version.map | 7 ++
drivers/event/octeontx2/otx2_evdev.c | 2 +-
drivers/mempool/octeontx2/otx2_mempool.c | 1 +
drivers/mempool/octeontx2/otx2_mempool_ops.c | 68 +++++++++++++++++++
drivers/net/octeontx2/otx2_ethdev_devargs.c | 1 +
12 files changed, 145 insertions(+), 3 deletions(-)
Comments
On 3/6/20 5:35 PM, pbhagavatula@marvell.com wrote:
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Add device arguments to lock NPA aura and pool contexts in NDC cache.
> The device args take hexadecimal bitmask where each bit represent the
> corresponding aura/pool id.
> Example:
> -w 0002:02:00.0,npa_lock_mask=0xf // Lock first 4 aura/pool ctx
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
[...]
> +- ``Lock NPA contexts in NDC``
> +
> + Lock NPA aura and pool contexts in NDC cache.
> + The device args take hexadecimal bitmask where each bit represent the
> + corresponding aura/pool id.
> +
> + For example::
> + -w 0002:0e:00.0,npa_lock_mask=0xf
I think you need to make a paragraph break (empty line) after "::" in
order to have this example treated as "literal block" (same as max_pool
above - not visible in diff). At least it looks so when I build doc
with "ninja doc" and check the result in browser.
> diff --git a/doc/guides/mempool/octeontx2.rst b/doc/guides/mempool/octeontx2.rst
> index 2c9a0953b..c594934d8 100644
> --- a/doc/guides/mempool/octeontx2.rst
> +++ b/doc/guides/mempool/octeontx2.rst
> @@ -61,6 +61,15 @@ Runtime Config Options
> provide ``max_pools`` parameter to the first PCIe device probed by the given
> application.
>
> +- ``Lock NPA contexts in NDC``
> +
> + Lock NPA aura and pool contexts in NDC cache.
> + The device args take hexadecimal bitmask where each bit represent the
> + corresponding aura/pool id.
> +
> + For example::
> + -w 0002:02:00.0,npa_lock_mask=0xf
Ditto.
> diff --git a/doc/guides/nics/octeontx2.rst b/doc/guides/nics/octeontx2.rst
> index 60187ec72..819d09e11 100644
> --- a/doc/guides/nics/octeontx2.rst
> +++ b/doc/guides/nics/octeontx2.rst
> @@ -213,6 +213,15 @@ Runtime Config Options
> parameters to all the PCIe devices if application requires to configure on
> all the ethdev ports.
>
> +- ``Lock NPA contexts in NDC``
> +
> + Lock NPA aura and pool contexts in NDC cache.
> + The device args take hexadecimal bitmask where each bit represent the
> + corresponding aura/pool id.
> +
> + For example::
> + -w 0002:02:00.0,npa_lock_mask=0xf
Ditto - make that general comment (you might also want to fix other
places - not only those introduced).
[...]
> diff --git a/drivers/common/octeontx2/otx2_common.c b/drivers/common/octeontx2/otx2_common.c
> index 1a257cf07..684bb3a0f 100644
> --- a/drivers/common/octeontx2/otx2_common.c
> +++ b/drivers/common/octeontx2/otx2_common.c
> @@ -169,6 +169,41 @@ int otx2_npa_lf_obj_ref(void)
> return cnt ? 0 : -EINVAL;
> }
>
> +static int
> +parse_npa_lock_mask(const char *key, const char *value, void *extra_args)
> +{
> + RTE_SET_USED(key);
> + uint64_t val;
> +
> + val = strtoull(value, NULL, 16);
> +
> + *(uint64_t *)extra_args = val;
> +
> + return 0;
> +}
> +
> +#define OTX2_NPA_LOCK_MASK "npa_lock_mask"
> +/*
> + * @internal
> + * Parse common device arguments
> + */
> +void otx2_parse_common_devargs(struct rte_kvargs *kvlist)
> +{
> +
> + struct otx2_idev_cfg *idev;
> + uint64_t npa_lock_mask;
Missing initialization of 'npa_lock_mask' - when user does not supply
this devarg then no callback is called and you copy this to idev (below).
> +
> + idev = otx2_intra_dev_get_cfg();
> +
> + if (idev == NULL)
> + return;
> +
> + rte_kvargs_process(kvlist, OTX2_NPA_LOCK_MASK,
> + &parse_npa_lock_mask, &npa_lock_mask);
> +
> + idev->npa_lock_mask = npa_lock_mask;
> +}
[...]
> diff --git a/drivers/mempool/octeontx2/otx2_mempool_ops.c b/drivers/mempool/octeontx2/otx2_mempool_ops.c
> index ac2d61861..5075b027a 100644
> --- a/drivers/mempool/octeontx2/otx2_mempool_ops.c
> +++ b/drivers/mempool/octeontx2/otx2_mempool_ops.c
> @@ -348,6 +348,7 @@ npa_lf_aura_pool_init(struct otx2_mbox *mbox, uint32_t aura_id,
> struct npa_aq_enq_req *aura_init_req, *pool_init_req;
> struct npa_aq_enq_rsp *aura_init_rsp, *pool_init_rsp;
> struct otx2_mbox_dev *mdev = &mbox->dev[0];
> + struct otx2_idev_cfg *idev;
> int rc, off;
>
> aura_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
> @@ -379,6 +380,46 @@ npa_lf_aura_pool_init(struct otx2_mbox *mbox, uint32_t aura_id,
> return 0;
> else
> return NPA_LF_ERR_AURA_POOL_INIT;
> +
> + idev = otx2_intra_dev_get_cfg();
> + if (idev == NULL)
> + return 0;
Is this not an error?
> +
> + if (!(idev->npa_lock_mask & BIT_ULL(aura_id)))
> + return 0;
> +
> + aura_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
> + aura_init_req->aura_id = aura_id;
> + aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
> + aura_init_req->op = NPA_AQ_INSTOP_LOCK;
> +
> + pool_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
> + if (!pool_init_req) {
> + /* The shared memory buffer can be full.
> + * Flush it and retry
> + */
> + otx2_mbox_msg_send(mbox, 0);
> + rc = otx2_mbox_wait_for_rsp(mbox, 0);
> + if (rc < 0) {
> + otx2_err("Failed to LOCK AURA context");
> + return 0;
Same here and below - if these are not errors then maybe do not log them
as such. If they are errors then we should probably signal them via
return value ("return rc;").
> + }
> +
> + pool_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
> + if (!pool_init_req) {
> + otx2_err("Failed to LOCK POOL context");
> + return 0;
See above.
> + }
> + }
> + pool_init_req->aura_id = aura_id;
> + pool_init_req->ctype = NPA_AQ_CTYPE_POOL;
> + pool_init_req->op = NPA_AQ_INSTOP_LOCK;
> +
> + rc = otx2_mbox_process(mbox);
> + if (rc < 0)
> + otx2_err("Failed to lock POOL ctx to NDC");
See above.
> +
> + return 0;
> }
>
> static int
> @@ -390,6 +431,7 @@ npa_lf_aura_pool_fini(struct otx2_mbox *mbox,
> struct npa_aq_enq_rsp *aura_rsp, *pool_rsp;
> struct otx2_mbox_dev *mdev = &mbox->dev[0];
> struct ndc_sync_op *ndc_req;
> + struct otx2_idev_cfg *idev;
> int rc, off;
>
> /* Procedure for disabling an aura/pool */
> @@ -434,6 +476,32 @@ npa_lf_aura_pool_fini(struct otx2_mbox *mbox,
> otx2_err("Error on NDC-NPA LF sync, rc %d", rc);
> return NPA_LF_ERR_AURA_POOL_FINI;
> }
> +
> + idev = otx2_intra_dev_get_cfg();
> + if (idev == NULL)
> + return 0;
> +
> + if (!(idev->npa_lock_mask & BIT_ULL(aura_id)))
> + return 0;
Same comments here and below as for *pool_init above.
> +
> + aura_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
> + aura_req->aura_id = aura_id;
> + aura_req->ctype = NPA_AQ_CTYPE_AURA;
> + aura_req->op = NPA_AQ_INSTOP_UNLOCK;
> +
> + rc = otx2_mbox_process(mbox);
> + if (rc < 0)
> + otx2_err("Failed to unlock AURA ctx to NDC");
> +
> + pool_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
> + pool_req->aura_id = aura_id;
> + pool_req->ctype = NPA_AQ_CTYPE_POOL;
> + pool_req->op = NPA_AQ_INSTOP_UNLOCK;
> +
> + rc = otx2_mbox_process(mbox);
> + if (rc < 0)
> + otx2_err("Failed to unlock POOL ctx to NDC");
> +
> return 0;
> }
With regards
Andrzej Ostruszka
>-----Original Message-----
>From: dev <dev-bounces@dpdk.org> On Behalf Of Andrzej Ostruszka
>Sent: Thursday, March 19, 2020 3:07 PM
>To: dev@dpdk.org
>Subject: Re: [dpdk-dev] [PATCH 1/2] mempool/octeontx2: add devargs
>to lock ctx in cache
>
>On 3/6/20 5:35 PM, pbhagavatula@marvell.com wrote:
>> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>>
>> Add device arguments to lock NPA aura and pool contexts in NDC
>cache.
>> The device args take hexadecimal bitmask where each bit represent
>the
>> corresponding aura/pool id.
>> Example:
>> -w 0002:02:00.0,npa_lock_mask=0xf // Lock first 4 aura/pool ctx
>>
>> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
>[...]
>> +- ``Lock NPA contexts in NDC``
>> +
>> + Lock NPA aura and pool contexts in NDC cache.
>> + The device args take hexadecimal bitmask where each bit
>represent the
>> + corresponding aura/pool id.
>> +
>> + For example::
>> + -w 0002:0e:00.0,npa_lock_mask=0xf
>
>I think you need to make a paragraph break (empty line) after "::" in
>order to have this example treated as "literal block" (same as max_pool
>above - not visible in diff). At least it looks so when I build doc
>with "ninja doc" and check the result in browser.
Will fix in v2.
>
>> diff --git a/doc/guides/mempool/octeontx2.rst
>b/doc/guides/mempool/octeontx2.rst
>> index 2c9a0953b..c594934d8 100644
>> --- a/doc/guides/mempool/octeontx2.rst
>> +++ b/doc/guides/mempool/octeontx2.rst
>> @@ -61,6 +61,15 @@ Runtime Config Options
>> provide ``max_pools`` parameter to the first PCIe device probed by
>the given
>> application.
>>
>> +- ``Lock NPA contexts in NDC``
>> +
>> + Lock NPA aura and pool contexts in NDC cache.
>> + The device args take hexadecimal bitmask where each bit
>represent the
>> + corresponding aura/pool id.
>> +
>> + For example::
>> + -w 0002:02:00.0,npa_lock_mask=0xf
>
>Ditto.
>
>> diff --git a/doc/guides/nics/octeontx2.rst
>b/doc/guides/nics/octeontx2.rst
>> index 60187ec72..819d09e11 100644
>> --- a/doc/guides/nics/octeontx2.rst
>> +++ b/doc/guides/nics/octeontx2.rst
>> @@ -213,6 +213,15 @@ Runtime Config Options
>> parameters to all the PCIe devices if application requires to
>configure on
>> all the ethdev ports.
>>
>> +- ``Lock NPA contexts in NDC``
>> +
>> + Lock NPA aura and pool contexts in NDC cache.
>> + The device args take hexadecimal bitmask where each bit
>represent the
>> + corresponding aura/pool id.
>> +
>> + For example::
>> + -w 0002:02:00.0,npa_lock_mask=0xf
>
>Ditto - make that general comment (you might also want to fix other
>places - not only those introduced).
>
>[...]
>> diff --git a/drivers/common/octeontx2/otx2_common.c
>b/drivers/common/octeontx2/otx2_common.c
>> index 1a257cf07..684bb3a0f 100644
>> --- a/drivers/common/octeontx2/otx2_common.c
>> +++ b/drivers/common/octeontx2/otx2_common.c
>> @@ -169,6 +169,41 @@ int otx2_npa_lf_obj_ref(void)
>> return cnt ? 0 : -EINVAL;
>> }
>>
>> +static int
>> +parse_npa_lock_mask(const char *key, const char *value, void
>*extra_args)
>> +{
>> + RTE_SET_USED(key);
>> + uint64_t val;
>> +
>> + val = strtoull(value, NULL, 16);
>> +
>> + *(uint64_t *)extra_args = val;
>> +
>> + return 0;
>> +}
>> +
>> +#define OTX2_NPA_LOCK_MASK "npa_lock_mask"
>> +/*
>> + * @internal
>> + * Parse common device arguments
>> + */
>> +void otx2_parse_common_devargs(struct rte_kvargs *kvlist)
>> +{
>> +
>> + struct otx2_idev_cfg *idev;
>> + uint64_t npa_lock_mask;
>
>Missing initialization of 'npa_lock_mask' - when user does not supply
>this devarg then no callback is called and you copy this to idev (below).
Will fix in v2.
>
>> +
>> + idev = otx2_intra_dev_get_cfg();
>> +
>> + if (idev == NULL)
>> + return;
>> +
>> + rte_kvargs_process(kvlist, OTX2_NPA_LOCK_MASK,
>> + &parse_npa_lock_mask, &npa_lock_mask);
>> +
>> + idev->npa_lock_mask = npa_lock_mask;
>> +}
>[...]
>> diff --git a/drivers/mempool/octeontx2/otx2_mempool_ops.c
>b/drivers/mempool/octeontx2/otx2_mempool_ops.c
>> index ac2d61861..5075b027a 100644
>> --- a/drivers/mempool/octeontx2/otx2_mempool_ops.c
>> +++ b/drivers/mempool/octeontx2/otx2_mempool_ops.c
>> @@ -348,6 +348,7 @@ npa_lf_aura_pool_init(struct otx2_mbox
>*mbox, uint32_t aura_id,
>> struct npa_aq_enq_req *aura_init_req, *pool_init_req;
>> struct npa_aq_enq_rsp *aura_init_rsp, *pool_init_rsp;
>> struct otx2_mbox_dev *mdev = &mbox->dev[0];
>> + struct otx2_idev_cfg *idev;
>> int rc, off;
>>
>> aura_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
>> @@ -379,6 +380,46 @@ npa_lf_aura_pool_init(struct otx2_mbox
>*mbox, uint32_t aura_id,
>> return 0;
>> else
>> return NPA_LF_ERR_AURA_POOL_INIT;
>> +
>> + idev = otx2_intra_dev_get_cfg();
>> + if (idev == NULL)
>> + return 0;
>
>Is this not an error?
I think that condition would never be true as it is a part of device probe
and we would exit the application there.
I will move the condition above before sending the mbox message just to
be safe.
>
>> +
>> + if (!(idev->npa_lock_mask & BIT_ULL(aura_id)))
>> + return 0;
>> +
>> + aura_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
>> + aura_init_req->aura_id = aura_id;
>> + aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
>> + aura_init_req->op = NPA_AQ_INSTOP_LOCK;
>> +
>> + pool_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
>> + if (!pool_init_req) {
>> + /* The shared memory buffer can be full.
>> + * Flush it and retry
>> + */
>> + otx2_mbox_msg_send(mbox, 0);
>> + rc = otx2_mbox_wait_for_rsp(mbox, 0);
>> + if (rc < 0) {
>> + otx2_err("Failed to LOCK AURA context");
>> + return 0;
>
>Same here and below - if these are not errors then maybe do not log
>them
>as such. If they are errors then we should probably signal them via
>return value ("return rc;").
These are not catastrophic errors since locking is first come first serve and
pool can still function without locking.
I have logged them as errors for debuggability since the application has
requested through devargs.
>
>> + }
>> +
>> + pool_init_req =
>otx2_mbox_alloc_msg_npa_aq_enq(mbox);
>> + if (!pool_init_req) {
>> + otx2_err("Failed to LOCK POOL context");
>> + return 0;
>
>See above.
>
>> + }
>> + }
>> + pool_init_req->aura_id = aura_id;
>> + pool_init_req->ctype = NPA_AQ_CTYPE_POOL;
>> + pool_init_req->op = NPA_AQ_INSTOP_LOCK;
>> +
>> + rc = otx2_mbox_process(mbox);
>> + if (rc < 0)
>> + otx2_err("Failed to lock POOL ctx to NDC");
>
>See above.
>
>> +
>> + return 0;
>> }
>>
>> static int
>> @@ -390,6 +431,7 @@ npa_lf_aura_pool_fini(struct otx2_mbox
>*mbox,
>> struct npa_aq_enq_rsp *aura_rsp, *pool_rsp;
>> struct otx2_mbox_dev *mdev = &mbox->dev[0];
>> struct ndc_sync_op *ndc_req;
>> + struct otx2_idev_cfg *idev;
>> int rc, off;
>>
>> /* Procedure for disabling an aura/pool */
>> @@ -434,6 +476,32 @@ npa_lf_aura_pool_fini(struct otx2_mbox
>*mbox,
>> otx2_err("Error on NDC-NPA LF sync, rc %d", rc);
>> return NPA_LF_ERR_AURA_POOL_FINI;
>> }
>> +
>> + idev = otx2_intra_dev_get_cfg();
>> + if (idev == NULL)
>> + return 0;
>> +
>> + if (!(idev->npa_lock_mask & BIT_ULL(aura_id)))
>> + return 0;
>
>Same comments here and below as for *pool_init above.
>
>> +
>> + aura_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
>> + aura_req->aura_id = aura_id;
>> + aura_req->ctype = NPA_AQ_CTYPE_AURA;
>> + aura_req->op = NPA_AQ_INSTOP_UNLOCK;
>> +
>> + rc = otx2_mbox_process(mbox);
>> + if (rc < 0)
>> + otx2_err("Failed to unlock AURA ctx to NDC");
>> +
>> + pool_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
>> + pool_req->aura_id = aura_id;
>> + pool_req->ctype = NPA_AQ_CTYPE_POOL;
>> + pool_req->op = NPA_AQ_INSTOP_UNLOCK;
>> +
>> + rc = otx2_mbox_process(mbox);
>> + if (rc < 0)
>> + otx2_err("Failed to unlock POOL ctx to NDC");
>> +
>> return 0;
>> }
>With regards
>Andrzej Ostruszka
@@ -148,6 +148,15 @@ Runtime Config Options
-w 0002:0e:00.0,tim_ring_ctl=[2-1023-1-0]
+- ``Lock NPA contexts in NDC``
+
+ Lock NPA aura and pool contexts in NDC cache.
+ The device args take hexadecimal bitmask where each bit represent the
+ corresponding aura/pool id.
+
+ For example::
+ -w 0002:0e:00.0,npa_lock_mask=0xf
+
Debugging Options
~~~~~~~~~~~~~~~~~
@@ -61,6 +61,15 @@ Runtime Config Options
provide ``max_pools`` parameter to the first PCIe device probed by the given
application.
+- ``Lock NPA contexts in NDC``
+
+ Lock NPA aura and pool contexts in NDC cache.
+ The device args take hexadecimal bitmask where each bit represent the
+ corresponding aura/pool id.
+
+ For example::
+ -w 0002:02:00.0,npa_lock_mask=0xf
+
Debugging Options
~~~~~~~~~~~~~~~~~
@@ -213,6 +213,15 @@ Runtime Config Options
parameters to all the PCIe devices if application requires to configure on
all the ethdev ports.
+- ``Lock NPA contexts in NDC``
+
+ Lock NPA aura and pool contexts in NDC cache.
+ The device args take hexadecimal bitmask where each bit represent the
+ corresponding aura/pool id.
+
+ For example::
+ -w 0002:02:00.0,npa_lock_mask=0xf
+
Limitations
-----------
@@ -35,6 +35,6 @@ SRCS-y += otx2_common.c
SRCS-y += otx2_sec_idev.c
LDLIBS += -lrte_eal
-LDLIBS += -lrte_ethdev
+LDLIBS += -lrte_ethdev -lrte_kvargs
include $(RTE_SDK)/mk/rte.lib.mk
@@ -23,6 +23,6 @@ foreach flag: extra_flags
endif
endforeach
-deps = ['eal', 'pci', 'ethdev']
+deps = ['eal', 'pci', 'ethdev', 'kvargs']
includes += include_directories('../../common/octeontx2',
'../../mempool/octeontx2', '../../bus/pci')
@@ -169,6 +169,41 @@ int otx2_npa_lf_obj_ref(void)
return cnt ? 0 : -EINVAL;
}
+static int
+parse_npa_lock_mask(const char *key, const char *value, void *extra_args)
+{
+ RTE_SET_USED(key);
+ uint64_t val;
+
+ val = strtoull(value, NULL, 16);
+
+ *(uint64_t *)extra_args = val;
+
+ return 0;
+}
+
+#define OTX2_NPA_LOCK_MASK "npa_lock_mask"
+/*
+ * @internal
+ * Parse common device arguments
+ */
+void otx2_parse_common_devargs(struct rte_kvargs *kvlist)
+{
+
+ struct otx2_idev_cfg *idev;
+ uint64_t npa_lock_mask;
+
+ idev = otx2_intra_dev_get_cfg();
+
+ if (idev == NULL)
+ return;
+
+ rte_kvargs_process(kvlist, OTX2_NPA_LOCK_MASK,
+ &parse_npa_lock_mask, &npa_lock_mask);
+
+ idev->npa_lock_mask = npa_lock_mask;
+}
+
/**
* @internal
*/
@@ -8,6 +8,7 @@
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
+#include <rte_kvargs.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_io.h>
@@ -65,6 +66,7 @@ struct otx2_idev_cfg {
rte_atomic16_t npa_refcnt;
uint16_t npa_refcnt_u16;
};
+ uint64_t npa_lock_mask;
};
struct otx2_idev_cfg *otx2_intra_dev_get_cfg(void);
@@ -75,6 +77,7 @@ struct otx2_npa_lf *otx2_npa_lf_obj_get(void);
void otx2_npa_set_defaults(struct otx2_idev_cfg *idev);
int otx2_npa_lf_active(void *dev);
int otx2_npa_lf_obj_ref(void);
+void otx2_parse_common_devargs(struct rte_kvargs *kvlist);
/* Log */
extern int otx2_logtype_base;
@@ -45,6 +45,13 @@ DPDK_20.0.1 {
otx2_sec_idev_tx_cpt_qp_put;
} DPDK_20.0;
+DPDK_20.0.2 {
+ global:
+
+ otx2_parse_common_devargs;
+
+} DPDK_20.0;
+
EXPERIMENTAL {
global:
@@ -1659,7 +1659,7 @@ sso_parse_devargs(struct otx2_sso_evdev *dev, struct rte_devargs *devargs)
&single_ws);
rte_kvargs_process(kvlist, OTX2_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
dev);
-
+ otx2_parse_common_devargs(kvlist);
dev->dual_ws = !single_ws;
rte_kvargs_free(kvlist);
}
@@ -191,6 +191,7 @@ otx2_parse_aura_size(struct rte_devargs *devargs)
goto exit;
rte_kvargs_process(kvlist, OTX2_MAX_POOLS, &parse_max_pools, &aura_sz);
+ otx2_parse_common_devargs(kvlist);
rte_kvargs_free(kvlist);
exit:
return aura_sz;
@@ -348,6 +348,7 @@ npa_lf_aura_pool_init(struct otx2_mbox *mbox, uint32_t aura_id,
struct npa_aq_enq_req *aura_init_req, *pool_init_req;
struct npa_aq_enq_rsp *aura_init_rsp, *pool_init_rsp;
struct otx2_mbox_dev *mdev = &mbox->dev[0];
+ struct otx2_idev_cfg *idev;
int rc, off;
aura_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
@@ -379,6 +380,46 @@ npa_lf_aura_pool_init(struct otx2_mbox *mbox, uint32_t aura_id,
return 0;
else
return NPA_LF_ERR_AURA_POOL_INIT;
+
+ idev = otx2_intra_dev_get_cfg();
+ if (idev == NULL)
+ return 0;
+
+ if (!(idev->npa_lock_mask & BIT_ULL(aura_id)))
+ return 0;
+
+ aura_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+ aura_init_req->aura_id = aura_id;
+ aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_init_req->op = NPA_AQ_INSTOP_LOCK;
+
+ pool_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+ if (!pool_init_req) {
+ /* The shared memory buffer can be full.
+ * Flush it and retry
+ */
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (rc < 0) {
+ otx2_err("Failed to LOCK AURA context");
+ return 0;
+ }
+
+ pool_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+ if (!pool_init_req) {
+ otx2_err("Failed to LOCK POOL context");
+ return 0;
+ }
+ }
+ pool_init_req->aura_id = aura_id;
+ pool_init_req->ctype = NPA_AQ_CTYPE_POOL;
+ pool_init_req->op = NPA_AQ_INSTOP_LOCK;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0)
+ otx2_err("Failed to lock POOL ctx to NDC");
+
+ return 0;
}
static int
@@ -390,6 +431,7 @@ npa_lf_aura_pool_fini(struct otx2_mbox *mbox,
struct npa_aq_enq_rsp *aura_rsp, *pool_rsp;
struct otx2_mbox_dev *mdev = &mbox->dev[0];
struct ndc_sync_op *ndc_req;
+ struct otx2_idev_cfg *idev;
int rc, off;
/* Procedure for disabling an aura/pool */
@@ -434,6 +476,32 @@ npa_lf_aura_pool_fini(struct otx2_mbox *mbox,
otx2_err("Error on NDC-NPA LF sync, rc %d", rc);
return NPA_LF_ERR_AURA_POOL_FINI;
}
+
+ idev = otx2_intra_dev_get_cfg();
+ if (idev == NULL)
+ return 0;
+
+ if (!(idev->npa_lock_mask & BIT_ULL(aura_id)))
+ return 0;
+
+ aura_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+ aura_req->aura_id = aura_id;
+ aura_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_req->op = NPA_AQ_INSTOP_UNLOCK;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0)
+ otx2_err("Failed to unlock AURA ctx to NDC");
+
+ pool_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+ pool_req->aura_id = aura_id;
+ pool_req->ctype = NPA_AQ_CTYPE_POOL;
+ pool_req->op = NPA_AQ_INSTOP_UNLOCK;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0)
+ otx2_err("Failed to unlock POOL ctx to NDC");
+
return 0;
}
@@ -161,6 +161,7 @@ otx2_ethdev_parse_devargs(struct rte_devargs *devargs, struct otx2_eth_dev *dev)
&parse_switch_header_type, &switch_header_type);
rte_kvargs_process(kvlist, OTX2_RSS_TAG_AS_XOR,
&parse_flag, &rss_tag_as_xor);
+ otx2_parse_common_devargs(kvlist);
rte_kvargs_free(kvlist);
null_devargs: