[v4] mempool/octeontx2: add devargs to lock ctx in cache
Checks
Commit Message
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Add device arguments to lock NPA aura and pool contexts in NDC cache.
The device args take a hexadecimal bitmask where each bit represents the
corresponding aura/pool id.
Example:
-w 0002:02:00.0,npa_lock_mask=0xf // Lock first 4 aura/pool ctx
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
---
Depends on series http://patches.dpdk.org/project/dpdk/list/?series=5004
v4 Changes:
- Mark `otx2_parse_common_devargs` as __rte_internal.
v3 Changes:
- Split series into individual patches as targets are different.
v2 Changes:
- Fix formatting in doc(Andrzej).
- Add error returns for all failures(Andrzej).
- Fix devargs parameter list(Andrzej).
doc/guides/eventdevs/octeontx2.rst | 10 +++
doc/guides/mempool/octeontx2.rst | 10 +++
doc/guides/nics/octeontx2.rst | 12 +++
drivers/common/octeontx2/Makefile | 2 +-
drivers/common/octeontx2/meson.build | 2 +-
drivers/common/octeontx2/otx2_common.c | 34 +++++++++
drivers/common/octeontx2/otx2_common.h | 5 ++
.../rte_common_octeontx2_version.map | 13 ++++
drivers/event/octeontx2/otx2_evdev.c | 5 +-
drivers/mempool/octeontx2/otx2_mempool.c | 4 +-
drivers/mempool/octeontx2/otx2_mempool_ops.c | 74 +++++++++++++++++++
drivers/net/octeontx2/otx2_ethdev_devargs.c | 4 +-
12 files changed, 169 insertions(+), 6 deletions(-)
--
2.17.1
Comments
>Subject: [dpdk-dev] [PATCH v4] mempool/octeontx2: add devargs to
>lock ctx in cache
>
>From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
>Add device arguments to lock NPA aura and pool contexts in NDC cache.
>The device args take hexadecimal bitmask where each bit represent the
>corresponding aura/pool id.
>Example:
> -w 0002:02:00.0,npa_lock_mask=0xf // Lock first 4 aura/pool ctx
>
>Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
>Acked-by: Jerin Jacob <jerinj@marvell.com>
>---
>
>Depends on series
>http://patches.dpdk.org/project/dpdk/list/?series=5004
>
> v4 Changes:
> - Mark `otx2_parse_common_devargs` as __rte_internal.
Ping @thomas
> v3 Changes:
> - Split series into individual patches as targets are different.
> v2 Changes:
> - Fix formatting in doc(Andrzej).
> - Add error returns for all failures(Andrzej).
> - Fix devargs parameter list(Andrzej).
>
> doc/guides/eventdevs/octeontx2.rst | 10 +++
> doc/guides/mempool/octeontx2.rst | 10 +++
> doc/guides/nics/octeontx2.rst | 12 +++
> drivers/common/octeontx2/Makefile | 2 +-
> drivers/common/octeontx2/meson.build | 2 +-
> drivers/common/octeontx2/otx2_common.c | 34 +++++++++
> drivers/common/octeontx2/otx2_common.h | 5 ++
> .../rte_common_octeontx2_version.map | 13 ++++
> drivers/event/octeontx2/otx2_evdev.c | 5 +-
> drivers/mempool/octeontx2/otx2_mempool.c | 4 +-
> drivers/mempool/octeontx2/otx2_mempool_ops.c | 74
>+++++++++++++++++++
> drivers/net/octeontx2/otx2_ethdev_devargs.c | 4 +-
> 12 files changed, 169 insertions(+), 6 deletions(-)
>
>diff --git a/doc/guides/eventdevs/octeontx2.rst
>b/doc/guides/eventdevs/octeontx2.rst
>index d4b2515ce..6502f6415 100644
>--- a/doc/guides/eventdevs/octeontx2.rst
>+++ b/doc/guides/eventdevs/octeontx2.rst
>@@ -148,6 +148,16 @@ Runtime Config Options
>
> -w 0002:0e:00.0,tim_ring_ctl=[2-1023-1-0]
>
>+- ``Lock NPA contexts in NDC``
>+
>+ Lock NPA aura and pool contexts in NDC cache.
>+ The device args take hexadecimal bitmask where each bit represent
>the
>+ corresponding aura/pool id.
>+
>+ For example::
>+
>+ -w 0002:0e:00.0,npa_lock_mask=0xf
>+
> Debugging Options
> ~~~~~~~~~~~~~~~~~
>
>diff --git a/doc/guides/mempool/octeontx2.rst
>b/doc/guides/mempool/octeontx2.rst
>index 2c9a0953b..49b45a04e 100644
>--- a/doc/guides/mempool/octeontx2.rst
>+++ b/doc/guides/mempool/octeontx2.rst
>@@ -61,6 +61,16 @@ Runtime Config Options
> provide ``max_pools`` parameter to the first PCIe device probed by
>the given
> application.
>
>+- ``Lock NPA contexts in NDC``
>+
>+ Lock NPA aura and pool contexts in NDC cache.
>+ The device args take hexadecimal bitmask where each bit represent
>the
>+ corresponding aura/pool id.
>+
>+ For example::
>+
>+ -w 0002:02:00.0,npa_lock_mask=0xf
>+
> Debugging Options
> ~~~~~~~~~~~~~~~~~
>
>diff --git a/doc/guides/nics/octeontx2.rst
>b/doc/guides/nics/octeontx2.rst
>index 60187ec72..c2d87c9d0 100644
>--- a/doc/guides/nics/octeontx2.rst
>+++ b/doc/guides/nics/octeontx2.rst
>@@ -194,6 +194,7 @@ Runtime Config Options
> Setting this flag to 1 to select the legacy mode.
>
> For example to select the legacy mode(RSS tag adder as XOR)::
>+
> -w 0002:02:00.0,tag_as_xor=1
>
> - ``Max SPI for inbound inline IPsec`` (default ``1``)
>@@ -202,6 +203,7 @@ Runtime Config Options
> ``ipsec_in_max_spi`` ``devargs`` parameter.
>
> For example::
>+
> -w 0002:02:00.0,ipsec_in_max_spi=128
>
> With the above configuration, application can enable inline IPsec
>processing
>@@ -213,6 +215,16 @@ Runtime Config Options
> parameters to all the PCIe devices if application requires to configure
>on
> all the ethdev ports.
>
>+- ``Lock NPA contexts in NDC``
>+
>+ Lock NPA aura and pool contexts in NDC cache.
>+ The device args take hexadecimal bitmask where each bit represent
>the
>+ corresponding aura/pool id.
>+
>+ For example::
>+
>+ -w 0002:02:00.0,npa_lock_mask=0xf
>+
> Limitations
> -----------
>
>diff --git a/drivers/common/octeontx2/Makefile
>b/drivers/common/octeontx2/Makefile
>index efe3da2cc..260da8dd3 100644
>--- a/drivers/common/octeontx2/Makefile
>+++ b/drivers/common/octeontx2/Makefile
>@@ -34,6 +34,6 @@ SRCS-y += otx2_common.c
> SRCS-y += otx2_sec_idev.c
>
> LDLIBS += -lrte_eal
>-LDLIBS += -lrte_ethdev
>+LDLIBS += -lrte_ethdev -lrte_kvargs
>
> include $(RTE_SDK)/mk/rte.lib.mk
>diff --git a/drivers/common/octeontx2/meson.build
>b/drivers/common/octeontx2/meson.build
>index 996ddba14..f2c04342e 100644
>--- a/drivers/common/octeontx2/meson.build
>+++ b/drivers/common/octeontx2/meson.build
>@@ -21,6 +21,6 @@ foreach flag: extra_flags
> endif
> endforeach
>
>-deps = ['eal', 'pci', 'ethdev']
>+deps = ['eal', 'pci', 'ethdev', 'kvargs']
> includes += include_directories('../../common/octeontx2',
> '../../mempool/octeontx2', '../../bus/pci')
>diff --git a/drivers/common/octeontx2/otx2_common.c
>b/drivers/common/octeontx2/otx2_common.c
>index 1a257cf07..5e7272f69 100644
>--- a/drivers/common/octeontx2/otx2_common.c
>+++ b/drivers/common/octeontx2/otx2_common.c
>@@ -169,6 +169,40 @@ int otx2_npa_lf_obj_ref(void)
> return cnt ? 0 : -EINVAL;
> }
>
>+static int
>+parse_npa_lock_mask(const char *key, const char *value, void
>*extra_args)
>+{
>+ RTE_SET_USED(key);
>+ uint64_t val;
>+
>+ val = strtoull(value, NULL, 16);
>+
>+ *(uint64_t *)extra_args = val;
>+
>+ return 0;
>+}
>+
>+/*
>+ * @internal
>+ * Parse common device arguments
>+ */
>+void otx2_parse_common_devargs(struct rte_kvargs *kvlist)
>+{
>+
>+ struct otx2_idev_cfg *idev;
>+ uint64_t npa_lock_mask = 0;
>+
>+ idev = otx2_intra_dev_get_cfg();
>+
>+ if (idev == NULL)
>+ return;
>+
>+ rte_kvargs_process(kvlist, OTX2_NPA_LOCK_MASK,
>+ &parse_npa_lock_mask, &npa_lock_mask);
>+
>+ idev->npa_lock_mask = npa_lock_mask;
>+}
>+
> /**
> * @internal
> */
>diff --git a/drivers/common/octeontx2/otx2_common.h
>b/drivers/common/octeontx2/otx2_common.h
>index e62cdea07..f0e98fbbc 100644
>--- a/drivers/common/octeontx2/otx2_common.h
>+++ b/drivers/common/octeontx2/otx2_common.h
>@@ -8,6 +8,7 @@
> #include <rte_atomic.h>
> #include <rte_common.h>
> #include <rte_cycles.h>
>+#include <rte_kvargs.h>
> #include <rte_memory.h>
> #include <rte_memzone.h>
> #include <rte_io.h>
>@@ -49,6 +50,8 @@
> (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
> #endif
>
>+#define OTX2_NPA_LOCK_MASK "npa_lock_mask"
>+
> /* Intra device related functions */
> struct otx2_npa_lf;
> struct otx2_idev_cfg {
>@@ -60,6 +63,7 @@ struct otx2_idev_cfg {
> rte_atomic16_t npa_refcnt;
> uint16_t npa_refcnt_u16;
> };
>+ uint64_t npa_lock_mask;
> };
>
> struct otx2_idev_cfg *otx2_intra_dev_get_cfg(void);
>@@ -70,6 +74,7 @@ struct otx2_npa_lf *otx2_npa_lf_obj_get(void);
> void otx2_npa_set_defaults(struct otx2_idev_cfg *idev);
> int otx2_npa_lf_active(void *dev);
> int otx2_npa_lf_obj_ref(void);
>+void __rte_internal otx2_parse_common_devargs(struct rte_kvargs
>*kvlist);
>
> /* Log */
> extern int otx2_logtype_base;
>diff --git
>a/drivers/common/octeontx2/rte_common_octeontx2_version.map
>b/drivers/common/octeontx2/rte_common_octeontx2_version.map
>index 8f2404bd9..74e418c82 100644
>---
>a/drivers/common/octeontx2/rte_common_octeontx2_version.map
>+++
>b/drivers/common/octeontx2/rte_common_octeontx2_version.map
>@@ -45,8 +45,21 @@ DPDK_20.0.1 {
> otx2_sec_idev_tx_cpt_qp_put;
> } DPDK_20.0;
>
>+DPDK_20.0.2 {
>+ global:
>+
>+ otx2_parse_common_devargs;
>+
>+} DPDK_20.0;
>+
> EXPERIMENTAL {
> global:
>
> otx2_logtype_ep;
> };
>+
>+INTERNAL {
>+ global:
>+
>+ otx2_parse_common_devargs;
>+};
>diff --git a/drivers/event/octeontx2/otx2_evdev.c
>b/drivers/event/octeontx2/otx2_evdev.c
>index d20213d78..630073de5 100644
>--- a/drivers/event/octeontx2/otx2_evdev.c
>+++ b/drivers/event/octeontx2/otx2_evdev.c
>@@ -1659,7 +1659,7 @@ sso_parse_devargs(struct otx2_sso_evdev
>*dev, struct rte_devargs *devargs)
> &single_ws);
> rte_kvargs_process(kvlist, OTX2_SSO_GGRP_QOS,
>&parse_sso_kvargs_dict,
> dev);
>-
>+ otx2_parse_common_devargs(kvlist);
> dev->dual_ws = !single_ws;
> rte_kvargs_free(kvlist);
> }
>@@ -1821,4 +1821,5 @@
>RTE_PMD_REGISTER_KMOD_DEP(event_octeontx2, "vfio-pci");
> RTE_PMD_REGISTER_PARAM_STRING(event_octeontx2,
>OTX2_SSO_XAE_CNT "=<int>"
> OTX2_SSO_SINGLE_WS "=1"
> OTX2_SSO_GGRP_QOS "=<string>"
>- OTX2_SSO_SELFTEST "=1");
>+ OTX2_SSO_SELFTEST "=1"
>+ OTX2_NPA_LOCK_MASK "=<1-65535>");
>diff --git a/drivers/mempool/octeontx2/otx2_mempool.c
>b/drivers/mempool/octeontx2/otx2_mempool.c
>index 3a4a9425f..fb630fecf 100644
>--- a/drivers/mempool/octeontx2/otx2_mempool.c
>+++ b/drivers/mempool/octeontx2/otx2_mempool.c
>@@ -191,6 +191,7 @@ otx2_parse_aura_size(struct rte_devargs
>*devargs)
> goto exit;
>
> rte_kvargs_process(kvlist, OTX2_MAX_POOLS,
>&parse_max_pools, &aura_sz);
>+ otx2_parse_common_devargs(kvlist);
> rte_kvargs_free(kvlist);
> exit:
> return aura_sz;
>@@ -452,4 +453,5 @@ RTE_PMD_REGISTER_PCI(mempool_octeontx2,
>pci_npa);
> RTE_PMD_REGISTER_PCI_TABLE(mempool_octeontx2, pci_npa_map);
> RTE_PMD_REGISTER_KMOD_DEP(mempool_octeontx2, "vfio-pci");
> RTE_PMD_REGISTER_PARAM_STRING(mempool_octeontx2,
>- OTX2_MAX_POOLS "=<128-1048576>");
>+ OTX2_MAX_POOLS "=<128-1048576>"
>+ OTX2_NPA_LOCK_MASK "=<1-65535>");
>diff --git a/drivers/mempool/octeontx2/otx2_mempool_ops.c
>b/drivers/mempool/octeontx2/otx2_mempool_ops.c
>index 162b7f01d..ade9fa6d3 100644
>--- a/drivers/mempool/octeontx2/otx2_mempool_ops.c
>+++ b/drivers/mempool/octeontx2/otx2_mempool_ops.c
>@@ -348,8 +348,13 @@ npa_lf_aura_pool_init(struct otx2_mbox
>*mbox, uint32_t aura_id,
> struct npa_aq_enq_req *aura_init_req, *pool_init_req;
> struct npa_aq_enq_rsp *aura_init_rsp, *pool_init_rsp;
> struct otx2_mbox_dev *mdev = &mbox->dev[0];
>+ struct otx2_idev_cfg *idev;
> int rc, off;
>
>+ idev = otx2_intra_dev_get_cfg();
>+ if (idev == NULL)
>+ return -ENOMEM;
>+
> aura_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
>
> aura_init_req->aura_id = aura_id;
>@@ -379,6 +384,44 @@ npa_lf_aura_pool_init(struct otx2_mbox
>*mbox, uint32_t aura_id,
> return 0;
> else
> return NPA_LF_ERR_AURA_POOL_INIT;
>+
>+ if (!(idev->npa_lock_mask & BIT_ULL(aura_id)))
>+ return 0;
>+
>+ aura_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
>+ aura_init_req->aura_id = aura_id;
>+ aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
>+ aura_init_req->op = NPA_AQ_INSTOP_LOCK;
>+
>+ pool_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
>+ if (!pool_init_req) {
>+ /* The shared memory buffer can be full.
>+ * Flush it and retry
>+ */
>+ otx2_mbox_msg_send(mbox, 0);
>+ rc = otx2_mbox_wait_for_rsp(mbox, 0);
>+ if (rc < 0) {
>+ otx2_err("Failed to LOCK AURA context");
>+ return -ENOMEM;
>+ }
>+
>+ pool_init_req =
>otx2_mbox_alloc_msg_npa_aq_enq(mbox);
>+ if (!pool_init_req) {
>+ otx2_err("Failed to LOCK POOL context");
>+ return -ENOMEM;
>+ }
>+ }
>+ pool_init_req->aura_id = aura_id;
>+ pool_init_req->ctype = NPA_AQ_CTYPE_POOL;
>+ pool_init_req->op = NPA_AQ_INSTOP_LOCK;
>+
>+ rc = otx2_mbox_process(mbox);
>+ if (rc < 0) {
>+ otx2_err("Failed to lock POOL ctx to NDC");
>+ return -ENOMEM;
>+ }
>+
>+ return 0;
> }
>
> static int
>@@ -390,8 +433,13 @@ npa_lf_aura_pool_fini(struct otx2_mbox
>*mbox,
> struct npa_aq_enq_rsp *aura_rsp, *pool_rsp;
> struct otx2_mbox_dev *mdev = &mbox->dev[0];
> struct ndc_sync_op *ndc_req;
>+ struct otx2_idev_cfg *idev;
> int rc, off;
>
>+ idev = otx2_intra_dev_get_cfg();
>+ if (idev == NULL)
>+ return -EINVAL;
>+
> /* Procedure for disabling an aura/pool */
> rte_delay_us(10);
> npa_lf_aura_op_alloc(aura_handle, 0);
>@@ -434,6 +482,32 @@ npa_lf_aura_pool_fini(struct otx2_mbox
>*mbox,
> otx2_err("Error on NDC-NPA LF sync, rc %d", rc);
> return NPA_LF_ERR_AURA_POOL_FINI;
> }
>+
>+ if (!(idev->npa_lock_mask & BIT_ULL(aura_id)))
>+ return 0;
>+
>+ aura_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
>+ aura_req->aura_id = aura_id;
>+ aura_req->ctype = NPA_AQ_CTYPE_AURA;
>+ aura_req->op = NPA_AQ_INSTOP_UNLOCK;
>+
>+ rc = otx2_mbox_process(mbox);
>+ if (rc < 0) {
>+ otx2_err("Failed to unlock AURA ctx to NDC");
>+ return -EINVAL;
>+ }
>+
>+ pool_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
>+ pool_req->aura_id = aura_id;
>+ pool_req->ctype = NPA_AQ_CTYPE_POOL;
>+ pool_req->op = NPA_AQ_INSTOP_UNLOCK;
>+
>+ rc = otx2_mbox_process(mbox);
>+ if (rc < 0) {
>+ otx2_err("Failed to unlock POOL ctx to NDC");
>+ return -EINVAL;
>+ }
>+
> return 0;
> }
>
>diff --git a/drivers/net/octeontx2/otx2_ethdev_devargs.c
>b/drivers/net/octeontx2/otx2_ethdev_devargs.c
>index f29f01564..5390eb217 100644
>--- a/drivers/net/octeontx2/otx2_ethdev_devargs.c
>+++ b/drivers/net/octeontx2/otx2_ethdev_devargs.c
>@@ -161,6 +161,7 @@ otx2_ethdev_parse_devargs(struct rte_devargs
>*devargs, struct otx2_eth_dev *dev)
> &parse_switch_header_type,
>&switch_header_type);
> rte_kvargs_process(kvlist, OTX2_RSS_TAG_AS_XOR,
> &parse_flag, &rss_tag_as_xor);
>+ otx2_parse_common_devargs(kvlist);
> rte_kvargs_free(kvlist);
>
> null_devargs:
>@@ -186,4 +187,5 @@
>RTE_PMD_REGISTER_PARAM_STRING(net_octeontx2,
> OTX2_FLOW_PREALLOC_SIZE "=<1-32>"
> OTX2_FLOW_MAX_PRIORITY "=<1-32>"
> OTX2_SWITCH_HEADER_TYPE
>"=<higig2|dsa>"
>- OTX2_RSS_TAG_AS_XOR "=1");
>+ OTX2_RSS_TAG_AS_XOR "=1"
>+ OTX2_NPA_LOCK_MASK "=<1-65535>");
>--
>2.17.1
01/05/2020 12:21, Pavan Nikhilesh Bhagavatula:
> >Subject: [dpdk-dev] [PATCH v4] mempool/octeontx2: add devargs to
> >lock ctx in cache
> >
> >From: Pavan Nikhilesh <pbhagavatula@marvell.com>
> >
> >Add device arguments to lock NPA aura and pool contexts in NDC cache.
> >The device args take hexadecimal bitmask where each bit represent the
> >corresponding aura/pool id.
> >Example:
> > -w 0002:02:00.0,npa_lock_mask=0xf // Lock first 4 aura/pool ctx
> >
> >Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> >Acked-by: Jerin Jacob <jerinj@marvell.com>
> >---
> >
> >Depends on series
> >http://patches.dpdk.org/project/dpdk/list/?series=5004
> >
> > v4 Changes:
> > - Mark `otx2_parse_common_devargs` as __rte_internal.
>
> Ping @thomas
Now that __rte_internal marking was merged,
this patch is candidate for -rc2, but...
> >a/drivers/common/octeontx2/rte_common_octeontx2_version.map
> >b/drivers/common/octeontx2/rte_common_octeontx2_version.map
> >@@ -45,8 +45,21 @@ DPDK_20.0.1 {
> > otx2_sec_idev_tx_cpt_qp_put;
> > } DPDK_20.0;
> >
> >+DPDK_20.0.2 {
> >+ global:
> >+
> >+ otx2_parse_common_devargs;
> >+
> >+} DPDK_20.0;
Why are you adding the symbol both in 20.0.2 and INTERNAL below?
Also, that's a pity you did not take time to convert all the symbols
of this internal library to __rte_internal.
> >+
> > EXPERIMENTAL {
> > global:
> >
> > otx2_logtype_ep;
> > };
> >+
> >+INTERNAL {
> >+ global:
> >+
> >+ otx2_parse_common_devargs;
> >+};
>01/05/2020 12:21, Pavan Nikhilesh Bhagavatula:
>> >Subject: [dpdk-dev] [PATCH v4] mempool/octeontx2: add devargs
>to
>> >lock ctx in cache
>> >
>> >From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>> >
>> >Add device arguments to lock NPA aura and pool contexts in NDC
>cache.
>> >The device args take hexadecimal bitmask where each bit represent
>the
>> >corresponding aura/pool id.
>> >Example:
>> > -w 0002:02:00.0,npa_lock_mask=0xf // Lock first 4 aura/pool ctx
>> >
>> >Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
>> >Acked-by: Jerin Jacob <jerinj@marvell.com>
>> >---
>> >
>> >Depends on series
>> >https://urldefense.proofpoint.com/v2/url?u=http-
>3A__patches.dpdk.org_project_dpdk_list_-3Fseries-
>3D5004&d=DwICAg&c=nKjWec2b6R0mOyPaz7xtfQ&r=E3SgYMjtKCMVs
>B-fmvgGV3o-
>g_fjLhk5Pupi9ijohpc&m=WIve81BfP51j5YxMwFobYJ6Fa5_lzESSAdznqyR
>I8WQ&s=UQvvNHvQvpzcZJfEl3jp_pvsp7wM6RtKQrBch6EHVjg&e=
>> >
>> > v4 Changes:
>> > - Mark `otx2_parse_common_devargs` as __rte_internal.
>>
>> Ping @thomas
>
>Now that __rte_internal marking was merged,
>this patch is candidate for -rc2, but...
>
>
>>
>>a/drivers/common/octeontx2/rte_common_octeontx2_version.map
>>
>>b/drivers/common/octeontx2/rte_common_octeontx2_version.map
>> >@@ -45,8 +45,21 @@ DPDK_20.0.1 {
>> > otx2_sec_idev_tx_cpt_qp_put;
>> > } DPDK_20.0;
>> >
>> >+DPDK_20.0.2 {
>> >+ global:
>> >+
>> >+ otx2_parse_common_devargs;
>> >+
>> >+} DPDK_20.0;
>
>Why are you adding the symbol both in 20.0.2 and INTERNAL below?
>
>
>Also, that's a pity you did not take time to convert all the symbols
>of this internal library to __rte_internal.
>
My bad will send v5.
>
>> >+
>> > EXPERIMENTAL {
>> > global:
>> >
>> > otx2_logtype_ep;
>> > };
>> >+
>> >+INTERNAL {
>> >+ global:
>> >+
>> >+ otx2_parse_common_devargs;
>> >+};
>
>
@@ -148,6 +148,16 @@ Runtime Config Options
-w 0002:0e:00.0,tim_ring_ctl=[2-1023-1-0]
+- ``Lock NPA contexts in NDC``
+
+ Lock NPA aura and pool contexts in NDC cache.
+ The device args take hexadecimal bitmask where each bit represent the
+ corresponding aura/pool id.
+
+ For example::
+
+ -w 0002:0e:00.0,npa_lock_mask=0xf
+
Debugging Options
~~~~~~~~~~~~~~~~~
@@ -61,6 +61,16 @@ Runtime Config Options
provide ``max_pools`` parameter to the first PCIe device probed by the given
application.
+- ``Lock NPA contexts in NDC``
+
+ Lock NPA aura and pool contexts in NDC cache.
+ The device args take hexadecimal bitmask where each bit represent the
+ corresponding aura/pool id.
+
+ For example::
+
+ -w 0002:02:00.0,npa_lock_mask=0xf
+
Debugging Options
~~~~~~~~~~~~~~~~~
@@ -194,6 +194,7 @@ Runtime Config Options
Setting this flag to 1 to select the legacy mode.
For example to select the legacy mode(RSS tag adder as XOR)::
+
-w 0002:02:00.0,tag_as_xor=1
- ``Max SPI for inbound inline IPsec`` (default ``1``)
@@ -202,6 +203,7 @@ Runtime Config Options
``ipsec_in_max_spi`` ``devargs`` parameter.
For example::
+
-w 0002:02:00.0,ipsec_in_max_spi=128
With the above configuration, application can enable inline IPsec processing
@@ -213,6 +215,16 @@ Runtime Config Options
parameters to all the PCIe devices if application requires to configure on
all the ethdev ports.
+- ``Lock NPA contexts in NDC``
+
+ Lock NPA aura and pool contexts in NDC cache.
+ The device args take hexadecimal bitmask where each bit represent the
+ corresponding aura/pool id.
+
+ For example::
+
+ -w 0002:02:00.0,npa_lock_mask=0xf
+
Limitations
-----------
@@ -34,6 +34,6 @@ SRCS-y += otx2_common.c
SRCS-y += otx2_sec_idev.c
LDLIBS += -lrte_eal
-LDLIBS += -lrte_ethdev
+LDLIBS += -lrte_ethdev -lrte_kvargs
include $(RTE_SDK)/mk/rte.lib.mk
@@ -21,6 +21,6 @@ foreach flag: extra_flags
endif
endforeach
-deps = ['eal', 'pci', 'ethdev']
+deps = ['eal', 'pci', 'ethdev', 'kvargs']
includes += include_directories('../../common/octeontx2',
'../../mempool/octeontx2', '../../bus/pci')
@@ -169,6 +169,40 @@ int otx2_npa_lf_obj_ref(void)
return cnt ? 0 : -EINVAL;
}
+static int
+parse_npa_lock_mask(const char *key, const char *value, void *extra_args)
+{
+ RTE_SET_USED(key);
+ uint64_t val;
+
+ val = strtoull(value, NULL, 16);
+
+ *(uint64_t *)extra_args = val;
+
+ return 0;
+}
+
+/*
+ * @internal
+ * Parse common device arguments
+ */
+void otx2_parse_common_devargs(struct rte_kvargs *kvlist)
+{
+
+ struct otx2_idev_cfg *idev;
+ uint64_t npa_lock_mask = 0;
+
+ idev = otx2_intra_dev_get_cfg();
+
+ if (idev == NULL)
+ return;
+
+ rte_kvargs_process(kvlist, OTX2_NPA_LOCK_MASK,
+ &parse_npa_lock_mask, &npa_lock_mask);
+
+ idev->npa_lock_mask = npa_lock_mask;
+}
+
/**
* @internal
*/
@@ -8,6 +8,7 @@
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
+#include <rte_kvargs.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_io.h>
@@ -49,6 +50,8 @@
(~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
#endif
+#define OTX2_NPA_LOCK_MASK "npa_lock_mask"
+
/* Intra device related functions */
struct otx2_npa_lf;
struct otx2_idev_cfg {
@@ -60,6 +63,7 @@ struct otx2_idev_cfg {
rte_atomic16_t npa_refcnt;
uint16_t npa_refcnt_u16;
};
+ uint64_t npa_lock_mask;
};
struct otx2_idev_cfg *otx2_intra_dev_get_cfg(void);
@@ -70,6 +74,7 @@ struct otx2_npa_lf *otx2_npa_lf_obj_get(void);
void otx2_npa_set_defaults(struct otx2_idev_cfg *idev);
int otx2_npa_lf_active(void *dev);
int otx2_npa_lf_obj_ref(void);
+void __rte_internal otx2_parse_common_devargs(struct rte_kvargs *kvlist);
/* Log */
extern int otx2_logtype_base;
@@ -45,8 +45,21 @@ DPDK_20.0.1 {
otx2_sec_idev_tx_cpt_qp_put;
} DPDK_20.0;
+DPDK_20.0.2 {
+ global:
+
+ otx2_parse_common_devargs;
+
+} DPDK_20.0;
+
EXPERIMENTAL {
global:
otx2_logtype_ep;
};
+
+INTERNAL {
+ global:
+
+ otx2_parse_common_devargs;
+};
@@ -1659,7 +1659,7 @@ sso_parse_devargs(struct otx2_sso_evdev *dev, struct rte_devargs *devargs)
&single_ws);
rte_kvargs_process(kvlist, OTX2_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
dev);
-
+ otx2_parse_common_devargs(kvlist);
dev->dual_ws = !single_ws;
rte_kvargs_free(kvlist);
}
@@ -1821,4 +1821,5 @@ RTE_PMD_REGISTER_KMOD_DEP(event_octeontx2, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_octeontx2, OTX2_SSO_XAE_CNT "=<int>"
OTX2_SSO_SINGLE_WS "=1"
OTX2_SSO_GGRP_QOS "=<string>"
- OTX2_SSO_SELFTEST "=1");
+ OTX2_SSO_SELFTEST "=1"
+ OTX2_NPA_LOCK_MASK "=<1-65535>");
@@ -191,6 +191,7 @@ otx2_parse_aura_size(struct rte_devargs *devargs)
goto exit;
rte_kvargs_process(kvlist, OTX2_MAX_POOLS, &parse_max_pools, &aura_sz);
+ otx2_parse_common_devargs(kvlist);
rte_kvargs_free(kvlist);
exit:
return aura_sz;
@@ -452,4 +453,5 @@ RTE_PMD_REGISTER_PCI(mempool_octeontx2, pci_npa);
RTE_PMD_REGISTER_PCI_TABLE(mempool_octeontx2, pci_npa_map);
RTE_PMD_REGISTER_KMOD_DEP(mempool_octeontx2, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(mempool_octeontx2,
- OTX2_MAX_POOLS "=<128-1048576>");
+ OTX2_MAX_POOLS "=<128-1048576>"
+ OTX2_NPA_LOCK_MASK "=<1-65535>");
@@ -348,8 +348,13 @@ npa_lf_aura_pool_init(struct otx2_mbox *mbox, uint32_t aura_id,
struct npa_aq_enq_req *aura_init_req, *pool_init_req;
struct npa_aq_enq_rsp *aura_init_rsp, *pool_init_rsp;
struct otx2_mbox_dev *mdev = &mbox->dev[0];
+ struct otx2_idev_cfg *idev;
int rc, off;
+ idev = otx2_intra_dev_get_cfg();
+ if (idev == NULL)
+ return -ENOMEM;
+
aura_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
aura_init_req->aura_id = aura_id;
@@ -379,6 +384,44 @@ npa_lf_aura_pool_init(struct otx2_mbox *mbox, uint32_t aura_id,
return 0;
else
return NPA_LF_ERR_AURA_POOL_INIT;
+
+ if (!(idev->npa_lock_mask & BIT_ULL(aura_id)))
+ return 0;
+
+ aura_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+ aura_init_req->aura_id = aura_id;
+ aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_init_req->op = NPA_AQ_INSTOP_LOCK;
+
+ pool_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+ if (!pool_init_req) {
+ /* The shared memory buffer can be full.
+ * Flush it and retry
+ */
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (rc < 0) {
+ otx2_err("Failed to LOCK AURA context");
+ return -ENOMEM;
+ }
+
+ pool_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+ if (!pool_init_req) {
+ otx2_err("Failed to LOCK POOL context");
+ return -ENOMEM;
+ }
+ }
+ pool_init_req->aura_id = aura_id;
+ pool_init_req->ctype = NPA_AQ_CTYPE_POOL;
+ pool_init_req->op = NPA_AQ_INSTOP_LOCK;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0) {
+ otx2_err("Failed to lock POOL ctx to NDC");
+ return -ENOMEM;
+ }
+
+ return 0;
}
static int
@@ -390,8 +433,13 @@ npa_lf_aura_pool_fini(struct otx2_mbox *mbox,
struct npa_aq_enq_rsp *aura_rsp, *pool_rsp;
struct otx2_mbox_dev *mdev = &mbox->dev[0];
struct ndc_sync_op *ndc_req;
+ struct otx2_idev_cfg *idev;
int rc, off;
+ idev = otx2_intra_dev_get_cfg();
+ if (idev == NULL)
+ return -EINVAL;
+
/* Procedure for disabling an aura/pool */
rte_delay_us(10);
npa_lf_aura_op_alloc(aura_handle, 0);
@@ -434,6 +482,32 @@ npa_lf_aura_pool_fini(struct otx2_mbox *mbox,
otx2_err("Error on NDC-NPA LF sync, rc %d", rc);
return NPA_LF_ERR_AURA_POOL_FINI;
}
+
+ if (!(idev->npa_lock_mask & BIT_ULL(aura_id)))
+ return 0;
+
+ aura_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+ aura_req->aura_id = aura_id;
+ aura_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_req->op = NPA_AQ_INSTOP_UNLOCK;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0) {
+ otx2_err("Failed to unlock AURA ctx to NDC");
+ return -EINVAL;
+ }
+
+ pool_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+ pool_req->aura_id = aura_id;
+ pool_req->ctype = NPA_AQ_CTYPE_POOL;
+ pool_req->op = NPA_AQ_INSTOP_UNLOCK;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0) {
+ otx2_err("Failed to unlock POOL ctx to NDC");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -161,6 +161,7 @@ otx2_ethdev_parse_devargs(struct rte_devargs *devargs, struct otx2_eth_dev *dev)
&parse_switch_header_type, &switch_header_type);
rte_kvargs_process(kvlist, OTX2_RSS_TAG_AS_XOR,
&parse_flag, &rss_tag_as_xor);
+ otx2_parse_common_devargs(kvlist);
rte_kvargs_free(kvlist);
null_devargs:
@@ -186,4 +187,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_octeontx2,
OTX2_FLOW_PREALLOC_SIZE "=<1-32>"
OTX2_FLOW_MAX_PRIORITY "=<1-32>"
OTX2_SWITCH_HEADER_TYPE "=<higig2|dsa>"
- OTX2_RSS_TAG_AS_XOR "=1");
+ OTX2_RSS_TAG_AS_XOR "=1"
+ OTX2_NPA_LOCK_MASK "=<1-65535>");