[v2,2/3] common/cnxk: add congestion management ROC APIs
Commit Message
From: Sunil Kumar Kori <skori@marvell.com>
Add congestion management RoC APIs.
Depends-on: patch-24902 ("ethdev: support congestion management")
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
v1..v2:
- Rebase on top of the dpdk-next-net-mrvl/for-next-net
drivers/common/cnxk/roc_nix.h | 5 ++
drivers/common/cnxk/roc_nix_queue.c | 102 ++++++++++++++++++++++++++++
drivers/common/cnxk/version.map | 1 +
3 files changed, 108 insertions(+)
Comments
Please look into the changes.
Regards
Sunil Kumar Kori
@@ -315,6 +315,10 @@ struct roc_nix_rq {
         /* Average SPB aura level drop threshold for RED */
         uint8_t spb_red_drop;
         /* Average SPB aura level pass threshold for RED */
         uint8_t spb_red_pass;
+        /* Average xqe level drop threshold for RED */
+        uint8_t xqe_red_drop;
+        /* Average xqe level pass threshold for RED */
+        uint8_t xqe_red_pass;
         /* LPB aura drop enable */
         bool lpb_drop_ena;
@@ -869,6 +873,7 @@ int __roc_api roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq,
                               bool ena);
 int __roc_api roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq,
                                 bool ena);
+int __roc_api roc_nix_rq_cman_config(struct roc_nix *roc_nix, struct roc_nix_rq *rq);
 int __roc_api roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable);
 int __roc_api roc_nix_rq_is_sso_enable(struct roc_nix *roc_nix, uint32_t qid);
 int __roc_api roc_nix_rq_fini(struct roc_nix_rq *rq);
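For reference, the expected call pattern is to fill the RED threshold fields on an already-initialized RQ and then invoke the new API. A minimal sketch follows; the helper name and the threshold values are illustrative assumptions, not part of this patch:

        /* Hypothetical example: program RED pass/drop levels on an RQ.
         * A pass/drop pair is applied only when pass is non-zero and
         * pass >= drop (see the cman cfg helpers in roc_nix_queue.c).
         */
        static int
        example_rq_cman_setup(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
        {
                rq->red_pass = 48;      /* LPB aura average pass level */
                rq->red_drop = 16;      /* LPB aura average drop level */
                rq->spb_red_pass = 48;  /* SPB aura average pass level */
                rq->spb_red_drop = 16;  /* SPB aura average drop level */
                rq->xqe_red_pass = 48;  /* XQE average pass level */
                rq->xqe_red_drop = 16;  /* XQE average drop level */

                return roc_nix_rq_cman_config(roc_nix, rq);
        }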
@@ -235,6 +235,44 @@ nix_rq_aura_buf_type_update(struct roc_nix_rq *rq, bool set)
         return 0;
 }
 
+static int
+nix_rq_cn9k_cman_cfg(struct dev *dev, struct roc_nix_rq *rq)
+{
+        struct mbox *mbox = dev->mbox;
+        struct nix_aq_enq_req *aq;
+
+        aq = mbox_alloc_msg_nix_aq_enq(mbox);
+        if (!aq)
+                return -ENOSPC;
+
+        aq->qidx = rq->qid;
+        aq->ctype = NIX_AQ_CTYPE_RQ;
+        aq->op = NIX_AQ_INSTOP_WRITE;
+
+        if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
+                aq->rq.lpb_pool_pass = rq->red_pass;
+                aq->rq.lpb_pool_drop = rq->red_drop;
+                aq->rq_mask.lpb_pool_pass = ~(aq->rq_mask.lpb_pool_pass);
+                aq->rq_mask.lpb_pool_drop = ~(aq->rq_mask.lpb_pool_drop);
+        }
+
+        if (rq->spb_red_pass && (rq->spb_red_pass >= rq->spb_red_drop)) {
+                aq->rq.spb_pool_pass = rq->spb_red_pass;
+                aq->rq.spb_pool_drop = rq->spb_red_drop;
+                aq->rq_mask.spb_pool_pass = ~(aq->rq_mask.spb_pool_pass);
+                aq->rq_mask.spb_pool_drop = ~(aq->rq_mask.spb_pool_drop);
+        }
+
+        if (rq->xqe_red_pass && (rq->xqe_red_pass >= rq->xqe_red_drop)) {
+                aq->rq.xqe_pass = rq->xqe_red_pass;
+                aq->rq.xqe_drop = rq->xqe_red_drop;
+                aq->rq_mask.xqe_pass = ~(aq->rq_mask.xqe_pass);
+                aq->rq_mask.xqe_drop = ~(aq->rq_mask.xqe_drop);
+        }
+
+        return mbox_process(mbox);
+}
+
 int
 nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
                 bool cfg, bool ena)
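A note on the rq_mask idiom used in both cman helpers: for a NIX_AQ_INSTOP_WRITE, hardware updates only those RQ context fields whose bits are set in the accompanying mask. The request returned by the mbox_alloc_msg_* helpers comes back zero-initialized (the idiom relies on this), so ~(aq->rq_mask.field) flips a zeroed mask field to all-ones, selecting exactly that field for the write and leaving the rest of the RQ context untouched.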
@@ -529,6 +567,44 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
         return 0;
 }
 
+static int
+nix_rq_cman_cfg(struct dev *dev, struct roc_nix_rq *rq)
+{
+        struct nix_cn10k_aq_enq_req *aq;
+        struct mbox *mbox = dev->mbox;
+
+        aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+        if (!aq)
+                return -ENOSPC;
+
+        aq->qidx = rq->qid;
+        aq->ctype = NIX_AQ_CTYPE_RQ;
+        aq->op = NIX_AQ_INSTOP_WRITE;
+
+        if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
+                aq->rq.lpb_pool_pass = rq->red_pass;
+                aq->rq.lpb_pool_drop = rq->red_drop;
+                aq->rq_mask.lpb_pool_pass = ~(aq->rq_mask.lpb_pool_pass);
+                aq->rq_mask.lpb_pool_drop = ~(aq->rq_mask.lpb_pool_drop);
+        }
+
+        if (rq->spb_red_pass && (rq->spb_red_pass >= rq->spb_red_drop)) {
+                aq->rq.spb_pool_pass = rq->spb_red_pass;
+                aq->rq.spb_pool_drop = rq->spb_red_drop;
+                aq->rq_mask.spb_pool_pass = ~(aq->rq_mask.spb_pool_pass);
+                aq->rq_mask.spb_pool_drop = ~(aq->rq_mask.spb_pool_drop);
+        }
+
+        if (rq->xqe_red_pass && (rq->xqe_red_pass >= rq->xqe_red_drop)) {
+                aq->rq.xqe_pass = rq->xqe_red_pass;
+                aq->rq.xqe_drop = rq->xqe_red_drop;
+                aq->rq_mask.xqe_pass = ~(aq->rq_mask.xqe_pass);
+                aq->rq_mask.xqe_drop = ~(aq->rq_mask.xqe_drop);
+        }
+
+        return mbox_process(mbox);
+}
+
 int
 roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 {
@@ -616,6 +692,32 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
         return nix_tel_node_add_rq(rq);
 }
 
+int
+roc_nix_rq_cman_config(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
+{
+        bool is_cn9k = roc_model_is_cn9k();
+        struct nix *nix;
+        struct dev *dev;
+        int rc;
+
+        if (roc_nix == NULL || rq == NULL)
+                return NIX_ERR_PARAM;
+
+        nix = roc_nix_to_nix_priv(roc_nix);
+
+        if (rq->qid >= nix->nb_rx_queues)
+                return NIX_ERR_QUEUE_INVALID_RANGE;
+
+        dev = &nix->dev;
+
+        if (is_cn9k)
+                rc = nix_rq_cn9k_cman_cfg(dev, rq);
+        else
+                rc = nix_rq_cman_cfg(dev, rq);
+
+        return rc;
+}
+
 int
 roc_nix_rq_fini(struct roc_nix_rq *rq)
 {
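Note that the RQ context layouts differ between silicon generations, hence the two helpers: nix_rq_cn9k_cman_cfg() builds a struct nix_aq_enq_req for cn9k, while nix_rq_cman_cfg() builds a struct nix_cn10k_aq_enq_req for cn10k. Because each threshold pair is programmed only when its pass level is non-zero and at least the drop level, callers can leave any of the LPB, SPB, or XQE pairs at zero to keep that part of the RQ context unchanged.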
@@ -228,6 +228,7 @@ INTERNAL {
         roc_nix_reassembly_configure;
         roc_nix_register_cq_irqs;
         roc_nix_register_queue_irqs;
+        roc_nix_rq_cman_config;
         roc_nix_rq_dump;
         roc_nix_rq_ena_dis;
         roc_nix_rq_fini;