From patchwork Tue Aug 9 18:48:53 2022
X-Patchwork-Submitter: Nithin Dabilpuram
X-Patchwork-Id: 114770
X-Patchwork-Delegate: jerinj@marvell.com
From: Nithin Dabilpuram
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
Subject: [PATCH 09/23] common/cnxk: update attributes to pools used by NIX
Date: Wed, 10 Aug 2022 00:18:53 +0530
Message-ID: <20220809184908.24030-9-ndabilpuram@marvell.com>
In-Reply-To: <20220809184908.24030-1-ndabilpuram@marvell.com>
References: <20220809184908.24030-1-ndabilpuram@marvell.com>

Update attributes to pools used by NIX.
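For context, roc_npa_buf_type_update() is called in this patch with an aura handle, a ROC_NPA_BUF_TYPE_* value and a count of +1 (on setup) or -1 (on teardown), i.e. it acts as a per-buffer-type usage counter on the aura: LPB/SPB/VWQE auras are marked when an RQ starts using them and cleared again when the RQ is modified or torn down, and the SQB aura is marked when the SQ pool is populated. The stand-alone sketch below only models that bookkeeping; the enum, struct and helper are hypothetical simplifications for illustration and are not the cnxk driver's definitions.

/*
 * Minimal model of the per-buffer-type counting done via
 * roc_npa_buf_type_update(); the types and helper here are hypothetical.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum buf_type {
	BUF_TYPE_PACKET,	/* stand-in for ROC_NPA_BUF_TYPE_PACKET */
	BUF_TYPE_PACKET_IPSEC,	/* stand-in for ROC_NPA_BUF_TYPE_PACKET_IPSEC */
	BUF_TYPE_VWQE,		/* stand-in for ROC_NPA_BUF_TYPE_VWQE */
	BUF_TYPE_SQB,		/* stand-in for ROC_NPA_BUF_TYPE_SQB */
	BUF_TYPE_END,
};

struct aura_attr {
	uint16_t buf_type[BUF_TYPE_END]; /* usage count per buffer type */
};

/* Same (aura, type, count) shape as the calls added by this patch */
static void
buf_type_update(struct aura_attr *aura, enum buf_type type, int count)
{
	aura->buf_type[type] += count;
}

int
main(void)
{
	struct aura_attr lpb_aura = {0};

	/* RQ init: mark the LPB aura as holding plain packet buffers */
	buf_type_update(&lpb_aura, BUF_TYPE_PACKET, 1);
	/* RQ fini: drop that usage again */
	buf_type_update(&lpb_aura, BUF_TYPE_PACKET, -1);

	assert(lpb_aura.buf_type[BUF_TYPE_PACKET] == 0);
	printf("LPB aura packet usage: %d\n", lpb_aura.buf_type[BUF_TYPE_PACKET]);
	return 0;
}

Keeping a count rather than a flag presumably lets several queues reference the same aura, with the attribute cleared only once the last user is gone.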
Signed-off-by: Nithin Dabilpuram
---
 drivers/common/cnxk/roc_nix_queue.c | 112 +++++++++++++++++++++++++++++++++++-
 1 file changed, 110 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 70b4516..98b9fb4 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -140,6 +140,96 @@ roc_nix_rq_is_sso_enable(struct roc_nix *roc_nix, uint32_t qid)
 	return sso_enable ? true : false;
 }
 
+static int
+nix_rq_aura_buf_type_update(struct roc_nix_rq *rq, bool set)
+{
+	struct roc_nix *roc_nix = rq->roc_nix;
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	bool inl_inb_ena = roc_nix_inl_inb_is_enabled(roc_nix);
+	uint64_t lpb_aura = 0, vwqe_aura = 0, spb_aura = 0;
+	struct mbox *mbox = nix->dev.mbox;
+	uint64_t aura_base;
+	int rc, count;
+
+	count = set ? 1 : -1;
+	/* For buf type set, use info from RQ context */
+	if (set) {
+		lpb_aura = rq->aura_handle;
+		spb_aura = rq->spb_ena ? rq->spb_aura_handle : 0;
+		vwqe_aura = rq->vwqe_ena ? rq->vwqe_aura_handle : 0;
+		goto skip_ctx_read;
+	}
+
+	aura_base = roc_npa_aura_handle_to_base(rq->aura_handle);
+	if (roc_model_is_cn9k()) {
+		struct nix_aq_enq_rsp *rsp;
+		struct nix_aq_enq_req *aq;
+
+		aq = mbox_alloc_msg_nix_aq_enq(mbox);
+		if (!aq)
+			return -ENOSPC;
+
+		aq->qidx = rq->qid;
+		aq->ctype = NIX_AQ_CTYPE_RQ;
+		aq->op = NIX_AQ_INSTOP_READ;
+		rc = mbox_process_msg(mbox, (void *)&rsp);
+		if (rc)
+			return rc;
+
+		/* Get aura handle from aura */
+		lpb_aura = roc_npa_aura_handle_gen(rsp->rq.lpb_aura, aura_base);
+		if (rsp->rq.spb_ena)
+			spb_aura = roc_npa_aura_handle_gen(rsp->rq.spb_aura, aura_base);
+	} else {
+		struct nix_cn10k_aq_enq_rsp *rsp;
+		struct nix_cn10k_aq_enq_req *aq;
+
+		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+		if (!aq)
+			return -ENOSPC;
+
+		aq->qidx = rq->qid;
+		aq->ctype = NIX_AQ_CTYPE_RQ;
+		aq->op = NIX_AQ_INSTOP_READ;
+
+		rc = mbox_process_msg(mbox, (void *)&rsp);
+		if (rc)
+			return rc;
+
+		/* Get aura handle from aura */
+		lpb_aura = roc_npa_aura_handle_gen(rsp->rq.lpb_aura, aura_base);
+		if (rsp->rq.spb_ena)
+			spb_aura = roc_npa_aura_handle_gen(rsp->rq.spb_aura, aura_base);
+		if (rsp->rq.vwqe_ena)
+			vwqe_aura = roc_npa_aura_handle_gen(rsp->rq.wqe_aura, aura_base);
+	}
+
+skip_ctx_read:
+	/* Update attributes for LPB aura */
+	if (inl_inb_ena)
+		roc_npa_buf_type_update(lpb_aura, ROC_NPA_BUF_TYPE_PACKET_IPSEC, count);
+	else
+		roc_npa_buf_type_update(lpb_aura, ROC_NPA_BUF_TYPE_PACKET, count);
+
+	/* Update attributes for SPB aura */
+	if (spb_aura) {
+		if (inl_inb_ena)
+			roc_npa_buf_type_update(spb_aura, ROC_NPA_BUF_TYPE_PACKET_IPSEC, count);
+		else
+			roc_npa_buf_type_update(spb_aura, ROC_NPA_BUF_TYPE_PACKET, count);
+	}
+
+	/* Update attributes for VWQE aura */
+	if (vwqe_aura) {
+		if (inl_inb_ena)
+			roc_npa_buf_type_update(vwqe_aura, ROC_NPA_BUF_TYPE_VWQE_IPSEC, count);
+		else
+			roc_npa_buf_type_update(vwqe_aura, ROC_NPA_BUF_TYPE_VWQE, count);
+	}
+
+	return 0;
+}
+
 int
 nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 		bool ena)
@@ -292,7 +382,7 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 			/* Maximal Vector size is (2^(MAX_VSIZE_EXP+2)) */
 			aq->rq.max_vsize_exp = rq->vwqe_max_sz_exp - 2;
 			aq->rq.vtime_wait = rq->vwqe_wait_tmo;
-			aq->rq.wqe_aura = rq->vwqe_aura_handle;
+			aq->rq.wqe_aura = roc_npa_aura_handle_to_aura(rq->vwqe_aura_handle);
 		}
 	} else {
 		/* CQ mode */
@@ -463,6 +553,9 @@ roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	if (rc)
 		return rc;
 
+	/* Update aura buf type to indicate its use */
+	nix_rq_aura_buf_type_update(rq, true);
+
 	return nix_tel_node_add_rq(rq);
 }
 
@@ -481,6 +574,9 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	if (rq->qid >= nix->nb_rx_queues)
 		return NIX_ERR_QUEUE_INVALID_RANGE;
 
+	/* Clear attributes for existing auras */
+	nix_rq_aura_buf_type_update(rq, false);
+
 	rq->roc_nix = roc_nix;
 
 	if (is_cn9k)
@@ -495,14 +591,25 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	if (rc)
 		return rc;
 
+	/* Update aura attribute to indicate its use */
+	nix_rq_aura_buf_type_update(rq, true);
+
 	return nix_tel_node_add_rq(rq);
 }
 
 int
 roc_nix_rq_fini(struct roc_nix_rq *rq)
 {
+	int rc;
+
 	/* Disabling RQ is sufficient */
-	return roc_nix_rq_ena_dis(rq, false);
+	rc = roc_nix_rq_ena_dis(rq, false);
+	if (rc)
+		return rc;
+
+	/* Clear aura attributes as the RQ no longer uses them */
+	nix_rq_aura_buf_type_update(rq, false);
+	return 0;
 }
 
 int
@@ -717,6 +824,7 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 	if (rc)
 		goto fail;
 
+	roc_npa_buf_type_update(sq->aura_handle, ROC_NPA_BUF_TYPE_SQB, 1);
 	sq->sqe_mem = plt_zmalloc(blk_sz * nb_sqb_bufs, blk_sz);
 	if (sq->sqe_mem == NULL) {
 		rc = NIX_ERR_NO_MEM;