From patchwork Mon May 16 17:35:50 2022
X-Patchwork-Submitter: Shijith Thotton
X-Patchwork-Id: 111192
X-Patchwork-Delegate: jerinj@marvell.com
From: Shijith Thotton
Cc: Pavan Nikhilesh, Shijith Thotton, Nithin Dabilpuram, "Kiran Kumar K",
 Sunil Kumar Kori, Satha Rao
Subject: [PATCH v4 4/5] common/cnxk: use lock when accessing mbox of SSO
Date: Mon, 16 May 2022 23:05:50 +0530
X-Mailer: git-send-email 2.25.1
List-Id: DPDK patches and discussions

From: Pavan Nikhilesh

Since the mbox is now accessed from multiple threads, use a lock to
synchronize access to it.
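Every mailbox transaction in roc_sso.c and roc_tim.c now follows the same
shape: take the per-device mbox_lock added to struct sso, allocate and send
the message, normalize mailbox failures to -EIO, and release the lock on
every exit path. A minimal sketch of that shape, using the plt_spinlock_*()
and mbox_*() helpers already present in the driver (the request type and
mbox_alloc_msg_sso_dummy_cfg() below are hypothetical, for illustration
only):

	/* Sketch only: mirrors the locking shape used in roc_sso.c/roc_tim.c.
	 * "struct sso_dummy_cfg" and mbox_alloc_msg_sso_dummy_cfg() are
	 * hypothetical stand-ins for a real mailbox request.
	 */
	static int
	sso_dummy_cfg(struct sso *sso, uint16_t hwgrp)
	{
		struct dev *dev = &sso->dev;
		struct sso_dummy_cfg *req;
		int rc = -ENOSPC;

		plt_spinlock_lock(&sso->mbox_lock);	/* serialize mbox users */
		req = mbox_alloc_msg_sso_dummy_cfg(dev->mbox);
		if (req == NULL)
			goto fail;			/* rc stays -ENOSPC */

		req->hwgrp = hwgrp;
		rc = mbox_process(dev->mbox);		/* send and wait for reply */
		if (rc)
			rc = -EIO;			/* normalize mailbox errors */
	fail:
		plt_spinlock_unlock(&sso->mbox_lock);	/* unlock on every path */
		return rc;
	}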
Signed-off-by: Pavan Nikhilesh
Signed-off-by: Shijith Thotton
---
 drivers/common/cnxk/roc_sso.c      | 174 +++++++++++++++++++++--------
 drivers/common/cnxk/roc_sso_priv.h |   1 +
 drivers/common/cnxk/roc_tim.c      | 134 ++++++++++++++--------
 3 files changed, 215 insertions(+), 94 deletions(-)

diff --git a/drivers/common/cnxk/roc_sso.c b/drivers/common/cnxk/roc_sso.c
index f8a0a96533..358d37a9f2 100644
--- a/drivers/common/cnxk/roc_sso.c
+++ b/drivers/common/cnxk/roc_sso.c
@@ -36,8 +36,8 @@ sso_lf_alloc(struct dev *dev, enum sso_lf_type lf_type, uint16_t nb_lf,
 	}
 
 	rc = mbox_process_msg(dev->mbox, rsp);
-	if (rc < 0)
-		return rc;
+	if (rc)
+		return -EIO;
 
 	return 0;
 }
@@ -69,8 +69,8 @@ sso_lf_free(struct dev *dev, enum sso_lf_type lf_type, uint16_t nb_lf)
 	}
 
 	rc = mbox_process(dev->mbox);
-	if (rc < 0)
-		return rc;
+	if (rc)
+		return -EIO;
 
 	return 0;
 }
@@ -98,7 +98,7 @@ sso_rsrc_attach(struct roc_sso *roc_sso, enum sso_lf_type lf_type,
 	}
 
 	req->modify = true;
-	if (mbox_process(dev->mbox) < 0)
+	if (mbox_process(dev->mbox))
 		return -EIO;
 
 	return 0;
@@ -126,7 +126,7 @@ sso_rsrc_detach(struct roc_sso *roc_sso, enum sso_lf_type lf_type)
 	}
 
 	req->partial = true;
-	if (mbox_process(dev->mbox) < 0)
+	if (mbox_process(dev->mbox))
 		return -EIO;
 
 	return 0;
@@ -141,9 +141,9 @@ sso_rsrc_get(struct roc_sso *roc_sso)
 
 	mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
 	rc = mbox_process_msg(dev->mbox, (void **)&rsrc_cnt);
-	if (rc < 0) {
+	if (rc) {
 		plt_err("Failed to get free resource count\n");
-		return rc;
+		return -EIO;
 	}
 
 	roc_sso->max_hwgrp = rsrc_cnt->sso;
@@ -197,8 +197,8 @@ sso_msix_fill(struct roc_sso *roc_sso, uint16_t nb_hws, uint16_t nb_hwgrp)
 
 	mbox_alloc_msg_msix_offset(dev->mbox);
 	rc = mbox_process_msg(dev->mbox, (void **)&rsp);
-	if (rc < 0)
-		return rc;
+	if (rc)
+		return -EIO;
 
 	for (i = 0; i < nb_hws; i++)
 		sso->hws_msix_offset[i] = rsp->ssow_msixoff[i];
@@ -285,53 +285,71 @@ int
 roc_sso_hws_stats_get(struct roc_sso *roc_sso, uint8_t hws,
 		      struct roc_sso_hws_stats *stats)
 {
-	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
 	struct sso_hws_stats *req_rsp;
+	struct dev *dev = &sso->dev;
 	int rc;
 
+	plt_spinlock_lock(&sso->mbox_lock);
 	req_rsp = (struct sso_hws_stats *)mbox_alloc_msg_sso_hws_get_stats(
 		dev->mbox);
 	if (req_rsp == NULL) {
 		rc = mbox_process(dev->mbox);
-		if (rc < 0)
-			return rc;
+		if (rc) {
+			rc = -EIO;
+			goto fail;
+		}
 		req_rsp = (struct sso_hws_stats *)
			mbox_alloc_msg_sso_hws_get_stats(dev->mbox);
-		if (req_rsp == NULL)
-			return -ENOSPC;
+		if (req_rsp == NULL) {
+			rc = -ENOSPC;
+			goto fail;
+		}
 	}
 	req_rsp->hws = hws;
 	rc = mbox_process_msg(dev->mbox, (void **)&req_rsp);
-	if (rc)
-		return rc;
+	if (rc) {
+		rc = -EIO;
+		goto fail;
+	}
 
 	stats->arbitration = req_rsp->arbitration;
-	return 0;
+fail:
+	plt_spinlock_unlock(&sso->mbox_lock);
+	return rc;
 }
 
 int
 roc_sso_hwgrp_stats_get(struct roc_sso *roc_sso, uint8_t hwgrp,
 			struct roc_sso_hwgrp_stats *stats)
 {
-	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
 	struct sso_grp_stats *req_rsp;
+	struct dev *dev = &sso->dev;
 	int rc;
 
+	plt_spinlock_lock(&sso->mbox_lock);
 	req_rsp = (struct sso_grp_stats *)mbox_alloc_msg_sso_grp_get_stats(
 		dev->mbox);
 	if (req_rsp == NULL) {
 		rc = mbox_process(dev->mbox);
-		if (rc < 0)
-			return rc;
+		if (rc) {
+			rc = -EIO;
+			goto fail;
+		}
 		req_rsp = (struct sso_grp_stats *)
			mbox_alloc_msg_sso_grp_get_stats(dev->mbox);
-		if (req_rsp == NULL)
-			return -ENOSPC;
+		if (req_rsp == NULL) {
+			rc = -ENOSPC;
+			goto fail;
+		}
 	}
 	req_rsp->grp = hwgrp;
 	rc = mbox_process_msg(dev->mbox, (void **)&req_rsp);
-	if (rc)
-		return rc;
+	if (rc) {
+		rc = -EIO;
+		goto fail;
+	}
 
 	stats->aw_status = req_rsp->aw_status;
 	stats->dq_pc = req_rsp->dq_pc;
@@ -341,7 +359,10 @@ roc_sso_hwgrp_stats_get(struct roc_sso *roc_sso, uint8_t hwgrp,
 	stats->ts_pc = req_rsp->ts_pc;
 	stats->wa_pc = req_rsp->wa_pc;
 	stats->ws_pc = req_rsp->ws_pc;
-	return 0;
+
+fail:
+	plt_spinlock_unlock(&sso->mbox_lock);
+	return rc;
 }
 
 int
@@ -358,10 +379,12 @@ int
 roc_sso_hwgrp_qos_config(struct roc_sso *roc_sso, struct roc_sso_hwgrp_qos *qos,
 			 uint8_t nb_qos, uint32_t nb_xaq)
 {
-	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
+	struct dev *dev = &sso->dev;
 	struct sso_grp_qos_cfg *req;
 	int i, rc;
 
+	plt_spinlock_lock(&sso->mbox_lock);
 	for (i = 0; i < nb_qos; i++) {
 		uint8_t xaq_prcnt = qos[i].xaq_prcnt;
 		uint8_t iaq_prcnt = qos[i].iaq_prcnt;
@@ -370,11 +393,16 @@ roc_sso_hwgrp_qos_config(struct roc_sso *roc_sso, struct roc_sso_hwgrp_qos *qos,
 		req = mbox_alloc_msg_sso_grp_qos_config(dev->mbox);
 		if (req == NULL) {
 			rc = mbox_process(dev->mbox);
-			if (rc < 0)
-				return rc;
+			if (rc) {
+				rc = -EIO;
+				goto fail;
+			}
+
 			req = mbox_alloc_msg_sso_grp_qos_config(dev->mbox);
-			if (req == NULL)
-				return -ENOSPC;
+			if (req == NULL) {
+				rc = -ENOSPC;
+				goto fail;
+			}
 		}
 		req->grp = qos[i].hwgrp;
 		req->xaq_limit = (nb_xaq * (xaq_prcnt ? xaq_prcnt : 100)) / 100;
@@ -386,7 +414,12 @@ roc_sso_hwgrp_qos_config(struct roc_sso *roc_sso, struct roc_sso_hwgrp_qos *qos,
 			 100;
 	}
 
-	return mbox_process(dev->mbox);
+	rc = mbox_process(dev->mbox);
+	if (rc)
+		rc = -EIO;
+fail:
+	plt_spinlock_unlock(&sso->mbox_lock);
+	return rc;
 }
 
 int
@@ -482,11 +515,16 @@ sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
 int
 roc_sso_hwgrp_init_xaq_aura(struct roc_sso *roc_sso, uint32_t nb_xae)
 {
-	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
+	struct dev *dev = &sso->dev;
+	int rc;
 
-	return sso_hwgrp_init_xaq_aura(dev, &roc_sso->xaq, nb_xae,
-				       roc_sso->xae_waes, roc_sso->xaq_buf_size,
-				       roc_sso->nb_hwgrp);
+	plt_spinlock_lock(&sso->mbox_lock);
+	rc = sso_hwgrp_init_xaq_aura(dev, &roc_sso->xaq, nb_xae,
+				     roc_sso->xae_waes, roc_sso->xaq_buf_size,
+				     roc_sso->nb_hwgrp);
+	plt_spinlock_unlock(&sso->mbox_lock);
+	return rc;
 }
 
 int
@@ -515,9 +553,14 @@ sso_hwgrp_free_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
 int
 roc_sso_hwgrp_free_xaq_aura(struct roc_sso *roc_sso, uint16_t nb_hwgrp)
 {
-	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
+	struct dev *dev = &sso->dev;
+	int rc;
 
-	return sso_hwgrp_free_xaq_aura(dev, &roc_sso->xaq, nb_hwgrp);
+	plt_spinlock_lock(&sso->mbox_lock);
+	rc = sso_hwgrp_free_xaq_aura(dev, &roc_sso->xaq, nb_hwgrp);
+	plt_spinlock_unlock(&sso->mbox_lock);
+	return rc;
 }
 
 int
@@ -533,16 +576,24 @@ sso_hwgrp_alloc_xaq(struct dev *dev, uint32_t npa_aura_id, uint16_t hwgrps)
 	req->npa_aura_id = npa_aura_id;
 	req->hwgrps = hwgrps;
 
-	return mbox_process(dev->mbox);
+	if (mbox_process(dev->mbox))
+		return -EIO;
+
+	return 0;
 }
 
 int
 roc_sso_hwgrp_alloc_xaq(struct roc_sso *roc_sso, uint32_t npa_aura_id,
 			uint16_t hwgrps)
 {
-	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
+	struct dev *dev = &sso->dev;
+	int rc;
 
-	return sso_hwgrp_alloc_xaq(dev, npa_aura_id, hwgrps);
+	plt_spinlock_lock(&sso->mbox_lock);
+	rc = sso_hwgrp_alloc_xaq(dev, npa_aura_id, hwgrps);
+	plt_spinlock_unlock(&sso->mbox_lock);
+	return rc;
 }
 
 int
@@ -555,40 +606,56 @@ sso_hwgrp_release_xaq(struct dev *dev, uint16_t hwgrps)
 		return -EINVAL;
 	req->hwgrps = hwgrps;
 
-	return mbox_process(dev->mbox);
+	if (mbox_process(dev->mbox))
+		return -EIO;
+
+	return 0;
 }
 
 int
 roc_sso_hwgrp_release_xaq(struct roc_sso *roc_sso, uint16_t hwgrps)
 {
-	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
+	struct dev *dev = &sso->dev;
+	int rc;
 
-	return sso_hwgrp_release_xaq(dev, hwgrps);
+	plt_spinlock_lock(&sso->mbox_lock);
+	rc = sso_hwgrp_release_xaq(dev, hwgrps);
+	plt_spinlock_unlock(&sso->mbox_lock);
+	return rc;
 }
 
 int
 roc_sso_hwgrp_set_priority(struct roc_sso *roc_sso, uint16_t hwgrp,
 			   uint8_t weight, uint8_t affinity, uint8_t priority)
 {
-	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+	struct sso *sso = roc_sso_to_sso_priv(roc_sso);
+	struct dev *dev = &sso->dev;
 	struct sso_grp_priority *req;
 	int rc = -ENOSPC;
 
+	plt_spinlock_lock(&sso->mbox_lock);
 	req = mbox_alloc_msg_sso_grp_set_priority(dev->mbox);
 	if (req == NULL)
-		return rc;
+		goto fail;
 	req->grp = hwgrp;
 	req->weight = weight;
 	req->affinity = affinity;
 	req->priority = priority;
 	rc = mbox_process(dev->mbox);
-	if (rc < 0)
-		return rc;
+	if (rc) {
+		rc = -EIO;
+		goto fail;
+	}
 
+	plt_spinlock_unlock(&sso->mbox_lock);
 	plt_sso_dbg("HWGRP %d weight %d affinity %d priority %d", hwgrp, weight,
 		    affinity, priority);
 
 	return 0;
+fail:
+	plt_spinlock_unlock(&sso->mbox_lock);
+	return rc;
 }
 
 int
@@ -603,10 +670,11 @@ roc_sso_rsrc_init(struct roc_sso *roc_sso, uint8_t nb_hws, uint16_t nb_hwgrp)
 	if (roc_sso->max_hws < nb_hws)
 		return -ENOENT;
 
+	plt_spinlock_lock(&sso->mbox_lock);
 	rc = sso_rsrc_attach(roc_sso, SSO_LF_TYPE_HWS, nb_hws);
 	if (rc < 0) {
 		plt_err("Unable to attach SSO HWS LFs");
-		return rc;
+		goto fail;
 	}
 
 	rc = sso_rsrc_attach(roc_sso, SSO_LF_TYPE_HWGRP, nb_hwgrp);
@@ -645,6 +713,7 @@ roc_sso_rsrc_init(struct roc_sso *roc_sso, uint8_t nb_hws, uint16_t nb_hwgrp)
 		goto sso_msix_fail;
 	}
 
+	plt_spinlock_unlock(&sso->mbox_lock);
 	roc_sso->nb_hwgrp = nb_hwgrp;
 	roc_sso->nb_hws = nb_hws;
 
@@ -657,6 +726,8 @@ roc_sso_rsrc_init(struct roc_sso *roc_sso, uint8_t nb_hws, uint16_t nb_hwgrp)
 	sso_rsrc_detach(roc_sso, SSO_LF_TYPE_HWGRP);
 hwgrp_atch_fail:
 	sso_rsrc_detach(roc_sso, SSO_LF_TYPE_HWS);
+fail:
+	plt_spinlock_unlock(&sso->mbox_lock);
 	return rc;
 }
 
@@ -678,6 +749,7 @@ roc_sso_rsrc_fini(struct roc_sso *roc_sso)
 
 	roc_sso->nb_hwgrp = 0;
 	roc_sso->nb_hws = 0;
+	plt_spinlock_unlock(&sso->mbox_lock);
 }
 
 int
@@ -696,6 +768,7 @@ roc_sso_dev_init(struct roc_sso *roc_sso)
 	sso = roc_sso_to_sso_priv(roc_sso);
 	memset(sso, 0, sizeof(*sso));
 	pci_dev = roc_sso->pci_dev;
+	plt_spinlock_init(&sso->mbox_lock);
 
 	rc = dev_init(&sso->dev, pci_dev);
 	if (rc < 0) {
@@ -703,6 +776,7 @@ roc_sso_dev_init(struct roc_sso *roc_sso)
 		goto fail;
 	}
 
+	plt_spinlock_lock(&sso->mbox_lock);
 	rc = sso_rsrc_get(roc_sso);
 	if (rc < 0) {
 		plt_err("Failed to get SSO resources");
@@ -739,6 +813,7 @@ roc_sso_dev_init(struct roc_sso *roc_sso)
 	sso->pci_dev = pci_dev;
 	sso->dev.drv_inited = true;
 	roc_sso->lmt_base = sso->dev.lmt_base;
+	plt_spinlock_unlock(&sso->mbox_lock);
 
 	return 0;
 link_mem_free:
@@ -746,6 +821,7 @@ roc_sso_dev_init(struct roc_sso *roc_sso)
 rsrc_fail:
 	rc |= dev_fini(&sso->dev, pci_dev);
 fail:
+	plt_spinlock_unlock(&sso->mbox_lock);
 	return rc;
 }
 
diff --git a/drivers/common/cnxk/roc_sso_priv.h b/drivers/common/cnxk/roc_sso_priv.h
index 09729d4f62..674e4e0a39 100644
--- a/drivers/common/cnxk/roc_sso_priv.h
+++ b/drivers/common/cnxk/roc_sso_priv.h
@@ -22,6 +22,7 @@ struct sso {
 	/* SSO link mapping. */
 	struct plt_bitmap **link_map;
 	void *link_map_mem;
+	plt_spinlock_t mbox_lock;
 } __plt_cache_aligned;
 
 enum sso_err_status {
diff --git a/drivers/common/cnxk/roc_tim.c b/drivers/common/cnxk/roc_tim.c
index cefd9bc89d..0f9209937b 100644
--- a/drivers/common/cnxk/roc_tim.c
+++ b/drivers/common/cnxk/roc_tim.c
@@ -8,15 +8,16 @@
 static int
 tim_fill_msix(struct roc_tim *roc_tim, uint16_t nb_ring)
 {
-	struct dev *dev = &roc_sso_to_sso_priv(roc_tim->roc_sso)->dev;
+	struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
 	struct tim *tim = roc_tim_to_tim_priv(roc_tim);
+	struct dev *dev = &sso->dev;
 	struct msix_offset_rsp *rsp;
 	int i, rc;
 
 	mbox_alloc_msg_msix_offset(dev->mbox);
 	rc = mbox_process_msg(dev->mbox, (void **)&rsp);
-	if (rc < 0)
-		return rc;
+	if (rc)
+		return -EIO;
 
 	for (i = 0; i < nb_ring; i++)
 		tim->tim_msix_offsets[i] = rsp->timlf_msixoff[i];
@@ -88,20 +89,23 @@ int
 roc_tim_lf_enable(struct roc_tim *roc_tim, uint8_t ring_id, uint64_t *start_tsc,
 		  uint32_t *cur_bkt)
 {
-	struct dev *dev = &roc_sso_to_sso_priv(roc_tim->roc_sso)->dev;
+	struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
+	struct dev *dev = &sso->dev;
 	struct tim_enable_rsp *rsp;
 	struct tim_ring_req *req;
 	int rc = -ENOSPC;
 
+	plt_spinlock_lock(&sso->mbox_lock);
 	req = mbox_alloc_msg_tim_enable_ring(dev->mbox);
 	if (req == NULL)
-		return rc;
+		goto fail;
 	req->ring = ring_id;
 
 	rc = mbox_process_msg(dev->mbox, (void **)&rsp);
-	if (rc < 0) {
+	if (rc) {
 		tim_err_desc(rc);
-		return rc;
+		rc = -EIO;
+		goto fail;
 	}
 
 	if (cur_bkt)
@@ -109,28 +113,34 @@ roc_tim_lf_enable(struct roc_tim *roc_tim, uint8_t ring_id, uint64_t *start_tsc,
 	if (start_tsc)
 		*start_tsc = rsp->timestarted;
 
-	return 0;
+fail:
+	plt_spinlock_unlock(&sso->mbox_lock);
+	return rc;
 }
 
 int
 roc_tim_lf_disable(struct roc_tim *roc_tim, uint8_t ring_id)
 {
-	struct dev *dev = &roc_sso_to_sso_priv(roc_tim->roc_sso)->dev;
+	struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
+	struct dev *dev = &sso->dev;
 	struct tim_ring_req *req;
 	int rc = -ENOSPC;
 
+	plt_spinlock_lock(&sso->mbox_lock);
 	req = mbox_alloc_msg_tim_disable_ring(dev->mbox);
 	if (req == NULL)
-		return rc;
+		goto fail;
 	req->ring = ring_id;
 
 	rc = mbox_process(dev->mbox);
-	if (rc < 0) {
+	if (rc) {
 		tim_err_desc(rc);
-		return rc;
+		rc = -EIO;
 	}
 
-	return 0;
+fail:
+	plt_spinlock_unlock(&sso->mbox_lock);
+	return rc;
 }
 
 uintptr_t
@@ -147,13 +157,15 @@ roc_tim_lf_config(struct roc_tim *roc_tim, uint8_t ring_id,
 		  uint8_t ena_dfb, uint32_t bucket_sz, uint32_t chunk_sz,
 		  uint32_t interval, uint64_t intervalns, uint64_t clockfreq)
 {
-	struct dev *dev = &roc_sso_to_sso_priv(roc_tim->roc_sso)->dev;
+	struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
+	struct dev *dev = &sso->dev;
 	struct tim_config_req *req;
 	int rc = -ENOSPC;
 
+	plt_spinlock_lock(&sso->mbox_lock);
 	req = mbox_alloc_msg_tim_config_ring(dev->mbox);
 	if (req == NULL)
-		return rc;
+		goto fail;
 	req->ring = ring_id;
 	req->bigendian = false;
 	req->bucketsize = bucket_sz;
@@ -167,12 +179,14 @@ roc_tim_lf_config(struct roc_tim *roc_tim, uint8_t ring_id,
 	req->gpioedge = TIM_GPIO_LTOH_TRANS;
 
 	rc = mbox_process(dev->mbox);
-	if (rc < 0) {
+	if (rc) {
 		tim_err_desc(rc);
-		return rc;
+		rc = -EIO;
 	}
 
-	return 0;
+fail:
+	plt_spinlock_unlock(&sso->mbox_lock);
+	return rc;
 }
 
 int
@@ -180,27 +194,32 @@ roc_tim_lf_interval(struct roc_tim *roc_tim, enum roc_tim_clk_src clk_src,
 		    uint64_t clockfreq, uint64_t *intervalns,
 		    uint64_t *interval)
 {
-	struct dev *dev = &roc_sso_to_sso_priv(roc_tim->roc_sso)->dev;
+	struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
+	struct dev *dev = &sso->dev;
 	struct tim_intvl_req *req;
 	struct tim_intvl_rsp *rsp;
 	int rc = -ENOSPC;
 
+	plt_spinlock_lock(&sso->mbox_lock);
 	req = mbox_alloc_msg_tim_get_min_intvl(dev->mbox);
 	if (req == NULL)
-		return rc;
+		goto fail;
 	req->clockfreq = clockfreq;
 	req->clocksource = clk_src;
 
 	rc = mbox_process_msg(dev->mbox, (void **)&rsp);
-	if (rc < 0) {
+	if (rc) {
 		tim_err_desc(rc);
-		return rc;
+		rc = -EIO;
+		goto fail;
 	}
 
 	*intervalns = rsp->intvl_ns;
 	*interval = rsp->intvl_cyc;
 
-	return 0;
+fail:
+	plt_spinlock_unlock(&sso->mbox_lock);
+	return rc;
 }
 
 int
@@ -214,17 +233,19 @@ roc_tim_lf_alloc(struct roc_tim *roc_tim, uint8_t ring_id, uint64_t *clk)
 	struct dev *dev = &sso->dev;
 	int rc = -ENOSPC;
 
+	plt_spinlock_lock(&sso->mbox_lock);
 	req = mbox_alloc_msg_tim_lf_alloc(dev->mbox);
 	if (req == NULL)
-		return rc;
+		goto fail;
 	req->npa_pf_func = idev_npa_pffunc_get();
 	req->sso_pf_func = idev_sso_pffunc_get();
 	req->ring = ring_id;
 
 	rc = mbox_process_msg(dev->mbox, (void **)&rsp);
-	if (rc < 0) {
+	if (rc) {
 		tim_err_desc(rc);
-		return rc;
+		rc = -EIO;
+		goto fail;
 	}
 
 	if (clk)
@@ -235,12 +256,18 @@ roc_tim_lf_alloc(struct roc_tim *roc_tim, uint8_t ring_id, uint64_t *clk)
 	if (rc < 0) {
 		plt_tim_dbg("Failed to register Ring[%d] IRQ", ring_id);
 		free_req = mbox_alloc_msg_tim_lf_free(dev->mbox);
-		if (free_req == NULL)
-			return -ENOSPC;
+		if (free_req == NULL) {
+			rc = -ENOSPC;
+			goto fail;
+		}
 		free_req->ring = ring_id;
-		mbox_process(dev->mbox);
+		rc = mbox_process(dev->mbox);
+		if (rc)
+			rc = -EIO;
 	}
 
+fail:
+	plt_spinlock_unlock(&sso->mbox_lock);
 	return rc;
 }
 
@@ -256,17 +283,20 @@ roc_tim_lf_free(struct roc_tim *roc_tim, uint8_t ring_id)
 	tim_unregister_irq_priv(roc_tim, sso->pci_dev->intr_handle, ring_id,
 				tim->tim_msix_offsets[ring_id]);
 
+	plt_spinlock_lock(&sso->mbox_lock);
 	req = mbox_alloc_msg_tim_lf_free(dev->mbox);
 	if (req == NULL)
-		return rc;
+		goto fail;
 	req->ring = ring_id;
 
 	rc = mbox_process(dev->mbox);
 	if (rc < 0) {
 		tim_err_desc(rc);
-		return rc;
+		rc = -EIO;
 	}
 
+fail:
+	plt_spinlock_unlock(&sso->mbox_lock);
 	return 0;
 }
 
@@ -276,40 +306,48 @@ roc_tim_init(struct roc_tim *roc_tim)
 	struct rsrc_attach_req *attach_req;
 	struct rsrc_detach_req *detach_req;
 	struct free_rsrcs_rsp *free_rsrc;
-	struct dev *dev;
+	struct sso *sso;
 	uint16_t nb_lfs;
+	struct dev *dev;
 	int rc;
 
 	if (roc_tim == NULL || roc_tim->roc_sso == NULL)
 		return TIM_ERR_PARAM;
 
+	sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
+	dev = &sso->dev;
 	PLT_STATIC_ASSERT(sizeof(struct tim) <= TIM_MEM_SZ);
-	dev = &roc_sso_to_sso_priv(roc_tim->roc_sso)->dev;
 	nb_lfs = roc_tim->nb_lfs;
+	plt_spinlock_lock(&sso->mbox_lock);
 	mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
 	rc = mbox_process_msg(dev->mbox, (void *)&free_rsrc);
-	if (rc < 0) {
+	if (rc) {
 		plt_err("Unable to get free rsrc count.");
-		return 0;
+		nb_lfs = 0;
+		goto fail;
 	}
 
 	if (nb_lfs && (free_rsrc->tim < nb_lfs)) {
 		plt_tim_dbg("Requested LFs : %d Available LFs : %d", nb_lfs,
 			    free_rsrc->tim);
-		return 0;
+		nb_lfs = 0;
+		goto fail;
 	}
 
 	attach_req = mbox_alloc_msg_attach_resources(dev->mbox);
-	if (attach_req == NULL)
-		return -ENOSPC;
+	if (attach_req == NULL) {
+		nb_lfs = 0;
+		goto fail;
+	}
 	attach_req->modify = true;
 	attach_req->timlfs = nb_lfs ? nb_lfs : free_rsrc->tim;
 	nb_lfs = attach_req->timlfs;
 
 	rc = mbox_process(dev->mbox);
-	if (rc < 0) {
+	if (rc) {
 		plt_err("Unable to attach TIM LFs.");
-		return 0;
+		nb_lfs = 0;
+		goto fail;
 	}
 
 	rc = tim_fill_msix(roc_tim, nb_lfs);
@@ -317,28 +355,34 @@ roc_tim_init(struct roc_tim *roc_tim)
 		plt_err("Unable to get TIM MSIX vectors");
 
 		detach_req = mbox_alloc_msg_detach_resources(dev->mbox);
-		if (detach_req == NULL)
-			return -ENOSPC;
+		if (detach_req == NULL) {
+			nb_lfs = 0;
+			goto fail;
+		}
 		detach_req->partial = true;
 		detach_req->timlfs = true;
 		mbox_process(dev->mbox);
-
-		return 0;
+		nb_lfs = 0;
 	}
 
+fail:
+	plt_spinlock_unlock(&sso->mbox_lock);
 	return nb_lfs;
 }
 
 void
 roc_tim_fini(struct roc_tim *roc_tim)
 {
-	struct dev *dev = &roc_sso_to_sso_priv(roc_tim->roc_sso)->dev;
+	struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
 	struct rsrc_detach_req *detach_req;
+	struct dev *dev = &sso->dev;
 
+	plt_spinlock_lock(&sso->mbox_lock);
 	detach_req = mbox_alloc_msg_detach_resources(dev->mbox);
 	PLT_ASSERT(detach_req);
 	detach_req->partial = true;
 	detach_req->timlfs = true;
 	mbox_process(dev->mbox);
+	plt_spinlock_unlock(&sso->mbox_lock);
 }
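Besides the lock itself, the other idiom that recurs above is the mailbox
allocate-flush-retry sequence used by the stats and QoS paths: when
mbox_alloc_msg_*() returns NULL because the request queue is full, the queued
messages are flushed with mbox_process() and the allocation is retried once.
A minimal sketch, assuming the caller already holds sso->mbox_lock (the
wrapper name sso_stats_msg_alloc() is hypothetical, for illustration only):

	/* Sketch only: the allocate-flush-retry idiom from the stats/QoS paths. */
	static struct sso_hws_stats *
	sso_stats_msg_alloc(struct sso *sso)
	{
		struct dev *dev = &sso->dev;
		struct sso_hws_stats *req;

		/* Caller already holds sso->mbox_lock. */
		req = (struct sso_hws_stats *)
			mbox_alloc_msg_sso_hws_get_stats(dev->mbox);
		if (req == NULL) {
			/* Queue full: flush pending requests, then retry once. */
			if (mbox_process(dev->mbox))
				return NULL;
			req = (struct sso_hws_stats *)
				mbox_alloc_msg_sso_hws_get_stats(dev->mbox);
		}
		return req;
	}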