From patchwork Fri Jun 18 10:36:40 2021
X-Patchwork-Submitter: Nithin Dabilpuram
X-Patchwork-Id: 94410
X-Patchwork-Delegate: jerinj@marvell.com
From: Nithin Dabilpuram
Date: Fri, 18 Jun 2021 16:06:40 +0530
Message-ID: <20210618103741.26526-2-ndabilpuram@marvell.com>
In-Reply-To: <20210618103741.26526-1-ndabilpuram@marvell.com>
References: <20210306153404.10781-1-ndabilpuram@marvell.com>
 <20210618103741.26526-1-ndabilpuram@marvell.com>
Subject: [dpdk-dev] [PATCH v3 01/62] common/cnxk: add support to lock NIX RQ contexts

From: Satha Rao

This patch considers the device argument to lock the RSS table in NIX.
It also adds a few miscellaneous fixes: disabling NIX Tx VLAN insertion
config in SMQ, enabling SSO in the NIX Tx SQ for Tx completions, and a
TM node stats API.
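As a rough illustration (not part of the patch) of how the new knobs below are
meant to be consumed, here is a minimal sketch. nix_lock_rx_setup() is a
hypothetical helper; only the struct fields and roc_nix_rss_reta_set() come
from this patch, and the devargs handling that would set these flags lives in
the cnxk PMD and is not shown here.

/* Minimal sketch, assuming the drivers/common/cnxk umbrella header layout.
 * The flags must be set before the corresponding RSS/SQ/TM setup calls.
 */
#include "roc_api.h"

static int
nix_lock_rx_setup(struct roc_nix *roc_nix, struct roc_nix_sq *sq,
                  uint16_t reta[ROC_NIX_RSS_RETA_MAX])
{
        /* Request NIX_AQ_INSTOP_LOCK ops when the RETA is programmed */
        roc_nix->lock_rx_ctx = 1;
        /* Reserve Vtag insertion space in SMQ config for HW VLAN insertion */
        roc_nix->hw_vlan_ins = true;
        /* Deliver SQE completions of this SQ to SSO (sets aq->sq.sso_ena) */
        sq->sso_ena = true;

        /* With lock_rx_ctx set, this also issues LOCK ops for the RSS entries */
        return roc_nix_rss_reta_set(roc_nix, ROC_NIX_RSS_GROUP_DEFAULT, reta);
}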
Signed-off-by: Satha Rao
---
 drivers/common/cnxk/roc_nix.h          | 31 ++++++++++--
 drivers/common/cnxk/roc_nix_queue.c    |  2 +
 drivers/common/cnxk/roc_nix_rss.c      | 51 ++++++++++++++++++--
 drivers/common/cnxk/roc_nix_tm_utils.c | 86 +++++++++++++++++++++++++++++++++-
 drivers/common/cnxk/roc_platform.h     |  2 +
 drivers/common/cnxk/version.map        |  1 +
 6 files changed, 163 insertions(+), 10 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index b39f461..6d9ac10 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -85,10 +85,11 @@ struct roc_nix_eeprom_info {
 #define ROC_NIX_LF_RX_CFG_LEN_OL3 BIT_ULL(41)

 /* Group 0 will be used for RSS, 1 -7 will be used for npc_flow RSS action*/
-#define ROC_NIX_RSS_GROUP_DEFAULT 0
-#define ROC_NIX_RSS_GRPS          8
-#define ROC_NIX_RSS_RETA_MAX      ROC_NIX_RSS_RETA_SZ_256
-#define ROC_NIX_RSS_KEY_LEN       48 /* 352 Bits */
+#define ROC_NIX_RSS_GROUP_DEFAULT    0
+#define ROC_NIX_RSS_GRPS             8
+#define ROC_NIX_RSS_RETA_MAX         ROC_NIX_RSS_RETA_SZ_256
+#define ROC_NIX_RSS_KEY_LEN          48 /* 352 Bits */
+#define ROC_NIX_RSS_MCAM_IDX_DEFAULT (-1)

 #define ROC_NIX_DEFAULT_HW_FRS 1514

@@ -184,6 +185,7 @@ struct roc_nix_sq {
         enum roc_nix_sq_max_sqe_sz max_sqe_sz;
         uint32_t nb_desc;
         uint16_t qid;
+        bool sso_ena;
         /* End of Input parameters */
         uint16_t sqes_per_sqb_log2;
         struct roc_nix *roc_nix;
@@ -241,6 +243,8 @@ struct roc_nix {
         uint16_t max_sqb_count;
         enum roc_nix_rss_reta_sz reta_sz;
         bool enable_loop;
+        bool hw_vlan_ins;
+        uint8_t lock_rx_ctx;
         /* End of input parameters */
         /* LMT line base for "Per Core Tx LMT line" mode*/
         uintptr_t lmt_base;
@@ -371,6 +375,22 @@ struct roc_nix_tm_shaper_profile {
         void (*free_fn)(void *profile);
 };

+enum roc_nix_tm_node_stats_type {
+        ROC_NIX_TM_NODE_PKTS_DROPPED,
+        ROC_NIX_TM_NODE_BYTES_DROPPED,
+        ROC_NIX_TM_NODE_GREEN_PKTS,
+        ROC_NIX_TM_NODE_GREEN_BYTES,
+        ROC_NIX_TM_NODE_YELLOW_PKTS,
+        ROC_NIX_TM_NODE_YELLOW_BYTES,
+        ROC_NIX_TM_NODE_RED_PKTS,
+        ROC_NIX_TM_NODE_RED_BYTES,
+        ROC_NIX_TM_NODE_STATS_MAX,
+};
+
+struct roc_nix_tm_node_stats {
+        uint64_t stats[ROC_NIX_TM_NODE_STATS_MAX];
+};
+
 int __roc_api roc_nix_tm_node_add(struct roc_nix *roc_nix,
                                   struct roc_nix_tm_node *roc_node);
 int __roc_api roc_nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id,
@@ -408,6 +428,9 @@ roc_nix_tm_shaper_profile_get(struct roc_nix *roc_nix, uint32_t profile_id);
 struct roc_nix_tm_shaper_profile *__roc_api roc_nix_tm_shaper_profile_next(
         struct roc_nix *roc_nix, struct roc_nix_tm_shaper_profile *__prev);
+int __roc_api roc_nix_tm_node_stats_get(struct roc_nix *roc_nix,
+                                        uint32_t node_id, bool clear,
+                                        struct roc_nix_tm_node_stats *stats);

 /*
  * TM ratelimit tree API.
  */
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index fbf7efa..1c62aa2 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -582,6 +582,7 @@ sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
         aq->sq.default_chan = nix->tx_chan_base;
         aq->sq.sqe_stype = NIX_STYPE_STF;
         aq->sq.ena = 1;
+        aq->sq.sso_ena = !!sq->sso_ena;
         if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
                 aq->sq.sqe_stype = NIX_STYPE_STP;
         aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
@@ -679,6 +680,7 @@ sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
         aq->sq.default_chan = nix->tx_chan_base;
         aq->sq.sqe_stype = NIX_STYPE_STF;
         aq->sq.ena = 1;
+        aq->sq.sso_ena = !!sq->sso_ena;
         if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
                 aq->sq.sqe_stype = NIX_STYPE_STP;
         aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
diff --git a/drivers/common/cnxk/roc_nix_rss.c b/drivers/common/cnxk/roc_nix_rss.c
index 2d7b84a..7de69aa 100644
--- a/drivers/common/cnxk/roc_nix_rss.c
+++ b/drivers/common/cnxk/roc_nix_rss.c
@@ -52,7 +52,7 @@ roc_nix_rss_key_get(struct roc_nix *roc_nix, uint8_t key[ROC_NIX_RSS_KEY_LEN])

 static int
 nix_cn9k_rss_reta_set(struct nix *nix, uint8_t group,
-                      uint16_t reta[ROC_NIX_RSS_RETA_MAX])
+                      uint16_t reta[ROC_NIX_RSS_RETA_MAX], uint8_t lock_rx_ctx)
 {
         struct mbox *mbox = (&nix->dev)->mbox;
         struct nix_aq_enq_req *req;
@@ -77,6 +77,27 @@ nix_cn9k_rss_reta_set(struct nix *nix, uint8_t group,
                 req->qidx = (group * nix->reta_sz) + idx;
                 req->ctype = NIX_AQ_CTYPE_RSS;
                 req->op = NIX_AQ_INSTOP_INIT;
+
+                if (!lock_rx_ctx)
+                        continue;
+
+                req = mbox_alloc_msg_nix_aq_enq(mbox);
+                if (!req) {
+                        /* The shared memory buffer can be full.
+                         * Flush it and retry
+                         */
+                        rc = mbox_process(mbox);
+                        if (rc < 0)
+                                return rc;
+                        req = mbox_alloc_msg_nix_aq_enq(mbox);
+                        if (!req)
+                                return NIX_ERR_NO_MEM;
+                }
+                req->rss.rq = reta[idx];
+                /* Fill AQ info */
+                req->qidx = (group * nix->reta_sz) + idx;
+                req->ctype = NIX_AQ_CTYPE_RSS;
+                req->op = NIX_AQ_INSTOP_LOCK;
         }

         rc = mbox_process(mbox);
@@ -88,7 +109,7 @@ nix_cn9k_rss_reta_set(struct nix *nix, uint8_t group,

 static int
 nix_rss_reta_set(struct nix *nix, uint8_t group,
-                 uint16_t reta[ROC_NIX_RSS_RETA_MAX])
+                 uint16_t reta[ROC_NIX_RSS_RETA_MAX], uint8_t lock_rx_ctx)
 {
         struct mbox *mbox = (&nix->dev)->mbox;
         struct nix_cn10k_aq_enq_req *req;
@@ -113,6 +134,27 @@ nix_rss_reta_set(struct nix *nix, uint8_t group,
                 req->qidx = (group * nix->reta_sz) + idx;
                 req->ctype = NIX_AQ_CTYPE_RSS;
                 req->op = NIX_AQ_INSTOP_INIT;
+
+                if (!lock_rx_ctx)
+                        continue;
+
+                req = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+                if (!req) {
+                        /* The shared memory buffer can be full.
+                         * Flush it and retry
+                         */
+                        rc = mbox_process(mbox);
+                        if (rc < 0)
+                                return rc;
+                        req = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+                        if (!req)
+                                return NIX_ERR_NO_MEM;
+                }
+                req->rss.rq = reta[idx];
+                /* Fill AQ info */
+                req->qidx = (group * nix->reta_sz) + idx;
+                req->ctype = NIX_AQ_CTYPE_RSS;
+                req->op = NIX_AQ_INSTOP_LOCK;
         }

         rc = mbox_process(mbox);
@@ -133,9 +175,10 @@ roc_nix_rss_reta_set(struct roc_nix *roc_nix, uint8_t group,
                 return NIX_ERR_PARAM;

         if (roc_model_is_cn9k())
-                rc = nix_cn9k_rss_reta_set(nix, group, reta);
+                rc = nix_cn9k_rss_reta_set(nix, group, reta,
+                                           roc_nix->lock_rx_ctx);
         else
-                rc = nix_rss_reta_set(nix, group, reta);
+                rc = nix_rss_reta_set(nix, group, reta, roc_nix->lock_rx_ctx);

         if (rc)
                 return rc;
diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c
index 1d7dd68..6b9543e 100644
--- a/drivers/common/cnxk/roc_nix_tm_utils.c
+++ b/drivers/common/cnxk/roc_nix_tm_utils.c
@@ -409,6 +409,7 @@ nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,
                          volatile uint64_t *reg, volatile uint64_t *regval,
                          volatile uint64_t *regval_mask)
 {
+        struct roc_nix *roc_nix = nix_priv_to_roc_nix(nix);
         uint8_t k = 0, hw_lvl, parent_lvl;
         uint64_t parent = 0, child = 0;
         enum roc_nix_tm_tree tree;
@@ -454,8 +455,11 @@ nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node,
                 reg[k] = NIX_AF_SMQX_CFG(schq);
                 regval[k] = (BIT_ULL(50) | NIX_MIN_HW_FRS |
                              ((nix->mtu & 0xFFFF) << 8));
-                regval_mask[k] =
-                        ~(BIT_ULL(50) | GENMASK_ULL(6, 0) | GENMASK_ULL(23, 8));
+                /* Maximum Vtag insertion size as a multiple of four bytes */
+                if (roc_nix->hw_vlan_ins)
+                        regval[k] |= (0x2ULL << 36);
+                regval_mask[k] = ~(BIT_ULL(50) | GENMASK_ULL(6, 0) |
+                                   GENMASK_ULL(23, 8) | GENMASK_ULL(38, 36));
                 k++;

                 /* Parent and schedule conf */
@@ -1000,3 +1004,81 @@ nix_tm_shaper_profile_free(struct nix_tm_shaper_profile *profile)

         (profile->free_fn)(profile);
 }
+
+int
+roc_nix_tm_node_stats_get(struct roc_nix *roc_nix, uint32_t node_id, bool clear,
+                          struct roc_nix_tm_node_stats *n_stats)
+{
+        struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+        struct mbox *mbox = (&nix->dev)->mbox;
+        struct nix_txschq_config *req, *rsp;
+        struct nix_tm_node *node;
+        uint32_t schq;
+        int rc, i;
+
+        node = nix_tm_node_search(nix, node_id, ROC_NIX_TM_USER);
+        if (!node)
+                return NIX_ERR_TM_INVALID_NODE;
+
+        if (node->hw_lvl != NIX_TXSCH_LVL_TL1)
+                return NIX_ERR_OP_NOTSUP;
+
+        schq = node->hw_id;
+        /* Skip fetch if not requested */
+        if (!n_stats)
+                goto clear_stats;
+
+        memset(n_stats, 0, sizeof(struct roc_nix_tm_node_stats));
+        /* Check if node has HW resource */
+        if (!(node->flags & NIX_TM_NODE_HWRES))
+                return 0;
+
+        req = mbox_alloc_msg_nix_txschq_cfg(mbox);
+        req->read = 1;
+        req->lvl = NIX_TXSCH_LVL_TL1;
+
+        i = 0;
+        req->reg[i++] = NIX_AF_TL1X_DROPPED_PACKETS(schq);
+        req->reg[i++] = NIX_AF_TL1X_DROPPED_BYTES(schq);
+        req->reg[i++] = NIX_AF_TL1X_GREEN_PACKETS(schq);
+        req->reg[i++] = NIX_AF_TL1X_GREEN_BYTES(schq);
+        req->reg[i++] = NIX_AF_TL1X_YELLOW_PACKETS(schq);
+        req->reg[i++] = NIX_AF_TL1X_YELLOW_BYTES(schq);
+        req->reg[i++] = NIX_AF_TL1X_RED_PACKETS(schq);
+        req->reg[i++] = NIX_AF_TL1X_RED_BYTES(schq);
+        req->num_regs = i;
+
+        rc = mbox_process_msg(mbox, (void **)&rsp);
+        if (rc)
+                return rc;
+
+        /* Return stats */
+        n_stats->stats[ROC_NIX_TM_NODE_PKTS_DROPPED] = rsp->regval[0];
+        n_stats->stats[ROC_NIX_TM_NODE_BYTES_DROPPED] = rsp->regval[1];
+        n_stats->stats[ROC_NIX_TM_NODE_GREEN_PKTS] = rsp->regval[2];
+        n_stats->stats[ROC_NIX_TM_NODE_GREEN_BYTES] = rsp->regval[3];
+        n_stats->stats[ROC_NIX_TM_NODE_YELLOW_PKTS] = rsp->regval[4];
+        n_stats->stats[ROC_NIX_TM_NODE_YELLOW_BYTES] = rsp->regval[5];
+        n_stats->stats[ROC_NIX_TM_NODE_RED_PKTS] = rsp->regval[6];
+        n_stats->stats[ROC_NIX_TM_NODE_RED_BYTES] = rsp->regval[7];
+
+clear_stats:
+        if (!clear)
+                return 0;
+
+        /* Clear all the stats */
+        req = mbox_alloc_msg_nix_txschq_cfg(mbox);
+        req->lvl = NIX_TXSCH_LVL_TL1;
+        i = 0;
+        req->reg[i++] = NIX_AF_TL1X_DROPPED_PACKETS(schq);
+        req->reg[i++] = NIX_AF_TL1X_DROPPED_BYTES(schq);
+        req->reg[i++] = NIX_AF_TL1X_GREEN_PACKETS(schq);
+        req->reg[i++] = NIX_AF_TL1X_GREEN_BYTES(schq);
+        req->reg[i++] = NIX_AF_TL1X_YELLOW_PACKETS(schq);
+        req->reg[i++] = NIX_AF_TL1X_YELLOW_BYTES(schq);
+        req->reg[i++] = NIX_AF_TL1X_RED_PACKETS(schq);
+        req->reg[i++] = NIX_AF_TL1X_RED_BYTES(schq);
+        req->num_regs = i;
+
+        return mbox_process_msg(mbox, (void **)&rsp);
+}
diff --git a/drivers/common/cnxk/roc_platform.h b/drivers/common/cnxk/roc_platform.h
index 7864fa4..911ae15 100644
--- a/drivers/common/cnxk/roc_platform.h
+++ b/drivers/common/cnxk/roc_platform.h
@@ -127,6 +127,8 @@
 #define plt_memzone_reserve_cache_align(name, sz)                              \
         rte_memzone_reserve_aligned(name, sz, 0, 0, RTE_CACHE_LINE_SIZE)
 #define plt_memzone_free rte_memzone_free
+#define plt_memzone_reserve_aligned(name, len, flags, align)                   \
+        rte_memzone_reserve_aligned((name), (len), 0, (flags), (align))

 #define plt_tsc_hz rte_get_tsc_hz
 #define plt_delay_ms rte_delay_ms
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 8e67c83..c39d76f 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -123,6 +123,7 @@ INTERNAL {
         roc_nix_tm_node_parent_update;
         roc_nix_tm_node_pkt_mode_update;
         roc_nix_tm_node_shaper_update;
+        roc_nix_tm_node_stats_get;
         roc_nix_tm_node_suspend_resume;
         roc_nix_tm_prealloc_res;
         roc_nix_tm_rlimit_sq;
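For completeness, a hedged usage sketch of the roc_nix_tm_node_stats_get() API
added above (not part of the patch). dump_tl1_stats() is a hypothetical helper;
it assumes node_id refers to a user-created TM node whose HW level is TL1,
since other levels return NIX_ERR_OP_NOTSUP.

/* Illustrative only: read (and optionally clear) TL1 node statistics. */
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

#include "roc_api.h"

static int
dump_tl1_stats(struct roc_nix *roc_nix, uint32_t node_id, bool clear)
{
        struct roc_nix_tm_node_stats stats;
        int rc;

        /* Fetch the TL1 counters; with clear set, HW counters are also reset */
        rc = roc_nix_tm_node_stats_get(roc_nix, node_id, clear, &stats);
        if (rc)
                return rc;

        printf("dropped: %" PRIu64 " pkts, %" PRIu64 " bytes, red pkts: %" PRIu64 "\n",
               stats.stats[ROC_NIX_TM_NODE_PKTS_DROPPED],
               stats.stats[ROC_NIX_TM_NODE_BYTES_DROPPED],
               stats.stats[ROC_NIX_TM_NODE_RED_PKTS]);
        return 0;
}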