From patchwork Wed Sep 21 16:43:42 2022
X-Patchwork-Submitter: Pavan Nikhilesh Bhagavatula
X-Patchwork-Id: 116588
X-Patchwork-Delegate: jerinj@marvell.com
Subject: [PATCH v2 3/3] event/cnxk: update event vector Tx routine
Date: Wed, 21 Sep 2022 22:13:42 +0530
Message-ID: <20220921164342.2174-3-pbhagavatula@marvell.com>
In-Reply-To: <20220921164342.2174-1-pbhagavatula@marvell.com>
References: <20220816154932.10168-1-pbhagavatula@marvell.com>
 <20220921164342.2174-1-pbhagavatula@marvell.com>
List-Id: DPDK patches and discussions

From: Pavan Nikhilesh

Update event vector transmit routine to honor elem_offset.
Use ``rte_event_vector::elem_offset`` to report partial vector
transmission to the application when there is not enough space
in the SQ.
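For illustration of the new contract (a hedged sketch, not part of
this patch; ``dev_id`` and ``tx_port`` are placeholder names for the
application's event device and Tx adapter event port), an application
can simply re-enqueue the same event until it is accepted, since on a
partial transmit the driver rewrites the vector header so that
``elem_offset``/``nb_elem`` describe only the unsent mbufs:

	#include <rte_event_eth_tx_adapter.h>
	#include <rte_pause.h>

	static inline void
	app_tx_vector_retry(uint8_t dev_id, uint8_t tx_port,
			    struct rte_event *ev)
	{
		/* Returns 0 while the SQ is full; the event is not
		 * consumed and its vector now starts at the first
		 * unsent mbuf, so retrying resumes exactly where the
		 * previous attempt stopped.
		 */
		while (rte_event_eth_tx_adapter_enqueue(dev_id, tx_port,
							ev, 1, 0) == 0)
			rte_pause();
	}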
Signed-off-by: Pavan Nikhilesh
---
 drivers/event/cnxk/cn10k_worker.h | 145 ++++++++++++++++++------------
 1 file changed, 89 insertions(+), 56 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 55d9e56766..23b5d7ca7d 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -534,12 +534,21 @@ cn10k_sso_txq_fc_wait(const struct cn10k_eth_txq *txq)
 		;
 }
 
-static __rte_always_inline void
+static __rte_always_inline int32_t
+cn10k_sso_sq_depth(const struct cn10k_eth_txq *txq)
+{
+	return (txq->nb_sqb_bufs_adj -
+		__atomic_load_n((int16_t *)txq->fc_mem, __ATOMIC_RELAXED))
+	       << txq->sqes_per_sqb_log2;
+}
+
+static __rte_always_inline uint16_t
 cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd,
 		 uint16_t lmt_id, uintptr_t lmt_addr, uint8_t sched_type,
 		 const uint64_t *txq_data, const uint32_t flags)
 {
 	uint8_t lnum = 0, loff = 0, shft = 0;
+	uint16_t ref_cnt = m->refcnt;
 	struct cn10k_eth_txq *txq;
 	uintptr_t laddr;
 	uint16_t segdw;
@@ -547,6 +556,9 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd,
 	bool sec;
 
 	txq = cn10k_sso_hws_xtract_meta(m, txq_data);
+	if (cn10k_sso_sq_depth(txq) <= 0)
+		return 0;
+
 	cn10k_nix_tx_skeleton(txq, cmd, flags, 0);
 	/* Perform header writes before barrier
 	 * for TSO
@@ -584,55 +596,66 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd,
 	cn10k_sso_txq_fc_wait(txq);
 
 	roc_lmt_submit_steorl(lmt_id, pa);
+
+	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+		if (ref_cnt > 1)
+			rte_io_wmb();
+	}
+	return 1;
 }
 
-static __rte_always_inline void
+static __rte_always_inline uint16_t
 cn10k_sso_vwqe_split_tx(struct cn10k_sso_hws *ws, struct rte_mbuf **mbufs,
-			uint16_t nb_mbufs, uint64_t *cmd, uint16_t lmt_id,
-			uintptr_t lmt_addr, uint8_t sched_type,
+			uint16_t nb_mbufs, uint64_t *cmd,
 			const uint64_t *txq_data, const uint32_t flags)
 {
-	uint16_t port[4], queue[4];
-	uint16_t i, j, pkts, scalar;
+	uint16_t count = 0, port, queue, ret = 0, last_idx = 0;
 	struct cn10k_eth_txq *txq;
+	int32_t space;
+	int i;
 
-	scalar = nb_mbufs & (NIX_DESCS_PER_LOOP - 1);
-	pkts = RTE_ALIGN_FLOOR(nb_mbufs, NIX_DESCS_PER_LOOP);
-
-	for (i = 0; i < pkts; i += NIX_DESCS_PER_LOOP) {
-		port[0] = mbufs[i]->port;
-		port[1] = mbufs[i + 1]->port;
-		port[2] = mbufs[i + 2]->port;
-		port[3] = mbufs[i + 3]->port;
-
-		queue[0] = rte_event_eth_tx_adapter_txq_get(mbufs[i]);
-		queue[1] = rte_event_eth_tx_adapter_txq_get(mbufs[i + 1]);
-		queue[2] = rte_event_eth_tx_adapter_txq_get(mbufs[i + 2]);
-		queue[3] = rte_event_eth_tx_adapter_txq_get(mbufs[i + 3]);
-
-		if (((port[0] ^ port[1]) & (port[2] ^ port[3])) ||
-		    ((queue[0] ^ queue[1]) & (queue[2] ^ queue[3]))) {
-			for (j = 0; j < 4; j++)
-				cn10k_sso_tx_one(ws, mbufs[i + j], cmd, lmt_id,
-						 lmt_addr, sched_type, txq_data,
-						 flags);
-		} else {
-			txq = (struct cn10k_eth_txq
-				       *)(txq_data[(txq_data[port[0]] >> 48) +
-						   queue[0]] &
-					  (BIT_ULL(48) - 1));
-			cn10k_nix_xmit_pkts_vector(txq, (uint64_t *)ws,
-						   &mbufs[i], 4, cmd,
-						   flags | NIX_TX_VWQE_F);
+	port = mbufs[0]->port;
+	queue = rte_event_eth_tx_adapter_txq_get(mbufs[0]);
+	for (i = 0; i < nb_mbufs; i++) {
+		if (port != mbufs[i]->port ||
+		    queue != rte_event_eth_tx_adapter_txq_get(mbufs[i])) {
+			if (count) {
+				txq = (struct cn10k_eth_txq
+					       *)(txq_data[(txq_data[port] >>
+							    48) +
+							   queue] &
+						  (BIT_ULL(48) - 1));
+				/* Transmit based on queue depth */
+				space = cn10k_sso_sq_depth(txq);
+				if (space < count)
+					goto done;
+				cn10k_nix_xmit_pkts_vector(
+					txq, (uint64_t *)ws, &mbufs[last_idx],
+					count, cmd, flags | NIX_TX_VWQE_F);
+				ret += count;
+				count = 0;
+			}
+			port = mbufs[i]->port;
+			queue = rte_event_eth_tx_adapter_txq_get(mbufs[i]);
+			last_idx = i;
 		}
+		count++;
 	}
-
-	mbufs += i;
-
-	for (i = 0; i < scalar; i++) {
-		cn10k_sso_tx_one(ws, mbufs[i], cmd, lmt_id, lmt_addr,
-				 sched_type, txq_data, flags);
+	if (count) {
+		txq = (struct cn10k_eth_txq
+			       *)(txq_data[(txq_data[port] >> 48) + queue] &
+				  (BIT_ULL(48) - 1));
+		/* Transmit based on queue depth */
+		space = cn10k_sso_sq_depth(txq);
+		if (space < count)
+			goto done;
+		cn10k_nix_xmit_pkts_vector(txq, (uint64_t *)ws,
+					   &mbufs[last_idx], count, cmd,
+					   flags | NIX_TX_VWQE_F);
+		ret += count;
 	}
+done:
+	return ret;
 }
 
 static __rte_always_inline uint16_t
@@ -651,7 +674,11 @@ cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
 	if (ev->event_type & RTE_EVENT_TYPE_VECTOR) {
 		struct rte_mbuf **mbufs = ev->vec->mbufs;
 		uint64_t meta = *(uint64_t *)ev->vec;
+		uint16_t offset, nb_pkts, left;
+		int32_t space;
 
+		nb_pkts = meta & 0xFFFF;
+		offset = (meta >> 16) & 0xFFF;
 		if (meta & BIT(31)) {
 			txq = (struct cn10k_eth_txq
 				       *)(txq_data[(txq_data[meta >> 32] >>
@@ -659,29 +686,35 @@ cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
 					    (meta >> 48)] &
 					   (BIT_ULL(48) - 1));
 
-			cn10k_nix_xmit_pkts_vector(txq, (uint64_t *)ws, mbufs,
-						   meta & 0xFFFF, cmd,
+			/* Transmit based on queue depth */
+			space = cn10k_sso_sq_depth(txq);
+			if (space <= 0)
+				return 0;
+			nb_pkts = nb_pkts < space ? nb_pkts : (uint16_t)space;
+			cn10k_nix_xmit_pkts_vector(txq, (uint64_t *)ws,
+						   mbufs + offset, nb_pkts, cmd,
 						   flags | NIX_TX_VWQE_F);
 		} else {
-			cn10k_sso_vwqe_split_tx(
-				ws, mbufs, meta & 0xFFFF, cmd, lmt_id, lmt_addr,
-				ev->sched_type, txq_data, flags);
+			nb_pkts = cn10k_sso_vwqe_split_tx(ws, mbufs + offset,
+							  nb_pkts, cmd,
+							  txq_data, flags);
+		}
+		left = (meta & 0xFFFF) - nb_pkts;
+
+		if (!left) {
+			rte_mempool_put(rte_mempool_from_obj(ev->vec), ev->vec);
+		} else {
+			*(uint64_t *)ev->vec =
+				(meta & ~0xFFFFFFFUL) |
+				(((uint32_t)nb_pkts + offset) << 16) | left;
 		}
-		rte_mempool_put(rte_mempool_from_obj(ev->vec), ev->vec);
 		rte_prefetch0(ws);
-		return 1;
+		return !left;
 	}
 
 	m = ev->mbuf;
-	txq = cn10k_sso_hws_xtract_meta(m, txq_data);
-	if (((txq->nb_sqb_bufs_adj -
-	      __atomic_load_n((int16_t *)txq->fc_mem, __ATOMIC_RELAXED))
-	     << txq->sqes_per_sqb_log2) <= 0)
-		return 0;
-	cn10k_sso_tx_one(ws, m, cmd, lmt_id, lmt_addr, ev->sched_type, txq_data,
-			 flags);
-
-	return 1;
+	return cn10k_sso_tx_one(ws, m, cmd, lmt_id, lmt_addr, ev->sched_type,
+				txq_data, flags);
 }
 
 #define T(name, sz, flags)                                                     \
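The new ``cn10k_sso_sq_depth()`` helper above centralizes the free-depth
check that ``cn10k_sso_hws_event_tx()`` previously open-coded for the
single-mbuf path. A minimal standalone sketch of the same arithmetic,
assuming a reduced ``struct txq_stub`` with only the three fields used
here (the driver's ``cn10k_eth_txq`` has many more):

	#include <stdatomic.h>
	#include <stdint.h>

	struct txq_stub {
		int16_t nb_sqb_bufs_adj;    /* SQB budget after reservations */
		_Atomic int16_t *fc_mem;    /* SQBs in use, updated by NIX HW */
		uint16_t sqes_per_sqb_log2; /* log2(SQ entries per SQB) */
	};

	/* Free SQ entries = free SQBs x SQEs per SQB (a power of two,
	 * hence the shift). A result <= 0 means the send queue has no
	 * room and the event must be returned to the application
	 * unconsumed.
	 */
	static inline int32_t
	sq_free_depth(const struct txq_stub *txq)
	{
		int16_t used = atomic_load_explicit(txq->fc_mem,
						    memory_order_relaxed);

		return (txq->nb_sqb_bufs_adj - used)
		       << txq->sqes_per_sqb_log2;
	}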
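The masks in ``cn10k_sso_hws_event_tx()`` imply the 64-bit vector header
layout: ``nb_elem`` in bits 15:0, ``elem_offset`` in bits 27:16 and
``attr_valid`` in bit 31, with port and queue in the upper 32 bits when
``attr_valid`` is set. A hedged sketch of the partial-transmit header
update performed above, where ``done`` is the number of mbufs sent in
this attempt (``vec_hdr_advance`` is an illustrative name, not a driver
function; ULL is used instead of the driver's UL for portability):

	#include <stdint.h>

	static inline uint64_t
	vec_hdr_advance(uint64_t meta, uint16_t done)
	{
		uint16_t nb_elem = meta & 0xFFFF;       /* mbufs pending */
		uint16_t offset = (meta >> 16) & 0xFFF; /* first pending */

		/* Clear nb_elem and elem_offset (bits 27:0), keep
		 * attr_valid/port/queue, then store the advanced offset
		 * and the new pending count so the next attempt resumes
		 * at the first unsent mbuf.
		 */
		return (meta & ~0xFFFFFFFULL) |
		       (((uint32_t)done + offset) << 16) | (nb_elem - done);
	}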