From patchwork Mon Jan 30 06:26:38 2023
X-Patchwork-Submitter: Junfeng Guo
X-Patchwork-Id: 122656
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Junfeng Guo
To: qi.z.zhang@intel.com, jingjing.wu@intel.com, ferruh.yigit@amd.com,
 beilei.xing@intel.com
Cc: dev@dpdk.org, xiaoyun.li@intel.com, helin.zhang@intel.com,
 Junfeng Guo, Rushil Gupta, Jordan Kimbrough, Jeroen de Borst
Subject: [RFC v2 5/9] net/gve: support basic Tx data path for DQO
Date: Mon, 30 Jan 2023 14:26:38 +0800
Message-Id: <20230130062642.3337239-6-junfeng.guo@intel.com>
X-Mailer: git-send-email 2.34.1
In-Reply-To: <20230130062642.3337239-1-junfeng.guo@intel.com>
References: <20230118025347.1567078-1-junfeng.guo@intel.com>
 <20230130062642.3337239-1-junfeng.guo@intel.com>

Add basic Tx data path support for DQO.
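The burst function is installed as eth_dev->tx_pkt_burst, so applications
reach it through the normal ethdev API. For reference only (not part of
this patch), a minimal caller might look like the sketch below; the port
ID, queue ID and the send_burst() helper name are illustrative:

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Transmit a burst on queue 0 of the given port; the driver's
     * tx_pkt_burst callback (gve_tx_burst_dqo when the DQO format is
     * negotiated) runs underneath rte_eth_tx_burst(). Mbufs the driver
     * does not accept are freed by the caller.
     */
    static void
    send_burst(uint16_t port_id, struct rte_mbuf **pkts, uint16_t nb_pkts)
    {
            uint16_t sent = 0;

            while (sent < nb_pkts) {
                    uint16_t n = rte_eth_tx_burst(port_id, 0,
                                                  &pkts[sent],
                                                  nb_pkts - sent);
                    if (n == 0)
                            break; /* ring full, stop and drop the rest */
                    sent += n;
            }

            while (sent < nb_pkts)
                    rte_pktmbuf_free(pkts[sent++]);
    }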
Signed-off-by: Junfeng Guo
Signed-off-by: Rushil Gupta
Signed-off-by: Jordan Kimbrough
Signed-off-by: Jeroen de Borst
---
 drivers/net/gve/gve_ethdev.c |   1 +
 drivers/net/gve/gve_ethdev.h |   4 +
 drivers/net/gve/gve_tx_dqo.c | 141 +++++++++++++++++++++++++++++++++++
 3 files changed, 146 insertions(+)

diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index 7c4be3a1cb..512a038968 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -703,6 +703,7 @@ gve_dev_init(struct rte_eth_dev *eth_dev)
 	} else {
 		/* override Tx/Rx setup/release eth_dev ops */
 		gve_eth_dev_ops_override(&gve_local_eth_dev_ops);
+		eth_dev->tx_pkt_burst = gve_tx_burst_dqo;
 	}
 
 	eth_dev->dev_ops = &gve_local_eth_dev_ops;
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 93314f2db3..ba657dd6c1 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -125,6 +125,7 @@ struct gve_tx_queue {
 	uint8_t cur_gen_bit;
 	uint32_t last_desc_cleaned;
 	void **txqs;
+	uint16_t re_cnt;
 
 	/* Only valid for DQO_RDA queue format */
 	struct gve_tx_queue *complq;
@@ -365,4 +366,7 @@ gve_stop_tx_queues_dqo(struct rte_eth_dev *dev);
 void
 gve_stop_rx_queues_dqo(struct rte_eth_dev *dev);
 
+uint16_t
+gve_tx_burst_dqo(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+
 #endif /* _GVE_ETHDEV_H_ */
diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c
index e2e4153f27..3583c82246 100644
--- a/drivers/net/gve/gve_tx_dqo.c
+++ b/drivers/net/gve/gve_tx_dqo.c
@@ -5,6 +5,147 @@
 #include "gve_ethdev.h"
 #include "base/gve_adminq.h"
 
+static inline void
+gve_tx_clean_dqo(struct gve_tx_queue *txq)
+{
+	struct gve_tx_compl_desc *compl_ring;
+	struct gve_tx_compl_desc *compl_desc;
+	struct gve_tx_queue *aim_txq;
+	uint16_t nb_desc_clean;
+	struct rte_mbuf *txe;
+	uint16_t compl_tag;
+	uint16_t next;
+
+	next = txq->complq_tail;
+	compl_ring = txq->compl_ring;
+	compl_desc = &compl_ring[next];
+
+	if (compl_desc->generation != txq->cur_gen_bit)
+		return;
+
+	compl_tag = rte_le_to_cpu_16(compl_desc->completion_tag);
+
+	aim_txq = txq->txqs[compl_desc->id];
+
+	switch (compl_desc->type) {
+	case GVE_COMPL_TYPE_DQO_DESC:
+		/* need to clean Descs from last_cleaned to compl_tag */
+		if (aim_txq->last_desc_cleaned > compl_tag)
+			nb_desc_clean = aim_txq->nb_tx_desc - aim_txq->last_desc_cleaned +
+					compl_tag;
+		else
+			nb_desc_clean = compl_tag - aim_txq->last_desc_cleaned;
+		aim_txq->nb_free += nb_desc_clean;
+		aim_txq->last_desc_cleaned = compl_tag;
+		break;
+	case GVE_COMPL_TYPE_DQO_REINJECTION:
+		PMD_DRV_LOG(DEBUG, "GVE_COMPL_TYPE_DQO_REINJECTION !!!");
+		/* FALLTHROUGH */
+	case GVE_COMPL_TYPE_DQO_PKT:
+		txe = aim_txq->sw_ring[compl_tag];
+		if (txe != NULL) {
+			rte_pktmbuf_free_seg(txe);
+			txe = NULL;
+		}
+		break;
+	case GVE_COMPL_TYPE_DQO_MISS:
+		rte_delay_us_sleep(1);
+		PMD_DRV_LOG(DEBUG, "GVE_COMPL_TYPE_DQO_MISS ignored !!!");
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "unknown completion type.");
+		return;
+	}
+
+	next++;
+	if (next == txq->nb_tx_desc * DQO_TX_MULTIPLIER) {
+		next = 0;
+		txq->cur_gen_bit ^= 1;
+	}
+
+	txq->complq_tail = next;
+}
+
+uint16_t
+gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct gve_tx_queue *txq = tx_queue;
+	volatile union gve_tx_desc_dqo *txr;
+	volatile union gve_tx_desc_dqo *txd;
+	struct rte_mbuf **sw_ring;
+	struct rte_mbuf *tx_pkt;
+	uint16_t mask, sw_mask;
+	uint16_t nb_to_clean;
+	uint16_t nb_tx = 0;
+	uint16_t nb_used;
+	uint16_t tx_id;
+	uint16_t sw_id;
+
+	sw_ring = txq->sw_ring;
+	txr = txq->tx_ring;
+
+	mask = txq->nb_tx_desc - 1;
+	sw_mask = txq->sw_size - 1;
+	tx_id = txq->tx_tail;
+	sw_id = txq->sw_tail;
+
+	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+		tx_pkt = tx_pkts[nb_tx];
+
+		if (txq->nb_free <= txq->free_thresh) {
+			nb_to_clean = DQO_TX_MULTIPLIER * txq->rs_thresh;
+			while (nb_to_clean--)
+				gve_tx_clean_dqo(txq);
+		}
+
+		if (txq->nb_free < tx_pkt->nb_segs)
+			break;
+
+		nb_used = tx_pkt->nb_segs;
+
+		do {
+			txd = &txr[tx_id];
+
+			sw_ring[sw_id] = tx_pkt;
+
+			/* fill Tx descriptor */
+			txd->pkt.buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_pkt));
+			txd->pkt.dtype = GVE_TX_PKT_DESC_DTYPE_DQO;
+			txd->pkt.compl_tag = rte_cpu_to_le_16(sw_id);
+			txd->pkt.buf_size = RTE_MIN(tx_pkt->data_len, GVE_TX_MAX_BUF_SIZE_DQO);
+
+			/* size of desc_ring and sw_ring could be different */
+			tx_id = (tx_id + 1) & mask;
+			sw_id = (sw_id + 1) & sw_mask;
+
+			tx_pkt = tx_pkt->next;
+		} while (tx_pkt);
+
+		/* fill the last descriptor with End of Packet (EOP) bit */
+		txd->pkt.end_of_packet = 1;
+
+		txq->nb_free -= nb_used;
+		txq->nb_used += nb_used;
+	}
+
+	/* update the tail pointer if any packets were processed */
+	if (nb_tx > 0) {
+		/* Request a descriptor completion on the last descriptor */
+		txq->re_cnt += nb_tx;
+		if (txq->re_cnt >= GVE_TX_MIN_RE_INTERVAL) {
+			txd = &txr[(tx_id - 1) & mask];
+			txd->pkt.report_event = true;
+			txq->re_cnt = 0;
+		}
+
+		rte_write32(tx_id, txq->qtx_tail);
+		txq->tx_tail = tx_id;
+		txq->sw_tail = sw_id;
+	}
+
+	return nb_tx;
+}
+
 static inline void
 gve_release_txq_mbufs_dqo(struct gve_tx_queue *txq)
 {