From patchwork Fri Feb 17 07:32:27 2023
X-Patchwork-Submitter: Junfeng Guo
X-Patchwork-Id: 124116
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Junfeng Guo
To: qi.z.zhang@intel.com, jingjing.wu@intel.com, ferruh.yigit@amd.com, beilei.xing@intel.com
Cc: dev@dpdk.org, xiaoyun.li@intel.com, helin.zhang@intel.com, Junfeng Guo, Rushil Gupta, Jeroen de Borst
Subject: [RFC v3 09/10] net/gve: support jumbo frame for GQI
Date: Fri, 17 Feb 2023 15:32:27 +0800
Message-Id: <20230217073228.340815-10-junfeng.guo@intel.com>
In-Reply-To: <20230217073228.340815-1-junfeng.guo@intel.com>
References: <20230130062642.3337239-1-junfeng.guo@intel.com> <20230217073228.340815-1-junfeng.guo@intel.com>

Add multi-segment support to enable GQI Rx Jumbo Frame.
Signed-off-by: Rushil Gupta
Signed-off-by: Junfeng Guo
Signed-off-by: Jeroen de Borst
---
 drivers/net/gve/gve_ethdev.h |   8 ++
 drivers/net/gve/gve_rx.c     | 137 +++++++++++++++++++++++++----------
 2 files changed, 108 insertions(+), 37 deletions(-)

diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index bca6e86ef0..02b997312c 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -142,6 +142,13 @@ struct gve_tx_queue {
 	uint8_t is_gqi_qpl;
 };
 
+struct gve_rx_ctx {
+	struct rte_mbuf *mbuf_head;
+	struct rte_mbuf *mbuf_tail;
+	uint16_t total_frags;
+	bool drop_pkt;
+};
+
 struct gve_rx_queue {
 	volatile struct gve_rx_desc *rx_desc_ring;
 	volatile union gve_rx_data_slot *rx_data_ring;
@@ -150,6 +157,7 @@ struct gve_rx_queue {
 	uint64_t rx_ring_phys_addr;
 	struct rte_mbuf **sw_ring;
 	struct rte_mempool *mpool;
+	struct gve_rx_ctx ctx;
 
 	uint16_t rx_tail;
 	uint16_t nb_rx_desc;
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index e264bcadad..ecef0c4a86 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -5,6 +5,8 @@
 #include "gve_ethdev.h"
 #include "base/gve_adminq.h"
 
+#define GVE_PKT_CONT_BIT_IS_SET(x) (GVE_RXF_PKT_CONT & (x))
+
 static inline void
 gve_rx_refill(struct gve_rx_queue *rxq)
 {
@@ -82,43 +84,72 @@ gve_rx_refill(struct gve_rx_queue *rxq)
 	}
 }
 
-uint16_t
-gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+/*
+ * This method processes a single rte_mbuf and handles packet segmentation.
+ * In QPL mode it copies the packet data from the queue page list into the mbuf.
+ */
+static void
+gve_rx_mbuf(struct gve_rx_queue *rxq, struct rte_mbuf *rxe, uint16_t len,
+	    uint16_t rx_id)
 {
-	volatile struct gve_rx_desc *rxr, *rxd;
-	struct gve_rx_queue *rxq = rx_queue;
-	uint16_t rx_id = rxq->rx_tail;
-	struct rte_mbuf *rxe;
-	uint16_t nb_rx, len;
-	uint64_t bytes = 0;
+	uint16_t padding = 0;
 	uint64_t addr;
-	uint16_t i;
-
-	rxr = rxq->rx_desc_ring;
-	nb_rx = 0;
 
-	for (i = 0; i < nb_pkts; i++) {
-		rxd = &rxr[rx_id];
-		if (GVE_SEQNO(rxd->flags_seq) != rxq->expected_seqno)
-			break;
-
-		if (rxd->flags_seq & GVE_RXF_ERR) {
-			rxq->errors++;
-			continue;
-		}
-
-		len = rte_be_to_cpu_16(rxd->len) - GVE_RX_PAD;
-		rxe = rxq->sw_ring[rx_id];
-		if (rxq->is_gqi_qpl) {
-			addr = (uint64_t)(rxq->qpl->mz->addr) + rx_id * PAGE_SIZE + GVE_RX_PAD;
-			rte_memcpy((void *)((size_t)rxe->buf_addr + rxe->data_off),
-				   (void *)(size_t)addr, len);
-		}
+	rxe->data_len = len;
+	if (!rxq->ctx.mbuf_head) {
+		rxq->ctx.mbuf_head = rxe;
+		rxq->ctx.mbuf_tail = rxe;
+		rxe->nb_segs = 1;
 		rxe->pkt_len = len;
 		rxe->data_len = len;
 		rxe->port = rxq->port_id;
 		rxe->ol_flags = 0;
+		padding = GVE_RX_PAD;
+	} else {
+		rxq->ctx.mbuf_head->pkt_len += len;
+		rxq->ctx.mbuf_head->nb_segs += 1;
+		rxq->ctx.mbuf_tail->next = rxe;
+		rxq->ctx.mbuf_tail = rxe;
+	}
+	if (rxq->is_gqi_qpl) {
+		addr = (uint64_t)(rxq->qpl->mz->addr) + rx_id * PAGE_SIZE + padding;
+		rte_memcpy((void *)((size_t)rxe->buf_addr + rxe->data_off),
+			   (void *)(size_t)addr, len);
+	}
+}
+
+/*
+ * This method processes a single packet fragment associated with the
+ * passed packet descriptor.
+ * It returns whether the fragment is the last fragment of a packet.
+ */
+static bool
+gve_rx(struct gve_rx_queue *rxq, volatile struct gve_rx_desc *rxd, uint16_t rx_id)
+{
+	bool is_last_frag = !GVE_PKT_CONT_BIT_IS_SET(rxd->flags_seq);
+	uint16_t frag_size = rte_be_to_cpu_16(rxd->len);
+	struct gve_rx_ctx *ctx = &rxq->ctx;
+	bool is_first_frag = ctx->total_frags == 0;
+	struct rte_mbuf *rxe;
+
+	if (ctx->drop_pkt)
+		goto finish_frag;
+
+	if (rxd->flags_seq & GVE_RXF_ERR) {
+		ctx->drop_pkt = true;
+		rxq->errors++;
+		goto finish_frag;
+	}
+
+	if (is_first_frag)
+		frag_size -= GVE_RX_PAD;
+
+	rxe = rxq->sw_ring[rx_id];
+	gve_rx_mbuf(rxq, rxe, frag_size, rx_id);
+	rxq->bytes += frag_size;
+
+	if (is_first_frag) {
 		if (rxd->flags_seq & GVE_RXF_TCP)
 			rxe->packet_type |= RTE_PTYPE_L4_TCP;
 		if (rxd->flags_seq & GVE_RXF_UDP)
@@ -132,28 +163,60 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			rxe->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
 			rxe->hash.rss = rte_be_to_cpu_32(rxd->rss_hash);
 		}
+	}
 
-		rxq->expected_seqno = gve_next_seqno(rxq->expected_seqno);
+finish_frag:
+	ctx->total_frags++;
+	return is_last_frag;
+}
+
+static void
+gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
+{
+	ctx->mbuf_head = NULL;
+	ctx->mbuf_tail = NULL;
+	ctx->drop_pkt = false;
+	ctx->total_frags = 0;
+}
+
+uint16_t
+gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	volatile struct gve_rx_desc *rxr, *rxd;
+	struct gve_rx_queue *rxq = rx_queue;
+	struct gve_rx_ctx *ctx = &rxq->ctx;
+	uint16_t rx_id = rxq->rx_tail;
+	uint16_t nb_rx;
+
+	rxr = rxq->rx_desc_ring;
+	nb_rx = 0;
+
+	while (nb_rx < nb_pkts) {
+		rxd = &rxr[rx_id];
+		if (GVE_SEQNO(rxd->flags_seq) != rxq->expected_seqno)
+			break;
+
+		if (gve_rx(rxq, rxd, rx_id)) {
+			if (!ctx->drop_pkt)
+				rx_pkts[nb_rx++] = ctx->mbuf_head;
+			rxq->nb_avail += ctx->total_frags;
+			gve_rx_ctx_clear(ctx);
+		}
 
 		rx_id++;
 		if (rx_id == rxq->nb_rx_desc)
 			rx_id = 0;
 
-		rx_pkts[nb_rx] = rxe;
-		bytes += len;
-		nb_rx++;
+		rxq->expected_seqno = gve_next_seqno(rxq->expected_seqno);
	}
 
-	rxq->nb_avail += nb_rx;
 	rxq->rx_tail = rx_id;
 
 	if (rxq->nb_avail > rxq->free_thresh)
 		gve_rx_refill(rxq);
 
-	if (nb_rx) {
+	if (nb_rx)
 		rxq->packets += nb_rx;
-		rxq->bytes += bytes;
-	}
 
 	return nb_rx;
 }
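For context (not part of the patch itself): with this change a GQI Rx jumbo frame is returned to the application as a chained mbuf, with pkt_len and nb_segs accumulated on the head segment and each further fragment linked through mbuf->next. Below is a minimal sketch of how an application might walk such a chain; it uses only generic ethdev/mbuf APIs, and the port/queue identifiers and burst size are illustrative assumptions rather than anything defined by this driver.

/*
 * Illustration only: receive a burst on one queue and walk the segment
 * chain of each (possibly multi-segment) packet. Assumes EAL, the port
 * and the Rx queue have already been initialized elsewhere.
 */
#include <stdint.h>
#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32	/* illustrative burst size */

static void
drain_rx_queue(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[BURST_SIZE];
	uint16_t nb_rx, i;

	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SIZE);
	for (i = 0; i < nb_rx; i++) {
		struct rte_mbuf *seg;

		/* pkt_len is the sum of data_len over all nb_segs segments. */
		printf("pkt %u: pkt_len=%u nb_segs=%u\n",
		       i, pkts[i]->pkt_len, pkts[i]->nb_segs);
		for (seg = pkts[i]; seg != NULL; seg = seg->next)
			printf("  seg data_len=%u\n", seg->data_len);

		rte_pktmbuf_free(pkts[i]);	/* frees the whole chain */
	}
}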