From patchwork Thu Apr 13 06:16:50 2023
X-Patchwork-Submitter: Junfeng Guo
X-Patchwork-Id: 125990
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Junfeng Guo
To: qi.z.zhang@intel.com, jingjing.wu@intel.com, ferruh.yigit@amd.com, beilei.xing@intel.com
Cc: dev@dpdk.org, Junfeng Guo, Rushil Gupta, Joshua Washington, Jeroen de Borst
Subject: [PATCH 10/10] net/gve: support jumbo frame for GQI
Date: Thu, 13 Apr 2023 14:16:50 +0800
Message-Id: <20230413061650.796940-11-junfeng.guo@intel.com>
In-Reply-To: <20230413061650.796940-1-junfeng.guo@intel.com>
References: <20230413061650.796940-1-junfeng.guo@intel.com>
X-Mailer: git-send-email 2.34.1

Add multi-segment support to enable GQI Rx Jumbo Frame.
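
For reference, the multi-segment handling below follows the usual DPDK pattern
of chaining rte_mbuf fragments behind a head mbuf: the first fragment carries
the packet-level fields (pkt_len, nb_segs, port, ol_flags) and later fragments
are linked through mbuf->next while the head's pkt_len and nb_segs grow. A
minimal illustrative sketch of that pattern (the helper name is made up for
illustration and is not part of this patch):

    #include <rte_mbuf.h>

    /* Append one received fragment to the packet under construction. */
    static void
    chain_rx_frag(struct rte_mbuf **head, struct rte_mbuf **tail,
                  struct rte_mbuf *frag, uint16_t len)
    {
            frag->data_len = len;
            if (*head == NULL) {
                    /* First fragment owns the packet-level fields. */
                    frag->nb_segs = 1;
                    frag->pkt_len = len;
                    *head = frag;
            } else {
                    /* Later fragments only extend the chain. */
                    (*head)->pkt_len += len;
                    (*head)->nb_segs += 1;
                    (*tail)->next = frag;
            }
            *tail = frag;
    }

The patch keeps this per-packet state (head, tail, fragment count, drop flag)
in a small gve_rx_ctx struct embedded in the Rx queue, so a packet's fragments
may span several descriptor-processing calls and even successive bursts.
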
Signed-off-by: Rushil Gupta
Signed-off-by: Joshua Washington
Signed-off-by: Junfeng Guo
Signed-off-by: Jeroen de Borst
---
 drivers/net/gve/gve_ethdev.h |   8 ++
 drivers/net/gve/gve_rx.c     | 137 +++++++++++++++++++++++++----------
 2 files changed, 108 insertions(+), 37 deletions(-)

diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 4a0e860afa..53a75044c5 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -159,6 +159,13 @@ struct gve_tx_queue {
 	uint8_t is_gqi_qpl;
 };
 
+struct gve_rx_ctx {
+	struct rte_mbuf *mbuf_head;
+	struct rte_mbuf *mbuf_tail;
+	uint16_t total_frags;
+	bool drop_pkt;
+};
+
 struct gve_rx_queue {
 	volatile struct gve_rx_desc *rx_desc_ring;
 	volatile union gve_rx_data_slot *rx_data_ring;
@@ -167,6 +174,7 @@ struct gve_rx_queue {
 	uint64_t rx_ring_phys_addr;
 	struct rte_mbuf **sw_ring;
 	struct rte_mempool *mpool;
+	struct gve_rx_ctx ctx;
 
 	uint16_t rx_tail;
 	uint16_t nb_rx_desc;
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index 3dd3f578f9..f2f6202404 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -5,6 +5,8 @@
 #include "gve_ethdev.h"
 #include "base/gve_adminq.h"
 
+#define GVE_PKT_CONT_BIT_IS_SET(x) (GVE_RXF_PKT_CONT & (x))
+
 static inline void
 gve_rx_refill(struct gve_rx_queue *rxq)
 {
@@ -87,43 +89,72 @@ gve_rx_refill(struct gve_rx_queue *rxq)
 	}
 }
 
-uint16_t
-gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+/*
+ * This method processes a single rte_mbuf and handles packet segmentation.
+ * In QPL mode it copies data from the mbuf to the gve_rx_queue.
+ */
+static void
+gve_rx_mbuf(struct gve_rx_queue *rxq, struct rte_mbuf *rxe, uint16_t len,
+	    uint16_t rx_id)
 {
-	volatile struct gve_rx_desc *rxr, *rxd;
-	struct gve_rx_queue *rxq = rx_queue;
-	uint16_t rx_id = rxq->rx_tail;
-	struct rte_mbuf *rxe;
-	uint16_t nb_rx, len;
-	uint64_t bytes = 0;
+	uint16_t padding = 0;
 	uint64_t addr;
-	uint16_t i;
 
-	rxr = rxq->rx_desc_ring;
-	nb_rx = 0;
-	for (i = 0; i < nb_pkts; i++) {
-		rxd = &rxr[rx_id];
-		if (GVE_SEQNO(rxd->flags_seq) != rxq->expected_seqno)
-			break;
-
-		if (rxd->flags_seq & GVE_RXF_ERR) {
-			rxq->stats.errors++;
-			continue;
-		}
-
-		len = rte_be_to_cpu_16(rxd->len) - GVE_RX_PAD;
-		rxe = rxq->sw_ring[rx_id];
-		if (rxq->is_gqi_qpl) {
-			addr = (uint64_t)(rxq->qpl->mz->addr) + rx_id * PAGE_SIZE + GVE_RX_PAD;
-			rte_memcpy((void *)((size_t)rxe->buf_addr + rxe->data_off),
-				   (void *)(size_t)addr, len);
-		}
-
+	rxe->data_len = len;
+	if (!rxq->ctx.mbuf_head) {
+		rxq->ctx.mbuf_head = rxe;
+		rxq->ctx.mbuf_tail = rxe;
+		rxe->nb_segs = 1;
 		rxe->pkt_len = len;
 		rxe->data_len = len;
 		rxe->port = rxq->port_id;
 		rxe->ol_flags = 0;
-
+		padding = GVE_RX_PAD;
+	} else {
+		rxq->ctx.mbuf_head->pkt_len += len;
+		rxq->ctx.mbuf_head->nb_segs += 1;
+		rxq->ctx.mbuf_tail->next = rxe;
+		rxq->ctx.mbuf_tail = rxe;
+	}
+	if (rxq->is_gqi_qpl) {
+		addr = (uint64_t)(rxq->qpl->mz->addr) + rx_id * PAGE_SIZE + padding;
+		rte_memcpy((void *)((size_t)rxe->buf_addr + rxe->data_off),
+			   (void *)(size_t)addr, len);
+	}
+}
+
+/*
+ * This method processes a single packet fragment associated with the
+ * passed packet descriptor.
+ * This method returns whether the fragment is the last fragment
+ * of a packet.
+ */
+static bool
+gve_rx(struct gve_rx_queue *rxq, volatile struct gve_rx_desc *rxd, uint16_t rx_id)
+{
+	bool is_last_frag = !GVE_PKT_CONT_BIT_IS_SET(rxd->flags_seq);
+	uint16_t frag_size = rte_be_to_cpu_16(rxd->len);
+	struct gve_rx_ctx *ctx = &rxq->ctx;
+	bool is_first_frag = ctx->total_frags == 0;
+	struct rte_mbuf *rxe;
+
+	if (ctx->drop_pkt)
+		goto finish_frag;
+
+	if (rxd->flags_seq & GVE_RXF_ERR) {
+		ctx->drop_pkt = true;
+		rxq->stats.errors++;
+		goto finish_frag;
+	}
+
+	if (is_first_frag)
+		frag_size -= GVE_RX_PAD;
+
+	rxe = rxq->sw_ring[rx_id];
+	gve_rx_mbuf(rxq, rxe, frag_size, rx_id);
+	rxq->stats.bytes += frag_size;
+
+	if (is_first_frag) {
 		if (rxd->flags_seq & GVE_RXF_TCP)
 			rxe->packet_type |= RTE_PTYPE_L4_TCP;
 		if (rxd->flags_seq & GVE_RXF_UDP)
@@ -137,28 +168,60 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			rxe->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
 			rxe->hash.rss = rte_be_to_cpu_32(rxd->rss_hash);
 		}
+	}
 
-		rxq->expected_seqno = gve_next_seqno(rxq->expected_seqno);
+finish_frag:
+	ctx->total_frags++;
+	return is_last_frag;
+}
+
+static void
+gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
+{
+	ctx->mbuf_head = NULL;
+	ctx->mbuf_tail = NULL;
+	ctx->drop_pkt = false;
+	ctx->total_frags = 0;
+}
+
+uint16_t
+gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	volatile struct gve_rx_desc *rxr, *rxd;
+	struct gve_rx_queue *rxq = rx_queue;
+	struct gve_rx_ctx *ctx = &rxq->ctx;
+	uint16_t rx_id = rxq->rx_tail;
+	uint16_t nb_rx;
+
+	rxr = rxq->rx_desc_ring;
+	nb_rx = 0;
+
+	while (nb_rx < nb_pkts) {
+		rxd = &rxr[rx_id];
+		if (GVE_SEQNO(rxd->flags_seq) != rxq->expected_seqno)
+			break;
+
+		if (gve_rx(rxq, rxd, rx_id)) {
+			if (!ctx->drop_pkt)
+				rx_pkts[nb_rx++] = ctx->mbuf_head;
+			rxq->nb_avail += ctx->total_frags;
+			gve_rx_ctx_clear(ctx);
+		}
 
 		rx_id++;
 		if (rx_id == rxq->nb_rx_desc)
 			rx_id = 0;
 
-		rx_pkts[nb_rx] = rxe;
-		bytes += len;
-		nb_rx++;
+		rxq->expected_seqno = gve_next_seqno(rxq->expected_seqno);
 	}
 
-	rxq->nb_avail += nb_rx;
 	rxq->rx_tail = rx_id;
 
 	if (rxq->nb_avail > rxq->free_thresh)
 		gve_rx_refill(rxq);
 
-	if (nb_rx) {
+	if (nb_rx)
 		rxq->stats.packets += nb_rx;
-		rxq->stats.bytes += bytes;
-	}
 
 	return nb_rx;
 }