From patchwork Wed Aug 3 11:30:59 2022
X-Patchwork-Submitter: Junfeng Guo
X-Patchwork-Id: 114574
X-Patchwork-Delegate: andrew.rybchenko@oktetlabs.ru
From: Junfeng Guo
To: qi.z.zhang@intel.com, jingjing.wu@intel.com, beilei.xing@intel.com
Cc: dev@dpdk.org, junfeng.guo@intel.com, Xiaoyun Li
Subject: [PATCH 08/13] net/idpf: add basic Rx/Tx datapath
Date: Wed, 3 Aug 2022 19:30:59 +0800
Message-Id: <20220803113104.1184059-9-junfeng.guo@intel.com>
In-Reply-To: <20220803113104.1184059-1-junfeng.guo@intel.com>
References: <20220803113104.1184059-1-junfeng.guo@intel.com>
List-Id: DPDK patches and discussions

Add basic RX & TX support in split queue mode and single queue mode.
Split queue mode is selected by default.
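
The queue model can be chosen per direction at probe time through the new
devargs parsed below: "tx_single" and "rx_single" each accept 0 (split
queue, the default) or 1 (single queue); any other value is rejected by
parse_bool(). As a hypothetical example, assuming the usual PCI devargs
syntax and a placeholder device address:

    dpdk-testpmd -a ca:00.0,tx_single=1,rx_single=1 -- -i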
Signed-off-by: Beilei Xing Signed-off-by: Xiaoyun Li Signed-off-by: Junfeng Guo --- drivers/net/idpf/idpf_ethdev.c | 69 ++- drivers/net/idpf/idpf_rxtx.c | 896 +++++++++++++++++++++++++++++++++ drivers/net/idpf/idpf_rxtx.h | 35 ++ 3 files changed, 998 insertions(+), 2 deletions(-) diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c index 668e488843..55ec24872e 100644 --- a/drivers/net/idpf/idpf_ethdev.c +++ b/drivers/net/idpf/idpf_ethdev.c @@ -14,12 +14,16 @@ #include "idpf_ethdev.h" #include "idpf_rxtx.h" +#define IDPF_TX_SINGLE_Q "tx_single" +#define IDPF_RX_SINGLE_Q "rx_single" #define REPRESENTOR "representor" struct idpf_adapter *adapter; uint16_t used_vecs_num; static const char * const idpf_valid_args[] = { + IDPF_TX_SINGLE_Q, + IDPF_RX_SINGLE_Q, REPRESENTOR, NULL }; @@ -157,6 +161,30 @@ idpf_init_vport_req_info(__rte_unused struct rte_eth_dev *dev) (struct virtchnl2_create_vport *)adapter->vport_req_info[idx]; vport_info->vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT); + if (!adapter->txq_model) { + vport_info->txq_model = + rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT); + vport_info->num_tx_q = dev->data->nb_tx_queues; + vport_info->num_tx_complq = + dev->data->nb_tx_queues * IDPF_TX_COMPLQ_PER_GRP; + } else { + vport_info->txq_model = + rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE); + vport_info->num_tx_q = dev->data->nb_tx_queues; + vport_info->num_tx_complq = 0; + } + if (!adapter->rxq_model) { + vport_info->rxq_model = + rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT); + vport_info->num_rx_q = dev->data->nb_rx_queues; + vport_info->num_rx_bufq = + dev->data->nb_rx_queues * IDPF_RX_BUFQ_PER_GRP; + } else { + vport_info->rxq_model = + rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE); + vport_info->num_rx_q = dev->data->nb_rx_queues; + vport_info->num_rx_bufq = 0; + } return 0; } @@ -344,6 +372,9 @@ idpf_dev_start(struct rte_eth_dev *dev) goto err_mtu; } + idpf_set_rx_function(dev); + idpf_set_tx_function(dev); + if (idpf_ena_dis_vport(vport, true)) { PMD_DRV_LOG(ERR, "Failed to enable vport"); goto err_vport; @@ -394,11 +425,31 @@ idpf_dev_close(struct rte_eth_dev *dev) return 0; } +static int +parse_bool(const char *key, const char *value, void *args) +{ + int *i = (int *)args; + char *end; + int num; + + num = strtoul(value, &end, 10); + + if (num != 0 && num != 1) { + PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", " + "value must be 0 or 1", + value, key); + return -1; + } + + *i = num; + return 0; +} + static int idpf_parse_devargs(struct rte_eth_dev *dev) { struct rte_devargs *devargs = dev->device->devargs; struct rte_kvargs *kvlist; - int ret = 0; + int ret; if (!devargs) return 0; @@ -409,6 +460,17 @@ static int idpf_parse_devargs(struct rte_eth_dev *dev) return -EINVAL; } + ret = rte_kvargs_process(kvlist, IDPF_TX_SINGLE_Q, &parse_bool, + &adapter->txq_model); + if (ret) + goto bail; + + ret = rte_kvargs_process(kvlist, IDPF_RX_SINGLE_Q, &parse_bool, + &adapter->rxq_model); + if (ret) + goto bail; + +bail: rte_kvargs_free(kvlist); return ret; } @@ -637,8 +699,11 @@ idpf_dev_init(struct rte_eth_dev *dev, __rte_unused void *init_params) /* for secondary processes, we don't initialise any further as primary * has already done this work. 
*/ - if (rte_eal_process_type() != RTE_PROC_PRIMARY) + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + idpf_set_rx_function(dev); + idpf_set_tx_function(dev); return ret; + } ret = idpf_adapter_init(dev); if (ret) { diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c index eae762ced3..e6040cece1 100644 --- a/drivers/net/idpf/idpf_rxtx.c +++ b/drivers/net/idpf/idpf_rxtx.c @@ -1312,4 +1312,900 @@ idpf_stop_queues(struct rte_eth_dev *dev) } } +#define IDPF_RX_ERR0_QW1 \ + (BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S) | \ + BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S) | \ + BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S) | \ + BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S)) + +static inline uint64_t +idpf_splitq_rx_csum_offload(uint8_t err) +{ + uint64_t flags = 0; + + if (unlikely(!(err & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S)))) + return flags; + + if (likely((err & IDPF_RX_ERR0_QW1) == 0)) { + flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | + RTE_MBUF_F_RX_L4_CKSUM_GOOD); + return flags; + } + + if (unlikely(err & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S))) + flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD; + else + flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD; + + if (unlikely(err & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S))) + flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; + else + flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; + + if (unlikely(err & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S))) + flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD; + + if (unlikely(err & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S))) + flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD; + else + flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD; + + return flags; +} + +#define IDPF_RX_FLEX_DESC_HASH1_S 0 +#define IDPF_RX_FLEX_DESC_HASH2_S 16 +#define IDPF_RX_FLEX_DESC_HASH3_S 24 + +static inline uint64_t +idpf_splitq_rx_rss_offload(struct rte_mbuf *mb, + volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc) +{ + uint8_t status_err0_qw0; + uint64_t flags = 0; + + status_err0_qw0 = rx_desc->status_err0_qw0; + + if (status_err0_qw0 & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S)) { + flags |= RTE_MBUF_F_RX_RSS_HASH; + mb->hash.rss = rte_le_to_cpu_16(rx_desc->hash1) | + ((uint32_t)(rx_desc->ff2_mirrid_hash2.hash2) << + IDPF_RX_FLEX_DESC_HASH2_S) | + ((uint32_t)(rx_desc->hash3) << + IDPF_RX_FLEX_DESC_HASH3_S); + } + + return flags; +} + +static void +idpf_split_rx_bufq_refill(struct idpf_rx_queue *rx_bufq) +{ + volatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_ring; + volatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_desc; + uint16_t nb_refill = rx_bufq->nb_rx_hold; + uint16_t nb_desc = rx_bufq->nb_rx_desc; + uint16_t next_avail = rx_bufq->rx_tail; + struct rte_mbuf *nmb[nb_refill]; + struct rte_eth_dev *dev; + uint64_t dma_addr; + uint16_t delta; + + if (nb_refill <= rx_bufq->rx_free_thresh) + return; + + if (nb_refill >= nb_desc) + nb_refill = nb_desc - 1; + + rx_buf_ring = + (volatile struct virtchnl2_splitq_rx_buf_desc *)rx_bufq->rx_ring; + delta = nb_desc - next_avail; + if (delta < nb_refill) { + if (likely(!rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, delta))) { + for (int i = 0; i < delta; i++) { + rx_buf_desc = &rx_buf_ring[next_avail + i]; + rx_bufq->sw_ring[next_avail + i] = nmb[i]; + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i])); + rx_buf_desc->hdr_addr = 0; + rx_buf_desc->pkt_addr = dma_addr; + } + nb_refill -= delta; + next_avail = 0; + rx_bufq->nb_rx_hold -= delta; + } else { + dev = &rte_eth_devices[rx_bufq->port_id]; + 
dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail; + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u", + rx_bufq->port_id, rx_bufq->queue_id); + return; + } + } + + if (nb_desc - next_avail >= nb_refill) { + if (likely(!rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, nb_refill))) { + for (int i = 0; i < nb_refill; i++) { + rx_buf_desc = &rx_buf_ring[next_avail + i]; + rx_bufq->sw_ring[next_avail + i] = nmb[i]; + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i])); + rx_buf_desc->hdr_addr = 0; + rx_buf_desc->pkt_addr = dma_addr; + } + next_avail += nb_refill; + rx_bufq->nb_rx_hold -= nb_refill; + } else { + dev = &rte_eth_devices[rx_bufq->port_id]; + dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail; + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u", + rx_bufq->port_id, rx_bufq->queue_id); + } + } + + IECM_PCI_REG_WRITE(rx_bufq->qrx_tail, next_avail); + + rx_bufq->rx_tail = next_avail; +} + +uint16_t +idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc_ring; + volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc; + uint16_t pktlen_gen_bufq_id; + struct idpf_rx_queue *rxq; + const uint32_t *ptype_tbl; + uint8_t status_err0_qw1; + struct rte_mbuf *rxm; + uint16_t rx_id_bufq1; + uint16_t rx_id_bufq2; + uint64_t pkt_flags; + uint16_t pkt_len; + uint16_t bufq_id; + uint16_t gen_id; + uint16_t rx_id; + uint16_t nb_rx; + + nb_rx = 0; + rxq = (struct idpf_rx_queue *)rx_queue; + + if (unlikely(!rxq) || unlikely(!rxq->q_started)) + return nb_rx; + + rx_id = rxq->rx_tail; + rx_id_bufq1 = rxq->bufq1->rx_next_avail; + rx_id_bufq2 = rxq->bufq2->rx_next_avail; + rx_desc_ring = + (volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *)rxq->rx_ring; + ptype_tbl = rxq->adapter->ptype_tbl; + + while (nb_rx < nb_pkts) { + rx_desc = &rx_desc_ring[rx_id]; + + pktlen_gen_bufq_id = + rte_le_to_cpu_16(rx_desc->pktlen_gen_bufq_id); + gen_id = (pktlen_gen_bufq_id & + VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M) >> + VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S; + if (gen_id != rxq->expected_gen_id) + break; + + pkt_len = (pktlen_gen_bufq_id & + VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M) >> + VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_S; + if (!pkt_len) + PMD_RX_LOG(ERR, "Packet length is 0"); + + rx_id++; + if (unlikely(rx_id == rxq->nb_rx_desc)) { + rx_id = 0; + rxq->expected_gen_id ^= 1; + } + + bufq_id = (pktlen_gen_bufq_id & + VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M) >> + VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S; + if (!bufq_id) { + rxm = rxq->bufq1->sw_ring[rx_id_bufq1]; + rx_id_bufq1++; + if (unlikely(rx_id_bufq1 == rxq->bufq1->nb_rx_desc)) + rx_id_bufq1 = 0; + rxq->bufq1->nb_rx_hold++; + } else { + rxm = rxq->bufq2->sw_ring[rx_id_bufq2]; + rx_id_bufq2++; + if (unlikely(rx_id_bufq2 == rxq->bufq2->nb_rx_desc)) + rx_id_bufq2 = 0; + rxq->bufq2->nb_rx_hold++; + } + + pkt_len -= rxq->crc_len; + rxm->pkt_len = pkt_len; + rxm->data_len = pkt_len; + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rxm->next = NULL; + rxm->nb_segs = 1; + rxm->port = rxq->port_id; + rxm->ol_flags = 0; + rxm->packet_type = + ptype_tbl[(rte_le_to_cpu_16(rx_desc->ptype_err_fflags0) & + VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M) >> + VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S]; + + status_err0_qw1 = rx_desc->status_err0_qw1; + pkt_flags = idpf_splitq_rx_csum_offload(status_err0_qw1); + pkt_flags |= idpf_splitq_rx_rss_offload(rxm, rx_desc); + rxm->ol_flags |= pkt_flags; + + rx_pkts[nb_rx++] = rxm; + } + + if (nb_rx) { + rxq->rx_tail = rx_id; + if (rx_id_bufq1 != 
rxq->bufq1->rx_next_avail) + rxq->bufq1->rx_next_avail = rx_id_bufq1; + if (rx_id_bufq2 != rxq->bufq2->rx_next_avail) + rxq->bufq2->rx_next_avail = rx_id_bufq2; + + idpf_split_rx_bufq_refill(rxq->bufq1); + idpf_split_rx_bufq_refill(rxq->bufq2); + } + + return nb_rx; +} + +static inline void +idpf_split_tx_free(struct idpf_tx_queue *cq) +{ + volatile struct iecm_splitq_tx_compl_desc *compl_ring = cq->compl_ring; + volatile struct iecm_splitq_tx_compl_desc *txd; + uint16_t next = cq->tx_tail; + struct idpf_tx_entry *txe; + struct idpf_tx_queue *txq; + uint16_t gen, qid, q_head; + uint8_t ctype; + + txd = &compl_ring[next]; + gen = (rte_le_to_cpu_16(txd->qid_comptype_gen) & + IECM_TXD_COMPLQ_GEN_M) >> IECM_TXD_COMPLQ_GEN_S; + if (gen != cq->expected_gen_id) + return; + + ctype = (rte_le_to_cpu_16(txd->qid_comptype_gen) & + IECM_TXD_COMPLQ_COMPL_TYPE_M) >> IECM_TXD_COMPLQ_COMPL_TYPE_S; + qid = (rte_le_to_cpu_16(txd->qid_comptype_gen) & + IECM_TXD_COMPLQ_QID_M) >> IECM_TXD_COMPLQ_QID_S; + q_head = rte_le_to_cpu_16(txd->q_head_compl_tag.compl_tag); + txq = cq->txqs[qid - cq->tx_start_qid]; + + switch (ctype) { + case IECM_TXD_COMPLT_RE: + if (q_head == 0) + txq->last_desc_cleaned = txq->nb_tx_desc - 1; + else + txq->last_desc_cleaned = q_head - 1; + if (unlikely(!(txq->last_desc_cleaned % 32))) { + PMD_DRV_LOG(ERR, "unexpected desc (head = %u) completion.", + q_head); + return; + } + + break; + case IECM_TXD_COMPLT_RS: + txq->nb_free++; + txq->nb_used--; + txe = &txq->sw_ring[q_head]; + if (txe->mbuf) { + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = NULL; + } + break; + default: + PMD_DRV_LOG(ERR, "unknown completion type."); + return; + } + + if (++next == cq->nb_tx_desc) { + next = 0; + cq->expected_gen_id ^= 1; + } + + cq->tx_tail = next; +} + +/* Check if the context descriptor is needed for TX offloading */ +static inline uint16_t +idpf_calc_context_desc(uint64_t flags) +{ + if (flags & RTE_MBUF_F_TX_TCP_SEG) + return 1; + + return 0; +} + +/* set TSO context descriptor + */ +static inline void +idpf_set_splitq_tso_ctx(struct rte_mbuf *mbuf, + union idpf_tx_offload tx_offload, + volatile union iecm_flex_tx_ctx_desc *ctx_desc) +{ + uint16_t cmd_dtype; + uint32_t tso_len; + uint8_t hdr_len; + + if (!tx_offload.l4_len) { + PMD_TX_LOG(DEBUG, "L4 length set to 0"); + return; + } + + hdr_len = tx_offload.l2_len + + tx_offload.l3_len + + tx_offload.l4_len; + cmd_dtype = IECM_TX_DESC_DTYPE_FLEX_TSO_CTX | + IECM_TX_FLEX_CTX_DESC_CMD_TSO; + tso_len = mbuf->pkt_len - hdr_len; + + ctx_desc->tso.qw1.cmd_dtype = rte_cpu_to_le_16(cmd_dtype); + ctx_desc->tso.qw0.hdr_len = hdr_len; + ctx_desc->tso.qw0.mss_rt = + rte_cpu_to_le_16((uint16_t)mbuf->tso_segsz & + IECM_TXD_FLEX_CTX_MSS_RT_M); + ctx_desc->tso.qw0.flex_tlen = + rte_cpu_to_le_32(tso_len & + IECM_TXD_FLEX_CTX_MSS_RT_M); +} + +uint16_t +idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct idpf_tx_queue *txq = (struct idpf_tx_queue *)tx_queue; + volatile struct iecm_flex_tx_sched_desc *txr; + volatile struct iecm_flex_tx_sched_desc *txd; + struct idpf_tx_entry *sw_ring; + union idpf_tx_offload tx_offload = {0}; + struct idpf_tx_entry *txe, *txn; + uint16_t nb_used, tx_id, sw_id; + struct rte_mbuf *tx_pkt; + uint16_t nb_to_clean; + uint16_t nb_tx = 0; + uint64_t ol_flags; + uint16_t nb_ctx; + + if (unlikely(!txq) || unlikely(!txq->q_started)) + return nb_tx; + + txr = txq->desc_ring; + sw_ring = txq->sw_ring; + tx_id = txq->tx_tail; + sw_id = txq->sw_tail; + txe = &sw_ring[sw_id]; + + for (nb_tx = 0; nb_tx < 
nb_pkts; nb_tx++) { + tx_pkt = tx_pkts[nb_tx]; + + if (txq->nb_free <= txq->free_thresh) { + /* TODO: Need to refine + * 1. free and clean: Better to decide a clean destination instead of + * loop times. And don't free mbuf when RS got immediately, free when + * transmit or according to the clean destination. + * Now, just ingnore the RE write back, free mbuf when get RS + * 2. out-of-order rewrite back haven't be supported, SW head and HW head + * need to be separated. + **/ + nb_to_clean = 2 * txq->rs_thresh; + while (nb_to_clean--) + idpf_split_tx_free(txq->complq); + } + + if (txq->nb_free < tx_pkt->nb_segs) + break; + + ol_flags = tx_pkt->ol_flags; + tx_offload.l2_len = tx_pkt->l2_len; + tx_offload.l3_len = tx_pkt->l3_len; + tx_offload.l4_len = tx_pkt->l4_len; + tx_offload.tso_segsz = tx_pkt->tso_segsz; + /* Calculate the number of context descriptors needed. */ + nb_ctx = idpf_calc_context_desc(ol_flags); + nb_used = tx_pkt->nb_segs + nb_ctx; + + /* context descriptor */ + if (nb_ctx) { + volatile union iecm_flex_tx_ctx_desc *ctx_desc = + (volatile union iecm_flex_tx_ctx_desc *)&txr[tx_id]; + + if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) + idpf_set_splitq_tso_ctx(tx_pkt, tx_offload, + ctx_desc); + + tx_id++; + if (tx_id == txq->nb_tx_desc) + tx_id = 0; + } + + do { + txd = &txr[tx_id]; + txn = &sw_ring[txe->next_id]; + txe->mbuf = tx_pkt; + + /* Setup TX descriptor */ + txd->buf_addr = + rte_cpu_to_le_64(rte_mbuf_data_iova(tx_pkt)); + txd->qw1.cmd_dtype = + rte_cpu_to_le_16(IECM_TX_DESC_DTYPE_FLEX_FLOW_SCHE); + txd->qw1.rxr_bufsize = tx_pkt->data_len; + txd->qw1.compl_tag = sw_id; + tx_id++; + if (tx_id == txq->nb_tx_desc) + tx_id = 0; + sw_id = txe->next_id; + txe = txn; + tx_pkt = tx_pkt->next; + } while (tx_pkt); + + /* fill the last descriptor with End of Packet (EOP) bit */ + txd->qw1.cmd_dtype |= IECM_TXD_FLEX_FLOW_CMD_EOP; + + if (unlikely(!(tx_id % 32))) + txd->qw1.cmd_dtype |= IECM_TXD_FLEX_FLOW_CMD_RE; + if (ol_flags & IDPF_TX_CKSUM_OFFLOAD_MASK) + txd->qw1.cmd_dtype |= IECM_TXD_FLEX_FLOW_CMD_CS_EN; + txq->nb_free = (uint16_t)(txq->nb_free - nb_used); + txq->nb_used = (uint16_t)(txq->nb_used + nb_used); + } + + /* update the tail pointer if any packets were processed */ + if (likely(nb_tx)) { + IECM_PCI_REG_WRITE(txq->qtx_tail, tx_id); + txq->tx_tail = tx_id; + txq->sw_tail = sw_id; + } + + return nb_tx; +} + +static inline void +idpf_update_rx_tail(struct idpf_rx_queue *rxq, uint16_t nb_hold, + uint16_t rx_id) +{ + nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold); + + if (nb_hold > rxq->rx_free_thresh) { + PMD_RX_LOG(DEBUG, + "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u", + rxq->port_id, rxq->queue_id, rx_id, nb_hold); + rx_id = (uint16_t)((rx_id == 0) ? 
+ (rxq->nb_rx_desc - 1) : (rx_id - 1)); + IECM_PCI_REG_WRITE(rxq->qrx_tail, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; +} + +uint16_t +idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + volatile union virtchnl2_rx_desc *rx_ring; + volatile union virtchnl2_rx_desc *rxdp; + struct idpf_rx_queue *rxq; + const uint32_t *ptype_tbl; + uint16_t rx_id, nb_hold; + struct rte_eth_dev *dev; + uint16_t rx_packet_len; + struct rte_mbuf *rxe; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + uint16_t rx_status0; + uint64_t dma_addr; + uint16_t nb_rx; + + nb_rx = 0; + nb_hold = 0; + rxq = rx_queue; + + if (unlikely(!rxq) || unlikely(!rxq->q_started)) + return nb_rx; + + rx_id = rxq->rx_tail; + rx_ring = rxq->rx_ring; + ptype_tbl = rxq->adapter->ptype_tbl; + + while (nb_rx < nb_pkts) { + rxdp = &rx_ring[rx_id]; + rx_status0 = rte_le_to_cpu_16(rxdp->flex_nic_wb.status_error0); + + /* Check the DD bit first */ + if (!(rx_status0 & (1 << VIRTCHNL2_RX_FLEX_DESC_STATUS0_DD_S))) + break; + + rx_packet_len = (rte_cpu_to_le_16(rxdp->flex_nic_wb.pkt_len)) - + rxq->crc_len; + + nmb = rte_mbuf_raw_alloc(rxq->mp); + if (unlikely(!nmb)) { + dev = &rte_eth_devices[rxq->port_id]; + dev->data->rx_mbuf_alloc_failed++; + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " + "queue_id=%u", rxq->port_id, rxq->queue_id); + break; + } + + nb_hold++; + rxe = rxq->sw_ring[rx_id]; + rxq->sw_ring[rx_id] = nmb; + rx_id++; + if (unlikely(rx_id == rxq->nb_rx_desc)) + rx_id = 0; + + /* Prefetch next mbuf */ + rte_prefetch0(rxq->sw_ring[rx_id]); + + /* When next RX descriptor is on a cache line boundary, + * prefetch the next 4 RX descriptors and next 8 pointers + * to mbufs. + */ + if ((rx_id & 0x3) == 0) { + rte_prefetch0(&rx_ring[rx_id]); + rte_prefetch0(rxq->sw_ring[rx_id]); + } + rxm = rxe; + dma_addr = + rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); + rxdp->read.hdr_addr = 0; + rxdp->read.pkt_addr = dma_addr; + + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM)); + rxm->nb_segs = 1; + rxm->next = NULL; + rxm->pkt_len = rx_packet_len; + rxm->data_len = rx_packet_len; + rxm->port = rxq->port_id; + rxm->ol_flags = 0; + rxm->packet_type = + ptype_tbl[(uint8_t)(rte_cpu_to_le_16(rxdp->flex_nic_wb.ptype_flex_flags0) & + VIRTCHNL2_RX_FLEX_DESC_PTYPE_M)]; + + rx_pkts[nb_rx++] = rxm; + } + rxq->rx_tail = rx_id; + + idpf_update_rx_tail(rxq, nb_hold, rx_id); + + return nb_rx; +} + +static inline int +idpf_xmit_cleanup(struct idpf_tx_queue *txq) +{ + uint16_t last_desc_cleaned = txq->last_desc_cleaned; + struct idpf_tx_entry *sw_ring = txq->sw_ring; + uint16_t nb_tx_desc = txq->nb_tx_desc; + uint16_t desc_to_clean_to; + uint16_t nb_tx_to_clean; + + volatile struct iecm_base_tx_desc *txd = txq->tx_ring; + + desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh); + if (desc_to_clean_to >= nb_tx_desc) + desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc); + + desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; + if ((txd[desc_to_clean_to].qw1 & + rte_cpu_to_le_64(IECM_TXD_QW1_DTYPE_M)) != + rte_cpu_to_le_64(IECM_TX_DESC_DTYPE_DESC_DONE)) { + PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done " + "(port=%d queue=%d)", desc_to_clean_to, + txq->port_id, txq->queue_id); + return -1; + } + + if (last_desc_cleaned > desc_to_clean_to) + nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) + + desc_to_clean_to); + else + nb_tx_to_clean = (uint16_t)(desc_to_clean_to - + last_desc_cleaned); + + 
txd[desc_to_clean_to].qw1 = 0; + + txq->last_desc_cleaned = desc_to_clean_to; + txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean); + + return 0; +} + +/* set TSO context descriptor + * support IP -> L4 and IP -> IP -> L4 + */ +static inline uint64_t +idpf_set_tso_ctx(struct rte_mbuf *mbuf, union idpf_tx_offload tx_offload) +{ + uint64_t ctx_desc = 0; + uint32_t cd_cmd, hdr_len, cd_tso_len; + + if (!tx_offload.l4_len) { + PMD_TX_LOG(DEBUG, "L4 length set to 0"); + return ctx_desc; + } + + hdr_len = tx_offload.l2_len + + tx_offload.l3_len + + tx_offload.l4_len; + + cd_cmd = IECM_TX_CTX_DESC_TSO; + cd_tso_len = mbuf->pkt_len - hdr_len; + ctx_desc |= ((uint64_t)cd_cmd << IECM_TXD_CTX_QW1_CMD_S) | + ((uint64_t)cd_tso_len << IECM_TXD_CTX_QW1_TSO_LEN_S) | + ((uint64_t)mbuf->tso_segsz << IECM_TXD_CTX_QW1_MSS_S); + + return ctx_desc; +} + +/* Construct the tx flags */ +static inline uint64_t +idpf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size) +{ + return rte_cpu_to_le_64(IECM_TX_DESC_DTYPE_DATA | + ((uint64_t)td_cmd << IECM_TXD_QW1_CMD_S) | + ((uint64_t)td_offset << + IECM_TXD_QW1_OFFSET_S) | + ((uint64_t)size << + IECM_TXD_QW1_TX_BUF_SZ_S)); +} + +/* TX function */ +uint16_t +idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + volatile struct iecm_base_tx_desc *txd; + volatile struct iecm_base_tx_desc *txr; + union idpf_tx_offload tx_offload = {0}; + struct idpf_tx_entry *txe, *txn; + struct idpf_tx_entry *sw_ring; + struct idpf_tx_queue *txq; + struct rte_mbuf *tx_pkt; + struct rte_mbuf *m_seg; + uint64_t buf_dma_addr; + uint32_t td_offset; + uint64_t ol_flags; + uint16_t tx_last; + uint16_t nb_used; + uint16_t nb_ctx; + uint32_t td_cmd; + uint16_t tx_id; + uint16_t nb_tx; + uint16_t slen; + + nb_tx = 0; + txq = tx_queue; + + if (unlikely(!txq) || unlikely(!txq->q_started)) + return nb_tx; + + sw_ring = txq->sw_ring; + txr = txq->tx_ring; + tx_id = txq->tx_tail; + txe = &sw_ring[tx_id]; + + /* Check if the descriptor ring needs to be cleaned. */ + if (txq->nb_free < txq->free_thresh) + (void)idpf_xmit_cleanup(txq); + + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + td_cmd = 0; + td_offset = 0; + + tx_pkt = *tx_pkts++; + RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf); + + ol_flags = tx_pkt->ol_flags; + tx_offload.l2_len = tx_pkt->l2_len; + tx_offload.l3_len = tx_pkt->l3_len; + tx_offload.l4_len = tx_pkt->l4_len; + tx_offload.tso_segsz = tx_pkt->tso_segsz; + /* Calculate the number of context descriptors needed. */ + nb_ctx = idpf_calc_context_desc(ol_flags); + + /* The number of descriptors that must be allocated for + * a packet equals to the number of the segments of that + * packet plus 1 context descriptor if needed. + */ + nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx); + tx_last = (uint16_t)(tx_id + nb_used - 1); + + /* Circular ring */ + if (tx_last >= txq->nb_tx_desc) + tx_last = (uint16_t)(tx_last - txq->nb_tx_desc); + + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u" + " tx_first=%u tx_last=%u", + txq->port_id, txq->queue_id, tx_id, tx_last); + + if (nb_used > txq->nb_free) { + if (idpf_xmit_cleanup(txq)) { + if (nb_tx == 0) + return 0; + goto end_of_tx; + } + if (unlikely(nb_used > txq->rs_thresh)) { + while (nb_used > txq->nb_free) { + if (idpf_xmit_cleanup(txq)) { + if (nb_tx == 0) + return 0; + goto end_of_tx; + } + } + } + } + + /* According to datasheet, the bit2 is reserved and must be + * set to 1. 
+ */ + td_cmd |= 0x04; + + if (nb_ctx) { + /* Setup TX context descriptor if required */ + volatile union iecm_flex_tx_ctx_desc *ctx_txd = + (volatile union iecm_flex_tx_ctx_desc *) + &txr[tx_id]; + + txn = &sw_ring[txe->next_id]; + RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf); + if (txe->mbuf) { + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = NULL; + } + + /* TSO enabled */ + if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) + idpf_set_splitq_tso_ctx(tx_pkt, tx_offload, + ctx_txd); + + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + } + + m_seg = tx_pkt; + do { + txd = &txr[tx_id]; + txn = &sw_ring[txe->next_id]; + + if (txe->mbuf) + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = m_seg; + + /* Setup TX Descriptor */ + slen = m_seg->data_len; + buf_dma_addr = rte_mbuf_data_iova(m_seg); + txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr); + txd->qw1 = idpf_build_ctob(td_cmd, td_offset, slen); + + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + m_seg = m_seg->next; + } while (m_seg); + + /* The last packet data descriptor needs End Of Packet (EOP) */ + td_cmd |= IECM_TX_DESC_CMD_EOP; + txq->nb_used = (uint16_t)(txq->nb_used + nb_used); + txq->nb_free = (uint16_t)(txq->nb_free - nb_used); + + if (txq->nb_used >= txq->rs_thresh) { + PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id=" + "%4u (port=%d queue=%d)", + tx_last, txq->port_id, txq->queue_id); + + td_cmd |= IECM_TX_DESC_CMD_RS; + + /* Update txq RS bit counters */ + txq->nb_used = 0; + } + + txd->qw1 |= + rte_cpu_to_le_64(((uint64_t)td_cmd) << + IECM_TXD_QW1_CMD_S); + } + +end_of_tx: + rte_wmb(); + + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u", + txq->port_id, txq->queue_id, tx_id, nb_tx); + + IECM_PCI_REG_WRITE(txq->qtx_tail, tx_id); + txq->tx_tail = tx_id; + + return nb_tx; +} + +/* TX prep functions */ +uint16_t +idpf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + int i, ret; + uint64_t ol_flags; + struct rte_mbuf *m; + + for (i = 0; i < nb_pkts; i++) { + m = tx_pkts[i]; + ol_flags = m->ol_flags; + + /* Check condition for nb_segs > IDPF_TX_MAX_MTU_SEG. 
*/ + if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) { + if (m->nb_segs > IDPF_TX_MAX_MTU_SEG) { + rte_errno = EINVAL; + return i; + } + } else if ((m->tso_segsz < IDPF_MIN_TSO_MSS) || + (m->tso_segsz > IDPF_MAX_TSO_MSS)) { + /* MSS outside the range are considered malicious */ + rte_errno = EINVAL; + return i; + } + + if (ol_flags & IDPF_TX_OFFLOAD_NOTSUP_MASK) { + rte_errno = ENOTSUP; + return i; + } + + if (!m->pkt_len) { + rte_errno = EINVAL; + return i; + } + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + ret = rte_validate_tx_offload(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } +#endif + ret = rte_net_intel_cksum_prepare(m); + if (ret != 0) { + rte_errno = -ret; + return i; + } + } + + return i; +} + +void +idpf_set_rx_function(struct rte_eth_dev *dev) +{ + struct idpf_vport *vport = + (struct idpf_vport *)dev->data->dev_private; + + if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { + dev->rx_pkt_burst = idpf_splitq_recv_pkts; + return; + } else { + dev->rx_pkt_burst = idpf_singleq_recv_pkts; + return; + } +} + +void +idpf_set_tx_function(struct rte_eth_dev *dev) +{ + struct idpf_vport *vport = + (struct idpf_vport *)dev->data->dev_private; + + if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { + dev->tx_pkt_burst = idpf_splitq_xmit_pkts; + dev->tx_pkt_prepare = idpf_prep_pkts; + return; + } else { + dev->tx_pkt_burst = idpf_singleq_xmit_pkts; + dev->tx_pkt_prepare = idpf_prep_pkts; + return; + } +} diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h index 5e57995f8c..3e79c0ac2a 100644 --- a/drivers/net/idpf/idpf_rxtx.h +++ b/drivers/net/idpf/idpf_rxtx.h @@ -42,6 +42,25 @@ #define IDPF_TSO_MAX_SEG UINT8_MAX #define IDPF_TX_MAX_MTU_SEG 8 +#define IDPF_TX_CKSUM_OFFLOAD_MASK ( \ + RTE_MBUF_F_TX_IP_CKSUM | \ + RTE_MBUF_F_TX_L4_MASK | \ + RTE_MBUF_F_TX_TCP_SEG) + +#define IDPF_TX_OFFLOAD_MASK ( \ + RTE_MBUF_F_TX_OUTER_IPV6 | \ + RTE_MBUF_F_TX_OUTER_IPV4 | \ + RTE_MBUF_F_TX_IPV6 | \ + RTE_MBUF_F_TX_IPV4 | \ + RTE_MBUF_F_TX_VLAN | \ + RTE_MBUF_F_TX_IP_CKSUM | \ + RTE_MBUF_F_TX_L4_MASK | \ + RTE_MBUF_F_TX_TCP_SEG | \ + RTE_ETH_TX_OFFLOAD_SECURITY) + +#define IDPF_TX_OFFLOAD_NOTSUP_MASK \ + (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IDPF_TX_OFFLOAD_MASK) + struct idpf_rx_queue { struct idpf_adapter *adapter; /* the adapter this queue belongs to */ struct rte_mempool *mp; /* mbuf pool to populate Rx ring */ @@ -176,8 +195,24 @@ int idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id); int idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id); void idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid); +uint16_t idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t idpf_singleq_recv_pkts_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +uint16_t idpf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + void idpf_stop_queues(struct rte_eth_dev *dev); +void idpf_set_rx_function(struct rte_eth_dev *dev); +void idpf_set_tx_function(struct rte_eth_dev *dev); + void idpf_set_default_ptype_table(struct rte_eth_dev *dev); const uint32_t *idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
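
Usage note (editorial sketch, not part of the patch): once idpf_dev_start()
has installed the burst callbacks selected by idpf_set_rx_function() and
idpf_set_tx_function(), applications reach these datapaths through the
generic ethdev burst API rather than calling the idpf_* functions directly.
A minimal polling loop, with placeholder port/queue ids and assuming a port
that has already been configured and started:

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    #define BURST_SIZE 32

    static void
    poll_port(uint16_t port_id, uint16_t queue_id)
    {
            struct rte_mbuf *pkts[BURST_SIZE];

            for (;;) {
                    /* Dispatches to idpf_splitq_recv_pkts() or
                     * idpf_singleq_recv_pkts() depending on the queue model.
                     */
                    uint16_t nb_rx = rte_eth_rx_burst(port_id, queue_id,
                                                      pkts, BURST_SIZE);
                    if (nb_rx == 0)
                            continue;

                    /* Echo the burst back out; free anything not sent. */
                    uint16_t nb_tx = rte_eth_tx_burst(port_id, queue_id,
                                                      pkts, nb_rx);
                    while (nb_tx < nb_rx)
                            rte_pktmbuf_free(pkts[nb_tx++]);
            }
    }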