From patchwork Wed Aug 9 15:51:31 2023
X-Patchwork-Submitter: "Xing, Beilei" <beilei.xing@intel.com>
X-Patchwork-Id: 130029
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: beilei.xing@intel.com
To: jingjing.wu@intel.com, mingxia.liu@intel.com
Cc: dev@dpdk.org, Beilei Xing <beilei.xing@intel.com>
Subject: [PATCH 16/19] net/cpfl: support representor data path
Date: Wed, 9 Aug 2023 15:51:31 +0000
Message-Id: <20230809155134.539287-17-beilei.xing@intel.com>
In-Reply-To: <20230809155134.539287-1-beilei.xing@intel.com>
References: <20230809155134.539287-1-beilei.xing@intel.com>

From: Beilei Xing <beilei.xing@intel.com>

Add Rx/Tx burst for port representor.
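
For reviewers who want the application-level view: once a representor port is
probed, these burst callbacks are reached through the normal ethdev API. The
snippet below is only an illustrative sketch; the port id, queue id and burst
size are assumptions made up for the example, not taken from this patch.

/* Illustrative only: poll one Rx queue of a representor port and echo the
 * packets back out.  rte_eth_rx_burst() dispatches to cpfl_repr_rx_burst()
 * and rte_eth_tx_burst() to cpfl_repr_tx_burst() once this patch hooks them
 * up in cpfl_repr_init().
 */
#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define REPR_BURST_SZ 32	/* hypothetical burst size */

static void
poll_representor_once(uint16_t repr_port_id)
{
	struct rte_mbuf *pkts[REPR_BURST_SZ];
	uint16_t nb_rx, nb_tx, i;

	nb_rx = rte_eth_rx_burst(repr_port_id, 0, pkts, REPR_BURST_SZ);
	if (nb_rx == 0)
		return;

	/* Tx on the representor is steered to the peer VSI by the driver. */
	nb_tx = rte_eth_tx_burst(repr_port_id, 0, pkts, nb_rx);

	/* Free anything the Tx burst did not accept. */
	for (i = nb_tx; i < nb_rx; i++)
		rte_pktmbuf_free(pkts[i]);
}
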
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_representor.c |  83 +++++++++++++++++++
 drivers/net/cpfl/cpfl_rxtx.c        | 121 ++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_rxtx.h        |   4 +
 3 files changed, 208 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 79cb7f76d4..51b70ea346 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -491,6 +491,87 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
 	.stats_reset		= idpf_repr_stats_reset,
 };
 
+#define MAX_IDPF_REPRESENTOR_BURST	128
+static uint16_t
+cpfl_repr_rx_burst(void *rxq,
+		   struct rte_mbuf **rx_pkts,
+		   uint16_t nb_pkts)
+{
+	struct cpfl_repr_rx_queue *rx_queue = rxq;
+	struct rte_ring *ring = rx_queue->rx_ring;
+	struct rte_mbuf *mbuf[MAX_IDPF_REPRESENTOR_BURST] = {NULL};
+	unsigned int nb_recv;
+	uint16_t i;
+
+	if (unlikely(!ring))
+		return 0;
+
+	nb_recv = rte_ring_dequeue_burst(ring, (void **)mbuf,
+					 RTE_MIN(nb_pkts, MAX_IDPF_REPRESENTOR_BURST), NULL);
+	for (i = 0; i < nb_recv; i++) {
+		if (mbuf[i]->pool != rx_queue->mb_pool) {
+			/* need a copy if the mempools of the vport and the representor queue differ */
+			rx_pkts[i] = rte_pktmbuf_copy(mbuf[i], rx_queue->mb_pool, 0, UINT32_MAX);
+			rte_pktmbuf_free(mbuf[i]);
+		} else {
+			rx_pkts[i] = mbuf[i];
+		}
+	}
+
+	__atomic_fetch_add(&rx_queue->stats.packets, nb_recv, __ATOMIC_RELAXED);
+	/* TODO: bytes stats */
+	return nb_recv;
+}
+
+static uint16_t
+cpfl_get_vsi_from_vf_representor(struct cpfl_repr *repr)
+{
+	return repr->vport_info->vport_info.vsi_id;
+}
+
+static uint16_t
+cpfl_repr_tx_burst(void *txq,
+		   struct rte_mbuf **tx_pkts,
+		   uint16_t nb_pkts)
+{
+	struct cpfl_repr_tx_queue *tx_queue = txq;
+	struct idpf_tx_queue *hw_txq = &tx_queue->txq->base;
+	struct cpfl_repr *repr;
+	uint16_t vsi_id;
+	uint16_t nb;
+
+	if (unlikely(!tx_queue->txq))
+		return 0;
+
+	repr = tx_queue->repr;
+
+	if (!hw_txq) {
+		PMD_INIT_LOG(ERR, "No queue associated with representor host_id: %d, %s %d",
+			     repr->repr_id.host_id,
+			     (repr->repr_id.type == RTE_ETH_REPRESENTOR_VF) ? "vf" : "pf",
+			     (repr->repr_id.type == RTE_ETH_REPRESENTOR_VF) ?
+			     repr->repr_id.vf_id :
+			     repr->repr_id.pf_id);
+		return 0;
+	}
+
+	if (repr->repr_id.type == RTE_ETH_REPRESENTOR_VF) {
+		vsi_id = cpfl_get_vsi_from_vf_representor(repr);
+	} else {
+		/* TODO: RTE_ETH_REPRESENTOR_PF */
+		PMD_INIT_LOG(ERR, "Getting the VSI from a PF representor is not supported.");
+		return 0;
+	}
+
+	rte_spinlock_lock(&tx_queue->txq->lock);
+	nb = cpfl_xmit_pkts_to_vsi(tx_queue->txq, tx_pkts, nb_pkts, vsi_id);
+	rte_spinlock_unlock(&tx_queue->txq->lock);
+
+	__atomic_fetch_add(&tx_queue->stats.packets, nb, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&tx_queue->stats.errors, nb_pkts - nb, __ATOMIC_RELAXED);
+	/* TODO: bytes stats */
+	return nb;
+}
+
 static int
 cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 {
@@ -507,6 +588,8 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 	repr->func_up = true;
 
 	eth_dev->dev_ops = &cpfl_repr_dev_ops;
+	eth_dev->rx_pkt_burst = cpfl_repr_rx_burst;
+	eth_dev->tx_pkt_burst = cpfl_repr_tx_burst;
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
 
 	/* bit[15:14] type
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index df6a8c1940..882efe04cf 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -616,6 +616,9 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	txq->ops = &def_txq_ops;
 	cpfl_vport->nb_data_txq++;
 	txq->q_set = true;
+
+	rte_spinlock_init(&cpfl_txq->lock);
+
 	dev->data->tx_queues[queue_idx] = cpfl_txq;
 
 	return 0;
@@ -1409,6 +1412,124 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
 	}
 }
 
+static inline void
+cpfl_set_tx_switch_ctx(uint16_t vsi_id, bool is_vsi,
+		       volatile union idpf_flex_tx_ctx_desc *ctx_desc)
+{
+	uint16_t cmd_dtype;
+
+	/* Use the TX native TSO context descriptor to carry the VSI,
+	 * so TSO is not supported.
+	 */
+	if (is_vsi) {
+		cmd_dtype = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
+			    IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_TARGETVSI;
+		ctx_desc->tso.qw0.mss_rt =
+			rte_cpu_to_le_16((uint16_t)vsi_id &
+					 IDPF_TXD_FLEX_CTX_MSS_RT_M);
+	} else {
+		cmd_dtype = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
+			    IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_UPLNK;
+	}
+
+	ctx_desc->tso.qw1.cmd_dtype = rte_cpu_to_le_16(cmd_dtype);
+}
+
+/* Transmit packets to the destination VSI,
+ * very similar to idpf_splitq_xmit_pkts.
+ */
+uint16_t
+cpfl_xmit_pkts_to_vsi(struct cpfl_tx_queue *cpfl_txq, struct rte_mbuf **tx_pkts,
+		      uint16_t nb_pkts, uint16_t vsi_id)
+{
+	volatile struct idpf_flex_tx_sched_desc *txr;
+	volatile struct idpf_flex_tx_sched_desc *txd;
+	volatile union idpf_flex_tx_ctx_desc *ctx_desc;
+	struct idpf_tx_entry *sw_ring;
+	struct idpf_tx_entry *txe, *txn;
+	uint16_t nb_used, tx_id, sw_id;
+	struct idpf_tx_queue *txq;
+	struct rte_mbuf *tx_pkt;
+	uint16_t nb_to_clean;
+	uint16_t nb_tx = 0;
+
+	if (unlikely(!cpfl_txq))
+		return nb_tx;
+
+	txq = &cpfl_txq->base;
+	if (unlikely(!txq) || unlikely(!txq->q_started))
+		return nb_tx;
+
+	txr = txq->desc_ring;
+	sw_ring = txq->sw_ring;
+	tx_id = txq->tx_tail;
+	sw_id = txq->sw_tail;
+	txe = &sw_ring[sw_id];
+
+	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+		tx_pkt = tx_pkts[nb_tx];
+
+		if (txq->nb_free <= txq->free_thresh) {
+			/* TODO: Need to refine, refer to idpf_splitq_xmit_pkts */
+			nb_to_clean = 2 * txq->rs_thresh;
+			while (nb_to_clean--)
+				idpf_split_tx_free(txq->complq);
+		}
+
+		if (txq->nb_free < tx_pkt->nb_segs + 1)
+			break;
+		/* need a context descriptor to carry the target VSI; no TSO support.
+		 */
+		nb_used = tx_pkt->nb_segs + 1;
+
+		/* context descriptor prepare */
+		ctx_desc = (volatile union idpf_flex_tx_ctx_desc *)&txr[tx_id];
+
+		cpfl_set_tx_switch_ctx(vsi_id, true, ctx_desc);
+		tx_id++;
+		if (tx_id == txq->nb_tx_desc)
+			tx_id = 0;
+
+		do {
+			txd = &txr[tx_id];
+			txn = &sw_ring[txe->next_id];
+			txe->mbuf = tx_pkt;
+
+			/* Setup TX descriptor */
+			txd->buf_addr =
+				rte_cpu_to_le_64(rte_mbuf_data_iova(tx_pkt));
+			txd->qw1.cmd_dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
+			txd->qw1.rxr_bufsize = tx_pkt->data_len;
+			txd->qw1.compl_tag = sw_id;
+			tx_id++;
+			if (tx_id == txq->nb_tx_desc)
+				tx_id = 0;
+			sw_id = txe->next_id;
+			txe = txn;
+			tx_pkt = tx_pkt->next;
+		} while (tx_pkt);
+
+		/* fill the last descriptor with End of Packet (EOP) bit */
+		txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_EOP;
+
+		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
+		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
+
+		if (txq->nb_used >= 32) {
+			txd->qw1.cmd_dtype |= IDPF_TXD_FLEX_FLOW_CMD_RE;
+			/* Update txq RE bit counters */
+			txq->nb_used = 0;
+		}
+	}
+
+	/* update the tail pointer if any packets were processed */
+	if (likely(nb_tx)) {
+		IDPF_PCI_REG_WRITE(txq->qtx_tail, tx_id);
+		txq->tx_tail = tx_id;
+		txq->sw_tail = sw_id;
+	}
+	return nb_tx;
+}
+
 uint16_t
 cpfl_dummy_recv_pkts(__rte_unused void *queue,
 		     __rte_unused struct rte_mbuf **tx_pkts,
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 914a0485b5..463ab73323 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -72,6 +72,7 @@ struct cpfl_txq_hairpin_info {
 struct cpfl_tx_queue {
 	struct idpf_tx_queue base;
 	struct cpfl_txq_hairpin_info hairpin_info;
+	rte_spinlock_t lock;
 };
 
 static inline uint16_t
@@ -124,4 +125,7 @@ uint16_t cpfl_dummy_recv_pkts(void *queue,
 uint16_t cpfl_dummy_xmit_pkts(void *queue,
 			      struct rte_mbuf **tx_pkts,
 			      uint16_t nb_pkts);
+uint16_t cpfl_xmit_pkts_to_vsi(struct cpfl_tx_queue *txq,
+			       struct rte_mbuf **tx_pkts,
+			       uint16_t nb_pkts, uint16_t vsi_id);
 #endif /* _CPFL_RXTX_H_ */
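
A note for reviewers on the Rx side: cpfl_repr_rx_burst() only consumes mbufs
from rx_queue->rx_ring; the producer that feeds that ring lives outside this
patch. The standalone sketch below is not driver code and every name in it is
made up for illustration; it only shows the single-producer/single-consumer
rte_ring handoff pattern the representor Rx path relies on.

/* Standalone illustration of the rte_ring mbuf handoff used by the
 * representor Rx burst.  All names and sizes here are hypothetical.
 */
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_ring.h>

#define REPR_RING_SIZE 1024
#define REPR_BURST     32

int
main(int argc, char **argv)
{
	struct rte_mempool *mp;
	struct rte_ring *ring;
	struct rte_mbuf *bufs[REPR_BURST];
	unsigned int n, i;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	mp = rte_pktmbuf_pool_create("repr_mp", 4096, 256, 0,
				     RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	ring = rte_ring_create("repr_ring", REPR_RING_SIZE, rte_socket_id(),
			       RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (mp == NULL || ring == NULL)
		return -1;

	/* Producer side: in the driver, packets are enqueued to the
	 * representor ring elsewhere, outside this patch.
	 */
	for (i = 0; i < REPR_BURST; i++)
		bufs[i] = rte_pktmbuf_alloc(mp);
	n = rte_ring_enqueue_burst(ring, (void **)bufs, REPR_BURST, NULL);

	/* Consumer side: the same dequeue pattern cpfl_repr_rx_burst() uses. */
	n = rte_ring_dequeue_burst(ring, (void **)bufs, REPR_BURST, NULL);
	for (i = 0; i < n; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_eal_cleanup();
	return 0;
}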