From patchwork Wed Aug 9 15:51:32 2023
X-Patchwork-Submitter: "Xing, Beilei" <beilei.xing@intel.com>
X-Patchwork-Id: 130030
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: beilei.xing@intel.com
To: jingjing.wu@intel.com, mingxia.liu@intel.com
Cc: dev@dpdk.org, Beilei Xing <beilei.xing@intel.com>
Subject: [PATCH 17/19] net/cpfl: support dispatch process
Date: Wed, 9 Aug 2023 15:51:32 +0000
Message-Id: <20230809155134.539287-18-beilei.xing@intel.com>
In-Reply-To: <20230809155134.539287-1-beilei.xing@intel.com>
References: <20230809155134.539287-1-beilei.xing@intel.com>

From: Beilei Xing <beilei.xing@intel.com>

Add the dispatch process function cpfl_packets_dispatch(), which receives
packets from the exceptional vport and forwards each one to the Rx ring of
the representor matching the packet's source VSI.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c      |  39 ++++++++-
 drivers/net/cpfl/cpfl_ethdev.h      |   1 +
 drivers/net/cpfl/cpfl_representor.c |  80 +++++++++++++++++
 drivers/net/cpfl/cpfl_rxtx.c        | 131 ++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_rxtx.h        |   8 ++
 5 files changed, 257 insertions(+), 2 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index f674d93050..8569a0b81d 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -129,6 +129,13 @@ static const struct rte_cpfl_xstats_name_off rte_cpfl_stats_strings[] = {

 #define CPFL_NB_XSTATS		RTE_DIM(rte_cpfl_stats_strings)

+static const struct rte_mbuf_dynfield cpfl_source_metadata_param = {
+	.name = "cpfl_source_metadata",
+	.size = sizeof(uint16_t),
+	.align = __alignof__(uint16_t),
+	.flags = 0,
+};
+
 static int
 cpfl_dev_link_update(struct rte_eth_dev *dev,
 		     __rte_unused int wait_to_complete)
@@ -2382,7 +2389,7 @@ static int
 cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 {
 	struct cpfl_adapter_ext *adapter;
-	int retval;
+	int retval, offset;
 	uint16_t port_id;

 	adapter = rte_zmalloc("cpfl_adapter_ext",
@@ -2432,7 +2439,22 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 			PMD_INIT_LOG(ERR, "Failed to create exceptional vport. ");
 			goto close_ethdev;
 		}
+
+		/* register dynfield to carry src_vsi
+		 * TODO: is this a waste to use dynfield? Can we redefine a recv func like
+		 * below to carry src vsi directly by src_vsi[]?
+		 * idpf_exceptional_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+		 *			      uint16_t src_vsi[], uint16_t nb_pkts)
+		 */
+		offset = rte_mbuf_dynfield_register(&cpfl_source_metadata_param);
+		if (unlikely(offset == -1)) {
+			retval = -rte_errno;
+			PMD_INIT_LOG(ERR, "source metadata is disabled in mbuf");
+			goto close_ethdev;
+		}
+		cpfl_dynfield_source_metadata_offset = offset;
 	}
+
 	retval = cpfl_repr_create(pci_dev, adapter);
 	if (retval != 0) {
 		PMD_INIT_LOG(ERR, "Failed to create representors ");
@@ -2458,7 +2480,7 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 static int
 cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapter)
 {
-	int ret;
+	int ret, offset;

 	ret = cpfl_parse_devargs(pci_dev, adapter, false);
 	if (ret != 0) {
@@ -2478,6 +2500,19 @@ cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *ad
 			PMD_INIT_LOG(ERR, "Failed to create exceptional vport. ");
 			return ret;
 		}
+
+		/* register dynfield to carry src_vsi
+		 * TODO: is this a waste to use dynfield? Can we redefine a recv func like
+		 * below to carry src vsi directly by src_vsi[]?
+		 * idpf_exceptional_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+		 *			      uint16_t src_vsi[], uint16_t nb_pkts)
+		 */
+		offset = rte_mbuf_dynfield_register(&cpfl_source_metadata_param);
+		if (unlikely(offset == -1)) {
+			PMD_INIT_LOG(ERR, "source metadata is disabled in mbuf");
+			return -rte_errno;
+		}
+		cpfl_dynfield_source_metadata_offset = offset;
 	}

 	ret = cpfl_repr_create(pci_dev, adapter);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 33e810408b..5bd6f930b8 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -227,6 +227,7 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 			   struct cpchnl2_vport_id *vport_id,
 			   struct cpfl_vport_id *vi,
 			   struct cpchnl2_get_vport_info_response *response);
+int cpfl_packets_dispatch(void *arg);

 #define CPFL_DEV_TO_PCI(eth_dev) \
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 51b70ea346..a781cff403 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -4,6 +4,7 @@

 #include "cpfl_representor.h"
 #include "cpfl_rxtx.h"
+#include "cpfl_ethdev.h"

 static int
 cpfl_repr_whitelist_update(struct cpfl_adapter_ext *adapter,
@@ -853,3 +854,82 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *adapte

 	return 0;
 }
+
+static struct cpfl_repr *
+cpfl_get_repr_by_vsi(struct cpfl_adapter_ext *adapter,
+		     uint16_t vsi_id)
+{
+	const struct cpfl_repr_id *repr_id;
+	struct rte_eth_dev *dev;
+	struct cpfl_repr *repr;
+	uint32_t iter = 0;
+
+	rte_spinlock_lock(&adapter->repr_lock);
+
+	while (rte_hash_iterate(adapter->repr_whitelist_hash,
+				(const void **)&repr_id, (void **)&dev, &iter) >= 0) {
+		if (dev == NULL)
+			continue;
+
+		repr = CPFL_DEV_TO_REPR(dev);
+		if (repr->vport_info->vport_info.vsi_id == vsi_id) {
+			rte_spinlock_unlock(&adapter->repr_lock);
+			return repr;
+		}
+	}
+
+	rte_spinlock_unlock(&adapter->repr_lock);
+	return NULL;
+}
+
+#define PKT_DISPATCH_BURST 32
+/* Function to dispatch packets to representors' rx rings */
+int
+cpfl_packets_dispatch(void *arg)
+{
+	struct rte_eth_dev *dev = arg;
+	struct cpfl_vport *vport = dev->data->dev_private;
+	struct cpfl_adapter_ext *adapter = vport->itf.adapter;
+	struct cpfl_rx_queue **rxq =
+		(struct cpfl_rx_queue **)dev->data->rx_queues;
+	struct rte_mbuf *pkts_burst[PKT_DISPATCH_BURST];
+	struct cpfl_repr *repr;
+	struct rte_eth_dev_data *dev_data;
+	struct cpfl_repr_rx_queue *repr_rxq;
+	uint16_t src_vsi;
+	uint32_t nb_rx, nb_enq;
+	uint8_t i, j;
+
+	if (dev->data->dev_started == 0) {
+		/* skip if exceptional vport is not started */
+		return 0;
+	}
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		nb_rx = cpfl_splitq_recv_pkts(rxq[i], pkts_burst, PKT_DISPATCH_BURST);
+		for (j = 0; j < nb_rx; j++) {
+			src_vsi = *CPFL_MBUF_SOURCE_METADATA(pkts_burst[j]);
+			/* Get the repr according to source vsi */
+			repr = cpfl_get_repr_by_vsi(adapter, src_vsi);
+			if (unlikely(!repr)) {
+				rte_pktmbuf_free(pkts_burst[j]);
+				continue;
+			}
+			dev_data = (struct rte_eth_dev_data *)repr->itf.data;
+			if (unlikely(!dev_data->dev_started || !dev_data->rx_queue_state[0])) {
+				rte_pktmbuf_free(pkts_burst[j]);
+				continue;
+			}
+			repr_rxq = (struct cpfl_repr_rx_queue *)
+				(((struct rte_eth_dev_data *)repr->itf.data)->rx_queues[0]);
+			if (unlikely(!repr_rxq || !repr_rxq->rx_ring)) {
+				rte_pktmbuf_free(pkts_burst[j]);
+				continue;
+			}
+			nb_enq = rte_ring_enqueue_bulk(repr_rxq->rx_ring,
+						       (void *)&pkts_burst[j], 1, NULL);
+			if (!nb_enq)
+				/* enqueue fails, just free it */
+				rte_pktmbuf_free(pkts_burst[j]);
+		}
+	}
+	return 0;
+}
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 882efe04cf..a931b5ec12 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -1412,6 +1412,137 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
 	}
 }

+int cpfl_dynfield_source_metadata_offset = -1;
+
+uint16_t
+cpfl_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+		      uint16_t nb_pkts)
+{
+	volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc_ring;
+	volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
+	uint16_t pktlen_gen_bufq_id;
+	struct idpf_rx_queue *rxq;
+	const uint32_t *ptype_tbl;
+	uint8_t status_err0_qw1;
+	struct idpf_adapter *ad;
+	struct rte_mbuf *rxm;
+	uint16_t rx_id_bufq1;
+	uint16_t rx_id_bufq2;
+	uint64_t pkt_flags;
+	uint16_t pkt_len;
+	uint16_t bufq_id;
+	uint16_t gen_id;
+	uint16_t rx_id;
+	uint16_t nb_rx;
+	uint64_t ts_ns;
+
+	nb_rx = 0;
+	rxq = rx_queue;
+	ad = rxq->adapter;
+
+	if (unlikely(rxq == NULL) || unlikely(!rxq->q_started))
+		return nb_rx;
+
+	rx_id = rxq->rx_tail;
+	rx_id_bufq1 = rxq->bufq1->rx_next_avail;
+	rx_id_bufq2 = rxq->bufq2->rx_next_avail;
+	rx_desc_ring = rxq->rx_ring;
+	ptype_tbl = rxq->adapter->ptype_tbl;
+
+	if ((rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP) != 0)
+		rxq->hw_register_set = 1;
+
+	while (nb_rx < nb_pkts) {
+		rx_desc = &rx_desc_ring[rx_id];
+
+		pktlen_gen_bufq_id =
+			rte_le_to_cpu_16(rx_desc->pktlen_gen_bufq_id);
+		gen_id = (pktlen_gen_bufq_id &
+			  VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M) >>
+			 VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S;
+		if (gen_id != rxq->expected_gen_id)
+			break;
+
+		pkt_len = (pktlen_gen_bufq_id &
+			   VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M) >>
+			  VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_S;
+		if (pkt_len == 0)
+			RX_LOG(ERR, "Packet length is 0");
+
+		rx_id++;
+		if (unlikely(rx_id == rxq->nb_rx_desc)) {
+			rx_id = 0;
+			rxq->expected_gen_id ^= 1;
+		}
+
+		bufq_id = (pktlen_gen_bufq_id &
+			   VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M) >>
+			  VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S;
+		if (bufq_id == 0) {
+			rxm = rxq->bufq1->sw_ring[rx_id_bufq1];
+			rx_id_bufq1++;
+			if (unlikely(rx_id_bufq1 == rxq->bufq1->nb_rx_desc))
+				rx_id_bufq1 = 0;
+			rxq->bufq1->nb_rx_hold++;
+		} else {
+			rxm = rxq->bufq2->sw_ring[rx_id_bufq2];
+			rx_id_bufq2++;
+			if (unlikely(rx_id_bufq2 == rxq->bufq2->nb_rx_desc))
+				rx_id_bufq2 = 0;
+			rxq->bufq2->nb_rx_hold++;
+		}
+
+		rxm->pkt_len = pkt_len;
+		rxm->data_len = pkt_len;
+		rxm->data_off = RTE_PKTMBUF_HEADROOM;
+		rxm->next = NULL;
+		rxm->nb_segs = 1;
+		rxm->port = rxq->port_id;
+		rxm->ol_flags = 0;
+		rxm->packet_type =
+			ptype_tbl[(rte_le_to_cpu_16(rx_desc->ptype_err_fflags0) &
+				   VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M) >>
+				  VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S];
+
+		status_err0_qw1 = rx_desc->status_err0_qw1;
+		pkt_flags = idpf_splitq_rx_csum_offload(status_err0_qw1);
+		pkt_flags |= idpf_splitq_rx_rss_offload(rxm, rx_desc);
+		if (idpf_timestamp_dynflag > 0 &&
+		    (rxq->offloads & IDPF_RX_OFFLOAD_TIMESTAMP)) {
+			/* timestamp */
+			ts_ns = idpf_tstamp_convert_32b_64b(ad,
+							    rxq->hw_register_set,
+							    rte_le_to_cpu_32(rx_desc->ts_high));
+			rxq->hw_register_set = 0;
+			*RTE_MBUF_DYNFIELD(rxm,
+					   idpf_timestamp_dynfield_offset,
+					   rte_mbuf_timestamp_t *) = ts_ns;
+			rxm->ol_flags |= idpf_timestamp_dynflag;
+		}
+
+		if (likely(cpfl_dynfield_source_metadata_offset != -1))
+			*CPFL_MBUF_SOURCE_METADATA(rxm) =
+				rte_le_to_cpu_16(rx_desc->fmd4);
+
+		rxm->ol_flags |= pkt_flags;
+
+		rx_pkts[nb_rx++] = rxm;
+	}
+
+	if (nb_rx > 0) {
+		rxq->rx_tail = rx_id;
+		if (rx_id_bufq1 != rxq->bufq1->rx_next_avail)
+			rxq->bufq1->rx_next_avail = rx_id_bufq1;
+		if (rx_id_bufq2 != rxq->bufq2->rx_next_avail)
+			rxq->bufq2->rx_next_avail = rx_id_bufq2;
+
+		idpf_split_rx_bufq_refill(rxq->bufq1);
+		idpf_split_rx_bufq_refill(rxq->bufq2);
+	}
+
+	return nb_rx;
+}
+
 static inline void
 cpfl_set_tx_switch_ctx(uint16_t vsi_id, bool is_vsi,
 		       volatile union idpf_flex_tx_ctx_desc *ctx_desc)
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 463ab73323..39e5e115d6 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -81,6 +81,11 @@ cpfl_hw_qid_get(uint16_t start_qid, uint16_t offset)
 	return start_qid + offset;
 }

+extern int cpfl_dynfield_source_metadata_offset;
+
+#define CPFL_MBUF_SOURCE_METADATA(m) \
+	RTE_MBUF_DYNFIELD((m), cpfl_dynfield_source_metadata_offset, uint16_t *)
+
 static inline uint64_t
 cpfl_hw_qtail_get(uint64_t tail_start, uint16_t offset, uint64_t tail_spacing)
 {
@@ -128,4 +133,7 @@ uint16_t cpfl_dummy_xmit_pkts(void *queue,
 uint16_t cpfl_xmit_pkts_to_vsi(struct cpfl_tx_queue *txq,
 			       struct rte_mbuf **tx_pkts,
 			       uint16_t nb_pkts, uint16_t vsi_id);
+uint16_t cpfl_splitq_recv_pkts(void *rx_queue,
+			       struct rte_mbuf **rx_pkts,
+			       uint16_t nb_pkts);
 #endif /* _CPFL_RXTX_H_ */