From patchwork Thu Oct 10 06:32:28 2019
X-Patchwork-Submitter: Gagandeep Singh <g.singh@nxp.com>
X-Patchwork-Id: 60857
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Gagandeep Singh <g.singh@nxp.com>
To: dev@dpdk.org, ferruh.yigit@intel.com
Cc: thomas@monjalon.net, Gagandeep Singh <g.singh@nxp.com>
Date: Thu, 10 Oct 2019 12:02:28 +0530
Message-Id: <20191010063234.32568-9-g.singh@nxp.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20191010063234.32568-1-g.singh@nxp.com>
References: <20191001110209.6047-1-g.singh@nxp.com> <20191010063234.32568-1-g.singh@nxp.com>
Subject: [dpdk-dev] [PATCH v4 08/14] net/pfe: add queue setup and release operations

This patch adds the RX/TX queue setup and release operations, and
advertises the supported checksum offloads.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Acked-by: Nipun Gupta
Acked-by: Akhil Goyal
---
 doc/guides/nics/features/pfe.ini |   2 +
 doc/guides/nics/pfe.rst          |   1 +
 drivers/net/pfe/pfe_ethdev.c     |  93 +++++++++++++++++++++++++
 drivers/net/pfe/pfe_hif.c        | 115 +++++++++++++++++++++++++++++++
 drivers/net/pfe/pfe_hif.h        |   1 +
 drivers/net/pfe/pfe_hif_lib.c    |  50 ++++++++++++++
 6 files changed, 262 insertions(+)

diff --git a/doc/guides/nics/features/pfe.ini b/doc/guides/nics/features/pfe.ini
index dc78e9500..3b96a1a0f 100644
--- a/doc/guides/nics/features/pfe.ini
+++ b/doc/guides/nics/features/pfe.ini
@@ -4,6 +4,8 @@
 ; Refer to default.ini for the full list of available PMD features.
 ;
 [Features]
+L3 checksum offload  = Y
+L4 checksum offload  = Y
 Linux VFIO           = Y
 ARMv8                = Y
 Usage doc            = Y
diff --git a/doc/guides/nics/pfe.rst b/doc/guides/nics/pfe.rst
index 2e713c3d5..23aa15b0d 100644
--- a/doc/guides/nics/pfe.rst
+++ b/doc/guides/nics/pfe.rst
@@ -93,6 +93,7 @@ the kernel layer for link status.
 PFE Features
 ~~~~~~~~~~~~
 
+- L3/L4 checksum offload
 - ARMv8
 
 Supported PFE SoCs
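For reference, the offload capabilities advertised here are the ones an
application would request through rte_eth_conf at configure time. The
following is a minimal, illustrative sketch using the standard ethdev
API; the port id and queue counts are assumptions, not part of this
patch:

#include <rte_ethdev.h>

/* Request the checksum offloads this PMD advertises; one Rx and one
 * Tx queue are assumed for illustration.
 */
static int
configure_cksum_offloads(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode.offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
				   DEV_RX_OFFLOAD_UDP_CKSUM |
				   DEV_RX_OFFLOAD_TCP_CKSUM,
		.txmode.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
				   DEV_TX_OFFLOAD_UDP_CKSUM |
				   DEV_TX_OFFLOAD_TCP_CKSUM,
	};

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}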
diff --git a/drivers/net/pfe/pfe_ethdev.c b/drivers/net/pfe/pfe_ethdev.c
index 08f3716b0..d59ccde11 100644
--- a/drivers/net/pfe/pfe_ethdev.c
+++ b/drivers/net/pfe/pfe_ethdev.c
@@ -17,6 +17,17 @@ struct pfe_vdev_init_params {
 	int8_t	gem_id;
 };
 static struct pfe *g_pfe;
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+		DEV_RX_OFFLOAD_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_UDP_CKSUM |
+		DEV_RX_OFFLOAD_TCP_CKSUM;
+
+/* Supported Tx offloads */
+static uint64_t dev_tx_offloads_sup =
+		DEV_TX_OFFLOAD_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM;
 
 /* TODO: make pfe_svr a runtime option.
  * Driver should be able to get the SVR
@@ -284,6 +295,8 @@ pfe_eth_info(struct rte_eth_dev *dev,
 	dev_info->max_rx_queues = dev->data->nb_rx_queues;
 	dev_info->max_tx_queues = dev->data->nb_tx_queues;
 	dev_info->min_rx_bufsize = HIF_RX_PKT_MIN_SIZE;
+	dev_info->rx_offload_capa = dev_rx_offloads_sup;
+	dev_info->tx_offload_capa = dev_tx_offloads_sup;
 	if (pfe_svr == SVR_LS1012A_REV1)
 		dev_info->max_rx_pktlen = MAX_MTU_ON_REV1 + PFE_ETH_OVERHEAD;
 	else
@@ -292,12 +305,92 @@ pfe_eth_info(struct rte_eth_dev *dev,
 	return 0;
 }
 
+/* Only the first mb_pool given on the first call of this API is used
+ * across the whole system; the nb_rx_desc and rx_conf params are unused.
+ */
+static int
+pfe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+		   __rte_unused uint16_t nb_rx_desc,
+		   __rte_unused unsigned int socket_id,
+		   __rte_unused const struct rte_eth_rxconf *rx_conf,
+		   struct rte_mempool *mb_pool)
+{
+	int rc = 0;
+	struct pfe *pfe;
+	struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+	pfe = priv->pfe;
+
+	if (queue_idx >= EMAC_RXQ_CNT) {
+		PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
+			    queue_idx, EMAC_RXQ_CNT);
+		return -1;
+	}
+
+	if (!pfe->hif.setuped) {
+		rc = pfe_hif_shm_init(pfe->hif.shm, mb_pool);
+		if (rc) {
+			PFE_PMD_ERR("Could not allocate buffer descriptors");
+			return -1;
+		}
+
+		pfe->hif.shm->pool = mb_pool;
+		if (pfe_hif_init_buffers(&pfe->hif)) {
+			PFE_PMD_ERR("Could not initialize buffer descriptors");
+			return -1;
+		}
+		hif_init();
+		hif_rx_enable();
+		hif_tx_enable();
+		pfe->hif.setuped = 1;
+	}
+	dev->data->rx_queues[queue_idx] = &priv->client.rx_q[queue_idx];
+	priv->client.rx_q[queue_idx].queue_id = queue_idx;
+
+	return 0;
+}
+
+static void
+pfe_rx_queue_release(void *q __rte_unused)
+{
+	PMD_INIT_FUNC_TRACE();
+}
+
+static void
+pfe_tx_queue_release(void *q __rte_unused)
+{
+	PMD_INIT_FUNC_TRACE();
+}
+
+static int
+pfe_tx_queue_setup(struct rte_eth_dev *dev,
+		   uint16_t queue_idx,
+		   __rte_unused uint16_t nb_desc,
+		   __rte_unused unsigned int socket_id,
+		   __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+	struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+	if (queue_idx >= emac_txq_cnt) {
+		PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
+			    queue_idx, emac_txq_cnt);
+		return -1;
+	}
+	dev->data->tx_queues[queue_idx] = &priv->client.tx_q[queue_idx];
+	priv->client.tx_q[queue_idx].queue_id = queue_idx;
+	return 0;
+}
+
 static const struct eth_dev_ops ops = {
 	.dev_start = pfe_eth_open,
 	.dev_stop = pfe_eth_stop,
 	.dev_close = pfe_eth_close,
 	.dev_configure = pfe_eth_configure,
 	.dev_infos_get = pfe_eth_info,
+	.rx_queue_setup = pfe_rx_queue_setup,
+	.rx_queue_release = pfe_rx_queue_release,
+	.tx_queue_setup = pfe_tx_queue_setup,
+	.tx_queue_release = pfe_tx_queue_release,
 };
 
 static int
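From the application side, the new hooks are exercised through the
standard ethdev queue setup calls. A hedged sketch follows (the pool
name and sizing are illustrative); note that, per the comment on
pfe_rx_queue_setup() above, only the mempool passed on the first Rx
queue setup is used system-wide, and the descriptor counts and
Rx/Tx conf arguments are ignored by this PMD:

#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

static int
setup_one_queue_pair(uint16_t port_id)
{
	struct rte_mempool *pool;
	int ret;

	/* Pool sizing below is an assumption for the example */
	pool = rte_pktmbuf_pool_create("pfe_pool", 1024, 256, 0,
				       RTE_MBUF_DEFAULT_BUF_SIZE,
				       rte_socket_id());
	if (pool == NULL)
		return -1;

	/* Queue index must stay below EMAC_RXQ_CNT / emac_txq_cnt */
	ret = rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(),
				     NULL, pool);
	if (ret < 0)
		return ret;

	return rte_eth_tx_queue_setup(port_id, 0, 128, rte_socket_id(),
				      NULL);
}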
diff --git a/drivers/net/pfe/pfe_hif.c b/drivers/net/pfe/pfe_hif.c
index 39a6ec8d4..bd5bf7a19 100644
--- a/drivers/net/pfe/pfe_hif.c
+++ b/drivers/net/pfe/pfe_hif.c
@@ -43,6 +43,121 @@ pfe_hif_free_descr(struct pfe_hif *hif)
 	rte_free(hif->descr_baseaddr_v);
 }
 
+/*
+ * pfe_hif_init_buffers
+ * This function initializes the HIF Rx/Tx ring descriptors and
+ * fills the Rx queue with buffers.
+ */
+int
+pfe_hif_init_buffers(struct pfe_hif *hif)
+{
+	struct hif_desc *desc, *first_desc_p;
+	uint32_t i = 0;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Check that enough Rx buffers are available in the shared memory */
+	if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
+		return -ENOMEM;
+
+	hif->rx_base = hif->descr_baseaddr_v;
+	memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));
+
+	/* Initialize Rx descriptors */
+	desc = hif->rx_base;
+	first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;
+
+	for (i = 0; i < hif->rx_ring_size; i++) {
+		/* Initialize Rx buffers from the shared memory */
+		struct rte_mbuf *mbuf =
+			(struct rte_mbuf *)hif->shm->rx_buf_pool[i];
+
+		/* The PFE mbuf structure is as follows:
+		 * ----------------------------------------------------------+
+		 * | mbuf | priv | headroom (annotation + PFE data) | data   |
+		 * ----------------------------------------------------------+
+		 *
+		 * As we expect additional information such as parse results,
+		 * eth id and queue id from the PFE block along with the data,
+		 * we have to provide additional memory for each packet to the
+		 * HIF Rx rings so that the PFE block can write its headers.
+		 * So we hand the HIF rings a data pointer calculated as:
+		 * mbuf->data_pointer - Required_header_size
+		 *
+		 * We are utilizing the HEADROOM area to receive the PFE
+		 * block headers. On packet reception, the HIF driver will use
+		 * the PFE header information to decide the client and to fill
+		 * the parse results; after that, the application can use or
+		 * overwrite the HEADROOM area.
+		 */
+		hif->rx_buf_vaddr[i] =
+			(void *)((size_t)mbuf->buf_addr + mbuf->data_off -
+				 PFE_PKT_HEADER_SZ);
+		hif->rx_buf_addr[i] =
+			(void *)(size_t)(rte_pktmbuf_iova(mbuf) -
+					 PFE_PKT_HEADER_SZ);
+		hif->rx_buf_len[i] = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
+
+		hif->shm->rx_buf_pool[i] = NULL;
+
+		writel(DDR_PHYS_TO_PFE(hif->rx_buf_addr[i]),
+		       &desc->data);
+		writel(0, &desc->status);
+
+		/*
+		 * Ensure everything else is written to DDR before
+		 * writing bd->ctrl
+		 */
+		rte_wmb();
+
+		writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
+			| BD_CTRL_DIR | BD_CTRL_DESC_EN
+			| BD_BUF_LEN(hif->rx_buf_len[i])), &desc->ctrl);
+
+		/* Chain descriptors */
+		writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
+		desc++;
+	}
+
+	/* Overwrite last descriptor to chain it to first one */
+	desc--;
+	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
+
+	hif->rxtoclean_index = 0;
+
+	/* Initialize Rx buffer descriptor ring base address */
+	writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);
+
+	hif->tx_base = hif->rx_base + hif->rx_ring_size;
+	first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
+		       hif->rx_ring_size;
+	memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));
+
+	/* Initialize Tx descriptors */
+	desc = hif->tx_base;
+
+	for (i = 0; i < hif->tx_ring_size; i++) {
+		/* Chain descriptors */
+		writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
+		writel(0, &desc->ctrl);
+		desc++;
+	}
+
+	/* Overwrite last descriptor to chain it to first one */
+	desc--;
+	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
+	hif->txavail = hif->tx_ring_size;
+	hif->txtosend = 0;
+	hif->txtoclean = 0;
+	hif->txtoflush = 0;
+
+	/* Initialize Tx buffer descriptor ring base address */
+	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);
+
+	return 0;
+}
+
 /*
  * pfe_hif_client_register
  *
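The buffer-address arithmetic in pfe_hif_init_buffers() can be read in
isolation as follows; this is an illustrative helper, not code from the
patch, and hdr_sz stands in for PFE_PKT_HEADER_SZ:

#include <rte_mbuf.h>

/* Reserve hdr_sz bytes of mbuf headroom in front of the data pointer
 * so the PFE block can deposit its annotation there.
 */
static inline void
pfe_buf_addrs(struct rte_mbuf *mbuf, uint16_t hdr_sz,
	      void **vaddr, rte_iova_t *iova)
{
	/* Virtual address handed to the HIF ring */
	*vaddr = (void *)((size_t)mbuf->buf_addr + mbuf->data_off - hdr_sz);
	/* Matching bus address for the DMA descriptor */
	*iova = rte_pktmbuf_iova(mbuf) - hdr_sz;
}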
diff --git a/drivers/net/pfe/pfe_hif.h b/drivers/net/pfe/pfe_hif.h
index 9c4e57730..d8bb26bc4 100644
--- a/drivers/net/pfe/pfe_hif.h
+++ b/drivers/net/pfe/pfe_hif.h
@@ -143,5 +143,6 @@ void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int
 int pfe_hif_init(struct pfe *pfe);
 void pfe_hif_exit(struct pfe *pfe);
 void pfe_hif_rx_idle(struct pfe_hif *hif);
+int pfe_hif_init_buffers(struct pfe_hif *hif);
 
 #endif /* _PFE_HIF_H_ */
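The circular chaining performed by pfe_hif_init_buffers() boils down to
the pattern sketched below; plain stores replace writel() and
DDR_PHYS_TO_PFE(), and demo_desc is a hypothetical stand-in for
struct hif_desc, so this is illustrative only:

#include <stdint.h>

struct demo_desc {
	uint32_t next;	/* bus address of the next descriptor */
};

/* Entry i points at entry i + 1, and the last entry wraps back to the
 * first; the wrap is the "overwrite last descriptor" step in the patch.
 */
static void
chain_ring(struct demo_desc *ring, uint32_t ring_iova, uint32_t ring_size)
{
	uint32_t i;

	for (i = 0; i < ring_size; i++)
		ring[i].next = (uint32_t)(ring_iova +
			((i + 1) % ring_size) * sizeof(struct demo_desc));
}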
diff --git a/drivers/net/pfe/pfe_hif_lib.c b/drivers/net/pfe/pfe_hif_lib.c
index 2012d896a..f5e290f27 100644
--- a/drivers/net/pfe/pfe_hif_lib.c
+++ b/drivers/net/pfe/pfe_hif_lib.c
@@ -15,6 +15,56 @@ unsigned int emac_txq_cnt;
 /*HIF shared memory Global variable */
 struct hif_shm ghif_shm;
 
+/* Clean up the HIF shared memory and release the HIF rx_buffer_pool.
+ * This function should be called after pfe_hif_exit.
+ *
+ * @param[in] hif_shm	Shared memory address location in DDR
+ */
+void
+pfe_hif_shm_clean(struct hif_shm *hif_shm)
+{
+	unsigned int i;
+	void *pkt;
+
+	for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
+		pkt = hif_shm->rx_buf_pool[i];
+		if (pkt)
+			rte_pktmbuf_free((struct rte_mbuf *)pkt);
+	}
+}
+
+/* Initialize the shared memory used between the HIF driver and clients;
+ * allocate the rx_buffer_pool required for the HIF Rx descriptors.
+ * This function should be called before initializing the HIF driver.
+ *
+ * @param[in] hif_shm	Shared memory address location in DDR
+ * @return 0 on success, <0 on failure to initialize
+ */
+int
+pfe_hif_shm_init(struct hif_shm *hif_shm, struct rte_mempool *mb_pool)
+{
+	unsigned int i;
+	struct rte_mbuf *mbuf;
+
+	memset(hif_shm, 0, sizeof(struct hif_shm));
+	hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;
+
+	for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
+		mbuf = rte_pktmbuf_alloc(mb_pool);
+		if (mbuf)
+			hif_shm->rx_buf_pool[i] = mbuf;
+		else
+			goto err0;
+	}
+
+	return 0;
+
+err0:
+	PFE_PMD_ERR("Low memory");
+	pfe_hif_shm_clean(hif_shm);
+	return -ENOMEM;
+}
+
 /*This function sends indication to HIF driver
  *
  * @param[in] hif	hif context
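Putting the pieces together, the bring-up and teardown order implied by
this patch (see pfe_rx_queue_setup() and the comments above
pfe_hif_shm_init()/pfe_hif_shm_clean()) is roughly as follows; the
wrapper function is a sketch, not driver code:

/* Bring-up and teardown order sketched from this patch */
static int
hif_lifecycle_sketch(struct pfe *pfe, struct rte_mempool *pool)
{
	/* 1. Pre-fill the shared rx_buf_pool from the mempool */
	if (pfe_hif_shm_init(pfe->hif.shm, pool))
		return -1;
	pfe->hif.shm->pool = pool;

	/* 2. Hand the buffers to the Rx/Tx descriptor rings */
	if (pfe_hif_init_buffers(&pfe->hif))
		return -1;

	/* 3. Start the HIF block, as pfe_rx_queue_setup() does */
	hif_init();
	hif_rx_enable();
	hif_tx_enable();

	/* ... datapath runs ... */

	/* Teardown: clean the shm only after pfe_hif_exit() */
	pfe_hif_exit(pfe);
	pfe_hif_shm_clean(pfe->hif.shm);

	return 0;
}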