From patchwork Fri May 20 03:05:53 2022
X-Patchwork-Submitter: "Vargas, Hernan"
X-Patchwork-Id: 111462
X-Patchwork-Delegate: gakhil@marvell.com
From: Hernan Vargas <hernan.vargas@intel.com>
To: dev@dpdk.org, gakhil@marvell.com, trix@redhat.com
Cc: nicolas.chautru@intel.com, qi.z.zhang@intel.com, Hernan Vargas <hernan.vargas@intel.com>
Subject: [PATCH v2 2/5] baseband/fpga_5gnr_fec: add FPGA Mutex
Date: Thu, 19 May 2022 22:05:53 -0500
Message-Id: <20220520030556.3475133-3-hernan.vargas@intel.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20220520030556.3475133-1-hernan.vargas@intel.com>
References: <20220520030556.3475133-1-hernan.vargas@intel.com>
List-Id: DPDK patches and discussions <dev.dpdk.org>

From: Hernan Vargas <hernan.vargas@intel.com>

Add an explicit FPGA mutex when using the register interface for HARQ
memory preloading, to prevent multiple threads from using the same
interface in parallel. The feature is implemented through an MMIO
register exposed per VF and common to all queues.

Signed-off-by: Hernan Vargas <hernan.vargas@intel.com>
---
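For reference, a minimal, self-contained sketch (not part of the commit) of
the acquire/poll/release handshake that the new fpga_mutex_acquisition() and
fpga_mutex_free() helpers below implement: the queue writes its uuid into
bits 31:16 of the mutex register with bit 0 set to request ownership, polls
until the owner field reports that uuid, and releases by writing the uuid
back with bit 0 cleared. The reg_read_32()/reg_write_32() helpers and the
in-memory mutex_reg variable are hypothetical stand-ins for the driver's
fpga_reg_read_32()/fpga_reg_write_32() MMIO accessors and the
FPGA_5GNR_FEC_MUTEX register; this model grants ownership immediately,
whereas the real FPGA arbitrates between competing requesters.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Hypothetical stand-in for the FPGA_5GNR_FEC_MUTEX MMIO register. */
static uint32_t mutex_reg;

static uint32_t reg_read_32(void)
{
	return mutex_reg;
}

static void reg_write_32(uint32_t val)
{
	if (val & 1) {
		/* Acquisition request: granted immediately here if free. */
		if (mutex_reg == 0)
			mutex_reg = val & 0xffff0000u;
	} else if ((val >> 16) == (mutex_reg >> 16)) {
		/* Release: only the current owner clears the register. */
		mutex_reg = 0;
	}
}

int main(void)
{
	uint16_t uuid;
	uint32_t read;

	srand((unsigned int)time(NULL));
	uuid = (uint16_t)rand(); /* unique id for this DDR access */

	/* Request ownership and poll until the owner field matches. */
	do {
		reg_write_32(((uint32_t)uuid << 16) + 1);
		read = reg_read_32();
	} while ((read >> 16) != uuid);
	printf("mutex acquired, owner uuid 0x%x\n", uuid);

	/* ...register-based HARQ DDR access would happen here... */

	/* Release: write the uuid back with the request bit cleared. */
	reg_write_32((uint32_t)uuid << 16);
	printf("mutex released\n");
	return 0;
}

In the patch itself this handshake brackets only the register-based HARQ DDR
loopback paths (fpga_harq_write_loopback/fpga_harq_read_loopback); the
descriptor-based datapath is not affected.
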
 .../baseband/fpga_5gnr_fec/fpga_5gnr_fec.h    |  6 +-
 .../fpga_5gnr_fec/rte_fpga_5gnr_fec.c         | 77 ++++++++++++++-----
 2 files changed, 62 insertions(+), 21 deletions(-)

diff --git a/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h b/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
index ed8ce26eaa..993cf61974 100644
--- a/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
+++ b/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
@@ -82,7 +82,9 @@ enum {
 	FPGA_5GNR_FEC_DDR4_RD_DATA_REGS = 0x00000A30, /* len: 8B */
 	FPGA_5GNR_FEC_DDR4_ADDR_RDY_REGS = 0x00000A38, /* len: 1B */
 	FPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS = 0x00000A40, /* len: 1B */
-	FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS = 0x00000A48 /* len: 4B */
+	FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS = 0x00000A48, /* len: 4B */
+	FPGA_5GNR_FEC_MUTEX = 0x00000A60, /* len: 4B */
+	FPGA_5GNR_FEC_MUTEX_RESET = 0x00000A68 /* len: 4B */
 };
 
 /* FPGA 5GNR FEC Ring Control Registers */
@@ -264,6 +266,8 @@ struct __rte_cache_aligned fpga_queue {
 	uint32_t sw_ring_wrap_mask;
 	uint32_t irq_enable; /* Enable ops dequeue interrupts if set to 1 */
 	uint8_t q_idx; /* Queue index */
+	/** uuid used for MUTEX acquisition for DDR */
+	uint16_t ddr_mutex_uuid;
 	struct fpga_5gnr_fec_device *d;
 	/* MMIO register of shadow_tail used to enqueue descriptors */
 	void *shadow_tail_addr;
diff --git a/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c b/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
index 6737b74901..435b4d90d8 100644
--- a/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
+++ b/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
@@ -1194,11 +1194,45 @@ validate_dec_op(struct rte_bbdev_dec_op *op __rte_unused)
 }
 #endif
 
+static inline void
+fpga_mutex_acquisition(struct fpga_queue *q)
+{
+	uint32_t mutex_ctrl, mutex_read, cnt = 0;
+	/* Assign a unique id for the duration of the DDR access */
+	q->ddr_mutex_uuid = rand();
+	/* Request and wait for acquisition of the mutex */
+	mutex_ctrl = (q->ddr_mutex_uuid << 16) + 1;
+	do {
+		if (cnt > 0)
+			usleep(FPGA_TIMEOUT_CHECK_INTERVAL);
+		rte_bbdev_log_debug("Acquiring Mutex for %x\n",
+				q->ddr_mutex_uuid);
+		fpga_reg_write_32(q->d->mmio_base,
+				FPGA_5GNR_FEC_MUTEX,
+				mutex_ctrl);
+		mutex_read = fpga_reg_read_32(q->d->mmio_base,
+				FPGA_5GNR_FEC_MUTEX);
+		rte_bbdev_log_debug("Mutex %x cnt %d owner %x\n",
+				mutex_read, cnt, q->ddr_mutex_uuid);
+		cnt++;
+	} while ((mutex_read >> 16) != q->ddr_mutex_uuid);
+}
+
+static inline void
+fpga_mutex_free(struct fpga_queue *q)
+{
+	uint32_t mutex_ctrl = q->ddr_mutex_uuid << 16;
+	fpga_reg_write_32(q->d->mmio_base,
+			FPGA_5GNR_FEC_MUTEX,
+			mutex_ctrl);
+}
+
 static inline int
-fpga_harq_write_loopback(struct fpga_5gnr_fec_device *fpga_dev,
+fpga_harq_write_loopback(struct fpga_queue *q,
 		struct rte_mbuf *harq_input, uint16_t harq_in_length,
 		uint32_t harq_in_offset, uint32_t harq_out_offset)
 {
+	fpga_mutex_acquisition(q);
 	uint32_t out_offset = harq_out_offset;
 	uint32_t in_offset = harq_in_offset;
 	uint32_t left_length = harq_in_length;
@@ -1215,7 +1249,7 @@ fpga_harq_write_loopback(struct fpga_5gnr_fec_device *fpga_dev,
 	 * Get HARQ buffer size for each VF/PF: When 0x00, there is no
 	 * available DDR space for the corresponding VF/PF.
 	 */
-	reg_32 = fpga_reg_read_32(fpga_dev->mmio_base,
+	reg_32 = fpga_reg_read_32(q->d->mmio_base,
 			FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);
 	if (reg_32 < harq_in_length) {
 		left_length = reg_32;
@@ -1226,46 +1260,48 @@ fpga_harq_write_loopback(struct fpga_5gnr_fec_device *fpga_dev,
 			uint8_t *, in_offset);
 
 	while (left_length > 0) {
-		if (fpga_reg_read_8(fpga_dev->mmio_base,
+		if (fpga_reg_read_8(q->d->mmio_base,
 				FPGA_5GNR_FEC_DDR4_ADDR_RDY_REGS) == 1) {
-			fpga_reg_write_32(fpga_dev->mmio_base,
+			fpga_reg_write_32(q->d->mmio_base,
 					FPGA_5GNR_FEC_DDR4_WR_ADDR_REGS,
 					out_offset);
-			fpga_reg_write_64(fpga_dev->mmio_base,
+			fpga_reg_write_64(q->d->mmio_base,
 					FPGA_5GNR_FEC_DDR4_WR_DATA_REGS,
 					input[increment]);
 			left_length -= FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES;
 			out_offset += FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES;
 			increment++;
-			fpga_reg_write_8(fpga_dev->mmio_base,
+			fpga_reg_write_8(q->d->mmio_base,
 					FPGA_5GNR_FEC_DDR4_WR_DONE_REGS, 1);
 		}
 	}
 	while (last_transaction > 0) {
-		if (fpga_reg_read_8(fpga_dev->mmio_base,
+		if (fpga_reg_read_8(q->d->mmio_base,
 				FPGA_5GNR_FEC_DDR4_ADDR_RDY_REGS) == 1) {
-			fpga_reg_write_32(fpga_dev->mmio_base,
+			fpga_reg_write_32(q->d->mmio_base,
 					FPGA_5GNR_FEC_DDR4_WR_ADDR_REGS,
 					out_offset);
 			last_word = input[increment];
 			last_word &= (uint64_t)(1 << (last_transaction * 4))
 					- 1;
-			fpga_reg_write_64(fpga_dev->mmio_base,
+			fpga_reg_write_64(q->d->mmio_base,
 					FPGA_5GNR_FEC_DDR4_WR_DATA_REGS,
 					last_word);
-			fpga_reg_write_8(fpga_dev->mmio_base,
+			fpga_reg_write_8(q->d->mmio_base,
 					FPGA_5GNR_FEC_DDR4_WR_DONE_REGS, 1);
 			last_transaction = 0;
 		}
 	}
+	fpga_mutex_free(q);
 	return 1;
 }
 
 static inline int
-fpga_harq_read_loopback(struct fpga_5gnr_fec_device *fpga_dev,
+fpga_harq_read_loopback(struct fpga_queue *q,
 		struct rte_mbuf *harq_output, uint16_t harq_in_length,
 		uint32_t harq_in_offset, uint32_t harq_out_offset)
 {
+	fpga_mutex_acquisition(q);
 	uint32_t left_length, in_offset = harq_in_offset;
 	uint64_t reg;
 	uint32_t increment = 0;
@@ -1276,7 +1312,7 @@ fpga_harq_read_loopback(struct fpga_5gnr_fec_device *fpga_dev,
 	if (last_transaction > 0)
 		harq_in_length += (8 - last_transaction);
 
-	reg = fpga_reg_read_32(fpga_dev->mmio_base,
+	reg = fpga_reg_read_32(q->d->mmio_base,
 			FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);
 	if (reg < harq_in_length) {
 		harq_in_length = reg;
@@ -1302,14 +1338,14 @@ fpga_harq_read_loopback(struct fpga_5gnr_fec_device *fpga_dev,
 			uint8_t *, harq_out_offset);
 
 	while (left_length > 0) {
-		fpga_reg_write_32(fpga_dev->mmio_base,
+		fpga_reg_write_32(q->d->mmio_base,
 				FPGA_5GNR_FEC_DDR4_RD_ADDR_REGS, in_offset);
-		fpga_reg_write_8(fpga_dev->mmio_base,
+		fpga_reg_write_8(q->d->mmio_base,
 				FPGA_5GNR_FEC_DDR4_RD_DONE_REGS, 1);
-		reg = fpga_reg_read_8(fpga_dev->mmio_base,
+		reg = fpga_reg_read_8(q->d->mmio_base,
 				FPGA_5GNR_FEC_DDR4_RD_RDY_REGS);
 		while (reg != 1) {
-			reg = fpga_reg_read_8(fpga_dev->mmio_base,
+			reg = fpga_reg_read_8(q->d->mmio_base,
 					FPGA_5GNR_FEC_DDR4_RD_RDY_REGS);
 			if (reg == FPGA_DDR_OVERFLOW) {
 				rte_bbdev_log(ERR,
@@ -1317,14 +1353,15 @@ fpga_harq_read_loopback(struct fpga_5gnr_fec_device *fpga_dev,
 				return -1;
 			}
 		}
-		input[increment] = fpga_reg_read_64(fpga_dev->mmio_base,
+		input[increment] = fpga_reg_read_64(q->d->mmio_base,
 				FPGA_5GNR_FEC_DDR4_RD_DATA_REGS);
 		left_length -= FPGA_5GNR_FEC_DDR_RD_DATA_LEN_IN_BYTES;
 		in_offset += FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES;
 		increment++;
-		fpga_reg_write_8(fpga_dev->mmio_base,
+		fpga_reg_write_8(q->d->mmio_base,
 				FPGA_5GNR_FEC_DDR4_RD_DONE_REGS, 0);
 	}
+	fpga_mutex_free(q);
 	return 1;
 }
 
@@ -1467,13 +1504,13 @@ enqueue_ldpc_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op *op,
 	if (check_bit(dec->op_flags,
 			RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE
 			)) {
-		ret = fpga_harq_write_loopback(q->d, harq_in,
+		ret = fpga_harq_write_loopback(q, harq_in,
 				harq_in_length, harq_in_offset,
 				harq_out_offset);
 	} else if (check_bit(dec->op_flags,
 			RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE
 			)) {
-		ret = fpga_harq_read_loopback(q->d, harq_out,
+		ret = fpga_harq_read_loopback(q, harq_out,
 				harq_in_length, harq_in_offset,
 				harq_out_offset);
 		dec->harq_combined_output.length = harq_in_length;