[08/11] net/bnxt: update PTP support on Thor

Message ID 20230419201122.338133-9-stuart.schacher@broadcom.com (mailing list archive)
State Superseded, archived
Delegated to: Ajit Khaparde
Headers
Series sync Truflow support with latest release |

Checks

Context Check Description
ci/checkpatch warning coding style issues

Commit Message

Randy Schacher April 19, 2023, 8:11 p.m. UTC
  Add spinlock protection around PTP time queries and timestamp checks to the PTP feature.

Signed-off-by: Shahaji Bhosle <shahaji.bhosle@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
 drivers/net/bnxt/bnxt.h        |  5 ++
 drivers/net/bnxt/bnxt_ethdev.c | 11 +++++
 drivers/net/bnxt/bnxt_hwrm.c   | 11 ++++-
 drivers/net/bnxt/bnxt_ring.c   |  3 ++
 drivers/net/bnxt/bnxt_rxr.c    |  8 +++-
 drivers/net/bnxt/bnxt_txq.c    |  1 +
 drivers/net/bnxt/bnxt_txr.c    | 85 ++++++++++++++++++++++++++++++++--
 drivers/net/bnxt/bnxt_txr.h    |  1 +
 8 files changed, 119 insertions(+), 6 deletions(-)
  

Patch

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 09b108e297..9dd663e0c2 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -349,6 +349,7 @@  struct bnxt_ptp_cfg {
 					 BNXT_PTP_MSG_PDELAY_RESP)
 	uint8_t			tx_tstamp_en:1;
 	int			rx_filter;
+	uint8_t			filter_all;
 
 #define BNXT_PTP_RX_TS_L	0
 #define BNXT_PTP_RX_TS_H	1
@@ -372,6 +373,8 @@  struct bnxt_ptp_cfg {
 	/* On P5, the Rx timestamp is present in the Rx completion record */
 	uint64_t			rx_timestamp;
 	uint64_t			current_time;
+	uint64_t			old_time;
+	rte_spinlock_t			ptp_lock;
 };
 
 struct bnxt_coal {
@@ -733,6 +736,7 @@  struct bnxt {
 #define BNXT_FW_CAP_LINK_ADMIN		BIT(7)
 #define BNXT_FW_CAP_TRUFLOW_EN		BIT(8)
 #define BNXT_FW_CAP_VLAN_TX_INSERT	BIT(9)
+#define BNXT_FW_CAP_RX_ALL_PKT_TS	BIT(10)
 #define BNXT_TRUFLOW_EN(bp)	((bp)->fw_cap & BNXT_FW_CAP_TRUFLOW_EN &&\
 				 (bp)->app_id != 0xFF)
 
@@ -860,6 +864,7 @@  struct bnxt {
 	struct bnxt_led_info	*leds;
 	uint8_t			ieee_1588;
 	struct bnxt_ptp_cfg     *ptp_cfg;
+	uint8_t			ptp_all_rx_tstamp;
 	uint16_t		vf_resv_strategy;
 	struct bnxt_ctx_mem_info        *ctx;
 
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 033ec176bf..feba137959 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -1441,8 +1441,11 @@  static void bnxt_ptp_get_current_time(void *arg)
 	if (!ptp)
 		return;
 
+	rte_spinlock_lock(&ptp->ptp_lock);
+	ptp->old_time = ptp->current_time;
 	bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
 				&ptp->current_time);
+	rte_spinlock_unlock(&ptp->ptp_lock);
 	rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp);
 	if (rc != 0) {
 		PMD_DRV_LOG(ERR, "Failed to re-schedule PTP alarm\n");
@@ -1458,8 +1461,11 @@  static int bnxt_schedule_ptp_alarm(struct bnxt *bp)
 	if (bp->flags2 & BNXT_FLAGS2_PTP_ALARM_SCHEDULED)
 		return 0;
 
+	rte_spinlock_lock(&ptp->ptp_lock);
 	bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
 				&ptp->current_time);
+	ptp->old_time = ptp->current_time;
+	rte_spinlock_unlock(&ptp->ptp_lock);
 
 
 	rc = rte_eal_alarm_set(US_PER_S, bnxt_ptp_get_current_time, (void *)bp);
@@ -3615,12 +3621,15 @@  bnxt_timesync_enable(struct rte_eth_dev *dev)
 
 	ptp->rx_filter = 1;
 	ptp->tx_tstamp_en = 1;
+	ptp->filter_all = 1;
 	ptp->rxctl = BNXT_PTP_MSG_EVENTS;
 
 	rc = bnxt_hwrm_ptp_cfg(bp);
 	if (rc)
 		return rc;
 
+	rte_spinlock_init(&ptp->ptp_lock);
+	bp->ptp_all_rx_tstamp = 1;
 	memset(&ptp->tc, 0, sizeof(struct rte_timecounter));
 	memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
 	memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
@@ -3657,9 +3666,11 @@  bnxt_timesync_disable(struct rte_eth_dev *dev)
 	ptp->rx_filter = 0;
 	ptp->tx_tstamp_en = 0;
 	ptp->rxctl = 0;
+	ptp->filter_all = 0;
 
 	bnxt_hwrm_ptp_cfg(bp);
 
+	bp->ptp_all_rx_tstamp = 0;
 	if (!BNXT_CHIP_P5(bp))
 		bnxt_unmap_ptp_regs(bp);
 	else
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 38da3a114c..0c4b2aaaa9 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -669,6 +669,11 @@  int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
 		flags |=
 			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
 
+	if (ptp->filter_all)
+		flags |=  HWRM_PORT_MAC_CFG_INPUT_FLAGS_ALL_RX_TS_CAPTURE_ENABLE;
+	else if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS)
+		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_ALL_RX_TS_CAPTURE_DISABLE;
+
 	req.flags = rte_cpu_to_le_32(flags);
 	req.enables = rte_cpu_to_le_32
 		(HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
@@ -810,7 +815,7 @@  static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	struct hwrm_func_qcaps_input req = {.req_type = 0 };
 	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
 	uint16_t new_max_vfs;
-	uint32_t flags;
+	uint32_t flags, flags_ext2;
 
 	HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
 
@@ -898,6 +903,10 @@  static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		PMD_DRV_LOG(DEBUG, "Tunnel parsing capability is disabled, flags : %#x\n",
 			    bp->tunnel_disable_flag);
 
+	flags_ext2 = rte_le_to_cpu_32(resp->flags_ext2);
+	if (flags_ext2 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
+		bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
+
 unlock:
 	HWRM_UNLOCK();
 
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 686c3af4da..5b43a5f138 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -227,6 +227,9 @@  int bnxt_alloc_rings(struct bnxt *bp, unsigned int socket_id, uint16_t qidx,
 		tx_ring->bd_dma = mz_phys_addr + tx_ring_start;
 		tx_ring_info->tx_desc_mapping = tx_ring->bd_dma;
 		tx_ring->mem_zone = (const void *)mz;
+		tx_ring_info->nr_bds = rte_zmalloc("bnxt_nr_bds",
+						   sizeof(unsigned short) *
+						   tx_ring->ring_size, 0);
 
 		if (!tx_ring->bd)
 			return -ENOMEM;
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 1ab0ef2f5d..0cabfb583c 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -697,7 +697,7 @@  bnxt_get_rx_ts_p5(struct bnxt *bp, uint32_t rx_ts_cmpl)
 	if (!BNXT_CHIP_P5(bp) || !ptp)
 		return;
 
-	/* On Thor, Rx timestamps are provided directly in the
+	/* On P5, Rx timestamps are provided directly in the
 	 * Rx completion records to the driver. Only 32 bits of
 	 * the timestamp is present in the completion. Driver needs
 	 * to read the current 48 bit free running timer using the
@@ -705,6 +705,9 @@  bnxt_get_rx_ts_p5(struct bnxt *bp, uint32_t rx_ts_cmpl)
 	 * from the HWRM response with the lower 32 bits in the
 	 * Rx completion to produce the 48 bit timestamp for the Rx packet
 	 */
+	rte_spinlock_lock(&ptp->ptp_lock);
+	last_hwrm_time = ptp->old_time;
+	rte_spinlock_unlock(&ptp->ptp_lock);
 	pkt_time = (last_hwrm_time & BNXT_PTP_CURRENT_TIME_MASK) | rx_ts_cmpl;
 	if (rx_ts_cmpl < (uint32_t)last_hwrm_time) {
 		/* timer has rolled over */
@@ -922,7 +925,8 @@  static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
 
 	if (unlikely((rte_le_to_cpu_16(rxcmp->flags_type) &
 		      RX_PKT_CMPL_FLAGS_MASK) ==
-		      RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP))
+		      RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP) ||
+		      bp->ptp_all_rx_tstamp)
 		bnxt_get_rx_ts_p5(rxq->bp, rxcmp1->reorder);
 
 	if (cmp_type == CMPL_BASE_TYPE_RX_L2_V2) {
diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c
index d1d1fe8f1f..4df4604975 100644
--- a/drivers/net/bnxt/bnxt_txq.c
+++ b/drivers/net/bnxt/bnxt_txq.c
@@ -96,6 +96,7 @@  void bnxt_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
 		if (txq->tx_ring) {
 			bnxt_free_ring(txq->tx_ring->tx_ring_struct);
 			rte_free(txq->tx_ring->tx_ring_struct);
+			rte_free(txq->tx_ring->nr_bds);
 			rte_free(txq->tx_ring);
 		}
 
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 10b716a00b..d46b853c02 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -143,6 +143,75 @@  bnxt_zero_data_len_tso_segsz(struct rte_mbuf *tx_pkt, uint8_t data_len_chk)
 	return false;
 }
 
+/* Return true when the packet's Ethernet type -- outer, or inner of up to
+ * two stacked VLAN tags -- is IEEE 1588 (PTP), so the TX path can request a
+ * hardware timestamp even when the application did not set
+ * RTE_MBUF_F_TX_IEEE1588_TMST.  The original version computed 'proto' but
+ * always returned false, leaving the pkt_needs_ts TX-stamp path dead.
+ */
+static bool
+bnxt_check_pkt_needs_ts(struct rte_mbuf *m)
+{
+	const struct rte_ether_hdr *eth_hdr;
+	struct rte_ether_hdr _eth_hdr;
+	uint16_t eth_type, proto;
+	uint32_t off = 0;
+
+	eth_hdr = rte_pktmbuf_read(m, off, sizeof(_eth_hdr), &_eth_hdr);
+	if (unlikely(eth_hdr == NULL))
+		return false;
+	eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
+	off += sizeof(*eth_hdr);
+	if (eth_type == RTE_ETHER_TYPE_1588)
+		return true;
+	/* Check for single tagged and double tagged VLANs */
+	if (eth_type == RTE_ETHER_TYPE_VLAN) {
+		const struct rte_vlan_hdr *vh;
+		struct rte_vlan_hdr vh_copy;
+
+		vh = rte_pktmbuf_read(m, off, sizeof(*vh), &vh_copy);
+		if (unlikely(vh == NULL))
+			return false;
+		off += sizeof(*vh);
+		proto = rte_be_to_cpu_16(vh->eth_proto);
+		if (proto == RTE_ETHER_TYPE_1588)
+			return true;
+		if (proto == RTE_ETHER_TYPE_VLAN) {
+			const struct rte_vlan_hdr *vh2;
+			struct rte_vlan_hdr vh2_copy;
+
+			vh2 = rte_pktmbuf_read(m, off, sizeof(*vh2),
+					       &vh2_copy);
+			if (unlikely(vh2 == NULL))
+				return false;
+			off += sizeof(*vh2);
+			proto = rte_be_to_cpu_16(vh2->eth_proto);
+			if (proto == RTE_ETHER_TYPE_1588)
+				return true;
+		}
+	}
+	return false;
+}
+
+/* Walk the mbuf segment chain and report whether its actual segment count
+ * disagrees with the nb_segs field recorded in the first segment.
+ */
+static bool
+bnxt_invalid_nb_segs(struct rte_mbuf *tx_pkt)
+{
+	uint16_t count = 1;
+	struct rte_mbuf *seg;
+
+	for (seg = tx_pkt->next; seg != NULL; seg = seg->next)
+		count++;
+
+	return count != tx_pkt->nb_segs;
+}
+
+/* Sanity-check a to-be-transmitted mbuf before its buf_iova is programmed
+ * into a TX BD.  Returns 0 when the mbuf looks usable, -EINVAL otherwise.
+ */
+static int bnxt_invalid_mbuf(struct rte_mbuf *mbuf)
+{
+	uint32_t mbuf_size = sizeof(struct rte_mbuf) + mbuf->priv_size;
+	const char *reason;
+
+	/* Only validate in VA/PA IOVA modes; any other mode is accepted. */
+	if (unlikely(rte_eal_iova_mode() != RTE_IOVA_VA &&
+		     rte_eal_iova_mode() != RTE_IOVA_PA))
+		return 0;
+
+	/* Generic header/chain consistency check from the mbuf library. */
+	if (unlikely(rte_mbuf_check(mbuf, 1, &reason)))
+		return -EINVAL;
+
+	/* buf_iova must sit exactly one mbuf header plus private area past
+	 * the mempool element start.  NOTE(review): this assumes a direct,
+	 * mempool-backed mbuf; an externally attached data buffer would
+	 * fail this test -- confirm callers never pass those.
+	 */
+	if (unlikely(mbuf->buf_iova < mbuf_size ||
+		     (mbuf->buf_iova != rte_mempool_virt2iova(mbuf) + mbuf_size)))
+		return -EINVAL;
+
+	return 0;
+}
+
 static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 				struct bnxt_tx_queue *txq,
 				uint16_t *coal_pkts,
@@ -157,6 +226,7 @@  static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 	bool long_bd = false;
 	unsigned short nr_bds;
 	uint16_t prod;
+	bool pkt_needs_ts = 0;
 	struct rte_mbuf *m_seg;
 	struct rte_mbuf **tx_buf;
 	static const uint32_t lhint_arr[4] = {
@@ -169,6 +239,12 @@  static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 	if (unlikely(is_bnxt_in_error(txq->bp)))
 		return -EIO;
 
+	if (unlikely(bnxt_invalid_mbuf(tx_pkt)))
+		return -EINVAL;
+
+	if (unlikely(bnxt_invalid_nb_segs(tx_pkt)))
+		return -EINVAL;
+
 	long_bd = bnxt_xmit_need_long_bd(tx_pkt, txq);
 	nr_bds = long_bd + tx_pkt->nb_segs;
 
@@ -202,9 +278,13 @@  static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 	if (unlikely(bnxt_zero_data_len_tso_segsz(tx_pkt, 1)))
 		return -EIO;
 
+	if (unlikely(txq->bp->ptp_cfg != NULL && txq->bp->ptp_all_rx_tstamp == 1))
+		pkt_needs_ts = bnxt_check_pkt_needs_ts(tx_pkt);
+
 	prod = RING_IDX(ring, txr->tx_raw_prod);
 	tx_buf = &txr->tx_buf_ring[prod];
 	*tx_buf = tx_pkt;
+	txr->nr_bds[prod] = nr_bds;
 
 	txbd = &txr->tx_desc_ring[prod];
 	txbd->opaque = *coal_pkts;
@@ -341,7 +421,7 @@  static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 			/* IP CSO */
 			txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM;
 		} else if ((tx_pkt->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) ==
-			   RTE_MBUF_F_TX_IEEE1588_TMST) {
+			   RTE_MBUF_F_TX_IEEE1588_TMST || pkt_needs_ts) {
 			/* PTP */
 			txbd1->lflags |= TX_BD_LONG_LFLAGS_STAMP;
 		}
@@ -427,8 +507,7 @@  static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts)
 		unsigned short nr_bds;
 
 		tx_buf = &txr->tx_buf_ring[RING_IDX(ring, raw_cons)];
-		nr_bds = (*tx_buf)->nb_segs +
-			 bnxt_xmit_need_long_bd(*tx_buf, txq);
+		nr_bds = txr->nr_bds[RING_IDX(ring, raw_cons)];
 		for (j = 0; j < nr_bds; j++) {
 			mbuf = *tx_buf;
 			*tx_buf = NULL;
diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h
index b9b8a9b1a2..8e391ee58a 100644
--- a/drivers/net/bnxt/bnxt_txr.h
+++ b/drivers/net/bnxt/bnxt_txr.h
@@ -24,6 +24,7 @@  struct bnxt_tx_ring_info {
 
 	rte_iova_t		tx_desc_mapping;
 
+	unsigned short          *nr_bds;
 	struct bnxt_ring	*tx_ring_struct;
 };