diff mbox series

[v2,56/62] net/cnxk: add timesync enable/disable operations

Message ID 20210607175943.31690-57-ndabilpuram@marvell.com (mailing list archive)
State Changes Requested
Delegated to: Jerin Jacob
Headers show
Series Marvell CNXK Ethdev Driver | expand

Checks

Context Check Description
ci/checkpatch success coding style OK

Commit Message

Nithin Dabilpuram June 7, 2021, 5:59 p.m. UTC
From: Sunil Kumar Kori <skori@marvell.com>

This patch implements the timesync enable/disable ethdev operations for
the cn9k and cn10k platforms.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
 drivers/net/cnxk/cn10k_ethdev.c | 50 +++++++++++++++++++++++++++++++++++++++++
 drivers/net/cnxk/cn9k_ethdev.c  | 50 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 100 insertions(+)
diff mbox series

Patch

diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 5de7c7a..d8d3d2d 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -361,6 +361,54 @@  cn10k_nix_ptp_info_update_cb(struct roc_nix *nix, bool ptp_en)
 }
 
 static int
+cn10k_nix_timesync_enable(struct rte_eth_dev *eth_dev)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	int i, rc;
+
+	rc = cnxk_nix_timesync_enable(eth_dev);
+	if (rc)
+		return rc;
+
+	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
+	dev->tx_offload_flags |= NIX_TX_OFFLOAD_TSTAMP_F;
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+		nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);
+
+	/* Setting up the rx[tx]_offload_flags due to change
+	 * in rx[tx]_offloads.
+	 */
+	cn10k_eth_set_rx_function(eth_dev);
+	cn10k_eth_set_tx_function(eth_dev);
+	return 0;
+}
+
+static int
+cn10k_nix_timesync_disable(struct rte_eth_dev *eth_dev)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	int i, rc;
+
+	rc = cnxk_nix_timesync_disable(eth_dev);
+	if (rc)
+		return rc;
+
+	dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_TSTAMP_F;
+	dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_TSTAMP_F;
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+		nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);
+
+	/* Setting up the rx[tx]_offload_flags due to change
+	 * in rx[tx]_offloads.
+	 */
+	cn10k_eth_set_rx_function(eth_dev);
+	cn10k_eth_set_tx_function(eth_dev);
+	return 0;
+}
+
+static int
 cn10k_nix_dev_start(struct rte_eth_dev *eth_dev)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
@@ -406,6 +454,8 @@  nix_eth_dev_ops_override(void)
 	cnxk_eth_dev_ops.tx_queue_stop = cn10k_nix_tx_queue_stop;
 	cnxk_eth_dev_ops.dev_start = cn10k_nix_dev_start;
 	cnxk_eth_dev_ops.dev_ptypes_set = cn10k_nix_ptypes_set;
+	cnxk_eth_dev_ops.timesync_enable = cn10k_nix_timesync_enable;
+	cnxk_eth_dev_ops.timesync_disable = cn10k_nix_timesync_disable;
 }
 
 static int
diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index 130c8b4..0819f67 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -369,6 +369,54 @@  cn9k_nix_ptp_info_update_cb(struct roc_nix *nix, bool ptp_en)
 }
 
 static int
+cn9k_nix_timesync_enable(struct rte_eth_dev *eth_dev)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	int i, rc;
+
+	rc = cnxk_nix_timesync_enable(eth_dev);
+	if (rc)
+		return rc;
+
+	dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
+	dev->tx_offload_flags |= NIX_TX_OFFLOAD_TSTAMP_F;
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+		nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);
+
+	/* Setting up the rx[tx]_offload_flags due to change
+	 * in rx[tx]_offloads.
+	 */
+	cn9k_eth_set_rx_function(eth_dev);
+	cn9k_eth_set_tx_function(eth_dev);
+	return 0;
+}
+
+static int
+cn9k_nix_timesync_disable(struct rte_eth_dev *eth_dev)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	int i, rc;
+
+	rc = cnxk_nix_timesync_disable(eth_dev);
+	if (rc)
+		return rc;
+
+	dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_TSTAMP_F;
+	dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_TSTAMP_F;
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+		nix_form_default_desc(dev, eth_dev->data->tx_queues[i], i);
+
+	/* Setting up the rx[tx]_offload_flags due to change
+	 * in rx[tx]_offloads.
+	 */
+	cn9k_eth_set_rx_function(eth_dev);
+	cn9k_eth_set_tx_function(eth_dev);
+	return 0;
+}
+
+static int
 cn9k_nix_dev_start(struct rte_eth_dev *eth_dev)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
@@ -414,6 +462,8 @@  nix_eth_dev_ops_override(void)
 	cnxk_eth_dev_ops.tx_queue_stop = cn9k_nix_tx_queue_stop;
 	cnxk_eth_dev_ops.dev_start = cn9k_nix_dev_start;
 	cnxk_eth_dev_ops.dev_ptypes_set = cn9k_nix_ptypes_set;
+	cnxk_eth_dev_ops.timesync_enable = cn9k_nix_timesync_enable;
+	cnxk_eth_dev_ops.timesync_disable = cn9k_nix_timesync_disable;
 }
 
 static int