[v4] net/ice: support IEEE 1588 PTP for E810
Checks
Commit Message
Add ice support for new ethdev APIs to enable/disable and read/write/adjust
IEEE1588 PTP timestamps. Currently, only the scalar path supports 1588 PTP;
the vector path doesn't.
The example command for running ptpclient is as below:
./build/examples/dpdk-ptpclient -c 1 -n 3 -- -T 0 -p 0x1
Signed-off-by: Simei Su <simei.su@intel.com>
---
v4:
* Rework code to consider ice_dev_start and ice_timesync_enable order.
v3:
* Rework code to support scalar path only.
* Update the doc/guides/nics/features/ice.ini to add "Timesync" feature.
* Add release notes.
v2:
* Change patchset to one patch based on share code update.
* Change per device offload to per queue offload.
doc/guides/nics/features/ice.ini | 1 +
doc/guides/rel_notes/release_21_11.rst | 2 +-
drivers/net/ice/ice_ethdev.c | 193 +++++++++++++++++++++++++++++++++
drivers/net/ice/ice_ethdev.h | 6 +
drivers/net/ice/ice_rxtx.c | 46 +++++++-
5 files changed, 245 insertions(+), 3 deletions(-)
Comments
> -----Original Message-----
> From: Su, Simei <simei.su@intel.com>
> Sent: Wednesday, September 22, 2021 4:47 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org; Wang, Haiyue <haiyue.wang@intel.com>; Su, Simei
> <simei.su@intel.com>
> Subject: [PATCH v4] net/ice: support IEEE 1588 PTP for E810
No need to mention E810; net/ice already implies it's E810.
>
> Add ice support for new ethdev APIs to enable/disable and read/write/adjust
> IEEE1588 PTP timstamps. Currently, only scalar path supports 1588 PTP, vector
> path doesn't.
timestamps
>
> The example command for running ptpclient is as below:
> ./build/examples/dpdk-ptpclient -c 1 -n 3 -- -T 0 -p 0x1
>
> Signed-off-by: Simei Su <simei.su@intel.com>
> ---
> v4:
> * Rework code to consider ice_dev_start and ice_timesync_enable order.
>
> v3:
> * Rework code to support scalar path only.
> * Update the doc/guides/nics/features/ice.ini to add "Timesync" feature.
> * Add release notes.
>
> v2:
> * Change patchset to one patch based on share code update.
> * Change per device offload to per queue offload.
>
> doc/guides/nics/features/ice.ini | 1 +
> doc/guides/rel_notes/release_21_11.rst | 2 +-
> drivers/net/ice/ice_ethdev.c | 193
> +++++++++++++++++++++++++++++++++
> drivers/net/ice/ice_ethdev.h | 6 +
> drivers/net/ice/ice_rxtx.c | 46 +++++++-
> 5 files changed, 245 insertions(+), 3 deletions(-)
>
> diff --git a/doc/guides/nics/features/ice.ini b/doc/guides/nics/features/ice.ini
> index e066787..a7978d2 100644
> --- a/doc/guides/nics/features/ice.ini
> +++ b/doc/guides/nics/features/ice.ini
> @@ -43,6 +43,7 @@ Linux = Y
> Windows = Y
> x86-32 = Y
> x86-64 = Y
> +Timesync = Y
>
> [rte_flow items]
> ah = Y
> diff --git a/doc/guides/rel_notes/release_21_11.rst
> b/doc/guides/rel_notes/release_21_11.rst
> index 1b9dac6..2005262 100644
> --- a/doc/guides/rel_notes/release_21_11.rst
> +++ b/doc/guides/rel_notes/release_21_11.rst
> @@ -71,7 +71,7 @@ New Features
>
> Added 1PPS out support by a devargs.
> * Added Rx timstamp support by dynamic mbuf on Flex Descriptor.
> -
> + * Added timesync API support under scalar path for E810.
>
> Removed Items
> -------------
> diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c index
> 06adf43..26491c3 100644
> --- a/drivers/net/ice/ice_ethdev.c
> +++ b/drivers/net/ice/ice_ethdev.c
> @@ -18,6 +18,7 @@
> #include "base/ice_flow.h"
> #include "base/ice_dcb.h"
> #include "base/ice_common.h"
> +#include "base/ice_ptp_hw.h"
>
> #include "rte_pmd_ice.h"
> #include "ice_ethdev.h"
> @@ -31,6 +32,8 @@
> #define ICE_HW_DEBUG_MASK_ARG "hw_debug_mask"
> #define ICE_ONE_PPS_OUT_ARG "pps_out"
>
> +#define ICE_CYCLECOUNTER_MASK 0xffffffffffffffffULL
> +
> uint64_t ice_timestamp_dynflag;
> int ice_timestamp_dynfield_offset = -1;
>
> @@ -149,6 +152,18 @@ static int ice_dev_udp_tunnel_port_add(struct
> rte_eth_dev *dev,
> struct rte_eth_udp_tunnel *udp_tunnel); static int
> ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
> struct rte_eth_udp_tunnel *udp_tunnel);
> +static int ice_timesync_enable(struct rte_eth_dev *dev); static int
> +ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
> + struct timespec *timestamp,
> + uint32_t flags);
> +static int ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
> + struct timespec *timestamp);
> +static int ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t
> +delta); static int ice_timesync_read_time(struct rte_eth_dev *dev,
> + struct timespec *timestamp);
> +static int ice_timesync_write_time(struct rte_eth_dev *dev,
> + const struct timespec *timestamp); static int
> +ice_timesync_disable(struct rte_eth_dev *dev);
>
> static const struct rte_pci_id pci_id_ice_map[] = {
> { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID,
> ICE_DEV_ID_E823L_BACKPLANE) }, @@ -232,6 +247,13 @@ static const struct
> eth_dev_ops ice_eth_dev_ops = {
> .udp_tunnel_port_del = ice_dev_udp_tunnel_port_del,
> .tx_done_cleanup = ice_tx_done_cleanup,
> .get_monitor_addr = ice_get_monitor_addr,
> + .timesync_enable = ice_timesync_enable,
> + .timesync_read_rx_timestamp = ice_timesync_read_rx_timestamp,
> + .timesync_read_tx_timestamp = ice_timesync_read_tx_timestamp,
> + .timesync_adjust_time = ice_timesync_adjust_time,
> + .timesync_read_time = ice_timesync_read_time,
> + .timesync_write_time = ice_timesync_write_time,
> + .timesync_disable = ice_timesync_disable,
> };
>
> /* store statistics names and its offset in stats structure */ @@ -5488,6
> +5510,177 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, }
>
> static int
> +ice_timesync_enable(struct rte_eth_dev *dev) {
> + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> + struct ice_adapter *ad =
> + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> + int ret;
> +
> + if (!dev->data->dev_started)
> + ad->ptp_ena = 1;
Please move ptp_ena = 1 to the end of the function.
And when will this flag be reset?
> + else {
> + if (!(dev->data->dev_conf.rxmode.offloads &
> + DEV_RX_OFFLOAD_TIMESTAMP)) {
> + PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
> + return -1;
> + } else
> + ad->ptp_ena = 1;
> + }
> +
> + if (hw->func_caps.ts_func_info.src_tmr_owned) {
> + ret = ice_ptp_init_phc(hw);
> + if (ret) {
> + PMD_DRV_LOG(ERR, "Failed to initialize PHC");
> + return -1;
> + }
> +
> + ret = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
> + if (ret) {
> + PMD_DRV_LOG(ERR,
> + "Failed to write PHC increment time value");
> + return -1;
> + }
> + }
> +
> + /* Initialize cycle counters for system time/RX/TX timestamp */
> + memset(&ad->systime_tc, 0, sizeof(struct rte_timecounter));
> + memset(&ad->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
> + memset(&ad->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
> +
> + ad->systime_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
> + ad->systime_tc.cc_shift = 0;
> + ad->systime_tc.nsec_mask = 0;
> +
> + ad->rx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
> + ad->rx_tstamp_tc.cc_shift = 0;
> + ad->rx_tstamp_tc.nsec_mask = 0;
> +
> + ad->tx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
> + ad->tx_tstamp_tc.cc_shift = 0;
> + ad->tx_tstamp_tc.nsec_mask = 0;
> +
> + return 0;
> +}
> +
> +static int
> +ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
> + struct timespec *timestamp, uint32_t flags) {
> + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> + struct ice_adapter *ad =
> + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> + struct ice_rx_queue *rxq;
> + uint32_t ts_high;
> + uint64_t time, ts_ns, ns;
> +
> + rxq = dev->data->rx_queues[flags];
> +
> + time = ice_read_time(hw);
> +
> + ts_high = rxq->time_high;
> + ts_ns = ice_tstamp_convert_32b_64b(time, ts_high);
> + ns = rte_timecounter_update(&ad->rx_tstamp_tc, ts_ns);
> + *timestamp = rte_ns_to_timespec(ns);
> +
> + return 0;
> +}
> +
> +static int
> +ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
> + struct timespec *timestamp)
> +{
> + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> + struct ice_adapter *ad =
> + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> + uint8_t lport;
> + uint64_t time, ts_ns, ns, tstamp;
> + const uint64_t mask = 0xFFFFFFFF;
> + int ret;
> +
> + lport = hw->port_info->lport;
> +
> + ret = ice_read_phy_tstamp(hw, lport, 0, &tstamp);
> + if (ret) {
> + PMD_DRV_LOG(ERR, "Failed to read phy timestamp");
> + return -1;
> + }
> +
> + time = ice_read_time(hw);
> +
> + ts_ns = ice_tstamp_convert_32b_64b(time, (tstamp >> 8) & mask);
> + ns = rte_timecounter_update(&ad->tx_tstamp_tc, ts_ns);
> + *timestamp = rte_ns_to_timespec(ns);
> +
> + return 0;
> +}
> +
> +static int
> +ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) {
> + struct ice_adapter *ad =
> + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> +
> + ad->systime_tc.nsec += delta;
> + ad->rx_tstamp_tc.nsec += delta;
> + ad->tx_tstamp_tc.nsec += delta;
> +
> + return 0;
> +}
> +
> +static int
> +ice_timesync_write_time(struct rte_eth_dev *dev, const struct timespec
> +*ts) {
> + struct ice_adapter *ad =
> + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> + uint64_t ns;
> +
> + ns = rte_timespec_to_ns(ts);
> +
> + ad->systime_tc.nsec = ns;
> + ad->rx_tstamp_tc.nsec = ns;
> + ad->tx_tstamp_tc.nsec = ns;
> +
> + return 0;
> +}
> +
> +static int
> +ice_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) {
> + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> + struct ice_adapter *ad =
> + ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> + uint64_t time, ns;
> +
> + time = ice_read_time(hw);
> + ns = rte_timecounter_update(&ad->systime_tc, time);
> + *ts = rte_ns_to_timespec(ns);
> +
> + return 0;
> +}
> +
> +static int
> +ice_timesync_disable(struct rte_eth_dev *dev) {
> + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> + uint64_t val;
> + uint8_t lport;
> +
> + lport = hw->port_info->lport;
> +
> + ice_clear_phy_tstamp(hw, lport, 0);
> +
> + val = ICE_READ_REG(hw, GLTSYN_ENA(0));
> + val &= ~GLTSYN_ENA_TSYN_ENA_M;
> + ICE_WRITE_REG(hw, GLTSYN_ENA(0), val);
> +
> + ICE_WRITE_REG(hw, GLTSYN_INCVAL_L(0), 0);
> + ICE_WRITE_REG(hw, GLTSYN_INCVAL_H(0), 0);
> +
> + return 0;
> +}
> +
> +static int
> ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
> struct rte_pci_device *pci_dev)
> {
> diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index
> ea9d892..448e186 100644
> --- a/drivers/net/ice/ice_ethdev.h
> +++ b/drivers/net/ice/ice_ethdev.h
> @@ -6,6 +6,7 @@
> #define _ICE_ETHDEV_H_
>
> #include <rte_kvargs.h>
> +#include <rte_time.h>
>
> #include <ethdev_driver.h>
>
> @@ -501,6 +502,11 @@ struct ice_adapter {
> struct ice_devargs devargs;
> enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
> uint16_t fdir_ref_cnt;
> + /* For PTP */
> + struct rte_timecounter systime_tc;
> + struct rte_timecounter rx_tstamp_tc;
> + struct rte_timecounter tx_tstamp_tc;
> + bool ptp_ena;
> #ifdef RTE_ARCH_X86
> bool rx_use_avx2;
> bool rx_use_avx512;
> diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index
> 717d3f0..5b2aa32 100644
> --- a/drivers/net/ice/ice_rxtx.c
> +++ b/drivers/net/ice/ice_rxtx.c
> @@ -270,6 +270,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
> struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
> uint32_t rxdid = ICE_RXDID_COMMS_OVS;
> uint32_t regval;
> + struct ice_adapter *ad = rxq->vsi->adapter;
>
> /* Set buffer size as the head split is disabled. */
> buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) - @@ -366,7
> +367,10 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
> regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
> QRXFLXP_CNTXT_RXDID_PRIO_M;
>
> - if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
> + if (!ad->ptp_ena) {
> + if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
> + regval |= QRXFLXP_CNTXT_TS_M;
> + } else
> regval |= QRXFLXP_CNTXT_TS_M;
Please simplify above logic.
@@ -43,6 +43,7 @@ Linux = Y
Windows = Y
x86-32 = Y
x86-64 = Y
+Timesync = Y
[rte_flow items]
ah = Y
@@ -71,7 +71,7 @@ New Features
Added 1PPS out support by a devargs.
* Added Rx timstamp support by dynamic mbuf on Flex Descriptor.
-
+ * Added timesync API support under scalar path for E810.
Removed Items
-------------
@@ -18,6 +18,7 @@
#include "base/ice_flow.h"
#include "base/ice_dcb.h"
#include "base/ice_common.h"
+#include "base/ice_ptp_hw.h"
#include "rte_pmd_ice.h"
#include "ice_ethdev.h"
@@ -31,6 +32,8 @@
#define ICE_HW_DEBUG_MASK_ARG "hw_debug_mask"
#define ICE_ONE_PPS_OUT_ARG "pps_out"
+#define ICE_CYCLECOUNTER_MASK 0xffffffffffffffffULL
+
uint64_t ice_timestamp_dynflag;
int ice_timestamp_dynfield_offset = -1;
@@ -149,6 +152,18 @@ static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
+static int ice_timesync_enable(struct rte_eth_dev *dev);
+static int ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags);
+static int ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+static int ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+static int ice_timesync_read_time(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+static int ice_timesync_write_time(struct rte_eth_dev *dev,
+ const struct timespec *timestamp);
+static int ice_timesync_disable(struct rte_eth_dev *dev);
static const struct rte_pci_id pci_id_ice_map[] = {
{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
@@ -232,6 +247,13 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
.udp_tunnel_port_del = ice_dev_udp_tunnel_port_del,
.tx_done_cleanup = ice_tx_done_cleanup,
.get_monitor_addr = ice_get_monitor_addr,
+ .timesync_enable = ice_timesync_enable,
+ .timesync_read_rx_timestamp = ice_timesync_read_rx_timestamp,
+ .timesync_read_tx_timestamp = ice_timesync_read_tx_timestamp,
+ .timesync_adjust_time = ice_timesync_adjust_time,
+ .timesync_read_time = ice_timesync_read_time,
+ .timesync_write_time = ice_timesync_write_time,
+ .timesync_disable = ice_timesync_disable,
};
/* store statistics names and its offset in stats structure */
@@ -5488,6 +5510,177 @@ ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
}
static int
+ice_timesync_enable(struct rte_eth_dev *dev)
+{
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ int ret;
+
+ if (!dev->data->dev_started)
+ ad->ptp_ena = 1;
+ else {
+ if (!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_TIMESTAMP)) {
+ PMD_DRV_LOG(ERR, "Rx timestamp offload not configured");
+ return -1;
+ } else
+ ad->ptp_ena = 1;
+ }
+
+ if (hw->func_caps.ts_func_info.src_tmr_owned) {
+ ret = ice_ptp_init_phc(hw);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to initialize PHC");
+ return -1;
+ }
+
+ ret = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to write PHC increment time value");
+ return -1;
+ }
+ }
+
+ /* Initialize cycle counters for system time/RX/TX timestamp */
+ memset(&ad->systime_tc, 0, sizeof(struct rte_timecounter));
+ memset(&ad->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+ memset(&ad->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+ ad->systime_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+ ad->systime_tc.cc_shift = 0;
+ ad->systime_tc.nsec_mask = 0;
+
+ ad->rx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+ ad->rx_tstamp_tc.cc_shift = 0;
+ ad->rx_tstamp_tc.nsec_mask = 0;
+
+ ad->tx_tstamp_tc.cc_mask = ICE_CYCLECOUNTER_MASK;
+ ad->tx_tstamp_tc.cc_shift = 0;
+ ad->tx_tstamp_tc.nsec_mask = 0;
+
+ return 0;
+}
+
+static int
+ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp, uint32_t flags)
+{
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct ice_rx_queue *rxq;
+ uint32_t ts_high;
+ uint64_t time, ts_ns, ns;
+
+ rxq = dev->data->rx_queues[flags];
+
+ time = ice_read_time(hw);
+
+ ts_high = rxq->time_high;
+ ts_ns = ice_tstamp_convert_32b_64b(time, ts_high);
+ ns = rte_timecounter_update(&ad->rx_tstamp_tc, ts_ns);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp)
+{
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ uint8_t lport;
+ uint64_t time, ts_ns, ns, tstamp;
+ const uint64_t mask = 0xFFFFFFFF;
+ int ret;
+
+ lport = hw->port_info->lport;
+
+ ret = ice_read_phy_tstamp(hw, lport, 0, &tstamp);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to read phy timestamp");
+ return -1;
+ }
+
+ time = ice_read_time(hw);
+
+ ts_ns = ice_tstamp_convert_32b_64b(time, (tstamp >> 8) & mask);
+ ns = rte_timecounter_update(&ad->tx_tstamp_tc, ts_ns);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ ad->systime_tc.nsec += delta;
+ ad->rx_tstamp_tc.nsec += delta;
+ ad->tx_tstamp_tc.nsec += delta;
+
+ return 0;
+}
+
+static int
+ice_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ uint64_t ns;
+
+ ns = rte_timespec_to_ns(ts);
+
+ ad->systime_tc.nsec = ns;
+ ad->rx_tstamp_tc.nsec = ns;
+ ad->tx_tstamp_tc.nsec = ns;
+
+ return 0;
+}
+
+static int
+ice_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ uint64_t time, ns;
+
+ time = ice_read_time(hw);
+ ns = rte_timecounter_update(&ad->systime_tc, time);
+ *ts = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+ice_timesync_disable(struct rte_eth_dev *dev)
+{
+ struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t val;
+ uint8_t lport;
+
+ lport = hw->port_info->lport;
+
+ ice_clear_phy_tstamp(hw, lport, 0);
+
+ val = ICE_READ_REG(hw, GLTSYN_ENA(0));
+ val &= ~GLTSYN_ENA_TSYN_ENA_M;
+ ICE_WRITE_REG(hw, GLTSYN_ENA(0), val);
+
+ ICE_WRITE_REG(hw, GLTSYN_INCVAL_L(0), 0);
+ ICE_WRITE_REG(hw, GLTSYN_INCVAL_H(0), 0);
+
+ return 0;
+}
+
+static int
ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
@@ -6,6 +6,7 @@
#define _ICE_ETHDEV_H_
#include <rte_kvargs.h>
+#include <rte_time.h>
#include <ethdev_driver.h>
@@ -501,6 +502,11 @@ struct ice_adapter {
struct ice_devargs devargs;
enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
uint16_t fdir_ref_cnt;
+ /* For PTP */
+ struct rte_timecounter systime_tc;
+ struct rte_timecounter rx_tstamp_tc;
+ struct rte_timecounter tx_tstamp_tc;
+ bool ptp_ena;
#ifdef RTE_ARCH_X86
bool rx_use_avx2;
bool rx_use_avx512;
@@ -270,6 +270,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
uint32_t rxdid = ICE_RXDID_COMMS_OVS;
uint32_t regval;
+ struct ice_adapter *ad = rxq->vsi->adapter;
/* Set buffer size as the head split is disabled. */
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
@@ -366,7 +367,10 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
QRXFLXP_CNTXT_RXDID_PRIO_M;
- if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ if (!ad->ptp_ena) {
+ if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ regval |= QRXFLXP_CNTXT_TS_M;
+ } else
regval |= QRXFLXP_CNTXT_TS_M;
ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
@@ -704,6 +708,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
tx_ctx.tso_ena = 1; /* tso enable */
tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
+ tx_ctx.tsyn_ena = 1;
ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
ice_tlan_ctx_info);
@@ -1603,6 +1608,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
struct ice_vsi *vsi = rxq->vsi;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
uint64_t time, ts_ns;
+ struct ice_adapter *ad = rxq->vsi->adapter;
rxdp = &rxq->rx_ring[rxq->rx_tail];
rxep = &rxq->sw_ring[rxq->rx_tail];
@@ -1660,6 +1666,15 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
}
}
+ if (ad->ptp_ena && ((mb->packet_type &
+ RTE_PTYPE_L2_MASK) ==
+ RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+ rxq->time_high =
+ rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
+ mb->timesync = rxq->queue_id;
+ pkt_flags |= PKT_RX_IEEE1588_PTP;
+ }
+
mb->ol_flags |= pkt_flags;
}
@@ -1846,6 +1861,7 @@ ice_recv_scattered_pkts(void *rx_queue,
struct ice_vsi *vsi = rxq->vsi;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
uint64_t time, ts_ns;
+ struct ice_adapter *ad = rxq->vsi->adapter;
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
@@ -1971,6 +1987,14 @@ ice_recv_scattered_pkts(void *rx_queue,
}
}
+ if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
+ == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+ rxq->time_high =
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+ first_seg->timesync = rxq->queue_id;
+ pkt_flags |= PKT_RX_IEEE1588_PTP;
+ }
+
first_seg->ol_flags |= pkt_flags;
/* Prefetch data of first segment, if configured to do so. */
rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
@@ -2329,6 +2353,7 @@ ice_recv_pkts(void *rx_queue,
struct ice_vsi *vsi = rxq->vsi;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
uint64_t time, ts_ns;
+ struct ice_adapter *ad = rxq->vsi->adapter;
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
@@ -2395,6 +2420,14 @@ ice_recv_pkts(void *rx_queue,
}
}
+ if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
+ RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+ rxq->time_high =
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+ rxm->timesync = rxq->queue_id;
+ pkt_flags |= PKT_RX_IEEE1588_PTP;
+ }
+
rxm->ol_flags |= pkt_flags;
/* copy old mbuf to rx_pkts */
rx_pkts[nb_rx++] = rxm;
@@ -2606,7 +2639,8 @@ ice_calc_context_desc(uint64_t flags)
static uint64_t mask = PKT_TX_TCP_SEG |
PKT_TX_QINQ |
PKT_TX_OUTER_IP_CKSUM |
- PKT_TX_TUNNEL_MASK;
+ PKT_TX_TUNNEL_MASK |
+ PKT_TX_IEEE1588_TMST;
return (flags & mask) ? 1 : 0;
}
@@ -2774,6 +2808,12 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
if (ol_flags & PKT_TX_TCP_SEG)
cd_type_cmd_tso_mss |=
ice_set_tso_ctx(tx_pkt, tx_offload);
+ else {
+ if (ol_flags & PKT_TX_IEEE1588_TMST)
+ cd_type_cmd_tso_mss |=
+ ((uint64_t)ICE_TX_CTX_DESC_TSYN <<
+ ICE_TXD_CTX_QW1_CMD_S);
+ }
ctx_txd->tunneling_params =
rte_cpu_to_le_32(cd_tunneling_params);
@@ -3175,6 +3215,8 @@ ice_set_rx_function(struct rte_eth_dev *dev)
ad->rx_use_avx512 = false;
ad->rx_use_avx2 = false;
rx_check_ret = ice_rx_vec_dev_check(dev);
+ if (ad->ptp_ena)
+ rx_check_ret = -1;
if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
ad->rx_vec_allowed = true;