--- a/drivers/net/igc/igc_ethdev.c
+++ b/drivers/net/igc/igc_ethdev.c
@@ -88,6 +88,9 @@
#define IGC_I225_RX_LATENCY_1000 300
#define IGC_I225_RX_LATENCY_2500 1485
+uint64_t igc_timestamp_dynflag;
+int igc_timestamp_dynfield_offset = -1;
+
static const struct rte_eth_desc_lim rx_desc_lim = {
.nb_max = IGC_MAX_RXD,
.nb_min = IGC_MIN_RXD,
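
The two globals above cache the offset and flag handed back by rte_mbuf_dyn_tx_timestamp_register(), so the hot Tx path can reach the per-packet timestamp without a name lookup. As a minimal sketch (not part of the patch), any other component could rediscover the same pair by their well-known names once registration has happened:

    #include <rte_mbuf_dyn.h>
    #include <rte_bitops.h>

    static int ts_offset;    /* byte offset of the uint64_t timestamp field */
    static uint64_t ts_flag; /* ol_flags bit associated with the field */

    static int lookup_tx_timestamp(void)
    {
            int off = rte_mbuf_dynfield_lookup(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
            int bit = rte_mbuf_dynflag_lookup(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);

            if (off < 0 || bit < 0)
                    return -1; /* not registered yet */
            ts_offset = off;
            ts_flag = RTE_BIT64(bit);
            return 0;
    }
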
@@ -267,6 +270,7 @@ static int eth_igc_timesync_read_time(struct rte_eth_dev *dev,
struct timespec *timestamp);
static int eth_igc_timesync_write_time(struct rte_eth_dev *dev,
const struct timespec *timestamp);
+static int eth_igc_read_clock(struct rte_eth_dev *dev, uint64_t *clock);
static const struct eth_dev_ops eth_igc_ops = {
.dev_configure = eth_igc_configure,
@@ -327,6 +331,7 @@ static const struct eth_dev_ops eth_igc_ops = {
.timesync_adjust_time = eth_igc_timesync_adjust_time,
.timesync_read_time = eth_igc_timesync_read_time,
.timesync_write_time = eth_igc_timesync_write_time,
+ .read_clock = eth_igc_read_clock,
};
/*
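
Registering .read_clock plumbs the driver into the generic rte_eth_read_clock() API, which is how applications obtain timestamps in the timebase the PMD schedules against. A hedged usage sketch, assuming a started igc port:

    #include <rte_ethdev.h>

    static uint64_t nic_time_ns(uint16_t port_id)
    {
            uint64_t now = 0;

            /* Dispatches to eth_igc_read_clock() on igc ports. */
            if (rte_eth_read_clock(port_id, &now) != 0)
                    return 0; /* op unsupported or port invalid */
            return now;
    }
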
@@ -949,7 +954,12 @@ eth_igc_start(struct rte_eth_dev *dev)
struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
+ uint32_t nsec, sec, baset_l, baset_h, tqavctrl;
+ struct timespec system_time;
+ int64_t n, systime;
+ uint32_t txqctl = 0;
uint32_t *speeds;
+ uint16_t i;
int ret;
PMD_INIT_FUNC_TRACE();
@@ -1009,6 +1019,55 @@ eth_igc_start(struct rte_eth_dev *dev)
return ret;
}
+ if (igc_timestamp_dynflag > 0) {
+ adapter->base_time = 0;
+ adapter->cycle_time = NSEC_PER_SEC;
+
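+	/* Reset the time-sync SDP pin configuration and unmask Tx timestamp interrupts. */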
+ IGC_WRITE_REG(hw, IGC_TSSDP, 0);
+ IGC_WRITE_REG(hw, IGC_TSIM, TSINTR_TXTS);
+ IGC_WRITE_REG(hw, IGC_IMS, IGC_ICR_TS);
+
+ IGC_WRITE_REG(hw, IGC_TSAUXC, 0);
+ IGC_WRITE_REG(hw, IGC_I350_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
+ IGC_WRITE_REG(hw, IGC_TXPBS, IGC_TXPBSIZE_TSN);
+
+ tqavctrl = IGC_READ_REG(hw, IGC_I210_TQAVCTRL);
+ tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
+ IGC_TQAVCTRL_ENHANCED_QAV;
+ IGC_WRITE_REG(hw, IGC_I210_TQAVCTRL, tqavctrl);
+
+ IGC_WRITE_REG(hw, IGC_QBVCYCLET_S, adapter->cycle_time);
+ IGC_WRITE_REG(hw, IGC_QBVCYCLET, adapter->cycle_time);
+
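+	/* Put every Tx queue in launch-time mode with a window spanning the full cycle. */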
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ IGC_WRITE_REG(hw, IGC_STQT(i), 0);
+ IGC_WRITE_REG(hw, IGC_ENDQT(i), NSEC_PER_SEC);
+
+ txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;
+ IGC_WRITE_REG(hw, IGC_TXQCTL(i), txqctl);
+ }
+
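+	/* Seed SYSTIM from host CLOCK_REALTIME, then read back the running clock. */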
+ clock_gettime(CLOCK_REALTIME, &system_time);
+ IGC_WRITE_REG(hw, IGC_SYSTIML, system_time.tv_nsec);
+ IGC_WRITE_REG(hw, IGC_SYSTIMH, system_time.tv_sec);
+
+ nsec = IGC_READ_REG(hw, IGC_SYSTIML);
+ sec = IGC_READ_REG(hw, IGC_SYSTIMH);
+ systime = (int64_t)sec * NSEC_PER_SEC + (int64_t)nsec;
+
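+	/* Advance base_time to the first cycle boundary after the current time. */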
+ if (systime > adapter->base_time) {
+ n = (systime - adapter->base_time) /
+ adapter->cycle_time;
+ adapter->base_time = adapter->base_time +
+ (n + 1) * adapter->cycle_time;
+ }
+
+ baset_h = adapter->base_time / NSEC_PER_SEC;
+ baset_l = adapter->base_time % NSEC_PER_SEC;
+ IGC_WRITE_REG(hw, IGC_BASET_H, baset_h);
+ IGC_WRITE_REG(hw, IGC_BASET_L, baset_l);
+ }
+
igc_clear_hw_cntrs_base_generic(hw);
/* VLAN Offload Settings */
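
The rounding above guarantees the programmed BASET lies strictly in the future: with base_time = 0, cycle_time = 1e9 ns and a read-back systime of 2.3e9 ns, n = 2 and base_time becomes 3e9 ns. A standalone sketch of the same arithmetic (the helper name is hypothetical):

    #include <stdint.h>

    /* Advance base to the first cycle boundary strictly after now. */
    static int64_t align_base_time(int64_t base, int64_t cycle, int64_t now)
    {
            if (now > base) {
                    int64_t n = (now - base) / cycle;
                    base += (n + 1) * cycle;
            }
            return base; /* base 0, cycle 1e9, now 2300000000 -> 3000000000 */
    }
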
@@ -2804,6 +2863,17 @@ eth_igc_timesync_disable(struct rte_eth_dev *dev)
}
static int
+eth_igc_read_clock(__rte_unused struct rte_eth_dev *dev, uint64_t *clock)
+{
+ struct timespec system_time;
+
+ clock_gettime(CLOCK_REALTIME, &system_time);
+ *clock = system_time.tv_sec * NSEC_PER_SEC + system_time.tv_nsec;
+
+ return 0;
+}
+
+static int
eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
--- a/drivers/net/igc/igc_ethdev.h
+++ b/drivers/net/igc/igc_ethdev.h
@@ -87,7 +87,8 @@ extern "C" {
RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
RTE_ETH_TX_OFFLOAD_TCP_TSO | \
RTE_ETH_TX_OFFLOAD_UDP_TSO | \
- RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
+ RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP)
#define IGC_RSS_OFFLOAD_ALL ( \
RTE_ETH_RSS_IPV4 | \
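
With the capability macro extended, the offload is advertised in tx_offload_capa and can be requested at configure time. A hedged configuration sketch, error handling elided:

    #include <rte_ethdev.h>

    static int configure_launch_time(uint16_t port_id)
    {
            struct rte_eth_conf conf = {
                    .txmode = {
                            .offloads = RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP,
                    },
            };

            /* One Rx and one Tx queue, illustrative only. */
            return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }
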
@@ -240,6 +241,9 @@ struct igc_adapter {
struct igc_syn_filter syn_filter;
struct igc_rss_filter rss_filter;
struct igc_flow_list flow_list;
+
+ int64_t base_time;
+ uint32_t cycle_time;
};
#define IGC_DEV_PRIVATE(_dev) ((_dev)->data->dev_private)
--- a/drivers/net/igc/igc_txrx.c
+++ b/drivers/net/igc/igc_txrx.c
@@ -1411,6 +1411,19 @@ what_advctx_update(struct igc_tx_queue *txq, uint64_t flags,
return IGC_CTX_NUM;
}
+static uint32_t igc_tx_launchtime(uint64_t txtime, uint16_t port_id)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
+ uint64_t base_time = adapter->base_time;
+ uint64_t cycle_time = adapter->cycle_time;
+ uint32_t launchtime;
+
+ launchtime = (txtime - base_time) % cycle_time;
+
+ return rte_cpu_to_le_32(launchtime);
+}
+
/*
* This is a separate function, looking for optimization opportunity here
* Rework required to go with the pre-defined values.
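
igc_tx_launchtime() folds an absolute timestamp into an offset within the current Qbv cycle; the unsigned subtraction assumes txtime is not earlier than base_time. Worked numbers under that assumption:

    #include <assert.h>
    #include <stdint.h>

    static void launchtime_example(void)
    {
            uint64_t base = 3000000000ULL;   /* ns, from adapter->base_time */
            uint64_t cycle = 1000000000ULL;  /* ns, one Qbv cycle */
            uint64_t txtime = 5250000000ULL; /* application-chosen send time */

            /* 2.25e9 ns past base -> 0.25e9 ns into its cycle. */
            assert((txtime - base) % cycle == 250000000ULL);
    }
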
@@ -1418,7 +1431,8 @@ what_advctx_update(struct igc_tx_queue *txq, uint64_t flags,
static inline void
igc_set_xmit_ctx(struct igc_tx_queue *txq,
volatile struct igc_adv_tx_context_desc *ctx_txd,
- uint64_t ol_flags, union igc_tx_offload tx_offload)
+ uint64_t ol_flags, union igc_tx_offload tx_offload,
+ uint64_t txtime)
{
uint32_t type_tucmd_mlhl;
uint32_t mss_l4len_idx;
@@ -1492,16 +1506,23 @@ igc_set_xmit_ctx(struct igc_tx_queue *txq,
}
}
- txq->ctx_cache[ctx_curr].flags = ol_flags;
- txq->ctx_cache[ctx_curr].tx_offload.data =
- tx_offload_mask.data & tx_offload.data;
- txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
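+	/* A launch time is per packet, so contexts carrying one are never cached. */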
+ if (!txtime) {
+ txq->ctx_cache[ctx_curr].flags = ol_flags;
+ txq->ctx_cache[ctx_curr].tx_offload.data =
+ tx_offload_mask.data & tx_offload.data;
+ txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
+ }
ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
vlan_macip_lens = (uint32_t)tx_offload.data;
ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
- ctx_txd->u.launch_time = 0;
+
+ if (txtime)
+ ctx_txd->u.launch_time = igc_tx_launchtime(txtime,
+ txq->port_id);
+ else
+ ctx_txd->u.launch_time = 0;
}
static inline uint32_t
@@ -1551,6 +1572,7 @@ igc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
uint64_t tx_ol_req;
uint32_t new_ctx = 0;
union igc_tx_offload tx_offload = {0};
+ uint64_t ts;
tx_id = txq->tx_tail;
txe = &sw_ring[tx_id];
@@ -1698,8 +1720,16 @@ igc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txe->mbuf = NULL;
}
- igc_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
- tx_offload);
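+			/* Supply the per-packet launch time from the dynamic mbuf field when registered. */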
+ if (igc_timestamp_dynflag > 0) {
+ ts = *RTE_MBUF_DYNFIELD(tx_pkt,
+ igc_timestamp_dynfield_offset,
+ uint64_t *);
+ igc_set_xmit_ctx(txq, ctx_txd,
+ tx_ol_req, tx_offload, ts);
+ } else {
+ igc_set_xmit_ctx(txq, ctx_txd,
+ tx_ol_req, tx_offload, 0);
+ }
txe->last_id = tx_last;
tx_id = txe->next_id;
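
On the application side, a packet supplies its absolute send time through the dynamic field; note that this PMD keys off the registered dynflag globally rather than checking ol_flags per mbuf, so every packet on a timestamp-enabled port should carry a valid time. A hedged sketch, with ts_offset/ts_flag obtained as in igc_tx_init():

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>
    #include <rte_mbuf_dyn.h>

    static void send_at(struct rte_mbuf *m, uint16_t port_id, uint16_t queue_id,
                        int ts_offset, uint64_t ts_flag, uint64_t when_ns)
    {
            *RTE_MBUF_DYNFIELD(m, ts_offset, uint64_t *) = when_ns;
            m->ol_flags |= ts_flag; /* keep mbuf metadata consistent */
            rte_eth_tx_burst(port_id, queue_id, &m, 1);
    }
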
@@ -2081,9 +2111,11 @@ void
igc_tx_init(struct rte_eth_dev *dev)
{
struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint64_t offloads = dev->data->dev_conf.txmode.offloads;
uint32_t tctl;
uint32_t txdctl;
uint16_t i;
+ int err;
/* Setup the Base and Length of the Tx Descriptor Rings. */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
@@ -2113,6 +2145,16 @@ igc_tx_init(struct rte_eth_dev *dev)
IGC_WRITE_REG(hw, IGC_TXDCTL(txq->reg_idx), txdctl);
}
+ if (offloads & RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP) {
+ err = rte_mbuf_dyn_tx_timestamp_register(
+ &igc_timestamp_dynfield_offset,
+ &igc_timestamp_dynflag);
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "Cannot register mbuf field/flag for timestamp");
+ }
+ }
+
igc_config_collision_dist(hw);
/* Program the Transmit Control Register. */
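
rte_mbuf_dyn_tx_timestamp_register() is idempotent, repeated calls return the same offset and flag, which is why plain globals are safe here; note the error path only logs, since igc_tx_init() returns void. A minimal registration sketch outside the PMD (for a unit test, say):

    #include <rte_mbuf_dyn.h>

    static int ts_off = -1;
    static uint64_t ts_mask;

    static int register_ts(void)
    {
            /* Allocates the field/flag pair on first use, then reuses it. */
            return rte_mbuf_dyn_tx_timestamp_register(&ts_off, &ts_mask);
    }
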
--- a/drivers/net/igc/igc_txrx.h
+++ b/drivers/net/igc/igc_txrx.h
@@ -11,6 +11,9 @@
extern "C" {
#endif
+extern uint64_t igc_timestamp_dynflag;
+extern int igc_timestamp_dynfield_offset;
+
struct igc_rx_entry {
struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
};