@@ -264,6 +264,9 @@ for details.
Done
testpmd>
+* Use the device argument ``drv_ieee1588=1`` to enable IEEE 1588 support
+  at the driver level, e.g. ``dpaa:fm1-mac3,drv_ieee1588=1``.
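+
+  For example, ``dpdk-testpmd`` can be launched with this device argument
+  on the allow list (the ``fm1-mac3`` interface name is illustrative)::
+
+     dpdk-testpmd -a dpaa:fm1-mac3,drv_ieee1588=1 -- -i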
+
FMAN Config
-----------
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2017-2020 NXP
+ * Copyright 2017-2020,2022-2024 NXP
*
*/
/* System headers */
@@ -30,6 +30,7 @@
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
+#include <rte_kvargs.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_ring.h>
@@ -50,6 +51,7 @@
#include <process.h>
#include <fmlib/fm_ext.h>
+#define DRIVER_IEEE1588 "drv_ieee1588"
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_REPEAT_TIME 90 /* 9s (90 * 100ms) in total */
@@ -83,6 +85,7 @@ static uint64_t dev_tx_offloads_nodis =
static int is_global_init;
static int fmc_q = 1; /* Indicates the use of static fmc for distribution */
static int default_q; /* use default queue - FMC is not executed*/
+int dpaa_ieee_1588; /* indicates whether IEEE 1588 is enabled for the driver */
/* At present we only allow up to 4 push mode queues as default - as each of
* this queue need dedicated portal and we are short of portals.
*/
@@ -1826,9 +1829,15 @@ static int dpaa_tx_queue_init(struct qman_fq *fq,
opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
opts.fqd.context_b = 0;
- /* no tx-confirmation */
- opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
- opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
+ if (dpaa_ieee_1588) {
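+		/* Keep Tx confirmations enabled (the no-tx-confirmation bit
+		 * stays clear) so that PTP Tx timestamps can be retrieved.
+		 */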
+ opts.fqd.context_a.lo = 0;
+ opts.fqd.context_a.hi = fman_dealloc_bufs_mask_hi;
+ } else {
+ /* no tx-confirmation */
+ opts.fqd.context_a.lo = fman_dealloc_bufs_mask_lo;
+ opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
+ }
+
if (fman_ip_rev >= FMAN_V3) {
/* Set B0V bit in contextA to set ASPID to 0 */
opts.fqd.context_a.hi |= 0x04000000;
@@ -1861,9 +1870,10 @@ static int dpaa_tx_queue_init(struct qman_fq *fq,
return ret;
}
-#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
-/* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
-static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
+/* Initialise a DEBUG FQ ([rt]x_error, rx_default) or the DPAA Tx
+ * confirmation queue used for PTP support.
+ */
+static int dpaa_def_queue_init(struct qman_fq *fq, uint32_t fqid)
{
struct qm_mcc_initfq opts = {0};
int ret;
@@ -1872,15 +1882,15 @@ static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
ret = qman_reserve_fqid(fqid);
if (ret) {
- DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
+ DPAA_PMD_ERR("Reserve fqid %d failed with ret: %d",
fqid, ret);
return -EINVAL;
}
/* "map" this Rx FQ to one of the interfaces Tx FQID */
- DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
+ DPAA_PMD_DEBUG("Creating fq %p, fqid %d", fq, fqid);
ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
if (ret) {
- DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
+		DPAA_PMD_ERR("Create fqid %d failed with ret: %d",
fqid, ret);
return ret;
}
@@ -1888,11 +1898,10 @@ static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
ret = qman_init_fq(fq, 0, &opts);
if (ret)
- DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
+		DPAA_PMD_ERR("Init fqid %d failed with ret: %d",
fqid, ret);
return ret;
}
-#endif
/* Initialise a network interface */
static int
@@ -1927,6 +1936,43 @@ dpaa_dev_init_secondary(struct rte_eth_dev *eth_dev)
return 0;
}
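+
+/* kvargs handler: treat the devarg as set only when its value is "1" */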
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+ __rte_unused void *opaque)
+{
+ if (strcmp(value, "1"))
+ return -1;
+
+ return 0;
+}
+
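+/* Return 1 when the devargs contain "<key>=1", 0 otherwise */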
+static int
+dpaa_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+ struct rte_kvargs *kvlist;
+
+ if (!devargs)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (!kvlist)
+ return 0;
+
+ if (!rte_kvargs_count(kvlist, key)) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+
+ if (rte_kvargs_process(kvlist, key,
+ check_devargs_handler, NULL) < 0) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+ rte_kvargs_free(kvlist);
+
+ return 1;
+}
+
/* Initialise a network interface */
static int
dpaa_dev_init(struct rte_eth_dev *eth_dev)
@@ -1944,6 +1990,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
uint32_t dev_rx_fqids[DPAA_MAX_NUM_PCD_QUEUES];
int8_t dev_vspids[DPAA_MAX_NUM_PCD_QUEUES];
int8_t vsp_id = -1;
+ struct rte_device *dev = eth_dev->device;
PMD_INIT_FUNC_TRACE();
@@ -1960,6 +2007,9 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
dpaa_intf->ifid = dev_id;
dpaa_intf->cfg = cfg;
+ if (dpaa_get_devargs(dev->devargs, DRIVER_IEEE1588))
+ dpaa_ieee_1588 = 1;
+
memset((char *)dev_rx_fqids, 0,
sizeof(uint32_t) * DPAA_MAX_NUM_PCD_QUEUES);
@@ -2079,6 +2129,14 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
goto free_rx;
}
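+
+	/* Allocate per-core Tx confirmation FQs (used for PTP support) */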
+ dpaa_intf->tx_conf_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
+ MAX_DPAA_CORES, MAX_CACHELINE);
+ if (!dpaa_intf->tx_conf_queues) {
+		DPAA_PMD_ERR("Failed to alloc mem for TX conf queues");
+ ret = -ENOMEM;
+ goto free_rx;
+ }
+
/* If congestion control is enabled globally*/
if (td_tx_threshold) {
dpaa_intf->cgr_tx = rte_zmalloc(NULL,
@@ -2115,22 +2173,32 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
}
dpaa_intf->nb_tx_queues = MAX_DPAA_CORES;
-#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
- ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
- [DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
- if (ret) {
- DPAA_PMD_ERR("DPAA RX ERROR queue init failed!");
- goto free_tx;
- }
- dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
- ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
- [DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
- if (ret) {
- DPAA_PMD_ERR("DPAA TX ERROR queue init failed!");
- goto free_tx;
- }
- dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
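+	/* With the debug driver these FQs are always created; otherwise
+	 * they are created only when IEEE 1588 is enabled.
+	 */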
+#if !defined(RTE_LIBRTE_DPAA_DEBUG_DRIVER)
+ if (dpaa_ieee_1588)
#endif
+ {
+ ret = dpaa_def_queue_init(&dpaa_intf->debug_queues
+ [DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
+ if (ret) {
+ DPAA_PMD_ERR("DPAA RX ERROR queue init failed!");
+ goto free_tx;
+ }
+ dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
+ ret = dpaa_def_queue_init(&dpaa_intf->debug_queues
+ [DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
+ if (ret) {
+ DPAA_PMD_ERR("DPAA TX ERROR queue init failed!");
+ goto free_tx;
+ }
+ dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
+ ret = dpaa_def_queue_init(dpaa_intf->tx_conf_queues,
+ fman_intf->fqid_tx_confirm);
+ if (ret) {
+ DPAA_PMD_ERR("DPAA TX CONFIRM queue init failed!");
+ goto free_tx;
+ }
+ dpaa_intf->tx_conf_queues->dpaa_intf = dpaa_intf;
+ }
DPAA_PMD_DEBUG("All frame queues created");
@@ -2388,4 +2456,6 @@ static struct rte_dpaa_driver rte_dpaa_pmd = {
};
RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(net_dpaa,
+ DRIVER_IEEE1588 "=<int>");
RTE_LOG_REGISTER_DEFAULT(dpaa_logtype_pmd, NOTICE);
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2014-2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2017-2019 NXP
+ * Copyright 2017-2024 NXP
*
*/
#ifndef __DPAA_ETHDEV_H__
@@ -112,6 +112,7 @@
#define FMC_FILE "/tmp/fmc.bin"
extern struct rte_mempool *dpaa_tx_sg_pool;
+extern int dpaa_ieee_1588;
/* structure to free external and indirect
* buffers.
@@ -131,6 +132,7 @@ struct dpaa_if {
struct qman_fq *rx_queues;
struct qman_cgr *cgr_rx;
struct qman_fq *tx_queues;
+ struct qman_fq *tx_conf_queues;
struct qman_cgr *cgr_tx;
struct qman_fq debug_queues[2];
uint16_t nb_rx_queues;
@@ -1082,6 +1082,9 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
uint32_t seqn, index, flags[DPAA_TX_BURST_SIZE] = {0};
struct dpaa_sw_buf_free buf_to_free[DPAA_MAX_SGS * DPAA_MAX_DEQUEUE_NUM_FRAMES];
uint32_t free_count = 0;
+ struct qman_fq *fq = q;
+ struct dpaa_if *dpaa_intf = fq->dpaa_intf;
+ struct qman_fq *fq_txconf = dpaa_intf->tx_conf_queues;
if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
ret = rte_dpaa_portal_init((void *)0);
@@ -1162,6 +1165,10 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
mbuf = temp_mbuf;
realloc_mbuf = 0;
}
+
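+			/* Request Tx confirmation on the PTP confirmation FQ
+			 * so the Tx timestamp can be read back.
+			 */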
+ if (dpaa_ieee_1588)
+ fd_arr[loop].cmd |= DPAA_FD_CMD_FCO | qman_fq_fqid(fq_txconf);
+
indirect_buf:
state = tx_on_dpaa_pool(mbuf, bp_info,
&fd_arr[loop],
@@ -1190,6 +1197,9 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
sent += frames_to_send;
}
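+	/* Reap Tx confirmations generated for the transmitted frames */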
+ if (dpaa_ieee_1588)
+ dpaa_eth_tx_conf(fq_txconf);
+
DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", sent, q);
for (loop = 0; loop < free_count; loop++) {
@@ -1200,6 +1210,45 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
return sent;
}
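+
+/* Drain the Tx confirmation FQ: volatile-dequeue the confirmed frames
+ * and free their mbufs.
+ */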
+void
+dpaa_eth_tx_conf(void *q)
+{
+ struct qman_fq *fq = q;
+ struct qm_dqrr_entry *dq;
+ int num_tx_conf, ret, dq_num;
+ uint32_t vdqcr_flags = 0;
+
+ if (unlikely(rte_dpaa_bpid_info == NULL &&
+ rte_eal_process_type() == RTE_PROC_SECONDARY))
+ rte_dpaa_bpid_info = fq->bp_array;
+
+ if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_PMD_ERR("Failure in affining portal");
+ return;
+ }
+ }
+
+ num_tx_conf = DPAA_MAX_DEQUEUE_NUM_FRAMES - 2;
+
+ do {
+ dq_num = 0;
+ ret = qman_set_vdq(fq, num_tx_conf, vdqcr_flags);
+ if (ret)
+ return;
+ do {
+ dq = qman_dequeue(fq);
+ if (!dq)
+ continue;
+ dq_num++;
+ dpaa_display_frame_info(&dq->fd, fq->fqid, true);
+ qman_dqrr_consume(fq, dq);
+ dpaa_free_mbuf(&dq->fd);
+ } while (fq->flags & QMAN_FQ_STATE_VDQCR);
+ } while (dq_num == num_tx_conf);
+}
+
uint16_t
dpaa_eth_queue_tx_slow(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
@@ -281,6 +281,8 @@ uint16_t dpaa_eth_queue_tx_slow(void *q, struct rte_mbuf **bufs,
uint16_t nb_bufs);
uint16_t dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);
+void dpaa_eth_tx_conf(void *q);
+
uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused,
struct rte_mbuf **bufs __rte_unused,
uint16_t nb_bufs __rte_unused);