@@ -161,6 +161,8 @@ struct dpaa2_queue {
dpaa2_queue_cb_dqrr_t *cb;
dpaa2_queue_cb_eqresp_free_t *cb_eqresp_free;
struct dpaa2_bp_info *bp_array;
+ /* To store the tx_conf_queue corresponding to the tx_queue */
+ struct dpaa2_queue *tx_conf_queue;
};
struct swp_active_dqs {
@@ -253,6 +253,28 @@ struct dpaa2_annot_hdr {
#define PARSE_ERROR_CODE(var) ((uint64_t)(var) & 0xFF00000000000000)
#define SOFT_PARSING_CONTEXT(var) ((uint64_t)(var) & 0x00FFFFFFFFFFFFFF)
+/* FAEAD (Frame Annotation Egress Action Descriptor) offset in the
+ * hardware annotation area
+ */
+#define DPAA2_FD_HW_ANNOT_FAEAD_OFFSET 0x58
+
+struct dpaa2_faead {
+ uint32_t fqid; /* Egress confirmation FQID */
+ uint32_t ctrl; /* FAEAD control bits (defined below) */
+};
+
+/* FAEAD bits */
+/* A2 OMB contains valid data */
+#define DPAA2_ANNOT_FAEAD_A2V 0x20000000
+/* Egress confirmation FQID in FAEAD contains valid data */
+#define DPAA2_ANNOT_FAEAD_A4V 0x08000000
+/* UPD is valid */
+#define DPAA2_ANNOT_FAEAD_UPDV 0x00001000
+/* EBDD is valid */
+#define DPAA2_ANNOT_FAEAD_EBDDV 0x00002000
+/* EBDD (External Buffer Deallocation Disable) */
+#define DPAA2_ANNOT_FAEAD_EBDD 0x00000020
+/* UPD (Update Prepended Data) */
+#define DPAA2_ANNOT_FAEAD_UPD 0x00000010
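+
+/* Illustrative sketch only (an assumption, not code from this patch):
+ * the FAEAD is expected DPAA2_FD_HW_ANNOT_FAEAD_OFFSET bytes into a
+ * frame's hardware annotation area, so it could be programmed as:
+ *
+ * struct dpaa2_faead *faead = (struct dpaa2_faead *)
+ * ((size_t)DPAA2_GET_FD_ADDR(fd) + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET);
+ * faead->ctrl |= DPAA2_ANNOT_FAEAD_A4V;
+ * faead->fqid = conf_fqid;
+ *
+ * where conf_fqid is a hypothetical tx confirmation FQID and fd a
+ * frame descriptor whose buffer carries the hardware annotation.
+ */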
+
/* Debug frame, otherwise supposed to be discarded */
#define DPAA2_ETH_FAS_DISC 0x80000000
/* MACSEC frame */
@@ -290,7 +290,10 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);
- tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
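+ /* With tx confirmation enabled, each tx queue needs a matching
+ * tx confirmation queue, so twice the tx queue count is allocated.
+ */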
+ if (priv->tx_conf_en)
+ tot_queues = priv->nb_rx_queues + 2 * priv->nb_tx_queues;
+ else
+ tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
RTE_CACHE_LINE_SIZE);
if (!mc_q) {
@@ -325,6 +328,28 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
goto fail_tx;
}
+ if (priv->tx_conf_en) {
+ /* Set up tx confirmation queues */
+ for (i = 0; i < priv->nb_tx_queues; i++) {
+ mc_q->eth_data = dev->data;
+ mc_q->tc_index = i;
+ mc_q->flow_id = 0;
+ priv->tx_conf_vq[i] = mc_q++;
+ dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
+ dpaa2_q->q_storage =
+ rte_malloc("dq_storage",
+ sizeof(struct queue_storage_info_t),
+ RTE_CACHE_LINE_SIZE);
+ if (!dpaa2_q->q_storage)
+ goto fail_tx_conf;
+
+ memset(dpaa2_q->q_storage, 0,
+ sizeof(struct queue_storage_info_t));
+ if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
+ goto fail_tx_conf;
+ }
+ }
+
vq_id = 0;
for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
@@ -334,6 +359,14 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
}
return 0;
+fail_tx_conf:
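+ /* Free the queue storage of the tx conf queues set up so far,
+ * then fall through to fail_tx to unwind the tx queues as well.
+ */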
+ i -= 1;
+ while (i >= 0) {
+ dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
+ rte_free(dpaa2_q->q_storage);
+ priv->tx_conf_vq[i--] = NULL;
+ }
+ i = priv->nb_tx_queues;
fail_tx:
i -= 1;
while (i >= 0) {
@@ -377,6 +410,14 @@ dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
rte_free(dpaa2_q->cscn);
}
+ if (priv->tx_conf_en) {
+ /* cleanup tx conf queue storage */
+ for (i = 0; i < priv->nb_tx_queues; i++) {
+ dpaa2_q = (struct dpaa2_queue *)
+ priv->tx_conf_vq[i];
+ rte_free(dpaa2_q->q_storage);
+ }
+ }
/*free memory for all queues (RX+TX) */
rte_free(priv->rx_vq[0]);
priv->rx_vq[0] = NULL;
@@ -673,6 +714,8 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
priv->tx_vq[tx_queue_id];
+ struct dpaa2_queue *dpaa2_tx_conf_q = (struct dpaa2_queue *)
+ priv->tx_conf_vq[tx_queue_id];
struct fsl_mc_io *dpni = priv->hw;
struct dpni_queue tx_conf_cfg;
struct dpni_queue tx_flow_cfg;
@@ -708,9 +751,14 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
if (tx_queue_id == 0) {
/*Set tx-conf and error configuration*/
- ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
- priv->token,
- DPNI_CONF_DISABLE);
+ if (priv->tx_conf_en)
+ ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
+ priv->token,
+ DPNI_CONF_AFFINE);
+ else
+ ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
+ priv->token,
+ DPNI_CONF_DISABLE);
if (ret) {
DPAA2_PMD_ERR("Error in set tx conf mode settings: "
"err=%d", ret);
@@ -761,6 +809,31 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
}
dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
dev->data->tx_queues[tx_queue_id] = dpaa2_q;
+
+ if (priv->tx_conf_en) {
+ dpaa2_q->tx_conf_queue = dpaa2_tx_conf_q;
+ options = options | DPNI_QUEUE_OPT_USER_CTX;
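+ /* Store the tx queue pointer as the confirmation queue's user
+ * context, so the confirmation handler can map a dequeued
+ * confirmation frame back to its originating tx queue.
+ */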
+ tx_conf_cfg.user_context = (size_t)(dpaa2_q);
+ ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
+ dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg);
+ if (ret) {
+ DPAA2_PMD_ERR("Error in setting the tx conf flow: "
+ "tc_index=%d, flow=%d err=%d",
+ dpaa2_tx_conf_q->tc_index,
+ dpaa2_tx_conf_q->flow_id, ret);
+ return -1;
+ }
+
+ ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
+ dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid);
+ if (ret) {
+ DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
+ return -1;
+ }
+ dpaa2_tx_conf_q->fqid = qid.fqid;
+ }
return 0;
}
@@ -2337,7 +2410,13 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
/* ... tx buffer layout ... */
memset(&layout, 0, sizeof(struct dpni_buffer_layout));
- layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+ if (priv->tx_conf_en) {
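+ /* With tx confirmation enabled, additionally request the
+ * frame timestamp in the hardware annotation area.
+ */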
+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+ layout.pass_timestamp = true;
+ } else {
+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+ }
layout.pass_frame_status = 1;
ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_TX, &layout);
@@ -2348,7 +2427,13 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
/* ... tx-conf and error buffer layout ... */
memset(&layout, 0, sizeof(struct dpni_buffer_layout));
- layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+ if (priv->tx_conf_en) {
+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+ layout.pass_timestamp = true;
+ } else {
+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+ }
layout.pass_frame_status = 1;
ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_TX_CONFIRM, &layout);
@@ -2460,6 +2545,7 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
struct rte_dpaa2_device *dpaa2_dev)
{
struct rte_eth_dev *eth_dev;
+ struct dpaa2_dev_priv *priv;
int diag;
if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) >
@@ -2507,6 +2593,9 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
return 0;
}
+ priv = eth_dev->data->dev_private;
+ priv->tx_conf_en = 0;
+
rte_eth_dev_release_port(eth_dev);
return diag;
}
@@ -107,8 +107,9 @@ struct dpaa2_dev_priv {
uint32_t options;
void *rx_vq[MAX_RX_QUEUES];
void *tx_vq[MAX_TX_QUEUES];
-
struct dpaa2_bp_list *bp_list; /**<Attached buffer pool list */
+ void *tx_conf_vq[MAX_TX_QUEUES]; /**<TX confirmation queues */
+ uint8_t tx_conf_en; /**<TX confirmation enabled */
uint8_t max_mac_filters;
uint8_t max_vlan_filters;
uint8_t num_rx_tc;
@@ -179,5 +180,6 @@ uint16_t dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs,
uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci);
void dpaa2_flow_clean(struct rte_eth_dev *dev);
+uint16_t dpaa2_dev_tx_conf(void *queue) __attribute__((unused));
#endif /* _DPAA2_ETHDEV_H */
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016-2018 NXP
+ * Copyright 2016-2019 NXP
*
*/
@@ -832,6 +832,110 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
return num_rx;
}
+uint16_t dpaa2_dev_tx_conf(void *queue)
+{
+ /* Consume tx confirmation frames for a given device and VQ */
+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+ struct qbman_result *dq_storage;
+ uint32_t fqid = dpaa2_q->fqid;
+ int ret, num_tx_conf = 0, num_pulled;
+ uint8_t pending, status;
+ struct qbman_swp *swp;
+ const struct qbman_fd *fd, *next_fd;
+ struct qbman_pull_desc pulldesc;
+ struct qbman_release_desc releasedesc;
+ uint32_t bpid;
+ uint64_t buf;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_PMD_ERR("Failure in affining portal\n");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
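+
+ /* Pull confirmation frames in batches of up to dpaa2_dqrr_size
+ * and release each confirmed frame's buffer back to its pool.
+ */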
+
+ do {
+ dq_storage = dpaa2_q->q_storage->dq_storage[0];
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+
+ qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
+
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ DPAA2_PMD_DP_DEBUG("VDQ command is not issued. "
+ "QBMAN is busy\n");
+ /* Portal was busy, try again */
+ continue;
+ }
+ break;
+ }
+
+ rte_prefetch0((void *)((size_t)(dq_storage + 1)));
+ /* Check if the previous issued command is completed. */
+ while (!qbman_check_command_complete(dq_storage))
+ ;
+
+ num_pulled = 0;
+ pending = 1;
+ do {
+ /* Loop until the dq_storage is updated with
+ * new token by QBMAN
+ */
+ while (!qbman_check_new_result(dq_storage))
+ ;
+ rte_prefetch0((void *)((size_t)(dq_storage + 2)));
+ /* Check whether the last pull command has expired and,
+ * if so, set the condition for loop termination
+ */
+ if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+ pending = 0;
+ /* Check for valid frame. */
+ status = qbman_result_DQ_flags(dq_storage);
+ if (unlikely((status &
+ QBMAN_DQ_STAT_VALIDFRAME) == 0))
+ continue;
+ }
+ fd = qbman_result_DQ_fd(dq_storage);
+
+ next_fd = qbman_result_DQ_fd(dq_storage + 1);
+ /* Prefetch the annotation area of the next frame */
+ rte_prefetch0((void *)(size_t)
+ (DPAA2_GET_FD_ADDR(next_fd) +
+ DPAA2_FD_PTA_SIZE + 16));
+
+ bpid = DPAA2_GET_FD_BPID(fd);
+
+ /* Create a release descriptor required for releasing
+ * buffers into QBMAN
+ */
+ qbman_release_desc_clear(&releasedesc);
+ qbman_release_desc_set_bpid(&releasedesc, bpid);
+
+ buf = DPAA2_GET_FD_ADDR(fd);
+ /* Feed the buffer back to BMAN; retry while the portal is busy */
+ do {
+ ret = qbman_swp_release(swp, &releasedesc,
+ &buf, 1);
+ } while (ret == -EBUSY);
+
+ dq_storage++;
+ num_tx_conf++;
+ num_pulled++;
+ } while (pending);
+
+ /* Last VDQ provided all packets and more packets are requested */
+ } while (num_pulled == dpaa2_dqrr_size);
+
+ dpaa2_q->rx_pkts += num_tx_conf;
+
+ return num_tx_conf;
+}
+
/*
* Callback to handle sending packets through WRIOP based interface
*/