@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (C) 2014 Freescale Semiconductor, Inc.
- * Copyright 2017-2019 NXP
+ * Copyright 2017-2024 NXP
*
*/
#ifndef _FSL_QBMAN_BASE_H
@@ -141,12 +141,23 @@ struct qbman_fd {
uint32_t saddr_hi;
uint32_t len_sl:18;
- uint32_t rsv1:14;
-
+ uint32_t rsv13:2;
+ uint32_t svfid:6;
+ uint32_t rsv12:2;
+ uint32_t spfid:2;
+ uint32_t rsv1:2;
uint32_t sportid:4;
- uint32_t rsv2:22;
+ uint32_t rsv2:1;
+ uint32_t sca:1;
+ uint32_t sat:2;
+ uint32_t sattr:3;
+ uint32_t svfa:1;
+ uint32_t stc:3;
uint32_t bmt:1;
- uint32_t rsv3:1;
+ uint32_t dvfid:6;
+ uint32_t rsv3:2;
+ uint32_t dpfid:2;
+ uint32_t rsv31:2;
uint32_t fmt:2;
uint32_t sl:1;
uint32_t rsv4:1;
@@ -154,12 +165,14 @@ struct qbman_fd {
uint32_t acc_err:4;
uint32_t rsv5:4;
uint32_t ser:1;
- uint32_t rsv6:3;
+ uint32_t rsv6:2;
+ uint32_t wns:1;
uint32_t wrttype:4;
uint32_t dqos:3;
uint32_t drbp:1;
uint32_t dlwc:2;
- uint32_t rsv7:2;
+ uint32_t rsv7:1;
+ uint32_t rns:1;
uint32_t rdttype:4;
uint32_t sqos:3;
uint32_t srbp:1;
@@ -182,7 +195,7 @@ struct qbman_fd {
uint32_t saddr_lo;
uint32_t saddr_hi:17;
- uint32_t rsv1:15;
+ uint32_t rsv1_att:15;
uint32_t len;
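The bitfield re-split above must keep each 32-bit word fully packed: the old rsv1:14 becomes rsv13:2 + svfid:6 + rsv12:2 + spfid:2 + rsv1:2, and rsv2:22 is carved into the sca/sat/sattr/svfa/stc source-routing fields plus the dvfid/dpfid destination ids, so every group still sums to 32 bits. A minimal compile-time sanity sketch (assuming the patched fsl_qbman_base.h is on the include path; not part of this patch):

#include <assert.h>
#include <fsl_qbman_base.h>

/* The DPAA2 frame descriptor is eight 32-bit words; re-splitting reserved
 * bits must not change its size or the offsets of later words. */
static_assert(sizeof(struct qbman_fd) == 32,
	"FD bitfield re-split changed the descriptor size");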
@@ -22,7 +22,7 @@ uint32_t dpaa2_coherent_alloc_cache;
static inline int
qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
uint32_t len, struct qbman_fd *fd,
- struct rte_dpaa2_qdma_rbp *rbp, int ser)
+ struct dpaa2_qdma_rbp *rbp, int ser)
{
fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));
@@ -93,7 +93,7 @@ qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
static void
dpaa2_qdma_populate_fle(struct qbman_fle *fle,
uint64_t fle_iova,
- struct rte_dpaa2_qdma_rbp *rbp,
+ struct dpaa2_qdma_rbp *rbp,
uint64_t src, uint64_t dest,
size_t len, uint32_t flags, uint32_t fmt)
{
@@ -114,7 +114,6 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,
/* source */
sdd->read_cmd.portid = rbp->sportid;
sdd->rbpcmd_simple.pfid = rbp->spfid;
- sdd->rbpcmd_simple.vfa = rbp->vfa;
+ sdd->rbpcmd_simple.vfa = rbp->svfa;
sdd->rbpcmd_simple.vfid = rbp->svfid;
if (rbp->srbp) {
@@ -127,7 +126,6 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,
/* destination */
sdd->write_cmd.portid = rbp->dportid;
sdd->rbpcmd_simple.pfid = rbp->dpfid;
- sdd->rbpcmd_simple.vfa = rbp->vfa;
+ sdd->rbpcmd_simple.vfa = rbp->dvfa;
sdd->rbpcmd_simple.vfid = rbp->dvfid;
if (rbp->drbp) {
@@ -178,7 +176,7 @@ dpdmai_dev_set_fd_us(struct qdma_virt_queue *qdma_vq,
struct rte_dpaa2_qdma_job **job,
uint16_t nb_jobs)
{
- struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+ struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
struct rte_dpaa2_qdma_job **ppjob;
size_t iova;
int ret = 0, loop;
@@ -276,7 +274,7 @@ dpdmai_dev_set_multi_fd_lf_no_rsp(struct qdma_virt_queue *qdma_vq,
struct rte_dpaa2_qdma_job **job,
uint16_t nb_jobs)
{
- struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+ struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
struct rte_dpaa2_qdma_job **ppjob;
uint16_t i;
void *elem;
@@ -322,7 +320,7 @@ dpdmai_dev_set_multi_fd_lf(struct qdma_virt_queue *qdma_vq,
struct rte_dpaa2_qdma_job **job,
uint16_t nb_jobs)
{
- struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+ struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
struct rte_dpaa2_qdma_job **ppjob;
uint16_t i;
int ret;
@@ -375,7 +373,7 @@ dpdmai_dev_set_sg_fd_lf(struct qdma_virt_queue *qdma_vq,
struct rte_dpaa2_qdma_job **job,
uint16_t nb_jobs)
{
- struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+ struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
struct rte_dpaa2_qdma_job **ppjob;
void *elem;
struct qbman_fle *fle;
@@ -1223,17 +1221,38 @@ rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan)
qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SG_FORMAT;
}
-/* Enable RBP */
-void
-rte_dpaa2_qdma_vchan_rbp_enable(int16_t dev_id, uint16_t vchan,
- struct rte_dpaa2_qdma_rbp *rbp_config)
+static int
+dpaa2_qdma_vchan_rbp_set(struct qdma_virt_queue *vq,
+ const struct rte_dma_vchan_conf *conf)
{
- struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
- struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
- struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
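+ /* Destination is a PCIe endpoint: program write-side route-by-port. */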
+ if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+ conf->direction == RTE_DMA_DIR_DEV_TO_DEV) {
+ if (conf->dst_port.port_type != RTE_DMA_PORT_PCIE)
+ return -EINVAL;
+ vq->rbp.enable = 1;
+ vq->rbp.dportid = conf->dst_port.pcie.coreid;
+ vq->rbp.dpfid = conf->dst_port.pcie.pfid;
+ if (conf->dst_port.pcie.vfen) {
+ vq->rbp.dvfa = 1;
+ vq->rbp.dvfid = conf->dst_port.pcie.vfid;
+ }
+ vq->rbp.drbp = 1;
+ }
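+ /* Source is a PCIe endpoint: program read-side route-by-port. */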
+ if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+ conf->direction == RTE_DMA_DIR_DEV_TO_DEV) {
+ if (conf->src_port.port_type != RTE_DMA_PORT_PCIE)
+ return -EINVAL;
+ vq->rbp.enable = 1;
+ vq->rbp.sportid = conf->src_port.pcie.coreid;
+ vq->rbp.spfid = conf->src_port.pcie.pfid;
+ if (conf->src_port.pcie.vfen) {
+ vq->rbp.svfa = 1;
+ vq->rbp.svfid = conf->src_port.pcie.vfid;
+ }
+ vq->rbp.srbp = 1;
+ }
- memcpy(&qdma_dev->vqs[vchan].rbp, rbp_config,
- sizeof(struct rte_dpaa2_qdma_rbp));
+ return 0;
}
static int
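With the dedicated RBP call removed, route-by-port is now derived entirely from the generic vchan configuration. A hedged application-side sketch (device id, vchan and PCIe ids are illustrative placeholders, not values taken from this patch):

#include <rte_dmadev.h>

static int
setup_mem_to_pcie_vchan(int16_t dev_id, uint16_t vchan)
{
	struct rte_dma_vchan_conf conf = {
		.direction = RTE_DMA_DIR_MEM_TO_DEV,
		.nb_desc = 256,
		.dst_port = {
			.port_type = RTE_DMA_PORT_PCIE,
			.pcie = {
				.coreid = 0, /* becomes rbp.dportid */
				.pfid = 1,   /* becomes rbp.dpfid */
				.vfen = 1,
				.vfid = 2,   /* becomes rbp.dvfid */
			},
		},
	};

	/* dpaa2_qdma_vchan_rbp_set() now runs inside vchan setup. */
	return rte_dma_vchan_setup(dev_id, vchan, &conf);
}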
@@ -1247,12 +1266,16 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
char ring_name[32];
char pool_name[64];
int fd_long_format = 1;
- int sg_enable = 0;
+ int sg_enable = 0, ret;
DPAA2_QDMA_FUNC_TRACE();
RTE_SET_USED(conf_sz);
+ ret = dpaa2_qdma_vchan_rbp_set(&qdma_dev->vqs[vchan], conf);
+ if (ret)
+ return ret;
+
if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SG_FORMAT)
sg_enable = 1;
@@ -166,6 +166,42 @@ struct qdma_sg_entry {
};
} __rte_packed;
+struct dpaa2_qdma_rbp {
+ uint32_t use_ultrashort:1;
+ uint32_t enable:1;
+ /**
+ * dportid:
+ * 0000 PCI-Express 1
+ * 0001 PCI-Express 2
+ * 0010 PCI-Express 3
+ * 0011 PCI-Express 4
+ * 0100 PCI-Express 5
+ * 0101 PCI-Express 6
+ */
+ uint32_t dportid:4;
+ uint32_t dpfid:2;
+ uint32_t dvfid:6;
+ uint32_t dvfa:1;
+ /* Using route-by-port for destination. */
+ uint32_t drbp:1;
+ /**
+ * sportid:
+ * 0000 PCI-Express 1
+ * 0001 PCI-Express 2
+ * 0010 PCI-Express 3
+ * 0011 PCI-Express 4
+ * 0100 PCI-Express 5
+ * 0101 PCI-Express 6
+ */
+ uint32_t sportid:4;
+ uint32_t spfid:2;
+ uint32_t svfid:6;
+ uint32_t svfa:1;
+ /* Using route-by-port for source. */
+ uint32_t srbp:1;
+ uint32_t rsv:2;
+};
+
/** Represents a DPDMAI device */
struct dpaa2_dpdmai_dev {
/** Pointer to Next device instance */
@@ -216,7 +252,7 @@ struct qdma_virt_queue {
/** FLE pool for the queue */
struct rte_mempool *fle_pool;
/** Route by port */
- struct rte_dpaa2_qdma_rbp rbp;
+ struct dpaa2_qdma_rbp rbp;
/** States if this vq is in use or not */
uint8_t in_use;
/** States if this vq has exclusively associated hw queue */
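The port-id encoding documented in the new struct maps one-to-one onto the pcie.coreid value supplied through rte_dma_vchan_conf. A hedged illustration of a filled-in descriptor (values are made up; the driver populates this internally in dpaa2_qdma_vchan_rbp_set()):

/* Route destination writes through PCI-Express 2 (dportid = 0001b per
 * the encoding table above), PF 0, with VF 3 active. */
struct dpaa2_qdma_rbp rbp = {
	.enable = 1,
	.drbp = 1,
	.dportid = 1,
	.dpfid = 0,
	.dvfa = 1,
	.dvfid = 3,
};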
@@ -13,42 +13,6 @@
/** States if the destination address is physical. */
#define RTE_DPAA2_QDMA_JOB_DEST_PHY (1ULL << 31)
-struct rte_dpaa2_qdma_rbp {
- uint32_t use_ultrashort:1;
- uint32_t enable:1;
- /**
- * dportid:
- * 0000 PCI-Express 1
- * 0001 PCI-Express 2
- * 0010 PCI-Express 3
- * 0011 PCI-Express 4
- * 0100 PCI-Express 5
- * 0101 PCI-Express 6
- */
- uint32_t dportid:4;
- uint32_t dpfid:2;
- uint32_t dvfid:6;
- /*using route by port for destination */
- uint32_t drbp:1;
- /**
- * sportid:
- * 0000 PCI-Express 1
- * 0001 PCI-Express 2
- * 0010 PCI-Express 3
- * 0011 PCI-Express 4
- * 0100 PCI-Express 5
- * 0101 PCI-Express 6
- */
- uint32_t sportid:4;
- uint32_t spfid:2;
- uint32_t svfid:6;
- /* using route by port for source */
- uint32_t srbp:1;
- /* Virtual Function Active */
- uint32_t vfa:1;
- uint32_t rsv:3;
-};
-
/** Determines a QDMA job */
struct rte_dpaa2_qdma_job {
/** Source Address from where DMA is (to be) performed */
@@ -67,6 +31,7 @@ struct rte_dpaa2_qdma_job {
*/
uint16_t status;
uint16_t vq_id;
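+ /** Job context. */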
+ uint64_t cnxt;
/**
* FLE pool element maintained by user, in case no qDMA response.
* Note: the address must be allocated from DPDK memory pool.
@@ -104,24 +69,6 @@ void rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan);
__rte_experimental
void rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan);
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enable Route-by-port on a channel. This API should be
- * called before calling 'rte_dma_vchan_setup()' API.
- *
- * @param dev_id
- * The identifier of the device.
- * @param vchan
- * The identifier of virtual DMA channel.
- * @param rbp_config
- * Configuration for route-by-port
- */
-__rte_experimental
-void rte_dpaa2_qdma_vchan_rbp_enable(int16_t dev_id, uint16_t vchan,
- struct rte_dpaa2_qdma_rbp *rbp_config);
-
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice.
@@ -10,5 +10,4 @@ EXPERIMENTAL {
rte_dpaa2_qdma_copy_multi;
rte_dpaa2_qdma_vchan_fd_us_enable;
rte_dpaa2_qdma_vchan_internal_sg_enable;
- rte_dpaa2_qdma_vchan_rbp_enable;
};
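Migration note: rte_dpaa2_qdma_vchan_rbp_enable() and struct rte_dpaa2_qdma_rbp leave the experimental API with this change, so existing callers switch to the generic port parameters. A hedged before/after sketch (dev_id, vchan and conf as in the earlier example):

/* Before this patch: */
struct rte_dpaa2_qdma_rbp rbp_conf = { .enable = 1, .drbp = 1, .dportid = 1 };
rte_dpaa2_qdma_vchan_rbp_enable(dev_id, vchan, &rbp_conf);
rte_dma_vchan_setup(dev_id, vchan, &conf);

/* After: encode the same routing in conf.dst_port / conf.src_port and
 * call only rte_dma_vchan_setup(dev_id, vchan, &conf). */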