@@ -30,8 +30,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
int
rte_pmd_dpaa2_set_custom_hash(uint16_t port_id,
- uint16_t offset,
- uint8_t size)
+ uint16_t offset, uint8_t size)
{
struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
@@ -52,8 +51,8 @@ rte_pmd_dpaa2_set_custom_hash(uint16_t port_id,
return -EINVAL;
}
- p_params = rte_zmalloc(
- NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
+ p_params = rte_zmalloc(NULL,
+ DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
if (!p_params) {
DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
return -ENOMEM;
@@ -73,17 +72,23 @@ rte_pmd_dpaa2_set_custom_hash(uint16_t port_id,
}
memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
- tc_cfg.key_cfg_iova = (size_t)(DPAA2_VADDR_TO_IOVA(p_params));
+ tc_cfg.key_cfg_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(p_params,
+ DIST_PARAM_IOVA_SIZE);
+ if (tc_cfg.key_cfg_iova == RTE_BAD_IOVA) {
+ DPAA2_PMD_ERR("%s: No IOMMU map for key cfg(%p)",
+ __func__, p_params);
+ rte_free(p_params);
+ return -ENOBUFS;
+ }
+
tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
- &tc_cfg);
+ &tc_cfg);
rte_free(p_params);
if (ret) {
- DPAA2_PMD_ERR(
- "Setting distribution for Rx failed with err: %d",
- ret);
+ DPAA2_PMD_ERR("Set RX TC dist failed(err=%d)", ret);
return ret;
}
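
The hunks in this file repeatedly replace bare DPAA2_VADDR_TO_IOVA() conversions with DPAA2_VADDR_TO_IOVA_AND_CHECK() and bail out on RTE_BAD_IOVA before handing the buffer to MC firmware. The macro itself is defined in the fslmc bus code, not in this diff; a minimal sketch of the assumed semantics, built on the public rte_mem_virt2iova() API, is:

    #include <rte_memory.h>

    /* Sketch only: assumed behaviour of the IOVA-check helper. The real
     * DPAA2_VADDR_TO_IOVA_AND_CHECK() lives in the fslmc bus driver and may
     * additionally verify that the whole [va, va + size) range is mapped.
     */
    static inline uint64_t
    vaddr_to_iova_and_check_sketch(const void *va, size_t size)
    {
    	rte_iova_t iova = rte_mem_virt2iova(va);

    	if (size == 0 || iova == RTE_BAD_IOVA)
    		return RTE_BAD_IOVA;

    	return iova;
    }
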
@@ -115,8 +120,8 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
if (tc_dist_queues > priv->dist_queues)
tc_dist_queues = priv->dist_queues;
- p_params = rte_malloc(
- NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
+ p_params = rte_malloc(NULL,
+ DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
if (!p_params) {
DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
return -ENOMEM;
@@ -133,7 +138,15 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
return ret;
}
- tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
+ tc_cfg.key_cfg_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(p_params,
+ DIST_PARAM_IOVA_SIZE);
+ if (tc_cfg.key_cfg_iova == RTE_BAD_IOVA) {
+ DPAA2_PMD_ERR("%s: No IOMMU map for key cfg(%p)",
+ __func__, p_params);
+ rte_free(p_params);
+ return -ENOBUFS;
+ }
+
tc_cfg.dist_size = tc_dist_queues;
tc_cfg.enable = true;
tc_cfg.tc = tc_index;
@@ -148,17 +161,15 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW, priv->token, &tc_cfg);
rte_free(p_params);
if (ret) {
- DPAA2_PMD_ERR(
- "Setting distribution for Rx failed with err: %d",
- ret);
+ DPAA2_PMD_ERR("RX Hash dist for failed(err=%d)", ret);
return ret;
}
return 0;
}
-int dpaa2_remove_flow_dist(
- struct rte_eth_dev *eth_dev,
+int
+dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
uint8_t tc_index)
{
struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
@@ -168,8 +179,8 @@ int dpaa2_remove_flow_dist(
void *p_params;
int ret;
- p_params = rte_malloc(
- NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
+ p_params = rte_malloc(NULL,
+ DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
if (!p_params) {
DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
return -ENOMEM;
@@ -177,7 +188,15 @@ int dpaa2_remove_flow_dist(
memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
tc_cfg.dist_size = 0;
- tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
+ tc_cfg.key_cfg_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(p_params,
+ DIST_PARAM_IOVA_SIZE);
+ if (tc_cfg.key_cfg_iova == RTE_BAD_IOVA) {
+ DPAA2_PMD_ERR("%s: No IOMMU map for key cfg(%p)",
+ __func__, p_params);
+ rte_free(p_params);
+ return -ENOBUFS;
+ }
+
tc_cfg.enable = true;
tc_cfg.tc = tc_index;
@@ -194,9 +213,7 @@ int dpaa2_remove_flow_dist(
&tc_cfg);
rte_free(p_params);
if (ret)
- DPAA2_PMD_ERR(
- "Setting distribution for Rx failed with err: %d",
- ret);
+ DPAA2_PMD_ERR("RX hash dist failed(err=%d)", ret);
return ret;
}
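
With these changes the public rte_pmd_dpaa2_set_custom_hash() helper can also fail with -ENOBUFS when the key-config scratch buffer has no IOMMU mapping. A hedged caller-side sketch (the offset/size values are illustrative, not taken from this patch):

    #include <errno.h>
    #include <stdio.h>
    #include <stdint.h>
    #include <rte_pmd_dpaa2.h>

    /* Illustrative: distribute on 4 bytes at frame offset 14 for one port. */
    static int
    setup_custom_hash_example(uint16_t port_id)
    {
    	int ret = rte_pmd_dpaa2_set_custom_hash(port_id, 14, 4);

    	if (ret == -ENOBUFS)
    		printf("flow-dist key buffer is not IOMMU mapped\n");
    	else if (ret)
    		printf("custom hash setup failed: %d\n", ret);

    	return ret;
    }
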
@@ -123,9 +123,9 @@ dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
PMD_INIT_FUNC_TRACE();
- if (dpni == NULL) {
+ if (!dpni) {
DPAA2_PMD_ERR("dpni is NULL");
- return -1;
+ return -EINVAL;
}
if (on)
@@ -174,8 +174,8 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
static int
dpaa2_vlan_tpid_set(struct rte_eth_dev *dev,
- enum rte_vlan_type vlan_type __rte_unused,
- uint16_t tpid)
+ enum rte_vlan_type vlan_type __rte_unused,
+ uint16_t tpid)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct fsl_mc_io *dpni = dev->process_private;
@@ -212,8 +212,7 @@ dpaa2_vlan_tpid_set(struct rte_eth_dev *dev,
static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
- char *fw_version,
- size_t fw_size)
+ char *fw_version, size_t fw_size)
{
int ret;
struct fsl_mc_io *dpni = dev->process_private;
@@ -245,7 +244,8 @@ dpaa2_fw_version_get(struct rte_eth_dev *dev,
}
static int
-dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+dpaa2_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
@@ -291,8 +291,8 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
static int
dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
- __rte_unused uint16_t queue_id,
- struct rte_eth_burst_mode *mode)
+ __rte_unused uint16_t queue_id,
+ struct rte_eth_burst_mode *mode)
{
struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
int ret = -EINVAL;
@@ -368,7 +368,7 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
uint8_t num_rxqueue_per_tc;
struct dpaa2_queue *mc_q, *mcq;
uint32_t tot_queues;
- int i;
+ int i, ret;
struct dpaa2_queue *dpaa2_q;
PMD_INIT_FUNC_TRACE();
@@ -382,7 +382,7 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
RTE_CACHE_LINE_SIZE);
if (!mc_q) {
DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
- return -1;
+ return -ENOBUFS;
}
for (i = 0; i < priv->nb_rx_queues; i++) {
@@ -404,8 +404,10 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
if (dpaa2_enable_err_queue) {
priv->rx_err_vq = rte_zmalloc("dpni_rx_err",
sizeof(struct dpaa2_queue), 0);
- if (!priv->rx_err_vq)
+ if (!priv->rx_err_vq) {
+ ret = -ENOBUFS;
goto fail;
+ }
dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
dpaa2_q->q_storage = rte_malloc("err_dq_storage",
@@ -424,13 +426,15 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
for (i = 0; i < priv->nb_tx_queues; i++) {
mc_q->eth_data = dev->data;
- mc_q->flow_id = 0xffff;
+ mc_q->flow_id = DPAA2_INVALID_FLOW_ID;
priv->tx_vq[i] = mc_q++;
dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
dpaa2_q->cscn = rte_malloc(NULL,
sizeof(struct qbman_result), 16);
- if (!dpaa2_q->cscn)
+ if (!dpaa2_q->cscn) {
+ ret = -ENOBUFS;
goto fail_tx;
+ }
}
if (priv->flags & DPAA2_TX_CONF_ENABLE) {
@@ -498,7 +502,7 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
}
rte_free(mc_q);
- return -1;
+ return ret;
}
static void
@@ -718,14 +722,14 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
*/
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
- uint16_t rx_queue_id,
- uint16_t nb_rx_desc,
- unsigned int socket_id __rte_unused,
- const struct rte_eth_rxconf *rx_conf,
- struct rte_mempool *mb_pool)
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
+ struct fsl_mc_io *dpni = dev->process_private;
struct dpaa2_queue *dpaa2_q;
struct dpni_queue cfg;
uint8_t options = 0;
@@ -747,8 +751,8 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
/* Rx deferred start is not supported */
if (rx_conf->rx_deferred_start) {
- DPAA2_PMD_ERR("%p:Rx deferred start not supported",
- (void *)dev);
+ DPAA2_PMD_ERR("%s:Rx deferred start not supported",
+ dev->data->name);
return -EINVAL;
}
@@ -764,7 +768,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
if (ret)
return ret;
}
- dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
+ dpaa2_q = priv->rx_vq[rx_queue_id];
dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
dpaa2_q->bp_array = rte_dpaa2_bpid_info;
dpaa2_q->nb_desc = UINT16_MAX;
@@ -790,7 +794,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
cfg.cgid = i;
dpaa2_q->cgid = cfg.cgid;
} else {
- dpaa2_q->cgid = 0xff;
+ dpaa2_q->cgid = DPAA2_INVALID_CGID;
}
/*if ls2088 or rev2 device, enable the stashing */
@@ -814,10 +818,10 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
}
}
ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
- dpaa2_q->tc_index, flow_id, options, &cfg);
+ dpaa2_q->tc_index, flow_id, options, &cfg);
if (ret) {
DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
- return -1;
+ return ret;
}
if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
@@ -830,7 +834,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
* There is no HW restriction, but number of CGRs are limited,
* hence this restriction is placed.
*/
- if (dpaa2_q->cgid != 0xff) {
+ if (dpaa2_q->cgid != DPAA2_INVALID_CGID) {
/*enabling per rx queue congestion control */
taildrop.threshold = nb_rx_desc;
taildrop.units = DPNI_CONGESTION_UNIT_FRAMES;
@@ -856,15 +860,15 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
}
if (ret) {
DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
- ret);
- return -1;
+ ret);
+ return ret;
}
} else { /* Disable tail Drop */
struct dpni_taildrop taildrop = {0};
DPAA2_PMD_INFO("Tail drop is disabled on queue");
taildrop.enable = 0;
- if (dpaa2_q->cgid != 0xff) {
+ if (dpaa2_q->cgid != DPAA2_INVALID_CGID) {
ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX,
dpaa2_q->tc_index,
@@ -876,8 +880,8 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
}
if (ret) {
DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
- ret);
- return -1;
+ ret);
+ return ret;
}
}
@@ -887,16 +891,14 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
- uint16_t tx_queue_id,
- uint16_t nb_tx_desc,
- unsigned int socket_id __rte_unused,
- const struct rte_eth_txconf *tx_conf)
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
- priv->tx_vq[tx_queue_id];
- struct dpaa2_queue *dpaa2_tx_conf_q = (struct dpaa2_queue *)
- priv->tx_conf_vq[tx_queue_id];
+ struct dpaa2_queue *dpaa2_q = priv->tx_vq[tx_queue_id];
+ struct dpaa2_queue *dpaa2_tx_conf_q = priv->tx_conf_vq[tx_queue_id];
struct fsl_mc_io *dpni = dev->process_private;
struct dpni_queue tx_conf_cfg;
struct dpni_queue tx_flow_cfg;
@@ -906,13 +908,14 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
struct dpni_queue_id qid;
uint32_t tc_id;
int ret;
+ uint64_t iova;
PMD_INIT_FUNC_TRACE();
/* Tx deferred start is not supported */
if (tx_conf->tx_deferred_start) {
- DPAA2_PMD_ERR("%p:Tx deferred start not supported",
- (void *)dev);
+ DPAA2_PMD_ERR("%s:Tx deferred start not supported",
+ dev->data->name);
return -EINVAL;
}
@@ -920,7 +923,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
dpaa2_q->offloads = tx_conf->offloads;
/* Return if queue already configured */
- if (dpaa2_q->flow_id != 0xffff) {
+ if (dpaa2_q->flow_id != DPAA2_INVALID_FLOW_ID) {
dev->data->tx_queues[tx_queue_id] = dpaa2_q;
return 0;
}
@@ -962,7 +965,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
DPAA2_PMD_ERR("Error in setting the tx flow: "
"tc_id=%d, flow=%d err=%d",
tc_id, flow_id, ret);
- return -1;
+ return ret;
}
dpaa2_q->flow_id = flow_id;
@@ -970,11 +973,11 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
dpaa2_q->tc_index = tc_id;
ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
- DPNI_QUEUE_TX, ((channel_id << 8) | dpaa2_q->tc_index),
- dpaa2_q->flow_id, &tx_flow_cfg, &qid);
+ DPNI_QUEUE_TX, ((channel_id << 8) | dpaa2_q->tc_index),
+ dpaa2_q->flow_id, &tx_flow_cfg, &qid);
if (ret) {
DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
- return -1;
+ return ret;
}
dpaa2_q->fqid = qid.fqid;
@@ -990,8 +993,17 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
*/
cong_notif_cfg.threshold_exit = (nb_tx_desc * 9) / 10;
cong_notif_cfg.message_ctx = 0;
- cong_notif_cfg.message_iova =
- (size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
+
+ iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(dpaa2_q->cscn,
+ sizeof(struct qbman_result));
+ if (iova == RTE_BAD_IOVA) {
+ DPAA2_PMD_ERR("No IOMMU map for cscn(%p)(size=%x)",
+ dpaa2_q->cscn, (uint32_t)sizeof(struct qbman_result));
+
+ return -ENOBUFS;
+ }
+
+ cong_notif_cfg.message_iova = iova;
cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
cong_notif_cfg.notification_mode =
DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
@@ -999,16 +1011,13 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
DPNI_CONG_OPT_COHERENT_WRITE;
cong_notif_cfg.cg_point = DPNI_CP_QUEUE;
- ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
- priv->token,
- DPNI_QUEUE_TX,
- ((channel_id << 8) | tc_id),
- &cong_notif_cfg);
+ ret = dpni_set_congestion_notification(dpni,
+ CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
+ ((channel_id << 8) | tc_id), &cong_notif_cfg);
if (ret) {
- DPAA2_PMD_ERR(
- "Error in setting tx congestion notification: "
- "err=%d", ret);
- return -ret;
+ DPAA2_PMD_ERR("Set TX congestion notification err=%d",
+ ret);
+ return ret;
}
}
dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
@@ -1019,22 +1028,24 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
options = options | DPNI_QUEUE_OPT_USER_CTX;
tx_conf_cfg.user_context = (size_t)(dpaa2_q);
ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
- DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
- dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg);
+ DPNI_QUEUE_TX_CONFIRM,
+ ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
+ dpaa2_tx_conf_q->flow_id,
+ options, &tx_conf_cfg);
if (ret) {
- DPAA2_PMD_ERR("Error in setting the tx conf flow: "
- "tc_index=%d, flow=%d err=%d",
- dpaa2_tx_conf_q->tc_index,
- dpaa2_tx_conf_q->flow_id, ret);
- return -1;
+ DPAA2_PMD_ERR("Set TC[%d].TX[%d] conf flow err=%d",
+ dpaa2_tx_conf_q->tc_index,
+ dpaa2_tx_conf_q->flow_id, ret);
+ return ret;
}
ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
- DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
- dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid);
+ DPNI_QUEUE_TX_CONFIRM,
+ ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
+ dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid);
if (ret) {
DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
- return -1;
+ return ret;
}
dpaa2_tx_conf_q->fqid = qid.fqid;
}
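
The dpni_set_queue()/dpni_get_queue() calls in the Tx setup hunks above pack the channel and traffic class into one queue-index argument as ((channel_id << 8) | tc_index). A small illustrative helper (not part of the driver) capturing that convention:

    #include <stdint.h>

    /* Channel in the upper byte, traffic-class index in the lower byte. */
    static inline uint16_t
    dpaa2_qindex_sketch(uint8_t channel_id, uint8_t tc_index)
    {
    	return (uint16_t)(((uint16_t)channel_id << 8) | tc_index);
    }
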
@@ -1046,8 +1057,7 @@ dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct dpaa2_queue *dpaa2_q = dev->data->rx_queues[rx_queue_id];
struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private;
- struct fsl_mc_io *dpni =
- (struct fsl_mc_io *)priv->eth_dev->process_private;
+ struct fsl_mc_io *dpni = priv->eth_dev->process_private;
uint8_t options = 0;
int ret;
struct dpni_queue cfg;
@@ -1057,7 +1067,7 @@ dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
total_nb_rx_desc -= dpaa2_q->nb_desc;
- if (dpaa2_q->cgid != 0xff) {
+ if (dpaa2_q->cgid != DPAA2_INVALID_CGID) {
options = DPNI_QUEUE_OPT_CLEAR_CGID;
cfg.cgid = dpaa2_q->cgid;
@@ -1069,7 +1079,7 @@ dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
DPAA2_PMD_ERR("Unable to clear CGR from q=%u err=%d",
dpaa2_q->fqid, ret);
priv->cgid_in_use[dpaa2_q->cgid] = 0;
- dpaa2_q->cgid = 0xff;
+ dpaa2_q->cgid = DPAA2_INVALID_CGID;
}
}
@@ -1233,10 +1243,10 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
dpaa2_dev_set_link_up(dev);
for (i = 0; i < data->nb_rx_queues; i++) {
- dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
+ dpaa2_q = data->rx_queues[i];
ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
- DPNI_QUEUE_RX, dpaa2_q->tc_index,
- dpaa2_q->flow_id, &cfg, &qid);
+ DPNI_QUEUE_RX, dpaa2_q->tc_index,
+ dpaa2_q->flow_id, &cfg, &qid);
if (ret) {
DPAA2_PMD_ERR("Error in getting flow information: "
"err=%d", ret);
@@ -1253,7 +1263,7 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
ret);
return ret;
}
- dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
+ dpaa2_q = priv->rx_err_vq;
dpaa2_q->fqid = qid.fqid;
dpaa2_q->eth_data = dev->data;
@@ -1318,7 +1328,7 @@ static int
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
+ struct fsl_mc_io *dpni = dev->process_private;
int ret;
struct rte_eth_link link;
struct rte_device *rdev = dev->device;
@@ -1371,7 +1381,7 @@ static int
dpaa2_dev_close(struct rte_eth_dev *dev)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
+ struct fsl_mc_io *dpni = dev->process_private;
int i, ret;
struct rte_eth_link link;
@@ -1382,7 +1392,7 @@ dpaa2_dev_close(struct rte_eth_dev *dev)
if (!dpni) {
DPAA2_PMD_WARN("Already closed or not started");
- return -1;
+ return -EINVAL;
}
dpaa2_tm_deinit(dev);
@@ -1391,7 +1401,7 @@ dpaa2_dev_close(struct rte_eth_dev *dev)
ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
if (ret) {
DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
- return -1;
+ return ret;
}
memset(&link, 0, sizeof(link));
@@ -1403,7 +1413,7 @@ dpaa2_dev_close(struct rte_eth_dev *dev)
ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
if (ret) {
DPAA2_PMD_ERR("Failure closing dpni device with err code %d",
- ret);
+ ret);
}
/* Free the allocated memory for ethernet private data and dpni*/
@@ -1412,18 +1422,17 @@ dpaa2_dev_close(struct rte_eth_dev *dev)
rte_free(dpni);
for (i = 0; i < MAX_TCS; i++)
- rte_free((void *)(size_t)priv->extract.tc_extract_param[i]);
+ rte_free(priv->extract.tc_extract_param[i]);
if (priv->extract.qos_extract_param)
- rte_free((void *)(size_t)priv->extract.qos_extract_param);
+ rte_free(priv->extract.qos_extract_param);
DPAA2_PMD_INFO("%s: netdev deleted", dev->data->name);
return 0;
}
static int
-dpaa2_dev_promiscuous_enable(
- struct rte_eth_dev *dev)
+dpaa2_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
int ret;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
@@ -1483,7 +1492,7 @@ dpaa2_dev_allmulticast_enable(
{
int ret;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
+ struct fsl_mc_io *dpni = dev->process_private;
PMD_INIT_FUNC_TRACE();
@@ -1504,7 +1513,7 @@ dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
int ret;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
+ struct fsl_mc_io *dpni = dev->process_private;
PMD_INIT_FUNC_TRACE();
@@ -1529,13 +1538,13 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
int ret;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
+ struct fsl_mc_io *dpni = dev->process_private;
uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
+ VLAN_TAG_SIZE;
PMD_INIT_FUNC_TRACE();
- if (dpni == NULL) {
+ if (!dpni) {
DPAA2_PMD_ERR("dpni is NULL");
return -EINVAL;
}
@@ -1547,7 +1556,7 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
frame_size - RTE_ETHER_CRC_LEN);
if (ret) {
DPAA2_PMD_ERR("Setting the max frame length failed");
- return -1;
+ return ret;
}
dev->data->mtu = mtu;
DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
@@ -1556,36 +1565,35 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
static int
dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
- struct rte_ether_addr *addr,
- __rte_unused uint32_t index,
- __rte_unused uint32_t pool)
+ struct rte_ether_addr *addr,
+ __rte_unused uint32_t index,
+ __rte_unused uint32_t pool)
{
int ret;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
+ struct fsl_mc_io *dpni = dev->process_private;
PMD_INIT_FUNC_TRACE();
- if (dpni == NULL) {
+ if (!dpni) {
DPAA2_PMD_ERR("dpni is NULL");
- return -1;
+ return -EINVAL;
}
ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, priv->token,
addr->addr_bytes, 0, 0, 0);
if (ret)
- DPAA2_PMD_ERR(
- "error: Adding the MAC ADDR failed: err = %d", ret);
- return 0;
+ DPAA2_PMD_ERR("ERR(%d) Adding the MAC ADDR failed", ret);
+ return ret;
}
static void
dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
- uint32_t index)
+ uint32_t index)
{
int ret;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
+ struct fsl_mc_io *dpni = dev->process_private;
struct rte_eth_dev_data *data = dev->data;
struct rte_ether_addr *macaddr;
@@ -1593,7 +1601,7 @@ dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
macaddr = &data->mac_addrs[index];
- if (dpni == NULL) {
+ if (!dpni) {
DPAA2_PMD_ERR("dpni is NULL");
return;
}
@@ -1607,15 +1615,15 @@ dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
static int
dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
- struct rte_ether_addr *addr)
+ struct rte_ether_addr *addr)
{
int ret;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
+ struct fsl_mc_io *dpni = dev->process_private;
PMD_INIT_FUNC_TRACE();
- if (dpni == NULL) {
+ if (!dpni) {
DPAA2_PMD_ERR("dpni is NULL");
return -EINVAL;
}
@@ -1624,19 +1632,18 @@ dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
priv->token, addr->addr_bytes);
if (ret)
- DPAA2_PMD_ERR(
- "error: Setting the MAC ADDR failed %d", ret);
+ DPAA2_PMD_ERR("ERR(%d) Setting the MAC ADDR failed", ret);
return ret;
}
-static
-int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
- struct rte_eth_stats *stats)
+static int
+dpaa2_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
- int32_t retcode;
+ struct fsl_mc_io *dpni = dev->process_private;
+ int32_t retcode;
uint8_t page0 = 0, page1 = 1, page2 = 2;
union dpni_statistics value;
int i;
@@ -1691,8 +1698,8 @@ int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
/* Fill in per queue stats */
for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
(i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) {
- dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i];
- dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i];
+ dpaa2_rxq = priv->rx_vq[i];
+ dpaa2_txq = priv->tx_vq[i];
if (dpaa2_rxq)
stats->q_ipackets[i] = dpaa2_rxq->rx_pkts;
if (dpaa2_txq)
@@ -1711,19 +1718,20 @@ int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
};
static int
-dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
- unsigned int n)
+dpaa2_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *xstats, unsigned int n)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
- int32_t retcode;
+ int32_t retcode;
union dpni_statistics value[5] = {};
unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);
+ uint8_t page_id, stats_id;
if (n < num)
return num;
- if (xstats == NULL)
+ if (!xstats)
return 0;
/* Get Counters from page_0*/
@@ -1758,8 +1766,9 @@ dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
for (i = 0; i < num; i++) {
xstats[i].id = i;
- xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
- raw.counter[dpaa2_xstats_strings[i].stats_id];
+ page_id = dpaa2_xstats_strings[i].page_id;
+ stats_id = dpaa2_xstats_strings[i].stats_id;
+ xstats[i].value = value[page_id].raw.counter[stats_id];
}
return i;
err:
@@ -1769,8 +1778,8 @@ dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
static int
dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
- struct rte_eth_xstat_name *xstats_names,
- unsigned int limit)
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int limit)
{
unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
@@ -1788,16 +1797,16 @@ dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
static int
dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
- uint64_t *values, unsigned int n)
+ uint64_t *values, unsigned int n)
{
unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
uint64_t values_copy[stat_cnt];
+ uint8_t page_id, stats_id;
if (!ids) {
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- struct fsl_mc_io *dpni =
- (struct fsl_mc_io *)dev->process_private;
- int32_t retcode;
+ struct fsl_mc_io *dpni = dev->process_private;
+ int32_t retcode;
union dpni_statistics value[5] = {};
if (n < stat_cnt)
@@ -1831,8 +1840,9 @@ dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
return 0;
for (i = 0; i < stat_cnt; i++) {
- values[i] = value[dpaa2_xstats_strings[i].page_id].
- raw.counter[dpaa2_xstats_strings[i].stats_id];
+ page_id = dpaa2_xstats_strings[i].page_id;
+ stats_id = dpaa2_xstats_strings[i].stats_id;
+ values[i] = value[page_id].raw.counter[stats_id];
}
return stat_cnt;
}
@@ -1842,7 +1852,7 @@ dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
for (i = 0; i < n; i++) {
if (ids[i] >= stat_cnt) {
DPAA2_PMD_ERR("xstats id value isn't valid");
- return -1;
+ return -EINVAL;
}
values[i] = values_copy[ids[i]];
}
@@ -1850,8 +1860,7 @@ dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
}
static int
-dpaa2_xstats_get_names_by_id(
- struct rte_eth_dev *dev,
+dpaa2_xstats_get_names_by_id(struct rte_eth_dev *dev,
const uint64_t *ids,
struct rte_eth_xstat_name *xstats_names,
unsigned int limit)
@@ -1878,14 +1887,14 @@ static int
dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
+ struct fsl_mc_io *dpni = dev->process_private;
int retcode;
int i;
struct dpaa2_queue *dpaa2_q;
PMD_INIT_FUNC_TRACE();
- if (dpni == NULL) {
+ if (!dpni) {
DPAA2_PMD_ERR("dpni is NULL");
return -EINVAL;
}
@@ -1896,13 +1905,13 @@ dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
/* Reset the per queue stats in dpaa2_queue structure */
for (i = 0; i < priv->nb_rx_queues; i++) {
- dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
+ dpaa2_q = priv->rx_vq[i];
if (dpaa2_q)
dpaa2_q->rx_pkts = 0;
}
for (i = 0; i < priv->nb_tx_queues; i++) {
- dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+ dpaa2_q = priv->tx_vq[i];
if (dpaa2_q)
dpaa2_q->tx_pkts = 0;
}
@@ -1921,12 +1930,12 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
{
int ret;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
+ struct fsl_mc_io *dpni = dev->process_private;
struct rte_eth_link link;
struct dpni_link_state state = {0};
uint8_t count;
- if (dpni == NULL) {
+ if (!dpni) {
DPAA2_PMD_ERR("dpni is NULL");
return 0;
}
@@ -1936,7 +1945,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
&state);
if (ret < 0) {
DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
- return -1;
+ return ret;
}
if (state.up == RTE_ETH_LINK_DOWN &&
wait_to_complete)
@@ -1955,7 +1964,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
ret = rte_eth_linkstatus_set(dev, &link);
- if (ret == -1)
+ if (ret < 0)
DPAA2_PMD_DEBUG("No change in status");
else
DPAA2_PMD_INFO("Port %d Link is %s", dev->data->port_id,
@@ -1978,9 +1987,9 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
struct dpni_link_state state = {0};
priv = dev->data->dev_private;
- dpni = (struct fsl_mc_io *)dev->process_private;
+ dpni = dev->process_private;
- if (dpni == NULL) {
+ if (!dpni) {
DPAA2_PMD_ERR("dpni is NULL");
return ret;
}
@@ -2040,9 +2049,9 @@ dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
priv = dev->data->dev_private;
- dpni = (struct fsl_mc_io *)dev->process_private;
+ dpni = dev->process_private;
- if (dpni == NULL) {
+ if (!dpni) {
DPAA2_PMD_ERR("Device has not yet been configured");
return ret;
}
@@ -2094,9 +2103,9 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
PMD_INIT_FUNC_TRACE();
priv = dev->data->dev_private;
- dpni = (struct fsl_mc_io *)dev->process_private;
+ dpni = dev->process_private;
- if (dpni == NULL || fc_conf == NULL) {
+ if (!dpni || !fc_conf) {
DPAA2_PMD_ERR("device not configured");
return ret;
}
@@ -2149,9 +2158,9 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
PMD_INIT_FUNC_TRACE();
priv = dev->data->dev_private;
- dpni = (struct fsl_mc_io *)dev->process_private;
+ dpni = dev->process_private;
- if (dpni == NULL) {
+ if (!dpni) {
DPAA2_PMD_ERR("dpni is NULL");
return ret;
}
@@ -2394,10 +2403,10 @@ dpaa2_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
{
struct dpaa2_queue *rxq;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
+ struct fsl_mc_io *dpni = dev->process_private;
uint16_t max_frame_length;
- rxq = (struct dpaa2_queue *)dev->data->rx_queues[queue_id];
+ rxq = dev->data->rx_queues[queue_id];
qinfo->mp = rxq->mb_pool;
qinfo->scattered_rx = dev->data->scattered_rx;
@@ -2513,10 +2522,10 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
* Returns the table of MAC entries (multiple entries)
*/
static int
-populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
- struct rte_ether_addr *mac_entry)
+populate_mac_addr(struct fsl_mc_io *dpni_dev,
+ struct dpaa2_dev_priv *priv, struct rte_ether_addr *mac_entry)
{
- int ret;
+ int ret = 0;
struct rte_ether_addr phy_mac, prime_mac;
memset(&phy_mac, 0, sizeof(struct rte_ether_addr));
@@ -2574,7 +2583,7 @@ populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
return 0;
cleanup:
- return -1;
+ return ret;
}
static int
@@ -2633,7 +2642,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
return -1;
}
dpni_dev->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
- eth_dev->process_private = (void *)dpni_dev;
+ eth_dev->process_private = dpni_dev;
/* For secondary processes, the primary has done all the work */
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
@@ -2662,7 +2671,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
"Failure in opening dpni@%d with err code %d",
hw_id, ret);
rte_free(dpni_dev);
- return -1;
+ return ret;
}
if (eth_dev->data->dev_conf.lpbk_mode)
@@ -2813,7 +2822,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
/* Init fields w.r.t. classification */
memset(&priv->extract.qos_key_extract, 0,
sizeof(struct dpaa2_key_extract));
- priv->extract.qos_extract_param = rte_malloc(NULL, 256, 64);
+ priv->extract.qos_extract_param = rte_malloc(NULL,
+ DPAA2_EXTRACT_PARAM_MAX_SIZE,
+ RTE_CACHE_LINE_SIZE);
if (!priv->extract.qos_extract_param) {
DPAA2_PMD_ERR("Memory alloc failed");
goto init_err;
@@ -2822,7 +2833,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
for (i = 0; i < MAX_TCS; i++) {
memset(&priv->extract.tc_key_extract[i], 0,
sizeof(struct dpaa2_key_extract));
- priv->extract.tc_extract_param[i] = rte_malloc(NULL, 256, 64);
+ priv->extract.tc_extract_param[i] = rte_malloc(NULL,
+ DPAA2_EXTRACT_PARAM_MAX_SIZE,
+ RTE_CACHE_LINE_SIZE);
if (!priv->extract.tc_extract_param[i]) {
DPAA2_PMD_ERR("Memory alloc failed");
goto init_err;
@@ -2982,12 +2995,11 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) >
RTE_PKTMBUF_HEADROOM) {
- DPAA2_PMD_ERR(
- "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA2 Annotation req(%d)",
- RTE_PKTMBUF_HEADROOM,
- DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE);
+ DPAA2_PMD_ERR("RTE_PKTMBUF_HEADROOM(%d) < DPAA2 Annotation(%d)",
+ RTE_PKTMBUF_HEADROOM,
+ DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE);
- return -1;
+ return -EINVAL;
}
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
@@ -31,6 +31,9 @@
#define MAX_DPNI 8
#define DPAA2_MAX_CHANNELS 16
+#define DPAA2_EXTRACT_PARAM_MAX_SIZE 256
+#define DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE 256
+
#define DPAA2_RX_DEFAULT_NBDESC 512
#define DPAA2_ETH_MAX_LEN (RTE_ETHER_MTU + \
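
The ethdev hunks above switch from the magic values 0xffff and 0xff to DPAA2_INVALID_FLOW_ID and DPAA2_INVALID_CGID. Their definitions are not shown in this diff; presumably they sit alongside the new extract-size macros in dpaa2_ethdev.h with values matching the constants they replace:

    /* Assumed definitions, mirroring the magic numbers they replace. */
    #define DPAA2_INVALID_FLOW_ID	0xffff
    #define DPAA2_INVALID_CGID	0xff
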
@@ -4318,7 +4318,14 @@ dpaa2_configure_fs_rss_table(struct dpaa2_dev_priv *priv,
tc_extract = &priv->extract.tc_key_extract[tc_id];
key_cfg_buf = priv->extract.tc_extract_param[tc_id];
- key_cfg_iova = DPAA2_VADDR_TO_IOVA(key_cfg_buf);
+ key_cfg_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(key_cfg_buf,
+ DPAA2_EXTRACT_PARAM_MAX_SIZE);
+ if (key_cfg_iova == RTE_BAD_IOVA) {
+ DPAA2_PMD_ERR("%s: No IOMMU map for key cfg(%p)",
+ __func__, key_cfg_buf);
+
+ return -ENOBUFS;
+ }
key_max_size = tc_extract->key_profile.key_max_size;
entry_size = dpaa2_flow_entry_size(key_max_size);
@@ -4402,7 +4409,14 @@ dpaa2_configure_qos_table(struct dpaa2_dev_priv *priv,
qos_extract = &priv->extract.qos_key_extract;
key_cfg_buf = priv->extract.qos_extract_param;
- key_cfg_iova = DPAA2_VADDR_TO_IOVA(key_cfg_buf);
+ key_cfg_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(key_cfg_buf,
+ DPAA2_EXTRACT_PARAM_MAX_SIZE);
+ if (key_cfg_iova == RTE_BAD_IOVA) {
+ DPAA2_PMD_ERR("%s: No IOMMU map for key cfg(%p)",
+ __func__, key_cfg_buf);
+
+ return -ENOBUFS;
+ }
key_max_size = qos_extract->key_profile.key_max_size;
entry_size = dpaa2_flow_entry_size(key_max_size);
@@ -4959,6 +4973,7 @@ dpaa2_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
struct dpaa2_dev_flow *flow = NULL;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
int ret;
+ uint64_t iova;
dpaa2_flow_control_log =
getenv("DPAA2_FLOW_CONTROL_LOG");
@@ -4982,34 +4997,66 @@ dpaa2_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
}
/* Allocate DMA'ble memory to write the qos rules */
- flow->qos_key_addr = rte_zmalloc(NULL, 256, 64);
+ flow->qos_key_addr = rte_zmalloc(NULL,
+ DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE, RTE_CACHE_LINE_SIZE);
if (!flow->qos_key_addr) {
DPAA2_PMD_ERR("Memory allocation failed");
goto mem_failure;
}
- flow->qos_rule.key_iova = DPAA2_VADDR_TO_IOVA(flow->qos_key_addr);
+ iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(flow->qos_key_addr,
+ DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE);
+ if (iova == RTE_BAD_IOVA) {
+ DPAA2_PMD_ERR("%s: No IOMMU map for qos key(%p)",
+ __func__, flow->qos_key_addr);
+ goto mem_failure;
+ }
+ flow->qos_rule.key_iova = iova;
- flow->qos_mask_addr = rte_zmalloc(NULL, 256, 64);
+ flow->qos_mask_addr = rte_zmalloc(NULL,
+ DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE, RTE_CACHE_LINE_SIZE);
if (!flow->qos_mask_addr) {
DPAA2_PMD_ERR("Memory allocation failed");
goto mem_failure;
}
- flow->qos_rule.mask_iova = DPAA2_VADDR_TO_IOVA(flow->qos_mask_addr);
+ iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(flow->qos_mask_addr,
+ DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE);
+ if (iova == RTE_BAD_IOVA) {
+ DPAA2_PMD_ERR("%s: No IOMMU map for qos mask(%p)",
+ __func__, flow->qos_mask_addr);
+ goto mem_failure;
+ }
+ flow->qos_rule.mask_iova = iova;
/* Allocate DMA'ble memory to write the FS rules */
- flow->fs_key_addr = rte_zmalloc(NULL, 256, 64);
+ flow->fs_key_addr = rte_zmalloc(NULL,
+ DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE, RTE_CACHE_LINE_SIZE);
if (!flow->fs_key_addr) {
DPAA2_PMD_ERR("Memory allocation failed");
goto mem_failure;
}
- flow->fs_rule.key_iova = DPAA2_VADDR_TO_IOVA(flow->fs_key_addr);
+ iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(flow->fs_key_addr,
+ DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE);
+ if (iova == RTE_BAD_IOVA) {
+ DPAA2_PMD_ERR("%s: No IOMMU map for fs key(%p)",
+ __func__, flow->fs_key_addr);
+ goto mem_failure;
+ }
+ flow->fs_rule.key_iova = iova;
- flow->fs_mask_addr = rte_zmalloc(NULL, 256, 64);
+ flow->fs_mask_addr = rte_zmalloc(NULL,
+ DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE, RTE_CACHE_LINE_SIZE);
if (!flow->fs_mask_addr) {
DPAA2_PMD_ERR("Memory allocation failed");
goto mem_failure;
}
- flow->fs_rule.mask_iova = DPAA2_VADDR_TO_IOVA(flow->fs_mask_addr);
+ iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(flow->fs_mask_addr,
+ DPAA2_EXTRACT_ALLOC_KEY_MAX_SIZE);
+ if (iova == RTE_BAD_IOVA) {
+ DPAA2_PMD_ERR("%s: No IOMMU map for fs mask(%p)",
+ __func__, flow->fs_mask_addr);
+ goto mem_failure;
+ }
+ flow->fs_rule.mask_iova = iova;
priv->curr = flow;
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2023 NXP
*/
#include <rte_mbuf.h>
@@ -170,7 +170,14 @@ int dpaa2_eth_load_wriop_soft_parser(struct dpaa2_dev_priv *priv,
}
memcpy(addr, sp_param.byte_code, sp_param.size);
- cfg.ss_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(addr));
+ cfg.ss_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(addr, sp_param.size);
+ if (cfg.ss_iova == RTE_BAD_IOVA) {
+ DPAA2_PMD_ERR("No IOMMU map for soft sequence(%p), size=%d",
+ addr, sp_param.size);
+ rte_free(addr);
+
+ return -ENOBUFS;
+ }
ret = dpni_load_sw_sequence(dpni, CMD_PRI_LOW, priv->token, &cfg);
if (ret) {
@@ -179,7 +186,7 @@ int dpaa2_eth_load_wriop_soft_parser(struct dpaa2_dev_priv *priv,
return ret;
}
- priv->ss_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(addr));
+ priv->ss_iova = cfg.ss_iova;
priv->ss_offset += sp_param.size;
DPAA2_PMD_INFO("Soft parser loaded for dpni@%d", priv->hw_id);
@@ -219,7 +226,15 @@ int dpaa2_eth_enable_wriop_soft_parser(struct dpaa2_dev_priv *priv,
}
memcpy(param_addr, sp_param.param_array, cfg.param_size);
- cfg.param_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(param_addr));
+ cfg.param_iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(param_addr,
+ cfg.param_size);
+ if (cfg.param_iova == RTE_BAD_IOVA) {
+ DPAA2_PMD_ERR("%s: No IOMMU map for %p, size=%d",
+ __func__, param_addr, cfg.param_size);
+ rte_free(param_addr);
+
+ return -ENOBUFS;
+ }
priv->ss_param_iova = cfg.param_iova;
} else {
cfg.param_iova = 0;
@@ -227,7 +242,7 @@ int dpaa2_eth_enable_wriop_soft_parser(struct dpaa2_dev_priv *priv,
ret = dpni_enable_sw_sequence(dpni, CMD_PRI_LOW, priv->token, &cfg);
if (ret) {
- DPAA2_PMD_ERR("dpni_enable_sw_sequence failed for dpni%d",
+ DPAA2_PMD_ERR("Soft parser enabled for dpni@%d failed",
priv->hw_id);
rte_free(param_addr);
return ret;
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2020-2021 NXP
+ * Copyright 2020-2023 NXP
*/
#include <rte_ethdev.h>
@@ -572,41 +572,42 @@ dpaa2_tm_configure_queue(struct rte_eth_dev *dev, struct dpaa2_tm_node *node)
struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct dpaa2_queue *dpaa2_q;
+ uint64_t iova;
memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
- dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[node->id];
+ dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[node->id];
tc_id = node->parent->tc_id;
node->parent->tc_id++;
flow_id = 0;
- if (dpaa2_q == NULL) {
- DPAA2_PMD_ERR("Queue is not configured for node = %d", node->id);
- return -1;
+ if (!dpaa2_q) {
+ DPAA2_PMD_ERR("Queue is not configured for node = %d",
+ node->id);
+ return -ENOMEM;
}
DPAA2_PMD_DEBUG("tc_id = %d, channel = %d", tc_id,
node->parent->channel_id);
ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
- ((node->parent->channel_id << 8) | tc_id),
- flow_id, options, &tx_flow_cfg);
+ ((node->parent->channel_id << 8) | tc_id),
+ flow_id, options, &tx_flow_cfg);
if (ret) {
- DPAA2_PMD_ERR("Error in setting the tx flow: "
- "channel id = %d tc_id= %d, param = 0x%x "
- "flow=%d err=%d", node->parent->channel_id, tc_id,
- ((node->parent->channel_id << 8) | tc_id), flow_id,
- ret);
- return -1;
+ DPAA2_PMD_ERR("Set the TC[%d].ch[%d].TX flow[%d] (err=%d)",
+ tc_id, node->parent->channel_id, flow_id,
+ ret);
+ return ret;
}
dpaa2_q->flow_id = flow_id;
dpaa2_q->tc_index = tc_id;
ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
- DPNI_QUEUE_TX, ((node->parent->channel_id << 8) | dpaa2_q->tc_index),
- dpaa2_q->flow_id, &tx_flow_cfg, &qid);
+ DPNI_QUEUE_TX,
+ ((node->parent->channel_id << 8) | dpaa2_q->tc_index),
+ dpaa2_q->flow_id, &tx_flow_cfg, &qid);
if (ret) {
DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
- return -1;
+ return ret;
}
dpaa2_q->fqid = qid.fqid;
@@ -621,8 +622,13 @@ dpaa2_tm_configure_queue(struct rte_eth_dev *dev, struct dpaa2_tm_node *node)
*/
cong_notif_cfg.threshold_exit = (dpaa2_q->nb_desc * 9) / 10;
cong_notif_cfg.message_ctx = 0;
- cong_notif_cfg.message_iova =
- (size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
+ iova = DPAA2_VADDR_TO_IOVA_AND_CHECK(dpaa2_q->cscn,
+ sizeof(struct qbman_result));
+ if (iova == RTE_BAD_IOVA) {
+ DPAA2_PMD_ERR("No IOMMU map for cscn(%p)", dpaa2_q->cscn);
+ return -ENOBUFS;
+ }
+ cong_notif_cfg.message_iova = iova;
cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
cong_notif_cfg.notification_mode =
DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
@@ -641,6 +647,7 @@ dpaa2_tm_configure_queue(struct rte_eth_dev *dev, struct dpaa2_tm_node *node)
return -ret;
}
}
+ dpaa2_q->tm_sw_td = true;
return 0;
}