@@ -84,12 +84,12 @@ struct __rte_cache_aligned rte_eth_dev {
* User-supplied functions called from rx_burst to post-process
* received packets before passing them to the user
*/
- RTE_ATOMIC(struct rte_eth_rxtx_callback *) post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
+ RTE_ATOMIC(struct rte_eth_rxtx_callback *) post_rx_burst_cbs[RTE_MAX_ETHPORT_RX_QUEUES];
/**
* User-supplied functions called from tx_burst to pre-process
* packets before passing them to the driver for transmission
*/
- RTE_ATOMIC(struct rte_eth_rxtx_callback *) pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
+ RTE_ATOMIC(struct rte_eth_rxtx_callback *) pre_tx_burst_cbs[RTE_MAX_ETHPORT_TX_QUEUES];
enum rte_eth_dev_state state; /**< Flag indicating the port state */
void *security_ctx; /**< Context for security ops */
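The new per-direction limits replace the single RTE_MAX_QUEUES_PER_PORT throughout. For context, a minimal sketch of how the two macros could default so that symmetric builds keep the legacy behaviour (the fallback scheme is an assumption for illustration, not part of this patch):

/* Hypothetical compat defaults: if a per-direction limit is not set at
 * build time, fall back to the legacy symmetric RTE_MAX_QUEUES_PER_PORT. */
#ifndef RTE_MAX_ETHPORT_RX_QUEUES
#define RTE_MAX_ETHPORT_RX_QUEUES RTE_MAX_QUEUES_PER_PORT
#endif
#ifndef RTE_MAX_ETHPORT_TX_QUEUES
#define RTE_MAX_ETHPORT_TX_QUEUES RTE_MAX_QUEUES_PER_PORT
#endif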
@@ -165,9 +165,9 @@ struct __rte_cache_aligned rte_eth_dev_data {
flow_configured : 1;
/** Queues state: HAIRPIN(2) / STARTED(1) / STOPPED(0) */
- uint8_t rx_queue_state[RTE_MAX_QUEUES_PER_PORT];
+ uint8_t rx_queue_state[RTE_MAX_ETHPORT_RX_QUEUES];
/** Queues state: HAIRPIN(2) / STARTED(1) / STOPPED(0) */
- uint8_t tx_queue_state[RTE_MAX_QUEUES_PER_PORT];
+ uint8_t tx_queue_state[RTE_MAX_ETHPORT_TX_QUEUES];
uint32_t dev_flags; /**< Capabilities */
int numa_node; /**< NUMA node connection */
@@ -190,7 +190,8 @@ struct dummy_queue {
bool rx_warn_once;
bool tx_warn_once;
};
-static struct dummy_queue *dummy_queues_array[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
+static struct dummy_queue *dummy_rxq_array[RTE_MAX_ETHPORTS][RTE_MAX_ETHPORT_RX_QUEUES];
+static struct dummy_queue *dummy_txq_array[RTE_MAX_ETHPORTS][RTE_MAX_ETHPORT_TX_QUEUES];
static struct dummy_queue per_port_queues[RTE_MAX_ETHPORTS];
RTE_INIT(dummy_queue_init)
{
@@ -199,8 +200,10 @@ RTE_INIT(dummy_queue_init)
for (port_id = 0; port_id < RTE_DIM(per_port_queues); port_id++) {
unsigned int q;
- for (q = 0; q < RTE_DIM(dummy_queues_array[port_id]); q++)
- dummy_queues_array[port_id][q] = &per_port_queues[port_id];
+ for (q = 0; q < RTE_DIM(dummy_rxq_array[port_id]); q++)
+ dummy_rxq_array[port_id][q] = &per_port_queues[port_id];
+ for (q = 0; q < RTE_DIM(dummy_txq_array[port_id]); q++)
+ dummy_txq_array[port_id][q] = &per_port_queues[port_id];
}
}
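RTE_DIM() (from rte_common.h, expanding to sizeof(a)/sizeof((a)[0])) makes each loop bound follow its own array's direction-specific size automatically. An illustrative compile-time check of that sizing relationship (not part of the patch):

/* Each row of the 2-D dummy arrays is sized by its direction's limit */
_Static_assert(RTE_DIM(dummy_rxq_array[0]) == RTE_MAX_ETHPORT_RX_QUEUES,
	"Rx dummy row must match the Rx queue limit");
_Static_assert(RTE_DIM(dummy_txq_array[0]) == RTE_MAX_ETHPORT_TX_QUEUES,
	"Tx dummy row must match the Tx queue limit");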
@@ -245,7 +248,8 @@ dummy_eth_tx_burst(void *txq,
void
eth_dev_fp_ops_reset(struct rte_eth_fp_ops *fpo)
{
- static RTE_ATOMIC(void *) dummy_data[RTE_MAX_QUEUES_PER_PORT];
+ static RTE_ATOMIC(void *) dummy_rx_data[RTE_MAX_ETHPORT_RX_QUEUES];
+ static RTE_ATOMIC(void *) dummy_tx_data[RTE_MAX_ETHPORT_TX_QUEUES];
uintptr_t port_id = fpo - rte_eth_fp_ops;
per_port_queues[port_id].rx_warn_once = false;
@@ -254,12 +258,12 @@ eth_dev_fp_ops_reset(struct rte_eth_fp_ops *fpo)
.rx_pkt_burst = dummy_eth_rx_burst,
.tx_pkt_burst = dummy_eth_tx_burst,
.rxq = {
- .data = (void **)&dummy_queues_array[port_id],
- .clbk = dummy_data,
+ .data = (void **)&dummy_rxq_array[port_id],
+ .clbk = dummy_rx_data,
},
.txq = {
- .data = (void **)&dummy_queues_array[port_id],
- .clbk = dummy_data,
+ .data = (void **)&dummy_txq_array[port_id],
+ .clbk = dummy_tx_data,
},
};
}
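Splitting dummy_data into dummy_rx_data and dummy_tx_data mirrors the array split above: once the two limits may differ, each clbk placeholder must cover exactly its own direction's queue range. A simplified sketch of how the fast path indexes these arrays (modeled on rte_eth_rx_burst(); error handling omitted, so treat it as illustrative):

struct rte_eth_fp_ops *p = &rte_eth_fp_ops[port_id];
/* queue_id < RTE_MAX_ETHPORT_RX_QUEUES is what sizes both arrays */
void *qd = p->rxq.data[queue_id];	/* per-queue driver data */
void *cb = rte_atomic_load_explicit(&p->rxq.clbk[queue_id],
		rte_memory_order_relaxed);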
@@ -420,7 +424,7 @@ eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
sizeof(dev->data->rx_queues[0]) *
- RTE_MAX_QUEUES_PER_PORT,
+ RTE_MAX_ETHPORT_RX_QUEUES,
RTE_CACHE_LINE_SIZE);
if (dev->data->rx_queues == NULL) {
dev->data->nb_rx_queues = 0;
@@ -450,7 +454,7 @@ eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
sizeof(dev->data->tx_queues[0]) *
- RTE_MAX_QUEUES_PER_PORT,
+ RTE_MAX_ETHPORT_TX_QUEUES,
RTE_CACHE_LINE_SIZE);
if (dev->data->tx_queues == NULL) {
dev->data->nb_tx_queues = 0;
@@ -1367,18 +1367,18 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
}
- if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
+ if (nb_rx_q > RTE_MAX_ETHPORT_RX_QUEUES) {
RTE_ETHDEV_LOG_LINE(ERR,
"Number of Rx queues requested (%u) is greater than max supported(%d)",
- nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
+ nb_rx_q, RTE_MAX_ETHPORT_RX_QUEUES);
ret = -EINVAL;
goto rollback;
}
- if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
+ if (nb_tx_q > RTE_MAX_ETHPORT_TX_QUEUES) {
RTE_ETHDEV_LOG_LINE(ERR,
"Number of Tx queues requested (%u) is greater than max supported(%d)",
- nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
+ nb_tx_q, RTE_MAX_ETHPORT_TX_QUEUES);
ret = -EINVAL;
goto rollback;
}
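On the application side the two limits are now enforced independently, so a request may legally be asymmetric. A minimal hedged usage sketch (the empty configuration and the clamping helper are illustrative, not from this patch):

#include <rte_ethdev.h>

static int
configure_port(uint16_t port_id, uint16_t want_rx, uint16_t want_tx)
{
	struct rte_eth_conf conf = {0};

	/* Clamp each direction to its own build-time limit up front,
	 * instead of relying on the -EINVAL path above. */
	uint16_t nb_rx = RTE_MIN(want_rx, (uint16_t)RTE_MAX_ETHPORT_RX_QUEUES);
	uint16_t nb_tx = RTE_MIN(want_tx, (uint16_t)RTE_MAX_ETHPORT_TX_QUEUES);

	return rte_eth_dev_configure(port_id, nb_rx, nb_tx, &conf);
}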
@@ -3811,11 +3811,9 @@ rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
return eth_err(port_id, diag);
}
- /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
- dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
- RTE_MAX_QUEUES_PER_PORT);
- dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
- RTE_MAX_QUEUES_PER_PORT);
+ /* Maximum number of queues should be <= RTE_MAX_ETHPORT_RX/TX_QUEUES */
+ dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues, RTE_MAX_ETHPORT_RX_QUEUES);
+ dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues, RTE_MAX_ETHPORT_TX_QUEUES);
dev_info->driver_name = dev->device->driver->name;
dev_info->nb_rx_queues = dev->data->nb_rx_queues;
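Since rte_eth_dev_info_get() now clamps each maximum separately, applications can rely on the reported values directly. A hedged example (the helper is illustrative):

#include <stdio.h>
#include <rte_ethdev.h>

static void
print_queue_limits(uint16_t port_id)
{
	struct rte_eth_dev_info info;

	if (rte_eth_dev_info_get(port_id, &info) != 0)
		return;
	/* Already capped by RTE_MAX_ETHPORT_RX_QUEUES/_TX_QUEUES */
	printf("port %u: max %u Rx / %u Tx queues\n",
	       port_id, info.max_rx_queues, info.max_tx_queues);
}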
@@ -6090,7 +6090,7 @@ rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
#ifdef RTE_ETHDEV_DEBUG_RX
if (port_id >= RTE_MAX_ETHPORTS ||
- queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+ queue_id >= RTE_MAX_ETHPORT_RX_QUEUES) {
RTE_ETHDEV_LOG_LINE(ERR,
"Invalid port_id=%u or queue_id=%u",
port_id, queue_id);
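This and the per-queue bounds checks below are compiled in only when RTE_ETHDEV_DEBUG_RX (or, on the Tx paths, RTE_ETHDEV_DEBUG_TX) is defined, so release builds pay nothing for the stricter limits. One way to enable them, as an illustrative build invocation:

/* e.g. meson setup -Dc_args='-DRTE_ETHDEV_DEBUG_RX -DRTE_ETHDEV_DEBUG_TX' build */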
@@ -6161,7 +6161,7 @@ rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
#ifdef RTE_ETHDEV_DEBUG_RX
if (port_id >= RTE_MAX_ETHPORTS ||
- queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+ queue_id >= RTE_MAX_ETHPORT_RX_QUEUES) {
RTE_ETHDEV_LOG_LINE(ERR,
"Invalid port_id=%u or queue_id=%u",
port_id, queue_id);
@@ -6234,7 +6234,7 @@ rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
#ifdef RTE_ETHDEV_DEBUG_RX
if (port_id >= RTE_MAX_ETHPORTS ||
- queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+ queue_id >= RTE_MAX_ETHPORT_RX_QUEUES) {
RTE_ETHDEV_LOG_LINE(ERR,
"Invalid port_id=%u or queue_id=%u",
port_id, queue_id);
@@ -6305,7 +6305,7 @@ static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
#ifdef RTE_ETHDEV_DEBUG_TX
if (port_id >= RTE_MAX_ETHPORTS ||
- queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+ queue_id >= RTE_MAX_ETHPORT_TX_QUEUES) {
RTE_ETHDEV_LOG_LINE(ERR,
"Invalid port_id=%u or queue_id=%u",
port_id, queue_id);
@@ -6429,7 +6429,7 @@ rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
#ifdef RTE_ETHDEV_DEBUG_TX
if (port_id >= RTE_MAX_ETHPORTS ||
- queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+ queue_id >= RTE_MAX_ETHPORT_TX_QUEUES) {
RTE_ETHDEV_LOG_LINE(ERR,
"Invalid port_id=%u or queue_id=%u",
port_id, queue_id);
@@ -6539,7 +6539,7 @@ rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
#ifdef RTE_ETHDEV_DEBUG_TX
if (port_id >= RTE_MAX_ETHPORTS ||
- queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+ queue_id >= RTE_MAX_ETHPORT_TX_QUEUES) {
RTE_ETHDEV_LOG_LINE(ERR,
"Invalid port_id=%u or queue_id=%u",
port_id, queue_id);
@@ -6744,7 +6744,7 @@ rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id,
#ifdef RTE_ETHDEV_DEBUG_TX
if (tx_port_id >= RTE_MAX_ETHPORTS ||
- tx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+ tx_queue_id >= RTE_MAX_ETHPORT_TX_QUEUES) {
RTE_ETHDEV_LOG_LINE(ERR,
"Invalid tx_port_id=%u or tx_queue_id=%u",
tx_port_id, tx_queue_id);
@@ -6770,7 +6770,7 @@ rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id,
#ifdef RTE_ETHDEV_DEBUG_RX
if (rx_port_id >= RTE_MAX_ETHPORTS ||
- rx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+ rx_queue_id >= RTE_MAX_ETHPORT_RX_QUEUES) {
RTE_ETHDEV_LOG_LINE(ERR, "Invalid rx_port_id=%u or rx_queue_id=%u",
rx_port_id, rx_queue_id);
return 0;
@@ -6890,7 +6890,7 @@ rte_eth_tx_queue_count(uint16_t port_id, uint16_t queue_id)
goto out;
}
- if (queue_id >= RTE_MAX_QUEUES_PER_PORT) {
+ if (queue_id >= RTE_MAX_ETHPORT_TX_QUEUES) {
RTE_ETHDEV_LOG_LINE(ERR, "Invalid queue_id=%u for port_id=%u",
queue_id, port_id);
rc = -EINVAL;