Configuring one queue per port fails to compile on my system.
This patch is a test to see how widely it fails in CI.
Signed-off-by: Morten Brørup <mb@smartsharesystems.com>
---
v5:
* Wrap GCC optimizer pragmas in if defined(RTE_TOOLCHAIN_GCC).
v4:
* Work around the GCC optimizer incorrectly emitting a warning in these
  network drivers (see the sketch below the changelog):
* bnxt
* e1000
* failsafe
* hns3
v3:
* Fix net/ixgbe driver.
v2:
* Fix net/vmxnet3 driver.
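For reference, the GCC workaround applied in the bnxt, e1000, failsafe and
hns3 hunks below, reduced to a minimal sketch. The function name and body
are illustrative only, not taken from any driver; each driver wraps its own
queue-state loop the same way:

  #if defined(RTE_TOOLCHAIN_GCC)
  #pragma GCC push_options
  #pragma GCC optimize("no-peel-loops")
  #endif
  /* Illustrative function: with RTE_MAX_QUEUES_PER_PORT == 1, GCC's
   * loop peeling produced a spurious warning about the array access
   * below, although the loop bound already keeps the index in range.
   */
  static void
  example_start_queues(struct rte_eth_dev *dev)
  {
      uint16_t i;

      for (i = 0; i < dev->data->nb_rx_queues; i++)
          dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
  }
  #if defined(RTE_TOOLCHAIN_GCC)
  #pragma GCC pop_options
  #endif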
---
config/rte_config.h                  |  4 ++--
drivers/net/bnxt/bnxt_ethdev.c       |  7 ++++++
drivers/net/e1000/igb_rxtx.c         |  7 ++++++
drivers/net/failsafe/failsafe_ops.c  | 14 ++++++++++++
drivers/net/hns3/hns3_rxtx.c         |  7 ++++++
drivers/net/ixgbe/ixgbe_ethdev.c     |  3 ++-
drivers/net/vmxnet3/vmxnet3_ethdev.c | 32 +++++++++++++++++-----------
7 files changed, 59 insertions(+), 15 deletions(-)
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -65,8 +65,8 @@
#define RTE_MBUF_DEFAULT_MEMPOOL_OPS "ring_mp_mc"
/* ether defines */
-#define RTE_MAX_QUEUES_PER_PORT 1024
-#define RTE_ETHDEV_QUEUE_STAT_CNTRS 16 /* max 256 */
+#define RTE_MAX_QUEUES_PER_PORT 1 /* default 1024 */
+#define RTE_ETHDEV_QUEUE_STAT_CNTRS 1 /* max 256, default 16 */
#define RTE_ETHDEV_RXTX_CALLBACKS 1
#define RTE_MAX_MULTI_HOST_CTRLS 4
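These two defines size fixed per-port arrays in the ethdev structures. In
particular, the per-queue counters in struct rte_eth_stats (abridged here
from rte_ethdev.h) shrink to a single slot, which is what exposes the
unclamped queue loops fixed in the drivers below:

  /* Abridged from rte_ethdev.h: the per-queue stats arrays are sized
   * by RTE_ETHDEV_QUEUE_STAT_CNTRS, so with the value set to 1 any
   * write to q_ipackets[i] with i > 0 is out of bounds.
   */
  struct rte_eth_stats {
      uint64_t ipackets;  /* Total successfully received packets. */
      /* ... */
      uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
      uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
      /* ... */
  };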
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -842,6 +842,10 @@ static int bnxt_alloc_prev_ring_stats(struct bnxt *bp)
return -ENOMEM;
}
+#if defined(RTE_TOOLCHAIN_GCC)
+#pragma GCC push_options
+#pragma GCC optimize("no-peel-loops")
+#endif
static int bnxt_start_nic(struct bnxt *bp)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
@@ -1006,6 +1010,9 @@ static int bnxt_start_nic(struct bnxt *bp)
return rc;
}
+#if defined(RTE_TOOLCHAIN_GCC)
+#pragma GCC pop_options
+#endif
static int bnxt_shutdown_nic(struct bnxt *bp)
{
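A note on the pattern above: GCC can also attach the option to a single
function through its optimize attribute. The attribute form is sketched
here for comparison only; it is hypothetical and not part of this patch:

  /* Hypothetical alternative: apply -fno-peel-loops to one function
   * via the GCC optimize attribute instead of push/pop pragmas.
   */
  #if defined(RTE_TOOLCHAIN_GCC)
  __attribute__((optimize("no-peel-loops")))
  #endif
  static int bnxt_start_nic(struct bnxt *bp);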
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1860,6 +1860,10 @@ eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset)
return RTE_ETH_TX_DESC_FULL;
}
+#if defined(RTE_TOOLCHAIN_GCC)
+#pragma GCC push_options
+#pragma GCC optimize("no-peel-loops")
+#endif
void
igb_dev_clear_queues(struct rte_eth_dev *dev)
{
@@ -1885,6 +1889,9 @@ igb_dev_clear_queues(struct rte_eth_dev *dev)
}
}
}
+#if defined(RTE_TOOLCHAIN_GCC)
+#pragma GCC pop_options
+#endif
void
igb_dev_free_queues(struct rte_eth_dev *dev)
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -103,6 +103,10 @@ fs_dev_configure(struct rte_eth_dev *dev)
return 0;
}
+#if defined(RTE_TOOLCHAIN_GCC)
+#pragma GCC push_options
+#pragma GCC optimize("no-peel-loops")
+#endif
static void
fs_set_queues_state_start(struct rte_eth_dev *dev)
{
@@ -123,6 +127,9 @@ fs_set_queues_state_start(struct rte_eth_dev *dev)
RTE_ETH_QUEUE_STATE_STARTED;
}
}
+#if defined(RTE_TOOLCHAIN_GCC)
+#pragma GCC pop_options
+#endif
static int
fs_dev_start(struct rte_eth_dev *dev)
@@ -171,6 +178,10 @@ fs_dev_start(struct rte_eth_dev *dev)
return 0;
}
+#if defined(RTE_TOOLCHAIN_GCC)
+#pragma GCC push_options
+#pragma GCC optimize("no-peel-loops")
+#endif
static void
fs_set_queues_state_stop(struct rte_eth_dev *dev)
{
@@ -185,6 +196,9 @@ fs_set_queues_state_stop(struct rte_eth_dev *dev)
dev->data->tx_queue_state[i] =
RTE_ETH_QUEUE_STATE_STOPPED;
}
+#if defined(RTE_TOOLCHAIN_GCC)
+#pragma GCC pop_options
+#endif
static int
fs_dev_stop(struct rte_eth_dev *dev)
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -1299,6 +1299,10 @@ hns3_init_queues(struct hns3_adapter *hns, bool reset_queue)
return ret;
}
+#if defined(RTE_TOOLCHAIN_GCC)
+#pragma GCC push_options
+#pragma GCC optimize("no-peel-loops")
+#endif
void
hns3_start_tqps(struct hns3_hw *hw)
{
@@ -1322,6 +1326,9 @@ hns3_start_tqps(struct hns3_hw *hw)
RTE_ETH_QUEUE_STATE_STARTED;
}
}
+#if defined(RTE_TOOLCHAIN_GCC)
+#pragma GCC pop_options
+#endif
void
hns3_stop_tqps(struct hns3_hw *hw)
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -3385,7 +3385,8 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->opackets = hw_stats->gptc;
stats->obytes = hw_stats->gotc;
- for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
+ for (i = 0; i < RTE_MIN(IXGBE_QUEUE_STAT_COUNTERS,
+ (typeof(IXGBE_QUEUE_STAT_COUNTERS))RTE_ETHDEV_QUEUE_STAT_CNTRS); i++) {
stats->q_ipackets[i] = hw_stats->qprc[i];
stats->q_opackets[i] = hw_stats->qptc[i];
stats->q_ibytes[i] = hw_stats->qbrc[i];
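The clamped bound visits per-queue counters only for the slots that exist
both in hardware and in the rte_eth_stats arrays; the cast presumably keeps
both RTE_MIN() arguments the same type so the macro's internal comparison
does not warn. The pattern in isolation, with an illustrative loop body:

  /* Illustrative: visit only the queue-stat slots that exist both in
   * hardware (IXGBE_QUEUE_STAT_COUNTERS) and in the rte_eth_stats
   * arrays (RTE_ETHDEV_QUEUE_STAT_CNTRS).
   */
  for (i = 0; i < RTE_MIN(IXGBE_QUEUE_STAT_COUNTERS,
          (typeof(IXGBE_QUEUE_STAT_COUNTERS))RTE_ETHDEV_QUEUE_STAT_CNTRS);
          i++)
      stats->q_ipackets[i] = hw_stats->qprc[i];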
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -1470,42 +1470,52 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
struct vmxnet3_hw *hw = dev->data->dev_private;
struct UPT1_TxStats txStats;
struct UPT1_RxStats rxStats;
+ uint64_t packets, bytes;
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
for (i = 0; i < hw->num_tx_queues; i++) {
vmxnet3_tx_stats_get(hw, i, &txStats);
- stats->q_opackets[i] = txStats.ucastPktsTxOK +
+ packets = txStats.ucastPktsTxOK +
txStats.mcastPktsTxOK +
txStats.bcastPktsTxOK;
- stats->q_obytes[i] = txStats.ucastBytesTxOK +
+ bytes = txStats.ucastBytesTxOK +
txStats.mcastBytesTxOK +
txStats.bcastBytesTxOK;
- stats->opackets += stats->q_opackets[i];
- stats->obytes += stats->q_obytes[i];
+ stats->opackets += packets;
+ stats->obytes += bytes;
stats->oerrors += txStats.pktsTxError + txStats.pktsTxDiscard;
+
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_opackets[i] = packets;
+ stats->q_obytes[i] = bytes;
+ }
}
for (i = 0; i < hw->num_rx_queues; i++) {
vmxnet3_rx_stats_get(hw, i, &rxStats);
- stats->q_ipackets[i] = rxStats.ucastPktsRxOK +
+ packets = rxStats.ucastPktsRxOK +
rxStats.mcastPktsRxOK +
rxStats.bcastPktsRxOK;
- stats->q_ibytes[i] = rxStats.ucastBytesRxOK +
+ bytes = rxStats.ucastBytesRxOK +
rxStats.mcastBytesRxOK +
rxStats.bcastBytesRxOK;
- stats->ipackets += stats->q_ipackets[i];
- stats->ibytes += stats->q_ibytes[i];
-
- stats->q_errors[i] = rxStats.pktsRxError;
+ stats->ipackets += packets;
+ stats->ibytes += bytes;
stats->ierrors += rxStats.pktsRxError;
stats->imissed += rxStats.pktsRxOutOfBuf;
+
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_ipackets[i] = packets;
+ stats->q_ibytes[i] = bytes;
+ stats->q_errors[i] = rxStats.pktsRxError;
+ }
}
return 0;
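The reworked loops keep the port-wide totals accumulated over every hardware
queue while storing per-queue values only for indices that fit in the stats
arrays. The essential shape, with the counter sums elided:

  /* Illustrative: totals cover all hw queues; per-queue slots are
   * filled only while the index fits RTE_ETHDEV_QUEUE_STAT_CNTRS.
   */
  for (i = 0; i < hw->num_rx_queues; i++) {
      uint64_t packets = 0;  /* sum of ucast/mcast/bcast RX counters */

      stats->ipackets += packets;
      if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS)
          stats->q_ipackets[i] = packets;
  }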
@@ -1521,8 +1531,6 @@ vmxnet3_dev_stats_reset(struct rte_eth_dev *dev)
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
- RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
-
for (i = 0; i < hw->num_tx_queues; i++) {
vmxnet3_hw_tx_stats_get(hw, i, &txStats);
memcpy(&hw->snapshot_tx_stats[i], &txStats,