[dpdk-dev,PATCHv4,7/9] bonding: queue stats mapping
Commit Message
This patch propagates queue stats mapping requests to the slaves and, when stats are requested for the bonding port, fills the bonding port's per-queue stats with the sum of the corresponding values taken from the bonded slaves.
Signed-off-by: Tomasz Kulasek <tomaszx.kulasek@intel.com>
---
drivers/net/bonding/rte_eth_bond_pmd.c | 34 +++++++++++++++++++++++++++++++-
1 file changed, 33 insertions(+), 1 deletion(-)
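For context, the effect of the stats change is that an application can read aggregated per-queue counters directly from the bonding port. Below is a minimal usage sketch, not part of the patch, assuming an already configured and started bonded device identified by a hypothetical bond_port_id; types follow the ethdev API of this DPDK era (uint8_t port ids) and may differ in later releases.

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch only: print the bonding port's per-queue counters, which after
 * this patch are the sums of the corresponding slave counters. */
static void
print_bond_queue_stats(uint8_t bond_port_id)
{
	struct rte_eth_stats stats;
	int i;

	rte_eth_stats_get(bond_port_id, &stats);

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++)
		printf("stat idx %d: rx %" PRIu64 " pkts, tx %" PRIu64 " pkts\n",
				i, stats.q_ipackets[i], stats.q_opackets[i]);
}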
Comments
2015-07-15 19:26, Tomasz Kulasek:
> + .queue_stats_mapping_set = bond_ethdev_queue_stats_mapping_set,
As explained with the previous version of this patchset, this API should be
removed. It is specific to some Intel devices.
Please do not use it in bonding.
Then we could discuss in another thread how to remove it from ethdev.
> -----Original Message-----
> From: Thomas Monjalon [mailto:thomas.monjalon@6wind.com]
> Sent: Friday, September 25, 2015 12:15
> To: Kulasek, TomaszX
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCHv4 7/9] bonding: queue stats mapping
>
> 2015-07-15 19:26, Tomasz Kulasek:
> > + .queue_stats_mapping_set = bond_ethdev_queue_stats_mapping_set,
>
> As explained with the previous version of this patchset, this API should be
> removed. It is specific to some Intel devices.
> Please do not use it in bonding.
> Then we could discuss in another thread how to remove it from ethdev.
Ok, I'll remove it in v5.
@@ -1783,7 +1783,7 @@ bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct bond_dev_private *internals = dev->data->dev_private;
struct rte_eth_stats slave_stats;
- int i;
+ int i, j;
for (i = 0; i < internals->slave_count; i++) {
rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);
@@ -1802,6 +1802,15 @@ bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->rx_pause_xon += slave_stats.rx_pause_xon;
stats->tx_pause_xoff += slave_stats.tx_pause_xoff;
stats->rx_pause_xoff += slave_stats.rx_pause_xoff;
+
+ for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
+ stats->q_ipackets[j] += slave_stats.q_ipackets[j];
+ stats->q_opackets[j] += slave_stats.q_opackets[j];
+ stats->q_ibytes[j] += slave_stats.q_ibytes[j];
+ stats->q_obytes[j] += slave_stats.q_obytes[j];
+ stats->q_errors[j] += slave_stats.q_errors[j];
+ }
+
}
}
@@ -2110,6 +2119,28 @@ bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
return 0;
}
+static int
+bond_ethdev_queue_stats_mapping_set(struct rte_eth_dev *dev, uint16_t queue_id,
+ uint8_t stat_idx, uint8_t is_rx)
+{
+ int i;
+ int retval;
+ struct bond_dev_private *internals = dev->data->dev_private;
+
+ for (i = 0; i < internals->slave_count; i++) {
+ if (is_rx)
+ retval = rte_eth_dev_set_rx_queue_stats_mapping(
+ internals->slaves[i].port_id, queue_id, stat_idx);
+ else
+ retval = rte_eth_dev_set_tx_queue_stats_mapping(
+ internals->slaves[i].port_id, queue_id, stat_idx);
+ if (retval != 0)
+ return retval;
+ }
+
+ return 0;
+}
+
struct eth_dev_ops default_dev_ops = {
.dev_start = bond_ethdev_start,
.dev_stop = bond_ethdev_stop,
@@ -2123,6 +2154,7 @@ struct eth_dev_ops default_dev_ops = {
.link_update = bond_ethdev_link_update,
.stats_get = bond_ethdev_stats_get,
.stats_reset = bond_ethdev_stats_reset,
+ .queue_stats_mapping_set = bond_ethdev_queue_stats_mapping_set,
.promiscuous_enable = bond_ethdev_promiscuous_enable,
.promiscuous_disable = bond_ethdev_promiscuous_disable,
.reta_update = bond_ethdev_rss_reta_update,
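For illustration, here is a minimal sketch, not part of the patch, of how the new queue_stats_mapping_set op would be exercised in this v4 (per the review above, the op is dropped in v5): each queue of the bonding port is mapped one-to-one to a stats counter, and the bonding PMD forwards the call to every slave. The port id, queue count and one-to-one mapping are illustrative assumptions; uint8_t port ids follow the ethdev API of this DPDK era.

#include <rte_ethdev.h>

/* Sketch only: map RX/TX queue q of the bonding port to stats counter q;
 * with the new dev op the mapping is applied to each bonded slave. */
static int
map_bond_queue_stats(uint8_t bond_port_id, uint16_t nb_queues)
{
	uint16_t q;
	int ret;

	for (q = 0; q < nb_queues && q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++) {
		ret = rte_eth_dev_set_rx_queue_stats_mapping(bond_port_id,
				q, (uint8_t)q);
		if (ret != 0)
			return ret;
		ret = rte_eth_dev_set_tx_queue_stats_mapping(bond_port_id,
				q, (uint8_t)q);
		if (ret != 0)
			return ret;
	}

	return 0;
}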