@@ -121,5 +121,13 @@ uint16_t cnxk_rep_tx_burst_dummy(void *tx_queue, struct rte_mbuf **tx_pkts, uint
uint16_t cnxk_rep_rx_burst_dummy(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
void cnxk_rep_tx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id);
void cnxk_rep_rx_queue_stop(struct rte_eth_dev *ethdev, uint16_t queue_id);
+int cnxk_rep_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *stats, unsigned int n);
+int cnxk_rep_xstats_reset(struct rte_eth_dev *eth_dev);
+int cnxk_rep_xstats_get_names(struct rte_eth_dev *eth_dev, struct rte_eth_xstat_name *xstats_names,
+ unsigned int n);
+int cnxk_rep_xstats_get_by_id(struct rte_eth_dev *eth_dev, const uint64_t *ids, uint64_t *values,
+ unsigned int n);
+int cnxk_rep_xstats_get_names_by_id(struct rte_eth_dev *eth_dev, const uint64_t *ids,
+ struct rte_eth_xstat_name *xstats_names, unsigned int n);
#endif /* __CNXK_REP_H__ */
@@ -21,6 +21,8 @@ typedef enum CNXK_REP_MSG {
CNXK_REP_MSG_EXIT,
/* Ethernet operation msgs */
CNXK_REP_MSG_ETH_SET_MAC,
+ CNXK_REP_MSG_ETH_STATS_GET,
+ CNXK_REP_MSG_ETH_STATS_CLEAR,
/* End of messaging sequence */
CNXK_REP_MSG_END,
} cnxk_rep_msg_t;
@@ -89,6 +91,11 @@ typedef struct cnxk_rep_msg_eth_mac_set_meta {
uint8_t addr_bytes[RTE_ETHER_ADDR_LEN];
} __rte_packed cnxk_rep_msg_eth_set_mac_meta_t;
+/* Ethernet op - get/clear stats.
+ * Identifies the representor port a STATS_GET/STATS_CLEAR control message
+ * applies to; filled from rep_dev->rep_id by the sender.
+ */
+typedef struct cnxk_rep_msg_eth_stats_meta {
+	uint16_t portid;
+} __rte_packed cnxk_rep_msg_eth_stats_meta_t;
+
void cnxk_rep_msg_populate_command(void *buffer, uint32_t *length, cnxk_rep_msg_t type,
uint32_t size);
void cnxk_rep_msg_populate_command_meta(void *buffer, uint32_t *length, void *msg_meta, uint32_t sz,
@@ -10,6 +10,11 @@
#define RX_DESC_PER_QUEUE 256
#define NB_REP_VDEV_MBUF 1024
+/* Extended-stat name table; order defines the xstat ids:
+ * id 0 = packets received on the representor queue,
+ * id 1 = packets transmitted on the representor queue.
+ */
+static const struct rte_eth_xstat_name cnxk_rep_xstats_string[] = {
+	{"rep_nb_rx"},
+	{"rep_nb_tx"},
+};
+
static uint16_t
cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
@@ -24,6 +29,7 @@ cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
plt_rep_dbg("Transmitting %d packets on eswitch queue %d", nb_pkts, txq->qid);
n_tx = cnxk_eswitch_dev_tx_burst(rep_dev->parent_dev, txq->qid, tx_pkts, nb_pkts,
NIX_TX_OFFLOAD_VLAN_QINQ_F);
+ txq->stats.pkts += n_tx;
return n_tx;
}
@@ -43,6 +49,7 @@ cnxk_rep_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
return 0;
plt_rep_dbg("Received %d packets on eswitch queue %d", n_rx, rxq->qid);
+ rxq->stats.pkts += n_rx;
return n_rx;
}
@@ -486,19 +493,154 @@ cnxk_rep_tx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id)
plt_err("Failed to release txq %d, rc=%d", rc, txq->qid);
}
+/* Build and send an ETH_STATS_GET/ETH_STATS_CLEAR control message for this
+ * representor and wait for the peer's acknowledgement in @adata.
+ *
+ * @rep_dev: representor device (rep_id is placed in the message meta)
+ * @adata:   filled with the peer's ack (status and optional payload)
+ * @msg:     CNXK_REP_MSG_ETH_STATS_GET or CNXK_REP_MSG_ETH_STATS_CLEAR
+ *
+ * Return: 0 on success, negative errno on allocation/send failure.
+ */
+static int
+process_eth_stats(struct cnxk_rep_dev *rep_dev, cnxk_rep_msg_ack_data_t *adata, cnxk_rep_msg_t msg)
+{
+	cnxk_rep_msg_eth_stats_meta_t msg_st_meta;
+	uint32_t len = 0;
+	void *buffer;
+	size_t size;
+	int rc;
+
+	size = CNXK_REP_MSG_MAX_BUFFER_SZ;
+	buffer = plt_zmalloc(size, 0);
+	if (!buffer) {
+		plt_err("Failed to allocate mem");
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	cnxk_rep_msg_populate_header(buffer, &len);
+
+	msg_st_meta.portid = rep_dev->rep_id;
+	cnxk_rep_msg_populate_command_meta(buffer, &len, &msg_st_meta,
+					   sizeof(cnxk_rep_msg_eth_stats_meta_t), msg);
+	cnxk_rep_msg_populate_msg_end(buffer, &len);
+
+	rc = cnxk_rep_msg_send_process(rep_dev, buffer, len, adata);
+	if (rc) {
+		plt_err("Failed to process the message, err %d", rc);
+		goto fail;
+	}
+
+	rte_free(buffer);
+
+	return 0;
+fail:
+	/* rte_free(NULL) is a no-op, safe on the alloc-failure path */
+	rte_free(buffer);
+	return rc;
+}
+
+/* Read HW stats for an independent (native) representee via the ROC eswitch
+ * API and translate them into the rte_eth_stats layout.
+ *
+ * Return: 0 on success, negative errno from the ROC layer on failure.
+ */
+static int
+native_repte_eth_stats(struct cnxk_rep_dev *rep_dev, struct rte_eth_stats *stats)
+{
+	struct roc_nix_stats nix_stats;
+	int rc;
+
+	rc = roc_eswitch_nix_repte_stats(&rep_dev->parent_dev->nix, rep_dev->hw_func, &nix_stats);
+	if (rc) {
+		plt_err("Failed to get stats for representee %x, err %d", rep_dev->hw_func, rc);
+		return rc;
+	}
+
+	memset(stats, 0, sizeof(struct rte_eth_stats));
+
+	/* Rx counters */
+	stats->ipackets = nix_stats.rx_ucast + nix_stats.rx_mcast + nix_stats.rx_bcast;
+	stats->ibytes = nix_stats.rx_octs;
+	stats->imissed = nix_stats.rx_drop;
+	stats->ierrors = nix_stats.rx_err;
+
+	/* Tx counters */
+	stats->opackets = nix_stats.tx_ucast + nix_stats.tx_mcast + nix_stats.tx_bcast;
+	stats->obytes = nix_stats.tx_octs;
+	stats->oerrors = nix_stats.tx_drop;
+
+	return 0;
+}
+
int
cnxk_rep_stats_get(struct rte_eth_dev *ethdev, struct rte_eth_stats *stats)
{
- PLT_SET_USED(ethdev);
- PLT_SET_USED(stats);
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+ struct rte_eth_stats vf_stats;
+ cnxk_rep_msg_ack_data_t adata;
+ int rc;
+
+ /* If representor not representing any active VF, return 0 */
+ if (!rep_dev->is_vf_active)
+ return 0;
+
+ if (rep_dev->native_repte) {
+ /* For representees which are independent */
+ rc = native_repte_eth_stats(rep_dev, &vf_stats);
+ if (rc) {
+ plt_err("Failed to get stats for vf rep %x (hw_func %x), err %d",
+ rep_dev->port_id, rep_dev->hw_func, rc);
+ goto fail;
+ }
+ } else {
+ /* For representees which are part of companian app */
+ rc = process_eth_stats(rep_dev, &adata, CNXK_REP_MSG_ETH_STATS_GET);
+ if (rc || adata.u.sval < 0) {
+ if (adata.u.sval < 0)
+ rc = adata.u.sval;
+
+ plt_err("Failed to get stats for vf rep %x, err %d", rep_dev->port_id, rc);
+ }
+
+ if (adata.size != sizeof(struct rte_eth_stats)) {
+ rc = -EINVAL;
+ plt_err("Incomplete stats received for vf rep %d", rep_dev->port_id);
+ goto fail;
+ }
+
+ rte_memcpy(&vf_stats, adata.u.data, adata.size);
+ }
+
+ stats->q_ipackets[0] = vf_stats.ipackets;
+ stats->q_ibytes[0] = vf_stats.ibytes;
+ stats->ipackets = vf_stats.ipackets;
+ stats->ibytes = vf_stats.ibytes;
+
+ stats->q_opackets[0] = vf_stats.opackets;
+ stats->q_obytes[0] = vf_stats.obytes;
+ stats->opackets = vf_stats.opackets;
+ stats->obytes = vf_stats.obytes;
+
+ plt_rep_dbg("Input packets %" PRId64 " Output packets %" PRId64 "", stats->ipackets,
+ stats->opackets);
+
return 0;
+fail:
+ return rc;
}
int
cnxk_rep_stats_reset(struct rte_eth_dev *ethdev)
{
- PLT_SET_USED(ethdev);
- return 0;
+ struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);
+ cnxk_rep_msg_ack_data_t adata;
+ int rc = 0;
+
+ /* If representor not representing any active VF, return 0 */
+ if (!rep_dev->is_vf_active)
+ return 0;
+
+ if (rep_dev->native_repte)
+ return -ENOTSUP;
+
+ rc = process_eth_stats(rep_dev, &adata, CNXK_REP_MSG_ETH_STATS_CLEAR);
+ if (rc || adata.u.sval < 0) {
+ if (adata.u.sval < 0)
+ rc = adata.u.sval;
+
+ plt_err("Failed to clear stats for vf rep %x, err %d", rep_dev->port_id, rc);
+ }
+
+ return rc;
}
int
@@ -560,6 +702,124 @@ cnxk_rep_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)
return rc;
}
+/* op: xstats_get — report the two SW counters (rep_nb_rx = id 0,
+ * rep_nb_tx = id 1) accumulated in the representor queues.
+ * Per ethdev contract, returns the required count when @n is too small.
+ * NOTE(review): dereferences rep_dev->rxq/txq without a NULL check —
+ * assumes queues are set up before xstats are queried; confirm against
+ * queue-setup/dev_start ordering.
+ */
+int
+cnxk_rep_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *stats, unsigned int n)
+{
+	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
+	unsigned int num = RTE_DIM(cnxk_rep_xstats_string);
+	int cnt = 0;
+
+	if (!rep_dev)
+		return -EINVAL;
+
+	if (n < num)
+		return num;
+
+	stats[cnt].id = cnt;
+	stats[cnt].value = rep_dev->rxq->stats.pkts;
+	cnt++;
+	stats[cnt].id = cnt;
+	stats[cnt].value = rep_dev->txq->stats.pkts;
+	cnt++;
+
+	return cnt;
+}
+
+/* op: xstats_reset — reset representee stats via cnxk_rep_stats_reset(),
+ * then zero the SW per-queue packet counters backing the xstats.
+ * -ENOTSUP from stats_reset (native representee path) is tolerated so the
+ * SW counters are still cleared.
+ * NOTE(review): dereferences rep_dev->rxq/txq without a NULL check —
+ * assumes queues are set up; confirm ordering.
+ */
+int
+cnxk_rep_xstats_reset(struct rte_eth_dev *eth_dev)
+{
+	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
+	int rc;
+
+	if (!rep_dev)
+		return -EINVAL;
+
+	rc = cnxk_rep_stats_reset(eth_dev);
+	if (rc < 0 && rc != -ENOTSUP)
+		return rc;
+
+	rep_dev->rxq->stats.pkts = 0;
+	rep_dev->txq->stats.pkts = 0;
+
+	return 0;
+}
+
+/* op: xstats_get_names — copy the xstat name table into @xstats_names.
+ * A NULL destination or an undersized buffer reports the required count.
+ */
+int
+cnxk_rep_xstats_get_names(__rte_unused struct rte_eth_dev *eth_dev,
+			  struct rte_eth_xstat_name *xstats_names, unsigned int n)
+{
+	unsigned int count = RTE_DIM(cnxk_rep_xstats_string);
+	unsigned int idx;
+
+	/* Caller probing for size, or buffer too small: report count only */
+	if (xstats_names == NULL || n < count)
+		return count;
+
+	for (idx = 0; idx < count; idx++)
+		rte_strscpy(xstats_names[idx].name, cnxk_rep_xstats_string[idx].name,
+			    sizeof(xstats_names[idx].name));
+
+	return count;
+}
+
+/* op: xstats_get_by_id — serve values for the requested ids; only the full
+ * set is accepted (n < num reports the required size, n > num is rejected,
+ * so effectively n == num with ids drawn from 0..num-1 in any order).
+ * NOTE(review): ethdev allows callers to request a strict subset of ids
+ * (n < num); that case currently returns num instead of the values —
+ * confirm this restriction is intended.
+ * NOTE(review): dereferences rep_dev->rxq/txq — assumes queues are set up.
+ */
+int
+cnxk_rep_xstats_get_by_id(struct rte_eth_dev *eth_dev, const uint64_t *ids, uint64_t *values,
+			  unsigned int n)
+{
+	struct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);
+	unsigned int num = RTE_DIM(cnxk_rep_xstats_string);
+	unsigned int i;
+
+	if (!rep_dev)
+		return -EINVAL;
+
+	if (n < num)
+		return num;
+
+	if (n > num)
+		return -EINVAL;
+
+	for (i = 0; i < n; i++) {
+		switch (ids[i]) {
+		case 0:
+			values[i] = rep_dev->rxq->stats.pkts;
+			break;
+		case 1:
+			values[i] = rep_dev->txq->stats.pkts;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return n;
+}
+
+/* op: xstats_get_names_by_id — copy names for the requested ids; mirrors
+ * cnxk_rep_xstats_get_by_id's size policy (effectively requires n == num;
+ * n < num reports the required size, n > num or an out-of-range id fails).
+ * NOTE(review): like get_by_id, a strict-subset request (n < num) returns
+ * num instead of the names — confirm intended.
+ */
+int
+cnxk_rep_xstats_get_names_by_id(__rte_unused struct rte_eth_dev *eth_dev, const uint64_t *ids,
+				struct rte_eth_xstat_name *xstats_names, unsigned int n)
+{
+	unsigned int num = RTE_DIM(cnxk_rep_xstats_string);
+	unsigned int i;
+
+	if (n < num)
+		return num;
+
+	if (n > num)
+		return -EINVAL;
+
+	for (i = 0; i < n; i++) {
+		if (ids[i] >= num)
+			return -EINVAL;
+		rte_strscpy(xstats_names[i].name, cnxk_rep_xstats_string[ids[i]].name,
+			    sizeof(xstats_names[i].name));
+	}
+
+	return n;
+}
+
/* CNXK platform representor dev ops */
struct eth_dev_ops cnxk_rep_dev_ops = {
.dev_infos_get = cnxk_rep_dev_info_get,
@@ -578,5 +838,10 @@ struct eth_dev_ops cnxk_rep_dev_ops = {
.dev_stop = cnxk_rep_dev_stop,
.stats_get = cnxk_rep_stats_get,
.stats_reset = cnxk_rep_stats_reset,
- .flow_ops_get = cnxk_rep_flow_ops_get
+ .flow_ops_get = cnxk_rep_flow_ops_get,
+ .xstats_get = cnxk_rep_xstats_get,
+ .xstats_reset = cnxk_rep_xstats_reset,
+ .xstats_get_names = cnxk_rep_xstats_get_names,
+ .xstats_get_by_id = cnxk_rep_xstats_get_by_id,
+ .xstats_get_names_by_id = cnxk_rep_xstats_get_names_by_id
};