@@ -3090,14 +3090,17 @@ mlx5_devx_cmd_wq_query(void *wq, uint32_t *counter_set_id)
*
* @param[in] ctx
* Context returned from mlx5 open_device() glue function.
+ * @param[out] syndrome
+ *   Pointer to return the syndrome of the DevX command response on failure. Can be NULL.
*
* @return
* Pointer to counter object on success, a NULL value otherwise and
* rte_errno is set.
*/
struct mlx5_devx_obj *
-mlx5_devx_cmd_queue_counter_alloc(void *ctx)
+mlx5_devx_cmd_queue_counter_alloc(void *ctx, int *syndrome)
{
+ int status;
struct mlx5_devx_obj *dcs = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*dcs), 0,
SOCKET_ID_ANY);
uint32_t in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
@@ -3112,6 +3115,9 @@ mlx5_devx_cmd_queue_counter_alloc(void *ctx)
sizeof(out));
if (!dcs->obj) {
DEVX_DRV_LOG(DEBUG, out, "create q counter set", NULL, 0);
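+		/* On failure, extract the FW status and return the syndrome to the caller if requested. */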
+ status = MLX5_GET(alloc_q_counter_out, out, status);
+ if (status && syndrome)
+ *syndrome = MLX5_GET(alloc_q_counter_out, out, syndrome);
mlx5_free(dcs);
return NULL;
}
@@ -847,7 +847,7 @@ __rte_internal
int mlx5_devx_cmd_wq_query(void *wq, uint32_t *counter_set_id);
__rte_internal
-struct mlx5_devx_obj *mlx5_devx_cmd_queue_counter_alloc(void *ctx);
+struct mlx5_devx_obj *mlx5_devx_cmd_queue_counter_alloc(void *ctx, int *syndrome);
__rte_internal
int mlx5_devx_cmd_queue_counter_query(struct mlx5_devx_obj *dcs, int clear,
uint32_t *out_of_buffers);
@@ -271,6 +271,9 @@
/* The 32 bit syndrome offset in struct mlx5_err_cqe. */
#define MLX5_ERROR_CQE_SYNDROME_OFFSET 52
+/* Firmware syndrome returned when the maximum number of queue counters is already allocated. */
+#define MLX5_Q_COUNTERS_LIMIT_REACHED 0x587239
+
/* The completion mode offset in the WQE control segment line 2. */
#define MLX5_COMP_MODE_OFFSET 2
@@ -973,7 +973,7 @@ mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)
struct mlx5_priv *priv = dev->data->dev_private;
void *ctx = priv->sh->cdev->ctx;
- priv->q_counters = mlx5_devx_cmd_queue_counter_alloc(ctx);
+ priv->q_counters = mlx5_devx_cmd_queue_counter_alloc(ctx, NULL);
if (!priv->q_counters) {
struct ibv_cq *cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
struct ibv_wq *wq;
@@ -3054,23 +3054,11 @@ mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
if (priv->sh) {
if (priv->q_counters != NULL &&
strcmp(ctr_name, "out_of_buffer") == 0) {
- if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
- DRV_LOG(WARNING, "DevX out_of_buffer counter is not supported in the secondary process");
- rte_errno = ENOTSUP;
- return 1;
- }
- return mlx5_devx_cmd_queue_counter_query
- (priv->q_counters, 0, (uint32_t *)stat);
+ return mlx5_read_queue_counter(priv->q_counters, ctr_name, stat);
}
if (priv->q_counters_hairpin != NULL &&
strcmp(ctr_name, "hairpin_out_of_buffer") == 0) {
- if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
- DRV_LOG(WARNING, "DevX out_of_buffer counter is not supported in the secondary process");
- rte_errno = ENOTSUP;
- return 1;
- }
- return mlx5_devx_cmd_queue_counter_query
- (priv->q_counters_hairpin, 0, (uint32_t *)stat);
+ return mlx5_read_queue_counter(priv->q_counters_hairpin, ctr_name, stat);
}
MKSTR(path, "%s/ports/%d/hw_counters/%s",
priv->sh->ibdev_path,
@@ -2389,14 +2389,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
mlx5_proc_priv_uninit(dev);
if (priv->drop_queue.hrxq)
mlx5_drop_action_destroy(dev);
- if (priv->q_counters) {
- mlx5_devx_cmd_destroy(priv->q_counters);
- priv->q_counters = NULL;
- }
- if (priv->q_counters_hairpin) {
- mlx5_devx_cmd_destroy(priv->q_counters_hairpin);
- priv->q_counters_hairpin = NULL;
- }
+ mlx5_q_counters_destroy(dev);
mlx5_mprq_free_mp(dev);
mlx5_os_free_shared_dr(priv);
#ifdef HAVE_MLX5_HWS_SUPPORT
@@ -3387,6 +3380,299 @@ mlx5_eth_find_next(uint16_t port_id, struct rte_device *odev)
return port_id;
}
+/**
+ * Allocate a queue counter for an Rx queue.
+ *
+ * @param[in] priv
+ * Pointer to the private device data structure.
+ * @param[in] rxq
+ * Pointer to the queue's private device data structure.
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_alloc_queue_counter(struct mlx5_priv *priv, struct mlx5_rxq_priv *rxq)
+{
+ int ret = 0;
+
+ if (rxq->q_counters)
+ return 0;
+
+ if (priv->q_counters_allocation_failure != 0) {
+ DRV_LOG(WARNING, "Some of the statistics of port %d "
+ "will not be available.", priv->dev_data->port_id);
+ return -EINVAL;
+ }
+
+ if (priv->obj_ops.rxq_obj_modify_counter_set_id == NULL)
+ return -ENOTSUP;
+
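+	/* On failure, ret receives the FW syndrome reported by the DevX command. */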
+ rxq->q_counters = mlx5_devx_cmd_queue_counter_alloc(priv->sh->cdev->ctx, &ret);
+ if (rxq->q_counters == NULL) {
+ if (ret == MLX5_Q_COUNTERS_LIMIT_REACHED) {
+ DRV_LOG(WARNING, "Maximum number of queue counters reached. "
+			"Unable to create counter object for port %d, queue %d "
+ "using DevX. The counter from this queue will not increment.",
+ priv->dev_data->port_id, rxq->idx);
+ return -rte_errno;
+ }
+ DRV_LOG(WARNING, "Port %d queue %d counter object cannot be created "
+ "by DevX. Counter from this queue will not increment.",
+ priv->dev_data->port_id, rxq->idx);
+ priv->q_counters_allocation_failure = 1;
+ return -ENOMEM;
+ }
+
+ ret = priv->obj_ops.rxq_obj_modify_counter_set_id(rxq, rxq->q_counters->id);
+ if (ret)
+ DRV_LOG(ERR, "failed to modify rq object for port %u"
+ " %s", priv->dev_data->port_id, strerror(rte_errno));
+ return ret;
+}
+
+/**
+ * Deallocate the queue counter of an Rx queue.
+ *
+ * @param[in] priv
+ * Pointer to the private device data structure.
+ * @param[in] rxq
+ * Pointer to the queue's private device data structure.
+ * @param[in] counter_set_id
+ *   Counter set ID to assign to the RQ in place of the freed counter.
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_dealloc_queue_counter(struct mlx5_priv *priv, struct mlx5_rxq_priv *rxq,
+ uint32_t counter_set_id)
+{
+ int ret = 0;
+
+ if (priv->obj_ops.rxq_obj_modify_counter_set_id == NULL)
+ return -ENOTSUP;
+
+ /* Dealloc rx hairpin counter. */
+ if (rxq->q_counters != NULL) {
+ mlx5_devx_cmd_destroy(rxq->q_counters);
+ rxq->q_counters = NULL;
+ }
+
+ /* Modify rxq. */
+ ret = priv->obj_ops.rxq_obj_modify_counter_set_id(rxq, counter_set_id);
+ if (ret)
+		DRV_LOG(ERR, "Port %u failed to modify rq object: %s",
+			priv->dev_data->port_id, strerror(rte_errno));
+ return ret;
+}
+
+/**
+ * Create a queue counter for each hairpin Rx queue and disable the
+ * port-global hairpin counter.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_enable_per_hairpin_queue_counter(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_priv *rxq;
+ unsigned int i;
+ int ret = 0;
+
+ if (priv->per_hairpin_queue_counter_enabled) {
+ DRV_LOG(WARNING, "Per hairpin queue counters are already enabled");
+ return -EINVAL;
+ }
+
+ /* Dealloc global hairpin counter. */
+ if (priv->q_counters_hairpin != NULL) {
+ mlx5_devx_cmd_destroy(priv->q_counters_hairpin);
+ priv->q_counters_hairpin = NULL;
+ mlx5_reset_xstats_by_name(priv, "hairpin_out_of_buffer");
+ priv->q_counters_allocation_failure = 0;
+ }
+
+ /* Go over each hairpin queue and attach a new queue counter */
+ for (i = 0; (i != priv->rxqs_n); ++i) {
+ rxq = mlx5_rxq_get(dev, i);
+ if (rxq == NULL || rxq->ctrl->obj->rq == NULL || !rxq->ctrl->is_hairpin)
+ continue;
+
+ ret = mlx5_alloc_queue_counter(priv, rxq);
+ if (ret != 0)
+ break;
+ }
+
+ mlx5_reset_xstats_rq(dev);
+ priv->per_hairpin_queue_counter_enabled = true;
+ return ret;
+}
+
+/**
+ * Destroy the counter of each hairpin queue and create the port-global
+ * hairpin counter.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_disable_per_hairpin_queue_counter(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_priv *rxq;
+ struct mlx5_rxq_data *rxq_data;
+ unsigned int i;
+
+ if (!priv->per_hairpin_queue_counter_enabled) {
+ DRV_LOG(WARNING, "Per hairpin queue counters are already disabled.");
+ return -EINVAL;
+ }
+
+	/* Find the first Rx hairpin queue and free its queue counter
+	 * to make room for the global hairpin counter.
+	 */
+ for (i = 0; (i != priv->rxqs_n); ++i) {
+ rxq = mlx5_rxq_get(dev, i);
+
+ if (rxq == NULL || rxq->ctrl->obj->rq == NULL || !rxq->ctrl->is_hairpin)
+ continue;
+
+ if (rxq->q_counters != NULL) {
+ mlx5_devx_cmd_destroy(rxq->q_counters);
+ rxq->q_counters = NULL;
+ priv->q_counters_allocation_failure = 0;
+ }
+
+ /* Alloc global hairpin queue counter. */
+ priv->q_counters_hairpin = mlx5_devx_cmd_queue_counter_alloc
+ (priv->sh->cdev->ctx, NULL);
+ if (priv->q_counters_hairpin)
+ break;
+ }
+
+ if (!priv->q_counters_hairpin) {
+ DRV_LOG(ERR, "Port %d global hairpin queue counter object cannot be created "
+ "by DevX.", priv->dev_data->port_id);
+ priv->q_counters_allocation_failure = 1;
+ return -ENOMEM;
+ }
+
+ /* Reset oob stats. */
+ mlx5_reset_xstats_by_name(priv, "hairpin_out_of_buffer");
+
+ for (i = 0; (i != priv->rxqs_n); ++i) {
+ rxq = mlx5_rxq_get(dev, i);
+ rxq_data = mlx5_rxq_data_get(dev, i);
+
+ if (rxq == NULL || rxq->ctrl->obj->rq == NULL || !rxq->ctrl->is_hairpin)
+ continue;
+
+ mlx5_dealloc_queue_counter(priv, rxq, priv->q_counters_hairpin->id);
+
+ if (rxq_data != NULL) {
+ /* Reset queue oob stats. */
+ rxq_data->stats.oobs = 0;
+ rxq_data->stats_reset.oobs = 0;
+ }
+ }
+
+ priv->per_hairpin_queue_counter_enabled = false;
+ return 0;
+}
+
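+/**
+ * Check whether per hairpin queue counters are supported on this device.
+ *
+ * Counting out of buffer drops on hairpin queues is supported only on
+ * ConnectX-7 / BlueField-3 and newer devices.
+ *
+ * @param[in] priv
+ *   Pointer to the private device data structure.
+ * @return
+ *   0 if supported, -ENOTSUP otherwise.
+ */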
+int
+mlx5_hairpin_queue_counter_supported(struct mlx5_priv *priv)
+{
+ if (priv->pci_dev == NULL)
+ return -ENOTSUP;
+ switch (priv->pci_dev->id.device_id) {
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX7:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTXVF:
+ case PCI_DEVICE_ID_MELLANOX_BLUEFIELD3:
+ case PCI_DEVICE_ID_MELLANOX_BLUEFIELDVF:
+ return 0;
+ default:
+ return -ENOTSUP;
+ }
+}
+
+/**
+ * Enable/disable per hairpin queue counter.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param on_off
+ *   1 to enable, 0 to disable.
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+rte_pmd_mlx5_set_per_hairpin_queue_counter(uint16_t port_id, int on_off)
+{
+ struct rte_eth_dev *dev;
+ struct mlx5_priv *priv;
+ int ret;
+
+ if (!rte_eth_dev_is_valid_port(port_id))
+ return -ENODEV;
+
+ dev = &rte_eth_devices[port_id];
+ priv = dev->data->dev_private;
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ DRV_LOG(WARNING, "DevX out_of_buffer counter is not supported in the secondary process");
+ return -ENOTSUP;
+ }
+
+ if (priv->obj_ops.rxq_obj_modify_counter_set_id == NULL)
+ return -ENOTSUP;
+
+ ret = mlx5_hairpin_queue_counter_supported(priv);
+ if (ret)
+ return ret;
+
+ if (on_off)
+ return mlx5_enable_per_hairpin_queue_counter(dev);
+ return mlx5_disable_per_hairpin_queue_counter(dev);
+}
+
+/**
+ * Read a per-queue statistic by counter name.
+ *
+ * @param[in] q_counter
+ * Pointer to the queue's counter object.
+ * @param[in] ctr_name
+ *   Name of the statistic counter to read.
+ * @param[out] stat
+ *   Pointer to the location where the read value is stored.
+ * @return
+ *   0 on success and the value in stat is valid, a negative errno value
+ *   otherwise.
+ */
+int
+mlx5_read_queue_counter(struct mlx5_devx_obj *q_counter, const char *ctr_name,
+ uint64_t *stat)
+{
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ DRV_LOG(WARNING,
+ "DevX %s counter is not supported in the secondary process", ctr_name);
+ return -ENOTSUP;
+ }
+
+ if (q_counter == NULL)
+ return -EINVAL;
+
+ return mlx5_devx_cmd_queue_counter_query(q_counter, 0, (uint32_t *)stat);
+}
+
/**
* Callback to remove a device.
*
@@ -1755,6 +1755,7 @@ struct mlx5_priv;
/* HW objects operations structure. */
struct mlx5_obj_ops {
int (*rxq_obj_modify_vlan_strip)(struct mlx5_rxq_priv *rxq, int on);
+ int (*rxq_obj_modify_counter_set_id)(struct mlx5_rxq_priv *rxq, uint32_t counter_set_id);
int (*rxq_obj_new)(struct mlx5_rxq_priv *rxq);
int (*rxq_event_get)(struct mlx5_rxq_obj *rxq_obj);
int (*rxq_obj_modify)(struct mlx5_rxq_priv *rxq, uint8_t type);
@@ -1993,6 +1994,7 @@ struct mlx5_priv {
uint32_t rss_shared_actions; /* RSS shared actions. */
/* If true, indicates that we failed to allocate a q counter in the past. */
bool q_counters_allocation_failure;
+ bool per_hairpin_queue_counter_enabled;
struct mlx5_devx_obj *q_counters; /* DevX queue counter object. */
uint32_t counter_set_id; /* Queue counter ID to set in DevX objects. */
/* DevX queue counter object for all hairpin queues of the port. */
@@ -2203,6 +2205,9 @@ int mlx5_flow_aso_ct_mng_init(struct mlx5_dev_ctx_shared *sh);
struct mlx5_physical_device *
mlx5_get_locked_physical_device(struct mlx5_priv *priv);
void mlx5_unlock_physical_device(void);
+int mlx5_hairpin_queue_counter_supported(struct mlx5_priv *priv);
+int mlx5_read_queue_counter(struct mlx5_devx_obj *q_counter, const char *ctr_name, uint64_t *stat);
+
/* mlx5_ethdev.c */
@@ -2308,7 +2313,8 @@ int mlx5_xstats_reset(struct rte_eth_dev *dev);
int mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_xstat_name *xstats_names,
unsigned int n);
-
+void mlx5_reset_xstats_by_name(struct mlx5_priv *priv, const char *ctr_name);
+void mlx5_reset_xstats_rq(struct rte_eth_dev *dev);
/* mlx5_vlan.c */
int mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
@@ -91,6 +91,30 @@ mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_priv *rxq, int on)
return mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr);
}
+/**
+ * Modify the queue counter of a given RQ.
+ *
+ * @param rxq
+ * Rx queue.
+ * @param counter_set_id
+ *   Queue counter ID to set.
+ *
+ * @return
+ * 0 on success, non-0 otherwise
+ */
+static int
+mlx5_rxq_obj_modify_counter(struct mlx5_rxq_priv *rxq, uint32_t counter_set_id)
+{
+ struct mlx5_devx_modify_rq_attr rq_attr;
+
+ memset(&rq_attr, 0, sizeof(rq_attr));
+ rq_attr.rq_state = MLX5_RQC_STATE_RDY;
+ rq_attr.state = MLX5_RQC_STATE_RDY;
+ rq_attr.counter_set_id = counter_set_id;
+ rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID;
+ return mlx5_devx_cmd_modify_rq(rxq->ctrl->obj->rq, &rq_attr);
+}
+
/**
* Modify RQ using DevX API.
*
@@ -508,6 +532,7 @@ mlx5_rxq_create_devx_cq_resources(struct mlx5_rxq_priv *rxq)
static uint32_t
mlx5_set_hairpin_queue_counter_obj(struct mlx5_priv *priv)
{
+	int ret;
+
if (priv->q_counters_hairpin != NULL)
return priv->q_counters_hairpin->id;
@@ -522,28 +547,23 @@ mlx5_set_hairpin_queue_counter_obj(struct mlx5_priv *priv)
return 0;
}
- switch (priv->pci_dev->id.device_id) {
- /* Counting out of buffer drops on hairpin queues is supported only on CX7 and up. */
- case PCI_DEVICE_ID_MELLANOX_CONNECTX7:
- case PCI_DEVICE_ID_MELLANOX_CONNECTXVF:
- case PCI_DEVICE_ID_MELLANOX_BLUEFIELD3:
- case PCI_DEVICE_ID_MELLANOX_BLUEFIELDVF:
-
- priv->q_counters_hairpin = mlx5_devx_cmd_queue_counter_alloc(priv->sh->cdev->ctx);
- if (priv->q_counters_hairpin == NULL) {
- /* Failed to allocate */
- DRV_LOG(DEBUG, "Some of the statistics of port %d "
- "will not be available.", priv->dev_data->port_id);
- priv->q_counters_allocation_failure = 1;
- return 0;
- }
- return priv->q_counters_hairpin->id;
- default:
+ ret = mlx5_hairpin_queue_counter_supported(priv);
+ if (ret) {
DRV_LOG(DEBUG, "Hairpin out of buffer counter "
"is not available on this NIC.");
priv->q_counters_allocation_failure = 1;
return 0;
}
+
+ priv->q_counters_hairpin = mlx5_devx_cmd_queue_counter_alloc(priv->sh->cdev->ctx, NULL);
+ if (priv->q_counters_hairpin == NULL) {
+ /* Failed to allocate */
+ DRV_LOG(DEBUG, "Some of the statistics of port %d "
+ "will not be available.", priv->dev_data->port_id);
+ priv->q_counters_allocation_failure = 1;
+ return 0;
+ }
+ return priv->q_counters_hairpin->id;
}
/**
@@ -1710,6 +1730,7 @@ mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj)
struct mlx5_obj_ops devx_obj_ops = {
.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
+ .rxq_obj_modify_counter_set_id = mlx5_rxq_obj_modify_counter,
.rxq_obj_new = mlx5_rxq_devx_obj_new,
.rxq_event_get = mlx5_rx_devx_get_event,
.rxq_obj_modify = mlx5_devx_modify_rq,
@@ -37,6 +37,18 @@ struct mlx5_rxq_stats {
#endif
uint64_t idropped; /**< Total of packets dropped when RX ring full. */
uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
+	uint64_t oobs; /**< Total of hairpin queue out-of-buffer events. */
+};
+
+/* Store statistics names and their offsets in the stats structure. */
+struct mlx5_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned int offset;
+};
+
+struct mlx5_rq_stats {
+	/** Per-queue total of hairpin packets dropped due to out of buffer. */
+ uint64_t q_oobs[RTE_ETHDEV_QUEUE_STAT_CNTRS];
};
/* Compressed CQE context. */
@@ -184,6 +196,7 @@ struct mlx5_rxq_priv {
uint32_t lwm:16;
uint32_t lwm_event_pending:1;
uint32_t lwm_devx_subscribed:1;
+ struct mlx5_devx_obj *q_counters; /* DevX hairpin queue counter object. */
};
/* mlx5_rxq.c */
@@ -209,6 +222,7 @@ void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rxq_obj_verify(struct rte_eth_dev *dev);
+void mlx5_q_counters_destroy(struct rte_eth_dev *dev);
struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
uint16_t desc, unsigned int socket,
const struct rte_eth_rxconf *conf,
@@ -1306,6 +1306,42 @@ mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
return ret;
}
+/**
+ * Destroy all queue counters.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_q_counters_destroy(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ unsigned int i;
+
+ /* Destroy port q counter */
+ if (priv->q_counters) {
+ mlx5_devx_cmd_destroy(priv->q_counters);
+ priv->q_counters = NULL;
+ }
+
+ /* Destroy port global hairpin q counter */
+ if (priv->q_counters_hairpin) {
+ mlx5_devx_cmd_destroy(priv->q_counters_hairpin);
+ priv->q_counters_hairpin = NULL;
+ }
+
+ /* Destroy per hairpin queue counter */
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
+
+ if (rxq == NULL || rxq->q_counters == NULL)
+ continue;
+
+ mlx5_devx_cmd_destroy(rxq->q_counters);
+ rxq->q_counters = NULL;
+ }
+}
+
/**
* Callback function to initialize mbufs for Multi-Packet RQ.
*/
@@ -20,6 +20,102 @@
#include "mlx5_tx.h"
#include "mlx5_malloc.h"
+
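+/* Each name below is prefixed with its queue index in the xstats output, e.g. "rx_q0_oobs". */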
+static const struct mlx5_xstats_name_off mlx5_rxq_stats_strings[] = {
+ {"oobs", offsetof(struct mlx5_rq_stats, q_oobs)},
+};
+
+#define NB_RXQ_STATS RTE_DIM(mlx5_rxq_stats_strings)
+
+/**
+ * Retrieve extended device statistics for Rx queues. These per-queue
+ * statistics are placed before the entries filled by the other
+ * statistics modules (device counters, etc.).
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] stats
+ * Pointer to rte extended stats table.
+ * @param n
+ * The size of the stats table.
+ * @return
+ *   Number of per-queue extended stats entries filled, or the number of
+ *   entries required if the stats array is NULL or too small.
+ */
+static int
+mlx5_rq_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *stats,
+ unsigned int n)
+{
+ struct mlx5_rxq_priv *rxq_priv;
+ struct mlx5_rxq_data *rxq_data;
+ unsigned int idx;
+ unsigned int i;
+
+ uint16_t n_stats_rq = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
+ if (n < n_stats_rq || !stats)
+ return n_stats_rq;
+
+ for (i = 0; (i != n_stats_rq); ++i) {
+ rxq_data = mlx5_rxq_data_get(dev, i);
+ rxq_priv = mlx5_rxq_get(dev, i);
+ stats[i].id = i;
+
+ if (rxq_data == NULL)
+ continue;
+ idx = rxq_data->idx;
+ if (idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ continue;
+
+ if (rxq_priv && rxq_priv->ctrl->is_hairpin)
+ mlx5_read_queue_counter(rxq_priv->q_counters,
+ "hairpin_out_of_buffer", &rxq_data->stats.oobs);
+
+ stats[i].value = rxq_data->stats.oobs - rxq_data->stats_reset.oobs;
+ }
+ return n_stats_rq;
+}
+
+/**
+ * Retrieve the names of extended device statistics for Rx queues. These
+ * per-queue names are placed before the entries filled by the other
+ * statistics modules (device counters, etc.).
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[out] xstats_names
+ * Buffer to insert names into.
+ * @param n
+ * Number of names.
+ *
+ * @return
+ * Number of xstats names.
+ */
+static int
+mlx5_rq_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int n)
+{
+ uint32_t idx, id_queue;
+ int cnt_used_entries = 0;
+ uint16_t n_stats_rq = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
+ if (n < n_stats_rq || !xstats_names)
+ return n_stats_rq;
+
+ for (id_queue = 0; id_queue < n_stats_rq; id_queue++) {
+ for (idx = 0; idx < NB_RXQ_STATS; idx++) {
+ snprintf(xstats_names[cnt_used_entries].name,
+ sizeof(xstats_names[0].name),
+ "rx_q%u_%s",
+ id_queue, mlx5_rxq_stats_strings[idx].name);
+ cnt_used_entries++;
+ }
+ }
+ return n_stats_rq;
+}
+
/**
* DPDK callback to get extended device statistics.
*
@@ -46,6 +142,7 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
uint16_t stats_n_2nd = 0;
uint16_t mlx5_stats_n = xstats_ctrl->mlx5_stats_n;
bool bond_master = (priv->master && priv->pf_bond >= 0);
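+	/* Per hairpin queue counters occupy the first entries of the xstats array. */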
+ int n_used = mlx5_rq_xstats_get(dev, stats, n);
if (n >= mlx5_stats_n && stats) {
int ret;
@@ -69,27 +166,27 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
if (ret < 0)
return ret;
for (i = 0; i != mlx5_stats_n; i++) {
- stats[i].id = i;
+ stats[i + n_used].id = i + n_used;
if (xstats_ctrl->info[i].dev) {
uint64_t wrap_n;
uint64_t hw_stat = xstats_ctrl->hw_stats[i];
- stats[i].value = (counters[i] -
+ stats[i + n_used].value = (counters[i] -
xstats_ctrl->base[i]) &
(uint64_t)UINT32_MAX;
wrap_n = hw_stat >> 32;
- if (stats[i].value <
+ if (stats[i + n_used].value <
(hw_stat & (uint64_t)UINT32_MAX))
wrap_n++;
- stats[i].value |= (wrap_n) << 32;
- xstats_ctrl->hw_stats[i] = stats[i].value;
+ stats[i + n_used].value |= (wrap_n) << 32;
+ xstats_ctrl->hw_stats[i] = stats[i + n_used].value;
} else {
- stats[i].value =
+ stats[i + n_used].value =
(counters[i] - xstats_ctrl->base[i]);
}
}
}
- mlx5_stats_n = mlx5_txpp_xstats_get(dev, stats, n, mlx5_stats_n);
+ mlx5_stats_n = mlx5_txpp_xstats_get(dev, stats, n, mlx5_stats_n + n_used);
return mlx5_stats_n;
}
@@ -273,11 +370,58 @@ mlx5_xstats_reset(struct rte_eth_dev *dev)
xstats_ctrl->base[i] = counters[i];
xstats_ctrl->hw_stats[i] = 0;
}
+ mlx5_reset_xstats_rq(dev);
mlx5_txpp_xstats_reset(dev);
mlx5_free(counters);
return 0;
}
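+/**
+ * Reset the extended statistic that matches the given device counter name.
+ *
+ * @param[in] priv
+ *   Pointer to the private device data structure.
+ * @param[in] ctr_name
+ *   Name of the device counter to reset.
+ */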
+void
+mlx5_reset_xstats_by_name(struct mlx5_priv *priv, const char *ctr_name)
+{
+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ unsigned int mlx5_xstats_n = xstats_ctrl->mlx5_stats_n;
+ unsigned int i;
+
+ for (i = 0; i != mlx5_xstats_n; ++i) {
+ if (strcmp(xstats_ctrl->info[i].ctr_name, ctr_name) == 0) {
+ xstats_ctrl->base[i] = 0;
+ xstats_ctrl->hw_stats[i] = 0;
+ xstats_ctrl->xstats[i] = 0;
+ return;
+ }
+ }
+}
+
+/**
+ * Clear device extended statistics for each Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mlx5_reset_xstats_rq(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_priv *rxq;
+ struct mlx5_rxq_data *rxq_data;
+ unsigned int i;
+
+ for (i = 0; (i != priv->rxqs_n); ++i) {
+ rxq = mlx5_rxq_get(dev, i);
+ rxq_data = mlx5_rxq_data_get(dev, i);
+
+ if (rxq == NULL || rxq_data == NULL || rxq->q_counters == NULL)
+ continue;
+ if (rxq->ctrl->is_hairpin)
+ mlx5_read_queue_counter(rxq->q_counters,
+ "hairpin_out_of_buffer", &rxq_data->stats_reset.oobs);
+ else
+ mlx5_read_queue_counter(rxq->q_counters,
+ "out_of_buffer", &rxq_data->stats_reset.oobs);
+ }
+}
+
/**
* DPDK callback to retrieve names of extended device statistics
*
@@ -300,14 +444,18 @@ mlx5_xstats_get_names(struct rte_eth_dev *dev,
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
unsigned int mlx5_xstats_n = xstats_ctrl->mlx5_stats_n;
+ unsigned int n_used = mlx5_rq_xstats_get_names(dev, xstats_names, n);
+
if (n >= mlx5_xstats_n && xstats_names) {
for (i = 0; i != mlx5_xstats_n; ++i) {
- strlcpy(xstats_names[i].name,
+ rte_strscpy(xstats_names[i + n_used].name,
xstats_ctrl->info[i].dpdk_name,
RTE_ETH_XSTATS_NAME_SIZE);
+ xstats_names[i + n_used].name[RTE_ETH_XSTATS_NAME_SIZE - 1] = 0;
}
}
+
mlx5_xstats_n = mlx5_txpp_xstats_get_names(dev, xstats_names,
- n, mlx5_xstats_n);
+ n, mlx5_xstats_n + n_used);
return mlx5_xstats_n;
}
@@ -164,6 +164,17 @@ mlx5_test_set_port_host_shaper(uint16_t port_id, uint16_t avail_thresh_triggered
return 0;
}
+static void
+mlx5_set_per_hairpin_queue_counter(uint16_t port_id, uint8_t on_off)
+{
+ if (port_id_is_invalid(port_id, ENABLED_WARN)) {
+ print_valid_ports();
+ return;
+ }
+
+ rte_pmd_mlx5_set_per_hairpin_queue_counter(port_id, on_off);
+}
+
#ifndef RTE_EXEC_ENV_WINDOWS
static const char*
mlx5_test_get_socket_path(char *extend)
@@ -480,6 +491,61 @@ static cmdline_parse_inst_t mlx5_test_cmd_port_host_shaper = {
}
};
+/* *** set option to attach a q counter to each hairpin queue *** */
+struct cmd_operate_enable_hairpin_counter_result {
+ cmdline_fixed_string_t mlx5;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t name;
+ cmdline_fixed_string_t on_off;
+ portid_t port_id;
+};
+
+static void cmd_operate_enable_hairpin_counter_parse(void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ struct cmd_operate_enable_hairpin_counter_result *res = parsed_result;
+ uint16_t on_off = 0;
+
+ on_off = !strcmp(res->on_off, "on") ? 1 : 0;
+
+ if ((strcmp(res->mlx5, "mlx5") == 0) &&
+ (strcmp(res->port, "port") == 0) &&
+ (strcmp(res->name, "hairpin-per-queue-counter-enable") == 0))
+ mlx5_set_per_hairpin_queue_counter(res->port_id, on_off);
+}
+
+static cmdline_parse_token_string_t cmd_operate_enable_hairpin_counter_mlx5 =
+ TOKEN_STRING_INITIALIZER(struct cmd_operate_enable_hairpin_counter_result, mlx5,
+ "mlx5");
+static cmdline_parse_token_string_t cmd_operate_enable_hairpin_counter_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_operate_enable_hairpin_counter_result,
+ port, "port");
+static cmdline_parse_token_num_t cmd_operate_enable_hairpin_counter_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_operate_enable_hairpin_counter_result,
+ port_id, RTE_UINT16);
+static cmdline_parse_token_string_t cmd_operate_enable_hairpin_counter_name =
+ TOKEN_STRING_INITIALIZER(struct cmd_operate_enable_hairpin_counter_result,
+ name, "hairpin-per-queue-counter-enable");
+static cmdline_parse_token_string_t cmd_operate_enable_hairpin_counter_on_off =
+ TOKEN_STRING_INITIALIZER(struct cmd_operate_enable_hairpin_counter_result,
+ on_off, "on#off");
+
+static cmdline_parse_inst_t mlx5_cmd_operate_enable_hairpin_counter = {
+ .f = cmd_operate_enable_hairpin_counter_parse,
+ .data = NULL,
+ .help_str = "mlx5 port (port_id) hairpin-per-queue-counter-enable on|off",
+ .tokens = {
+ (void *)&cmd_operate_enable_hairpin_counter_mlx5,
+ (void *)&cmd_operate_enable_hairpin_counter_port,
+ (void *)&cmd_operate_enable_hairpin_counter_port_id,
+ (void *)&cmd_operate_enable_hairpin_counter_name,
+ (void *)&cmd_operate_enable_hairpin_counter_on_off,
+ NULL,
+ },
+};
+
#ifndef RTE_EXEC_ENV_WINDOWS
/* *** attach a specified port *** */
struct mlx5_cmd_operate_attach_port_result {
@@ -1373,6 +1439,10 @@ static struct testpmd_driver_commands mlx5_driver_cmds = {
"rate (rate_num):\n"
" Set HOST_SHAPER avail_thresh_triggered and rate with port_id\n\n",
},
+ {
+ .ctx = &mlx5_cmd_operate_enable_hairpin_counter,
+ .help = "mlx5 port (port_id) hairpin-per-queue-counter-enable (on|off)\n\n",
+ },
#ifndef RTE_EXEC_ENV_WINDOWS
{
.ctx = &mlx5_cmd_operate_attach_port,
@@ -194,6 +194,19 @@ int rte_pmd_mlx5_external_tx_queue_id_unmap(uint16_t port_id,
__rte_experimental
int rte_pmd_mlx5_host_shaper_config(int port_id, uint8_t rate, uint32_t flags);
+/**
+ * Enable/disable per hairpin queue counter.
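+ *
+ * A minimal usage sketch (illustrative only, assuming an mlx5 port with
+ * hairpin queues already configured):
+ *
+ * @code
+ * if (rte_pmd_mlx5_set_per_hairpin_queue_counter(port_id, 1) == 0) {
+ *     // per-queue "rx_q<N>_oobs" xstats now reflect hairpin out-of-buffer drops
+ * }
+ * @endcode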
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param on_off
+ *   1 to enable, 0 to disable.
+ * @return
+ * Zero if successful. Non-zero otherwise.
+ */
+__rte_experimental
+int rte_pmd_mlx5_set_per_hairpin_queue_counter(uint16_t port_id, int on_off);
+
/**
* Enable traffic for external SQ.
*
@@ -25,4 +25,6 @@ EXPERIMENTAL {
rte_pmd_mlx5_external_tx_queue_id_unmap;
rte_pmd_mlx5_txq_dump_contexts;
rte_pmd_mlx5_rxq_dump_contexts;
+ # added in 24.10
+ rte_pmd_mlx5_set_per_hairpin_queue_counter;
};
@@ -78,7 +78,7 @@ mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)
struct mlx5_priv *priv = dev->data->dev_private;
void *ctx = priv->sh->cdev->ctx;
- priv->q_counters = mlx5_devx_cmd_queue_counter_alloc(ctx);
+ priv->q_counters = mlx5_devx_cmd_queue_counter_alloc(ctx, NULL);
if (!priv->q_counters) {
DRV_LOG(ERR, "Port %d queue counter object cannot be created "
"by DevX - imissed counter will be unavailable",