[4/4] net/mlx5: allow basic counter management fallback

Message ID 1563287696-10509-5-git-send-email-matan@mellanox.com (mailing list archive)
State Accepted, archived
Delegated to: Raslan Darawsheh
Series [1/4] net/mlx5: accelerate DV flow counter transactions

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK

Commit Message

Matan Azrad July 16, 2019, 2:34 p.m. UTC
  In case the asynchronous devx commands are not supported in RDMA core,
fall back to a basic counter management.

Here, the PMD counter cache is redundant and the host thread does not
update it. Hence, each counter operation goes to the FW and the
acceleration is reduced.
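
As an illustration only, here is a minimal stand-alone sketch of the mode
selection this patch adds (the types and the select_counter_mode() helper
below are hypothetical simplifications, not PMD code; the real logic lives
in mlx5.c under HAVE_IBV_DEVX_OBJ): the fallback flag is set when the
flow_counters_dump HCA capability is missing or when rdma-core was built
without HAVE_IBV_DEVX_ASYNC.

#include <stdio.h>
#include <stdint.h>

struct hca_attr {
	uint32_t flow_counters_dump:1; /* FW supports bulk counter dump. */
};

struct port_priv {
	uint32_t counter_fallback:1;   /* Use basic (per-counter FW) management. */
};

static void
select_counter_mode(struct port_priv *priv, const struct hca_attr *attr)
{
	priv->counter_fallback = 0;
	if (!attr->flow_counters_dump)
		priv->counter_fallback = 1;
#ifndef HAVE_IBV_DEVX_ASYNC
	/* No asynchronous devx commands available: the cache cannot be filled. */
	priv->counter_fallback = 1;
#endif
	if (priv->counter_fallback)
		printf("Use fall-back DV counter management\n");
}

int
main(void)
{
	struct hca_attr attr = { .flow_counters_dump = 0 };
	struct port_priv priv;

	select_counter_mode(&priv, &attr);
	return 0;
}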

Signed-off-by: Matan Azrad <matan@mellanox.com>
Acked-by: Shahaf Shuler <shahafs@mellanox.com>
---
 drivers/net/mlx5/mlx5.c           |   8 +++
 drivers/net/mlx5/mlx5.h           |   2 +
 drivers/net/mlx5/mlx5_devx_cmds.c |   4 +-
 drivers/net/mlx5/mlx5_flow_dv.c   | 127 ++++++++++++++++++++++++++++++++++++--
 drivers/net/mlx5/mlx5_prm.h       |   4 +-
 5 files changed, 137 insertions(+), 8 deletions(-)
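
Before the patch itself, a side note on the new fallback allocator added in
mlx5_flow_dv.c below: shared counters are looked up by id in a per-device
list and reference-counted, and the devx counter object is destroyed only
when the last reference is released. The stand-alone sketch below models
just that bookkeeping with simplified, hypothetical types (no devx calls):

#include <stdint.h>
#include <stdlib.h>
#include <sys/queue.h>

struct flow_counter {
	TAILQ_ENTRY(flow_counter) next; /* Linked in the per-device list. */
	uint32_t id;                    /* User-provided counter id. */
	uint32_t shared:1;              /* Counter may be reused by other flows. */
	uint32_t ref_cnt;               /* Number of flows holding it. */
};

TAILQ_HEAD(counter_list, flow_counter);

static struct flow_counter *
counter_alloc(struct counter_list *list, uint32_t shared, uint32_t id)
{
	struct flow_counter *cnt;

	if (shared) {
		/* Reuse an existing shared counter with the same id. */
		TAILQ_FOREACH(cnt, list, next) {
			if (cnt->shared && cnt->id == id) {
				cnt->ref_cnt++;
				return cnt;
			}
		}
	}
	cnt = calloc(1, sizeof(*cnt));
	if (!cnt)
		return NULL;
	cnt->id = id;
	cnt->shared = shared;
	cnt->ref_cnt = 1;
	TAILQ_INSERT_HEAD(list, cnt, next);
	return cnt;
}

static void
counter_release(struct counter_list *list, struct flow_counter *cnt)
{
	if (cnt && --cnt->ref_cnt == 0) {
		TAILQ_REMOVE(list, cnt, next);
		free(cnt);
	}
}

int
main(void)
{
	struct counter_list list = TAILQ_HEAD_INITIALIZER(list);
	struct flow_counter *a = counter_alloc(&list, 1, 7);
	struct flow_counter *b = counter_alloc(&list, 1, 7); /* Same object as a. */

	counter_release(&list, b);
	counter_release(&list, a); /* Freed on the last release. */
	return 0;
}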
  

Patch

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index a8d824e..f4ad5d2 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1624,11 +1624,19 @@  struct mlx5_dev_spawn_data {
 	mlx5_link_update(eth_dev, 0);
 #ifdef HAVE_IBV_DEVX_OBJ
 	if (config.devx) {
+		priv->counter_fallback = 0;
 		err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config.hca_attr);
 		if (err) {
 			err = -err;
 			goto error;
 		}
+		if (!config.hca_attr.flow_counters_dump)
+			priv->counter_fallback = 1;
+#ifndef HAVE_IBV_DEVX_ASYNC
+		priv->counter_fallback = 1;
+#endif
+		if (priv->counter_fallback)
+			DRV_LOG(INFO, "Use fall-back DV counter management\n");
 	}
 #endif
 #ifdef HAVE_MLX5DV_DR_ESWITCH
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 4ce352a..2bd2aa6 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -168,6 +168,7 @@  struct mlx5_devx_mkey_attr {
 /* HCA attributes. */
 struct mlx5_hca_attr {
 	uint32_t eswitch_manager:1;
+	uint32_t flow_counters_dump:1;
 	uint8_t flow_counter_bulk_alloc_bitmap;
 };
 
@@ -457,6 +458,7 @@  struct mlx5_priv {
 	unsigned int representor:1; /* Device is a port representor. */
 	unsigned int master:1; /* Device is a E-Switch master. */
 	unsigned int dr_shared:1; /* DV/DR data is shared. */
+	unsigned int counter_fallback:1; /* Use counter fallback management. */
 	uint16_t domain_id; /* Switch domain identifier. */
 	uint16_t vport_id; /* Associated VF vport index (if any). */
 	int32_t representor_id; /* Port representor identifier. */
diff --git a/drivers/net/mlx5/mlx5_devx_cmds.c b/drivers/net/mlx5/mlx5_devx_cmds.c
index 28d967a..d26d5bc 100644
--- a/drivers/net/mlx5/mlx5_devx_cmds.c
+++ b/drivers/net/mlx5/mlx5_devx_cmds.c
@@ -57,7 +57,7 @@  struct mlx5_devx_obj *
  * @param[in] clear
  *   Whether hardware should clear the counters after the query or not.
  * @param[in] n_counters
- *   The counter number to read.
+ *   0 to read a single counter, otherwise the number of counters to read.
  *  @param pkts
  *   The number of packets that matched the flow.
  *  @param bytes
@@ -271,6 +271,8 @@  struct mlx5_devx_obj *
 	hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
 	attr->flow_counter_bulk_alloc_bitmap =
 			MLX5_GET(cmd_hca_cap, hcattr, flow_counter_bulk_alloc);
+	attr->flow_counters_dump = MLX5_GET(cmd_hca_cap, hcattr,
+					    flow_counters_dump);
 	attr->eswitch_manager = MLX5_GET(cmd_hca_cap, hcattr, eswitch_manager);
 	return 0;
 }
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 4849bd9..1d1ff90 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -2150,6 +2150,113 @@  struct field_modify_info modify_tcp[] = {
 #define MLX5_CNT_CONTAINER_RESIZE 64
 
 /**
+ * Get or create a flow counter.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] shared
+ *   Indicate if this counter is shared with other flows.
+ * @param[in] id
+ *   Counter identifier.
+ *
+ * @return
+ *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_flow_counter *
+flow_dv_counter_alloc_fallback(struct rte_eth_dev *dev, uint32_t shared,
+			       uint32_t id)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_counter *cnt = NULL;
+	struct mlx5_devx_obj *dcs = NULL;
+
+	if (!priv->config.devx) {
+		rte_errno = ENOTSUP;
+		return NULL;
+	}
+	if (shared) {
+		TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
+			if (cnt->shared && cnt->id == id) {
+				cnt->ref_cnt++;
+				return cnt;
+			}
+		}
+	}
+	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
+	if (!dcs)
+		return NULL;
+	cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
+	if (!cnt) {
+		claim_zero(mlx5_devx_cmd_destroy(dcs));
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	struct mlx5_flow_counter tmpl = {
+		.shared = shared,
+		.ref_cnt = 1,
+		.id = id,
+		.dcs = dcs,
+	};
+	tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
+	if (!tmpl.action) {
+		claim_zero(mlx5_devx_cmd_destroy(dcs));
+		rte_errno = errno;
+		rte_free(cnt);
+		return NULL;
+	}
+	*cnt = tmpl;
+	TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
+	return cnt;
+}
+
+/**
+ * Release a flow counter.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] counter
+ *   Pointer to the counter handler.
+ */
+static void
+flow_dv_counter_release_fallback(struct rte_eth_dev *dev,
+				 struct mlx5_flow_counter *counter)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	if (!counter)
+		return;
+	if (--counter->ref_cnt == 0) {
+		TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
+		claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
+		rte_free(counter);
+	}
+}
+
+/**
+ * Query a devx flow counter.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] cnt
+ *   Pointer to the flow counter.
+ * @param[out] pkts
+ *   The statistics value of packets.
+ * @param[out] bytes
+ *   The statistics value of bytes.
+ *
+ * @return
+ *   0 on success, otherwise a negative errno value and rte_errno is set.
+ */
+static inline int
+_flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused,
+		     struct mlx5_flow_counter *cnt, uint64_t *pkts,
+		     uint64_t *bytes)
+{
+	return mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes,
+						0, NULL, NULL, 0);
+}
+
+/**
  * Get a pool by a counter.
  *
  * @param[in] cnt
@@ -2335,14 +2442,18 @@  struct field_modify_info modify_tcp[] = {
  *   0 on success, otherwise a negative errno value and rte_errno is set.
  */
 static inline int
-_flow_dv_query_count(struct rte_eth_dev *dev __rte_unused,
+_flow_dv_query_count(struct rte_eth_dev *dev,
 		     struct mlx5_flow_counter *cnt, uint64_t *pkts,
 		     uint64_t *bytes)
 {
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_counter_pool *pool =
 			flow_dv_counter_pool_get(cnt);
 	int offset = cnt - &pool->counters_raw[0];
 
+	if (priv->counter_fallback)
+		return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes);
+
 	rte_spinlock_lock(&pool->sl);
 	/*
 	 * The single counters allocation may allocate smaller ID than the
@@ -2547,10 +2658,8 @@  struct field_modify_info modify_tcp[] = {
 	struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
 							       0);
 
-#ifndef HAVE_IBV_DEVX_ASYNC
-	rte_errno = ENOTSUP;
-	return NULL;
-#endif
+	if (priv->counter_fallback)
+		return flow_dv_counter_alloc_fallback(dev, shared, id);
 	if (!priv->config.devx) {
 		rte_errno = ENOTSUP;
 		return NULL;
@@ -2636,11 +2745,17 @@  struct field_modify_info modify_tcp[] = {
  *   Pointer to the counter handler.
  */
 static void
-flow_dv_counter_release(struct rte_eth_dev *dev __rte_unused,
+flow_dv_counter_release(struct rte_eth_dev *dev,
 			struct mlx5_flow_counter *counter)
 {
+	struct mlx5_priv *priv = dev->data->dev_private;
+
 	if (!counter)
 		return;
+	if (priv->counter_fallback) {
+		flow_dv_counter_release_fallback(dev, counter);
+		return;
+	}
 	if (--counter->ref_cnt == 0) {
 		struct mlx5_flow_counter_pool *pool =
 				flow_dv_counter_pool_get(counter);
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 79f852b..95ff29a 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -920,7 +920,9 @@  struct mlx5_ifc_cmd_hca_cap_bits {
 	u8 reserved_at_343[0x5];
 	u8 log_max_flow_counter_bulk[0x8];
 	u8 max_flow_counter_15_0[0x10];
-	u8 reserved_at_360[0x3];
+	u8 modify_tis[0x1];
+	u8 flow_counters_dump[0x1];
+	u8 reserved_at_360[0x1];
 	u8 log_max_rq[0x5];
 	u8 reserved_at_368[0x3];
 	u8 log_max_sq[0x5];
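
Editorial note on the data path: with counter_fallback set,
_flow_dv_query_count() above short-circuits into
_flow_dv_query_count_fallback(), i.e. one synchronous
mlx5_devx_cmd_flow_counter_query() with n_counters == 0 (single counter)
per call, instead of reading values the host thread pre-fetched in bulk.
The following hypothetical, self-contained model illustrates that
difference; all names below are illustrative stand-ins, not PMD code:

#include <inttypes.h>
#include <stdio.h>

struct counter {
	uint64_t cached_pkts;  /* Filled asynchronously by the host thread. */
	uint64_t cached_bytes;
	int fallback;          /* 1: no cache, query the FW every time. */
};

/* Stand-in for a synchronous devx query: one FW round-trip per call. */
static int
fw_query(const struct counter *cnt, uint64_t *pkts, uint64_t *bytes)
{
	(void)cnt;
	*pkts = 42;      /* Pretend values returned by the FW. */
	*bytes = 4200;
	return 0;
}

static int
query_count(const struct counter *cnt, uint64_t *pkts, uint64_t *bytes)
{
	if (cnt->fallback)
		return fw_query(cnt, pkts, bytes); /* Slow path: per-counter FW call. */
	*pkts = cnt->cached_pkts;   /* Fast path: values already in host memory. */
	*bytes = cnt->cached_bytes;
	return 0;
}

int
main(void)
{
	struct counter cnt = { .fallback = 1 };
	uint64_t pkts, bytes;

	query_count(&cnt, &pkts, &bytes);
	printf("pkts=%" PRIu64 " bytes=%" PRIu64 "\n", pkts, bytes);
	return 0;
}

In the accelerated mode the FW cost is amortized over a bulk of counters
per host-thread query; in fallback mode it is paid on every counter query,
which is the reduced acceleration the commit message refers to.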