[v5,5/8] net/mlx5: create GENEVE TLV option management

Message ID 20210112140241.15914-6-shirik@nvidia.com (mailing list archive)
State Superseded, archived
Delegated to: Ferruh Yigit
Series ethdev: introduce GENEVE header TLV option item

Checks

Context        Check    Description
ci/checkpatch  success  coding style OK

Commit Message

Shiri Kuzin Jan. 12, 2021, 2:02 p.m. UTC
Currently, firmware supports only a single TLV object per device
for matching on the GENEVE header option.

This patch adds simple TLV object management to the mlx5 PMD.
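
As a usage sketch only (not part of this patch): with the GENEVE TLV
option item added earlier in this series, an application could request
such a match roughly as below. The option class/type/length values are
purely illustrative; since firmware exposes a single TLV object, only
one class/type/length combination can be in use per device at a time.

    uint32_t opt_data[1] = { RTE_BE32(0x1234) };
    struct rte_flow_item_geneve_opt opt_spec = {
            .option_class = RTE_BE16(0x0102), /* illustrative class */
            .option_type = 0x77,              /* illustrative type */
            .option_len = 1,                  /* length in 32-bit words */
            .data = opt_data,
    };
    struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            { .type = RTE_FLOW_ITEM_TYPE_UDP },
            { .type = RTE_FLOW_ITEM_TYPE_GENEVE },
            { .type = RTE_FLOW_ITEM_TYPE_GENEVE_OPT, .spec = &opt_spec },
            { .type = RTE_FLOW_ITEM_TYPE_END },
    };

On first use the PMD registers the per-device TLV object; subsequent
flows with the same class/type/length reuse it via a reference count.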

Signed-off-by: Shiri Kuzin <shirik@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5.c         |   2 +
 drivers/net/mlx5/mlx5.h         |  13 ++++
 drivers/net/mlx5/mlx5_flow.h    |   4 ++
 drivers/net/mlx5/mlx5_flow_dv.c | 108 ++++++++++++++++++++++++++++++++
 4 files changed, 127 insertions(+)
  

Patch

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 023ef50a77..68d6352d48 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1014,6 +1014,7 @@  mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
 	/* Add context to the global device list. */
 	LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);
+	rte_spinlock_init(&sh->geneve_tlv_opt_sl);
 exit:
 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
 	return sh;
@@ -1109,6 +1110,7 @@  mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 		mlx5_glue->devx_free_uar(sh->devx_rx_uar);
 	if (sh->ctx)
 		claim_zero(mlx5_glue->close_device(sh->ctx));
+	MLX5_ASSERT(sh->geneve_tlv_option_resource == NULL);
 	pthread_mutex_destroy(&sh->txpp.mutex);
 	mlx5_free(sh);
 	return;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 41034f5d19..23272950a4 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -540,6 +540,16 @@  struct mlx5_aso_age_mng {
 	struct mlx5_aso_sq aso_sq; /* ASO queue objects. */
 };
 
+/* Management structure for GENEVE TLV option. */
+struct mlx5_geneve_tlv_option_resource {
+	struct mlx5_devx_obj *obj; /* Pointer to the GENEVE TLV opt object. */
+	rte_be16_t option_class; /* GENEVE TLV opt class. */
+	uint8_t option_type; /* GENEVE TLV opt type. */
+	uint8_t length; /* GENEVE TLV opt length. */
+	uint32_t refcnt; /* GENEVE TLV object reference counter. */
+};
+
+
 #define MLX5_AGE_EVENT_NEW		1
 #define MLX5_AGE_TRIGGER		2
 #define MLX5_AGE_SET(age_info, BIT) \
@@ -752,6 +762,9 @@  struct mlx5_dev_ctx_shared {
 	void *devx_rx_uar; /* DevX UAR for Rx. */
 	struct mlx5_aso_age_mng *aso_age_mng;
 	/* Management data for aging mechanism using ASO Flow Hit. */
+	struct mlx5_geneve_tlv_option_resource *geneve_tlv_option_resource;
+	/* Management structure for GENEVE TLV option. */
+	rte_spinlock_t geneve_tlv_opt_sl; /* Lock for GENEVE TLV resource. */
 	struct mlx5_dev_shared_port port[]; /* per device port data array. */
 };
 
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index ee85c9d8a5..0c8861964c 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1046,6 +1046,7 @@  struct rte_flow {
 	uint32_t counter; /**< Holds flow counter. */
 	uint32_t tunnel_id;  /**< Tunnel id */
 	uint32_t age; /**< Holds ASO age bit index. */
+	uint32_t geneve_tlv_option; /**< Holds GENEVE TLV option id. */
 } __rte_packed;
 
 /*
@@ -1503,4 +1504,7 @@  void flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list,
 				  struct mlx5_cache_entry *entry);
 struct mlx5_aso_age_action *flow_aso_age_get_by_idx(struct rte_eth_dev *dev,
 						    uint32_t age_idx);
+int flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
+					     const struct rte_flow_item *item,
+					     struct rte_flow_error *error);
 #endif /* RTE_PMD_MLX5_FLOW_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index e4736ee9b5..0a657cea41 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -7244,6 +7244,90 @@  flow_dv_translate_item_geneve(void *matcher, void *key,
 		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
 }
 
+/**
+ * Register GENEVE TLV option resource.
+ *
+ * @param[in, out] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in] item
+ *   Pointer to the GENEVE TLV option flow item
+ *   providing the option class, type and length.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise
+ *   and rte_errno is set.
+ */
+
+int
+flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
+					     const struct rte_flow_item *item,
+					     struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+	struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
+			sh->geneve_tlv_option_resource;
+	struct mlx5_devx_obj *obj;
+	const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
+	int ret = 0;
+
+	if (!geneve_opt_v)
+		return -1;
+	rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
+	if (geneve_opt_resource != NULL) {
+		if (geneve_opt_resource->option_class ==
+			geneve_opt_v->option_class &&
+			geneve_opt_resource->option_type ==
+			geneve_opt_v->option_type &&
+			geneve_opt_resource->length ==
+			geneve_opt_v->option_len) {
+			/* We already have GENEVE TLV option obj allocated. */
+			__atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
+					   __ATOMIC_RELAXED);
+		} else {
+			ret = rte_flow_error_set(error, ENOMEM,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				"Only one GENEVE TLV option supported");
+			goto exit;
+		}
+	} else {
+		/* Create a GENEVE TLV object and resource. */
+		obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
+				geneve_opt_v->option_class,
+				geneve_opt_v->option_type,
+				geneve_opt_v->option_len);
+		if (!obj) {
+			ret = rte_flow_error_set(error, ENODATA,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				"Failed to create GENEVE TLV Devx object");
+			goto exit;
+		}
+		sh->geneve_tlv_option_resource =
+				mlx5_malloc(MLX5_MEM_ZERO,
+						sizeof(*geneve_opt_resource),
+						0, SOCKET_ID_ANY);
+		if (!sh->geneve_tlv_option_resource) {
+			claim_zero(mlx5_devx_cmd_destroy(obj));
+			ret = rte_flow_error_set(error, ENOMEM,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				"GENEVE TLV object memory allocation failed");
+			goto exit;
+		}
+		geneve_opt_resource = sh->geneve_tlv_option_resource;
+		geneve_opt_resource->obj = obj;
+		geneve_opt_resource->option_class = geneve_opt_v->option_class;
+		geneve_opt_resource->option_type = geneve_opt_v->option_type;
+		geneve_opt_resource->length = geneve_opt_v->option_len;
+		__atomic_store_n(&geneve_opt_resource->refcnt, 1,
+				__ATOMIC_RELAXED);
+	}
+exit:
+	rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
+	return ret;
+}
+
 /**
  * Add MPLS item to matcher and to the value.
  *
@@ -11210,6 +11294,26 @@  flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
 				     &cache->entry);
 }
 
+static void
+flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+	struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
+				sh->geneve_tlv_option_resource;
+	rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
+	if (geneve_opt_resource) {
+		if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
+					 __ATOMIC_RELAXED))) {
+			claim_zero(mlx5_devx_cmd_destroy
+					(geneve_opt_resource->obj));
+			mlx5_free(sh->geneve_tlv_option_resource);
+			sh->geneve_tlv_option_resource = NULL;
+		}
+	}
+	rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
+}
+
 /**
  * Remove the flow from the NIC but keeps it in memory.
  * Lock free, (mutex should be acquired by caller).
@@ -11280,6 +11384,10 @@  flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 	}
 	if (flow->age)
 		flow_dv_aso_age_release(dev, flow->age);
+	if (flow->geneve_tlv_option) {
+		flow_dv_geneve_tlv_option_resource_release(dev);
+		flow->geneve_tlv_option = 0;
+	}
 	while (flow->dev_handles) {
 		uint32_t tmp_idx = flow->dev_handles;