[v6,6/8] net/cpfl: add fxp rule module

Message ID 20230822010226.17783-7-yuying.zhang@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Qi Zhang
Series: add rte flow support for cpfl

Checks

Context | Check | Description
ci/checkpatch | warning | coding style issues

Commit Message

Zhang, Yuying Aug. 22, 2023, 1:02 a.m. UTC
  From: Yuying Zhang <yuying.zhang@intel.com>

Add the low-level FXP module for rule packing, creation and destruction.
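
A minimal usage sketch (illustrative only, not part of this patch; itf,
tx_cq, rx_cq, cookie and vsi_id are placeholders owned by the caller).
cpfl_rule_process() packs the rule into the interface's preallocated DMA
slots, sends it on the Tx control queue and polls the Rx control queue
for the completion:

    /* Pack one SEM rule and create it through the control queues. */
    struct cpfl_rule_info rinfo;
    int ret;

    memset(&rinfo, 0, sizeof(rinfo));
    rinfo.type = CPFL_RULE_TYPE_SEM;
    rinfo.cookie = cookie;        /* unique per-rule cookie */
    rinfo.vsi = vsi_id;
    /* fill rinfo.sem (profile/key) and rinfo.act_bytes here */

    ret = cpfl_rule_process(itf, tx_cq, rx_cq, &rinfo, 1, true);
    if (ret < 0)                  /* add == false destroys the rule */
        return ret;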

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c   |  31 ++++
 drivers/net/cpfl/cpfl_ethdev.h   |   6 +
 drivers/net/cpfl/cpfl_fxp_rule.c | 296 +++++++++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_fxp_rule.h |  68 +++++++
 drivers/net/cpfl/meson.build     |   1 +
 5 files changed, 402 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
 create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h
  

Patch

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index a2bc6784d0..da78e79652 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -16,6 +16,7 @@ 
 #include <ethdev_private.h>
 #include "cpfl_rxtx.h"
 #include "cpfl_flow.h"
+#include "cpfl_rules.h"
 
 #define CPFL_REPRESENTOR	"representor"
 #define CPFL_TX_SINGLE_Q	"tx_single"
@@ -1127,6 +1128,7 @@  cpfl_dev_close(struct rte_eth_dev *dev)
 	adapter->cur_vport_nb--;
 	dev->data->dev_private = NULL;
 	adapter->vports[vport->sw_idx] = NULL;
+	idpf_free_dma_mem(NULL, &cpfl_vport->itf.flow_dma);
 	rte_free(cpfl_vport);
 
 	return 0;
@@ -2466,6 +2468,26 @@  cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
 	return 0;
 }
 
+int
+cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma, uint32_t size,
+			 int batch_size)
+{
+	int i;
+
+	if (!idpf_alloc_dma_mem(NULL, orig_dma, size * (1 + batch_size))) {
+		PMD_INIT_LOG(ERR, "Could not alloc dma memory");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < batch_size; i++) {
+		dma[i].va = (void *)((uint64_t)orig_dma->va + size * (i + 1));
+		dma[i].pa = orig_dma->pa + size * (i + 1);
+		dma[i].size = size;
+		dma[i].zone = NULL;
+	}
+	return 0;
+}
+
 static int
 cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 {
@@ -2515,6 +2537,15 @@  cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 	rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
 			    &dev->data->mac_addrs[0]);
 
+	memset(cpfl_vport->itf.dma, 0, sizeof(cpfl_vport->itf.dma));
+	memset(cpfl_vport->itf.msg, 0, sizeof(cpfl_vport->itf.msg));
+	ret = cpfl_alloc_dma_mem_batch(&cpfl_vport->itf.flow_dma,
+				       cpfl_vport->itf.dma,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       CPFL_FLOW_BATCH_SIZE);
+	if (ret < 0)
+		goto err_mac_addrs;
+
 	if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
 		memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
 		ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 7f83d170d7..8eeeac9910 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -147,10 +147,14 @@  enum cpfl_itf_type {
 
 TAILQ_HEAD(cpfl_flow_list, rte_flow);
 
+#define CPFL_FLOW_BATCH_SIZE  490
 struct cpfl_itf {
 	enum cpfl_itf_type type;
 	struct cpfl_adapter_ext *adapter;
 	struct cpfl_flow_list flow_list;
+	struct idpf_dma_mem flow_dma;
+	struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE];
+	struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE];
 	void *data;
 };
 
@@ -240,6 +244,8 @@  int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
 int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
 int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
 int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
+int cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma,
+			     uint32_t size, int batch_size);
 
 #define CPFL_DEV_TO_PCI(eth_dev)		\
 	RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c
new file mode 100644
index 0000000000..50fac55432
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.c
@@ -0,0 +1,296 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include "cpfl_ethdev.h"
+
+#include "cpfl_fxp_rule.h"
+#include "cpfl_logs.h"
+
+#define CTLQ_SEND_RETRIES 100
+#define CTLQ_RECEIVE_RETRIES 100
+
+int
+cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		   struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_msg **msg_ptr_list;
+	u16 clean_count = 0;
+	int num_cleaned = 0;
+	int retries = 0;
+	int ret = 0;
+
+	msg_ptr_list = calloc(num_q_msg, sizeof(struct idpf_ctlq_msg *));
+	if (!msg_ptr_list) {
+		PMD_INIT_LOG(ERR, "no memory for cleaning ctlq");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "cpfl_vport_ctlq_send() failed with error: 0x%4x", ret);
+		goto send_err;
+	}
+
+	while (retries <= CTLQ_SEND_RETRIES) {
+		clean_count = num_q_msg - num_cleaned;
+		ret = cpfl_vport_ctlq_clean_sq(cq, &clean_count,
+					       &msg_ptr_list[num_cleaned]);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "clean ctlq failed: 0x%4x", ret);
+			goto send_err;
+		}
+
+		num_cleaned += clean_count;
+		retries++;
+		if (num_cleaned >= num_q_msg)
+			break;
+		rte_delay_us_sleep(10);
+	}
+
+	if (num_cleaned < num_q_msg) {
+		PMD_INIT_LOG(ERR, "timed out while polling for completions");
+		ret = -1;
+		goto send_err;
+	}
+
+send_err:
+	if (msg_ptr_list)
+		free(msg_ptr_list);
+err:
+	return ret;
+}
+
+static int
+cpfl_process_rx_ctlq_msg(u16 num_q_msg, struct idpf_ctlq_msg *q_msg)
+{
+	u16 i;
+
+	if (!num_q_msg || !q_msg)
+		return -EINVAL;
+
+	for (i = 0; i < num_q_msg; i++) {
+		if (q_msg[i].status == CPFL_CFG_PKT_ERR_OK) {
+			continue;
+		} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_EEXIST &&
+			   q_msg[i].opcode == cpfl_ctlq_sem_add_rule) {
+			PMD_INIT_LOG(ERR, "The rule has confliction with already existed one");
+			return -EINVAL;
+		} else if (q_msg[i].status == CPFL_CFG_PKT_ERR_ENOTFND &&
+			   q_msg[i].opcode == cpfl_ctlq_sem_del_rule) {
+			PMD_INIT_LOG(ERR, "The rule has already deleted");
+			return -EINVAL;
+		} else {
+			PMD_INIT_LOG(ERR, "Invalid rule");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+int
+cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		      struct idpf_ctlq_msg q_msg[])
+{
+	int retries = 0;
+	struct idpf_dma_mem *dma;
+	u16 i;
+	uint16_t buff_cnt;
+	int ret = 0, handle_rule = 0;
+
+	retries = 0;
+	while (retries <= CTLQ_RECEIVE_RETRIES) {
+		rte_delay_us_sleep(10);
+		ret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]);
+
+		if (ret && ret != CPFL_ERR_CTLQ_NO_WORK &&
+		    ret != CPFL_ERR_CTLQ_ERROR) {
+			PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err: 0x%4x\n", ret);
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_NO_WORK) {
+			retries++;
+			continue;
+		}
+
+		if (ret == CPFL_ERR_CTLQ_EMPTY)
+			break;
+
+		ret = cpfl_process_rx_ctlq_msg(num_q_msg, q_msg);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to process rx_ctrlq msg");
+			handle_rule = ret;
+		}
+
+		for (i = 0; i < num_q_msg; i++) {
+			if (q_msg[i].data_len > 0)
+				dma = q_msg[i].ctx.indirect.payload;
+			else
+				dma = NULL;
+
+			buff_cnt = dma ? 1 : 0;
+			ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma);
+			if (ret)
+				PMD_INIT_LOG(WARNING, "could not posted recv bufs\n");
+		}
+		break;
+	}
+
+	if (retries > CTLQ_RECEIVE_RETRIES) {
+		PMD_INIT_LOG(ERR, "timed out while polling for receive response");
+		ret = -1;
+	}
+
+	return ret + handle_rule;
+}
+
+static int
+cpfl_mod_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		   struct idpf_ctlq_msg *msg)
+{
+	struct cpfl_mod_rule_info *minfo = &rinfo->mod;
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	struct cpfl_rule_cfg_data cfg = {0};
+
+	/* prepare rule blob */
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(&cfg, 0, sizeof(cfg));
+
+	/* fill info for both query and add/update */
+	cpfl_fill_rule_mod_content(minfo->mod_obj_size,
+				   minfo->pin_mod_content,
+				   minfo->mod_index,
+				   &cfg.ext.mod_content);
+
+	/* only fill content for add/update */
+	memcpy(blob->mod_blob, minfo->mod_content,
+	       minfo->mod_content_byte_len);
+
+#define NO_HOST_NEEDED 0
+	/* pack message */
+	cpfl_fill_rule_cfg_data_common(cpfl_ctlq_mod_add_update_rule,
+				       rinfo->cookie,
+				       0, /* vsi_id not used for mod */
+				       rinfo->port_num,
+				       NO_HOST_NEEDED,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       (u16)sizeof(*blob),
+				       (void *)dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_default_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+		       struct idpf_ctlq_msg *msg, bool add)
+{
+	union cpfl_rule_cfg_pkt_record *blob = NULL;
+	enum cpfl_ctlq_rule_cfg_opc opc;
+	struct cpfl_rule_cfg_data cfg;
+	uint16_t cfg_ctrl;
+
+	if (!dma->va) {
+		PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+		return -1;
+	}
+	blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+	memset(blob, 0, sizeof(*blob));
+	memset(msg, 0, sizeof(*msg));
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(rinfo->sem.prof_id,
+							  rinfo->sem.sub_prof_id,
+							  rinfo->sem.pin_to_cache,
+							  rinfo->sem.fixed_fetch);
+		cpfl_prep_sem_rule_blob(rinfo->sem.key, rinfo->sem.key_byte_len,
+					rinfo->act_bytes, rinfo->act_byte_len,
+					cfg_ctrl, blob);
+		opc = add ? cpfl_ctlq_sem_add_rule : cpfl_ctlq_sem_del_rule;
+	} else {
+		PMD_INIT_LOG(ERR, "not support %d rule.", rinfo->type);
+		return -1;
+	}
+
+	cpfl_fill_rule_cfg_data_common(opc,
+				       rinfo->cookie,
+				       rinfo->vsi,
+				       rinfo->port_num,
+				       rinfo->host_id,
+				       0, /* time_sel */
+				       0, /* time_sel_val */
+				       0, /* cache_wr_thru */
+				       rinfo->resp_req,
+				       sizeof(union cpfl_rule_cfg_pkt_record),
+				       dma,
+				       &cfg.common);
+	cpfl_prep_rule_desc(&cfg, msg);
+	return 0;
+}
+
+static int
+cpfl_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+	       struct idpf_ctlq_msg *msg, bool add)
+{
+	int ret = 0;
+
+	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+		if (cpfl_default_rule_pack(rinfo, dma, msg, add) < 0)
+			ret = -1;
+	} else if (rinfo->type == CPFL_RULE_TYPE_MOD) {
+		if (cpfl_mod_rule_pack(rinfo, dma, msg) < 0)
+			ret = -1;
+	} else {
+		PMD_INIT_LOG(ERR, "Invalid type of rule");
+		ret = -1;
+	}
+
+	return ret;
+}
+
+int
+cpfl_rule_process(struct cpfl_itf *itf,
+		  struct idpf_ctlq_info *tx_cq,
+		  struct idpf_ctlq_info *rx_cq,
+		  struct cpfl_rule_info *rinfo,
+		  int rule_num,
+		  bool add)
+{
+	struct idpf_hw *hw = &itf->adapter->base.hw;
+	int i;
+	int ret = 0;
+
+	if (rule_num == 0)
+		return 0;
+
+	for (i = 0; i < rule_num; i++) {
+		ret = cpfl_rule_pack(&rinfo[i], &itf->dma[i], &itf->msg[i], add);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Could not pack rule");
+			return ret;
+		}
+	}
+	ret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to send control message");
+		return ret;
+	}
+	ret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to update rule");
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.h b/drivers/net/cpfl/cpfl_fxp_rule.h
new file mode 100644
index 0000000000..ed757b80b1
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.h
@@ -0,0 +1,68 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FXP_RULE_H_
+#define _CPFL_FXP_RULE_H_
+
+#include "cpfl_rules.h"
+
+#define CPFL_MAX_KEY_LEN 128
+#define CPFL_MAX_RULE_ACTIONS 32
+
+struct cpfl_sem_rule_info {
+	uint16_t prof_id;
+	uint8_t sub_prof_id;
+	uint8_t key[CPFL_MAX_KEY_LEN];
+	uint8_t key_byte_len;
+	uint8_t pin_to_cache;
+	uint8_t fixed_fetch;
+};
+
+#define CPFL_MAX_MOD_CONTENT_LEN 256
+struct cpfl_mod_rule_info {
+	uint8_t mod_content[CPFL_MAX_MOD_CONTENT_LEN];
+	uint8_t mod_content_byte_len;
+	uint32_t mod_index;
+	uint8_t pin_mod_content;
+	uint8_t mod_obj_size;
+};
+
+enum cpfl_rule_type {
+	CPFL_RULE_TYPE_NONE,
+	CPFL_RULE_TYPE_SEM,
+	CPFL_RULE_TYPE_MOD
+};
+
+struct cpfl_rule_info {
+	enum cpfl_rule_type type;
+	uint64_t cookie;
+	uint8_t host_id;
+	uint8_t port_num;
+	uint8_t resp_req;
+	/* TODO: change this to be dynamically allocated/reallocated */
+	uint8_t act_bytes[CPFL_MAX_RULE_ACTIONS * sizeof(union cpfl_action_set)];
+	uint8_t act_byte_len;
+	/* vsi is used for lem and lpm rules */
+	uint16_t vsi;
+	uint8_t clear_mirror_1st_state;
+	/* mod related fields */
+	union {
+		struct cpfl_mod_rule_info mod;
+		struct cpfl_sem_rule_info sem;
+	};
+};
+
+extern struct cpfl_vport_ext *vport;
+
+int cpfl_rule_process(struct cpfl_itf *itf,
+		      struct idpf_ctlq_info *tx_cq,
+		      struct idpf_ctlq_info *rx_cq,
+		      struct cpfl_rule_info *rinfo,
+		      int rule_num,
+		      bool add);
+int cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+		       struct idpf_ctlq_msg q_msg[]);
+int cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+			  struct idpf_ctlq_msg q_msg[]);
+#endif /* _CPFL_FXP_RULE_H_ */
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index e2b6621cea..6118a16329 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -45,6 +45,7 @@  if dpdk_conf.has('RTE_HAS_JANSSON')
     sources += files(
 	    'cpfl_flow.c',
             'cpfl_flow_parser.c',
+	    'cpfl_fxp_rule.c',
     )
     ext_deps += jansson_dep
 endif
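
For reference, cpfl_alloc_dma_mem_batch() above carves one DMA
allocation into per-rule buffers. A sketch of the resulting layout
(an illustrative comment mirroring the offset arithmetic in
cpfl_ethdev.c, not code from this patch):

    /*
     * One idpf_alloc_dma_mem() call of size * (1 + batch_size) bytes:
     *
     *   orig_dma->va
     *   |<- size ->|<- size ->|<- size ->| ... |<-  size ->|
     *   | orig slot|  dma[0]  |  dma[1]  | ... | dma[B-1]  |
     *
     * dma[i] aliases the slice at byte offset size * (i + 1), so all
     * CPFL_FLOW_BATCH_SIZE rule-blob buffers come from a single
     * allocation and are released together by the one
     * idpf_free_dma_mem(NULL, &cpfl_vport->itf.flow_dma) call in
     * cpfl_dev_close().
     */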