[v1,5/5] net/cpfl: add fxp flow engine

Message ID 20230812075506.361769-6-yuying.zhang@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Qi Zhang
Series: add rte flow support for cpfl

Checks

Context               Check     Description
ci/checkpatch         warning   coding style issues
ci/Intel-compilation  warning   apply issues

Commit Message

Zhang, Yuying Aug. 12, 2023, 7:55 a.m. UTC
  Adapt fxp low level as a flow engine.

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.h          |  85 ++++
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 610 ++++++++++++++++++++++++
 drivers/net/cpfl/meson.build            |   1 +
 3 files changed, 696 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c
  

Comments

Xing, Beilei Aug. 25, 2023, 9:15 a.m. UTC | #1
> -----Original Message-----
> From: Zhang, Yuying <yuying.zhang@intel.com>
> Sent: Saturday, August 12, 2023 3:55 PM
> To: dev@dpdk.org; Xing, Beilei <beilei.xing@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: Zhang, Yuying <yuying.zhang@intel.com>
> Subject: [PATCH v1 5/5] net/cpfl: add fxp flow engine
> 
> Adapt fxp low level as a flow engine.
> 
> Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> ---
>  drivers/net/cpfl/cpfl_ethdev.h          |  85 ++++
>  drivers/net/cpfl/cpfl_flow_engine_fxp.c | 610 ++++++++++++++++++++++++
>  drivers/net/cpfl/meson.build            |   1 +
>  3 files changed, 696 insertions(+)
>  create mode 100644 drivers/net/cpfl/cpfl_flow_engine_fxp.c
> 
> diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
> index 63bcc5551f..d7e9ea1a74 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.h
> +++ b/drivers/net/cpfl/cpfl_ethdev.h
> @@ -92,6 +92,8 @@
<...>
> +static inline uint16_t
> +cpfl_get_vsi_id(struct cpfl_itf *itf)
> +{
> +	struct cpfl_adapter_ext *adapter = itf->adapter;
> +	struct cpfl_vport_info *info;
> +	uint32_t vport_id;
> +	int ret;
> +	struct cpfl_vport_id vport_identity;
> +
> +	if (!itf)
> +		return CPFL_INVALID_HW_ID;
> +
> +	if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
> +		struct cpfl_repr *repr = (void *)itf;
> +
> +		return repr->vport_info->vport_info.vsi_id;
> +	} else if (itf->type == CPFL_ITF_TYPE_VPORT) {
> +		vport_id = ((struct cpfl_vport *)itf)->base.vport_id;
> +		vport_identity.func_type = CPCHNL2_FUNC_TYPE_PF;
> +		/* host: HOST0_CPF_ID, acc: ACC_CPF_ID */
> +		vport_identity.pf_id = ACC_CPF_ID;
> +		vport_identity.vf_id = 0;
> +		vport_identity.vport_id = vport_id;
> +
> +		ret = rte_hash_lookup_data(adapter->vport_map_hash, &vport_identity,
> +					  (void **)&info);
> +		if (ret < 0) {
> +			PMD_DRV_LOG(ERR, "vport id not exist");
> +			goto err;
> +		}
> +
> +		/* rte_spinlock_unlock(&adapter->vport_map_lock); */
 
So do we need a lock in this function?
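For reference, a minimal sketch of what I mean, assuming adapter->vport_map_lock is the lock that guards vport_map_hash (the commented-out unlock calls suggest it is):

    rte_spinlock_lock(&adapter->vport_map_lock);
    ret = rte_hash_lookup_data(adapter->vport_map_hash, &vport_identity,
                               (void **)&info);
    rte_spinlock_unlock(&adapter->vport_map_lock);
    if (ret < 0) {
        PMD_DRV_LOG(ERR, "vport id does not exist");
        return CPFL_INVALID_HW_ID;
    }

    return info->vport_info.vsi_id;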

> +		return info->vport_info.vsi_id;
> +	}
> +
> +err:
> +	/* rte_spinlock_unlock(&adapter->vport_map_lock); */
> +	return CPFL_INVALID_HW_ID;
> +}
> +
<...>
> 
>  #endif /* _CPFL_ETHDEV_H_ */
> diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
> new file mode 100644
> index 0000000000..e10639c842
> --- /dev/null
> +++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
> @@ -0,0 +1,610 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2023 Intel Corporation
> + */
> +
> +#include <sys/queue.h>
> +#include <stdio.h>
> +#include <errno.h>
> +#include <stdint.h>
> +#include <string.h>
> +#include <unistd.h>
> +#include <stdarg.h>
> +#include <math.h>
> +#include <rte_debug.h>
> +#include <rte_ether.h>
> +#include <ethdev_driver.h>
> +#include <rte_log.h>
> +#include <rte_malloc.h>
> +#include <rte_eth_ctrl.h>
> +#include <rte_tailq.h>
> +#include <rte_flow_driver.h>
> +#include <rte_flow.h>
> +#include <rte_bitmap.h>
> +#include "cpfl_rules.h"
> +#include "cpfl_logs.h"
> +#include "cpfl_ethdev.h"
> +#include "cpfl_flow.h"
> +#include "cpfl_fxp_rule.h"
> +#include "cpfl_flow_parser.h"
> +#include "rte_memcpy.h"

Shouldn't this be #include <rte_memcpy.h>, moved up with the other RTE headers?
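I.e. something like this (just a sketch of the suggested grouping):

    #include <rte_flow.h>
    #include <rte_bitmap.h>
    #include <rte_memcpy.h>

    #include "cpfl_rules.h"
    #include "cpfl_logs.h"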

> +
> +#define COOKIE_DEF	0x1000
> +#define PREC_MAX	7
> +#define PREC_DEF	1
> +#define PREC_SET	5
> +#define TYPE_ID		3
> +#define OFFSET		0x0a
> +#define HOST_ID_DEF	0
> +#define PF_NUM_DEF	0
> +#define PORT_NUM_DEF	0
> +#define RESP_REQ_DEF	2
> +#define PIN_TO_CACHE_DEF	0
> +#define CLEAR_MIRROR_1ST_STATE_DEF  0
> +#define FIXED_FETCH_DEF 0
> +#define PTI_DEF		0
> +#define MOD_OBJ_SIZE_DEF	0
> +#define PIN_MOD_CONTENT_DEF	0
> +
> +#define MAX_MOD_CONTENT_INDEX	256
> +#define MAX_MR_ACTION_NUM 8

For the newly defined macros in the PMD, it is better to use a CPFL_ prefix.

> +
> +struct rule_info_meta {

cpfl_rule_info_meta.
Please also check all the other macros, global variables, structures, functions, etc.; I will not comment on each of them individually.

BTW, could you add some comments for the new structures and their members? That would make the code more readable.
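For example, a sketch of what the renamed and documented structure could look like (the comments are mine, based on how the fields are used below):

    /* meta data of the rules generated for one rte_flow */
    struct cpfl_rule_info_meta {
        struct cpfl_flow_pr_action pr_action; /* pattern rule action from the parser */
        uint32_t pr_num;                      /* number of pattern rules */
        uint32_t mr_num;                      /* number of modification rules */
        uint32_t rule_num;                    /* pr_num + mr_num */
        struct cpfl_rule_info rules[0];       /* flexible array of rule entries */
    };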

> +	struct cpfl_flow_pr_action pr_action;
> +	uint32_t pr_num;
> +	uint32_t mr_num;
> +	uint32_t rule_num;
> +	struct cpfl_rule_info rules[0];
> +};
> +
> +static uint32_t fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad);
> +static void fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx);
> +uint64_t rule_cookie = COOKIE_DEF;
> +
> +static int
> +cpfl_fxp_create(struct rte_eth_dev *dev,
> +		struct rte_flow *flow,
> +		void *meta,
> +		struct rte_flow_error *error)
> +{
> +	int ret = 0;
> +	uint32_t cpq_id = 0;
> +	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
> +	struct cpfl_adapter_ext *ad = itf->adapter;
> +	struct rule_info_meta *rim = meta;
> +	struct cpfl_vport *vport;
> +
> +	if (!rim)
> +		return ret;
> +
> +	if (itf->type == CPFL_ITF_TYPE_VPORT) {
> +		vport = (struct cpfl_vport *)itf;
> +		cpq_id = vport->base.devarg_id * 2;

Why is vport->base.devarg_id * 2 here? Could you add some comments?
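For example, something along these lines (my understanding of the layout, based on how ad->ctlqp[cpq_id] and ad->ctlqp[cpq_id + 1] are used right below; please correct the comment if the assumption is wrong):

    vport = (struct cpfl_vport *)itf;
    /* Each vport owns one TX/RX config queue pair stored back to back in
     * ad->ctlqp[]: TX at devarg_id * 2, RX at devarg_id * 2 + 1.
     */
    cpq_id = vport->base.devarg_id * 2;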

> +	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {

So does the patch support both representor rules and the represented-port action?
It would be better to split the VPORT and REPRESENTOR support.

> +		cpq_id = CPFL_FPCP_CFGQ_TX;
> +	} else {
> +		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				   "fail to find correct control queue");
> +		return -rte_errno;
> +	}
> +
> +	ret = cpfl_rule_update(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1],
> +			       rim->rules, rim->rule_num, true);

OK, I understand the function is meant to process the rule, right?
So how about cpfl_rule_process?

> +	if (ret < 0) {
> +		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				   "cpfl filter create flow fail");
> +		rte_free(rim);
> +		return ret;
> +	}
> +
> +	flow->rule = rim;
> +
> +	return ret;
> +}
> +
> +static inline void
> +cpfl_fxp_rule_free(struct rte_flow *flow)
> +{
> +	rte_free(flow->rule);
> +	flow->rule = NULL;
> +}
> +
> +static int
> +cpfl_fxp_destroy(struct rte_eth_dev *dev,
> +		 struct rte_flow *flow,
> +		 struct rte_flow_error *error)
> +{
> +	int ret = 0;
> +	uint32_t cpq_id = 0;
> +	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
> +	struct cpfl_adapter_ext *ad = itf->adapter;
> +	struct rule_info_meta *rim;
> +	uint32_t i;
> +	struct cpfl_vport *vport;
> +
> +	rim = flow->rule;
> +	if (!rim) {
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				   "no such flow create by cpfl filter");
> +
> +		cpfl_fxp_rule_free(flow);

flow->rule is already NULL here, so there is no need to call the function.
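I.e. the error path can simply be (sketch):

    rim = flow->rule;
    if (!rim) {
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                           "no such flow created by cpfl filter");
        return -rte_errno;
    }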

> +
> +		return -rte_errno;
> +	}
> +
> +	if (itf->type == CPFL_ITF_TYPE_VPORT) {
> +		vport = (struct cpfl_vport *)itf;
> +		cpq_id = vport->base.devarg_id * 2;
> +	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
> +		cpq_id = CPFL_FPCP_CFGQ_TX;
> +	} else {
> +		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				   "fail to find correct control queue");

Does this need to goto err here?
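I.e. something like this, so the rule meta attached to the flow is still released on this path (sketch):

    } else {
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                           "fail to find correct control queue");
        ret = -rte_errno;
        goto err;
    }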

> +		return -rte_errno;
> +	}
> +
> +	ret = cpfl_rule_update(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1], rim->rules,
> +			       rim->rule_num, false);
> +	if (ret < 0) {
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				   "fail to destroy cpfl filter rule");
> +		goto err;
> +	}
> +
> +	/* free mod index */
> +	for (i = rim->pr_num; i < rim->rule_num; i++)
> +		fxp_mod_idx_free(ad, rim->rules[i].mod.mod_index);
> +err:
> +	cpfl_fxp_rule_free(flow);
> +	return ret;
> +}
> +

<...>
> +
> +static int
> +cpfl_parse_mod_content(struct cpfl_adapter_ext *adapter,
> +		       struct cpfl_rule_info *match_rinfo,
> +		       struct cpfl_rule_info *mod_rinfo,
> +		       const struct cpfl_flow_mr_action *mr_action)
> +{
> +	struct cpfl_mod_rule_info *minfo = &mod_rinfo->mod;
> +	uint32_t mod_idx;
> +	int i;
> +	int next = match_rinfo->act_byte_len / (sizeof(union cpfl_action_set));
> +	union cpfl_action_set *act_set =
> +		&((union cpfl_action_set *)match_rinfo->act_bytes)[next];
> +
> +	if (!mr_action || mr_action->type != CPFL_JS_MR_ACTION_TYPE_MOD)
> +		return -EINVAL;
> +
> +	*act_set = cpfl_act_mod_profile(PREC_DEF,
> +					mr_action->mod.prof,
> +					PTI_DEF,
> +					0, /* append */
> +					0, /* prepend */
> +					CPFL_ACT_MOD_PROFILE_PREFETCH_256B);
> +
> +	act_set++;
> +	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
> +
> +	mod_idx = fxp_mod_idx_alloc(adapter);
> +	if (mod_idx == MAX_MOD_CONTENT_INDEX) {
> +		PMD_DRV_LOG(ERR, "Out of Mod Index.");
> +		return -ENOMEM;
> +	}
> +
> +	*act_set = cpfl_act_mod_addr(PREC_DEF, mod_idx);
> +
> +	act_set++;
> +	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
> +
> +	mod_rinfo->type = CPFL_RULE_TYPE_MOD;
> +	minfo->mod_obj_size = MOD_OBJ_SIZE_DEF;
> +	minfo->pin_mod_content = PIN_MOD_CONTENT_DEF;
> +	minfo->mod_index = mod_idx;
> +	mod_rinfo->cookie = 0x1237561;

How about adding a macro for 0x1237561?
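For example (the macro name below is only a suggestion; the define would go next to the other *_DEF constants at the top of the file):

    #define CPFL_MOD_RULE_COOKIE	0x1237561

    mod_rinfo->cookie = CPFL_MOD_RULE_COOKIE;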

> +	mod_rinfo->port_num = PORT_NUM_DEF;
> +	mod_rinfo->resp_req = RESP_REQ_DEF;
> +
> +	minfo->mod_content_byte_len = mr_action->mod.byte_len + 2;
> +	for (i = 0; i < minfo->mod_content_byte_len; i++)
> +		minfo->mod_content[i] = mr_action->mod.data[i];
> +
> +	return 0;
> +}
> +
> +static int
> +cpfl_fxp_parse_action(struct cpfl_itf *itf,
> +		      const struct rte_flow_action *actions,
> +		      const struct cpfl_flow_mr_action *mr_action,
> +		      struct rule_info_meta *rim,
> +		      int priority,
> +		      int index,
> +		      bool is_vport_rule)
> +{
> +	const struct rte_flow_action_ethdev *act_ethdev;
> +	const struct rte_flow_action *action;
> +	const struct rte_flow_action_queue *act_q;
> +	const struct rte_flow_action_rss *rss;
> +	struct rte_eth_dev_data *data;
> +	enum rte_flow_action_type action_type;
> +	struct cpfl_vport *vport;
> +	/* used when action is REPRESENTED_PORT or REPRESENTED_PORT type */

Should the comment say represented port or port representor?
Also, can we split the VPORT and REPRESENTOR flow support?

> +	struct cpfl_itf *dst_itf;
> +	uint16_t dev_id; /*vsi_id or phyical port id*/
> +	bool is_vsi;
> +	bool set_meta_valid = false;
> +	int queue_id = -1;
> +	bool fwd_vsi = false;
> +	bool fwd_q = false;
> +	bool fwd_jump = false;
> +	uint32_t i;
> +	struct cpfl_rule_info *rinfo = &rim->rules[index];
> +	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
> +
> +	priority = PREC_MAX - priority;
> +	for (action = actions; action->type !=
> +			RTE_FLOW_ACTION_TYPE_END; action++) {
> +		action_type = action->type;
> +		switch (action_type) {
> +		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
> +		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
> +			if (!fwd_vsi && !fwd_jump)
> +				fwd_vsi = true;
> +			else
> +				goto err;
> +			if (is_vport_rule) {
> +				dst_itf = itf;
> +			} else {
> +				act_ethdev = action->conf;
> +				dst_itf = cpfl_get_itf_by_port_id(act_ethdev->port_id);
> +			}
> +
> +			if (!dst_itf)
> +				goto err;
> +
> +			if (dst_itf->type == CPFL_ITF_TYPE_VPORT) {
> +				vport = (struct cpfl_vport *)dst_itf;
> +				queue_id = vport->base.chunks_info.rx_start_qid;
> +			} else {
> +				queue_id = -2;

Why is it -2 here?
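My reading is that -2 marks a destination that has no local RX queues (e.g. a representor), so a later QUEUE or RSS action is rejected. If so, a named sentinel plus a comment would make that clearer, e.g. (name is only a suggestion):

    /* destination has no local RX queues; QUEUE/RSS actions are not allowed */
    #define CPFL_INVALID_QUEUE_ID	(-2)

    queue_id = CPFL_INVALID_QUEUE_ID;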

> +			}
> +
> +			is_vsi = (action_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR ||
> +				dst_itf->type == CPFL_ITF_TYPE_REPRESENTOR);
> +			if (is_vsi || is_vport_rule)
> +				dev_id = cpfl_get_vsi_id(dst_itf);
> +			else
> +				dev_id = cpfl_get_port_id(dst_itf);
> +
> +			if (dev_id == CPFL_INVALID_HW_ID)
> +				goto err;
> +
> +			if (is_vsi || is_vport_rule)
> +				*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
> +			else
> +				*act_set = cpfl_act_fwd_port(0, priority, 0, dev_id);
> +			act_set++;
> +			rinfo->act_byte_len += sizeof(union cpfl_action_set);
> +			break;
> +		case RTE_FLOW_ACTION_TYPE_QUEUE:
> +			if (!fwd_q && !fwd_jump)
> +				fwd_q = true;
> +			else
> +				goto err;
> +			if (queue_id == -2)
> +				goto err;
> +			act_q = action->conf;
> +			data = itf->data;
> +			if (act_q->index >= data->nb_rx_queues)
> +				goto err;
> +
> +			vport = (struct cpfl_vport *)itf;
> +			if (queue_id < 0)
> +				queue_id = vport->base.chunks_info.rx_start_qid;
> +			queue_id += act_q->index;
> +			*act_set = cpfl_act_set_hash_queue(priority, 0, queue_id, 0);
> +			act_set++;
> +			rinfo->act_byte_len += sizeof(union cpfl_action_set);
> +			break;
> +		case RTE_FLOW_ACTION_TYPE_RSS:
> +			rss = action->conf;
> +			if (rss->queue_num <= 1)
> +				goto err;
> +			for (i = 0; i < rss->queue_num - 1; i++) {
> +				if (rss->queue[i + 1] != rss->queue[i] + 1)
> +					goto err;
> +			}
> +			data = itf->data;
> +			if (rss->queue[rss->queue_num - 1] >= data->nb_rx_queues)
> +				goto err;
> +#define FXP_MAX_QREGION_SIZE 128
> +			if (!(rte_is_power_of_2(rss->queue_num) &&
> +			      rss->queue_num <= FXP_MAX_QREGION_SIZE))
> +				goto err;
> +
> +			if (!fwd_q && !fwd_jump)
> +				fwd_q = true;
> +			else
> +				goto err;
> +			if (queue_id == -2)
> +				goto err;
> +			vport = (struct cpfl_vport *)itf;
> +			if (queue_id < 0)
> +				queue_id = vport->base.chunks_info.rx_start_qid;
> +			queue_id += rss->queue[0];
> +			*act_set = cpfl_act_set_hash_queue_region(priority, 0, queue_id,
> +								  log(rss->queue_num) / log(2), 0);
> +			act_set++;
> +			rinfo->act_byte_len += sizeof(union cpfl_action_set);
> +			break;
> +		case RTE_FLOW_ACTION_TYPE_DROP:
> +			(*act_set).data = cpfl_act_drop(priority).data;
> +			act_set++;
> +			rinfo->act_byte_len += sizeof(union cpfl_action_set);
> +			(*act_set).data = cpfl_act_set_commit_mode(priority, 0).data;
> +			act_set++;
> +			rinfo->act_byte_len += sizeof(union cpfl_action_set);
> +			break;
> +		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
> +		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
> +			break;
> +		case RTE_FLOW_ACTION_TYPE_VOID:
> +			break;
> +		default:
> +			goto err;
> +		}
> +	}
> +
> +	if (mr_action != NULL && !set_meta_valid) {
> +		uint32_t i;
> +
> +		for (i = 0; i < rim->mr_num; i++)
> +			if (cpfl_parse_mod_content(itf->adapter, rinfo,
> +						   &rim->rules[rim->pr_num + i],
> +						   &mr_action[i]))
> +				goto err;
> +	}
> +
> +	return 0;
> +
> +err:
> +	PMD_DRV_LOG(ERR, "Invalid action type");
> +	return -EINVAL;
> +}
> +
> <...>
> +
> +static int
> +cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
> +			      const struct rte_flow_attr *attr,
> +			      const struct rte_flow_item pattern[],
> +			      const struct rte_flow_action actions[],
> +			      void **meta)
> +{
> +	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
> +	struct cpfl_flow_pr_action pr_action = { 0 };
> +	struct cpfl_adapter_ext *adapter = itf->adapter;
> +	struct cpfl_flow_mr_action mr_action[MAX_MR_ACTION_NUM] = { 0 };
> +	uint32_t pr_num = 0, mr_num = 0;
> +	struct cpfl_vport *vport;
> +	struct rule_info_meta *rim;
> +	bool set_meta_valid = false;
> +	int ret;
> +
> +	if (itf->type == CPFL_ITF_TYPE_VPORT) {
> +		vport = (struct cpfl_vport *)itf;
> +		if (vport->exceptional) {

The exceptional vport won't be in this release, so please remove this check.

> +			PMD_DRV_LOG(ERR, "Can't create rte_flow with exceptional vport.");
> +			return -EINVAL;
> +		}
> +	}
> +
> +	ret = cpfl_flow_parse_items(adapter->flow_parser, pattern, attr, &pr_action);
> +	if (ret) {
> +		PMD_DRV_LOG(ERR, "No Match pattern support.");
> +		return -EINVAL;
> +	}
> +
> +	if (is_mod_action(actions, &set_meta_valid)) {
> +		ret = cpfl_flow_parse_actions(adapter->flow_parser, actions, mr_action);
> +		if (ret) {
> +			PMD_DRV_LOG(ERR, "action parse fails.");
> +			return -EINVAL;
> +		}
> +		if (!set_meta_valid)
> +			mr_num++;
> +	}
> +
> +	pr_num = 1;
> +	rim = rte_zmalloc(NULL,
> +			  sizeof(struct rule_info_meta) +
> +			  (pr_num + mr_num) * sizeof(struct cpfl_rule_info),
> +			  0);
> +	if (!rim)
> +		return -ENOMEM;
> +
> +	rim->pr_action = pr_action;
> +	rim->pr_num = pr_num;
> +	rim->mr_num = mr_num;
> +	rim->rule_num = pr_num + mr_num;
> +
> +	if (!cpfl_fxp_parse_pattern(&pr_action, rim, 0)) {
> +		PMD_DRV_LOG(ERR, "Invalid input set");

Invalid pattern?

> +		rte_free(rim);
> +		return -rte_errno;
> +	}
> +
> +	if (cpfl_fxp_parse_action(itf, actions, mr_action, rim, attr->priority,
> +				  0, false)) {
> +		PMD_DRV_LOG(ERR, "Invalid input set");

Invalid action?

> +		rte_free(rim);
> +		return -rte_errno;
> +	}
> +
> +	cpfl_fill_rinfo_default_value(&rim->rules[0]);
> +
> +	if (!meta)
> +		rte_free(rim);
> +	else
> +		*meta = rim;
> +
> +	return 0;
> +}
> +
> +static int fxp_mod_init(struct cpfl_adapter_ext *ad)
> +{

Please check and refine the coding style of these functions.
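If the concern is the signature layout, then for consistency with the other functions in this file the return type can go on its own line and the brace on the next, e.g. (same body, only reformatted):

    static int
    fxp_mod_init(struct cpfl_adapter_ext *ad)
    {
        uint32_t size = rte_bitmap_get_memory_footprint(MAX_MOD_CONTENT_INDEX);
        void *mem = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);

        if (!mem)
            return -ENOMEM;

        /* a set bit represents a free slot */
        ad->mod_bm = rte_bitmap_init_with_all_set(MAX_MOD_CONTENT_INDEX, mem, size);
        if (!ad->mod_bm) {
            rte_free(mem);
            return -EINVAL;
        }

        ad->mod_bm_mem = mem;

        return 0;
    }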

> +	uint32_t size = rte_bitmap_get_memory_footprint(MAX_MOD_CONTENT_INDEX);
> +
> +	void *mem = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
> +
> +	if (!mem)
> +		return -ENOMEM;
> +
> +	/* a set bit represent a free slot */
> +	ad->mod_bm = rte_bitmap_init_with_all_set(MAX_MOD_CONTENT_INDEX, mem, size);
> +	if (!ad->mod_bm) {
> +		rte_free(mem);
> +		return -EINVAL;
> +	}
> +
> +	ad->mod_bm_mem = mem;
> +
> +	return 0;
> +}
> +
> +static void fxp_mod_uninit(struct cpfl_adapter_ext *ad)
> +{
> +	rte_free(ad->mod_bm_mem);
> +	ad->mod_bm_mem = NULL;
> +	ad->mod_bm = NULL;
> +}
> +
> +static uint32_t fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad)
> +{
> +	uint64_t slab = 0;
> +	uint32_t pos = 0;
> +
> +	if (!rte_bitmap_scan(ad->mod_bm, &pos, &slab))
> +		return MAX_MOD_CONTENT_INDEX;
> +
> +	pos += __builtin_ffsll(slab) - 1;
> +	rte_bitmap_clear(ad->mod_bm, pos);
> +
> +	return pos;
> +}
> +
> +static void fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx)
> +{
> +	rte_bitmap_set(ad->mod_bm, idx);
> +}
 
What's the benefit of this wrapper? Can we call rte_bitmap_set() directly?
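I.e. the destroy path could just do (sketch):

    /* free mod index: a set bit marks the slot as free again */
    for (i = rim->pr_num; i < rim->rule_num; i++)
        rte_bitmap_set(ad->mod_bm, rim->rules[i].mod.mod_index);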

> +
> +static int
> +cpfl_fxp_query(struct rte_eth_dev *dev __rte_unused,
> +	       struct rte_flow *flow __rte_unused,
> +	       struct rte_flow_query_count *count __rte_unused,
> +	       struct rte_flow_error *error)
> +{
> +	rte_flow_error_set(error, EINVAL,
> +			   RTE_FLOW_ERROR_TYPE_HANDLE,
> +			   NULL,
> +			   "count action not supported by this module");
> +
> +	return -rte_errno;
> +}
> +
> +static void
> +cpfl_fxp_uninit(struct cpfl_adapter_ext *ad)
> +{
> +	fxp_mod_uninit(ad);
> +}

Why do we need the function wrapper?

> +
> +static int
> +cpfl_fxp_init(struct cpfl_adapter_ext *ad)
> +{
> +	int ret = 0;
> +
> +	ret = fxp_mod_init(ad);
> +	if (ret) {
> +		PMD_DRV_LOG(ERR, "Failed to init mod content bitmap.");
> +		return ret;
> +	}
> +
> +	return ret;
> +}
> +
> +static struct
> +cpfl_flow_engine cpfl_fxp_engine = {
> +	.type = CPFL_FLOW_ENGINE_FXP,
> +	.init = cpfl_fxp_init,
> +	.uninit = cpfl_fxp_uninit,
> +	.create = cpfl_fxp_create,
> +	.destroy = cpfl_fxp_destroy,
> +	.query_count = cpfl_fxp_query,
> +	.parse_pattern_action = cpfl_fxp_parse_pattern_action,
> +};
> +
> +RTE_INIT(cpfl_sw_engine_init)
> +{
> +	struct cpfl_flow_engine *engine = &cpfl_fxp_engine;
> +
> +	cpfl_flow_engine_register(engine);
> +}
> diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
> index 4061123034..ce46d7e76e 100644
> --- a/drivers/net/cpfl/meson.build
> +++ b/drivers/net/cpfl/meson.build
> @@ -43,6 +43,7 @@ js_dep = dependency('json-c', required: false, method : 'pkg-config')
>  if js_dep.found()
>      sources += files(
>          'cpfl_flow.c',
> +	'cpfl_flow_engine_fxp.c',
>          'cpfl_flow_parser.c',
>          'cpfl_rules.c',
>          'cpfl_controlq.c',
> --
> 2.25.1
  

Patch

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 63bcc5551f..d7e9ea1a74 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -92,6 +92,8 @@ 
 
 #define CPFL_RX_CFGQ_NUM	4
 #define CPFL_TX_CFGQ_NUM	4
+#define CPFL_FPCP_CFGQ_TX       0
+#define CPFL_FPCP_CFGQ_RX       1
 #define CPFL_CFGQ_NUM		8
 
 struct cpfl_vport_param {
@@ -230,6 +232,8 @@  struct cpfl_adapter_ext {
 	struct rte_hash *repr_whitelist_hash;
 
 	struct cpfl_flow_js_parser *flow_parser;
+	struct rte_bitmap *mod_bm;
+	void *mod_bm_mem;
 
 	/* ctrl vport and ctrl queues. */
 	struct cpfl_vport ctrl_vport;
@@ -265,5 +269,86 @@  int cpfl_packets_dispatch(void *arg);
 	((struct cpfl_repr *)((dev)->data->dev_private))
 #define CPFL_DEV_TO_ITF(dev)				\
 	((struct cpfl_itf *)((dev)->data->dev_private))
+#define CPFL_INVALID_HW_ID      UINT16_MAX
+
+static inline uint16_t
+cpfl_get_port_id(struct cpfl_itf *itf)
+{
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		struct cpfl_vport *vport = (void *)itf;
+
+		return vport->base.devarg_id;
+	}
+
+	return CPFL_INVALID_HW_ID;
+}
+
+static inline uint16_t
+cpfl_get_vsi_id(struct cpfl_itf *itf)
+{
+	struct cpfl_adapter_ext *adapter = itf->adapter;
+	struct cpfl_vport_info *info;
+	uint32_t vport_id;
+	int ret;
+	struct cpfl_vport_id vport_identity;
+
+	if (!itf)
+		return CPFL_INVALID_HW_ID;
+
+	if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		struct cpfl_repr *repr = (void *)itf;
+
+		return repr->vport_info->vport_info.vsi_id;
+	} else if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport_id = ((struct cpfl_vport *)itf)->base.vport_id;
+		vport_identity.func_type = CPCHNL2_FUNC_TYPE_PF;
+		/* host: HOST0_CPF_ID, acc: ACC_CPF_ID */
+		vport_identity.pf_id = ACC_CPF_ID;
+		vport_identity.vf_id = 0;
+		vport_identity.vport_id = vport_id;
+
+		ret = rte_hash_lookup_data(adapter->vport_map_hash, &vport_identity,
+					  (void **)&info);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "vport id not exist");
+			goto err;
+		}
+
+		/* rte_spinlock_unlock(&adapter->vport_map_lock); */
+		return info->vport_info.vsi_id;
+	}
+
+err:
+	/* rte_spinlock_unlock(&adapter->vport_map_lock); */
+	return CPFL_INVALID_HW_ID;
+}
+
+static inline struct cpfl_itf *
+cpfl_get_itf_by_port_id(uint16_t port_id)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= RTE_MAX_ETHPORTS) {
+		PMD_DRV_LOG(ERR, "port_id should be < %d.", RTE_MAX_ETHPORTS);
+		return NULL;
+	}
+
+	dev = &rte_eth_devices[port_id];
+
+	if (dev->state == RTE_ETH_DEV_UNUSED) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] is unused.", port_id);
+		return NULL;
+	}
+
+	if (!dev->data) {
+		PMD_DRV_LOG(ERR, "eth_dev[%d] data not be allocated.", port_id);
+		return NULL;
+	}
+
+	return CPFL_DEV_TO_ITF(dev);
+}
 
 #endif /* _CPFL_ETHDEV_H_ */
diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
new file mode 100644
index 0000000000..e10639c842
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -0,0 +1,610 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <math.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <rte_bitmap.h>
+#include "cpfl_rules.h"
+#include "cpfl_logs.h"
+#include "cpfl_ethdev.h"
+#include "cpfl_flow.h"
+#include "cpfl_fxp_rule.h"
+#include "cpfl_flow_parser.h"
+#include "rte_memcpy.h"
+
+#define COOKIE_DEF	0x1000
+#define PREC_MAX	7
+#define PREC_DEF	1
+#define PREC_SET	5
+#define TYPE_ID		3
+#define OFFSET		0x0a
+#define HOST_ID_DEF	0
+#define PF_NUM_DEF	0
+#define PORT_NUM_DEF	0
+#define RESP_REQ_DEF	2
+#define PIN_TO_CACHE_DEF	0
+#define CLEAR_MIRROR_1ST_STATE_DEF  0
+#define FIXED_FETCH_DEF 0
+#define PTI_DEF		0
+#define MOD_OBJ_SIZE_DEF	0
+#define PIN_MOD_CONTENT_DEF	0
+
+#define MAX_MOD_CONTENT_INDEX	256
+#define MAX_MR_ACTION_NUM 8
+
+struct rule_info_meta {
+	struct cpfl_flow_pr_action pr_action;
+	uint32_t pr_num;
+	uint32_t mr_num;
+	uint32_t rule_num;
+	struct cpfl_rule_info rules[0];
+};
+
+static uint32_t fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad);
+static void fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx);
+uint64_t rule_cookie = COOKIE_DEF;
+
+static int
+cpfl_fxp_create(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct rule_info_meta *rim = meta;
+	struct cpfl_vport *vport;
+
+	if (!rim)
+		return ret;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		cpq_id = CPFL_FPCP_CFGQ_TX;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		return -rte_errno;
+	}
+
+	ret = cpfl_rule_update(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1],
+			       rim->rules, rim->rule_num, true);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "cpfl filter create flow fail");
+		rte_free(rim);
+		return ret;
+	}
+
+	flow->rule = rim;
+
+	return ret;
+}
+
+static inline void
+cpfl_fxp_rule_free(struct rte_flow *flow)
+{
+	rte_free(flow->rule);
+	flow->rule = NULL;
+}
+
+static int
+cpfl_fxp_destroy(struct rte_eth_dev *dev,
+		 struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	int ret = 0;
+	uint32_t cpq_id = 0;
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_adapter_ext *ad = itf->adapter;
+	struct rule_info_meta *rim;
+	uint32_t i;
+	struct cpfl_vport *vport;
+
+	rim = flow->rule;
+	if (!rim) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "no such flow create by cpfl filter");
+
+		cpfl_fxp_rule_free(flow);
+
+		return -rte_errno;
+	}
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		cpq_id = vport->base.devarg_id * 2;
+	} else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+		cpq_id = CPFL_FPCP_CFGQ_TX;
+	} else {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to find correct control queue");
+		return -rte_errno;
+	}
+
+	ret = cpfl_rule_update(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1], rim->rules,
+			       rim->rule_num, false);
+	if (ret < 0) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "fail to destroy cpfl filter rule");
+		goto err;
+	}
+
+	/* free mod index */
+	for (i = rim->pr_num; i < rim->rule_num; i++)
+		fxp_mod_idx_free(ad, rim->rules[i].mod.mod_index);
+err:
+	cpfl_fxp_rule_free(flow);
+	return ret;
+}
+
+static bool
+cpfl_fxp_parse_pattern(const struct cpfl_flow_pr_action *pr_action,
+		       struct rule_info_meta *rim,
+		       int i)
+{
+	if (pr_action->type == CPFL_JS_PR_ACTION_TYPE_SEM) {
+		struct cpfl_rule_info *rinfo = &rim->rules[i];
+
+		rinfo->type = CPFL_RULE_TYPE_SEM;
+		rinfo->sem.prof_id = pr_action->sem.prof;
+		rinfo->sem.sub_prof_id = pr_action->sem.subprof;
+		rinfo->sem.key_byte_len = pr_action->sem.keysize;
+		rte_memcpy(rinfo->sem.key, pr_action->sem.cpfl_flow_pr_fv, rinfo->sem.key_byte_len);
+		rinfo->sem.pin_to_cache = PIN_TO_CACHE_DEF;
+		rinfo->sem.fixed_fetch = FIXED_FETCH_DEF;
+	} else {
+		PMD_DRV_LOG(ERR, "Invalid pattern item.");
+		return false;
+	}
+
+	return true;
+}
+
+static int
+cpfl_parse_mod_content(struct cpfl_adapter_ext *adapter,
+		       struct cpfl_rule_info *match_rinfo,
+		       struct cpfl_rule_info *mod_rinfo,
+		       const struct cpfl_flow_mr_action *mr_action)
+{
+	struct cpfl_mod_rule_info *minfo = &mod_rinfo->mod;
+	uint32_t mod_idx;
+	int i;
+	int next = match_rinfo->act_byte_len / (sizeof(union cpfl_action_set));
+	union cpfl_action_set *act_set =
+		&((union cpfl_action_set *)match_rinfo->act_bytes)[next];
+
+	if (!mr_action || mr_action->type != CPFL_JS_MR_ACTION_TYPE_MOD)
+		return -EINVAL;
+
+	*act_set = cpfl_act_mod_profile(PREC_DEF,
+					mr_action->mod.prof,
+					PTI_DEF,
+					0, /* append */
+					0, /* prepend */
+					CPFL_ACT_MOD_PROFILE_PREFETCH_256B);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_idx = fxp_mod_idx_alloc(adapter);
+	if (mod_idx == MAX_MOD_CONTENT_INDEX) {
+		PMD_DRV_LOG(ERR, "Out of Mod Index.");
+		return -ENOMEM;
+	}
+
+	*act_set = cpfl_act_mod_addr(PREC_DEF, mod_idx);
+
+	act_set++;
+	match_rinfo->act_byte_len += sizeof(union cpfl_action_set);
+
+	mod_rinfo->type = CPFL_RULE_TYPE_MOD;
+	minfo->mod_obj_size = MOD_OBJ_SIZE_DEF;
+	minfo->pin_mod_content = PIN_MOD_CONTENT_DEF;
+	minfo->mod_index = mod_idx;
+	mod_rinfo->cookie = 0x1237561;
+	mod_rinfo->port_num = PORT_NUM_DEF;
+	mod_rinfo->resp_req = RESP_REQ_DEF;
+
+	minfo->mod_content_byte_len = mr_action->mod.byte_len + 2;
+	for (i = 0; i < minfo->mod_content_byte_len; i++)
+		minfo->mod_content[i] = mr_action->mod.data[i];
+
+	return 0;
+}
+
+static int
+cpfl_fxp_parse_action(struct cpfl_itf *itf,
+		      const struct rte_flow_action *actions,
+		      const struct cpfl_flow_mr_action *mr_action,
+		      struct rule_info_meta *rim,
+		      int priority,
+		      int index,
+		      bool is_vport_rule)
+{
+	const struct rte_flow_action_ethdev *act_ethdev;
+	const struct rte_flow_action *action;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_rss *rss;
+	struct rte_eth_dev_data *data;
+	enum rte_flow_action_type action_type;
+	struct cpfl_vport *vport;
+	/* used when action is REPRESENTED_PORT or REPRESENTED_PORT type */
+	struct cpfl_itf *dst_itf;
+	uint16_t dev_id; /*vsi_id or phyical port id*/
+	bool is_vsi;
+	bool set_meta_valid = false;
+	int queue_id = -1;
+	bool fwd_vsi = false;
+	bool fwd_q = false;
+	bool fwd_jump = false;
+	uint32_t i;
+	struct cpfl_rule_info *rinfo = &rim->rules[index];
+	union cpfl_action_set *act_set = (void *)rinfo->act_bytes;
+
+	priority = PREC_MAX - priority;
+	for (action = actions; action->type !=
+			RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			if (!fwd_vsi && !fwd_jump)
+				fwd_vsi = true;
+			else
+				goto err;
+			if (is_vport_rule) {
+				dst_itf = itf;
+			} else {
+				act_ethdev = action->conf;
+				dst_itf = cpfl_get_itf_by_port_id(act_ethdev->port_id);
+			}
+
+			if (!dst_itf)
+				goto err;
+
+			if (dst_itf->type == CPFL_ITF_TYPE_VPORT) {
+				vport = (struct cpfl_vport *)dst_itf;
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			} else {
+				queue_id = -2;
+			}
+
+			is_vsi = (action_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR ||
+				dst_itf->type == CPFL_ITF_TYPE_REPRESENTOR);
+			if (is_vsi || is_vport_rule)
+				dev_id = cpfl_get_vsi_id(dst_itf);
+			else
+				dev_id = cpfl_get_port_id(dst_itf);
+
+			if (dev_id == CPFL_INVALID_HW_ID)
+				goto err;
+
+			if (is_vsi || is_vport_rule)
+				*act_set = cpfl_act_fwd_vsi(0, priority, 0, dev_id);
+			else
+				*act_set = cpfl_act_fwd_port(0, priority, 0, dev_id);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			if (!fwd_q && !fwd_jump)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == -2)
+				goto err;
+			act_q = action->conf;
+			data = itf->data;
+			if (act_q->index >= data->nb_rx_queues)
+				goto err;
+
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += act_q->index;
+			*act_set = cpfl_act_set_hash_queue(priority, 0, queue_id, 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			rss = action->conf;
+			if (rss->queue_num <= 1)
+				goto err;
+			for (i = 0; i < rss->queue_num - 1; i++) {
+				if (rss->queue[i + 1] != rss->queue[i] + 1)
+					goto err;
+			}
+			data = itf->data;
+			if (rss->queue[rss->queue_num - 1] >= data->nb_rx_queues)
+				goto err;
+#define FXP_MAX_QREGION_SIZE 128
+			if (!(rte_is_power_of_2(rss->queue_num) &&
+			      rss->queue_num <= FXP_MAX_QREGION_SIZE))
+				goto err;
+
+			if (!fwd_q && !fwd_jump)
+				fwd_q = true;
+			else
+				goto err;
+			if (queue_id == -2)
+				goto err;
+			vport = (struct cpfl_vport *)itf;
+			if (queue_id < 0)
+				queue_id = vport->base.chunks_info.rx_start_qid;
+			queue_id += rss->queue[0];
+			*act_set = cpfl_act_set_hash_queue_region(priority, 0, queue_id,
+								  log(rss->queue_num) / log(2), 0);
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			(*act_set).data = cpfl_act_drop(priority).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			(*act_set).data = cpfl_act_set_commit_mode(priority, 0).data;
+			act_set++;
+			rinfo->act_byte_len += sizeof(union cpfl_action_set);
+			break;
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		default:
+			goto err;
+		}
+	}
+
+	if (mr_action != NULL && !set_meta_valid) {
+		uint32_t i;
+
+		for (i = 0; i < rim->mr_num; i++)
+			if (cpfl_parse_mod_content(itf->adapter, rinfo,
+						   &rim->rules[rim->pr_num + i],
+						   &mr_action[i]))
+				goto err;
+	}
+
+	return 0;
+
+err:
+	PMD_DRV_LOG(ERR, "Invalid action type");
+	return -EINVAL;
+}
+
+static void
+cpfl_fill_rinfo_default_value(struct cpfl_rule_info *rinfo)
+{
+	if (rule_cookie == ~0llu)
+		rule_cookie = COOKIE_DEF;
+	rinfo->cookie = rule_cookie++;
+	rinfo->host_id = HOST_ID_DEF;
+	rinfo->port_num = PORT_NUM_DEF;
+	rinfo->resp_req = RESP_REQ_DEF;
+	rinfo->clear_mirror_1st_state = CLEAR_MIRROR_1ST_STATE_DEF;
+}
+
+static bool is_mod_action(const struct rte_flow_action actions[], bool *set_meta_valid)
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+
+	if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END)
+		return false;
+
+	for (action = actions; action->type !=
+			RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
+			return true;
+		case RTE_FLOW_ACTION_TYPE_SET_TAG:
+			*set_meta_valid = true;
+			return true;
+		default:
+			continue;
+		}
+	}
+	return false;
+}
+
+static int
+cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
+			      const struct rte_flow_attr *attr,
+			      const struct rte_flow_item pattern[],
+			      const struct rte_flow_action actions[],
+			      void **meta)
+{
+	struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);
+	struct cpfl_flow_pr_action pr_action = { 0 };
+	struct cpfl_adapter_ext *adapter = itf->adapter;
+	struct cpfl_flow_mr_action mr_action[MAX_MR_ACTION_NUM] = { 0 };
+	uint32_t pr_num = 0, mr_num = 0;
+	struct cpfl_vport *vport;
+	struct rule_info_meta *rim;
+	bool set_meta_valid = false;
+	int ret;
+
+	if (itf->type == CPFL_ITF_TYPE_VPORT) {
+		vport = (struct cpfl_vport *)itf;
+		if (vport->exceptional) {
+			PMD_DRV_LOG(ERR, "Can't create rte_flow with exceptional vport.");
+			return -EINVAL;
+		}
+	}
+
+	ret = cpfl_flow_parse_items(adapter->flow_parser, pattern, attr, &pr_action);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "No Match pattern support.");
+		return -EINVAL;
+	}
+
+	if (is_mod_action(actions, &set_meta_valid)) {
+		ret = cpfl_flow_parse_actions(adapter->flow_parser, actions, mr_action);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "action parse fails.");
+			return -EINVAL;
+		}
+		if (!set_meta_valid)
+			mr_num++;
+	}
+
+	pr_num = 1;
+	rim = rte_zmalloc(NULL,
+			  sizeof(struct rule_info_meta) +
+			  (pr_num + mr_num) * sizeof(struct cpfl_rule_info),
+			  0);
+	if (!rim)
+		return -ENOMEM;
+
+	rim->pr_action = pr_action;
+	rim->pr_num = pr_num;
+	rim->mr_num = mr_num;
+	rim->rule_num = pr_num + mr_num;
+
+	if (!cpfl_fxp_parse_pattern(&pr_action, rim, 0)) {
+		PMD_DRV_LOG(ERR, "Invalid input set");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	if (cpfl_fxp_parse_action(itf, actions, mr_action, rim, attr->priority,
+				  0, false)) {
+		PMD_DRV_LOG(ERR, "Invalid input set");
+		rte_free(rim);
+		return -rte_errno;
+	}
+
+	cpfl_fill_rinfo_default_value(&rim->rules[0]);
+
+	if (!meta)
+		rte_free(rim);
+	else
+		*meta = rim;
+
+	return 0;
+}
+
+static int fxp_mod_init(struct cpfl_adapter_ext *ad)
+{
+	uint32_t size = rte_bitmap_get_memory_footprint(MAX_MOD_CONTENT_INDEX);
+
+	void *mem = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+
+	if (!mem)
+		return -ENOMEM;
+
+	/* a set bit represent a free slot */
+	ad->mod_bm = rte_bitmap_init_with_all_set(MAX_MOD_CONTENT_INDEX, mem, size);
+	if (!ad->mod_bm) {
+		rte_free(mem);
+		return -EINVAL;
+	}
+
+	ad->mod_bm_mem = mem;
+
+	return 0;
+}
+
+static void fxp_mod_uninit(struct cpfl_adapter_ext *ad)
+{
+	rte_free(ad->mod_bm_mem);
+	ad->mod_bm_mem = NULL;
+	ad->mod_bm = NULL;
+}
+
+static uint32_t fxp_mod_idx_alloc(struct cpfl_adapter_ext *ad)
+{
+	uint64_t slab = 0;
+	uint32_t pos = 0;
+
+	if (!rte_bitmap_scan(ad->mod_bm, &pos, &slab))
+		return MAX_MOD_CONTENT_INDEX;
+
+	pos += __builtin_ffsll(slab) - 1;
+	rte_bitmap_clear(ad->mod_bm, pos);
+
+	return pos;
+}
+
+static void fxp_mod_idx_free(struct cpfl_adapter_ext *ad, uint32_t idx)
+{
+	rte_bitmap_set(ad->mod_bm, idx);
+}
+
+static int
+cpfl_fxp_query(struct rte_eth_dev *dev __rte_unused,
+	       struct rte_flow *flow __rte_unused,
+	       struct rte_flow_query_count *count __rte_unused,
+	       struct rte_flow_error *error)
+{
+	rte_flow_error_set(error, EINVAL,
+			   RTE_FLOW_ERROR_TYPE_HANDLE,
+			   NULL,
+			   "count action not supported by this module");
+
+	return -rte_errno;
+}
+
+static void
+cpfl_fxp_uninit(struct cpfl_adapter_ext *ad)
+{
+	fxp_mod_uninit(ad);
+}
+
+static int
+cpfl_fxp_init(struct cpfl_adapter_ext *ad)
+{
+	int ret = 0;
+
+	ret = fxp_mod_init(ad);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to init mod content bitmap.");
+		return ret;
+	}
+
+	return ret;
+}
+
+static struct
+cpfl_flow_engine cpfl_fxp_engine = {
+	.type = CPFL_FLOW_ENGINE_FXP,
+	.init = cpfl_fxp_init,
+	.uninit = cpfl_fxp_uninit,
+	.create = cpfl_fxp_create,
+	.destroy = cpfl_fxp_destroy,
+	.query_count = cpfl_fxp_query,
+	.parse_pattern_action = cpfl_fxp_parse_pattern_action,
+};
+
+RTE_INIT(cpfl_sw_engine_init)
+{
+	struct cpfl_flow_engine *engine = &cpfl_fxp_engine;
+
+	cpfl_flow_engine_register(engine);
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 4061123034..ce46d7e76e 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -43,6 +43,7 @@  js_dep = dependency('json-c', required: false, method : 'pkg-config')
 if js_dep.found()
     sources += files(
         'cpfl_flow.c',
+	'cpfl_flow_engine_fxp.c',
         'cpfl_flow_parser.c',
         'cpfl_rules.c',
         'cpfl_controlq.c',