diff mbox series

[27/27] net/cnxk: support meter action to flow create

Message ID 20210906075450.1452123-27-skori@marvell.com (mailing list archive)
State Superseded, archived
Delegated to: Jerin Jacob
Headers show
Series [01/27] common/cnxk: update policer MBOX APIs and HW definitions | expand

Checks

Context Check Description
ci/intel-Testing fail Testing issues
ci/Intel-compilation success Compilation OK
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS
ci/iol-aarch64-compile-testing success Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-broadcom-Performance success Performance Testing PASS
ci/iol-broadcom-Functional success Functional Testing PASS
ci/iol-spell-check-testing warning Testing issues
ci/checkpatch success coding style OK

Commit Message

Sunil Kumar Kori Sept. 6, 2021, 7:54 a.m. UTC
From: Sunil Kumar Kori <skori@marvell.com>

Meters are configured per flow using rte_flow_create API.
Implement support for meter action applied on the flow.

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
 doc/guides/nics/features/cnxk.ini    |   1 +
 doc/guides/nics/features/cnxk_vf.ini |   1 +
 drivers/net/cnxk/cn10k_ethdev_mtr.c  | 270 +++++++++++++++++++++++++++
 drivers/net/cnxk/cnxk_ethdev.h       |  10 +
 drivers/net/cnxk/cnxk_rte_flow.c     | 251 +++++++++++++++++++++++++
 5 files changed, 533 insertions(+)
diff mbox series

Patch

diff --git a/doc/guides/nics/features/cnxk.ini b/doc/guides/nics/features/cnxk.ini
index 5d456257bd..7bbce7dafc 100644
--- a/doc/guides/nics/features/cnxk.ini
+++ b/doc/guides/nics/features/cnxk.ini
@@ -78,6 +78,7 @@  count                = Y
 drop                 = Y
 flag                 = Y
 mark                 = Y
+meter                = Y
 of_pop_vlan          = Y
 of_push_vlan         = Y
 of_set_vlan_pcp      = Y
diff --git a/doc/guides/nics/features/cnxk_vf.ini b/doc/guides/nics/features/cnxk_vf.ini
index 7b4299f0be..89802a27f9 100644
--- a/doc/guides/nics/features/cnxk_vf.ini
+++ b/doc/guides/nics/features/cnxk_vf.ini
@@ -70,6 +70,7 @@  count                = Y
 drop                 = Y
 flag                 = Y
 mark                 = Y
+meter                = Y
 of_pop_vlan          = Y
 of_push_vlan         = Y
 of_set_vlan_pcp      = Y
diff --git a/drivers/net/cnxk/cn10k_ethdev_mtr.c b/drivers/net/cnxk/cn10k_ethdev_mtr.c
index 76e3adcfb1..f38d55cf99 100644
--- a/drivers/net/cnxk/cn10k_ethdev_mtr.c
+++ b/drivers/net/cnxk/cn10k_ethdev_mtr.c
@@ -735,3 +735,273 @@  cn10k_nix_mtr_ops_get(struct rte_eth_dev *dev, void *ops)
 	*(const void **)ops = &nix_mtr_ops;
 	return 0;
 }
+
+/* Check that meter object @id exists on @eth_dev and that both the
+ * profile and the policy it references are present.
+ *
+ * @eth_dev: ethdev owning the meter database
+ * @id: meter object id to validate
+ *
+ * Return: 0 when meter, profile and policy are all found, -EINVAL otherwise.
+ */
+int
+nix_mtr_validate(struct rte_eth_dev *eth_dev, uint32_t id)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct cn10k_flow_meter_profile *profile;
+	struct cn10k_flow_meter_policy *policy;
+	struct cn10k_flow_meter *mtr;
+
+	mtr = nix_mtr_find(dev, id);
+	if (mtr == NULL)
+		return -EINVAL;
+
+	/* The meter stores only ids; resolve both to confirm they exist. */
+	profile = nix_mtr_profile_find(dev, mtr->params.meter_profile_id);
+	if (profile == NULL)
+		return -EINVAL;
+
+	policy = nix_mtr_policy_find(dev, mtr->params.meter_policy_id);
+	if (policy == NULL)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Copy the per-color actions of meter @id's policy into @actions.
+ *
+ * @eth_dev: ethdev owning the meter database
+ * @id: meter object id whose policy actions are wanted
+ * @actions: output array indexed by RTE_COLOR_* (GREEN/YELLOW/RED)
+ *
+ * Return: 0 on success, -EINVAL if the meter or its policy is not found.
+ */
+int
+nix_mtr_policy_act_get(struct rte_eth_dev *eth_dev, uint32_t id,
+		       struct rte_flow_action actions[RTE_COLORS])
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct cn10k_flow_meter_policy *policy;
+	struct cn10k_flow_meter *mtr;
+
+	mtr = nix_mtr_find(dev, id);
+	if (mtr == NULL)
+		return -EINVAL;
+
+	policy = nix_mtr_policy_find(dev, mtr->params.meter_policy_id);
+	if (policy == NULL)
+		return -EINVAL;
+
+	/* Shallow copies: the conf pointers still reference the policy's
+	 * stored actions, so the policy must outlive the returned array.
+	 */
+	actions[RTE_COLOR_GREEN] = *policy->policy.actions[RTE_COLOR_GREEN];
+	actions[RTE_COLOR_YELLOW] = *policy->policy.actions[RTE_COLOR_YELLOW];
+	actions[RTE_COLOR_RED] = *policy->policy.actions[RTE_COLOR_RED];
+
+	return 0;
+}
+
+/* Record the list of RQs (receive queues) attached to meter @id.
+ *
+ * Any previously recorded RQ list is released first so repeated updates
+ * (e.g. QUEUE action followed by RSS action validation) do not leak.
+ *
+ * @eth_dev: ethdev owning the meter database
+ * @id: meter object id to update
+ * @queue_num: number of entries in @queue, must be non-zero
+ * @queue: queue indices to record
+ *
+ * Return: 0 on success, -EINVAL on bad arguments or unknown meter,
+ * -ENOMEM on allocation failure (rq_num is reset to 0 in that case).
+ */
+int
+nix_mtr_rq_update(struct rte_eth_dev *eth_dev, uint32_t id, uint32_t queue_num,
+		  const uint16_t *queue)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct cn10k_flow_meter *mtr;
+	uint32_t i;
+
+	if (queue_num == 0 || queue == NULL)
+		return -EINVAL;
+
+	mtr = nix_mtr_find(dev, id);
+	if (mtr == NULL)
+		return -EINVAL;
+
+	/* Drop a stale queue list from a previous update to avoid a leak. */
+	plt_free(mtr->rq_id);
+	mtr->rq_id = plt_zmalloc(queue_num * sizeof(uint32_t), ROC_ALIGN);
+	if (mtr->rq_id == NULL) {
+		mtr->rq_num = 0;
+		return -ENOMEM;
+	}
+
+	mtr->rq_num = queue_num;
+	for (i = 0; i < queue_num; i++)
+		mtr->rq_id[i] = queue[i];
+
+	return 0;
+}
+
+/* Link meter @cur_id into a meter chain according to its hierarchy level.
+ *
+ * LEAF meters have no predecessor, TOP meters have no successor, MID
+ * meters have both. The is_prev/is_next flags tell later configuration
+ * code which neighbor ids are valid.
+ *
+ * @eth_dev: ethdev owning the meter database
+ * @cur_id: meter being linked
+ * @prev_id: upstream meter id (ignored for LEAF)
+ * @next_id: downstream meter id (ignored for TOP)
+ *
+ * Return: 0 on success, -EINVAL if the meter is unknown or its level
+ * does not map to a known BPF level.
+ */
+int
+nix_mtr_chain_update(struct rte_eth_dev *eth_dev, uint32_t cur_id,
+		     uint32_t prev_id, uint32_t next_id)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct cn10k_flow_meter *mtr;
+
+	mtr = nix_mtr_find(dev, cur_id);
+	if (mtr == NULL)
+		return -EINVAL;
+
+	/* lvl_map is a file-scope table translating the driver's level
+	 * index to ROC_NIX_BPF_LEVEL_F_* flags (defined earlier in this
+	 * file, outside this hunk).
+	 */
+	switch (lvl_map[mtr->level]) {
+	case ROC_NIX_BPF_LEVEL_F_LEAF:
+		mtr->prev_id = ROC_NIX_BPF_ID_INVALID;
+		mtr->next_id = next_id;
+		mtr->is_prev = false;
+		mtr->is_next = true;
+		break;
+	case ROC_NIX_BPF_LEVEL_F_MID:
+		mtr->prev_id = prev_id;
+		mtr->next_id = next_id;
+		mtr->is_prev = true;
+		mtr->is_next = true;
+		break;
+	case ROC_NIX_BPF_LEVEL_F_TOP:
+		mtr->prev_id = prev_id;
+		mtr->next_id = ROC_NIX_BPF_ID_INVALID;
+		mtr->is_prev = true;
+		mtr->is_next = false;
+		break;
+	default:
+		plt_err("Invalid meter level");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Set the hierarchy level of meter @id.
+ *
+ * @eth_dev: ethdev owning the meter database
+ * @id: meter object id
+ * @level: new level index (used later via lvl_map to pick the BPF level)
+ *
+ * Return: 0 on success, -EINVAL if the meter is not found.
+ */
+int
+nix_mtr_level_update(struct rte_eth_dev *eth_dev, uint32_t id, uint32_t level)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct cn10k_flow_meter *mtr;
+
+	mtr = nix_mtr_find(dev, id);
+	if (mtr == NULL)
+		return -EINVAL;
+
+	mtr->level = level;
+	return 0;
+}
+
+/* Translate the rte_mtr profile/policy of @mtr into a ROC BPF config.
+ *
+ * Copies the rate/burst parameters for whichever RFC algorithm the
+ * profile selects and derives the per-color PASS/DROP action from the
+ * policy's flow actions. All colors default to PASS; only an explicit
+ * DROP action in the policy turns a color into DROP.
+ */
+static void
+nix_mtr_config_map(struct cn10k_flow_meter *mtr, struct roc_nix_bpf_cfg *cfg)
+{
+	/* Indexed by the rte_mtr algorithm enum; assumes the RTE_MTR_*
+	 * algorithm values are 0..3 in this order — TODO confirm against
+	 * rte_mtr.h.
+	 */
+	enum roc_nix_bpf_algo alg_map[] = {
+		ROC_NIX_BPF_ALGO_NONE, ROC_NIX_BPF_ALGO_2697,
+		ROC_NIX_BPF_ALGO_2698, ROC_NIX_BPF_ALGO_4115};
+	struct cn10k_flow_meter_profile *profile = mtr->profile;
+	struct cn10k_flow_meter_policy *policy = mtr->policy;
+
+	cfg->alg = alg_map[profile->profile.alg];
+	cfg->lmode = profile->profile.packet_mode;
+
+	switch (cfg->alg) {
+	case ROC_NIX_BPF_ALGO_2697:
+		/* srTCM: single rate, committed + excess burst. */
+		cfg->algo2697.cir = profile->profile.srtcm_rfc2697.cir;
+		cfg->algo2697.cbs = profile->profile.srtcm_rfc2697.cbs;
+		cfg->algo2697.ebs = profile->profile.srtcm_rfc2697.ebs;
+		break;
+	case ROC_NIX_BPF_ALGO_2698:
+		/* trTCM: committed + peak rate and bursts. */
+		cfg->algo2698.cir = profile->profile.trtcm_rfc2698.cir;
+		cfg->algo2698.pir = profile->profile.trtcm_rfc2698.pir;
+		cfg->algo2698.cbs = profile->profile.trtcm_rfc2698.cbs;
+		cfg->algo2698.pbs = profile->profile.trtcm_rfc2698.pbs;
+		break;
+	case ROC_NIX_BPF_ALGO_4115:
+		/* trTCM (RFC 4115): committed + excess rate and bursts. */
+		cfg->algo4115.cir = profile->profile.trtcm_rfc4115.cir;
+		cfg->algo4115.eir = profile->profile.trtcm_rfc4115.eir;
+		cfg->algo4115.cbs = profile->profile.trtcm_rfc4115.cbs;
+		cfg->algo4115.ebs = profile->profile.trtcm_rfc4115.ebs;
+		break;
+	default:
+		/* ALGO_NONE: no rate parameters to copy. */
+		break;
+	}
+
+	cfg->action[ROC_NIX_BPF_COLOR_GREEN] = ROC_NIX_BPF_ACTION_PASS;
+	cfg->action[ROC_NIX_BPF_COLOR_YELLOW] = ROC_NIX_BPF_ACTION_PASS;
+	cfg->action[ROC_NIX_BPF_COLOR_RED] = ROC_NIX_BPF_ACTION_PASS;
+
+	if (policy->policy.actions[RTE_COLOR_GREEN]->type ==
+	    RTE_FLOW_ACTION_TYPE_DROP)
+		cfg->action[ROC_NIX_BPF_COLOR_GREEN] = ROC_NIX_BPF_ACTION_DROP;
+
+	if (policy->policy.actions[RTE_COLOR_YELLOW]->type ==
+	    RTE_FLOW_ACTION_TYPE_DROP)
+		cfg->action[ROC_NIX_BPF_COLOR_YELLOW] = ROC_NIX_BPF_ACTION_DROP;
+
+	if (policy->policy.actions[RTE_COLOR_RED]->type ==
+	    RTE_FLOW_ACTION_TYPE_DROP)
+		cfg->action[ROC_NIX_BPF_COLOR_RED] = ROC_NIX_BPF_ACTION_DROP;
+}
+
+/* Build the DSCP pre-color table for @mtr.
+ *
+ * Each of the ROC_NIX_BPF_PRE_COLOR_MAX DSCP code points is mapped from
+ * the rte_mtr dscp_table (RTE_COLOR_* values) to the corresponding ROC
+ * color. The table uses the outer-header DSCP field.
+ */
+static void
+nix_dscp_table_map(struct cn10k_flow_meter *mtr,
+		   struct roc_nix_bpf_precolor *tbl)
+{
+	/* Indexed by RTE_COLOR_{GREEN,YELLOW,RED}. */
+	enum roc_nix_bpf_color color_map[] = {ROC_NIX_BPF_COLOR_GREEN,
+					      ROC_NIX_BPF_COLOR_YELLOW,
+					      ROC_NIX_BPF_COLOR_RED};
+	int i;
+
+	tbl->count = ROC_NIX_BPF_PRE_COLOR_MAX;
+	tbl->mode = ROC_NIX_BPF_PC_MODE_DSCP_OUTER;
+
+	for (i = 0; i < ROC_NIX_BPF_PRE_COLOR_MAX; i++)
+		tbl->color[i] = color_map[mtr->params.dscp_table[i]];
+}
+
+/* Allocate and program HW bandwidth profiles (BPF) for meter @id and,
+ * when the meter is chained, for its MID and TOP level successors.
+ *
+ * Steps: resolve the (up to three) chained meters, allocate one BPF per
+ * present level, program rate config and DSCP pre-color table for each,
+ * then enable the profile on every RQ attached to an enabled meter.
+ *
+ * @eth_dev: ethdev owning the meter database
+ * @id: leaf meter object id
+ *
+ * Return: 0 on success, negative errno on lookup/allocation/HW failure.
+ */
+int
+nix_mtr_configure(struct rte_eth_dev *eth_dev, uint32_t id)
+{
+	struct cn10k_flow_meter *mtr[ROC_NIX_BPF_LEVEL_MAX] = {0};
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix_bpf_objs profs[ROC_NIX_BPF_LEVEL_MAX];
+	uint8_t idx0 = ROC_NIX_BPF_LEVEL_IDX_INVALID;
+	uint8_t idx1 = ROC_NIX_BPF_LEVEL_IDX_INVALID;
+	uint8_t idx2 = ROC_NIX_BPF_LEVEL_IDX_INVALID;
+	/* Zero-initialize: roc_nix_bpf_alloc may look at all entries. */
+	uint16_t per_lvl_cnt[ROC_NIX_BPF_LEVEL_MAX] = {0};
+	struct roc_nix *nix = &dev->nix;
+	struct roc_nix_bpf_precolor tbl;
+	struct roc_nix_bpf_cfg cfg;
+	struct roc_nix_rq *rq;
+	uint8_t lvl_mask;
+	uint32_t i, j;
+	int rc;
+
+	mtr[0] = nix_mtr_find(dev, id);
+	if (mtr[0] == NULL)
+		return -EINVAL;
+
+	idx0 = roc_nix_bpf_level_to_idx(lvl_map[mtr[0]->level]);
+	lvl_mask = ROC_NIX_BPF_LEVEL_F_LEAF;
+	per_lvl_cnt[idx0] = 1;
+
+	if (mtr[0]->is_next) {
+		mtr[1] = nix_mtr_find(dev, mtr[0]->next_id);
+		if (mtr[1] == NULL)
+			return -EINVAL;
+
+		idx1 = roc_nix_bpf_level_to_idx(lvl_map[mtr[1]->level]);
+		lvl_mask |= ROC_NIX_BPF_LEVEL_F_MID;
+		per_lvl_cnt[idx1] = 1;
+	}
+
+	/* Guard against NULL deref: mtr[1] is only set when chained. */
+	if (mtr[1] != NULL && mtr[1]->is_next) {
+		mtr[2] = nix_mtr_find(dev, mtr[1]->next_id);
+		if (mtr[2] == NULL)
+			return -EINVAL;
+
+		idx2 = roc_nix_bpf_level_to_idx(lvl_map[mtr[2]->level]);
+		lvl_mask |= ROC_NIX_BPF_LEVEL_F_TOP;
+		per_lvl_cnt[idx2] = 1;
+	}
+
+	rc = roc_nix_bpf_alloc(nix, lvl_mask, per_lvl_cnt, profs);
+	if (rc)
+		return rc;
+
+	mtr[0]->bpf_id = profs[idx0].ids[0];
+
+	if (mtr[1] != NULL)
+		mtr[1]->bpf_id = profs[idx1].ids[0];
+
+	if (mtr[2] != NULL)
+		mtr[2]->bpf_id = profs[idx2].ids[0];
+
+	for (i = 0; i < ROC_NIX_BPF_LEVEL_MAX; i++) {
+		/* Skip levels not present in this chain. */
+		if (mtr[i] == NULL)
+			continue;
+
+		memset(&cfg, 0, sizeof(struct roc_nix_bpf_cfg));
+		nix_mtr_config_map(mtr[i], &cfg);
+		rc = roc_nix_bpf_config(nix, mtr[i]->bpf_id,
+					lvl_map[mtr[i]->level], &cfg);
+		if (rc)
+			return rc;
+
+		/* Clear the pre-color table (was sizeof roc_nix_bpf_cfg). */
+		memset(&tbl, 0, sizeof(struct roc_nix_bpf_precolor));
+		nix_dscp_table_map(mtr[i], &tbl);
+		rc = roc_nix_bpf_pre_color_tbl_setup(nix, mtr[i]->bpf_id,
+						     lvl_map[mtr[i]->level],
+						     &tbl);
+		if (rc)
+			return rc;
+
+		if (mtr[i]->params.meter_enable) {
+			/* Use a separate index: the original reused 'i'
+			 * here, clobbering the level loop and iterating
+			 * while rq_num != 0 (never terminating correctly).
+			 */
+			for (j = 0; j < mtr[i]->rq_num; j++) {
+				rq = &dev->rqs[mtr[i]->rq_id[j]];
+				rc = roc_nix_bpf_ena_dis(nix, mtr[i]->bpf_id,
+							 rq, true);
+				if (rc)
+					return rc;
+			}
+		}
+	}
+
+	return rc;
+}
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 55b1a56a8b..764e6296c4 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -432,6 +432,16 @@  int cnxk_nix_dev_get_reg(struct rte_eth_dev *eth_dev,
 
 /* Other private functions */
 int nix_recalc_mtu(struct rte_eth_dev *eth_dev);
+/* cn10k flow-meter helpers (implemented in cn10k_ethdev_mtr.c). */
+int nix_mtr_validate(struct rte_eth_dev *eth_dev, uint32_t id);
+int nix_mtr_policy_act_get(struct rte_eth_dev *eth_dev, uint32_t id,
+			   struct rte_flow_action actions[RTE_COLORS]);
+int nix_mtr_rq_update(struct rte_eth_dev *eth_dev, uint32_t id,
+		      uint32_t queue_num, const uint16_t *queue);
+int nix_mtr_chain_update(struct rte_eth_dev *eth_dev, uint32_t cur_id,
+			 uint32_t prev_id, uint32_t next_id);
+int nix_mtr_level_update(struct rte_eth_dev *eth_dev, uint32_t id,
+			 uint32_t level);
+int nix_mtr_configure(struct rte_eth_dev *eth_dev, uint32_t id);
 
 /* Inlines */
 static __rte_always_inline uint64_t
diff --git a/drivers/net/cnxk/cnxk_rte_flow.c b/drivers/net/cnxk/cnxk_rte_flow.c
index 32c1b5dee5..7e3db9458b 100644
--- a/drivers/net/cnxk/cnxk_rte_flow.c
+++ b/drivers/net/cnxk/cnxk_rte_flow.c
@@ -92,6 +92,243 @@  npc_rss_action_validate(struct rte_eth_dev *eth_dev,
 	return 0;
 }
 
+/* Validate the policy's GREEN-color action against the flow's own
+ * actions and record state on meter @id.
+ *
+ * QUEUE/RSS policy actions must match the equivalent flow-level action;
+ * a METER policy action chains this meter to the next one via
+ * @prev_id/@next_id.
+ *
+ * Return: 0 on success, negative errno on mismatch or update failure.
+ */
+static int
+npc_mtr_green_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
+				    const struct rte_flow_action actions[],
+				    struct rte_flow_action *green_act,
+				    uint32_t *prev_id, uint32_t *next_id)
+{
+	const struct rte_flow_action_queue *q2_conf =
+		(const struct rte_flow_action_queue *)green_act->conf;
+	const struct rte_flow_action_rss *rss2_conf =
+		(const struct rte_flow_action_rss *)green_act->conf;
+	const struct rte_flow_action_meter *mtr_conf;
+	const struct rte_flow_action_queue *q1_conf;
+	const struct rte_flow_action_rss *rss1_conf;
+	int rc, i;
+
+	if (green_act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+		/* Propagate -EINVAL/-ENOMEM instead of silently dropping. */
+		rc = nix_mtr_rq_update(eth_dev, id, 1, &q2_conf->index);
+		if (rc)
+			return rc;
+
+		/* A flow-level QUEUE action must target the same queue. */
+		for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
+			if (actions[i].type != RTE_FLOW_ACTION_TYPE_QUEUE)
+				continue;
+			q1_conf = (const struct rte_flow_action_queue *)
+					  actions[i].conf;
+			if (q1_conf->index != q2_conf->index)
+				return -EINVAL;
+		}
+	}
+
+	if (green_act->type == RTE_FLOW_ACTION_TYPE_RSS) {
+		rc = nix_mtr_rq_update(eth_dev, id, rss2_conf->queue_num,
+				       rss2_conf->queue);
+		if (rc)
+			return rc;
+
+		/* A flow-level RSS action must match the policy RSS. */
+		for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
+			if (actions[i].type != RTE_FLOW_ACTION_TYPE_RSS)
+				continue;
+			rss1_conf = (const struct rte_flow_action_rss *)
+					    actions[i].conf;
+			if (memcmp(rss1_conf, rss2_conf,
+				   sizeof(struct rte_flow_action_rss)))
+				return -EINVAL;
+		}
+	}
+
+	if (green_act->type == RTE_FLOW_ACTION_TYPE_METER) {
+		mtr_conf =
+			(const struct rte_flow_action_meter *)green_act->conf;
+		*next_id = mtr_conf->mtr_id;
+		/* NOTE(review): level is always set to 0 here — confirm
+		 * this is intended for chained (non-leaf) meters.
+		 */
+		nix_mtr_level_update(eth_dev, id, 0);
+		nix_mtr_chain_update(eth_dev, id, *prev_id, *next_id);
+		*prev_id = id;
+	}
+
+	return 0;
+}
+
+/* Validate the policy's YELLOW-color action against the flow's own
+ * actions and record state on meter @id; mirrors the GREEN handler.
+ *
+ * @next_id is reset to 0xffff (end-of-chain sentinel) and only set to a
+ * real id when the policy action is METER.
+ *
+ * Return: 0 on success, negative errno on mismatch or update failure.
+ */
+static int
+npc_mtr_yellow_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
+				     const struct rte_flow_action actions[],
+				     struct rte_flow_action *yellow_act,
+				     uint32_t *prev_id, uint32_t *next_id)
+{
+	const struct rte_flow_action_queue *q2_conf =
+		(const struct rte_flow_action_queue *)yellow_act->conf;
+	const struct rte_flow_action_rss *rss2_conf =
+		(const struct rte_flow_action_rss *)yellow_act->conf;
+	const struct rte_flow_action_meter *mtr_conf;
+	const struct rte_flow_action_rss *rss1_conf;
+	const struct rte_flow_action_queue *q1_conf;
+	int rc, i;
+
+	if (yellow_act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+		/* Propagate -EINVAL/-ENOMEM instead of silently dropping. */
+		rc = nix_mtr_rq_update(eth_dev, id, 1, &q2_conf->index);
+		if (rc)
+			return rc;
+
+		for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
+			if (actions[i].type != RTE_FLOW_ACTION_TYPE_QUEUE)
+				continue;
+			/* Compare against the flow's action, not the
+			 * policy's own conf (original copy-paste bug).
+			 */
+			q1_conf = (const struct rte_flow_action_queue *)
+					  actions[i].conf;
+			if (q1_conf->index != q2_conf->index)
+				return -EINVAL;
+		}
+	}
+
+	if (yellow_act->type == RTE_FLOW_ACTION_TYPE_RSS) {
+		rc = nix_mtr_rq_update(eth_dev, id, rss2_conf->queue_num,
+				       rss2_conf->queue);
+		if (rc)
+			return rc;
+
+		for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
+			if (actions[i].type != RTE_FLOW_ACTION_TYPE_RSS)
+				continue;
+			rss1_conf = (const struct rte_flow_action_rss *)
+					    actions[i].conf;
+			if (memcmp(rss1_conf, rss2_conf,
+				   sizeof(struct rte_flow_action_rss)))
+				return -EINVAL;
+		}
+	}
+
+	*next_id = 0xffff;
+	if (yellow_act->type == RTE_FLOW_ACTION_TYPE_METER) {
+		mtr_conf =
+			(const struct rte_flow_action_meter *)yellow_act->conf;
+		*next_id = mtr_conf->mtr_id;
+		nix_mtr_level_update(eth_dev, id, 0);
+		nix_mtr_chain_update(eth_dev, id, *prev_id, *next_id);
+		*prev_id = id;
+		/* Return 0 (success): the original returned true, which the
+		 * caller's "if (rc) return rc;" treated as an error, making
+		 * every chained-meter flow fail.
+		 */
+		return 0;
+	}
+
+	return 0;
+}
+
+/* Validate the policy's RED-color action against the flow's own actions
+ * and record state on meter @id; mirrors the GREEN/YELLOW handlers.
+ *
+ * @next_id is reset to 0xffff (end-of-chain sentinel) and only set to a
+ * real id when the policy action is METER.
+ *
+ * Return: 0 on success, negative errno on mismatch or update failure.
+ */
+static int
+npc_mtr_red_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
+				  const struct rte_flow_action actions[],
+				  struct rte_flow_action *red_act,
+				  uint32_t *prev_id, uint32_t *next_id)
+{
+	const struct rte_flow_action_queue *q2_conf =
+		(const struct rte_flow_action_queue *)red_act->conf;
+	const struct rte_flow_action_rss *rss2_conf =
+		(const struct rte_flow_action_rss *)red_act->conf;
+	const struct rte_flow_action_meter *mtr_conf;
+	const struct rte_flow_action_rss *rss1_conf;
+	const struct rte_flow_action_queue *q1_conf;
+	int rc, i;
+
+	if (red_act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+		/* Propagate -EINVAL/-ENOMEM instead of silently dropping. */
+		rc = nix_mtr_rq_update(eth_dev, id, 1, &q2_conf->index);
+		if (rc)
+			return rc;
+
+		for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
+			if (actions[i].type != RTE_FLOW_ACTION_TYPE_QUEUE)
+				continue;
+			/* Compare against the flow's action, not the
+			 * policy's own conf (original copy-paste bug).
+			 */
+			q1_conf = (const struct rte_flow_action_queue *)
+					  actions[i].conf;
+			if (q1_conf->index != q2_conf->index)
+				return -EINVAL;
+		}
+	}
+
+	if (red_act->type == RTE_FLOW_ACTION_TYPE_RSS) {
+		rc = nix_mtr_rq_update(eth_dev, id, rss2_conf->queue_num,
+				       rss2_conf->queue);
+		if (rc)
+			return rc;
+
+		for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
+			if (actions[i].type != RTE_FLOW_ACTION_TYPE_RSS)
+				continue;
+			rss1_conf = (const struct rte_flow_action_rss *)
+					    actions[i].conf;
+			if (memcmp(rss1_conf, rss2_conf,
+				   sizeof(struct rte_flow_action_rss)))
+				return -EINVAL;
+		}
+	}
+
+	*next_id = 0xffff;
+	if (red_act->type == RTE_FLOW_ACTION_TYPE_METER) {
+		mtr_conf = (const struct rte_flow_action_meter *)red_act->conf;
+		*next_id = mtr_conf->mtr_id;
+		nix_mtr_level_update(eth_dev, id, 0);
+		nix_mtr_chain_update(eth_dev, id, *prev_id, *next_id);
+		*prev_id = id;
+		/* Return 0 (success): the original returned true, which the
+		 * caller's "if (rc) return rc;" treated as an error, making
+		 * every chained-meter flow fail.
+		 */
+		return 0;
+	}
+
+	return 0;
+}
+
+/* Find a METER action in the flow's action list and configure the whole
+ * meter chain it heads.
+ *
+ * Walks the chain (next id 0xffff terminates), validating each meter and
+ * its per-color policy actions, then programs the HW profiles.
+ *
+ * @is_mtr is set to 1 as soon as a METER action is seen, so the caller
+ * can distinguish "no meter action" from "meter configuration failed".
+ *
+ * Return: 0 on success, negative errno on failure, -EINVAL when the
+ * flow carries no METER action.
+ */
+static int
+npc_mtr_configure(struct rte_eth_dev *eth_dev,
+		  const struct rte_flow_action actions[], int *is_mtr)
+{
+	uint32_t cur_mtr_id, mtr_id = 0xffff;
+	uint32_t prev_mtr_id = 0xffff, next_mtr_id = 0xffff;
+	const struct rte_flow_action_meter *mtr_conf;
+	struct rte_flow_action mtr_acts[RTE_COLORS];
+	bool is_mtr_act = false;
+	int rc = -EINVAL, i;
+
+	for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
+		if (actions[i].type == RTE_FLOW_ACTION_TYPE_METER) {
+			/* Read the conf of the matched action; the original
+			 * read actions->conf, i.e. always action 0.
+			 */
+			mtr_conf = (const struct rte_flow_action_meter *)
+					   (actions[i].conf);
+			mtr_id = mtr_conf->mtr_id;
+			is_mtr_act = true;
+			*is_mtr = 1;
+			break;
+		}
+	}
+
+	if (!is_mtr_act)
+		return rc;
+
+	prev_mtr_id = mtr_id;
+	next_mtr_id = mtr_id;
+	while (next_mtr_id != 0xffff) {
+		/* Operate on the meter currently being walked; the original
+		 * always passed the head mtr_id to the color validators, so
+		 * chained meters never had their state updated.
+		 */
+		cur_mtr_id = next_mtr_id;
+
+		rc = nix_mtr_validate(eth_dev, cur_mtr_id);
+		if (rc)
+			return rc;
+
+		rc = nix_mtr_policy_act_get(eth_dev, cur_mtr_id, mtr_acts);
+		if (rc)
+			return rc;
+
+		rc = npc_mtr_green_color_action_validate(eth_dev, cur_mtr_id,
+			actions, &mtr_acts[RTE_COLOR_GREEN], &prev_mtr_id,
+			&next_mtr_id);
+		if (rc)
+			return rc;
+
+		rc = npc_mtr_yellow_color_action_validate(eth_dev, cur_mtr_id,
+			actions, &mtr_acts[RTE_COLOR_YELLOW], &prev_mtr_id,
+			&next_mtr_id);
+		if (rc)
+			return rc;
+
+		rc = npc_mtr_red_color_action_validate(eth_dev, cur_mtr_id,
+			actions, &mtr_acts[RTE_COLOR_RED], &prev_mtr_id,
+			&next_mtr_id);
+		if (rc)
+			return rc;
+	}
+
+	return nix_mtr_configure(eth_dev, mtr_id);
+}
+
 static void
 npc_rss_flowkey_get(struct cnxk_eth_dev *eth_dev,
 		    const struct roc_npc_action *rss_action,
@@ -195,6 +432,10 @@  cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 				ROC_NPC_ACTION_TYPE_VLAN_PCP_INSERT;
 			in_actions[i].conf = actions->conf;
 			break;
+		case RTE_FLOW_ACTION_TYPE_METER:
+			in_actions[i].type = ROC_NPC_ACTION_TYPE_METER;
+			in_actions[i].conf = actions->conf;
+			break;
 		default:
 			plt_npc_dbg("Action is not supported = %d",
 				    actions->type);
@@ -281,6 +522,7 @@  cnxk_flow_create(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 	struct roc_npc_attr in_attr;
 	struct roc_npc_flow *flow;
 	int errcode = 0;
+	int is_mtr = 0;
 	int rc;
 
 	rc = cnxk_map_flow_data(eth_dev, attr, pattern, actions, &in_attr,
@@ -292,6 +534,15 @@  cnxk_flow_create(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr,
 		return NULL;
 	}
 
+	if (roc_model_is_cn10k()) {
+		rc = npc_mtr_configure(eth_dev, actions, &is_mtr);
+		if (rc && is_mtr) {
+			rte_flow_error_set(error, 0, RTE_FLOW_ERROR_TYPE_ACTION,
+					   NULL, "Failed to configure mtr ");
+			return NULL;
+		}
+	}
+
 	flow = roc_npc_flow_create(npc, &in_attr, in_pattern, in_actions,
 				   &errcode);
 	if (errcode != 0) {