[v6] sched: enable CMAN at runtime

Message ID 20220621081607.993440-1-marcinx.danilewicz@intel.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Headers
Series [v6] sched: enable CMAN at runtime

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/iol-aarch64-compile-testing success Testing PASS
ci/iol-mellanox-Performance success Performance Testing PASS
ci/iol-aarch64-unit-testing success Testing PASS
ci/iol-intel-Performance success Performance Testing PASS
ci/iol-intel-Functional success Functional Testing PASS
ci/iol-x86_64-unit-testing success Testing PASS
ci/iol-x86_64-compile-testing success Testing PASS
ci/github-robot: build success github build: passed
ci/Intel-compilation success Compilation OK
ci/intel-Testing success Testing PASS
ci/iol-abi-testing warning Testing issues

Commit Message

Danilewicz, MarcinX June 21, 2022, 8:16 a.m. UTC
  Enable congestion management (CMAN, either RED or PIE) at init time
from the profile configuration file instead of at build time.

The CMAN code is now always built in, but it is not used when
no RED or PIE profile is configured.
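
(For illustration only, not part of the patch: with this change the choice
is made per subport purely through the cman_params pointer, so an
application that does not want RED/PIE simply leaves it NULL.)

#include <rte_sched.h>

/* Sketch: a subport configured without any CMAN profile. The RED/PIE
 * code is compiled in but stays unused because cman_params is NULL. */
static struct rte_sched_subport_params subport_no_cman = {
	/* pipe profiles, queue sizes, etc. omitted for brevity */
	.cman_params = NULL,	/* no [red]/[pie] profile -> CMAN not in use */
};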

Signed-off-by: Marcin Danilewicz <marcinx.danilewicz@intel.com>
---
Log: v2 change in rte_sched.h to avoid ABI breakage.
     v3 changes from comments
     v4 rebase to 22.07-rc1
     v5 rebase to main latest
     v6 commit message fixed
---
 config/rte_config.h                      |   3 -
 drivers/net/softnic/rte_eth_softnic_tm.c |  12 --
 examples/ip_pipeline/tmgr.c              |   4 -
 examples/qos_sched/cfg_file.c            |  11 +-
 examples/qos_sched/cfg_file.h            |   2 -
 examples/qos_sched/init.c                |   4 -
 examples/qos_sched/main.h                |   2 -
 examples/qos_sched/profile.cfg           | 130 ++++++++++-----------
 examples/qos_sched/profile_pie.cfg       | 142 ++++++++++++++++++++++
 examples/qos_sched/profile_red.cfg       | 143 +++++++++++++++++++++++
 lib/sched/rte_sched.c                    |  47 +-------
 11 files changed, 359 insertions(+), 141 deletions(-)
 create mode 100644 examples/qos_sched/profile_pie.cfg
 create mode 100644 examples/qos_sched/profile_red.cfg
  

Comments

Cristian Dumitrescu June 21, 2022, 1:27 p.m. UTC | #1
Hi Marcin,

The code changes in the library (rte_sched.c) look good to me, but I do have some questions about the qos_sched app and the Soft NIC driver regarding how CMAN gets enabled/disabled.

How do we control whether congestion management (CMAN) should be disabled or enabled (WRED/PIE)? It looks to me like you are always enabling WRED by default, which is incorrect: we should have CMAN disabled by default (in init.c).

My preferred way to configure CMAN is:
a) CMAN should be disabled by default
b) If the CMAN parameters get configured in the profile.cfg, then and only then CMAN should be enabled according to the configuration.

In init.c, we can safely set the subport CMAN parameters to NULL as the built-in default when the profile.cfg is not provided, so we can safely remove the WRED params from there.

In cfg_file.c, we need to have a good way to disable/enable CMAN.
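
Something along these lines would be fine with me (just a rough sketch with
a hypothetical helper name, assuming the existing rte_cfgfile API): enable
CMAN only when the profile file actually contains a [red] or [pie] section,
and otherwise leave the subport cman_params at NULL:

#include <rte_cfgfile.h>
#include <rte_sched.h>

static struct rte_sched_cman_params cman_params;

/* Hypothetical sketch: pick the CMAN mode from the profile file and
 * attach it to the subport only when a matching section is present. */
static void
cfg_load_cman(struct rte_cfgfile *cfg,
	struct rte_sched_subport_params *subport_p)
{
	if (rte_cfgfile_has_section(cfg, "red")) {
		cman_params.cman_mode = RTE_SCHED_CMAN_RED;
		/* parse the "tc N wred ..." entries into cman_params.red_params */
		subport_p->cman_params = &cman_params;
	} else if (rte_cfgfile_has_section(cfg, "pie")) {
		cman_params.cman_mode = RTE_SCHED_CMAN_PIE;
		/* parse the "tc N qdelay ref" etc. entries into cman_params.pie_params */
		subport_p->cman_params = &cman_params;
	}
	/* neither section present: subport_p->cman_params stays NULL, CMAN stays disabled */
}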

In profile.cfg, please remove the WRED lines as opposed to commenting them out.

Is this OK with you?

Also, have you tested the qos_sched app and the Soft NIC driver with all 3 configuration files?

Thanks,
Cristian
  
Danilewicz, MarcinX June 22, 2022, 3:12 p.m. UTC | #2
Hi Cristian,

<SNIP>
> The code changes in the library (rte_sched.c) look good to me, but I do have
> some questions about the qos_sched app and the Soft NIC driver regarding how
> CMAN gets enabled/disabled.
That was an open question from the start. We finally found time to discuss it.
 
> How do we control whether congestion management (CMAN) should be
> disabled or enabled (WRED/PIE)? It looks to me like you are always enabling
> WRED by default, which is incorrect: we should have CMAN disabled by
> default (in init.c).
Cannot disagree.

> My preferred way to configure CMAN is:
> a) CMAN should be disabled by default
> b) If the CMAN parameters get configured in the profile.cfg, then and only
> then CMAN should be enabled according to the configuration.
OK

> In init.c, we can safely set the subport CMAN parameters to NULL as the
> built-in default when the profile.cfg is not provided, so we can safely remove
> the WRED params from there.
Agree

> In cfg_file.c, we need to have a good way to disable/enable CMAN.
Let me put something together and then you can have a look at it.

> In profile.cfg, please remove the WRED lines as opposed to commenting
> them out.
OK

> Is this OK with you?
That's perfectly fine and looks good.
 
> Also, have you tested the qos_sched app and the Soft NIC driver with all 3
> configuration files?
Yes, though not recently. I will repeat that testing as the final step.

Kind Regards,
/Marcin
  

Patch

diff --git a/config/rte_config.h b/config/rte_config.h
index 46549cb062..ae56a86394 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -88,9 +88,6 @@ 
 /* rte_power defines */
 #define RTE_MAX_LCORE_FREQS 64
 
-/* rte_sched defines */
-// RTE_SCHED_CMAN is not set
-
 /* rte_graph defines */
 #define RTE_GRAPH_BURST_SIZE 256
 #define RTE_LIBRTE_GRAPH_STATS 1
diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c
index 6a7766ba1c..3a5fd676e9 100644
--- a/drivers/net/softnic/rte_eth_softnic_tm.c
+++ b/drivers/net/softnic/rte_eth_softnic_tm.c
@@ -420,11 +420,7 @@  pmd_tm_node_type_get(struct rte_eth_dev *dev,
 	return 0;
 }
 
-#ifdef RTE_SCHED_CMAN
 #define WRED_SUPPORTED						1
-#else
-#define WRED_SUPPORTED						0
-#endif
 
 #define STATS_MASK_DEFAULT					\
 	(RTE_TM_STATS_N_PKTS |					\
@@ -2300,8 +2296,6 @@  tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
 	return NULL;
 }
 
-#ifdef RTE_SCHED_CMAN
-
 static void
 wred_profiles_set(struct rte_eth_dev *dev, uint32_t subport_id)
 {
@@ -2325,12 +2319,6 @@  wred_profiles_set(struct rte_eth_dev *dev, uint32_t subport_id)
 		}
 }
 
-#else
-
-#define wred_profiles_set(dev, subport_id)
-
-#endif
-
 static struct tm_shared_shaper *
 tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
 {
diff --git a/examples/ip_pipeline/tmgr.c b/examples/ip_pipeline/tmgr.c
index b138e885cf..e68e9961be 100644
--- a/examples/ip_pipeline/tmgr.c
+++ b/examples/ip_pipeline/tmgr.c
@@ -17,7 +17,6 @@  static uint32_t n_subport_profiles;
 static struct rte_sched_pipe_params
 	pipe_profile[TMGR_PIPE_PROFILE_MAX];
 
-#ifdef RTE_SCHED_CMAN
 static struct rte_sched_cman_params cman_params = {
 	.red_params = {
 		/* Traffic Class 0 Colors Green / Yellow / Red */
@@ -86,7 +85,6 @@  static struct rte_sched_cman_params cman_params = {
 		[12][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
 		},
 };
-#endif /* RTE_SCHED_CMAN */
 
 static uint32_t n_pipe_profiles;
 
@@ -96,9 +94,7 @@  static const struct rte_sched_subport_params subport_params_default = {
 	.pipe_profiles = pipe_profile,
 	.n_pipe_profiles = 0, /* filled at run time */
 	.n_max_pipe_profiles = RTE_DIM(pipe_profile),
-#ifdef RTE_SCHED_CMAN
 	.cman_params = &cman_params,
-#endif /* RTE_SCHED_CMAN */
 };
 
 static struct tmgr_port_list tmgr_port_list;
diff --git a/examples/qos_sched/cfg_file.c b/examples/qos_sched/cfg_file.c
index 450482f07d..3284b4d252 100644
--- a/examples/qos_sched/cfg_file.c
+++ b/examples/qos_sched/cfg_file.c
@@ -229,11 +229,14 @@  cfg_load_subport_profile(struct rte_cfgfile *cfg,
 	return 0;
 }
 
-#ifdef RTE_SCHED_CMAN
 void set_subport_cman_params(struct rte_sched_subport_params *subport_p,
 					struct rte_sched_cman_params cman_p)
 {
 	int j, k;
+
+	if (subport_p->cman_params != NULL)
+		return;
+
 	subport_p->cman_params->cman_mode = cman_p.cman_mode;
 
 	for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
@@ -261,7 +264,6 @@  void set_subport_cman_params(struct rte_sched_subport_params *subport_p,
 		}
 	}
 }
-#endif
 
 int
 cfg_load_subport(struct rte_cfgfile *cfg, struct rte_sched_subport_params *subport_params)
@@ -276,9 +278,7 @@  cfg_load_subport(struct rte_cfgfile *cfg, struct rte_sched_subport_params *subpo
 	memset(active_queues, 0, sizeof(active_queues));
 	n_active_queues = 0;
 
-#ifdef RTE_SCHED_CMAN
 	struct rte_sched_cman_params cman_params = {
-		.cman_mode = RTE_SCHED_CMAN_RED,
 		.red_params = { },
 	};
 
@@ -387,7 +387,6 @@  cfg_load_subport(struct rte_cfgfile *cfg, struct rte_sched_subport_params *subpo
 
 		}
 	}
-#endif /* RTE_SCHED_CMAN */
 
 	for (i = 0; i < MAX_SCHED_SUBPORTS; i++) {
 		char sec_name[CFG_NAME_LEN];
@@ -465,9 +464,7 @@  cfg_load_subport(struct rte_cfgfile *cfg, struct rte_sched_subport_params *subpo
 					}
 				}
 			}
-#ifdef RTE_SCHED_CMAN
 			set_subport_cman_params(subport_params+i, cman_params);
-#endif
 		}
 	}
 
diff --git a/examples/qos_sched/cfg_file.h b/examples/qos_sched/cfg_file.h
index 1a9dce9db5..19df91e7ba 100644
--- a/examples/qos_sched/cfg_file.h
+++ b/examples/qos_sched/cfg_file.h
@@ -12,10 +12,8 @@  int cfg_load_port(struct rte_cfgfile *cfg, struct rte_sched_port_params *port);
 
 int cfg_load_pipe(struct rte_cfgfile *cfg, struct rte_sched_pipe_params *pipe);
 
-#ifdef RTE_SCHED_CMAN
 void set_subport_cman_params(struct rte_sched_subport_params *subport_p,
 					struct rte_sched_cman_params cman_p);
-#endif
 
 int cfg_load_subport(struct rte_cfgfile *cfg, struct rte_sched_subport_params *subport);
 
diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c
index 8a0fb8a374..0afd553283 100644
--- a/examples/qos_sched/init.c
+++ b/examples/qos_sched/init.c
@@ -201,7 +201,6 @@  static struct rte_sched_subport_profile_params
 	},
 };
 
-#ifdef RTE_SCHED_CMAN
 struct rte_sched_cman_params cman_params = {
 	.cman_mode = RTE_SCHED_CMAN_RED,
 	.red_params = {
@@ -271,7 +270,6 @@  struct rte_sched_cman_params cman_params = {
 		[12][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
 	},
 };
-#endif /* RTE_SCHED_CMAN */
 
 struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS] = {
 	{
@@ -281,9 +279,7 @@  struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS] = {
 		.n_pipe_profiles = sizeof(pipe_profiles) /
 			sizeof(struct rte_sched_pipe_params),
 		.n_max_pipe_profiles = MAX_SCHED_PIPE_PROFILES,
-#ifdef RTE_SCHED_CMAN
 		.cman_params = &cman_params,
-#endif /* RTE_SCHED_CMAN */
 	},
 };
 
diff --git a/examples/qos_sched/main.h b/examples/qos_sched/main.h
index 915311bac8..76a68f585f 100644
--- a/examples/qos_sched/main.h
+++ b/examples/qos_sched/main.h
@@ -153,9 +153,7 @@  extern uint32_t active_queues[RTE_SCHED_QUEUES_PER_PIPE];
 extern uint32_t n_active_queues;
 
 extern struct rte_sched_port_params port_params;
-#ifdef RTE_SCHED_CMAN
 extern struct rte_sched_cman_params cman_params;
-#endif
 extern struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS];
 
 int app_parse_args(int argc, char **argv);
diff --git a/examples/qos_sched/profile.cfg b/examples/qos_sched/profile.cfg
index d4b21c0170..db65b0ed01 100644
--- a/examples/qos_sched/profile.cfg
+++ b/examples/qos_sched/profile.cfg
@@ -142,68 +142,68 @@  tc 12 wrr weights = 1 1 1 1
 ;tc 12 wred inv prob = 10 10 10
 ;tc 12 wred weight = 9 9 9
 
-[pie]
-tc 0 qdelay ref = 15
-tc 0 max burst = 150
-tc 0 update interval = 15
-tc 0 tailq th = 64
-
-tc 1 qdelay ref = 15
-tc 1 max burst = 150
-tc 1 update interval = 15
-tc 1 tailq th = 64
-
-tc 2 qdelay ref = 15
-tc 2 max burst = 150
-tc 2 update interval = 15
-tc 2 tailq th = 64
-
-tc 3 qdelay ref = 15
-tc 3 max burst = 150
-tc 3 update interval = 15
-tc 3 tailq th = 64
-
-tc 4 qdelay ref = 15
-tc 4 max burst = 150
-tc 4 update interval = 15
-tc 4 tailq th = 64
-
-tc 5 qdelay ref = 15
-tc 5 max burst = 150
-tc 5 update interval = 15
-tc 5 tailq th = 64
-
-tc 6 qdelay ref = 15
-tc 6 max burst = 150
-tc 6 update interval = 15
-tc 6 tailq th = 64
-
-tc 7 qdelay ref = 15
-tc 7 max burst = 150
-tc 7 update interval = 15
-tc 7 tailq th = 64
-
-tc 8 qdelay ref = 15
-tc 8 max burst = 150
-tc 8 update interval = 15
-tc 8 tailq th = 64
-
-tc 9 qdelay ref = 15
-tc 9 max burst = 150
-tc 9 update interval = 15
-tc 9 tailq th = 64
-
-tc 10 qdelay ref = 15
-tc 10 max burst = 150
-tc 10 update interval = 15
-tc 10 tailq th = 64
-
-tc 11 qdelay ref = 15
-tc 11 max burst = 150
-tc 11 update interval = 15
-tc 11 tailq th = 64
-
-tc 12 qdelay ref = 15
-tc 12 max burst = 150
-tc 12 update interval = 15
-tc 12 tailq th = 64
+;[pie]
+;tc 0 qdelay ref = 15
+;tc 0 max burst = 150
+;tc 0 update interval = 15
+;tc 0 tailq th = 64
+
+;tc 1 qdelay ref = 15
+;tc 1 max burst = 150
+;tc 1 update interval = 15
+;tc 1 tailq th = 64
+
+;tc 2 qdelay ref = 15
+;tc 2 max burst = 150
+;tc 2 update interval = 15
+;tc 2 tailq th = 64
+
+;tc 3 qdelay ref = 15
+;tc 3 max burst = 150
+;tc 3 update interval = 15
+;tc 3 tailq th = 64
+
+;tc 4 qdelay ref = 15
+;tc 4 max burst = 150
+;tc 4 update interval = 15
+;tc 4 tailq th = 64
+
+;tc 5 qdelay ref = 15
+;tc 5 max burst = 150
+;tc 5 update interval = 15
+;tc 5 tailq th = 64
+
+;tc 6 qdelay ref = 15
+;tc 6 max burst = 150
+;tc 6 update interval = 15
+;tc 6 tailq th = 64
+
+;tc 7 qdelay ref = 15
+;tc 7 max burst = 150
+;tc 7 update interval = 15
+;tc 7 tailq th = 64
+
+;tc 8 qdelay ref = 15
+;tc 8 max burst = 150
+;tc 8 update interval = 15
+;tc 8 tailq th = 64
+
+;tc 9 qdelay ref = 15
+;tc 9 max burst = 150
+;tc 9 update interval = 15
+;tc 9 tailq th = 64
+
+;tc 10 qdelay ref = 15
+;tc 10 max burst = 150
+;tc 10 update interval = 15
+;tc 10 tailq th = 64
+
+;tc 11 qdelay ref = 15
+;tc 11 max burst = 150
+;tc 11 update interval = 15
+;tc 11 tailq th = 64
+
+;tc 12 qdelay ref = 15
+;tc 12 max burst = 150
+;tc 12 update interval = 15
+;tc 12 tailq th = 64
diff --git a/examples/qos_sched/profile_pie.cfg b/examples/qos_sched/profile_pie.cfg
new file mode 100644
index 0000000000..241f748b33
--- /dev/null
+++ b/examples/qos_sched/profile_pie.cfg
@@ -0,0 +1,142 @@ 
+;   SPDX-License-Identifier: BSD-3-Clause
+;   Copyright(c) 2010-2019 Intel Corporation.
+
+; This file enables the following hierarchical scheduler configuration for each
+; 10GbE output port:
+;	* Single subport (subport 0):
+;		- Subport rate set to 100% of port rate
+;		- Each of the 13 traffic classes has rate set to 100% of port rate
+;	* 4K pipes per subport 0 (pipes 0 .. 4095) with identical configuration:
+;		- Pipe rate set to 1/4K of port rate
+;		- Each of the 13 traffic classes has rate set to 100% of pipe rate
+;		- Within lowest priority traffic class (best-effort), the byte-level
+;		  WRR weights for the 4 queues of best effort traffic class are set
+;		  to 1:1:1:1
+;
+; For more details, please refer to chapter "Quality of Service (QoS) Framework"
+; of Data Plane Development Kit (DPDK) Programmer's Guide.
+
+; Port configuration
+[port]
+frame overhead = 24
+number of subports per port = 1
+
+; Subport configuration
+[subport 0]
+number of pipes per subport = 4096
+queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64
+
+subport 0-8 = 0                ; These subports are configured with subport profile 0
+
+[subport profile 0]
+tb rate = 1250000000           ; Bytes per second
+tb size = 1000000              ; Bytes
+
+tc 0 rate = 1250000000         ; Bytes per second
+tc 1 rate = 1250000000         ; Bytes per second
+tc 2 rate = 1250000000         ; Bytes per second
+tc 3 rate = 1250000000         ; Bytes per second
+tc 4 rate = 1250000000         ; Bytes per second
+tc 5 rate = 1250000000         ; Bytes per second
+tc 6 rate = 1250000000         ; Bytes per second
+tc 7 rate = 1250000000         ; Bytes per second
+tc 8 rate = 1250000000         ; Bytes per second
+tc 9 rate = 1250000000         ; Bytes per second
+tc 10 rate = 1250000000        ; Bytes per second
+tc 11 rate = 1250000000        ; Bytes per second
+tc 12 rate = 1250000000        ; Bytes per second
+
+tc period = 10                 ; Milliseconds
+
+pipe 0-4095 = 0                ; These pipes are configured with pipe profile 0
+
+; Pipe configuration
+[pipe profile 0]
+tb rate = 305175               ; Bytes per second
+tb size = 1000000              ; Bytes
+
+tc 0 rate = 305175             ; Bytes per second
+tc 1 rate = 305175             ; Bytes per second
+tc 2 rate = 305175             ; Bytes per second
+tc 3 rate = 305175             ; Bytes per second
+tc 4 rate = 305175             ; Bytes per second
+tc 5 rate = 305175             ; Bytes per second
+tc 6 rate = 305175             ; Bytes per second
+tc 7 rate = 305175             ; Bytes per second
+tc 8 rate = 305175             ; Bytes per second
+tc 9 rate = 305175             ; Bytes per second
+tc 10 rate = 305175            ; Bytes per second
+tc 11 rate = 305175            ; Bytes per second
+tc 12 rate = 305175            ; Bytes per second
+
+tc period = 40                ; Milliseconds
+
+tc 12 oversubscription weight = 1
+
+tc 12 wrr weights = 1 1 1 1
+
+[pie]
+tc 0 qdelay ref = 15
+tc 0 max burst = 150
+tc 0 update interval = 15
+tc 0 tailq th = 64
+
+tc 1 qdelay ref = 15
+tc 1 max burst = 150
+tc 1 update interval = 15
+tc 1 tailq th = 64
+
+tc 2 qdelay ref = 15
+tc 2 max burst = 150
+tc 2 update interval = 15
+tc 2 tailq th = 64
+
+tc 3 qdelay ref = 15
+tc 3 max burst = 150
+tc 3 update interval = 15
+tc 3 tailq th = 64
+
+tc 4 qdelay ref = 15
+tc 4 max burst = 150
+tc 4 update interval = 15
+tc 4 tailq th = 64
+
+tc 5 qdelay ref = 15
+tc 5 max burst = 150
+tc 5 update interval = 15
+tc 5 tailq th = 64
+
+tc 6 qdelay ref = 15
+tc 6 max burst = 150
+tc 6 update interval = 15
+tc 6 tailq th = 64
+
+tc 7 qdelay ref = 15
+tc 7 max burst = 150
+tc 7 update interval = 15
+tc 7 tailq th = 64
+
+tc 8 qdelay ref = 15
+tc 8 max burst = 150
+tc 8 update interval = 15
+tc 8 tailq th = 64
+
+tc 9 qdelay ref = 15
+tc 9 max burst = 150
+tc 9 update interval = 15
+tc 9 tailq th = 64
+
+tc 10 qdelay ref = 15
+tc 10 max burst = 150
+tc 10 update interval = 15
+tc 10 tailq th = 64
+
+tc 11 qdelay ref = 15
+tc 11 max burst = 150
+tc 11 update interval = 15
+tc 11 tailq th = 64
+
+tc 12 qdelay ref = 15
+tc 12 max burst = 150
+tc 12 update interval = 15
+tc 12 tailq th = 64
diff --git a/examples/qos_sched/profile_red.cfg b/examples/qos_sched/profile_red.cfg
new file mode 100644
index 0000000000..4486d2799e
--- /dev/null
+++ b/examples/qos_sched/profile_red.cfg
@@ -0,0 +1,143 @@ 
+;   SPDX-License-Identifier: BSD-3-Clause
+;   Copyright(c) 2010-2019 Intel Corporation.
+
+; This file enables the following hierarchical scheduler configuration for each
+; 10GbE output port:
+;	* Single subport (subport 0):
+;		- Subport rate set to 100% of port rate
+;		- Each of the 13 traffic classes has rate set to 100% of port rate
+;	* 4K pipes per subport 0 (pipes 0 .. 4095) with identical configuration:
+;		- Pipe rate set to 1/4K of port rate
+;		- Each of the 13 traffic classes has rate set to 100% of pipe rate
+;		- Within lowest priority traffic class (best-effort), the byte-level
+;		  WRR weights for the 4 queues of best effort traffic class are set
+;		  to 1:1:1:1
+;
+; For more details, please refer to chapter "Quality of Service (QoS) Framework"
+; of Data Plane Development Kit (DPDK) Programmer's Guide.
+
+; Port configuration
+[port]
+frame overhead = 24
+number of subports per port = 1
+
+; Subport configuration
+[subport 0]
+number of pipes per subport = 4096
+queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64
+
+subport 0-8 = 0                ; These subports are configured with subport profile 0
+
+[subport profile 0]
+tb rate = 1250000000           ; Bytes per second
+tb size = 1000000              ; Bytes
+
+tc 0 rate = 1250000000         ; Bytes per second
+tc 1 rate = 1250000000         ; Bytes per second
+tc 2 rate = 1250000000         ; Bytes per second
+tc 3 rate = 1250000000         ; Bytes per second
+tc 4 rate = 1250000000         ; Bytes per second
+tc 5 rate = 1250000000         ; Bytes per second
+tc 6 rate = 1250000000         ; Bytes per second
+tc 7 rate = 1250000000         ; Bytes per second
+tc 8 rate = 1250000000         ; Bytes per second
+tc 9 rate = 1250000000         ; Bytes per second
+tc 10 rate = 1250000000        ; Bytes per second
+tc 11 rate = 1250000000        ; Bytes per second
+tc 12 rate = 1250000000        ; Bytes per second
+
+tc period = 10                 ; Milliseconds
+
+pipe 0-4095 = 0                ; These pipes are configured with pipe profile 0
+
+; Pipe configuration
+[pipe profile 0]
+tb rate = 305175               ; Bytes per second
+tb size = 1000000              ; Bytes
+
+tc 0 rate = 305175             ; Bytes per second
+tc 1 rate = 305175             ; Bytes per second
+tc 2 rate = 305175             ; Bytes per second
+tc 3 rate = 305175             ; Bytes per second
+tc 4 rate = 305175             ; Bytes per second
+tc 5 rate = 305175             ; Bytes per second
+tc 6 rate = 305175             ; Bytes per second
+tc 7 rate = 305175             ; Bytes per second
+tc 8 rate = 305175             ; Bytes per second
+tc 9 rate = 305175             ; Bytes per second
+tc 10 rate = 305175            ; Bytes per second
+tc 11 rate = 305175            ; Bytes per second
+tc 12 rate = 305175            ; Bytes per second
+
+tc period = 40                ; Milliseconds
+
+tc 12 oversubscription weight = 1
+
+tc 12 wrr weights = 1 1 1 1
+
+; RED params per traffic class and color (Green / Yellow / Red)
+[red]
+tc 0 wred min = 48 40 32
+tc 0 wred max = 64 64 64
+tc 0 wred inv prob = 10 10 10
+tc 0 wred weight = 9 9 9
+
+tc 1 wred min = 48 40 32
+tc 1 wred max = 64 64 64
+tc 1 wred inv prob = 10 10 10
+tc 1 wred weight = 9 9 9
+
+tc 2 wred min = 48 40 32
+tc 2 wred max = 64 64 64
+tc 2 wred inv prob = 10 10 10
+tc 2 wred weight = 9 9 9
+
+tc 3 wred min = 48 40 32
+tc 3 wred max = 64 64 64
+tc 3 wred inv prob = 10 10 10
+tc 3 wred weight = 9 9 9
+
+tc 4 wred min = 48 40 32
+tc 4 wred max = 64 64 64
+tc 4 wred inv prob = 10 10 10
+tc 4 wred weight = 9 9 9
+
+tc 5 wred min = 48 40 32
+tc 5 wred max = 64 64 64
+tc 5 wred inv prob = 10 10 10
+tc 5 wred weight = 9 9 9
+
+tc 6 wred min = 48 40 32
+tc 6 wred max = 64 64 64
+tc 6 wred inv prob = 10 10 10
+tc 6 wred weight = 9 9 9
+
+tc 7 wred min = 48 40 32
+tc 7 wred max = 64 64 64
+tc 7 wred inv prob = 10 10 10
+tc 7 wred weight = 9 9 9
+
+tc 8 wred min = 48 40 32
+tc 8 wred max = 64 64 64
+tc 8 wred inv prob = 10 10 10
+tc 8 wred weight = 9 9 9
+
+tc 9 wred min = 48 40 32
+tc 9 wred max = 64 64 64
+tc 9 wred inv prob = 10 10 10
+tc 9 wred weight = 9 9 9
+
+tc 10 wred min = 48 40 32
+tc 10 wred max = 64 64 64
+tc 10 wred inv prob = 10 10 10
+tc 10 wred weight = 9 9 9
+
+tc 11 wred min = 48 40 32
+tc 11 wred max = 64 64 64
+tc 11 wred inv prob = 10 10 10
+tc 11 wred weight = 9 9 9
+
+tc 12 wred min = 48 40 32
+tc 12 wred max = 64 64 64
+tc 12 wred inv prob = 10 10 10
+tc 12 wred weight = 9 9 9
diff --git a/lib/sched/rte_sched.c b/lib/sched/rte_sched.c
index 599c7e9536..c5fa9e4582 100644
--- a/lib/sched/rte_sched.c
+++ b/lib/sched/rte_sched.c
@@ -81,13 +81,11 @@  struct rte_sched_queue {
 
 struct rte_sched_queue_extra {
 	struct rte_sched_queue_stats stats;
-#ifdef RTE_SCHED_CMAN
 	RTE_STD_C11
 	union {
 		struct rte_red red;
 		struct rte_pie pie;
 	};
-#endif
 };
 
 enum grinder_state {
@@ -179,7 +177,6 @@  struct rte_sched_subport {
 	/* Pipe queues size */
 	uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
 
-#ifdef RTE_SCHED_CMAN
 	bool cman_enabled;
 	enum rte_sched_cman_mode cman;
 
@@ -188,7 +185,6 @@  struct rte_sched_subport {
 		struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE][RTE_COLORS];
 		struct rte_pie_config pie_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
 	};
-#endif
 
 	/* Scheduling loop detection */
 	uint32_t pipe_loop;
@@ -1084,7 +1080,6 @@  rte_sched_free_memory(struct rte_sched_port *port, uint32_t n_subports)
 	rte_free(port);
 }
 
-#ifdef RTE_SCHED_CMAN
 static int
 rte_sched_red_config(struct rte_sched_port *port,
 	struct rte_sched_subport *s,
@@ -1166,7 +1161,6 @@  rte_sched_cman_config(struct rte_sched_port *port,
 
 	return -EINVAL;
 }
-#endif
 
 int
 rte_sched_subport_tc_ov_config(struct rte_sched_port *port,
@@ -1285,7 +1279,6 @@  rte_sched_subport_config(struct rte_sched_port *port,
 		/* TC oversubscription is enabled by default */
 		s->tc_ov_enabled = 1;
 
-#ifdef RTE_SCHED_CMAN
 		if (params->cman_params != NULL) {
 			s->cman_enabled = true;
 			status = rte_sched_cman_config(port, s, params, n_subports);
@@ -1297,7 +1290,6 @@  rte_sched_subport_config(struct rte_sched_port *port,
 		} else {
 			s->cman_enabled = false;
 		}
-#endif
 
 		/* Scheduling loop detection */
 		s->pipe_loop = RTE_SCHED_PIPE_INVALID;
@@ -1823,7 +1815,7 @@  rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
 	struct rte_sched_subport *subport,
 	uint32_t qindex,
 	struct rte_mbuf *pkt,
-	__rte_unused uint32_t n_pkts_cman_dropped)
+	uint32_t n_pkts_cman_dropped)
 {
 	uint32_t tc_index = rte_sched_port_pipe_tc(port, qindex);
 	uint32_t pkt_len = pkt->pkt_len;
@@ -1849,21 +1841,17 @@  static inline void
 rte_sched_port_update_queue_stats_on_drop(struct rte_sched_subport *subport,
 	uint32_t qindex,
 	struct rte_mbuf *pkt,
-	__rte_unused uint32_t n_pkts_cman_dropped)
+	uint32_t n_pkts_cman_dropped)
 {
 	struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
 	uint32_t pkt_len = pkt->pkt_len;
 
 	qe->stats.n_pkts_dropped += 1;
 	qe->stats.n_bytes_dropped += pkt_len;
-#ifdef RTE_SCHED_CMAN
 	if (subport->cman_enabled)
 		qe->stats.n_pkts_cman_dropped += n_pkts_cman_dropped;
-#endif
 }
 
-#ifdef RTE_SCHED_CMAN
-
 static inline int
 rte_sched_port_cman_drop(struct rte_sched_port *port,
 	struct rte_sched_subport *subport,
@@ -1908,13 +1896,11 @@  static inline void
 rte_sched_port_red_set_queue_empty_timestamp(struct rte_sched_port *port,
 	struct rte_sched_subport *subport, uint32_t qindex)
 {
-	if (subport->cman_enabled) {
+	if (subport->cman_enabled && subport->cman == RTE_SCHED_CMAN_RED) {
 		struct rte_sched_queue_extra *qe = subport->queue_extra + qindex;
-		if (subport->cman == RTE_SCHED_CMAN_RED) {
-			struct rte_red *red = &qe->red;
+		struct rte_red *red = &qe->red;
 
-			rte_red_mark_queue_empty(red, port->time);
-		}
+		rte_red_mark_queue_empty(red, port->time);
 	}
 }
 
@@ -1933,29 +1919,6 @@  uint32_t qindex, uint32_t pkt_len, uint64_t time) {
 	}
 }
 
-#else
-
-static inline int rte_sched_port_cman_drop(struct rte_sched_port *port __rte_unused,
-	struct rte_sched_subport *subport __rte_unused,
-	struct rte_mbuf *pkt __rte_unused,
-	uint32_t qindex __rte_unused,
-	uint16_t qlen __rte_unused)
-{
-	return 0;
-}
-
-#define rte_sched_port_red_set_queue_empty_timestamp(port, subport, qindex)
-
-static inline void
-rte_sched_port_pie_dequeue(struct rte_sched_subport *subport __rte_unused,
-	uint32_t qindex __rte_unused,
-	uint32_t pkt_len __rte_unused,
-	uint64_t time __rte_unused) {
-	/* do-nothing when RTE_SCHED_CMAN not defined */
-}
-
-#endif /* RTE_SCHED_CMAN */
-
 #ifdef RTE_SCHED_DEBUG
 
 static inline void