[dpdk-dev,1/2] rte_sched: whitespace cleanup

Message ID 1440780599-14851-2-git-send-email-stephen@networkplumber.org (mailing list archive)
State Changes Requested, archived
Headers

Commit Message

Stephen Hemminger Aug. 28, 2015, 4:49 p.m. UTC
  Break needlessly long lines, remove {} around single-statement conditionals,
and fix indentation.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 lib/librte_sched/rte_sched.c | 363 ++++++++++++++++++++++---------------------
 1 file changed, 182 insertions(+), 181 deletions(-)
  

Comments

Cristian Dumitrescu Sept. 11, 2015, 5:31 p.m. UTC | #1
> -----Original Message-----
> From: Stephen Hemminger [mailto:stephen@networkplumber.org]
> Sent: Friday, August 28, 2015 7:50 PM
> To: Dumitrescu, Cristian
> Cc: dev@dpdk.org; Stephen Hemminger
> Subject: [PATCH 1/2] rte_sched: whitespace cleanup
> 
> Break long lines, remove {} on single line conditionals, fix indentation,
> and break needlessly long lines.
> 
> Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
> ---
>  lib/librte_sched/rte_sched.c | 363 ++++++++++++++++++++++----------------
> -----
>  1 file changed, 182 insertions(+), 181 deletions(-)
> 
> diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
> index 9c9419d..924c172 100644
> --- a/lib/librte_sched/rte_sched.c
> +++ b/lib/librte_sched/rte_sched.c
> @@ -317,88 +317,74 @@ rte_sched_port_check_params(struct
> rte_sched_port_params *params)
>  {
>  	uint32_t i, j;
> 
> -	if (params == NULL) {
> +	if (params == NULL)
>  		return -1;
> -	}
> 
>  	/* socket */
> -	if ((params->socket < 0) || (params->socket >=
> RTE_MAX_NUMA_NODES)) {
> +	if ((params->socket < 0) || (params->socket >=
> RTE_MAX_NUMA_NODES))
>  		return -3;
> -	}
> 
>  	/* rate */
> -	if (params->rate == 0) {
> +	if (params->rate == 0)
>  		return -4;
> -	}
> 
>  	/* mtu */
> -	if (params->mtu == 0) {
> +	if (params->mtu == 0)
>  		return -5;
> -	}
> 
>  	/* n_subports_per_port: non-zero, power of 2 */
> -	if ((params->n_subports_per_port == 0) ||
> (!rte_is_power_of_2(params->n_subports_per_port))) {
> +	if ((params->n_subports_per_port == 0) ||
> (!rte_is_power_of_2(params->n_subports_per_port)))
>  		return -6;
> -	}
> 
>  	/* n_pipes_per_subport: non-zero, power of 2 */
> -	if ((params->n_pipes_per_subport == 0) ||
> (!rte_is_power_of_2(params->n_pipes_per_subport))) {
> +	if ((params->n_pipes_per_subport == 0) ||
> (!rte_is_power_of_2(params->n_pipes_per_subport)))
>  		return -7;
> -	}
> 
>  	/* qsize: non-zero, power of 2, no bigger than 32K (due to 16-bit
> read/write pointers) */
> -	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
> +	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
>  		uint16_t qsize = params->qsize[i];
> 
> -		if ((qsize == 0) || (!rte_is_power_of_2(qsize))) {
> +		if ((qsize == 0) || (!rte_is_power_of_2(qsize)))
>  			return -8;
> -		}
>  	}
> 
>  	/* pipe_profiles and n_pipe_profiles */
>  	if ((params->pipe_profiles == NULL) ||
>  	    (params->n_pipe_profiles == 0) ||
> -	    (params->n_pipe_profiles >
> RTE_SCHED_PIPE_PROFILES_PER_PORT)) {
> +	    (params->n_pipe_profiles >
> RTE_SCHED_PIPE_PROFILES_PER_PORT))
>  		return -9;
> -	}
> 
> -	for (i = 0; i < params->n_pipe_profiles; i ++) {
> +	for (i = 0; i < params->n_pipe_profiles; i++) {
>  		struct rte_sched_pipe_params *p = params->pipe_profiles +
> i;
> 
>  		/* TB rate: non-zero, not greater than port rate */
> -		if ((p->tb_rate == 0) || (p->tb_rate > params->rate)) {
> +		if ((p->tb_rate == 0) || (p->tb_rate > params->rate))
>  			return -10;
> -		}
> 
>  		/* TB size: non-zero */
> -		if (p->tb_size == 0) {
> +		if (p->tb_size == 0)
>  			return -11;
> -		}
> 
>  		/* TC rate: non-zero, less than pipe rate */
> -		for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j ++) {
> -			if ((p->tc_rate[j] == 0) || (p->tc_rate[j] > p-
> >tb_rate)) {
> +		for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
> +			if ((p->tc_rate[j] == 0) || (p->tc_rate[j] > p-
> >tb_rate))
>  				return -12;
> -			}
>  		}
> 
>  		/* TC period: non-zero */
> -		if (p->tc_period == 0) {
> +		if (p->tc_period == 0)
>  			return -13;
> -		}
> 
>  #ifdef RTE_SCHED_SUBPORT_TC_OV
>  		/* TC3 oversubscription weight: non-zero */
> -		if (p->tc_ov_weight == 0) {
> +		if (p->tc_ov_weight == 0)
>  			return -14;
> -		}
>  #endif
> 
>  		/* Queue WRR weights: non-zero */
> -		for (j = 0; j < RTE_SCHED_QUEUES_PER_PIPE; j ++) {
> -			if (p->wrr_weights[j] == 0) {
> +		for (j = 0; j < RTE_SCHED_QUEUES_PER_PIPE; j++) {
> +			if (p->wrr_weights[j] == 0)
>  				return -15;
> -			}
>  		}
>  	}
> 
> @@ -424,34 +410,42 @@ rte_sched_port_get_array_base(struct
> rte_sched_port_params *params, enum rte_sch
>  	uint32_t base, i;
> 
>  	size_per_pipe_queue_array = 0;
> -	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
> -		size_per_pipe_queue_array +=
> RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * params->qsize[i] * sizeof(struct
> rte_mbuf *);
> -	}
> +	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
> +		size_per_pipe_queue_array +=
> RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS
> +			* params->qsize[i] * sizeof(struct rte_mbuf *);
> +
>  	size_queue_array = n_pipes_per_port *
> size_per_pipe_queue_array;
> 
>  	base = 0;
> 
> -	if (array == e_RTE_SCHED_PORT_ARRAY_SUBPORT) return base;
> +	if (array == e_RTE_SCHED_PORT_ARRAY_SUBPORT)
> +		return base;
> +
>  	base += RTE_CACHE_LINE_ROUNDUP(size_subport);
> +	if (array == e_RTE_SCHED_PORT_ARRAY_PIPE)
> +		return base;
> 
> -	if (array == e_RTE_SCHED_PORT_ARRAY_PIPE) return base;
>  	base += RTE_CACHE_LINE_ROUNDUP(size_pipe);
> +	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE)
> +		return base;
> 
> -	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE) return base;
>  	base += RTE_CACHE_LINE_ROUNDUP(size_queue);
> +	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA)
> +		return base;
> 
> -	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA) return
> base;
>  	base += RTE_CACHE_LINE_ROUNDUP(size_queue_extra);
> +	if (array == e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES)
> +		return base;
> 
> -	if (array == e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES) return
> base;
>  	base += RTE_CACHE_LINE_ROUNDUP(size_pipe_profiles);
> +	if (array == e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY)
> +		return base;
> 
> -	if (array == e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY) return base;
>  	base += RTE_CACHE_LINE_ROUNDUP(size_bmp_array);
> +	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY)
> +		return base;
> 
> -	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY) return
> base;
>  	base += RTE_CACHE_LINE_ROUNDUP(size_queue_array);
> -
>  	return base;
>  }
> 
> @@ -543,8 +537,8 @@ static inline uint64_t
>  rte_sched_time_ms_to_bytes(uint32_t time_ms, uint32_t rate)
>  {
>  	uint64_t time = time_ms;
> -	time = (time * rate) / 1000;
> 
> +	time = (time * rate) / 1000;
>  	return time;
>  }
> 
> @@ -553,7 +547,7 @@ rte_sched_port_config_pipe_profile_table(struct
> rte_sched_port *port, struct rte
>  {
>  	uint32_t i, j;
> 
> -	for (i = 0; i < port->n_pipe_profiles; i ++) {
> +	for (i = 0; i < port->n_pipe_profiles; i++) {
>  		struct rte_sched_pipe_params *src = params->pipe_profiles
> + i;
>  		struct rte_sched_pipe_profile *dst = port->pipe_profiles + i;
> 
> @@ -571,15 +565,16 @@ rte_sched_port_config_pipe_profile_table(struct
> rte_sched_port *port, struct rte
> 
>  		/* Traffic Classes */
>  		dst->tc_period = (uint32_t)
> rte_sched_time_ms_to_bytes(src->tc_period, params->rate);
> -		for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j ++) {
> -			dst->tc_credits_per_period[j] = (uint32_t)
> rte_sched_time_ms_to_bytes(src->tc_period, src->tc_rate[j]);
> -		}
> +		for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++)
> +			dst->tc_credits_per_period[j] =
> +				rte_sched_time_ms_to_bytes(src-
> >tc_period, src->tc_rate[j]);
> +
>  #ifdef RTE_SCHED_SUBPORT_TC_OV
>  		dst->tc_ov_weight = src->tc_ov_weight;
>  #endif
> 
>  		/* WRR */
> -		for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j ++) {
> +		for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
>  			uint32_t
> wrr_cost[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
>  			uint32_t lcd, lcd1, lcd2;
>  			uint32_t qindex;
> @@ -610,13 +605,12 @@ rte_sched_port_config_pipe_profile_table(struct
> rte_sched_port *port, struct rte
>  	}
> 
>  	port->pipe_tc3_rate_max = 0;
> -	for (i = 0; i < port->n_pipe_profiles; i ++) {
> +	for (i = 0; i < port->n_pipe_profiles; i++) {
>  		struct rte_sched_pipe_params *src = params->pipe_profiles
> + i;
>  		uint32_t pipe_tc3_rate = src->tc_rate[3];
> 
> -		if (port->pipe_tc3_rate_max < pipe_tc3_rate) {
> +		if (port->pipe_tc3_rate_max < pipe_tc3_rate)
>  			port->pipe_tc3_rate_max = pipe_tc3_rate;
> -		}
>  	}
>  }
> 
> @@ -628,15 +622,13 @@ rte_sched_port_config(struct
> rte_sched_port_params *params)
> 
>  	/* Check user parameters. Determine the amount of memory to
> allocate */
>  	mem_size = rte_sched_port_get_memory_footprint(params);
> -	if (mem_size == 0) {
> +	if (mem_size == 0)
>  		return NULL;
> -	}
> 
>  	/* Allocate memory to store the data structures */
>  	port = rte_zmalloc("qos_params", mem_size,
> RTE_CACHE_LINE_SIZE);
> -	if (port == NULL) {
> +	if (port == NULL)
>  		return NULL;
> -	}
> 
>  	/* User parameters */
>  	port->n_subports_per_port = params->n_subports_per_port;
> @@ -662,9 +654,8 @@ rte_sched_port_config(struct
> rte_sched_port_params *params)
>  				params->red_params[i][j].wq_log2,
>  				params->red_params[i][j].min_th,
>  				params->red_params[i][j].max_th,
> -				params->red_params[i][j].maxp_inv) != 0) {
> +				params->red_params[i][j].maxp_inv) != 0)
>  				return NULL;
> -			}
>  		}
>  	}
>  #endif
> @@ -688,13 +679,20 @@ rte_sched_port_config(struct
> rte_sched_port_params *params)
>  	rte_sched_port_config_qsize(port);
> 
>  	/* Large data structures */
> -	port->subport = (struct rte_sched_subport *) (port->memory +
> rte_sched_port_get_array_base(params,
> e_RTE_SCHED_PORT_ARRAY_SUBPORT));
> -	port->pipe = (struct rte_sched_pipe *) (port->memory +
> rte_sched_port_get_array_base(params,
> e_RTE_SCHED_PORT_ARRAY_PIPE));
> -	port->queue = (struct rte_sched_queue *) (port->memory +
> rte_sched_port_get_array_base(params,
> e_RTE_SCHED_PORT_ARRAY_QUEUE));
> -	port->queue_extra = (struct rte_sched_queue_extra *) (port-
> >memory + rte_sched_port_get_array_base(params,
> e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA));
> -	port->pipe_profiles = (struct rte_sched_pipe_profile *) (port-
> >memory + rte_sched_port_get_array_base(params,
> e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES));
> -	port->bmp_array =  port->memory +
> rte_sched_port_get_array_base(params,
> e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY);
> -	port->queue_array = (struct rte_mbuf **) (port->memory +
> rte_sched_port_get_array_base(params,
> e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY));
> +	port->subport = (struct rte_sched_subport *)
> +		(port->memory + rte_sched_port_get_array_base(params,
> e_RTE_SCHED_PORT_ARRAY_SUBPORT));
> +	port->pipe = (struct rte_sched_pipe *)
> +		(port->memory + rte_sched_port_get_array_base(params,
> e_RTE_SCHED_PORT_ARRAY_PIPE));
> +	port->queue = (struct rte_sched_queue *)
> +		(port->memory + rte_sched_port_get_array_base(params,
> e_RTE_SCHED_PORT_ARRAY_QUEUE));
> +	port->queue_extra = (struct rte_sched_queue_extra *)
> +		(port->memory + rte_sched_port_get_array_base(params,
> e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA));
> +	port->pipe_profiles = (struct rte_sched_pipe_profile *)
> +		(port->memory + rte_sched_port_get_array_base(params,
> e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES));
> +	port->bmp_array =  port->memory
> +		+ rte_sched_port_get_array_base(params,
> e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY);
> +	port->queue_array = (struct rte_mbuf **)
> +		(port->memory + rte_sched_port_get_array_base(params,
> e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY));
> 
>  	/* Pipe profile table */
>  	rte_sched_port_config_pipe_profile_table(port, params);
> @@ -707,9 +705,10 @@ rte_sched_port_config(struct
> rte_sched_port_params *params)
>  		RTE_LOG(ERR, SCHED, "Bitmap init error\n");
>  		return NULL;
>  	}
> -	for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i ++) {
> +
> +	for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++)
>  		port->grinder_base_bmp_pos[i] =
> RTE_SCHED_PIPE_INVALID;
> -	}
> +
> 
>  	return port;
>  }
> @@ -718,9 +717,8 @@ void
>  rte_sched_port_free(struct rte_sched_port *port)
>  {
>  	/* Check user parameters */
> -	if (port == NULL){
> +	if (port == NULL)
>  		return;
> -	}
> 
>  	rte_bitmap_free(port->bmp);
>  	rte_free(port);
> @@ -765,27 +763,22 @@ rte_sched_subport_config(struct rte_sched_port
> *port,
>  	/* Check user parameters */
>  	if ((port == NULL) ||
>  	    (subport_id >= port->n_subports_per_port) ||
> -		(params == NULL)) {
> +		(params == NULL))
>  		return -1;
> -	}
> 
> -	if ((params->tb_rate == 0) || (params->tb_rate > port->rate)) {
> +	if ((params->tb_rate == 0) || (params->tb_rate > port->rate))
>  		return -2;
> -	}
> 
> -	if (params->tb_size == 0) {
> +	if (params->tb_size == 0)
>  		return -3;
> -	}
> 
> -	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
> -		if ((params->tc_rate[i] == 0) || (params->tc_rate[i] > params-
> >tb_rate)) {
> +	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
> +		if ((params->tc_rate[i] == 0) || (params->tc_rate[i] > params-
> >tb_rate))
>  			return -4;
> -		}
>  	}
> 
> -	if (params->tc_period == 0) {
> +	if (params->tc_period == 0)
>  		return -5;
> -	}
> 
>  	s = port->subport + subport_id;
> 
> @@ -804,19 +797,21 @@ rte_sched_subport_config(struct rte_sched_port
> *port,
>  	s->tb_credits = s->tb_size / 2;
> 
>  	/* Traffic Classes (TCs) */
> -	s->tc_period = (uint32_t) rte_sched_time_ms_to_bytes(params-
> >tc_period, port->rate);
> -	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
> -		s->tc_credits_per_period[i] = (uint32_t)
> rte_sched_time_ms_to_bytes(params->tc_period, params->tc_rate[i]);
> +	s->tc_period = rte_sched_time_ms_to_bytes(params->tc_period,
> port->rate);
> +	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
> +		s->tc_credits_per_period[i] =
> rte_sched_time_ms_to_bytes(params->tc_period,
> +
> params->tc_rate[i]);
>  	}
> +
>  	s->tc_time = port->time + s->tc_period;
> -	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
> +	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
>  		s->tc_credits[i] = s->tc_credits_per_period[i];
> -	}
> 
>  #ifdef RTE_SCHED_SUBPORT_TC_OV
>  	/* TC oversubscription */
>  	s->tc_ov_wm_min = port->mtu;
> -	s->tc_ov_wm_max = (uint32_t)
> rte_sched_time_ms_to_bytes(params->tc_period, port-
> >pipe_tc3_rate_max);
> +	s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(params-
> >tc_period,
> +						     port-
> >pipe_tc3_rate_max);
>  	s->tc_ov_wm = s->tc_ov_wm_max;
>  	s->tc_ov_period_id = 0;
>  	s->tc_ov = 0;
> @@ -846,15 +841,13 @@ rte_sched_pipe_config(struct rte_sched_port
> *port,
>  	if ((port == NULL) ||
>  	    (subport_id >= port->n_subports_per_port) ||
>  		(pipe_id >= port->n_pipes_per_subport) ||
> -		((!deactivate) && (profile >= port->n_pipe_profiles))) {
> +		((!deactivate) && (profile >= port->n_pipe_profiles)))
>  		return -1;
> -	}
> 
>  	/* Check that subport configuration is valid */
>  	s = port->subport + subport_id;
> -	if (s->tb_period == 0) {
> +	if (s->tb_period == 0)
>  		return -2;
> -	}
> 
>  	p = port->pipe + (subport_id * port->n_pipes_per_subport +
> pipe_id);
> 
> @@ -883,9 +876,8 @@ rte_sched_pipe_config(struct rte_sched_port *port,
>  		memset(p, 0, sizeof(struct rte_sched_pipe));
>  	}
> 
> -	if (deactivate) {
> +	if (deactivate)
>  		return 0;
> -	}
> 
>  	/* Apply the new pipe configuration */
>  	p->profile = profile;
> @@ -897,15 +889,17 @@ rte_sched_pipe_config(struct rte_sched_port
> *port,
> 
>  	/* Traffic Classes (TCs) */
>  	p->tc_time = port->time + params->tc_period;
> -	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
> +	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
>  		p->tc_credits[i] = params->tc_credits_per_period[i];
> -	}
> +
> 
>  #ifdef RTE_SCHED_SUBPORT_TC_OV
>  	{
>  		/* Subport TC3 oversubscription */
> -		double subport_tc3_rate = ((double) s-
> >tc_credits_per_period[3]) / ((double) s->tc_period);
> -		double pipe_tc3_rate = ((double) params-
> >tc_credits_per_period[3]) / ((double) params->tc_period);
> +		double subport_tc3_rate = (double)s-
> >tc_credits_per_period[3])
> +					/ (double)s->tc_period;
> +		double pipe_tc3_rate = (double)params-
> >tc_credits_per_period[3])
> +					/ (double)params->tc_period;
>  		uint32_t tc3_ov = s->tc_ov;
> 
>  		s->tc_ov_n += params->tc_ov_weight;
> @@ -976,9 +970,8 @@ rte_sched_subport_read_stats(struct
> rte_sched_port *port,
>  	if ((port == NULL) ||
>  	    (subport_id >= port->n_subports_per_port) ||
>  		(stats == NULL) ||
> -		(tc_ov == NULL)) {
> +		(tc_ov == NULL))
>  		return -1;
> -	}
>  	s = port->subport + subport_id;
> 
>  	/* Copy subport stats and clear */
> @@ -1004,9 +997,8 @@ rte_sched_queue_read_stats(struct rte_sched_port
> *port,
>  	if ((port == NULL) ||
>  	    (queue_id >= rte_sched_port_queues_per_port(port)) ||
>  		(stats == NULL) ||
> -		(qlen == NULL)) {
> +		(qlen == NULL))
>  		return -1;
> -	}
>  	q = port->queue + queue_id;
>  	qe = port->queue_extra + queue_id;
> 
> @@ -1021,7 +1013,8 @@ rte_sched_queue_read_stats(struct
> rte_sched_port *port,
>  }
> 
>  static inline uint32_t
> -rte_sched_port_qindex(struct rte_sched_port *port, uint32_t subport,
> uint32_t pipe, uint32_t traffic_class, uint32_t queue)
> +rte_sched_port_qindex(struct rte_sched_port *port, uint32_t subport,
> +		      uint32_t pipe, uint32_t traffic_class, uint32_t queue)
>  {
>  	uint32_t result;
> 
> @@ -1038,7 +1031,8 @@ rte_sched_port_qbase(struct rte_sched_port
> *port, uint32_t qindex)
>  	uint32_t pindex = qindex >> 4;
>  	uint32_t qpos = qindex & 0xF;
> 
> -	return (port->queue_array + pindex * port->qsize_sum + port-
> >qsize_add[qpos]);
> +	return port->queue_array + pindex * port->qsize_sum
> +		+ port->qsize_add[qpos];
>  }
> 
>  static inline uint16_t
> @@ -1074,9 +1068,11 @@ rte_sched_port_queue_is_full(struct
> rte_sched_port *port, uint32_t qindex)
>  #ifdef RTE_SCHED_COLLECT_STATS
> 
>  static inline void
> -rte_sched_port_update_subport_stats(struct rte_sched_port *port,
> uint32_t qindex, struct rte_mbuf *pkt)
> +rte_sched_port_update_subport_stats(struct rte_sched_port *port,
> +				    uint32_t qindex, struct rte_mbuf *pkt)
>  {
> -	struct rte_sched_subport *s = port->subport + (qindex /
> rte_sched_port_queues_per_subport(port));
> +	struct rte_sched_subport *s = port->subport
> +		+ (qindex / rte_sched_port_queues_per_subport(port));
>  	uint32_t tc_index = (qindex >> 2) & 0x3;
>  	uint32_t pkt_len = pkt->pkt_len;
> 
> @@ -1085,7 +1081,8 @@ rte_sched_port_update_subport_stats(struct
> rte_sched_port *port, uint32_t qindex
>  }
> 
>  static inline void
> -rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port
> *port, uint32_t qindex, struct rte_mbuf *pkt)
> +rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port
> *port, uint32_t qindex,
> +					    struct rte_mbuf *pkt)
>  {
>  	struct rte_sched_subport *s = port->subport + (qindex /
> rte_sched_port_queues_per_subport(port));
>  	uint32_t tc_index = (qindex >> 2) & 0x3;
> @@ -1096,7 +1093,8 @@
> rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port
> *port, uint32_
>  }
> 
>  static inline void
> -rte_sched_port_update_queue_stats(struct rte_sched_port *port,
> uint32_t qindex, struct rte_mbuf *pkt)
> +rte_sched_port_update_queue_stats(struct rte_sched_port *port,
> uint32_t qindex,
> +				  struct rte_mbuf *pkt)
>  {
>  	struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
>  	uint32_t pkt_len = pkt->pkt_len;
> @@ -1106,7 +1104,8 @@ rte_sched_port_update_queue_stats(struct
> rte_sched_port *port, uint32_t qindex,
>  }
> 
>  static inline void
> -rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port
> *port, uint32_t qindex, struct rte_mbuf *pkt)
> +rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port
> *port,
> +					  uint32_t qindex, struct rte_mbuf
> *pkt)
>  {
>  	struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
>  	uint32_t pkt_len = pkt->pkt_len;
> @@ -1120,11 +1119,12 @@
> rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port
> *port, uint32_t
>  #ifdef RTE_SCHED_RED
> 
>  static inline int
> -rte_sched_port_red_drop(struct rte_sched_port *port, struct rte_mbuf
> *pkt, uint32_t qindex, uint16_t qlen)
> +rte_sched_port_red_drop(struct rte_sched_port *port, struct rte_mbuf
> *pkt,
> +			uint32_t qindex, uint16_t qlen)
>  {
>  	struct rte_sched_queue_extra *qe;
>  	struct rte_red_config *red_cfg;
> -    struct rte_red *red;
> +	struct rte_red *red;
>  	uint32_t tc_index;
>  	enum rte_meter_color color;
> 
> @@ -1142,10 +1142,11 @@ rte_sched_port_red_drop(struct rte_sched_port
> *port, struct rte_mbuf *pkt, uint3
>  }
> 
>  static inline void
> -rte_sched_port_set_queue_empty_timestamp(struct rte_sched_port
> *port, uint32_t qindex)
> +rte_sched_port_set_queue_empty_timestamp(struct rte_sched_port
> *port,
> +					 uint32_t qindex)
>  {
>  	struct rte_sched_queue_extra *qe;
> -    struct rte_red *red;
> +	struct rte_red *red;
> 
>  	qe = port->queue_extra + qindex;
>  	red = &qe->red;
> @@ -1170,17 +1171,17 @@ debug_pipe_is_empty(struct rte_sched_port
> *port, uint32_t pindex)
> 
>  	qindex = pindex << 4;
> 
> -	for (i = 0; i < 16; i ++){
> +	for (i = 0; i < 16; i++) {
>  		uint32_t queue_empty =
> rte_sched_port_queue_is_empty(port, qindex + i);
>  		uint32_t bmp_bit_clear = (rte_bitmap_get(port->bmp,
> qindex + i) == 0);
> 
> -		if (queue_empty != bmp_bit_clear){
> -			rte_panic("Queue status mismatch for queue %u of
> pipe %u\n", i, pindex);
> -		}
> +		if (queue_empty != bmp_bit_clear)
> +			rte_panic("Queue status mismatch for queue %u of
> pipe %u\n",
> +				  i, pindex);
> 
> -		if (!queue_empty){
> +
> +		if (!queue_empty)
>  			return 0;
> -		}
>  	}
> 
>  	return 1;
> @@ -1192,13 +1193,12 @@ debug_check_queue_slab(struct rte_sched_port
> *port, uint32_t bmp_pos, uint64_t b
>  	uint64_t mask;
>  	uint32_t i, panic;
> 
> -	if (bmp_slab == 0){
> +	if (bmp_slab == 0)
>  		rte_panic("Empty slab at position %u\n", bmp_pos);
> -	}
> 
>  	panic = 0;
> -	for (i = 0, mask = 1; i < 64; i ++, mask <<= 1) {
> -		if (mask & bmp_slab){
> +	for (i = 0, mask = 1; i < 64; i++, mask <<= 1) {
> +		if (mask & bmp_slab) {
>  			if (rte_sched_port_queue_is_empty(port, bmp_pos
> + i)) {
>  				printf("Queue %u (slab offset %u) is
> empty\n", bmp_pos + i, i);
>  				panic = 1;
> @@ -1206,7 +1206,7 @@ debug_check_queue_slab(struct rte_sched_port
> *port, uint32_t bmp_pos, uint64_t b
>  		}
>  	}
> 
> -	if (panic){
> +	if (panic) {
>  		rte_panic("Empty queues in slab 0x%" PRIx64 "starting at
> position %u\n",
>  			bmp_slab, bmp_pos);
>  	}
> @@ -1237,7 +1237,8 @@ rte_sched_port_enqueue_qptrs_prefetch0(struct
> rte_sched_port *port, struct rte_m
>  }
> 
>  static inline void
> -rte_sched_port_enqueue_qwa_prefetch0(struct rte_sched_port *port,
> uint32_t qindex, struct rte_mbuf **qbase)
> +rte_sched_port_enqueue_qwa_prefetch0(struct rte_sched_port *port,
> +				     uint32_t qindex, struct rte_mbuf **qbase)
>  {
>  	struct rte_sched_queue *q;
>  	struct rte_mbuf **q_qw;
> @@ -1252,7 +1253,8 @@ rte_sched_port_enqueue_qwa_prefetch0(struct
> rte_sched_port *port, uint32_t qinde
>  }
> 
>  static inline int
> -rte_sched_port_enqueue_qwa(struct rte_sched_port *port, uint32_t
> qindex, struct rte_mbuf **qbase, struct rte_mbuf *pkt)
> +rte_sched_port_enqueue_qwa(struct rte_sched_port *port, uint32_t
> qindex,
> +			   struct rte_mbuf **qbase, struct rte_mbuf *pkt)
>  {
>  	struct rte_sched_queue *q;
>  	uint16_t qsize;
> @@ -1274,7 +1276,7 @@ rte_sched_port_enqueue_qwa(struct
> rte_sched_port *port, uint32_t qindex, struct
> 
>  	/* Enqueue packet */
>  	qbase[q->qw & (qsize - 1)] = pkt;
> -	q->qw ++;
> +	q->qw++;
> 
>  	/* Activate queue in the port bitmap */
>  	rte_bitmap_set(port->bmp, qindex);
> @@ -1297,7 +1299,7 @@ rte_sched_port_enqueue(struct rte_sched_port
> *port, struct rte_mbuf **pkts, uint
> 
>  	result = 0;
> 
> -	for (i = 0; i < n_pkts; i ++) {
> +	for (i = 0; i < n_pkts; i++) {
>  		struct rte_mbuf *pkt;
>  		struct rte_mbuf **q_base;
>  		uint32_t subport, pipe, traffic_class, queue, qindex;
> @@ -1324,17 +1326,20 @@ rte_sched_port_enqueue(struct rte_sched_port
> *port, struct rte_mbuf **pkts, uint
>   * diagram below:
>   *
>   *   p00  _______   p10  _______   p20  _______   p30  _______
> - * ----->|       |----->|       |----->|       |----->|       |----->
> + *----->|       |----->|       |----->|       |----->|       |----->
>   *       |   0   |      |   1   |      |   2   |      |   3   |
> - * ----->|_______|----->|_______|----->|_______|----->|_______|----->
> + *----->|_______|----->|_______|----->|_______|----->|_______|----->
>   *   p01            p11            p21            p31
>   *
>   ***/

You are breaking the ASCII diagram above by removing the leading space on some lines but not all of them. Please check that the diagram is still aligned correctly after the space removal.

>  int
>  rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf
> **pkts, uint32_t n_pkts)
>  {
> -	struct rte_mbuf *pkt00, *pkt01, *pkt10, *pkt11, *pkt20, *pkt21,
> *pkt30, *pkt31, *pkt_last;
> -	struct rte_mbuf **q00_base, **q01_base, **q10_base,
> **q11_base, **q20_base, **q21_base, **q30_base, **q31_base,
> **q_last_base;
> +	struct rte_mbuf *pkt00, *pkt01, *pkt10, *pkt11, *pkt20, *pkt21,
> +		*pkt30, *pkt31, *pkt_last;
> +	struct rte_mbuf **q00_base, **q01_base, **q10_base,
> +		**q11_base, **q20_base, **q21_base, **q30_base,
> +		**q31_base, **q_last_base;
>  	uint32_t q00, q01, q10, q11, q20, q21, q30, q31, q_last;
>  	uint32_t r00, r01, r10, r11, r20, r21, r30, r31, r_last;
>  	uint32_t result, i;
> @@ -1347,25 +1352,23 @@ rte_sched_port_enqueue(struct rte_sched_port
> *port, struct rte_mbuf **pkts, uint
>  		uint32_t q[5];
> 
>  		/* Prefetch the mbuf structure of each packet */
> -		for (i = 0; i < n_pkts; i ++) {
> +		for (i = 0; i < n_pkts; i++)
>  			rte_prefetch0(pkts[i]);
> -		}
> 
>  		/* Prefetch the queue structure for each queue */
> -		for (i = 0; i < n_pkts; i ++) {
> +		for (i = 0; i < n_pkts; i++)
>  			q[i] =
> rte_sched_port_enqueue_qptrs_prefetch0(port, pkts[i]);
> -		}
> 
>  		/* Prefetch the write pointer location of each queue */
> -		for (i = 0; i < n_pkts; i ++) {
> +		for (i = 0; i < n_pkts; i++) {
>  			q_base[i] = rte_sched_port_qbase(port, q[i]);
>  			rte_sched_port_enqueue_qwa_prefetch0(port, q[i],
> q_base[i]);
>  		}
> 
>  		/* Write each packet to its queue */
> -		for (i = 0; i < n_pkts; i ++) {
> -			result += rte_sched_port_enqueue_qwa(port, q[i],
> q_base[i], pkts[i]);
> -		}
> +		for (i = 0; i < n_pkts; i++)
> +			result += rte_sched_port_enqueue_qwa(port, q[i],
> +							     q_base[i], pkts[i]);
> 
>  		return result;
>  	}
> @@ -1537,9 +1540,8 @@ grinder_tc_ov_credits_update(struct
> rte_sched_port *port, uint32_t pos)
>  	uint32_t tc_ov_consumption_max;
>  	uint32_t tc_ov_wm = subport->tc_ov_wm;
> 
> -	if (subport->tc_ov == 0) {
> +	if (subport->tc_ov == 0)
>  		return subport->tc_ov_wm_max;
> -	}
> 
>  	tc_ov_consumption[0] = subport->tc_credits_per_period[0] -
> subport->tc_credits[0];
>  	tc_ov_consumption[1] = subport->tc_credits_per_period[1] -
> subport->tc_credits[1];
> @@ -1551,16 +1553,16 @@ grinder_tc_ov_credits_update(struct
> rte_sched_port *port, uint32_t pos)
> 
>  	if (tc_ov_consumption[3] > (tc_ov_consumption_max - port->mtu))
> {
>  		tc_ov_wm  -= tc_ov_wm >> 7;
> -		if (tc_ov_wm < subport->tc_ov_wm_min) {
> +		if (tc_ov_wm < subport->tc_ov_wm_min)
>  			tc_ov_wm = subport->tc_ov_wm_min;
> -		}
> +
>  		return tc_ov_wm;
>  	}
> 
>  	tc_ov_wm += (tc_ov_wm >> 7) + 1;
> -	if (tc_ov_wm > subport->tc_ov_wm_max) {
> +	if (tc_ov_wm > subport->tc_ov_wm_max)
>  		tc_ov_wm = subport->tc_ov_wm_max;
> -	}
> +
>  	return tc_ov_wm;
>  }
> 
> @@ -1595,7 +1597,7 @@ grinder_credits_update(struct rte_sched_port
> *port, uint32_t pos)
>  		subport->tc_credits[3] = subport->tc_credits_per_period[3];
> 
>  		subport->tc_time = port->time + subport->tc_period;
> -		subport->tc_ov_period_id ++;
> +		subport->tc_ov_period_id++;
>  	}
> 
>  	/* Pipe TCs */
> @@ -1642,9 +1644,8 @@ grinder_credits_check(struct rte_sched_port
> *port, uint32_t pos)
>  		(pkt_len <= pipe_tb_credits) &&
>  		(pkt_len <= pipe_tc_credits);
> 
> -	if (!enough_credits) {
> +	if (!enough_credits)
>  		return 0;
> -	}
> 
>  	/* Update port credits */
>  	subport->tb_credits -= pkt_len;
> @@ -1682,9 +1683,8 @@ grinder_credits_check(struct rte_sched_port
> *port, uint32_t pos)
>  		(pkt_len <= pipe_tc_credits) &&
>  		(pkt_len <= pipe_tc_ov_credits);
> 
> -	if (!enough_credits) {
> +	if (!enough_credits)
>  		return 0;
> -	}
> 
>  	/* Update pipe and subport credits */
>  	subport->tb_credits -= pkt_len;
> @@ -1709,17 +1709,16 @@ grinder_schedule(struct rte_sched_port *port,
> uint32_t pos)
>  	uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
> 
>  #if RTE_SCHED_TS_CREDITS_CHECK
> -	if (!grinder_credits_check(port, pos)) {
> +	if (!grinder_credits_check(port, pos))
>  		return 0;
> -	}
>  #endif
> 
>  	/* Advance port time */
>  	port->time += pkt_len;
> 
>  	/* Send packet */
> -	port->pkts_out[port->n_pkts_out ++] = pkt;
> -	queue->qr ++;
> +	port->pkts_out[port->n_pkts_out++] = pkt;
> +	queue->qr++;
>  	grinder->wrr_tokens[grinder->qpos] += pkt_len * grinder-
> >wrr_cost[grinder->qpos];
>  	if (queue->qr == queue->qw) {
>  		uint32_t qindex = grinder->qindex[grinder->qpos];
> @@ -1742,9 +1741,10 @@ grinder_schedule(struct rte_sched_port *port,
> uint32_t pos)
>  static inline int
>  grinder_pipe_exists(struct rte_sched_port *port, uint32_t base_pipe)
>  {
> -	__m128i index = _mm_set1_epi32 (base_pipe);
> +	__m128i index = _mm_set1_epi32(base_pipe);
>  	__m128i pipes = _mm_load_si128((__m128i *)port-
> >grinder_base_bmp_pos);
>  	__m128i res = _mm_cmpeq_epi32(pipes, index);
> +
>  	pipes = _mm_load_si128((__m128i *)(port->grinder_base_bmp_pos
> + 4));
>  	pipes = _mm_cmpeq_epi32(pipes, index);
>  	res = _mm_or_si128(res, pipes);
> @@ -1762,10 +1762,9 @@ grinder_pipe_exists(struct rte_sched_port *port,
> uint32_t base_pipe)
>  {
>  	uint32_t i;
> 
> -	for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i ++) {
> -		if (port->grinder_base_bmp_pos[i] == base_pipe) {
> +	for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++) {
> +		if (port->grinder_base_bmp_pos[i] == base_pipe)
>  			return 1;
> -		}
>  	}
> 
>  	return 0;
> @@ -1774,7 +1773,8 @@ grinder_pipe_exists(struct rte_sched_port *port,
> uint32_t base_pipe)
>  #endif /* RTE_SCHED_OPTIMIZATIONS */
> 
>  static inline void
> -grinder_pcache_populate(struct rte_sched_port *port, uint32_t pos,
> uint32_t bmp_pos, uint64_t bmp_slab)
> +grinder_pcache_populate(struct rte_sched_port *port, uint32_t pos,
> +			uint32_t bmp_pos, uint64_t bmp_slab)
>  {
>  	struct rte_sched_grinder *grinder = port->grinder + pos;
>  	uint16_t w[4];
> @@ -1843,9 +1843,8 @@ grinder_next_tc(struct rte_sched_port *port,
> uint32_t pos)
>  	uint32_t qindex;
>  	uint16_t qsize;
> 
> -	if (grinder->tccache_r == grinder->tccache_w) {
> +	if (grinder->tccache_r == grinder->tccache_w)
>  		return 0;
> -	}
> 
>  	qindex = grinder->tccache_qindex[grinder->tccache_r];
>  	qbase = rte_sched_port_qbase(port, qindex);
> @@ -1870,7 +1869,7 @@ grinder_next_tc(struct rte_sched_port *port,
> uint32_t pos)
>  	grinder->qbase[2] = qbase + 2 * qsize;
>  	grinder->qbase[3] = qbase + 3 * qsize;
> 
> -	grinder->tccache_r ++;
> +	grinder->tccache_r++;
>  	return 1;
>  }
> 
> @@ -1884,15 +1883,14 @@ grinder_next_pipe(struct rte_sched_port *port,
> uint32_t pos)
>  	if (grinder->pcache_r < grinder->pcache_w) {
>  		pipe_qmask = grinder->pcache_qmask[grinder->pcache_r];
>  		pipe_qindex = grinder->pcache_qindex[grinder->pcache_r];
> -		grinder->pcache_r ++;
> +		grinder->pcache_r++;
>  	} else {
>  		uint64_t bmp_slab = 0;
>  		uint32_t bmp_pos = 0;
> 
>  		/* Get another non-empty pipe group */
> -		if (unlikely(rte_bitmap_scan(port->bmp, &bmp_pos,
> &bmp_slab) <= 0)) {
> +		if (unlikely(rte_bitmap_scan(port->bmp, &bmp_pos,
> &bmp_slab) <= 0))
>  			return 0;
> -		}
> 
>  #if RTE_SCHED_DEBUG
>  		debug_check_queue_slab(port, bmp_pos, bmp_slab);
> @@ -1900,9 +1898,9 @@ grinder_next_pipe(struct rte_sched_port *port,
> uint32_t pos)
> 
>  		/* Return if pipe group already in one of the other grinders
> */
>  		port->grinder_base_bmp_pos[pos] =
> RTE_SCHED_BMP_POS_INVALID;
> -		if (unlikely(grinder_pipe_exists(port, bmp_pos))) {
> +		if (unlikely(grinder_pipe_exists(port, bmp_pos)))
>  			return 0;
> -		}
> +
>  		port->grinder_base_bmp_pos[pos] = bmp_pos;
> 
>  		/* Install new pipe group into grinder's pipe cache */
> @@ -1934,9 +1932,9 @@ grinder_next_pipe(struct rte_sched_port *port,
> uint32_t pos)
> 
>  #if RTE_SCHED_WRR == 0
> 
> -#define grinder_wrr_load(a,b)
> +#define grinder_wrr_load(a, b)
> 
> -#define grinder_wrr_store(a,b)
> +#define grinder_wrr_store(a, b)
> 
>  static inline void
>  grinder_wrr(struct rte_sched_port *port, uint32_t pos)
> @@ -1944,9 +1942,8 @@ grinder_wrr(struct rte_sched_port *port, uint32_t
> pos)
>  	struct rte_sched_grinder *grinder = port->grinder + pos;
>  	uint64_t slab = grinder->qmask;
> 
> -	if (rte_bsf64(slab, &grinder->qpos) == 0) {
> +	if (rte_bsf64(slab, &grinder->qpos) == 0)
>  		rte_panic("grinder wrr\n");
> -	}
>  }
> 
>  #elif RTE_SCHED_WRR == 1
> @@ -1989,10 +1986,14 @@ grinder_wrr_store(struct rte_sched_port *port,
> uint32_t pos)
> 
>  	qindex = tc_index * 4;
> 
> -	pipe->wrr_tokens[qindex] = (uint8_t) ((grinder->wrr_tokens[0] &
> grinder->wrr_mask[0]) >> RTE_SCHED_WRR_SHIFT);
> -	pipe->wrr_tokens[qindex + 1] = (uint8_t) ((grinder->wrr_tokens[1]
> & grinder->wrr_mask[1]) >> RTE_SCHED_WRR_SHIFT);
> -	pipe->wrr_tokens[qindex + 2] = (uint8_t) ((grinder->wrr_tokens[2]
> & grinder->wrr_mask[2]) >> RTE_SCHED_WRR_SHIFT);
> -	pipe->wrr_tokens[qindex + 3] = (uint8_t) ((grinder->wrr_tokens[3]
> & grinder->wrr_mask[3]) >> RTE_SCHED_WRR_SHIFT);
> +	pipe->wrr_tokens[qindex] =
> +		(grinder->wrr_tokens[0] & grinder->wrr_mask[0]) >>
> RTE_SCHED_WRR_SHIFT;
> +	pipe->wrr_tokens[qindex + 1] =
> +		(grinder->wrr_tokens[1] & grinder->wrr_mask[1]) >>
> RTE_SCHED_WRR_SHIFT;
> +	pipe->wrr_tokens[qindex + 2] =
> +		(grinder->wrr_tokens[2] & grinder->wrr_mask[2]) >>
> RTE_SCHED_WRR_SHIFT;
> +	pipe->wrr_tokens[qindex + 3] =
> +		(grinder->wrr_tokens[3] & grinder->wrr_mask[3]) >>
> RTE_SCHED_WRR_SHIFT;
>  }
> 
>  static inline void
> @@ -2083,7 +2084,7 @@ grinder_handle(struct rte_sched_port *port,
> uint32_t pos)
>  	{
>  		if (grinder_next_pipe(port, pos)) {
>  			grinder_prefetch_pipe(port, pos);
> -			port->busy_grinders ++;
> +			port->busy_grinders++;
> 
>  			grinder->state =
> e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
>  			return 0;
> @@ -2134,9 +2135,10 @@ grinder_handle(struct rte_sched_port *port,
> uint32_t pos)
>  			grinder->state = e_GRINDER_PREFETCH_MBUF;
>  			return result;
>  		}
> -		if ((grinder->productive == 0) && (port->pipe_loop ==
> RTE_SCHED_PIPE_INVALID)) {
> +		if ((grinder->productive == 0) &&
> +		    (port->pipe_loop == RTE_SCHED_PIPE_INVALID))
>  			port->pipe_loop = grinder->pindex;
> -		}
> +
>  		grinder_evict(port, pos);
> 
>  		/* Look for another active pipe */
> @@ -2148,7 +2150,7 @@ grinder_handle(struct rte_sched_port *port,
> uint32_t pos)
>  		}
> 
>  		/* No active pipe found */
> -		port->busy_grinders --;
> +		port->busy_grinders--;
> 
>  		grinder->state = e_GRINDER_PREFETCH_PIPE;
>  		return result;
> @@ -2170,9 +2172,8 @@ rte_sched_port_time_resync(struct
> rte_sched_port *port)
>  	/* Advance port time */
>  	port->time_cpu_cycles = cycles;
>  	port->time_cpu_bytes += (uint64_t) bytes_diff;
> -	if (port->time < port->time_cpu_bytes) {
> +	if (port->time < port->time_cpu_bytes)
>  		port->time = port->time_cpu_bytes;
> -	}
> 
>  	/* Reset pipe loop detection */
>  	port->pipe_loop = RTE_SCHED_PIPE_INVALID;
> @@ -2204,7 +2205,7 @@ rte_sched_port_dequeue(struct rte_sched_port
> *port, struct rte_mbuf **pkts, uint
>  	rte_sched_port_time_resync(port);
> 
>  	/* Take each queue in the grinder one step further */
> -	for (i = 0, count = 0; ; i ++)  {
> +	for (i = 0, count = 0; ; i++)  {
>  		count += grinder_handle(port, i &
> (RTE_SCHED_PORT_N_GRINDERS - 1));
>  		if ((count == n_pkts) ||
>  		    rte_sched_port_exceptions(port, i >=
> RTE_SCHED_PORT_N_GRINDERS)) {
> --
> 2.1.4

Fine with me, please fix the ASCII diagram. Thanks, Steve!
  
Stephen Hemminger Sept. 11, 2015, 7:18 p.m. UTC | #2
On Fri, 11 Sep 2015 17:31:10 +0000
"Dumitrescu, Cristian" <cristian.dumitrescu@intel.com> wrote:

> >   *   p00  _______   p10  _______   p20  _______   p30  _______
> > - * ----->|       |----->|       |----->|       |----->|       |----->
> > + *----->|       |----->|       |----->|       |----->|       |----->
> >   *       |   0   |      |   1   |      |   2   |      |   3   |
> > - * ----->|_______|----->|_______|----->|_______|----->|_______|----->
> > + *----->|_______|----->|_______|----->|_______|----->|_______|----->
> >   *   p01            p11            p21            p31
> >   *
> >   ***/  
> 
> You are breaking the ASCII diagram above by removing the first space on the line, but not for all lines. Please check that diagram is OK after the space removal.

Sorry, it was a global replace issue.
  

Patch

diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index 9c9419d..924c172 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -317,88 +317,74 @@  rte_sched_port_check_params(struct rte_sched_port_params *params)
 {
 	uint32_t i, j;
 
-	if (params == NULL) {
+	if (params == NULL)
 		return -1;
-	}
 
 	/* socket */
-	if ((params->socket < 0) || (params->socket >= RTE_MAX_NUMA_NODES)) {
+	if ((params->socket < 0) || (params->socket >= RTE_MAX_NUMA_NODES))
 		return -3;
-	}
 
 	/* rate */
-	if (params->rate == 0) {
+	if (params->rate == 0)
 		return -4;
-	}
 
 	/* mtu */
-	if (params->mtu == 0) {
+	if (params->mtu == 0)
 		return -5;
-	}
 
 	/* n_subports_per_port: non-zero, power of 2 */
-	if ((params->n_subports_per_port == 0) || (!rte_is_power_of_2(params->n_subports_per_port))) {
+	if ((params->n_subports_per_port == 0) || (!rte_is_power_of_2(params->n_subports_per_port)))
 		return -6;
-	}
 
 	/* n_pipes_per_subport: non-zero, power of 2 */
-	if ((params->n_pipes_per_subport == 0) || (!rte_is_power_of_2(params->n_pipes_per_subport))) {
+	if ((params->n_pipes_per_subport == 0) || (!rte_is_power_of_2(params->n_pipes_per_subport)))
 		return -7;
-	}
 
 	/* qsize: non-zero, power of 2, no bigger than 32K (due to 16-bit read/write pointers) */
-	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
+	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
 		uint16_t qsize = params->qsize[i];
 
-		if ((qsize == 0) || (!rte_is_power_of_2(qsize))) {
+		if ((qsize == 0) || (!rte_is_power_of_2(qsize)))
 			return -8;
-		}
 	}
 
 	/* pipe_profiles and n_pipe_profiles */
 	if ((params->pipe_profiles == NULL) ||
 	    (params->n_pipe_profiles == 0) ||
-	    (params->n_pipe_profiles > RTE_SCHED_PIPE_PROFILES_PER_PORT)) {
+	    (params->n_pipe_profiles > RTE_SCHED_PIPE_PROFILES_PER_PORT))
 		return -9;
-	}
 
-	for (i = 0; i < params->n_pipe_profiles; i ++) {
+	for (i = 0; i < params->n_pipe_profiles; i++) {
 		struct rte_sched_pipe_params *p = params->pipe_profiles + i;
 
 		/* TB rate: non-zero, not greater than port rate */
-		if ((p->tb_rate == 0) || (p->tb_rate > params->rate)) {
+		if ((p->tb_rate == 0) || (p->tb_rate > params->rate))
 			return -10;
-		}
 
 		/* TB size: non-zero */
-		if (p->tb_size == 0) {
+		if (p->tb_size == 0)
 			return -11;
-		}
 
 		/* TC rate: non-zero, less than pipe rate */
-		for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j ++) {
-			if ((p->tc_rate[j] == 0) || (p->tc_rate[j] > p->tb_rate)) {
+		for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
+			if ((p->tc_rate[j] == 0) || (p->tc_rate[j] > p->tb_rate))
 				return -12;
-			}
 		}
 
 		/* TC period: non-zero */
-		if (p->tc_period == 0) {
+		if (p->tc_period == 0)
 			return -13;
-		}
 
 #ifdef RTE_SCHED_SUBPORT_TC_OV
 		/* TC3 oversubscription weight: non-zero */
-		if (p->tc_ov_weight == 0) {
+		if (p->tc_ov_weight == 0)
 			return -14;
-		}
 #endif
 
 		/* Queue WRR weights: non-zero */
-		for (j = 0; j < RTE_SCHED_QUEUES_PER_PIPE; j ++) {
-			if (p->wrr_weights[j] == 0) {
+		for (j = 0; j < RTE_SCHED_QUEUES_PER_PIPE; j++) {
+			if (p->wrr_weights[j] == 0)
 				return -15;
-			}
 		}
 	}
 
@@ -424,34 +410,42 @@  rte_sched_port_get_array_base(struct rte_sched_port_params *params, enum rte_sch
 	uint32_t base, i;
 
 	size_per_pipe_queue_array = 0;
-	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
-		size_per_pipe_queue_array += RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS * params->qsize[i] * sizeof(struct rte_mbuf *);
-	}
+	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+		size_per_pipe_queue_array += RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS
+			* params->qsize[i] * sizeof(struct rte_mbuf *);
+
 	size_queue_array = n_pipes_per_port * size_per_pipe_queue_array;
 
 	base = 0;
 
-	if (array == e_RTE_SCHED_PORT_ARRAY_SUBPORT) return base;
+	if (array == e_RTE_SCHED_PORT_ARRAY_SUBPORT)
+		return base;
+
 	base += RTE_CACHE_LINE_ROUNDUP(size_subport);
+	if (array == e_RTE_SCHED_PORT_ARRAY_PIPE)
+		return base;
 
-	if (array == e_RTE_SCHED_PORT_ARRAY_PIPE) return base;
 	base += RTE_CACHE_LINE_ROUNDUP(size_pipe);
+	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE)
+		return base;
 
-	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE) return base;
 	base += RTE_CACHE_LINE_ROUNDUP(size_queue);
+	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA)
+		return base;
 
-	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA) return base;
 	base += RTE_CACHE_LINE_ROUNDUP(size_queue_extra);
+	if (array == e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES)
+		return base;
 
-	if (array == e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES) return base;
 	base += RTE_CACHE_LINE_ROUNDUP(size_pipe_profiles);
+	if (array == e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY)
+		return base;
 
-	if (array == e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY) return base;
 	base += RTE_CACHE_LINE_ROUNDUP(size_bmp_array);
+	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY)
+		return base;
 
-	if (array == e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY) return base;
 	base += RTE_CACHE_LINE_ROUNDUP(size_queue_array);
-
 	return base;
 }
 
@@ -543,8 +537,8 @@  static inline uint64_t
 rte_sched_time_ms_to_bytes(uint32_t time_ms, uint32_t rate)
 {
 	uint64_t time = time_ms;
-	time = (time * rate) / 1000;
 
+	time = (time * rate) / 1000;
 	return time;
 }
 
@@ -553,7 +547,7 @@  rte_sched_port_config_pipe_profile_table(struct rte_sched_port *port, struct rte
 {
 	uint32_t i, j;
 
-	for (i = 0; i < port->n_pipe_profiles; i ++) {
+	for (i = 0; i < port->n_pipe_profiles; i++) {
 		struct rte_sched_pipe_params *src = params->pipe_profiles + i;
 		struct rte_sched_pipe_profile *dst = port->pipe_profiles + i;
 
@@ -571,15 +565,16 @@  rte_sched_port_config_pipe_profile_table(struct rte_sched_port *port, struct rte
 
 		/* Traffic Classes */
 		dst->tc_period = (uint32_t) rte_sched_time_ms_to_bytes(src->tc_period, params->rate);
-		for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j ++) {
-			dst->tc_credits_per_period[j] = (uint32_t) rte_sched_time_ms_to_bytes(src->tc_period, src->tc_rate[j]);
-		}
+		for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++)
+			dst->tc_credits_per_period[j] =
+				rte_sched_time_ms_to_bytes(src->tc_period, src->tc_rate[j]);
+
 #ifdef RTE_SCHED_SUBPORT_TC_OV
 		dst->tc_ov_weight = src->tc_ov_weight;
 #endif
 
 		/* WRR */
-		for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j ++) {
+		for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
 			uint32_t wrr_cost[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
 			uint32_t lcd, lcd1, lcd2;
 			uint32_t qindex;
@@ -610,13 +605,12 @@  rte_sched_port_config_pipe_profile_table(struct rte_sched_port *port, struct rte
 	}
 
 	port->pipe_tc3_rate_max = 0;
-	for (i = 0; i < port->n_pipe_profiles; i ++) {
+	for (i = 0; i < port->n_pipe_profiles; i++) {
 		struct rte_sched_pipe_params *src = params->pipe_profiles + i;
 		uint32_t pipe_tc3_rate = src->tc_rate[3];
 
-		if (port->pipe_tc3_rate_max < pipe_tc3_rate) {
+		if (port->pipe_tc3_rate_max < pipe_tc3_rate)
 			port->pipe_tc3_rate_max = pipe_tc3_rate;
-		}
 	}
 }
 
@@ -628,15 +622,13 @@  rte_sched_port_config(struct rte_sched_port_params *params)
 
 	/* Check user parameters. Determine the amount of memory to allocate */
 	mem_size = rte_sched_port_get_memory_footprint(params);
-	if (mem_size == 0) {
+	if (mem_size == 0)
 		return NULL;
-	}
 
 	/* Allocate memory to store the data structures */
 	port = rte_zmalloc("qos_params", mem_size, RTE_CACHE_LINE_SIZE);
-	if (port == NULL) {
+	if (port == NULL)
 		return NULL;
-	}
 
 	/* User parameters */
 	port->n_subports_per_port = params->n_subports_per_port;
@@ -662,9 +654,8 @@  rte_sched_port_config(struct rte_sched_port_params *params)
 				params->red_params[i][j].wq_log2,
 				params->red_params[i][j].min_th,
 				params->red_params[i][j].max_th,
-				params->red_params[i][j].maxp_inv) != 0) {
+				params->red_params[i][j].maxp_inv) != 0)
 				return NULL;
-			}
 		}
 	}
 #endif
@@ -688,13 +679,20 @@  rte_sched_port_config(struct rte_sched_port_params *params)
 	rte_sched_port_config_qsize(port);
 
 	/* Large data structures */
-	port->subport = (struct rte_sched_subport *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_SUBPORT));
-	port->pipe = (struct rte_sched_pipe *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_PIPE));
-	port->queue = (struct rte_sched_queue *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_QUEUE));
-	port->queue_extra = (struct rte_sched_queue_extra *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA));
-	port->pipe_profiles = (struct rte_sched_pipe_profile *) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES));
-	port->bmp_array =  port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY);
-	port->queue_array = (struct rte_mbuf **) (port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY));
+	port->subport = (struct rte_sched_subport *)
+		(port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_SUBPORT));
+	port->pipe = (struct rte_sched_pipe *)
+		(port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_PIPE));
+	port->queue = (struct rte_sched_queue *)
+		(port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_QUEUE));
+	port->queue_extra = (struct rte_sched_queue_extra *)
+		(port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA));
+	port->pipe_profiles = (struct rte_sched_pipe_profile *)
+		(port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES));
+	port->bmp_array =  port->memory
+		+ rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY);
+	port->queue_array = (struct rte_mbuf **)
+		(port->memory + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY));
 
 	/* Pipe profile table */
 	rte_sched_port_config_pipe_profile_table(port, params);
@@ -707,9 +705,10 @@  rte_sched_port_config(struct rte_sched_port_params *params)
 		RTE_LOG(ERR, SCHED, "Bitmap init error\n");
 		return NULL;
 	}
-	for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i ++) {
+
+	for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++)
 		port->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID;
-	}
+
 
 	return port;
 }
@@ -718,9 +717,8 @@  void
 rte_sched_port_free(struct rte_sched_port *port)
 {
 	/* Check user parameters */
-	if (port == NULL){
+	if (port == NULL)
 		return;
-	}
 
 	rte_bitmap_free(port->bmp);
 	rte_free(port);
@@ -765,27 +763,22 @@  rte_sched_subport_config(struct rte_sched_port *port,
 	/* Check user parameters */
 	if ((port == NULL) ||
 	    (subport_id >= port->n_subports_per_port) ||
-		(params == NULL)) {
+		(params == NULL))
 		return -1;
-	}
 
-	if ((params->tb_rate == 0) || (params->tb_rate > port->rate)) {
+	if ((params->tb_rate == 0) || (params->tb_rate > port->rate))
 		return -2;
-	}
 
-	if (params->tb_size == 0) {
+	if (params->tb_size == 0)
 		return -3;
-	}
 
-	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
-		if ((params->tc_rate[i] == 0) || (params->tc_rate[i] > params->tb_rate)) {
+	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+		if ((params->tc_rate[i] == 0) || (params->tc_rate[i] > params->tb_rate))
 			return -4;
-		}
 	}
 
-	if (params->tc_period == 0) {
+	if (params->tc_period == 0)
 		return -5;
-	}
 
 	s = port->subport + subport_id;
 
@@ -804,19 +797,21 @@  rte_sched_subport_config(struct rte_sched_port *port,
 	s->tb_credits = s->tb_size / 2;
 
 	/* Traffic Classes (TCs) */
-	s->tc_period = (uint32_t) rte_sched_time_ms_to_bytes(params->tc_period, port->rate);
-	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
-		s->tc_credits_per_period[i] = (uint32_t) rte_sched_time_ms_to_bytes(params->tc_period, params->tc_rate[i]);
+	s->tc_period = rte_sched_time_ms_to_bytes(params->tc_period, port->rate);
+	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+		s->tc_credits_per_period[i] = rte_sched_time_ms_to_bytes(params->tc_period,
+									 params->tc_rate[i]);
 	}
+
 	s->tc_time = port->time + s->tc_period;
-	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
+	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
 		s->tc_credits[i] = s->tc_credits_per_period[i];
-	}
 
 #ifdef RTE_SCHED_SUBPORT_TC_OV
 	/* TC oversubscription */
 	s->tc_ov_wm_min = port->mtu;
-	s->tc_ov_wm_max = (uint32_t) rte_sched_time_ms_to_bytes(params->tc_period, port->pipe_tc3_rate_max);
+	s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(params->tc_period,
+						     port->pipe_tc3_rate_max);
 	s->tc_ov_wm = s->tc_ov_wm_max;
 	s->tc_ov_period_id = 0;
 	s->tc_ov = 0;
@@ -846,15 +841,13 @@  rte_sched_pipe_config(struct rte_sched_port *port,
 	if ((port == NULL) ||
 	    (subport_id >= port->n_subports_per_port) ||
 		(pipe_id >= port->n_pipes_per_subport) ||
-		((!deactivate) && (profile >= port->n_pipe_profiles))) {
+		((!deactivate) && (profile >= port->n_pipe_profiles)))
 		return -1;
-	}
 
 	/* Check that subport configuration is valid */
 	s = port->subport + subport_id;
-	if (s->tb_period == 0) {
+	if (s->tb_period == 0)
 		return -2;
-	}
 
 	p = port->pipe + (subport_id * port->n_pipes_per_subport + pipe_id);
 
@@ -883,9 +876,8 @@  rte_sched_pipe_config(struct rte_sched_port *port,
 		memset(p, 0, sizeof(struct rte_sched_pipe));
 	}
 
-	if (deactivate) {
+	if (deactivate)
 		return 0;
-	}
 
 	/* Apply the new pipe configuration */
 	p->profile = profile;
@@ -897,15 +889,17 @@  rte_sched_pipe_config(struct rte_sched_port *port,
 
 	/* Traffic Classes (TCs) */
 	p->tc_time = port->time + params->tc_period;
-	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i ++) {
+	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
 		p->tc_credits[i] = params->tc_credits_per_period[i];
-	}
+
 
 #ifdef RTE_SCHED_SUBPORT_TC_OV
 	{
 		/* Subport TC3 oversubscription */
-		double subport_tc3_rate = ((double) s->tc_credits_per_period[3]) / ((double) s->tc_period);
-		double pipe_tc3_rate = ((double) params->tc_credits_per_period[3]) / ((double) params->tc_period);
+		double subport_tc3_rate = (double)s->tc_credits_per_period[3]
+					/ (double)s->tc_period;
+		double pipe_tc3_rate = (double)params->tc_credits_per_period[3]
+					/ (double)params->tc_period;
 		uint32_t tc3_ov = s->tc_ov;
 
 		s->tc_ov_n += params->tc_ov_weight;
@@ -976,9 +970,8 @@  rte_sched_subport_read_stats(struct rte_sched_port *port,
 	if ((port == NULL) ||
 	    (subport_id >= port->n_subports_per_port) ||
 		(stats == NULL) ||
-		(tc_ov == NULL)) {
+		(tc_ov == NULL))
 		return -1;
-	}
 	s = port->subport + subport_id;
 
 	/* Copy subport stats and clear */
@@ -1004,9 +997,8 @@  rte_sched_queue_read_stats(struct rte_sched_port *port,
 	if ((port == NULL) ||
 	    (queue_id >= rte_sched_port_queues_per_port(port)) ||
 		(stats == NULL) ||
-		(qlen == NULL)) {
+		(qlen == NULL))
 		return -1;
-	}
 	q = port->queue + queue_id;
 	qe = port->queue_extra + queue_id;
 
@@ -1021,7 +1013,8 @@  rte_sched_queue_read_stats(struct rte_sched_port *port,
 }
 
 static inline uint32_t
-rte_sched_port_qindex(struct rte_sched_port *port, uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t queue)
+rte_sched_port_qindex(struct rte_sched_port *port, uint32_t subport,
+		      uint32_t pipe, uint32_t traffic_class, uint32_t queue)
 {
 	uint32_t result;
 
@@ -1038,7 +1031,8 @@  rte_sched_port_qbase(struct rte_sched_port *port, uint32_t qindex)
 	uint32_t pindex = qindex >> 4;
 	uint32_t qpos = qindex & 0xF;
 
-	return (port->queue_array + pindex * port->qsize_sum + port->qsize_add[qpos]);
+	return port->queue_array + pindex * port->qsize_sum
+		+ port->qsize_add[qpos];
 }
 
 static inline uint16_t
@@ -1074,9 +1068,11 @@  rte_sched_port_queue_is_full(struct rte_sched_port *port, uint32_t qindex)
 #ifdef RTE_SCHED_COLLECT_STATS
 
 static inline void
-rte_sched_port_update_subport_stats(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
+rte_sched_port_update_subport_stats(struct rte_sched_port *port,
+				    uint32_t qindex, struct rte_mbuf *pkt)
 {
-	struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
+	struct rte_sched_subport *s = port->subport
+		+ (qindex / rte_sched_port_queues_per_subport(port));
 	uint32_t tc_index = (qindex >> 2) & 0x3;
 	uint32_t pkt_len = pkt->pkt_len;
 
@@ -1085,7 +1081,8 @@  rte_sched_port_update_subport_stats(struct rte_sched_port *port, uint32_t qindex
 }
 
 static inline void
-rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
+rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port, uint32_t qindex,
+					    struct rte_mbuf *pkt)
 {
 	struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
 	uint32_t tc_index = (qindex >> 2) & 0x3;
@@ -1096,7 +1093,8 @@  rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port, uint32_
 }
 
 static inline void
-rte_sched_port_update_queue_stats(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
+rte_sched_port_update_queue_stats(struct rte_sched_port *port, uint32_t qindex,
+				  struct rte_mbuf *pkt)
 {
 	struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
 	uint32_t pkt_len = pkt->pkt_len;
@@ -1106,7 +1104,8 @@  rte_sched_port_update_queue_stats(struct rte_sched_port *port, uint32_t qindex,
 }
 
 static inline void
-rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf *pkt)
+rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port,
+					  uint32_t qindex, struct rte_mbuf *pkt)
 {
 	struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
 	uint32_t pkt_len = pkt->pkt_len;
@@ -1120,11 +1119,12 @@  rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port, uint32_t
 #ifdef RTE_SCHED_RED
 
 static inline int
-rte_sched_port_red_drop(struct rte_sched_port *port, struct rte_mbuf *pkt, uint32_t qindex, uint16_t qlen)
+rte_sched_port_red_drop(struct rte_sched_port *port, struct rte_mbuf *pkt,
+			uint32_t qindex, uint16_t qlen)
 {
 	struct rte_sched_queue_extra *qe;
 	struct rte_red_config *red_cfg;
-    struct rte_red *red;
+	struct rte_red *red;
 	uint32_t tc_index;
 	enum rte_meter_color color;
 
@@ -1142,10 +1142,11 @@  rte_sched_port_red_drop(struct rte_sched_port *port, struct rte_mbuf *pkt, uint3
 }
 
 static inline void
-rte_sched_port_set_queue_empty_timestamp(struct rte_sched_port *port, uint32_t qindex)
+rte_sched_port_set_queue_empty_timestamp(struct rte_sched_port *port,
+					 uint32_t qindex)
 {
 	struct rte_sched_queue_extra *qe;
-    struct rte_red *red;
+	struct rte_red *red;
 
 	qe = port->queue_extra + qindex;
 	red = &qe->red;
@@ -1170,17 +1171,17 @@  debug_pipe_is_empty(struct rte_sched_port *port, uint32_t pindex)
 
 	qindex = pindex << 4;
 
-	for (i = 0; i < 16; i ++){
+	for (i = 0; i < 16; i++) {
 		uint32_t queue_empty = rte_sched_port_queue_is_empty(port, qindex + i);
 		uint32_t bmp_bit_clear = (rte_bitmap_get(port->bmp, qindex + i) == 0);
 
-		if (queue_empty != bmp_bit_clear){
-			rte_panic("Queue status mismatch for queue %u of pipe %u\n", i, pindex);
-		}
+		if (queue_empty != bmp_bit_clear)
+			rte_panic("Queue status mismatch for queue %u of pipe %u\n",
+				  i, pindex);
 
-		if (!queue_empty){
+
+		if (!queue_empty)
 			return 0;
-		}
 	}
 
 	return 1;
@@ -1192,13 +1193,12 @@  debug_check_queue_slab(struct rte_sched_port *port, uint32_t bmp_pos, uint64_t b
 	uint64_t mask;
 	uint32_t i, panic;
 
-	if (bmp_slab == 0){
+	if (bmp_slab == 0)
 		rte_panic("Empty slab at position %u\n", bmp_pos);
-	}
 
 	panic = 0;
-	for (i = 0, mask = 1; i < 64; i ++, mask <<= 1) {
-		if (mask & bmp_slab){
+	for (i = 0, mask = 1; i < 64; i++, mask <<= 1) {
+		if (mask & bmp_slab) {
 			if (rte_sched_port_queue_is_empty(port, bmp_pos + i)) {
 				printf("Queue %u (slab offset %u) is empty\n", bmp_pos + i, i);
 				panic = 1;
@@ -1206,7 +1206,7 @@  debug_check_queue_slab(struct rte_sched_port *port, uint32_t bmp_pos, uint64_t b
 		}
 	}
 
-	if (panic){
+	if (panic) {
 		rte_panic("Empty queues in slab 0x%" PRIx64 "starting at position %u\n",
 			bmp_slab, bmp_pos);
 	}
@@ -1237,7 +1237,8 @@  rte_sched_port_enqueue_qptrs_prefetch0(struct rte_sched_port *port, struct rte_m
 }
 
 static inline void
-rte_sched_port_enqueue_qwa_prefetch0(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf **qbase)
+rte_sched_port_enqueue_qwa_prefetch0(struct rte_sched_port *port,
+				     uint32_t qindex, struct rte_mbuf **qbase)
 {
 	struct rte_sched_queue *q;
 	struct rte_mbuf **q_qw;
@@ -1252,7 +1253,8 @@  rte_sched_port_enqueue_qwa_prefetch0(struct rte_sched_port *port, uint32_t qinde
 }
 
 static inline int
-rte_sched_port_enqueue_qwa(struct rte_sched_port *port, uint32_t qindex, struct rte_mbuf **qbase, struct rte_mbuf *pkt)
+rte_sched_port_enqueue_qwa(struct rte_sched_port *port, uint32_t qindex,
+			   struct rte_mbuf **qbase, struct rte_mbuf *pkt)
 {
 	struct rte_sched_queue *q;
 	uint16_t qsize;
@@ -1274,7 +1276,7 @@  rte_sched_port_enqueue_qwa(struct rte_sched_port *port, uint32_t qindex, struct
 
 	/* Enqueue packet */
 	qbase[q->qw & (qsize - 1)] = pkt;
-	q->qw ++;
+	q->qw++;
 
 	/* Activate queue in the port bitmap */
 	rte_bitmap_set(port->bmp, qindex);
@@ -1297,7 +1299,7 @@  rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint
 
 	result = 0;
 
-	for (i = 0; i < n_pkts; i ++) {
+	for (i = 0; i < n_pkts; i++) {
 		struct rte_mbuf *pkt;
 		struct rte_mbuf **q_base;
 		uint32_t subport, pipe, traffic_class, queue, qindex;
@@ -1324,17 +1326,20 @@  rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint
  * diagram below:
  *
  *   p00  _______   p10  _______   p20  _______   p30  _______
- * ----->|       |----->|       |----->|       |----->|       |----->
+ *----->|       |----->|       |----->|       |----->|       |----->
  *       |   0   |      |   1   |      |   2   |      |   3   |
- * ----->|_______|----->|_______|----->|_______|----->|_______|----->
+ *----->|_______|----->|_______|----->|_______|----->|_______|----->
  *   p01            p11            p21            p31
  *
  ***/
 int
 rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint32_t n_pkts)
 {
-	struct rte_mbuf *pkt00, *pkt01, *pkt10, *pkt11, *pkt20, *pkt21, *pkt30, *pkt31, *pkt_last;
-	struct rte_mbuf **q00_base, **q01_base, **q10_base, **q11_base, **q20_base, **q21_base, **q30_base, **q31_base, **q_last_base;
+	struct rte_mbuf *pkt00, *pkt01, *pkt10, *pkt11, *pkt20, *pkt21,
+		*pkt30, *pkt31, *pkt_last;
+	struct rte_mbuf **q00_base, **q01_base, **q10_base,
+		**q11_base, **q20_base, **q21_base, **q30_base,
+		**q31_base, **q_last_base;
 	uint32_t q00, q01, q10, q11, q20, q21, q30, q31, q_last;
 	uint32_t r00, r01, r10, r11, r20, r21, r30, r31, r_last;
 	uint32_t result, i;
@@ -1347,25 +1352,23 @@  rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint
 		uint32_t q[5];
 
 		/* Prefetch the mbuf structure of each packet */
-		for (i = 0; i < n_pkts; i ++) {
+		for (i = 0; i < n_pkts; i++)
 			rte_prefetch0(pkts[i]);
-		}
 
 		/* Prefetch the queue structure for each queue */
-		for (i = 0; i < n_pkts; i ++) {
+		for (i = 0; i < n_pkts; i++)
 			q[i] = rte_sched_port_enqueue_qptrs_prefetch0(port, pkts[i]);
-		}
 
 		/* Prefetch the write pointer location of each queue */
-		for (i = 0; i < n_pkts; i ++) {
+		for (i = 0; i < n_pkts; i++) {
 			q_base[i] = rte_sched_port_qbase(port, q[i]);
 			rte_sched_port_enqueue_qwa_prefetch0(port, q[i], q_base[i]);
 		}
 
 		/* Write each packet to its queue */
-		for (i = 0; i < n_pkts; i ++) {
-			result += rte_sched_port_enqueue_qwa(port, q[i], q_base[i], pkts[i]);
-		}
+		for (i = 0; i < n_pkts; i++)
+			result += rte_sched_port_enqueue_qwa(port, q[i],
+							     q_base[i], pkts[i]);
 
 		return result;
 	}
@@ -1537,9 +1540,8 @@  grinder_tc_ov_credits_update(struct rte_sched_port *port, uint32_t pos)
 	uint32_t tc_ov_consumption_max;
 	uint32_t tc_ov_wm = subport->tc_ov_wm;
 
-	if (subport->tc_ov == 0) {
+	if (subport->tc_ov == 0)
 		return subport->tc_ov_wm_max;
-	}
 
 	tc_ov_consumption[0] = subport->tc_credits_per_period[0] - subport->tc_credits[0];
 	tc_ov_consumption[1] = subport->tc_credits_per_period[1] - subport->tc_credits[1];
@@ -1551,16 +1553,16 @@  grinder_tc_ov_credits_update(struct rte_sched_port *port, uint32_t pos)
 
 	if (tc_ov_consumption[3] > (tc_ov_consumption_max - port->mtu)) {
 		tc_ov_wm  -= tc_ov_wm >> 7;
-		if (tc_ov_wm < subport->tc_ov_wm_min) {
+		if (tc_ov_wm < subport->tc_ov_wm_min)
 			tc_ov_wm = subport->tc_ov_wm_min;
-		}
+
 		return tc_ov_wm;
 	}
 
 	tc_ov_wm += (tc_ov_wm >> 7) + 1;
-	if (tc_ov_wm > subport->tc_ov_wm_max) {
+	if (tc_ov_wm > subport->tc_ov_wm_max)
 		tc_ov_wm = subport->tc_ov_wm_max;
-	}
+
 	return tc_ov_wm;
 }
 
@@ -1595,7 +1597,7 @@  grinder_credits_update(struct rte_sched_port *port, uint32_t pos)
 		subport->tc_credits[3] = subport->tc_credits_per_period[3];
 
 		subport->tc_time = port->time + subport->tc_period;
-		subport->tc_ov_period_id ++;
+		subport->tc_ov_period_id++;
 	}
 
 	/* Pipe TCs */
@@ -1642,9 +1644,8 @@  grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
 		(pkt_len <= pipe_tb_credits) &&
 		(pkt_len <= pipe_tc_credits);
 
-	if (!enough_credits) {
+	if (!enough_credits)
 		return 0;
-	}
 
 	/* Update port credits */
 	subport->tb_credits -= pkt_len;
@@ -1682,9 +1683,8 @@  grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
 		(pkt_len <= pipe_tc_credits) &&
 		(pkt_len <= pipe_tc_ov_credits);
 
-	if (!enough_credits) {
+	if (!enough_credits)
 		return 0;
-	}
 
 	/* Update pipe and subport credits */
 	subport->tb_credits -= pkt_len;
@@ -1709,17 +1709,16 @@  grinder_schedule(struct rte_sched_port *port, uint32_t pos)
 	uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
 
 #if RTE_SCHED_TS_CREDITS_CHECK
-	if (!grinder_credits_check(port, pos)) {
+	if (!grinder_credits_check(port, pos))
 		return 0;
-	}
 #endif
 
 	/* Advance port time */
 	port->time += pkt_len;
 
 	/* Send packet */
-	port->pkts_out[port->n_pkts_out ++] = pkt;
-	queue->qr ++;
+	port->pkts_out[port->n_pkts_out++] = pkt;
+	queue->qr++;
 	grinder->wrr_tokens[grinder->qpos] += pkt_len * grinder->wrr_cost[grinder->qpos];
 	if (queue->qr == queue->qw) {
 		uint32_t qindex = grinder->qindex[grinder->qpos];
@@ -1742,9 +1741,10 @@  grinder_schedule(struct rte_sched_port *port, uint32_t pos)
 static inline int
 grinder_pipe_exists(struct rte_sched_port *port, uint32_t base_pipe)
 {
-	__m128i index = _mm_set1_epi32 (base_pipe);
+	__m128i index = _mm_set1_epi32(base_pipe);
 	__m128i pipes = _mm_load_si128((__m128i *)port->grinder_base_bmp_pos);
 	__m128i res = _mm_cmpeq_epi32(pipes, index);
+
 	pipes = _mm_load_si128((__m128i *)(port->grinder_base_bmp_pos + 4));
 	pipes = _mm_cmpeq_epi32(pipes, index);
 	res = _mm_or_si128(res, pipes);
@@ -1762,10 +1762,9 @@  grinder_pipe_exists(struct rte_sched_port *port, uint32_t base_pipe)
 {
 	uint32_t i;
 
-	for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i ++) {
-		if (port->grinder_base_bmp_pos[i] == base_pipe) {
+	for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++) {
+		if (port->grinder_base_bmp_pos[i] == base_pipe)
 			return 1;
-		}
 	}
 
 	return 0;
@@ -1774,7 +1773,8 @@  grinder_pipe_exists(struct rte_sched_port *port, uint32_t base_pipe)
 #endif /* RTE_SCHED_OPTIMIZATIONS */
 
 static inline void
-grinder_pcache_populate(struct rte_sched_port *port, uint32_t pos, uint32_t bmp_pos, uint64_t bmp_slab)
+grinder_pcache_populate(struct rte_sched_port *port, uint32_t pos,
+			uint32_t bmp_pos, uint64_t bmp_slab)
 {
 	struct rte_sched_grinder *grinder = port->grinder + pos;
 	uint16_t w[4];
@@ -1843,9 +1843,8 @@  grinder_next_tc(struct rte_sched_port *port, uint32_t pos)
 	uint32_t qindex;
 	uint16_t qsize;
 
-	if (grinder->tccache_r == grinder->tccache_w) {
+	if (grinder->tccache_r == grinder->tccache_w)
 		return 0;
-	}
 
 	qindex = grinder->tccache_qindex[grinder->tccache_r];
 	qbase = rte_sched_port_qbase(port, qindex);
@@ -1870,7 +1869,7 @@  grinder_next_tc(struct rte_sched_port *port, uint32_t pos)
 	grinder->qbase[2] = qbase + 2 * qsize;
 	grinder->qbase[3] = qbase + 3 * qsize;
 
-	grinder->tccache_r ++;
+	grinder->tccache_r++;
 	return 1;
 }
 
@@ -1884,15 +1883,14 @@  grinder_next_pipe(struct rte_sched_port *port, uint32_t pos)
 	if (grinder->pcache_r < grinder->pcache_w) {
 		pipe_qmask = grinder->pcache_qmask[grinder->pcache_r];
 		pipe_qindex = grinder->pcache_qindex[grinder->pcache_r];
-		grinder->pcache_r ++;
+		grinder->pcache_r++;
 	} else {
 		uint64_t bmp_slab = 0;
 		uint32_t bmp_pos = 0;
 
 		/* Get another non-empty pipe group */
-		if (unlikely(rte_bitmap_scan(port->bmp, &bmp_pos, &bmp_slab) <= 0)) {
+		if (unlikely(rte_bitmap_scan(port->bmp, &bmp_pos, &bmp_slab) <= 0))
 			return 0;
-		}
 
 #if RTE_SCHED_DEBUG
 		debug_check_queue_slab(port, bmp_pos, bmp_slab);
@@ -1900,9 +1898,9 @@  grinder_next_pipe(struct rte_sched_port *port, uint32_t pos)
 
 		/* Return if pipe group already in one of the other grinders */
 		port->grinder_base_bmp_pos[pos] = RTE_SCHED_BMP_POS_INVALID;
-		if (unlikely(grinder_pipe_exists(port, bmp_pos))) {
+		if (unlikely(grinder_pipe_exists(port, bmp_pos)))
 			return 0;
-		}
+
 		port->grinder_base_bmp_pos[pos] = bmp_pos;
 
 		/* Install new pipe group into grinder's pipe cache */
@@ -1934,9 +1932,9 @@  grinder_next_pipe(struct rte_sched_port *port, uint32_t pos)
 
 #if RTE_SCHED_WRR == 0
 
-#define grinder_wrr_load(a,b)
+#define grinder_wrr_load(a, b)
 
-#define grinder_wrr_store(a,b)
+#define grinder_wrr_store(a, b)
 
 static inline void
 grinder_wrr(struct rte_sched_port *port, uint32_t pos)
@@ -1944,9 +1942,8 @@  grinder_wrr(struct rte_sched_port *port, uint32_t pos)
 	struct rte_sched_grinder *grinder = port->grinder + pos;
 	uint64_t slab = grinder->qmask;
 
-	if (rte_bsf64(slab, &grinder->qpos) == 0) {
+	if (rte_bsf64(slab, &grinder->qpos) == 0)
 		rte_panic("grinder wrr\n");
-	}
 }
 
 #elif RTE_SCHED_WRR == 1
@@ -1989,10 +1986,14 @@  grinder_wrr_store(struct rte_sched_port *port, uint32_t pos)
 
 	qindex = tc_index * 4;
 
-	pipe->wrr_tokens[qindex] = (uint8_t) ((grinder->wrr_tokens[0] & grinder->wrr_mask[0]) >> RTE_SCHED_WRR_SHIFT);
-	pipe->wrr_tokens[qindex + 1] = (uint8_t) ((grinder->wrr_tokens[1] & grinder->wrr_mask[1]) >> RTE_SCHED_WRR_SHIFT);
-	pipe->wrr_tokens[qindex + 2] = (uint8_t) ((grinder->wrr_tokens[2] & grinder->wrr_mask[2]) >> RTE_SCHED_WRR_SHIFT);
-	pipe->wrr_tokens[qindex + 3] = (uint8_t) ((grinder->wrr_tokens[3] & grinder->wrr_mask[3]) >> RTE_SCHED_WRR_SHIFT);
+	pipe->wrr_tokens[qindex] =
+		(grinder->wrr_tokens[0] & grinder->wrr_mask[0]) >> RTE_SCHED_WRR_SHIFT;
+	pipe->wrr_tokens[qindex + 1] =
+		(grinder->wrr_tokens[1] & grinder->wrr_mask[1]) >> RTE_SCHED_WRR_SHIFT;
+	pipe->wrr_tokens[qindex + 2] =
+		(grinder->wrr_tokens[2] & grinder->wrr_mask[2]) >> RTE_SCHED_WRR_SHIFT;
+	pipe->wrr_tokens[qindex + 3] =
+		(grinder->wrr_tokens[3] & grinder->wrr_mask[3]) >> RTE_SCHED_WRR_SHIFT;
 }
 
 static inline void
@@ -2083,7 +2084,7 @@  grinder_handle(struct rte_sched_port *port, uint32_t pos)
 	{
 		if (grinder_next_pipe(port, pos)) {
 			grinder_prefetch_pipe(port, pos);
-			port->busy_grinders ++;
+			port->busy_grinders++;
 
 			grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
 			return 0;
@@ -2134,9 +2135,10 @@  grinder_handle(struct rte_sched_port *port, uint32_t pos)
 			grinder->state = e_GRINDER_PREFETCH_MBUF;
 			return result;
 		}
-		if ((grinder->productive == 0) && (port->pipe_loop == RTE_SCHED_PIPE_INVALID)) {
+		if ((grinder->productive == 0) &&
+		    (port->pipe_loop == RTE_SCHED_PIPE_INVALID))
 			port->pipe_loop = grinder->pindex;
-		}
+
 		grinder_evict(port, pos);
 
 		/* Look for another active pipe */
@@ -2148,7 +2150,7 @@  grinder_handle(struct rte_sched_port *port, uint32_t pos)
 		}
 
 		/* No active pipe found */
-		port->busy_grinders --;
+		port->busy_grinders--;
 
 		grinder->state = e_GRINDER_PREFETCH_PIPE;
 		return result;
@@ -2170,9 +2172,8 @@  rte_sched_port_time_resync(struct rte_sched_port *port)
 	/* Advance port time */
 	port->time_cpu_cycles = cycles;
 	port->time_cpu_bytes += (uint64_t) bytes_diff;
-	if (port->time < port->time_cpu_bytes) {
+	if (port->time < port->time_cpu_bytes)
 		port->time = port->time_cpu_bytes;
-	}
 
 	/* Reset pipe loop detection */
 	port->pipe_loop = RTE_SCHED_PIPE_INVALID;
@@ -2204,7 +2205,7 @@  rte_sched_port_dequeue(struct rte_sched_port *port, struct rte_mbuf **pkts, uint
 	rte_sched_port_time_resync(port);
 
 	/* Take each queue in the grinder one step further */
-	for (i = 0, count = 0; ; i ++)  {
+	for (i = 0, count = 0; ; i++)  {
 		count += grinder_handle(port, i & (RTE_SCHED_PORT_N_GRINDERS - 1));
 		if ((count == n_pkts) ||
 		    rte_sched_port_exceptions(port, i >= RTE_SCHED_PORT_N_GRINDERS)) {