From patchwork Mon May 30 13:38:31 2022
X-Patchwork-Submitter: "Danilewicz, MarcinX"
X-Patchwork-Id: 112062
X-Patchwork-Delegate: thomas@monjalon.net
From: Marcin Danilewicz
To: dev@dpdk.org, jasvinder.singh@intel.com, cristian.dumitrescu@intel.com
Cc: megha.ajmera@intel.com
Subject: [PATCH v8] sched: enable traffic class oversubscription conditionally
Date: Mon, 30 May 2022 13:38:31 +0000
Message-Id: <20220530133831.830307-1-marcinx.danilewicz@intel.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20220530115500.829250-1-marcinx.danilewicz@intel.com>
References: <20220530115500.829250-1-marcinx.danilewicz@intel.com>
List-Id: DPDK patches and discussions

Added a new flag to enable or disable TC oversubscription for the
best effort traffic class at subport level. By default, TC OV is
enabled (the code below sets the flag in rte_sched_subport_config()).
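The mechanism, in brief: each subport carries a tc_ov_enabled flag, and
the fast path picks between two credit check/update variants at runtime.
A minimal sketch of that dispatch follows; the wrapper function itself is
hypothetical and only illustrates the pattern, while the field and
function names are the ones introduced in the diff below:

    /* Hypothetical wrapper, not part of the patch: illustrates the
     * per-subport dispatch this change adds. subport->tc_ov_enabled and
     * the two grinder_credits_check*() variants are the names used in
     * the diff below. */
    static inline int
    credits_check_dispatch(struct rte_sched_port *port,
        struct rte_sched_subport *subport, uint32_t pos)
    {
        if (subport->tc_ov_enabled)
            /* Best effort TC may borrow unused subport bandwidth */
            return grinder_credits_check_with_tc_ov(port, subport, pos);

        /* Plain per-TC token accounting, no oversubscription */
        return grinder_credits_check(port, subport, pos);
    }

Keeping the two paths as separate functions means the no-OV case skips
the tc_ov watermark bookkeeping entirely instead of branching around it
inside one function.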
Signed-off-by: Marcin Danilewicz
---
History:
- v1 - TC OV disabled by default
- v2 - throughput improvements
- v3, v4, v5 - changes from comments
- v6 - removed rte_sched_subport_tc_ov_config declaration and map
- v7 - changes from comments on v6
---
 lib/sched/rte_sched.c | 93 +++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 90 insertions(+), 3 deletions(-)

diff --git a/lib/sched/rte_sched.c b/lib/sched/rte_sched.c
index ec74bee939..19aab877f0 100644
--- a/lib/sched/rte_sched.c
+++ b/lib/sched/rte_sched.c
@@ -213,6 +213,9 @@ struct rte_sched_subport {
     uint8_t *bmp_array;
     struct rte_mbuf **queue_array;
     uint8_t memory[0] __rte_cache_aligned;
+
+    /* TC oversubscription activation */
+    int tc_ov_enabled;
 } __rte_cache_aligned;
 
 struct rte_sched_port {
@@ -1254,6 +1257,9 @@ rte_sched_subport_config(struct rte_sched_port *port,
     s->n_pipe_profiles = params->n_pipe_profiles;
     s->n_max_pipe_profiles = params->n_max_pipe_profiles;
 
+    /* TC over-subscription is enabled by default */
+    s->tc_ov_enabled = 1;
+
 #ifdef RTE_SCHED_CMAN
     if (params->cman_params != NULL) {
         s->cman_enabled = true;
@@ -2318,6 +2324,45 @@ grinder_credits_update(struct rte_sched_port *port,
     pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size);
     pipe->tb_time += n_periods * params->tb_period;
 
+    /* Subport TCs */
+    if (unlikely(port->time >= subport->tc_time)) {
+        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+            subport->tc_credits[i] = sp->tc_credits_per_period[i];
+
+        subport->tc_time = port->time + sp->tc_period;
+    }
+
+    /* Pipe TCs */
+    if (unlikely(port->time >= pipe->tc_time)) {
+        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+            pipe->tc_credits[i] = params->tc_credits_per_period[i];
+        pipe->tc_time = port->time + params->tc_period;
+    }
+}
+
+static inline void
+grinder_credits_update_with_tc_ov(struct rte_sched_port *port,
+    struct rte_sched_subport *subport, uint32_t pos)
+{
+    struct rte_sched_grinder *grinder = subport->grinder + pos;
+    struct rte_sched_pipe *pipe = grinder->pipe;
+    struct rte_sched_pipe_profile *params = grinder->pipe_params;
+    struct rte_sched_subport_profile *sp = grinder->subport_params;
+    uint64_t n_periods;
+    uint32_t i;
+
+    /* Subport TB */
+    n_periods = (port->time - subport->tb_time) / sp->tb_period;
+    subport->tb_credits += n_periods * sp->tb_credits_per_period;
+    subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size);
+    subport->tb_time += n_periods * sp->tb_period;
+
+    /* Pipe TB */
+    n_periods = (port->time - pipe->tb_time) / params->tb_period;
+    pipe->tb_credits += n_periods * params->tb_credits_per_period;
+    pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size);
+    pipe->tb_time += n_periods * params->tb_period;
+
     /* Subport TCs */
     if (unlikely(port->time >= subport->tc_time)) {
         subport->tc_ov_wm =
@@ -2348,6 +2393,39 @@ grinder_credits_update(struct rte_sched_port *port,
 static inline int
 grinder_credits_check(struct rte_sched_port *port,
     struct rte_sched_subport *subport, uint32_t pos)
+{
+    struct rte_sched_grinder *grinder = subport->grinder + pos;
+    struct rte_sched_pipe *pipe = grinder->pipe;
+    struct rte_mbuf *pkt = grinder->pkt;
+    uint32_t tc_index = grinder->tc_index;
+    uint64_t pkt_len = pkt->pkt_len + port->frame_overhead;
+    uint64_t subport_tb_credits = subport->tb_credits;
+    uint64_t subport_tc_credits = subport->tc_credits[tc_index];
+    uint64_t pipe_tb_credits = pipe->tb_credits;
+    uint64_t pipe_tc_credits = pipe->tc_credits[tc_index];
+    int enough_credits;
+
+    /* Check pipe and subport credits */
+    enough_credits = (pkt_len <= subport_tb_credits) &&
+        (pkt_len <= subport_tc_credits) &&
+        (pkt_len <= pipe_tb_credits) &&
+        (pkt_len <= pipe_tc_credits);
+
+    if (!enough_credits)
+        return 0;
+
+    /* Update pipe and subport credits */
+    subport->tb_credits -= pkt_len;
+    subport->tc_credits[tc_index] -= pkt_len;
+    pipe->tb_credits -= pkt_len;
+    pipe->tc_credits[tc_index] -= pkt_len;
+
+    return 1;
+}
+
+static inline int
+grinder_credits_check_with_tc_ov(struct rte_sched_port *port,
+    struct rte_sched_subport *subport, uint32_t pos)
 {
     struct rte_sched_grinder *grinder = subport->grinder + pos;
     struct rte_sched_pipe *pipe = grinder->pipe;
@@ -2403,8 +2481,13 @@ grinder_schedule(struct rte_sched_port *port,
     uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
     uint32_t be_tc_active;
 
-    if (!grinder_credits_check(port, subport, pos))
-        return 0;
+    if (subport->tc_ov_enabled) {
+        if (!grinder_credits_check_with_tc_ov(port, subport, pos))
+            return 0;
+    } else {
+        if (!grinder_credits_check(port, subport, pos))
+            return 0;
+    }
 
     /* Advance port time */
     port->time += pkt_len;
@@ -2770,7 +2853,11 @@ grinder_handle(struct rte_sched_port *port,
                 subport->profile;
 
         grinder_prefetch_tc_queue_arrays(subport, pos);
-        grinder_credits_update(port, subport, pos);
+
+        if (subport->tc_ov_enabled)
+            grinder_credits_update_with_tc_ov(port, subport, pos);
+        else
+            grinder_credits_update(port, subport, pos);
 
         grinder->state = e_GRINDER_PREFETCH_MBUF;
         return 0;
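
A usage sketch, under stated assumptions: since v6 removed the
rte_sched_subport_tc_ov_config() declaration, in this version the flag is
only ever set internally by rte_sched_subport_config(), so every
configured subport runs with TC OV enabled. The helper name and all
parameter values below are hypothetical placeholders, not part of the
patch:

    #include <rte_sched.h>

    /* Sketch: configure one subport after this patch is applied. The
     * helper and its arguments are placeholders; rte_sched_subport_config()
     * is the existing DPDK API, and it sets tc_ov_enabled = 1 internally
     * per the hunk above. */
    static int
    setup_subport(struct rte_sched_port *port, uint32_t subport_id,
        struct rte_sched_subport_params *sp_params, uint32_t profile_id)
    {
        int ret = rte_sched_subport_config(port, subport_id, sp_params,
            profile_id);
        if (ret != 0)
            return ret; /* application error handling goes here */

        /* From here on, grinder_schedule() takes the *_with_tc_ov()
         * credit check/update paths for this subport's traffic. */
        return 0;
    }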