From patchwork Thu Dec 22 01:48:38 2022
X-Patchwork-Submitter: "Ling, WeiX"
X-Patchwork-Id: 121245
From: Wei Ling
To: dts@dpdk.org
Cc: Wei Ling
Subject: [dts][PATCH V7] framework/qemu_kvm: pin VM's threads to vhost CPU lcore
Date: Thu, 22 Dec 2022 09:48:38 +0800
Message-Id: <20221222014838.173362-1-weix.ling@intel.com>

1) Pin the VM's threads to vhost CPU lcores after starting the VM.
2) Fix the daemon option check in the add_vm_daemon() method.
3) Modify pin_threads() to pin the VM's threads to the vhost CPU lcores.

Signed-off-by: Wei Ling
Reviewed-by: Lijuan Tu
---
 framework/qemu_kvm.py | 41 ++++++++++++++++++++++++++++++++---------
 1 file changed, 32 insertions(+), 9 deletions(-)

diff --git a/framework/qemu_kvm.py b/framework/qemu_kvm.py
index 20aa8008..dd8e7857 100644
--- a/framework/qemu_kvm.py
+++ b/framework/qemu_kvm.py
@@ -1241,7 +1241,7 @@ class QEMUKvm(VirtBase):
         By default VM will start with the daemonize status.
         Not support starting it on the stdin now.
""" - if "daemon" in list(options.keys()) and options["enable"] == "no": + if "enable" in list(options.keys()) and options["enable"] == "no": pass else: daemon_boot_line = "-daemonize" @@ -1377,6 +1377,10 @@ class QEMUKvm(VirtBase): self.__get_pci_mapping() + # pin VM threads with host CPU cores + lcores = self.vcpus_pinned_to_vm.split(" ") + self.pin_threads(lcores=lcores) + # query status self.update_status() @@ -2004,13 +2008,32 @@ class QEMUKvm(VirtBase): def pin_threads(self, lcores): """ Pin thread to assigned cores - """ - thread_reg = r"CPU #(\d+): .* thread_id=(\d+)" + If threads <= lcores, like: threads=[427756, 427757], lcores=[48, 49, 50]: + taskset -pc 48 427756 + taskset -pc 49 427757 + + If threads > lcores, like threads=[427756, 427757, 427758, 427759, 427760], lcores=[48,49,50] + taskset -pc 48 427756 + taskset -pc 49 427757 + taskset -pc 50 427758 + taskset -pc 48 427759 + taskset -pc 49 427760 + """ + thread_reg = r"CPU #\d+: thread_id=(\d+)" output = self.__monitor_session("info", "cpus") - thread_cores = re.findall(thread_reg, output) - cores_map = list(zip(thread_cores, lcores)) - for thread_info, core_id in cores_map: - cpu_id, thread_id = thread_info - self.host_session.send_expect( - "taskset -pc %d %s" % (core_id, thread_id), "#" + threads = re.findall(thread_reg, output) + if len(threads) <= len(lcores): + map = list(zip(threads, lcores)) + else: + self.host_logger.warning( + "lcores is less than VM's threads, 1 lcore will pin multiple VM's threads" ) + lcore_len = len(lcores) + for item in threads: + thread_idx = threads.index(item) + if thread_idx >= lcore_len: + lcore_idx = thread_idx % lcore_len + lcores.append(lcores[lcore_idx]) + map = list(zip(threads, lcores)) + for thread, lcore in map: + self.host_session.send_expect("taskset -pc %s %s" % (lcore, thread), "#")