From patchwork Tue Jan 17 15:48:57 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 122188 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 824B5423FE; Tue, 17 Jan 2023 16:49:15 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 1B22C400EF; Tue, 17 Jan 2023 16:49:15 +0100 (CET) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id 7287B400D4 for ; Tue, 17 Jan 2023 16:49:13 +0100 (CET) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 2F17F1D8129; Tue, 17 Jan 2023 16:49:10 +0100 (CET) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id IpGoQp-rxarC; Tue, 17 Jan 2023 16:49:07 +0100 (CET) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id 2F7871D811C; Tue, 17 Jan 2023 16:49:07 +0100 (CET) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com, bruce.richardson@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [PATCH v3 01/10] dts: add node and os abstractions Date: Tue, 17 Jan 2023 15:48:57 +0000 Message-Id: <20230117154906.860916-2-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230117154906.860916-1-juraj.linkes@pantheon.tech> References: <20221114165438.1133783-1-juraj.linkes@pantheon.tech> <20230117154906.860916-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: 
list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org The abstraction model in DTS is as follows: Node, defining and implementing methods common to and the base of SUT (system under test) Node and TG (traffic generator) Node. Remote Session, defining and implementing methods common to any remote session implementation, such as SSH Session. OSSession, defining and implementing methods common to any operating system/distribution, such as Linux. OSSession uses a derived Remote Session and Node in turn uses a derived OSSession. This split delegates OS-specific and connection-specific code to specialized classes designed to handle the differences. The base classes implement the methods or parts of methods that are common to all implementations and defines abstract methods that must be implemented by derived classes. Part of the abstractions is the DTS test execution skeleton: execution setup, build setup and then test execution. 
Signed-off-by: Juraj Linkeš --- dts/conf.yaml | 11 +- dts/framework/config/__init__.py | 73 +++++++- dts/framework/config/conf_yaml_schema.json | 76 +++++++- dts/framework/dts.py | 162 ++++++++++++++---- dts/framework/exception.py | 46 ++++- dts/framework/logger.py | 24 +-- dts/framework/remote_session/__init__.py | 30 +++- dts/framework/remote_session/linux_session.py | 11 ++ dts/framework/remote_session/os_session.py | 46 +++++ dts/framework/remote_session/posix_session.py | 12 ++ .../remote_session/remote/__init__.py | 16 ++ .../{ => remote}/remote_session.py | 41 +++-- .../{ => remote}/ssh_session.py | 20 +-- dts/framework/testbed_model/__init__.py | 10 +- dts/framework/testbed_model/node.py | 104 ++++++++--- dts/framework/testbed_model/sut_node.py | 13 ++ 16 files changed, 583 insertions(+), 112 deletions(-) create mode 100644 dts/framework/remote_session/linux_session.py create mode 100644 dts/framework/remote_session/os_session.py create mode 100644 dts/framework/remote_session/posix_session.py create mode 100644 dts/framework/remote_session/remote/__init__.py rename dts/framework/remote_session/{ => remote}/remote_session.py (61%) rename dts/framework/remote_session/{ => remote}/ssh_session.py (91%) create mode 100644 dts/framework/testbed_model/sut_node.py diff --git a/dts/conf.yaml b/dts/conf.yaml index 1aaa593612..03696d2bab 100644 --- a/dts/conf.yaml +++ b/dts/conf.yaml @@ -1,9 +1,16 @@ # SPDX-License-Identifier: BSD-3-Clause -# Copyright 2022 The DPDK contributors +# Copyright 2022-2023 The DPDK contributors executions: - - system_under_test: "SUT 1" + - build_targets: + - arch: x86_64 + os: linux + cpu: native + compiler: gcc + compiler_wrapper: ccache + system_under_test: "SUT 1" nodes: - name: "SUT 1" hostname: sut1.change.me.localhost user: root + os: linux diff --git a/dts/framework/config/__init__.py b/dts/framework/config/__init__.py index 214be8e7f4..e3e2d74eac 100644 --- a/dts/framework/config/__init__.py +++ b/dts/framework/config/__init__.py 
@@ -1,15 +1,17 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2010-2021 Intel Corporation -# Copyright(c) 2022 University of New Hampshire +# Copyright(c) 2022-2023 University of New Hampshire +# Copyright(c) 2023 PANTHEON.tech s.r.o. """ -Generic port and topology nodes configuration file load function +Yaml config parsing methods """ import json import os.path import pathlib from dataclasses import dataclass +from enum import Enum, auto, unique from typing import Any import warlock # type: ignore @@ -18,6 +20,47 @@ from framework.settings import SETTINGS +class StrEnum(Enum): + @staticmethod + def _generate_next_value_( + name: str, start: int, count: int, last_values: object + ) -> str: + return name + + +@unique +class Architecture(StrEnum): + i686 = auto() + x86_64 = auto() + x86_32 = auto() + arm64 = auto() + ppc64le = auto() + + +@unique +class OS(StrEnum): + linux = auto() + freebsd = auto() + windows = auto() + + +@unique +class CPUType(StrEnum): + native = auto() + armv8a = auto() + dpaa2 = auto() + thunderx = auto() + xgene1 = auto() + + +@unique +class Compiler(StrEnum): + gcc = auto() + clang = auto() + icc = auto() + msvc = auto() + + # Slots enables some optimizations, by pre-allocating space for the defined # attributes in the underlying data structure. 
# @@ -29,6 +72,7 @@ class NodeConfiguration: hostname: str user: str password: str | None + os: OS @staticmethod def from_dict(d: dict) -> "NodeConfiguration": @@ -37,19 +81,44 @@ def from_dict(d: dict) -> "NodeConfiguration": hostname=d["hostname"], user=d["user"], password=d.get("password"), + os=OS(d["os"]), + ) + + +@dataclass(slots=True, frozen=True) +class BuildTargetConfiguration: + arch: Architecture + os: OS + cpu: CPUType + compiler: Compiler + name: str + + @staticmethod + def from_dict(d: dict) -> "BuildTargetConfiguration": + return BuildTargetConfiguration( + arch=Architecture(d["arch"]), + os=OS(d["os"]), + cpu=CPUType(d["cpu"]), + compiler=Compiler(d["compiler"]), + name=f"{d['arch']}-{d['os']}-{d['cpu']}-{d['compiler']}", ) @dataclass(slots=True, frozen=True) class ExecutionConfiguration: + build_targets: list[BuildTargetConfiguration] system_under_test: NodeConfiguration @staticmethod def from_dict(d: dict, node_map: dict) -> "ExecutionConfiguration": + build_targets: list[BuildTargetConfiguration] = list( + map(BuildTargetConfiguration.from_dict, d["build_targets"]) + ) sut_name = d["system_under_test"] assert sut_name in node_map, f"Unknown SUT {sut_name} in execution {d}" return ExecutionConfiguration( + build_targets=build_targets, system_under_test=node_map[sut_name], ) diff --git a/dts/framework/config/conf_yaml_schema.json b/dts/framework/config/conf_yaml_schema.json index 6b8d6ccd05..9170307fbe 100644 --- a/dts/framework/config/conf_yaml_schema.json +++ b/dts/framework/config/conf_yaml_schema.json @@ -5,6 +5,68 @@ "node_name": { "type": "string", "description": "A unique identifier for a node" + }, + "OS": { + "type": "string", + "enum": [ + "linux" + ] + }, + "cpu": { + "type": "string", + "description": "Native should be the default on x86", + "enum": [ + "native", + "armv8a", + "dpaa2", + "thunderx", + "xgene1" + ] + }, + "compiler": { + "type": "string", + "enum": [ + "gcc", + "clang", + "icc", + "mscv" + ] + }, + "build_target": { + 
"type": "object", + "description": "Targets supported by DTS", + "properties": { + "arch": { + "type": "string", + "enum": [ + "ALL", + "x86_64", + "arm64", + "ppc64le", + "other" + ] + }, + "os": { + "$ref": "#/definitions/OS" + }, + "cpu": { + "$ref": "#/definitions/cpu" + }, + "compiler": { + "$ref": "#/definitions/compiler" + }, + "compiler_wrapper": { + "type": "string", + "description": "This will be added before compiler to the CC variable when building DPDK. Optional." + } + }, + "additionalProperties": false, + "required": [ + "arch", + "os", + "cpu", + "compiler" + ] } }, "type": "object", @@ -29,13 +91,17 @@ "password": { "type": "string", "description": "The password to use on this node. Use only as a last resort. SSH keys are STRONGLY preferred." + }, + "os": { + "$ref": "#/definitions/OS" } }, "additionalProperties": false, "required": [ "name", "hostname", - "user" + "user", + "os" ] }, "minimum": 1 @@ -45,12 +111,20 @@ "items": { "type": "object", "properties": { + "build_targets": { + "type": "array", + "items": { + "$ref": "#/definitions/build_target" + }, + "minimum": 1 + }, "system_under_test": { "$ref": "#/definitions/node_name" } }, "additionalProperties": false, "required": [ + "build_targets", "system_under_test" ] }, diff --git a/dts/framework/dts.py b/dts/framework/dts.py index d23cfc4526..6ea7c6e736 100644 --- a/dts/framework/dts.py +++ b/dts/framework/dts.py @@ -1,67 +1,157 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2010-2019 Intel Corporation -# Copyright(c) 2022 PANTHEON.tech s.r.o. -# Copyright(c) 2022 University of New Hampshire +# Copyright(c) 2022-2023 PANTHEON.tech s.r.o. 
+# Copyright(c) 2022-2023 University of New Hampshire import sys -import traceback -from collections.abc import Iterable -from framework.testbed_model.node import Node - -from .config import CONFIGURATION +from .config import CONFIGURATION, BuildTargetConfiguration, ExecutionConfiguration +from .exception import DTSError, ErrorSeverity from .logger import DTSLOG, getLogger +from .testbed_model import SutNode from .utils import check_dts_python_version -dts_logger: DTSLOG | None = None +dts_logger: DTSLOG = getLogger("dts_runner") +errors = [] def run_all() -> None: """ - Main process of DTS, it will run all test suites in the config file. + The main process of DTS. Runs all build targets in all executions from the main + config file. """ - global dts_logger + global errors # check the python version of the server that run dts check_dts_python_version() - dts_logger = getLogger("dts") - - nodes = {} - # This try/finally block means "Run the try block, if there is an exception, - # run the finally block before passing it upward. If there is not an exception, - # run the finally block after the try block is finished." This helps avoid the - # problem of python's interpreter exit context, which essentially prevents you - # from making certain system calls. This makes cleaning up resources difficult, - # since most of the resources in DTS are network-based, which is restricted. 
+ nodes: dict[str, SutNode] = {} try: # for all Execution sections for execution in CONFIGURATION.executions: - sut_config = execution.system_under_test - if sut_config.name not in nodes: - node = Node(sut_config) - nodes[sut_config.name] = node - node.send_command("echo Hello World") + sut_node = None + if execution.system_under_test.name in nodes: + # a Node with the same name already exists + sut_node = nodes[execution.system_under_test.name] + else: + # the SUT has not been initialized yet + try: + sut_node = SutNode(execution.system_under_test) + except Exception as e: + dts_logger.exception( + f"Connection to node {execution.system_under_test} failed." + ) + errors.append(e) + else: + nodes[sut_node.name] = sut_node + + if sut_node: + _run_execution(sut_node, execution) + + except Exception as e: + dts_logger.exception("An unexpected error has occurred.") + errors.append(e) + raise + + finally: + try: + for node in nodes.values(): + node.close() + except Exception as e: + dts_logger.exception("Final cleanup of nodes failed.") + errors.append(e) + # we need to put the sys.exit call outside the finally clause to make sure + # that unexpected exceptions will propagate + # in that case, the error that should be reported is the uncaught exception as + # that is a severe error originating from the framework + # at that point, we'll only have partial results which could be impacted by the + # error causing the uncaught exception, making them uninterpretable + _exit_dts() + + +def _run_execution(sut_node: SutNode, execution: ExecutionConfiguration) -> None: + """ + Run the given execution. This involves running the execution setup as well as + running all build targets in the given execution. 
+ """ + dts_logger.info(f"Running execution with SUT '{execution.system_under_test.name}'.") + + try: + sut_node.set_up_execution(execution) except Exception as e: - # sys.exit() doesn't produce a stack trace, need to print it explicitly - traceback.print_exc() - raise e + dts_logger.exception("Execution setup failed.") + errors.append(e) + + else: + for build_target in execution.build_targets: + _run_build_target(sut_node, build_target, execution) finally: - quit_execution(nodes.values()) + try: + sut_node.tear_down_execution() + except Exception as e: + dts_logger.exception("Execution teardown failed.") + errors.append(e) -def quit_execution(sut_nodes: Iterable[Node]) -> None: +def _run_build_target( + sut_node: SutNode, + build_target: BuildTargetConfiguration, + execution: ExecutionConfiguration, +) -> None: """ - Close session to SUT and TG before quit. - Return exit status when failure occurred. + Run the given build target. """ - for sut_node in sut_nodes: - # close all session - sut_node.node_exit() + dts_logger.info(f"Running build target '{build_target.name}'.") + + try: + sut_node.set_up_build_target(build_target) + except Exception as e: + dts_logger.exception("Build target setup failed.") + errors.append(e) + + else: + _run_suites(sut_node, execution) + + finally: + try: + sut_node.tear_down_build_target() + except Exception as e: + dts_logger.exception("Build target teardown failed.") + errors.append(e) + + +def _run_suites( + sut_node: SutNode, + execution: ExecutionConfiguration, +) -> None: + """ + Use the given build_target to run execution's test suites + with possibly only a subset of test cases. + If no subset is specified, run all test cases. + """ + + +def _exit_dts() -> None: + """ + Process all errors and exit with the proper exit code. 
+ """ + if errors and dts_logger: + dts_logger.debug("Summary of errors:") + for error in errors: + dts_logger.debug(repr(error)) + + return_code = ErrorSeverity.NO_ERR + for error in errors: + error_return_code = ErrorSeverity.GENERIC_ERR + if isinstance(error, DTSError): + error_return_code = error.severity + + if error_return_code > return_code: + return_code = error_return_code - if dts_logger is not None: + if dts_logger: dts_logger.info("DTS execution has ended.") - sys.exit(0) + sys.exit(return_code) diff --git a/dts/framework/exception.py b/dts/framework/exception.py index 8b2f08a8f0..121a0f7296 100644 --- a/dts/framework/exception.py +++ b/dts/framework/exception.py @@ -1,20 +1,46 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2010-2014 Intel Corporation -# Copyright(c) 2022 PANTHEON.tech s.r.o. -# Copyright(c) 2022 University of New Hampshire +# Copyright(c) 2022-2023 PANTHEON.tech s.r.o. +# Copyright(c) 2022-2023 University of New Hampshire """ User-defined exceptions used across the framework. """ +from enum import IntEnum, unique +from typing import ClassVar -class SSHTimeoutError(Exception): + +@unique +class ErrorSeverity(IntEnum): + """ + The severity of errors that occur during DTS execution. + All exceptions are caught and the most severe error is used as return code. + """ + + NO_ERR = 0 + GENERIC_ERR = 1 + CONFIG_ERR = 2 + SSH_ERR = 3 + + +class DTSError(Exception): + """ + The base exception from which all DTS exceptions are derived. + Stores error severity. + """ + + severity: ClassVar[ErrorSeverity] = ErrorSeverity.GENERIC_ERR + + +class SSHTimeoutError(DTSError): """ Command execution timeout. """ command: str output: str + severity: ClassVar[ErrorSeverity] = ErrorSeverity.SSH_ERR def __init__(self, command: str, output: str): self.command = command @@ -27,12 +53,13 @@ def get_output(self) -> str: return self.output -class SSHConnectionError(Exception): +class SSHConnectionError(DTSError): """ SSH connection error. 
""" host: str + severity: ClassVar[ErrorSeverity] = ErrorSeverity.SSH_ERR def __init__(self, host: str): self.host = host @@ -41,16 +68,25 @@ def __str__(self) -> str: return f"Error trying to connect with {self.host}" -class SSHSessionDeadError(Exception): +class SSHSessionDeadError(DTSError): """ SSH session is not alive. It can no longer be used. """ host: str + severity: ClassVar[ErrorSeverity] = ErrorSeverity.SSH_ERR def __init__(self, host: str): self.host = host def __str__(self) -> str: return f"SSH session with {self.host} has died" + + +class ConfigurationError(DTSError): + """ + Raised when an invalid configuration is encountered. + """ + + severity: ClassVar[ErrorSeverity] = ErrorSeverity.CONFIG_ERR diff --git a/dts/framework/logger.py b/dts/framework/logger.py index a31fcc8242..bb2991e994 100644 --- a/dts/framework/logger.py +++ b/dts/framework/logger.py @@ -1,7 +1,7 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2010-2014 Intel Corporation -# Copyright(c) 2022 PANTHEON.tech s.r.o. -# Copyright(c) 2022 University of New Hampshire +# Copyright(c) 2022-2023 PANTHEON.tech s.r.o. +# Copyright(c) 2022-2023 University of New Hampshire """ DTS logger module with several log level. DTS framework and TestSuite logs @@ -33,17 +33,17 @@ class DTSLOG(logging.LoggerAdapter): DTS log class for framework and testsuite. 
""" - logger: logging.Logger + _logger: logging.Logger node: str sh: logging.StreamHandler fh: logging.FileHandler verbose_fh: logging.FileHandler def __init__(self, logger: logging.Logger, node: str = "suite"): - self.logger = logger + self._logger = logger # 1 means log everything, this will be used by file handlers if their level # is not set - self.logger.setLevel(1) + self._logger.setLevel(1) self.node = node @@ -55,9 +55,13 @@ def __init__(self, logger: logging.Logger, node: str = "suite"): if SETTINGS.verbose is True: sh.setLevel(logging.DEBUG) - self.logger.addHandler(sh) + self._logger.addHandler(sh) self.sh = sh + # prepare the output folder + if not os.path.exists(SETTINGS.output_dir): + os.mkdir(SETTINGS.output_dir) + logging_path_prefix = os.path.join(SETTINGS.output_dir, node) fh = logging.FileHandler(f"{logging_path_prefix}.log") @@ -68,7 +72,7 @@ def __init__(self, logger: logging.Logger, node: str = "suite"): ) ) - self.logger.addHandler(fh) + self._logger.addHandler(fh) self.fh = fh # This outputs EVERYTHING, intended for post-mortem debugging @@ -82,10 +86,10 @@ def __init__(self, logger: logging.Logger, node: str = "suite"): ) ) - self.logger.addHandler(verbose_fh) + self._logger.addHandler(verbose_fh) self.verbose_fh = verbose_fh - super(DTSLOG, self).__init__(self.logger, dict(node=self.node)) + super(DTSLOG, self).__init__(self._logger, dict(node=self.node)) def logger_exit(self) -> None: """ @@ -93,7 +97,7 @@ def logger_exit(self) -> None: """ for handler in (self.sh, self.fh, self.verbose_fh): handler.flush() - self.logger.removeHandler(handler) + self._logger.removeHandler(handler) def getLogger(name: str, node: str = "suite") -> DTSLOG: diff --git a/dts/framework/remote_session/__init__.py b/dts/framework/remote_session/__init__.py index a227d8db22..747316c78a 100644 --- a/dts/framework/remote_session/__init__.py +++ b/dts/framework/remote_session/__init__.py @@ -1,14 +1,30 @@ # SPDX-License-Identifier: BSD-3-Clause -# Copyright(c) 2022 
PANTHEON.tech s.r.o. +# Copyright(c) 2023 PANTHEON.tech s.r.o. -from framework.config import NodeConfiguration +""" +The package provides modules for managing remote connections to a remote host (node), +differentiated by OS. +The package provides a factory function, create_session, that returns the appropriate +remote connection based on the passed configuration. The differences are in the +underlying transport protocol (e.g. SSH) and remote OS (e.g. Linux). +""" + +# pylama:ignore=W0611 + +from framework.config import OS, NodeConfiguration +from framework.exception import ConfigurationError from framework.logger import DTSLOG -from .remote_session import RemoteSession -from .ssh_session import SSHSession +from .linux_session import LinuxSession +from .os_session import OSSession +from .remote import RemoteSession, SSHSession -def create_remote_session( +def create_session( node_config: NodeConfiguration, name: str, logger: DTSLOG -) -> RemoteSession: - return SSHSession(node_config, name, logger) +) -> OSSession: + match node_config.os: + case OS.linux: + return LinuxSession(node_config, name, logger) + case _: + raise ConfigurationError(f"Unsupported OS {node_config.os}") diff --git a/dts/framework/remote_session/linux_session.py b/dts/framework/remote_session/linux_session.py new file mode 100644 index 0000000000..9d14166077 --- /dev/null +++ b/dts/framework/remote_session/linux_session.py @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2023 PANTHEON.tech s.r.o. +# Copyright(c) 2023 University of New Hampshire + +from .posix_session import PosixSession + + +class LinuxSession(PosixSession): + """ + The implementation of non-Posix compliant parts of Linux remote sessions. 
+ """ diff --git a/dts/framework/remote_session/os_session.py b/dts/framework/remote_session/os_session.py new file mode 100644 index 0000000000..7a4cc5e669 --- /dev/null +++ b/dts/framework/remote_session/os_session.py @@ -0,0 +1,46 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2023 PANTHEON.tech s.r.o. +# Copyright(c) 2023 University of New Hampshire + +from abc import ABC + +from framework.config import NodeConfiguration +from framework.logger import DTSLOG + +from .remote import RemoteSession, create_remote_session + + +class OSSession(ABC): + """ + The OS classes create a DTS node remote session and implement OS specific + behavior. There a few control methods implemented by the base class, the rest need + to be implemented by derived classes. + """ + + _config: NodeConfiguration + name: str + _logger: DTSLOG + remote_session: RemoteSession + + def __init__( + self, + node_config: NodeConfiguration, + name: str, + logger: DTSLOG, + ): + self._config = node_config + self.name = name + self._logger = logger + self.remote_session = create_remote_session(node_config, name, logger) + + def close(self, force: bool = False) -> None: + """ + Close the remote session. + """ + self.remote_session.close(force) + + def is_alive(self) -> bool: + """ + Check whether the remote session is still responding. + """ + return self.remote_session.is_alive() diff --git a/dts/framework/remote_session/posix_session.py b/dts/framework/remote_session/posix_session.py new file mode 100644 index 0000000000..110b6a4804 --- /dev/null +++ b/dts/framework/remote_session/posix_session.py @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2023 PANTHEON.tech s.r.o. +# Copyright(c) 2023 University of New Hampshire + +from .os_session import OSSession + + +class PosixSession(OSSession): + """ + An intermediary class implementing the Posix compliant parts of + Linux and other OS remote sessions. 
+ """ diff --git a/dts/framework/remote_session/remote/__init__.py b/dts/framework/remote_session/remote/__init__.py new file mode 100644 index 0000000000..f3092f8bbe --- /dev/null +++ b/dts/framework/remote_session/remote/__init__.py @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2023 PANTHEON.tech s.r.o. + +# pylama:ignore=W0611 + +from framework.config import NodeConfiguration +from framework.logger import DTSLOG + +from .remote_session import RemoteSession +from .ssh_session import SSHSession + + +def create_remote_session( + node_config: NodeConfiguration, name: str, logger: DTSLOG +) -> RemoteSession: + return SSHSession(node_config, name, logger) diff --git a/dts/framework/remote_session/remote_session.py b/dts/framework/remote_session/remote/remote_session.py similarity index 61% rename from dts/framework/remote_session/remote_session.py rename to dts/framework/remote_session/remote/remote_session.py index 33047d9d0a..7c7b30225f 100644 --- a/dts/framework/remote_session/remote_session.py +++ b/dts/framework/remote_session/remote/remote_session.py @@ -1,7 +1,7 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2010-2014 Intel Corporation -# Copyright(c) 2022 PANTHEON.tech s.r.o. -# Copyright(c) 2022 University of New Hampshire +# Copyright(c) 2022-2023 PANTHEON.tech s.r.o. +# Copyright(c) 2022-2023 University of New Hampshire import dataclasses from abc import ABC, abstractmethod @@ -19,14 +19,23 @@ class HistoryRecord: class RemoteSession(ABC): + """ + The base class for defining which methods must be implemented in order to connect + to a remote host (node) and maintain a remote session. The derived classes are + supposed to implement/use some underlying transport protocol (e.g. SSH) to + implement the methods. On top of that, it provides some basic services common to + all derived classes, such as keeping history and logging what's being executed + on the remote node. 
+ """ + name: str hostname: str ip: str port: int | None username: str password: str - logger: DTSLOG history: list[HistoryRecord] + _logger: DTSLOG _node_config: NodeConfiguration def __init__( @@ -46,31 +55,34 @@ def __init__( self.port = int(port) self.username = node_config.user self.password = node_config.password or "" - self.logger = logger self.history = [] - self.logger.info(f"Connecting to {self.username}@{self.hostname}.") + self._logger = logger + self._logger.info(f"Connecting to {self.username}@{self.hostname}.") self._connect() - self.logger.info(f"Connection to {self.username}@{self.hostname} successful.") + self._logger.info(f"Connection to {self.username}@{self.hostname} successful.") @abstractmethod def _connect(self) -> None: """ Create connection to assigned node. """ - pass def send_command(self, command: str, timeout: float = SETTINGS.timeout) -> str: - self.logger.info(f"Sending: {command}") + """ + Send a command and return the output. + """ + self._logger.info(f"Sending: {command}") out = self._send_command(command, timeout) - self.logger.debug(f"Received from {command}: {out}") + self._logger.debug(f"Received from {command}: {out}") self._history_add(command=command, output=out) return out @abstractmethod def _send_command(self, command: str, timeout: float) -> str: """ - Send a command and return the output. + Use the underlying protocol to execute the command and return the output + of the command. """ def _history_add(self, command: str, output: str) -> None: @@ -79,17 +91,20 @@ def _history_add(self, command: str, output: str) -> None: ) def close(self, force: bool = False) -> None: - self.logger.logger_exit() + """ + Close the remote session and free all used resources. + """ + self._logger.logger_exit() self._close(force) @abstractmethod def _close(self, force: bool = False) -> None: """ - Close the remote session, freeing all used resources. + Execute protocol specific steps needed to close the session properly. 
""" @abstractmethod def is_alive(self) -> bool: """ - Check whether the session is still responding. + Check whether the remote session is still responding. """ diff --git a/dts/framework/remote_session/ssh_session.py b/dts/framework/remote_session/remote/ssh_session.py similarity index 91% rename from dts/framework/remote_session/ssh_session.py rename to dts/framework/remote_session/remote/ssh_session.py index 7ec327054d..96175f5284 100644 --- a/dts/framework/remote_session/ssh_session.py +++ b/dts/framework/remote_session/remote/ssh_session.py @@ -1,7 +1,7 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2010-2014 Intel Corporation -# Copyright(c) 2022 PANTHEON.tech s.r.o. -# Copyright(c) 2022 University of New Hampshire +# Copyright(c) 2022-2023 PANTHEON.tech s.r.o. +# Copyright(c) 2022-2023 University of New Hampshire import time @@ -17,7 +17,7 @@ class SSHSession(RemoteSession): """ - Module for creating Pexpect SSH sessions to a node. + Module for creating Pexpect SSH remote sessions. """ session: pxssh.pxssh @@ -56,9 +56,9 @@ def _connect(self) -> None: ) break except Exception as e: - self.logger.warning(e) + self._logger.warning(e) time.sleep(2) - self.logger.info( + self._logger.info( f"Retrying connection: retry number {retry_attempt + 1}." 
) else: @@ -67,13 +67,13 @@ def _connect(self) -> None: self.send_expect("stty -echo", "#") self.send_expect("stty columns 1000", "#") except Exception as e: - self.logger.error(RED(str(e))) + self._logger.error(RED(str(e))) if getattr(self, "port", None): suggestion = ( f"\nSuggestion: Check if the firewall on {self.hostname} is " f"stopped.\n" ) - self.logger.info(GREEN(suggestion)) + self._logger.info(GREEN(suggestion)) raise SSHConnectionError(self.hostname) @@ -87,8 +87,8 @@ def send_expect( try: retval = int(ret_status) if retval: - self.logger.error(f"Command: {command} failure!") - self.logger.error(ret) + self._logger.error(f"Command: {command} failure!") + self._logger.error(ret) return retval else: return ret @@ -97,7 +97,7 @@ def send_expect( else: return ret except Exception as e: - self.logger.error( + self._logger.error( f"Exception happened in [{command}] and output is " f"[{self._get_output()}]" ) diff --git a/dts/framework/testbed_model/__init__.py b/dts/framework/testbed_model/__init__.py index c5512e5812..8ead9db482 100644 --- a/dts/framework/testbed_model/__init__.py +++ b/dts/framework/testbed_model/__init__.py @@ -1,7 +1,13 @@ # SPDX-License-Identifier: BSD-3-Clause -# Copyright(c) 2022 University of New Hampshire +# Copyright(c) 2022-2023 University of New Hampshire +# Copyright(c) 2023 PANTHEON.tech s.r.o. """ -This module contains the classes used to model the physical traffic generator, +This package contains the classes used to model the physical traffic generator, system under test and any other components that need to be interacted with. 
""" + +# pylama:ignore=W0611 + +from .node import Node +from .sut_node import SutNode diff --git a/dts/framework/testbed_model/node.py b/dts/framework/testbed_model/node.py index 8437975416..a37f7921e0 100644 --- a/dts/framework/testbed_model/node.py +++ b/dts/framework/testbed_model/node.py @@ -1,62 +1,118 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2010-2014 Intel Corporation -# Copyright(c) 2022 PANTHEON.tech s.r.o. -# Copyright(c) 2022 University of New Hampshire +# Copyright(c) 2022-2023 PANTHEON.tech s.r.o. +# Copyright(c) 2022-2023 University of New Hampshire """ A node is a generic host that DTS connects to and manages. """ -from framework.config import NodeConfiguration +from framework.config import ( + BuildTargetConfiguration, + ExecutionConfiguration, + NodeConfiguration, +) from framework.logger import DTSLOG, getLogger -from framework.remote_session import RemoteSession, create_remote_session -from framework.settings import SETTINGS +from framework.remote_session import OSSession, create_session class Node(object): """ - Basic module for node management. This module implements methods that + Basic class for node management. This class implements methods that manage a node, such as information gathering (of CPU/PCI/NIC) and environment setup. 
""" + main_session: OSSession + config: NodeConfiguration name: str - main_session: RemoteSession - logger: DTSLOG - _config: NodeConfiguration - _other_sessions: list[RemoteSession] + _logger: DTSLOG + _other_sessions: list[OSSession] def __init__(self, node_config: NodeConfiguration): - self._config = node_config + self.config = node_config + self.name = node_config.name + self._logger = getLogger(self.name) + self.main_session = create_session(self.config, self.name, self._logger) + self._other_sessions = [] - self.name = node_config.name - self.logger = getLogger(self.name) - self.logger.info(f"Created node: {self.name}") - self.main_session = create_remote_session(self._config, self.name, self.logger) + self._logger.info(f"Created node: {self.name}") - def send_command(self, cmds: str, timeout: float = SETTINGS.timeout) -> str: + def set_up_execution(self, execution_config: ExecutionConfiguration) -> None: """ - Send commands to node and return string before timeout. + Perform the execution setup that will be done for each execution + this node is part of. """ + self._set_up_execution(execution_config) - return self.main_session.send_command(cmds, timeout) + def _set_up_execution(self, execution_config: ExecutionConfiguration) -> None: + """ + This method exists to be optionally overwritten by derived classes and + is not decorated so that the derived class doesn't have to use the decorator. + """ - def create_session(self, name: str) -> RemoteSession: - connection = create_remote_session( - self._config, + def tear_down_execution(self) -> None: + """ + Perform the execution teardown that will be done after each execution + this node is part of concludes. + """ + self._tear_down_execution() + + def _tear_down_execution(self) -> None: + """ + This method exists to be optionally overwritten by derived classes and + is not decorated so that the derived class doesn't have to use the decorator. 
+ """ + + def set_up_build_target( + self, build_target_config: BuildTargetConfiguration + ) -> None: + """ + Perform the build target setup that will be done for each build target + tested on this node. + """ + self._set_up_build_target(build_target_config) + + def _set_up_build_target( + self, build_target_config: BuildTargetConfiguration + ) -> None: + """ + This method exists to be optionally overwritten by derived classes and + is not decorated so that the derived class doesn't have to use the decorator. + """ + + def tear_down_build_target(self) -> None: + """ + Perform the build target teardown that will be done after each build target + tested on this node. + """ + self._tear_down_build_target() + + def _tear_down_build_target(self) -> None: + """ + This method exists to be optionally overwritten by derived classes and + is not decorated so that the derived class doesn't have to use the decorator. + """ + + def create_session(self, name: str) -> OSSession: + """ + Create and return a new OSSession tailored to the remote OS. + """ + connection = create_session( + self.config, name, getLogger(name, node=self.name), ) self._other_sessions.append(connection) return connection - def node_exit(self) -> None: + def close(self) -> None: """ - Recover all resource before node exit + Close all connections and free other resources. """ if self.main_session: self.main_session.close() for session in self._other_sessions: session.close() - self.logger.logger_exit() + self._logger.logger_exit() diff --git a/dts/framework/testbed_model/sut_node.py b/dts/framework/testbed_model/sut_node.py new file mode 100644 index 0000000000..42acb6f9b2 --- /dev/null +++ b/dts/framework/testbed_model/sut_node.py @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation +# Copyright(c) 2023 PANTHEON.tech s.r.o. 
+ +from .node import Node + + +class SutNode(Node): + """ + A class for managing connections to the System under Test, providing + methods that retrieve the necessary information about the node (such as + CPU, memory and NIC details) and configuration capabilities. + """ From patchwork Tue Jan 17 15:48:58 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 122190 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 6A58D423FE; Tue, 17 Jan 2023 16:49:31 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 1B52A42D3E; Tue, 17 Jan 2023 16:49:17 +0100 (CET) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id 2EFE54067E for ; Tue, 17 Jan 2023 16:49:15 +0100 (CET) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 2236C1D812B; Tue, 17 Jan 2023 16:49:14 +0100 (CET) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id RofBXgHdfFsm; Tue, 17 Jan 2023 16:49:12 +0100 (CET) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id 035A31D811F; Tue, 17 Jan 2023 16:49:07 +0100 (CET) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com, bruce.richardson@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [PATCH v3 02/10] dts: add ssh command verification Date: Tue, 17 Jan 2023 15:48:58 +0000 Message-Id: <20230117154906.860916-3-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 
2.25.1 In-Reply-To: <20230117154906.860916-1-juraj.linkes@pantheon.tech> References: <20221114165438.1133783-1-juraj.linkes@pantheon.tech> <20230117154906.860916-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org This is a basic capability needed to check whether the command execution was successful or not. If not, raise a RemoteCommandExecutionError. When a failure is expected, the caller is supposed to catch the exception. Signed-off-by: Juraj Linkeš --- dts/framework/exception.py | 23 +++++++- .../remote_session/remote/remote_session.py | 55 +++++++++++++------ .../remote_session/remote/ssh_session.py | 11 +++- 3 files changed, 68 insertions(+), 21 deletions(-) diff --git a/dts/framework/exception.py b/dts/framework/exception.py index 121a0f7296..e776b42bd9 100644 --- a/dts/framework/exception.py +++ b/dts/framework/exception.py @@ -21,7 +21,8 @@ class ErrorSeverity(IntEnum): NO_ERR = 0 GENERIC_ERR = 1 CONFIG_ERR = 2 - SSH_ERR = 3 + REMOTE_CMD_EXEC_ERR = 3 + SSH_ERR = 4 class DTSError(Exception): @@ -90,3 +91,23 @@ class ConfigurationError(DTSError): """ severity: ClassVar[ErrorSeverity] = ErrorSeverity.CONFIG_ERR + + +class RemoteCommandExecutionError(DTSError): + """ + Raised when a command executed on a Node returns a non-zero exit status. 
+ """ + + command: str + command_return_code: int + severity: ClassVar[ErrorSeverity] = ErrorSeverity.REMOTE_CMD_EXEC_ERR + + def __init__(self, command: str, command_return_code: int): + self.command = command + self.command_return_code = command_return_code + + def __str__(self) -> str: + return ( + f"Command {self.command} returned a non-zero exit code: " + f"{self.command_return_code}" + ) diff --git a/dts/framework/remote_session/remote/remote_session.py b/dts/framework/remote_session/remote/remote_session.py index 7c7b30225f..5ac395ec79 100644 --- a/dts/framework/remote_session/remote/remote_session.py +++ b/dts/framework/remote_session/remote/remote_session.py @@ -7,15 +7,29 @@ from abc import ABC, abstractmethod from framework.config import NodeConfiguration +from framework.exception import RemoteCommandExecutionError from framework.logger import DTSLOG from framework.settings import SETTINGS @dataclasses.dataclass(slots=True, frozen=True) -class HistoryRecord: +class CommandResult: + """ + The result of remote execution of a command. + """ + name: str command: str - output: str | int + stdout: str + stderr: str + return_code: int + + def __str__(self) -> str: + return ( + f"stdout: '{self.stdout}'\n" + f"stderr: '{self.stderr}'\n" + f"return_code: '{self.return_code}'" + ) class RemoteSession(ABC): @@ -34,7 +48,7 @@ class RemoteSession(ABC): port: int | None username: str password: str - history: list[HistoryRecord] + history: list[CommandResult] _logger: DTSLOG _node_config: NodeConfiguration @@ -68,28 +82,33 @@ def _connect(self) -> None: Create connection to assigned node. """ - def send_command(self, command: str, timeout: float = SETTINGS.timeout) -> str: + def send_command( + self, command: str, timeout: float = SETTINGS.timeout, verify: bool = False + ) -> CommandResult: """ - Send a command and return the output. + Send a command to the connected node and return CommandResult. 
+ If verify is True, check the return code of the executed command + and raise a RemoteCommandExecutionError if the command failed. """ - self._logger.info(f"Sending: {command}") - out = self._send_command(command, timeout) - self._logger.debug(f"Received from {command}: {out}") - self._history_add(command=command, output=out) - return out + self._logger.info(f"Sending: '{command}'") + result = self._send_command(command, timeout) + if verify and result.return_code: + self._logger.debug( + f"Command '{command}' failed with return code '{result.return_code}'" + ) + self._logger.debug(f"stdout: '{result.stdout}'") + self._logger.debug(f"stderr: '{result.stderr}'") + raise RemoteCommandExecutionError(command, result.return_code) + self._logger.debug(f"Received from '{command}':\n{result}") + self.history.append(result) + return result @abstractmethod - def _send_command(self, command: str, timeout: float) -> str: + def _send_command(self, command: str, timeout: float) -> CommandResult: """ - Use the underlying protocol to execute the command and return the output - of the command. + Use the underlying protocol to execute the command and return CommandResult. """ - def _history_add(self, command: str, output: str) -> None: - self.history.append( - HistoryRecord(name=self.name, command=command, output=output) - ) - def close(self, force: bool = False) -> None: """ Close the remote session and free all used resources. 
diff --git a/dts/framework/remote_session/remote/ssh_session.py b/dts/framework/remote_session/remote/ssh_session.py index 96175f5284..6da5be9fff 100644 --- a/dts/framework/remote_session/remote/ssh_session.py +++ b/dts/framework/remote_session/remote/ssh_session.py @@ -12,7 +12,7 @@ from framework.logger import DTSLOG from framework.utils import GREEN, RED -from .remote_session import RemoteSession +from .remote_session import CommandResult, RemoteSession class SSHSession(RemoteSession): @@ -163,7 +163,14 @@ def _flush(self) -> None: def is_alive(self) -> bool: return self.session.isalive() - def _send_command(self, command: str, timeout: float) -> str: + def _send_command(self, command: str, timeout: float) -> CommandResult: + output = self._send_command_get_output(command, timeout) + return_code = int(self._send_command_get_output("echo $?", timeout)) + + # we're capturing only stdout + return CommandResult(self.name, command, output, "", return_code) + + def _send_command_get_output(self, command: str, timeout: float) -> str: try: self._clean_session() self._send_line(command) From patchwork Tue Jan 17 15:48:59 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 122191 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 4885C423FE; Tue, 17 Jan 2023 16:49:38 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id EBD3842D47; Tue, 17 Jan 2023 16:49:17 +0100 (CET) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id E5C3B410EE for ; Tue, 17 Jan 2023 16:49:15 +0100 (CET) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 
4953C1D811F; Tue, 17 Jan 2023 16:49:15 +0100 (CET) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id MeiEB7k-CBaK; Tue, 17 Jan 2023 16:49:13 +0100 (CET) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id A598B1D8122; Tue, 17 Jan 2023 16:49:08 +0100 (CET) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com, bruce.richardson@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [PATCH v3 03/10] dts: add dpdk build on sut Date: Tue, 17 Jan 2023 15:48:59 +0000 Message-Id: <20230117154906.860916-4-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230117154906.860916-1-juraj.linkes@pantheon.tech> References: <20221114165438.1133783-1-juraj.linkes@pantheon.tech> <20230117154906.860916-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Add the ability to build DPDK and apps on the SUT, using a configured target. 
Signed-off-by: Juraj Linkeš --- dts/framework/config/__init__.py | 2 + dts/framework/exception.py | 17 ++ dts/framework/remote_session/os_session.py | 90 +++++++++- dts/framework/remote_session/posix_session.py | 126 ++++++++++++++ .../remote_session/remote/remote_session.py | 38 ++++- .../remote_session/remote/ssh_session.py | 68 +++++++- dts/framework/settings.py | 55 +++++- dts/framework/testbed_model/__init__.py | 1 + dts/framework/testbed_model/dpdk.py | 33 ++++ dts/framework/testbed_model/sut_node.py | 158 ++++++++++++++++++ dts/framework/utils.py | 19 ++- 11 files changed, 589 insertions(+), 18 deletions(-) create mode 100644 dts/framework/testbed_model/dpdk.py diff --git a/dts/framework/config/__init__.py b/dts/framework/config/__init__.py index e3e2d74eac..ca61cb10fe 100644 --- a/dts/framework/config/__init__.py +++ b/dts/framework/config/__init__.py @@ -91,6 +91,7 @@ class BuildTargetConfiguration: os: OS cpu: CPUType compiler: Compiler + compiler_wrapper: str name: str @staticmethod @@ -100,6 +101,7 @@ def from_dict(d: dict) -> "BuildTargetConfiguration": os=OS(d["os"]), cpu=CPUType(d["cpu"]), compiler=Compiler(d["compiler"]), + compiler_wrapper=d.get("compiler_wrapper", ""), name=f"{d['arch']}-{d['os']}-{d['cpu']}-{d['compiler']}", ) diff --git a/dts/framework/exception.py b/dts/framework/exception.py index e776b42bd9..b4545a5a40 100644 --- a/dts/framework/exception.py +++ b/dts/framework/exception.py @@ -23,6 +23,7 @@ class ErrorSeverity(IntEnum): CONFIG_ERR = 2 REMOTE_CMD_EXEC_ERR = 3 SSH_ERR = 4 + DPDK_BUILD_ERR = 10 class DTSError(Exception): @@ -111,3 +112,19 @@ def __str__(self) -> str: f"Command {self.command} returned a non-zero exit code: " f"{self.command_return_code}" ) + + +class RemoteDirectoryExistsError(DTSError): + """ + Raised when a remote directory to be created already exists. 
+ """ + + severity: ClassVar[ErrorSeverity] = ErrorSeverity.REMOTE_CMD_EXEC_ERR + + +class DPDKBuildError(DTSError): + """ + Raised when DPDK build fails for any reason. + """ + + severity: ClassVar[ErrorSeverity] = ErrorSeverity.DPDK_BUILD_ERR diff --git a/dts/framework/remote_session/os_session.py b/dts/framework/remote_session/os_session.py index 7a4cc5e669..06d1ffefdd 100644 --- a/dts/framework/remote_session/os_session.py +++ b/dts/framework/remote_session/os_session.py @@ -2,10 +2,14 @@ # Copyright(c) 2023 PANTHEON.tech s.r.o. # Copyright(c) 2023 University of New Hampshire -from abc import ABC +from abc import ABC, abstractmethod +from pathlib import PurePath -from framework.config import NodeConfiguration +from framework.config import Architecture, NodeConfiguration from framework.logger import DTSLOG +from framework.settings import SETTINGS +from framework.testbed_model import MesonArgs +from framework.utils import EnvVarsDict from .remote import RemoteSession, create_remote_session @@ -44,3 +48,85 @@ def is_alive(self) -> bool: Check whether the remote session is still responding. """ return self.remote_session.is_alive() + + @abstractmethod + def guess_dpdk_remote_dir(self, remote_dir) -> PurePath: + """ + Try to find DPDK remote dir in remote_dir. + """ + + @abstractmethod + def get_remote_tmp_dir(self) -> PurePath: + """ + Get the path of the temporary directory of the remote OS. + """ + + @abstractmethod + def get_dpdk_build_env_vars(self, arch: Architecture) -> dict: + """ + Create extra environment variables needed for the target architecture. Get + information from the node if needed. + """ + + @abstractmethod + def join_remote_path(self, *args: str | PurePath) -> PurePath: + """ + Join path parts using the path separator that fits the remote OS. 
+ """ + + @abstractmethod + def copy_file( + self, + source_file: str | PurePath, + destination_file: str | PurePath, + source_remote: bool = False, + ) -> None: + """ + Copy source_file from local filesystem to destination_file + on the remote Node associated with the remote session. + If source_remote is True, reverse the direction - copy source_file from the + associated remote Node to destination_file on local storage. + """ + + @abstractmethod + def remove_remote_dir( + self, + remote_dir_path: str | PurePath, + recursive: bool = True, + force: bool = True, + ) -> None: + """ + Remove remote directory, by default remove recursively and forcefully. + """ + + @abstractmethod + def extract_remote_tarball( + self, + remote_tarball_path: str | PurePath, + expected_dir: str | PurePath | None = None, + ) -> None: + """ + Extract remote tarball in place. If expected_dir is a non-empty string, check + whether the dir exists after extracting the archive. + """ + + @abstractmethod + def build_dpdk( + self, + env_vars: EnvVarsDict, + meson_args: MesonArgs, + remote_dpdk_dir: str | PurePath, + remote_dpdk_build_dir: str | PurePath, + rebuild: bool = False, + timeout: float = SETTINGS.compile_timeout, + ) -> None: + """ + Build DPDK in the input dir with specified environment variables and meson + arguments. + """ + + @abstractmethod + def get_dpdk_version(self, version_path: str | PurePath) -> str: + """ + Inspect DPDK version on the remote node from version_path. + """ diff --git a/dts/framework/remote_session/posix_session.py b/dts/framework/remote_session/posix_session.py index 110b6a4804..d4da9f114e 100644 --- a/dts/framework/remote_session/posix_session.py +++ b/dts/framework/remote_session/posix_session.py @@ -2,6 +2,14 @@ # Copyright(c) 2023 PANTHEON.tech s.r.o. 
# Copyright(c) 2023 University of New Hampshire +from pathlib import PurePath, PurePosixPath + +from framework.config import Architecture +from framework.exception import DPDKBuildError, RemoteCommandExecutionError +from framework.settings import SETTINGS +from framework.testbed_model import MesonArgs +from framework.utils import EnvVarsDict + from .os_session import OSSession @@ -10,3 +18,121 @@ class PosixSession(OSSession): An intermediary class implementing the Posix compliant parts of Linux and other OS remote sessions. """ + + @staticmethod + def combine_short_options(**opts: bool) -> str: + ret_opts = "" + for opt, include in opts.items(): + if include: + ret_opts = f"{ret_opts}{opt}" + + if ret_opts: + ret_opts = f" -{ret_opts}" + + return ret_opts + + def guess_dpdk_remote_dir(self, remote_dir) -> PurePosixPath: + remote_guess = self.join_remote_path(remote_dir, "dpdk-*") + result = self.remote_session.send_command(f"ls -d {remote_guess} | tail -1") + return PurePosixPath(result.stdout) + + def get_remote_tmp_dir(self) -> PurePosixPath: + return PurePosixPath("/tmp") + + def get_dpdk_build_env_vars(self, arch: Architecture) -> dict: + """ + Create extra environment variables needed for i686 arch build. Get information + from the node if needed. 
+ """ + env_vars = {} + if arch == Architecture.i686: + # find the pkg-config path and store it in PKG_CONFIG_LIBDIR + out = self.remote_session.send_command("find /usr -type d -name pkgconfig") + pkg_path = "" + res_path = out.stdout.split("\r\n") + for cur_path in res_path: + if "i386" in cur_path: + pkg_path = cur_path + break + assert pkg_path != "", "i386 pkg-config path not found" + + env_vars["CFLAGS"] = "-m32" + env_vars["PKG_CONFIG_LIBDIR"] = pkg_path + + return env_vars + + def join_remote_path(self, *args: str | PurePath) -> PurePosixPath: + return PurePosixPath(*args) + + def copy_file( + self, + source_file: str | PurePath, + destination_file: str | PurePath, + source_remote: bool = False, + ) -> None: + self.remote_session.copy_file(source_file, destination_file, source_remote) + + def remove_remote_dir( + self, + remote_dir_path: str | PurePath, + recursive: bool = True, + force: bool = True, + ) -> None: + opts = PosixSession.combine_short_options(r=recursive, f=force) + self.remote_session.send_command(f"rm{opts} {remote_dir_path}") + + def extract_remote_tarball( + self, + remote_tarball_path: str | PurePath, + expected_dir: str | PurePath | None = None, + ) -> None: + self.remote_session.send_command( + f"tar xfm {remote_tarball_path} " + f"-C {PurePosixPath(remote_tarball_path).parent}", + 60, + ) + if expected_dir: + self.remote_session.send_command(f"ls {expected_dir}", verify=True) + + def build_dpdk( + self, + env_vars: EnvVarsDict, + meson_args: MesonArgs, + remote_dpdk_dir: str | PurePath, + remote_dpdk_build_dir: str | PurePath, + rebuild: bool = False, + timeout: float = SETTINGS.compile_timeout, + ) -> None: + try: + if rebuild: + # reconfigure, then build + self._logger.info("Reconfiguring DPDK build.") + self.remote_session.send_command( + f"meson configure {meson_args} {remote_dpdk_build_dir}", + timeout, + verify=True, + env=env_vars, + ) + else: + # fresh build - remove target dir first, then build from scratch + 
self._logger.info("Configuring DPDK build from scratch.") + self.remove_remote_dir(remote_dpdk_build_dir) + self.remote_session.send_command( + f"meson {meson_args} {remote_dpdk_dir} {remote_dpdk_build_dir}", + timeout, + verify=True, + env=env_vars, + ) + + self._logger.info("Building DPDK.") + self.remote_session.send_command( + f"ninja -C {remote_dpdk_build_dir}", timeout, verify=True, env=env_vars + ) + except RemoteCommandExecutionError as e: + raise DPDKBuildError(f"DPDK build failed when doing '{e.command}'.") + + def get_dpdk_version(self, build_dir: str | PurePath) -> str: + out = self.remote_session.send_command( + f"cat {self.join_remote_path(build_dir, 'VERSION')}", verify=True + ) + return out.stdout diff --git a/dts/framework/remote_session/remote/remote_session.py b/dts/framework/remote_session/remote/remote_session.py index 5ac395ec79..91dee3cb4f 100644 --- a/dts/framework/remote_session/remote/remote_session.py +++ b/dts/framework/remote_session/remote/remote_session.py @@ -5,11 +5,13 @@ import dataclasses from abc import ABC, abstractmethod +from pathlib import PurePath from framework.config import NodeConfiguration from framework.exception import RemoteCommandExecutionError from framework.logger import DTSLOG from framework.settings import SETTINGS +from framework.utils import EnvVarsDict @dataclasses.dataclass(slots=True, frozen=True) @@ -83,15 +85,22 @@ def _connect(self) -> None: """ def send_command( - self, command: str, timeout: float = SETTINGS.timeout, verify: bool = False + self, + command: str, + timeout: float = SETTINGS.timeout, + verify: bool = False, + env: EnvVarsDict | None = None, ) -> CommandResult: """ - Send a command to the connected node and return CommandResult. + Send a command to the connected node using optional env vars + and return CommandResult. If verify is True, check the return code of the executed command and raise a RemoteCommandExecutionError if the command failed. 
""" - self._logger.info(f"Sending: '{command}'") - result = self._send_command(command, timeout) + self._logger.info( + f"Sending: '{command}'" + (f" with env vars: '{env}'" if env else "") + ) + result = self._send_command(command, timeout, env) if verify and result.return_code: self._logger.debug( f"Command '{command}' failed with return code '{result.return_code}'" @@ -104,9 +113,12 @@ def send_command( return result @abstractmethod - def _send_command(self, command: str, timeout: float) -> CommandResult: + def _send_command( + self, command: str, timeout: float, env: EnvVarsDict | None + ) -> CommandResult: """ - Use the underlying protocol to execute the command and return CommandResult. + Use the underlying protocol to execute the command using optional env vars + and return CommandResult. """ def close(self, force: bool = False) -> None: @@ -127,3 +139,17 @@ def is_alive(self) -> bool: """ Check whether the remote session is still responding. """ + + @abstractmethod + def copy_file( + self, + source_file: str | PurePath, + destination_file: str | PurePath, + source_remote: bool = False, + ) -> None: + """ + Copy source_file from local filesystem to destination_file on the remote Node + associated with the remote session. + If source_remote is True, reverse the direction - copy source_file from the + associated Node to destination_file on local filesystem. 
+ """ diff --git a/dts/framework/remote_session/remote/ssh_session.py b/dts/framework/remote_session/remote/ssh_session.py index 6da5be9fff..d0863d8791 100644 --- a/dts/framework/remote_session/remote/ssh_session.py +++ b/dts/framework/remote_session/remote/ssh_session.py @@ -4,13 +4,15 @@ # Copyright(c) 2022-2023 University of New Hampshire import time +from pathlib import PurePath +import pexpect # type: ignore from pexpect import pxssh # type: ignore from framework.config import NodeConfiguration from framework.exception import SSHConnectionError, SSHSessionDeadError, SSHTimeoutError from framework.logger import DTSLOG -from framework.utils import GREEN, RED +from framework.utils import GREEN, RED, EnvVarsDict from .remote_session import CommandResult, RemoteSession @@ -163,16 +165,22 @@ def _flush(self) -> None: def is_alive(self) -> bool: return self.session.isalive() - def _send_command(self, command: str, timeout: float) -> CommandResult: - output = self._send_command_get_output(command, timeout) - return_code = int(self._send_command_get_output("echo $?", timeout)) + def _send_command( + self, command: str, timeout: float, env: EnvVarsDict | None + ) -> CommandResult: + output = self._send_command_get_output(command, timeout, env) + return_code = int(self._send_command_get_output("echo $?", timeout, None)) # we're capturing only stdout return CommandResult(self.name, command, output, "", return_code) - def _send_command_get_output(self, command: str, timeout: float) -> str: + def _send_command_get_output( + self, command: str, timeout: float, env: EnvVarsDict | None + ) -> str: try: self._clean_session() + if env: + command = f"{env} {command}" self._send_line(command) except Exception as e: raise e @@ -189,3 +197,53 @@ def _close(self, force: bool = False) -> None: else: if self.is_alive(): self.session.logout() + + def copy_file( + self, + source_file: str | PurePath, + destination_file: str | PurePath, + source_remote: bool = False, + ) -> None: + """ + 
Send a local file to a remote host. + """ + if source_remote: + source_file = f"{self.username}@{self.ip}:{source_file}" + else: + destination_file = f"{self.username}@{self.ip}:{destination_file}" + + port = "" + if self.port: + port = f" -P {self.port}" + + # this is not OS agnostic, find a Pythonic (and thus OS agnostic) way + # TODO Fabric should handle this + command = ( + f"scp -v{port} -o NoHostAuthenticationForLocalhost=yes" + f" {source_file} {destination_file}" + ) + + self._spawn_scp(command) + + def _spawn_scp(self, scp_cmd: str) -> None: + """ + Transfer a file with SCP + """ + self._logger.info(scp_cmd) + p: pexpect.spawn = pexpect.spawn(scp_cmd) + time.sleep(0.5) + ssh_newkey: str = "Are you sure you want to continue connecting" + i: int = p.expect( + [ssh_newkey, "[pP]assword", "# ", pexpect.EOF, pexpect.TIMEOUT], 120 + ) + if i == 0: # add once in trust list + p.sendline("yes") + i = p.expect([ssh_newkey, "[pP]assword", pexpect.EOF], 2) + + if i == 1: + time.sleep(0.5) + p.sendline(self.password) + p.expect("Exit status 0", 60) + if i == 4: + self._logger.error("SCP TIMEOUT error %d" % i) + p.close() diff --git a/dts/framework/settings.py b/dts/framework/settings.py index 800f2c7b7f..a298b1eaac 100644 --- a/dts/framework/settings.py +++ b/dts/framework/settings.py @@ -1,14 +1,17 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2010-2021 Intel Corporation -# Copyright(c) 2022 PANTHEON.tech s.r.o. -# Copyright(c) 2022 University of New Hampshire +# Copyright(c) 2022-2023 PANTHEON.tech s.r.o. 
+# Copyright(c) 2022-2023 University of New Hampshire import argparse import os from collections.abc import Callable, Iterable, Sequence from dataclasses import dataclass +from pathlib import Path from typing import Any, TypeVar +from .exception import ConfigurationError + _T = TypeVar("_T") @@ -60,6 +63,9 @@ class _Settings: output_dir: str timeout: float verbose: bool + skip_setup: bool + dpdk_ref: Path | str + compile_timeout: float def _get_parser() -> argparse.ArgumentParser: @@ -88,6 +94,7 @@ def _get_parser() -> argparse.ArgumentParser: "--timeout", action=_env_arg("DTS_TIMEOUT"), default=15, + type=float, required=False, help="[DTS_TIMEOUT] The default timeout for all DTS operations except for " "compiling DPDK.", @@ -103,16 +110,58 @@ def _get_parser() -> argparse.ArgumentParser: "to the console.", ) + parser.add_argument( + "-s", + "--skip-setup", + action=_env_arg("DTS_SKIP_SETUP"), + required=False, + help="[DTS_SKIP_SETUP] Set to 'Y' to skip all setup steps on SUT and TG nodes.", + ) + + parser.add_argument( + "--dpdk-ref", + "--git", + "--snapshot", + action=_env_arg("DTS_DPDK_REF"), + default="dpdk.tar.xz", + required=False, + help="[DTS_DPDK_REF] Reference to DPDK source code, " + "can be either a path to a tarball or a git refspec. " + "In case of a tarball, it will be extracted in the same directory.", + ) + + parser.add_argument( + "--compile-timeout", + action=_env_arg("DTS_COMPILE_TIMEOUT"), + default=1200, + type=float, + required=False, + help="[DTS_COMPILE_TIMEOUT] The timeout for compiling DPDK.", + ) + return parser +def _check_dpdk_ref(parsed_args: argparse.Namespace) -> None: + if not os.path.exists(parsed_args.dpdk_ref): + raise ConfigurationError( + f"DPDK tarball '{parsed_args.dpdk_ref}' doesn't exist." 
+ ) + else: + parsed_args.dpdk_ref = Path(parsed_args.dpdk_ref) + + def _get_settings() -> _Settings: parsed_args = _get_parser().parse_args() + _check_dpdk_ref(parsed_args) return _Settings( config_file_path=parsed_args.config_file, output_dir=parsed_args.output_dir, - timeout=float(parsed_args.timeout), + timeout=parsed_args.timeout, verbose=(parsed_args.verbose == "Y"), + skip_setup=(parsed_args.skip_setup == "Y"), + dpdk_ref=parsed_args.dpdk_ref, + compile_timeout=parsed_args.compile_timeout, ) diff --git a/dts/framework/testbed_model/__init__.py b/dts/framework/testbed_model/__init__.py index 8ead9db482..96e2ab7c3f 100644 --- a/dts/framework/testbed_model/__init__.py +++ b/dts/framework/testbed_model/__init__.py @@ -9,5 +9,6 @@ # pylama:ignore=W0611 +from .dpdk import MesonArgs from .node import Node from .sut_node import SutNode diff --git a/dts/framework/testbed_model/dpdk.py b/dts/framework/testbed_model/dpdk.py new file mode 100644 index 0000000000..0526974f72 --- /dev/null +++ b/dts/framework/testbed_model/dpdk.py @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation +# Copyright(c) 2023 PANTHEON.tech s.r.o. + +""" +Various utilities used for configuring, building and running DPDK. +""" + + +class MesonArgs(object): + """ + Aggregate the arguments needed to build DPDK: + default_library: Default library type, Meson allows "shared", "static" and "both". + Defaults to None, in which case the argument won't be used. + Keyword arguments: The arguments found in meson_option.txt in root DPDK directory. + Do not use -D with them, for example: enable_kmods=True. 
+ """ + + default_library: str + + def __init__(self, default_library: str | None = None, **dpdk_args: str | bool): + self.default_library = ( + f"--default-library={default_library}" if default_library else "" + ) + self.dpdk_args = " ".join( + ( + f"-D{dpdk_arg_name}={dpdk_arg_value}" + for dpdk_arg_name, dpdk_arg_value in dpdk_args.items() + ) + ) + + def __str__(self) -> str: + return " ".join(f"{self.default_library} {self.dpdk_args}".split()) diff --git a/dts/framework/testbed_model/sut_node.py b/dts/framework/testbed_model/sut_node.py index 42acb6f9b2..c97d995b31 100644 --- a/dts/framework/testbed_model/sut_node.py +++ b/dts/framework/testbed_model/sut_node.py @@ -2,6 +2,15 @@ # Copyright(c) 2010-2014 Intel Corporation # Copyright(c) 2023 PANTHEON.tech s.r.o. +import os +import tarfile +from pathlib import PurePath + +from framework.config import BuildTargetConfiguration, NodeConfiguration +from framework.settings import SETTINGS +from framework.utils import EnvVarsDict, skip_setup + +from .dpdk import MesonArgs from .node import Node @@ -10,4 +19,153 @@ class SutNode(Node): A class for managing connections to the System under Test, providing methods that retrieve the necessary information about the node (such as CPU, memory and NIC details) and configuration capabilities. + Another key capability is building DPDK according to given build target. 
""" + + _build_target_config: BuildTargetConfiguration | None + _env_vars: EnvVarsDict + _remote_tmp_dir: PurePath + __remote_dpdk_dir: PurePath | None + _dpdk_version: str | None + _app_compile_timeout: float + + def __init__(self, node_config: NodeConfiguration): + super(SutNode, self).__init__(node_config) + self._build_target_config = None + self._env_vars = EnvVarsDict() + self._remote_tmp_dir = self.main_session.get_remote_tmp_dir() + self.__remote_dpdk_dir = None + self._dpdk_version = None + self._app_compile_timeout = 90 + + @property + def _remote_dpdk_dir(self) -> PurePath: + if self.__remote_dpdk_dir is None: + self.__remote_dpdk_dir = self._guess_dpdk_remote_dir() + return self.__remote_dpdk_dir + + @_remote_dpdk_dir.setter + def _remote_dpdk_dir(self, value: PurePath) -> None: + self.__remote_dpdk_dir = value + + @property + def remote_dpdk_build_dir(self) -> PurePath: + if self._build_target_config: + return self.main_session.join_remote_path( + self._remote_dpdk_dir, self._build_target_config.name + ) + else: + return self.main_session.join_remote_path(self._remote_dpdk_dir, "build") + + @property + def dpdk_version(self) -> str: + if self._dpdk_version is None: + self._dpdk_version = self.main_session.get_dpdk_version( + self._remote_dpdk_dir + ) + return self._dpdk_version + + def _guess_dpdk_remote_dir(self) -> PurePath: + return self.main_session.guess_dpdk_remote_dir(self._remote_tmp_dir) + + def _set_up_build_target( + self, build_target_config: BuildTargetConfiguration + ) -> None: + """ + Setup DPDK on the SUT node. + """ + self._configure_build_target(build_target_config) + self._copy_dpdk_tarball() + self._build_dpdk() + + def _configure_build_target( + self, build_target_config: BuildTargetConfiguration + ) -> None: + """ + Populate common environment variables and set build target config. 
+ """ + self._env_vars = EnvVarsDict() + self._build_target_config = build_target_config + self._env_vars.update( + self.main_session.get_dpdk_build_env_vars(build_target_config.arch) + ) + self._env_vars["CC"] = build_target_config.compiler.name + if build_target_config.compiler_wrapper: + self._env_vars["CC"] = ( + f"'{build_target_config.compiler_wrapper} " + f"{build_target_config.compiler.name}'" + ) + + @skip_setup + def _copy_dpdk_tarball(self) -> None: + """ + Copy to and extract DPDK tarball on the SUT node. + """ + self._logger.info("Copying DPDK tarball to SUT.") + self.main_session.copy_file(SETTINGS.dpdk_ref, self._remote_tmp_dir) + + # construct remote tarball path + # the basename is the same on local host and on remote Node + remote_tarball_path = self.main_session.join_remote_path( + self._remote_tmp_dir, os.path.basename(SETTINGS.dpdk_ref) + ) + + # construct remote path after extracting + with tarfile.open(SETTINGS.dpdk_ref) as dpdk_tar: + dpdk_top_dir = dpdk_tar.getnames()[0] + self._remote_dpdk_dir = self.main_session.join_remote_path( + self._remote_tmp_dir, dpdk_top_dir + ) + + self._logger.info( + f"Extracting DPDK tarball on SUT: " + f"'{remote_tarball_path}' into '{self._remote_dpdk_dir}'." + ) + # clean remote path where we're extracting + self.main_session.remove_remote_dir(self._remote_dpdk_dir) + + # then extract to remote path + self.main_session.extract_remote_tarball( + remote_tarball_path, self._remote_dpdk_dir + ) + + @skip_setup + def _build_dpdk(self) -> None: + """ + Build DPDK. Uses the already configured target. Assumes that the tarball has + already been copied to and extracted on the SUT node. + """ + self.main_session.build_dpdk( + self._env_vars, + MesonArgs(default_library="static", enable_kmods=True, libdir="lib"), + self._remote_dpdk_dir, + self.remote_dpdk_build_dir, + ) + + def build_dpdk_app(self, app_name: str, **meson_dpdk_args: str | bool) -> PurePath: + """ + Build one or all DPDK apps. 
Requires DPDK to be already built on the SUT node. + When app_name is 'all', build all example apps. + When app_name is any other string, tries to build that example app. + Return the directory path of the built app. If building all apps, return + the path to the examples directory (where all apps reside). + The meson_dpdk_args are keyword arguments + found in meson_option.txt in root DPDK directory. Do not use -D with them, + for example: enable_kmods=True. + """ + self.main_session.build_dpdk( + self._env_vars, + MesonArgs(examples=app_name, **meson_dpdk_args), + self._remote_dpdk_dir, + self.remote_dpdk_build_dir, + rebuild=True, + timeout=self._app_compile_timeout, + ) + + if app_name == "all": + return self.main_session.join_remote_path( + self.remote_dpdk_build_dir, "examples" + ) + return self.main_session.join_remote_path( + self.remote_dpdk_build_dir, "examples", f"dpdk-{app_name}" + ) diff --git a/dts/framework/utils.py b/dts/framework/utils.py index c28c8f1082..611071604b 100644 --- a/dts/framework/utils.py +++ b/dts/framework/utils.py @@ -1,9 +1,12 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2010-2014 Intel Corporation -# Copyright(c) 2022 PANTHEON.tech s.r.o. -# Copyright(c) 2022 University of New Hampshire +# Copyright(c) 2022-2023 PANTHEON.tech s.r.o. 
+# Copyright(c) 2022-2023 University of New Hampshire import sys +from typing import Callable + +from .settings import SETTINGS def check_dts_python_version() -> None: @@ -22,9 +25,21 @@ def check_dts_python_version() -> None: print(RED("Please use Python >= 3.10 instead"), file=sys.stderr) +def skip_setup(func) -> Callable[..., None]: + if SETTINGS.skip_setup: + return lambda *args: None + else: + return func + + def GREEN(text: str) -> str: return f"\u001B[32;1m{str(text)}\u001B[0m" def RED(text: str) -> str: return f"\u001B[31;1m{str(text)}\u001B[0m" + + +class EnvVarsDict(dict): + def __str__(self) -> str: + return " ".join(["=".join(item) for item in self.items()]) From patchwork Tue Jan 17 15:49:00 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 122192 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id D596A423FE; Tue, 17 Jan 2023 16:49:45 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id B868C42D32; Tue, 17 Jan 2023 16:49:19 +0100 (CET) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id 3FEDC42D49 for ; Tue, 17 Jan 2023 16:49:18 +0100 (CET) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 50EC31D812D; Tue, 17 Jan 2023 16:49:17 +0100 (CET) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id pc0SEmVRNQlD; Tue, 17 Jan 2023 16:49:15 +0100 (CET) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id 544BF1D8124; Tue, 17 Jan 2023 16:49:09 
+0100 (CET) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com, bruce.richardson@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [PATCH v3 04/10] dts: add dpdk execution handling Date: Tue, 17 Jan 2023 15:49:00 +0000 Message-Id: <20230117154906.860916-5-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230117154906.860916-1-juraj.linkes@pantheon.tech> References: <20221114165438.1133783-1-juraj.linkes@pantheon.tech> <20230117154906.860916-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Add methods for setting up and shutting down DPDK apps and for constructing EAL parameters. Signed-off-by: Juraj Linkeš --- dts/conf.yaml | 4 + dts/framework/config/__init__.py | 8 + dts/framework/config/conf_yaml_schema.json | 24 ++ dts/framework/remote_session/linux_session.py | 18 ++ dts/framework/remote_session/os_session.py | 23 +- dts/framework/remote_session/posix_session.py | 83 ++++++ dts/framework/testbed_model/__init__.py | 8 + dts/framework/testbed_model/dpdk.py | 45 ++++ dts/framework/testbed_model/hw/__init__.py | 27 ++ dts/framework/testbed_model/hw/cpu.py | 253 ++++++++++++++++++ .../testbed_model/hw/virtual_device.py | 16 ++ dts/framework/testbed_model/node.py | 46 ++++ dts/framework/testbed_model/sut_node.py | 82 +++++- dts/framework/utils.py | 20 ++ 14 files changed, 655 insertions(+), 2 deletions(-) create mode 100644 dts/framework/testbed_model/hw/__init__.py create mode 100644 dts/framework/testbed_model/hw/cpu.py create mode 100644 dts/framework/testbed_model/hw/virtual_device.py diff --git a/dts/conf.yaml b/dts/conf.yaml index 03696d2bab..1648e5c3c5 100644 --- a/dts/conf.yaml +++ b/dts/conf.yaml @@ -13,4 +13,8 @@ nodes: - 
name: "SUT 1" hostname: sut1.change.me.localhost user: root + arch: x86_64 os: linux + lcores: "" + use_first_core: false + memory_channels: 4 diff --git a/dts/framework/config/__init__.py b/dts/framework/config/__init__.py index ca61cb10fe..17b917f3b3 100644 --- a/dts/framework/config/__init__.py +++ b/dts/framework/config/__init__.py @@ -72,7 +72,11 @@ class NodeConfiguration: hostname: str user: str password: str | None + arch: Architecture os: OS + lcores: str + use_first_core: bool + memory_channels: int @staticmethod def from_dict(d: dict) -> "NodeConfiguration": @@ -81,7 +85,11 @@ def from_dict(d: dict) -> "NodeConfiguration": hostname=d["hostname"], user=d["user"], password=d.get("password"), + arch=Architecture(d["arch"]), os=OS(d["os"]), + lcores=d.get("lcores", "1"), + use_first_core=d.get("use_first_core", False), + memory_channels=d.get("memory_channels", 1), ) diff --git a/dts/framework/config/conf_yaml_schema.json b/dts/framework/config/conf_yaml_schema.json index 9170307fbe..81f304da5e 100644 --- a/dts/framework/config/conf_yaml_schema.json +++ b/dts/framework/config/conf_yaml_schema.json @@ -6,6 +6,14 @@ "type": "string", "description": "A unique identifier for a node" }, + "ARCH": { + "type": "string", + "enum": [ + "x86_64", + "arm64", + "ppc64le" + ] + }, "OS": { "type": "string", "enum": [ @@ -92,8 +100,23 @@ "type": "string", "description": "The password to use on this node. Use only as a last resort. SSH keys are STRONGLY preferred." }, + "arch": { + "$ref": "#/definitions/ARCH" + }, "os": { "$ref": "#/definitions/OS" + }, + "lcores": { + "type": "string", + "description": "Optional comma-separated list of logical cores to use, e.g.: 1,2,3,4,5,18-22. Defaults to 1. An empty string means use all lcores." + }, + "use_first_core": { + "type": "boolean", + "description": "Indicate whether DPDK should use the first physical core. It won't be used by default." 
+ }, + "memory_channels": { + "type": "integer", + "description": "How many memory channels to use. Optional, defaults to 1." } }, "additionalProperties": false, @@ -101,6 +124,7 @@ "name", "hostname", "user", + "arch", "os" ] }, diff --git a/dts/framework/remote_session/linux_session.py b/dts/framework/remote_session/linux_session.py index 9d14166077..6809102038 100644 --- a/dts/framework/remote_session/linux_session.py +++ b/dts/framework/remote_session/linux_session.py @@ -2,6 +2,8 @@ # Copyright(c) 2023 PANTHEON.tech s.r.o. # Copyright(c) 2023 University of New Hampshire +from framework.testbed_model import LogicalCore + from .posix_session import PosixSession @@ -9,3 +11,19 @@ class LinuxSession(PosixSession): """ The implementation of non-Posix compliant parts of Linux remote sessions. """ + + def get_remote_cpus(self, use_first_core: bool) -> list[LogicalCore]: + cpu_info = self.remote_session.send_command( + "lscpu -p=CPU,CORE,SOCKET,NODE|grep -v \\#" + ).stdout + lcores = [] + for cpu_line in cpu_info.splitlines(): + lcore, core, socket, node = cpu_line.split(",") + if not use_first_core and core == 0 and socket == 0: + self._logger.info("Not using the first physical core.") + continue + lcores.append(LogicalCore(int(lcore), int(core), int(socket), int(node))) + return lcores + + def get_dpdk_file_prefix(self, dpdk_prefix) -> str: + return dpdk_prefix diff --git a/dts/framework/remote_session/os_session.py b/dts/framework/remote_session/os_session.py index 06d1ffefdd..c30753e0b8 100644 --- a/dts/framework/remote_session/os_session.py +++ b/dts/framework/remote_session/os_session.py @@ -3,12 +3,13 @@ # Copyright(c) 2023 University of New Hampshire from abc import ABC, abstractmethod +from collections.abc import Iterable from pathlib import PurePath from framework.config import Architecture, NodeConfiguration from framework.logger import DTSLOG from framework.settings import SETTINGS -from framework.testbed_model import MesonArgs +from 
framework.testbed_model import LogicalCore, MesonArgs from framework.utils import EnvVarsDict from .remote import RemoteSession, create_remote_session @@ -130,3 +131,23 @@ def get_dpdk_version(self, version_path: str | PurePath) -> str: """ Inspect DPDK version on the remote node from version_path. """ + + @abstractmethod + def get_remote_cpus(self, use_first_core: bool) -> list[LogicalCore]: + """ + Compose a list of LogicalCores present on the remote node. + If use_first_core is False, the first physical core won't be used. + """ + + @abstractmethod + def kill_cleanup_dpdk_apps(self, dpdk_prefix_list: Iterable[str]) -> None: + """ + Kill and cleanup all DPDK apps identified by dpdk_prefix_list. If + dpdk_prefix_list is empty, attempt to find running DPDK apps to kill and clean. + """ + + @abstractmethod + def get_dpdk_file_prefix(self, dpdk_prefix) -> str: + """ + Get the DPDK file prefix that will be used when running DPDK apps. + """ diff --git a/dts/framework/remote_session/posix_session.py b/dts/framework/remote_session/posix_session.py index d4da9f114e..4c8474b804 100644 --- a/dts/framework/remote_session/posix_session.py +++ b/dts/framework/remote_session/posix_session.py @@ -2,6 +2,8 @@ # Copyright(c) 2023 PANTHEON.tech s.r.o. 
# Copyright(c) 2023 University of New Hampshire +import re +from collections.abc import Iterable from pathlib import PurePath, PurePosixPath from framework.config import Architecture @@ -136,3 +138,84 @@ def get_dpdk_version(self, build_dir: str | PurePath) -> str: f"cat {self.join_remote_path(build_dir, 'VERSION')}", verify=True ) return out.stdout + + def kill_cleanup_dpdk_apps(self, dpdk_prefix_list: Iterable[str]) -> None: + self._logger.info("Cleaning up DPDK apps.") + dpdk_runtime_dirs = self._get_dpdk_runtime_dirs(dpdk_prefix_list) + if dpdk_runtime_dirs: + # kill and cleanup only if DPDK is running + dpdk_pids = self._get_dpdk_pids(dpdk_runtime_dirs) + for dpdk_pid in dpdk_pids: + self.remote_session.send_command(f"kill -9 {dpdk_pid}", 20) + self._check_dpdk_hugepages(dpdk_runtime_dirs) + self._remove_dpdk_runtime_dirs(dpdk_runtime_dirs) + + def _get_dpdk_runtime_dirs( + self, dpdk_prefix_list: Iterable[str] + ) -> list[PurePosixPath]: + prefix = PurePosixPath("/var", "run", "dpdk") + if not dpdk_prefix_list: + remote_prefixes = self._list_remote_dirs(prefix) + if not remote_prefixes: + dpdk_prefix_list = [] + else: + dpdk_prefix_list = remote_prefixes + + return [PurePosixPath(prefix, dpdk_prefix) for dpdk_prefix in dpdk_prefix_list] + + def _list_remote_dirs(self, remote_path: str | PurePath) -> list[str] | None: + """ + Return a list of directories of the remote_dir. + If remote_path doesn't exist, return None. 
+ """ + out = self.remote_session.send_command( + f"ls -l {remote_path} | awk '/^d/ {{print $NF}}'" + ).stdout + if "No such file or directory" in out: + return None + else: + return out.splitlines() + + def _get_dpdk_pids(self, dpdk_runtime_dirs: Iterable[str | PurePath]) -> list[int]: + pids = [] + pid_regex = r"p(\d+)" + for dpdk_runtime_dir in dpdk_runtime_dirs: + dpdk_config_file = PurePosixPath(dpdk_runtime_dir, "config") + if self._remote_files_exists(dpdk_config_file): + out = self.remote_session.send_command( + f"lsof -Fp {dpdk_config_file}" + ).stdout + if out and "No such file or directory" not in out: + for out_line in out.splitlines(): + match = re.match(pid_regex, out_line) + if match: + pids.append(int(match.group(1))) + return pids + + def _remote_files_exists(self, remote_path: PurePath) -> bool: + result = self.remote_session.send_command(f"test -e {remote_path}") + return not result.return_code + + def _check_dpdk_hugepages( + self, dpdk_runtime_dirs: Iterable[str | PurePath] + ) -> None: + for dpdk_runtime_dir in dpdk_runtime_dirs: + hugepage_info = PurePosixPath(dpdk_runtime_dir, "hugepage_info") + if self._remote_files_exists(hugepage_info): + out = self.remote_session.send_command( + f"lsof -Fp {hugepage_info}" + ).stdout + if out and "No such file or directory" not in out: + self._logger.warning("Some DPDK processes did not free hugepages.") + self._logger.warning("*******************************************") + self._logger.warning(out) + self._logger.warning("*******************************************") + + def _remove_dpdk_runtime_dirs( + self, dpdk_runtime_dirs: Iterable[str | PurePath] + ) -> None: + for dpdk_runtime_dir in dpdk_runtime_dirs: + self.remove_remote_dir(dpdk_runtime_dir) + + def get_dpdk_file_prefix(self, dpdk_prefix) -> str: + return "" diff --git a/dts/framework/testbed_model/__init__.py b/dts/framework/testbed_model/__init__.py index 96e2ab7c3f..2be5169dc8 100644 --- a/dts/framework/testbed_model/__init__.py +++ 
b/dts/framework/testbed_model/__init__.py @@ -10,5 +10,13 @@ # pylama:ignore=W0611 from .dpdk import MesonArgs +from .hw import ( + LogicalCore, + LogicalCoreAmount, + LogicalCoreList, + LogicalCoreListFilter, + VirtualDevice, + lcore_filter, +) from .node import Node from .sut_node import SutNode diff --git a/dts/framework/testbed_model/dpdk.py b/dts/framework/testbed_model/dpdk.py index 0526974f72..9b3a9e7381 100644 --- a/dts/framework/testbed_model/dpdk.py +++ b/dts/framework/testbed_model/dpdk.py @@ -6,6 +6,8 @@ Various utilities used for configuring, building and running DPDK. """ +from .hw import LogicalCoreList, VirtualDevice + class MesonArgs(object): """ @@ -31,3 +33,46 @@ def __init__(self, default_library: str | None = None, **dpdk_args: str | bool): def __str__(self) -> str: return " ".join(f"{self.default_library} {self.dpdk_args}".split()) + + +class EalParameters(object): + def __init__( + self, + lcore_list: LogicalCoreList, + memory_channels: int, + prefix: str, + no_pci: bool, + vdevs: list[VirtualDevice], + other_eal_param: str, + ): + """ + Generate eal parameters character string; + :param lcore_list: the list of logical cores to use. + :param memory_channels: the number of memory channels to use. 
+ :param prefix: set file prefix string, eg: + prefix='vf' + :param no_pci: switch of disable PCI bus eg: + no_pci=True + :param vdevs: virtual device list, eg: + vdevs=['net_ring0', 'net_ring1'] + :param other_eal_param: user defined DPDK eal parameters, eg: + other_eal_param='--single-file-segments' + """ + self._lcore_list = f"-l {lcore_list}" + self._memory_channels = f"-n {memory_channels}" + self._prefix = prefix + if prefix: + self._prefix = f"--file-prefix={prefix}" + self._no_pci = "--no-pci" if no_pci else "" + self._vdevs = " ".join(f"--vdev {vdev}" for vdev in vdevs) + self._other_eal_param = other_eal_param + + def __str__(self) -> str: + return ( + f"{self._lcore_list} " + f"{self._memory_channels} " + f"{self._prefix} " + f"{self._no_pci} " + f"{self._vdevs} " + f"{self._other_eal_param}" + ) diff --git a/dts/framework/testbed_model/hw/__init__.py b/dts/framework/testbed_model/hw/__init__.py new file mode 100644 index 0000000000..fb4cdac8e3 --- /dev/null +++ b/dts/framework/testbed_model/hw/__init__.py @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2023 PANTHEON.tech s.r.o. 
+ +# pylama:ignore=W0611 + +from .cpu import ( + LogicalCore, + LogicalCoreAmount, + LogicalCoreAmountFilter, + LogicalCoreFilter, + LogicalCoreList, + LogicalCoreListFilter, +) +from .virtual_device import VirtualDevice + + +def lcore_filter( + core_list: list[LogicalCore], + filter_specifier: LogicalCoreAmount | LogicalCoreList, + ascending: bool, +) -> LogicalCoreFilter: + if isinstance(filter_specifier, LogicalCoreList): + return LogicalCoreListFilter(core_list, filter_specifier, ascending) + elif isinstance(filter_specifier, LogicalCoreAmount): + return LogicalCoreAmountFilter(core_list, filter_specifier, ascending) + else: + raise ValueError(f"Unsupported filter r{filter_specifier}") diff --git a/dts/framework/testbed_model/hw/cpu.py b/dts/framework/testbed_model/hw/cpu.py new file mode 100644 index 0000000000..96c46ee8c5 --- /dev/null +++ b/dts/framework/testbed_model/hw/cpu.py @@ -0,0 +1,253 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2023 PANTHEON.tech s.r.o. + +import dataclasses +from abc import ABC, abstractmethod +from collections.abc import Iterable +from dataclasses import dataclass + +from framework.utils import expand_range + + +@dataclass(slots=True, frozen=True) +class LogicalCore(object): + """ + Representation of a CPU core. A physical core is represented in OS + by multiple logical cores (lcores) if CPU multithreading is enabled. + """ + + lcore: int + core: int + socket: int + node: int + + def __int__(self) -> int: + return self.lcore + + +class LogicalCoreList(object): + """ + Convert these options into a list of logical core ids. 
+ lcore_list=[LogicalCore1, LogicalCore2] - a list of LogicalCores + lcore_list=[0,1,2,3] - a list of int indices + lcore_list=['0','1','2-3'] - a list of str indices; ranges are supported + lcore_list='0,1,2-3' - a comma delimited str of indices; ranges are supported + + The class creates a unified format used across the framework and allows + the user to use either a str representation (using str(instance) or directly + in f-strings) or a list representation (by accessing instance.lcore_list). + Empty lcore_list is allowed. + """ + + _lcore_list: list[int] + _lcore_str: str + + def __init__(self, lcore_list: list[int] | list[str] | list[LogicalCore] | str): + self._lcore_list = [] + if isinstance(lcore_list, str): + lcore_list = lcore_list.split(",") + for lcore in lcore_list: + if isinstance(lcore, str): + self._lcore_list.extend(expand_range(lcore)) + else: + self._lcore_list.append(int(lcore)) + + # the input lcores may not be sorted + self._lcore_list.sort() + self._lcore_str = ( + f'{",".join(self._get_consecutive_lcores_range(self._lcore_list))}' + ) + + @property + def lcore_list(self) -> list[int]: + return self._lcore_list + + def _get_consecutive_lcores_range(self, lcore_ids_list: list[int]) -> list[str]: + formatted_core_list = [] + segment = lcore_ids_list[:1] + for lcore_id in lcore_ids_list[1:]: + if lcore_id - segment[-1] == 1: + segment.append(lcore_id) + else: + formatted_core_list.append( + f"{segment[0]}-{segment[-1]}" + if len(segment) > 1 + else f"{segment[0]}" + ) + current_core_index = lcore_ids_list.index(lcore_id) + formatted_core_list.extend( + self._get_consecutive_lcores_range( + lcore_ids_list[current_core_index:] + ) + ) + segment.clear() + break + if len(segment) > 0: + formatted_core_list.append( + f"{segment[0]}-{segment[-1]}" if len(segment) > 1 else f"{segment[0]}" + ) + return formatted_core_list + + def __str__(self) -> str: + return self._lcore_str + + +@dataclasses.dataclass(slots=True, frozen=True) +class 
LogicalCoreAmount(object): + """ + Define the amount of logical cores to use. + If sockets is not None, socket_amount is ignored. + """ + + lcores_per_core: int = 1 + cores_per_socket: int = 2 + socket_amount: int = 1 + sockets: list[int] | None = None + + +class LogicalCoreFilter(ABC): + """ + Filter according to the input filter specifier. Each filter needs to be + implemented in a derived class. + This class only implements operations common to all filters, such as sorting + the list to be filtered beforehand. + """ + + _filter_specifier: LogicalCoreAmount | LogicalCoreList + _lcores_to_filter: list[LogicalCore] + + def __init__( + self, + lcore_list: list[LogicalCore], + filter_specifier: LogicalCoreAmount | LogicalCoreList, + ascending: bool = True, + ): + self._filter_specifier = filter_specifier + + # sorting by core is needed in case hyperthreading is enabled + self._lcores_to_filter = sorted( + lcore_list, key=lambda x: x.core, reverse=not ascending + ) + self.filter() + + @abstractmethod + def filter(self) -> list[LogicalCore]: + """ + Use self._filter_specifier to filter self._lcores_to_filter + and return the list of filtered LogicalCores. + self._lcores_to_filter is a sorted copy of the original list, + so it may be modified. + """ + + +class LogicalCoreAmountFilter(LogicalCoreFilter): + """ + Filter the input list of LogicalCores according to specified rules: + Use cores from the specified amount of sockets or from the specified socket ids. + If sockets is specified, it takes precedence over socket_amount. + From each of those sockets, use only cores_per_socket of cores. + And for each core, use lcores_per_core of logical cores. Hypertheading + must be enabled for this to take effect. + If ascending is True, use cores with the lowest numerical id first + and continue in ascending order. If False, start with the highest + id and continue in descending order. This ordering affects which + sockets to consider first as well. 
+ """ + + _filter_specifier: LogicalCoreAmount + + def filter(self) -> list[LogicalCore]: + return self._filter_cores(self._filter_sockets(self._lcores_to_filter)) + + def _filter_sockets( + self, lcores_to_filter: Iterable[LogicalCore] + ) -> list[LogicalCore]: + allowed_sockets: set[int] = set() + socket_amount = self._filter_specifier.socket_amount + if self._filter_specifier.sockets: + socket_amount = len(self._filter_specifier.sockets) + allowed_sockets = set(self._filter_specifier.sockets) + + filtered_lcores = [] + for lcore in lcores_to_filter: + if not self._filter_specifier.sockets: + if len(allowed_sockets) < socket_amount: + allowed_sockets.add(lcore.socket) + if lcore.socket in allowed_sockets: + filtered_lcores.append(lcore) + + if len(allowed_sockets) < socket_amount: + raise ValueError( + f"The amount of sockets from which to use cores " + f"({socket_amount}) exceeds the actual amount present " + f"on the node ({len(allowed_sockets)})" + ) + + return filtered_lcores + + def _filter_cores( + self, lcores_to_filter: Iterable[LogicalCore] + ) -> list[LogicalCore]: + # no need to use ordered dict, from Python3.7 the dict + # insertion order is preserved (LIFO). + allowed_lcore_per_core_count_map: dict[int, int] = {} + filtered_lcores = [] + for lcore in lcores_to_filter: + if lcore.core in allowed_lcore_per_core_count_map: + lcore_count = allowed_lcore_per_core_count_map[lcore.core] + if self._filter_specifier.lcores_per_core > lcore_count: + # only add lcores of the given core + allowed_lcore_per_core_count_map[lcore.core] += 1 + filtered_lcores.append(lcore) + else: + raise ValueError( + f"The amount of logical cores per core to use " + f"({self._filter_specifier.lcores_per_core}) " + f"exceeds the actual amount present. Is hyperthreading enabled?" 
+ ) + elif self._filter_specifier.cores_per_socket > len( + allowed_lcore_per_core_count_map + ): + # only add lcores if we need more + allowed_lcore_per_core_count_map[lcore.core] = 1 + filtered_lcores.append(lcore) + else: + # lcores are sorted by core, at this point we won't encounter new cores + break + + cores_per_socket = len(allowed_lcore_per_core_count_map) + if cores_per_socket < self._filter_specifier.cores_per_socket: + raise ValueError( + f"The amount of cores per socket to use " + f"({self._filter_specifier.cores_per_socket}) " + f"exceeds the actual amount present ({cores_per_socket})" + ) + + return filtered_lcores + + +class LogicalCoreListFilter(LogicalCoreFilter): + """ + Filter the input list of Logical Cores according to the input list of + lcore indices. + An empty LogicalCoreList won't filter anything. + """ + + _filter_specifier: LogicalCoreList + + def filter(self) -> list[LogicalCore]: + if not len(self._filter_specifier.lcore_list): + return self._lcores_to_filter + + filtered_lcores = [] + for core in self._lcores_to_filter: + if core.lcore in self._filter_specifier.lcore_list: + filtered_lcores.append(core) + + if len(filtered_lcores) != len(self._filter_specifier.lcore_list): + raise ValueError( + f"Not all logical cores from {self._filter_specifier.lcore_list} " + f"were found among {self._lcores_to_filter}" + ) + + return filtered_lcores diff --git a/dts/framework/testbed_model/hw/virtual_device.py b/dts/framework/testbed_model/hw/virtual_device.py new file mode 100644 index 0000000000..eb664d9f17 --- /dev/null +++ b/dts/framework/testbed_model/hw/virtual_device.py @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2023 PANTHEON.tech s.r.o. + + +class VirtualDevice(object): + """ + Base class for virtual devices used by DPDK. 
+ """ + + name: str + + def __init__(self, name: str): + self.name = name + + def __str__(self) -> str: + return self.name diff --git a/dts/framework/testbed_model/node.py b/dts/framework/testbed_model/node.py index a37f7921e0..cf2af2ca72 100644 --- a/dts/framework/testbed_model/node.py +++ b/dts/framework/testbed_model/node.py @@ -15,6 +15,14 @@ from framework.logger import DTSLOG, getLogger from framework.remote_session import OSSession, create_session +from .hw import ( + LogicalCore, + LogicalCoreAmount, + LogicalCoreList, + LogicalCoreListFilter, + lcore_filter, +) + class Node(object): """ @@ -26,6 +34,7 @@ class Node(object): main_session: OSSession config: NodeConfiguration name: str + lcores: list[LogicalCore] _logger: DTSLOG _other_sessions: list[OSSession] @@ -35,6 +44,12 @@ def __init__(self, node_config: NodeConfiguration): self._logger = getLogger(self.name) self.main_session = create_session(self.config, self.name, self._logger) + self._get_remote_cpus() + # filter the node lcores according to user config + self.lcores = LogicalCoreListFilter( + self.lcores, LogicalCoreList(self.config.lcores) + ).filter() + self._other_sessions = [] self._logger.info(f"Created node: {self.name}") @@ -107,6 +122,37 @@ def create_session(self, name: str) -> OSSession: self._other_sessions.append(connection) return connection + def filter_lcores( + self, + filter_specifier: LogicalCoreAmount | LogicalCoreList, + ascending: bool = True, + ) -> list[LogicalCore]: + """ + Filter the LogicalCores found on the Node according to specified rules: + Use cores from the specified amount of sockets or from the specified socket ids. + If sockets is specified, it takes precedence over socket_amount. + From each of those sockets, use only cores_per_socket of cores. + And for each core, use lcores_per_core of logical cores. Hypertheading + must be enabled for this to take effect. + If ascending is True, use cores with the lowest numerical id first + and continue in ascending order. 
If False, start with the highest + id and continue in descending order. This ordering affects which + sockets to consider first as well. + """ + self._logger.debug(f"Filtering {filter_specifier} from {self.lcores}.") + return lcore_filter( + self.lcores, + filter_specifier, + ascending, + ).filter() + + def _get_remote_cpus(self) -> None: + """ + Scan CPUs in the remote OS and store a list of LogicalCores. + """ + self._logger.info("Getting CPU information.") + self.lcores = self.main_session.get_remote_cpus(self.config.use_first_core) + def close(self) -> None: """ Close all connections and free other resources. diff --git a/dts/framework/testbed_model/sut_node.py b/dts/framework/testbed_model/sut_node.py index c97d995b31..ea0b96d6bf 100644 --- a/dts/framework/testbed_model/sut_node.py +++ b/dts/framework/testbed_model/sut_node.py @@ -4,13 +4,16 @@ import os import tarfile +import time from pathlib import PurePath from framework.config import BuildTargetConfiguration, NodeConfiguration +from framework.remote_session import OSSession from framework.settings import SETTINGS from framework.utils import EnvVarsDict, skip_setup -from .dpdk import MesonArgs +from .dpdk import EalParameters, MesonArgs +from .hw import LogicalCoreAmount, LogicalCoreList, VirtualDevice from .node import Node @@ -22,21 +25,29 @@ class SutNode(Node): Another key capability is building DPDK according to given build target. 
""" + _dpdk_prefix_list: list[str] + _dpdk_timestamp: str _build_target_config: BuildTargetConfiguration | None _env_vars: EnvVarsDict _remote_tmp_dir: PurePath __remote_dpdk_dir: PurePath | None _dpdk_version: str | None _app_compile_timeout: float + _dpdk_kill_session: OSSession | None def __init__(self, node_config: NodeConfiguration): super(SutNode, self).__init__(node_config) + self._dpdk_prefix_list = [] self._build_target_config = None self._env_vars = EnvVarsDict() self._remote_tmp_dir = self.main_session.get_remote_tmp_dir() self.__remote_dpdk_dir = None self._dpdk_version = None self._app_compile_timeout = 90 + self._dpdk_kill_session = None + self._dpdk_timestamp = ( + f"{str(os.getpid())}_{time.strftime('%Y%m%d%H%M%S', time.localtime())}" + ) @property def _remote_dpdk_dir(self) -> PurePath: @@ -169,3 +180,72 @@ def build_dpdk_app(self, app_name: str, **meson_dpdk_args: str | bool) -> PurePa return self.main_session.join_remote_path( self.remote_dpdk_build_dir, "examples", f"dpdk-{app_name}" ) + + def kill_cleanup_dpdk_apps(self) -> None: + """ + Kill all dpdk applications on the SUT. Cleanup hugepages. + """ + if self._dpdk_kill_session and self._dpdk_kill_session.is_alive(): + # we can use the session if it exists and responds + self._dpdk_kill_session.kill_cleanup_dpdk_apps(self._dpdk_prefix_list) + else: + # otherwise, we need to (re)create it + self._dpdk_kill_session = self.create_session("dpdk_kill") + self._dpdk_prefix_list = [] + + def create_eal_parameters( + self, + lcore_filter_specifier: LogicalCoreAmount + | LogicalCoreList = LogicalCoreAmount(), + ascending_cores: bool = True, + prefix: str = "dpdk", + append_prefix_timestamp: bool = True, + no_pci: bool = False, + vdevs: list[VirtualDevice] = None, + other_eal_param: str = "", + ) -> EalParameters: + """ + Generate eal parameters character string; + :param lcore_filter_specifier: an amount of lcores/cores/sockets to use + or a list of lcore ids to use. 
+ The default will select one lcore for each of two cores + on one socket, in ascending order of core ids. + :param ascending_cores: True, use cores with the lowest numerical id first + and continue in ascending order. If False, start with the + highest id and continue in descending order. This ordering + affects which sockets to consider first as well. + :param prefix: set file prefix string, eg: + prefix='vf' + :param append_prefix_timestamp: if True, will append a timestamp to + DPDK file prefix. + :param no_pci: switch of disable PCI bus eg: + no_pci=True + :param vdevs: virtual device list, eg: + vdevs=['net_ring0', 'net_ring1'] + :param other_eal_param: user defined DPDK eal parameters, eg: + other_eal_param='--single-file-segments' + :return: eal param string, eg: + '-c 0xf -a 0000:88:00.0 --file-prefix=dpdk_1112_20190809143420'; + """ + + lcore_list = LogicalCoreList( + self.filter_lcores(lcore_filter_specifier, ascending_cores) + ) + + if append_prefix_timestamp: + prefix = f"{prefix}_{self._dpdk_timestamp}" + prefix = self.main_session.get_dpdk_file_prefix(prefix) + if prefix: + self._dpdk_prefix_list.append(prefix) + + if vdevs is None: + vdevs = [] + + return EalParameters( + lcore_list=lcore_list, + memory_channels=self.config.memory_channels, + prefix=prefix, + no_pci=no_pci, + vdevs=vdevs, + other_eal_param=other_eal_param, + ) diff --git a/dts/framework/utils.py b/dts/framework/utils.py index 611071604b..eebe76f16c 100644 --- a/dts/framework/utils.py +++ b/dts/framework/utils.py @@ -32,6 +32,26 @@ def skip_setup(func) -> Callable[..., None]: return func +def expand_range(range_str: str) -> list[int]: + """ + Process range string into a list of integers. There are two possible formats: + n - a single integer + n-m - a range of integers + + The returned range includes both n and m. Empty string returns an empty list. 
+ """ + expanded_range: list[int] = [] + if range_str: + range_boundaries = range_str.split("-") + # will throw an exception when items in range_boundaries can't be converted, + # serving as type check + expanded_range.extend( + range(int(range_boundaries[0]), int(range_boundaries[-1]) + 1) + ) + + return expanded_range + + def GREEN(text: str) -> str: return f"\u001B[32;1m{str(text)}\u001B[0m" From patchwork Tue Jan 17 15:49:01 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 122193 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id E3FCD423FE; Tue, 17 Jan 2023 16:49:53 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 8E78342D35; Tue, 17 Jan 2023 16:49:20 +0100 (CET) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id 4C24D42D4A for ; Tue, 17 Jan 2023 16:49:18 +0100 (CET) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 315951D8122; Tue, 17 Jan 2023 16:49:17 +0100 (CET) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id R5FMtgAIMHPN; Tue, 17 Jan 2023 16:49:15 +0100 (CET) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id F1A7B1D8125; Tue, 17 Jan 2023 16:49:09 +0100 (CET) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com, bruce.richardson@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [PATCH v3 05/10] dts: add node memory setup Date: Tue, 
17 Jan 2023 15:49:01 +0000 Message-Id: <20230117154906.860916-6-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230117154906.860916-1-juraj.linkes@pantheon.tech> References: <20221114165438.1133783-1-juraj.linkes@pantheon.tech> <20230117154906.860916-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Setup hugepages on nodes. This is useful not only on SUT nodes, but also on TG nodes which use TGs that utilize hugepages. Signed-off-by: Juraj Linkeš --- dts/framework/config/__init__.py | 16 ++++ dts/framework/config/arch.py | 57 +++++++++++++ dts/framework/remote_session/linux_session.py | 85 +++++++++++++++++++ dts/framework/remote_session/os_session.py | 10 +++ dts/framework/testbed_model/node.py | 15 ++++ 5 files changed, 183 insertions(+) create mode 100644 dts/framework/config/arch.py diff --git a/dts/framework/config/__init__.py b/dts/framework/config/__init__.py index 17b917f3b3..ce6e709c6f 100644 --- a/dts/framework/config/__init__.py +++ b/dts/framework/config/__init__.py @@ -19,6 +19,8 @@ from framework.settings import SETTINGS +from .arch import PPC64, Arch, Arm64, i686, x86_32, x86_64 + class StrEnum(Enum): @staticmethod @@ -176,3 +178,17 @@ def load_config() -> Configuration: CONFIGURATION = load_config() + + +def create_arch(node_config: NodeConfiguration) -> Arch: + match node_config.arch: + case Architecture.x86_64: + return x86_64() + case Architecture.x86_32: + return x86_32() + case Architecture.i686: + return i686() + case Architecture.ppc64le: + return PPC64() + case Architecture.arm64: + return Arm64() diff --git a/dts/framework/config/arch.py b/dts/framework/config/arch.py new file mode 100644 index 0000000000..a226b9a6a9 --- /dev/null +++ b/dts/framework/config/arch.py @@ -0,0 +1,57 @@ +# 
SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2023 PANTHEON.tech s.r.o. + + +class Arch(object): + """ + Stores architecture-specific information. + """ + + @property + def default_hugepage_memory(self) -> int: + """ + Return the default amount of memory allocated for hugepages DPDK will use. + The default is an amount equal to 256 2MB hugepages (512MB memory). + """ + return 256 * 2048 + + @property + def hugepage_force_first_numa(self) -> bool: + """ + An architecture may need to force configuration of hugepages to first socket. + """ + return False + + +class x86_64(Arch): + @property + def default_hugepage_memory(self) -> int: + return 4096 * 2048 + + +class x86_32(Arch): + @property + def hugepage_force_first_numa(self) -> bool: + return True + + +class i686(Arch): + @property + def default_hugepage_memory(self) -> int: + return 512 * 2048 + + @property + def hugepage_force_first_numa(self) -> bool: + return True + + +class PPC64(Arch): + @property + def default_hugepage_memory(self) -> int: + return 512 * 2048 + + +class Arm64(Arch): + @property + def default_hugepage_memory(self) -> int: + return 2048 * 2048 diff --git a/dts/framework/remote_session/linux_session.py b/dts/framework/remote_session/linux_session.py index 6809102038..4dc52132d3 100644 --- a/dts/framework/remote_session/linux_session.py +++ b/dts/framework/remote_session/linux_session.py @@ -2,7 +2,9 @@ # Copyright(c) 2023 PANTHEON.tech s.r.o. 
# Copyright(c) 2023 University of New Hampshire +from framework.exception import RemoteCommandExecutionError from framework.testbed_model import LogicalCore +from framework.utils import expand_range from .posix_session import PosixSession @@ -27,3 +29,86 @@ def get_remote_cpus(self, use_first_core: bool) -> list[LogicalCore]: def get_dpdk_file_prefix(self, dpdk_prefix) -> str: return dpdk_prefix + + def setup_hugepages( + self, hugepage_amount: int, force_first_numa: bool = False + ) -> None: + self._logger.info("Getting Hugepage information.") + hugepage_size = self._get_hugepage_size() + hugepages_total = self._get_hugepages_total() + self._numa_nodes = self._get_numa_nodes() + + target_hugepages_total = int(hugepage_amount / hugepage_size) + if hugepage_amount % hugepage_size: + target_hugepages_total += 1 + if force_first_numa or hugepages_total != target_hugepages_total: + # when forcing numa, we need to clear existing hugepages regardless + # of size, so they can be moved to the first numa node + self._configure_huge_pages( + target_hugepages_total, hugepage_size, force_first_numa + ) + else: + self._logger.info("Hugepages already configured.") + self._mount_huge_pages() + + def _get_hugepage_size(self) -> int: + hugepage_size = self.remote_session.send_command( + "awk '/Hugepagesize/ {print $2}' /proc/meminfo" + ).stdout + return int(hugepage_size) + + def _get_hugepages_total(self) -> int: + hugepages_total = self.remote_session.send_command( + "awk '/HugePages_Total/ { print $2 }' /proc/meminfo" + ).stdout + return int(hugepages_total) + + def _get_numa_nodes(self) -> list[int]: + try: + numa_count = self.remote_session.send_command( + "cat /sys/devices/system/node/online", verify=True + ).stdout + numa_range = expand_range(numa_count) + except RemoteCommandExecutionError: + # the file doesn't exist, meaning the node doesn't support numa + numa_range = [] + return numa_range + + def _mount_huge_pages(self) -> None: + self._logger.info("Re-mounting 
Hugepages.") + hugapge_fs_cmd = "awk '/hugetlbfs/ { print $2 }' /proc/mounts" + self.remote_session.send_command(f"umount $({hugapge_fs_cmd})") + result = self.remote_session.send_command(hugapge_fs_cmd) + if result.stdout == "": + remote_mount_path = "/mnt/huge" + self.remote_session.send_command(f"mkdir -p {remote_mount_path}") + self.remote_session.send_command( + f"mount -t hugetlbfs nodev {remote_mount_path}" + ) + + def _supports_numa(self) -> bool: + # the system supports numa if self._numa_nodes is non-empty and there are more + # than one numa node (in the latter case it may actually support numa, but + # there's no reason to do any numa specific configuration) + return len(self._numa_nodes) > 1 + + def _configure_huge_pages( + self, amount: int, size: int, force_first_numa: bool + ) -> None: + self._logger.info("Configuring Hugepages.") + hugepage_config_path = ( + f"/sys/kernel/mm/hugepages/hugepages-{size}kB/nr_hugepages" + ) + if force_first_numa and self._supports_numa(): + # clear non-numa hugepages + self.remote_session.send_command( + f"echo 0 | sudo tee {hugepage_config_path}" + ) + hugepage_config_path = ( + f"/sys/devices/system/node/node{self._numa_nodes[0]}/hugepages" + f"/hugepages-{size}kB/nr_hugepages" + ) + + self.remote_session.send_command( + f"echo {amount} | sudo tee {hugepage_config_path}" + ) diff --git a/dts/framework/remote_session/os_session.py b/dts/framework/remote_session/os_session.py index c30753e0b8..966b7f76d5 100644 --- a/dts/framework/remote_session/os_session.py +++ b/dts/framework/remote_session/os_session.py @@ -151,3 +151,13 @@ def get_dpdk_file_prefix(self, dpdk_prefix) -> str: """ Get the DPDK file prefix that will be used when running DPDK apps. """ + + @abstractmethod + def setup_hugepages( + self, hugepage_amount: int, force_first_numa: bool = False + ) -> None: + """ + Get the node's Hugepage Size, configure the specified amount of hugepages + if needed and mount the hugepages if needed. 
+ If force_first_numa is True, configure hugepages just on the first socket. + """ diff --git a/dts/framework/testbed_model/node.py b/dts/framework/testbed_model/node.py index cf2af2ca72..d22bf3b7d2 100644 --- a/dts/framework/testbed_model/node.py +++ b/dts/framework/testbed_model/node.py @@ -8,9 +8,11 @@ """ from framework.config import ( + Arch, BuildTargetConfiguration, ExecutionConfiguration, NodeConfiguration, + create_arch, ) from framework.logger import DTSLOG, getLogger from framework.remote_session import OSSession, create_session @@ -37,6 +39,7 @@ class Node(object): lcores: list[LogicalCore] _logger: DTSLOG _other_sessions: list[OSSession] + _arch: Arch def __init__(self, node_config: NodeConfiguration): self.config = node_config @@ -51,6 +54,7 @@ def __init__(self, node_config: NodeConfiguration): ).filter() self._other_sessions = [] + self._arch = create_arch(self.config) self._logger.info(f"Created node: {self.name}") @@ -59,6 +63,7 @@ def set_up_execution(self, execution_config: ExecutionConfiguration) -> None: Perform the execution setup that will be done for each execution this node is part of. """ + self._setup_hugepages() self._set_up_execution(execution_config) def _set_up_execution(self, execution_config: ExecutionConfiguration) -> None: @@ -153,6 +158,16 @@ def _get_remote_cpus(self) -> None: self._logger.info("Getting CPU information.") self.lcores = self.main_session.get_remote_cpus(self.config.use_first_core) + def _setup_hugepages(self): + """ + Setup hugepages on the Node. Different architectures can supply different + amounts of memory for hugepages and numa-based hugepage allocation may need + to be considered. + """ + self.main_session.setup_hugepages( + self._arch.default_hugepage_memory, self._arch.hugepage_force_first_numa + ) + def close(self) -> None: """ Close all connections and free other resources. 
From patchwork Tue Jan 17 15:49:02 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 122195 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id CA5DD423FE; Tue, 17 Jan 2023 16:50:11 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 3F17E42D6A; Tue, 17 Jan 2023 16:49:23 +0100 (CET) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id 3EFA742D36 for ; Tue, 17 Jan 2023 16:49:20 +0100 (CET) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 7F0651D8125; Tue, 17 Jan 2023 16:49:19 +0100 (CET) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id Xp9g0HeaSkNk; Tue, 17 Jan 2023 16:49:18 +0100 (CET) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id AF1271D8128; Tue, 17 Jan 2023 16:49:10 +0100 (CET) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com, bruce.richardson@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [PATCH v3 06/10] dts: add test suite module Date: Tue, 17 Jan 2023 15:49:02 +0000 Message-Id: <20230117154906.860916-7-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230117154906.860916-1-juraj.linkes@pantheon.tech> References: <20221114165438.1133783-1-juraj.linkes@pantheon.tech> <20230117154906.860916-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list 
List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org The module implements the base class that all test suites inherit from. It implements methods common to all test suites. The derived test suites implement test cases and any particular setup needed for the suite or tests. Signed-off-by: Juraj Linkeš --- dts/conf.yaml | 2 + dts/framework/config/__init__.py | 4 + dts/framework/config/conf_yaml_schema.json | 10 + dts/framework/exception.py | 16 ++ dts/framework/settings.py | 24 +++ dts/framework/test_suite.py | 228 +++++++++++++++++++++ 6 files changed, 284 insertions(+) create mode 100644 dts/framework/test_suite.py diff --git a/dts/conf.yaml b/dts/conf.yaml index 1648e5c3c5..2111d537cf 100644 --- a/dts/conf.yaml +++ b/dts/conf.yaml @@ -8,6 +8,8 @@ executions: cpu: native compiler: gcc compiler_wrapper: ccache + perf: false + func: true system_under_test: "SUT 1" nodes: - name: "SUT 1" diff --git a/dts/framework/config/__init__.py b/dts/framework/config/__init__.py index ce6e709c6f..ce3f20f6a9 100644 --- a/dts/framework/config/__init__.py +++ b/dts/framework/config/__init__.py @@ -119,6 +119,8 @@ def from_dict(d: dict) -> "BuildTargetConfiguration": @dataclass(slots=True, frozen=True) class ExecutionConfiguration: build_targets: list[BuildTargetConfiguration] + perf: bool + func: bool system_under_test: NodeConfiguration @staticmethod @@ -131,6 +133,8 @@ def from_dict(d: dict, node_map: dict) -> "ExecutionConfiguration": return ExecutionConfiguration( build_targets=build_targets, + perf=d["perf"], + func=d["func"], system_under_test=node_map[sut_name], ) diff --git a/dts/framework/config/conf_yaml_schema.json b/dts/framework/config/conf_yaml_schema.json index 81f304da5e..abf15ebea8 100644 --- a/dts/framework/config/conf_yaml_schema.json +++ b/dts/framework/config/conf_yaml_schema.json @@ -142,6 +142,14 @@ }, "minimum": 1 }, + "perf": { + "type": "boolean", + "description": 
"Enable performance testing." + }, + "func": { + "type": "boolean", + "description": "Enable functional testing." + }, "system_under_test": { "$ref": "#/definitions/node_name" } @@ -149,6 +157,8 @@ "additionalProperties": false, "required": [ "build_targets", + "perf", + "func", "system_under_test" ] }, diff --git a/dts/framework/exception.py b/dts/framework/exception.py index b4545a5a40..ca353d98fc 100644 --- a/dts/framework/exception.py +++ b/dts/framework/exception.py @@ -24,6 +24,7 @@ class ErrorSeverity(IntEnum): REMOTE_CMD_EXEC_ERR = 3 SSH_ERR = 4 DPDK_BUILD_ERR = 10 + TESTCASE_VERIFY_ERR = 20 class DTSError(Exception): @@ -128,3 +129,18 @@ class DPDKBuildError(DTSError): """ severity: ClassVar[ErrorSeverity] = ErrorSeverity.DPDK_BUILD_ERR + + +class TestCaseVerifyError(DTSError): + """ + Used in test cases to verify the expected behavior. + """ + + value: str + severity: ClassVar[ErrorSeverity] = ErrorSeverity.TESTCASE_VERIFY_ERR + + def __init__(self, value: str): + self.value = value + + def __str__(self) -> str: + return repr(self.value) diff --git a/dts/framework/settings.py b/dts/framework/settings.py index a298b1eaac..5762bd2bee 100644 --- a/dts/framework/settings.py +++ b/dts/framework/settings.py @@ -66,6 +66,8 @@ class _Settings: skip_setup: bool dpdk_ref: Path | str compile_timeout: float + test_cases: list + re_run: int def _get_parser() -> argparse.ArgumentParser: @@ -139,6 +141,26 @@ def _get_parser() -> argparse.ArgumentParser: help="[DTS_COMPILE_TIMEOUT] The timeout for compiling DPDK.", ) + parser.add_argument( + "--test-cases", + action=_env_arg("DTS_TESTCASES"), + default="", + required=False, + help="[DTS_TESTCASES] Comma-separated list of test cases to execute. 
" + "Unknown test cases will be silently ignored.", + ) + + parser.add_argument( + "--re-run", + "--re_run", + action=_env_arg("DTS_RERUN"), + default=0, + type=int, + required=False, + help="[DTS_RERUN] Re-run each test case the specified amount of times " + "if a test failure occurs", + ) + return parser @@ -162,6 +184,8 @@ def _get_settings() -> _Settings: skip_setup=(parsed_args.skip_setup == "Y"), dpdk_ref=parsed_args.dpdk_ref, compile_timeout=parsed_args.compile_timeout, + test_cases=parsed_args.test_cases.split(",") if parsed_args.test_cases else [], + re_run=parsed_args.re_run, ) diff --git a/dts/framework/test_suite.py b/dts/framework/test_suite.py new file mode 100644 index 0000000000..0972a70c14 --- /dev/null +++ b/dts/framework/test_suite.py @@ -0,0 +1,228 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation +# Copyright(c) 2023 PANTHEON.tech s.r.o. + +""" +Base class for creating DTS test cases. +""" + +import inspect +import re +from collections.abc import MutableSequence +from types import MethodType + +from .exception import SSHTimeoutError, TestCaseVerifyError +from .logger import DTSLOG, getLogger +from .settings import SETTINGS +from .testbed_model import SutNode + + +class TestSuite(object): + """ + The base TestSuite class provides methods for handling basic flow of a test suite: + * test case filtering and collection + * test suite setup/cleanup + * test setup/cleanup + * test case execution + * error handling and results storage + Test cases are implemented by derived classes. Test cases are all methods + starting with test_, further divided into performance test cases + (starting with test_perf_) and functional test cases (all other test cases). + By default, all test cases will be executed. A list of testcase str names + may be specified in conf.yaml or on the command line + to filter which test cases to run. 
+ The methods named [set_up|tear_down]_[suite|test_case] should be overridden + in derived classes if the appropriate suite/test case fixtures are needed. + """ + + sut_node: SutNode + _logger: DTSLOG + _test_cases_to_run: list[str] + _func: bool + _errors: MutableSequence[Exception] + + def __init__( + self, + sut_node: SutNode, + test_cases: list[str], + func: bool, + errors: MutableSequence[Exception], + ): + self.sut_node = sut_node + self._logger = getLogger(self.__class__.__name__) + self._test_cases_to_run = test_cases + self._test_cases_to_run.extend(SETTINGS.test_cases) + self._func = func + self._errors = errors + + def set_up_suite(self) -> None: + """ + Set up test fixtures common to all test cases; this is done before + any test case is run. + """ + + def tear_down_suite(self) -> None: + """ + Tear down the previously created test fixtures common to all test cases. + """ + + def set_up_test_case(self) -> None: + """ + Set up test fixtures before each test case. + """ + + def tear_down_test_case(self) -> None: + """ + Tear down the previously created test fixtures after each test case. + """ + + def verify(self, condition: bool, failure_description: str) -> None: + if not condition: + self._logger.debug( + "A test case failed, showing the last 10 commands executed on SUT:" + ) + for command_res in self.sut_node.main_session.remote_session.history[-10:]: + self._logger.debug(command_res.command) + raise TestCaseVerifyError(failure_description) + + def run(self) -> None: + """ + Setup, execute and teardown the whole suite. + Suite execution consists of running all test cases scheduled to be executed. + A test cast run consists of setup, execution and teardown of said test case. 
+ """ + test_suite_name = self.__class__.__name__ + + try: + self._logger.info(f"Starting test suite setup: {test_suite_name}") + self.set_up_suite() + self._logger.info(f"Test suite setup successful: {test_suite_name}") + except Exception as e: + self._logger.exception(f"Test suite setup ERROR: {test_suite_name}") + self._errors.append(e) + + else: + self._execute_test_suite() + + finally: + try: + self.tear_down_suite() + self.sut_node.kill_cleanup_dpdk_apps() + except Exception as e: + self._logger.exception(f"Test suite teardown ERROR: {test_suite_name}") + self._logger.warning( + f"Test suite '{test_suite_name}' teardown failed, " + f"the next test suite may be affected." + ) + self._errors.append(e) + + def _execute_test_suite(self) -> None: + """ + Execute all test cases scheduled to be executed in this suite. + """ + if self._func: + for test_case_method in self._get_functional_test_cases(): + all_attempts = SETTINGS.re_run + 1 + attempt_nr = 1 + while ( + not self._run_test_case(test_case_method) + and attempt_nr <= all_attempts + ): + attempt_nr += 1 + self._logger.info( + f"Re-running FAILED test case '{test_case_method.__name__}'. " + f"Attempt number {attempt_nr} out of {all_attempts}." + ) + + def _get_functional_test_cases(self) -> list[MethodType]: + """ + Get all functional test cases. + """ + return self._get_test_cases(r"test_(?!perf_)") + + def _get_test_cases(self, test_case_regex: str) -> list[MethodType]: + """ + Return a list of test cases matching test_case_regex. + """ + self._logger.debug(f"Searching for test cases in {self.__class__.__name__}.") + filtered_test_cases = [] + for test_case_name, test_case in inspect.getmembers(self, inspect.ismethod): + if self._should_be_executed(test_case_name, test_case_regex): + filtered_test_cases.append(test_case) + cases_str = ", ".join((x.__name__ for x in filtered_test_cases)) + self._logger.debug( + f"Found test cases '{cases_str}' in {self.__class__.__name__}." 
+ ) + return filtered_test_cases + + def _should_be_executed(self, test_case_name: str, test_case_regex: str) -> bool: + """ + Check whether the test case should be executed. + """ + match = bool(re.match(test_case_regex, test_case_name)) + if self._test_cases_to_run: + return match and test_case_name in self._test_cases_to_run + + return match + + def _run_test_case(self, test_case_method: MethodType) -> bool: + """ + Setup, execute and teardown a test case in this suite. + Exceptions are caught and recorded in logs. + """ + test_case_name = test_case_method.__name__ + result = False + + try: + # run set_up function for each case + self.set_up_test_case() + except SSHTimeoutError as e: + self._logger.exception(f"Test case setup FAILED: {test_case_name}") + self._errors.append(e) + except Exception as e: + self._logger.exception(f"Test case setup ERROR: {test_case_name}") + self._errors.append(e) + + else: + # run test case if setup was successful + result = self._execute_test_case(test_case_method) + + finally: + try: + self.tear_down_test_case() + except Exception as e: + self._logger.exception(f"Test case teardown ERROR: {test_case_name}") + self._logger.warning( + f"Test case '{test_case_name}' teardown failed, " + f"the next test case may be affected." + ) + self._errors.append(e) + result = False + + return result + + def _execute_test_case(self, test_case_method: MethodType) -> bool: + """ + Execute one test case and handle failures. 
+ """ + test_case_name = test_case_method.__name__ + result = False + try: + self._logger.info(f"Starting test case execution: {test_case_name}") + test_case_method() + result = True + self._logger.info(f"Test case execution PASSED: {test_case_name}") + + except TestCaseVerifyError as e: + self._logger.exception(f"Test case execution FAILED: {test_case_name}") + self._errors.append(e) + except Exception as e: + self._logger.exception(f"Test case execution ERROR: {test_case_name}") + self._errors.append(e) + except KeyboardInterrupt: + self._logger.error( + f"Test case execution INTERRUPTED by user: {test_case_name}" + ) + raise KeyboardInterrupt("Stop DTS") + + return result From patchwork Tue Jan 17 15:49:03 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 122194 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 33EAB423FE; Tue, 17 Jan 2023 16:50:05 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 5DAF942D66; Tue, 17 Jan 2023 16:49:22 +0100 (CET) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id D8A2742D36 for ; Tue, 17 Jan 2023 16:49:19 +0100 (CET) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 241CD1D8124; Tue, 17 Jan 2023 16:49:19 +0100 (CET) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id SjXqolEL6-rx; Tue, 17 Jan 2023 16:49:18 +0100 (CET) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id 902711D812A; Tue, 17 Jan 2023 
16:49:12 +0100 (CET) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com, bruce.richardson@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [PATCH v3 07/10] dts: add hello world testplan Date: Tue, 17 Jan 2023 15:49:03 +0000 Message-Id: <20230117154906.860916-8-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230117154906.860916-1-juraj.linkes@pantheon.tech> References: <20221114165438.1133783-1-juraj.linkes@pantheon.tech> <20230117154906.860916-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org The testplan describes the capabilities of the tested application along with the description of testcases to test it. Signed-off-by: Juraj Linkeš --- dts/test_plans/hello_world_test_plan.rst | 68 ++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 dts/test_plans/hello_world_test_plan.rst diff --git a/dts/test_plans/hello_world_test_plan.rst b/dts/test_plans/hello_world_test_plan.rst new file mode 100644 index 0000000000..566a9bb10c --- /dev/null +++ b/dts/test_plans/hello_world_test_plan.rst @@ -0,0 +1,68 @@ +.. SPDX-License-Identifier: BSD-3-Clause + Copyright(c) 2010-2017 Intel Corporation + +============================================= +Sample Application Tests: Hello World Example +============================================= + +This example is one of the most simple RTE application that can be +done. The program will just print a "helloworld" message on every +enabled lcore. 
+ +Command Usage:: + + ./dpdk-helloworld -c COREMASK [-m NB] [-r NUM] [-n NUM] + + EAL option list: + -c COREMASK: hexadecimal bitmask of cores we are running on + -m MB : memory to allocate (default = size of hugemem) + -n NUM : force number of memory channels (don't detect) + -r NUM : force number of memory ranks (don't detect) + --huge-file: base filename for hugetlbfs entries + debug options: + --no-huge : use malloc instead of hugetlbfs + --no-pci : disable pci + --no-hpet : disable hpet + --no-shconf: no shared config (mmap'd files) + + +Prerequisites +============= + +Support igb_uio and vfio driver, if used vfio, kernel need 3.6+ and enable vt-d in bios. +When used vfio , used "modprobe vfio" and "modprobe vfio-pci" insmod vfio driver, then used +"./tools/dpdk_nic_bind.py --bind=vfio-pci device_bus_id" to bind vfio driver to test driver. + +To find out the mapping of lcores (processor) to core id and socket (physical +id), the command below can be used:: + + $ grep "processor\|physical id\|core id\|^$" /proc/cpuinfo + +The total logical core number will be used as ``helloworld`` input parameters. + + +Test Case: run hello world on single lcores +=========================================== + +To run example in single lcore :: + + $ ./dpdk-helloworld -c 1 + hello from core 0 + +Check the output is exact the lcore 0 + + +Test Case: run hello world on every lcores +========================================== + +To run the example in all the enabled lcore :: + + $ ./dpdk-helloworld -cffffff + hello from core 1 + hello from core 2 + hello from core 3 + ... + ... + hello from core 0 + +Verify the output of according to all the core masks. 
From patchwork Tue Jan 17 15:49:04 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 122196 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 28E95423FE; Tue, 17 Jan 2023 16:50:19 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 20E5D42D71; Tue, 17 Jan 2023 16:49:24 +0100 (CET) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id 797C342D54 for ; Tue, 17 Jan 2023 16:49:21 +0100 (CET) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id D06B61D8128; Tue, 17 Jan 2023 16:49:20 +0100 (CET) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id eJf0Us1tgsTS; Tue, 17 Jan 2023 16:49:19 +0100 (CET) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id 394AF1D811B; Tue, 17 Jan 2023 16:49:13 +0100 (CET) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com, bruce.richardson@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [PATCH v3 08/10] dts: add hello world testsuite Date: Tue, 17 Jan 2023 15:49:04 +0000 Message-Id: <20230117154906.860916-9-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230117154906.860916-1-juraj.linkes@pantheon.tech> References: <20221114165438.1133783-1-juraj.linkes@pantheon.tech> <20230117154906.860916-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: 
list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org The test suite implements test cases defined in the corresponding test plan. Signed-off-by: Juraj Linkeš --- dts/framework/remote_session/__init__.py | 2 +- dts/framework/remote_session/os_session.py | 16 ++++- .../remote_session/remote/__init__.py | 2 +- dts/framework/testbed_model/__init__.py | 1 + dts/framework/testbed_model/sut_node.py | 12 +++- dts/tests/TestSuite_hello_world.py | 59 +++++++++++++++++++ 6 files changed, 88 insertions(+), 4 deletions(-) create mode 100644 dts/tests/TestSuite_hello_world.py diff --git a/dts/framework/remote_session/__init__.py b/dts/framework/remote_session/__init__.py index 747316c78a..ee221503df 100644 --- a/dts/framework/remote_session/__init__.py +++ b/dts/framework/remote_session/__init__.py @@ -17,7 +17,7 @@ from .linux_session import LinuxSession from .os_session import OSSession -from .remote import RemoteSession, SSHSession +from .remote import CommandResult, RemoteSession, SSHSession def create_session( diff --git a/dts/framework/remote_session/os_session.py b/dts/framework/remote_session/os_session.py index 966b7f76d5..a869c7acde 100644 --- a/dts/framework/remote_session/os_session.py +++ b/dts/framework/remote_session/os_session.py @@ -12,7 +12,7 @@ from framework.testbed_model import LogicalCore, MesonArgs from framework.utils import EnvVarsDict -from .remote import RemoteSession, create_remote_session +from .remote import CommandResult, RemoteSession, create_remote_session class OSSession(ABC): @@ -50,6 +50,20 @@ def is_alive(self) -> bool: """ return self.remote_session.is_alive() + def send_command( + self, + command: str, + timeout: float, + verify: bool = False, + env: EnvVarsDict | None = None, + ) -> CommandResult: + """ + An all-purpose API in case the command to be executed is already + OS-agnostic, such as when the path to the executed command has been + 
constructed beforehand. + """ + return self.remote_session.send_command(command, timeout, verify, env) + @abstractmethod def guess_dpdk_remote_dir(self, remote_dir) -> PurePath: """ diff --git a/dts/framework/remote_session/remote/__init__.py b/dts/framework/remote_session/remote/__init__.py index f3092f8bbe..8a1512210a 100644 --- a/dts/framework/remote_session/remote/__init__.py +++ b/dts/framework/remote_session/remote/__init__.py @@ -6,7 +6,7 @@ from framework.config import NodeConfiguration from framework.logger import DTSLOG -from .remote_session import RemoteSession +from .remote_session import CommandResult, RemoteSession from .ssh_session import SSHSession diff --git a/dts/framework/testbed_model/__init__.py b/dts/framework/testbed_model/__init__.py index 2be5169dc8..efb463f2e2 100644 --- a/dts/framework/testbed_model/__init__.py +++ b/dts/framework/testbed_model/__init__.py @@ -13,6 +13,7 @@ from .hw import ( LogicalCore, LogicalCoreAmount, + LogicalCoreAmountFilter, LogicalCoreList, LogicalCoreListFilter, VirtualDevice, diff --git a/dts/framework/testbed_model/sut_node.py b/dts/framework/testbed_model/sut_node.py index ea0b96d6bf..6a0472a733 100644 --- a/dts/framework/testbed_model/sut_node.py +++ b/dts/framework/testbed_model/sut_node.py @@ -8,7 +8,7 @@ from pathlib import PurePath from framework.config import BuildTargetConfiguration, NodeConfiguration -from framework.remote_session import OSSession +from framework.remote_session import CommandResult, OSSession from framework.settings import SETTINGS from framework.utils import EnvVarsDict, skip_setup @@ -249,3 +249,13 @@ def create_eal_parameters( vdevs=vdevs, other_eal_param=other_eal_param, ) + + def run_dpdk_app( + self, app_path: PurePath, eal_args: EalParameters, timeout: float = 30 + ) -> CommandResult: + """ + Run DPDK application on the remote node. 
+ """ + return self.main_session.send_command( + f"{app_path} {eal_args}", timeout, verify=True + ) diff --git a/dts/tests/TestSuite_hello_world.py b/dts/tests/TestSuite_hello_world.py new file mode 100644 index 0000000000..5ade941d31 --- /dev/null +++ b/dts/tests/TestSuite_hello_world.py @@ -0,0 +1,59 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2014 Intel Corporation + +""" +DPDK Test suite. +Test HelloWorld example. +""" + +from framework.test_suite import TestSuite +from framework.testbed_model import ( + LogicalCoreAmount, + LogicalCoreAmountFilter, + LogicalCoreList, +) + + +class TestHelloWorld(TestSuite): + def set_up_suite(self) -> None: + """ + Run at the start of each test suite. + hello_world Prerequisites: + helloworld build pass + """ + self.app_helloworld_path = self.sut_node.build_dpdk_app("helloworld") + + def test_hello_world_single_core(self) -> None: + """ + Run hello world on single lcores + Only received hello message from core0 + """ + + # get the mask for the first core + lcore_amount = LogicalCoreAmount(1, 1, 1) + lcores = LogicalCoreAmountFilter(self.sut_node.lcores, lcore_amount).filter() + eal_para = self.sut_node.create_eal_parameters( + lcore_filter_specifier=lcore_amount + ) + result = self.sut_node.run_dpdk_app(self.app_helloworld_path, eal_para) + self.verify( + f"hello from core {int(lcores[0])}" in result.stdout, + f"EAL not started on lcore{lcores[0]}", + ) + + def test_hello_world_all_cores(self) -> None: + """ + Run hello world on all lcores + Received hello message from all lcores + """ + + # get the maximum logical core number + eal_para = self.sut_node.create_eal_parameters( + lcore_filter_specifier=LogicalCoreList(self.sut_node.lcores) + ) + result = self.sut_node.run_dpdk_app(self.app_helloworld_path, eal_para, 50) + for lcore in self.sut_node.lcores: + self.verify( + f"hello from core {int(lcore)}" in result.stdout, + f"EAL not started on lcore{lcore}", + ) From patchwork Tue Jan 17 15:49:05 2023 
Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 122197 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 550B7423FE; Tue, 17 Jan 2023 16:50:27 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 5D80742D7A; Tue, 17 Jan 2023 16:49:25 +0100 (CET) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id F157042D5B for ; Tue, 17 Jan 2023 16:49:21 +0100 (CET) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 52B881D812A; Tue, 17 Jan 2023 16:49:21 +0100 (CET) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id jkvt_cmHLYr2; Tue, 17 Jan 2023 16:49:20 +0100 (CET) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id 0213C1D811C; Tue, 17 Jan 2023 16:49:13 +0100 (CET) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com, bruce.richardson@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [PATCH v3 09/10] dts: add test suite config and runner Date: Tue, 17 Jan 2023 15:49:05 +0000 Message-Id: <20230117154906.860916-10-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230117154906.860916-1-juraj.linkes@pantheon.tech> References: <20221114165438.1133783-1-juraj.linkes@pantheon.tech> <20230117154906.860916-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and 
discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org The config allows users to specify which test suites and test cases within test suites to run. Also add test suite running capabilities to dts runner. Signed-off-by: Juraj Linkeš --- dts/conf.yaml | 2 ++ dts/framework/config/__init__.py | 29 +++++++++++++++- dts/framework/config/conf_yaml_schema.json | 40 ++++++++++++++++++++++ dts/framework/dts.py | 19 ++++++++++ dts/framework/test_suite.py | 24 ++++++++++++- 5 files changed, 112 insertions(+), 2 deletions(-) diff --git a/dts/conf.yaml b/dts/conf.yaml index 2111d537cf..2c6ec84282 100644 --- a/dts/conf.yaml +++ b/dts/conf.yaml @@ -10,6 +10,8 @@ executions: compiler_wrapper: ccache perf: false func: true + test_suites: + - hello_world system_under_test: "SUT 1" nodes: - name: "SUT 1" diff --git a/dts/framework/config/__init__.py b/dts/framework/config/__init__.py index ce3f20f6a9..058fbf58db 100644 --- a/dts/framework/config/__init__.py +++ b/dts/framework/config/__init__.py @@ -12,7 +12,7 @@ import pathlib from dataclasses import dataclass from enum import Enum, auto, unique -from typing import Any +from typing import Any, TypedDict import warlock # type: ignore import yaml @@ -116,11 +116,34 @@ def from_dict(d: dict) -> "BuildTargetConfiguration": ) +class TestSuiteConfigDict(TypedDict): + suite: str + cases: list[str] + + +@dataclass(slots=True, frozen=True) +class TestSuiteConfig: + test_suite: str + test_cases: list[str] + + @staticmethod + def from_dict( + entry: str | TestSuiteConfigDict, + ) -> "TestSuiteConfig": + if isinstance(entry, str): + return TestSuiteConfig(test_suite=entry, test_cases=[]) + elif isinstance(entry, dict): + return TestSuiteConfig(test_suite=entry["suite"], test_cases=entry["cases"]) + else: + raise TypeError(f"{type(entry)} is not valid for a test suite config.") + + @dataclass(slots=True, frozen=True) class ExecutionConfiguration: build_targets: 
list[BuildTargetConfiguration] perf: bool func: bool + test_suites: list[TestSuiteConfig] system_under_test: NodeConfiguration @staticmethod @@ -128,6 +151,9 @@ def from_dict(d: dict, node_map: dict) -> "ExecutionConfiguration": build_targets: list[BuildTargetConfiguration] = list( map(BuildTargetConfiguration.from_dict, d["build_targets"]) ) + test_suites: list[TestSuiteConfig] = list( + map(TestSuiteConfig.from_dict, d["test_suites"]) + ) sut_name = d["system_under_test"] assert sut_name in node_map, f"Unknown SUT {sut_name} in execution {d}" @@ -135,6 +161,7 @@ def from_dict(d: dict, node_map: dict) -> "ExecutionConfiguration": build_targets=build_targets, perf=d["perf"], func=d["func"], + test_suites=test_suites, system_under_test=node_map[sut_name], ) diff --git a/dts/framework/config/conf_yaml_schema.json b/dts/framework/config/conf_yaml_schema.json index abf15ebea8..c4a9e75251 100644 --- a/dts/framework/config/conf_yaml_schema.json +++ b/dts/framework/config/conf_yaml_schema.json @@ -75,6 +75,32 @@ "cpu", "compiler" ] + }, + "test_suite": { + "type": "string", + "enum": [ + "hello_world" + ] + }, + "test_target": { + "type": "object", + "properties": { + "suite": { + "$ref": "#/definitions/test_suite" + }, + "cases": { + "type": "array", + "description": "If specified, only this subset of test suite's test cases will be run. Unknown test cases will be silently ignored.", + "items": { + "type": "string" + }, + "minimum": 1 + } + }, + "required": [ + "suite" + ], + "additionalProperties": false } }, "type": "object", @@ -150,6 +176,19 @@ "type": "boolean", "description": "Enable functional testing." 
}, + "test_suites": { + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "#/definitions/test_suite" + }, + { + "$ref": "#/definitions/test_target" + } + ] + } + }, "system_under_test": { "$ref": "#/definitions/node_name" } @@ -159,6 +198,7 @@ "build_targets", "perf", "func", + "test_suites", "system_under_test" ] }, diff --git a/dts/framework/dts.py b/dts/framework/dts.py index 6ea7c6e736..f98000450f 100644 --- a/dts/framework/dts.py +++ b/dts/framework/dts.py @@ -8,6 +8,7 @@ from .config import CONFIGURATION, BuildTargetConfiguration, ExecutionConfiguration from .exception import DTSError, ErrorSeverity from .logger import DTSLOG, getLogger +from .test_suite import get_test_suites from .testbed_model import SutNode from .utils import check_dts_python_version @@ -132,6 +133,24 @@ def _run_suites( with possibly only a subset of test cases. If no subset is specified, run all test cases. """ + for test_suite_config in execution.test_suites: + try: + full_suite_path = f"tests.TestSuite_{test_suite_config.test_suite}" + test_suite_classes = get_test_suites(full_suite_path) + suites_str = ", ".join((x.__name__ for x in test_suite_classes)) + dts_logger.debug( + f"Found test suites '{suites_str}' in '{full_suite_path}'." + ) + except Exception as e: + dts_logger.exception("An error occurred when searching for test suites.") + errors.append(e) + + else: + for test_suite_class in test_suite_classes: + test_suite = test_suite_class( + sut_node, test_suite_config.test_cases, execution.func, errors + ) + test_suite.run() def _exit_dts() -> None: diff --git a/dts/framework/test_suite.py b/dts/framework/test_suite.py index 0972a70c14..0cbedee478 100644 --- a/dts/framework/test_suite.py +++ b/dts/framework/test_suite.py @@ -6,12 +6,13 @@ Base class for creating DTS test cases. 
""" +import importlib import inspect import re from collections.abc import MutableSequence from types import MethodType -from .exception import SSHTimeoutError, TestCaseVerifyError +from .exception import ConfigurationError, SSHTimeoutError, TestCaseVerifyError from .logger import DTSLOG, getLogger from .settings import SETTINGS from .testbed_model import SutNode @@ -226,3 +227,24 @@ def _execute_test_case(self, test_case_method: MethodType) -> bool: raise KeyboardInterrupt("Stop DTS") return result + + +def get_test_suites(testsuite_module_path: str) -> list[type[TestSuite]]: + def is_test_suite(object) -> bool: + try: + if issubclass(object, TestSuite) and object is not TestSuite: + return True + except TypeError: + return False + return False + + try: + testcase_module = importlib.import_module(testsuite_module_path) + except ModuleNotFoundError as e: + raise ConfigurationError( + f"Testsuite '{testsuite_module_path}' not found." + ) from e + return [ + test_suite_class + for _, test_suite_class in inspect.getmembers(testcase_module, is_test_suite) + ] From patchwork Tue Jan 17 15:49:06 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit X-Patchwork-Submitter: =?utf-8?q?Juraj_Linke=C5=A1?= X-Patchwork-Id: 122198 X-Patchwork-Delegate: thomas@monjalon.net Return-Path: X-Original-To: patchwork@inbox.dpdk.org Delivered-To: patchwork@inbox.dpdk.org Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 7C41A423FE; Tue, 17 Jan 2023 16:50:34 +0100 (CET) Received: from mails.dpdk.org (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 4AF3942D53; Tue, 17 Jan 2023 16:49:27 +0100 (CET) Received: from lb.pantheon.sk (lb.pantheon.sk [46.229.239.20]) by mails.dpdk.org (Postfix) with ESMTP id CA9C242D6E for ; Tue, 17 Jan 2023 16:49:23 +0100 (CET) Received: from localhost (localhost [127.0.0.1]) by lb.pantheon.sk (Postfix) with ESMTP id 2EEFC1D811B; Tue, 17 Jan 2023 
16:49:23 +0100 (CET) X-Virus-Scanned: amavisd-new at siecit.sk Received: from lb.pantheon.sk ([127.0.0.1]) by localhost (lb.pantheon.sk [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id hhtkY47JuyFR; Tue, 17 Jan 2023 16:49:21 +0100 (CET) Received: from entguard.lab.pantheon.local (unknown [46.229.239.141]) by lb.pantheon.sk (Postfix) with ESMTP id 9DB241D812C; Tue, 17 Jan 2023 16:49:14 +0100 (CET) From: =?utf-8?q?Juraj_Linke=C5=A1?= To: thomas@monjalon.net, Honnappa.Nagarahalli@arm.com, ohilyard@iol.unh.edu, lijuan.tu@intel.com, bruce.richardson@intel.com Cc: dev@dpdk.org, =?utf-8?q?Juraj_Linke=C5=A1?= Subject: [PATCH v3 10/10] dts: add test results module Date: Tue, 17 Jan 2023 15:49:06 +0000 Message-Id: <20230117154906.860916-11-juraj.linkes@pantheon.tech> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20230117154906.860916-1-juraj.linkes@pantheon.tech> References: <20221114165438.1133783-1-juraj.linkes@pantheon.tech> <20230117154906.860916-1-juraj.linkes@pantheon.tech> MIME-Version: 1.0 X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org The module stores the results and errors from all executions, build targets, test suites and test cases. The result consist of the result of the setup and the teardown of each testing stage (listed above) and the results of the inner stages. The innermost stage is the case, which also contains the result of test case itself. The modules also produces a brief overview of the results and the number of executed tests. It also finds the proper return code to exit with from among the stored errors. 
Signed-off-by: Juraj Linkeš --- dts/framework/dts.py | 64 +++---- dts/framework/test_result.py | 316 +++++++++++++++++++++++++++++++++++ dts/framework/test_suite.py | 60 +++---- 3 files changed, 382 insertions(+), 58 deletions(-) create mode 100644 dts/framework/test_result.py diff --git a/dts/framework/dts.py b/dts/framework/dts.py index f98000450f..117b7cae83 100644 --- a/dts/framework/dts.py +++ b/dts/framework/dts.py @@ -6,14 +6,14 @@ import sys from .config import CONFIGURATION, BuildTargetConfiguration, ExecutionConfiguration -from .exception import DTSError, ErrorSeverity from .logger import DTSLOG, getLogger +from .test_result import BuildTargetResult, DTSResult, ExecutionResult, Result from .test_suite import get_test_suites from .testbed_model import SutNode from .utils import check_dts_python_version dts_logger: DTSLOG = getLogger("dts_runner") -errors = [] +result: DTSResult = DTSResult(dts_logger) def run_all() -> None: @@ -22,7 +22,7 @@ def run_all() -> None: config file. """ global dts_logger - global errors + global result # check the python version of the server that run dts check_dts_python_version() @@ -39,29 +39,31 @@ def run_all() -> None: # the SUT has not been initialized yet try: sut_node = SutNode(execution.system_under_test) + result.update_setup(Result.PASS) except Exception as e: dts_logger.exception( f"Connection to node {execution.system_under_test} failed." 
) - errors.append(e) + result.update_setup(Result.FAIL, e) else: nodes[sut_node.name] = sut_node if sut_node: - _run_execution(sut_node, execution) + _run_execution(sut_node, execution, result) except Exception as e: dts_logger.exception("An unexpected error has occurred.") - errors.append(e) + result.add_error(e) raise finally: try: for node in nodes.values(): node.close() + result.update_teardown(Result.PASS) except Exception as e: dts_logger.exception("Final cleanup of nodes failed.") - errors.append(e) + result.update_teardown(Result.ERROR, e) # we need to put the sys.exit call outside the finally clause to make sure # that unexpected exceptions will propagate @@ -72,61 +74,72 @@ def run_all() -> None: _exit_dts() -def _run_execution(sut_node: SutNode, execution: ExecutionConfiguration) -> None: +def _run_execution( + sut_node: SutNode, execution: ExecutionConfiguration, result: DTSResult +) -> None: """ Run the given execution. This involves running the execution setup as well as running all build targets in the given execution. 
""" dts_logger.info(f"Running execution with SUT '{execution.system_under_test.name}'.") + execution_result = result.add_execution(sut_node.config) try: sut_node.set_up_execution(execution) + execution_result.update_setup(Result.PASS) except Exception as e: dts_logger.exception("Execution setup failed.") - errors.append(e) + execution_result.update_setup(Result.FAIL, e) else: for build_target in execution.build_targets: - _run_build_target(sut_node, build_target, execution) + _run_build_target(sut_node, build_target, execution, execution_result) finally: try: sut_node.tear_down_execution() + execution_result.update_teardown(Result.PASS) except Exception as e: dts_logger.exception("Execution teardown failed.") - errors.append(e) + execution_result.update_teardown(Result.FAIL, e) def _run_build_target( sut_node: SutNode, build_target: BuildTargetConfiguration, execution: ExecutionConfiguration, + execution_result: ExecutionResult, ) -> None: """ Run the given build target. """ dts_logger.info(f"Running build target '{build_target.name}'.") + build_target_result = execution_result.add_build_target(build_target) try: sut_node.set_up_build_target(build_target) + result.dpdk_version = sut_node.dpdk_version + build_target_result.update_setup(Result.PASS) except Exception as e: dts_logger.exception("Build target setup failed.") - errors.append(e) + build_target_result.update_setup(Result.FAIL, e) else: - _run_suites(sut_node, execution) + _run_suites(sut_node, execution, build_target_result) finally: try: sut_node.tear_down_build_target() + build_target_result.update_teardown(Result.PASS) except Exception as e: dts_logger.exception("Build target teardown failed.") - errors.append(e) + build_target_result.update_teardown(Result.FAIL, e) def _run_suites( sut_node: SutNode, execution: ExecutionConfiguration, + build_target_result: BuildTargetResult, ) -> None: """ Use the given build_target to run execution's test suites @@ -143,12 +156,15 @@ def _run_suites( ) except 
Exception as e: dts_logger.exception("An error occurred when searching for test suites.") - errors.append(e) + result.update_setup(Result.ERROR, e) else: for test_suite_class in test_suite_classes: test_suite = test_suite_class( - sut_node, test_suite_config.test_cases, execution.func, errors + sut_node, + test_suite_config.test_cases, + execution.func, + build_target_result, ) test_suite.run() @@ -157,20 +173,8 @@ def _exit_dts() -> None: """ Process all errors and exit with the proper exit code. """ - if errors and dts_logger: - dts_logger.debug("Summary of errors:") - for error in errors: - dts_logger.debug(repr(error)) - - return_code = ErrorSeverity.NO_ERR - for error in errors: - error_return_code = ErrorSeverity.GENERIC_ERR - if isinstance(error, DTSError): - error_return_code = error.severity - - if error_return_code > return_code: - return_code = error_return_code + result.process() if dts_logger: dts_logger.info("DTS execution has ended.") - sys.exit(return_code) + sys.exit(result.get_return_code()) diff --git a/dts/framework/test_result.py b/dts/framework/test_result.py new file mode 100644 index 0000000000..743919820c --- /dev/null +++ b/dts/framework/test_result.py @@ -0,0 +1,316 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2023 PANTHEON.tech s.r.o. + +""" +Generic result container and reporters +""" + +import os.path +from collections.abc import MutableSequence +from enum import Enum, auto + +from .config import ( + OS, + Architecture, + BuildTargetConfiguration, + Compiler, + CPUType, + NodeConfiguration, +) +from .exception import DTSError, ErrorSeverity +from .logger import DTSLOG +from .settings import SETTINGS + + +class Result(Enum): + """ + An Enum defining the possible states that + a setup, a teardown or a test case may end up in. 
+ """ + + PASS = auto() + FAIL = auto() + ERROR = auto() + SKIP = auto() + + def __bool__(self) -> bool: + return self is self.PASS + + +class FixtureResult(object): + """ + A record that stored the result of a setup or a teardown. + The default is FAIL because immediately after creating the object + the setup of the corresponding stage will be executed, which also guarantees + the execution of teardown. + """ + + result: Result + error: Exception | None = None + + def __init__( + self, + result: Result = Result.FAIL, + error: Exception | None = None, + ): + self.result = result + self.error = error + + def __bool__(self) -> bool: + return bool(self.result) + + +class Statistics(dict): + """ + A helper class used to store the number of test cases by its result + along a few other basic information. + Using a dict provides a convenient way to format the data. + """ + + def __init__(self, dpdk_version): + super(Statistics, self).__init__() + for result in Result: + self[result.name] = 0 + self["PASS RATE"] = 0.0 + self["DPDK VERSION"] = dpdk_version + + def __iadd__(self, other: Result) -> "Statistics": + """ + Add a Result to the final count. + """ + self[other.name] += 1 + self["PASS RATE"] = ( + float(self[Result.PASS.name]) + * 100 + / sum(self[result.name] for result in Result) + ) + return self + + def __str__(self) -> str: + """ + Provide a string representation of the data. + """ + stats_str = "" + for key, value in self.items(): + stats_str += f"{key:<12} = {value}\n" + # according to docs, we should use \n when writing to text files + # on all platforms + return stats_str + + +class BaseResult(object): + """ + The Base class for all results. Stores the results of + the setup and teardown portions of the corresponding stage + and a list of results from each inner stage in _inner_results. 
+ """ + + setup_result: FixtureResult + teardown_result: FixtureResult + _inner_results: MutableSequence["BaseResult"] + + def __init__(self): + self.setup_result = FixtureResult() + self.teardown_result = FixtureResult() + self._inner_results = [] + + def update_setup(self, result: Result, error: Exception | None = None) -> None: + self.setup_result.result = result + self.setup_result.error = error + + def update_teardown(self, result: Result, error: Exception | None = None) -> None: + self.teardown_result.result = result + self.teardown_result.error = error + + def _get_setup_teardown_errors(self) -> list[Exception]: + errors = [] + if self.setup_result.error: + errors.append(self.setup_result.error) + if self.teardown_result.error: + errors.append(self.teardown_result.error) + return errors + + def _get_inner_errors(self) -> list[Exception]: + return [ + error + for inner_result in self._inner_results + for error in inner_result.get_errors() + ] + + def get_errors(self) -> list[Exception]: + return self._get_setup_teardown_errors() + self._get_inner_errors() + + def add_stats(self, statistics: Statistics) -> None: + for inner_result in self._inner_results: + inner_result.add_stats(statistics) + + +class TestCaseResult(BaseResult, FixtureResult): + """ + The test case specific result. + Stores the result of the actual test case. + Also stores the test case name. 
+ """ + + test_case_name: str + + def __init__(self, test_case_name: str): + super(TestCaseResult, self).__init__() + self.test_case_name = test_case_name + + def update(self, result: Result, error: Exception | None = None) -> None: + self.result = result + self.error = error + + def _get_inner_errors(self) -> list[Exception]: + if self.error: + return [self.error] + return [] + + def add_stats(self, statistics: Statistics) -> None: + statistics += self.result + + def __bool__(self) -> bool: + return ( + bool(self.setup_result) and bool(self.teardown_result) and bool(self.result) + ) + + +class TestSuiteResult(BaseResult): + """ + The test suite specific result. + The _inner_results list stores results of test cases in a given test suite. + Also stores the test suite name. + """ + + suite_name: str + + def __init__(self, suite_name: str): + super(TestSuiteResult, self).__init__() + self.suite_name = suite_name + + def add_test_case(self, test_case_name: str) -> TestCaseResult: + test_case_result = TestCaseResult(test_case_name) + self._inner_results.append(test_case_result) + return test_case_result + + +class BuildTargetResult(BaseResult): + """ + The build target specific result. + The _inner_results list stores results of test suites in a given build target. + Also stores build target specifics, such as compiler used to build DPDK. + """ + + arch: Architecture + os: OS + cpu: CPUType + compiler: Compiler + + def __init__(self, build_target: BuildTargetConfiguration): + super(BuildTargetResult, self).__init__() + self.arch = build_target.arch + self.os = build_target.os + self.cpu = build_target.cpu + self.compiler = build_target.compiler + + def add_test_suite(self, test_suite_name: str) -> TestSuiteResult: + test_suite_result = TestSuiteResult(test_suite_name) + self._inner_results.append(test_suite_result) + return test_suite_result + + +class ExecutionResult(BaseResult): + """ + The execution specific result. 
+ The _inner_results list stores results of build targets in a given execution. + Also stores the SUT node configuration. + """ + + sut_node: NodeConfiguration + + def __init__(self, sut_node: NodeConfiguration): + super(ExecutionResult, self).__init__() + self.sut_node = sut_node + + def add_build_target( + self, build_target: BuildTargetConfiguration + ) -> BuildTargetResult: + build_target_result = BuildTargetResult(build_target) + self._inner_results.append(build_target_result) + return build_target_result + + +class DTSResult(BaseResult): + """ + Stores environment information and test results from a DTS run, which are: + * Execution level information, such as SUT and TG hardware. + * Build target level information, such as compiler, target OS and cpu. + * Test suite results. + * All errors that are caught and recorded during DTS execution. + + The information is stored in nested objects. + + The class is capable of computing the return code used to exit DTS with + from the stored error. + + It also provides a brief statistical summary of passed/failed test cases. + """ + + dpdk_version: str | None + _logger: DTSLOG + _errors: list[Exception] + _return_code: ErrorSeverity + _stats_result: Statistics | None + _stats_filename: str + + def __init__(self, logger: DTSLOG): + super(DTSResult, self).__init__() + self.dpdk_version = None + self._logger = logger + self._errors = [] + self._return_code = ErrorSeverity.NO_ERR + self._stats_result = None + self._stats_filename = os.path.join(SETTINGS.output_dir, "statistics.txt") + + def add_execution(self, sut_node: NodeConfiguration) -> ExecutionResult: + execution_result = ExecutionResult(sut_node) + self._inner_results.append(execution_result) + return execution_result + + def add_error(self, error) -> None: + self._errors.append(error) + + def process(self) -> None: + """ + Process the data after a DTS run. + The data is added to nested objects during runtime and this parent object + is not updated at that time. 
This requires us to process the nested data + after it's all been gathered. + + The processing gathers all errors and the result statistics of test cases. + """ + self._errors += self.get_errors() + if self._errors and self._logger: + self._logger.debug("Summary of errors:") + for error in self._errors: + self._logger.debug(repr(error)) + + self._stats_result = Statistics(self.dpdk_version) + self.add_stats(self._stats_result) + with open(self._stats_filename, "w+") as stats_file: + stats_file.write(str(self._stats_result)) + + def get_return_code(self) -> int: + """ + Go through all stored Exceptions and return the highest error code found. + """ + for error in self._errors: + error_return_code = ErrorSeverity.GENERIC_ERR + if isinstance(error, DTSError): + error_return_code = error.severity + + if error_return_code > self._return_code: + self._return_code = error_return_code + + return int(self._return_code) diff --git a/dts/framework/test_suite.py b/dts/framework/test_suite.py index 0cbedee478..6cd142ef2f 100644 --- a/dts/framework/test_suite.py +++ b/dts/framework/test_suite.py @@ -9,12 +9,12 @@ import importlib import inspect import re -from collections.abc import MutableSequence from types import MethodType from .exception import ConfigurationError, SSHTimeoutError, TestCaseVerifyError from .logger import DTSLOG, getLogger from .settings import SETTINGS +from .test_result import BuildTargetResult, Result, TestCaseResult, TestSuiteResult from .testbed_model import SutNode @@ -40,21 +40,21 @@ class TestSuite(object): _logger: DTSLOG _test_cases_to_run: list[str] _func: bool - _errors: MutableSequence[Exception] + _result: TestSuiteResult def __init__( self, sut_node: SutNode, test_cases: list[str], func: bool, - errors: MutableSequence[Exception], + build_target_result: BuildTargetResult, ): self.sut_node = sut_node self._logger = getLogger(self.__class__.__name__) self._test_cases_to_run = test_cases self._test_cases_to_run.extend(SETTINGS.test_cases) 
        self._func = func
-        self._errors = errors
+        self._result = build_target_result.add_test_suite(self.__class__.__name__)
 
     def set_up_suite(self) -> None:
         """
@@ -97,10 +97,11 @@ def run(self) -> None:
         try:
             self._logger.info(f"Starting test suite setup: {test_suite_name}")
             self.set_up_suite()
+            self._result.update_setup(Result.PASS)
             self._logger.info(f"Test suite setup successful: {test_suite_name}")
         except Exception as e:
             self._logger.exception(f"Test suite setup ERROR: {test_suite_name}")
-            self._errors.append(e)
+            self._result.update_setup(Result.ERROR, e)
 
         else:
             self._execute_test_suite()
@@ -109,13 +110,14 @@ def run(self) -> None:
         try:
             self.tear_down_suite()
             self.sut_node.kill_cleanup_dpdk_apps()
+            self._result.update_teardown(Result.PASS)
         except Exception as e:
             self._logger.exception(f"Test suite teardown ERROR: {test_suite_name}")
             self._logger.warning(
                 f"Test suite '{test_suite_name}' teardown failed, "
                 f"the next test suite may be affected."
             )
-            self._errors.append(e)
+            self._result.update_teardown(Result.ERROR, e)
 
     def _execute_test_suite(self) -> None:
         """
@@ -123,17 +125,18 @@ def _execute_test_suite(self) -> None:
         """
         if self._func:
             for test_case_method in self._get_functional_test_cases():
+                test_case_name = test_case_method.__name__
+                test_case_result = self._result.add_test_case(test_case_name)
                 all_attempts = SETTINGS.re_run + 1
                 attempt_nr = 1
-                while (
-                    not self._run_test_case(test_case_method)
-                    and attempt_nr <= all_attempts
-                ):
+                self._run_test_case(test_case_method, test_case_result)
+                while not test_case_result and attempt_nr < all_attempts:
                     attempt_nr += 1
                     self._logger.info(
-                        f"Re-running FAILED test case '{test_case_method.__name__}'. "
+                        f"Re-running FAILED test case '{test_case_name}'. "
                         f"Attempt number {attempt_nr} out of {all_attempts}."
) + self._run_test_case(test_case_method, test_case_result) def _get_functional_test_cases(self) -> list[MethodType]: """ @@ -166,68 +169,69 @@ def _should_be_executed(self, test_case_name: str, test_case_regex: str) -> bool return match - def _run_test_case(self, test_case_method: MethodType) -> bool: + def _run_test_case( + self, test_case_method: MethodType, test_case_result: TestCaseResult + ) -> None: """ Setup, execute and teardown a test case in this suite. - Exceptions are caught and recorded in logs. + Exceptions are caught and recorded in logs and results. """ test_case_name = test_case_method.__name__ - result = False try: # run set_up function for each case self.set_up_test_case() + test_case_result.update_setup(Result.PASS) except SSHTimeoutError as e: self._logger.exception(f"Test case setup FAILED: {test_case_name}") - self._errors.append(e) + test_case_result.update_setup(Result.FAIL, e) except Exception as e: self._logger.exception(f"Test case setup ERROR: {test_case_name}") - self._errors.append(e) + test_case_result.update_setup(Result.ERROR, e) else: # run test case if setup was successful - result = self._execute_test_case(test_case_method) + self._execute_test_case(test_case_method, test_case_result) finally: try: self.tear_down_test_case() + test_case_result.update_teardown(Result.PASS) except Exception as e: self._logger.exception(f"Test case teardown ERROR: {test_case_name}") self._logger.warning( f"Test case '{test_case_name}' teardown failed, " f"the next test case may be affected." ) - self._errors.append(e) - result = False + test_case_result.update_teardown(Result.ERROR, e) + test_case_result.update(Result.ERROR) - return result - - def _execute_test_case(self, test_case_method: MethodType) -> bool: + def _execute_test_case( + self, test_case_method: MethodType, test_case_result: TestCaseResult + ) -> None: """ Execute one test case and handle failures. 
""" test_case_name = test_case_method.__name__ - result = False try: self._logger.info(f"Starting test case execution: {test_case_name}") test_case_method() - result = True + test_case_result.update(Result.PASS) self._logger.info(f"Test case execution PASSED: {test_case_name}") except TestCaseVerifyError as e: self._logger.exception(f"Test case execution FAILED: {test_case_name}") - self._errors.append(e) + test_case_result.update(Result.FAIL, e) except Exception as e: self._logger.exception(f"Test case execution ERROR: {test_case_name}") - self._errors.append(e) + test_case_result.update(Result.ERROR, e) except KeyboardInterrupt: self._logger.error( f"Test case execution INTERRUPTED by user: {test_case_name}" ) + test_case_result.update(Result.SKIP) raise KeyboardInterrupt("Stop DTS") - return result - def get_test_suites(testsuite_module_path: str) -> list[type[TestSuite]]: def is_test_suite(object) -> bool: