Diffstat (limited to 'test/py')
-rw-r--r--  test/py/conftest.py              | 103
-rw-r--r--  test/py/requirements.txt         |  28
-rw-r--r--  test/py/tests/test_extension.py  |   4
-rw-r--r--  test/py/tests/test_smbios.py     |  18
-rw-r--r--  test/py/tests/test_spl.py        |   2
-rw-r--r--  test/py/tests/test_suite.py      | 208
-rw-r--r--  test/py/tests/test_upl.py        |   4
-rw-r--r--  test/py/tests/test_usb.py        |   2
-rw-r--r--  test/py/tests/test_ut.py         |   5
-rw-r--r--  test/py/tests/test_vbe.py        |   2
-rw-r--r--  test/py/tests/test_vpl.py        |   2
11 files changed, 337 insertions, 41 deletions
diff --git a/test/py/conftest.py b/test/py/conftest.py
index 509d19b449d..31043a697e2 100644
--- a/test/py/conftest.py
+++ b/test/py/conftest.py
@@ -25,6 +25,7 @@ import re
 from _pytest.runner import runtestprotocol
 import subprocess
 import sys
+import time
 from u_boot_spawn import BootFail, Timeout, Unexpected, handle_exception
 
 # Globals: The HTML log file, and the connection to the U-Boot console.
@@ -33,6 +34,9 @@ console = None
 
 TEST_PY_DIR = os.path.dirname(os.path.abspath(__file__))
 
+# Regex for test-function symbols
+RE_UT_TEST_LIST = re.compile(r'[^a-zA-Z0-9_]_u_boot_list_2_ut_(.*)_2_(.*)\s*$')
+
 def mkdir_p(path):
     """Create a directory path.
@@ -88,6 +92,9 @@ def pytest_addoption(parser):
     parser.addoption('--role', help='U-Boot board role (for Labgrid-sjg)')
     parser.addoption('--use-running-system', default=False, action='store_true',
                      help="Assume that U-Boot is ready and don't wait for a prompt")
+    parser.addoption('--timing', default=False, action='store_true',
+                     help='Show info on test timing')
+
 
 def run_build(config, source_dir, build_dir, board_type, log):
     """run_build: Build U-Boot
@@ -158,10 +165,15 @@ def get_details(config):
         env['U_BOOT_BUILD_DIR'] = build_dir
         if build_dir_extra:
             env['U_BOOT_BUILD_DIR_EXTRA'] = build_dir_extra
-        proc = subprocess.run(cmd, capture_output=True, encoding='utf-8',
+
+        # Make sure the script sees that it is being run from pytest
+        env['U_BOOT_SOURCE_DIR'] = source_dir
+
+        proc = subprocess.run(cmd, stdout=subprocess.PIPE,
+                              stderr=subprocess.STDOUT, encoding='utf-8',
                               env=env)
         if proc.returncode:
-            raise ValueError(proc.stderr)
+            raise ValueError(f"Error {proc.returncode} running {cmd}: '{proc.stderr}' '{proc.stdout}'")
 
         # For debugging
         # print('conftest: lab:', proc.stdout)
         vals = {}
@@ -314,6 +326,7 @@ def pytest_configure(config):
     ubconfig.use_running_system = config.getoption('use_running_system')
     ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'
     ubconfig.connection_ok = True
+    ubconfig.timing = config.getoption('timing')
 
     env_vars = (
         'board_type',
@@ -336,7 +349,7 @@ def pytest_configure(config):
         import u_boot_console_exec_attach
         console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)
 
-re_ut_test_list = re.compile(r'[^a-zA-Z0-9_]_u_boot_list_2_ut_(.*)_test_2_(.*)\s*$')
+
 def generate_ut_subtest(metafunc, fixture_name, sym_path):
     """Provide parametrization for a ut_subtest fixture.
@@ -363,7 +376,7 @@ def generate_ut_subtest(metafunc, fixture_name, sym_path):
 
     vals = []
     for l in lines:
-        m = re_ut_test_list.search(l)
+        m = RE_UT_TEST_LIST.search(l)
         if not m:
             continue
         suite, name = m.groups()
@@ -508,6 +521,12 @@ tests_skipped = []
 tests_warning = []
 tests_passed = []
 
+# Duration of each test:
+#    key (string): test name
+#    value (float): duration in ms
+test_durations = {}
+
+
 def pytest_itemcollected(item):
     """pytest hook: Called once for each test found during collection.
@@ -523,6 +542,73 @@ def pytest_itemcollected(item):
         tests_not_run.append(item.name)
 
 
+def show_timings():
+    """Write timings for each test, along with a histogram"""
+
+    def get_time_delta(msecs):
+        """Convert milliseconds into a user-friendly string"""
+        if msecs >= 1000:
+            return f'{msecs / 1000:.1f}s'
+        else:
+            return f'{msecs:.0f}ms'
+
+    def show_bar(key, msecs, value):
+        """Show a single bar (line) of the histogram
+
+        Args:
+            key (str): Key to write on the left
+            msecs (float): Total time for this bucket, in milliseconds
+            value (int): Value to display, i.e. the relative length of the bar
+        """
+        if value:
+            bar_length = int((value / max_count) * max_bar_length)
+            print(f"{key:>8} : {get_time_delta(msecs):>7} |{'#' * bar_length} {value}", file=buf)
+
+    # Create the buckets we will use, each has a count and a total time
+    bucket = {}
+    for power in range(5):
+        for i in [1, 2, 3, 4, 5, 7.5]:
+            bucket[i * 10 ** power] = {'count': 0, 'msecs': 0.0}
+    max_dur = max(bucket.keys())
+
+    # Collect counts for each bucket; if outside the range, add to too_long
+    # Also show a sorted list of test timings from longest to shortest
+    too_long = 0
+    too_long_msecs = 0.0
+    max_count = 0
+    with log.section('Timing Report', 'timing_report'):
+        for name, dur in sorted(test_durations.items(), key=lambda kv: kv[1],
+                                reverse=True):
+            log.info(f'{get_time_delta(dur):>8} {name}')
+            greater = [k for k in bucket.keys() if dur <= k]
+            if greater:
+                buck = bucket[min(greater)]
+                buck['count'] += 1
+                max_count = max(max_count, buck['count'])
+                buck['msecs'] += dur
+            else:
+                too_long += 1
+                too_long_msecs += dur
+
+    # Set the maximum length of a histogram bar, in characters
+    max_bar_length = 40
+
+    # Show a summary with histogram
+    buf = io.StringIO()
+    with log.section('Timing Summary', 'timing_summary'):
+        print('Duration : Total | Number of tests', file=buf)
+        print(f'{"=" * 8} : {"=" * 7} |{"=" * max_bar_length}', file=buf)
+        for dur, buck in bucket.items():
+            if buck['count']:
+                label = get_time_delta(dur)
+                show_bar(f'<{label}', buck['msecs'], buck['count'])
+        if too_long:
+            show_bar(f'>{get_time_delta(max_dur)}', too_long_msecs, too_long)
+        log.info(buf.getvalue())
+    if ubconfig.timing:
+        print(buf.getvalue(), end='')
+
+
 def cleanup():
     """Clean up all global state.
@@ -572,6 +658,7 @@ def cleanup():
         for test in tests_not_run:
             anchor = anchors.get(test, None)
             log.status_fail('... ' + test, anchor)
+        show_timings()
         log.close()
 atexit.register(cleanup)
@@ -705,7 +792,9 @@ def pytest_runtest_protocol(item, nextitem):
                 log.get_and_reset_warning()
     ihook = item.ihook
     ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
+    start = time.monotonic()
     reports = runtestprotocol(item, nextitem=nextitem)
+    duration = round((time.monotonic() - start) * 1000, 1)
     ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
     was_warning = log.get_and_reset_warning()
@@ -718,6 +807,7 @@ def pytest_runtest_protocol(item, nextitem):
         start_test_section(item)
 
     failure_cleanup = False
+    record_duration = True
     if not was_warning:
         test_list = tests_passed
         msg = 'OK'
@@ -748,6 +838,11 @@ def pytest_runtest_protocol(item, nextitem):
         test_list = tests_skipped
         msg = 'SKIPPED:\n' + str(report.longrepr)
         msg_log = log.status_skipped
+        record_duration = False
+
+    msg += f' {duration} ms'
+    if record_duration:
+        test_durations[item.name] = duration
 
     if failure_cleanup:
         console.drain_console()
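The timing report added to conftest.py places each duration into the smallest bucket that still holds it, with boundaries at 1, 2, 3, 4, 5 and 7.5 times each power of ten up to 75s; anything longer is counted separately as too_long. A minimal standalone sketch of that selection logic (the sample durations are invented for illustration):

    # Sketch of the bucket selection used by show_timings() above
    bucket = {}
    for power in range(5):
        for i in [1, 2, 3, 4, 5, 7.5]:
            bucket[i * 10 ** power] = {'count': 0, 'msecs': 0.0}

    for dur in [0.8, 12.5, 340.0, 9800.0]:  # durations in ms (invented)
        greater = [k for k in bucket if dur <= k]
        if greater:  # smallest boundary that still holds this duration
            buck = bucket[min(greater)]
            buck['count'] += 1
            buck['msecs'] += dur

    for limit, buck in sorted(bucket.items()):
        if buck['count']:
            print(f"<{limit}ms: {buck['count']} test(s), {buck['msecs']:.1f}ms total")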
diff --git a/test/py/requirements.txt b/test/py/requirements.txt
index 75760f96e56..acfe17dce9f 100644
--- a/test/py/requirements.txt
+++ b/test/py/requirements.txt
@@ -1,30 +1,4 @@
-atomicwrites==1.4.1
-attrs==19.3.0
-concurrencytest==0.1.2
-coverage==6.2
-extras==1.0.0
 filelock==3.0.12
-fixtures==3.0.0
-importlib-metadata==0.23
-linecache2==1.0.0
-more-itertools==7.2.0
-packaging==24.1
-pbr==5.4.3
-pluggy==0.13.0
-py==1.11.0
-pycryptodomex==3.19.1
-pyelftools==0.27
-pygit2==1.13.3
-pyparsing==3.0.7
+pycryptodomex==3.21.0
 pytest==6.2.5
 pytest-xdist==2.5.0
-python-mimeparse==1.6.0
-python-subunit==1.3.0
-requests==2.32.3
-setuptools==70.3.0
-six==1.16.0
-testtools==2.3.0
-traceback2==1.4.0
-unittest2==1.1.0
-wcwidth==0.1.7
-zipp==3.19.2
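The trim leaves four pinned packages. One way to confirm a virtualenv matches the new pin set (a sketch, not part of the patch; it uses only the standard library) is to query the installed versions:

    from importlib.metadata import PackageNotFoundError, version

    # Print the installed version of each remaining pin, or note its absence
    for pkg in ('filelock', 'pycryptodomex', 'pytest', 'pytest-xdist'):
        try:
            print(f'{pkg}=={version(pkg)}')
        except PackageNotFoundError:
            print(f'{pkg}: not installed')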
diff --git a/test/py/tests/test_extension.py b/test/py/tests/test_extension.py
index 267cf2ff27c..2a3c5116171 100644
--- a/test/py/tests/test_extension.py
+++ b/test/py/tests/test_extension.py
@@ -26,7 +26,9 @@ def test_extension(u_boot_console):
     load_dtb(u_boot_console)
 
     output = u_boot_console.run_command('extension list')
-    assert('No extension' in output)
+    # extension_bootdev_hunt may have already run.
+    # Without reboot we cannot make any assumption here.
+    # assert('No extension' in output)
 
     output = u_boot_console.run_command('extension scan')
     assert output == 'Found 2 extension board(s).'
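The assertion is commented out because extension_bootdev_hunt may already have registered boards before the test runs. If a reboot-free check were still wanted, a tolerant variant could accept either state; the 'extension board' string below is an assumption about the command's output, not taken from the patch:

    # Hypothetical tolerant check: pass whether or not a hunt already ran
    output = u_boot_console.run_command('extension list')
    assert 'No extension' in output or 'extension board' in output.lower()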
diff --git a/test/py/tests/test_smbios.py b/test/py/tests/test_smbios.py
index 82b0b689830..0405a9b9d38 100644
--- a/test/py/tests/test_smbios.py
+++ b/test/py/tests/test_smbios.py
@@ -32,10 +32,26 @@ def test_cmd_smbios_sandbox(u_boot_console):
     """Run the smbios command on the sandbox"""
     output = u_boot_console.run_command('smbios')
     assert 'DMI type 0,' in output
-    assert 'String 1: U-Boot' in output
+    assert 'Vendor: U-Boot' in output
     assert 'DMI type 1,' in output
     assert 'Manufacturer: sandbox' in output
     assert 'DMI type 2,' in output
     assert 'DMI type 3,' in output
     assert 'DMI type 4,' in output
     assert 'DMI type 127,' in output
+
+@pytest.mark.buildconfigspec('cmd_smbios')
+@pytest.mark.buildconfigspec('sysinfo_smbios')
+@pytest.mark.buildconfigspec('generate_smbios_table_verbose')
+def test_cmd_smbios_sysinfo_verbose(u_boot_console):
+    """Run the smbios command"""
+    output = u_boot_console.run_command('smbios')
+    assert 'DMI type 0,' in output
+    assert 'Vendor: U-Boot' in output
+    assert 'DMI type 1,' in output
+    assert 'Manufacturer: linux' in output
+    assert 'DMI type 2,' in output
+    assert 'DMI type 3,' in output
+    assert 'DMI type 7,' in output
+    assert 'DMI type 4,' in output
+    assert 'DMI type 127,' in output
diff --git a/test/py/tests/test_spl.py b/test/py/tests/test_spl.py
index 42e4c4342b2..474f430a344 100644
--- a/test/py/tests/test_spl.py
+++ b/test/py/tests/test_spl.py
@@ -36,7 +36,7 @@ def test_spl(u_boot_console, ut_spl_subtest):
         cons = u_boot_console
         cons.restart_uboot_with_flags(['-u', '-k', ut_spl_subtest.split()[1]])
         output = cons.get_spawn_output().replace('\r', '')
-        assert 'Failures: 0' in output
+        assert 'failures: 0' in output
     finally:
         # Restart afterward in case a non-SPL test is run next. This should not
         # happen since SPL tests are run in their own invocation of test.py, but
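Both smbios tests assert on individual 'DMI type N,' lines. An alternative (illustrative only, not part of the patch) is to gather the advertised type numbers with one regex and compare sets, which makes a missing or extra table obvious at a glance:

    import re

    def dmi_types(output):
        """Return the set of DMI type numbers found in 'smbios' output."""
        return {int(m.group(1)) for m in re.finditer(r'DMI type (\d+),', output)}

    # Sample lines shaped like smbios output (invented for illustration)
    sample = 'Handle 0x0, DMI type 0, 26 bytes\nHandle 0x100, DMI type 1, 27 bytes'
    assert dmi_types(sample) == {0, 1}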
diff --git a/test/py/tests/test_suite.py b/test/py/tests/test_suite.py
new file mode 100644
index 00000000000..9ddc883394b
--- /dev/null
+++ b/test/py/tests/test_suite.py
@@ -0,0 +1,208 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright 2024 Google LLC
+
+import pytest
+import re
+
+# List of test suites we expect to find with 'ut info' and 'ut all'
+EXPECTED_SUITES = [
+    'addrmap', 'bdinfo', 'bloblist', 'bootm', 'bootstd',
+    'cmd', 'common', 'dm', 'env', 'exit', 'fdt_overlay',
+    'fdt', 'font', 'hush', 'lib',
+    'loadm', 'log', 'mbr', 'measurement', 'mem',
+    'pci_mps', 'setexpr', 'upl',
+    ]
+
+
+# Set this to True to aid debugging of tests
+DEBUG_ME = False
+
+
+def collect_info(cons, output):
+    """Process the output from 'ut all'
+
+    Args:
+        cons: U-Boot console object
+        output: Output from running 'ut all'
+
+    Returns:
+        tuple:
+            set: suite names that were found in output
+            set: test names that were found in output
+            dict: test count for each suite:
+                key: suite name
+                value: number of tests for the suite found in output
+            set: missing suites (compared to EXPECTED_SUITES)
+            set: extra suites (compared to EXPECTED_SUITES)
+    """
+    suites = set()
+    tests = set()
+    cur_suite = None
+    test_count = None
+    exp_test_count = {}
+
+    # Collect suites{}
+    for line in output.splitlines():
+        line = line.rstrip()
+        if DEBUG_ME:
+            cons.log.info(f'line: {line}')
+        m = re.search('----Running ([^ ]*) tests----', line)
+        if m:
+            if DEBUG_ME and cur_suite and cur_suite != 'info':
+                cons.log.info(f'suite: {cur_suite} expected {exp_test_count[cur_suite]} found {test_count}')
+
+            cur_suite = m.group(1)
+            if DEBUG_ME:
+                cons.log.info(f'cur_suite: {cur_suite}')
+            suites.add(cur_suite)
+
+            test_count = 0
+        m = re.match(rf'Running (\d+) {cur_suite} tests', line)
+        if m:
+            exp_test_count[cur_suite] = int(m.group(1))
+        m = re.search(r'Test: (\w*): ([-a-z0-9_]*\.c)?( .*)?', line)
+        if m:
+            test_name = m.group(1)
+            msg = m.group(3)
+            if DEBUG_ME:
+                cons.log.info(f"test_name {test_name} msg '{msg}'")
+            full_name = f'{cur_suite}.{test_name}'
+            if msg == ' (flat tree)' and full_name not in tests:
+                tests.add(full_name)
+                test_count += 1
+            if not msg or 'skipped as it is manual' in msg:
+                tests.add(full_name)
+                test_count += 1
+        if DEBUG_ME:
+            cons.log.info(f'test_count {test_count}')
+    if DEBUG_ME:
+        cons.log.info(f'suite: {cur_suite} expected {exp_test_count[cur_suite]} found {test_count}')
+        cons.log.info(f"Tests: {' '.join(sorted(list(tests)))}")
+
+    # Figure out what is missing, or extra
+    missing = set()
+    extra = set(suites)
+    for suite in EXPECTED_SUITES:
+        if suite in extra:
+            extra.remove(suite)
+        else:
+            missing.add(suite)
+
+    return suites, tests, exp_test_count, missing, extra
+
+
+def process_ut_info(cons, output):
+    """Process the output of the 'ut info' command
+
+    Args:
+        cons: U-Boot console object
+        output: Output from running 'ut all'
+
+    Returns:
+        tuple:
+            int: Number of suites reported
+            int: Number of tests reported
+            dict: test count for each suite:
+                key: suite name
+                value: number of tests reported for the suite
+
+    """
+    suite_count = None
+    total_test_count = None
+    test_count = {}
+    for line in output.splitlines():
+        line = line.rstrip()
+        if DEBUG_ME:
+            cons.log.info(f'line: {line}')
+        m = re.match(r'Test suites: (.*)', line)
+        if m:
+            suite_count = int(m.group(1))
+        m = re.match(r'Total tests: (.*)', line)
+        if m:
+            total_test_count = int(m.group(1))
+        m = re.match(r' *([0-9?]*) (\w*)', line)
+        if m:
+            test_count[m.group(2)] = m.group(1)
+    return suite_count, total_test_count, test_count
+
+
+@pytest.mark.buildconfigspec('sandbox')
+@pytest.mark.notbuildconfigspec('sandbox_spl')
+@pytest.mark.notbuildconfigspec('sandbox64')
+# This test is disabled since it fails; remove the leading 'x' to try it
+def xtest_suite(u_boot_console, u_boot_config):
+    """Perform various checks on the unit tests, including:
+
+       - The number of suites matches that reported by 'ut info'
+       - Where available, the number of tests in each suite matches that
+         reported by 'ut -s info'
+       - The total number of tests adds up to the total that are actually run
+         with 'ut all'
+       - All suites are run with 'ut all'
+       - The expected set of suites is run (the list is hard-coded in this
+         test)
+
+    """
+    cons = u_boot_console
+    buildconfig = u_boot_config.buildconfig
+    with cons.log.section('Run all unit tests'):
+        # ut hush hush_test_simple_dollar prints "Unknown command" on purpose.
+        with u_boot_console.disable_check('unknown_command'):
+            output = cons.run_command('ut all')
+
+    # Process the output from the run
+    with cons.log.section('Check output'):
+        suites, all_tests, exp_test_count, missing, extra = collect_info(cons,
+                                                                         output)
+        cons.log.info(f'missing {missing}')
+        cons.log.info(f'extra {extra}')
+
+    # Make sure we got a test count for each suite
+    assert not (suites - exp_test_count.keys())
+
+    # Deal with missing suites
+    with cons.log.section('Check missing suites'):
+        if 'config_cmd_seama' not in buildconfig:
+            cons.log.info("CMD_SEAMA not enabled: Ignoring suite 'seama'")
+            missing.discard('seama')
+
+    # Run 'ut info' and compare with the log results
+    with cons.log.section('Check suite test-counts'):
+        output = cons.run_command('ut -s info')
+
+        suite_count, total_test_count, test_count = process_ut_info(cons,
+                                                                    output)
+
+        if missing or extra:
+            cons.log.info(f"suites: {' '.join(sorted(list(suites)))}")
+            cons.log.error(f'missing: {sorted(list(missing))}')
+            cons.log.error(f'extra: {sorted(list(extra))}')
+
+        assert not missing, f'Missing suites {missing}'
+        assert not extra, f'Extra suites {extra}'
+
+        cons.log.info(str(exp_test_count))
+        for suite in EXPECTED_SUITES:
+            assert test_count[suite] in ['?', str(exp_test_count[suite])], \
+                f'suite {suite} expected {exp_test_count[suite]}'
+
+        assert suite_count == len(EXPECTED_SUITES)
+        assert total_test_count == len(all_tests)
+
+    # Run three suites
+    with cons.log.section('Check multiple suites'):
+        output = cons.run_command('ut bloblist,setexpr,mem')
+        assert 'Suites run: 3' in output
+
+    # Run a particular test
+    with cons.log.section('Check single test'):
+        output = cons.run_command('ut bloblist reloc')
+        assert 'Test: reloc: bloblist.c' in output
+
+    # Run tests multiple times
+    with cons.log.section('Check multiple runs'):
+        output = cons.run_command('ut -r2 bloblist')
+        lines = output.splitlines()
+        run = len([line for line in lines if 'Test:' in line])
+        count = re.search(r'Tests run: (\d*)', lines[-1]).group(1)
+
+        assert run == 2 * int(count)
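collect_info() relies on three patterns: the suite banner, the per-suite count line, and the per-test line. Their behaviour on sample lines (invented, but shaped like 'ut all' output) can be checked in isolation:

    import re

    print(re.search('----Running ([^ ]*) tests----',
                    '----Running bloblist tests----').group(1))   # bloblist
    print(re.match(r'Running (\d+) bloblist tests',
                   'Running 25 bloblist tests').group(1))         # 25
    print(re.search(r'Test: (\w*): ([-a-z0-9_]*\.c)?( .*)?',
                    'Test: reloc: bloblist.c').groups())          # ('reloc', 'bloblist.c', None)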
diff --git a/test/py/tests/test_upl.py b/test/py/tests/test_upl.py
index 3164bda6b71..a1ccc8df233 100644
--- a/test/py/tests/test_upl.py
+++ b/test/py/tests/test_upl.py
@@ -17,7 +17,7 @@ def test_upl_handoff(u_boot_console):
     proper and runs a test to check that the parameters are correct.
 
     The entire FIT is loaded into memory in SPL (in upl_load_from_image()) so
-    that it can be inpected in upl_test_info_norun
+    that it can be inspected in upl_test_info_norun
     """
     cons = u_boot_console
     ram = os.path.join(cons.config.build_dir, 'ram.bin')
@@ -35,4 +35,4 @@ def test_upl_handoff(u_boot_console):
 
     # Check the FIT offsets look correct
     output = cons.run_command('ut upl -f upl_test_info_norun')
-    assert 'Failures: 0' in output
+    assert 'failures: 0' in output
diff --git a/test/py/tests/test_usb.py b/test/py/tests/test_usb.py
index e1f203b5cbc..566d73b7c64 100644
--- a/test/py/tests/test_usb.py
+++ b/test/py/tests/test_usb.py
@@ -242,7 +242,7 @@ def test_usb_part(u_boot_console):
             elif part_type == '83':
                 print('ext(2/4) detected')
                 output = u_boot_console.run_command(
-                    'fstype usb %d:%d' % i, part_id
+                    'fstype usb %d:%d' % (i, part_id)
                 )
                 if 'ext2' in output:
                     part_ext2.append(part_id)
diff --git a/test/py/tests/test_ut.py b/test/py/tests/test_ut.py
index 10ec7e582e0..d2d8ce10755 100644
--- a/test/py/tests/test_ut.py
+++ b/test/py/tests/test_ut.py
@@ -343,9 +343,10 @@ def setup_cros_image(cons):
             start, size, num, name = line.split(maxsplit=3)
             parts[int(num)] = Partition(int(start), int(size), name)
 
+    # Set up the kernel command-line
     dummy = os.path.join(cons.config.result_dir, 'dummy.txt')
     with open(dummy, 'wb') as outf:
-        outf.write(b'dummy\n')
+        outf.write(b'BOOT_IMAGE=/vmlinuz-5.15.0-121-generic root=/dev/nvme0n1p1 ro quiet splash vt.handoff=7')
 
     # For now we just use dummy kernels. This limits testing to just detecting
     # a signed kernel. We could add support for the x86 data structures so that
@@ -606,4 +607,4 @@ def test_ut(u_boot_console, ut_subtest):
         assert 'Unknown command \'quux\' - try \'help\'' in output
     else:
         output = u_boot_console.run_command('ut ' + ut_subtest)
-        assert output.endswith('Failures: 0')
+        assert output.endswith('failures: 0')
diff --git a/test/py/tests/test_vbe.py b/test/py/tests/test_vbe.py
index 50b6c1cd911..861df3f8266 100644
--- a/test/py/tests/test_vbe.py
+++ b/test/py/tests/test_vbe.py
@@ -117,4 +117,4 @@ def test_vbe(u_boot_console):
 
     with cons.log.section('Kernel load'):
         output = cons.run_command_list(cmd.splitlines())
-        assert 'Failures: 0' in output[-1]
+        assert 'failures: 0' in output[-1]
diff --git a/test/py/tests/test_vpl.py b/test/py/tests/test_vpl.py
index 4af578b9173..8c472ca7a92 100644
--- a/test/py/tests/test_vpl.py
+++ b/test/py/tests/test_vpl.py
@@ -26,7 +26,7 @@ def test_vpl(u_boot_console, ut_vpl_subtest):
         cons = u_boot_console
         cons.restart_uboot_with_flags(['-u', '-k', ut_vpl_subtest.split()[1]])
         output = cons.get_spawn_output().replace('\r', '')
-        assert 'Failures: 0' in output
+        assert 'failures: 0' in output
     finally:
         # Restart afterward in case a non-VPL test is run next. This should not
        # happen since VPL tests are run in their own invocation of test.py, but
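A recurring change across test_spl.py, test_upl.py, test_ut.py, test_vbe.py and test_vpl.py is the switch from 'Failures: 0' to the lower-case 'failures: 0' summary. A hypothetical helper (not part of the patch) could tolerate either casing while trees are in transition:

    def assert_no_failures(output):
        """Pass if the unit-test summary reports zero failures, any casing."""
        assert 'failures: 0' in output.lower()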