author     Tom Rini <trini@konsulko.com>   2025-01-24 14:35:37 -0600
committer  Tom Rini <trini@konsulko.com>   2025-01-24 14:35:37 -0600
commit     8162f35a108441b8d7a44ac9266c1a64dbf79fd5 (patch)
tree       257e589845877b4cd0eac6482d8b0563af151aaa /test/py
parent     d51f35a553b0c40603c45a8d1d3110640fb119e3 (diff)
parent     229d145f2614c7da1ca046af35a7ccec2d688f60 (diff)
Merge patch series "test: Improvements to ut command and test-suite running"
Simon Glass <sjg@chromium.org> says:

The current method of running unit tests relies on subcommands of the 'ut'
command. Only the code in each subcommand knows how to find the tests related
to that subcommand. This is not ideal and we now have quite a few subcommands
which do nothing but locate the relevant tests in a linker list, then call a
common function to run them. This series adds a list of test suites, so that
these subcommands can be removed.

An issue with 'ut all' is that it does not record how many tests failed
overall, so it is necessary to examine copious amounts of output to look for
failures. This series adds a new 'total' feature to allow recording the total
number of failed tests.

To help with 'ut all', a new pytest is created which runs it (as well as
'ut info') and makes sure that all is well. Due to the 'ut all' failures this
does not pass, so the test is disabled for now. It is included because it
guards against misnaming a test suite and causing it not to run.

Future work may:
- get 'ut all' passing
- enable test_suite() in CI, to ensure that 'ut all' keeps passing
- record the duration of each suite
- allow running the tests in random order to tease out dependencies
- tweak the output to remove common prefixes
- get rid of the bootstd, optee and seama 'ut' subcommands

Link: https://lore.kernel.org/r/20250120212613.516664-1-sjg@chromium.org
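In outline, the new pytest works roughly as below. This is a minimal sketch of
the approach taken by test_suite.py in this series, not the full test: the
function name check_ut_all is illustrative, and the output patterns are the
same ones the test itself matches.

    import re

    def check_ut_all(cons):
        # Run every registered suite once and note which suites announce
        # themselves in the output ('----Running <suite> tests----').
        output = cons.run_command('ut all')
        suites = set(re.findall(r'----Running ([^ ]*) tests----', output))

        # Ask U-Boot how many suites it thinks it has and compare; a
        # misnamed suite would be registered but never actually run.
        info = cons.run_command('ut info')
        m = re.search(r'Test suites: (\d+)', info)
        assert m, 'no suite count reported by ut info'
        assert int(m.group(1)) == len(suites)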
Diffstat (limited to 'test/py')
-rw-r--r--  test/py/conftest.py            7
-rw-r--r--  test/py/tests/test_spl.py      2
-rw-r--r--  test/py/tests/test_suite.py  188
-rw-r--r--  test/py/tests/test_upl.py      2
-rw-r--r--  test/py/tests/test_ut.py       2
-rw-r--r--  test/py/tests/test_vbe.py      2
-rw-r--r--  test/py/tests/test_vpl.py      2
7 files changed, 198 insertions, 7 deletions
diff --git a/test/py/conftest.py b/test/py/conftest.py
index 509d19b449d..6b7ed0586e2 100644
--- a/test/py/conftest.py
+++ b/test/py/conftest.py
@@ -33,6 +33,9 @@ console = None
TEST_PY_DIR = os.path.dirname(os.path.abspath(__file__))
+# Regex for test-function symbols
+RE_UT_TEST_LIST = re.compile(r'[^a-zA-Z0-9_]_u_boot_list_2_ut_(.*)_2_(.*)\s*$')
+
def mkdir_p(path):
"""Create a directory path.
@@ -336,7 +339,7 @@ def pytest_configure(config):
import u_boot_console_exec_attach
console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)
-re_ut_test_list = re.compile(r'[^a-zA-Z0-9_]_u_boot_list_2_ut_(.*)_test_2_(.*)\s*$')
+
def generate_ut_subtest(metafunc, fixture_name, sym_path):
"""Provide parametrization for a ut_subtest fixture.
@@ -363,7 +366,7 @@ def generate_ut_subtest(metafunc, fixture_name, sym_path):
vals = []
for l in lines:
- m = re_ut_test_list.search(l)
+ m = RE_UT_TEST_LIST.search(l)
if not m:
continue
suite, name = m.groups()
diff --git a/test/py/tests/test_spl.py b/test/py/tests/test_spl.py
index 42e4c4342b2..474f430a344 100644
--- a/test/py/tests/test_spl.py
+++ b/test/py/tests/test_spl.py
@@ -36,7 +36,7 @@ def test_spl(u_boot_console, ut_spl_subtest):
cons = u_boot_console
cons.restart_uboot_with_flags(['-u', '-k', ut_spl_subtest.split()[1]])
output = cons.get_spawn_output().replace('\r', '')
- assert 'Failures: 0' in output
+ assert 'failures: 0' in output
finally:
# Restart afterward in case a non-SPL test is run next. This should not
# happen since SPL tests are run in their own invocation of test.py, but
diff --git a/test/py/tests/test_suite.py b/test/py/tests/test_suite.py
new file mode 100644
index 00000000000..73c185349b4
--- /dev/null
+++ b/test/py/tests/test_suite.py
@@ -0,0 +1,188 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright 2024 Google LLC
+
+import pytest
+import re
+
+# List of test suites we expect to find with 'ut info' and 'ut all'
+EXPECTED_SUITES = [
+ 'addrmap', 'bdinfo', 'bloblist', 'bootm', 'bootstd',
+ 'cmd', 'common', 'dm', 'env', 'exit',
+ 'fdt', 'font', 'hush', 'lib',
+ 'loadm', 'log', 'mbr', 'measurement', 'mem',
+ 'overlay', 'pci_mps', 'setexpr', 'upl',
+ ]
+
+
+# Set this to True to aid debugging of tests
+DEBUG_ME = False
+
+
+def collect_info(cons, output):
+ """Process the output from 'ut all'
+
+ Args:
+ cons: U-Boot console object
+ output: Output from running 'ut all'
+
+ Returns:
+ tuple:
+ set: suite names that were found in output
+ set: test names that were found in output
+ dict: test count for each suite:
+ key: suite name
+ value: number of tests for the suite found in output
+ set: missing suites (compared to EXPECTED_SUITES)
+ set: extra suites (compared to EXPECTED_SUITES)
+ """
+ suites = set()
+ tests = set()
+ cur_suite = None
+ test_count = None
+ exp_test_count = {}
+
+ # Collect suites{}
+ for line in output.splitlines():
+ line = line.rstrip()
+ if DEBUG_ME:
+ cons.log.info(f'line: {line}')
+ m = re.search('----Running ([^ ]*) tests----', line)
+ if m:
+ if DEBUG_ME and cur_suite and cur_suite != 'info':
+ cons.log.info(f'suite: {cur_suite} expected {exp_test_count[cur_suite]} found {test_count}')
+
+ cur_suite = m.group(1)
+ if DEBUG_ME:
+ cons.log.info(f'cur_suite: {cur_suite}')
+ suites.add(cur_suite)
+
+ test_count = 0
+ m = re.match(rf'Running (\d+) {cur_suite} tests', line)
+ if m:
+ exp_test_count[cur_suite] = int(m.group(1))
+ m = re.search(r'Test: (\w*): ([-a-z0-9_]*\.c)?( .*)?', line)
+ if m:
+ test_name = m.group(1)
+ msg = m.group(3)
+ if DEBUG_ME:
+ cons.log.info(f"test_name {test_name} msg '{msg}'")
+ if msg == ' (flat tree)' and test_name not in tests:
+ tests.add(test_name)
+ test_count += 1
+ if not msg or 'skipped as it is manual' in msg:
+ tests.add(test_name)
+ test_count += 1
+ if DEBUG_ME:
+ cons.log.info(f'test_count {test_count}')
+ if DEBUG_ME:
+ cons.log.info(f'suite: {cur_suite} expected {exp_test_count[cur_suite]} found {test_count}')
+ cons.log.info(f"Tests: {' '.join(sorted(list(tests)))}")
+
+ # Figure out what is missing, or extra
+ missing = set()
+ extra = set(suites)
+ for suite in EXPECTED_SUITES:
+ if suite in extra:
+ extra.remove(suite)
+ else:
+ missing.add(suite)
+
+ return suites, tests, exp_test_count, missing, extra
+
+
+def process_ut_info(cons, output):
+ """Process the output of the 'ut info' command
+
+ Args:
+ cons: U-Boot console object
+ output: Output from running 'ut all'
+
+ Returns:
+ tuple:
+ int: Number of suites reported
+ int: Number of tests reported
+ dict: test count for each suite:
+ key: suite name
+ value: number of tests reported for the suite
+
+ """
+ suite_count = None
+ total_test_count = None
+ test_count = {}
+ for line in output.splitlines():
+ line = line.rstrip()
+ if DEBUG_ME:
+ cons.log.info(f'line: {line}')
+ m = re.match(r'Test suites: (.*)', line)
+ if m:
+ suite_count = int(m.group(1))
+ m = re.match(r'Total tests: (.*)', line)
+ if m:
+ total_test_count = int(m.group(1))
+ m = re.match(r' *([0-9?]*) (\w*)', line)
+ if m:
+ test_count[m.group(2)] = m.group(1)
+ return suite_count, total_test_count, test_count
+
+
+@pytest.mark.buildconfigspec('sandbox')
+@pytest.mark.notbuildconfigspec('sandbox_spl')
+@pytest.mark.notbuildconfigspec('sandbox64')
+# This test is disabled since it fails; remove the leading 'x' to try it
+def xtest_suite(u_boot_console, u_boot_config):
+ """Perform various checks on the unit tests, including:
+
+ - The number of suites matches that reported by the 'ut info'
+ - Where available, the number of tests in each suite matches that
+ reported by 'ut info -s'
+ - The total number of tests adds up to the number of tests actually run
+ with 'ut all'
+ - All suites are run with 'ut all'
+ - The expected set of suites is run (the list is hard-coded in this test)
+
+ """
+ cons = u_boot_console
+ buildconfig = u_boot_config.buildconfig
+ with cons.log.section('Run all unit tests'):
+ # ut hush hush_test_simple_dollar prints "Unknown command" on purpose.
+ with u_boot_console.disable_check('unknown_command'):
+ output = cons.run_command('ut all')
+
+ # Process the output from the run
+ with cons.log.section('Check output'):
+ suites, all_tests, exp_test_count, missing, extra = collect_info(cons,
+ output)
+ cons.log.info(f'missing {missing}')
+ cons.log.info(f'extra {extra}')
+
+ # Make sure we got a test count for each suite
+ assert not (suites - exp_test_count.keys())
+
+ # Deal with missing suites
+ with cons.log.section('Check missing suites'):
+ if 'config_cmd_seama' not in buildconfig:
+ cons.log.info("CMD_SEAMA not enabled: Ignoring suite 'seama'")
+ missing.discard('seama')
+
+ # Run 'ut info' and compare with the log results
+ with cons.log.section('Check suite test-counts'):
+ output = cons.run_command('ut info -s')
+
+ suite_count, total_test_count, test_count = process_ut_info(cons,
+ output)
+
+ if missing or extra:
+ cons.log.info(f"suites: {' '.join(sorted(list(suites)))}")
+ cons.log.error(f'missing: {sorted(list(missing))}')
+ cons.log.error(f'extra: {sorted(list(extra))}')
+
+ assert not missing, f'Missing suites {missing}'
+ assert not extra, f'Extra suites {extra}'
+
+ cons.log.info(str(exp_test_count))
+ for suite in EXPECTED_SUITES:
+ assert test_count[suite] in ['?', str(exp_test_count[suite])], \
+ f'suite {suite} expected {exp_test_count[suite]}'
+
+ assert suite_count == len(EXPECTED_SUITES)
+ assert total_test_count == len(all_tests)
diff --git a/test/py/tests/test_upl.py b/test/py/tests/test_upl.py
index 90125c4dc1b..a1ccc8df233 100644
--- a/test/py/tests/test_upl.py
+++ b/test/py/tests/test_upl.py
@@ -35,4 +35,4 @@ def test_upl_handoff(u_boot_console):
# Check the FIT offsets look correct
output = cons.run_command('ut upl -f upl_test_info_norun')
- assert 'Failures: 0' in output
+ assert 'failures: 0' in output
diff --git a/test/py/tests/test_ut.py b/test/py/tests/test_ut.py
index cacf11f7c0a..d2d8ce10755 100644
--- a/test/py/tests/test_ut.py
+++ b/test/py/tests/test_ut.py
@@ -607,4 +607,4 @@ def test_ut(u_boot_console, ut_subtest):
assert 'Unknown command \'quux\' - try \'help\'' in output
else:
output = u_boot_console.run_command('ut ' + ut_subtest)
- assert output.endswith('Failures: 0')
+ assert output.endswith('failures: 0')
diff --git a/test/py/tests/test_vbe.py b/test/py/tests/test_vbe.py
index 50b6c1cd911..861df3f8266 100644
--- a/test/py/tests/test_vbe.py
+++ b/test/py/tests/test_vbe.py
@@ -117,4 +117,4 @@ def test_vbe(u_boot_console):
with cons.log.section('Kernel load'):
output = cons.run_command_list(cmd.splitlines())
- assert 'Failures: 0' in output[-1]
+ assert 'failures: 0' in output[-1]
diff --git a/test/py/tests/test_vpl.py b/test/py/tests/test_vpl.py
index 4af578b9173..8c472ca7a92 100644
--- a/test/py/tests/test_vpl.py
+++ b/test/py/tests/test_vpl.py
@@ -26,7 +26,7 @@ def test_vpl(u_boot_console, ut_vpl_subtest):
cons = u_boot_console
cons.restart_uboot_with_flags(['-u', '-k', ut_vpl_subtest.split()[1]])
output = cons.get_spawn_output().replace('\r', '')
- assert 'Failures: 0' in output
+ assert 'failures: 0' in output
finally:
# Restart afterward in case a non-VPL test is run next. This should not
# happen since VPL tests are run in their own invocation of test.py, but