Diffstat (limited to 'test/py/conftest.py')
-rw-r--r--  test/py/conftest.py  176
1 file changed, 135 insertions(+), 41 deletions(-)
diff --git a/test/py/conftest.py b/test/py/conftest.py
index 3bd333bfd24..e59897c1f78 100644
--- a/test/py/conftest.py
+++ b/test/py/conftest.py
@@ -7,7 +7,7 @@
# test, at shutdown etc. These hooks perform functions such as:
# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
-# - Creating the U-Boot console test fixture.
+# - Creating the ubman test fixture.
# - Creating the HTML log file.
# - Monitoring each test's results.
# - Implementing custom pytest markers.
@@ -25,11 +25,12 @@ import re
from _pytest.runner import runtestprotocol
import subprocess
import sys
-from u_boot_spawn import BootFail, Timeout, Unexpected, handle_exception
+from spawn import BootFail, Timeout, Unexpected, handle_exception
+import time
-# Globals: The HTML log file, and the connection to the U-Boot console.
+# Globals: The HTML log file, and the top-level ubman fixture.
log = None
-console = None
+ubman_fix = None
TEST_PY_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -91,6 +92,9 @@ def pytest_addoption(parser):
parser.addoption('--role', help='U-Boot board role (for Labgrid-sjg)')
parser.addoption('--use-running-system', default=False, action='store_true',
help="Assume that U-Boot is ready and don't wait for a prompt")
+ parser.addoption('--timing', default=False, action='store_true',
+ help='Show info on test timing')
+
def run_build(config, source_dir, build_dir, board_type, log):
"""run_build: Build U-Boot
@@ -243,7 +247,7 @@ def pytest_configure(config):
ubconfig.buildconfig.update(parser.items('root'))
global log
- global console
+ global ubman_fix
global ubconfig
(board_type, board_type_extra, board_identity, build_dir, build_dir_extra,
@@ -285,19 +289,26 @@ def pytest_configure(config):
ubconfig = ArbitraryAttributeContainer()
ubconfig.brd = dict()
ubconfig.env = dict()
-
- modules = [
- (ubconfig.brd, 'u_boot_board_' + board_type_filename),
- (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
- (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
- board_identity_filename),
- ]
- for (dict_to_fill, module_name) in modules:
- try:
- module = __import__(module_name)
- except ImportError:
- continue
- dict_to_fill.update(module.__dict__)
+ not_found = []
+
+ with log.section('Loading lab modules', 'load_modules'):
+ modules = [
+ (ubconfig.brd, 'u_boot_board_' + board_type_filename),
+ (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
+ (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
+ board_identity_filename),
+ ]
+ for (dict_to_fill, module_name) in modules:
+ try:
+ module = __import__(module_name)
+ except ImportError:
+ not_found.append(module_name)
+ continue
+ dict_to_fill.update(module.__dict__)
+ log.info(f"Loaded {module}")
+
+ if not_found:
+ log.warning(f"Failed to find modules: {' '.join(not_found)}")
ubconfig.buildconfig = dict()
@@ -322,6 +333,7 @@ def pytest_configure(config):
ubconfig.use_running_system = config.getoption('use_running_system')
ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'
ubconfig.connection_ok = True
+ ubconfig.timing = config.getoption('timing')
env_vars = (
'board_type',
@@ -338,11 +350,11 @@ def pytest_configure(config):
os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)
if board_type.startswith('sandbox'):
- import u_boot_console_sandbox
- console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
+ import console_sandbox
+ ubman_fix = console_sandbox.ConsoleSandbox(log, ubconfig)
else:
- import u_boot_console_exec_attach
- console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)
+ import console_board
+ ubman_fix = console_board.ConsoleExecAttach(log, ubconfig)
def generate_ut_subtest(metafunc, fixture_name, sym_path):
@@ -361,7 +373,7 @@ def generate_ut_subtest(metafunc, fixture_name, sym_path):
Returns:
Nothing.
"""
- fn = console.config.build_dir + sym_path
+ fn = ubman_fix.config.build_dir + sym_path
try:
with open(fn, 'rt') as f:
lines = f.readlines()
@@ -402,8 +414,8 @@ def generate_config(metafunc, fixture_name):
"""
subconfigs = {
- 'brd': console.config.brd,
- 'env': console.config.env,
+ 'brd': ubman_fix.config.brd,
+ 'env': ubman_fix.config.env,
}
parts = fixture_name.split('__')
if len(parts) < 2:
@@ -465,7 +477,7 @@ def u_boot_log(request):
The fixture value.
"""
- return console.log
+ return ubman_fix.log
@pytest.fixture(scope='session')
def u_boot_config(request):
@@ -478,11 +490,11 @@ def u_boot_config(request):
The fixture value.
"""
- return console.config
+ return ubman_fix.config
@pytest.fixture(scope='function')
-def u_boot_console(request):
- """Generate the value of a test's u_boot_console fixture.
+def ubman(request):
+ """Generate the value of a test's ubman fixture.
Args:
request: The pytest request.
@@ -494,18 +506,18 @@ def u_boot_console(request):
pytest.skip('Cannot get target connection')
return None
try:
- console.ensure_spawned()
+ ubman_fix.ensure_spawned()
except OSError as err:
- handle_exception(ubconfig, console, log, err, 'Lab failure', True)
+ handle_exception(ubconfig, ubman_fix, log, err, 'Lab failure', True)
except Timeout as err:
- handle_exception(ubconfig, console, log, err, 'Lab timeout', True)
+ handle_exception(ubconfig, ubman_fix, log, err, 'Lab timeout', True)
except BootFail as err:
- handle_exception(ubconfig, console, log, err, 'Boot fail', True,
- console.get_spawn_output())
+ handle_exception(ubconfig, ubman_fix, log, err, 'Boot fail', True,
ubman_fix.get_spawn_output())
except Unexpected as err:
- handle_exception(ubconfig, console, log, err, 'Unexpected test output',
+ handle_exception(ubconfig, ubman_fix, log, err, 'Unexpected test output',
False)
- return console
+ return ubman_fix
anchors = {}
tests_not_run = []
@@ -516,6 +528,12 @@ tests_skipped = []
tests_warning = []
tests_passed = []
+# Duration of each test:
+# key (string): test name
+# value (float): duration in ms
+test_durations = {}
+
+
def pytest_itemcollected(item):
"""pytest hook: Called once for each test found during collection.
@@ -531,6 +549,73 @@ def pytest_itemcollected(item):
tests_not_run.append(item.name)
+
+def show_timings():
+ """Write timings for each test, along with a histogram"""
+
+ def get_time_delta(msecs):
+ """Convert milliseconds into a user-friendly string"""
+ if msecs >= 1000:
+ return f'{msecs / 1000:.1f}s'
+ else:
+ return f'{msecs:.0f}ms'
+
+ def show_bar(key, msecs, value):
+ """Show a single bar (line) of the histogram
+
+ Args:
+ key (str): Key to write on the left
+ msecs (float): Total time in milliseconds, displayed after the key
+ value (int): Value to display, i.e. the relative length of the bar
+ """
+ if value:
+ bar_length = int((value / max_count) * max_bar_length)
+ print(f"{key:>8} : {get_time_delta(msecs):>7} |{'#' * bar_length} {value}", file=buf)
+
+ # Create the buckets we will use, each has a count and a total time
+ bucket = {}
+ for power in range(5):
+ for i in [1, 2, 3, 4, 5, 7.5]:
+ bucket[i * 10 ** power] = {'count': 0, 'msecs': 0.0}
+ max_dur = max(bucket.keys())
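+ # (these constants give bucket edges 1, 2, 3, 4, 5, 7.5, 10, 20, ...
+ # up to 75000ms, so max_dur is 75s; longer tests go in too_long below)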
+
+ # Collect counts for each bucket; if outside the range, add to too_long
+ # Also show a sorted list of test timings from longest to shortest
+ too_long = 0
+ too_long_msecs = 0.0
+ max_count = 0
+ with log.section('Timing Report', 'timing_report'):
+ for name, dur in sorted(test_durations.items(), key=lambda kv: kv[1],
+ reverse=True):
+ log.info(f'{get_time_delta(dur):>8} {name}')
+ greater = [k for k in bucket.keys() if dur <= k]
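+ # (min(greater) below is the smallest bucket edge >= dur)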
+ if greater:
+ buck = bucket[min(greater)]
+ buck['count'] += 1
+ max_count = max(max_count, buck['count'])
+ buck['msecs'] += dur
+ else:
+ too_long += 1
+ too_long_msecs += dur
+
+ # Set the maximum length of a histogram bar, in characters
+ max_bar_length = 40
+
+ # Show a summary with a histogram
+ buf = io.StringIO()
+ with log.section('Timing Summary', 'timing_summary'):
+ print('Duration : Total | Number of tests', file=buf)
+ print(f'{"=" * 8} : {"=" * 7} |{"=" * max_bar_length}', file=buf)
+ for dur, buck in bucket.items():
+ if buck['count']:
+ label = get_time_delta(dur)
+ show_bar(f'<{label}', buck['msecs'], buck['count'])
+ if too_long:
+ show_bar(f'>{get_time_delta(max_dur)}', too_long_msecs, too_long)
+ log.info(buf.getvalue())
+ if ubconfig.timing:
+ print(buf.getvalue(), end='')
+
+
def cleanup():
"""Clean up all global state.
@@ -545,8 +630,8 @@ def cleanup():
Nothing.
"""
- if console:
- console.close()
+ if ubman_fix:
+ ubman_fix.close()
if log:
with log.section('Status Report', 'status_report'):
log.status_pass('%d passed' % len(tests_passed))
@@ -580,6 +665,7 @@ def cleanup():
for test in tests_not_run:
anchor = anchors.get(test, None)
log.status_fail('... ' + test, anchor)
+ show_timings()
log.close()
atexit.register(cleanup)
@@ -713,7 +799,9 @@ def pytest_runtest_protocol(item, nextitem):
log.get_and_reset_warning()
ihook = item.ihook
ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
+ start = time.monotonic()
reports = runtestprotocol(item, nextitem=nextitem)
+ duration = round((time.monotonic() - start) * 1000, 1)
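+ # duration is wall-clock time in milliseconds (0.1ms resolution)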
ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
was_warning = log.get_and_reset_warning()
@@ -726,6 +814,7 @@ def pytest_runtest_protocol(item, nextitem):
start_test_section(item)
failure_cleanup = False
+ record_duration = True
if not was_warning:
test_list = tests_passed
msg = 'OK'
@@ -756,9 +845,14 @@ def pytest_runtest_protocol(item, nextitem):
test_list = tests_skipped
msg = 'SKIPPED:\n' + str(report.longrepr)
msg_log = log.status_skipped
+ record_duration = False
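+ # skipped tests are left out of the timing stats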
+
+ msg += f' {duration} ms'
+ if record_duration:
+ test_durations[item.name] = duration
if failure_cleanup:
- console.drain_console()
+ ubman_fix.drain_console()
test_list.append(item.name)
tests_not_run.remove(item.name)
@@ -768,7 +862,7 @@ def pytest_runtest_protocol(item, nextitem):
except:
# If something went wrong with logging, it's better to let the test
# process continue, which may report other exceptions that triggered
- # the logging issue (e.g. console.log wasn't created). Hence, just
+ # the logging issue (e.g. ubman_fix.log wasn't created). Hence, just
# squash the exception. If the test setup failed due to e.g. syntax
# error somewhere else, this won't be seen. However, once that issue
# is fixed, if this exception still exists, it will then be logged as
@@ -781,6 +875,6 @@ def pytest_runtest_protocol(item, nextitem):
log.end_section(item.name)
if failure_cleanup:
- console.cleanup_spawn()
+ ubman_fix.cleanup_spawn()
return True
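
Usage note: with this change applied, a run such as the following (a hypothetical invocation; the sandbox board and --build flag are assumptions, only --timing is added by this patch) prints the timing summary on the console in addition to writing it to the HTML log:

  ./test/py/test.py --bd sandbox --build --timing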