From 3ffdcf7e3b7dff04b055771c03c9646aa383cc1e Mon Sep 17 00:00:00 2001 From: Daniel Latypov Date: Fri, 28 Oct 2022 14:02:55 -0700 Subject: kunit: tool: make unit test not print parsed testdata to stdout Currently, if you run $ ./tools/testing/kunit/kunit_tool_test.py you'll see a lot of output from the parser as we feed it testdata. This makes the output hard to read and fairly confusing, esp. since our testdata includes example failures, which get printed out in red. Silence that output so real failures are easier to see. Signed-off-by: Daniel Latypov Reviewed-by: David Gow Signed-off-by: Shuah Khan --- tools/testing/kunit/kunit_tool_test.py | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py index e2cd2cc2e98f..a6e53945656e 100755 --- a/tools/testing/kunit/kunit_tool_test.py +++ b/tools/testing/kunit/kunit_tool_test.py @@ -80,6 +80,9 @@ class KconfigTest(unittest.TestCase): self.assertEqual(actual_kconfig, expected_kconfig) class KUnitParserTest(unittest.TestCase): + def setUp(self): + self.print_mock = mock.patch('kunit_printer.Printer.print').start() + self.addCleanup(mock.patch.stopall) def assertContains(self, needle: str, haystack: kunit_parser.LineStream): # Clone the iterator so we can print the contents on failure. @@ -485,6 +488,9 @@ class LinuxSourceTreeTest(unittest.TestCase): class KUnitJsonTest(unittest.TestCase): + def setUp(self): + self.print_mock = mock.patch('kunit_printer.Printer.print').start() + self.addCleanup(mock.patch.stopall) def _json_for(self, log_file): with open(test_data_path(log_file)) as file: -- cgit v1.2.3 From f19dd011d8de6f0c1d20abea5158aa4f5d9cea44 Mon Sep 17 00:00:00 2001 From: Daniel Latypov Date: Fri, 28 Oct 2022 14:02:56 -0700 Subject: kunit: tool: print summary of failed tests if a few failed out of a lot E.g. all the hw_breakpoint tests are failing right now. So if I run `kunit.py run --alltests --arch=x86_64`, then I see > Testing complete. Ran 408 tests: passed: 392, failed: 9, skipped: 7 Seeing which 9 tests failed out of the hundreds is annoying. If my terminal doesn't have scrollback support, I have to resort to looking at `.kunit/test.log` for the `not ok` lines. Teach kunit.py to print a summarized list of failures if the # of tests reaches an arbitrary threshold (>=100 tests). To try and keep the output from being too long/noisy, this new logic a) just reports "parent_test failed" if every child test failed, and b) won't print anything if there are >10 failures (also arbitrary). With this patch, we get an extra line of output showing: > Testing complete. Ran 408 tests: passed: 392, failed: 9, skipped: 7 > Failures: hw_breakpoint This also works with parameterized tests, e.g. if I add a fake failure > Failures: kcsan.test_atomic_builtins_missing_barrier.threads=6 Note: we didn't have enough tests for this to be a problem before. But with commit 980ac3ad0512 ("kunit: tool: rename all_test_uml.config, use it for --alltests"), --alltests works and thus running >100 tests will probably become more common.
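To make the collapsing rule concrete, here is a minimal, self-contained Python sketch of the heuristic described above. FakeTest and its `ok` flag are hypothetical stand-ins for kunit_parser's Test object and its ok_status() method; the real implementation is the _summarize_failed_tests() added in the diff below.

from dataclasses import dataclass, field
from typing import List

@dataclass
class FakeTest:  # hypothetical stand-in for kunit_parser.Test
    name: str
    ok: bool = False  # stand-in for Test.ok_status()
    subtests: List['FakeTest'] = field(default_factory=list)

def failed_names(test: FakeTest, parent_name: str = '') -> List[str]:
    # 'main' is the internal name used for the top-level test.
    if not parent_name or parent_name == 'main':
        full_name = test.name
    else:
        full_name = parent_name + '.' + test.name
    if not test.subtests:  # leaf test case
        return [full_name]
    failed = [sub for sub in test.subtests if not sub.ok]
    # Rule a): if every child failed, report only the parent's name.
    if parent_name and len(failed) == len(test.subtests):
        return [full_name]
    names: List[str] = []
    for sub in failed:
        names.extend(failed_names(sub, full_name))
    return names

tree = FakeTest('main', subtests=[
    FakeTest('hw_breakpoint', subtests=[FakeTest('t1'), FakeTest('t2')]),
    FakeTest('kcsan', subtests=[FakeTest('ok_case', ok=True), FakeTest('bad_case')]),
])
print('Failures: ' + ', '.join(failed_names(tree)))
# -> Failures: hw_breakpoint, kcsan.bad_case

Rule b), suppressing the summary entirely once there are more than 10 failures, is omitted from this sketch for brevity.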
Signed-off-by: Daniel Latypov Reviewed-by: David Gow Signed-off-by: Shuah Khan --- tools/testing/kunit/kunit_parser.py | 47 ++++++++++++++++++++++++++++++++++ tools/testing/kunit/kunit_tool_test.py | 22 ++++++++++++++++ 2 files changed, 69 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py index 1ae873e3e341..94dba66feec5 100644 --- a/tools/testing/kunit/kunit_parser.py +++ b/tools/testing/kunit/kunit_parser.py @@ -58,6 +58,10 @@ class Test: self.counts.errors += 1 stdout.print_with_timestamp(stdout.red('[ERROR]') + f' Test: {self.name}: {error_message}') + def ok_status(self) -> bool: + """Returns true if the status was ok, i.e. passed or skipped.""" + return self.status in (TestStatus.SUCCESS, TestStatus.SKIPPED) + class TestStatus(Enum): """An enumeration class to represent the status of a test.""" SUCCESS = auto() @@ -565,6 +569,40 @@ def print_test_footer(test: Test) -> None: stdout.print_with_timestamp(format_test_divider(message, len(message) - stdout.color_len())) + + +def _summarize_failed_tests(test: Test) -> str: + """Tries to summarize all the failing subtests in `test`.""" + + def failed_names(test: Test, parent_name: str) -> List[str]: + # Note: we use 'main' internally for the top-level test. + if not parent_name or parent_name == 'main': + full_name = test.name + else: + full_name = parent_name + '.' + test.name + + if not test.subtests: # this is a leaf node + return [full_name] + + # If all the children failed, just say this subtest failed. + # Don't summarize it down "the top-level test failed", though. + failed_subtests = [sub for sub in test.subtests if not sub.ok_status()] + if parent_name and len(failed_subtests) == len(test.subtests): + return [full_name] + + all_failures = [] # type: List[str] + for t in failed_subtests: + all_failures.extend(failed_names(t, full_name)) + return all_failures + + failures = failed_names(test, '') + # If there are too many failures, printing them out will just be noisy. + if len(failures) > 10: # this is an arbitrary limit + return '' + + return 'Failures: ' + ', '.join(failures) + + def print_summary_line(test: Test) -> None: """ Prints summary line of test object. Color of line is dependent on @@ -587,6 +625,15 @@ def print_summary_line(test: Test) -> None: color = stdout.red stdout.print_with_timestamp(color(f'Testing complete. {test.counts}')) + # Summarize failures that might have gone off-screen since we had a lot + # of tests (arbitrarily defined as >=100 for now). 
+ if test.ok_status() or test.counts.total() < 100: + return + summarized = _summarize_failed_tests(test) + if not summarized: + return + stdout.print_with_timestamp(color(summarized)) + # Other methods: def bubble_up_test_results(test: Test) -> None: diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py index a6e53945656e..7dcd67003b23 100755 --- a/tools/testing/kunit/kunit_tool_test.py +++ b/tools/testing/kunit/kunit_tool_test.py @@ -312,6 +312,28 @@ class KUnitParserTest(unittest.TestCase): result.status) self.assertEqual('kunit-resource-test', result.subtests[0].name) + def test_summarize_failures(self): + output = """ + KTAP version 1 + 1..2 + # Subtest: all_failed_suite + 1..2 + not ok 1 - test1 + not ok 2 - test2 + not ok 1 - all_failed_suite + # Subtest: some_failed_suite + 1..2 + ok 1 - test1 + not ok 2 - test2 + not ok 1 - some_failed_suite + """ + result = kunit_parser.parse_run_tests(output.splitlines()) + self.assertEqual(kunit_parser.TestStatus.FAILURE, result.status) + + self.assertEqual(kunit_parser._summarize_failed_tests(result), + 'Failures: all_failed_suite, some_failed_suite.test2') + + def line_stream_from_strs(strs: Iterable[str]) -> kunit_parser.LineStream: return kunit_parser.LineStream(enumerate(strs, start=1)) -- cgit v1.2.3 From f473dd9488d910aab109e8c6a2e4181125ca322a Mon Sep 17 00:00:00 2001 From: Daniel Latypov Date: Thu, 3 Nov 2022 10:47:38 -0700 Subject: kunit: tool: make TestCounts a dataclass Since we're using Python 3.7+, we can use dataclasses to tersen the code. It also lets us create pre-populated TestCounts() objects and compare them in our unit test. (Before, you could only create empty ones). Signed-off-by: Daniel Latypov Reviewed-by: David Gow Signed-off-by: Shuah Khan --- tools/testing/kunit/kunit_parser.py | 25 ++++++++----------------- tools/testing/kunit/kunit_tool_test.py | 4 +--- 2 files changed, 9 insertions(+), 20 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py index 94dba66feec5..a56c75a973b5 100644 --- a/tools/testing/kunit/kunit_parser.py +++ b/tools/testing/kunit/kunit_parser.py @@ -10,6 +10,7 @@ # Author: Rae Moar from __future__ import annotations +from dataclasses import dataclass import re import sys @@ -71,27 +72,17 @@ class TestStatus(Enum): NO_TESTS = auto() FAILURE_TO_PARSE_TESTS = auto() +@dataclass class TestCounts: """ Tracks the counts of statuses of all test cases and any errors within a Test. - - Attributes: - passed : int - the number of tests that have passed - failed : int - the number of tests that have failed - crashed : int - the number of tests that have crashed - skipped : int - the number of tests that have skipped - errors : int - the number of errors in the test and subtests - """ - def __init__(self): - """Creates TestCounts object with counts of all test - statuses and test errors set to 0. 
- """ - self.passed = 0 - self.failed = 0 - self.crashed = 0 - self.skipped = 0 - self.errors = 0 + """ + passed: int = 0 + failed: int = 0 + crashed: int = 0 + skipped: int = 0 + errors: int = 0 def __str__(self) -> str: """Returns the string representation of a TestCounts object.""" diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py index 7dcd67003b23..440a273f1d21 100755 --- a/tools/testing/kunit/kunit_tool_test.py +++ b/tools/testing/kunit/kunit_tool_test.py @@ -182,9 +182,7 @@ class KUnitParserTest(unittest.TestCase): kunit_parser.extract_tap_lines( file.readlines())) # A missing test plan is not an error. - self.assertEqual(0, result.counts.errors) - # All tests should be accounted for. - self.assertEqual(10, result.counts.total()) + self.assertEqual(result.counts, kunit_parser.TestCounts(passed=10, errors=0)) self.assertEqual( kunit_parser.TestStatus.SUCCESS, result.status) -- cgit v1.2.3 From 05d9d2c3ee1e4b587f71455f6d3d1493289204ff Mon Sep 17 00:00:00 2001 From: Daniel Latypov Date: Thu, 3 Nov 2022 10:47:39 -0700 Subject: kunit: tool: unit tests all check parser errors, standardize formatting a bit Let's verify that the parser isn't reporting any errors for valid inputs. This change also * does result.status checking on one line * makes sure we consistently do it outside of the `with` block Signed-off-by: Daniel Latypov Reviewed-by: David Gow Signed-off-by: Shuah Khan --- tools/testing/kunit/kunit_tool_test.py | 93 ++++++++++++++-------------------- 1 file changed, 38 insertions(+), 55 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py index 440a273f1d21..5e3429a1202b 100755 --- a/tools/testing/kunit/kunit_tool_test.py +++ b/tools/testing/kunit/kunit_tool_test.py @@ -136,33 +136,29 @@ class KUnitParserTest(unittest.TestCase): all_passed_log = test_data_path('test_is_test_passed-all_passed.log') with open(all_passed_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual( - kunit_parser.TestStatus.SUCCESS, - result.status) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual(result.counts.errors, 0) def test_parse_successful_nested_tests_log(self): all_passed_log = test_data_path('test_is_test_passed-all_passed_nested.log') with open(all_passed_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual( - kunit_parser.TestStatus.SUCCESS, - result.status) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual(result.counts.errors, 0) def test_kselftest_nested(self): kselftest_log = test_data_path('test_is_test_passed-kselftest.log') with open(kselftest_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual( - kunit_parser.TestStatus.SUCCESS, - result.status) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual(result.counts.errors, 0) def test_parse_failed_test_log(self): failed_log = test_data_path('test_is_test_passed-failure.log') with open(failed_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual( - kunit_parser.TestStatus.FAILURE, - result.status) + self.assertEqual(kunit_parser.TestStatus.FAILURE, result.status) + self.assertEqual(result.counts.errors, 0) def test_no_header(self): empty_log = test_data_path('test_is_test_passed-no_tests_run_no_header.log') @@ -170,9 +166,8 @@ class KUnitParserTest(unittest.TestCase): result = 
kunit_parser.parse_run_tests( kunit_parser.extract_tap_lines(file.readlines())) self.assertEqual(0, len(result.subtests)) - self.assertEqual( - kunit_parser.TestStatus.FAILURE_TO_PARSE_TESTS, - result.status) + self.assertEqual(kunit_parser.TestStatus.FAILURE_TO_PARSE_TESTS, result.status) + self.assertEqual(result.counts.errors, 1) def test_missing_test_plan(self): missing_plan_log = test_data_path('test_is_test_passed-' @@ -183,9 +178,7 @@ class KUnitParserTest(unittest.TestCase): file.readlines())) # A missing test plan is not an error. self.assertEqual(result.counts, kunit_parser.TestCounts(passed=10, errors=0)) - self.assertEqual( - kunit_parser.TestStatus.SUCCESS, - result.status) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) def test_no_tests(self): header_log = test_data_path('test_is_test_passed-no_tests_run_with_header.log') @@ -193,9 +186,8 @@ class KUnitParserTest(unittest.TestCase): result = kunit_parser.parse_run_tests( kunit_parser.extract_tap_lines(file.readlines())) self.assertEqual(0, len(result.subtests)) - self.assertEqual( - kunit_parser.TestStatus.NO_TESTS, - result.status) + self.assertEqual(kunit_parser.TestStatus.NO_TESTS, result.status) + self.assertEqual(result.counts.errors, 1) def test_no_tests_no_plan(self): no_plan_log = test_data_path('test_is_test_passed-no_tests_no_plan.log') @@ -206,7 +198,7 @@ class KUnitParserTest(unittest.TestCase): self.assertEqual( kunit_parser.TestStatus.NO_TESTS, result.subtests[0].subtests[0].status) - self.assertEqual(1, result.counts.errors) + self.assertEqual(result.counts, kunit_parser.TestCounts(passed=1, errors=1)) def test_no_kunit_output(self): @@ -218,6 +210,7 @@ class KUnitParserTest(unittest.TestCase): print_mock.assert_any_call(StrContains('could not find any KTAP output!')) print_mock.stop() self.assertEqual(0, len(result.subtests)) + self.assertEqual(result.counts.errors, 1) def test_skipped_test(self): skipped_log = test_data_path('test_skip_tests.log') @@ -225,18 +218,16 @@ class KUnitParserTest(unittest.TestCase): result = kunit_parser.parse_run_tests(file.readlines()) # A skipped test does not fail the whole suite. - self.assertEqual( - kunit_parser.TestStatus.SUCCESS, - result.status) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual(result.counts, kunit_parser.TestCounts(passed=4, skipped=1)) def test_skipped_all_tests(self): skipped_log = test_data_path('test_skip_all_tests.log') with open(skipped_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual( - kunit_parser.TestStatus.SKIPPED, - result.status) + self.assertEqual(kunit_parser.TestStatus.SKIPPED, result.status) + self.assertEqual(result.counts, kunit_parser.TestCounts(skipped=5)) def test_ignores_hyphen(self): hyphen_log = test_data_path('test_strip_hyphen.log') @@ -244,9 +235,7 @@ class KUnitParserTest(unittest.TestCase): result = kunit_parser.parse_run_tests(file.readlines()) # A skipped test does not fail the whole suite. 
- self.assertEqual( - kunit_parser.TestStatus.SUCCESS, - result.status) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) self.assertEqual( "sysctl_test", result.subtests[0].name) @@ -260,55 +249,49 @@ class KUnitParserTest(unittest.TestCase): prefix_log = test_data_path('test_config_printk_time.log') with open(prefix_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual( - kunit_parser.TestStatus.SUCCESS, - result.status) - self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertEqual(result.counts.errors, 0) def test_ignores_multiple_prefixes(self): prefix_log = test_data_path('test_multiple_prefixes.log') with open(prefix_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual( - kunit_parser.TestStatus.SUCCESS, - result.status) - self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertEqual(result.counts.errors, 0) def test_prefix_mixed_kernel_output(self): mixed_prefix_log = test_data_path('test_interrupted_tap_output.log') with open(mixed_prefix_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual( - kunit_parser.TestStatus.SUCCESS, - result.status) - self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertEqual(result.counts.errors, 0) def test_prefix_poundsign(self): pound_log = test_data_path('test_pound_sign.log') with open(pound_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual( - kunit_parser.TestStatus.SUCCESS, - result.status) - self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertEqual(result.counts.errors, 0) def test_kernel_panic_end(self): panic_log = test_data_path('test_kernel_panic_interrupt.log') with open(panic_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual( - kunit_parser.TestStatus.TEST_CRASHED, - result.status) - self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertEqual(kunit_parser.TestStatus.TEST_CRASHED, result.status) + self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertGreaterEqual(result.counts.errors, 1) def test_pound_no_prefix(self): pound_log = test_data_path('test_pound_no_prefix.log') with open(pound_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual( - kunit_parser.TestStatus.SUCCESS, - result.status) - self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertEqual(result.counts.errors, 0) def test_summarize_failures(self): output = """ -- cgit v1.2.3 From 101e32a025da386ba6f6efbfe3e75b6ec5a358aa Mon Sep 17 00:00:00 2001 From: Daniel Latypov Date: Thu, 3 Nov 2022 10:47:40 -0700 Subject: kunit: tool: remove redundant file.close() call in unit test We're using a `with` block above, so the file 
object is already closed. Signed-off-by: Daniel Latypov Reviewed-by: David Gow Signed-off-by: Shuah Khan --- tools/testing/kunit/kunit_tool_test.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py index 5e3429a1202b..90c65b072be9 100755 --- a/tools/testing/kunit/kunit_tool_test.py +++ b/tools/testing/kunit/kunit_tool_test.py @@ -242,8 +242,6 @@ class KUnitParserTest(unittest.TestCase): self.assertEqual( "example", result.subtests[1].name) - file.close() - def test_ignores_prefix_printk_time(self): prefix_log = test_data_path('test_config_printk_time.log') -- cgit v1.2.3 From 0a7d5c30b7f02887319a1382fbb8dc1c8250fe2c Mon Sep 17 00:00:00 2001 From: Daniel Latypov Date: Thu, 10 Nov 2022 19:18:55 -0800 Subject: kunit: tool: tweak error message when no KTAP found We currently tell people we "couldn't find any KTAP output" with no indication as to what this might mean. After this patch, we get: $ ./tools/testing/kunit/kunit.py parse /dev/null ============================================================ [ERROR] Test: : Could not find any KTAP output. Did any KUnit tests run? ============================================================ Testing complete. Ran 0 tests: errors: 1 Note: we could try and generate a more verbose message like > Please check .kunit/test.log to see the raw kernel output. or the like, but we'd need to know what the build dir was to know where test.log actually lives. This patch tries to make a more minimal improvement. Signed-off-by: Daniel Latypov Reviewed-by: David Gow Signed-off-by: Shuah Khan --- tools/testing/kunit/kunit_parser.py | 2 +- tools/testing/kunit/kunit_tool_test.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py index a56c75a973b5..d0ed5dd5cfc4 100644 --- a/tools/testing/kunit/kunit_parser.py +++ b/tools/testing/kunit/kunit_parser.py @@ -782,7 +782,7 @@ def parse_run_tests(kernel_output: Iterable[str]) -> Test: test = Test() if not lines: test.name = '' - test.add_error('could not find any KTAP output!') + test.add_error('Could not find any KTAP output. 
Did any KUnit tests run?') test.status = TestStatus.FAILURE_TO_PARSE_TESTS else: test = parse_test(lines, 0, []) diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py index 90c65b072be9..84a08cf07242 100755 --- a/tools/testing/kunit/kunit_tool_test.py +++ b/tools/testing/kunit/kunit_tool_test.py @@ -207,7 +207,7 @@ class KUnitParserTest(unittest.TestCase): with open(crash_log) as file: result = kunit_parser.parse_run_tests( kunit_parser.extract_tap_lines(file.readlines())) - print_mock.assert_any_call(StrContains('could not find any KTAP output!')) + print_mock.assert_any_call(StrContains('Could not find any KTAP output.')) print_mock.stop() self.assertEqual(0, len(result.subtests)) self.assertEqual(result.counts.errors, 1) @@ -588,7 +588,7 @@ class KUnitMainTest(unittest.TestCase): self.assertEqual(e.exception.code, 1) self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1) - self.print_mock.assert_any_call(StrContains('could not find any KTAP output!')) + self.print_mock.assert_any_call(StrContains('Could not find any KTAP output.')) def test_exec_no_tests(self): self.linux_source_mock.run_kernel = mock.Mock(return_value=['TAP version 14', '1..0']) -- cgit v1.2.3 From 309e22effb741a8c65131a2694a49839fd685a27 Mon Sep 17 00:00:00 2001 From: Daniel Latypov Date: Mon, 21 Nov 2022 11:55:26 -0800 Subject: kunit: tool: make --json do nothing if --raw_output is set When --raw_output is set (to any value), we don't actually parse the test results. So asking to print the test results as json doesn't make sense. We internally create a fake test with one passing subtest, so --json would actually print out something misleading. This patch: * Rewords the flag descriptions so hopefully this is more obvious. * Also updates --raw_output's description to note the default behavior is to print out only "KUnit" results (actually any KTAP results) * also renames and refactors some related logic for clarity (e.g. test_result => test, it's a kunit_parser.Test object). Notably, this patch does not make it an error to specify --json and --raw_output together. This is an edge case, but I know of at least one wrapper around kunit.py that always sets --json. You'd never be able to use --raw_output with that wrapper. Signed-off-by: Daniel Latypov Reviewed-by: David Gow Signed-off-by: Shuah Khan --- tools/testing/kunit/kunit.py | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py index 4d4663fb578b..e7b6549712d6 100755 --- a/tools/testing/kunit/kunit.py +++ b/tools/testing/kunit/kunit.py @@ -192,12 +192,11 @@ def _map_to_overall_status(test_status: kunit_parser.TestStatus) -> KunitStatus: def parse_tests(request: KunitParseRequest, metadata: kunit_json.Metadata, input_data: Iterable[str]) -> Tuple[KunitResult, kunit_parser.Test]: parse_start = time.time() - test_result = kunit_parser.Test() - if request.raw_output: # Treat unparsed results as one passing test. 
- test_result.status = kunit_parser.TestStatus.SUCCESS - test_result.counts.passed = 1 + fake_test = kunit_parser.Test() + fake_test.status = kunit_parser.TestStatus.SUCCESS + fake_test.counts.passed = 1 output: Iterable[str] = input_data if request.raw_output == 'all': @@ -206,14 +205,17 @@ def parse_tests(request: KunitParseRequest, metadata: kunit_json.Metadata, input output = kunit_parser.extract_tap_lines(output, lstrip=False) for line in output: print(line.rstrip()) + parse_time = time.time() - parse_start + return KunitResult(KunitStatus.SUCCESS, parse_time), fake_test - else: - test_result = kunit_parser.parse_run_tests(input_data) - parse_end = time.time() + + # Actually parse the test results. + test = kunit_parser.parse_run_tests(input_data) + parse_time = time.time() - parse_start if request.json: json_str = kunit_json.get_json_result( - test=test_result, + test=test, metadata=metadata) if request.json == 'stdout': print(json_str) @@ -223,10 +225,10 @@ def parse_tests(request: KunitParseRequest, metadata: kunit_json.Metadata, input stdout.print_with_timestamp("Test results stored in %s" % os.path.abspath(request.json)) - if test_result.status != kunit_parser.TestStatus.SUCCESS: - return KunitResult(KunitStatus.TEST_FAILURE, parse_end - parse_start), test_result + if test.status != kunit_parser.TestStatus.SUCCESS: + return KunitResult(KunitStatus.TEST_FAILURE, parse_time), test - return KunitResult(KunitStatus.SUCCESS, parse_end - parse_start), test_result + return KunitResult(KunitStatus.SUCCESS, parse_time), test def run_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitRequest) -> KunitResult: @@ -359,14 +361,14 @@ def add_exec_opts(parser) -> None: choices=['suite', 'test']) def add_parse_opts(parser) -> None: - parser.add_argument('--raw_output', help='If set don\'t format output from kernel. ' - 'If set to --raw_output=kunit, filters to just KUnit output.', + parser.add_argument('--raw_output', help='If set don\'t parse output from kernel. ' + 'By default, filters to just KUnit output. Use ' + '--raw_output=all to show everything', type=str, nargs='?', const='all', default=None, choices=['all', 'kunit']) parser.add_argument('--json', nargs='?', - help='Stores test results in a JSON, and either ' - 'prints to stdout or saves to file if a ' - 'filename is specified', + help='Prints parsed test results as JSON to stdout or a file if ' + 'a filename is specified. Does nothing if --raw_output is set.', type=str, const='stdout', default=None, metavar='FILE') -- cgit v1.2.3 From 434498a6bee3db729dbdb7f131f3506f4dca85e8 Mon Sep 17 00:00:00 2001 From: Rae Moar Date: Wed, 23 Nov 2022 18:25:57 +0000 Subject: kunit: tool: parse KTAP compliant test output Change the KUnit parser to be able to parse test output that complies with the KTAP version 1 specification format found here: https://kernel.org/doc/html/latest/dev-tools/ktap.html. Ensure the parser is able to parse tests with the original KUnit test output format as well. 
KUnit parser now accepts any of the following test output formats: Original KUnit test output format: TAP version 14 1..1 # Subtest: kunit-test-suite 1..3 ok 1 - kunit_test_1 ok 2 - kunit_test_2 ok 3 - kunit_test_3 # kunit-test-suite: pass:3 fail:0 skip:0 total:3 # Totals: pass:3 fail:0 skip:0 total:3 ok 1 - kunit-test-suite KTAP version 1 test output format: KTAP version 1 1..1 KTAP version 1 1..3 ok 1 kunit_test_1 ok 2 kunit_test_2 ok 3 kunit_test_3 ok 1 kunit-test-suite New KUnit test output format (changes made in the next patch of this series): KTAP version 1 1..1 KTAP version 1 # Subtest: kunit-test-suite 1..3 ok 1 kunit_test_1 ok 2 kunit_test_2 ok 3 kunit_test_3 # kunit-test-suite: pass:3 fail:0 skip:0 total:3 # Totals: pass:3 fail:0 skip:0 total:3 ok 1 kunit-test-suite Signed-off-by: Rae Moar Reviewed-by: Daniel Latypov Reviewed-by: David Gow Signed-off-by: Shuah Khan --- tools/testing/kunit/kunit_parser.py | 79 ++++++++++++++-------- tools/testing/kunit/kunit_tool_test.py | 14 ++++ .../kunit/test_data/test_parse_ktap_output.log | 8 +++ .../kunit/test_data/test_parse_subtest_header.log | 7 ++ 4 files changed, 80 insertions(+), 28 deletions(-) create mode 100644 tools/testing/kunit/test_data/test_parse_ktap_output.log create mode 100644 tools/testing/kunit/test_data/test_parse_subtest_header.log (limited to 'tools/testing') diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py index d0ed5dd5cfc4..4cc2f8b7ecd0 100644 --- a/tools/testing/kunit/kunit_parser.py +++ b/tools/testing/kunit/kunit_parser.py @@ -441,6 +441,7 @@ def parse_diagnostic(lines: LineStream) -> List[str]: - '# Subtest: [test name]' - '[ok|not ok] [test number] [-] [test name] [optional skip directive]' + - 'KTAP version [version number]' Parameters: lines - LineStream of KTAP output to parse @@ -449,8 +450,9 @@ def parse_diagnostic(lines: LineStream) -> List[str]: Log of diagnostic lines """ log = [] # type: List[str] - while lines and not TEST_RESULT.match(lines.peek()) and not \ - TEST_HEADER.match(lines.peek()): + non_diagnostic_lines = [TEST_RESULT, TEST_HEADER, KTAP_START] + while lines and not any(re.match(lines.peek()) + for re in non_diagnostic_lines): log.append(lines.pop()) return log @@ -496,11 +498,15 @@ def print_test_header(test: Test) -> None: test - Test object representing current test being printed """ message = test.name + if message != "": + # Add a leading space before the subtest counts only if a test name + # is provided using a "# Subtest" header line. 
+ message += " " if test.expected_count: if test.expected_count == 1: - message += ' (1 subtest)' + message += '(1 subtest)' else: - message += f' ({test.expected_count} subtests)' + message += f'({test.expected_count} subtests)' stdout.print_with_timestamp(format_test_divider(message, len(message))) def print_log(log: Iterable[str]) -> None: @@ -647,7 +653,7 @@ def bubble_up_test_results(test: Test) -> None: elif test.counts.get_status() == TestStatus.TEST_CRASHED: test.status = TestStatus.TEST_CRASHED -def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test: +def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest: bool) -> Test: """ Finds next test to parse in LineStream, creates new Test object, parses any subtests of the test, populates Test object with all @@ -665,15 +671,32 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test: 1..4 [subtests] - - Subtest header line + - Subtest header (must include either the KTAP version line or + "# Subtest" header line) - Example: + Example (preferred format with both KTAP version line and + "# Subtest" line): + + KTAP version 1 + # Subtest: name + 1..3 + [subtests] + ok 1 name + + Example (only "# Subtest" line): # Subtest: name 1..3 [subtests] ok 1 name + Example (only KTAP version line, compliant with KTAP v1 spec): + + KTAP version 1 + 1..3 + [subtests] + ok 1 name + - Test result line Example: @@ -685,28 +708,29 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test: expected_num - expected test number for test to be parsed log - list of strings containing any preceding diagnostic lines corresponding to the current test + is_subtest - boolean indicating whether test is a subtest Return: Test object populated with characteristics and any subtests """ test = Test() test.log.extend(log) - parent_test = False - main = parse_ktap_header(lines, test) - if main: - # If KTAP/TAP header is found, attempt to parse + if not is_subtest: + # If parsing the main/top-level test, parse KTAP version line and # test plan test.name = "main" + ktap_line = parse_ktap_header(lines, test) parse_test_plan(lines, test) parent_test = True else: - # If KTAP/TAP header is not found, test must be subtest - # header or test result line so parse attempt to parser - # subtest header - parent_test = parse_test_header(lines, test) + # If not the main test, attempt to parse a test header containing + # the KTAP version line and/or subtest header line + ktap_line = parse_ktap_header(lines, test) + subtest_line = parse_test_header(lines, test) + parent_test = (ktap_line or subtest_line) if parent_test: - # If subtest header is found, attempt to parse - # test plan and print header + # If KTAP version line and/or subtest header is found, attempt + # to parse test plan and print test header parse_test_plan(lines, test) print_test_header(test) expected_count = test.expected_count @@ -721,7 +745,7 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test: sub_log = parse_diagnostic(lines) sub_test = Test() if not lines or (peek_test_name_match(lines, test) and - not main): + is_subtest): if expected_count and test_num <= expected_count: # If parser reaches end of test before # parsing expected number of subtests, print @@ -735,20 +759,19 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test: test.log.extend(sub_log) break else: - sub_test = parse_test(lines, test_num, sub_log) + sub_test = parse_test(lines, test_num, sub_log, True) 
subtests.append(sub_test) test_num += 1 test.subtests = subtests - if not main: + if is_subtest: # If not main test, look for test result line test.log.extend(parse_diagnostic(lines)) - if (parent_test and peek_test_name_match(lines, test)) or \ - not parent_test: - parse_test_result(lines, test, expected_num) - else: + if test.name != "" and not peek_test_name_match(lines, test): test.add_error('missing subtest result line!') + else: + parse_test_result(lines, test, expected_num) - # Check for there being no tests + # Check for there being no subtests within parent test if parent_test and len(subtests) == 0: # Don't override a bad status if this test had one reported. # Assumption: no subtests means CRASHED is from Test.__init__() @@ -758,11 +781,11 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test: # Add statuses to TestCounts attribute in Test object bubble_up_test_results(test) - if parent_test and not main: + if parent_test and is_subtest: # If test has subtests and is not the main test object, print # footer. print_test_footer(test) - elif not main: + elif is_subtest: print_test_result(test) return test @@ -785,7 +808,7 @@ def parse_run_tests(kernel_output: Iterable[str]) -> Test: test.add_error('Could not find any KTAP output. Did any KUnit tests run?') test.status = TestStatus.FAILURE_TO_PARSE_TESTS else: - test = parse_test(lines, 0, []) + test = parse_test(lines, 0, [], False) if test.status != TestStatus.NO_TESTS: test.status = test.counts.get_status() stdout.print_with_timestamp(DIVIDER) diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py index 84a08cf07242..d7f669cbf2a8 100755 --- a/tools/testing/kunit/kunit_tool_test.py +++ b/tools/testing/kunit/kunit_tool_test.py @@ -312,6 +312,20 @@ class KUnitParserTest(unittest.TestCase): self.assertEqual(kunit_parser._summarize_failed_tests(result), 'Failures: all_failed_suite, some_failed_suite.test2') + def test_ktap_format(self): + ktap_log = test_data_path('test_parse_ktap_output.log') + with open(ktap_log) as file: + result = kunit_parser.parse_run_tests(file.readlines()) + self.assertEqual(result.counts, kunit_parser.TestCounts(passed=3)) + self.assertEqual('suite', result.subtests[0].name) + self.assertEqual('case_1', result.subtests[0].subtests[0].name) + self.assertEqual('case_2', result.subtests[0].subtests[1].name) + + def test_parse_subtest_header(self): + ktap_log = test_data_path('test_parse_subtest_header.log') + with open(ktap_log) as file: + result = kunit_parser.parse_run_tests(file.readlines()) + self.print_mock.assert_any_call(StrContains('suite (1 subtest)')) def line_stream_from_strs(strs: Iterable[str]) -> kunit_parser.LineStream: return kunit_parser.LineStream(enumerate(strs, start=1)) diff --git a/tools/testing/kunit/test_data/test_parse_ktap_output.log b/tools/testing/kunit/test_data/test_parse_ktap_output.log new file mode 100644 index 000000000000..ccdf244e5303 --- /dev/null +++ b/tools/testing/kunit/test_data/test_parse_ktap_output.log @@ -0,0 +1,8 @@ +KTAP version 1 +1..1 + KTAP version 1 + 1..3 + ok 1 case_1 + ok 2 case_2 + ok 3 case_3 +ok 1 suite diff --git a/tools/testing/kunit/test_data/test_parse_subtest_header.log b/tools/testing/kunit/test_data/test_parse_subtest_header.log new file mode 100644 index 000000000000..216631092e7b --- /dev/null +++ b/tools/testing/kunit/test_data/test_parse_subtest_header.log @@ -0,0 +1,7 @@ +KTAP version 1 +1..1 + KTAP version 1 + # Subtest: suite + 1..1 + ok 1 test +ok 1 suite \ No newline at end of 
file -- cgit v1.2.3 From 5937e0c04afc7d4b7b737fda93316ba4b74183c0 Mon Sep 17 00:00:00 2001 From: Daniel Latypov Date: Mon, 28 Nov 2022 16:12:34 -0800 Subject: kunit: tool: don't include KTAP headers and the like in the test log We print the "test log" on failure. This is meant to be all the kernel output that happened during the test. But we also include the special KTAP lines in it, which are often redundant. E.g. we include the "not ok" line in the log, right before we print that the test case failed... [13:51:48] Expected 2 + 1 == 2, but [13:51:48] 2 + 1 == 3 (0x3) [13:51:48] not ok 1 example_simple_test [13:51:48] [FAILED] example_simple_test More full example after this patch: [13:51:48] =================== example (4 subtests) =================== [13:51:48] # example_simple_test: initializing [13:51:48] # example_simple_test: EXPECTATION FAILED at lib/kunit/kunit-example-test.c:29 [13:51:48] Expected 2 + 1 == 2, but [13:51:48] 2 + 1 == 3 (0x3) [13:51:48] [FAILED] example_simple_test Signed-off-by: Daniel Latypov Reviewed-by: David Gow Signed-off-by: Shuah Khan --- tools/testing/kunit/kunit_parser.py | 8 ++++---- tools/testing/kunit/kunit_tool_test.py | 17 +++++++++++++++++ 2 files changed, 21 insertions(+), 4 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py index 4cc2f8b7ecd0..99b8f058db40 100644 --- a/tools/testing/kunit/kunit_parser.py +++ b/tools/testing/kunit/kunit_parser.py @@ -295,7 +295,7 @@ def parse_ktap_header(lines: LineStream, test: Test) -> bool: check_version(version_num, TAP_VERSIONS, 'TAP', test) else: return False - test.log.append(lines.pop()) + lines.pop() return True TEST_HEADER = re.compile(r'^# Subtest: (.*)$') @@ -318,8 +318,8 @@ def parse_test_header(lines: LineStream, test: Test) -> bool: match = TEST_HEADER.match(lines.peek()) if not match: return False - test.log.append(lines.pop()) test.name = match.group(1) + lines.pop() return True TEST_PLAN = re.compile(r'1\.\.([0-9]+)') @@ -345,9 +345,9 @@ def parse_test_plan(lines: LineStream, test: Test) -> bool: if not match: test.expected_count = None return False - test.log.append(lines.pop()) expected_count = int(match.group(1)) test.expected_count = expected_count + lines.pop() return True TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$') @@ -409,7 +409,7 @@ def parse_test_result(lines: LineStream, test: Test, # Check if line matches test result line format if not match: return False - test.log.append(lines.pop()) + lines.pop() # Set name of test object if skip_match: diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py index d7f669cbf2a8..1ef921ac4331 100755 --- a/tools/testing/kunit/kunit_tool_test.py +++ b/tools/testing/kunit/kunit_tool_test.py @@ -84,6 +84,10 @@ class KUnitParserTest(unittest.TestCase): self.print_mock = mock.patch('kunit_printer.Printer.print').start() self.addCleanup(mock.patch.stopall) + def noPrintCallContains(self, substr: str): + for call in self.print_mock.mock_calls: + self.assertNotIn(substr, call.args[0]) + def assertContains(self, needle: str, haystack: kunit_parser.LineStream): # Clone the iterator so we can print the contents on failure. 
copy, backup = itertools.tee(haystack) @@ -327,6 +331,19 @@ result = kunit_parser.parse_run_tests(file.readlines()) self.print_mock.assert_any_call(StrContains('suite (1 subtest)')) + def test_show_test_output_on_failure(self): + output = """ + KTAP version 1 + 1..1 + Test output. + not ok 1 test1 + """ + result = kunit_parser.parse_run_tests(output.splitlines()) + self.assertEqual(kunit_parser.TestStatus.FAILURE, result.status) + + self.print_mock.assert_any_call(StrContains('Test output.')) + self.noPrintCallContains('not ok 1 test1') + def line_stream_from_strs(strs: Iterable[str]) -> kunit_parser.LineStream: return kunit_parser.LineStream(enumerate(strs, start=1)) -- cgit v1.2.3 From c2bb92bc4ea13842fdd27819c0d5b48df2b86ea5 Mon Sep 17 00:00:00 2001 From: Daniel Latypov Date: Wed, 30 Nov 2022 10:54:19 -0800 Subject: kunit: tool: make parser preserve whitespace when printing test log Currently, kunit_parser.py is stripping all leading whitespace to make parsing easier. But this means we can't accurately show kernel output for failing tests or when the kernel crashes. Embarrassingly, this affects even KUnit's own output, e.g. [13:40:46] Expected 2 + 1 == 2, but [13:40:46] 2 + 1 == 3 (0x3) [13:40:46] not ok 1 example_simple_test [13:40:46] [FAILED] example_simple_test After this change, here's what the output in context would look like: [13:40:46] =================== example (4 subtests) =================== [13:40:46] # example_simple_test: initializing [13:40:46] # example_simple_test: EXPECTATION FAILED at lib/kunit/kunit-example-test.c:29 [13:40:46] Expected 2 + 1 == 2, but [13:40:46] 2 + 1 == 3 (0x3) [13:40:46] [FAILED] example_simple_test [13:40:46] [SKIPPED] example_skip_test [13:40:46] [SKIPPED] example_mark_skipped_test [13:40:46] [PASSED] example_all_expect_macros_test [13:40:46] # example: initializing suite [13:40:46] # example: pass:1 fail:1 skip:2 total:4 [13:40:46] # Totals: pass:1 fail:1 skip:2 total:4 [13:40:46] ===================== [FAILED] example ===================== This example shows one minor cosmetic defect this approach has. The test counts lines prevent us from dedenting the suite-level output. But at the same time, any form of non-KUnit output would do the same unless it happened to be indented as well.
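The dedent behaviour, including the cosmetic defect just described, can be reproduced in isolation with Python's textwrap module, which is what the reworked print_log() below relies on. The sample log lines here are made up for illustration:

import textwrap

log = [
    '    # example_simple_test: EXPECTATION FAILED at lib/kunit/kunit-example-test.c:29',
    '        Expected 2 + 1 == 2, but',
    '        2 + 1 == 3 (0x3)',
    '# Totals: pass:1 fail:1 skip:2 total:4',  # flush-left test counts line
]
# textwrap.dedent() strips only the whitespace prefix common to all
# lines, so relative indentation is preserved -- but a single
# unindented line (like the test counts) makes the common prefix
# empty, and nothing gets dedented at all.
print(textwrap.dedent('\n'.join(log)))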
Signed-off-by: Daniel Latypov Reviewed-by: David Gow Signed-off-by: Shuah Khan --- tools/testing/kunit/kunit.py | 2 +- tools/testing/kunit/kunit_parser.py | 27 +++++++++++++-------------- tools/testing/kunit/kunit_tool_test.py | 2 ++ 3 files changed, 16 insertions(+), 15 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py index e7b6549712d6..43fbe96318fe 100755 --- a/tools/testing/kunit/kunit.py +++ b/tools/testing/kunit/kunit.py @@ -202,7 +202,7 @@ def parse_tests(request: KunitParseRequest, metadata: kunit_json.Metadata, input if request.raw_output == 'all': pass elif request.raw_output == 'kunit': - output = kunit_parser.extract_tap_lines(output, lstrip=False) + output = kunit_parser.extract_tap_lines(output) for line in output: print(line.rstrip()) parse_time = time.time() - parse_start diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py index 99b8f058db40..a225799f6b1b 100644 --- a/tools/testing/kunit/kunit_parser.py +++ b/tools/testing/kunit/kunit_parser.py @@ -13,6 +13,7 @@ from __future__ import annotations from dataclasses import dataclass import re import sys +import textwrap from enum import Enum, auto from typing import Iterable, Iterator, List, Optional, Tuple @@ -208,12 +209,12 @@ class LineStream: # Parsing helper methods: -KTAP_START = re.compile(r'KTAP version ([0-9]+)$') -TAP_START = re.compile(r'TAP version ([0-9]+)$') -KTAP_END = re.compile('(List of all partitions:|' +KTAP_START = re.compile(r'\s*KTAP version ([0-9]+)$') +TAP_START = re.compile(r'\s*TAP version ([0-9]+)$') +KTAP_END = re.compile(r'\s*(List of all partitions:|' 'Kernel panic - not syncing: VFS:|reboot: System halted)') -def extract_tap_lines(kernel_output: Iterable[str], lstrip=True) -> LineStream: +def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream: """Extracts KTAP lines from the kernel output.""" def isolate_ktap_output(kernel_output: Iterable[str]) \ -> Iterator[Tuple[int, str]]: @@ -239,11 +240,8 @@ def extract_tap_lines(kernel_output: Iterable[str], lstrip=True) -> LineStream: # stop extracting KTAP lines break elif started: - # remove the prefix and optionally any leading - # whitespace. Our parsing logic relies on this. + # remove the prefix, if any. 
line = line[prefix_len:] - if lstrip: - line = line.lstrip() yield line_num, line return LineStream(lines=isolate_ktap_output(kernel_output)) @@ -298,7 +296,7 @@ def parse_ktap_header(lines: LineStream, test: Test) -> bool: lines.pop() return True -TEST_HEADER = re.compile(r'^# Subtest: (.*)$') +TEST_HEADER = re.compile(r'^\s*# Subtest: (.*)$') def parse_test_header(lines: LineStream, test: Test) -> bool: """ @@ -322,7 +320,7 @@ def parse_test_header(lines: LineStream, test: Test) -> bool: lines.pop() return True -TEST_PLAN = re.compile(r'1\.\.([0-9]+)') +TEST_PLAN = re.compile(r'^\s*1\.\.([0-9]+)') def parse_test_plan(lines: LineStream, test: Test) -> bool: """ @@ -350,9 +348,9 @@ def parse_test_plan(lines: LineStream, test: Test) -> bool: lines.pop() return True -TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$') +TEST_RESULT = re.compile(r'^\s*(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$') -TEST_RESULT_SKIP = re.compile(r'^(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$') +TEST_RESULT_SKIP = re.compile(r'^\s*(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$') def peek_test_name_match(lines: LineStream, test: Test) -> bool: """ @@ -511,8 +509,9 @@ def print_test_header(test: Test) -> None: def print_log(log: Iterable[str]) -> None: """Prints all strings in saved log for test in yellow.""" - for m in log: - stdout.print_with_timestamp(stdout.yellow(m)) + formatted = textwrap.dedent('\n'.join(log)) + for line in formatted.splitlines(): + stdout.print_with_timestamp(stdout.yellow(line)) def format_test_result(test: Test) -> str: """ diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py index 1ef921ac4331..0c2190514103 100755 --- a/tools/testing/kunit/kunit_tool_test.py +++ b/tools/testing/kunit/kunit_tool_test.py @@ -336,12 +336,14 @@ class KUnitParserTest(unittest.TestCase): KTAP version 1 1..1 Test output. + Indented more. not ok 1 test1 """ result = kunit_parser.parse_run_tests(output.splitlines()) self.assertEqual(kunit_parser.TestStatus.FAILURE, result.status) self.print_mock.assert_any_call(StrContains('Test output.')) + self.print_mock.assert_any_call(StrContains(' Indented more.')) self.noPrintCallContains('not ok 1 test1') def line_stream_from_strs(strs: Iterable[str]) -> kunit_parser.LineStream: -- cgit v1.2.3
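As a closing illustration, the parser changes in this series can be exercised directly from Python. This is a rough sketch, assuming kunit_parser is importable (e.g. when run from tools/testing/kunit), feeding parse_run_tests() the indented, KTAP-compliant format the last few patches taught it to accept:

import kunit_parser

ktap = """\
KTAP version 1
1..1
  KTAP version 1
  # Subtest: suite
  1..1
  ok 1 test
ok 1 suite
"""
# parse_run_tests() prints the parsed results with timestamps and
# returns the top-level Test object.
result = kunit_parser.parse_run_tests(ktap.splitlines())
print(result.status)  # expected: TestStatus.SUCCESS
print(result.counts)  # aggregated pass/fail/skip/error counts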