Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

kunit: tool: improve compatibility of kunit_parser with KTAP specification

Update kunit_parser to improve compatibility with the KTAP
specification, including arbitrarily nested tests. This patch
accomplishes three major changes:

- Use a general Test object to represent all tests rather than TestCase
and TestSuite objects. This allows for easier implementation of arbitrary
levels of nested tests and promotes the idea that both test suites and test
cases are tests.

- Print errors incrementally rather than all at once after the
parsing finishes, to maximize the information given to the user in the
case of the parser being given invalid input, and to increase the
helpfulness of the timestamps given during printing. Note that
kunit.py parse does not print incrementally yet; however, this fix
brings us closer to that feature.

- Increase compatibility for different formats of input. Arbitrary levels
of nested tests are supported. Also, test cases and test suites can now
be present at the same level of testing.

This patch now implements the draft KTAP specification here:
https://lore.kernel.org/linux-kselftest/CA+GJov6tdjvY9x12JsJT14qn6c7NViJxqaJk+r-K1YJzPggFDQ@mail.gmail.com/
We'll update the parser as the spec evolves.

This patch adjusts the kunit_tool_test.py file to check for
the correct outputs from the new parser and adds a new test to check
the parsing for a KTAP result log with correct format for multiple nested
subtests (test_is_test_passed-all_passed_nested.log).

This patch also alters the kunit_json.py file to allow for arbitrarily
nested tests.

Signed-off-by: Rae Moar <rmoar@google.com>
Reviewed-by: Brendan Higgins <brendanhiggins@google.com>
Signed-off-by: Daniel Latypov <dlatypov@google.com>
Reviewed-by: David Gow <davidgow@google.com>
Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>

authored by

Rae Moar and committed by
Shuah Khan
d65d07cb 7d7c48df

+940 -385
+15 -4
tools/testing/kunit/kunit.py
··· 135 135 test_glob = request.filter_glob.split('.', maxsplit=2)[1] 136 136 filter_globs = [g + '.'+ test_glob for g in filter_globs] 137 137 138 - overall_status = kunit_parser.TestStatus.SUCCESS 138 + test_counts = kunit_parser.TestCounts() 139 139 exec_time = 0.0 140 140 for i, filter_glob in enumerate(filter_globs): 141 141 kunit_parser.print_with_timestamp('Starting KUnit Kernel ({}/{})...'.format(i+1, len(filter_globs))) ··· 154 154 test_end = time.time() 155 155 exec_time += test_end - test_start 156 156 157 - overall_status = kunit_parser.max_status(overall_status, result.status) 157 + test_counts.add_subtest_counts(result.result.test.counts) 158 158 159 - return KunitResult(status=result.status, result=result.result, elapsed_time=exec_time) 159 + kunit_status = _map_to_overall_status(test_counts.get_status()) 160 + return KunitResult(status=kunit_status, result=result.result, elapsed_time=exec_time) 161 + 162 + def _map_to_overall_status(test_status: kunit_parser.TestStatus) -> KunitStatus: 163 + if test_status in (kunit_parser.TestStatus.SUCCESS, kunit_parser.TestStatus.SKIPPED): 164 + return KunitStatus.SUCCESS 165 + else: 166 + return KunitStatus.TEST_FAILURE 160 167 161 168 def parse_tests(request: KunitParseRequest, input_data: Iterable[str]) -> KunitResult: 162 169 parse_start = time.time() 163 170 164 171 test_result = kunit_parser.TestResult(kunit_parser.TestStatus.SUCCESS, 165 - [], 172 + kunit_parser.Test(), 166 173 'Tests not Parsed.') 167 174 168 175 if request.raw_output: 176 + # Treat unparsed results as one passing test. 177 + test_result.test.status = kunit_parser.TestStatus.SUCCESS 178 + test_result.test.counts.passed = 1 179 + 169 180 output: Iterable[str] = input_data 170 181 if request.raw_output == 'all': 171 182 pass
+26 -26
tools/testing/kunit/kunit_json.py
··· 11 11 12 12 import kunit_parser 13 13 14 - from kunit_parser import TestStatus 14 + from kunit_parser import Test, TestResult, TestStatus 15 + from typing import Any, Dict, Optional 15 16 16 - def get_json_result(test_result, def_config, build_dir, json_path) -> str: 17 - sub_groups = [] 17 + JsonObj = Dict[str, Any] 18 18 19 - # Each test suite is mapped to a KernelCI sub_group 20 - for test_suite in test_result.suites: 21 - sub_group = { 22 - "name": test_suite.name, 23 - "arch": "UM", 24 - "defconfig": def_config, 25 - "build_environment": build_dir, 26 - "test_cases": [], 27 - "lab_name": None, 28 - "kernel": None, 29 - "job": None, 30 - "git_branch": "kselftest", 31 - } 32 - test_cases = [] 33 - # TODO: Add attachments attribute in test_case with detailed 34 - # failure message, see https://api.kernelci.org/schema-test-case.html#get 35 - for case in test_suite.cases: 36 - test_case = {"name": case.name, "status": "FAIL"} 37 - if case.status == TestStatus.SUCCESS: 19 + def _get_group_json(test: Test, def_config: str, 20 + build_dir: Optional[str]) -> JsonObj: 21 + sub_groups = [] # List[JsonObj] 22 + test_cases = [] # List[JsonObj] 23 + 24 + for subtest in test.subtests: 25 + if len(subtest.subtests): 26 + sub_group = _get_group_json(subtest, def_config, 27 + build_dir) 28 + sub_groups.append(sub_group) 29 + else: 30 + test_case = {"name": subtest.name, "status": "FAIL"} 31 + if subtest.status == TestStatus.SUCCESS: 38 32 test_case["status"] = "PASS" 39 - elif case.status == TestStatus.TEST_CRASHED: 33 + elif subtest.status == TestStatus.TEST_CRASHED: 40 34 test_case["status"] = "ERROR" 41 35 test_cases.append(test_case) 42 - sub_group["test_cases"] = test_cases 43 - sub_groups.append(sub_group) 36 + 44 37 test_group = { 45 - "name": "KUnit Test Group", 38 + "name": test.name, 46 39 "arch": "UM", 47 40 "defconfig": def_config, 48 41 "build_environment": build_dir, 49 42 "sub_groups": sub_groups, 43 + "test_cases": test_cases, 50 44 "lab_name": None, 51 45 
"kernel": None, 52 46 "job": None, 53 47 "git_branch": "kselftest", 54 48 } 49 + return test_group 50 + 51 + def get_json_result(test_result: TestResult, def_config: str, 52 + build_dir: Optional[str], json_path: str) -> str: 53 + test_group = _get_group_json(test_result.test, def_config, build_dir) 54 + test_group["name"] = "KUnit Test Group" 55 55 json_obj = json.dumps(test_group, indent=4) 56 56 if json_path != 'stdout': 57 57 with open(json_path, 'w') as result_path:
+706 -317
tools/testing/kunit/kunit_parser.py
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 # 3 - # Parses test results from a kernel dmesg log. 3 + # Parses KTAP test results from a kernel dmesg log and incrementally prints 4 + # results with reader-friendly format. Stores and returns test results in a 5 + # Test object. 4 6 # 5 7 # Copyright (C) 2019, Google LLC. 6 8 # Author: Felix Guo <felixguoxiuping@gmail.com> 7 9 # Author: Brendan Higgins <brendanhiggins@google.com> 10 + # Author: Rae Moar <rmoar@google.com> 8 11 12 + from __future__ import annotations 9 13 import re 10 14 11 15 from collections import namedtuple ··· 18 14 from functools import reduce 19 15 from typing import Iterable, Iterator, List, Optional, Tuple 20 16 21 - TestResult = namedtuple('TestResult', ['status','suites','log']) 17 + TestResult = namedtuple('TestResult', ['status','test','log']) 22 18 23 - class TestSuite(object): 19 + class Test(object): 20 + """ 21 + A class to represent a test parsed from KTAP results. All KTAP 22 + results within a test log are stored in a main Test object as 23 + subtests. 24 + 25 + Attributes: 26 + status : TestStatus - status of the test 27 + name : str - name of the test 28 + expected_count : int - expected number of subtests (0 if single 29 + test case and None if unknown expected number of subtests) 30 + subtests : List[Test] - list of subtests 31 + log : List[str] - log of KTAP lines that correspond to the test 32 + counts : TestCounts - counts of the test statuses and errors of 33 + subtests or of the test itself if the test is a single 34 + test case. 
35 + """ 24 36 def __init__(self) -> None: 25 - self.status = TestStatus.SUCCESS 37 + """Creates Test object with default attributes.""" 38 + self.status = TestStatus.TEST_CRASHED 26 39 self.name = '' 27 - self.cases = [] # type: List[TestCase] 28 - 29 - def __str__(self) -> str: 30 - return 'TestSuite(' + str(self.status) + ',' + self.name + ',' + str(self.cases) + ')' 31 - 32 - def __repr__(self) -> str: 33 - return str(self) 34 - 35 - class TestCase(object): 36 - def __init__(self) -> None: 37 - self.status = TestStatus.SUCCESS 38 - self.name = '' 40 + self.expected_count = 0 # type: Optional[int] 41 + self.subtests = [] # type: List[Test] 39 42 self.log = [] # type: List[str] 43 + self.counts = TestCounts() 40 44 41 45 def __str__(self) -> str: 42 - return 'TestCase(' + str(self.status) + ',' + self.name + ',' + str(self.log) + ')' 46 + """Returns string representation of a Test class object.""" 47 + return ('Test(' + str(self.status) + ', ' + self.name + 48 + ', ' + str(self.expected_count) + ', ' + 49 + str(self.subtests) + ', ' + str(self.log) + ', ' + 50 + str(self.counts) + ')') 43 51 44 52 def __repr__(self) -> str: 53 + """Returns string representation of a Test class object.""" 45 54 return str(self) 55 + 56 + def add_error(self, error_message: str) -> None: 57 + """Records an error that occurred while parsing this test.""" 58 + self.counts.errors += 1 59 + print_error('Test ' + self.name + ': ' + error_message) 46 60 47 61 class TestStatus(Enum): 62 + """An enumeration class to represent the status of a test.""" 48 63 SUCCESS = auto() 49 64 FAILURE = auto() 50 65 SKIPPED = auto() ··· 71 48 NO_TESTS = auto() 72 49 FAILURE_TO_PARSE_TESTS = auto() 73 50 51 + class TestCounts: 52 + """ 53 + Tracks the counts of statuses of all test cases and any errors within 54 + a Test. 
55 + 56 + Attributes: 57 + passed : int - the number of tests that have passed 58 + failed : int - the number of tests that have failed 59 + crashed : int - the number of tests that have crashed 60 + skipped : int - the number of tests that have skipped 61 + errors : int - the number of errors in the test and subtests 62 + """ 63 + def __init__(self): 64 + """Creates TestCounts object with counts of all test 65 + statuses and test errors set to 0. 66 + """ 67 + self.passed = 0 68 + self.failed = 0 69 + self.crashed = 0 70 + self.skipped = 0 71 + self.errors = 0 72 + 73 + def __str__(self) -> str: 74 + """Returns the string representation of a TestCounts object. 75 + """ 76 + return ('Passed: ' + str(self.passed) + 77 + ', Failed: ' + str(self.failed) + 78 + ', Crashed: ' + str(self.crashed) + 79 + ', Skipped: ' + str(self.skipped) + 80 + ', Errors: ' + str(self.errors)) 81 + 82 + def total(self) -> int: 83 + """Returns the total number of test cases within a test 84 + object, where a test case is a test with no subtests. 85 + """ 86 + return (self.passed + self.failed + self.crashed + 87 + self.skipped) 88 + 89 + def add_subtest_counts(self, counts: TestCounts) -> None: 90 + """ 91 + Adds the counts of another TestCounts object to the current 92 + TestCounts object. Used to add the counts of a subtest to the 93 + parent test. 94 + 95 + Parameters: 96 + counts - a different TestCounts object whose counts 97 + will be added to the counts of the TestCounts object 98 + """ 99 + self.passed += counts.passed 100 + self.failed += counts.failed 101 + self.crashed += counts.crashed 102 + self.skipped += counts.skipped 103 + self.errors += counts.errors 104 + 105 + def get_status(self) -> TestStatus: 106 + """Returns the aggregated status of a Test using test 107 + counts. 108 + """ 109 + if self.total() == 0: 110 + return TestStatus.NO_TESTS 111 + elif self.crashed: 112 + # If one of the subtests crash, the expected status 113 + # of the Test is crashed. 
114 + return TestStatus.TEST_CRASHED 115 + elif self.failed: 116 + # Otherwise if one of the subtests fail, the 117 + # expected status of the Test is failed. 118 + return TestStatus.FAILURE 119 + elif self.passed: 120 + # Otherwise if one of the subtests pass, the 121 + # expected status of the Test is passed. 122 + return TestStatus.SUCCESS 123 + else: 124 + # Finally, if none of the subtests have failed, 125 + # crashed, or passed, the expected status of the 126 + # Test is skipped. 127 + return TestStatus.SKIPPED 128 + 129 + def add_status(self, status: TestStatus) -> None: 130 + """ 131 + Increments count of inputted status. 132 + 133 + Parameters: 134 + status - status to be added to the TestCounts object 135 + """ 136 + if status == TestStatus.SUCCESS: 137 + self.passed += 1 138 + elif status == TestStatus.FAILURE: 139 + self.failed += 1 140 + elif status == TestStatus.SKIPPED: 141 + self.skipped += 1 142 + elif status != TestStatus.NO_TESTS: 143 + self.crashed += 1 144 + 74 145 class LineStream: 75 - """Provides a peek()/pop() interface over an iterator of (line#, text).""" 146 + """ 147 + A class to represent the lines of kernel output. 148 + Provides a peek()/pop() interface over an iterator of 149 + (line#, text). 150 + """ 76 151 _lines: Iterator[Tuple[int, str]] 77 152 _next: Tuple[int, str] 78 153 _done: bool 79 154 80 155 def __init__(self, lines: Iterator[Tuple[int, str]]): 156 + """Creates a new LineStream that wraps the given iterator.""" 81 157 self._lines = lines 82 158 self._done = False 83 159 self._next = (0, '') 84 160 self._get_next() 85 161 86 162 def _get_next(self) -> None: 163 + """Advances the LineSteam to the next line.""" 87 164 try: 88 165 self._next = next(self._lines) 89 166 except StopIteration: 90 167 self._done = True 91 168 92 169 def peek(self) -> str: 170 + """Returns the current line, without advancing the LineStream. 
171 + """ 93 172 return self._next[1] 94 173 95 174 def pop(self) -> str: 175 + """Returns the current line and advances the LineStream to 176 + the next line. 177 + """ 96 178 n = self._next 97 179 self._get_next() 98 180 return n[1] 99 181 100 182 def __bool__(self) -> bool: 183 + """Returns True if stream has more lines.""" 101 184 return not self._done 102 185 103 186 # Only used by kunit_tool_test.py. 104 187 def __iter__(self) -> Iterator[str]: 188 + """Empties all lines stored in LineStream object into 189 + Iterator object and returns the Iterator object. 190 + """ 105 191 while bool(self): 106 192 yield self.pop() 107 193 108 194 def line_number(self) -> int: 195 + """Returns the line number of the current line.""" 109 196 return self._next[0] 110 197 111 - kunit_start_re = re.compile(r'TAP version [0-9]+$') 112 - kunit_end_re = re.compile('(List of all partitions:|' 113 - 'Kernel panic - not syncing: VFS:|reboot: System halted)') 198 + # Parsing helper methods: 199 + 200 + KTAP_START = re.compile(r'KTAP version ([0-9]+)$') 201 + TAP_START = re.compile(r'TAP version ([0-9]+)$') 202 + KTAP_END = re.compile('(List of all partitions:|' 203 + 'Kernel panic - not syncing: VFS:|reboot: System halted)') 114 204 115 205 def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream: 116 - def isolate_kunit_output(kernel_output: Iterable[str]) -> Iterator[Tuple[int, str]]: 206 + """Extracts KTAP lines from the kernel output.""" 207 + def isolate_ktap_output(kernel_output: Iterable[str]) \ 208 + -> Iterator[Tuple[int, str]]: 117 209 line_num = 0 118 210 started = False 119 211 for line in kernel_output: 120 212 line_num += 1 121 - line = line.rstrip() # line always has a trailing \n 122 - if kunit_start_re.search(line): 213 + line = line.rstrip() # remove trailing \n 214 + if not started and KTAP_START.search(line): 215 + # start extracting KTAP lines and set prefix 216 + # to number of characters before version line 217 + prefix_len = len( 218 + line.split('KTAP 
version')[0]) 219 + started = True 220 + yield line_num, line[prefix_len:] 221 + elif not started and TAP_START.search(line): 222 + # start extracting KTAP lines and set prefix 223 + # to number of characters before version line 123 224 prefix_len = len(line.split('TAP version')[0]) 124 225 started = True 125 226 yield line_num, line[prefix_len:] 126 - elif kunit_end_re.search(line): 227 + elif started and KTAP_END.search(line): 228 + # stop extracting KTAP lines 127 229 break 128 230 elif started: 129 - yield line_num, line[prefix_len:] 130 - return LineStream(lines=isolate_kunit_output(kernel_output)) 231 + # remove prefix and any indention and yield 232 + # line with line number 233 + line = line[prefix_len:].lstrip() 234 + yield line_num, line 235 + return LineStream(lines=isolate_ktap_output(kernel_output)) 236 + 237 + KTAP_VERSIONS = [1] 238 + TAP_VERSIONS = [13, 14] 239 + 240 + def check_version(version_num: int, accepted_versions: List[int], 241 + version_type: str, test: Test) -> None: 242 + """ 243 + Adds error to test object if version number is too high or too 244 + low. 245 + 246 + Parameters: 247 + version_num - The inputted version number from the parsed KTAP or TAP 248 + header line 249 + accepted_version - List of accepted KTAP or TAP versions 250 + version_type - 'KTAP' or 'TAP' depending on the type of 251 + version line. 252 + test - Test object for current test being parsed 253 + """ 254 + if version_num < min(accepted_versions): 255 + test.add_error(version_type + 256 + ' version lower than expected!') 257 + elif version_num > max(accepted_versions): 258 + test.add_error( 259 + version_type + ' version higher than expected!') 260 + 261 + def parse_ktap_header(lines: LineStream, test: Test) -> bool: 262 + """ 263 + Parses KTAP/TAP header line and checks version number. 264 + Returns False if fails to parse KTAP/TAP header line. 
265 + 266 + Accepted formats: 267 + - 'KTAP version [version number]' 268 + - 'TAP version [version number]' 269 + 270 + Parameters: 271 + lines - LineStream of KTAP output to parse 272 + test - Test object for current test being parsed 273 + 274 + Return: 275 + True if successfully parsed KTAP/TAP header line 276 + """ 277 + ktap_match = KTAP_START.match(lines.peek()) 278 + tap_match = TAP_START.match(lines.peek()) 279 + if ktap_match: 280 + version_num = int(ktap_match.group(1)) 281 + check_version(version_num, KTAP_VERSIONS, 'KTAP', test) 282 + elif tap_match: 283 + version_num = int(tap_match.group(1)) 284 + check_version(version_num, TAP_VERSIONS, 'TAP', test) 285 + else: 286 + return False 287 + test.log.append(lines.pop()) 288 + return True 289 + 290 + TEST_HEADER = re.compile(r'^# Subtest: (.*)$') 291 + 292 + def parse_test_header(lines: LineStream, test: Test) -> bool: 293 + """ 294 + Parses test header and stores test name in test object. 295 + Returns False if fails to parse test header line. 296 + 297 + Accepted format: 298 + - '# Subtest: [test name]' 299 + 300 + Parameters: 301 + lines - LineStream of KTAP output to parse 302 + test - Test object for current test being parsed 303 + 304 + Return: 305 + True if successfully parsed test header line 306 + """ 307 + match = TEST_HEADER.match(lines.peek()) 308 + if not match: 309 + return False 310 + test.log.append(lines.pop()) 311 + test.name = match.group(1) 312 + return True 313 + 314 + TEST_PLAN = re.compile(r'1\.\.([0-9]+)') 315 + 316 + def parse_test_plan(lines: LineStream, test: Test) -> bool: 317 + """ 318 + Parses test plan line and stores the expected number of subtests in 319 + test object. Reports an error if expected count is 0. 320 + Returns False and reports missing test plan error if fails to parse 321 + test plan. 
322 + 323 + Accepted format: 324 + - '1..[number of subtests]' 325 + 326 + Parameters: 327 + lines - LineStream of KTAP output to parse 328 + test - Test object for current test being parsed 329 + 330 + Return: 331 + True if successfully parsed test plan line 332 + """ 333 + match = TEST_PLAN.match(lines.peek()) 334 + if not match: 335 + test.expected_count = None 336 + test.add_error('missing plan line!') 337 + return False 338 + test.log.append(lines.pop()) 339 + expected_count = int(match.group(1)) 340 + test.expected_count = expected_count 341 + if expected_count == 0: 342 + test.status = TestStatus.NO_TESTS 343 + test.add_error('0 tests run!') 344 + return True 345 + 346 + TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$') 347 + 348 + TEST_RESULT_SKIP = re.compile(r'^(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$') 349 + 350 + def peek_test_name_match(lines: LineStream, test: Test) -> bool: 351 + """ 352 + Matches current line with the format of a test result line and checks 353 + if the name matches the name of the current test. 354 + Returns False if fails to match format or name. 355 + 356 + Accepted format: 357 + - '[ok|not ok] [test number] [-] [test name] [optional skip 358 + directive]' 359 + 360 + Parameters: 361 + lines - LineStream of KTAP output to parse 362 + test - Test object for current test being parsed 363 + 364 + Return: 365 + True if matched a test result line and the name matching the 366 + expected test name 367 + """ 368 + line = lines.peek() 369 + match = TEST_RESULT.match(line) 370 + if not match: 371 + return False 372 + name = match.group(4) 373 + return (name == test.name) 374 + 375 + def parse_test_result(lines: LineStream, test: Test, 376 + expected_num: int) -> bool: 377 + """ 378 + Parses test result line and stores the status and name in the test 379 + object. Reports an error if the test number does not match expected 380 + test number. 381 + Returns False if fails to parse test result line. 
382 + 383 + Note that the SKIP directive is the only direction that causes a 384 + change in status. 385 + 386 + Accepted format: 387 + - '[ok|not ok] [test number] [-] [test name] [optional skip 388 + directive]' 389 + 390 + Parameters: 391 + lines - LineStream of KTAP output to parse 392 + test - Test object for current test being parsed 393 + expected_num - expected test number for current test 394 + 395 + Return: 396 + True if successfully parsed a test result line. 397 + """ 398 + line = lines.peek() 399 + match = TEST_RESULT.match(line) 400 + skip_match = TEST_RESULT_SKIP.match(line) 401 + 402 + # Check if line matches test result line format 403 + if not match: 404 + return False 405 + test.log.append(lines.pop()) 406 + 407 + # Set name of test object 408 + if skip_match: 409 + test.name = skip_match.group(4) 410 + else: 411 + test.name = match.group(4) 412 + 413 + # Check test num 414 + num = int(match.group(2)) 415 + if num != expected_num: 416 + test.add_error('Expected test number ' + 417 + str(expected_num) + ' but found ' + str(num)) 418 + 419 + # Set status of test object 420 + status = match.group(1) 421 + if skip_match: 422 + test.status = TestStatus.SKIPPED 423 + elif status == 'ok': 424 + test.status = TestStatus.SUCCESS 425 + else: 426 + test.status = TestStatus.FAILURE 427 + return True 428 + 429 + def parse_diagnostic(lines: LineStream) -> List[str]: 430 + """ 431 + Parse lines that do not match the format of a test result line or 432 + test header line and returns them in list. 
433 + 434 + Line formats that are not parsed: 435 + - '# Subtest: [test name]' 436 + - '[ok|not ok] [test number] [-] [test name] [optional skip 437 + directive]' 438 + 439 + Parameters: 440 + lines - LineStream of KTAP output to parse 441 + 442 + Return: 443 + Log of diagnostic lines 444 + """ 445 + log = [] # type: List[str] 446 + while lines and not TEST_RESULT.match(lines.peek()) and not \ 447 + TEST_HEADER.match(lines.peek()): 448 + log.append(lines.pop()) 449 + return log 450 + 451 + DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^# .*?: kunit test case crashed!$') 452 + 453 + def parse_crash_in_log(test: Test) -> bool: 454 + """ 455 + Iterate through the lines of the log to parse for crash message. 456 + If crash message found, set status to crashed and return True. 457 + Otherwise return False. 458 + 459 + Parameters: 460 + test - Test object for current test being parsed 461 + 462 + Return: 463 + True if crash message found in log 464 + """ 465 + for line in test.log: 466 + if DIAGNOSTIC_CRASH_MESSAGE.match(line): 467 + test.status = TestStatus.TEST_CRASHED 468 + return True 469 + return False 470 + 471 + 472 + # Printing helper methods: 131 473 132 474 DIVIDER = '=' * 60 133 475 134 476 RESET = '\033[0;0m' 135 477 136 - def red(text) -> str: 478 + def red(text: str) -> str: 479 + """Returns inputted string with red color code.""" 137 480 return '\033[1;31m' + text + RESET 138 481 139 - def yellow(text) -> str: 482 + def yellow(text: str) -> str: 483 + """Returns inputted string with yellow color code.""" 140 484 return '\033[1;33m' + text + RESET 141 485 142 - def green(text) -> str: 486 + def green(text: str) -> str: 487 + """Returns inputted string with green color code.""" 143 488 return '\033[1;32m' + text + RESET 144 489 145 - def print_with_timestamp(message) -> None: 490 + ANSI_LEN = len(red('')) 491 + 492 + def print_with_timestamp(message: str) -> None: 493 + """Prints message with timestamp at beginning.""" 146 494 print('[%s] %s' % 
(datetime.now().strftime('%H:%M:%S'), message)) 147 495 148 - def format_suite_divider(message) -> str: 149 - return '======== ' + message + ' ========' 496 + def format_test_divider(message: str, len_message: int) -> str: 497 + """ 498 + Returns string with message centered in fixed width divider. 150 499 151 - def print_suite_divider(message) -> None: 152 - print_with_timestamp(DIVIDER) 153 - print_with_timestamp(format_suite_divider(message)) 500 + Example: 501 + '===================== message example =====================' 154 502 155 - def print_log(log) -> None: 503 + Parameters: 504 + message - message to be centered in divider line 505 + len_message - length of the message to be printed such that 506 + any characters of the color codes are not counted 507 + 508 + Return: 509 + String containing message centered in fixed width divider 510 + """ 511 + default_count = 3 # default number of dashes 512 + len_1 = default_count 513 + len_2 = default_count 514 + difference = len(DIVIDER) - len_message - 2 # 2 spaces added 515 + if difference > 0: 516 + # calculate number of dashes for each side of the divider 517 + len_1 = int(difference / 2) 518 + len_2 = difference - len_1 519 + return ('=' * len_1) + ' ' + message + ' ' + ('=' * len_2) 520 + 521 + def print_test_header(test: Test) -> None: 522 + """ 523 + Prints test header with test name and optionally the expected number 524 + of subtests. 
525 + 526 + Example: 527 + '=================== example (2 subtests) ===================' 528 + 529 + Parameters: 530 + test - Test object representing current test being printed 531 + """ 532 + message = test.name 533 + if test.expected_count: 534 + if test.expected_count == 1: 535 + message += (' (' + str(test.expected_count) + 536 + ' subtest)') 537 + else: 538 + message += (' (' + str(test.expected_count) + 539 + ' subtests)') 540 + print_with_timestamp(format_test_divider(message, len(message))) 541 + 542 + def print_log(log: Iterable[str]) -> None: 543 + """ 544 + Prints all strings in saved log for test in yellow. 545 + 546 + Parameters: 547 + log - Iterable object with all strings saved in log for test 548 + """ 156 549 for m in log: 157 - print_with_timestamp(m) 550 + print_with_timestamp(yellow(m)) 158 551 159 - TAP_ENTRIES = re.compile(r'^(TAP|[\s]*ok|[\s]*not ok|[\s]*[0-9]+\.\.[0-9]+|[\s]*# (Subtest:|.*: kunit test case crashed!)).*$') 552 + def format_test_result(test: Test) -> str: 553 + """ 554 + Returns string with formatted test result with colored status and test 555 + name. 
160 556 161 - def consume_non_diagnostic(lines: LineStream) -> None: 162 - while lines and not TAP_ENTRIES.match(lines.peek()): 163 - lines.pop() 557 + Example: 558 + '[PASSED] example' 164 559 165 - def save_non_diagnostic(lines: LineStream, test_case: TestCase) -> None: 166 - while lines and not TAP_ENTRIES.match(lines.peek()): 167 - test_case.log.append(lines.peek()) 168 - lines.pop() 560 + Parameters: 561 + test - Test object representing current test being printed 169 562 170 - OkNotOkResult = namedtuple('OkNotOkResult', ['is_ok','description', 'text']) 171 - 172 - OK_NOT_OK_SKIP = re.compile(r'^[\s]*(ok|not ok) [0-9]+ - (.*) # SKIP(.*)$') 173 - 174 - OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$') 175 - 176 - OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$') 177 - 178 - def parse_ok_not_ok_test_case(lines: LineStream, test_case: TestCase) -> bool: 179 - save_non_diagnostic(lines, test_case) 180 - if not lines: 181 - test_case.status = TestStatus.TEST_CRASHED 182 - return True 183 - line = lines.peek() 184 - match = OK_NOT_OK_SUBTEST.match(line) 185 - while not match and lines: 186 - line = lines.pop() 187 - match = OK_NOT_OK_SUBTEST.match(line) 188 - if match: 189 - test_case.log.append(lines.pop()) 190 - test_case.name = match.group(2) 191 - skip_match = OK_NOT_OK_SKIP.match(line) 192 - if skip_match: 193 - test_case.status = TestStatus.SKIPPED 194 - return True 195 - if test_case.status == TestStatus.TEST_CRASHED: 196 - return True 197 - if match.group(1) == 'ok': 198 - test_case.status = TestStatus.SUCCESS 199 - else: 200 - test_case.status = TestStatus.FAILURE 201 - return True 563 + Return: 564 + String containing formatted test result 565 + """ 566 + if test.status == TestStatus.SUCCESS: 567 + return (green('[PASSED] ') + test.name) 568 + elif test.status == TestStatus.SKIPPED: 569 + return (yellow('[SKIPPED] ') + test.name) 570 + elif test.status == TestStatus.TEST_CRASHED: 571 + print_log(test.log) 572 + return 
(red('[CRASHED] ') + test.name) 202 573 else: 203 - return False 574 + print_log(test.log) 575 + return (red('[FAILED] ') + test.name) 204 576 205 - SUBTEST_DIAGNOSTIC = re.compile(r'^[\s]+# (.*)$') 206 - DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^[\s]+# .*?: kunit test case crashed!$') 577 + def print_test_result(test: Test) -> None: 578 + """ 579 + Prints result line with status of test. 207 580 208 - def parse_diagnostic(lines: LineStream, test_case: TestCase) -> bool: 209 - save_non_diagnostic(lines, test_case) 210 - if not lines: 211 - return False 212 - line = lines.peek() 213 - match = SUBTEST_DIAGNOSTIC.match(line) 214 - if match: 215 - test_case.log.append(lines.pop()) 216 - crash_match = DIAGNOSTIC_CRASH_MESSAGE.match(line) 217 - if crash_match: 218 - test_case.status = TestStatus.TEST_CRASHED 219 - return True 581 + Example: 582 + '[PASSED] example' 583 + 584 + Parameters: 585 + test - Test object representing current test being printed 586 + """ 587 + print_with_timestamp(format_test_result(test)) 588 + 589 + def print_test_footer(test: Test) -> None: 590 + """ 591 + Prints test footer with status of test. 592 + 593 + Example: 594 + '===================== [PASSED] example =====================' 595 + 596 + Parameters: 597 + test - Test object representing current test being printed 598 + """ 599 + message = format_test_result(test) 600 + print_with_timestamp(format_test_divider(message, 601 + len(message) - ANSI_LEN)) 602 + 603 + def print_summary_line(test: Test) -> None: 604 + """ 605 + Prints summary line of test object. Color of line is dependent on 606 + status of test. Color is green if test passes, yellow if test is 607 + skipped, and red if the test fails or crashes. Summary line contains 608 + counts of the statuses of the tests subtests or the test itself if it 609 + has no subtests. 610 + 611 + Example: 612 + "Testing complete. 
Passed: 2, Failed: 0, Crashed: 0, Skipped: 0, 613 + Errors: 0" 614 + 615 + test - Test object representing current test being printed 616 + """ 617 + if test.status == TestStatus.SUCCESS: 618 + color = green 619 + elif test.status == TestStatus.SKIPPED or test.status == TestStatus.NO_TESTS: 620 + color = yellow 220 621 else: 221 - return False 622 + color = red 623 + counts = test.counts 624 + print_with_timestamp(color('Testing complete. ' + str(counts))) 222 625 223 - def parse_test_case(lines: LineStream) -> Optional[TestCase]: 224 - test_case = TestCase() 225 - save_non_diagnostic(lines, test_case) 226 - while parse_diagnostic(lines, test_case): 227 - pass 228 - if parse_ok_not_ok_test_case(lines, test_case): 229 - return test_case 626 + def print_error(error_message: str) -> None: 627 + """ 628 + Prints error message with error format. 629 + 630 + Example: 631 + "[ERROR] Test example: missing test plan!" 632 + 633 + Parameters: 634 + error_message - message describing error 635 + """ 636 + print_with_timestamp(red('[ERROR] ') + error_message) 637 + 638 + # Other methods: 639 + 640 + def bubble_up_test_results(test: Test) -> None: 641 + """ 642 + If the test has subtests, add the test counts of the subtests to the 643 + test and check if any of the tests crashed and if so set the test 644 + status to crashed. Otherwise if the test has no subtests add the 645 + status of the test to the test counts. 
646 + 647 + Parameters: 648 + test - Test object for current test being parsed 649 + """ 650 + parse_crash_in_log(test) 651 + subtests = test.subtests 652 + counts = test.counts 653 + status = test.status 654 + for t in subtests: 655 + counts.add_subtest_counts(t.counts) 656 + if counts.total() == 0: 657 + counts.add_status(status) 658 + elif test.counts.get_status() == TestStatus.TEST_CRASHED: 659 + test.status = TestStatus.TEST_CRASHED 660 + 661 + def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test: 662 + """ 663 + Finds next test to parse in LineStream, creates new Test object, 664 + parses any subtests of the test, populates Test object with all 665 + information (status, name) about the test and the Test objects for 666 + any subtests, and then returns the Test object. The method accepts 667 + three formats of tests: 668 + 669 + Accepted test formats: 670 + 671 + - Main KTAP/TAP header 672 + 673 + Example: 674 + 675 + KTAP version 1 676 + 1..4 677 + [subtests] 678 + 679 + - Subtest header line 680 + 681 + Example: 682 + 683 + # Subtest: name 684 + 1..3 685 + [subtests] 686 + ok 1 name 687 + 688 + - Test result line 689 + 690 + Example: 691 + 692 + ok 1 - test 693 + 694 + Parameters: 695 + lines - LineStream of KTAP output to parse 696 + expected_num - expected test number for test to be parsed 697 + log - list of strings containing any preceding diagnostic lines 698 + corresponding to the current test 699 + 700 + Return: 701 + Test object populated with characteristics and any subtests 702 + """ 703 + test = Test() 704 + test.log.extend(log) 705 + parent_test = False 706 + main = parse_ktap_header(lines, test) 707 + if main: 708 + # If KTAP/TAP header is found, attempt to parse 709 + # test plan 710 + test.name = "main" 711 + parse_test_plan(lines, test) 230 712 else: 231 - return None 232 - 233 - SUBTEST_HEADER = re.compile(r'^[\s]+# Subtest: (.*)$') 234 - 235 - def parse_subtest_header(lines: LineStream) -> Optional[str]: 236 - 
consume_non_diagnostic(lines) 237 - if not lines: 238 - return None 239 - match = SUBTEST_HEADER.match(lines.peek()) 240 - if match: 241 - lines.pop() 242 - return match.group(1) 243 - else: 244 - return None 245 - 246 - SUBTEST_PLAN = re.compile(r'[\s]+[0-9]+\.\.([0-9]+)') 247 - 248 - def parse_subtest_plan(lines: LineStream) -> Optional[int]: 249 - consume_non_diagnostic(lines) 250 - match = SUBTEST_PLAN.match(lines.peek()) 251 - if match: 252 - lines.pop() 253 - return int(match.group(1)) 254 - else: 255 - return None 256 - 257 - def max_status(left: TestStatus, right: TestStatus) -> TestStatus: 258 - if left == right: 259 - return left 260 - elif left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED: 261 - return TestStatus.TEST_CRASHED 262 - elif left == TestStatus.FAILURE or right == TestStatus.FAILURE: 263 - return TestStatus.FAILURE 264 - elif left == TestStatus.SKIPPED: 265 - return right 266 - else: 267 - return left 268 - 269 - def parse_ok_not_ok_test_suite(lines: LineStream, 270 - test_suite: TestSuite, 271 - expected_suite_index: int) -> bool: 272 - consume_non_diagnostic(lines) 273 - if not lines: 274 - test_suite.status = TestStatus.TEST_CRASHED 275 - return False 276 - line = lines.peek() 277 - match = OK_NOT_OK_MODULE.match(line) 278 - if match: 279 - lines.pop() 280 - if match.group(1) == 'ok': 281 - test_suite.status = TestStatus.SUCCESS 282 - else: 283 - test_suite.status = TestStatus.FAILURE 284 - skip_match = OK_NOT_OK_SKIP.match(line) 285 - if skip_match: 286 - test_suite.status = TestStatus.SKIPPED 287 - suite_index = int(match.group(2)) 288 - if suite_index != expected_suite_index: 289 - print_with_timestamp( 290 - red('[ERROR] ') + 'expected_suite_index ' + 291 - str(expected_suite_index) + ', but got ' + 292 - str(suite_index)) 293 - return True 294 - else: 295 - return False 296 - 297 - def bubble_up_errors(status_list: Iterable[TestStatus]) -> TestStatus: 298 - return reduce(max_status, status_list, TestStatus.SKIPPED) 
299 - 300 - def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus: 301 - max_test_case_status = bubble_up_errors(x.status for x in test_suite.cases) 302 - return max_status(max_test_case_status, test_suite.status) 303 - 304 - def parse_test_suite(lines: LineStream, expected_suite_index: int) -> Optional[TestSuite]: 305 - if not lines: 306 - return None 307 - consume_non_diagnostic(lines) 308 - test_suite = TestSuite() 309 - test_suite.status = TestStatus.SUCCESS 310 - name = parse_subtest_header(lines) 311 - if not name: 312 - return None 313 - test_suite.name = name 314 - expected_test_case_num = parse_subtest_plan(lines) 315 - if expected_test_case_num is None: 316 - return None 317 - while expected_test_case_num > 0: 318 - test_case = parse_test_case(lines) 319 - if not test_case: 320 - break 321 - test_suite.cases.append(test_case) 322 - expected_test_case_num -= 1 323 - if parse_ok_not_ok_test_suite(lines, test_suite, expected_suite_index): 324 - test_suite.status = bubble_up_test_case_errors(test_suite) 325 - return test_suite 326 - elif not lines: 327 - print_with_timestamp(red('[ERROR] ') + 'ran out of lines before end token') 328 - return test_suite 329 - else: 330 - print(f'failed to parse end of suite "{name}", at line {lines.line_number()}: {lines.peek()}') 331 - return None 332 - 333 - TAP_HEADER = re.compile(r'^TAP version 14$') 334 - 335 - def parse_tap_header(lines: LineStream) -> bool: 336 - consume_non_diagnostic(lines) 337 - if TAP_HEADER.match(lines.peek()): 338 - lines.pop() 339 - return True 340 - else: 341 - return False 342 - 343 - TEST_PLAN = re.compile(r'[0-9]+\.\.([0-9]+)') 344 - 345 - def parse_test_plan(lines: LineStream) -> Optional[int]: 346 - consume_non_diagnostic(lines) 347 - match = TEST_PLAN.match(lines.peek()) 348 - if match: 349 - lines.pop() 350 - return int(match.group(1)) 351 - else: 352 - return None 353 - 354 - def bubble_up_suite_errors(test_suites: Iterable[TestSuite]) -> TestStatus: 355 - return 
bubble_up_errors(x.status for x in test_suites) 356 - 357 - def parse_test_result(lines: LineStream) -> TestResult: 358 - consume_non_diagnostic(lines) 359 - if not lines or not parse_tap_header(lines): 360 - return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines) 361 - expected_test_suite_num = parse_test_plan(lines) 362 - if expected_test_suite_num == 0: 363 - return TestResult(TestStatus.NO_TESTS, [], lines) 364 - elif expected_test_suite_num is None: 365 - return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines) 366 - test_suites = [] 367 - for i in range(1, expected_test_suite_num + 1): 368 - test_suite = parse_test_suite(lines, i) 369 - if test_suite: 370 - test_suites.append(test_suite) 371 - else: 372 - print_with_timestamp( 373 - red('[ERROR] ') + ' expected ' + 374 - str(expected_test_suite_num) + 375 - ' test suites, but got ' + str(i - 2)) 376 - break 377 - test_suite = parse_test_suite(lines, -1) 378 - if test_suite: 379 - print_with_timestamp(red('[ERROR] ') + 380 - 'got unexpected test suite: ' + test_suite.name) 381 - if test_suites: 382 - return TestResult(bubble_up_suite_errors(test_suites), test_suites, lines) 383 - else: 384 - return TestResult(TestStatus.NO_TESTS, [], lines) 385 - 386 - class TestCounts: 387 - passed: int 388 - failed: int 389 - crashed: int 390 - skipped: int 391 - 392 - def __init__(self): 393 - self.passed = 0 394 - self.failed = 0 395 - self.crashed = 0 396 - self.skipped = 0 397 - 398 - def total(self) -> int: 399 - return self.passed + self.failed + self.crashed + self.skipped 400 - 401 - def print_and_count_results(test_result: TestResult) -> TestCounts: 402 - counts = TestCounts() 403 - for test_suite in test_result.suites: 404 - if test_suite.status == TestStatus.SUCCESS: 405 - print_suite_divider(green('[PASSED] ') + test_suite.name) 406 - elif test_suite.status == TestStatus.SKIPPED: 407 - print_suite_divider(yellow('[SKIPPED] ') + test_suite.name) 408 - elif test_suite.status == 
TestStatus.TEST_CRASHED: 409 - print_suite_divider(red('[CRASHED] ' + test_suite.name)) 410 - else: 411 - print_suite_divider(red('[FAILED] ') + test_suite.name) 412 - for test_case in test_suite.cases: 413 - if test_case.status == TestStatus.SUCCESS: 414 - counts.passed += 1 415 - print_with_timestamp(green('[PASSED] ') + test_case.name) 416 - elif test_case.status == TestStatus.SKIPPED: 417 - counts.skipped += 1 418 - print_with_timestamp(yellow('[SKIPPED] ') + test_case.name) 419 - elif test_case.status == TestStatus.TEST_CRASHED: 420 - counts.crashed += 1 421 - print_with_timestamp(red('[CRASHED] ' + test_case.name)) 422 - print_log(map(yellow, test_case.log)) 423 - print_with_timestamp('') 713 + # If KTAP/TAP header is not found, test must be subtest 714 + # header or test result line so parse attempt to parser 715 + # subtest header 716 + parent_test = parse_test_header(lines, test) 717 + if parent_test: 718 + # If subtest header is found, attempt to parse 719 + # test plan and print header 720 + parse_test_plan(lines, test) 721 + print_test_header(test) 722 + expected_count = test.expected_count 723 + subtests = [] 724 + test_num = 1 725 + while expected_count is None or test_num <= expected_count: 726 + # Loop to parse any subtests. 727 + # Break after parsing expected number of tests or 728 + # if expected number of tests is unknown break when test 729 + # result line with matching name to subtest header is found 730 + # or no more lines in stream. 
731 + sub_log = parse_diagnostic(lines) 732 + sub_test = Test() 733 + if not lines or (peek_test_name_match(lines, test) and 734 + not main): 735 + if expected_count and test_num <= expected_count: 736 + # If parser reaches end of test before 737 + # parsing expected number of subtests, print 738 + # crashed subtest and record error 739 + test.add_error('missing expected subtest!') 740 + sub_test.log.extend(sub_log) 741 + test.counts.add_status( 742 + TestStatus.TEST_CRASHED) 743 + print_test_result(sub_test) 424 744 else: 425 - counts.failed += 1 426 - print_with_timestamp(red('[FAILED] ') + test_case.name) 427 - print_log(map(yellow, test_case.log)) 428 - print_with_timestamp('') 429 - return counts 745 + test.log.extend(sub_log) 746 + break 747 + else: 748 + sub_test = parse_test(lines, test_num, sub_log) 749 + subtests.append(sub_test) 750 + test_num += 1 751 + test.subtests = subtests 752 + if not main: 753 + # If not main test, look for test result line 754 + test.log.extend(parse_diagnostic(lines)) 755 + if (parent_test and peek_test_name_match(lines, test)) or \ 756 + not parent_test: 757 + parse_test_result(lines, test, expected_num) 758 + else: 759 + test.add_error('missing subtest result line!') 760 + # Add statuses to TestCounts attribute in Test object 761 + bubble_up_test_results(test) 762 + if parent_test: 763 + # If test has subtests and is not the main test object, print 764 + # footer. 
765 + print_test_footer(test) 766 + elif not main: 767 + print_test_result(test) 768 + return test 430 769 431 770 def parse_run_tests(kernel_output: Iterable[str]) -> TestResult: 432 - counts = TestCounts() 433 - lines = extract_tap_lines(kernel_output) 434 - test_result = parse_test_result(lines) 435 - if test_result.status == TestStatus.NO_TESTS: 436 - print(red('[ERROR] ') + yellow('no tests run!')) 437 - elif test_result.status == TestStatus.FAILURE_TO_PARSE_TESTS: 438 - print(red('[ERROR] ') + yellow('could not parse test results!')) 439 - else: 440 - counts = print_and_count_results(test_result) 771 + """ 772 + Using kernel output, extract KTAP lines, parse the lines for test 773 + results and print condensed test results and summary line . 774 + 775 + Parameters: 776 + kernel_output - Iterable object contains lines of kernel output 777 + 778 + Return: 779 + TestResult - Tuple containg status of main test object, main test 780 + object with all subtests, and log of all KTAP lines. 781 + """ 441 782 print_with_timestamp(DIVIDER) 442 - if test_result.status == TestStatus.SUCCESS: 443 - fmt = green 444 - elif test_result.status == TestStatus.SKIPPED: 445 - fmt = yellow 783 + lines = extract_tap_lines(kernel_output) 784 + test = Test() 785 + if not lines: 786 + test.add_error('invalid KTAP input!') 787 + test.status = TestStatus.FAILURE_TO_PARSE_TESTS 446 788 else: 447 - fmt =red 448 - print_with_timestamp( 449 - fmt('Testing complete. %d tests run. %d failed. %d crashed. %d skipped.' % 450 - (counts.total(), counts.failed, counts.crashed, counts.skipped))) 451 - return test_result 789 + test = parse_test(lines, 0, []) 790 + if test.status != TestStatus.NO_TESTS: 791 + test.status = test.counts.get_status() 792 + print_with_timestamp(DIVIDER) 793 + print_summary_line(test) 794 + return TestResult(test.status, test, lines)
+98 -38
tools/testing/kunit/kunit_tool_test.py
··· 107 107 with open(log_path) as file: 108 108 result = kunit_parser.extract_tap_lines(file.readlines()) 109 109 self.assertContains('TAP version 14', result) 110 - self.assertContains(' # Subtest: example', result) 111 - self.assertContains(' 1..2', result) 112 - self.assertContains(' ok 1 - example_simple_test', result) 113 - self.assertContains(' ok 2 - example_mock_test', result) 110 + self.assertContains('# Subtest: example', result) 111 + self.assertContains('1..2', result) 112 + self.assertContains('ok 1 - example_simple_test', result) 113 + self.assertContains('ok 2 - example_mock_test', result) 114 114 self.assertContains('ok 1 - example', result) 115 115 116 116 def test_output_with_prefix_isolated_correctly(self): ··· 118 118 with open(log_path) as file: 119 119 result = kunit_parser.extract_tap_lines(file.readlines()) 120 120 self.assertContains('TAP version 14', result) 121 - self.assertContains(' # Subtest: kunit-resource-test', result) 122 - self.assertContains(' 1..5', result) 123 - self.assertContains(' ok 1 - kunit_resource_test_init_resources', result) 124 - self.assertContains(' ok 2 - kunit_resource_test_alloc_resource', result) 125 - self.assertContains(' ok 3 - kunit_resource_test_destroy_resource', result) 126 - self.assertContains(' foo bar #', result) 127 - self.assertContains(' ok 4 - kunit_resource_test_cleanup_resources', result) 128 - self.assertContains(' ok 5 - kunit_resource_test_proper_free_ordering', result) 121 + self.assertContains('# Subtest: kunit-resource-test', result) 122 + self.assertContains('1..5', result) 123 + self.assertContains('ok 1 - kunit_resource_test_init_resources', result) 124 + self.assertContains('ok 2 - kunit_resource_test_alloc_resource', result) 125 + self.assertContains('ok 3 - kunit_resource_test_destroy_resource', result) 126 + self.assertContains('foo bar #', result) 127 + self.assertContains('ok 4 - kunit_resource_test_cleanup_resources', result) 128 + self.assertContains('ok 5 - 
kunit_resource_test_proper_free_ordering', result) 129 129 self.assertContains('ok 1 - kunit-resource-test', result) 130 - self.assertContains(' foo bar # non-kunit output', result) 131 - self.assertContains(' # Subtest: kunit-try-catch-test', result) 132 - self.assertContains(' 1..2', result) 133 - self.assertContains(' ok 1 - kunit_test_try_catch_successful_try_no_catch', 130 + self.assertContains('foo bar # non-kunit output', result) 131 + self.assertContains('# Subtest: kunit-try-catch-test', result) 132 + self.assertContains('1..2', result) 133 + self.assertContains('ok 1 - kunit_test_try_catch_successful_try_no_catch', 134 134 result) 135 - self.assertContains(' ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch', 135 + self.assertContains('ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch', 136 136 result) 137 137 self.assertContains('ok 2 - kunit-try-catch-test', result) 138 - self.assertContains(' # Subtest: string-stream-test', result) 139 - self.assertContains(' 1..3', result) 140 - self.assertContains(' ok 1 - string_stream_test_empty_on_creation', result) 141 - self.assertContains(' ok 2 - string_stream_test_not_empty_after_add', result) 142 - self.assertContains(' ok 3 - string_stream_test_get_string', result) 138 + self.assertContains('# Subtest: string-stream-test', result) 139 + self.assertContains('1..3', result) 140 + self.assertContains('ok 1 - string_stream_test_empty_on_creation', result) 141 + self.assertContains('ok 2 - string_stream_test_not_empty_after_add', result) 142 + self.assertContains('ok 3 - string_stream_test_get_string', result) 143 143 self.assertContains('ok 3 - string-stream-test', result) 144 144 145 145 def test_parse_successful_test_log(self): ··· 149 149 self.assertEqual( 150 150 kunit_parser.TestStatus.SUCCESS, 151 151 result.status) 152 + 153 + def test_parse_successful_nested_tests_log(self): 154 + all_passed_log = test_data_path('test_is_test_passed-all_passed_nested.log') 155 + with open(all_passed_log) as 
file: 156 + result = kunit_parser.parse_run_tests(file.readlines()) 157 + self.assertEqual( 158 + kunit_parser.TestStatus.SUCCESS, 159 + result.status) 160 + 161 + def test_kselftest_nested(self): 162 + kselftest_log = test_data_path('test_is_test_passed-kselftest.log') 163 + with open(kselftest_log) as file: 164 + result = kunit_parser.parse_run_tests(file.readlines()) 165 + self.assertEqual( 166 + kunit_parser.TestStatus.SUCCESS, 167 + result.status) 152 168 153 169 def test_parse_failed_test_log(self): 154 170 failed_log = test_data_path('test_is_test_passed-failure.log') ··· 179 163 with open(empty_log) as file: 180 164 result = kunit_parser.parse_run_tests( 181 165 kunit_parser.extract_tap_lines(file.readlines())) 182 - self.assertEqual(0, len(result.suites)) 166 + self.assertEqual(0, len(result.test.subtests)) 183 167 self.assertEqual( 184 168 kunit_parser.TestStatus.FAILURE_TO_PARSE_TESTS, 185 169 result.status) 186 170 171 + def test_missing_test_plan(self): 172 + missing_plan_log = test_data_path('test_is_test_passed-' 173 + 'missing_plan.log') 174 + with open(missing_plan_log) as file: 175 + result = kunit_parser.parse_run_tests( 176 + kunit_parser.extract_tap_lines( 177 + file.readlines())) 178 + self.assertEqual(2, result.test.counts.errors) 179 + self.assertEqual( 180 + kunit_parser.TestStatus.SUCCESS, 181 + result.status) 182 + 187 183 def test_no_tests(self): 188 - empty_log = test_data_path('test_is_test_passed-no_tests_run_with_header.log') 189 - with open(empty_log) as file: 184 + header_log = test_data_path('test_is_test_passed-no_tests_run_with_header.log') 185 + with open(header_log) as file: 190 186 result = kunit_parser.parse_run_tests( 191 187 kunit_parser.extract_tap_lines(file.readlines())) 192 - self.assertEqual(0, len(result.suites)) 188 + self.assertEqual(0, len(result.test.subtests)) 193 189 self.assertEqual( 194 190 kunit_parser.TestStatus.NO_TESTS, 195 191 result.status) ··· 212 184 with open(crash_log) as file: 213 185 result = 
kunit_parser.parse_run_tests( 214 186 kunit_parser.extract_tap_lines(file.readlines())) 215 - print_mock.assert_any_call(StrContains('could not parse test results!')) 187 + print_mock.assert_any_call(StrContains('invalid KTAP input!')) 216 188 print_mock.stop() 217 - self.assertEqual(0, len(result.suites)) 189 + self.assertEqual(0, len(result.test.subtests)) 218 190 219 191 def test_crashed_test(self): 220 192 crashed_log = test_data_path('test_is_test_passed-crash.log') 221 193 with open(crashed_log) as file: 222 - result = kunit_parser.parse_run_tests(file.readlines()) 194 + result = kunit_parser.parse_run_tests( 195 + file.readlines()) 223 196 self.assertEqual( 224 197 kunit_parser.TestStatus.TEST_CRASHED, 225 198 result.status) ··· 244 215 kunit_parser.TestStatus.SKIPPED, 245 216 result.status) 246 217 218 + def test_ignores_hyphen(self): 219 + hyphen_log = test_data_path('test_strip_hyphen.log') 220 + file = open(hyphen_log) 221 + result = kunit_parser.parse_run_tests(file.readlines()) 222 + 223 + # A skipped test does not fail the whole suite. 
224 + self.assertEqual( 225 + kunit_parser.TestStatus.SUCCESS, 226 + result.status) 227 + self.assertEqual( 228 + "sysctl_test", 229 + result.test.subtests[0].name) 230 + self.assertEqual( 231 + "example", 232 + result.test.subtests[1].name) 233 + file.close() 234 + 247 235 248 236 def test_ignores_prefix_printk_time(self): 249 237 prefix_log = test_data_path('test_config_printk_time.log') ··· 269 223 self.assertEqual( 270 224 kunit_parser.TestStatus.SUCCESS, 271 225 result.status) 272 - self.assertEqual('kunit-resource-test', result.suites[0].name) 226 + self.assertEqual('kunit-resource-test', result.test.subtests[0].name) 273 227 274 228 def test_ignores_multiple_prefixes(self): 275 229 prefix_log = test_data_path('test_multiple_prefixes.log') ··· 278 232 self.assertEqual( 279 233 kunit_parser.TestStatus.SUCCESS, 280 234 result.status) 281 - self.assertEqual('kunit-resource-test', result.suites[0].name) 235 + self.assertEqual('kunit-resource-test', result.test.subtests[0].name) 282 236 283 237 def test_prefix_mixed_kernel_output(self): 284 238 mixed_prefix_log = test_data_path('test_interrupted_tap_output.log') ··· 287 241 self.assertEqual( 288 242 kunit_parser.TestStatus.SUCCESS, 289 243 result.status) 290 - self.assertEqual('kunit-resource-test', result.suites[0].name) 244 + self.assertEqual('kunit-resource-test', result.test.subtests[0].name) 291 245 292 246 def test_prefix_poundsign(self): 293 247 pound_log = test_data_path('test_pound_sign.log') ··· 296 250 self.assertEqual( 297 251 kunit_parser.TestStatus.SUCCESS, 298 252 result.status) 299 - self.assertEqual('kunit-resource-test', result.suites[0].name) 253 + self.assertEqual('kunit-resource-test', result.test.subtests[0].name) 300 254 301 255 def test_kernel_panic_end(self): 302 256 panic_log = test_data_path('test_kernel_panic_interrupt.log') ··· 305 259 self.assertEqual( 306 260 kunit_parser.TestStatus.TEST_CRASHED, 307 261 result.status) 308 - self.assertEqual('kunit-resource-test', 
result.suites[0].name) 262 + self.assertEqual('kunit-resource-test', result.test.subtests[0].name) 309 263 310 264 def test_pound_no_prefix(self): 311 265 pound_log = test_data_path('test_pound_no_prefix.log') ··· 314 268 self.assertEqual( 315 269 kunit_parser.TestStatus.SUCCESS, 316 270 result.status) 317 - self.assertEqual('kunit-resource-test', result.suites[0].name) 271 + self.assertEqual('kunit-resource-test', result.test.subtests[0].name) 318 272 319 273 class LinuxSourceTreeTest(unittest.TestCase): 320 274 ··· 387 341 result = self._json_for('test_is_test_passed-no_tests_run_with_header.log') 388 342 self.assertEqual(0, len(result['sub_groups'])) 389 343 344 + def test_nested_json(self): 345 + result = self._json_for('test_is_test_passed-all_passed_nested.log') 346 + self.assertEqual( 347 + {'name': 'example_simple_test', 'status': 'PASS'}, 348 + result["sub_groups"][0]["sub_groups"][0]["test_cases"][0]) 349 + 390 350 class StrContains(str): 391 351 def __eq__(self, other): 392 352 return self in other ··· 451 399 self.assertEqual(e.exception.code, 1) 452 400 self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) 453 401 self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1) 454 - self.print_mock.assert_any_call(StrContains(' 0 tests run')) 402 + self.print_mock.assert_any_call(StrContains('invalid KTAP input!')) 403 + 404 + def test_exec_no_tests(self): 405 + self.linux_source_mock.run_kernel = mock.Mock(return_value=['TAP version 14', '1..0']) 406 + with self.assertRaises(SystemExit) as e: 407 + kunit.main(['run'], self.linux_source_mock) 408 + self.linux_source_mock.run_kernel.assert_called_once_with( 409 + args=None, build_dir='.kunit', filter_glob='', timeout=300) 410 + self.print_mock.assert_any_call(StrContains(' 0 tests run!')) 455 411 456 412 def test_exec_raw_output(self): 457 413 self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) ··· 467 407 self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1) 468 
408 for call in self.print_mock.call_args_list: 469 409 self.assertNotEqual(call, mock.call(StrContains('Testing complete.'))) 470 - self.assertNotEqual(call, mock.call(StrContains(' 0 tests run'))) 410 + self.assertNotEqual(call, mock.call(StrContains(' 0 tests run!'))) 471 411 472 412 def test_run_raw_output(self): 473 413 self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) ··· 476 416 self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1) 477 417 for call in self.print_mock.call_args_list: 478 418 self.assertNotEqual(call, mock.call(StrContains('Testing complete.'))) 479 - self.assertNotEqual(call, mock.call(StrContains(' 0 tests run'))) 419 + self.assertNotEqual(call, mock.call(StrContains(' 0 tests run!'))) 480 420 481 421 def test_run_raw_output_kunit(self): 482 422 self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
+34
tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log
··· 1 + TAP version 14 2 + 1..2 3 + # Subtest: sysctl_test 4 + 1..4 5 + # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed 6 + ok 1 - sysctl_test_dointvec_null_tbl_data 7 + # Subtest: example 8 + 1..2 9 + init_suite 10 + # example_simple_test: initializing 11 + # example_simple_test: example_simple_test passed 12 + ok 1 - example_simple_test 13 + # example_mock_test: initializing 14 + # example_mock_test: example_mock_test passed 15 + ok 2 - example_mock_test 16 + kunit example: all tests passed 17 + ok 2 - example 18 + # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed 19 + ok 3 - sysctl_test_dointvec_table_len_is_zero 20 + # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed 21 + ok 4 - sysctl_test_dointvec_table_read_but_position_set 22 + kunit sysctl_test: all tests passed 23 + ok 1 - sysctl_test 24 + # Subtest: example 25 + 1..2 26 + init_suite 27 + # example_simple_test: initializing 28 + # example_simple_test: example_simple_test passed 29 + ok 1 - example_simple_test 30 + # example_mock_test: initializing 31 + # example_mock_test: example_mock_test passed 32 + ok 2 - example_mock_test 33 + kunit example: all tests passed 34 + ok 2 - example
+14
tools/testing/kunit/test_data/test_is_test_passed-kselftest.log
··· 1 + TAP version 13 2 + 1..2 3 + # selftests: membarrier: membarrier_test_single_thread 4 + # TAP version 13 5 + # 1..2 6 + # ok 1 sys_membarrier available 7 + # ok 2 sys membarrier invalid command test: command = -1, flags = 0, errno = 22. Failed as expected 8 + ok 1 selftests: membarrier: membarrier_test_single_thread 9 + # selftests: membarrier: membarrier_test_multi_thread 10 + # TAP version 13 11 + # 1..2 12 + # ok 1 sys_membarrier available 13 + # ok 2 sys membarrier invalid command test: command = -1, flags = 0, errno = 22. Failed as expected 14 + ok 2 selftests: membarrier: membarrier_test_multi_thread
+31
tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log
··· 1 + KTAP version 1 2 + # Subtest: sysctl_test 3 + # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed 4 + ok 1 - sysctl_test_dointvec_null_tbl_data 5 + # sysctl_test_dointvec_table_maxlen_unset: sysctl_test_dointvec_table_maxlen_unset passed 6 + ok 2 - sysctl_test_dointvec_table_maxlen_unset 7 + # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed 8 + ok 3 - sysctl_test_dointvec_table_len_is_zero 9 + # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed 10 + ok 4 - sysctl_test_dointvec_table_read_but_position_set 11 + # sysctl_test_dointvec_happy_single_positive: sysctl_test_dointvec_happy_single_positive passed 12 + ok 5 - sysctl_test_dointvec_happy_single_positive 13 + # sysctl_test_dointvec_happy_single_negative: sysctl_test_dointvec_happy_single_negative passed 14 + ok 6 - sysctl_test_dointvec_happy_single_negative 15 + # sysctl_test_dointvec_single_less_int_min: sysctl_test_dointvec_single_less_int_min passed 16 + ok 7 - sysctl_test_dointvec_single_less_int_min 17 + # sysctl_test_dointvec_single_greater_int_max: sysctl_test_dointvec_single_greater_int_max passed 18 + ok 8 - sysctl_test_dointvec_single_greater_int_max 19 + kunit sysctl_test: all tests passed 20 + ok 1 - sysctl_test 21 + # Subtest: example 22 + 1..2 23 + init_suite 24 + # example_simple_test: initializing 25 + # example_simple_test: example_simple_test passed 26 + ok 1 - example_simple_test 27 + # example_mock_test: initializing 28 + # example_mock_test: example_mock_test passed 29 + ok 2 - example_mock_test 30 + kunit example: all tests passed 31 + ok 2 - example
+16
tools/testing/kunit/test_data/test_strip_hyphen.log
··· 1 + KTAP version 1 2 + 1..2 3 + # Subtest: sysctl_test 4 + 1..1 5 + # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed 6 + ok 1 - sysctl_test_dointvec_null_tbl_data 7 + kunit sysctl_test: all tests passed 8 + ok 1 - sysctl_test 9 + # Subtest: example 10 + 1..1 11 + init_suite 12 + # example_simple_test: initializing 13 + # example_simple_test: example_simple_test passed 14 + ok 1 example_simple_test 15 + kunit example: all tests passed 16 + ok 2 example