"Das U-Boot" Source Tree
1# SPDX-License-Identifier: GPL-2.0
2# Copyright (c) 2015 Stephen Warren
3# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
4
5# Implementation of pytest run-time hook functions. These are invoked by
6# pytest at certain points during operation, e.g. startup, for each executed
7# test, at shutdown etc. These hooks perform functions such as:
8# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
10# - Creating the U-Boot console test fixture.
11# - Creating the HTML log file.
12# - Monitoring each test's results.
13# - Implementing custom pytest markers.
14
import atexit
import configparser
import errno
import io
import os
import os.path
from pathlib import Path
import re
import shutil
import subprocess
import sys
import time

import filelock
import pytest
from _pytest.runner import runtestprotocol

from u_boot_spawn import BootFail, Timeout, Unexpected, handle_exception
30
# Globals: The HTML log file, and the connection to the U-Boot console.
# Both are initialized in pytest_configure().
log = None
console = None

# Absolute path of this test/py directory within the U-Boot source tree
TEST_PY_DIR = os.path.dirname(os.path.abspath(__file__))

# Regex for test-function symbols: matches linker-list entries of the form
# _u_boot_list_2_ut_<suite>_2_<test>, capturing (suite, test)
RE_UT_TEST_LIST = re.compile(r'[^a-zA-Z0-9_]_u_boot_list_2_ut_(.*)_2_(.*)\s*$')
39
def mkdir_p(path):
    """Create a directory path.

    This includes creating any intermediate/parent directories. Any errors
    caused due to already extant directories are ignored; other errors (e.g.
    permission denied, or the path existing as a regular file) propagate.

    Args:
        path: The directory path to create.

    Returns:
        Nothing.
    """
    # exist_ok=True replaces the old EEXIST/isdir dance and avoids the race
    # between checking for the directory and creating it. It still raises
    # FileExistsError when the path exists but is not a directory, matching
    # the previous behaviour.
    os.makedirs(path, exist_ok=True)
60
def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.

    Returns:
        Nothing.
    """
    # (option flags, keyword arguments) for each custom option, in the order
    # they should appear in --help output
    option_table = (
        (('--build-dir',),
         dict(default=None, help='U-Boot build directory (O=)')),
        (('--build-dir-extra',),
         dict(default=None,
              help='U-Boot build directory for extra build (O=)')),
        (('--result-dir',),
         dict(default=None, help='U-Boot test result/tmp directory')),
        (('--persistent-data-dir',),
         dict(default=None,
              help='U-Boot test persistent generated data directory')),
        (('--board-type', '--bd', '-B'),
         dict(default='sandbox', help='U-Boot board type')),
        (('--board-type-extra', '--bde'),
         dict(default='sandbox', help='U-Boot extra board type')),
        (('--board-identity', '--id'),
         dict(default='na', help='U-Boot board identity/instance')),
        (('--build',),
         dict(default=False, action='store_true',
              help='Compile U-Boot before running tests')),
        (('--buildman',),
         dict(default=False, action='store_true',
              help='Use buildman to build U-Boot (assuming --build is given)')),
        (('--gdbserver',),
         dict(default=None,
              help='Run sandbox under gdbserver. The argument is the channel '
                   'over which gdbserver should communicate, e.g. '
                   'localhost:1234')),
        (('--role',),
         dict(help='U-Boot board role (for Labgrid-sjg)')),
        (('--use-running-system',),
         dict(default=False, action='store_true',
              help="Assume that U-Boot is ready and don't wait for a prompt")),
        (('--timing',),
         dict(default=False, action='store_true',
              help='Show info on test timing')),
    )
    for flags, kwargs in option_table:
        parser.addoption(*flags, **kwargs)
97
98
def run_build(config, source_dir, build_dir, board_type, log):
    """run_build: Build U-Boot

    Uses buildman if --buildman was given, otherwise invokes make directly
    (defconfig followed by a parallel build).

    Args:
        config: The pytest configuration.
        source_dir (str): Directory containing source code
        build_dir (str): Directory to build in
        board_type (str): board_type parameter (e.g. 'sandbox')
        log (Logfile): Log file to use
    """
    if config.getoption('buildman'):
        if build_dir != source_dir:
            dest_args = ['-o', build_dir, '-w']
        else:
            # In-tree build
            dest_args = ['-i']
        cmds = (['buildman', '--board', board_type] + dest_args,)
        name = 'buildman'
    else:
        # An empty O= argument is harmless for an in-tree build
        o_opt = f'O={build_dir}' if build_dir != source_dir else ''
        cmds = (
            ['make', o_opt, '-s', board_type + '_defconfig'],
            ['make', o_opt, '-s', f'-j{os.cpu_count()}'],
        )
        name = 'make'

    with log.section(name):
        runner = log.get_runner(name, sys.stdout)
        for cmd in cmds:
            runner.run(cmd, cwd=source_dir)
        runner.close()
        log.status_pass('OK')
133
def get_details(config):
    """Obtain salient details about the board and directories to use

    Args:
        config (pytest.Config): pytest configuration

    Returns:
        tuple:
            str: Board type (U-Boot build name)
            str: Extra board type (where two U-Boot builds are needed)
            str: Identity for the lab board
            str: Build directory
            str: Extra build directory (where two U-Boot builds are needed)
            str: Source directory

    Raises:
        ValueError: The u-boot-test-getrole script failed.
    """
    role = config.getoption('role')

    # Get a few provided parameters
    build_dir = config.getoption('build_dir')
    build_dir_extra = config.getoption('build_dir_extra')

    # The source tree must be the current directory
    source_dir = os.path.dirname(os.path.dirname(TEST_PY_DIR))
    if role:
        # When using a role, build_dir and build_dir_extra are normally not
        # set, since they are picked up from Labgrid-sjg via the
        # u-boot-test-getrole script
        board_identity = role
        cmd = ['u-boot-test-getrole', role, '--configure']
        env = os.environ.copy()
        if build_dir:
            env['U_BOOT_BUILD_DIR'] = build_dir
        if build_dir_extra:
            env['U_BOOT_BUILD_DIR_EXTRA'] = build_dir_extra

        # Make sure the script sees that it is being run from pytest
        env['U_BOOT_SOURCE_DIR'] = source_dir

        proc = subprocess.run(cmd, stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT, encoding='utf-8',
                              env=env)
        if proc.returncode:
            # stderr is merged into stdout above, so proc.stderr is always
            # None; report stdout only (the old message printed None and had
            # mismatched quotes)
            raise ValueError(
                f"Error {proc.returncode} running {cmd}: '{proc.stdout}'")
        # For debugging
        # print('conftest: lab:', proc.stdout)

        # Each output line has the form 'prefix:key value'; keep the last
        # colon-separated component as the key
        vals = {}
        for line in proc.stdout.splitlines():
            item, value = line.split(' ', maxsplit=1)
            k = item.split(':')[-1]
            vals[k] = value
        # For debugging
        # print('conftest: lab info:', vals)

        # Read the build directories here, in case none were provided in the
        # command-line arguments
        (board_type, board_type_extra, default_build_dir,
         default_build_dir_extra) = (vals['board'],
            vals['board_extra'], vals['build_dir'], vals['build_dir_extra'])
    else:
        board_type = config.getoption('board_type')
        board_type_extra = config.getoption('board_type_extra')
        board_identity = config.getoption('board_identity')

        default_build_dir = source_dir + '/build-' + board_type
        default_build_dir_extra = source_dir + '/build-' + board_type_extra

    # Use the provided command-line arguments if present, else fall back to
    # the defaults determined above
    if not build_dir:
        build_dir = default_build_dir
    if not build_dir_extra:
        build_dir_extra = default_build_dir_extra

    return (board_type, board_type_extra, board_identity, build_dir,
            build_dir_extra, source_dir)
208
def pytest_xdist_setupnodes(config, specs):
    """pytest hook: Clear out any 'done' file from a previous build.

    Runs on the xdist controller before workers start, so the per-session
    build (see pytest_configure()) is re-done exactly once.

    Args:
        config: The pytest configuration.
        specs: The worker specs (unused).

    Returns:
        Nothing.
    """
    global build_done_file

    build_dir = get_details(config)[3]

    build_done_file = Path(build_dir) / 'build.done'
    # Remove via EAFP rather than exists()+remove(), which raced if another
    # process removed the file between the two calls
    try:
        build_done_file.unlink()
    except FileNotFoundError:
        pass
218
def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Determines the board/build details, optionally builds U-Boot, creates
    the HTML log file, loads board configuration modules and the build's
    config files into the global 'ubconfig' container, exports settings to
    the environment, and creates the global console object.

    Args:
        config: The pytest configuration.

    Returns:
        Nothing.
    """
    def parse_config(conf_file):
        """Parse a config file, loading it into the ubconfig container

        Args:
            conf_file: Filename to load (within build_dir)

        Raises
            Exception if the file does not exist
        """
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                            'try passing --build option?')

        # The config files are plain key=value lines with no section header,
        # so prepend a dummy [root] section to satisfy configparser
        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = io.StringIO(ini_str)
            parser = configparser.RawConfigParser()
            parser.read_file(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))

    global log
    global console
    global ubconfig

    (board_type, board_type_extra, board_identity, build_dir, build_dir_extra,
     source_dir) = get_details(config)

    # Module/file names use '_' where board names use '-'
    board_type_filename = board_type.replace('-', '_')
    board_identity_filename = board_identity.replace('-', '_')
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    gdbserver = config.getoption('gdbserver')
    if gdbserver and not board_type.startswith('sandbox'):
        raise Exception('--gdbserver only supported with sandbox targets')

    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        # With pytest-xdist each worker runs this hook; serialize the build
        # with a lock file and use a 'done' marker so only the first worker
        # (or a non-xdist run) actually builds
        worker_id = os.environ.get("PYTEST_XDIST_WORKER")
        with filelock.FileLock(os.path.join(build_dir, 'build.lock')):
            build_done_file = Path(build_dir) / 'build.done'
            if (not worker_id or worker_id == 'master' or
                not build_done_file.exists()):
                run_build(config, source_dir, build_dir, board_type, log)
                build_done_file.touch()

    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

    # Load optional board/board-environment configuration modules from the
    # test directory, if they exist
    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
         board_identity_filename),
    ]
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            continue
        dict_to_fill.update(module.__dict__)

    ubconfig.buildconfig = dict()

    # buildman -k puts autoconf.mk in the rootdir, so handle this as well
    # as the standard U-Boot build which leaves it in include/autoconf.mk
    parse_config('.config')
    if os.path.exists(build_dir + '/' + 'autoconf.mk'):
        parse_config('autoconf.mk')
    else:
        parse_config('include/autoconf.mk')

    ubconfig.test_py_dir = TEST_PY_DIR
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.build_dir_extra = build_dir_extra
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_type_extra = board_type_extra
    ubconfig.board_identity = board_identity
    ubconfig.gdbserver = gdbserver
    ubconfig.use_running_system = config.getoption('use_running_system')
    ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'
    ubconfig.connection_ok = True
    ubconfig.timing = config.getoption('timing')

    # Export the settings so hook scripts and sub-processes can see them
    env_vars = (
        'board_type',
        'board_type_extra',
        'board_identity',
        'source_dir',
        'test_py_dir',
        'build_dir',
        'build_dir_extra',
        'result_dir',
        'persistent_data_dir',
    )
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)

    # Sandbox boards run U-Boot as a local sub-process; other boards are
    # reached via the external u-boot-test-* hook scripts
    if board_type.startswith('sandbox'):
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)
351
352
def generate_ut_subtest(metafunc, fixture_name, sym_path):
    """Provide parametrization for a ut_subtest fixture.

    Determines the set of unit tests built into a U-Boot binary by parsing the
    list of symbols generated by the build process. Provides this information
    to test functions by parameterizing their ut_subtest fixture parameter.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.
        sym_path: Relative path to the symbol file with preceding '/'
            (e.g. '/u-boot.sym')

    Returns:
        Nothing.
    """
    fn = console.config.build_dir + sym_path
    try:
        with open(fn, 'rt') as f:
            lines = f.readlines()
    except OSError:
        # A missing or unreadable symbol file just means there are no
        # subtests to parametrize. The previous bare 'except:' also hid
        # KeyboardInterrupt/SystemExit.
        lines = []
    lines.sort()

    vals = []
    for l in lines:
        m = RE_UT_TEST_LIST.search(l)
        if not m:
            continue
        suite, name = m.groups()

        # Tests marked with _norun should only be run manually using 'ut -f'
        if name.endswith('_norun'):
            continue

        vals.append(f'{suite} {name}')

    ids = ['ut_' + s.replace(' ', '_') for s in vals]
    metafunc.parametrize(fixture_name, vals, ids=ids)
392
def generate_config(metafunc, fixture_name):
    """Provide parametrization for {env,brd}__ fixtures.

    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

    subconfigs = {
        'brd': console.config.brd,
        'env': console.config.env,
    }
    parts = fixture_name.split('__')
    if len(parts) < 2:
        return
    if parts[0] not in subconfigs:
        return
    subconfig = subconfigs[parts[0]]
    vals = []
    val = subconfig.get(fixture_name, [])
    # If that exact name is a key in the data source:
    if val:
        # ... use the dict value as a single parameter value.
        vals = (val, )
    else:
        # ... otherwise, see if there's a key that contains a list of
        # values to use instead.
        vals = subconfig.get(fixture_name + 's', [])

    def fixture_id(index, val):
        """Name a parametrization: its 'fixture_id' entry, or name + index."""
        try:
            return val['fixture_id']
        except (TypeError, KeyError):
            # Not a mapping, or no explicit ID; fall back to a generated
            # name. The previous bare 'except:' also swallowed
            # KeyboardInterrupt/SystemExit.
            return fixture_name + str(index)

    ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
    metafunc.parametrize(fixture_name, vals, ids=ids)
436
def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.

    Check each test function parameter (fixture name) to see if it is one of
    our custom names, and if so, provide the correct parametrization for that
    parameter.

    Args:
        metafunc: The pytest test function.

    Returns:
        Nothing.
    """
    for fixture in metafunc.fixturenames:
        if fixture == 'ut_subtest':
            # Main U-Boot unit-test list
            generate_ut_subtest(metafunc, fixture, '/u-boot.sym')
        else:
            # SPL/TPL/VPL unit-test lists, e.g. ut_spl_subtest
            match = re.match('ut_(.)pl_subtest', fixture)
            if match:
                phase = match.group(1)
                generate_ut_subtest(
                    metafunc, fixture, f'/{phase}pl/u-boot-{phase}pl.sym')
            else:
                # Possibly a brd__/env__ configuration fixture
                generate_config(metafunc, fixture)
461
@pytest.fixture(scope='session')
def u_boot_log(request):
    """Session-scoped fixture providing the multiplexed HTML log file.

    Args:
        request: The pytest request (unused).

    Returns:
        The log file object used by the U-Boot console.
    """
    logfile = console.log
    return logfile
474
@pytest.fixture(scope='session')
def u_boot_config(request):
    """Session-scoped fixture providing the U-Boot configuration container.

    Args:
        request: The pytest request (unused).

    Returns:
        The ubconfig object held by the U-Boot console.
    """
    cfg = console.config
    return cfg
487
@pytest.fixture(scope='function')
def u_boot_console(request):
    """Generate the value of a test's u_boot_console fixture.

    Ensures the U-Boot console process is spawned and ready, translating any
    spawn-time failure into a logged exception via handle_exception().

    Args:
        request: The pytest request.

    Returns:
        The fixture value (the global console object).
    """
    if not ubconfig.connection_ok:
        pytest.skip('Cannot get target connection')
        return None
    try:
        console.ensure_spawned()
    except OSError as err:
        handle_exception(ubconfig, console, log, err, 'Lab failure', True)
    except Timeout as err:
        handle_exception(ubconfig, console, log, err, 'Lab timeout', True)
    except BootFail as err:
        handle_exception(ubconfig, console, log, err, 'Boot fail', True,
                         console.get_spawn_output())
    except Unexpected as err:
        # Fix: this handler previously omitted 'as err', so referencing err
        # below raised NameError instead of reporting the real problem
        handle_exception(ubconfig, console, log, err, 'Unexpected test output',
                         False)
    return console
514
# Mapping from test name to the log-section anchor created for that test
anchors = {}
# Result buckets, each a list of test names. Every collected test starts in
# tests_not_run (see pytest_itemcollected) and is moved to exactly one of the
# other lists by pytest_runtest_protocol as it completes.
tests_not_run = []
tests_failed = []
tests_xpassed = []
tests_xfailed = []
tests_skipped = []
tests_warning = []
tests_passed = []

# Duration of each test:
# key (string): test name
# value (float): duration in ms
test_durations = {}
528
529
def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    This enables our custom result analysis code to see the list of all tests
    that should eventually be run.

    Args:
        item: The item that was collected.

    Returns:
        Nothing.
    """

    # Every test starts out as "not run"; pytest_runtest_protocol() moves it
    # to the appropriate result bucket once it has executed
    tests_not_run.append(item.name)
544
545
def show_timings():
    """Write timings for each test, along with a histogram"""

    def get_time_delta(msecs):
        """Convert milliseconds into a user-friendly string"""
        if msecs >= 1000:
            return f'{msecs / 1000:.1f}s'
        else:
            return f'{msecs:.0f}ms'

    def show_bar(key, msecs, value):
        """Show a single bar (line) of the histogram

        Args:
            key (str): Key to write on the left
            msecs (float): Total time of this bucket, in milliseconds
            value (int): Value to display, i.e. the relative length of the bar
        """
        if value:
            bar_length = int((value / max_count) * max_bar_length)
            print(f"{key:>8} : {get_time_delta(msecs):>7} |{'#' * bar_length} {value}", file=buf)

    # Create the buckets we will use, each has a count and a total time.
    # Bucket boundaries are 1, 2, 3, 4, 5, 7.5 at each power of ten from
    # 1ms up to 75s.
    bucket = {}
    for power in range(5):
        for i in [1, 2, 3, 4, 5, 7.5]:
            bucket[i * 10 ** power] = {'count': 0, 'msecs': 0.0}
    max_dur = max(bucket.keys())

    # Collect counts for each bucket; if outside the range, add to too_long
    # Also show a sorted list of test timings from longest to shortest
    too_long = 0
    too_long_msecs = 0.0
    max_count = 0
    with log.section('Timing Report', 'timing_report'):
        for name, dur in sorted(test_durations.items(), key=lambda kv: kv[1],
                                reverse=True):
            log.info(f'{get_time_delta(dur):>8} {name}')
            # Find the smallest bucket boundary >= this duration
            greater = [k for k in bucket.keys() if dur <= k]
            if greater:
                buck = bucket[min(greater)]
                buck['count'] += 1
                max_count = max(max_count, buck['count'])
                buck['msecs'] += dur
            else:
                too_long += 1
                too_long_msecs += dur

    # Set the maximum length of a histogram bar, in characters
    max_bar_length = 40

    # Show a summary with histogram
    buf = io.StringIO()
    with log.section('Timing Summary', 'timing_summary'):
        print('Duration : Total | Number of tests', file=buf)
        print(f'{"=" * 8} : {"=" * 7} |{"=" * max_bar_length}', file=buf)
        for dur, buck in bucket.items():
            if buck['count']:
                label = get_time_delta(dur)
                show_bar(f'<{label}', buck['msecs'], buck['count'])
        if too_long:
            show_bar(f'>{get_time_delta(max_dur)}', too_long_msecs, too_long)
        log.info(buf.getvalue())
    # Repeat the summary on stdout if --timing was given
    if ubconfig.timing:
        print(buf.getvalue(), end='')
610
611
def cleanup():
    """Clean up all global state.

    Executed (via atexit) once the entire test process is complete. This
    includes logging the status of all tests, and the identity of any failed
    or skipped tests.

    Args:
        None.

    Returns:
        Nothing.
    """
    if console:
        console.close()
    if log:
        with log.section('Status Report', 'status_report'):
            log.status_pass('%d passed' % len(tests_passed))
            # Remaining categories: (result list, log method, summary format).
            # Each non-empty category gets a summary line plus one line per
            # test, linked to that test's log-section anchor.
            categories = (
                (tests_warning, log.status_warning, '%d passed with warning'),
                (tests_skipped, log.status_skipped, '%d skipped'),
                (tests_xpassed, log.status_xpass, '%d xpass'),
                (tests_xfailed, log.status_xfail, '%d xfail'),
                (tests_failed, log.status_fail, '%d failed'),
                (tests_not_run, log.status_fail, '%d not run'),
            )
            for test_list, status_func, summary_fmt in categories:
                if not test_list:
                    continue
                status_func(summary_fmt % len(test_list))
                for test in test_list:
                    status_func('... ' + test, anchors.get(test, None))
            show_timings()
        log.close()
atexit.register(cleanup)
664
def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """
    required_boards = []
    for marker in item.iter_markers('boardspec'):
        board = marker.args[0]
        if not board.startswith('!'):
            # A plain name adds to the set of boards the test requires
            required_boards.append(board)
        elif ubconfig.board_type == board[1:]:
            # '!name' excludes that specific board
            pytest.skip('board "%s" not supported' % ubconfig.board_type)
            return
    if required_boards and ubconfig.board_type not in required_boards:
        pytest.skip('board "%s" not supported' % ubconfig.board_type)
690
def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on an U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """
    for marker in item.iter_markers('buildconfigspec'):
        feature = marker.args[0].lower()
        if not ubconfig.buildconfig.get('config_' + feature, None):
            pytest.skip('.config feature "%s" not enabled' % feature)
    for marker in item.iter_markers('notbuildconfigspec'):
        feature = marker.args[0].lower()
        if ubconfig.buildconfig.get('config_' + feature, None):
            pytest.skip('.config feature "%s" enabled' % feature)
713
def tool_is_in_path(tool):
    """Check whether an executable tool is available via $PATH.

    Args:
        tool (str): Name of the tool (binary/executable) to look for.

    Returns:
        bool: True if an executable file for the tool was found in $PATH.
    """
    # shutil.which() performs the same $PATH walk with isfile and X_OK
    # checks as the previous hand-rolled loop
    return shutil.which(tool) is not None
720
def setup_requiredtool(item):
    """Process any 'requiredtool' marker for a test.

    Such a marker lists some external tool (binary, executable, application)
    that the test requires. If tests are being executed on a system that
    doesn't have the required tool, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """
    for marker in item.iter_markers('requiredtool'):
        tool_name = marker.args[0]
        # Skip at the first tool that cannot be found
        if not tool_is_in_path(tool_name):
            pytest.skip('tool "%s" not in $PATH' % tool_name)
739
def setup_singlethread(item):
    """Process any 'singlethread' marker for a test.

    Skip this test if running in parallel.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """
    if not any(item.iter_markers('singlethread')):
        return
    # Under pytest-xdist, workers have a non-'master' worker ID
    worker_id = os.environ.get("PYTEST_XDIST_WORKER")
    if worker_id and worker_id != 'master':
        pytest.skip('must run single-threaded')
755
def start_test_section(item):
    """Open a log section for a test and record its anchor for later links."""
    test_name = item.name
    anchors[test_name] = log.start_section(test_name)
758
def pytest_runtest_setup(item):
    """pytest hook: Configure (set up) a test item.

    Called once for each test to perform any custom configuration. This hook
    is used to skip the test if certain conditions apply.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """
    start_test_section(item)
    # Each step may call pytest.skip(), which aborts the remaining steps
    for marker_check in (setup_boardspec, setup_buildconfigspec,
                         setup_requiredtool, setup_singlethread):
        marker_check(item)
777
def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    """

    # Clear any stale warning flag before the test runs
    log.get_and_reset_warning()
    ihook = item.ihook
    ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
    start = time.monotonic()
    reports = runtestprotocol(item, nextitem=nextitem)
    duration = round((time.monotonic() - start) * 1000, 1)
    ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
    was_warning = log.get_and_reset_warning()

    # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if
    # the test is skipped. That call is required to create the test's section
    # in the log file. The call to log.end_section() requires that the log
    # contain a section for this test. Create a section for the test if it
    # doesn't already exist.
    if not item.name in anchors:
        start_test_section(item)

    failure_cleanup = False
    record_duration = True
    # Assume a pass (with or without warning); the report scan below
    # overrides test_list/msg/msg_log for fail/skip/xfail/xpass outcomes
    if not was_warning:
        test_list = tests_passed
        msg = 'OK'
        msg_log = log.status_pass
    else:
        test_list = tests_warning
        msg = 'OK (with warning)'
        msg_log = log.status_warning
    for report in reports:
        if report.outcome == 'failed':
            # 'failed' with wasxfail means the test passed unexpectedly
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            # 'skipped' with wasxfail is an expected failure
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped
            # Skipped tests would distort the timing statistics
            record_duration = False

    msg += f' {duration} ms'
    if record_duration:
        test_durations[item.name] = duration

    if failure_cleanup:
        console.drain_console()

    test_list.append(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        import traceback
        print('Exception occurred while logging runtest status:')
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    # A failed test may leave the spawned U-Boot process in a bad state;
    # restart it for the next test
    if failure_cleanup:
        console.cleanup_spawn()

    return True