// Serenity Operating System
1/*
2 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
3 * Copyright (c) 2021, Andrew Kaster <akaster@serenityos.org>
4 *
5 * SPDX-License-Identifier: BSD-2-Clause
6 */
7
8#include <LibTest/Macros.h> // intentionally first -- we redefine VERIFY and friends in here
9
10#include <AK/Function.h>
11#include <LibCore/ArgsParser.h>
12#include <LibTest/TestSuite.h>
13#include <math.h>
14#include <stdlib.h>
15#include <sys/time.h>
16
17namespace Test {
18
// Backing storage for the process-wide TestSuite singleton (see TestSuite::the()
// in TestSuite.h); lazily created on first access.
TestSuite* TestSuite::s_global = nullptr;
20
21class TestElapsedTimer {
22public:
23 TestElapsedTimer() { restart(); }
24
25 void restart() { gettimeofday(&m_started, nullptr); }
26
27 u64 elapsed_milliseconds()
28 {
29 struct timeval now = {};
30 gettimeofday(&now, nullptr);
31
32 struct timeval delta = {};
33 timersub(&now, &m_started, &delta);
34
35 return delta.tv_sec * 1000 + delta.tv_usec / 1000;
36 }
37
38private:
39 struct timeval m_started = {};
40};
41
42// Declared in Macros.h
43void current_test_case_did_fail()
44{
45 TestSuite::the().current_test_case_did_fail();
46}
47
48// Declared in TestCase.h
49void add_test_case_to_suite(NonnullRefPtr<TestCase> const& test_case)
50{
51 TestSuite::the().add_case(test_case);
52}
53
54// Declared in TestCase.h
55void set_suite_setup_function(Function<void()> setup)
56{
57 TestSuite::the().set_suite_setup(move(setup));
58}
59
60int TestSuite::main(DeprecatedString const& suite_name, Span<StringView> arguments)
61{
62 m_suite_name = suite_name;
63
64 Core::ArgsParser args_parser;
65
66 bool do_tests_only = getenv("TESTS_ONLY") != nullptr;
67 bool do_benchmarks_only = false;
68 bool do_list_cases = false;
69 StringView search_string = "*"sv;
70
71 args_parser.add_option(do_tests_only, "Only run tests.", "tests", 0);
72 args_parser.add_option(do_benchmarks_only, "Only run benchmarks.", "bench", 0);
73 args_parser.add_option(m_benchmark_repetitions, "Number of times to repeat each benchmark (default 1)", "benchmark_repetitions", 0, "N");
74 args_parser.add_option(do_list_cases, "List available test cases.", "list", 0);
75 args_parser.add_positional_argument(search_string, "Only run matching cases.", "pattern", Core::ArgsParser::Required::No);
76 args_parser.parse(arguments);
77
78 if (m_setup)
79 m_setup();
80
81 auto const& matching_tests = find_cases(search_string, !do_benchmarks_only, !do_tests_only);
82
83 if (do_list_cases) {
84 outln("Available cases for {}:", suite_name);
85 for (auto const& test : matching_tests) {
86 outln(" {}", test->name());
87 }
88 return 0;
89 }
90
91 outln("Running {} cases out of {}.", matching_tests.size(), m_cases.size());
92
93 return run(matching_tests);
94}
95
96Vector<NonnullRefPtr<TestCase>> TestSuite::find_cases(DeprecatedString const& search, bool find_tests, bool find_benchmarks)
97{
98 Vector<NonnullRefPtr<TestCase>> matches;
99 for (auto& t : m_cases) {
100 if (!search.is_empty() && !t->name().matches(search, CaseSensitivity::CaseInsensitive)) {
101 continue;
102 }
103
104 if (!find_tests && !t->is_benchmark()) {
105 continue;
106 }
107 if (!find_benchmarks && t->is_benchmark()) {
108 continue;
109 }
110
111 matches.append(t);
112 }
113 return matches;
114}
115
116int TestSuite::run(Vector<NonnullRefPtr<TestCase>> const& tests)
117{
118 size_t test_count = 0;
119 size_t test_failed_count = 0;
120 size_t benchmark_count = 0;
121 TestElapsedTimer global_timer;
122
123 for (auto const& t : tests) {
124 auto const test_type = t->is_benchmark() ? "benchmark" : "test";
125 auto const repetitions = t->is_benchmark() ? m_benchmark_repetitions : 1;
126
127 warnln("Running {} '{}'.", test_type, t->name());
128 m_current_test_case_passed = true;
129
130 u64 total_time = 0;
131 u64 sum_of_squared_times = 0;
132 u64 min_time = NumericLimits<u64>::max();
133 u64 max_time = 0;
134
135 for (u64 i = 0; i < repetitions; ++i) {
136 TestElapsedTimer timer;
137 t->func()();
138 auto const iteration_time = timer.elapsed_milliseconds();
139 total_time += iteration_time;
140 sum_of_squared_times += iteration_time * iteration_time;
141 min_time = min(min_time, iteration_time);
142 max_time = max(max_time, iteration_time);
143 }
144
145 if (repetitions != 1) {
146 double average = total_time / double(repetitions);
147 double average_squared = average * average;
148 double standard_deviation = sqrt((sum_of_squared_times + repetitions * average_squared - 2 * total_time * average) / (repetitions - 1));
149
150 dbgln("{} {} '{}' on average in {:.1f}±{:.1f}ms (min={}ms, max={}ms, total={}ms)",
151 m_current_test_case_passed ? "Completed" : "Failed", test_type, t->name(),
152 average, standard_deviation, min_time, max_time, total_time);
153 } else {
154 dbgln("{} {} '{}' in {}ms", m_current_test_case_passed ? "Completed" : "Failed", test_type, t->name(), total_time);
155 }
156
157 if (t->is_benchmark()) {
158 m_benchtime += total_time;
159 benchmark_count++;
160 } else {
161 m_testtime += total_time;
162 test_count++;
163 }
164
165 if (!m_current_test_case_passed) {
166 test_failed_count++;
167 }
168 }
169
170 dbgln("Finished {} tests and {} benchmarks in {}ms ({}ms tests, {}ms benchmarks, {}ms other).",
171 test_count,
172 benchmark_count,
173 global_timer.elapsed_milliseconds(),
174 m_testtime,
175 m_benchtime,
176 global_timer.elapsed_milliseconds() - (m_testtime + m_benchtime));
177 dbgln("Out of {} tests, {} passed and {} failed.", test_count, test_count - test_failed_count, test_failed_count);
178
179 return (int)test_failed_count;
180}
181
182}