/*
 * Serenity Operating System
 *
 * Copyright (c) 2021, Brian Gianforcaro <bgianf@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
6
7#pragma once
8
9#include <Kernel/PerformanceEventBuffer.h>
10#include <Kernel/Process.h>
11#include <Kernel/Thread.h>
12#include <Kernel/Time/TimeManagement.h>
13
14namespace Kernel {
15
16class PerformanceManager {
17public:
18 static void add_process_created_event(Process& process)
19 {
20 if (g_profiling_all_threads) {
21 VERIFY(g_global_perf_events);
22 (void)g_global_perf_events->add_process(process, ProcessEventType::Create);
23 }
24 }
25
26 static void add_process_exec_event(Process& process)
27 {
28 if (auto* event_buffer = process.current_perf_events_buffer()) {
29 (void)event_buffer->add_process(process, ProcessEventType::Exec);
30 }
31 }
32
33 static void add_process_exit_event(Process& process)
34 {
35 if (g_profiling_all_threads) {
36 VERIFY(g_global_perf_events);
37 [[maybe_unused]] auto rc = g_global_perf_events->append_with_ip_and_bp(
38 process.pid(), 0, 0, 0, PERF_EVENT_PROCESS_EXIT, 0, 0, 0, {});
39 }
40 }
41
42 static void add_thread_created_event(Thread& thread)
43 {
44 if (thread.is_profiling_suppressed())
45 return;
46 if (auto* event_buffer = thread.process().current_perf_events_buffer()) {
47 [[maybe_unused]] auto rc = event_buffer->append(PERF_EVENT_THREAD_CREATE, thread.tid().value(), 0, {}, &thread);
48 }
49 }
50
51 static void add_thread_exit_event(Thread& thread)
52 {
53 // As an exception this doesn't check whether profiling is suppressed for
54 // the thread so we can record the thread_exit event anyway.
55 if (auto* event_buffer = thread.process().current_perf_events_buffer()) {
56 [[maybe_unused]] auto rc = event_buffer->append(PERF_EVENT_THREAD_EXIT, thread.tid().value(), 0, {}, &thread);
57 }
58 }
59
60 static void add_cpu_sample_event(Thread& current_thread, RegisterState const& regs, u32 lost_time)
61 {
62 if (current_thread.is_profiling_suppressed())
63 return;
64 if (auto* event_buffer = current_thread.process().current_perf_events_buffer()) {
65 [[maybe_unused]] auto rc = event_buffer->append_with_ip_and_bp(
66 current_thread.pid(), current_thread.tid(), regs, PERF_EVENT_SAMPLE, lost_time, 0, 0, {});
67 }
68 }
69
70 static void add_mmap_perf_event(Process& current_process, Memory::Region const& region)
71 {
72 if (auto* event_buffer = current_process.current_perf_events_buffer()) {
73 [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_MMAP, region.vaddr().get(), region.size(), region.name());
74 }
75 }
76
77 static void add_unmap_perf_event(Process& current_process, Memory::VirtualRange const& region)
78 {
79 if (auto* event_buffer = current_process.current_perf_events_buffer()) {
80 [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_MUNMAP, region.base().get(), region.size(), {});
81 }
82 }
83
84 static void add_context_switch_perf_event(Thread& current_thread, Thread& next_thread)
85 {
86 if (current_thread.is_profiling_suppressed())
87 return;
88 if (auto* event_buffer = current_thread.process().current_perf_events_buffer()) {
89 [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_CONTEXT_SWITCH, next_thread.pid().value(), next_thread.tid().value(), {});
90 }
91 }
92
93 static void add_kmalloc_perf_event(Thread& current_thread, size_t size, FlatPtr ptr)
94 {
95 if (current_thread.is_profiling_suppressed())
96 return;
97 if (auto* event_buffer = current_thread.process().current_perf_events_buffer()) {
98 [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_KMALLOC, size, ptr, {});
99 }
100 }
101
102 static void add_kfree_perf_event(Thread& current_thread, size_t size, FlatPtr ptr)
103 {
104 if (current_thread.is_profiling_suppressed())
105 return;
106 if (auto* event_buffer = current_thread.process().current_perf_events_buffer()) {
107 [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_KFREE, size, ptr, {});
108 }
109 }
110
111 static void add_page_fault_event(Thread& thread, RegisterState const& regs)
112 {
113 if (thread.is_profiling_suppressed())
114 return;
115 if (auto* event_buffer = thread.process().current_perf_events_buffer()) {
116 [[maybe_unused]] auto rc = event_buffer->append_with_ip_and_bp(
117 thread.pid(), thread.tid(), regs, PERF_EVENT_PAGE_FAULT, 0, 0, 0, {});
118 }
119 }
120
121 static void add_syscall_event(Thread& thread, RegisterState const& regs)
122 {
123 if (thread.is_profiling_suppressed())
124 return;
125 if (auto* event_buffer = thread.process().current_perf_events_buffer()) {
126 [[maybe_unused]] auto rc = event_buffer->append_with_ip_and_bp(
127 thread.pid(), thread.tid(), regs, PERF_EVENT_SYSCALL, 0, 0, 0, {});
128 }
129 }
130
131 static void add_read_event(Thread& thread, int fd, size_t size, OpenFileDescription const& file_description, u64 start_timestamp, ErrorOr<FlatPtr> const& result)
132 {
133 if (thread.is_profiling_suppressed())
134 return;
135
136 auto* event_buffer = thread.process().current_perf_events_buffer();
137 if (event_buffer == nullptr)
138 return;
139
140 size_t filepath_string_index;
141
142 if (auto path = file_description.original_absolute_path(); !path.is_error()) {
143 auto registered_result = event_buffer->register_string(move(path.value()));
144 if (registered_result.is_error())
145 return;
146 filepath_string_index = registered_result.value();
147 } else if (auto pseudo_path = file_description.pseudo_path(); !pseudo_path.is_error()) {
148 auto registered_result = event_buffer->register_string(move(pseudo_path.value()));
149 if (registered_result.is_error())
150 return;
151 filepath_string_index = registered_result.value();
152 } else {
153 auto invalid_path_string = KString::try_create("<INVALID_FILE_PATH>"sv); // TODO: Performance, unnecessary allocations.
154 if (invalid_path_string.is_error())
155 return;
156 auto registered_result = event_buffer->register_string(move(invalid_path_string.value()));
157 if (registered_result.is_error())
158 return;
159 filepath_string_index = registered_result.value();
160 }
161
162 [[maybe_unused]] auto rc = event_buffer->append(PERF_EVENT_READ, fd, size, {}, &thread, filepath_string_index, start_timestamp, result); // wrong arguments
163 }
164
165 static void timer_tick(RegisterState const& regs)
166 {
167 static Time last_wakeup;
168 auto now = kgettimeofday();
169 constexpr auto ideal_interval = Time::from_microseconds(1000'000 / OPTIMAL_PROFILE_TICKS_PER_SECOND_RATE);
170 auto expected_wakeup = last_wakeup + ideal_interval;
171 auto delay = (now > expected_wakeup) ? now - expected_wakeup : Time::from_microseconds(0);
172 last_wakeup = now;
173 auto* current_thread = Thread::current();
174 // FIXME: We currently don't collect samples while idle.
175 // That will be an interesting mode to add in the future. :^)
176 if (!current_thread || current_thread == Processor::idle_thread())
177 return;
178
179 auto lost_samples = delay.to_microseconds() / ideal_interval.to_microseconds();
180 PerformanceManager::add_cpu_sample_event(*current_thread, regs, lost_samples);
181 }
182};
183
184}