// Serenity Operating System
1/*
2 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice, this
9 * list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
22 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
23 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include "Profile.h"
28#include "ProfileModel.h"
29#include <AK/HashTable.h>
30#include <AK/MappedFile.h>
31#include <AK/QuickSort.h>
32#include <LibCore/File.h>
33#include <LibELF/ELFLoader.h>
34#include <stdio.h>
35
36static void sort_profile_nodes(Vector<NonnullRefPtr<ProfileNode>>& nodes)
37{
38 quick_sort(nodes.begin(), nodes.end(), [](auto& a, auto& b) {
39 return a->event_count() >= b->event_count();
40 });
41
42 for (auto& child : nodes)
43 child->sort_children();
44}
45
46Profile::Profile(Vector<Event> events)
47 : m_events(move(events))
48{
49 m_first_timestamp = m_events.first().timestamp;
50 m_last_timestamp = m_events.last().timestamp;
51
52 m_model = ProfileModel::create(*this);
53
54 for (auto& event : m_events) {
55 m_deepest_stack_depth = max((u32)event.frames.size(), m_deepest_stack_depth);
56 }
57
58 rebuild_tree();
59}
60
// Out-of-line destructor; members clean themselves up (RAII), nothing explicit to do.
Profile::~Profile()
{
}
64
// Returns the tree model for GUI views. m_model is created in the
// constructor, so dereferencing here is always safe.
GUI::Model& Profile::model()
{
    return *m_model;
}
69
70void Profile::rebuild_tree()
71{
72 u32 filtered_event_count = 0;
73 Vector<NonnullRefPtr<ProfileNode>> roots;
74
75 auto find_or_create_root = [&roots](const String& symbol, u32 address, u32 offset, u64 timestamp) -> ProfileNode& {
76 for (size_t i = 0; i < roots.size(); ++i) {
77 auto& root = roots[i];
78 if (root->symbol() == symbol) {
79 return root;
80 }
81 }
82 auto new_root = ProfileNode::create(symbol, address, offset, timestamp);
83 roots.append(new_root);
84 return new_root;
85 };
86
87 HashTable<FlatPtr> live_allocations;
88
89 for (auto& event : m_events) {
90 if (has_timestamp_filter_range()) {
91 auto timestamp = event.timestamp;
92 if (timestamp < m_timestamp_filter_range_start || timestamp > m_timestamp_filter_range_end)
93 continue;
94 }
95
96 if (event.type == "malloc")
97 live_allocations.set(event.ptr);
98 else if (event.type == "free")
99 live_allocations.remove(event.ptr);
100 }
101
102 for (auto& event : m_events) {
103 if (has_timestamp_filter_range()) {
104 auto timestamp = event.timestamp;
105 if (timestamp < m_timestamp_filter_range_start || timestamp > m_timestamp_filter_range_end)
106 continue;
107 }
108
109 if (event.type == "malloc" && !live_allocations.contains(event.ptr))
110 continue;
111
112 if (event.type == "free")
113 continue;
114
115 ProfileNode* node = nullptr;
116
117 auto for_each_frame = [&]<typename Callback>(Callback callback)
118 {
119 if (!m_inverted) {
120 for (size_t i = 0; i < event.frames.size(); ++i) {
121 if (callback(event.frames.at(i), i == event.frames.size() - 1) == IterationDecision::Break)
122 break;
123 }
124 } else {
125 for (ssize_t i = event.frames.size() - 1; i >= 0; --i) {
126 if (callback(event.frames.at(i), static_cast<size_t>(i) == event.frames.size() - 1) == IterationDecision::Break)
127 break;
128 }
129 }
130 };
131
132 for_each_frame([&](const Frame& frame, bool is_innermost_frame) {
133 auto& symbol = frame.symbol;
134 auto& address = frame.address;
135 auto& offset = frame.offset;
136
137 if (symbol.is_empty())
138 return IterationDecision::Break;
139
140 if (!node)
141 node = &find_or_create_root(symbol, address, offset, event.timestamp);
142 else
143 node = &node->find_or_create_child(symbol, address, offset, event.timestamp);
144
145 node->increment_event_count();
146 if (is_innermost_frame)
147 node->increment_self_count();
148 return IterationDecision::Continue;
149 });
150
151 ++filtered_event_count;
152 }
153
154 sort_profile_nodes(roots);
155
156 m_filtered_event_count = filtered_event_count;
157 m_roots = move(roots);
158 m_model->update();
159}
160
// Parses the perfcore JSON file at `path` into a Profile.
// User-space addresses are symbolicated against the profiled executable;
// kernel addresses against /boot/kernel when it can be mapped.
// Returns nullptr if the file can't be opened/parsed, the executable can't
// be mapped, or the profile contains no events.
OwnPtr<Profile> Profile::load_from_perfcore_file(const StringView& path)
{
    auto file = Core::File::construct(path);
    if (!file->open(Core::IODevice::ReadOnly)) {
        fprintf(stderr, "Unable to open %s, error: %s\n", String(path).characters(), file->error_string());
        return nullptr;
    }

    auto json = JsonValue::from_string(file->read_all());
    if (!json.is_object()) {
        fprintf(stderr, "Invalid perfcore format (not a JSON object)\n");
        return nullptr;
    }

    auto& object = json.as_object();
    auto executable_path = object.get("executable").to_string();

    // Map the profiled executable so its frames can be symbolicated.
    MappedFile elf_file(executable_path);
    if (!elf_file.is_valid()) {
        fprintf(stderr, "Unable to open executable '%s' for symbolication.\n", executable_path.characters());
        return nullptr;
    }

    auto elf_loader = make<ELFLoader>(static_cast<const u8*>(elf_file.data()), elf_file.size());

    // The kernel image is optional; without it, kernel frames show up as "??".
    MappedFile kernel_elf_file("/boot/kernel");
    OwnPtr<ELFLoader> kernel_elf_loader;
    if (kernel_elf_file.is_valid())
        kernel_elf_loader = make<ELFLoader>(static_cast<const u8*>(kernel_elf_file.data()), kernel_elf_file.size());

    auto events_value = object.get("events");
    if (!events_value.is_array())
        return nullptr;

    auto& perf_events = events_value.as_array();
    if (perf_events.is_empty())
        return nullptr;

    Vector<Event> events;

    for (auto& perf_event_value : perf_events.values()) {
        auto& perf_event = perf_event_value.as_object();

        Event event;

        event.timestamp = perf_event.get("timestamp").to_number<u64>();
        event.type = perf_event.get("type").to_string();

        // malloc/free events carry the allocation pointer (and size for malloc).
        if (event.type == "malloc") {
            event.ptr = perf_event.get("ptr").to_number<FlatPtr>();
            event.size = perf_event.get("size").to_number<size_t>();
        } else if (event.type == "free") {
            event.ptr = perf_event.get("ptr").to_number<FlatPtr>();
        }

        // Walk the captured stack from its last entry down to index 1, so frames
        // end up reversed relative to the JSON order. NOTE(review): stack[0] is
        // deliberately skipped — presumably the in-kernel sampling frame; confirm
        // against the perfcore writer.
        auto stack_array = perf_event.get("stack").as_array();
        for (ssize_t i = stack_array.values().size() - 1; i >= 1; --i) {
            auto& frame = stack_array.at(i);
            auto ptr = frame.to_number<u32>();
            u32 offset = 0;
            String symbol;

            // Addresses at or above 0xc0000000 are symbolicated against the kernel image.
            if (ptr >= 0xc0000000) {
                if (kernel_elf_loader) {
                    symbol = kernel_elf_loader->symbolicate(ptr, &offset);
                } else {
                    symbol = "??";
                }
            } else {
                symbol = elf_loader->symbolicate(ptr, &offset);
            }

            event.frames.append({ symbol, ptr, offset });
        }

        // Drop events whose stacks are too shallow to index frames.at(1) below.
        if (event.frames.size() < 2)
            continue;

        // The frame at index 1 decides kernel vs. user attribution.
        // NOTE(review): assumes frames[1] is the relevant innermost frame after
        // the reversal above — confirm.
        FlatPtr innermost_frame_address = event.frames.at(1).address;
        event.in_kernel = innermost_frame_address >= 0xc0000000;

        events.append(move(event));
    }

    return NonnullOwnPtr<Profile>(NonnullOwnPtr<Profile>::Adopt, *new Profile(move(events)));
}
247
// Sorts this node's children (by descending event count) and recurses into them.
void ProfileNode::sort_children()
{
    sort_profile_nodes(m_children);
}
252
253void Profile::set_timestamp_filter_range(u64 start, u64 end)
254{
255 if (m_has_timestamp_filter_range && m_timestamp_filter_range_start == start && m_timestamp_filter_range_end == end)
256 return;
257 m_has_timestamp_filter_range = true;
258
259 m_timestamp_filter_range_start = min(start, end);
260 m_timestamp_filter_range_end = max(start, end);
261
262 rebuild_tree();
263}
264
265void Profile::clear_timestamp_filter_range()
266{
267 if (!m_has_timestamp_filter_range)
268 return;
269 m_has_timestamp_filter_range = false;
270 rebuild_tree();
271}
272
273void Profile::set_inverted(bool inverted)
274{
275 if (m_inverted == inverted)
276 return;
277 m_inverted = inverted;
278 rebuild_tree();
279}
280
281void Profile::set_show_percentages(bool show_percentages)
282{
283 if (m_show_percentages == show_percentages)
284 return;
285 m_show_percentages = show_percentages;
286}