// Serenity Operating System
1/*
2 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
3 *
4 * SPDX-License-Identifier: BSD-2-Clause
5 */
6
7#include <AK/TemporaryChange.h>
8#include <Kernel/Arch/SafeMem.h>
9#include <Kernel/Arch/SmapDisabler.h>
10#include <Kernel/FileSystem/OpenFileDescription.h>
11#include <Kernel/KSyms.h>
12#include <Kernel/Process.h>
13#include <Kernel/Scheduler.h>
14#include <Kernel/Sections.h>
15
16namespace Kernel {
17
// Address range covered by the loaded symbol table; used as a fast
// reject filter in symbolicate_kernel_address().
FlatPtr g_lowest_kernel_symbol_address = 0xffffffff;
FlatPtr g_highest_kernel_symbol_address = 0;
// Set to true once load_kernel_symbols_from_data() has finished.
bool g_kernel_symbols_available = false;

extern "C" {
// Reserved region the build process fills with the textual kernel symbol
// map ("<count>\n<addr> t <name>\n..."); parsed at boot by
// load_kernel_symbol_table(). Grow this if the symbol file outgrows it.
__attribute__((section(".kernel_symbols"))) char kernel_symbols[5 * MiB] {};
}

// Parsed symbol table: s_symbol_count entries allocated in
// load_kernel_symbols_from_data(); names point into kernel_symbols[].
static KernelSymbol* s_symbols;
static size_t s_symbol_count = 0;
28
29UNMAP_AFTER_INIT static u8 parse_hex_digit(char nibble)
30{
31 if (nibble >= '0' && nibble <= '9')
32 return nibble - '0';
33 VERIFY(nibble >= 'a' && nibble <= 'f');
34 return 10 + (nibble - 'a');
35}
36
37FlatPtr address_for_kernel_symbol(StringView name)
38{
39 for (size_t i = 0; i < s_symbol_count; ++i) {
40 auto const& symbol = s_symbols[i];
41 if (name == symbol.name)
42 return symbol.address;
43 }
44 return 0;
45}
46
47KernelSymbol const* symbolicate_kernel_address(FlatPtr address)
48{
49 if (address < g_lowest_kernel_symbol_address || address > g_highest_kernel_symbol_address)
50 return nullptr;
51 for (unsigned i = 0; i < s_symbol_count; ++i) {
52 if (address < s_symbols[i + 1].address)
53 return &s_symbols[i];
54 }
55 return nullptr;
56}
57
// Parses the textual symbol map in `buffer` into s_symbols and updates
// the g_lowest/g_highest address bounds. Expected format (from the
// build): an 8-hex-digit symbol count, a newline, then one line per
// symbol: "<address-in-hex> <type-char> <name>\n". The buffer is
// mutated in place: each '\n' terminating a name is overwritten with
// '\0' so KernelSymbol::name can point directly into the buffer.
UNMAP_AFTER_INIT static void load_kernel_symbols_from_data(Bytes buffer)
{
    g_lowest_kernel_symbol_address = 0xffffffff;
    g_highest_kernel_symbol_address = 0;

    auto* bufptr = (char*)buffer.data();
    auto* start_of_name = bufptr;
    FlatPtr address = 0;

    // First line: 8 hex digits giving the number of symbols that follow.
    for (size_t i = 0; i < 8; ++i)
        s_symbol_count = (s_symbol_count << 4) | parse_hex_digit(*(bufptr++));
    s_symbols = static_cast<KernelSymbol*>(kmalloc(sizeof(KernelSymbol) * s_symbol_count));
    ++bufptr; // skip newline

    dmesgln("Loading kernel symbol table...");

    size_t current_symbol_index = 0;

    while ((u8 const*)bufptr < buffer.data() + buffer.size()) {
        // Address field: exactly 2 hex chars per pointer byte.
        for (size_t i = 0; i < sizeof(void*) * 2; ++i)
            address = (address << 4) | parse_hex_digit(*(bufptr++));
        // Skip " <type-char> " (space, symbol-type character, space).
        bufptr += 3;
        start_of_name = bufptr;
        // Advance to the newline (or NUL) ending the symbol name.
        while (*(++bufptr)) {
            if (*bufptr == '\n') {
                break;
            }
        }
        auto& ksym = s_symbols[current_symbol_index];

        // FIXME: Remove this ifdef once the aarch64 kernel is loaded by the Prekernel.
        // Currently, the aarch64 kernel is linked at a high virtual memory address, instead
        // of zero, so the address of a symbol does not need to be offset by the kernel_load_base.
#if ARCH(X86_64)
        ksym.address = kernel_load_base + address;
#elif ARCH(AARCH64)
        ksym.address = address;
#else
#    error "Unknown architecture"
#endif
        // Name points into the buffer itself; terminated just below.
        ksym.name = start_of_name;

        // Overwrite the trailing '\n' so the name is NUL-terminated in place.
        *bufptr = '\0';

        if (ksym.address < g_lowest_kernel_symbol_address)
            g_lowest_kernel_symbol_address = ksym.address;
        if (ksym.address > g_highest_kernel_symbol_address)
            g_highest_kernel_symbol_address = ksym.address;

        ++bufptr;
        ++current_symbol_index;
    }
    g_kernel_symbols_available = true;
}
112
// Walks the saved-frame-pointer chain starting at `base_pointer` and
// prints one line per return address. With use_ksyms, addresses are
// first collected and symbolicated; otherwise raw addresses are printed
// as the chain is walked. Each frame is read with safe_memcpy as a pair
// { next frame pointer, return address } so a corrupt chain ends the
// walk instead of faulting.
NEVER_INLINE static void dump_backtrace_impl(FlatPtr base_pointer, bool use_ksyms, PrintToScreen print_to_screen)
{
// Routes output to the debug log or to the (critical) kernel log,
// depending on the caller's PrintToScreen choice.
#define PRINT_LINE(fmtstr, ...) \
    do { \
        if (print_to_screen == PrintToScreen::No) \
            dbgln(fmtstr, __VA_ARGS__); \
        else \
            critical_dmesgln(fmtstr, __VA_ARGS__); \
    } while (0)

    SmapDisabler disabler;
    // Symbolication was requested but no symbols are loaded: nothing
    // sensible can be printed, so halt.
    if (use_ksyms && !g_kernel_symbols_available)
        Processor::halt();

    struct RecognizedSymbol {
        FlatPtr address;
        KernelSymbol const* symbol { nullptr };
    };
    constexpr size_t max_recognized_symbol_count = 256;
    RecognizedSymbol recognized_symbols[max_recognized_symbol_count];
    size_t recognized_symbol_count = 0;
    if (use_ksyms) {
        // copied_stack_ptr[0] = saved frame pointer, [1] = return address.
        FlatPtr copied_stack_ptr[2];
        for (FlatPtr* stack_ptr = (FlatPtr*)base_pointer; stack_ptr && recognized_symbol_count < max_recognized_symbol_count; stack_ptr = (FlatPtr*)copied_stack_ptr[0]) {
            // Stop once the chain leaves kernel address space.
            if ((FlatPtr)stack_ptr < kernel_mapping_base)
                break;

            void* fault_at;
            if (!safe_memcpy(copied_stack_ptr, stack_ptr, sizeof(copied_stack_ptr), fault_at))
                break;
            FlatPtr retaddr = copied_stack_ptr[1];
            recognized_symbols[recognized_symbol_count++] = { retaddr, symbolicate_kernel_address(retaddr) };
        }
    } else {
        // No symbols: print raw return addresses while walking, then return.
        void* fault_at;
        FlatPtr copied_stack_ptr[2];
        FlatPtr* stack_ptr = (FlatPtr*)base_pointer;
        while (stack_ptr && safe_memcpy(copied_stack_ptr, stack_ptr, sizeof(copied_stack_ptr), fault_at)) {
            FlatPtr retaddr = copied_stack_ptr[1];
            PRINT_LINE("{:p} (next: {:p})", retaddr, stack_ptr ? (FlatPtr*)copied_stack_ptr[0] : 0);
            stack_ptr = (FlatPtr*)copied_stack_ptr[0];
        }
        return;
    }
    VERIFY(recognized_symbol_count <= max_recognized_symbol_count);
    for (size_t i = 0; i < recognized_symbol_count; ++i) {
        auto& symbol = recognized_symbols[i];
        if (!symbol.address)
            break;
        if (!symbol.symbol) {
            // Address is in kernel space but matched no symbol.
            PRINT_LINE("Kernel + {:p}", symbol.address - kernel_load_base);
            continue;
        }
        size_t offset = symbol.address - symbol.symbol->address;
        // A huge offset past the last symbol likely means the address is
        // beyond the symbolized region; don't attribute it to that symbol.
        if (symbol.symbol->address == g_highest_kernel_symbol_address && offset > 4096)
            PRINT_LINE("Kernel + {:p}", symbol.address - kernel_load_base);
        else
            PRINT_LINE("Kernel + {:p} {} +{:#x}", symbol.address - kernel_load_base, symbol.symbol->name, offset);
    }
}
173
174void dump_backtrace_from_base_pointer(FlatPtr base_pointer)
175{
176 // FIXME: Change signature of dump_backtrace_impl to use an enum instead of a bool.
177 dump_backtrace_impl(base_pointer, /*use_ksym=*/false, PrintToScreen::No);
178}
179
180void dump_backtrace(PrintToScreen print_to_screen)
181{
182 static bool in_dump_backtrace = false;
183 if (in_dump_backtrace)
184 return;
185 TemporaryChange change(in_dump_backtrace, true);
186 TemporaryChange disable_kmalloc_stacks(g_dump_kmalloc_stacks, false);
187
188 FlatPtr base_pointer = (FlatPtr)__builtin_frame_address(0);
189 dump_backtrace_impl(base_pointer, g_kernel_symbols_available, print_to_screen);
190}
191
192UNMAP_AFTER_INIT void load_kernel_symbol_table()
193{
194 auto kernel_symbols_size = strnlen(kernel_symbols, sizeof(kernel_symbols));
195 // If we're hitting this VERIFY the kernel symbol file has grown beyond
196 // the array size of kernel_symbols. Try making the array larger.
197 VERIFY(kernel_symbols_size != sizeof(kernel_symbols));
198 load_kernel_symbols_from_data({ kernel_symbols, kernel_symbols_size });
199}
200
201}