// Serenity Operating System
1/*
2 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
3 *
4 * SPDX-License-Identifier: BSD-2-Clause
5 */
6
7#include <AK/Assertions.h>
8#include <AK/MemMem.h>
9#include <AK/Types.h>
10#include <Kernel/Arch/SafeMem.h>
11#include <Kernel/Arch/SmapDisabler.h>
12#include <Kernel/Memory/MemoryManager.h>
13#include <Kernel/StdLib.h>
14
// Copies a NUL-terminated string of at most `user_str_size` bytes from
// userspace into a newly allocated KString. Fails with EFAULT if the source
// range is not user memory, or if a page fault occurs while reading it.
ErrorOr<NonnullOwnPtr<Kernel::KString>> try_copy_kstring_from_user(Userspace<char const*> user_str, size_t user_str_size)
{
    bool is_user = Kernel::Memory::is_user_range(user_str.vaddr(), user_str_size);
    if (!is_user)
        return EFAULT;
    // Accessing user memory requires SMAP to be disabled for the duration.
    Kernel::SmapDisabler disabler;
    void* fault_at;
    // safe_strnlen() returns a negative length if it faulted mid-scan;
    // fault_at then holds the faulting address for diagnostics.
    ssize_t length = Kernel::safe_strnlen(user_str.unsafe_userspace_ptr(), user_str_size, fault_at);
    if (length < 0) {
        dbgln("copy_kstring_from_user({:p}, {}) failed at {} (strnlen)", static_cast<void const*>(user_str.unsafe_userspace_ptr()), user_str_size, VirtualAddress { fault_at });
        return EFAULT;
    }
    char* buffer;
    auto new_string = TRY(Kernel::KString::try_create_uninitialized(length, buffer));

    // Terminate up front so the empty-string early return below is valid.
    buffer[length] = '\0';

    if (length == 0)
        return new_string;

    // The string could be unmapped between the strnlen above and this copy,
    // so the copy itself must also be fault-tolerant.
    if (!Kernel::safe_memcpy(buffer, user_str.unsafe_userspace_ptr(), (size_t)length, fault_at)) {
        dbgln("copy_kstring_from_user({:p}, {}) failed at {} (memcpy)", static_cast<void const*>(user_str.unsafe_userspace_ptr()), user_str_size, VirtualAddress { fault_at });
        return EFAULT;
    }
    return new_string;
}
41
42ErrorOr<Time> copy_time_from_user(timespec const* ts_user)
43{
44 timespec ts {};
45 TRY(copy_from_user(&ts, ts_user, sizeof(timespec)));
46 return Time::from_timespec(ts);
47}
48
49ErrorOr<Time> copy_time_from_user(timeval const* tv_user)
50{
51 timeval tv {};
52 TRY(copy_from_user(&tv, tv_user, sizeof(timeval)));
53 return Time::from_timeval(tv);
54}
55
56template<>
57ErrorOr<Time> copy_time_from_user<timeval const>(Userspace<timeval const*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
58template<>
59ErrorOr<Time> copy_time_from_user<timeval>(Userspace<timeval*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
60template<>
61ErrorOr<Time> copy_time_from_user<timespec const>(Userspace<timespec const*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
62template<>
63ErrorOr<Time> copy_time_from_user<timespec>(Userspace<timespec*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
64
65Optional<u32> user_atomic_fetch_add_relaxed(u32 volatile* var, u32 val)
66{
67 if (FlatPtr(var) & 3)
68 return {}; // not aligned!
69 bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
70 if (!is_user)
71 return {};
72 Kernel::SmapDisabler disabler;
73 return Kernel::safe_atomic_fetch_add_relaxed(var, val);
74}
75
76Optional<u32> user_atomic_exchange_relaxed(u32 volatile* var, u32 val)
77{
78 if (FlatPtr(var) & 3)
79 return {}; // not aligned!
80 bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
81 if (!is_user)
82 return {};
83 Kernel::SmapDisabler disabler;
84 return Kernel::safe_atomic_exchange_relaxed(var, val);
85}
86
87Optional<u32> user_atomic_load_relaxed(u32 volatile* var)
88{
89 if (FlatPtr(var) & 3)
90 return {}; // not aligned!
91 bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
92 if (!is_user)
93 return {};
94 Kernel::SmapDisabler disabler;
95 return Kernel::safe_atomic_load_relaxed(var);
96}
97
98bool user_atomic_store_relaxed(u32 volatile* var, u32 val)
99{
100 if (FlatPtr(var) & 3)
101 return false; // not aligned!
102 bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
103 if (!is_user)
104 return false;
105 Kernel::SmapDisabler disabler;
106 return Kernel::safe_atomic_store_relaxed(var, val);
107}
108
109Optional<bool> user_atomic_compare_exchange_relaxed(u32 volatile* var, u32& expected, u32 val)
110{
111 if (FlatPtr(var) & 3)
112 return {}; // not aligned!
113 VERIFY(!Kernel::Memory::is_user_range(VirtualAddress(&expected), sizeof(expected)));
114 bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
115 if (!is_user)
116 return {};
117 Kernel::SmapDisabler disabler;
118 return Kernel::safe_atomic_compare_exchange_relaxed(var, expected, val);
119}
120
121Optional<u32> user_atomic_fetch_and_relaxed(u32 volatile* var, u32 val)
122{
123 if (FlatPtr(var) & 3)
124 return {}; // not aligned!
125 bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
126 if (!is_user)
127 return {};
128 Kernel::SmapDisabler disabler;
129 return Kernel::safe_atomic_fetch_and_relaxed(var, val);
130}
131
132Optional<u32> user_atomic_fetch_and_not_relaxed(u32 volatile* var, u32 val)
133{
134 if (FlatPtr(var) & 3)
135 return {}; // not aligned!
136 bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
137 if (!is_user)
138 return {};
139 Kernel::SmapDisabler disabler;
140 return Kernel::safe_atomic_fetch_and_not_relaxed(var, val);
141}
142
143Optional<u32> user_atomic_fetch_or_relaxed(u32 volatile* var, u32 val)
144{
145 if (FlatPtr(var) & 3)
146 return {}; // not aligned!
147 bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
148 if (!is_user)
149 return {};
150 Kernel::SmapDisabler disabler;
151 return Kernel::safe_atomic_fetch_or_relaxed(var, val);
152}
153
154Optional<u32> user_atomic_fetch_xor_relaxed(u32 volatile* var, u32 val)
155{
156 if (FlatPtr(var) & 3)
157 return {}; // not aligned!
158 bool is_user = Kernel::Memory::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
159 if (!is_user)
160 return {};
161 Kernel::SmapDisabler disabler;
162 return Kernel::safe_atomic_fetch_xor_relaxed(var, val);
163}
164
165ErrorOr<void> copy_to_user(void* dest_ptr, void const* src_ptr, size_t n)
166{
167 if (!Kernel::Memory::is_user_range(VirtualAddress(dest_ptr), n))
168 return EFAULT;
169 VERIFY(!Kernel::Memory::is_user_range(VirtualAddress(src_ptr), n));
170 Kernel::SmapDisabler disabler;
171 void* fault_at;
172 if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
173 VERIFY(VirtualAddress(fault_at) >= VirtualAddress(dest_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)dest_ptr + n));
174 dbgln("copy_to_user({:p}, {:p}, {}) failed at {}", dest_ptr, src_ptr, n, VirtualAddress { fault_at });
175 return EFAULT;
176 }
177 return {};
178}
179
180ErrorOr<void> copy_from_user(void* dest_ptr, void const* src_ptr, size_t n)
181{
182 if (!Kernel::Memory::is_user_range(VirtualAddress(src_ptr), n))
183 return EFAULT;
184 VERIFY(!Kernel::Memory::is_user_range(VirtualAddress(dest_ptr), n));
185 Kernel::SmapDisabler disabler;
186 void* fault_at;
187 if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
188 VERIFY(VirtualAddress(fault_at) >= VirtualAddress(src_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)src_ptr + n));
189 dbgln("copy_from_user({:p}, {:p}, {}) failed at {}", dest_ptr, src_ptr, n, VirtualAddress { fault_at });
190 return EFAULT;
191 }
192 return {};
193}
194
195ErrorOr<void> memset_user(void* dest_ptr, int c, size_t n)
196{
197 bool is_user = Kernel::Memory::is_user_range(VirtualAddress(dest_ptr), n);
198 if (!is_user)
199 return EFAULT;
200 Kernel::SmapDisabler disabler;
201 void* fault_at;
202 if (!Kernel::safe_memset(dest_ptr, c, n, fault_at)) {
203 dbgln("memset_user({:p}, {}, {}) failed at {}", dest_ptr, c, n, VirtualAddress { fault_at });
204 return EFAULT;
205 }
206 return {};
207}
208
#if defined(AK_COMPILER_CLANG) && defined(ENABLE_KERNEL_LTO)
// Due to a chicken-and-egg situation, certain linker-defined symbols that are added on-demand (like the GOT)
// need to be present before LTO bitcode files are compiled. And since we don't link to any native object files,
// the linker does not know that _GLOBAL_OFFSET_TABLE_ is needed, so it doesn't define it, so linking as a PIE fails.
// See https://bugs.llvm.org/show_bug.cgi?id=39634
FlatPtr missing_got_workaround()
{
    // Referencing the symbol here forces the linker to materialize the GOT.
    extern volatile FlatPtr _GLOBAL_OFFSET_TABLE_;
    return _GLOBAL_OFFSET_TABLE_;
}
#endif
220
221extern "C" {
222
223void const* memmem(void const* haystack, size_t haystack_length, void const* needle, size_t needle_length)
224{
225 return AK::memmem(haystack, haystack_length, needle, needle_length);
226}
227
228// Functions that are automatically called by the C++ compiler.
229// Declare them first, to tell the silly compiler that they are indeed being used.
230[[noreturn]] void __stack_chk_fail() __attribute__((used));
231[[noreturn]] void __stack_chk_fail_local() __attribute__((used));
232extern "C" int __cxa_atexit(void (*)(void*), void*, void*);
233[[noreturn]] void __cxa_pure_virtual();
234
235[[noreturn]] void __stack_chk_fail()
236{
237 VERIFY_NOT_REACHED();
238}
239
240[[noreturn]] void __stack_chk_fail_local()
241{
242 VERIFY_NOT_REACHED();
243}
244
245extern "C" int __cxa_atexit(void (*)(void*), void*, void*)
246{
247 VERIFY_NOT_REACHED();
248 return 0;
249}
250
251[[noreturn]] void __cxa_pure_virtual()
252{
253 VERIFY_NOT_REACHED();
254}
255}