// Serenity Operating System
1/*
2 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
3 * Copyright (c) 2021, Idan Horowitz <idan.horowitz@serenityos.org>
4 *
5 * SPDX-License-Identifier: BSD-2-Clause
6 */
7
8#include <Kernel/Arch/SmapDisabler.h>
9#include <Kernel/InterruptDisabler.h>
10#include <Kernel/Process.h>
11
12namespace Kernel {
13
14ErrorOr<FlatPtr> Process::sys$sigprocmask(int how, Userspace<sigset_t const*> set, Userspace<sigset_t*> old_set)
15{
16 VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
17 TRY(require_promise(Pledge::sigaction));
18 auto* current_thread = Thread::current();
19 u32 previous_signal_mask;
20 if (set) {
21 auto set_value = TRY(copy_typed_from_user(set));
22 switch (how) {
23 case SIG_BLOCK:
24 previous_signal_mask = current_thread->signal_mask_block(set_value, true);
25 break;
26 case SIG_UNBLOCK:
27 previous_signal_mask = current_thread->signal_mask_block(set_value, false);
28 break;
29 case SIG_SETMASK:
30 previous_signal_mask = current_thread->update_signal_mask(set_value);
31 break;
32 default:
33 return EINVAL;
34 }
35 } else {
36 previous_signal_mask = current_thread->signal_mask();
37 }
38 if (old_set) {
39 TRY(copy_to_user(old_set, &previous_signal_mask));
40 }
41 return 0;
42}
43
44ErrorOr<FlatPtr> Process::sys$sigpending(Userspace<sigset_t*> set)
45{
46 VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
47 TRY(require_promise(Pledge::stdio));
48 auto pending_signals = Thread::current()->pending_signals();
49 TRY(copy_to_user(set, &pending_signals));
50 return 0;
51}
52
// Examine and/or change the disposition of a signal (POSIX sigaction).
// Rejects invalid signal numbers and the two unblockable signals (SIGKILL, SIGSTOP).
// Reads the old action BEFORE installing the new one, so calling with
// user_act == user_old_act behaves correctly.
ErrorOr<FlatPtr> Process::sys$sigaction(int signum, Userspace<sigaction const*> user_act, Userspace<sigaction*> user_old_act)
{
    VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
    TRY(require_promise(Pledge::sigaction));
    if (signum < 1 || signum >= NSIG || signum == SIGKILL || signum == SIGSTOP)
        return EINVAL;

    // Keep the per-process action table consistent while we read/write it.
    InterruptDisabler disabler; // FIXME: This should use a narrower lock. Maybe a way to ignore signals temporarily?
    auto& action = m_signal_action_data[signum];
    if (user_old_act) {
        // Copy the currently installed action out to userspace.
        sigaction old_act {};
        old_act.sa_flags = action.flags;
        old_act.sa_sigaction = reinterpret_cast<decltype(old_act.sa_sigaction)>(action.handler_or_sigaction.as_ptr());
        old_act.sa_mask = action.mask;
        TRY(copy_to_user(user_old_act, &old_act));
    }
    if (user_act) {
        // Install the new action. The handler/sigaction pointer is stored as a
        // VirtualAddress; the kernel treats plain handlers and SA_SIGINFO handlers uniformly here.
        auto act = TRY(copy_typed_from_user(user_act));
        action.mask = act.sa_mask;
        action.flags = act.sa_flags;
        action.handler_or_sigaction = VirtualAddress { reinterpret_cast<void*>(act.sa_sigaction) };
    }
    return 0;
}
77
// Return from a signal handler: unwind the frame that dispatch_signal and
// asm_signal_trampoline pushed onto the user stack, restoring the thread's
// FPU state, signal mask, and general-purpose registers. Returns the saved
// ax value so the interrupted syscall's result is preserved.
ErrorOr<FlatPtr> Process::sys$sigreturn([[maybe_unused]] RegisterState& registers)
{
    VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
    TRY(require_promise(Pledge::stdio));
    SmapDisabler disabler;

    // Here, we restore the state pushed by dispatch signal and asm_signal_trampoline.
    auto stack_ptr = registers.userspace_sp();

    // Stack state (created by the signal trampoline):
    // saved_ax, ucontext, signal_info, fpu_state?.

#if ARCH(X86_64)
    // The FPU state is at the top here, pop it off and restore it.
    // FIXME: The stack alignment is off by 8 bytes here, figure this out and remove this excessively aligned object.
    alignas(alignof(FPUState) * 2) FPUState data {};
    TRY(copy_from_user(&data, bit_cast<FPUState const*>(stack_ptr)));
    Thread::current()->fpu_state() = data;
    stack_ptr += sizeof(FPUState);
#endif

    // Skip over the siginfo the trampoline pushed for the handler.
    stack_ptr += sizeof(siginfo); // We don't need this here.

    auto ucontext = TRY(copy_typed_from_user<__ucontext>(stack_ptr));
    stack_ptr += sizeof(__ucontext);

    auto saved_ax = TRY(copy_typed_from_user<FlatPtr>(stack_ptr));

    // Restore the pre-handler signal mask and mark that no signal is being handled anymore.
    Thread::current()->m_signal_mask = ucontext.uc_sigmask;
    Thread::current()->m_currently_handled_signal = 0;
#if ARCH(X86_64)
    // Stash the current rsp: the register copy below overwrites registers.rsp
    // with the value saved in uc_mcontext (the userspace stack pointer).
    auto sp = registers.rsp;
#endif

    copy_ptrace_registers_into_kernel_registers(registers, static_cast<PtraceRegisters const&>(ucontext.uc_mcontext));

#if ARCH(X86_64)
    // Move the restored userspace sp into its dedicated slot, then put the
    // stashed rsp back so the trap frame's own rsp is unchanged.
    registers.set_userspace_sp(registers.rsp);
    registers.rsp = sp;
#endif

    return saved_ax;
}
121
122ErrorOr<void> Process::remap_range_as_stack(FlatPtr address, size_t size)
123{
124 // FIXME: This duplicates a lot of logic from sys$mprotect, this should be abstracted out somehow
125 auto range_to_remap = TRY(Memory::expand_range_to_page_boundaries(address, size));
126 if (!range_to_remap.size())
127 return EINVAL;
128
129 if (!is_user_range(range_to_remap))
130 return EFAULT;
131
132 return address_space().with([&](auto& space) -> ErrorOr<void> {
133 if (auto* whole_region = space->find_region_from_range(range_to_remap)) {
134 if (!whole_region->is_mmap())
135 return EPERM;
136 if (!whole_region->vmobject().is_anonymous() || whole_region->is_shared())
137 return EINVAL;
138 whole_region->unsafe_clear_access();
139 whole_region->set_readable(true);
140 whole_region->set_writable(true);
141 whole_region->set_stack(true);
142 whole_region->set_syscall_region(false);
143 whole_region->clear_to_zero();
144 whole_region->remap();
145
146 return {};
147 }
148
149 if (auto* old_region = space->find_region_containing(range_to_remap)) {
150 if (!old_region->is_mmap())
151 return EPERM;
152 if (!old_region->vmobject().is_anonymous() || old_region->is_shared())
153 return EINVAL;
154
155 // Remove the old region from our regions tree, since were going to add another region
156 // with the exact same start address.
157 auto region = space->take_region(*old_region);
158 region->unmap();
159
160 // This vector is the region(s) adjacent to our range.
161 // We need to allocate a new region for the range we wanted to change permission bits on.
162 auto adjacent_regions = TRY(space->try_split_region_around_range(*region, range_to_remap));
163
164 size_t new_range_offset_in_vmobject = region->offset_in_vmobject() + (range_to_remap.base().get() - region->range().base().get());
165 auto* new_region = TRY(space->try_allocate_split_region(*region, range_to_remap, new_range_offset_in_vmobject));
166 new_region->unsafe_clear_access();
167 new_region->set_readable(true);
168 new_region->set_writable(true);
169 new_region->set_stack(true);
170 new_region->set_syscall_region(false);
171 new_region->clear_to_zero();
172
173 // Map the new regions using our page directory (they were just allocated and don't have one).
174 for (auto* adjacent_region : adjacent_regions) {
175 TRY(adjacent_region->map(space->page_directory()));
176 }
177 TRY(new_region->map(space->page_directory()));
178
179 return {};
180 }
181
182 if (auto const& regions = TRY(space->find_regions_intersecting(range_to_remap)); regions.size()) {
183 size_t full_size_found = 0;
184 // Check that all intersecting regions are compatible.
185 for (auto const* region : regions) {
186 if (!region->is_mmap())
187 return EPERM;
188 if (!region->vmobject().is_anonymous() || region->is_shared())
189 return EINVAL;
190 full_size_found += region->range().intersect(range_to_remap).size();
191 }
192
193 if (full_size_found != range_to_remap.size())
194 return ENOMEM;
195
196 // Finally, iterate over each region, either updating its access flags if the range covers it wholly,
197 // or carving out a new subregion with the appropriate access flags set.
198 for (auto* old_region : regions) {
199 auto const intersection_to_remap = range_to_remap.intersect(old_region->range());
200 // If the region is completely covered by range, simply update the access flags
201 if (intersection_to_remap == old_region->range()) {
202 old_region->unsafe_clear_access();
203 old_region->set_readable(true);
204 old_region->set_writable(true);
205 old_region->set_stack(true);
206 old_region->set_syscall_region(false);
207 old_region->clear_to_zero();
208 old_region->remap();
209 continue;
210 }
211 // Remove the old region from our regions tree, since were going to add another region
212 // with the exact same start address.
213 auto region = space->take_region(*old_region);
214 region->unmap();
215
216 // This vector is the region(s) adjacent to our range.
217 // We need to allocate a new region for the range we wanted to change permission bits on.
218 auto adjacent_regions = TRY(space->try_split_region_around_range(*old_region, intersection_to_remap));
219
220 // Since the range is not contained in a single region, it can only partially cover its starting and ending region,
221 // therefore carving out a chunk from the region will always produce a single extra region, and not two.
222 VERIFY(adjacent_regions.size() == 1);
223
224 size_t new_range_offset_in_vmobject = old_region->offset_in_vmobject() + (intersection_to_remap.base().get() - old_region->range().base().get());
225 auto* new_region = TRY(space->try_allocate_split_region(*region, intersection_to_remap, new_range_offset_in_vmobject));
226
227 new_region->unsafe_clear_access();
228 new_region->set_readable(true);
229 new_region->set_writable(true);
230 new_region->set_stack(true);
231 new_region->set_syscall_region(false);
232 new_region->clear_to_zero();
233
234 // Map the new region using our page directory (they were just allocated and don't have one) if any.
235 TRY(adjacent_regions[0]->map(space->page_directory()));
236
237 TRY(new_region->map(space->page_directory()));
238 }
239
240 return {};
241 }
242
243 return EINVAL;
244 });
245}
246
247ErrorOr<FlatPtr> Process::sys$sigaltstack(Userspace<stack_t const*> user_ss, Userspace<stack_t*> user_old_ss)
248{
249 VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
250 TRY(require_promise(Pledge::sigaction));
251
252 if (user_old_ss) {
253 stack_t old_ss_value {};
254 old_ss_value.ss_sp = (void*)Thread::current()->m_alternative_signal_stack;
255 old_ss_value.ss_size = Thread::current()->m_alternative_signal_stack_size;
256 old_ss_value.ss_flags = 0;
257 if (!Thread::current()->has_alternative_signal_stack())
258 old_ss_value.ss_flags = SS_DISABLE;
259 else if (Thread::current()->is_in_alternative_signal_stack())
260 old_ss_value.ss_flags = SS_ONSTACK;
261 TRY(copy_to_user(user_old_ss, &old_ss_value));
262 }
263
264 if (user_ss) {
265 auto ss = TRY(copy_typed_from_user(user_ss));
266
267 if (Thread::current()->is_in_alternative_signal_stack())
268 return EPERM;
269
270 if (ss.ss_flags == SS_DISABLE) {
271 Thread::current()->m_alternative_signal_stack_size = 0;
272 Thread::current()->m_alternative_signal_stack = 0;
273 } else if (ss.ss_flags == 0) {
274 if (ss.ss_size <= MINSIGSTKSZ)
275 return ENOMEM;
276 if (Checked<FlatPtr>::addition_would_overflow((FlatPtr)ss.ss_sp, ss.ss_size))
277 return ENOMEM;
278
279 // In order to preserve compatibility with our MAP_STACK, W^X and syscall region
280 // protections, sigaltstack ranges are carved out of their regions, zeroed, and
281 // turned into read/writable MAP_STACK-enabled regions.
282 // This is inspired by OpenBSD's solution: https://man.openbsd.org/sigaltstack.2
283 TRY(remap_range_as_stack((FlatPtr)ss.ss_sp, ss.ss_size));
284
285 Thread::current()->m_alternative_signal_stack = (FlatPtr)ss.ss_sp;
286 Thread::current()->m_alternative_signal_stack_size = ss.ss_size;
287 } else {
288 return EINVAL;
289 }
290 }
291
292 return 0;
293}
294
295// https://pubs.opengroup.org/onlinepubs/9699919799/functions/sigtimedwait.html
296ErrorOr<FlatPtr> Process::sys$sigtimedwait(Userspace<sigset_t const*> set, Userspace<siginfo_t*> info, Userspace<timespec const*> timeout)
297{
298 VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
299 TRY(require_promise(Pledge::sigaction));
300
301 sigset_t set_value;
302 TRY(copy_from_user(&set_value, set));
303
304 Thread::BlockTimeout block_timeout = {};
305 if (timeout) {
306 auto timeout_time = TRY(copy_time_from_user(timeout));
307 block_timeout = Thread::BlockTimeout(false, &timeout_time);
308 }
309
310 siginfo_t info_value = {};
311 auto block_result = Thread::current()->block<Thread::SignalBlocker>(block_timeout, set_value, info_value);
312 if (block_result.was_interrupted())
313 return EINTR;
314 // We check for an unset signal instead of directly checking for a timeout interruption
315 // in order to allow polling the pending signals by setting the timeout to 0.
316 if (info_value.si_signo == SIGINVAL) {
317 VERIFY(block_result == Thread::BlockResult::InterruptedByTimeout);
318 return EAGAIN;
319 }
320
321 if (info)
322 TRY(copy_to_user(info, &info_value));
323 return info_value.si_signo;
324}
325
326// https://pubs.opengroup.org/onlinepubs/9699919799/functions/sigsuspend.html
327ErrorOr<FlatPtr> Process::sys$sigsuspend(Userspace<sigset_t const*> mask)
328{
329 VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
330
331 auto sigmask = TRY(copy_typed_from_user(mask));
332
333 auto* current_thread = Thread::current();
334
335 u32 previous_signal_mask = current_thread->update_signal_mask(sigmask);
336 ScopeGuard rollback_signal_mask([&]() {
337 current_thread->update_signal_mask(previous_signal_mask);
338 });
339
340 // TODO: Ensure that/check if we never return if the action is to terminate the process.
341 // TODO: Ensure that/check if we only return after an eventual signal-catching function returns.
342 Thread::BlockTimeout timeout = {};
343 siginfo_t siginfo = {};
344 if (current_thread->block<Thread::SignalBlocker>(timeout, ~sigmask, siginfo).was_interrupted())
345 return EINTR;
346
347 return 0;
348}
349
350}