/*
 * Copyright (c) 2020, Itamar S. <itamar8910@gmail.com>
 * Copyright (c) 2020-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/ScopeGuard.h>
#include <Kernel/Memory/PrivateInodeVMObject.h>
#include <Kernel/Memory/Region.h>
#include <Kernel/Memory/ScopedAddressSpaceSwitcher.h>
#include <Kernel/Memory/SharedInodeVMObject.h>
#include <Kernel/Process.h>
#include <Kernel/Scheduler.h>
#include <Kernel/ThreadTracer.h>

namespace Kernel {

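// Validates the ptrace request and the target thread, performs the permission
// checks, and then carries out the requested operation on the traced peer.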
static ErrorOr<FlatPtr> handle_ptrace(Kernel::Syscall::SC_ptrace_params const& params, Process& caller)
{
    SpinlockLocker scheduler_lock(g_scheduler_lock);
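    // PT_TRACE_ME is issued by the (future) tracee itself: it marks the process
    // so that it will stop and wait for a tracer at its next execve().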
    if (params.request == PT_TRACE_ME) {
        if (Process::current().tracer())
            return EBUSY;

        caller.set_wait_for_tracer_at_next_execve(true);
        return 0;
    }

    // FIXME: PID/TID BUG
    // This bug makes it possible to issue PT_ATTACH (or any other request) against
    // the calling process itself, as long as the target is not its main thread.
    // Alternatively, if that is desired behavior, then the bug is that this check
    // prevents PT_ATTACH to the main thread from another thread in the same process.
    if (params.tid == caller.pid().value())
        return EINVAL;

    auto peer = Thread::from_tid(params.tid);
    if (!peer)
        return ESRCH;

    MutexLocker ptrace_locker(peer->process().ptrace_lock());

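    // Only allow tracing when the caller's effective UID matches the peer's real
    // UID, and reject peers whose real and effective UIDs differ (setuid binaries).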
    auto peer_credentials = peer->process().credentials();
    auto caller_credentials = caller.credentials();
    if ((peer_credentials->uid() != caller_credentials->euid())
        || (peer_credentials->uid() != peer_credentials->euid())) // Disallow tracing setuid processes
        return EACCES;

    if (!peer->process().is_dumpable())
        return EACCES;

    auto& peer_process = peer->process();
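    // PT_ATTACH: start tracing the peer. If the peer is already stopped we can
    // capture its register state right away; otherwise, signal it to stop.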
    if (params.request == PT_ATTACH) {
        if (peer_process.tracer()) {
            return EBUSY;
        }
        TRY(peer_process.start_tracing_from(caller.pid()));
        SpinlockLocker lock(peer->get_lock());
        if (peer->state() == Thread::State::Stopped) {
            peer_process.tracer()->set_regs(peer->get_register_dump_from_stack());
        } else {
            peer->send_signal(SIGSTOP, &caller);
        }
        return 0;
    }

    auto* tracer = peer_process.tracer();

    if (!tracer)
        return EPERM;

    if (tracer->tracer_pid() != caller.pid())
        return EBUSY;

    if (peer->state() == Thread::State::Running)
        return EBUSY;

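    // The remaining requests copy to/from user memory and may block, so release
    // the scheduler lock before handling them.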
    scheduler_lock.unlock();

    switch (params.request) {
    case PT_CONTINUE:
        peer->send_signal(SIGCONT, &caller);
        break;

    case PT_DETACH:
        peer_process.stop_tracing();
        peer->send_signal(SIGCONT, &caller);
        break;

    case PT_SYSCALL:
        tracer->set_trace_syscalls(true);
        peer->send_signal(SIGCONT, &caller);
        break;

    case PT_GETREGS: {
        if (!tracer->has_regs())
            return EINVAL;
        auto* regs = reinterpret_cast<PtraceRegisters*>(params.addr);
        TRY(copy_to_user(regs, &tracer->regs()));
        break;
    }

    case PT_SETREGS: {
        if (!tracer->has_regs())
            return EINVAL;

        PtraceRegisters regs {};
        TRY(copy_from_user(&regs, (PtraceRegisters const*)params.addr));

        auto& peer_saved_registers = peer->get_register_dump_from_stack();
        // Verify that the saved registers are in usermode context
        if (peer_saved_registers.previous_mode() != ExecutionMode::User)
            return EFAULT;

        tracer->set_regs(regs);
        copy_ptrace_registers_into_kernel_registers(peer_saved_registers, regs);
        break;
    }

    case PT_PEEK: {
        auto data = TRY(peer->process().peek_user_data(Userspace<FlatPtr const*> { (FlatPtr)params.addr }));
        TRY(copy_to_user((FlatPtr*)params.data, &data));
        break;
    }

    case PT_POKE:
        TRY(peer->process().poke_user_data(Userspace<FlatPtr*> { (FlatPtr)params.addr }, params.data));
        return 0;

    case PT_PEEKBUF: {
        Kernel::Syscall::SC_ptrace_buf_params buf_params {};
        TRY(copy_from_user(&buf_params, reinterpret_cast<Kernel::Syscall::SC_ptrace_buf_params*>(params.data)));
        // This is a comparatively large allocation on the Kernel stack.
        // However, we know that we're close to the root of the call stack, and the following calls shouldn't go too deep.
        Array<u8, PAGE_SIZE> buf;
        FlatPtr tracee_ptr = (FlatPtr)params.addr;
        while (buf_params.buf.size > 0) {
            size_t copy_this_iteration = min(buf.size(), buf_params.buf.size);
            TRY(peer->process().peek_user_data(buf.span().slice(0, copy_this_iteration), Userspace<u8 const*> { tracee_ptr }));
            TRY(copy_to_user((void*)buf_params.buf.data, buf.data(), copy_this_iteration));
            tracee_ptr += copy_this_iteration;
            buf_params.buf.data += copy_this_iteration;
            buf_params.buf.size -= copy_this_iteration;
        }
        break;
    }

    case PT_PEEKDEBUG: {
        auto data = TRY(peer->peek_debug_register(reinterpret_cast<uintptr_t>(params.addr)));
        TRY(copy_to_user((FlatPtr*)params.data, &data));
        break;
    }
    case PT_POKEDEBUG:
        TRY(peer->poke_debug_register(reinterpret_cast<uintptr_t>(params.addr), params.data));
        return 0;
    default:
        return EINVAL;
    }

    return 0;
}

ErrorOr<FlatPtr> Process::sys$ptrace(Userspace<Syscall::SC_ptrace_params const*> user_params)
{
    VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
    TRY(require_promise(Pledge::ptrace));
    auto params = TRY(copy_typed_from_user(user_params));

    return handle_ptrace(params, *this);
}
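
// Illustrative tracer-side flow for the requests handled above (a sketch only;
// the exact LibC wrapper signature and waitpid flags are assumptions here, not
// taken from this file):
//
//     if (ptrace(PT_ATTACH, tid, nullptr, nullptr) < 0)
//         return; // attach failed
//     waitpid(tid, nullptr, WSTOPPED); // wait for the tracee to stop
//     PtraceRegisters regs {};
//     ptrace(PT_GETREGS, tid, &regs, nullptr); // snapshot register state
//     ptrace(PT_DETACH, tid, nullptr, nullptr); // detach; resumes via SIGCONT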

/**
 * "Does this process have a thread that is currently being traced by the provided process?"
 */
bool Process::has_tracee_thread(ProcessID tracer_pid)
{
    if (auto const* tracer = this->tracer())
        return tracer->tracer_pid() == tracer_pid;
    return false;
}

ErrorOr<FlatPtr> Process::peek_user_data(Userspace<FlatPtr const*> address)
{
    // This function can be called from the context of another
    // process that called PT_PEEK
    ScopedAddressSpaceSwitcher switcher(*this);
    return TRY(copy_typed_from_user(address));
}

ErrorOr<void> Process::peek_user_data(Span<u8> destination, Userspace<u8 const*> address)
{
    // This function can be called from the context of another
    // process that called PT_PEEKBUF
    ScopedAddressSpaceSwitcher switcher(*this);
    TRY(copy_from_user(destination.data(), address, destination.size()));
    return {};
}

ErrorOr<void> Process::poke_user_data(Userspace<FlatPtr*> address, FlatPtr data)
{
    Memory::VirtualRange range = { address.vaddr(), sizeof(FlatPtr) };

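    // PT_POKE writes a single word; find the region containing that word-sized
    // range so we can make it temporarily writable if necessary.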
    return address_space().with([&](auto& space) -> ErrorOr<void> {
        auto* region = space->find_region_containing(range);
        if (!region)
            return EFAULT;
        ScopedAddressSpaceSwitcher switcher(*this);
        if (region->is_shared()) {
            // If the region is shared, we change its vmobject to a PrivateInodeVMObject
            // to prevent the write operation from changing any shared inode data
            VERIFY(region->vmobject().is_shared_inode());
            auto vmobject = TRY(Memory::PrivateInodeVMObject::try_create_with_inode(static_cast<Memory::SharedInodeVMObject&>(region->vmobject()).inode()));
            region->set_vmobject(move(vmobject));
            region->set_shared(false);
        }
        bool const was_writable = region->is_writable();
        if (!was_writable) {
            region->set_writable(true);
            region->remap();
        }
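        // Roll back to the original protection once the write completes,
        // whether or not copy_to_user() succeeds.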
        ScopeGuard rollback([&]() {
            if (!was_writable) {
                region->set_writable(false);
                region->remap();
            }
        });

        return copy_to_user(address, &data);
    });
}

ErrorOr<FlatPtr> Thread::peek_debug_register(u32 register_index)
{
#if ARCH(X86_64)
    FlatPtr data;
    switch (register_index) {
    case 0:
        data = m_debug_register_state.dr0;
        break;
    case 1:
        data = m_debug_register_state.dr1;
        break;
    case 2:
        data = m_debug_register_state.dr2;
        break;
    case 3:
        data = m_debug_register_state.dr3;
        break;
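    // DR4 and DR5 are reserved on x86-64, which is why the indices jump from
    // the four address registers straight to DR6 (status) and DR7 (control).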
    case 6:
        data = m_debug_register_state.dr6;
        break;
    case 7:
        data = m_debug_register_state.dr7;
        break;
    default:
        return EINVAL;
    }
    return data;
#elif ARCH(AARCH64)
    (void)register_index;
    TODO_AARCH64();
#else
#    error "Unknown architecture"
#endif
}

ErrorOr<void> Thread::poke_debug_register(u32 register_index, FlatPtr data)
{
#if ARCH(X86_64)
    switch (register_index) {
    case 0:
        m_debug_register_state.dr0 = data;
        break;
    case 1:
        m_debug_register_state.dr1 = data;
        break;
    case 2:
        m_debug_register_state.dr2 = data;
        break;
    case 3:
        m_debug_register_state.dr3 = data;
        break;
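    // DR6 (the debug status register) is not writable through PT_POKEDEBUG, and
    // DR4/DR5 are reserved, so DR7 is the only remaining valid index.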
    case 7:
        m_debug_register_state.dr7 = data;
        break;
    default:
        return EINVAL;
    }
    return {};
#elif ARCH(AARCH64)
    (void)register_index;
    (void)data;
    TODO_AARCH64();
#else
#    error "Unknown architecture"
#endif
}

}