// SerenityOS — Kernel/Process.cpp (master branch; full file is 1112 lines / 38 kB).
1/* 2 * Copyright (c) 2018-2022, Andreas Kling <kling@serenityos.org> 3 * 4 * SPDX-License-Identifier: BSD-2-Clause 5 */ 6 7#include <AK/Singleton.h> 8#include <AK/StdLibExtras.h> 9#include <AK/Time.h> 10#include <AK/Types.h> 11#include <Kernel/API/Syscall.h> 12#include <Kernel/Coredump.h> 13#include <Kernel/Credentials.h> 14#include <Kernel/Debug.h> 15#include <Kernel/Devices/DeviceManagement.h> 16#include <Kernel/InterruptDisabler.h> 17#ifdef ENABLE_KERNEL_COVERAGE_COLLECTION 18# include <Kernel/Devices/KCOVDevice.h> 19#endif 20#include <Kernel/API/POSIX/errno.h> 21#include <Kernel/API/POSIX/sys/limits.h> 22#include <Kernel/Arch/PageDirectory.h> 23#include <Kernel/Devices/NullDevice.h> 24#include <Kernel/FileSystem/Custody.h> 25#include <Kernel/FileSystem/OpenFileDescription.h> 26#include <Kernel/FileSystem/VirtualFileSystem.h> 27#include <Kernel/KBufferBuilder.h> 28#include <Kernel/KSyms.h> 29#include <Kernel/Memory/AnonymousVMObject.h> 30#include <Kernel/Memory/SharedInodeVMObject.h> 31#include <Kernel/Panic.h> 32#include <Kernel/PerformanceEventBuffer.h> 33#include <Kernel/PerformanceManager.h> 34#include <Kernel/Process.h> 35#include <Kernel/Scheduler.h> 36#include <Kernel/Sections.h> 37#include <Kernel/StdLib.h> 38#include <Kernel/TTY/TTY.h> 39#include <Kernel/Thread.h> 40#include <Kernel/ThreadTracer.h> 41#include <Kernel/TimerQueue.h> 42 43namespace Kernel { 44 45static void create_signal_trampoline(); 46 47extern ProcessID g_init_pid; 48 49RecursiveSpinlock<LockRank::None> g_profiling_lock {}; 50static Atomic<pid_t> next_pid; 51static Singleton<SpinlockProtected<Process::AllProcessesList, LockRank::None>> s_all_instances; 52READONLY_AFTER_INIT Memory::Region* g_signal_trampoline_region; 53 54static Singleton<MutexProtected<OwnPtr<KString>>> s_hostname; 55 56MutexProtected<OwnPtr<KString>>& hostname() 57{ 58 return *s_hostname; 59} 60 61SpinlockProtected<Process::AllProcessesList, LockRank::None>& Process::all_instances() 62{ 63 return *s_all_instances; 
}

// Invokes `callback` for every process visible from the current process's jail.
// If the current process is attached to a jail process list, iteration is
// limited to that list; otherwise the global process list is walked.
// Iteration stops at the first error returned by `callback`, which is then
// propagated to the caller.
ErrorOr<void> Process::for_each_in_same_jail(Function<ErrorOr<void>(Process&)> callback)
{
    return Process::current().m_jail_process_list.with([&](auto const& list_ptr) -> ErrorOr<void> {
        ErrorOr<void> result {};
        if (list_ptr) {
            list_ptr->attached_processes().with([&](auto const& list) {
                for (auto& process : list) {
                    result = callback(process);
                    if (result.is_error())
                        break;
                }
            });
            return result;
        }
        all_instances().with([&](auto const& list) {
            for (auto& process : list) {
                result = callback(process);
                if (result.is_error())
                    break;
            }
        });
        return result;
    });
}

// Invokes `callback` for every jail-visible process that is either a direct
// child of this process or has a thread currently traced by this process.
// Stops at, and propagates, the first error from `callback`.
ErrorOr<void> Process::for_each_child_in_same_jail(Function<ErrorOr<void>(Process&)> callback)
{
    ProcessID my_pid = pid();
    return m_jail_process_list.with([&](auto const& list_ptr) -> ErrorOr<void> {
        ErrorOr<void> result {};
        if (list_ptr) {
            list_ptr->attached_processes().with([&](auto const& list) {
                for (auto& process : list) {
                    if (process.ppid() == my_pid || process.has_tracee_thread(pid()))
                        result = callback(process);
                    if (result.is_error())
                        break;
                }
            });
            return result;
        }
        all_instances().with([&](auto const& list) {
            for (auto& process : list) {
                if (process.ppid() == my_pid || process.has_tracee_thread(pid()))
                    result = callback(process);
                if (result.is_error())
                    break;
            }
        });
        return result;
    });
}

// Invokes `callback` for every live (non-dead) jail-visible process whose
// process group ID matches `pgid`. Stops at the first error from `callback`.
ErrorOr<void> Process::for_each_in_pgrp_in_same_jail(ProcessGroupID pgid, Function<ErrorOr<void>(Process&)> callback)
{
    return m_jail_process_list.with([&](auto const& list_ptr) -> ErrorOr<void> {
        ErrorOr<void> result {};
        if (list_ptr) {
            list_ptr->attached_processes().with([&](auto const& list) {
                for (auto& process : list) {
                    if (!process.is_dead() && process.pgid() == pgid)
                        result = callback(process);
                    if (result.is_error())
                        break;
                }
            });
            return result;
        }
        all_instances().with([&](auto const&
list) {
            for (auto& process : list) {
                if (!process.is_dead() && process.pgid() == pgid)
                    result = callback(process);
                if (result.is_error())
                    break;
            }
        });
        return result;
    });
}

// Hands out the next PID from a monotonically increasing atomic counter.
ProcessID Process::allocate_pid()
{
    // Overflow is UB, and negative PIDs wreak havoc.
    // TODO: Handle PID overflow
    // For example: Use an Atomic<u32>, mask the most significant bit,
    // retry if PID is already taken as a PID, taken as a TID,
    // taken as a PGID, taken as a SID, or zero.
    return next_pid.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
}

// One-time process-subsystem setup: resets the PID counter, sets the default
// hostname, and maps the userspace signal trampoline region.
UNMAP_AFTER_INIT void Process::initialize()
{
    next_pid.store(0, AK::MemoryOrder::memory_order_release);

    // Note: This is called before scheduling is initialized, and before APs are booted.
    // So we can "safely" bypass the lock here.
    reinterpret_cast<OwnPtr<KString>&>(hostname()) = KString::must_create("courage"sv);

    create_signal_trampoline();
}

// Asks every thread in this process except the calling thread to die.
// Threads that are already Dead or Dying are left alone. Also force-drops any
// big-lock holds so the surviving thread does not dead-lock on them.
void Process::kill_threads_except_self()
{
    InterruptDisabler disabler;

    if (thread_count() <= 1)
        return;

    auto* current_thread = Thread::current();
    for_each_thread([&](Thread& thread) {
        if (&thread == current_thread)
            return;

        if (auto state = thread.state(); state == Thread::State::Dead
            || state == Thread::State::Dying)
            return;

        // We need to detach this thread in case it hasn't been joined
        thread.detach();
        thread.set_should_die();
    });

    u32 dropped_lock_count = 0;
    if (big_lock().force_unlock_exclusive_if_locked(dropped_lock_count) != LockMode::Unlocked)
        dbgln("Process {} big lock had {} locks", *this, dropped_lock_count);
}

// Asks every thread in this process to die, including the caller's (if any).
void Process::kill_all_threads()
{
    for_each_thread([&](Thread& thread) {
        // We need to detach this thread in case it hasn't been joined
        thread.detach();
        thread.set_should_die();
    });
}

// Adds a freshly created process to the global process list.
void Process::register_new(Process& process)
{
// Note: this is essentially the same like process->ref() 205 LockRefPtr<Process> new_process = process; 206 all_instances().with([&](auto& list) { 207 list.prepend(process); 208 }); 209} 210 211ErrorOr<NonnullLockRefPtr<Process>> Process::try_create_user_process(LockRefPtr<Thread>& first_thread, StringView path, UserID uid, GroupID gid, Vector<NonnullOwnPtr<KString>> arguments, Vector<NonnullOwnPtr<KString>> environment, TTY* tty) 212{ 213 auto parts = path.split_view('/'); 214 if (arguments.is_empty()) { 215 auto last_part = TRY(KString::try_create(parts.last())); 216 TRY(arguments.try_append(move(last_part))); 217 } 218 219 auto path_string = TRY(KString::try_create(path)); 220 auto name = TRY(KString::try_create(parts.last())); 221 auto process = TRY(Process::try_create(first_thread, move(name), uid, gid, ProcessID(0), false, VirtualFileSystem::the().root_custody(), nullptr, tty)); 222 223 TRY(process->m_fds.with_exclusive([&](auto& fds) -> ErrorOr<void> { 224 TRY(fds.try_resize(Process::OpenFileDescriptions::max_open())); 225 226 auto& device_to_use_as_tty = tty ? (CharacterDevice&)*tty : DeviceManagement::the().null_device(); 227 auto description = TRY(device_to_use_as_tty.open(O_RDWR)); 228 auto setup_description = [&](int fd) { 229 fds.m_fds_metadatas[fd].allocate(); 230 fds[fd].set(*description); 231 }; 232 setup_description(0); 233 setup_description(1); 234 setup_description(2); 235 236 return {}; 237 })); 238 239 Thread* new_main_thread = nullptr; 240 InterruptsState previous_interrupts_state = InterruptsState::Enabled; 241 if (auto result = process->exec(move(path_string), move(arguments), move(environment), new_main_thread, previous_interrupts_state); result.is_error()) { 242 dbgln("Failed to exec {}: {}", path, result.error()); 243 first_thread = nullptr; 244 return result.release_error(); 245 } 246 247 register_new(*process); 248 249 // NOTE: All user processes have a leaked ref on them. It's balanced by Thread::WaitBlockerSet::finalize(). 
250 process->ref(); 251 252 { 253 SpinlockLocker lock(g_scheduler_lock); 254 new_main_thread->set_state(Thread::State::Runnable); 255 } 256 257 return process; 258} 259 260LockRefPtr<Process> Process::create_kernel_process(LockRefPtr<Thread>& first_thread, NonnullOwnPtr<KString> name, void (*entry)(void*), void* entry_data, u32 affinity, RegisterProcess do_register) 261{ 262 auto process_or_error = Process::try_create(first_thread, move(name), UserID(0), GroupID(0), ProcessID(0), true); 263 if (process_or_error.is_error()) 264 return {}; 265 auto process = process_or_error.release_value(); 266 267 first_thread->regs().set_entry_function((FlatPtr)entry, (FlatPtr)entry_data); 268 269 if (do_register == RegisterProcess::Yes) 270 register_new(*process); 271 272 SpinlockLocker lock(g_scheduler_lock); 273 first_thread->set_affinity(affinity); 274 first_thread->set_state(Thread::State::Runnable); 275 return process; 276} 277 278void Process::protect_data() 279{ 280 m_protected_data_refs.unref([&]() { 281 MM.set_page_writable_direct(VirtualAddress { &this->m_protected_values_do_not_access_directly }, false); 282 }); 283} 284 285void Process::unprotect_data() 286{ 287 m_protected_data_refs.ref([&]() { 288 MM.set_page_writable_direct(VirtualAddress { &this->m_protected_values_do_not_access_directly }, true); 289 }); 290} 291 292ErrorOr<NonnullLockRefPtr<Process>> Process::try_create(LockRefPtr<Thread>& first_thread, NonnullOwnPtr<KString> name, UserID uid, GroupID gid, ProcessID ppid, bool is_kernel_process, RefPtr<Custody> current_directory, RefPtr<Custody> executable, TTY* tty, Process* fork_parent) 293{ 294 OwnPtr<Memory::AddressSpace> new_address_space; 295 if (fork_parent) { 296 TRY(fork_parent->address_space().with([&](auto& parent_address_space) -> ErrorOr<void> { 297 new_address_space = TRY(Memory::AddressSpace::try_create(parent_address_space.ptr())); 298 return {}; 299 })); 300 } else { 301 new_address_space = TRY(Memory::AddressSpace::try_create(nullptr)); 302 } 
303 auto unveil_tree = UnveilNode { TRY(KString::try_create("/"sv)), UnveilMetadata(TRY(KString::try_create("/"sv))) }; 304 auto exec_unveil_tree = UnveilNode { TRY(KString::try_create("/"sv)), UnveilMetadata(TRY(KString::try_create("/"sv))) }; 305 auto credentials = TRY(Credentials::create(uid, gid, uid, gid, uid, gid, {}, fork_parent ? fork_parent->sid() : 0, fork_parent ? fork_parent->pgid() : 0)); 306 auto process = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) Process(move(name), move(credentials), ppid, is_kernel_process, move(current_directory), move(executable), tty, move(unveil_tree), move(exec_unveil_tree)))); 307 TRY(process->attach_resources(new_address_space.release_nonnull(), first_thread, fork_parent)); 308 return process; 309} 310 311Process::Process(NonnullOwnPtr<KString> name, NonnullRefPtr<Credentials> credentials, ProcessID ppid, bool is_kernel_process, RefPtr<Custody> current_directory, RefPtr<Custody> executable, TTY* tty, UnveilNode unveil_tree, UnveilNode exec_unveil_tree) 312 : m_name(move(name)) 313 , m_is_kernel_process(is_kernel_process) 314 , m_executable(move(executable)) 315 , m_current_directory(move(current_directory)) 316 , m_tty(tty) 317 , m_unveil_data(move(unveil_tree)) 318 , m_exec_unveil_data(move(exec_unveil_tree)) 319 , m_wait_blocker_set(*this) 320{ 321 // Ensure that we protect the process data when exiting the constructor. 
322 with_mutable_protected_data([&](auto& protected_data) { 323 protected_data.pid = allocate_pid(); 324 protected_data.ppid = ppid; 325 protected_data.credentials = move(credentials); 326 }); 327 328 if constexpr (PROCESS_DEBUG) { 329 this->name().with([&](auto& process_name) { 330 dbgln("Created new process {}({})", process_name->view(), this->pid().value()); 331 }); 332 } 333} 334 335ErrorOr<void> Process::attach_resources(NonnullOwnPtr<Memory::AddressSpace>&& preallocated_space, LockRefPtr<Thread>& first_thread, Process* fork_parent) 336{ 337 m_space.with([&](auto& space) { 338 space = move(preallocated_space); 339 }); 340 341 auto create_first_thread = [&] { 342 if (fork_parent) { 343 // NOTE: fork() doesn't clone all threads; the thread that called fork() becomes the only thread in the new process. 344 return Thread::current()->try_clone(*this); 345 } 346 // NOTE: This non-forked code path is only taken when the kernel creates a process "manually" (at boot.) 347 return Thread::try_create(*this); 348 }; 349 350 first_thread = TRY(create_first_thread()); 351 352 if (!fork_parent) { 353 // FIXME: Figure out if this is really necessary. 354 first_thread->detach(); 355 } 356 357 // This is not actually explicitly verified by any official documentation, 358 // but it's not listed anywhere as being cleared, and rsync expects it to work like this. 
359 if (fork_parent) 360 m_signal_action_data = fork_parent->m_signal_action_data; 361 362 return {}; 363} 364 365Process::~Process() 366{ 367 unprotect_data(); 368 369 VERIFY(thread_count() == 0); // all threads should have been finalized 370 VERIFY(!m_alarm_timer); 371 372 PerformanceManager::add_process_exit_event(*this); 373} 374 375// Make sure the compiler doesn't "optimize away" this function: 376extern void signal_trampoline_dummy() __attribute__((used)); 377void signal_trampoline_dummy() 378{ 379#if ARCH(X86_64) 380 // The trampoline preserves the current rax, pushes the signal code and 381 // then calls the signal handler. We do this because, when interrupting a 382 // blocking syscall, that syscall may return some special error code in eax; 383 // This error code would likely be overwritten by the signal handler, so it's 384 // necessary to preserve it here. 385 constexpr static auto offset_to_first_register_slot = sizeof(__ucontext) + sizeof(siginfo) + sizeof(FPUState) + 3 * sizeof(FlatPtr); 386 asm( 387 ".intel_syntax noprefix\n" 388 ".globl asm_signal_trampoline\n" 389 "asm_signal_trampoline:\n" 390 // stack state: 0, ucontext, signal_info (alignment = 16), fpu_state (alignment = 16), ucontext*, siginfo*, signal, handler 391 392 // Pop the handler into rcx 393 "pop rcx\n" // save handler 394 // we have to save rax 'cause it might be the return value from a syscall 395 "mov [rsp+%P1], rax\n" 396 // pop signal number into rdi (first param) 397 "pop rdi\n" 398 // pop siginfo* into rsi (second param) 399 "pop rsi\n" 400 // pop ucontext* into rdx (third param) 401 "pop rdx\n" 402 // Note that the stack is currently aligned to 16 bytes as we popped the extra entries above. 403 // call the signal handler 404 "call rcx\n" 405 // Current stack state is just saved_rax, ucontext, signal_info, fpu_state. 
406 // syscall SC_sigreturn 407 "mov rax, %P0\n" 408 "syscall\n" 409 ".globl asm_signal_trampoline_end\n" 410 "asm_signal_trampoline_end:\n" 411 ".att_syntax" 412 : 413 : "i"(Syscall::SC_sigreturn), 414 "i"(offset_to_first_register_slot)); 415#elif ARCH(AARCH64) 416 asm( 417 ".global asm_signal_trampoline\n" 418 "asm_signal_trampoline:\n" 419 // TODO: Implement this when we support userspace for aarch64 420 "wfi\n" 421 "\n" 422 ".global asm_signal_trampoline_end\n" 423 "asm_signal_trampoline_end: \n"); 424#else 425# error Unknown architecture 426#endif 427} 428 429extern "C" char const asm_signal_trampoline[]; 430extern "C" char const asm_signal_trampoline_end[]; 431 432void create_signal_trampoline() 433{ 434 // NOTE: We leak this region. 435 g_signal_trampoline_region = MM.allocate_kernel_region(PAGE_SIZE, "Signal trampolines"sv, Memory::Region::Access::ReadWrite).release_value().leak_ptr(); 436 g_signal_trampoline_region->set_syscall_region(true); 437 438 size_t trampoline_size = asm_signal_trampoline_end - asm_signal_trampoline; 439 440 u8* code_ptr = (u8*)g_signal_trampoline_region->vaddr().as_ptr(); 441 memcpy(code_ptr, asm_signal_trampoline, trampoline_size); 442 443 g_signal_trampoline_region->set_writable(false); 444 g_signal_trampoline_region->remap(); 445} 446 447void Process::crash(int signal, Optional<RegisterState const&> regs, bool out_of_memory) 448{ 449 VERIFY(!is_dead()); 450 VERIFY(&Process::current() == this); 451 452 auto ip = regs.has_value() ? regs->ip() : 0; 453 454 if (out_of_memory) { 455 dbgln("\033[31;1mOut of memory\033[m, killing: {}", *this); 456 } else { 457 if (ip >= kernel_load_base && g_kernel_symbols_available) { 458 auto const* symbol = symbolicate_kernel_address(ip); 459 dbgln("\033[31;1m{:p} {} +{}\033[0m\n", ip, (symbol ? symbol->name : "(k?)"), (symbol ? 
ip - symbol->address : 0)); 460 } else { 461 dbgln("\033[31;1m{:p} (?)\033[0m\n", ip); 462 } 463#if ARCH(X86_64) 464 constexpr bool userspace_backtrace = false; 465#elif ARCH(AARCH64) 466 constexpr bool userspace_backtrace = true; 467#else 468# error "Unknown architecture" 469#endif 470 if constexpr (userspace_backtrace) { 471 dbgln("Userspace backtrace:"); 472 auto bp = regs.has_value() ? regs->bp() : 0; 473 dump_backtrace_from_base_pointer(bp); 474 } 475 476 dbgln("Kernel backtrace:"); 477 dump_backtrace(); 478 } 479 with_mutable_protected_data([&](auto& protected_data) { 480 protected_data.termination_signal = signal; 481 }); 482 set_should_generate_coredump(!out_of_memory); 483 if constexpr (DUMP_REGIONS_ON_CRASH) { 484 address_space().with([](auto& space) { space->dump_regions(); }); 485 } 486 VERIFY(is_user_process()); 487 die(); 488 // We can not return from here, as there is nowhere 489 // to unwind to, so die right away. 490 Thread::current()->die_if_needed(); 491 VERIFY_NOT_REACHED(); 492} 493 494LockRefPtr<Process> Process::from_pid_in_same_jail(ProcessID pid) 495{ 496 return Process::current().m_jail_process_list.with([&](auto const& list_ptr) -> LockRefPtr<Process> { 497 if (list_ptr) { 498 return list_ptr->attached_processes().with([&](auto const& list) -> LockRefPtr<Process> { 499 for (auto& process : list) { 500 if (process.pid() == pid) { 501 return process; 502 } 503 } 504 return {}; 505 }); 506 } 507 return all_instances().with([&](auto const& list) -> LockRefPtr<Process> { 508 for (auto& process : list) { 509 if (process.pid() == pid) { 510 return process; 511 } 512 } 513 return {}; 514 }); 515 }); 516} 517 518LockRefPtr<Process> Process::from_pid_ignoring_jails(ProcessID pid) 519{ 520 return all_instances().with([&](auto const& list) -> LockRefPtr<Process> { 521 for (auto const& process : list) { 522 if (process.pid() == pid) 523 return &process; 524 } 525 return {}; 526 }); 527} 528 529Process::OpenFileDescriptionAndFlags const* 
Process::OpenFileDescriptions::get_if_valid(size_t i) const
{
    // Returns the descriptor metadata at index `i`, or nullptr if the index
    // is out of range or the slot does not hold a valid description.
    if (m_fds_metadatas.size() <= i)
        return nullptr;

    if (auto const& metadata = m_fds_metadatas[i]; metadata.is_valid())
        return &metadata;

    return nullptr;
}

// Non-const counterpart of get_if_valid() above.
Process::OpenFileDescriptionAndFlags* Process::OpenFileDescriptions::get_if_valid(size_t i)
{
    if (m_fds_metadatas.size() <= i)
        return nullptr;

    if (auto& metadata = m_fds_metadatas[i]; metadata.is_valid())
        return &metadata;

    return nullptr;
}

// Returns the metadata at index `i`; VERIFYs the slot has been allocated.
Process::OpenFileDescriptionAndFlags const& Process::OpenFileDescriptions::at(size_t i) const
{
    VERIFY(m_fds_metadatas[i].is_allocated());
    return m_fds_metadatas[i];
}

Process::OpenFileDescriptionAndFlags& Process::OpenFileDescriptions::at(size_t i)
{
    VERIFY(m_fds_metadatas[i].is_allocated());
    return m_fds_metadatas[i];
}

// Resolves a file descriptor number to its OpenFileDescription.
// Returns EBADF for negative, out-of-range, or unused descriptors.
ErrorOr<NonnullRefPtr<OpenFileDescription>> Process::OpenFileDescriptions::open_file_description(int fd) const
{
    if (fd < 0)
        return EBADF;
    if (static_cast<size_t>(fd) >= m_fds_metadatas.size())
        return EBADF;
    RefPtr description = m_fds_metadatas[fd].description();
    if (!description)
        return EBADF;
    return description.release_nonnull();
}

// Calls `callback` on every descriptor slot, valid or not.
void Process::OpenFileDescriptions::enumerate(Function<void(OpenFileDescriptionAndFlags const&)> callback) const
{
    for (auto const& file_description_metadata : m_fds_metadatas) {
        callback(file_description_metadata);
    }
}

// Like enumerate(), but stops at and propagates the first error from `callback`.
ErrorOr<void> Process::OpenFileDescriptions::try_enumerate(Function<ErrorOr<void>(OpenFileDescriptionAndFlags const&)> callback) const
{
    for (auto const& file_description_metadata : m_fds_metadatas) {
        TRY(callback(file_description_metadata));
    }
    return {};
}

// Calls `callback` with a mutable reference to every descriptor slot.
void Process::OpenFileDescriptions::change_each(Function<void(OpenFileDescriptionAndFlags&)> callback)
{
    for (auto& file_description_metadata : m_fds_metadatas) {
        callback(file_description_metadata);
    }
}

// Counts the descriptor slots that currently hold a valid description.
size_t Process::OpenFileDescriptions::open_count() const
{
    size_t count = 0;
    enumerate([&](auto& file_description_metadata) {
        if (file_description_metadata.is_valid())
            ++count;
    });
    return count;
}

// Finds and reserves the lowest free descriptor slot at or above
// `first_candidate_fd`. Returns EMFILE when every slot is taken.
ErrorOr<Process::ScopedDescriptionAllocation> Process::OpenFileDescriptions::allocate(int first_candidate_fd)
{
    for (size_t i = first_candidate_fd; i < max_open(); ++i) {
        if (!m_fds_metadatas[i].is_allocated()) {
            m_fds_metadatas[i].allocate();
            return Process::ScopedDescriptionAllocation { static_cast<int>(i), &m_fds_metadatas[i] };
        }
    }
    return EMFILE;
}

// Returns the current time from the time-management subsystem.
Time kgettimeofday()
{
    return TimeManagement::now();
}

// Builds the SIGCHLD siginfo describing how this process terminated:
// CLD_KILLED with the signal number if a termination signal was recorded,
// otherwise CLD_EXITED with the termination status.
siginfo_t Process::wait_info() const
{
    auto credentials = this->credentials();
    siginfo_t siginfo {};
    siginfo.si_signo = SIGCHLD;
    siginfo.si_pid = pid().value();
    siginfo.si_uid = credentials->uid().value();

    with_protected_data([&](auto& protected_data) {
        if (protected_data.termination_signal != 0) {
            siginfo.si_status = protected_data.termination_signal;
            siginfo.si_code = CLD_KILLED;
        } else {
            siginfo.si_status = protected_data.termination_status;
            siginfo.si_code = CLD_EXITED;
        }
    });
    return siginfo;
}

// Returns this process's current working directory, lazily falling back to
// the VFS root custody if none has been set yet.
NonnullRefPtr<Custody> Process::current_directory()
{
    return m_current_directory.with([&](auto& current_directory) -> NonnullRefPtr<Custody> {
        if (!current_directory)
            current_directory = VirtualFileSystem::the().root_custody();
        return *current_directory;
    });
}

// Copies a user-supplied path into a kernel string, rejecting empty paths
// (EINVAL) and paths longer than PATH_MAX (ENAMETOOLONG).
ErrorOr<NonnullOwnPtr<KString>> Process::get_syscall_path_argument(Userspace<char const*> user_path, size_t path_length)
{
    if (path_length == 0)
        return EINVAL;
    if (path_length > PATH_MAX)
        return ENAMETOOLONG;
    return try_copy_kstring_from_user(user_path, path_length);
}

// Convenience overload taking a Syscall::StringArgument.
ErrorOr<NonnullOwnPtr<KString>>
Process::get_syscall_path_argument(Syscall::StringArgument const& path) 661{ 662 Userspace<char const*> path_characters((FlatPtr)path.characters); 663 return get_syscall_path_argument(path_characters, path.length); 664} 665 666ErrorOr<void> Process::dump_core() 667{ 668 VERIFY(is_dumpable()); 669 VERIFY(should_generate_coredump()); 670 dbgln("Generating coredump for pid: {}", pid().value()); 671 auto coredump_directory_path = TRY(Coredump::directory_path().with([&](auto& coredump_directory_path) -> ErrorOr<NonnullOwnPtr<KString>> { 672 if (coredump_directory_path) 673 return KString::try_create(coredump_directory_path->view()); 674 return KString::try_create(""sv); 675 })); 676 if (coredump_directory_path->view() == ""sv) { 677 dbgln("Generating coredump for pid {} failed because coredump directory was not set.", pid().value()); 678 return {}; 679 } 680 auto coredump_path = TRY(name().with([&](auto& process_name) { 681 return KString::formatted("{}/{}_{}_{}", coredump_directory_path->view(), process_name->view(), pid().value(), kgettimeofday().to_truncated_seconds()); 682 })); 683 auto coredump = TRY(Coredump::try_create(*this, coredump_path->view())); 684 return coredump->write(); 685} 686 687ErrorOr<void> Process::dump_perfcore() 688{ 689 VERIFY(is_dumpable()); 690 VERIFY(m_perf_event_buffer); 691 dbgln("Generating perfcore for pid: {}", pid().value()); 692 693 // Try to generate a filename which isn't already used. 
694 auto base_filename = TRY(name().with([&](auto& process_name) { 695 return KString::formatted("{}_{}", process_name->view(), pid().value()); 696 })); 697 auto perfcore_filename = TRY(KString::formatted("{}.profile", base_filename)); 698 RefPtr<OpenFileDescription> description; 699 auto credentials = this->credentials(); 700 for (size_t attempt = 1; attempt <= 10; ++attempt) { 701 auto description_or_error = VirtualFileSystem::the().open(*this, credentials, perfcore_filename->view(), O_CREAT | O_EXCL, 0400, current_directory(), UidAndGid { 0, 0 }); 702 if (!description_or_error.is_error()) { 703 description = description_or_error.release_value(); 704 break; 705 } 706 perfcore_filename = TRY(KString::formatted("{}.{}.profile", base_filename, attempt)); 707 } 708 if (!description) { 709 dbgln("Failed to generate perfcore for pid {}: Could not generate filename for the perfcore file.", pid().value()); 710 return EEXIST; 711 } 712 713 auto builder = TRY(KBufferBuilder::try_create()); 714 TRY(m_perf_event_buffer->to_json(builder)); 715 716 auto json = builder.build(); 717 if (!json) { 718 dbgln("Failed to generate perfcore for pid {}: Could not allocate buffer.", pid().value()); 719 return ENOMEM; 720 } 721 auto json_buffer = UserOrKernelBuffer::for_kernel_buffer(json->data()); 722 TRY(description->write(json_buffer, json->size())); 723 724 dbgln("Wrote perfcore for pid {} to {}", pid().value(), perfcore_filename); 725 return {}; 726} 727 728void Process::finalize() 729{ 730 VERIFY(Thread::current() == g_finalizer); 731 732 dbgln_if(PROCESS_DEBUG, "Finalizing process {}", *this); 733 734 if (veil_state() == VeilState::Dropped) { 735 name().with([&](auto& process_name) { 736 dbgln("\x1b[01;31mProcess '{}' exited with the veil left open\x1b[0m", process_name->view()); 737 }); 738 } 739 740 if (g_init_pid != 0 && pid() == g_init_pid) 741 PANIC("Init process quit unexpectedly. 
Exit code: {}", termination_status()); 742 743 if (is_dumpable()) { 744 if (m_should_generate_coredump) { 745 auto result = dump_core(); 746 if (result.is_error()) { 747 dmesgln("Failed to write coredump for pid {}: {}", pid(), result.error()); 748 } 749 } 750 if (m_perf_event_buffer) { 751 auto result = dump_perfcore(); 752 if (result.is_error()) 753 dmesgln("Failed to write perfcore for pid {}: {}", pid(), result.error()); 754 TimeManagement::the().disable_profile_timer(); 755 } 756 } 757 758 m_threads_for_coredump.clear(); 759 760 if (m_alarm_timer) 761 TimerQueue::the().cancel_timer(m_alarm_timer.release_nonnull()); 762 m_fds.with_exclusive([](auto& fds) { fds.clear(); }); 763 m_tty = nullptr; 764 m_executable.with([](auto& executable) { executable = nullptr; }); 765 m_jail_process_list.with([this](auto& list_ptr) { 766 if (list_ptr) { 767 list_ptr->attached_processes().with([&](auto& list) { 768 list.remove(*this); 769 }); 770 } 771 }); 772 m_attached_jail.with([](auto& jail) { 773 if (jail) 774 jail->detach({}); 775 jail = nullptr; 776 }); 777 m_arguments.clear(); 778 m_environment.clear(); 779 780 m_state.store(State::Dead, AK::MemoryOrder::memory_order_release); 781 782 { 783 if (auto parent_process = Process::from_pid_ignoring_jails(ppid())) { 784 if (parent_process->is_user_process() && (parent_process->m_signal_action_data[SIGCHLD].flags & SA_NOCLDWAIT) != SA_NOCLDWAIT) 785 (void)parent_process->send_signal(SIGCHLD, this); 786 } 787 } 788 789 if (!!ppid()) { 790 if (auto parent = Process::from_pid_ignoring_jails(ppid())) { 791 parent->m_ticks_in_user_for_dead_children += m_ticks_in_user + m_ticks_in_user_for_dead_children; 792 parent->m_ticks_in_kernel_for_dead_children += m_ticks_in_kernel + m_ticks_in_kernel_for_dead_children; 793 } 794 } 795 796 unblock_waiters(Thread::WaitBlocker::UnblockFlags::Terminated); 797 798 m_space.with([](auto& space) { space->remove_all_regions({}); }); 799 800 VERIFY(ref_count() > 0); 801 // WaitBlockerSet::finalize will 
be in charge of dropping the last 802 // reference if there are still waiters around, or whenever the last 803 // waitable states are consumed. Unless there is no parent around 804 // anymore, in which case we'll just drop it right away. 805 m_wait_blocker_set.finalize(); 806} 807 808void Process::disowned_by_waiter(Process& process) 809{ 810 m_wait_blocker_set.disowned_by_waiter(process); 811} 812 813void Process::unblock_waiters(Thread::WaitBlocker::UnblockFlags flags, u8 signal) 814{ 815 LockRefPtr<Process> waiter_process; 816 if (auto* my_tracer = tracer()) 817 waiter_process = Process::from_pid_ignoring_jails(my_tracer->tracer_pid()); 818 else 819 waiter_process = Process::from_pid_ignoring_jails(ppid()); 820 821 if (waiter_process) 822 waiter_process->m_wait_blocker_set.unblock(*this, flags, signal); 823} 824 825void Process::die() 826{ 827 auto expected = State::Running; 828 if (!m_state.compare_exchange_strong(expected, State::Dying, AK::memory_order_acquire)) { 829 // It's possible that another thread calls this at almost the same time 830 // as we can't always instantly kill other threads (they may be blocked) 831 // So if we already were called then other threads should stop running 832 // momentarily and we only really need to service the first thread 833 return; 834 } 835 836 // Let go of the TTY, otherwise a slave PTY may keep the master PTY from 837 // getting an EOF when the last process using the slave PTY dies. 838 // If the master PTY owner relies on an EOF to know when to wait() on a 839 // slave owner, we have to allow the PTY pair to be torn down. 
840 m_tty = nullptr; 841 842 VERIFY(m_threads_for_coredump.is_empty()); 843 for_each_thread([&](auto& thread) { 844 auto result = m_threads_for_coredump.try_append(thread); 845 if (result.is_error()) 846 dbgln("Failed to add thread {} to coredump due to OOM", thread.tid()); 847 }); 848 849 all_instances().with([&](auto const& list) { 850 for (auto it = list.begin(); it != list.end();) { 851 auto& process = *it; 852 ++it; 853 if (process.has_tracee_thread(pid())) { 854 if constexpr (PROCESS_DEBUG) { 855 process.name().with([&](auto& process_name) { 856 name().with([&](auto& name) { 857 dbgln("Process {} ({}) is attached by {} ({}) which will exit", process_name->view(), process.pid(), name->view(), pid()); 858 }); 859 }); 860 } 861 process.stop_tracing(); 862 auto err = process.send_signal(SIGSTOP, this); 863 if (err.is_error()) { 864 process.name().with([&](auto& process_name) { 865 dbgln("Failed to send the SIGSTOP signal to {} ({})", process_name->view(), process.pid()); 866 }); 867 } 868 } 869 } 870 }); 871 872 kill_all_threads(); 873#ifdef ENABLE_KERNEL_COVERAGE_COLLECTION 874 KCOVDevice::free_process(); 875#endif 876} 877 878void Process::terminate_due_to_signal(u8 signal) 879{ 880 VERIFY_INTERRUPTS_DISABLED(); 881 VERIFY(signal < NSIG); 882 VERIFY(&Process::current() == this); 883 dbgln("Terminating {} due to signal {}", *this, signal); 884 with_mutable_protected_data([&](auto& protected_data) { 885 protected_data.termination_status = 0; 886 protected_data.termination_signal = signal; 887 }); 888 die(); 889} 890 891ErrorOr<void> Process::send_signal(u8 signal, Process* sender) 892{ 893 VERIFY(is_user_process()); 894 // Try to send it to the "obvious" main thread: 895 auto receiver_thread = Thread::from_tid(pid().value()); 896 // If the main thread has died, there may still be other threads: 897 if (!receiver_thread) { 898 // The first one should be good enough. 899 // Neither kill(2) nor kill(3) specify any selection procedure. 
        for_each_thread([&receiver_thread](Thread& thread) -> IterationDecision {
            receiver_thread = &thread;
            return IterationDecision::Break;
        });
    }
    if (receiver_thread) {
        receiver_thread->send_signal(signal, sender);
        return {};
    }
    // No threads left to deliver the signal to.
    return ESRCH;
}

// Creates a joinable-or-detached kernel thread inside this process with the
// given entry point, priority, and CPU affinity, and makes it runnable.
// Returns a null LockRefPtr on allocation failure.
LockRefPtr<Thread> Process::create_kernel_thread(void (*entry)(void*), void* entry_data, u32 priority, NonnullOwnPtr<KString> name, u32 affinity, bool joinable)
{
    VERIFY((priority >= THREAD_PRIORITY_MIN) && (priority <= THREAD_PRIORITY_MAX));

    // FIXME: Do something with guard pages?

    auto thread_or_error = Thread::try_create(*this);
    if (thread_or_error.is_error())
        return {};

    auto thread = thread_or_error.release_value();
    thread->set_name(move(name));
    thread->set_affinity(affinity);
    thread->set_priority(priority);
    if (!joinable)
        thread->detach();

    auto& regs = thread->regs();
    regs.set_ip((FlatPtr)entry);
    regs.set_sp((FlatPtr)entry_data); // entry function argument is expected to be in the SP register

    SpinlockLocker lock(g_scheduler_lock);
    thread->set_state(Thread::State::Runnable);
    return thread;
}

// Resets a descriptor slot to the empty state.
void Process::OpenFileDescriptionAndFlags::clear()
{
    m_description = nullptr;
    m_flags = 0;
}

// Installs a description (and its fd flags) into this slot.
void Process::OpenFileDescriptionAndFlags::set(NonnullRefPtr<OpenFileDescription> description, u32 flags)
{
    m_description = move(description);
    m_flags = flags;
}

void Process::set_tty(TTY* tty)
{
    m_tty = tty;
}

// Attaches a ThreadTracer so that `tracer` may ptrace this process.
ErrorOr<void> Process::start_tracing_from(ProcessID tracer)
{
    m_tracer = TRY(ThreadTracer::try_create(tracer));
    return {};
}

void Process::stop_tracing()
{
    m_tracer = nullptr;
}

// Records the trapping thread's register state for the tracer and raises
// SIGTRAP on the thread itself.
void Process::tracer_trap(Thread& thread, RegisterState const& regs)
{
    VERIFY(m_tracer.ptr());
    m_tracer->set_regs(regs);
    thread.send_urgent_signal_to_self(SIGTRAP);
}

bool
Process::create_perf_events_buffer_if_needed() 974{ 975 if (m_perf_event_buffer) 976 return true; 977 m_perf_event_buffer = PerformanceEventBuffer::try_create_with_size(4 * MiB); 978 if (!m_perf_event_buffer) 979 return false; 980 return !m_perf_event_buffer->add_process(*this, ProcessEventType::Create).is_error(); 981} 982 983void Process::delete_perf_events_buffer() 984{ 985 if (m_perf_event_buffer) 986 m_perf_event_buffer = nullptr; 987} 988 989bool Process::remove_thread(Thread& thread) 990{ 991 u32 thread_count_before = 0; 992 thread_list().with([&](auto& thread_list) { 993 thread_list.remove(thread); 994 with_mutable_protected_data([&](auto& protected_data) { 995 thread_count_before = protected_data.thread_count.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel); 996 VERIFY(thread_count_before != 0); 997 }); 998 }); 999 return thread_count_before == 1; 1000} 1001 1002bool Process::add_thread(Thread& thread) 1003{ 1004 bool is_first = false; 1005 thread_list().with([&](auto& thread_list) { 1006 thread_list.append(thread); 1007 with_mutable_protected_data([&](auto& protected_data) { 1008 is_first = protected_data.thread_count.fetch_add(1, AK::MemoryOrder::memory_order_relaxed) == 0; 1009 }); 1010 }); 1011 return is_first; 1012} 1013 1014ErrorOr<void> Process::set_coredump_property(NonnullOwnPtr<KString> key, NonnullOwnPtr<KString> value) 1015{ 1016 return m_coredump_properties.with([&](auto& coredump_properties) -> ErrorOr<void> { 1017 // Write it into the first available property slot. 
1018 for (auto& slot : coredump_properties) { 1019 if (slot.key) 1020 continue; 1021 slot.key = move(key); 1022 slot.value = move(value); 1023 return {}; 1024 } 1025 1026 return ENOBUFS; 1027 }); 1028} 1029 1030ErrorOr<void> Process::try_set_coredump_property(StringView key, StringView value) 1031{ 1032 auto key_kstring = TRY(KString::try_create(key)); 1033 auto value_kstring = TRY(KString::try_create(value)); 1034 return set_coredump_property(move(key_kstring), move(value_kstring)); 1035}; 1036 1037static constexpr StringView to_string(Pledge promise) 1038{ 1039#define __ENUMERATE_PLEDGE_PROMISE(x) \ 1040 case Pledge::x: \ 1041 return #x##sv; 1042 switch (promise) { 1043 ENUMERATE_PLEDGE_PROMISES 1044 } 1045#undef __ENUMERATE_PLEDGE_PROMISE 1046 VERIFY_NOT_REACHED(); 1047} 1048 1049ErrorOr<void> Process::require_no_promises() const 1050{ 1051 if (!has_promises()) 1052 return {}; 1053 dbgln("Has made a promise"); 1054 Thread::current()->set_promise_violation_pending(true); 1055 return EPROMISEVIOLATION; 1056} 1057 1058ErrorOr<void> Process::require_promise(Pledge promise) 1059{ 1060 if (!has_promises()) 1061 return {}; 1062 1063 if (has_promised(promise)) 1064 return {}; 1065 1066 dbgln("Has not pledged {}", to_string(promise)); 1067 Thread::current()->set_promise_violation_pending(true); 1068 (void)try_set_coredump_property("pledge_violation"sv, to_string(promise)); 1069 return EPROMISEVIOLATION; 1070} 1071 1072NonnullRefPtr<Credentials> Process::credentials() const 1073{ 1074 return with_protected_data([&](auto& protected_data) -> NonnullRefPtr<Credentials> { 1075 return *protected_data.credentials; 1076 }); 1077} 1078 1079RefPtr<Custody> Process::executable() 1080{ 1081 return m_executable.with([](auto& executable) { return executable; }); 1082} 1083 1084RefPtr<Custody const> Process::executable() const 1085{ 1086 return m_executable.with([](auto& executable) { return executable; }); 1087} 1088 1089ErrorOr<NonnullRefPtr<Custody>> Process::custody_for_dirfd(int 
// NOTE(review): continuation of Process::custody_for_dirfd() — the signature's
// opening sits at the end of the previous chunk.
dirfd)
{
    // AT_FDCWD means "relative to the current working directory".
    if (dirfd == AT_FDCWD)
        return current_directory();

    auto base_description = TRY(open_file_description(dirfd));
    // A description without a custody (presumably an fd not backed by a
    // filesystem path — confirm against OpenFileDescription) cannot anchor
    // a path resolution.
    if (!base_description->custody())
        return EINVAL;
    return *base_description->custody();
}

// Returns the lock-protected process name; callers access it via .with(...).
SpinlockProtected<NonnullOwnPtr<KString>, LockRank::None> const& Process::name() const
{
    return m_name;
}

// Replaces the process name under its spinlock.
void Process::set_name(NonnullOwnPtr<KString> name)
{
    m_name.with([&](auto& this_name) {
        this_name = move(name);
    });
}

}