// Serenity Operating System
1/*
2 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
3 * Copyright (c) 2021, Leon Albrecht <leon2002.la@gmail.com>
4 *
5 * SPDX-License-Identifier: BSD-2-Clause
6 */
7
8#include <Kernel/API/VirtualMemoryAnnotations.h>
9#include <Kernel/Arch/CPU.h>
10#include <Kernel/Arch/PageDirectory.h>
11#include <Kernel/Arch/SafeMem.h>
12#include <Kernel/Arch/SmapDisabler.h>
13#include <Kernel/FileSystem/Custody.h>
14#include <Kernel/FileSystem/OpenFileDescription.h>
15#include <Kernel/Memory/AnonymousVMObject.h>
16#include <Kernel/Memory/MemoryManager.h>
17#include <Kernel/Memory/PrivateInodeVMObject.h>
18#include <Kernel/Memory/Region.h>
19#include <Kernel/Memory/SharedInodeVMObject.h>
20#include <Kernel/PerformanceEventBuffer.h>
21#include <Kernel/PerformanceManager.h>
22#include <Kernel/Process.h>
23#include <LibELF/Validation.h>
24
25#if ARCH(X86_64)
26# include <Kernel/Arch/x86_64/MSR.h>
27#endif
28
29namespace Kernel {
30
31static bool should_make_executable_exception_for_dynamic_loader(bool make_readable, bool make_writable, bool make_executable, Memory::Region const& region)
32{
33 // Normally we don't allow W -> X transitions, but we have to make an exception
34 // for the dynamic loader, which needs to do this after performing text relocations.
35
36 // FIXME: Investigate whether we could get rid of all text relocations entirely.
37
38 // The exception is only made if all the following criteria is fulfilled:
39
40 // The region must be RW
41 if (!(region.is_readable() && region.is_writable() && !region.is_executable()))
42 return false;
43
44 // The region wants to become RX
45 if (!(make_readable && !make_writable && make_executable))
46 return false;
47
48 // The region is backed by a file
49 if (!region.vmobject().is_inode())
50 return false;
51
52 // The file mapping is private, not shared (no relocations in a shared mapping!)
53 if (!region.vmobject().is_private_inode())
54 return false;
55
56 auto const& inode_vm = static_cast<Memory::InodeVMObject const&>(region.vmobject());
57 auto const& inode = inode_vm.inode();
58
59 ElfW(Ehdr) header;
60 auto buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)&header);
61 auto result = inode.read_bytes(0, sizeof(header), buffer, nullptr);
62 if (result.is_error() || result.value() != sizeof(header))
63 return false;
64
65 // The file is a valid ELF binary
66 if (!ELF::validate_elf_header(header, inode.size()))
67 return false;
68
69 // The file is an ELF shared object
70 if (header.e_type != ET_DYN)
71 return false;
72
73 // FIXME: Are there any additional checks/validations we could do here?
74 return true;
75}
76
77ErrorOr<void> Process::validate_mmap_prot(int prot, bool map_stack, bool map_anonymous, Memory::Region const* region) const
78{
79 bool make_readable = prot & PROT_READ;
80 bool make_writable = prot & PROT_WRITE;
81 bool make_executable = prot & PROT_EXEC;
82
83 if (map_anonymous && make_executable && !(executable()->mount_flags() & MS_AXALLOWED))
84 return EINVAL;
85
86 if (map_stack && make_executable)
87 return EINVAL;
88
89 if (executable()->mount_flags() & MS_WXALLOWED)
90 return {};
91
92 if (make_writable && make_executable)
93 return EINVAL;
94
95 if (region) {
96 if (make_writable && region->has_been_executable())
97 return EINVAL;
98
99 if (make_executable && region->has_been_writable()) {
100 if (should_make_executable_exception_for_dynamic_loader(make_readable, make_writable, make_executable, *region)) {
101 return {};
102 } else {
103 return EINVAL;
104 };
105 }
106 }
107
108 return {};
109}
110
111ErrorOr<void> Process::validate_inode_mmap_prot(int prot, bool readable_description, bool description_writable, bool map_shared) const
112{
113 auto credentials = this->credentials();
114 if ((prot & PROT_READ) && !readable_description)
115 return EACCES;
116
117 if (map_shared) {
118 // FIXME: What about readonly filesystem mounts? We cannot make a
119 // decision here without knowing the mount flags, so we would need to
120 // keep a Custody or something from mmap time.
121 if ((prot & PROT_WRITE) && !description_writable)
122 return EACCES;
123 }
124 return {};
125}
126
127ErrorOr<FlatPtr> Process::sys$mmap(Userspace<Syscall::SC_mmap_params const*> user_params)
128{
129 VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
130 TRY(require_promise(Pledge::stdio));
131 auto params = TRY(copy_typed_from_user(user_params));
132
133 auto addr = (FlatPtr)params.addr;
134 auto size = params.size;
135 auto alignment = params.alignment ? params.alignment : PAGE_SIZE;
136 auto prot = params.prot;
137 auto flags = params.flags;
138 auto fd = params.fd;
139 auto offset = params.offset;
140
141 if (prot & PROT_EXEC) {
142 TRY(require_promise(Pledge::prot_exec));
143 }
144
145 if (prot & MAP_FIXED || prot & MAP_FIXED_NOREPLACE) {
146 TRY(require_promise(Pledge::map_fixed));
147 }
148
149 if (alignment & ~PAGE_MASK)
150 return EINVAL;
151
152 size_t rounded_size = TRY(Memory::page_round_up(size));
153 if (!Memory::is_user_range(VirtualAddress(addr), rounded_size))
154 return EFAULT;
155
156 OwnPtr<KString> name;
157 if (params.name.characters) {
158 if (params.name.length > PATH_MAX)
159 return ENAMETOOLONG;
160 name = TRY(try_copy_kstring_from_user(params.name));
161 }
162
163 if (size == 0)
164 return EINVAL;
165 if ((FlatPtr)addr & ~PAGE_MASK)
166 return EINVAL;
167
168 bool map_shared = flags & MAP_SHARED;
169 bool map_anonymous = flags & MAP_ANONYMOUS;
170 bool map_private = flags & MAP_PRIVATE;
171 bool map_stack = flags & MAP_STACK;
172 bool map_fixed = flags & MAP_FIXED;
173 bool map_noreserve = flags & MAP_NORESERVE;
174 bool map_randomized = flags & MAP_RANDOMIZED;
175 bool map_fixed_noreplace = flags & MAP_FIXED_NOREPLACE;
176
177 if (map_shared && map_private)
178 return EINVAL;
179
180 if (!map_shared && !map_private)
181 return EINVAL;
182
183 if ((map_fixed || map_fixed_noreplace) && map_randomized)
184 return EINVAL;
185
186 TRY(validate_mmap_prot(prot, map_stack, map_anonymous));
187
188 if (map_stack && (!map_private || !map_anonymous))
189 return EINVAL;
190
191 Memory::VirtualRange requested_range { VirtualAddress { addr }, rounded_size };
192 if (addr && !(map_fixed || map_fixed_noreplace)) {
193 // If there's an address but MAP_FIXED wasn't specified, the address is just a hint.
194 requested_range = { {}, rounded_size };
195 }
196
197 Memory::Region* region = nullptr;
198
199 RefPtr<OpenFileDescription> description;
200 LockRefPtr<Memory::VMObject> vmobject;
201 u64 used_offset = 0;
202
203 if (map_anonymous) {
204 auto strategy = map_noreserve ? AllocationStrategy::None : AllocationStrategy::Reserve;
205
206 if (flags & MAP_PURGEABLE) {
207 vmobject = TRY(Memory::AnonymousVMObject::try_create_purgeable_with_size(rounded_size, strategy));
208 } else {
209 vmobject = TRY(Memory::AnonymousVMObject::try_create_with_size(rounded_size, strategy));
210 }
211 } else {
212 if (offset < 0)
213 return EINVAL;
214 used_offset = static_cast<u64>(offset);
215 if (static_cast<size_t>(offset) & ~PAGE_MASK)
216 return EINVAL;
217 description = TRY(open_file_description(fd));
218 if (description->is_directory())
219 return ENODEV;
220 // Require read access even when read protection is not requested.
221 if (!description->is_readable())
222 return EACCES;
223 if (map_shared) {
224 if ((prot & PROT_WRITE) && !description->is_writable())
225 return EACCES;
226 }
227 if (description->inode())
228 TRY(validate_inode_mmap_prot(prot, description->is_readable(), description->is_writable(), map_shared));
229
230 vmobject = TRY(description->vmobject_for_mmap(*this, requested_range, used_offset, map_shared));
231 }
232
233 return address_space().with([&](auto& space) -> ErrorOr<FlatPtr> {
234 // If MAP_FIXED is specified, existing mappings that intersect the requested range are removed.
235 if (map_fixed)
236 TRY(space->unmap_mmap_range(VirtualAddress(addr), size));
237
238 region = TRY(space->allocate_region_with_vmobject(
239 map_randomized ? Memory::RandomizeVirtualAddress::Yes : Memory::RandomizeVirtualAddress::No,
240 requested_range.base(),
241 requested_range.size(),
242 alignment,
243 vmobject.release_nonnull(),
244 used_offset,
245 {},
246 prot,
247 map_shared));
248
249 if (!region)
250 return ENOMEM;
251
252 if (description)
253 region->set_mmap(true, description->is_readable(), description->is_writable());
254 else
255 region->set_mmap(true, false, false);
256
257 if (map_shared)
258 region->set_shared(true);
259 if (map_stack)
260 region->set_stack(true);
261 if (name)
262 region->set_name(move(name));
263
264 PerformanceManager::add_mmap_perf_event(*this, *region);
265
266 return region->vaddr().get();
267 });
268}
269
// mprotect(2): change protection bits on the page-rounded range [addr, addr + size).
// Three cases are handled, in order:
//   1. The range exactly matches one region: update it in place.
//   2. The range is strictly inside one region: split it and re-protect the middle.
//   3. The range spans multiple regions: validate all of them, then update or split each.
ErrorOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int prot)
{
    VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
    TRY(require_promise(Pledge::stdio));

    // Making anything executable requires the prot_exec promise.
    if (prot & PROT_EXEC) {
        TRY(require_promise(Pledge::prot_exec));
    }

    auto range_to_mprotect = TRY(Memory::expand_range_to_page_boundaries(addr.ptr(), size));
    if (!range_to_mprotect.size())
        return EINVAL;

    if (!is_user_range(range_to_mprotect))
        return EFAULT;

    return address_space().with([&](auto& space) -> ErrorOr<FlatPtr> {
        // Case 1: the range matches a single region exactly.
        if (auto* whole_region = space->find_region_from_range(range_to_mprotect)) {
            if (!whole_region->is_mmap())
                return EPERM;
            if (whole_region->is_immutable())
                return EPERM;
            // W^X policy check, including protection history and the dynamic loader exception.
            TRY(validate_mmap_prot(prot, whole_region->is_stack(), whole_region->vmobject().is_anonymous(), whole_region));
            if (whole_region->access() == Memory::prot_to_region_access_flags(prot))
                return 0; // Nothing to change.
            if (whole_region->vmobject().is_inode())
                TRY(validate_inode_mmap_prot(prot, whole_region->mmapped_from_readable(), whole_region->mmapped_from_writable(), whole_region->is_shared()));
            whole_region->set_readable(prot & PROT_READ);
            whole_region->set_writable(prot & PROT_WRITE);
            whole_region->set_executable(prot & PROT_EXEC);

            whole_region->remap();
            return 0;
        }

        // Case 2: check if we can carve out the desired range from an existing region.
        if (auto* old_region = space->find_region_containing(range_to_mprotect)) {
            if (!old_region->is_mmap())
                return EPERM;
            if (old_region->is_immutable())
                return EPERM;
            TRY(validate_mmap_prot(prot, old_region->is_stack(), old_region->vmobject().is_anonymous(), old_region));
            if (old_region->access() == Memory::prot_to_region_access_flags(prot))
                return 0;
            if (old_region->vmobject().is_inode())
                TRY(validate_inode_mmap_prot(prot, old_region->mmapped_from_readable(), old_region->mmapped_from_writable(), old_region->is_shared()));

            // Remove the old region from our regions tree, since we're going to add another region
            // with the exact same start address.
            auto region = space->take_region(*old_region);
            region->unmap();

            // This vector is the region(s) adjacent to our range.
            // We need to allocate a new region for the range we wanted to change permission bits on.
            auto adjacent_regions = TRY(space->try_split_region_around_range(*region, range_to_mprotect));

            size_t new_range_offset_in_vmobject = region->offset_in_vmobject() + (range_to_mprotect.base().get() - region->range().base().get());
            auto* new_region = TRY(space->try_allocate_split_region(*region, range_to_mprotect, new_range_offset_in_vmobject));
            new_region->set_readable(prot & PROT_READ);
            new_region->set_writable(prot & PROT_WRITE);
            new_region->set_executable(prot & PROT_EXEC);

            // Map the new regions using our page directory (they were just allocated and don't have one).
            for (auto* adjacent_region : adjacent_regions) {
                TRY(adjacent_region->map(space->page_directory()));
            }
            TRY(new_region->map(space->page_directory()));
            return 0;
        }

        // Case 3: the range intersects several regions.
        if (auto const& regions = TRY(space->find_regions_intersecting(range_to_mprotect)); regions.size()) {
            size_t full_size_found = 0;
            // Check that all intersecting regions are compatible.
            for (auto const* region : regions) {
                if (!region->is_mmap())
                    return EPERM;
                if (region->is_immutable())
                    return EPERM;
                TRY(validate_mmap_prot(prot, region->is_stack(), region->vmobject().is_anonymous(), region));
                if (region->vmobject().is_inode())
                    TRY(validate_inode_mmap_prot(prot, region->mmapped_from_readable(), region->mmapped_from_writable(), region->is_shared()));

                full_size_found += region->range().intersect(range_to_mprotect).size();
            }

            // The requested range must be fully covered by existing mappings.
            if (full_size_found != range_to_mprotect.size())
                return ENOMEM;

            // Finally, iterate over each region, either updating its access flags if the range covers it wholly,
            // or carving out a new subregion with the appropriate access flags set.
            for (auto* old_region : regions) {
                if (old_region->access() == Memory::prot_to_region_access_flags(prot))
                    continue;

                auto const intersection_to_mprotect = range_to_mprotect.intersect(old_region->range());
                // If the region is completely covered by range, simply update the access flags
                if (intersection_to_mprotect == old_region->range()) {
                    old_region->set_readable(prot & PROT_READ);
                    old_region->set_writable(prot & PROT_WRITE);
                    old_region->set_executable(prot & PROT_EXEC);

                    old_region->remap();
                    continue;
                }
                // Remove the old region from our regions tree, since we're going to add another region
                // with the exact same start address.
                auto region = space->take_region(*old_region);
                region->unmap();

                // This vector is the region(s) adjacent to our range.
                // We need to allocate a new region for the range we wanted to change permission bits on.
                // NOTE(review): `old_region` and `region` refer to the same object here —
                // take_region() transferred ownership but not the address — so using either is equivalent.
                auto adjacent_regions = TRY(space->try_split_region_around_range(*old_region, intersection_to_mprotect));

                // Since the range is not contained in a single region, it can only partially cover its starting and ending region,
                // therefore carving out a chunk from the region will always produce a single extra region, and not two.
                VERIFY(adjacent_regions.size() == 1);

                size_t new_range_offset_in_vmobject = old_region->offset_in_vmobject() + (intersection_to_mprotect.base().get() - old_region->range().base().get());
                auto* new_region = TRY(space->try_allocate_split_region(*region, intersection_to_mprotect, new_range_offset_in_vmobject));

                new_region->set_readable(prot & PROT_READ);
                new_region->set_writable(prot & PROT_WRITE);
                new_region->set_executable(prot & PROT_EXEC);

                // Map the new region using our page directory (they were just allocated and don't have one) if any.
                if (adjacent_regions.size())
                    TRY(adjacent_regions[0]->map(space->page_directory()));

                TRY(new_region->map(space->page_directory()));
            }

            return 0;
        }

        return EINVAL;
    });
}
407
408ErrorOr<FlatPtr> Process::sys$madvise(Userspace<void*> address, size_t size, int advice)
409{
410 VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
411 TRY(require_promise(Pledge::stdio));
412
413 auto range_to_madvise = TRY(Memory::expand_range_to_page_boundaries(address.ptr(), size));
414
415 if (!range_to_madvise.size())
416 return EINVAL;
417
418 if (!is_user_range(range_to_madvise))
419 return EFAULT;
420
421 return address_space().with([&](auto& space) -> ErrorOr<FlatPtr> {
422 auto* region = space->find_region_from_range(range_to_madvise);
423 if (!region)
424 return EINVAL;
425 if (!region->is_mmap())
426 return EPERM;
427 if (region->is_immutable())
428 return EPERM;
429 if (advice == MADV_SET_VOLATILE || advice == MADV_SET_NONVOLATILE) {
430 if (!region->vmobject().is_anonymous())
431 return EINVAL;
432 auto& vmobject = static_cast<Memory::AnonymousVMObject&>(region->vmobject());
433 if (!vmobject.is_purgeable())
434 return EINVAL;
435 bool was_purged = false;
436 TRY(vmobject.set_volatile(advice == MADV_SET_VOLATILE, was_purged));
437 return was_purged ? 1 : 0;
438 }
439 return EINVAL;
440 });
441}
442
443ErrorOr<FlatPtr> Process::sys$set_mmap_name(Userspace<Syscall::SC_set_mmap_name_params const*> user_params)
444{
445 VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
446 TRY(require_promise(Pledge::stdio));
447 auto params = TRY(copy_typed_from_user(user_params));
448
449 if (params.name.length > PATH_MAX)
450 return ENAMETOOLONG;
451
452 auto name = TRY(try_copy_kstring_from_user(params.name));
453 auto range = TRY(Memory::expand_range_to_page_boundaries((FlatPtr)params.addr, params.size));
454
455 return address_space().with([&](auto& space) -> ErrorOr<FlatPtr> {
456 auto* region = space->find_region_from_range(range);
457 if (!region)
458 return EINVAL;
459 if (!region->is_mmap())
460 return EPERM;
461
462 if (region->is_immutable())
463 return EPERM;
464
465 region->set_name(move(name));
466 PerformanceManager::add_mmap_perf_event(*this, *region);
467
468 return 0;
469 });
470}
471
472ErrorOr<FlatPtr> Process::sys$munmap(Userspace<void*> addr, size_t size)
473{
474 VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
475 TRY(require_promise(Pledge::stdio));
476 TRY(address_space().with([&](auto& space) {
477 return space->unmap_mmap_range(addr.vaddr(), size);
478 }));
479 return 0;
480}
481
// mremap(2): remap an existing mapping. Currently the only supported operation
// is converting a shared inode-backed mapping into a private one via MAP_PRIVATE.
ErrorOr<FlatPtr> Process::sys$mremap(Userspace<Syscall::SC_mremap_params const*> user_params)
{
    VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
    TRY(require_promise(Pledge::stdio));
    auto params = TRY(copy_typed_from_user(user_params));

    auto old_range = TRY(Memory::expand_range_to_page_boundaries((FlatPtr)params.old_address, params.old_size));

    return address_space().with([&](auto& space) -> ErrorOr<FlatPtr> {
        auto* old_region = space->find_region_from_range(old_range);
        if (!old_region)
            return EINVAL;

        if (!old_region->is_mmap())
            return EPERM;

        if (old_region->is_immutable())
            return EPERM;

        // Shared inode mapping -> private mapping: replace the region with one
        // backed by a fresh PrivateInodeVMObject over the same inode.
        if (old_region->vmobject().is_shared_inode() && params.flags & MAP_PRIVATE && !(params.flags & (MAP_ANONYMOUS | MAP_NORESERVE))) {
            // Capture everything we need from the old region before tearing it down.
            auto range = old_region->range();
            auto old_prot = region_access_flags_to_prot(old_region->access());
            auto old_offset = old_region->offset_in_vmobject();
            NonnullLockRefPtr inode = static_cast<Memory::SharedInodeVMObject&>(old_region->vmobject()).inode();

            auto new_vmobject = TRY(Memory::PrivateInodeVMObject::try_create_with_inode(inode));
            auto old_name = old_region->take_name();

            bool old_region_was_mmapped_from_readable = old_region->mmapped_from_readable();
            bool old_region_was_mmapped_from_writable = old_region->mmapped_from_writable();

            // Destroy the old region; `old_region` is dangling after this point.
            old_region->unmap();
            space->deallocate_region(*old_region);

            // Re-create the mapping at the same address/size/protection, now private.
            auto* new_region = TRY(space->allocate_region_with_vmobject(range, move(new_vmobject), old_offset, old_name->view(), old_prot, false));
            new_region->set_mmap(true, old_region_was_mmapped_from_readable, old_region_was_mmapped_from_writable);
            return new_region->vaddr().get();
        }

        dbgln("sys$mremap: Unimplemented remap request (flags={})", params.flags);
        return ENOTIMPL;
    });
}
525
// Allocate the master TLS region for this process and copy `initial_data` into it.
// May only be called while the process is still single-threaded.
ErrorOr<FlatPtr> Process::sys$allocate_tls(Userspace<char const*> initial_data, size_t size)
{
    VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
    TRY(require_promise(Pledge::stdio));

    // Size must be a non-zero multiple of the page size.
    if (!size || size % PAGE_SIZE != 0)
        return EINVAL;

    // Only one master TLS region per process.
    if (!m_master_tls_region.is_null())
        return EEXIST;

    if (thread_count() != 1)
        return EFAULT;

    // Grab the sole remaining thread.
    // NOTE(review): the lambda returns IterationDecision::Break on the very first
    // iteration, so `multiple_threads` can never actually become true here; the
    // effective multi-thread guard is the thread_count() check above. Confirm
    // before removing the redundant check.
    Thread* main_thread = nullptr;
    bool multiple_threads = false;
    for_each_thread([&main_thread, &multiple_threads](auto& thread) {
        if (main_thread)
            multiple_threads = true;
        main_thread = &thread;
        return IterationDecision::Break;
    });
    VERIFY(main_thread);

    if (multiple_threads)
        return EINVAL;

    return address_space().with([&](auto& space) -> ErrorOr<FlatPtr> {
        // The master TLS image is mapped RW so userspace can populate it.
        auto* region = TRY(space->allocate_region(Memory::RandomizeVirtualAddress::Yes, {}, size, PAGE_SIZE, "Master TLS"sv, PROT_READ | PROT_WRITE));

        m_master_tls_region = TRY(region->try_make_weak_ptr());
        m_master_tls_size = size;
        m_master_tls_alignment = PAGE_SIZE;

        {
            // Copy the userspace-provided initial TLS image into the freshly
            // allocated region; safe_memcpy reports faults instead of panicking.
            Kernel::SmapDisabler disabler;
            void* fault_at;
            if (!Kernel::safe_memcpy((char*)m_master_tls_region.unsafe_ptr()->vaddr().as_ptr(), (char*)initial_data.ptr(), size, fault_at))
                return EFAULT;
        }

        TRY(main_thread->make_thread_specific_region({}));

        // Point the CPU's TLS base at the main thread's new thread-specific data.
        Processor::set_thread_specific_data(main_thread->thread_specific_data());

        return m_master_tls_region.unsafe_ptr()->vaddr().get();
    });
}
574
575ErrorOr<FlatPtr> Process::sys$annotate_mapping(Userspace<void*> address, int flags)
576{
577 VERIFY_NO_PROCESS_BIG_LOCK(this);
578 if (flags == to_underlying(VirtualMemoryRangeFlags::None))
579 return EINVAL;
580
581 if (!address)
582 return EINVAL;
583
584 if (!Memory::is_user_address(address.vaddr()))
585 return EFAULT;
586
587 return address_space().with([&](auto& space) -> ErrorOr<FlatPtr> {
588 if (space->enforces_syscall_regions() && (flags & to_underlying(VirtualMemoryRangeFlags::SyscallCode)))
589 return EPERM;
590
591 auto* region = space->find_region_containing(Memory::VirtualRange { address.vaddr(), 1 });
592 if (!region)
593 return EINVAL;
594
595 if (!region->is_mmap())
596 return EINVAL;
597 if (region->is_immutable())
598 return EPERM;
599
600 if (flags & to_underlying(VirtualMemoryRangeFlags::SyscallCode))
601 region->set_syscall_region(true);
602 if (flags & to_underlying(VirtualMemoryRangeFlags::Immutable))
603 region->set_immutable();
604 return 0;
605 });
606}
607
608ErrorOr<FlatPtr> Process::sys$msync(Userspace<void*> address, size_t size, int flags)
609{
610 if ((flags & (MS_SYNC | MS_ASYNC | MS_INVALIDATE)) != flags)
611 return EINVAL;
612
613 bool is_async = (flags & MS_ASYNC) == MS_ASYNC;
614 bool is_sync = (flags & MS_SYNC) == MS_SYNC;
615 if (is_sync == is_async)
616 return EINVAL;
617
618 if (address.ptr() % PAGE_SIZE != 0)
619 return EINVAL;
620
621 // Note: This is not specified
622 auto rounded_size = TRY(Memory::page_round_up(size));
623
624 return address_space().with([&](auto& space) -> ErrorOr<FlatPtr> {
625 auto regions = TRY(space->find_regions_intersecting(Memory::VirtualRange { address.vaddr(), rounded_size }));
626 // All regions from address up to address+size shall be mapped
627 if (regions.is_empty())
628 return ENOMEM;
629
630 size_t total_intersection_size = 0;
631 Memory::VirtualRange range_to_sync { address.vaddr(), rounded_size };
632 for (auto const* region : regions) {
633 // Region was not mapped
634 if (!region->is_mmap())
635 return ENOMEM;
636 total_intersection_size += region->range().intersect(range_to_sync).size();
637 }
638 // Part of the indicated range was not mapped
639 if (total_intersection_size != size)
640 return ENOMEM;
641
642 for (auto* region : regions) {
643 auto& vmobject = region->vmobject();
644 if (!vmobject.is_shared_inode())
645 continue;
646
647 off_t offset = region->offset_in_vmobject() + address.ptr() - region->range().base().get();
648
649 auto& inode_vmobject = static_cast<Memory::SharedInodeVMObject&>(vmobject);
650 // FIXME: If multiple regions belong to the same vmobject we might want to coalesce these writes
651 // FIXME: Handle MS_ASYNC
652 TRY(inode_vmobject.sync(offset / PAGE_SIZE, rounded_size / PAGE_SIZE));
653 // FIXME: Handle MS_INVALIDATE
654 }
655 return 0;
656 });
657}
658
659}