/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * vma.h
 *
 * Core VMA manipulation API implemented in vma.c.
 */
#ifndef __MM_VMA_H
#define __MM_VMA_H

/*
 * VMA lock generalization
 */
struct vma_prepare {
        struct vm_area_struct *vma;
        struct vm_area_struct *adj_next;
        struct file *file;
        struct address_space *mapping;
        struct anon_vma *anon_vma;
        struct vm_area_struct *insert;
        struct vm_area_struct *remove;
        struct vm_area_struct *remove2;

        bool skip_vma_uprobe :1;
};

struct unlink_vma_file_batch {
        int count;
        struct vm_area_struct *vmas[8];
};

/*
 * vma munmap operation
 */
struct vma_munmap_struct {
        struct vma_iterator *vmi;
        struct vm_area_struct *vma;     /* The first vma to munmap */
        struct vm_area_struct *prev;    /* vma before the munmap area */
        struct vm_area_struct *next;    /* vma after the munmap area */
        struct list_head *uf;           /* Userfaultfd list_head */
        unsigned long start;            /* Aligned start addr (inclusive) */
        unsigned long end;              /* Aligned end addr (exclusive) */
        unsigned long unmap_start;      /* Unmap PTE start */
        unsigned long unmap_end;        /* Unmap PTE end */
        int vma_count;                  /* Number of vmas that will be removed */
        bool unlock;                    /* Unlock after the munmap */
        bool clear_ptes;                /* If there are outstanding PTEs to be cleared */
        /* 2 byte hole */
        unsigned long nr_pages;         /* Number of pages being removed */
        unsigned long locked_vm;        /* Number of locked pages */
        unsigned long nr_accounted;     /* Number of VM_ACCOUNT pages */
        unsigned long exec_vm;
        unsigned long stack_vm;
        unsigned long data_vm;
};

enum vma_merge_state {
        VMA_MERGE_START,
        VMA_MERGE_ERROR_NOMEM,
        VMA_MERGE_NOMERGE,
        VMA_MERGE_SUCCESS,
};

/*
 * Describes a VMA merge operation and is threaded throughout it.
 *
 * Any of the fields may be mutated by the merge operation, so no guarantees are
 * made about the contents of this structure after a merge operation has completed.
 */
struct vma_merge_struct {
        struct mm_struct *mm;
        struct vma_iterator *vmi;
        /*
         * Adjacent VMAs, any of which may be NULL if not present:
         *
         * |------|--------|------|
         * | prev | middle | next |
         * |------|--------|------|
         *
         * middle may not yet exist in the case of a proposed new VMA being
         * merged, or it may be an existing VMA.
         *
         * next may be assigned by the caller.
         */
        struct vm_area_struct *prev;
        struct vm_area_struct *middle;
        struct vm_area_struct *next;
        /* This is the VMA we ultimately target to become the merged VMA. */
        struct vm_area_struct *target;
        /*
         * Initially, the start, end, pgoff fields are provided by the caller
         * and describe the proposed new VMA range, whether modifying an
         * existing VMA (which will be 'middle'), or adding a new one.
         *
         * During the merge process these fields are updated to describe the new
         * range _including those VMAs which will be merged_.
         */
        unsigned long start;
        unsigned long end;
        pgoff_t pgoff;

        vm_flags_t vm_flags;
        struct file *file;
        struct anon_vma *anon_vma;
        struct mempolicy *policy;
        struct vm_userfaultfd_ctx uffd_ctx;
        struct anon_vma_name *anon_name;
        enum vma_merge_state state;

        /* Flags which callers can use to modify merge behaviour: */

        /*
         * If we can expand, simply do so. We know there is nothing to merge to
         * the right. Does not reset state upon failure to merge. The VMA
         * iterator is assumed to be positioned at the previous VMA, rather than
         * at the gap.
         */
        bool just_expand :1;

        /*
         * If a merge is possible, but an OOM error occurs, give up and don't
         * execute the merge, returning NULL.
         */
        bool give_up_on_oom :1;

        /*
         * If set, skip uprobe_mmap() upon the merged VMA.
         */
        bool skip_vma_uprobe :1;

        /* Internal flags set during merge process: */

        /*
         * Internal flag indicating the merge increases vmg->middle->vm_start
         * (and thereby, vmg->prev->vm_end).
         */
        bool __adjust_middle_start :1;
        /*
         * Internal flag indicating the merge decreases vmg->next->vm_start
         * (and thereby, vmg->middle->vm_end).
         */
        bool __adjust_next_start :1;
        /*
         * Internal flag used during the merge operation to indicate we will
         * remove vmg->middle.
         */
        bool __remove_middle :1;
        /*
         * Internal flag used during the merge operation to indicate we will
         * remove vmg->next.
         */
        bool __remove_next :1;

};

static inline bool vmg_nomem(struct vma_merge_struct *vmg)
{
        return vmg->state == VMA_MERGE_ERROR_NOMEM;
}

/* Assumes addr >= vma->vm_start. */
static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
                                       unsigned long addr)
{
        return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
}
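/*
 * For example (hypothetical values, assuming 4 KiB pages so that PHYS_PFN()
 * shifts the byte offset right by 12): for a VMA with vm_start == 0x10000 and
 * vm_pgoff == 2, vma_pgoff_offset(vma, 0x13000) evaluates to
 * 2 + ((0x13000 - 0x10000) >> 12) == 2 + 3 == 5.
 */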

#define VMG_STATE(name, mm_, vmi_, start_, end_, vm_flags_, pgoff_)    \
        struct vma_merge_struct name = {                                \
                .mm = mm_,                                              \
                .vmi = vmi_,                                            \
                .start = start_,                                        \
                .end = end_,                                            \
                .vm_flags = vm_flags_,                                  \
                .pgoff = pgoff_,                                        \
                .state = VMA_MERGE_START,                               \
        }

#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_)   \
        struct vma_merge_struct name = {                        \
                .mm = vma_->vm_mm,                              \
                .vmi = vmi_,                                    \
                .prev = prev_,                                  \
                .middle = vma_,                                 \
                .next = NULL,                                   \
                .start = start_,                                \
                .end = end_,                                    \
                .vm_flags = vma_->vm_flags,                     \
                .pgoff = vma_pgoff_offset(vma_, start_),        \
                .file = vma_->vm_file,                          \
                .anon_vma = vma_->anon_vma,                     \
                .policy = vma_policy(vma_),                     \
                .uffd_ctx = vma_->vm_userfaultfd_ctx,           \
                .anon_name = anon_vma_name(vma_),               \
                .state = VMA_MERGE_START,                       \
        }
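/*
 * A minimal usage sketch (illustrative only, not lifted verbatim from a
 * caller; addr, len, pgoff and vm_flags stand in for caller-provided
 * values): a path mapping a brand new range seeds the merge state with
 * VMG_STATE() and attempts a merge, only creating a fresh VMA if that fails:
 *
 *      VMG_STATE(vmg, mm, &vmi, addr, addr + len, vm_flags, pgoff);
 *
 *      vma = vma_merge_new_range(&vmg);
 *      if (!vma) {
 *              if (vmg_nomem(&vmg))
 *                      return -ENOMEM;
 *              // otherwise allocate and insert a new VMA for the range
 *      }
 */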

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm);
#else
#define validate_mm(mm) do { } while (0)
#endif

__must_check int vma_expand(struct vma_merge_struct *vmg);
__must_check int vma_shrink(struct vma_iterator *vmi,
                struct vm_area_struct *vma,
                unsigned long start, unsigned long end, pgoff_t pgoff);

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
                                     struct vm_area_struct *vma, gfp_t gfp)
{
        if (vmi->mas.status != ma_start &&
            ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
                vma_iter_invalidate(vmi);

        __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
        mas_store_gfp(&vmi->mas, vma, gfp);
        if (unlikely(mas_is_err(&vmi->mas)))
                return -ENOMEM;

        vma_mark_attached(vma);
        return 0;
}

/*
 * Temporary helper function for stacked mmap handlers that specify
 * f_op->mmap() but whose underlying file system might implement
 * f_op->mmap_prepare().
 */
static inline void set_vma_from_desc(struct vm_area_struct *vma,
                struct vm_area_desc *desc)
{
        /*
         * Since we're invoking .mmap_prepare() despite having a partially
         * established VMA, we must take care to handle setting fields
         * correctly.
         */

        /* Mutable fields. Populated with initial state. */
        vma->vm_pgoff = desc->pgoff;
        if (desc->vm_file != vma->vm_file)
                vma_set_file(vma, desc->vm_file);
        if (desc->vm_flags != vma->vm_flags)
                vm_flags_set(vma, desc->vm_flags);
        vma->vm_page_prot = desc->page_prot;

        /* User-defined fields. */
        vma->vm_ops = desc->vm_ops;
        vma->vm_private_data = desc->private_data;
}
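/*
 * A rough sketch of the intended bridging pattern (illustrative only; it
 * assumes f_op->mmap_prepare() takes a struct vm_area_desc *, and
 * underlying_file is a hypothetical stand-in for the stacked filesystem's
 * backing file):
 *
 *      struct vm_area_desc desc = {
 *              .pgoff     = vma->vm_pgoff,
 *              .vm_file   = vma->vm_file,
 *              .vm_flags  = vma->vm_flags,
 *              .page_prot = vma->vm_page_prot,
 *      };
 *      int err = underlying_file->f_op->mmap_prepare(&desc);
 *
 *      if (err)
 *              return err;
 *      set_vma_from_desc(vma, &desc);
 */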

int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
                    struct mm_struct *mm, unsigned long start,
                    unsigned long end, struct list_head *uf, bool unlock);

int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
                  unsigned long start, size_t len, struct list_head *uf,
                  bool unlock);

void remove_vma(struct vm_area_struct *vma);

void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct vm_area_struct *next);

/**
 * vma_modify_flags() - Perform any necessary split/merge in preparation for
 * setting VMA flags to *@vm_flags_ptr in the range @start to @end contained
 * within @vma.
 * @vmi: Valid VMA iterator positioned at @vma.
 * @prev: The VMA immediately prior to @vma, or NULL if @vma is the first.
 * @vma: The VMA containing the range @start to @end to be updated.
 * @start: The start of the range to update. May be offset within @vma.
 * @end: The exclusive end of the range to update. May be offset within @vma.
 * @vm_flags_ptr: A pointer to the VMA flags that the @start to @end range is
 * about to be set to. On merge, this will be updated to include sticky flags.
 *
 * IMPORTANT: The actual modification being requested here is NOT applied;
 * rather, the VMA is perhaps split, perhaps merged to accommodate the change,
 * and the caller is expected to perform the actual modification.
 *
 * In order to account for sticky VMA flags, the @vm_flags_ptr parameter points
 * to the requested flags, which are then updated so that the caller, should
 * they overwrite any existing flags, correctly retains these.
 *
 * Returns: A VMA which contains the range @start to @end ready to have its
 * flags altered to *@vm_flags_ptr.
 */
__must_check struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi,
                struct vm_area_struct *prev, struct vm_area_struct *vma,
                unsigned long start, unsigned long end,
                vm_flags_t *vm_flags_ptr);
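/*
 * A minimal usage sketch (illustrative; newflags is an assumed local holding
 * the requested flags): an mprotect()-style caller performs the split/merge
 * first and only then applies the flags itself, so any sticky flags merged
 * into newflags are retained:
 *
 *      vma = vma_modify_flags(vmi, prev, vma, start, end, &newflags);
 *      if (IS_ERR(vma))
 *              return PTR_ERR(vma);
 *
 *      vm_flags_reset(vma, newflags);
 */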

/**
 * vma_modify_name() - Perform any necessary split/merge in preparation for
 * setting anonymous VMA name to @new_name in the range @start to @end contained
 * within @vma.
 * @vmi: Valid VMA iterator positioned at @vma.
 * @prev: The VMA immediately prior to @vma, or NULL if @vma is the first.
 * @vma: The VMA containing the range @start to @end to be updated.
 * @start: The start of the range to update. May be offset within @vma.
 * @end: The exclusive end of the range to update. May be offset within @vma.
 * @new_name: The anonymous VMA name that the @start to @end range is about to
 * be set to.
 *
 * IMPORTANT: The actual modification being requested here is NOT applied;
 * rather, the VMA is perhaps split, perhaps merged to accommodate the change,
 * and the caller is expected to perform the actual modification.
 *
 * Returns: A VMA which contains the range @start to @end ready to have its
 * anonymous VMA name changed to @new_name.
 */
__must_check struct vm_area_struct *vma_modify_name(struct vma_iterator *vmi,
                struct vm_area_struct *prev, struct vm_area_struct *vma,
                unsigned long start, unsigned long end,
                struct anon_vma_name *new_name);

/**
 * vma_modify_policy() - Perform any necessary split/merge in preparation for
 * setting NUMA policy to @new_pol in the range @start to @end contained
 * within @vma.
 * @vmi: Valid VMA iterator positioned at @vma.
 * @prev: The VMA immediately prior to @vma, or NULL if @vma is the first.
 * @vma: The VMA containing the range @start to @end to be updated.
 * @start: The start of the range to update. May be offset within @vma.
 * @end: The exclusive end of the range to update. May be offset within @vma.
 * @new_pol: The NUMA policy that the @start to @end range is about to be set
 * to.
 *
 * IMPORTANT: The actual modification being requested here is NOT applied;
 * rather, the VMA is perhaps split, perhaps merged to accommodate the change,
 * and the caller is expected to perform the actual modification.
 *
 * Returns: A VMA which contains the range @start to @end ready to have its
 * NUMA policy changed to @new_pol.
 */
__must_check struct vm_area_struct *vma_modify_policy(struct vma_iterator *vmi,
                struct vm_area_struct *prev, struct vm_area_struct *vma,
                unsigned long start, unsigned long end,
                struct mempolicy *new_pol);

/**
 * vma_modify_flags_uffd() - Perform any necessary split/merge in preparation
 * for setting VMA flags to @vm_flags and UFFD context to @new_ctx in the range
 * @start to @end contained within @vma.
 * @vmi: Valid VMA iterator positioned at @vma.
 * @prev: The VMA immediately prior to @vma, or NULL if @vma is the first.
 * @vma: The VMA containing the range @start to @end to be updated.
 * @start: The start of the range to update. May be offset within @vma.
 * @end: The exclusive end of the range to update. May be offset within @vma.
 * @vm_flags: The VMA flags that the @start to @end range is about to be set to.
 * @new_ctx: The userfaultfd context that the @start to @end range is about to
 * be set to.
 * @give_up_on_oom: If an out of memory condition occurs on merge, simply give
 * up on it and treat the merge as best-effort.
 *
 * IMPORTANT: The actual modification being requested here is NOT applied;
 * rather, the VMA is perhaps split, perhaps merged to accommodate the change,
 * and the caller is expected to perform the actual modification.
 *
 * Returns: A VMA which contains the range @start to @end ready to have its VMA
 * flags changed to @vm_flags and its userfaultfd context changed to @new_ctx.
 */
__must_check struct vm_area_struct *vma_modify_flags_uffd(struct vma_iterator *vmi,
                struct vm_area_struct *prev, struct vm_area_struct *vma,
                unsigned long start, unsigned long end, vm_flags_t vm_flags,
                struct vm_userfaultfd_ctx new_ctx, bool give_up_on_oom);

__must_check struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg);

__must_check struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
                struct vm_area_struct *vma, unsigned long delta);

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
                               struct vm_area_struct *vma);

struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                unsigned long addr, unsigned long len, pgoff_t pgoff,
                bool *need_rmap_locks);

struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);

bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);

unsigned long mmap_region(struct file *file, unsigned long addr,
                unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
                struct list_head *uf);

int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
                 unsigned long addr, unsigned long request, unsigned long flags);

unsigned long unmapped_area(struct vm_unmapped_area_info *info);
unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
        /*
         * We want to check manually if we can change individual PTEs writable
         * if we can't do that automatically for all PTEs in a mapping. For
         * private mappings, that's always the case when we have write
         * permissions as we properly have to handle COW.
         */
        if (vma->vm_flags & VM_SHARED)
                return vma_wants_writenotify(vma, vma->vm_page_prot);
        return !!(vma->vm_flags & VM_WRITE);
}

#ifdef CONFIG_MMU
static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, vm_flags_t vm_flags)
{
        return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
#endif

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
                                                    unsigned long min)
{
        return mas_prev(&vmi->mas, min);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
        return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
        return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
        return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
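/*
 * Taken together these helpers let accounting code bucket a mapping into
 * exactly one per-mm counter. A sketch of that pattern (illustrative;
 * npages is assumed to be the signed number of pages being added or removed):
 *
 *      if (is_exec_mapping(flags))
 *              mm->exec_vm += npages;
 *      else if (is_stack_mapping(flags))
 *              mm->stack_vm += npages;
 *      else if (is_data_mapping(flags))
 *              mm->data_vm += npages;
 */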


static inline void vma_iter_config(struct vma_iterator *vmi,
                unsigned long index, unsigned long last)
{
        __mas_set_range(&vmi->mas, index, last - 1);
}

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
        mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
        return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
        return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
                                       unsigned long max, unsigned long size)
{
        return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
                                        unsigned long max, unsigned long size)
{
        return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
                                    struct vm_area_struct *vma)
{
        return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
        mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
        return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store_overwrite(struct vma_iterator *vmi,
                                            struct vm_area_struct *vma)
{
        vma_assert_attached(vma);

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
        if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
                        vmi->mas.index > vma->vm_start)) {
                pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
                        vmi->mas.index, vma->vm_start, vma->vm_start,
                        vma->vm_end, vmi->mas.index, vmi->mas.last);
        }
        if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
                        vmi->mas.last < vma->vm_start)) {
                pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
                        vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
                        vmi->mas.index, vmi->mas.last);
        }
#endif

        if (vmi->mas.status != ma_start &&
            ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
                vma_iter_invalidate(vmi);

        __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
        mas_store_prealloc(&vmi->mas, vma);
}

static inline void vma_iter_store_new(struct vma_iterator *vmi,
                                      struct vm_area_struct *vma)
{
        vma_mark_attached(vma);
        vma_iter_store_overwrite(vmi, vma);
}

static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
        return vmi->mas.index;
}

static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
        return vmi->mas.last + 1;
}

static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
                                      unsigned long count)
{
        return mas_expected_entries(&vmi->mas, count);
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
        return mas_prev_range(&vmi->mas, 0);
}

/*
 * Retrieve the next VMA and rewind the iterator to the end of the previous
 * VMA, or, if no previous VMA exists, to index 0.
 */
static inline
struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi,
                struct vm_area_struct **pprev)
{
        struct vm_area_struct *next = vma_next(vmi);
        struct vm_area_struct *prev = vma_prev(vmi);

        /*
         * Consider the case where no previous VMA exists. We advance to the
         * next VMA, skipping any gap, then rewind to the start of the range.
         *
         * If we were to unconditionally advance to the next range we'd wind up
         * at the next VMA again, so we check to ensure there is a previous VMA
         * to skip over.
         */
        if (prev)
                vma_iter_next_range(vmi);

        if (pprev)
                *pprev = prev;

        return next;
}

#ifdef CONFIG_64BIT
static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
        return (vma->vm_flags & VM_SEALED);
}
#else
static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
        return false;
}
#endif

#if defined(CONFIG_STACK_GROWSUP)
int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#endif

int expand_downwards(struct vm_area_struct *vma, unsigned long address);

int __vm_munmap(unsigned long start, size_t len, bool unlock);

int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma);

/* vma_init.h, shared between CONFIG_MMU and nommu. */
void __init vma_state_init(void);
struct vm_area_struct *vm_area_alloc(struct mm_struct *mm);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig);
void vm_area_free(struct vm_area_struct *vma);

/* vma_exec.c */
#ifdef CONFIG_MMU
int create_init_stack_vma(struct mm_struct *mm, struct vm_area_struct **vmap,
                          unsigned long *top_mem_p);
int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift);
#endif

#endif /* __MM_VMA_H */