
mm: update core kernel code to use vm_flags_t consistently

The core kernel code is currently very inconsistent in its use of
vm_flags_t vs. unsigned long. This prevents us from changing the type of
vm_flags_t in the future and is simply incorrect, so correct this.
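For context, vm_flags_t is today a plain typedef of unsigned long (as in
include/linux/mm_types.h), which is why the mixed usage compiles at all. A
minimal sketch; the typedef matches mainline, the two declarations below it
are purely hypothetical illustration:

    /* include/linux/mm_types.h: the two types are interchangeable today. */
    typedef unsigned long vm_flags_t;

    /* Both lines compile now, but only the first keeps working if
     * vm_flags_t later becomes a wider or bitwise-checked type. */
    vm_flags_t typed_flags = VM_READ | VM_WRITE;
    unsigned long raw_flags = VM_READ | VM_WRITE;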

While this results in rather a lot of churn, it is a critical
prerequisite for a planned future change to the VMA flag type.

Additionally, update VMA userland tests to account for the changes.

To make review easier and to break things into smaller parts, driver and
architecture-specific changes are left for a subsequent commit.

The code has been adjusted to cascade the changes across all calling code
as far as needed.

Overall, this patch does not introduce any functional change.
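As a minimal userspace sketch (hypothetical names and widths, not kernel
code) of what the consistency buys: once every user spells vm_flags_t, the
underlying type can be changed in one place without silently dropping flag
bits in unconverted callers:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical future definition: vm_flags_t widened beyond what a
     * 32-bit unsigned long can hold. */
    typedef uint64_t vm_flags_t;

    #define VM_READ     0x1ULL
    #define VM_HIGH_BIT (1ULL << 40)    /* illustrative flag above bit 31 */

    struct vm_area_struct { vm_flags_t vm_flags; };

    /* Converted caller: type matches, all bits preserved. */
    static vm_flags_t flags_typed(const struct vm_area_struct *vma)
    {
            return vma->vm_flags;
    }

    /* Unconverted caller: on a 32-bit target, unsigned long is 32 bits,
     * so VM_HIGH_BIT would be silently dropped here. */
    static unsigned long flags_raw(const struct vm_area_struct *vma)
    {
            return (unsigned long)vma->vm_flags;
    }

    int main(void)
    {
            struct vm_area_struct vma = { .vm_flags = VM_READ | VM_HIGH_BIT };

            printf("typed: %llx raw: %lx\n",
                   (unsigned long long)flags_typed(&vma), flags_raw(&vma));
            return 0;
    }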

Link: https://lkml.kernel.org/r/d1588e7bb96d1ea3fe7b9df2c699d5b4592d901d.1750274467.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: Kees Cook <kees@kernel.org>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Acked-by: Jan Kara <jack@suse.cz>
Acked-by: Christian Brauner <brauner@kernel.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Pedro Falcato <pfalcato@suse.de>
Acked-by: Zi Yan <ziy@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Jann Horn <jannh@google.com>
Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Jarkko Sakkinen <jarkko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Lorenzo Stoakes, committed by Andrew Morton (bfbe7110 78ddaa35)

+270 -270
+1 -1
fs/exec.c
@@ -604,7 +604,7 @@
         struct mm_struct *mm = current->mm;
         struct vm_area_struct *vma = bprm->vma;
         struct vm_area_struct *prev = NULL;
-        unsigned long vm_flags;
+        vm_flags_t vm_flags;
         unsigned long stack_base;
         unsigned long stack_size;
         unsigned long stack_expand;
+1 -1
fs/userfaultfd.c
@@ -1242,7 +1242,7 @@
         int ret;
         struct uffdio_register uffdio_register;
         struct uffdio_register __user *user_uffdio_register;
-        unsigned long vm_flags;
+        vm_flags_t vm_flags;
         bool found;
         bool basic_ioctls;
         unsigned long start, end;
+1 -1
include/linux/coredump.h
@@ -10,7 +10,7 @@
 #ifdef CONFIG_COREDUMP
 struct core_vma_metadata {
         unsigned long start, end;
-        unsigned long flags;
+        vm_flags_t flags;
         unsigned long dump_size;
         unsigned long pgoff;
         struct file *file;
+6 -6
include/linux/huge_mm.h
@@ -261,7 +261,7 @@
 }
 
 unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
-                                         unsigned long vm_flags,
+                                         vm_flags_t vm_flags,
                                          unsigned long tva_flags,
                                          unsigned long orders);
 
@@ -282,7 +282,7 @@
  */
 static inline
 unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
-                                       unsigned long vm_flags,
+                                       vm_flags_t vm_flags,
                                        unsigned long tva_flags,
                                        unsigned long orders)
 {
@@ -317,7 +317,7 @@
          (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
 
 static inline bool vma_thp_disabled(struct vm_area_struct *vma,
-                                    unsigned long vm_flags)
+                                    vm_flags_t vm_flags)
 {
         /*
          * Explicitly disabled through madvise or prctl, or some
@@ -431,7 +431,7 @@
                 __split_huge_pud(__vma, __pud, __address);      \
         } while (0)
 
-int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
+int hugepage_madvise(struct vm_area_struct *vma, vm_flags_t *vm_flags,
                      int advice);
 int madvise_collapse(struct vm_area_struct *vma,
                      struct vm_area_struct **prev,
@@ -524,7 +524,7 @@
 }
 
 static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
-                                                     unsigned long vm_flags,
+                                                     vm_flags_t vm_flags,
                                                      unsigned long tva_flags,
                                                      unsigned long orders)
 {
@@ -593,7 +593,7 @@
         do { } while (0)
 
 static inline int hugepage_madvise(struct vm_area_struct *vma,
-                                   unsigned long *vm_flags, int advice)
+                                   vm_flags_t *vm_flags, int advice)
 {
         return -EINVAL;
 }
+2 -2
include/linux/khugepaged.h
@@ -12,7 +12,7 @@
 extern void __khugepaged_enter(struct mm_struct *mm);
 extern void __khugepaged_exit(struct mm_struct *mm);
 extern void khugepaged_enter_vma(struct vm_area_struct *vma,
-                                 unsigned long vm_flags);
+                                 vm_flags_t vm_flags);
 extern void khugepaged_min_free_kbytes_update(void);
 extern bool current_is_khugepaged(void);
 extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
@@ -37,7 +37,7 @@
 {
 }
 static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
-                                        unsigned long vm_flags)
+                                        vm_flags_t vm_flags)
 {
 }
 static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
+2 -2
include/linux/ksm.h
@@ -16,7 +16,7 @@
 
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
-                unsigned long end, int advice, unsigned long *vm_flags);
+                unsigned long end, int advice, vm_flags_t *vm_flags);
 vm_flags_t ksm_vma_flags(const struct mm_struct *mm, const struct file *file,
                          vm_flags_t vm_flags);
 int ksm_enable_merge_any(struct mm_struct *mm);
@@ -133,7 +133,7 @@
 
 #ifdef CONFIG_MMU
 static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
-                unsigned long end, int advice, unsigned long *vm_flags)
+                unsigned long end, int advice, vm_flags_t *vm_flags)
 {
         return 0;
 }
+2 -2
include/linux/memfd.h
@@ -14,7 +14,7 @@
  * We also update VMA flags if appropriate by manipulating the VMA flags pointed
  * to by vm_flags_ptr.
  */
-int memfd_check_seals_mmap(struct file *file, unsigned long *vm_flags_ptr);
+int memfd_check_seals_mmap(struct file *file, vm_flags_t *vm_flags_ptr);
 #else
 static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a)
 {
@@ -25,7 +25,7 @@
         return ERR_PTR(-EINVAL);
 }
 static inline int memfd_check_seals_mmap(struct file *file,
-                                         unsigned long *vm_flags_ptr)
+                                         vm_flags_t *vm_flags_ptr)
 {
         return 0;
 }
+3 -3
include/linux/mm.h
@@ -2564,7 +2564,7 @@
                           unsigned long end, unsigned long cp_flags);
 extern int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
                           struct vm_area_struct *vma, struct vm_area_struct **pprev,
-                          unsigned long start, unsigned long end, unsigned long newflags);
+                          unsigned long start, unsigned long end, vm_flags_t newflags);
 
 /*
  * doesn't attempt to fault and will return short.
@@ -3323,9 +3323,9 @@
 
 extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
                                    const struct vm_special_mapping *sm);
-extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
+struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
                                    unsigned long addr, unsigned long len,
-                                   unsigned long flags,
+                                   vm_flags_t vm_flags,
                                    const struct vm_special_mapping *spec);
 
 unsigned long randomize_stack_top(unsigned long stack_top);
+1 -1
include/linux/mm_types.h
@@ -1081,7 +1081,7 @@
                 unsigned long data_vm;     /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
                 unsigned long exec_vm;     /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
                 unsigned long stack_vm;    /* VM_STACK */
-                unsigned long def_flags;
+                vm_flags_t def_flags;
 
                 /**
                  * @write_protect_seq: Locked when any thread is write
+2 -2
include/linux/mman.h
@@ -137,7 +137,7 @@
 /*
  * Combine the mmap "prot" argument into "vm_flags" used internally.
  */
-static inline unsigned long
+static inline vm_flags_t
 calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
 {
         return _calc_vm_trans(prot, PROT_READ, VM_READ ) |
@@ -149,7 +149,7 @@
 /*
  * Combine the mmap "flags" argument into "vm_flags" used internally.
  */
-static inline unsigned long
+static inline vm_flags_t
 calc_vm_flag_bits(struct file *file, unsigned long flags)
 {
         return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
+2 -2
include/linux/rmap.h
@@ -893,7 +893,7 @@
  * Called from mm/vmscan.c to handle paging out
  */
 int folio_referenced(struct folio *, int is_locked,
-                     struct mem_cgroup *memcg, unsigned long *vm_flags);
+                     struct mem_cgroup *memcg, vm_flags_t *vm_flags);
 
 void try_to_migrate(struct folio *folio, enum ttu_flags flags);
 void try_to_unmap(struct folio *, enum ttu_flags flags);
@@ -1025,7 +1025,7 @@
 
 static inline int folio_referenced(struct folio *folio, int is_locked,
                                    struct mem_cgroup *memcg,
-                                   unsigned long *vm_flags)
+                                   vm_flags_t *vm_flags)
 {
         *vm_flags = 0;
         return 0;
+2 -2
include/linux/userfaultfd_k.h
@@ -209,7 +209,7 @@
 }
 
 static inline bool vma_can_userfault(struct vm_area_struct *vma,
-                                     unsigned long vm_flags,
+                                     vm_flags_t vm_flags,
                                      bool wp_async)
 {
         vm_flags &= __VM_UFFD_FLAGS;
@@ -281,7 +281,7 @@
 
 int userfaultfd_register_range(struct userfaultfd_ctx *ctx,
                                struct vm_area_struct *vma,
-                               unsigned long vm_flags,
+                               vm_flags_t vm_flags,
                                unsigned long start, unsigned long end,
                                bool wp_async);
 
+3 -3
include/trace/events/fs_dax.h
@@ -15,7 +15,7 @@
                 __field(unsigned long, ino)
                 __field(unsigned long, vm_start)
                 __field(unsigned long, vm_end)
-                __field(unsigned long, vm_flags)
+                __field(vm_flags_t, vm_flags)
                 __field(unsigned long, address)
                 __field(pgoff_t, pgoff)
                 __field(pgoff_t, max_pgoff)
@@ -67,7 +67,7 @@
         TP_ARGS(inode, vmf, zero_folio, radix_entry),
         TP_STRUCT__entry(
                 __field(unsigned long, ino)
-                __field(unsigned long, vm_flags)
+                __field(vm_flags_t, vm_flags)
                 __field(unsigned long, address)
                 __field(struct folio *, zero_folio)
                 __field(void *, radix_entry)
@@ -107,7 +107,7 @@
         TP_ARGS(inode, vmf, result),
         TP_STRUCT__entry(
                 __field(unsigned long, ino)
-                __field(unsigned long, vm_flags)
+                __field(vm_flags_t, vm_flags)
                 __field(unsigned long, address)
                 __field(pgoff_t, pgoff)
                 __field(dev_t, dev)
+1 -1
mm/debug.c
@@ -290,7 +290,7 @@
                vmg->vmi, vmg->vmi ? vma_iter_addr(vmg->vmi) : 0,
                vmg->vmi ? vma_iter_end(vmg->vmi) : 0,
                vmg->prev, vmg->middle, vmg->next, vmg->target,
-               vmg->start, vmg->end, vmg->flags,
+               vmg->start, vmg->end, vmg->vm_flags,
                vmg->file, vmg->anon_vma, vmg->policy,
 #ifdef CONFIG_USERFAULTFD
                vmg->uffd_ctx.ctx,
+4 -4
mm/execmem.c
@@ -26,7 +26,7 @@
 
 #ifdef CONFIG_MMU
 static void *execmem_vmalloc(struct execmem_range *range, size_t size,
-                             pgprot_t pgprot, unsigned long vm_flags)
+                             pgprot_t pgprot, vm_flags_t vm_flags)
 {
         bool kasan = range->flags & EXECMEM_KASAN_SHADOW;
         gfp_t gfp_flags = GFP_KERNEL | __GFP_NOWARN;
@@ -82,7 +82,7 @@
 }
 #else
 static void *execmem_vmalloc(struct execmem_range *range, size_t size,
-                             pgprot_t pgprot, unsigned long vm_flags)
+                             pgprot_t pgprot, vm_flags_t vm_flags)
 {
         return vmalloc(size);
 }
@@ -256,7 +256,7 @@
 
 static int execmem_cache_populate(struct execmem_range *range, size_t size)
 {
-        unsigned long vm_flags = VM_ALLOW_HUGE_VMAP;
+        vm_flags_t vm_flags = VM_ALLOW_HUGE_VMAP;
         struct vm_struct *vm;
         size_t alloc_size;
         int err = -ENOMEM;
@@ -373,7 +373,7 @@
 {
         struct execmem_range *range = &execmem_info->ranges[type];
         bool use_cache = range->flags & EXECMEM_ROX_CACHE;
-        unsigned long vm_flags = VM_FLUSH_RESET_PERMS;
+        vm_flags_t vm_flags = VM_FLUSH_RESET_PERMS;
         pgprot_t pgprot = range->pgprot;
         void *p;
 
+1 -1
mm/filemap.c
@@ -3216,7 +3216,7 @@
         struct address_space *mapping = file->f_mapping;
         DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
         struct file *fpin = NULL;
-        unsigned long vm_flags = vmf->vma->vm_flags;
+        vm_flags_t vm_flags = vmf->vma->vm_flags;
         unsigned short mmap_miss;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+1 -1
mm/gup.c
@@ -2044,7 +2044,7 @@
 {
         struct vm_area_struct *vma;
         bool must_unlock = false;
-        unsigned long vm_flags;
+        vm_flags_t vm_flags;
         long i;
 
         if (!nr_pages)
+1 -1
mm/huge_memory.c
@@ -99,7 +99,7 @@
 }
 
 unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
-                                         unsigned long vm_flags,
+                                         vm_flags_t vm_flags,
                                          unsigned long tva_flags,
                                          unsigned long orders)
 {
+2 -2
mm/hugetlb.c
@@ -7465,8 +7465,8 @@
         unsigned long s_end = sbase + PUD_SIZE;
 
         /* Allow segments to share if only one is marked locked */
-        unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED_MASK;
-        unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED_MASK;
+        vm_flags_t vm_flags = vma->vm_flags & ~VM_LOCKED_MASK;
+        vm_flags_t svm_flags = svma->vm_flags & ~VM_LOCKED_MASK;
 
         /*
          * match the virtual addresses, permission and the alignment of the
+2 -2
mm/internal.h
@@ -928,7 +928,7 @@
                               unsigned long start, unsigned long end, int *locked);
 extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
                                unsigned long end, bool write, int *locked);
-extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
+extern bool mlock_future_ok(struct mm_struct *mm, vm_flags_t vm_flags,
                             unsigned long bytes);
 
 /*
@@ -1358,7 +1358,7 @@
 
 struct vm_struct *__get_vm_area_node(unsigned long size,
                                      unsigned long align, unsigned long shift,
-                                     unsigned long flags, unsigned long start,
+                                     vm_flags_t vm_flags, unsigned long start,
                                      unsigned long end, int node, gfp_t gfp_mask,
                                      const void *caller);
 
+2 -2
mm/khugepaged.c
@@ -347,7 +347,7 @@
 #endif /* CONFIG_SYSFS */
 
 int hugepage_madvise(struct vm_area_struct *vma,
-                     unsigned long *vm_flags, int advice)
+                     vm_flags_t *vm_flags, int advice)
 {
         switch (advice) {
         case MADV_HUGEPAGE:
@@ -470,7 +470,7 @@
 }
 
 void khugepaged_enter_vma(struct vm_area_struct *vma,
-                          unsigned long vm_flags)
+                          vm_flags_t vm_flags)
 {
         if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
             hugepage_pmd_enabled()) {
+1 -1
mm/ksm.c
@@ -2840,7 +2840,7 @@
 }
 
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
-                unsigned long end, int advice, unsigned long *vm_flags)
+                unsigned long end, int advice, vm_flags_t *vm_flags)
 {
         struct mm_struct *mm = vma->vm_mm;
         int err;
+2 -2
mm/madvise.c
@@ -130,7 +130,7 @@
  */
 static int madvise_update_vma(struct vm_area_struct *vma,
                               struct vm_area_struct **prev, unsigned long start,
-                              unsigned long end, unsigned long new_flags,
+                              unsigned long end, vm_flags_t new_flags,
                               struct anon_vma_name *anon_name)
 {
         struct mm_struct *mm = vma->vm_mm;
@@ -1258,7 +1258,7 @@
         int behavior = arg->behavior;
         int error;
         struct anon_vma_name *anon_name;
-        unsigned long new_flags = vma->vm_flags;
+        vm_flags_t new_flags = vma->vm_flags;
 
         if (unlikely(!can_modify_vma_madv(vma, behavior)))
                 return -EPERM;
+1 -1
mm/mapping_dirty_helpers.c
@@ -218,7 +218,7 @@
 static int wp_clean_test_walk(unsigned long start, unsigned long end,
                               struct mm_walk *walk)
 {
-        unsigned long vm_flags = READ_ONCE(walk->vma->vm_flags);
+        vm_flags_t vm_flags = READ_ONCE(walk->vma->vm_flags);
 
         /* Skip non-applicable VMAs */
         if ((vm_flags & (VM_SHARED | VM_MAYWRITE | VM_HUGETLB)) !=
+4 -4
mm/memfd.c
@@ -332,10 +332,10 @@
         return seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE);
 }
 
-static int check_write_seal(unsigned long *vm_flags_ptr)
+static int check_write_seal(vm_flags_t *vm_flags_ptr)
 {
-        unsigned long vm_flags = *vm_flags_ptr;
-        unsigned long mask = vm_flags & (VM_SHARED | VM_WRITE);
+        vm_flags_t vm_flags = *vm_flags_ptr;
+        vm_flags_t mask = vm_flags & (VM_SHARED | VM_WRITE);
 
         /* If a private mapping then writability is irrelevant. */
         if (!(mask & VM_SHARED))
@@ -357,7 +357,7 @@
         return 0;
 }
 
-int memfd_check_seals_mmap(struct file *file, unsigned long *vm_flags_ptr)
+int memfd_check_seals_mmap(struct file *file, vm_flags_t *vm_flags_ptr)
 {
         int err = 0;
         unsigned int *seals_ptr = memfd_file_seals_ptr(file);
+2 -2
mm/memory.c
@@ -797,7 +797,7 @@
                  pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
                  struct vm_area_struct *src_vma, unsigned long addr, int *rss)
 {
-        unsigned long vm_flags = dst_vma->vm_flags;
+        vm_flags_t vm_flags = dst_vma->vm_flags;
         pte_t orig_pte = ptep_get(src_pte);
         pte_t pte = orig_pte;
         struct folio *folio;
@@ -6128,7 +6128,7 @@
                 .gfp_mask = __get_fault_gfp_mask(vma),
         };
         struct mm_struct *mm = vma->vm_mm;
-        unsigned long vm_flags = vma->vm_flags;
+        vm_flags_t vm_flags = vma->vm_flags;
         pgd_t *pgd;
         p4d_t *p4d;
         vm_fault_t ret;
+8 -8
mm/mmap.c
@@ -80,7 +80,7 @@
 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
 void vma_set_page_prot(struct vm_area_struct *vma)
 {
-        unsigned long vm_flags = vma->vm_flags;
+        vm_flags_t vm_flags = vma->vm_flags;
         pgprot_t vm_page_prot;
 
         vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
@@ -228,12 +228,12 @@
         return hint;
 }
 
-bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
+bool mlock_future_ok(struct mm_struct *mm, vm_flags_t vm_flags,
                      unsigned long bytes)
 {
         unsigned long locked_pages, limit_pages;
 
-        if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
+        if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
                 return true;
 
         locked_pages = bytes >> PAGE_SHIFT;
@@ -1207,7 +1207,7 @@
         return ret;
 }
 
-int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
+int vm_brk_flags(unsigned long addr, unsigned long request, vm_flags_t vm_flags)
 {
         struct mm_struct *mm = current->mm;
         struct vm_area_struct *vma = NULL;
@@ -1224,7 +1224,7 @@
                 return 0;
 
         /* Until we need other flags, refuse anything except VM_EXEC. */
-        if ((flags & (~VM_EXEC)) != 0)
+        if ((vm_flags & (~VM_EXEC)) != 0)
                 return -EINVAL;
 
         if (mmap_write_lock_killable(mm))
@@ -1239,7 +1239,7 @@
                 goto munmap_failed;
 
         vma = vma_prev(&vmi);
-        ret = do_brk_flags(&vmi, vma, addr, len, flags);
+        ret = do_brk_flags(&vmi, vma, addr, len, vm_flags);
         populate = ((mm->def_flags & VM_LOCKED) != 0);
         mmap_write_unlock(mm);
         userfaultfd_unmap_complete(mm, &uf);
@@ -1444,7 +1444,7 @@
 static struct vm_area_struct *__install_special_mapping(
         struct mm_struct *mm,
         unsigned long addr, unsigned long len,
-        unsigned long vm_flags, void *priv,
+        vm_flags_t vm_flags, void *priv,
         const struct vm_operations_struct *ops)
 {
         int ret;
@@ -1496,7 +1496,7 @@
 struct vm_area_struct *_install_special_mapping(
         struct mm_struct *mm,
         unsigned long addr, unsigned long len,
-        unsigned long vm_flags, const struct vm_special_mapping *spec)
+        vm_flags_t vm_flags, const struct vm_special_mapping *spec)
 {
         return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
                                          &special_mapping_vmops);
+4 -4
mm/mprotect.c
@@ -596,10 +596,10 @@
 int
 mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
                struct vm_area_struct *vma, struct vm_area_struct **pprev,
-               unsigned long start, unsigned long end, unsigned long newflags)
+               unsigned long start, unsigned long end, vm_flags_t newflags)
 {
         struct mm_struct *mm = vma->vm_mm;
-        unsigned long oldflags = READ_ONCE(vma->vm_flags);
+        vm_flags_t oldflags = READ_ONCE(vma->vm_flags);
         long nrpages = (end - start) >> PAGE_SHIFT;
         unsigned int mm_cp_flags = 0;
         unsigned long charged = 0;
@@ -774,8 +774,8 @@
         nstart = start;
         tmp = vma->vm_start;
         for_each_vma_range(vmi, vma, end) {
-                unsigned long mask_off_old_flags;
-                unsigned long newflags;
+                vm_flags_t mask_off_old_flags;
+                vm_flags_t newflags;
                 int new_vma_pkey;
 
                 if (vma->vm_start != tmp) {
+1 -1
mm/mremap.c
@@ -1025,7 +1025,7 @@
         struct vm_area_struct *vma = vrm->vma;
         unsigned long old_addr = vrm->addr;
         unsigned long old_len = vrm->old_len;
-        unsigned long dummy = vma->vm_flags;
+        vm_flags_t dummy = vma->vm_flags;
 
         /*
          * We'd prefer to avoid failure later on in do_munmap:
+6 -6
mm/nommu.c
@@ -126,7 +126,7 @@
 
 void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
                 unsigned long start, unsigned long end, gfp_t gfp_mask,
-                pgprot_t prot, unsigned long vm_flags, int node,
+                pgprot_t prot, vm_flags_t vm_flags, int node,
                 const void *caller)
 {
         return __vmalloc_noprof(size, gfp_mask);
@@ -844,12 +844,12 @@
  * we've determined that we can make the mapping, now translate what we
  * now know into VMA flags
  */
-static unsigned long determine_vm_flags(struct file *file,
-                                        unsigned long prot,
-                                        unsigned long flags,
-                                        unsigned long capabilities)
+static vm_flags_t determine_vm_flags(struct file *file,
+                                     unsigned long prot,
+                                     unsigned long flags,
+                                     unsigned long capabilities)
 {
-        unsigned long vm_flags;
+        vm_flags_t vm_flags;
 
         vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(file, flags);
 
+2 -2
mm/rmap.c
@@ -839,7 +839,7 @@
 struct folio_referenced_arg {
         int mapcount;
         int referenced;
-        unsigned long vm_flags;
+        vm_flags_t vm_flags;
         struct mem_cgroup *memcg;
 };
 
@@ -984,7 +984,7 @@
  * the function bailed out due to rmap lock contention.
  */
 int folio_referenced(struct folio *folio, int is_locked,
-                     struct mem_cgroup *memcg, unsigned long *vm_flags)
+                     struct mem_cgroup *memcg, vm_flags_t *vm_flags)
 {
         bool we_locked = false;
         struct folio_referenced_arg pra = {
+3 -3
mm/shmem.c
@@ -615,7 +615,7 @@
 static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
                                               loff_t write_end, bool shmem_huge_force,
                                               struct vm_area_struct *vma,
-                                              unsigned long vm_flags)
+                                              vm_flags_t vm_flags)
 {
         unsigned int maybe_pmd_order = HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER ?
                 0 : BIT(HPAGE_PMD_ORDER);
@@ -862,7 +862,7 @@
 static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
                                               loff_t write_end, bool shmem_huge_force,
                                               struct vm_area_struct *vma,
-                                              unsigned long vm_flags)
+                                              vm_flags_t vm_flags)
 {
         return 0;
 }
@@ -1753,7 +1753,7 @@
 {
         unsigned long mask = READ_ONCE(huge_shmem_orders_always);
         unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
-        unsigned long vm_flags = vma ? vma->vm_flags : 0;
+        vm_flags_t vm_flags = vma ? vma->vm_flags : 0;
         unsigned int global_orders;
 
         if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags)))
+7 -7
mm/userfaultfd.c
@@ -1901,11 +1901,11 @@
 }
 
 static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
-                                     vm_flags_t flags)
+                                     vm_flags_t vm_flags)
 {
-        const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;
+        const bool uffd_wp_changed = (vma->vm_flags ^ vm_flags) & VM_UFFD_WP;
 
-        vm_flags_reset(vma, flags);
+        vm_flags_reset(vma, vm_flags);
         /*
          * For shared mappings, we want to enable writenotify while
          * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply
@@ -1917,11 +1917,11 @@
 
 static void userfaultfd_set_ctx(struct vm_area_struct *vma,
                                 struct userfaultfd_ctx *ctx,
-                                unsigned long flags)
+                                vm_flags_t vm_flags)
 {
         vma_start_write(vma);
         vma->vm_userfaultfd_ctx = (struct vm_userfaultfd_ctx){ctx};
         userfaultfd_set_vm_flags(vma,
-                                 (vma->vm_flags & ~__VM_UFFD_FLAGS) | flags);
+                                 (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags);
 }
 
@@ -1968,14 +1968,14 @@
 /* Assumes mmap write lock taken, and mm_struct pinned. */
 int userfaultfd_register_range(struct userfaultfd_ctx *ctx,
                                struct vm_area_struct *vma,
-                               unsigned long vm_flags,
+                               vm_flags_t vm_flags,
                                unsigned long start, unsigned long end,
                                bool wp_async)
 {
         VMA_ITERATOR(vmi, ctx->mm, start);
         struct vm_area_struct *prev = vma_prev(&vmi);
         unsigned long vma_end;
-        unsigned long new_flags;
+        vm_flags_t new_flags;
 
         if (vma->vm_start < start)
                 prev = vma;
+40 -40
mm/vma.c
@@ -15,7 +15,7 @@
         unsigned long end;
         pgoff_t pgoff;
         unsigned long pglen;
-        unsigned long flags;
+        vm_flags_t vm_flags;
         struct file *file;
         pgprot_t page_prot;
 
@@ -37,7 +37,7 @@
         bool check_ksm_early;
 };
 
-#define MMAP_STATE(name, mm_, vmi_, addr_, len_, pgoff_, flags_, file_) \
+#define MMAP_STATE(name, mm_, vmi_, addr_, len_, pgoff_, vm_flags_, file_) \
         struct mmap_state name = { \
                 .mm = mm_, \
                 .vmi = vmi_, \
@@ -45,9 +45,9 @@
                 .end = (addr_) + (len_), \
                 .pgoff = pgoff_, \
                 .pglen = PHYS_PFN(len_), \
-                .flags = flags_, \
+                .vm_flags = vm_flags_, \
                 .file = file_, \
-                .page_prot = vm_get_page_prot(flags_), \
+                .page_prot = vm_get_page_prot(vm_flags_), \
         }
 
 #define VMG_MMAP_STATE(name, map_, vma_) \
@@ -56,7 +56,7 @@
                 .vmi = (map_)->vmi, \
                 .start = (map_)->addr, \
                 .end = (map_)->end, \
-                .flags = (map_)->flags, \
+                .vm_flags = (map_)->vm_flags, \
                 .pgoff = (map_)->pgoff, \
                 .file = (map_)->file, \
                 .prev = (map_)->prev, \
@@ -95,7 +95,7 @@
          * the kernel to generate new VMAs when old one could be
          * extended instead.
          */
-        if ((vma->vm_flags ^ vmg->flags) & ~VM_SOFTDIRTY)
+        if ((vma->vm_flags ^ vmg->vm_flags) & ~VM_SOFTDIRTY)
                 return false;
         if (vma->vm_file != vmg->file)
                 return false;
@@ -843,7 +843,7 @@
          * furthermost left or right side of the VMA, then we have no chance of
          * merging and should abort.
          */
-        if (vmg->flags & VM_SPECIAL || (!left_side && !right_side))
+        if (vmg->vm_flags & VM_SPECIAL || (!left_side && !right_side))
                 return NULL;
 
         if (left_side)
@@ -973,7 +973,7 @@
         if (err || commit_merge(vmg))
                 goto abort;
 
-        khugepaged_enter_vma(vmg->target, vmg->flags);
+        khugepaged_enter_vma(vmg->target, vmg->vm_flags);
         vmg->state = VMA_MERGE_SUCCESS;
         return vmg->target;
 
@@ -1055,7 +1055,7 @@
         vmg->state = VMA_MERGE_NOMERGE;
 
         /* Special VMAs are unmergeable, also if no prev/next. */
-        if ((vmg->flags & VM_SPECIAL) || (!prev && !next))
+        if ((vmg->vm_flags & VM_SPECIAL) || (!prev && !next))
                 return NULL;
 
         can_merge_left = can_vma_merge_left(vmg);
@@ -1093,7 +1093,7 @@
          * following VMA if we have VMAs on both sides.
          */
         if (vmg->target && !vma_expand(vmg)) {
-                khugepaged_enter_vma(vmg->target, vmg->flags);
+                khugepaged_enter_vma(vmg->target, vmg->vm_flags);
                 vmg->state = VMA_MERGE_SUCCESS;
                 return vmg->target;
         }
@@ -1640,11 +1640,11 @@
 struct vm_area_struct *vma_modify_flags(
         struct vma_iterator *vmi, struct vm_area_struct *prev,
         struct vm_area_struct *vma, unsigned long start, unsigned long end,
-        unsigned long new_flags)
+        vm_flags_t vm_flags)
 {
         VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
 
-        vmg.flags = new_flags;
+        vmg.vm_flags = vm_flags;
 
         return vma_modify(&vmg);
 }
@@ -1655,12 +1655,12 @@
         struct vm_area_struct *vma,
         unsigned long start,
         unsigned long end,
-        unsigned long new_flags,
+        vm_flags_t vm_flags,
         struct anon_vma_name *new_name)
 {
         VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
 
-        vmg.flags = new_flags;
+        vmg.vm_flags = vm_flags;
         vmg.anon_name = new_name;
 
         return vma_modify(&vmg);
@@ -1685,13 +1685,13 @@
         struct vm_area_struct *prev,
         struct vm_area_struct *vma,
         unsigned long start, unsigned long end,
-        unsigned long new_flags,
+        vm_flags_t vm_flags,
         struct vm_userfaultfd_ctx new_ctx,
         bool give_up_on_oom)
 {
         VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
 
-        vmg.flags = new_flags;
+        vmg.vm_flags = vm_flags;
         vmg.uffd_ctx = new_ctx;
         if (give_up_on_oom)
                 vmg.give_up_on_oom = true;
@@ -2327,7 +2327,7 @@
 
 static void update_ksm_flags(struct mmap_state *map)
 {
-        map->flags = ksm_vma_flags(map->mm, map->file, map->flags);
+        map->vm_flags = ksm_vma_flags(map->mm, map->file, map->vm_flags);
 }
 
 /*
@@ -2372,11 +2372,11 @@
         }
 
         /* Check against address space limit. */
-        if (!may_expand_vm(map->mm, map->flags, map->pglen - vms->nr_pages))
+        if (!may_expand_vm(map->mm, map->vm_flags, map->pglen - vms->nr_pages))
                 return -ENOMEM;
 
         /* Private writable mapping: check memory availability. */
-        if (accountable_mapping(map->file, map->flags)) {
+        if (accountable_mapping(map->file, map->vm_flags)) {
                 map->charged = map->pglen;
                 map->charged -= vms->nr_accounted;
                 if (map->charged) {
@@ -2386,7 +2386,7 @@
                 }
 
                 vms->nr_accounted = 0;
-                map->flags |= VM_ACCOUNT;
+                map->vm_flags |= VM_ACCOUNT;
         }
 
 /*
@@ -2430,12 +2430,12 @@
          * Drivers should not permit writability when previously it was
          * disallowed.
          */
-        VM_WARN_ON_ONCE(map->flags != vma->vm_flags &&
-                        !(map->flags & VM_MAYWRITE) &&
+        VM_WARN_ON_ONCE(map->vm_flags != vma->vm_flags &&
+                        !(map->vm_flags & VM_MAYWRITE) &&
                         (vma->vm_flags & VM_MAYWRITE));
 
         map->file = vma->vm_file;
-        map->flags = vma->vm_flags;
+        map->vm_flags = vma->vm_flags;
 
         return 0;
 }
@@ -2466,7 +2466,7 @@
 
         vma_iter_config(vmi, map->addr, map->end);
         vma_set_range(vma, map->addr, map->end, map->pgoff);
-        vm_flags_init(vma, map->flags);
+        vm_flags_init(vma, map->vm_flags);
         vma->vm_page_prot = map->page_prot;
 
         if (vma_iter_prealloc(vmi, vma)) {
@@ -2476,7 +2476,7 @@
 
         if (map->file)
                 error = __mmap_new_file_vma(map, vma);
-        else if (map->flags & VM_SHARED)
+        else if (map->vm_flags & VM_SHARED)
                 error = shmem_zero_setup(vma);
         else
                 vma_set_anonymous(vma);
@@ -2486,12 +2486,12 @@
 
         if (!map->check_ksm_early) {
                 update_ksm_flags(map);
-                vm_flags_init(vma, map->flags);
+                vm_flags_init(vma, map->vm_flags);
         }
 
 #ifdef CONFIG_SPARC64
         /* TODO: Fix SPARC ADI! */
-        WARN_ON_ONCE(!arch_validate_flags(map->flags));
+        WARN_ON_ONCE(!arch_validate_flags(map->vm_flags));
 #endif
 
         /* Lock the VMA since it is modified after insertion into VMA tree */
@@ -2505,7 +2505,7 @@
          * call covers the non-merge case.
          */
         if (!vma_is_anonymous(vma))
-                khugepaged_enter_vma(vma, map->flags);
+                khugepaged_enter_vma(vma, map->vm_flags);
         *vmap = vma;
         return 0;
 
@@ -2526,7 +2526,7 @@
 static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma)
 {
         struct mm_struct *mm = map->mm;
-        unsigned long vm_flags = vma->vm_flags;
+        vm_flags_t vm_flags = vma->vm_flags;
 
         perf_event_mmap(vma);
 
@@ -2579,7 +2579,7 @@
 
                 .pgoff = map->pgoff,
                 .file = map->file,
-                .vm_flags = map->flags,
+                .vm_flags = map->vm_flags,
                 .page_prot = map->page_prot,
         };
 
@@ -2591,7 +2591,7 @@
         /* Update fields permitted to be changed. */
         map->pgoff = desc.pgoff;
         map->file = desc.file;
-        map->flags = desc.vm_flags;
+        map->vm_flags = desc.vm_flags;
         map->page_prot = desc.page_prot;
         /* User-defined fields. */
         map->vm_ops = desc.vm_ops;
@@ -2754,14 +2754,14 @@
  * @addr: The start address
 * @len: The length of the increase
 * @vma: The vma,
- * @flags: The VMA Flags
+ * @vm_flags: The VMA Flags
 *
 * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags
 * do not match then create a new anonymous VMA. Eventually we may be able to
 * do some brk-specific accounting here.
 */
 int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
-                 unsigned long addr, unsigned long len, unsigned long flags)
+                 unsigned long addr, unsigned long len, vm_flags_t vm_flags)
 {
         struct mm_struct *mm = current->mm;
 
@@ -2769,9 +2769,9 @@
          * Check against address space limits by the changed size
          * Note: This happens *after* clearing old mappings in some code paths.
          */
-        flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
-        flags = ksm_vma_flags(mm, NULL, flags);
-        if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
+        vm_flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
+        vm_flags = ksm_vma_flags(mm, NULL, vm_flags);
+        if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT))
                 return -ENOMEM;
 
         if (mm->map_count > sysctl_max_map_count)
@@ -2785,7 +2785,7 @@
          * occur after forking, so the expand will only happen on new VMAs.
          */
         if (vma && vma->vm_end == addr) {
-                VMG_STATE(vmg, mm, vmi, addr, addr + len, flags, PHYS_PFN(addr));
+                VMG_STATE(vmg, mm, vmi, addr, addr + len, vm_flags, PHYS_PFN(addr));
 
                 vmg.prev = vma;
                 /* vmi is positioned at prev, which this mode expects. */
@@ -2806,8 +2806,8 @@
 
         vma_set_anonymous(vma);
         vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT);
-        vm_flags_init(vma, flags);
-        vma->vm_page_prot = vm_get_page_prot(flags);
+        vm_flags_init(vma, vm_flags);
+        vma->vm_page_prot = vm_get_page_prot(vm_flags);
         vma_start_write(vma);
         if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
                 goto mas_store_fail;
@@ -2818,7 +2818,7 @@
         perf_event_mmap(vma);
         mm->total_vm += len >> PAGE_SHIFT;
         mm->data_vm += len >> PAGE_SHIFT;
-        if (flags & VM_LOCKED)
+        if (vm_flags & VM_LOCKED)
                 mm->locked_vm += (len >> PAGE_SHIFT);
         vm_flags_set(vma, VM_SOFTDIRTY);
         return 0;
+8 -8
mm/vma.h
@@ -98,7 +98,7 @@
         unsigned long end;
         pgoff_t pgoff;
 
-        unsigned long flags;
+        vm_flags_t vm_flags;
         struct file *file;
         struct anon_vma *anon_vma;
         struct mempolicy *policy;
@@ -164,7 +164,7 @@
         return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
 }
 
-#define VMG_STATE(name, mm_, vmi_, start_, end_, flags_, pgoff_) \
+#define VMG_STATE(name, mm_, vmi_, start_, end_, vm_flags_, pgoff_) \
         struct vma_merge_struct name = { \
                 .mm = mm_, \
                 .vmi = vmi_, \
                 .start = start_, \
                 .end = end_, \
-                .flags = flags_, \
+                .vm_flags = vm_flags_, \
                 .pgoff = pgoff_, \
                 .state = VMA_MERGE_START, \
         }
@@ -184,7 +184,7 @@
                 .next = NULL, \
                 .start = start_, \
                 .end = end_, \
-                .flags = vma_->vm_flags, \
+                .vm_flags = vma_->vm_flags, \
                 .pgoff = vma_pgoff_offset(vma_, start_), \
                 .file = vma_->vm_file, \
                 .anon_vma = vma_->anon_vma, \
@@ -288,7 +288,7 @@
 *vma_modify_flags(struct vma_iterator *vmi,
                   struct vm_area_struct *prev, struct vm_area_struct *vma,
                   unsigned long start, unsigned long end,
-                  unsigned long new_flags);
+                  vm_flags_t vm_flags);
 
 /* We are about to modify the VMA's flags and/or anon_name. */
 __must_check struct vm_area_struct
@@ -297,7 +297,7 @@
                   struct vm_area_struct *vma,
                   unsigned long start,
                   unsigned long end,
-                  unsigned long new_flags,
+                  vm_flags_t vm_flags,
                   struct anon_vma_name *new_name);
 
 /* We are about to modify the VMA's memory policy. */
@@ -314,7 +314,7 @@
                   struct vm_area_struct *prev,
                   struct vm_area_struct *vma,
                   unsigned long start, unsigned long end,
-                  unsigned long new_flags,
+                  vm_flags_t vm_flags,
                   struct vm_userfaultfd_ctx new_ctx,
                   bool give_up_on_oom);
 
@@ -375,7 +375,7 @@
 }
 
 #ifdef CONFIG_MMU
-static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
+static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, vm_flags_t vm_flags)
 {
         return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
 }
+2 -2
mm/vmscan.c
@@ -907,7 +907,7 @@
                               struct scan_control *sc)
 {
         int referenced_ptes, referenced_folio;
-        unsigned long vm_flags;
+        vm_flags_t vm_flags;
 
         referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup,
                                            &vm_flags);
@@ -2120,7 +2120,7 @@
 {
         unsigned long nr_taken;
         unsigned long nr_scanned;
-        unsigned long vm_flags;
+        vm_flags_t vm_flags;
         LIST_HEAD(l_hold);      /* The folios which were snipped off */
         LIST_HEAD(l_active);
         LIST_HEAD(l_inactive);
+133 -133
tools/testing/vma/vma.c
@@ -65,7 +65,7 @@
                                 unsigned long start,
                                 unsigned long end,
                                 pgoff_t pgoff,
-                                vm_flags_t flags)
+                                vm_flags_t vm_flags)
 {
         struct vm_area_struct *ret = vm_area_alloc(mm);
 
@@ -75,7 +75,7 @@
         ret->vm_start = start;
         ret->vm_end = end;
         ret->vm_pgoff = pgoff;
-        ret->__vm_flags = flags;
+        ret->__vm_flags = vm_flags;
         vma_assert_detached(ret);
 
         return ret;
@@ -103,9 +103,9 @@
                                 unsigned long start,
                                 unsigned long end,
                                 pgoff_t pgoff,
-                                vm_flags_t flags)
+                                vm_flags_t vm_flags)
 {
-        struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, flags);
+        struct vm_area_struct *vma = alloc_vma(mm, start, end, pgoff, vm_flags);
 
         if (vma == NULL)
                 return NULL;
@@ -172,7 +172,7 @@
  * specified new range.
 */
 static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
-                          unsigned long end, pgoff_t pgoff, vm_flags_t flags)
+                          unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags)
 {
         vma_iter_set(vmg->vmi, start);
 
@@ -184,7 +184,7 @@
         vmg->start = start;
         vmg->end = end;
         vmg->pgoff = pgoff;
-        vmg->flags = flags;
+        vmg->vm_flags = vm_flags;
 
         vmg->just_expand = false;
         vmg->__remove_middle = false;
@@ -195,10 +195,10 @@
 
 /* Helper function to set both the VMG range and its anon_vma. */
 static void vmg_set_range_anon_vma(struct vma_merge_struct *vmg, unsigned long start,
-                                   unsigned long end, pgoff_t pgoff, vm_flags_t flags,
+                                   unsigned long end, pgoff_t pgoff, vm_flags_t vm_flags,
                                    struct anon_vma *anon_vma)
 {
-        vmg_set_range(vmg, start, end, pgoff, flags);
+        vmg_set_range(vmg, start, end, pgoff, vm_flags);
         vmg->anon_vma = anon_vma;
 }
 
@@ -211,12 +211,12 @@
 static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
                                                 struct vma_merge_struct *vmg,
                                                 unsigned long start, unsigned long end,
-                                                pgoff_t pgoff, vm_flags_t flags,
+                                                pgoff_t pgoff, vm_flags_t vm_flags,
                                                 bool *was_merged)
 {
         struct vm_area_struct *merged;
 
-        vmg_set_range(vmg, start, end, pgoff, flags);
+        vmg_set_range(vmg, start, end, pgoff, vm_flags);
 
         merged = merge_new(vmg);
         if (merged) {
@@ -229,7 +229,7 @@
 
         ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE);
 
-        return alloc_and_link_vma(mm, start, end, pgoff, flags);
+        return alloc_and_link_vma(mm, start, end, pgoff, vm_flags);
 }
 
 /*
@@ -301,16 +301,16 @@
 static bool test_simple_merge(void)
 {
         struct vm_area_struct *vma;
-        unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+        vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
         struct mm_struct mm = {};
-        struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, flags);
-        struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, flags);
+        struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, vm_flags);
+        struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, vm_flags);
         VMA_ITERATOR(vmi, &mm, 0x1000);
         struct vma_merge_struct vmg = {
                 .mm = &mm,
                 .vmi = &vmi,
                 .start = 0x1000,
                 .end = 0x2000,
-                .flags = flags,
+                .vm_flags = vm_flags,
                 .pgoff = 1,
         };
@@ -324,7 +324,7 @@
         ASSERT_EQ(vma->vm_start, 0);
         ASSERT_EQ(vma->vm_end, 0x3000);
         ASSERT_EQ(vma->vm_pgoff, 0);
-        ASSERT_EQ(vma->vm_flags, flags);
+        ASSERT_EQ(vma->vm_flags, vm_flags);
 
         detach_free_vma(vma);
         mtree_destroy(&mm.mm_mt);
@@ -335,9 +335,9 @@
 static bool test_simple_modify(void)
 {
         struct vm_area_struct *vma;
-        unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+        vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
         struct mm_struct mm = {};
-        struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
+        struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, vm_flags);
         VMA_ITERATOR(vmi, &mm, 0x1000);
 
         ASSERT_FALSE(attach_vma(&mm, init_vma));
@@ -394,9 +394,9 @@
 
 static bool test_simple_expand(void)
 {
-        unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+        vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
         struct mm_struct mm = {};
-        struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, flags);
+        struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, vm_flags);
         VMA_ITERATOR(vmi, &mm, 0);
         struct vma_merge_struct vmg = {
                 .vmi = &vmi,
@@ -422,9 +422,9 @@
 
 static bool test_simple_shrink(void)
 {
-        unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+        vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
         struct mm_struct mm = {};
-        struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
+        struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, vm_flags);
         VMA_ITERATOR(vmi, &mm, 0);
 
         ASSERT_FALSE(attach_vma(&mm, vma));
@@ -443,7 +443,7 @@
 
 static bool test_merge_new(void)
 {
-        unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+        vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
         struct mm_struct mm = {};
         VMA_ITERATOR(vmi, &mm, 0);
         struct vma_merge_struct vmg = {
@@ -473,18 +473,18 @@
          * 0123456789abc
          * AA B CC
          */
-        vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
+        vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
         ASSERT_NE(vma_a, NULL);
         /* We give each VMA a single avc so we can test anon_vma duplication. */
         INIT_LIST_HEAD(&vma_a->anon_vma_chain);
         list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);
 
-        vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
+        vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags);
         ASSERT_NE(vma_b, NULL);
         INIT_LIST_HEAD(&vma_b->anon_vma_chain);
         list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);
 
-        vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, flags);
+        vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, vm_flags);
         ASSERT_NE(vma_c, NULL);
         INIT_LIST_HEAD(&vma_c->anon_vma_chain);
         list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);
@@ -495,7 +495,7 @@
          * 0123456789abc
          * AA B ** CC
          */
-        vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, flags, &merged);
+        vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, vm_flags, &merged);
         ASSERT_NE(vma_d, NULL);
         INIT_LIST_HEAD(&vma_d->anon_vma_chain);
         list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
@@ -510,7 +510,7 @@
         */
         vma_a->vm_ops = &vm_ops; /* This should have no impact. */
         vma_b->anon_vma = &dummy_anon_vma;
-        vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, flags, &merged);
+        vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, vm_flags, &merged);
         ASSERT_EQ(vma, vma_a);
         /* Merge with A, delete B. */
         ASSERT_TRUE(merged);
@@ -527,7 +527,7 @@
          * 0123456789abc
          * AAAA* DD CC
          */
-        vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, flags, &merged);
+        vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, vm_flags, &merged);
         ASSERT_EQ(vma, vma_a);
         /* Extend A. */
         ASSERT_TRUE(merged);
@@ -546,7 +546,7 @@
         */
         vma_d->anon_vma = &dummy_anon_vma;
         vma_d->vm_ops = &vm_ops; /* This should have no impact. */
-        vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, flags, &merged);
+        vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, vm_flags, &merged);
         ASSERT_EQ(vma, vma_d);
         /* Prepend. */
         ASSERT_TRUE(merged);
@@ -564,7 +564,7 @@
          * AAAAA*DDD CC
          */
         vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */
-        vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, flags, &merged);
+        vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, vm_flags, &merged);
         ASSERT_EQ(vma, vma_a);
         /* Merge with A, delete D. */
         ASSERT_TRUE(merged);
@@ -582,7 +582,7 @@
          * AAAAAAAAA *CC
         */
         vma_c->anon_vma = &dummy_anon_vma;
-        vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, flags, &merged);
+        vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, vm_flags, &merged);
         ASSERT_EQ(vma, vma_c);
         /* Prepend C. */
         ASSERT_TRUE(merged);
@@ -599,7 +599,7 @@
          * 0123456789abc
          * AAAAAAAAA*CCC
         */
-        vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, flags, &merged);
+        vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, vm_flags, &merged);
         ASSERT_EQ(vma, vma_a);
         /* Extend A and delete C. */
         ASSERT_TRUE(merged);
@@ -639,7 +639,7 @@
 
 static bool test_vma_merge_special_flags(void)
 {
-        unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+        vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
         struct mm_struct mm = {};
         VMA_ITERATOR(vmi, &mm, 0);
         struct vma_merge_struct vmg = {
@@ -661,7 +661,7 @@
          * 01234
         * AAA
         */
-        vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
+        vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
         ASSERT_NE(vma_left, NULL);
 
         /* 1. Set up new VMA with special flag that would otherwise merge. */
@@ -672,12 +672,12 @@
          *
         * This should merge if not for the VM_SPECIAL flag.
         */
-        vmg_set_range(&vmg, 0x3000, 0x4000, 3, flags);
+        vmg_set_range(&vmg, 0x3000, 0x4000, 3, vm_flags);
         for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
                 vm_flags_t special_flag = special_flags[i];
 
-                vma_left->__vm_flags = flags | special_flag;
-                vmg.flags = flags | special_flag;
+                vma_left->__vm_flags = vm_flags | special_flag;
+                vmg.vm_flags = vm_flags | special_flag;
                 vma = merge_new(&vmg);
                 ASSERT_EQ(vma, NULL);
                 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
@@ -691,15 +691,15 @@
          *
         * Create a VMA to modify.
         */
-        vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
+        vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags);
         ASSERT_NE(vma, NULL);
         vmg.middle = vma;
 
         for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
                 vm_flags_t special_flag = special_flags[i];
 
-                vma_left->__vm_flags = flags | special_flag;
-                vmg.flags = flags | special_flag;
+                vma_left->__vm_flags = vm_flags | special_flag;
+                vmg.vm_flags = vm_flags | special_flag;
                 vma = merge_existing(&vmg);
                 ASSERT_EQ(vma, NULL);
                 ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
@@ -711,7 +711,7 @@
 
 static bool test_vma_merge_with_close(void)
 {
-        unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+        vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
         struct mm_struct mm = {};
         VMA_ITERATOR(vmi, &mm, 0);
         struct vma_merge_struct vmg = {
@@ -791,11 +791,11 @@
          * PPPPPPNNN
         */
 
-        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
-        vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
+        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
+        vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
         vma_next->vm_ops = &vm_ops;
 
-        vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
+        vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
         ASSERT_EQ(merge_new(&vmg), vma_prev);
         ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
         ASSERT_EQ(vma_prev->vm_start, 0);
@@ -816,11 +816,11 @@
          * proceed.
         */
 
-        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
-        vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
+        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
+        vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
         vma->vm_ops = &vm_ops;
 
-        vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
+        vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
         vmg.prev = vma_prev;
         vmg.middle = vma;
 
@@ -844,11 +844,11 @@
          * proceed.
         */
 
-        vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
-        vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
+        vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
+        vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
         vma->vm_ops = &vm_ops;
 
-        vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
+        vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
         vmg.middle = vma;
         ASSERT_EQ(merge_existing(&vmg), NULL);
         /*
@@ -872,12 +872,12 @@
          * PPPVVNNNN
         */
 
-        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
-        vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
-        vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
+        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
+        vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
+        vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
         vma->vm_ops = &vm_ops;
 
-        vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
+        vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
         vmg.prev = vma_prev;
         vmg.middle = vma;
 
@@ -898,12 +898,12 @@
          * PPPPPNNNN
         */
 
-        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
-        vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
-        vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, flags);
+        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
+        vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
+        vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vm_flags);
         vma_next->vm_ops = &vm_ops;
 
-        vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
+        vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags);
         vmg.prev = vma_prev;
         vmg.middle = vma;
 
@@ -920,15 +920,15 @@
 
 static bool test_vma_merge_new_with_close(void)
 {
-        unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+        vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
         struct mm_struct mm = {};
         VMA_ITERATOR(vmi, &mm, 0);
         struct vma_merge_struct vmg = {
                 .mm = &mm,
                 .vmi = &vmi,
         };
-        struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
-        struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, flags);
+        struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags);
+        struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, vm_flags);
         const struct vm_operations_struct vm_ops = {
                 .close = dummy_close,
         };
@@ -958,7 +958,7 @@
         vma_prev->vm_ops = &vm_ops;
         vma_next->vm_ops = &vm_ops;
 
-        vmg_set_range(&vmg, 0x2000, 0x5000, 2, flags);
+        vmg_set_range(&vmg, 0x2000, 0x5000, 2, vm_flags);
         vma = merge_new(&vmg);
         ASSERT_NE(vma, NULL);
         ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
@@ -975,7 +975,7 @@
 
 static bool test_merge_existing(void)
 {
-        unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+        vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
         struct mm_struct mm = {};
         VMA_ITERATOR(vmi, &mm, 0);
         struct vm_area_struct *vma, *vma_prev, *vma_next;
@@ -998,11 +998,11 @@
          * 0123456789
         * VNNNNNN
         */
-        vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
+        vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vm_flags);
         vma->vm_ops = &vm_ops; /* This should have no impact. */
-        vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
+        vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, vm_flags);
         vma_next->vm_ops = &vm_ops; /* This should have no impact. */
-        vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, flags, &dummy_anon_vma);
+        vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vm_flags, &dummy_anon_vma);
         vmg.middle = vma;
         vmg.prev = vma;
         vma_set_dummy_anon_vma(vma, &avc);
@@ -1032,10 +1032,10 @@
          * 0123456789
         * NNNNNNN
         */
-        vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
-        vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
+        vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vm_flags);
+        vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, vm_flags);
         vma_next->vm_ops = &vm_ops; /* This should have no impact. */
-        vmg_set_range_anon_vma(&vmg, 0x2000, 0x6000, 2, flags, &dummy_anon_vma);
+        vmg_set_range_anon_vma(&vmg, 0x2000, 0x6000, 2, vm_flags, &dummy_anon_vma);
         vmg.middle = vma;
         vma_set_dummy_anon_vma(vma, &avc);
         ASSERT_EQ(merge_existing(&vmg), vma_next);
@@ -1060,11 +1060,11 @@
          * 0123456789
         * PPPPPPV
        */
-        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
+        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
         vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
-        vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
+        vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
         vma->vm_ops = &vm_ops; /* This should have no impact. */
-        vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, flags, &dummy_anon_vma);
+        vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vm_flags, &dummy_anon_vma);
         vmg.prev = vma_prev;
         vmg.middle = vma;
         vma_set_dummy_anon_vma(vma, &avc);
@@ -1094,10 +1094,10 @@
          * 0123456789
        * PPPPPPP
        */
-        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
+        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
         vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
-        vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
-        vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, flags, &dummy_anon_vma);
+        vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
+        vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, &dummy_anon_vma);
         vmg.prev = vma_prev;
         vmg.middle = vma;
         vma_set_dummy_anon_vma(vma, &avc);
@@ -1123,11 +1123,11 @@
          * 0123456789
        * PPPPPPPPPP
        */
-        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
+        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
         vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
-        vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
-        vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
-        vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, flags, &dummy_anon_vma);
+        vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
+        vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags);
+        vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, &dummy_anon_vma);
         vmg.prev = vma_prev;
         vmg.middle = vma;
         vma_set_dummy_anon_vma(vma, &avc);
@@ -1158,41 +1158,41 @@
          * PPPVVVVVNNN
        */
 
-        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
-        vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags);
-        vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, flags);
+        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
+        vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vm_flags);
+        vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, vm_flags);
 
-        vmg_set_range(&vmg, 0x4000, 0x5000, 4, flags);
+        vmg_set_range(&vmg, 0x4000, 0x5000, 4, vm_flags);
         vmg.prev = vma;
         vmg.middle = vma;
         ASSERT_EQ(merge_existing(&vmg), NULL);
         ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
 
-        vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
+        vmg_set_range(&vmg, 0x5000, 0x6000, 5, vm_flags);
         vmg.prev = vma;
         vmg.middle = vma;
         ASSERT_EQ(merge_existing(&vmg), NULL);
         ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
 
-        vmg_set_range(&vmg, 0x6000, 0x7000, 6, flags);
+        vmg_set_range(&vmg, 0x6000, 0x7000, 6, vm_flags);
         vmg.prev = vma;
         vmg.middle = vma;
         ASSERT_EQ(merge_existing(&vmg), NULL);
         ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
 
-        vmg_set_range(&vmg, 0x4000, 0x7000, 4, flags);
+        vmg_set_range(&vmg, 0x4000, 0x7000, 4, vm_flags);
         vmg.prev = vma;
         vmg.middle = vma;
         ASSERT_EQ(merge_existing(&vmg), NULL);
         ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
 
-        vmg_set_range(&vmg, 0x4000, 0x6000, 4, flags);
+        vmg_set_range(&vmg, 0x4000, 0x6000, 4, vm_flags);
         vmg.prev = vma;
         vmg.middle = vma;
         ASSERT_EQ(merge_existing(&vmg), NULL);
         ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
 
-        vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
+        vmg_set_range(&vmg, 0x5000, 0x6000, 5, vm_flags);
         vmg.prev = vma;
         vmg.middle = vma;
         ASSERT_EQ(merge_existing(&vmg), NULL);
@@ -1205,7 +1205,7 @@
 
 static bool test_anon_vma_non_mergeable(void)
 {
-        unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+        vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
         struct mm_struct mm = {};
         VMA_ITERATOR(vmi, &mm, 0);
         struct vm_area_struct *vma, *vma_prev, *vma_next;
@@ -1229,9 +1229,9 @@
          * 0123456789
        * PPPPPPPNNN
        */
-        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
-        vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
-        vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
+        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
+        vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vm_flags);
+        vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags);
 
         /*
          * Give both prev and next single anon_vma_chain fields, so they will
@@ -1239,7 +1239,7 @@
          *
        * However, when prev is compared to next, the merge should fail.
        */
-        vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, flags, NULL);
+        vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, NULL);
         vmg.prev = vma_prev;
         vmg.middle = vma;
         vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
@@ -1267,10 +1267,10 @@
          * 0123456789
        * PPPPPPPNNN
       */
-        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
-        vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
+        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
+        vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vm_flags);
 
-        vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, flags, NULL);
+        vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vm_flags, NULL);
         vmg.prev = vma_prev;
         vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
         __vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2);
@@ -1292,7 +1292,7 @@
 
 static bool test_dup_anon_vma(void)
 {
-        unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+        vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
         struct mm_struct mm = {};
         VMA_ITERATOR(vmi, &mm, 0);
         struct vma_merge_struct vmg = {
@@ -1313,11 +1313,11 @@
          * This covers new VMA merging, as these operations amount to a VMA
        * expand.
       */
-        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
-        vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
+        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
+        vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
         vma_next->anon_vma = &dummy_anon_vma;
 
-        vmg_set_range(&vmg, 0, 0x5000, 0, flags);
+        vmg_set_range(&vmg, 0, 0x5000, 0, vm_flags);
         vmg.target = vma_prev;
         vmg.next = vma_next;
 
@@ -1339,8 +1339,8 @@
          * extend delete delete
        */
 
-        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags);
-        vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
-        vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags);
+        vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags);
+        vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags);
+        vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags);
 
         /* Initialise avc so mergeability check passes.
*/ 1347 1347 INIT_LIST_HEAD(&vma_next->anon_vma_chain); 1348 1348 list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain); 1349 1349 1350 1350 vma_next->anon_vma = &dummy_anon_vma; 1351 - vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); 1351 + vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags); 1352 1352 vmg.prev = vma_prev; 1353 1353 vmg.middle = vma; 1354 1354 ··· 1372 1372 * extend delete delete 1373 1373 */ 1374 1374 1375 - vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags); 1376 - vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags); 1377 - vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags); 1375 + vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); 1376 + vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); 1377 + vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags); 1378 1378 vmg.anon_vma = &dummy_anon_vma; 1379 1379 vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain); 1380 - vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); 1380 + vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags); 1381 1381 vmg.prev = vma_prev; 1382 1382 vmg.middle = vma; 1383 1383 ··· 1401 1401 * extend shrink/delete 1402 1402 */ 1403 1403 1404 - vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags); 1405 - vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, flags); 1404 + vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); 1405 + vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vm_flags); 1406 1406 1407 1407 vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain); 1408 - vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); 1408 + vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags); 1409 1409 vmg.prev = vma_prev; 1410 1410 vmg.middle = vma; 1411 1411 ··· 1429 1429 * shrink/delete extend 1430 1430 */ 1431 1431 1432 - vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, flags); 1433 - vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, flags); 1432 + vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, vm_flags); 1433 + vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vm_flags); 1434 1434 1435 1435 vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain); 1436 - vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags); 1436 + vmg_set_range(&vmg, 0x3000, 0x5000, 3, vm_flags); 1437 1437 vmg.prev = vma; 1438 1438 vmg.middle = vma; 1439 1439 ··· 1452 1452 1453 1453 static bool test_vmi_prealloc_fail(void) 1454 1454 { 1455 - unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; 1455 + vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; 1456 1456 struct mm_struct mm = {}; 1457 1457 VMA_ITERATOR(vmi, &mm, 0); 1458 1458 struct vma_merge_struct vmg = { ··· 1468 1468 * the duplicated anon_vma is unlinked. 1469 1469 */ 1470 1470 1471 - vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags); 1472 - vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags); 1471 + vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); 1472 + vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); 1473 1473 vma->anon_vma = &dummy_anon_vma; 1474 1474 1475 - vmg_set_range_anon_vma(&vmg, 0x3000, 0x5000, 3, flags, &dummy_anon_vma); 1475 + vmg_set_range_anon_vma(&vmg, 0x3000, 0x5000, 3, vm_flags, &dummy_anon_vma); 1476 1476 vmg.prev = vma_prev; 1477 1477 vmg.middle = vma; 1478 1478 vma_set_dummy_anon_vma(vma, &avc); ··· 1496 1496 * performed in this case too. 
1497 1497 */ 1498 1498 1499 - vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, flags); 1500 - vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags); 1499 + vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vm_flags); 1500 + vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); 1501 1501 vma->anon_vma = &dummy_anon_vma; 1502 1502 1503 - vmg_set_range(&vmg, 0, 0x5000, 3, flags); 1503 + vmg_set_range(&vmg, 0, 0x5000, 3, vm_flags); 1504 1504 vmg.target = vma_prev; 1505 1505 vmg.next = vma; 1506 1506 ··· 1518 1518 1519 1519 static bool test_merge_extend(void) 1520 1520 { 1521 - unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; 1521 + vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; 1522 1522 struct mm_struct mm = {}; 1523 1523 VMA_ITERATOR(vmi, &mm, 0x1000); 1524 1524 struct vm_area_struct *vma; 1525 1525 1526 - vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, flags); 1527 - alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags); 1526 + vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, vm_flags); 1527 + alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vm_flags); 1528 1528 1529 1529 /* 1530 1530 * Extend a VMA into the gap between itself and the following VMA. ··· 1548 1548 1549 1549 static bool test_copy_vma(void) 1550 1550 { 1551 - unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; 1551 + vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; 1552 1552 struct mm_struct mm = {}; 1553 1553 bool need_locks = false; 1554 1554 VMA_ITERATOR(vmi, &mm, 0); ··· 1556 1556 1557 1557 /* Move backwards and do not merge. */ 1558 1558 1559 - vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags); 1559 + vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); 1560 1560 vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks); 1561 1561 ASSERT_NE(vma_new, vma); 1562 1562 ASSERT_EQ(vma_new->vm_start, 0); ··· 1568 1568 1569 1569 /* Move a VMA into position next to another and merge the two. */ 1570 1570 1571 - vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags); 1572 - vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, flags); 1571 + vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags); 1572 + vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, vm_flags); 1573 1573 vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks); 1574 1574 vma_assert_attached(vma_new); 1575 1575 ··· 1581 1581 1582 1582 static bool test_expand_only_mode(void) 1583 1583 { 1584 - unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; 1584 + vm_flags_t vm_flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE; 1585 1585 struct mm_struct mm = {}; 1586 1586 VMA_ITERATOR(vmi, &mm, 0); 1587 1587 struct vm_area_struct *vma_prev, *vma; 1588 - VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, flags, 5); 1588 + VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, vm_flags, 5); 1589 1589 1590 1590 /* 1591 1591 * Place a VMA prior to the one we're expanding so we assert that we do ··· 1593 1593 * have, through the use of the just_expand flag, indicated we do not 1594 1594 * need to do so. 1595 1595 */ 1596 - alloc_and_link_vma(&mm, 0, 0x2000, 0, flags); 1596 + alloc_and_link_vma(&mm, 0, 0x2000, 0, vm_flags); 1597 1597 1598 1598 /* 1599 1599 * We will be positioned at the prev VMA, but looking to expand to 1600 1600 * 0x9000. 1601 1601 */ 1602 1602 vma_iter_set(&vmi, 0x3000); 1603 - vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags); 1603 + vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vm_flags); 1604 1604 vmg.prev = vma_prev; 1605 1605 vmg.just_expand = true; 1606 1606
+4 -4
tools/testing/vma/vma_internal.h
··· 1084 1084 } 1085 1085 1086 1086 static inline void khugepaged_enter_vma(struct vm_area_struct *vma, 1087 - unsigned long vm_flags) 1087 + vm_flags_t vm_flags) 1088 1088 { 1089 1089 (void)vma; 1090 1090 (void)vm_flags; ··· 1200 1200 /* Update vma->vm_page_prot to reflect vma->vm_flags. */ 1201 1201 static inline void vma_set_page_prot(struct vm_area_struct *vma) 1202 1202 { 1203 - unsigned long vm_flags = vma->vm_flags; 1203 + vm_flags_t vm_flags = vma->vm_flags; 1204 1204 pgprot_t vm_page_prot; 1205 1205 1206 1206 /* testing: we inline vm_pgprot_modify() to avoid clash with vma.h. */ ··· 1280 1280 return true; 1281 1281 } 1282 1282 1283 - static inline bool mlock_future_ok(struct mm_struct *mm, unsigned long flags, 1283 + static inline bool mlock_future_ok(struct mm_struct *mm, vm_flags_t vm_flags, 1284 1284 unsigned long bytes) 1285 1285 { 1286 1286 unsigned long locked_pages, limit_pages; 1287 1287 1288 - if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK)) 1288 + if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK)) 1289 1289 return true; 1290 1290 1291 1291 locked_pages = bytes >> PAGE_SHIFT;
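Note: besides the type change, the mlock_future_ok() stub above renames its flags parameter to vm_flags to match. For readers unfamiliar with the check it models, here is a hedged standalone sketch of the accounting; the signature is reshaped to take the page counts directly (the real helper derives them from the mm's locked-page count and RLIMIT_MEMLOCK), and PAGE_SHIFT, the capability stand-in, and the reshaped parameters are local assumptions for illustration.

	#include <stdbool.h>
	#include <stdio.h>

	typedef unsigned long vm_flags_t;	/* stand-in for the kernel typedef */

	#define VM_LOCKED	0x00002000UL	/* same bit value as the kernel's VM_LOCKED */
	#define PAGE_SHIFT	12		/* assume 4KiB pages for this sketch */

	/* Stand-in for capable(CAP_IPC_LOCK): assume the caller lacks the capability. */
	static bool capable_ipc_lock(void)
	{
		return false;
	}

	/*
	 * Mirrors the visible shape of the stub: non-VM_LOCKED mappings (or a
	 * privileged caller) always pass; otherwise the request is allowed only
	 * while the total locked page count stays within the limit.
	 */
	static bool mlock_future_ok(unsigned long locked_pages, unsigned long limit_pages,
				    vm_flags_t vm_flags, unsigned long bytes)
	{
		if (!(vm_flags & VM_LOCKED) || capable_ipc_lock())
			return true;

		return locked_pages + (bytes >> PAGE_SHIFT) <= limit_pages;
	}

	int main(void)
	{
		/* 8 MiB already locked, 16 MiB limit: a further 4 MiB fits... */
		printf("%d\n", mlock_future_ok(2048, 4096, VM_LOCKED, 4UL << 20));
		/* ...but a further 16 MiB would exceed the limit. */
		printf("%d\n", mlock_future_ok(2048, 4096, VM_LOCKED, 16UL << 20));
		return 0;
	}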