Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: update architecture and driver code to use vm_flags_t

In future we intend to change the vm_flags_t type, so it isn't correct for
architecture and driver code to assume it is unsigned long. Correct this
assumption across the board.

Overall, this patch does not introduce any functional change.

Link: https://lkml.kernel.org/r/b6eb1894abc5555ece80bb08af5c022ef780c8bc.1750274467.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Acked-by: Christian Brauner <brauner@kernel.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Pedro Falcato <pfalcato@suse.de>
Acked-by: Catalin Marinas <catalin.marinas@arm.com> [arm64]
Acked-by: Zi Yan <ziy@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Jann Horn <jannh@google.com>
Cc: Kees Cook <kees@kernel.org>
Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Jan Kara <jack@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Lorenzo Stoakes; committed by Andrew Morton.
Commit IDs: d75fa3c9, bfbe7110

+20 -20
+1 -1
arch/arm/mm/fault.c
@@ -268,7 +268,7 @@
 	int sig, code;
 	vm_fault_t fault;
 	unsigned int flags = FAULT_FLAG_DEFAULT;
-	unsigned long vm_flags = VM_ACCESS_FLAGS;
+	vm_flags_t vm_flags = VM_ACCESS_FLAGS;

 	if (kprobe_page_fault(regs, fsr))
 		return 0;
+5 -5
arch/arm64/include/asm/mman.h
@@ -11,10 +11,10 @@
 #include <linux/shmem_fs.h>
 #include <linux/types.h>

-static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
+static inline vm_flags_t arch_calc_vm_prot_bits(unsigned long prot,
 						   unsigned long pkey)
 {
-	unsigned long ret = 0;
+	vm_flags_t ret = 0;

 	if (system_supports_bti() && (prot & PROT_BTI))
 		ret |= VM_ARM64_BTI;
@@ -34,8 +34,8 @@
 }
 #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)

-static inline unsigned long arch_calc_vm_flag_bits(struct file *file,
-						   unsigned long flags)
+static inline vm_flags_t arch_calc_vm_flag_bits(struct file *file,
+						unsigned long flags)
 {
 	/*
 	 * Only allow MTE on anonymous mappings as these are guaranteed to be
@@ -68,7 +68,7 @@
 }
 #define arch_validate_prot(prot, addr) arch_validate_prot(prot, addr)

-static inline bool arch_validate_flags(unsigned long vm_flags)
+static inline bool arch_validate_flags(vm_flags_t vm_flags)
 {
 	if (system_supports_mte()) {
 		/*
+1 -1
arch/arm64/mm/fault.c
@@ -549,7 +549,7 @@
 	const struct fault_info *inf;
 	struct mm_struct *mm = current->mm;
 	vm_fault_t fault;
-	unsigned long vm_flags;
+	vm_flags_t vm_flags;
 	unsigned int mm_flags = FAULT_FLAG_DEFAULT;
 	unsigned long addr = untagged_addr(far);
 	struct vm_area_struct *vma;
+1 -1
arch/arm64/mm/mmu.c
@@ -720,7 +720,7 @@

 static void __init declare_vma(struct vm_struct *vma,
 			       void *va_start, void *va_end,
-			       unsigned long vm_flags)
+			       vm_flags_t vm_flags)
 {
 	phys_addr_t pa_start = __pa_symbol(va_start);
 	unsigned long size = va_end - va_start;
+1 -1
arch/powerpc/include/asm/mman.h
@@ -14,7 +14,7 @@
 #include <asm/cpu_has_feature.h>
 #include <asm/firmware.h>

-static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
+static inline vm_flags_t arch_calc_vm_prot_bits(unsigned long prot,
 						   unsigned long pkey)
 {
 #ifdef CONFIG_PPC_MEM_KEYS
+2 -2
arch/powerpc/include/asm/pkeys.h
@@ -30,9 +30,9 @@
 #endif


-static inline u64 pkey_to_vmflag_bits(u16 pkey)
+static inline vm_flags_t pkey_to_vmflag_bits(u16 pkey)
 {
-	return (((u64)pkey << VM_PKEY_SHIFT) & ARCH_VM_PKEY_FLAGS);
+	return (((vm_flags_t)pkey << VM_PKEY_SHIFT) & ARCH_VM_PKEY_FLAGS);
 }

 static inline int vma_pkey(struct vm_area_struct *vma)
+1 -1
arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -393,7 +393,7 @@
 {
 	unsigned long gfn = memslot->base_gfn;
 	unsigned long end, start = gfn_to_hva(kvm, gfn);
-	unsigned long vm_flags;
+	vm_flags_t vm_flags;
 	int ret = 0;
 	struct vm_area_struct *vma;
 	int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;
+2 -2
arch/sparc/include/asm/mman.h
@@ -28,7 +28,7 @@
 }

 #define arch_calc_vm_prot_bits(prot, pkey) sparc_calc_vm_prot_bits(prot)
-static inline unsigned long sparc_calc_vm_prot_bits(unsigned long prot)
+static inline vm_flags_t sparc_calc_vm_prot_bits(unsigned long prot)
 {
 	if (adi_capable() && (prot & PROT_ADI)) {
 		struct pt_regs *regs;
@@ -58,7 +58,7 @@
 /* arch_validate_flags() - Ensure combination of flags is valid for a
  * VMA.
  */
-static inline bool arch_validate_flags(unsigned long vm_flags)
+static inline bool arch_validate_flags(vm_flags_t vm_flags)
 {
 	/* If ADI is being enabled on this VMA, check for ADI
 	 * capability on the platform and ensure VMA is suitable
+4 -4
arch/x86/kernel/cpu/sgx/encl.c
@@ -279,7 +279,7 @@

 static struct sgx_encl_page *sgx_encl_load_page_in_vma(struct sgx_encl *encl,
 						       unsigned long addr,
-						       unsigned long vm_flags)
+						       vm_flags_t vm_flags)
 {
 	unsigned long vm_prot_bits = vm_flags & VM_ACCESS_FLAGS;
 	struct sgx_encl_page *entry;
@@ -520,9 +520,9 @@
  * Return: 0 on success, -EACCES otherwise
  */
 int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
-		     unsigned long end, unsigned long vm_flags)
+		     unsigned long end, vm_flags_t vm_flags)
 {
-	unsigned long vm_prot_bits = vm_flags & VM_ACCESS_FLAGS;
+	vm_flags_t vm_prot_bits = vm_flags & VM_ACCESS_FLAGS;
 	struct sgx_encl_page *page;
 	unsigned long count = 0;
 	int ret = 0;
@@ -605,7 +605,7 @@
  */
 static struct sgx_encl_page *sgx_encl_reserve_page(struct sgx_encl *encl,
 						   unsigned long addr,
-						   unsigned long vm_flags)
+						   vm_flags_t vm_flags)
 {
 	struct sgx_encl_page *entry;

+1 -1
arch/x86/kernel/cpu/sgx/encl.h
@@ -101,7 +101,7 @@
 }

 int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
-		     unsigned long end, unsigned long vm_flags);
+		     unsigned long end, vm_flags_t vm_flags);

 bool current_is_ksgxd(void);
 void sgx_encl_release(struct kref *ref);
+1 -1
tools/testing/vma/vma_internal.h
@@ -1215,7 +1215,7 @@
 	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
 }

-static inline bool arch_validate_flags(unsigned long)
+static inline bool arch_validate_flags(vm_flags_t)
 {
 	return true;
 }