Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/vma: introduce VM_ACCESS_FLAGS

There are many places where all basic VMA access flags (read, write,
exec) are initialized or checked against as a group. One such example
is during page fault. Existing vma_is_accessible() wrapper already
creates the notion of VMA accessibility as a group of access permissions.

Hence let's just create VM_ACCESS_FLAGS (VM_READ|VM_WRITE|VM_EXEC) which
will not only reduce code duplication but also extend the VMA
accessibility concept in general.

Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Nick Hu <nickhu@andestech.com>
Cc: Ley Foon Tan <ley.foon.tan@intel.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Rob Springer <rspringer@google.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Link: http://lkml.kernel.org/r/1583391014-8170-3-git-send-email-anshuman.khandual@arm.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Anshuman Khandual and committed by
Linus Torvalds
6cb4d9a2 c62da0c3

+16 -12
+1 -1
arch/arm/mm/fault.c
··· 189 189 */ 190 190 static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma) 191 191 { 192 - unsigned int mask = VM_READ | VM_WRITE | VM_EXEC; 192 + unsigned int mask = VM_ACCESS_FLAGS; 193 193 194 194 if ((fsr & FSR_WRITE) && !(fsr & FSR_CM)) 195 195 mask = VM_WRITE;
+1 -1
arch/arm64/mm/fault.c
··· 445 445 const struct fault_info *inf; 446 446 struct mm_struct *mm = current->mm; 447 447 vm_fault_t fault, major = 0; 448 - unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC; 448 + unsigned long vm_flags = VM_ACCESS_FLAGS; 449 449 unsigned int mm_flags = FAULT_FLAG_DEFAULT; 450 450 451 451 if (kprobe_page_fault(regs, esr))
+1 -1
arch/nds32/mm/fault.c
··· 79 79 struct vm_area_struct *vma; 80 80 int si_code; 81 81 vm_fault_t fault; 82 - unsigned int mask = VM_READ | VM_WRITE | VM_EXEC; 82 + unsigned int mask = VM_ACCESS_FLAGS; 83 83 unsigned int flags = FAULT_FLAG_DEFAULT; 84 84 85 85 error_code = error_code & (ITYPE_mskINST | ITYPE_mskETYPE);
+1 -1
arch/powerpc/mm/book3s64/pkeys.c
··· 315 315 static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma) 316 316 { 317 317 /* Do this check first since the vm_flags should be hot */ 318 - if ((vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) != VM_EXEC) 318 + if ((vma->vm_flags & VM_ACCESS_FLAGS) != VM_EXEC) 319 319 return false; 320 320 321 321 return (vma_pkey(vma) == vma->vm_mm->context.execute_only_pkey);
+1 -1
arch/s390/mm/fault.c
··· 580 580 int access; 581 581 vm_fault_t fault; 582 582 583 - access = VM_READ | VM_EXEC | VM_WRITE; 583 + access = VM_ACCESS_FLAGS; 584 584 fault = do_exception(regs, access); 585 585 if (unlikely(fault)) 586 586 do_fault_error(regs, access, fault);
+1 -1
arch/unicore32/mm/fault.c
··· 149 149 */ 150 150 static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma) 151 151 { 152 - unsigned int mask = VM_READ | VM_WRITE | VM_EXEC; 152 + unsigned int mask = VM_ACCESS_FLAGS; 153 153 154 154 if (!(fsr ^ 0x12)) /* write? */ 155 155 mask = VM_WRITE;
+1 -1
arch/x86/mm/pkeys.c
··· 63 63 static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma) 64 64 { 65 65 /* Do this check first since the vm_flags should be hot */ 66 - if ((vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) != VM_EXEC) 66 + if ((vma->vm_flags & VM_ACCESS_FLAGS) != VM_EXEC) 67 67 return false; 68 68 if (vma_pkey(vma) != vma->vm_mm->context.execute_only_pkey) 69 69 return false;
+1 -1
drivers/staging/gasket/gasket_core.c
··· 689 689 690 690 /* Make sure that no wrong flags are set. */ 691 691 requested_permissions = 692 - (vma->vm_flags & (VM_WRITE | VM_READ | VM_EXEC)); 692 + (vma->vm_flags & VM_ACCESS_FLAGS); 693 693 if (requested_permissions & ~(bar_permissions)) { 694 694 dev_dbg(gasket_dev->dev, 695 695 "Attempting to map a region with requested permissions 0x%x, but region has permissions 0x%x.\n",
+5 -1
include/linux/mm.h
··· 369 369 370 370 #define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) 371 371 372 + /* VMA basic access permission flags */ 373 + #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC) 374 + 375 + 372 376 /* 373 377 * Special vmas that are non-mergable, non-mlock()able. 374 378 */ ··· 650 646 651 647 static inline bool vma_is_accessible(struct vm_area_struct *vma) 652 648 { 653 - return vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC); 649 + return vma->vm_flags & VM_ACCESS_FLAGS; 654 650 } 655 651 656 652 #ifdef CONFIG_SHMEM
+1 -1
mm/mmap.c
··· 1224 1224 return a->vm_end == b->vm_start && 1225 1225 mpol_equal(vma_policy(a), vma_policy(b)) && 1226 1226 a->vm_file == b->vm_file && 1227 - !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) && 1227 + !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) && 1228 1228 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); 1229 1229 } 1230 1230
+2 -2
mm/mprotect.c
··· 419 419 */ 420 420 if (arch_has_pfn_modify_check() && 421 421 (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && 422 - (newflags & (VM_READ|VM_WRITE|VM_EXEC)) == 0) { 422 + (newflags & VM_ACCESS_FLAGS) == 0) { 423 423 pgprot_t new_pgprot = vm_get_page_prot(newflags); 424 424 425 425 error = walk_page_range(current->mm, start, end, ··· 598 598 newflags |= (vma->vm_flags & ~mask_off_old_flags); 599 599 600 600 /* newflags >> 4 shift VM_MAY% in place of VM_% */ 601 - if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) { 601 + if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) { 602 602 error = -EACCES; 603 603 goto out; 604 604 }