Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/core, arch, powerpc: Pass a protection key in to calc_vm_flag_bits()

This plumbs a protection key through calc_vm_flag_bits(). We
could have done this in calc_vm_prot_bits(), but I did not feel
super strongly which way to go. It was pretty arbitrary which
one to use.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Chen Gang <gang.chen.5i5j@gmail.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Dave Hansen <dave@sr71.net>
Cc: David Airlie <airlied@linux.ie>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Geliang Tang <geliangtang@163.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Leon Romanovsky <leon@leon.nu>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
Cc: Maxime Coquelin <mcoquelin.stm32@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Riley Andrews <riandrews@android.com>
Cc: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: devel@driverdev.osuosl.org
Cc: linux-api@vger.kernel.org
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: linux-mm@kvack.org
Cc: linuxppc-dev@lists.ozlabs.org
Link: http://lkml.kernel.org/r/20160212210231.E6F1F0D6@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Dave Hansen; committed by Ingo Molnar.
Commit identifiers (as shown by the repository browser):
e6bfb709 06976945

+12 -11
+3 -2
arch/powerpc/include/asm/mman.h
@@ -18,11 +18,12 @@
  * This file is included by linux/mman.h, so we can't use cacl_vm_prot_bits()
  * here.  How important is the optimization?
  */
-static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
+static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
+		unsigned long pkey)
 {
 	return (prot & PROT_SAO) ? VM_SAO : 0;
 }
-#define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
+#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)

 static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
 {
+1 -1
drivers/char/agp/frontend.c
@@ -156,7 +156,7 @@
 {
 	unsigned long prot_bits;

-	prot_bits = calc_vm_prot_bits(prot) | VM_SHARED;
+	prot_bits = calc_vm_prot_bits(prot, 0) | VM_SHARED;
 	return vm_get_page_prot(prot_bits);
 }

+2 -2
drivers/staging/android/ashmem.c
@@ -372,8 +372,8 @@
 	}

 	/* requested protection bits must match our allowed protection mask */
-	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
-		     calc_vm_prot_bits(PROT_MASK))) {
+	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
+		     calc_vm_prot_bits(PROT_MASK, 0))) {
 		ret = -EPERM;
 		goto out;
 	}
+3 -3
include/linux/mman.h
@@ -35,7 +35,7 @@
  */

 #ifndef arch_calc_vm_prot_bits
-#define arch_calc_vm_prot_bits(prot) 0
+#define arch_calc_vm_prot_bits(prot, pkey) 0
 #endif

 #ifndef arch_vm_get_page_prot
@@ -70,12 +70,12 @@
  * Combine the mmap "prot" argument into "vm_flags" used internally.
  */
 static inline unsigned long
-calc_vm_prot_bits(unsigned long prot)
+calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
 {
 	return _calc_vm_trans(prot, PROT_READ,  VM_READ ) |
 	       _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
 	       _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC) |
-	       arch_calc_vm_prot_bits(prot);
+	       arch_calc_vm_prot_bits(prot, pkey);
 }

 /*
+1 -1
mm/mmap.c
@@ -1313,7 +1313,7 @@
 	 * to. we assume access permissions have been handled by the open
 	 * of the memory object, so we don't do any here.
 	 */
-	vm_flags |= calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
+	vm_flags |= calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags) |
 			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

 	if (flags & MAP_LOCKED)
+1 -1
mm/mprotect.c
@@ -380,7 +380,7 @@
 	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
 		prot |= PROT_EXEC;

-	vm_flags = calc_vm_prot_bits(prot);
+	vm_flags = calc_vm_prot_bits(prot, 0);

 	down_write(&current->mm->mmap_sem);

+1 -1
mm/nommu.c
@@ -1082,7 +1082,7 @@
 {
 	unsigned long vm_flags;

-	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
+	vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags);
 	/* vm_flags |= mm->def_flags; */

 	if (!(capabilities & NOMMU_MAP_DIRECT)) {