Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: Allow architectures to define additional protection bits

This patch allows architectures to define functions to deal with
additional protection bits for mmap() and mprotect().

arch_calc_vm_prot_bits() maps additional protection bits to vm_flags
arch_vm_get_page_prot() maps additional vm_flags to the vma's vm_page_prot
arch_validate_prot() checks for valid values of the protection bits

Note: vm_get_page_prot() is now pretty ugly, but the generated code
should be identical for architectures that don't define additional
protection bits.

Signed-off-by: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

authored by

Dave Kleikamp and committed by
Benjamin Herrenschmidt
b845f313 e5093ff0

+32 -4
+28 -1
include/linux/mman.h
··· 34 34 } 35 35 36 36 /* 37 + * Allow architectures to handle additional protection bits 38 + */ 39 + 40 + #ifndef arch_calc_vm_prot_bits 41 + #define arch_calc_vm_prot_bits(prot) 0 42 + #endif 43 + 44 + #ifndef arch_vm_get_page_prot 45 + #define arch_vm_get_page_prot(vm_flags) __pgprot(0) 46 + #endif 47 + 48 + #ifndef arch_validate_prot 49 + /* 50 + * This is called from mprotect(). PROT_GROWSDOWN and PROT_GROWSUP have 51 + * already been masked out. 52 + * 53 + * Returns true if the prot flags are valid 54 + */ 55 + static inline int arch_validate_prot(unsigned long prot) 56 + { 57 + return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0; 58 + } 59 + #define arch_validate_prot arch_validate_prot 60 + #endif 61 + 62 + /* 37 63 * Optimisation macro. It is equivalent to: 38 64 * (x & bit1) ? bit2 : 0 39 65 * but this version is faster. ··· 77 51 { 78 52 return _calc_vm_trans(prot, PROT_READ, VM_READ ) | 79 53 _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) | 80 - _calc_vm_trans(prot, PROT_EXEC, VM_EXEC ); 54 + _calc_vm_trans(prot, PROT_EXEC, VM_EXEC) | 55 + arch_calc_vm_prot_bits(prot); 81 56 } 82 57 83 58 /*
+3 -2
mm/mmap.c
··· 72 72 73 73 pgprot_t vm_get_page_prot(unsigned long vm_flags) 74 74 { 75 - return protection_map[vm_flags & 76 - (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]; 75 + return __pgprot(pgprot_val(protection_map[vm_flags & 76 + (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) | 77 + pgprot_val(arch_vm_get_page_prot(vm_flags))); 77 78 } 78 79 EXPORT_SYMBOL(vm_get_page_prot); 79 80
+1 -1
mm/mprotect.c
··· 239 239 end = start + len; 240 240 if (end <= start) 241 241 return -ENOMEM; 242 - if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) 242 + if (!arch_validate_prot(prot)) 243 243 return -EINVAL; 244 244 245 245 reqprot = prot;