Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/uaccess: Enable hardened usercopy

Enables CONFIG_HARDENED_USERCOPY checks on x86. This is done both in
copy_*_user() and __copy_*_user() because copy_*_user() actually calls
down to _copy_*_user() and not __copy_*_user().

Based on code from PaX and grsecurity.

Signed-off-by: Kees Cook <keescook@chromium.org>
Tested-by: Valdis Kletnieks <valdis.kletnieks@vt.edu>

+11 -4
+1
arch/x86/Kconfig
@@ -80,6 +80,7 @@
 	select HAVE_ALIGNED_STRUCT_PAGE		if SLUB
 	select HAVE_AOUT			if X86_32
 	select HAVE_ARCH_AUDITSYSCALL
+	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_ARCH_HUGE_VMAP		if X86_64 || X86_PAE
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_KASAN			if X86_64 && SPARSEMEM_VMEMMAP
+6 -4
arch/x86/include/asm/uaccess.h
@@ -742,9 +742,10 @@
 	 * case, and do only runtime checking for non-constant sizes.
 	 */

-	if (likely(sz < 0 || sz >= n))
+	if (likely(sz < 0 || sz >= n)) {
+		check_object_size(to, n, false);
 		n = _copy_from_user(to, from, n);
-	else if(__builtin_constant_p(n))
+	} else if (__builtin_constant_p(n))
 		copy_from_user_overflow();
 	else
 		__copy_from_user_overflow(sz, n);
@@ -762,9 +763,10 @@
 	might_fault();

 	/* See the comment in copy_from_user() above. */
-	if (likely(sz < 0 || sz >= n))
+	if (likely(sz < 0 || sz >= n)) {
+		check_object_size(from, n, true);
 		n = _copy_to_user(to, from, n);
-	else if(__builtin_constant_p(n))
+	} else if (__builtin_constant_p(n))
 		copy_to_user_overflow();
 	else
 		__copy_to_user_overflow(sz, n);
+2
arch/x86/include/asm/uaccess_32.h
@@ -37,6 +37,7 @@
 static __always_inline unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
+	check_object_size(from, n, true);
 	return __copy_to_user_ll(to, from, n);
 }

@@ -95,6 +96,7 @@
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	might_fault();
+	check_object_size(to, n, false);
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;

+2
arch/x86/include/asm/uaccess_64.h
@@ -54,6 +54,7 @@
 {
 	int ret = 0;

+	check_object_size(dst, size, false);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
@@ -119,6 +120,7 @@
 {
 	int ret = 0;

+	check_object_size(src, size, true);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst, src, size);
 	switch (size) {