Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'usercopy-v4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull usercopy protection from Kees Cook:
"This implements HARDENED_USERCOPY verification of copy_to_user and
copy_from_user bounds checking for most architectures on SLAB and
SLUB"

* tag 'usercopy-v4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
mm: SLUB hardened usercopy support
mm: SLAB hardened usercopy support
s390/uaccess: Enable hardened usercopy
sparc/uaccess: Enable hardened usercopy
powerpc/uaccess: Enable hardened usercopy
ia64/uaccess: Enable hardened usercopy
arm64/uaccess: Enable hardened usercopy
ARM: uaccess: Enable hardened usercopy
x86/uaccess: Enable hardened usercopy
mm: Hardened usercopy
mm: Implement stack frame object validation
mm: Add is_migrate_cma_page

+555 -22
+9
arch/Kconfig
··· 461 461 462 462 endchoice 463 463 464 + config HAVE_ARCH_WITHIN_STACK_FRAMES 465 + bool 466 + help 467 + An architecture should select this if it can walk the kernel stack 468 + frames to determine if an object is part of either the arguments 469 + or local variables (i.e. that it excludes saved return addresses, 470 + and similar) by implementing an inline arch_within_stack_frames(), 471 + which is used by CONFIG_HARDENED_USERCOPY. 472 + 464 473 config HAVE_CONTEXT_TRACKING 465 474 bool 466 475 help
+1
arch/arm/Kconfig
··· 35 35 select HARDIRQS_SW_RESEND 36 36 select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT) 37 37 select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6 38 + select HAVE_ARCH_HARDENED_USERCOPY 38 39 select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU 39 40 select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU 40 41 select HAVE_ARCH_MMAP_RND_BITS if MMU
+9 -2
arch/arm/include/asm/uaccess.h
··· 480 480 static inline unsigned long __must_check 481 481 __copy_from_user(void *to, const void __user *from, unsigned long n) 482 482 { 483 - unsigned int __ua_flags = uaccess_save_and_enable(); 483 + unsigned int __ua_flags; 484 + 485 + check_object_size(to, n, false); 486 + __ua_flags = uaccess_save_and_enable(); 484 487 n = arm_copy_from_user(to, from, n); 485 488 uaccess_restore(__ua_flags); 486 489 return n; ··· 498 495 __copy_to_user(void __user *to, const void *from, unsigned long n) 499 496 { 500 497 #ifndef CONFIG_UACCESS_WITH_MEMCPY 501 - unsigned int __ua_flags = uaccess_save_and_enable(); 498 + unsigned int __ua_flags; 499 + 500 + check_object_size(from, n, true); 501 + __ua_flags = uaccess_save_and_enable(); 502 502 n = arm_copy_to_user(to, from, n); 503 503 uaccess_restore(__ua_flags); 504 504 return n; 505 505 #else 506 + check_object_size(from, n, true); 506 507 return arm_copy_to_user(to, from, n); 507 508 #endif 508 509 }
+1
arch/arm64/Kconfig
··· 54 54 select HAVE_ALIGNED_STRUCT_PAGE if SLUB 55 55 select HAVE_ARCH_AUDITSYSCALL 56 56 select HAVE_ARCH_BITREVERSE 57 + select HAVE_ARCH_HARDENED_USERCOPY 57 58 select HAVE_ARCH_HUGE_VMAP 58 59 select HAVE_ARCH_JUMP_LABEL 59 60 select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
+10 -5
arch/arm64/include/asm/uaccess.h
··· 265 265 static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n) 266 266 { 267 267 kasan_check_write(to, n); 268 - return __arch_copy_from_user(to, from, n); 268 + check_object_size(to, n, false); 269 + return __arch_copy_from_user(to, from, n); 269 270 } 270 271 271 272 static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n) 272 273 { 273 274 kasan_check_read(from, n); 274 - return __arch_copy_to_user(to, from, n); 275 + check_object_size(from, n, true); 276 + return __arch_copy_to_user(to, from, n); 275 277 } 276 278 277 279 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) 278 280 { 279 281 kasan_check_write(to, n); 280 282 281 - if (access_ok(VERIFY_READ, from, n)) 283 + if (access_ok(VERIFY_READ, from, n)) { 284 + check_object_size(to, n, false); 282 285 n = __arch_copy_from_user(to, from, n); 283 - else /* security hole - plug it */ 286 + } else /* security hole - plug it */ 284 287 memset(to, 0, n); 285 288 return n; 286 289 } ··· 292 289 { 293 290 kasan_check_read(from, n); 294 291 295 - if (access_ok(VERIFY_WRITE, to, n)) 292 + if (access_ok(VERIFY_WRITE, to, n)) { 293 + check_object_size(from, n, true); 296 294 n = __arch_copy_to_user(to, from, n); 295 + } 297 296 return n; 298 297 } 299 298
+1
arch/ia64/Kconfig
··· 52 52 select MODULES_USE_ELF_RELA 53 53 select ARCH_USE_CMPXCHG_LOCKREF 54 54 select HAVE_ARCH_AUDITSYSCALL 55 + select HAVE_ARCH_HARDENED_USERCOPY 55 56 default y 56 57 help 57 58 The Itanium Processor Family is Intel's 64-bit successor to
+15 -3
arch/ia64/include/asm/uaccess.h
··· 241 241 static inline unsigned long 242 242 __copy_to_user (void __user *to, const void *from, unsigned long count) 243 243 { 244 + if (!__builtin_constant_p(count)) 245 + check_object_size(from, count, true); 246 + 244 247 return __copy_user(to, (__force void __user *) from, count); 245 248 } 246 249 247 250 static inline unsigned long 248 251 __copy_from_user (void *to, const void __user *from, unsigned long count) 249 252 { 253 + if (!__builtin_constant_p(count)) 254 + check_object_size(to, count, false); 255 + 250 256 return __copy_user((__force void __user *) to, from, count); 251 257 } 252 258 ··· 264 258 const void *__cu_from = (from); \ 265 259 long __cu_len = (n); \ 266 260 \ 267 - if (__access_ok(__cu_to, __cu_len, get_fs())) \ 268 - __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \ 261 + if (__access_ok(__cu_to, __cu_len, get_fs())) { \ 262 + if (!__builtin_constant_p(n)) \ 263 + check_object_size(__cu_from, __cu_len, true); \ 264 + __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \ 265 + } \ 269 266 __cu_len; \ 270 267 }) 271 268 ··· 279 270 long __cu_len = (n); \ 280 271 \ 281 272 __chk_user_ptr(__cu_from); \ 282 - if (__access_ok(__cu_from, __cu_len, get_fs())) \ 273 + if (__access_ok(__cu_from, __cu_len, get_fs())) { \ 274 + if (!__builtin_constant_p(n)) \ 275 + check_object_size(__cu_to, __cu_len, false); \ 283 276 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \ 277 + } \ 284 278 __cu_len; \ 285 279 }) 286 280
+1
arch/powerpc/Kconfig
··· 166 166 select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS 167 167 select GENERIC_CPU_AUTOPROBE 168 168 select HAVE_VIRT_CPU_ACCOUNTING 169 + select HAVE_ARCH_HARDENED_USERCOPY 169 170 170 171 config GENERIC_CSUM 171 172 def_bool CPU_LITTLE_ENDIAN
+19 -2
arch/powerpc/include/asm/uaccess.h
··· 310 310 { 311 311 unsigned long over; 312 312 313 - if (access_ok(VERIFY_READ, from, n)) 313 + if (access_ok(VERIFY_READ, from, n)) { 314 + if (!__builtin_constant_p(n)) 315 + check_object_size(to, n, false); 314 316 return __copy_tofrom_user((__force void __user *)to, from, n); 317 + } 315 318 if ((unsigned long)from < TASK_SIZE) { 316 319 over = (unsigned long)from + n - TASK_SIZE; 320 + if (!__builtin_constant_p(n - over)) 321 + check_object_size(to, n - over, false); 317 322 return __copy_tofrom_user((__force void __user *)to, from, 318 323 n - over) + over; 319 324 } ··· 330 325 { 331 326 unsigned long over; 332 327 333 - if (access_ok(VERIFY_WRITE, to, n)) 328 + if (access_ok(VERIFY_WRITE, to, n)) { 329 + if (!__builtin_constant_p(n)) 330 + check_object_size(from, n, true); 334 331 return __copy_tofrom_user(to, (__force void __user *)from, n); 332 + } 335 333 if ((unsigned long)to < TASK_SIZE) { 336 334 over = (unsigned long)to + n - TASK_SIZE; 335 + if (!__builtin_constant_p(n)) 336 + check_object_size(from, n - over, true); 337 337 return __copy_tofrom_user(to, (__force void __user *)from, 338 338 n - over) + over; 339 339 } ··· 382 372 if (ret == 0) 383 373 return 0; 384 374 } 375 + 376 + if (!__builtin_constant_p(n)) 377 + check_object_size(to, n, false); 378 + 385 379 return __copy_tofrom_user((__force void __user *)to, from, n); 386 380 } 387 381 ··· 412 398 if (ret == 0) 413 399 return 0; 414 400 } 401 + if (!__builtin_constant_p(n)) 402 + check_object_size(from, n, true); 403 + 415 404 return __copy_tofrom_user(to, (__force const void __user *)from, n); 416 405 } 417 406
+1
arch/s390/Kconfig
··· 123 123 select HAVE_ALIGNED_STRUCT_PAGE if SLUB 124 124 select HAVE_ARCH_AUDITSYSCALL 125 125 select HAVE_ARCH_EARLY_PFN_TO_NID 126 + select HAVE_ARCH_HARDENED_USERCOPY 126 127 select HAVE_ARCH_JUMP_LABEL 127 128 select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES 128 129 select HAVE_ARCH_SECCOMP_FILTER
+2
arch/s390/lib/uaccess.c
··· 104 104 105 105 unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) 106 106 { 107 + check_object_size(to, n, false); 107 108 if (static_branch_likely(&have_mvcos)) 108 109 return copy_from_user_mvcos(to, from, n); 109 110 return copy_from_user_mvcp(to, from, n); ··· 178 177 179 178 unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) 180 179 { 180 + check_object_size(from, n, true); 181 181 if (static_branch_likely(&have_mvcos)) 182 182 return copy_to_user_mvcos(to, from, n); 183 183 return copy_to_user_mvcs(to, from, n);
+1
arch/sparc/Kconfig
··· 43 43 select OLD_SIGSUSPEND 44 44 select ARCH_HAS_SG_CHAIN 45 45 select CPU_NO_EFFICIENT_FFS 46 + select HAVE_ARCH_HARDENED_USERCOPY 46 47 47 48 config SPARC32 48 49 def_bool !64BIT
+10 -4
arch/sparc/include/asm/uaccess_32.h
··· 248 248 249 249 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) 250 250 { 251 - if (n && __access_ok((unsigned long) to, n)) 251 + if (n && __access_ok((unsigned long) to, n)) { 252 + if (!__builtin_constant_p(n)) 253 + check_object_size(from, n, true); 252 254 return __copy_user(to, (__force void __user *) from, n); 253 - else 255 + } else 254 256 return n; 255 257 } 256 258 257 259 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) 258 260 { 261 + if (!__builtin_constant_p(n)) 262 + check_object_size(from, n, true); 259 263 return __copy_user(to, (__force void __user *) from, n); 260 264 } 261 265 262 266 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) 263 267 { 264 - if (n && __access_ok((unsigned long) from, n)) 268 + if (n && __access_ok((unsigned long) from, n)) { 269 + if (!__builtin_constant_p(n)) 270 + check_object_size(to, n, false); 265 271 return __copy_user((__force void __user *) to, from, n); 266 - else 272 + } else 267 273 return n; 268 274 } 269 275
+9 -2
arch/sparc/include/asm/uaccess_64.h
··· 210 210 static inline unsigned long __must_check 211 211 copy_from_user(void *to, const void __user *from, unsigned long size) 212 212 { 213 - unsigned long ret = ___copy_from_user(to, from, size); 213 + unsigned long ret; 214 214 215 + if (!__builtin_constant_p(size)) 216 + check_object_size(to, size, false); 217 + 218 + ret = ___copy_from_user(to, from, size); 215 219 if (unlikely(ret)) 216 220 ret = copy_from_user_fixup(to, from, size); 217 221 ··· 231 227 static inline unsigned long __must_check 232 228 copy_to_user(void __user *to, const void *from, unsigned long size) 233 229 { 234 - unsigned long ret = ___copy_to_user(to, from, size); 230 + unsigned long ret; 235 231 232 + if (!__builtin_constant_p(size)) 233 + check_object_size(from, size, true); 234 + ret = ___copy_to_user(to, from, size); 236 235 if (unlikely(ret)) 237 236 ret = copy_to_user_fixup(to, from, size); 238 237 return ret;
+2
arch/x86/Kconfig
··· 80 80 select HAVE_ALIGNED_STRUCT_PAGE if SLUB 81 81 select HAVE_AOUT if X86_32 82 82 select HAVE_ARCH_AUDITSYSCALL 83 + select HAVE_ARCH_HARDENED_USERCOPY 83 84 select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE 84 85 select HAVE_ARCH_JUMP_LABEL 85 86 select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP ··· 92 91 select HAVE_ARCH_SOFT_DIRTY if X86_64 93 92 select HAVE_ARCH_TRACEHOOK 94 93 select HAVE_ARCH_TRANSPARENT_HUGEPAGE 94 + select HAVE_ARCH_WITHIN_STACK_FRAMES 95 95 select HAVE_EBPF_JIT if X86_64 96 96 select HAVE_CC_STACKPROTECTOR 97 97 select HAVE_CMPXCHG_DOUBLE
+44
arch/x86/include/asm/thread_info.h
··· 176 176 return sp; 177 177 } 178 178 179 + /* 180 + * Walks up the stack frames to make sure that the specified object is 181 + * entirely contained by a single stack frame. 182 + * 183 + * Returns: 184 + * 1 if within a frame 185 + * -1 if placed across a frame boundary (or outside stack) 186 + * 0 unable to determine (no frame pointers, etc) 187 + */ 188 + static inline int arch_within_stack_frames(const void * const stack, 189 + const void * const stackend, 190 + const void *obj, unsigned long len) 191 + { 192 + #if defined(CONFIG_FRAME_POINTER) 193 + const void *frame = NULL; 194 + const void *oldframe; 195 + 196 + oldframe = __builtin_frame_address(1); 197 + if (oldframe) 198 + frame = __builtin_frame_address(2); 199 + /* 200 + * low ----------------------------------------------> high 201 + * [saved bp][saved ip][args][local vars][saved bp][saved ip] 202 + * ^----------------^ 203 + * allow copies only within here 204 + */ 205 + while (stack <= frame && frame < stackend) { 206 + /* 207 + * If obj + len extends past the last frame, this 208 + * check won't pass and the next frame will be 0, 209 + * causing us to bail out and correctly report 210 + * the copy as invalid. 211 + */ 212 + if (obj + len <= frame) 213 + return obj >= oldframe + 2 * sizeof(void *) ? 1 : -1; 214 + oldframe = frame; 215 + frame = *(const void * const *)frame; 216 + } 217 + return -1; 218 + #else 219 + return 0; 220 + #endif 221 + } 222 + 179 223 #else /* !__ASSEMBLY__ */ 180 224 181 225 #ifdef CONFIG_X86_64
+6 -4
arch/x86/include/asm/uaccess.h
··· 761 761 * case, and do only runtime checking for non-constant sizes. 762 762 */ 763 763 764 - if (likely(sz < 0 || sz >= n)) 764 + if (likely(sz < 0 || sz >= n)) { 765 + check_object_size(to, n, false); 765 766 n = _copy_from_user(to, from, n); 766 - else if(__builtin_constant_p(n)) 767 + } else if (__builtin_constant_p(n)) 767 768 copy_from_user_overflow(); 768 769 else 769 770 __copy_from_user_overflow(sz, n); ··· 782 781 might_fault(); 783 782 784 783 /* See the comment in copy_from_user() above. */ 785 - if (likely(sz < 0 || sz >= n)) 784 + if (likely(sz < 0 || sz >= n)) { 785 + check_object_size(from, n, true); 786 786 n = _copy_to_user(to, from, n); 787 - else if(__builtin_constant_p(n)) 787 + } else if (__builtin_constant_p(n)) 788 788 copy_to_user_overflow(); 789 789 else 790 790 __copy_to_user_overflow(sz, n);
+2
arch/x86/include/asm/uaccess_32.h
··· 37 37 static __always_inline unsigned long __must_check 38 38 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) 39 39 { 40 + check_object_size(from, n, true); 40 41 return __copy_to_user_ll(to, from, n); 41 42 } 42 43 ··· 96 95 __copy_from_user(void *to, const void __user *from, unsigned long n) 97 96 { 98 97 might_fault(); 98 + check_object_size(to, n, false); 99 99 if (__builtin_constant_p(n)) { 100 100 unsigned long ret; 101 101
+2
arch/x86/include/asm/uaccess_64.h
··· 54 54 { 55 55 int ret = 0; 56 56 57 + check_object_size(dst, size, false); 57 58 if (!__builtin_constant_p(size)) 58 59 return copy_user_generic(dst, (__force void *)src, size); 59 60 switch (size) { ··· 120 119 { 121 120 int ret = 0; 122 121 122 + check_object_size(src, size, true); 123 123 if (!__builtin_constant_p(size)) 124 124 return copy_user_generic((__force void *)dst, src, size); 125 125 switch (size) {
+2
include/linux/mmzone.h
··· 68 68 69 69 #ifdef CONFIG_CMA 70 70 # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) 71 + # define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA) 71 72 #else 72 73 # define is_migrate_cma(migratetype) false 74 + # define is_migrate_cma_page(_page) false 73 75 #endif 74 76 75 77 #define for_each_migratetype_order(order, type) \
+12
include/linux/slab.h
··· 155 155 void kzfree(const void *); 156 156 size_t ksize(const void *); 157 157 158 + #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR 159 + const char *__check_heap_object(const void *ptr, unsigned long n, 160 + struct page *page); 161 + #else 162 + static inline const char *__check_heap_object(const void *ptr, 163 + unsigned long n, 164 + struct page *page) 165 + { 166 + return NULL; 167 + } 168 + #endif 169 + 158 170 /* 159 171 * Some archs want to perform DMA into kmalloc caches and need a guaranteed 160 172 * alignment larger than the alignment of a 64-bit integer.
+24
include/linux/thread_info.h
··· 105 105 106 106 #define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) 107 107 108 + #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES 109 + static inline int arch_within_stack_frames(const void * const stack, 110 + const void * const stackend, 111 + const void *obj, unsigned long len) 112 + { 113 + return 0; 114 + } 115 + #endif 116 + 117 + #ifdef CONFIG_HARDENED_USERCOPY 118 + extern void __check_object_size(const void *ptr, unsigned long n, 119 + bool to_user); 120 + 121 + static inline void check_object_size(const void *ptr, unsigned long n, 122 + bool to_user) 123 + { 124 + __check_object_size(ptr, n, to_user); 125 + } 126 + #else 127 + static inline void check_object_size(const void *ptr, unsigned long n, 128 + bool to_user) 129 + { } 130 + #endif /* CONFIG_HARDENED_USERCOPY */ 131 + 108 132 #endif /* __KERNEL__ */ 109 133 110 134 #endif /* _LINUX_THREAD_INFO_H */
+2
init/Kconfig
··· 1761 1761 1762 1762 config SLAB 1763 1763 bool "SLAB" 1764 + select HAVE_HARDENED_USERCOPY_ALLOCATOR 1764 1765 help 1765 1766 The regular slab allocator that is established and known to work 1766 1767 well in all environments. It organizes cache hot objects in ··· 1769 1768 1770 1769 config SLUB 1771 1770 bool "SLUB (Unqueued Allocator)" 1771 + select HAVE_HARDENED_USERCOPY_ALLOCATOR 1772 1772 help 1773 1773 SLUB is a slab allocator that minimizes cache line usage 1774 1774 instead of managing queues of cached objects (SLAB approach).
+4
mm/Makefile
··· 21 21 KCOV_INSTRUMENT_mmzone.o := n 22 22 KCOV_INSTRUMENT_vmstat.o := n 23 23 24 + # Since __builtin_frame_address does work as used, disable the warning. 25 + CFLAGS_usercopy.o += $(call cc-disable-warning, frame-address) 26 + 24 27 mmu-y := nommu.o 25 28 mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \ 26 29 mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \ ··· 102 99 obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o 103 100 obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o 104 101 obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o 102 + obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
+30
mm/slab.c
··· 4441 4441 module_init(slab_proc_init); 4442 4442 #endif 4443 4443 4444 + #ifdef CONFIG_HARDENED_USERCOPY 4445 + /* 4446 + * Rejects objects that are incorrectly sized. 4447 + * 4448 + * Returns NULL if check passes, otherwise const char * to name of cache 4449 + * to indicate an error. 4450 + */ 4451 + const char *__check_heap_object(const void *ptr, unsigned long n, 4452 + struct page *page) 4453 + { 4454 + struct kmem_cache *cachep; 4455 + unsigned int objnr; 4456 + unsigned long offset; 4457 + 4458 + /* Find and validate object. */ 4459 + cachep = page->slab_cache; 4460 + objnr = obj_to_index(cachep, page, (void *)ptr); 4461 + BUG_ON(objnr >= cachep->num); 4462 + 4463 + /* Find offset within object. */ 4464 + offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep); 4465 + 4466 + /* Allow address range falling entirely within object size. */ 4467 + if (offset <= cachep->object_size && n <= cachep->object_size - offset) 4468 + return NULL; 4469 + 4470 + return cachep->name; 4471 + } 4472 + #endif /* CONFIG_HARDENED_USERCOPY */ 4473 + 4444 4474 /** 4445 4475 * ksize - get the actual amount of memory allocated for a given object 4446 4476 * @objp: Pointer to the object
+40
mm/slub.c
··· 3764 3764 EXPORT_SYMBOL(__kmalloc_node); 3765 3765 #endif 3766 3766 3767 + #ifdef CONFIG_HARDENED_USERCOPY 3768 + /* 3769 + * Rejects objects that are incorrectly sized. 3770 + * 3771 + * Returns NULL if check passes, otherwise const char * to name of cache 3772 + * to indicate an error. 3773 + */ 3774 + const char *__check_heap_object(const void *ptr, unsigned long n, 3775 + struct page *page) 3776 + { 3777 + struct kmem_cache *s; 3778 + unsigned long offset; 3779 + size_t object_size; 3780 + 3781 + /* Find object and usable object size. */ 3782 + s = page->slab_cache; 3783 + object_size = slab_ksize(s); 3784 + 3785 + /* Reject impossible pointers. */ 3786 + if (ptr < page_address(page)) 3787 + return s->name; 3788 + 3789 + /* Find offset within object. */ 3790 + offset = (ptr - page_address(page)) % s->size; 3791 + 3792 + /* Adjust for redzone and reject if within the redzone. */ 3793 + if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) { 3794 + if (offset < s->red_left_pad) 3795 + return s->name; 3796 + offset -= s->red_left_pad; 3797 + } 3798 + 3799 + /* Allow address range falling entirely within object size. */ 3800 + if (offset <= object_size && n <= object_size - offset) 3801 + return NULL; 3802 + 3803 + return s->name; 3804 + } 3805 + #endif /* CONFIG_HARDENED_USERCOPY */ 3806 + 3767 3807 static size_t __ksize(const void *object) 3768 3808 { 3769 3809 struct page *page;
+268
mm/usercopy.c
··· 1 + /* 2 + * This implements the various checks for CONFIG_HARDENED_USERCOPY*, 3 + * which are designed to protect kernel memory from needless exposure 4 + * and overwrite under many unintended conditions. This code is based 5 + * on PAX_USERCOPY, which is: 6 + * 7 + * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source 8 + * Security Inc. 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License version 2 as 12 + * published by the Free Software Foundation. 13 + * 14 + */ 15 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 16 + 17 + #include <linux/mm.h> 18 + #include <linux/slab.h> 19 + #include <asm/sections.h> 20 + 21 + enum { 22 + BAD_STACK = -1, 23 + NOT_STACK = 0, 24 + GOOD_FRAME, 25 + GOOD_STACK, 26 + }; 27 + 28 + /* 29 + * Checks if a given pointer and length is contained by the current 30 + * stack frame (if possible). 31 + * 32 + * Returns: 33 + * NOT_STACK: not at all on the stack 34 + * GOOD_FRAME: fully within a valid stack frame 35 + * GOOD_STACK: fully on the stack (when can't do frame-checking) 36 + * BAD_STACK: error condition (invalid stack position or bad stack frame) 37 + */ 38 + static noinline int check_stack_object(const void *obj, unsigned long len) 39 + { 40 + const void * const stack = task_stack_page(current); 41 + const void * const stackend = stack + THREAD_SIZE; 42 + int ret; 43 + 44 + /* Object is not on the stack at all. */ 45 + if (obj + len <= stack || stackend <= obj) 46 + return NOT_STACK; 47 + 48 + /* 49 + * Reject: object partially overlaps the stack (passing the 50 + * the check above means at least one end is within the stack, 51 + * so if this check fails, the other end is outside the stack). 52 + */ 53 + if (obj < stack || stackend < obj + len) 54 + return BAD_STACK; 55 + 56 + /* Check if object is safely within a valid frame. */ 57 + ret = arch_within_stack_frames(stack, stackend, obj, len); 58 + if (ret) 59 + return ret; 60 + 61 + return GOOD_STACK; 62 + } 63 + 64 + static void report_usercopy(const void *ptr, unsigned long len, 65 + bool to_user, const char *type) 66 + { 67 + pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n", 68 + to_user ? "exposure" : "overwrite", 69 + to_user ? "from" : "to", ptr, type ? : "unknown", len); 70 + /* 71 + * For greater effect, it would be nice to do do_group_exit(), 72 + * but BUG() actually hooks all the lock-breaking and per-arch 73 + * Oops code, so that is used here instead. 74 + */ 75 + BUG(); 76 + } 77 + 78 + /* Returns true if any portion of [ptr,ptr+n) over laps with [low,high). */ 79 + static bool overlaps(const void *ptr, unsigned long n, unsigned long low, 80 + unsigned long high) 81 + { 82 + unsigned long check_low = (uintptr_t)ptr; 83 + unsigned long check_high = check_low + n; 84 + 85 + /* Does not overlap if entirely above or entirely below. */ 86 + if (check_low >= high || check_high < low) 87 + return false; 88 + 89 + return true; 90 + } 91 + 92 + /* Is this address range in the kernel text area? */ 93 + static inline const char *check_kernel_text_object(const void *ptr, 94 + unsigned long n) 95 + { 96 + unsigned long textlow = (unsigned long)_stext; 97 + unsigned long texthigh = (unsigned long)_etext; 98 + unsigned long textlow_linear, texthigh_linear; 99 + 100 + if (overlaps(ptr, n, textlow, texthigh)) 101 + return "<kernel text>"; 102 + 103 + /* 104 + * Some architectures have virtual memory mappings with a secondary 105 + * mapping of the kernel text, i.e. there is more than one virtual 106 + * kernel address that points to the kernel image. It is usually 107 + * when there is a separate linear physical memory mapping, in that 108 + * __pa() is not just the reverse of __va(). This can be detected 109 + * and checked: 110 + */ 111 + textlow_linear = (unsigned long)__va(__pa(textlow)); 112 + /* No different mapping: we're done. */ 113 + if (textlow_linear == textlow) 114 + return NULL; 115 + 116 + /* Check the secondary mapping... */ 117 + texthigh_linear = (unsigned long)__va(__pa(texthigh)); 118 + if (overlaps(ptr, n, textlow_linear, texthigh_linear)) 119 + return "<linear kernel text>"; 120 + 121 + return NULL; 122 + } 123 + 124 + static inline const char *check_bogus_address(const void *ptr, unsigned long n) 125 + { 126 + /* Reject if object wraps past end of memory. */ 127 + if (ptr + n < ptr) 128 + return "<wrapped address>"; 129 + 130 + /* Reject if NULL or ZERO-allocation. */ 131 + if (ZERO_OR_NULL_PTR(ptr)) 132 + return "<null>"; 133 + 134 + return NULL; 135 + } 136 + 137 + static inline const char *check_heap_object(const void *ptr, unsigned long n, 138 + bool to_user) 139 + { 140 + struct page *page, *endpage; 141 + const void *end = ptr + n - 1; 142 + bool is_reserved, is_cma; 143 + 144 + /* 145 + * Some architectures (arm64) return true for virt_addr_valid() on 146 + * vmalloced addresses. Work around this by checking for vmalloc 147 + * first. 148 + */ 149 + if (is_vmalloc_addr(ptr)) 150 + return NULL; 151 + 152 + if (!virt_addr_valid(ptr)) 153 + return NULL; 154 + 155 + page = virt_to_head_page(ptr); 156 + 157 + /* Check slab allocator for flags and size. */ 158 + if (PageSlab(page)) 159 + return __check_heap_object(ptr, n, page); 160 + 161 + /* 162 + * Sometimes the kernel data regions are not marked Reserved (see 163 + * check below). And sometimes [_sdata,_edata) does not cover 164 + * rodata and/or bss, so check each range explicitly. 165 + */ 166 + 167 + /* Allow reads of kernel rodata region (if not marked as Reserved). */ 168 + if (ptr >= (const void *)__start_rodata && 169 + end <= (const void *)__end_rodata) { 170 + if (!to_user) 171 + return "<rodata>"; 172 + return NULL; 173 + } 174 + 175 + /* Allow kernel data region (if not marked as Reserved). */ 176 + if (ptr >= (const void *)_sdata && end <= (const void *)_edata) 177 + return NULL; 178 + 179 + /* Allow kernel bss region (if not marked as Reserved). */ 180 + if (ptr >= (const void *)__bss_start && 181 + end <= (const void *)__bss_stop) 182 + return NULL; 183 + 184 + /* Is the object wholly within one base page? */ 185 + if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) == 186 + ((unsigned long)end & (unsigned long)PAGE_MASK))) 187 + return NULL; 188 + 189 + /* Allow if start and end are inside the same compound page. */ 190 + endpage = virt_to_head_page(end); 191 + if (likely(endpage == page)) 192 + return NULL; 193 + 194 + /* 195 + * Reject if range is entirely either Reserved (i.e. special or 196 + * device memory), or CMA. Otherwise, reject since the object spans 197 + * several independently allocated pages. 198 + */ 199 + is_reserved = PageReserved(page); 200 + is_cma = is_migrate_cma_page(page); 201 + if (!is_reserved && !is_cma) 202 + goto reject; 203 + 204 + for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) { 205 + page = virt_to_head_page(ptr); 206 + if (is_reserved && !PageReserved(page)) 207 + goto reject; 208 + if (is_cma && !is_migrate_cma_page(page)) 209 + goto reject; 210 + } 211 + 212 + return NULL; 213 + 214 + reject: 215 + return "<spans multiple pages>"; 216 + } 217 + 218 + /* 219 + * Validates that the given object is: 220 + * - not bogus address 221 + * - known-safe heap or stack object 222 + * - not in kernel text 223 + */ 224 + void __check_object_size(const void *ptr, unsigned long n, bool to_user) 225 + { 226 + const char *err; 227 + 228 + /* Skip all tests if size is zero. */ 229 + if (!n) 230 + return; 231 + 232 + /* Check for invalid addresses. */ 233 + err = check_bogus_address(ptr, n); 234 + if (err) 235 + goto report; 236 + 237 + /* Check for bad heap object. */ 238 + err = check_heap_object(ptr, n, to_user); 239 + if (err) 240 + goto report; 241 + 242 + /* Check for bad stack object. */ 243 + switch (check_stack_object(ptr, n)) { 244 + case NOT_STACK: 245 + /* Object is not touching the current process stack. */ 246 + break; 247 + case GOOD_FRAME: 248 + case GOOD_STACK: 249 + /* 250 + * Object is either in the correct frame (when it 251 + * is possible to check) or just generally on the 252 + * process stack (when frame checking not available). 253 + */ 254 + return; 255 + default: 256 + err = "<process stack>"; 257 + goto report; 258 + } 259 + 260 + /* Check for object in kernel to avoid text exposure. */ 261 + err = check_kernel_text_object(ptr, n); 262 + if (!err) 263 + return; 264 + 265 + report: 266 + report_usercopy(ptr, n, to_user, err); 267 + } 268 + EXPORT_SYMBOL(__check_object_size);
+28
security/Kconfig
··· 118 118 this low address space will need the permission specific to the 119 119 systems running LSM. 120 120 121 + config HAVE_HARDENED_USERCOPY_ALLOCATOR 122 + bool 123 + help 124 + The heap allocator implements __check_heap_object() for 125 + validating memory ranges against heap object sizes in 126 + support of CONFIG_HARDENED_USERCOPY. 127 + 128 + config HAVE_ARCH_HARDENED_USERCOPY 129 + bool 130 + help 131 + The architecture supports CONFIG_HARDENED_USERCOPY by 132 + calling check_object_size() just before performing the 133 + userspace copies in the low level implementation of 134 + copy_to_user() and copy_from_user(). 135 + 136 + config HARDENED_USERCOPY 137 + bool "Harden memory copies between kernel and userspace" 138 + depends on HAVE_ARCH_HARDENED_USERCOPY 139 + select BUG 140 + help 141 + This option checks for obviously wrong memory regions when 142 + copying memory to/from the kernel (via copy_to_user() and 143 + copy_from_user() functions) by rejecting memory ranges that 144 + are larger than the specified heap object, span multiple 145 + separately allocates pages, are not on the process stack, 146 + or are part of the kernel text. This kills entire classes 147 + of heap overflow exploits and similar kernel memory exposures. 148 + 121 149 source security/selinux/Kconfig 122 150 source security/smack/Kconfig 123 151 source security/tomoyo/Kconfig