Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.19-rc3 · 903 lines · 24 kB

/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;
	kasan_unpoison_shadow(sp, size);
}

/*
 * All functions below are always inlined so the compiler can perform better
 * optimizations in each of __asan_loadX/__asan_storeX depending on the
 * memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
						unsigned long size)
{
	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);

	/*
	 * Access crosses 8(shadow size)-byte boundary. Such access maps
	 * into 2 shadow bytes, so we need to check them both.
	 */
	if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

	return memory_is_poisoned_1(addr + size - 1);
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	/* Unaligned 16-bytes access maps into 3 shadow bytes. */
	if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
		return *shadow_addr || memory_is_poisoned_1(addr + 15);

	return *shadow_addr;
}

static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}

static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

static __always_inline void check_memory_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, ret_ip);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
				size_t size, bool write,
				unsigned long ret_ip)
{
	check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_check_read(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int orig_size = *size;
	int redzone_adjust;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}
	redzone_adjust = optimal_redzone(cache->object_size) -
			 (*size - cache->object_size);

	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
			max(*size, cache->object_size +
					optimal_redzone(cache->object_size)));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	if (!__kmem_cache_empty(cache))
		quarantine_remove_cache(cache);
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_kmalloc(cache, object, cache->object_size, flags);
}

static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	s8 shadow_byte;
	unsigned long rounded_up_size;

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
		kasan_report_invalid_free(object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if (!quarantine || unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);
	return true;
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
}

void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
}
EXPORT_SYMBOL(kasan_kmalloc);

void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size, flags);
	else
		kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}

int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, false, _RET_IP_);\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison large objects when they go out of scope. */
void __asan_poison_stack_memory(const void *addr, size_t size)
{
	/*
	 * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded
	 * by redzones, so we simply round up size to simplify logic.
	 */
	kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
			    KASAN_USE_AFTER_SCOPE);
}
EXPORT_SYMBOL(__asan_poison_stack_memory);

/* Emitted by compiler to unpoison large objects when they go into scope. */
void __asan_unpoison_stack_memory(const void *addr, size_t size)
{
	kasan_unpoison_shadow(addr, size);
}
EXPORT_SYMBOL(__asan_unpoison_stack_memory);

/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(unsigned long addr, size_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

	WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));

	kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
			      size - rounded_down_size);
	kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
			KASAN_ALLOCA_LEFT);
	kasan_poison_shadow(right_redzone,
			padding_size + KASAN_ALLOCA_REDZONE_SIZE,
			KASAN_ALLOCA_RIGHT);
}
EXPORT_SYMBOL(__asan_alloca_poison);

/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
{
	if (unlikely(!stack_top || stack_top > stack_bottom))
		return;

	kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte)					\
	void __asan_set_shadow_##byte(const void *addr, size_t size)	\
	{								\
		__memset((void *)addr, 0x##byte, size);			\
	}								\
	EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);

#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * We can't use pud_large() or pud_huge(), the first one is
	 * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse
	 * pud_bad(), if pud is bad then it's bad because it's huge.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(*pte);
}

static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is already mapped then it must have been
		 * mapped during boot. This can happen if we are onlining
		 * previously offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free the shadow;
		 * a non-NULL result from find_vm_area() tells us that it was
		 * indeed the latter case.
		 *
		 * Freeing shadow mapped during boot by kasan_init() is not
		 * currently possible because the code to do that hasn't been
		 * written yet, so we just leak the memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif
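
A minimal userspace sketch of the shadow encoding this file relies on may help when reading the checks above. It is illustrative only and is not part of kasan.c: it assumes the generic KASAN layout used here (one shadow byte per 8-byte granule, i.e. KASAN_SHADOW_SCALE_SHIFT == 3), and the names POOL, SHADOW, shadow_of(), unpoison() and poisoned_1() are hypothetical stand-ins for real memory, the shadow region, kasan_mem_to_shadow(), kasan_unpoison_shadow() and memory_is_poisoned_1().

/*
 * Illustrative sketch only -- not kernel code. Shadow byte meaning:
 * 0 = all 8 bytes of the granule accessible, 1..7 = only the first N
 * bytes accessible, negative = redzone/freed memory.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SCALE_SHIFT	3			/* cf. KASAN_SHADOW_SCALE_SHIFT */
#define SCALE_SIZE	(1UL << SCALE_SHIFT)	/* 8 bytes per shadow byte */
#define SCALE_MASK	(SCALE_SIZE - 1)
#define POOL_SIZE	64

static uint8_t POOL[POOL_SIZE];			/* stand-in for real memory */
static int8_t SHADOW[POOL_SIZE / SCALE_SIZE];	/* stand-in for the shadow region */

static int8_t *shadow_of(const void *addr)	/* cf. kasan_mem_to_shadow() */
{
	size_t off = (const uint8_t *)addr - POOL;

	return &SHADOW[off >> SCALE_SHIFT];
}

/* cf. kasan_unpoison_shadow(): clear whole granules, then mark the tail. */
static void unpoison(const void *addr, size_t size)
{
	memset(shadow_of(addr), 0, size >> SCALE_SHIFT);
	if (size & SCALE_MASK)
		*shadow_of((const uint8_t *)addr + size) = size & SCALE_MASK;
}

/* cf. memory_is_poisoned_1(): a nonzero shadow byte needs a closer look. */
static bool poisoned_1(const void *addr)
{
	int8_t s = *shadow_of(addr);

	if (s) {
		/*
		 * The kernel uses addr & KASAN_SHADOW_MASK; the offset within
		 * POOL is used here so the fake pool's alignment doesn't matter.
		 */
		int8_t last_byte = ((const uint8_t *)addr - POOL) & SCALE_MASK;

		return last_byte >= s;	/* negative s (redzone/free) always trips */
	}
	return false;
}

int main(void)
{
	memset(SHADOW, 0xFC, sizeof(SHADOW));	/* poison everything up front */
	unpoison(POOL, 13);			/* "allocate" a 13-byte object */

	/* Shadow is now [0, 5, -4, ...]: bytes 0..7 valid, bytes 8..12 valid. */
	printf("byte 12: %d\n", poisoned_1(POOL + 12));	/* 0 - in bounds */
	printf("byte 13: %d\n", poisoned_1(POOL + 13));	/* 1 - past the partial granule */
	printf("byte 20: %d\n", poisoned_1(POOL + 20));	/* 1 - redzone marker */
	return 0;
}

The same encoding explains why kasan_kmalloc() above only needs kasan_unpoison_shadow() for the object plus kasan_poison_shadow() for the rounded-up redzone: the partial-granule byte written by the unpoison step already bounds accesses within the last granule of the object.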