
kmemcheck: stop using GFP_NOTRACK and SLAB_NOTRACK

Convert all allocations that used a NOTRACK flag to stop using it.

Link: http://lkml.kernel.org/r/20171007030159.22241-3-alexander.levin@verizon.com
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Tim Hansen <devtimhansen@gmail.com>
Cc: Vegard Nossum <vegardno@ifi.uio.no>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Levin, Alexander (Sasha Levin) and committed by Linus Torvalds
75f296d9 49502766
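
The conversion below is mechanical: in builds without CONFIG_KMEMCHECK, __GFP_NOTRACK and SLAB_NOTRACK were already defined to 0, so OR-ing them into an allocation mask never changed it, and every caller can simply drop them. A minimal userspace sketch of that pattern follows; the MY_GFP_* names and bit values are invented for illustration and are not the kernel's definitions.

/*
 * Userspace sketch (not kernel code): with the feature compiled out,
 * the flag macro expands to 0, so removing it from call sites is a
 * no-op. Flag values here are illustrative only.
 */
#include <stdio.h>

#define MY_GFP_KERNEL 0x01u
#define MY_GFP_ZERO   0x02u

#ifdef CONFIG_KMEMCHECK
# define MY_GFP_NOTRACK 0x04u
#else
# define MY_GFP_NOTRACK 0x00u /* feature disabled: flag is a no-op */
#endif

int main(void)
{
        unsigned int old_mask = MY_GFP_KERNEL | MY_GFP_NOTRACK | MY_GFP_ZERO;
        unsigned int new_mask = MY_GFP_KERNEL | MY_GFP_ZERO;

        /* Identical when the feature is off, which is why each hunk in
         * this commit can drop the flag without changing behavior. */
        printf("old=%#x new=%#x same=%d\n", old_mask, new_mask,
               old_mask == new_mask);
        return 0;
}
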

+36 -48
+1 -1
arch/arm/include/asm/pgalloc.h
···
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)

 static inline void clean_pte_table(pte_t *pte)
 {
+1 -1
arch/arm64/include/asm/pgalloc.h
···

 #define check_pgt_cache() do { } while (0)

-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
 #define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))

 #if CONFIG_PGTABLE_LEVELS > 2
+1 -1
arch/powerpc/include/asm/pgalloc.h
···
 }
 #endif /* MODULE */

-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)

 #ifdef CONFIG_PPC_BOOK3S
 #include <asm/book3s/pgalloc.h>
+2 -2
arch/sh/kernel/dwarf.c
···

         dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
                         sizeof(struct dwarf_frame), 0,
-                        SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+                        SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);

         dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
                         sizeof(struct dwarf_reg), 0,
-                        SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+                        SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);

         dwarf_frame_pool = mempool_create_slab_pool(DWARF_FRAME_MIN_REQ,
                         dwarf_frame_cachep);
+1 -1
arch/sh/kernel/process.c
···

         task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
                         __alignof__(union thread_xstate),
-                        SLAB_PANIC | SLAB_NOTRACK, NULL);
+                        SLAB_PANIC, NULL);
 }

 #ifdef CONFIG_SH_FPU_EMU
+2 -2
arch/sparc/mm/init_64.c
···
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                             unsigned long address)
 {
-        struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+        struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
         pte_t *pte = NULL;

         if (page)
···
 pgtable_t pte_alloc_one(struct mm_struct *mm,
                         unsigned long address)
 {
-        struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+        struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
         if (!page)
                 return NULL;
         if (!pgtable_page_ctor(page)) {
+1 -1
arch/unicore32/include/asm/pgalloc.h
···
 #define pgd_alloc(mm) get_pgd_slow(mm)
 #define pgd_free(mm, pgd) free_pgd_slow(mm, pgd)

-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)

 /*
  * Allocate one PTE table.
+1 -1
arch/x86/kernel/espfix_64.c
···
 # error "Need more virtual address space for the ESPFIX hack"
 #endif

-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)

 /* This contains the *bottom* address of the espfix stack */
 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
+1 -2
arch/x86/mm/init.c
···
                 unsigned int order;

                 order = get_order((unsigned long)num << PAGE_SHIFT);
-                return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK |
-                                                __GFP_ZERO, order);
+                return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
         }

         if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
+1 -1
arch/x86/mm/init_64.c
···
         void *ptr;

         if (after_bootmem)
-                ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
+                ptr = (void *) get_zeroed_page(GFP_ATOMIC);
         else
                 ptr = alloc_bootmem_pages(PAGE_SIZE);

+5 -5
arch/x86/mm/pageattr.c
···

         if (!debug_pagealloc_enabled())
                 spin_unlock(&cpa_lock);
-        base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
+        base = alloc_pages(GFP_KERNEL, 0);
         if (!debug_pagealloc_enabled())
                 spin_lock(&cpa_lock);
         if (!base)
···

 static int alloc_pte_page(pmd_t *pmd)
 {
-        pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+        pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
         if (!pte)
                 return -1;

···

 static int alloc_pmd_page(pud_t *pud)
 {
-        pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+        pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
         if (!pmd)
                 return -1;

···
         pgd_entry = cpa->pgd + pgd_index(addr);

         if (pgd_none(*pgd_entry)) {
-                p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+                p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
                 if (!p4d)
                         return -1;

···
          */
         p4d = p4d_offset(pgd_entry, addr);
         if (p4d_none(*p4d)) {
-                pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+                pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
                 if (!pud)
                         return -1;

+1 -1
arch/x86/mm/pgtable.c
···
 #include <asm/fixmap.h>
 #include <asm/mtrr.h>

-#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)

 #ifdef CONFIG_HIGHPTE
 #define PGALLOC_USER_GFP __GFP_HIGHMEM
+1 -1
arch/x86/platform/efi/efi_64.c
···
         if (efi_enabled(EFI_OLD_MEMMAP))
                 return 0;

-        gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
+        gfp_mask = GFP_KERNEL | __GFP_ZERO;
         efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
         if (!efi_pgd)
                 return -ENOMEM;
+1 -6
crypto/xor.c
···
                 goto out;
         }

-        /*
-         * Note: Since the memory is not actually used for _anything_ but to
-         * test the XOR speed, we don't really want kmemcheck to warn about
-         * reading uninitialized bytes here.
-         */
-        b1 = (void *) __get_free_pages(GFP_KERNEL | __GFP_NOTRACK, 2);
+        b1 = (void *) __get_free_pages(GFP_KERNEL, 2);
         if (!b1) {
                 printk(KERN_WARNING "xor: Yikes! No memory available.\n");
                 return -ENOMEM;
+2 -3
include/linux/thread_info.h
···
 #endif

 #if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK)
-# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \
-                         __GFP_ZERO)
+# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
 #else
-# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK)
+# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT)
 #endif

 /*
+1 -2
init/do_mounts.c
···

 void __init mount_block_root(char *name, int flags)
 {
-        struct page *page = alloc_page(GFP_KERNEL |
-                                       __GFP_NOTRACK_FALSE_POSITIVE);
+        struct page *page = alloc_page(GFP_KERNEL);
         char *fs_names = page_address(page);
         char *p;
 #ifdef CONFIG_BLOCK
+6 -6
kernel/fork.c
···
         /* create a slab on which task_structs can be allocated */
         task_struct_cachep = kmem_cache_create("task_struct",
                         arch_task_struct_size, align,
-                        SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, NULL);
+                        SLAB_PANIC|SLAB_ACCOUNT, NULL);
 #endif

         /* do the arch specific task caches init */
···
         sighand_cachep = kmem_cache_create("sighand_cache",
                         sizeof(struct sighand_struct), 0,
                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
-                        SLAB_NOTRACK|SLAB_ACCOUNT, sighand_ctor);
+                        SLAB_ACCOUNT, sighand_ctor);
         signal_cachep = kmem_cache_create("signal_cache",
                         sizeof(struct signal_struct), 0,
-                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
+                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
                         NULL);
         files_cachep = kmem_cache_create("files_cache",
                         sizeof(struct files_struct), 0,
-                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
+                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
                         NULL);
         fs_cachep = kmem_cache_create("fs_cache",
                         sizeof(struct fs_struct), 0,
-                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
+                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
                         NULL);
         /*
          * FIXME! The "sizeof(struct mm_struct)" currently includes the
···
          */
         mm_cachep = kmem_cache_create("mm_struct",
                         sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
-                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
+                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
                         NULL);
         vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
         mmap_init();
+1 -2
kernel/signal.c
···
         else
                 override_rlimit = 0;

-        q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
-                             override_rlimit);
+        q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
         if (q) {
                 list_add_tail(&q->list, &pending->list);
                 switch ((unsigned long) info) {
+1 -1
mm/kmemcheck.c
···
          * With kmemcheck enabled, we need to allocate a memory area for the
          * shadow bits as well.
          */
-        shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
+        shadow = alloc_pages_node(node, flags, order);
         if (!shadow) {
                 if (printk_ratelimit())
                         pr_err("kmemcheck: failed to allocate shadow bitmap\n");
+1 -1
mm/slab.c
···

         flags |= cachep->allocflags;

-        page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
+        page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
         if (!page) {
                 slab_out_of_memory(cachep, flags, nodeid);
                 return NULL;
+2 -3
mm/slab.h
···
 #if defined(CONFIG_SLAB)
 #define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
                           SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
-                          SLAB_NOTRACK | SLAB_ACCOUNT)
+                          SLAB_ACCOUNT)
 #elif defined(CONFIG_SLUB)
 #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
-                          SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
+                          SLAB_TEMPORARY | SLAB_ACCOUNT)
 #else
 #define SLAB_CACHE_FLAGS (0)
 #endif
···
                               SLAB_NOLEAKTRACE | \
                               SLAB_RECLAIM_ACCOUNT | \
                               SLAB_TEMPORARY | \
-                              SLAB_NOTRACK | \
                               SLAB_ACCOUNT)

 int __kmem_cache_shutdown(struct kmem_cache *);
+1 -1
mm/slab_common.c
···
                 SLAB_FAILSLAB | SLAB_KASAN)

 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
-                         SLAB_NOTRACK | SLAB_ACCOUNT)
+                         SLAB_ACCOUNT)

 /*
  * Merge control. If this is set then no merging of slab caches will occur.
+1 -3
mm/slub.c
···
         struct page *page;
         int order = oo_order(oo);

-        flags |= __GFP_NOTRACK;
-
         if (node == NUMA_NO_NODE)
                 page = alloc_pages(flags, order);
         else
···
         struct page *page;
         void *ptr = NULL;

-        flags |= __GFP_COMP | __GFP_NOTRACK;
+        flags |= __GFP_COMP;
         page = alloc_pages_node(node, flags, get_order(size));
         if (page)
                 ptr = page_address(page);