Merge branch 'kmemleak' of git://linux-arm.org/linux-2.6

* 'kmemleak' of git://linux-arm.org/linux-2.6:
  kmemleak: Remove alloc_bootmem annotations introduced in the past
  kmemleak: Add callbacks to the bootmem allocator
  kmemleak: Allow partial freeing of memory blocks
  kmemleak: Trace the kmalloc_large* functions in slub
  kmemleak: Scan objects allocated during a scanning episode
  kmemleak: Do not acquire scan_mutex in kmemleak_open()
  kmemleak: Remove the reported leaks number limitation
  kmemleak: Add more cond_resched() calls in the scanning thread
  kmemleak: Renice the scanning thread to +10

+185 -94
+4
include/linux/kmemleak.h
··· 27 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, 28 gfp_t gfp); 29 extern void kmemleak_free(const void *ptr); 30 extern void kmemleak_padding(const void *ptr, unsigned long offset, 31 size_t size); 32 extern void kmemleak_not_leak(const void *ptr); ··· 70 { 71 } 72 static inline void kmemleak_free(const void *ptr) 73 { 74 } 75 static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
··· 27 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, 28 gfp_t gfp); 29 extern void kmemleak_free(const void *ptr); 30 + extern void kmemleak_free_part(const void *ptr, size_t size); 31 extern void kmemleak_padding(const void *ptr, unsigned long offset, 32 size_t size); 33 extern void kmemleak_not_leak(const void *ptr); ··· 69 { 70 } 71 static inline void kmemleak_free(const void *ptr) 72 + { 73 + } 74 + static inline void kmemleak_free_part(const void *ptr, size_t size) 75 { 76 } 77 static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
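The new kmemleak_free_part() hook (with an empty inline stub for !CONFIG_DEBUG_KMEMLEAK builds, so callers need no #ifdef) lets an allocator tell kmemleak that only part of a previously registered block has been released. A minimal sketch of how the annotation API might be used for a custom pool; my_pool_base, my_pool_size and the helper names are hypothetical, not part of this patch set:

#include <linux/gfp.h>
#include <linux/kmemleak.h>
#include <linux/types.h>

/* Hypothetical pool that kmemleak cannot see on its own. */
static void *my_pool_base;
static size_t my_pool_size;

static void my_pool_register(void)
{
        /* min_count == 1: report the pool if no pointer to it is found. */
        kmemleak_alloc(my_pool_base, my_pool_size, 1, GFP_KERNEL);
}

static void my_pool_release_tail(size_t bytes)
{
        /*
         * Only the tail of the pool is handed back.  kmemleak_free_part()
         * shrinks the tracked object instead of deleting it, so the head
         * that is still in use keeps being scanned and reported.
         */
        kmemleak_free_part(my_pool_base + my_pool_size - bytes, bytes);
        my_pool_size -= bytes;
}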
+2
include/linux/slub_def.h
··· 11 #include <linux/workqueue.h> 12 #include <linux/kobject.h> 13 #include <linux/kmemtrace.h> 14 15 enum stat_item { 16 ALLOC_FASTPATH, /* Allocation from cpu slab */ ··· 234 unsigned int order = get_order(size); 235 void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order); 236 237 trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags); 238 239 return ret;
··· 11 #include <linux/workqueue.h> 12 #include <linux/kobject.h> 13 #include <linux/kmemtrace.h> 14 + #include <linux/kmemleak.h> 15 16 enum stat_item { 17 ALLOC_FASTPATH, /* Allocation from cpu slab */ ··· 233 unsigned int order = get_order(size); 234 void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order); 235 236 + kmemleak_alloc(ret, size, 1, flags); 237 trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags); 238 239 return ret;
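The <linux/kmemleak.h> include lands in slub_def.h because kmalloc_large() is an inline in this header: a kmalloc() whose size exceeds the kmalloc caches is served from this page-allocator path and was previously invisible to kmemleak. A hedged example of an allocation that the new hook now tracks; the 64 KB size and the function name are illustrative only:

#include <linux/errno.h>
#include <linux/slab.h>

static int example_large_kmalloc(void)
{
        /*
         * With SLUB, a kmalloc() too big for the kmalloc caches ends up
         * in the inline kmalloc_large() above, which now reports the
         * buffer to kmemleak.
         */
        void *buf = kmalloc(64 * 1024, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;

        /* ... use buf ... */

        kfree(buf);     /* the matching kmemleak_free() is in mm/slub.c below */
        return 0;
}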
-7
kernel/pid.c
··· 36 #include <linux/pid_namespace.h> 37 #include <linux/init_task.h> 38 #include <linux/syscalls.h> 39 - #include <linux/kmemleak.h> 40 41 #define pid_hashfn(nr, ns) \ 42 hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift) ··· 512 pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash))); 513 if (!pid_hash) 514 panic("Could not alloc pidhash!\n"); 515 - /* 516 - * pid_hash contains references to allocated struct pid objects and it 517 - * must be scanned by kmemleak to avoid false positives. 518 - */ 519 - kmemleak_alloc(pid_hash, pidhash_size * sizeof(*(pid_hash)), 0, 520 - GFP_KERNEL); 521 for (i = 0; i < pidhash_size; i++) 522 INIT_HLIST_HEAD(&pid_hash[i]); 523 }
··· 36 #include <linux/pid_namespace.h> 37 #include <linux/init_task.h> 38 #include <linux/syscalls.h> 39 40 #define pid_hashfn(nr, ns) \ 41 hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift) ··· 513 pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash))); 514 if (!pid_hash) 515 panic("Could not alloc pidhash!\n"); 516 for (i = 0; i < pidhash_size; i++) 517 INIT_HLIST_HEAD(&pid_hash[i]); 518 }
+6
mm/bootmem.c
··· 12 #include <linux/pfn.h> 13 #include <linux/bootmem.h> 14 #include <linux/module.h> 15 16 #include <asm/bug.h> 17 #include <asm/io.h> ··· 336 { 337 unsigned long start, end; 338 339 start = PFN_UP(physaddr); 340 end = PFN_DOWN(physaddr + size); 341 ··· 356 void __init free_bootmem(unsigned long addr, unsigned long size) 357 { 358 unsigned long start, end; 359 360 start = PFN_UP(addr); 361 end = PFN_DOWN(addr + size); ··· 521 region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) + 522 start_off); 523 memset(region, 0, size); 524 return region; 525 } 526
··· 12 #include <linux/pfn.h> 13 #include <linux/bootmem.h> 14 #include <linux/module.h> 15 + #include <linux/kmemleak.h> 16 17 #include <asm/bug.h> 18 #include <asm/io.h> ··· 335 { 336 unsigned long start, end; 337 338 + kmemleak_free_part(__va(physaddr), size); 339 + 340 start = PFN_UP(physaddr); 341 end = PFN_DOWN(physaddr + size); 342 ··· 353 void __init free_bootmem(unsigned long addr, unsigned long size) 354 { 355 unsigned long start, end; 356 + 357 + kmemleak_free_part(__va(addr), size); 358 359 start = PFN_UP(addr); 360 end = PFN_DOWN(addr + size); ··· 516 region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) + 517 start_off); 518 memset(region, 0, size); 519 + kmemleak_alloc(region, size, 1, 0); 520 return region; 521 } 522
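With these callbacks the bootmem allocator reports its blocks itself, and a partial free splits the tracked object rather than dropping it. A rough illustration of the resulting behaviour, assuming a hypothetical three-page boot-time allocation:

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>

/* Illustration only: the block and sizes are invented for this sketch. */
static void __init bootmem_split_example(void)
{
        void *block = alloc_bootmem(3 * PAGE_SIZE);
        /* kmemleak now tracks one object covering all three pages. */

        free_bootmem(virt_to_phys(block) + PAGE_SIZE, PAGE_SIZE);
        /*
         * kmemleak_free_part() ends up in delete_object_part(), which
         * drops the old object and re-creates two smaller ones for the
         * first and last page, so only memory that is still owned keeps
         * being scanned.
         */
}

This is also why the manual kmemleak_alloc() annotation in kernel/pid.c above could be dropped: the pid_hash table is now registered by alloc_bootmem() itself.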
+164 -72
mm/kmemleak.c
··· 103 * Kmemleak configuration and common defines. 104 */ 105 #define MAX_TRACE 16 /* stack trace length */ 106 - #define REPORTS_NR 50 /* maximum number of reported leaks */ 107 #define MSECS_MIN_AGE 5000 /* minimum object age for reporting */ 108 #define SECS_FIRST_SCAN 60 /* delay before the first scan */ 109 #define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */ 110 111 #define BYTES_PER_POINTER sizeof(void *) 112 ··· 158 #define OBJECT_REPORTED (1 << 1) 159 /* flag set to not scan the object */ 160 #define OBJECT_NO_SCAN (1 << 2) 161 162 /* the list of all allocated objects */ 163 static LIST_HEAD(object_list); ··· 198 /* protects the memory scanning, parameters and debug/kmemleak file access */ 199 static DEFINE_MUTEX(scan_mutex); 200 201 - /* number of leaks reported (for limitation purposes) */ 202 - static int reported_leaks; 203 - 204 /* 205 * Early object allocation/freeing logging. Kmemleak is initialized after the 206 * kernel allocator. However, both the kernel allocator and kmemleak may ··· 210 enum { 211 KMEMLEAK_ALLOC, 212 KMEMLEAK_FREE, 213 KMEMLEAK_NOT_LEAK, 214 KMEMLEAK_IGNORE, 215 KMEMLEAK_SCAN_AREA, ··· 272 static int color_gray(const struct kmemleak_object *object) 273 { 274 return object->min_count != -1 && object->count >= object->min_count; 275 } 276 277 /* ··· 456 INIT_HLIST_HEAD(&object->area_list); 457 spin_lock_init(&object->lock); 458 atomic_set(&object->use_count, 1); 459 - object->flags = OBJECT_ALLOCATED; 460 object->pointer = ptr; 461 object->size = size; 462 object->min_count = min_count; ··· 524 * Remove the metadata (struct kmemleak_object) for a memory block from the 525 * object_list and object_tree_root and decrement its use_count. 526 */ 527 - static void delete_object(unsigned long ptr) 528 { 529 unsigned long flags; 530 - struct kmemleak_object *object; 531 532 write_lock_irqsave(&kmemleak_lock, flags); 533 - object = lookup_object(ptr, 0); 534 - if (!object) { 535 - #ifdef DEBUG 536 - kmemleak_warn("Freeing unknown object at 0x%08lx\n", 537 - ptr); 538 - #endif 539 - write_unlock_irqrestore(&kmemleak_lock, flags); 540 - return; 541 - } 542 prio_tree_remove(&object_tree_root, &object->tree_node); 543 list_del_rcu(&object->object_list); 544 write_unlock_irqrestore(&kmemleak_lock, flags); 545 546 WARN_ON(!(object->flags & OBJECT_ALLOCATED)); 547 - WARN_ON(atomic_read(&object->use_count) < 1); 548 549 /* 550 * Locking here also ensures that the corresponding memory block ··· 546 put_object(object); 547 } 548 549 /* 550 * Make a object permanently as gray-colored so that it can no longer be 551 * reported as a leak. This is used in general to mark a false positive. ··· 768 pr_debug("%s(0x%p)\n", __func__, ptr); 769 770 if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) 771 - delete_object((unsigned long)ptr); 772 else if (atomic_read(&kmemleak_early_log)) 773 log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0); 774 } 775 EXPORT_SYMBOL_GPL(kmemleak_free); 776 777 /* 778 * Mark an already allocated memory block as a false positive. This will cause ··· 875 * found to the gray list. 
876 */ 877 static void scan_block(void *_start, void *_end, 878 - struct kmemleak_object *scanned) 879 { 880 unsigned long *ptr; 881 unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER); ··· 886 unsigned long pointer = *ptr; 887 struct kmemleak_object *object; 888 889 if (scan_should_stop()) 890 break; 891 ··· 951 goto out; 952 if (hlist_empty(&object->area_list)) 953 scan_block((void *)object->pointer, 954 - (void *)(object->pointer + object->size), object); 955 else 956 hlist_for_each_entry(area, elem, &object->area_list, node) 957 scan_block((void *)(object->pointer + area->offset), 958 (void *)(object->pointer + area->offset 959 - + area->length), object); 960 out: 961 spin_unlock_irqrestore(&object->lock, flags); 962 } ··· 973 struct task_struct *task; 974 int i; 975 int new_leaks = 0; 976 977 jiffies_last_scan = jiffies; 978 ··· 994 #endif 995 /* reset the reference count (whiten the object) */ 996 object->count = 0; 997 if (color_gray(object) && get_object(object)) 998 list_add_tail(&object->gray_list, &gray_list); 999 ··· 1003 rcu_read_unlock(); 1004 1005 /* data/bss scanning */ 1006 - scan_block(_sdata, _edata, NULL); 1007 - scan_block(__bss_start, __bss_stop, NULL); 1008 1009 #ifdef CONFIG_SMP 1010 /* per-cpu sections scanning */ 1011 for_each_possible_cpu(i) 1012 scan_block(__per_cpu_start + per_cpu_offset(i), 1013 - __per_cpu_end + per_cpu_offset(i), NULL); 1014 #endif 1015 1016 /* ··· 1032 /* only scan if page is in use */ 1033 if (page_count(page) == 0) 1034 continue; 1035 - scan_block(page, page + 1, NULL); 1036 } 1037 } 1038 ··· 1044 read_lock(&tasklist_lock); 1045 for_each_process(task) 1046 scan_block(task_stack_page(task), 1047 - task_stack_page(task) + THREAD_SIZE, NULL); 1048 read_unlock(&tasklist_lock); 1049 } 1050 ··· 1057 * kmemleak objects cannot be freed from outside the loop because their 1058 * use_count was increased. 1059 */ 1060 object = list_entry(gray_list.next, typeof(*object), gray_list); 1061 while (&object->gray_list != &gray_list) { 1062 cond_resched(); ··· 1075 1076 object = tmp; 1077 } 1078 WARN_ON(!list_empty(&gray_list)); 1079 1080 /* 1081 - * If scanning was stopped do not report any new unreferenced objects. 1082 */ 1083 - if (scan_should_stop()) 1084 return; 1085 1086 /* ··· 1139 static int first_run = 1; 1140 1141 pr_info("Automatic memory scanning thread started\n"); 1142 1143 /* 1144 * Wait before the first scan to allow the system to fully initialize. 
··· 1202 { 1203 struct kmemleak_object *object; 1204 loff_t n = *pos; 1205 1206 - if (!n) 1207 - reported_leaks = 0; 1208 - if (reported_leaks >= REPORTS_NR) 1209 - return NULL; 1210 1211 rcu_read_lock(); 1212 list_for_each_entry_rcu(object, &object_list, object_list) { ··· 1232 struct list_head *n = &prev_obj->object_list; 1233 1234 ++(*pos); 1235 - if (reported_leaks >= REPORTS_NR) 1236 - goto out; 1237 1238 rcu_read_lock(); 1239 list_for_each_continue_rcu(n, &object_list) { ··· 1240 break; 1241 } 1242 rcu_read_unlock(); 1243 - out: 1244 put_object(prev_obj); 1245 return next_obj; 1246 } ··· 1250 */ 1251 static void kmemleak_seq_stop(struct seq_file *seq, void *v) 1252 { 1253 - if (v) 1254 - put_object(v); 1255 } 1256 1257 /* ··· 1270 unsigned long flags; 1271 1272 spin_lock_irqsave(&object->lock, flags); 1273 - if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) { 1274 print_unreferenced(seq, object); 1275 - reported_leaks++; 1276 - } 1277 spin_unlock_irqrestore(&object->lock, flags); 1278 return 0; 1279 } ··· 1285 1286 static int kmemleak_open(struct inode *inode, struct file *file) 1287 { 1288 - int ret = 0; 1289 - 1290 if (!atomic_read(&kmemleak_enabled)) 1291 return -EBUSY; 1292 1293 - ret = mutex_lock_interruptible(&scan_mutex); 1294 - if (ret < 0) 1295 - goto out; 1296 - if (file->f_mode & FMODE_READ) { 1297 - ret = seq_open(file, &kmemleak_seq_ops); 1298 - if (ret < 0) 1299 - goto scan_unlock; 1300 - } 1301 - return ret; 1302 - 1303 - scan_unlock: 1304 - mutex_unlock(&scan_mutex); 1305 - out: 1306 - return ret; 1307 } 1308 1309 static int kmemleak_release(struct inode *inode, struct file *file) 1310 { 1311 - int ret = 0; 1312 - 1313 - if (file->f_mode & FMODE_READ) 1314 - seq_release(inode, file); 1315 - mutex_unlock(&scan_mutex); 1316 - 1317 - return ret; 1318 } 1319 1320 /* ··· 1313 { 1314 char buf[64]; 1315 int buf_size; 1316 - 1317 - if (!atomic_read(&kmemleak_enabled)) 1318 - return -EBUSY; 1319 1320 buf_size = min(size, (sizeof(buf) - 1)); 1321 if (strncpy_from_user(buf, user_buf, buf_size) < 0) 1322 return -EFAULT; 1323 buf[buf_size] = 0; 1324 1325 if (strncmp(buf, "off", 3) == 0) 1326 kmemleak_disable(); ··· 1336 stop_scan_thread(); 1337 else if (strncmp(buf, "scan=", 5) == 0) { 1338 unsigned long secs; 1339 - int err; 1340 1341 - err = strict_strtoul(buf + 5, 0, &secs); 1342 - if (err < 0) 1343 - return err; 1344 stop_scan_thread(); 1345 if (secs) { 1346 jiffies_scan_wait = msecs_to_jiffies(secs * 1000); ··· 1348 } else if (strncmp(buf, "scan", 4) == 0) 1349 kmemleak_scan(); 1350 else 1351 - return -EINVAL; 1352 1353 /* ignore the rest of the buffer, only one command at a time */ 1354 *ppos += size; ··· 1382 1383 rcu_read_lock(); 1384 list_for_each_entry_rcu(object, &object_list, object_list) 1385 - delete_object(object->pointer); 1386 rcu_read_unlock(); 1387 mutex_unlock(&scan_mutex); 1388 ··· 1476 break; 1477 case KMEMLEAK_FREE: 1478 kmemleak_free(log->ptr); 1479 break; 1480 case KMEMLEAK_NOT_LEAK: 1481 kmemleak_not_leak(log->ptr);
··· 103 * Kmemleak configuration and common defines. 104 */ 105 #define MAX_TRACE 16 /* stack trace length */ 106 #define MSECS_MIN_AGE 5000 /* minimum object age for reporting */ 107 #define SECS_FIRST_SCAN 60 /* delay before the first scan */ 108 #define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */ 109 + #define GRAY_LIST_PASSES 25 /* maximum number of gray list scans */ 110 111 #define BYTES_PER_POINTER sizeof(void *) 112 ··· 158 #define OBJECT_REPORTED (1 << 1) 159 /* flag set to not scan the object */ 160 #define OBJECT_NO_SCAN (1 << 2) 161 + /* flag set on newly allocated objects */ 162 + #define OBJECT_NEW (1 << 3) 163 164 /* the list of all allocated objects */ 165 static LIST_HEAD(object_list); ··· 196 /* protects the memory scanning, parameters and debug/kmemleak file access */ 197 static DEFINE_MUTEX(scan_mutex); 198 199 /* 200 * Early object allocation/freeing logging. Kmemleak is initialized after the 201 * kernel allocator. However, both the kernel allocator and kmemleak may ··· 211 enum { 212 KMEMLEAK_ALLOC, 213 KMEMLEAK_FREE, 214 + KMEMLEAK_FREE_PART, 215 KMEMLEAK_NOT_LEAK, 216 KMEMLEAK_IGNORE, 217 KMEMLEAK_SCAN_AREA, ··· 272 static int color_gray(const struct kmemleak_object *object) 273 { 274 return object->min_count != -1 && object->count >= object->min_count; 275 + } 276 + 277 + static int color_black(const struct kmemleak_object *object) 278 + { 279 + return object->min_count == -1; 280 } 281 282 /* ··· 451 INIT_HLIST_HEAD(&object->area_list); 452 spin_lock_init(&object->lock); 453 atomic_set(&object->use_count, 1); 454 + object->flags = OBJECT_ALLOCATED | OBJECT_NEW; 455 object->pointer = ptr; 456 object->size = size; 457 object->min_count = min_count; ··· 519 * Remove the metadata (struct kmemleak_object) for a memory block from the 520 * object_list and object_tree_root and decrement its use_count. 521 */ 522 + static void __delete_object(struct kmemleak_object *object) 523 { 524 unsigned long flags; 525 526 write_lock_irqsave(&kmemleak_lock, flags); 527 prio_tree_remove(&object_tree_root, &object->tree_node); 528 list_del_rcu(&object->object_list); 529 write_unlock_irqrestore(&kmemleak_lock, flags); 530 531 WARN_ON(!(object->flags & OBJECT_ALLOCATED)); 532 + WARN_ON(atomic_read(&object->use_count) < 2); 533 534 /* 535 * Locking here also ensures that the corresponding memory block ··· 551 put_object(object); 552 } 553 554 + /* 555 + * Look up the metadata (struct kmemleak_object) corresponding to ptr and 556 + * delete it. 557 + */ 558 + static void delete_object_full(unsigned long ptr) 559 + { 560 + struct kmemleak_object *object; 561 + 562 + object = find_and_get_object(ptr, 0); 563 + if (!object) { 564 + #ifdef DEBUG 565 + kmemleak_warn("Freeing unknown object at 0x%08lx\n", 566 + ptr); 567 + #endif 568 + return; 569 + } 570 + __delete_object(object); 571 + put_object(object); 572 + } 573 + 574 + /* 575 + * Look up the metadata (struct kmemleak_object) corresponding to ptr and 576 + * delete it. If the memory block is partially freed, the function may create 577 + * additional metadata for the remaining parts of the block. 
578 + */ 579 + static void delete_object_part(unsigned long ptr, size_t size) 580 + { 581 + struct kmemleak_object *object; 582 + unsigned long start, end; 583 + 584 + object = find_and_get_object(ptr, 1); 585 + if (!object) { 586 + #ifdef DEBUG 587 + kmemleak_warn("Partially freeing unknown object at 0x%08lx " 588 + "(size %zu)\n", ptr, size); 589 + #endif 590 + return; 591 + } 592 + __delete_object(object); 593 + 594 + /* 595 + * Create one or two objects that may result from the memory block 596 + * split. Note that partial freeing is only done by free_bootmem() and 597 + * this happens before kmemleak_init() is called. The path below is 598 + * only executed during early log recording in kmemleak_init(), so 599 + * GFP_KERNEL is enough. 600 + */ 601 + start = object->pointer; 602 + end = object->pointer + object->size; 603 + if (ptr > start) 604 + create_object(start, ptr - start, object->min_count, 605 + GFP_KERNEL); 606 + if (ptr + size < end) 607 + create_object(ptr + size, end - ptr - size, object->min_count, 608 + GFP_KERNEL); 609 + 610 + put_object(object); 611 + } 612 /* 613 * Make a object permanently as gray-colored so that it can no longer be 614 * reported as a leak. This is used in general to mark a false positive. ··· 715 pr_debug("%s(0x%p)\n", __func__, ptr); 716 717 if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) 718 + delete_object_full((unsigned long)ptr); 719 else if (atomic_read(&kmemleak_early_log)) 720 log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0); 721 } 722 EXPORT_SYMBOL_GPL(kmemleak_free); 723 + 724 + /* 725 + * Partial memory freeing function callback. This function is usually called 726 + * from bootmem allocator when (part of) a memory block is freed. 727 + */ 728 + void kmemleak_free_part(const void *ptr, size_t size) 729 + { 730 + pr_debug("%s(0x%p)\n", __func__, ptr); 731 + 732 + if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) 733 + delete_object_part((unsigned long)ptr, size); 734 + else if (atomic_read(&kmemleak_early_log)) 735 + log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0); 736 + } 737 + EXPORT_SYMBOL_GPL(kmemleak_free_part); 738 739 /* 740 * Mark an already allocated memory block as a false positive. This will cause ··· 807 * found to the gray list. 
808 */ 809 static void scan_block(void *_start, void *_end, 810 + struct kmemleak_object *scanned, int allow_resched) 811 { 812 unsigned long *ptr; 813 unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER); ··· 818 unsigned long pointer = *ptr; 819 struct kmemleak_object *object; 820 821 + if (allow_resched) 822 + cond_resched(); 823 if (scan_should_stop()) 824 break; 825 ··· 881 goto out; 882 if (hlist_empty(&object->area_list)) 883 scan_block((void *)object->pointer, 884 + (void *)(object->pointer + object->size), object, 0); 885 else 886 hlist_for_each_entry(area, elem, &object->area_list, node) 887 scan_block((void *)(object->pointer + area->offset), 888 (void *)(object->pointer + area->offset 889 + + area->length), object, 0); 890 out: 891 spin_unlock_irqrestore(&object->lock, flags); 892 } ··· 903 struct task_struct *task; 904 int i; 905 int new_leaks = 0; 906 + int gray_list_pass = 0; 907 908 jiffies_last_scan = jiffies; 909 ··· 923 #endif 924 /* reset the reference count (whiten the object) */ 925 object->count = 0; 926 + object->flags &= ~OBJECT_NEW; 927 if (color_gray(object) && get_object(object)) 928 list_add_tail(&object->gray_list, &gray_list); 929 ··· 931 rcu_read_unlock(); 932 933 /* data/bss scanning */ 934 + scan_block(_sdata, _edata, NULL, 1); 935 + scan_block(__bss_start, __bss_stop, NULL, 1); 936 937 #ifdef CONFIG_SMP 938 /* per-cpu sections scanning */ 939 for_each_possible_cpu(i) 940 scan_block(__per_cpu_start + per_cpu_offset(i), 941 + __per_cpu_end + per_cpu_offset(i), NULL, 1); 942 #endif 943 944 /* ··· 960 /* only scan if page is in use */ 961 if (page_count(page) == 0) 962 continue; 963 + scan_block(page, page + 1, NULL, 1); 964 } 965 } 966 ··· 972 read_lock(&tasklist_lock); 973 for_each_process(task) 974 scan_block(task_stack_page(task), 975 + task_stack_page(task) + THREAD_SIZE, 976 + NULL, 0); 977 read_unlock(&tasklist_lock); 978 } 979 ··· 984 * kmemleak objects cannot be freed from outside the loop because their 985 * use_count was increased. 986 */ 987 + repeat: 988 object = list_entry(gray_list.next, typeof(*object), gray_list); 989 while (&object->gray_list != &gray_list) { 990 cond_resched(); ··· 1001 1002 object = tmp; 1003 } 1004 + 1005 + if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES) 1006 + goto scan_end; 1007 + 1008 + /* 1009 + * Check for new objects allocated during this scanning and add them 1010 + * to the gray list. 1011 + */ 1012 + rcu_read_lock(); 1013 + list_for_each_entry_rcu(object, &object_list, object_list) { 1014 + spin_lock_irqsave(&object->lock, flags); 1015 + if ((object->flags & OBJECT_NEW) && !color_black(object) && 1016 + get_object(object)) { 1017 + object->flags &= ~OBJECT_NEW; 1018 + list_add_tail(&object->gray_list, &gray_list); 1019 + } 1020 + spin_unlock_irqrestore(&object->lock, flags); 1021 + } 1022 + rcu_read_unlock(); 1023 + 1024 + if (!list_empty(&gray_list)) 1025 + goto repeat; 1026 + 1027 + scan_end: 1028 WARN_ON(!list_empty(&gray_list)); 1029 1030 /* 1031 + * If scanning was stopped or new objects were being allocated at a 1032 + * higher rate than gray list scanning, do not report any new 1033 + * unreferenced objects. 1034 */ 1035 + if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES) 1036 return; 1037 1038 /* ··· 1039 static int first_run = 1; 1040 1041 pr_info("Automatic memory scanning thread started\n"); 1042 + set_user_nice(current, 10); 1043 1044 /* 1045 * Wait before the first scan to allow the system to fully initialize. 
··· 1101 { 1102 struct kmemleak_object *object; 1103 loff_t n = *pos; 1104 + int err; 1105 1106 + err = mutex_lock_interruptible(&scan_mutex); 1107 + if (err < 0) 1108 + return ERR_PTR(err); 1109 1110 rcu_read_lock(); 1111 list_for_each_entry_rcu(object, &object_list, object_list) { ··· 1131 struct list_head *n = &prev_obj->object_list; 1132 1133 ++(*pos); 1134 1135 rcu_read_lock(); 1136 list_for_each_continue_rcu(n, &object_list) { ··· 1141 break; 1142 } 1143 rcu_read_unlock(); 1144 + 1145 put_object(prev_obj); 1146 return next_obj; 1147 } ··· 1151 */ 1152 static void kmemleak_seq_stop(struct seq_file *seq, void *v) 1153 { 1154 + if (!IS_ERR(v)) { 1155 + /* 1156 + * kmemleak_seq_start may return ERR_PTR if the scan_mutex 1157 + * waiting was interrupted, so only release it if !IS_ERR. 1158 + */ 1159 + mutex_unlock(&scan_mutex); 1160 + if (v) 1161 + put_object(v); 1162 + } 1163 } 1164 1165 /* ··· 1164 unsigned long flags; 1165 1166 spin_lock_irqsave(&object->lock, flags); 1167 + if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) 1168 print_unreferenced(seq, object); 1169 spin_unlock_irqrestore(&object->lock, flags); 1170 return 0; 1171 } ··· 1181 1182 static int kmemleak_open(struct inode *inode, struct file *file) 1183 { 1184 if (!atomic_read(&kmemleak_enabled)) 1185 return -EBUSY; 1186 1187 + return seq_open(file, &kmemleak_seq_ops); 1188 } 1189 1190 static int kmemleak_release(struct inode *inode, struct file *file) 1191 { 1192 + return seq_release(inode, file); 1193 } 1194 1195 /* ··· 1230 { 1231 char buf[64]; 1232 int buf_size; 1233 + int ret; 1234 1235 buf_size = min(size, (sizeof(buf) - 1)); 1236 if (strncpy_from_user(buf, user_buf, buf_size) < 0) 1237 return -EFAULT; 1238 buf[buf_size] = 0; 1239 + 1240 + ret = mutex_lock_interruptible(&scan_mutex); 1241 + if (ret < 0) 1242 + return ret; 1243 1244 if (strncmp(buf, "off", 3) == 0) 1245 kmemleak_disable(); ··· 1251 stop_scan_thread(); 1252 else if (strncmp(buf, "scan=", 5) == 0) { 1253 unsigned long secs; 1254 1255 + ret = strict_strtoul(buf + 5, 0, &secs); 1256 + if (ret < 0) 1257 + goto out; 1258 stop_scan_thread(); 1259 if (secs) { 1260 jiffies_scan_wait = msecs_to_jiffies(secs * 1000); ··· 1264 } else if (strncmp(buf, "scan", 4) == 0) 1265 kmemleak_scan(); 1266 else 1267 + ret = -EINVAL; 1268 + 1269 + out: 1270 + mutex_unlock(&scan_mutex); 1271 + if (ret < 0) 1272 + return ret; 1273 1274 /* ignore the rest of the buffer, only one command at a time */ 1275 *ppos += size; ··· 1293 1294 rcu_read_lock(); 1295 list_for_each_entry_rcu(object, &object_list, object_list) 1296 + delete_object_full(object->pointer); 1297 rcu_read_unlock(); 1298 mutex_unlock(&scan_mutex); 1299 ··· 1387 break; 1388 case KMEMLEAK_FREE: 1389 kmemleak_free(log->ptr); 1390 + break; 1391 + case KMEMLEAK_FREE_PART: 1392 + kmemleak_free_part(log->ptr, log->size); 1393 break; 1394 case KMEMLEAK_NOT_LEAK: 1395 kmemleak_not_leak(log->ptr);
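The subtle part of the kmemleak.c rework is moving scan_mutex from open()/release() into the seq_file ->start()/->stop() callbacks: start() takes the mutex interruptibly and may return ERR_PTR(), so stop() must release it only when start() actually acquired it. A self-contained sketch of that locking pattern with placeholder names (my_mutex, my_items and friends are not kmemleak symbols):

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>

static DEFINE_MUTEX(my_mutex);
static const char *my_items[] = { "first", "second", "third" };

static void *my_seq_start(struct seq_file *seq, loff_t *pos)
{
        int err = mutex_lock_interruptible(&my_mutex);

        if (err < 0)
                return ERR_PTR(err);    /* interrupted: mutex not held */
        if (*pos >= ARRAY_SIZE(my_items))
                return NULL;            /* EOF: stop() still unlocks */
        return &my_items[*pos];
}

static void *my_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return *pos < ARRAY_SIZE(my_items) ? &my_items[*pos] : NULL;
}

static void my_seq_stop(struct seq_file *seq, void *v)
{
        /*
         * ->stop() also runs after a failed ->start(), so only release
         * the mutex when start() really took it (v is not an ERR_PTR).
         */
        if (!IS_ERR(v))
                mutex_unlock(&my_mutex);
}

static int my_seq_show(struct seq_file *seq, void *v)
{
        seq_printf(seq, "%s\n", *(const char **)v);
        return 0;
}

static const struct seq_operations my_seq_ops = {
        .start  = my_seq_start,
        .next   = my_seq_next,
        .stop   = my_seq_stop,
        .show   = my_seq_show,
};

Taking the lock per read in start()/stop() keeps the acquire and release within the same syscall, and the interruptible variant lets a reader waiting behind a long scan be interrupted by a signal.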
+3 -11
mm/page_alloc.c
··· 4745 * some pages at the end of hash table which 4746 * alloc_pages_exact() automatically does 4747 */ 4748 - if (get_order(size) < MAX_ORDER) 4749 table = alloc_pages_exact(size, GFP_ATOMIC); 4750 } 4751 } while (!table && size > PAGE_SIZE && --log2qty); 4752 ··· 4765 *_hash_shift = log2qty; 4766 if (_hash_mask) 4767 *_hash_mask = (1 << log2qty) - 1; 4768 - 4769 - /* 4770 - * If hashdist is set, the table allocation is done with __vmalloc() 4771 - * which invokes the kmemleak_alloc() callback. This function may also 4772 - * be called before the slab and kmemleak are initialised when 4773 - * kmemleak simply buffers the request to be executed later 4774 - * (GFP_ATOMIC flag ignored in this case). 4775 - */ 4776 - if (!hashdist) 4777 - kmemleak_alloc(table, size, 1, GFP_ATOMIC); 4778 4779 return table; 4780 }
··· 4745 * some pages at the end of hash table which 4746 * alloc_pages_exact() automatically does 4747 */ 4748 + if (get_order(size) < MAX_ORDER) { 4749 table = alloc_pages_exact(size, GFP_ATOMIC); 4750 + kmemleak_alloc(table, size, 1, GFP_ATOMIC); 4751 + } 4752 } 4753 } while (!table && size > PAGE_SIZE && --log2qty); 4754 ··· 4763 *_hash_shift = log2qty; 4764 if (_hash_mask) 4765 *_hash_mask = (1 << log2qty) - 1; 4766 4767 return table; 4768 }
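The comment deleted above explains the asymmetry that remains: when hashdist routes the table through __vmalloc(), the vmalloc code already reports the block, so only the alloc_pages_exact() path needs an explicit kmemleak_alloc(), and it now sits right next to the allocation. A hedged sketch of the same pattern for a hypothetical dual-path allocator (my_big_table_alloc is not a kernel function):

#include <linux/gfp.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

static void *my_big_table_alloc(size_t size, bool use_vmalloc)
{
        void *table;

        if (use_vmalloc)
                return vmalloc(size);   /* tracked inside vmalloc itself */

        table = alloc_pages_exact(size, GFP_ATOMIC);
        if (table)
                kmemleak_alloc(table, size, 1, GFP_ATOMIC);
        return table;
}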
+6 -4
mm/slub.c
··· 21 #include <linux/kmemcheck.h> 22 #include <linux/cpu.h> 23 #include <linux/cpuset.h> 24 - #include <linux/kmemleak.h> 25 #include <linux/mempolicy.h> 26 #include <linux/ctype.h> 27 #include <linux/debugobjects.h> ··· 2834 static void *kmalloc_large_node(size_t size, gfp_t flags, int node) 2835 { 2836 struct page *page; 2837 2838 flags |= __GFP_COMP | __GFP_NOTRACK; 2839 page = alloc_pages_node(node, flags, get_order(size)); 2840 if (page) 2841 - return page_address(page); 2842 - else 2843 - return NULL; 2844 } 2845 2846 #ifdef CONFIG_NUMA ··· 2927 page = virt_to_head_page(x); 2928 if (unlikely(!PageSlab(page))) { 2929 BUG_ON(!PageCompound(page)); 2930 put_page(page); 2931 return; 2932 }
··· 21 #include <linux/kmemcheck.h> 22 #include <linux/cpu.h> 23 #include <linux/cpuset.h> 24 #include <linux/mempolicy.h> 25 #include <linux/ctype.h> 26 #include <linux/debugobjects.h> ··· 2835 static void *kmalloc_large_node(size_t size, gfp_t flags, int node) 2836 { 2837 struct page *page; 2838 + void *ptr = NULL; 2839 2840 flags |= __GFP_COMP | __GFP_NOTRACK; 2841 page = alloc_pages_node(node, flags, get_order(size)); 2842 if (page) 2843 + ptr = page_address(page); 2844 + 2845 + kmemleak_alloc(ptr, size, 1, flags); 2846 + return ptr; 2847 } 2848 2849 #ifdef CONFIG_NUMA ··· 2926 page = virt_to_head_page(x); 2927 if (unlikely(!PageSlab(page))) { 2928 BUG_ON(!PageCompound(page)); 2929 + kmemleak_free(x); 2930 put_page(page); 2931 return; 2932 }
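Together with the kmalloc_large() hook in slub_def.h, the kmemleak_alloc() in kmalloc_large_node() and the kmemleak_free() in the compound-page branch of kfree() give large kmalloc buffers the same leak coverage as slab objects. A contrived example of the kind of bug this makes visible; the function is hypothetical:

#include <linux/slab.h>

static void example_large_leak(void)
{
        /* Goes through kmalloc_large(); now registered with kmemleak. */
        void *buf = kmalloc(64 * 1024, GFP_KERNEL);

        if (!buf)
                return;

        /*
         * Dropping the only reference without kfree() leaves the object
         * unreferenced on the next scan, so it would be reported in
         * /sys/kernel/debug/kmemleak.  A correct caller would kfree(buf)
         * here, which reaches the new kmemleak_free() hook above.
         */
        buf = NULL;
}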