Merge branch 'kmemleak' of git://linux-arm.org/linux-2.6

* 'kmemleak' of git://linux-arm.org/linux-2.6:
  kmemleak: Remove alloc_bootmem annotations introduced in the past
  kmemleak: Add callbacks to the bootmem allocator
  kmemleak: Allow partial freeing of memory blocks
  kmemleak: Trace the kmalloc_large* functions in slub
  kmemleak: Scan objects allocated during a scanning episode
  kmemleak: Do not acquire scan_mutex in kmemleak_open()
  kmemleak: Remove the reported leaks number limitation
  kmemleak: Add more cond_resched() calls in the scanning thread
  kmemleak: Renice the scanning thread to +10

 7 files changed, 185 insertions(+), 94 deletions(-)
include/linux/kmemleak.h (+4)
···
 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
                            gfp_t gfp);
 extern void kmemleak_free(const void *ptr);
+extern void kmemleak_free_part(const void *ptr, size_t size);
 extern void kmemleak_padding(const void *ptr, unsigned long offset,
                              size_t size);
 extern void kmemleak_not_leak(const void *ptr);
···
 {
 }
 static inline void kmemleak_free(const void *ptr)
+{
+}
+static inline void kmemleak_free_part(const void *ptr, size_t size)
 {
 }
 static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
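
For orientation, a hedged sketch (not code from this merge) of how a caller that hands out raw pages would pair the existing kmemleak_alloc() hook with the new kmemleak_free_part() callback. demo_track() and demo_trim() are hypothetical, and the actual release of the trimmed memory is elided; only the kmemleak bookkeeping is shown.

#include <linux/kmemleak.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static void *demo_track(size_t size)
{
        void *block = (void *)__get_free_pages(GFP_KERNEL, get_order(size));

        if (block)
                /* min_count == 1: report the block if no reference is found */
                kmemleak_alloc(block, size, 1, GFP_KERNEL);
        return block;
}

static void demo_trim(void *block, size_t size, size_t keep)
{
        /* only the first 'keep' bytes stay allocated; shrink the metadata */
        kmemleak_free_part(block + keep, size - keep);
}
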
include/linux/slub_def.h (+2)
···
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
 #include <linux/kmemtrace.h>
+#include <linux/kmemleak.h>
 
 enum stat_item {
         ALLOC_FASTPATH, /* Allocation from cpu slab */
···
         unsigned int order = get_order(size);
         void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
 
+        kmemleak_alloc(ret, size, 1, flags);
         trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
 
         return ret;
kernel/pid.c (-7)
···
 #include <linux/pid_namespace.h>
 #include <linux/init_task.h>
 #include <linux/syscalls.h>
-#include <linux/kmemleak.h>
 
 #define pid_hashfn(nr, ns) \
         hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
···
         pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
         if (!pid_hash)
                 panic("Could not alloc pidhash!\n");
-        /*
-         * pid_hash contains references to allocated struct pid objects and it
-         * must be scanned by kmemleak to avoid false positives.
-         */
-        kmemleak_alloc(pid_hash, pidhash_size * sizeof(*(pid_hash)), 0,
-                       GFP_KERNEL);
         for (i = 0; i < pidhash_size; i++)
                 INIT_HLIST_HEAD(&pid_hash[i]);
 }
mm/bootmem.c (+6)
···
 #include <linux/pfn.h>
 #include <linux/bootmem.h>
 #include <linux/module.h>
+#include <linux/kmemleak.h>
 
 #include <asm/bug.h>
 #include <asm/io.h>
···
 {
         unsigned long start, end;
 
+        kmemleak_free_part(__va(physaddr), size);
+
         start = PFN_UP(physaddr);
         end = PFN_DOWN(physaddr + size);
···
 void __init free_bootmem(unsigned long addr, unsigned long size)
 {
         unsigned long start, end;
+
+        kmemleak_free_part(__va(addr), size);
 
         start = PFN_UP(addr);
         end = PFN_DOWN(addr + size);
···
         region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
                         start_off);
         memset(region, 0, size);
+        kmemleak_alloc(region, size, 1, 0);
         return region;
 }
 
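
The reason the bootmem allocator needs kmemleak_free_part() is that early boot code may give back only the unused tail of an allocation. A hedged illustration of that pattern (setup_boot_table() and the sizes are hypothetical; alloc_bootmem() panics on failure, so no NULL check is shown):

#include <linux/bootmem.h>
#include <linux/mm.h>

static void __init setup_boot_table(void)
{
        /* tracked by kmemleak through the alloc_bootmem_core() hook above */
        void *table = alloc_bootmem(4 * PAGE_SIZE);
        unsigned long used = PAGE_SIZE;

        /*
         * Freeing the unused tail now reaches kmemleak_free_part(), which
         * shrinks the tracked object instead of leaving stale metadata.
         */
        free_bootmem(__pa(table) + used, 4 * PAGE_SIZE - used);
}
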
mm/kmemleak.c (+164 -72)
···
  * Kmemleak configuration and common defines.
  */
 #define MAX_TRACE 16 /* stack trace length */
-#define REPORTS_NR 50 /* maximum number of reported leaks */
 #define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
 #define SECS_FIRST_SCAN 60 /* delay before the first scan */
 #define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */
+#define GRAY_LIST_PASSES 25 /* maximum number of gray list scans */
 
 #define BYTES_PER_POINTER sizeof(void *)
 
···
 #define OBJECT_REPORTED (1 << 1)
 /* flag set to not scan the object */
 #define OBJECT_NO_SCAN (1 << 2)
+/* flag set on newly allocated objects */
+#define OBJECT_NEW (1 << 3)
 
 /* the list of all allocated objects */
 static LIST_HEAD(object_list);
···
 /* protects the memory scanning, parameters and debug/kmemleak file access */
 static DEFINE_MUTEX(scan_mutex);
 
-/* number of leaks reported (for limitation purposes) */
-static int reported_leaks;
-
 /*
  * Early object allocation/freeing logging. Kmemleak is initialized after the
  * kernel allocator. However, both the kernel allocator and kmemleak may
···
 enum {
         KMEMLEAK_ALLOC,
         KMEMLEAK_FREE,
+        KMEMLEAK_FREE_PART,
         KMEMLEAK_NOT_LEAK,
         KMEMLEAK_IGNORE,
         KMEMLEAK_SCAN_AREA,
···
 static int color_gray(const struct kmemleak_object *object)
 {
         return object->min_count != -1 && object->count >= object->min_count;
+}
+
+static int color_black(const struct kmemleak_object *object)
+{
+        return object->min_count == -1;
 }
 
 /*
···
         INIT_HLIST_HEAD(&object->area_list);
         spin_lock_init(&object->lock);
         atomic_set(&object->use_count, 1);
-        object->flags = OBJECT_ALLOCATED;
+        object->flags = OBJECT_ALLOCATED | OBJECT_NEW;
         object->pointer = ptr;
         object->size = size;
         object->min_count = min_count;
···
  * Remove the metadata (struct kmemleak_object) for a memory block from the
  * object_list and object_tree_root and decrement its use_count.
  */
-static void delete_object(unsigned long ptr)
+static void __delete_object(struct kmemleak_object *object)
 {
         unsigned long flags;
-        struct kmemleak_object *object;
 
         write_lock_irqsave(&kmemleak_lock, flags);
-        object = lookup_object(ptr, 0);
-        if (!object) {
-#ifdef DEBUG
-                kmemleak_warn("Freeing unknown object at 0x%08lx\n",
-                              ptr);
-#endif
-                write_unlock_irqrestore(&kmemleak_lock, flags);
-                return;
-        }
         prio_tree_remove(&object_tree_root, &object->tree_node);
         list_del_rcu(&object->object_list);
         write_unlock_irqrestore(&kmemleak_lock, flags);
 
         WARN_ON(!(object->flags & OBJECT_ALLOCATED));
-        WARN_ON(atomic_read(&object->use_count) < 1);
+        WARN_ON(atomic_read(&object->use_count) < 2);
 
         /*
          * Locking here also ensures that the corresponding memory block
···
         put_object(object);
 }
 
+/*
+ * Look up the metadata (struct kmemleak_object) corresponding to ptr and
+ * delete it.
+ */
+static void delete_object_full(unsigned long ptr)
+{
+        struct kmemleak_object *object;
+
+        object = find_and_get_object(ptr, 0);
+        if (!object) {
+#ifdef DEBUG
+                kmemleak_warn("Freeing unknown object at 0x%08lx\n",
+                              ptr);
+#endif
+                return;
+        }
+        __delete_object(object);
+        put_object(object);
+}
+
+/*
+ * Look up the metadata (struct kmemleak_object) corresponding to ptr and
+ * delete it. If the memory block is partially freed, the function may create
+ * additional metadata for the remaining parts of the block.
+ */
+static void delete_object_part(unsigned long ptr, size_t size)
+{
+        struct kmemleak_object *object;
+        unsigned long start, end;
+
+        object = find_and_get_object(ptr, 1);
+        if (!object) {
+#ifdef DEBUG
+                kmemleak_warn("Partially freeing unknown object at 0x%08lx "
+                              "(size %zu)\n", ptr, size);
+#endif
+                return;
+        }
+        __delete_object(object);
+
+        /*
+         * Create one or two objects that may result from the memory block
+         * split. Note that partial freeing is only done by free_bootmem() and
+         * this happens before kmemleak_init() is called. The path below is
+         * only executed during early log recording in kmemleak_init(), so
+         * GFP_KERNEL is enough.
+         */
+        start = object->pointer;
+        end = object->pointer + object->size;
+        if (ptr > start)
+                create_object(start, ptr - start, object->min_count,
+                              GFP_KERNEL);
+        if (ptr + size < end)
+                create_object(ptr + size, end - ptr - size, object->min_count,
+                              GFP_KERNEL);
+
+        put_object(object);
+}
 /*
  * Make a object permanently as gray-colored so that it can no longer be
  * reported as a leak. This is used in general to mark a false positive.
···
         pr_debug("%s(0x%p)\n", __func__, ptr);
 
         if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
-                delete_object((unsigned long)ptr);
+                delete_object_full((unsigned long)ptr);
         else if (atomic_read(&kmemleak_early_log))
                 log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free);
+
+/*
+ * Partial memory freeing function callback. This function is usually called
+ * from bootmem allocator when (part of) a memory block is freed.
+ */
+void kmemleak_free_part(const void *ptr, size_t size)
+{
+        pr_debug("%s(0x%p)\n", __func__, ptr);
+
+        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+                delete_object_part((unsigned long)ptr, size);
+        else if (atomic_read(&kmemleak_early_log))
+                log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0);
+}
+EXPORT_SYMBOL_GPL(kmemleak_free_part);
 
 /*
  * Mark an already allocated memory block as a false positive. This will cause
···
  * found to the gray list.
  */
 static void scan_block(void *_start, void *_end,
-                       struct kmemleak_object *scanned)
+                       struct kmemleak_object *scanned, int allow_resched)
 {
         unsigned long *ptr;
         unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
···
                 unsigned long pointer = *ptr;
                 struct kmemleak_object *object;
 
+                if (allow_resched)
+                        cond_resched();
                 if (scan_should_stop())
                         break;
···
                 goto out;
         if (hlist_empty(&object->area_list))
                 scan_block((void *)object->pointer,
-                           (void *)(object->pointer + object->size), object);
+                           (void *)(object->pointer + object->size), object, 0);
         else
                 hlist_for_each_entry(area, elem, &object->area_list, node)
                         scan_block((void *)(object->pointer + area->offset),
                                    (void *)(object->pointer + area->offset
-                                            + area->length), object);
+                                            + area->length), object, 0);
 out:
         spin_unlock_irqrestore(&object->lock, flags);
 }
···
         struct task_struct *task;
         int i;
         int new_leaks = 0;
+        int gray_list_pass = 0;
 
         jiffies_last_scan = jiffies;
 
···
 #endif
                 /* reset the reference count (whiten the object) */
                 object->count = 0;
+                object->flags &= ~OBJECT_NEW;
                 if (color_gray(object) && get_object(object))
                         list_add_tail(&object->gray_list, &gray_list);
 
···
         rcu_read_unlock();
 
         /* data/bss scanning */
-        scan_block(_sdata, _edata, NULL);
-        scan_block(__bss_start, __bss_stop, NULL);
+        scan_block(_sdata, _edata, NULL, 1);
+        scan_block(__bss_start, __bss_stop, NULL, 1);
 
 #ifdef CONFIG_SMP
         /* per-cpu sections scanning */
         for_each_possible_cpu(i)
                 scan_block(__per_cpu_start + per_cpu_offset(i),
-                           __per_cpu_end + per_cpu_offset(i), NULL);
+                           __per_cpu_end + per_cpu_offset(i), NULL, 1);
 #endif
···
                         /* only scan if page is in use */
                         if (page_count(page) == 0)
                                 continue;
-                        scan_block(page, page + 1, NULL);
+                        scan_block(page, page + 1, NULL, 1);
                 }
         }
 
···
                 read_lock(&tasklist_lock);
                 for_each_process(task)
                         scan_block(task_stack_page(task),
-                                   task_stack_page(task) + THREAD_SIZE, NULL);
+                                   task_stack_page(task) + THREAD_SIZE,
+                                   NULL, 0);
                 read_unlock(&tasklist_lock);
         }
···
          * kmemleak objects cannot be freed from outside the loop because their
          * use_count was increased.
          */
+repeat:
         object = list_entry(gray_list.next, typeof(*object), gray_list);
         while (&object->gray_list != &gray_list) {
                 cond_resched();
···
 
                 object = tmp;
         }
+
+        if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES)
+                goto scan_end;
+
+        /*
+         * Check for new objects allocated during this scanning and add them
+         * to the gray list.
+         */
+        rcu_read_lock();
+        list_for_each_entry_rcu(object, &object_list, object_list) {
+                spin_lock_irqsave(&object->lock, flags);
+                if ((object->flags & OBJECT_NEW) && !color_black(object) &&
+                    get_object(object)) {
+                        object->flags &= ~OBJECT_NEW;
+                        list_add_tail(&object->gray_list, &gray_list);
+                }
+                spin_unlock_irqrestore(&object->lock, flags);
+        }
+        rcu_read_unlock();
+
+        if (!list_empty(&gray_list))
+                goto repeat;
+
+scan_end:
         WARN_ON(!list_empty(&gray_list));
 
         /*
-         * If scanning was stopped do not report any new unreferenced objects.
+         * If scanning was stopped or new objects were being allocated at a
+         * higher rate than gray list scanning, do not report any new
+         * unreferenced objects.
          */
-        if (scan_should_stop())
+        if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES)
                 return;
 
         /*
···
         static int first_run = 1;
 
         pr_info("Automatic memory scanning thread started\n");
+        set_user_nice(current, 10);
 
         /*
          * Wait before the first scan to allow the system to fully initialize.
···
 {
         struct kmemleak_object *object;
         loff_t n = *pos;
+        int err;
 
-        if (!n)
-                reported_leaks = 0;
-        if (reported_leaks >= REPORTS_NR)
-                return NULL;
+        err = mutex_lock_interruptible(&scan_mutex);
+        if (err < 0)
+                return ERR_PTR(err);
 
         rcu_read_lock();
         list_for_each_entry_rcu(object, &object_list, object_list) {
···
         struct list_head *n = &prev_obj->object_list;
 
         ++(*pos);
-        if (reported_leaks >= REPORTS_NR)
-                goto out;
 
         rcu_read_lock();
         list_for_each_continue_rcu(n, &object_list) {
···
                         break;
         }
         rcu_read_unlock();
-out:
+
         put_object(prev_obj);
         return next_obj;
 }
···
  */
 static void kmemleak_seq_stop(struct seq_file *seq, void *v)
 {
-        if (v)
-                put_object(v);
+        if (!IS_ERR(v)) {
+                /*
+                 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
+                 * waiting was interrupted, so only release it if !IS_ERR.
+                 */
+                mutex_unlock(&scan_mutex);
+                if (v)
+                        put_object(v);
+        }
 }
 
 /*
···
         unsigned long flags;
 
         spin_lock_irqsave(&object->lock, flags);
-        if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) {
+        if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
                 print_unreferenced(seq, object);
-                reported_leaks++;
-        }
         spin_unlock_irqrestore(&object->lock, flags);
         return 0;
 }
···
 
 static int kmemleak_open(struct inode *inode, struct file *file)
 {
-        int ret = 0;
-
         if (!atomic_read(&kmemleak_enabled))
                 return -EBUSY;
 
-        ret = mutex_lock_interruptible(&scan_mutex);
-        if (ret < 0)
-                goto out;
-        if (file->f_mode & FMODE_READ) {
-                ret = seq_open(file, &kmemleak_seq_ops);
-                if (ret < 0)
-                        goto scan_unlock;
-        }
-        return ret;
-
-scan_unlock:
-        mutex_unlock(&scan_mutex);
-out:
-        return ret;
+        return seq_open(file, &kmemleak_seq_ops);
 }
 
 static int kmemleak_release(struct inode *inode, struct file *file)
 {
-        int ret = 0;
-
-        if (file->f_mode & FMODE_READ)
-                seq_release(inode, file);
-        mutex_unlock(&scan_mutex);
-
-        return ret;
+        return seq_release(inode, file);
 }
 
 /*
···
 {
         char buf[64];
         int buf_size;
-
-        if (!atomic_read(&kmemleak_enabled))
-                return -EBUSY;
+        int ret;
 
         buf_size = min(size, (sizeof(buf) - 1));
         if (strncpy_from_user(buf, user_buf, buf_size) < 0)
                 return -EFAULT;
         buf[buf_size] = 0;
+
+        ret = mutex_lock_interruptible(&scan_mutex);
+        if (ret < 0)
+                return ret;
 
         if (strncmp(buf, "off", 3) == 0)
                 kmemleak_disable();
···
                 stop_scan_thread();
         else if (strncmp(buf, "scan=", 5) == 0) {
                 unsigned long secs;
-                int err;
 
-                err = strict_strtoul(buf + 5, 0, &secs);
-                if (err < 0)
-                        return err;
+                ret = strict_strtoul(buf + 5, 0, &secs);
+                if (ret < 0)
+                        goto out;
                 stop_scan_thread();
                 if (secs) {
                         jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
···
         } else if (strncmp(buf, "scan", 4) == 0)
                 kmemleak_scan();
         else
-                return -EINVAL;
+                ret = -EINVAL;
+
+out:
+        mutex_unlock(&scan_mutex);
+        if (ret < 0)
+                return ret;
 
         /* ignore the rest of the buffer, only one command at a time */
         *ppos += size;
···
 
         rcu_read_lock();
         list_for_each_entry_rcu(object, &object_list, object_list)
-                delete_object(object->pointer);
+                delete_object_full(object->pointer);
         rcu_read_unlock();
         mutex_unlock(&scan_mutex);
 
···
                         break;
                 case KMEMLEAK_FREE:
                         kmemleak_free(log->ptr);
+                        break;
+                case KMEMLEAK_FREE_PART:
+                        kmemleak_free_part(log->ptr, log->size);
                         break;
                 case KMEMLEAK_NOT_LEAK:
                         kmemleak_not_leak(log->ptr);
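
Since scan_mutex is now taken in kmemleak_seq_start() and kmemleak_write() rather than held from open to release, a reader blocked on the mutex can be interrupted while scans stay serialized. For reference, a hedged userspace sketch of driving the debugfs interface touched above (run as root; assumes debugfs is mounted at /sys/kernel/debug):

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/kernel/debug/kmemleak", "r+");
        char line[256];

        if (!f) {
                perror("kmemleak");
                return 1;
        }
        fputs("scan\n", f);     /* trigger an immediate memory scan */
        fflush(f);
        rewind(f);              /* then read back any reported leaks */
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}
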
mm/page_alloc.c (+3 -11)
···
                          * some pages at the end of hash table which
                          * alloc_pages_exact() automatically does
                          */
-                        if (get_order(size) < MAX_ORDER)
+                        if (get_order(size) < MAX_ORDER) {
                                 table = alloc_pages_exact(size, GFP_ATOMIC);
+                                kmemleak_alloc(table, size, 1, GFP_ATOMIC);
+                        }
                 }
         } while (!table && size > PAGE_SIZE && --log2qty);
 
···
                 *_hash_shift = log2qty;
         if (_hash_mask)
                 *_hash_mask = (1 << log2qty) - 1;
-
-        /*
-         * If hashdist is set, the table allocation is done with __vmalloc()
-         * which invokes the kmemleak_alloc() callback. This function may also
-         * be called before the slab and kmemleak are initialised when
-         * kmemleak simply buffers the request to be executed later
-         * (GFP_ATOMIC flag ignored in this case).
-         */
-        if (!hashdist)
-                kmemleak_alloc(table, size, 1, GFP_ATOMIC);
 
         return table;
 }
mm/slub.c (+6 -4)
···
 #include <linux/kmemcheck.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
-#include <linux/kmemleak.h>
 #include <linux/mempolicy.h>
 #include <linux/ctype.h>
 #include <linux/debugobjects.h>
···
 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
         struct page *page;
+        void *ptr = NULL;
 
         flags |= __GFP_COMP | __GFP_NOTRACK;
         page = alloc_pages_node(node, flags, get_order(size));
         if (page)
-                return page_address(page);
-        else
-                return NULL;
+                ptr = page_address(page);
+
+        kmemleak_alloc(ptr, size, 1, flags);
+        return ptr;
 }
 
 #ifdef CONFIG_NUMA
···
         page = virt_to_head_page(x);
         if (unlikely(!PageSlab(page))) {
                 BUG_ON(!PageCompound(page));
+                kmemleak_free(x);
                 put_page(page);
                 return;
         }
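
With kmalloc_large() and kmalloc_large_node() now calling kmemleak_alloc(), an unreferenced multi-page kmalloc() allocation is reported like any slab object. A hedged test sketch in the spirit of the kmemleak test module (hypothetical module; 16 KB is above the two-page slub limit on common configurations, so it takes the kmalloc_large() path):

#include <linux/module.h>
#include <linux/slab.h>

static int __init large_leak_init(void)
{
        /* the pointer is intentionally dropped; a later scan should flag it */
        void *p = kmalloc(16 * 1024, GFP_KERNEL);

        pr_info("kmemleak test: leaked a %d byte kmalloc_large() object\n",
                16 * 1024);
        return p ? 0 : -ENOMEM;
}
module_init(large_leak_init);

MODULE_LICENSE("GPL");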