kmemleak: Allow partial freeing of memory blocks

Functions like free_bootmem() are allowed to free only part of a memory
block. Add support for this via a new kmemleak_free_part() callback,
which removes the original object and creates one or two additional
objects as a result of the memory block split.
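
To illustrate the intended semantics (a sketch only; the addresses and
sizes below are made up and the calls are shown outside any real kernel
context):

	/* a 16KB block tracked as a single object covering [0x1000, 0x5000) */
	kmemleak_alloc((void *)0x1000, 0x4000, 1, GFP_KERNEL);

	/* partially free 4KB from the middle of the block */
	kmemleak_free_part((void *)0x2000, 0x1000);

	/*
	 * The original object is gone; kmemleak now tracks two objects,
	 * [0x1000, 0x2000) and [0x3000, 0x5000).
	 */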

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>

---
 include/linux/kmemleak.h |  4 ++++
 mm/kmemleak.c            | 95 ++++++++++++++++++++++++++++++++++++---------
 2 files changed, 85 insertions(+), 14 deletions(-)

--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
···
 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
			   gfp_t gfp);
 extern void kmemleak_free(const void *ptr);
+extern void kmemleak_free_part(const void *ptr, size_t size);
 extern void kmemleak_padding(const void *ptr, unsigned long offset,
			     size_t size);
 extern void kmemleak_not_leak(const void *ptr);
···
 {
 }
 static inline void kmemleak_free(const void *ptr)
 {
 }
+static inline void kmemleak_free_part(const void *ptr, size_t size)
+{
+}
 static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
···
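
One property of the stub pair above, shown as a minimal sketch
(trim_region() is a hypothetical helper, not part of this patch): call
sites may use the new hook unconditionally, since the empty inline
version compiles away when CONFIG_DEBUG_KMEMLEAK is disabled.

#include <linux/kmemleak.h>

/* hypothetical helper: give back the unused tail of an allocation */
static void trim_region(void *base, size_t alloc_size, size_t used_size)
{
	/* no #ifdef needed; this is a no-op without kmemleak */
	kmemleak_free_part((char *)base + used_size,
			   alloc_size - used_size);
	/* ... return the tail to the underlying allocator ... */
}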
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
···
 enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_FREE,
+	KMEMLEAK_FREE_PART,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
···
  * Remove the metadata (struct kmemleak_object) for a memory block from the
  * object_list and object_tree_root and decrement its use_count.
  */
-static void delete_object(unsigned long ptr)
+static void __delete_object(struct kmemleak_object *object)
 {
	unsigned long flags;
-	struct kmemleak_object *object;

	write_lock_irqsave(&kmemleak_lock, flags);
-	object = lookup_object(ptr, 0);
-	if (!object) {
-#ifdef DEBUG
-		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
-			      ptr);
-#endif
-		write_unlock_irqrestore(&kmemleak_lock, flags);
-		return;
-	}
	prio_tree_remove(&object_tree_root, &object->tree_node);
	list_del_rcu(&object->object_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
-	WARN_ON(atomic_read(&object->use_count) < 1);
+	WARN_ON(atomic_read(&object->use_count) < 2);

	/*
	 * Locking here also ensures that the corresponding memory block
···
	put_object(object);
 }

+/*
+ * Look up the metadata (struct kmemleak_object) corresponding to ptr and
+ * delete it.
+ */
+static void delete_object_full(unsigned long ptr)
+{
+	struct kmemleak_object *object;
+
+	object = find_and_get_object(ptr, 0);
+	if (!object) {
+#ifdef DEBUG
+		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
+			      ptr);
+#endif
+		return;
+	}
+	__delete_object(object);
+	put_object(object);
+}
+
+/*
+ * Look up the metadata (struct kmemleak_object) corresponding to ptr and
+ * delete it. If the memory block is partially freed, the function may create
+ * additional metadata for the remaining parts of the block.
+ */
+static void delete_object_part(unsigned long ptr, size_t size)
+{
+	struct kmemleak_object *object;
+	unsigned long start, end;
+
+	object = find_and_get_object(ptr, 1);
+	if (!object) {
+#ifdef DEBUG
+		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
+			      "(size %zu)\n", ptr, size);
+#endif
+		return;
+	}
+	__delete_object(object);
+
+	/*
+	 * Create one or two objects that may result from the memory block
+	 * split. Note that partial freeing is only done by free_bootmem() and
+	 * this happens before kmemleak_init() is called. The path below is
+	 * only executed during early log recording in kmemleak_init(), so
+	 * GFP_KERNEL is enough.
+	 */
+	start = object->pointer;
+	end = object->pointer + object->size;
+	if (ptr > start)
+		create_object(start, ptr - start, object->min_count,
+			      GFP_KERNEL);
+	if (ptr + size < end)
+		create_object(ptr + size, end - ptr - size, object->min_count,
+			      GFP_KERNEL);
+
+	put_object(object);
+}
 /*
  * Make a object permanently as gray-colored so that it can no longer be
  * reported as a leak. This is used in general to mark a false positive.
···
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
-		delete_object((unsigned long)ptr);
+		delete_object_full((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free);
+
+/*
+ * Partial memory freeing function callback. This function is usually called
+ * from bootmem allocator when (part of) a memory block is freed.
+ */
+void kmemleak_free_part(const void *ptr, size_t size)
+{
+	pr_debug("%s(0x%p)\n", __func__, ptr);
+
+	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+		delete_object_part((unsigned long)ptr, size);
+	else if (atomic_read(&kmemleak_early_log))
+		log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0);
+}
+EXPORT_SYMBOL_GPL(kmemleak_free_part);

 /*
  * Mark an already allocated memory block as a false positive. This will cause
···
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
-		delete_object(object->pointer);
+		delete_object_full(object->pointer);
	rcu_read_unlock();
	mutex_unlock(&scan_mutex);
···
		break;
	case KMEMLEAK_FREE:
		kmemleak_free(log->ptr);
		break;
+	case KMEMLEAK_FREE_PART:
+		kmemleak_free_part(log->ptr, log->size);
+		break;
	case KMEMLEAK_NOT_LEAK:
		kmemleak_not_leak(log->ptr);
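
For context, a sketch of the expected call site in the bootmem allocator
(the actual bootmem hook is added by a separate patch; the body below is
illustrative only):

void free_bootmem(unsigned long addr, unsigned long size)
{
	/*
	 * Report the partial free. If this runs before kmemleak_init(),
	 * the call is recorded via log_early(KMEMLEAK_FREE_PART, ...)
	 * and replayed once kmemleak is up.
	 */
	kmemleak_free_part(__va(addr), size);

	/* ... existing bootmem bitmap bookkeeping ... */
}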