Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'kmemleak' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux

Kmemleak patches

Main features:
- Handle percpu memory allocations (only scanning them, not actually
reporting).
- Memory hotplug support.

Usability improvements:
- Show the origin of early allocations.
- Report previously found leaks even if kmemleak has been disabled by
some error.

* tag 'kmemleak' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux:
kmemleak: Add support for memory hotplug
kmemleak: Handle percpu memory allocation
kmemleak: Report previously found leaks even after an error
kmemleak: When the early log buffer is exceeded, report the actual number
kmemleak: Show where early_log issues come from

+155 -28
+3
Documentation/kmemleak.txt
··· 127 127 128 128 kmemleak_init - initialize kmemleak 129 129 kmemleak_alloc - notify of a memory block allocation 130 + kmemleak_alloc_percpu - notify of a percpu memory block allocation 130 131 kmemleak_free - notify of a memory block freeing 132 + kmemleak_free_part - notify of a partial memory block freeing 133 + kmemleak_free_percpu - notify of a percpu memory block freeing 131 134 kmemleak_not_leak - mark an object as not a leak 132 135 kmemleak_ignore - do not scan or report an object as leak 133 136 kmemleak_scan_area - add scan areas inside a memory block
+8
include/linux/kmemleak.h
··· 26 26 extern void kmemleak_init(void) __ref; 27 27 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, 28 28 gfp_t gfp) __ref; 29 + extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref; 29 30 extern void kmemleak_free(const void *ptr) __ref; 30 31 extern void kmemleak_free_part(const void *ptr, size_t size) __ref; 32 + extern void kmemleak_free_percpu(const void __percpu *ptr) __ref; 31 33 extern void kmemleak_padding(const void *ptr, unsigned long offset, 32 34 size_t size) __ref; 33 35 extern void kmemleak_not_leak(const void *ptr) __ref; ··· 70 68 gfp_t gfp) 71 69 { 72 70 } 71 + static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) 72 + { 73 + } 73 74 static inline void kmemleak_free(const void *ptr) 74 75 { 75 76 } ··· 80 75 { 81 76 } 82 77 static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags) 78 + { 79 + } 80 + static inline void kmemleak_free_percpu(const void __percpu *ptr) 83 81 { 84 82 } 85 83 static inline void kmemleak_not_leak(const void *ptr)
+1 -1
lib/Kconfig.debug
··· 414 414 415 415 config DEBUG_KMEMLEAK 416 416 bool "Kernel memory leak detector" 417 - depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ 417 + depends on DEBUG_KERNEL && EXPERIMENTAL && \ 418 418 (X86 || ARM || PPC || MIPS || S390 || SPARC64 || SUPERH || MICROBLAZE || TILE) 419 419 420 420 select DEBUG_FS
+132 -26
mm/kmemleak.c
··· 100 100 101 101 #include <linux/kmemcheck.h> 102 102 #include <linux/kmemleak.h> 103 + #include <linux/memory_hotplug.h> 103 104 104 105 /* 105 106 * Kmemleak configuration and common defines. ··· 197 196 static atomic_t kmemleak_initialized = ATOMIC_INIT(0); 198 197 /* enables or disables early logging of the memory operations */ 199 198 static atomic_t kmemleak_early_log = ATOMIC_INIT(1); 200 - /* set if a fata kmemleak error has occurred */ 199 + /* set if a kmemleak warning was issued */ 200 + static atomic_t kmemleak_warning = ATOMIC_INIT(0); 201 + /* set if a fatal kmemleak error has occurred */ 201 202 static atomic_t kmemleak_error = ATOMIC_INIT(0); 202 203 203 204 /* minimum and maximum address that may be valid pointers */ ··· 231 228 /* kmemleak operation type for early logging */ 232 229 enum { 233 230 KMEMLEAK_ALLOC, 231 + KMEMLEAK_ALLOC_PERCPU, 234 232 KMEMLEAK_FREE, 235 233 KMEMLEAK_FREE_PART, 234 + KMEMLEAK_FREE_PERCPU, 236 235 KMEMLEAK_NOT_LEAK, 237 236 KMEMLEAK_IGNORE, 238 237 KMEMLEAK_SCAN_AREA, ··· 264 259 /* 265 260 * Print a warning and dump the stack trace. 266 261 */ 267 - #define kmemleak_warn(x...) do { \ 268 - pr_warning(x); \ 269 - dump_stack(); \ 262 + #define kmemleak_warn(x...) do { \ 263 + pr_warning(x); \ 264 + dump_stack(); \ 265 + atomic_set(&kmemleak_warning, 1); \ 270 266 } while (0) 271 267 272 268 /* ··· 409 403 object = prio_tree_entry(node, struct kmemleak_object, 410 404 tree_node); 411 405 if (!alias && object->pointer != ptr) { 412 - pr_warning("Found object by alias at 0x%08lx\n", ptr); 413 - dump_stack(); 406 + kmemleak_warn("Found object by alias at 0x%08lx\n", 407 + ptr); 414 408 dump_object_info(object); 415 409 object = NULL; 416 410 } ··· 800 794 unsigned long flags; 801 795 struct early_log *log; 802 796 797 + if (atomic_read(&kmemleak_error)) { 798 + /* kmemleak stopped recording, just count the requests */ 799 + crt_early_log++; 800 + return; 801 + } 802 + 803 803 if (crt_early_log >= ARRAY_SIZE(early_log)) { 804 - pr_warning("Early log buffer exceeded, " 805 - "please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n"); 806 804 kmemleak_disable(); 807 805 return; 808 806 } ··· 821 811 log->ptr = ptr; 822 812 log->size = size; 823 813 log->min_count = min_count; 824 - if (op_type == KMEMLEAK_ALLOC) 825 - log->trace_len = __save_stack_trace(log->trace); 814 + log->trace_len = __save_stack_trace(log->trace); 826 815 crt_early_log++; 827 816 local_irq_restore(flags); 828 817 } ··· 855 846 rcu_read_unlock(); 856 847 } 857 848 849 + /* 850 + * Log an early allocated block and populate the stack trace. 851 + */ 852 + static void early_alloc_percpu(struct early_log *log) 853 + { 854 + unsigned int cpu; 855 + const void __percpu *ptr = log->ptr; 856 + 857 + for_each_possible_cpu(cpu) { 858 + log->ptr = per_cpu_ptr(ptr, cpu); 859 + early_alloc(log); 860 + } 861 + } 862 + 858 863 /** 859 864 * kmemleak_alloc - register a newly allocated object ··· 894 871 log_early(KMEMLEAK_ALLOC, ptr, size, min_count); 895 872 } 896 873 EXPORT_SYMBOL_GPL(kmemleak_alloc); 874 + 875 + /** 876 + * kmemleak_alloc_percpu - register a newly allocated __percpu object 877 + * @ptr: __percpu pointer to beginning of the object 878 + * @size: size of the object 879 + * 880 + * This function is called from the kernel percpu allocator when a new object 881 + * (memory block) is allocated (alloc_percpu). It assumes GFP_KERNEL 882 + * allocation. 883 + */ 884 + void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) 885 + { 886 + unsigned int cpu; 887 + 888 + pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size); 889 + 890 + /* 891 + * Percpu allocations are only scanned and not reported as leaks 892 + * (min_count is set to 0). 893 + */ 894 + if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) 895 + for_each_possible_cpu(cpu) 896 + create_object((unsigned long)per_cpu_ptr(ptr, cpu), 897 + size, 0, GFP_KERNEL); 898 + else if (atomic_read(&kmemleak_early_log)) 899 + log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0); 900 + } 901 + EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu); 897 902 898 903 /** 899 904 * kmemleak_free - unregister a previously registered object ··· 960 909 log_early(KMEMLEAK_FREE_PART, ptr, size, 0); 961 910 } 962 911 EXPORT_SYMBOL_GPL(kmemleak_free_part); 912 + 913 + /** 914 + * kmemleak_free_percpu - unregister a previously registered __percpu object 915 + * @ptr: __percpu pointer to beginning of the object 916 + * 917 + * This function is called from the kernel percpu allocator when an object 918 + * (memory block) is freed (free_percpu). 919 + */ 920 + void __ref kmemleak_free_percpu(const void __percpu *ptr) 921 + { 922 + unsigned int cpu; 923 + 924 + pr_debug("%s(0x%p)\n", __func__, ptr); 925 + 926 + if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) 927 + for_each_possible_cpu(cpu) 928 + delete_object_full((unsigned long)per_cpu_ptr(ptr, 929 + cpu)); 930 + else if (atomic_read(&kmemleak_early_log)) 931 + log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0); 932 + } 933 + EXPORT_SYMBOL_GPL(kmemleak_free_percpu); 963 934 964 935 /** 965 936 * kmemleak_not_leak - mark an allocated object as false positive ··· 1293 1220 #endif 1294 1221 1295 1222 /* 1296 - * Struct page scanning for each node. The code below is not yet safe 1297 - * with MEMORY_HOTPLUG. 1223 + * Struct page scanning for each node. 1298 1224 */ 1225 + lock_memory_hotplug(); 1299 1226 for_each_online_node(i) { 1300 1227 pg_data_t *pgdat = NODE_DATA(i); 1301 1228 unsigned long start_pfn = pgdat->node_start_pfn; ··· 1314 1241 scan_block(page, page + 1, NULL, 1); 1315 1242 } 1316 1243 } 1244 + unlock_memory_hotplug(); 1317 1245 1318 1246 /* 1319 1247 * Scanning the task stacks (may introduce false negatives). ··· 1541 1467 1542 1468 static int kmemleak_open(struct inode *inode, struct file *file) 1543 1469 { 1544 - if (!atomic_read(&kmemleak_enabled)) 1545 - return -EBUSY; 1546 - 1547 1470 return seq_open(file, &kmemleak_seq_ops); 1548 1471 } ··· 1614 1543 int buf_size; 1615 1544 int ret; 1616 1545 1546 + if (!atomic_read(&kmemleak_enabled)) 1547 + return -EBUSY; 1548 + 1617 1549 buf_size = min(size, (sizeof(buf) - 1)); 1618 1550 if (strncpy_from_user(buf, user_buf, buf_size) < 0) 1619 1551 return -EFAULT; ··· 1676 1602 }; 1677 1603 1678 1604 /* 1679 - * Perform the freeing of the kmemleak internal objects after waiting for any 1680 - * current memory scan to complete. 1605 + * Stop the memory scanning thread and free the kmemleak internal objects if 1606 + * no previous scan thread (otherwise, kmemleak may still have some useful 1607 + * information on memory leaks). 1681 1608 */ 1682 1609 static void kmemleak_do_cleanup(struct work_struct *work) 1683 1610 { 1684 1611 struct kmemleak_object *object; 1612 + bool cleanup = scan_thread == NULL; 1685 1613 1686 1614 mutex_lock(&scan_mutex); 1687 1615 stop_scan_thread(); 1688 1616 1689 - rcu_read_lock(); 1690 - list_for_each_entry_rcu(object, &object_list, object_list) 1691 - delete_object_full(object->pointer); 1692 - rcu_read_unlock(); 1617 + if (cleanup) { 1618 + rcu_read_lock(); 1619 + list_for_each_entry_rcu(object, &object_list, object_list) 1620 + delete_object_full(object->pointer); 1621 + rcu_read_unlock(); 1622 + } 1693 1623 mutex_unlock(&scan_mutex); 1694 1624 } ··· 1710 1632 return; 1711 1633 1712 1634 /* stop any memory operation tracing */ 1713 - atomic_set(&kmemleak_early_log, 0); 1714 1635 atomic_set(&kmemleak_enabled, 0); 1715 1636 1716 1637 /* check whether it is too early for a kernel thread */ ··· 1736 1659 } 1737 1660 early_param("kmemleak", kmemleak_boot_config); 1738 1661 1662 + static void __init print_log_trace(struct early_log *log) 1663 + { 1664 + struct stack_trace trace; 1665 + 1666 + trace.nr_entries = log->trace_len; 1667 + trace.entries = log->trace; 1668 + 1669 + pr_notice("Early log backtrace:\n"); 1670 + print_stack_trace(&trace, 2); 1671 + } 1672 + 1739 1673 /* 1740 1674 * Kmemleak initialization. 1741 1675 */ ··· 1769 1681 scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE); 1770 1682 INIT_PRIO_TREE_ROOT(&object_tree_root); 1771 1683 1684 + if (crt_early_log >= ARRAY_SIZE(early_log)) 1685 + pr_warning("Early log buffer exceeded (%d), please increase " 1686 + "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log); 1687 + 1772 1688 /* the kernel is still in UP mode, so disabling the IRQs is enough */ 1773 1689 local_irq_save(flags); 1774 - if (!atomic_read(&kmemleak_error)) { 1690 + atomic_set(&kmemleak_early_log, 0); 1691 + if (atomic_read(&kmemleak_error)) { 1692 + local_irq_restore(flags); 1693 + return; 1694 + } else 1775 1695 atomic_set(&kmemleak_enabled, 1); 1776 - atomic_set(&kmemleak_early_log, 0); 1777 - } 1778 1696 local_irq_restore(flags); 1779 1697 ··· 1795 1701 case KMEMLEAK_ALLOC: 1796 1702 early_alloc(log); 1797 1703 break; 1704 + case KMEMLEAK_ALLOC_PERCPU: 1705 + early_alloc_percpu(log); 1706 + break; 1798 1707 case KMEMLEAK_FREE: 1799 1708 kmemleak_free(log->ptr); 1800 1709 break; 1801 1710 case KMEMLEAK_FREE_PART: 1802 1711 kmemleak_free_part(log->ptr, log->size); 1712 + break; 1713 + case KMEMLEAK_FREE_PERCPU: 1714 + kmemleak_free_percpu(log->ptr); 1803 1715 break; 1804 1716 case KMEMLEAK_NOT_LEAK: 1805 1717 kmemleak_not_leak(log->ptr); ··· 1820 1720 kmemleak_no_scan(log->ptr); 1821 1721 break; 1822 1722 default: 1823 - WARN_ON(1); 1723 + kmemleak_warn("Unknown early log operation: %d\n", 1724 + log->op_type); 1725 + } 1726 + 1727 + if (atomic_read(&kmemleak_warning)) { 1728 + print_log_trace(log); 1729 + atomic_set(&kmemleak_warning, 0); 1824 1730 } 1825 1731 } 1826 1732 }
+11 -1
mm/percpu.c
··· 67 67 #include <linux/spinlock.h> 68 68 #include <linux/vmalloc.h> 69 69 #include <linux/workqueue.h> 70 + #include <linux/kmemleak.h> 70 71 71 72 #include <asm/cacheflush.h> 72 73 #include <asm/sections.h> ··· 711 710 const char *err; 712 711 int slot, off, new_alloc; 713 712 unsigned long flags; 713 + void __percpu *ptr; 714 714 715 715 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) { 716 716 WARN(true, "illegal size (%zu) or align (%zu) for " ··· 804 802 mutex_unlock(&pcpu_alloc_mutex); 805 803 806 804 /* return address relative to base address */ 807 - return __addr_to_pcpu_ptr(chunk->base_addr + off); 805 + ptr = __addr_to_pcpu_ptr(chunk->base_addr + off); 806 + kmemleak_alloc_percpu(ptr, size); 807 + return ptr; 808 808 809 809 fail_unlock: 810 810 spin_unlock_irqrestore(&pcpu_lock, flags); ··· 919 915 920 916 if (!ptr) 921 917 return; 918 + 919 + kmemleak_free_percpu(ptr); 922 920 923 921 addr = __pcpu_ptr_to_addr(ptr); 924 922 ··· 1645 1639 rc = -ENOMEM; 1646 1640 goto out_free_areas; 1647 1641 } 1642 + /* kmemleak tracks the percpu allocations separately */ 1643 + kmemleak_free(ptr); 1648 1644 areas[group] = ptr; 1649 1645 1650 1646 base = min(ptr, base); ··· 1761 1753 "for cpu%u\n", psize_str, cpu); 1762 1754 goto enomem; 1763 1755 } 1756 + /* kmemleak tracks the percpu allocations separately */ 1757 + kmemleak_free(ptr); 1764 1758 pages[j++] = virt_to_page(ptr); 1765 1759 } 1766 1760