Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

kmemleak: Simplify the kmemleak_scan_area() function prototype

This function was taking unnecessary arguments that can be determined
by kmemleak itself. The patch also updates the calling sites.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>

+27 -39
+2 -4
include/linux/kmemleak.h
··· 32 32 size_t size) __ref; 33 33 extern void kmemleak_not_leak(const void *ptr) __ref; 34 34 extern void kmemleak_ignore(const void *ptr) __ref; 35 - extern void kmemleak_scan_area(const void *ptr, unsigned long offset, 36 - size_t length, gfp_t gfp) __ref; 35 + extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref; 37 36 extern void kmemleak_no_scan(const void *ptr) __ref; 38 37 39 38 static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, ··· 83 84 static inline void kmemleak_ignore(const void *ptr) 84 85 { 85 86 } 86 - static inline void kmemleak_scan_area(const void *ptr, unsigned long offset, 87 - size_t length, gfp_t gfp) 87 + static inline void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) 88 88 { 89 89 } 90 90 static inline void kmemleak_erase(void **ptr)
+2 -5
kernel/module.c
··· 2043 2043 unsigned int i; 2044 2044 2045 2045 /* only scan the sections containing data */ 2046 - kmemleak_scan_area(mod->module_core, (unsigned long)mod - 2047 - (unsigned long)mod->module_core, 2048 - sizeof(struct module), GFP_KERNEL); 2046 + kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL); 2049 2047 2050 2048 for (i = 1; i < hdr->e_shnum; i++) { 2051 2049 if (!(sechdrs[i].sh_flags & SHF_ALLOC)) ··· 2052 2054 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0) 2053 2055 continue; 2054 2056 2055 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr - 2056 - (unsigned long)mod->module_core, 2057 + kmemleak_scan_area((void *)sechdrs[i].sh_addr, 2057 2058 sechdrs[i].sh_size, GFP_KERNEL); 2058 2059 } 2059 2060 }
+21 -28
mm/kmemleak.c
··· 119 119 /* scanning area inside a memory block */ 120 120 struct kmemleak_scan_area { 121 121 struct hlist_node node; 122 - unsigned long offset; 123 - size_t length; 122 + unsigned long start; 123 + size_t size; 124 124 }; 125 125 126 126 #define KMEMLEAK_GREY 0 ··· 241 241 const void *ptr; /* allocated/freed memory block */ 242 242 size_t size; /* memory block size */ 243 243 int min_count; /* minimum reference count */ 244 - unsigned long offset; /* scan area offset */ 245 - size_t length; /* scan area length */ 246 244 unsigned long trace[MAX_TRACE]; /* stack trace */ 247 245 unsigned int trace_len; /* stack trace length */ 248 246 }; ··· 718 720 * Add a scanning area to the object. If at least one such area is added, 719 721 * kmemleak will only scan these ranges rather than the whole memory block. 720 722 */ 721 - static void add_scan_area(unsigned long ptr, unsigned long offset, 722 - size_t length, gfp_t gfp) 723 + static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) 723 724 { 724 725 unsigned long flags; 725 726 struct kmemleak_object *object; 726 727 struct kmemleak_scan_area *area; 727 728 728 - object = find_and_get_object(ptr, 0); 729 + object = find_and_get_object(ptr, 1); 729 730 if (!object) { 730 731 kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n", 731 732 ptr); ··· 738 741 } 739 742 740 743 spin_lock_irqsave(&object->lock, flags); 741 - if (offset + length > object->size) { 744 + if (ptr + size > object->pointer + object->size) { 742 745 kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr); 743 746 dump_object_info(object); 744 747 kmem_cache_free(scan_area_cache, area); ··· 746 749 } 747 750 748 751 INIT_HLIST_NODE(&area->node); 749 - area->offset = offset; 750 - area->length = length; 752 + area->start = ptr; 753 + area->size = size; 751 754 752 755 hlist_add_head(&area->node, &object->area_list); 753 756 out_unlock: ··· 783 786 * processed later once kmemleak is fully initialized. 
784 787 */ 785 788 static void __init log_early(int op_type, const void *ptr, size_t size, 786 - int min_count, unsigned long offset, size_t length) 789 + int min_count) 787 790 { 788 791 unsigned long flags; 789 792 struct early_log *log; ··· 805 808 log->ptr = ptr; 806 809 log->size = size; 807 810 log->min_count = min_count; 808 - log->offset = offset; 809 - log->length = length; 810 811 if (op_type == KMEMLEAK_ALLOC) 811 812 log->trace_len = __save_stack_trace(log->trace); 812 813 crt_early_log++; ··· 853 858 if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) 854 859 create_object((unsigned long)ptr, size, min_count, gfp); 855 860 else if (atomic_read(&kmemleak_early_log)) 856 - log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0); 861 + log_early(KMEMLEAK_ALLOC, ptr, size, min_count); 857 862 } 858 863 EXPORT_SYMBOL_GPL(kmemleak_alloc); 859 864 ··· 868 873 if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) 869 874 delete_object_full((unsigned long)ptr); 870 875 else if (atomic_read(&kmemleak_early_log)) 871 - log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0); 876 + log_early(KMEMLEAK_FREE, ptr, 0, 0); 872 877 } 873 878 EXPORT_SYMBOL_GPL(kmemleak_free); 874 879 ··· 883 888 if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) 884 889 delete_object_part((unsigned long)ptr, size); 885 890 else if (atomic_read(&kmemleak_early_log)) 886 - log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0); 891 + log_early(KMEMLEAK_FREE_PART, ptr, size, 0); 887 892 } 888 893 EXPORT_SYMBOL_GPL(kmemleak_free_part); 889 894 ··· 898 903 if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) 899 904 make_gray_object((unsigned long)ptr); 900 905 else if (atomic_read(&kmemleak_early_log)) 901 - log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0); 906 + log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0); 902 907 } 903 908 EXPORT_SYMBOL(kmemleak_not_leak); 904 909 ··· 914 919 if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) 915 920 make_black_object((unsigned long)ptr); 916 921 
else if (atomic_read(&kmemleak_early_log)) 917 - log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0); 922 + log_early(KMEMLEAK_IGNORE, ptr, 0, 0); 918 923 } 919 924 EXPORT_SYMBOL(kmemleak_ignore); 920 925 921 926 /* 922 927 * Limit the range to be scanned in an allocated memory block. 923 928 */ 924 - void __ref kmemleak_scan_area(const void *ptr, unsigned long offset, 925 - size_t length, gfp_t gfp) 929 + void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) 926 930 { 927 931 pr_debug("%s(0x%p)\n", __func__, ptr); 928 932 929 933 if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) 930 - add_scan_area((unsigned long)ptr, offset, length, gfp); 934 + add_scan_area((unsigned long)ptr, size, gfp); 931 935 else if (atomic_read(&kmemleak_early_log)) 932 - log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length); 936 + log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0); 933 937 } 934 938 EXPORT_SYMBOL(kmemleak_scan_area); 935 939 ··· 942 948 if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) 943 949 object_no_scan((unsigned long)ptr); 944 950 else if (atomic_read(&kmemleak_early_log)) 945 - log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0); 951 + log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0); 946 952 } 947 953 EXPORT_SYMBOL(kmemleak_no_scan); 948 954 ··· 1069 1075 } 1070 1076 } else 1071 1077 hlist_for_each_entry(area, elem, &object->area_list, node) 1072 - scan_block((void *)(object->pointer + area->offset), 1073 - (void *)(object->pointer + area->offset 1074 - + area->length), object, 0); 1078 + scan_block((void *)area->start, 1079 + (void *)(area->start + area->size), 1080 + object, 0); 1075 1081 out: 1076 1082 spin_unlock_irqrestore(&object->lock, flags); 1077 1083 } ··· 1636 1642 kmemleak_ignore(log->ptr); 1637 1643 break; 1638 1644 case KMEMLEAK_SCAN_AREA: 1639 - kmemleak_scan_area(log->ptr, log->offset, log->length, 1640 - GFP_KERNEL); 1645 + kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL); 1641 1646 break; 1642 1647 case KMEMLEAK_NO_SCAN: 1643 1648 
kmemleak_no_scan(log->ptr);
+2 -2
mm/slab.c
··· 2584 2584 * kmemleak does not treat the ->s_mem pointer as a reference 2585 2585 * to the object. Otherwise we will not report the leak. 2586 2586 */ 2587 - kmemleak_scan_area(slabp, offsetof(struct slab, list), 2588 - sizeof(struct list_head), local_flags); 2587 + kmemleak_scan_area(&slabp->list, sizeof(struct list_head), 2588 + local_flags); 2589 2589 if (!slabp) 2590 2590 return NULL; 2591 2591 } else {