kmemleak: Scan objects allocated during a scanning episode

Many of the false positives in kmemleak happen on busy systems where
objects are allocated during a kmemleak scanning episode. These objects
are not scanned by default until the next memory scan. When such an
object is added, for example, at the head of a list, it is possible that
all the other objects in the list are seen as unreferenced until the
next scan and are therefore reported as leaks.
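To make the failure mode concrete, here is a small standalone sketch (plain
userspace C with hypothetical names, not kmemleak code): the mark pass never
looks inside an object allocated after the pass started, so everything that
is reachable only through that object stays unmarked.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        struct obj *next;               /* payload objects form a simple list */
        bool allocated_during_scan;     /* stands in for an unscanned new object */
        bool referenced;                /* set when the scan sees a pointer to it */
};

/*
 * Simplified mark pass: follow pointers, but never look inside objects
 * that were allocated after the pass started.
 */
static void scan(struct obj *root)
{
        for (struct obj *o = root; o; o = o->next) {
                o->referenced = true;
                if (o->allocated_during_scan)
                        break;          /* its ->next pointer is never examined */
        }
}

int main(void)
{
        struct obj *tail = calloc(1, sizeof(*tail));
        struct obj *old_head = calloc(1, sizeof(*old_head));
        struct obj *new_head = calloc(1, sizeof(*new_head));

        old_head->next = tail;

        /* a new object is inserted at the head while the scan is running */
        new_head->allocated_during_scan = true;
        new_head->next = old_head;

        scan(new_head);

        /* both are still reachable, yet the scan never saw them */
        printf("old_head looks unreferenced: %s\n",
               old_head->referenced ? "no" : "yes");
        printf("tail looks unreferenced:     %s\n",
               tail->referenced ? "no" : "yes");
        return 0;
}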

This patch adds a check for newly allocated objects at the end of the
scan and repeats the scanning on these objects. If the kernel allocates
new objects at a higher rate than they can be scanned, the scan stops
after a predefined number of passes (GRAY_LIST_PASSES).
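The sketch below (again standalone userspace C with hypothetical names, not
the kernel code) illustrates the idea in a simplified form: after each pass,
objects that were flagged as new are picked up, their flag is cleared, and
the scan is repeated, bounded by a fixed pass limit so that a high allocation
rate cannot keep the scan running forever. The actual change to
mm/kmemleak.c follows.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_PASSES 25   /* plays the role of GRAY_LIST_PASSES in the patch */

struct obj {
        struct obj *next;
        bool is_new;            /* plays the role of OBJECT_NEW in the patch */
        bool referenced;
};

/* one mark pass: follow pointers but never look inside objects flagged new */
static void scan_pass(struct obj *root)
{
        for (struct obj *o = root; o; o = o->next) {
                o->referenced = true;
                if (o->is_new)
                        break;
        }
}

/*
 * End-of-pass check over all known objects: clear the "new" flag and report
 * whether another pass is needed (a simplified stand-in for the loop the
 * patch adds over object_list).
 */
static bool pick_up_new(struct obj *objects[], int n)
{
        bool again = false;

        for (int i = 0; i < n; i++) {
                if (objects[i]->is_new) {
                        objects[i]->is_new = false;
                        again = true;
                }
        }
        return again;
}

int main(void)
{
        struct obj *tail = calloc(1, sizeof(*tail));
        struct obj *old_head = calloc(1, sizeof(*old_head));
        struct obj *new_head = calloc(1, sizeof(*new_head));

        old_head->next = tail;
        new_head->next = old_head;
        new_head->is_new = true;        /* inserted while the scan was running */

        /* repeat the pass for objects that were new, up to a fixed limit */
        int pass = 0;
        do
                scan_pass(new_head);
        while (pick_up_new(all3(new_head, old_head, tail), 3) && ++pass < MAX_PASSES);

        printf("tail referenced after %d extra pass(es): %d\n",
               pass, tail->referenced);
        return 0;
}

/* small helper so the object array can be built inline */
static struct obj **all3(struct obj *a, struct obj *b, struct obj *c)
{
        static struct obj *arr[3];
        arr[0] = a; arr[1] = b; arr[2] = c;
        return arr;
}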

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>

+40 -3
mm/kmemleak.c
···
 #define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
 #define SECS_FIRST_SCAN 60 /* delay before the first scan */
 #define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */
+#define GRAY_LIST_PASSES 25 /* maximum number of gray list scans */

 #define BYTES_PER_POINTER sizeof(void *)

···
 #define OBJECT_REPORTED (1 << 1)
 /* flag set to not scan the object */
 #define OBJECT_NO_SCAN (1 << 2)
+/* flag set on newly allocated objects */
+#define OBJECT_NEW (1 << 3)

 /* the list of all allocated objects */
 static LIST_HEAD(object_list);
···
 static int color_gray(const struct kmemleak_object *object)
 {
         return object->min_count != -1 && object->count >= object->min_count;
 }

+static int color_black(const struct kmemleak_object *object)
+{
+        return object->min_count == -1;
+}
+
 /*
···
         INIT_HLIST_HEAD(&object->area_list);
         spin_lock_init(&object->lock);
         atomic_set(&object->use_count, 1);
-        object->flags = OBJECT_ALLOCATED;
+        object->flags = OBJECT_ALLOCATED | OBJECT_NEW;
         object->pointer = ptr;
         object->size = size;
         object->min_count = min_count;
···
         struct task_struct *task;
         int i;
         int new_leaks = 0;
+        int gray_list_pass = 0;

         jiffies_last_scan = jiffies;

···
 #endif
                 /* reset the reference count (whiten the object) */
                 object->count = 0;
+                object->flags &= ~OBJECT_NEW;
                 if (color_gray(object) && get_object(object))
                         list_add_tail(&object->gray_list, &gray_list);

···
          * kmemleak objects cannot be freed from outside the loop because their
          * use_count was increased.
          */
+repeat:
         object = list_entry(gray_list.next, typeof(*object), gray_list);
         while (&object->gray_list != &gray_list) {
                 cond_resched();
···

                 object = tmp;
         }
+
+        if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES)
+                goto scan_end;
+
+        /*
+         * Check for new objects allocated during this scanning and add them
+         * to the gray list.
+         */
+        rcu_read_lock();
+        list_for_each_entry_rcu(object, &object_list, object_list) {
+                spin_lock_irqsave(&object->lock, flags);
+                if ((object->flags & OBJECT_NEW) && !color_black(object) &&
+                    get_object(object)) {
+                        object->flags &= ~OBJECT_NEW;
+                        list_add_tail(&object->gray_list, &gray_list);
+                }
+                spin_unlock_irqrestore(&object->lock, flags);
+        }
+        rcu_read_unlock();
+
+        if (!list_empty(&gray_list))
+                goto repeat;
+
+scan_end:
         WARN_ON(!list_empty(&gray_list));

         /*
-         * If scanning was stopped do not report any new unreferenced objects.
+         * If scanning was stopped or new objects were being allocated at a
+         * higher rate than gray list scanning, do not report any new
+         * unreferenced objects.
          */
-        if (scan_should_stop())
+        if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES)
                 return;

         /*