kmemleak: Scan objects allocated during a scanning episode

Many of the false positives in kmemleak happen on busy systems where
objects are allocated during a kmemleak scanning episode. These objects
aren't scanned by default until the next memory scan. When such an
object is added, for example, at the head of a list, it is possible
that all the other objects in the list appear unreferenced until the
next scan (see the illustrative sketch below).
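
As an illustration only (not part of this patch), a minimal sketch of the
scenario, assuming a hypothetical module that keeps kmalloc'ed nodes on a
singly linked list of its own (all names here are made up):

	#include <linux/slab.h>

	struct item {
		struct item *next;
		int data;
	};

	/* Head pointer lives in .bss, which kmemleak always scans. */
	static struct item *item_head;

	/* Hypothetical helper, called while a kmemleak scan is in progress. */
	static void add_item_during_scan(void)
	{
		struct item *it = kmalloc(sizeof(*it), GFP_KERNEL);

		if (!it)
			return;
		it->data = 0;
		/*
		 * Prepend: every older item is now reachable from the head
		 * pointer only through 'it'.  If 'it' was allocated during
		 * the current scanning episode and is never scanned, none of
		 * the older items is referenced from scanned memory and all
		 * of them are (falsely) reported as unreferenced.
		 */
		it->next = item_head;
		item_head = it;
	}

The repeated gray-list passes added by this patch pick up objects like
'it' before the reporting phase, so the older items are found again.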

This patch adds a check for newly allocated objects at the end of the
scan and repeats the scanning on them. If the kernel keeps allocating
new objects at a higher rate than they can be scanned, the loop gives up
after a predefined number of passes (GRAY_LIST_PASSES).

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>

+40 -3
mm/kmemleak.c
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -106,6 +106,7 @@
 #define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
 #define SECS_FIRST_SCAN		60	/* delay before the first scan */
 #define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
+#define GRAY_LIST_PASSES	25	/* maximum number of gray list scans */
 
 #define BYTES_PER_POINTER	sizeof(void *)
 
@@ -158,6 +157,8 @@
 #define OBJECT_REPORTED		(1 << 1)
 /* flag set to not scan the object */
 #define OBJECT_NO_SCAN		(1 << 2)
+/* flag set on newly allocated objects */
+#define OBJECT_NEW		(1 << 3)
 
 /* the list of all allocated objects */
 static LIST_HEAD(object_list);
@@ -271,6 +268,11 @@
 static int color_gray(const struct kmemleak_object *object)
 {
 	return object->min_count != -1 && object->count >= object->min_count;
+}
+
+static int color_black(const struct kmemleak_object *object)
+{
+	return object->min_count == -1;
 }
 
 /*
@@ -455,7 +447,7 @@
 	INIT_HLIST_HEAD(&object->area_list);
 	spin_lock_init(&object->lock);
 	atomic_set(&object->use_count, 1);
-	object->flags = OBJECT_ALLOCATED;
+	object->flags = OBJECT_ALLOCATED | OBJECT_NEW;
 	object->pointer = ptr;
 	object->size = size;
 	object->min_count = min_count;
@@ -909,6 +901,7 @@
 	struct task_struct *task;
 	int i;
 	int new_leaks = 0;
+	int gray_list_pass = 0;
 
 	jiffies_last_scan = jiffies;
 
@@ -930,6 +921,7 @@
 #endif
 		/* reset the reference count (whiten the object) */
 		object->count = 0;
+		object->flags &= ~OBJECT_NEW;
 		if (color_gray(object) && get_object(object))
 			list_add_tail(&object->gray_list, &gray_list);
 
@@ -993,6 +983,7 @@
 	 * kmemleak objects cannot be freed from outside the loop because their
 	 * use_count was increased.
 	 */
+repeat:
 	object = list_entry(gray_list.next, typeof(*object), gray_list);
 	while (&object->gray_list != &gray_list) {
 		cond_resched();
@@ -1011,12 +1000,38 @@
 
 		object = tmp;
 	}
+
+	if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES)
+		goto scan_end;
+
+	/*
+	 * Check for new objects allocated during this scanning and add them
+	 * to the gray list.
+	 */
+	rcu_read_lock();
+	list_for_each_entry_rcu(object, &object_list, object_list) {
+		spin_lock_irqsave(&object->lock, flags);
+		if ((object->flags & OBJECT_NEW) && !color_black(object) &&
+		    get_object(object)) {
+			object->flags &= ~OBJECT_NEW;
+			list_add_tail(&object->gray_list, &gray_list);
+		}
+		spin_unlock_irqrestore(&object->lock, flags);
+	}
+	rcu_read_unlock();
+
+	if (!list_empty(&gray_list))
+		goto repeat;
+
+scan_end:
 	WARN_ON(!list_empty(&gray_list));
 
 	/*
-	 * If scanning was stopped do not report any new unreferenced objects.
+	 * If scanning was stopped or new objects were being allocated at a
+	 * higher rate than gray list scanning, do not report any new
+	 * unreferenced objects.
 	 */
-	if (scan_should_stop())
+	if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES)
 		return;
 
 	/*