// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (raw_spinlock_t): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
 *   Accesses to the metadata (e.g. count) are protected by this lock. Note
 *   that some members of this structure may be protected by other means
 *   (atomic or kmemleak_lock). This lock is also held when scanning the
 *   corresponding memory block to avoid the kernel freeing it via the
 *   kmemleak_free() callback. This is less heavyweight than holding a global
 *   lock like kmemleak_lock during scanning.
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
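
/*
 * Illustrative sketch of the nesting order above as exercised on the
 * scanning path (scan_object() -> scan_block()); this is not literal code
 * from this file and "other_object" stands for any object found by the scan:
 *
 *	mutex_lock(&scan_mutex);
 *	raw_spin_lock_irqsave(&object->lock, flags);	(scan_object)
 *	raw_spin_lock_irqsave(&kmemleak_lock, flags2);	(scan_block)
 *	raw_spin_lock_nested(&other_object->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	raw_spin_unlock(&other_object->lock);
 *	raw_spin_unlock_irqrestore(&kmemleak_lock, flags2);
 *	raw_spin_unlock_irqrestore(&object->lock, flags);
 *	mutex_unlock(&scan_mutex);
 */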

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
					   __GFP_NOLOCKDEP)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	raw_spinlock_t lock;
	unsigned int flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* pass surplus references to this pointer */
	unsigned long excess_ref;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN	(1 << 3)

#define HEX_PREFIX		"    "
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* memory pool allocation */
static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* protecting the access to object_list and object_tree_root */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled = 1;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled = 1;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scans */
static unsigned long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* set if there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing will no longer be available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

#define warn_or_seq_printf(seq, fmt, ...)	do {	\
	if (seq)					\
		seq_printf(seq, fmt, ##__VA_ARGS__);	\
	else						\
		pr_warn(fmt, ##__VA_ARGS__);		\
} while (0)

static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
				 int rowsize, int groupsize, const void *buf,
				 size_t len, bool ascii)
{
	if (seq)
		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
			     buf, len, ascii);
	else
		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
			       rowsize, groupsize, buf, len, ascii);
}

/*
 * Print the object's hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
			     HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
	kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *	     sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *	     (min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}
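
/*
 * Worked examples of the classification above (illustrative, not part of
 * the original file):
 *
 *	min_count	count	color
 *	1		0	white (leak candidate)
 *	1		1	gray (referenced)
 *	0		any	gray (never reported)
 *	-1		any	black (not scanned)
 */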

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Print the unreferenced object's information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
			   object->pointer, object->size);
	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
			   object->comm, object->pid, object->jiffies,
			   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	warn_or_seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		warn_or_seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%x\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	stack_trace_print(object->trace, object->trace_len, 4);
}

/*
 * Look up a memory block's metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;
	unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);

	while (rb) {
		struct kmemleak_object *object;
		unsigned long untagged_objp;

		object = rb_entry(rb, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

		if (untagged_ptr < untagged_objp)
			rb = object->rb_node.rb_left;
		else if (untagged_objp + object->size <= untagged_ptr)
			rb = object->rb_node.rb_right;
		else if (untagged_objp == untagged_ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing has already
 * been registered and the object should no longer be used. This function must
 * be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 */
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;

	/* try the slab allocator first */
	if (object_cache) {
		object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
		if (object)
			return object;
	}

	/* slab allocation failed, try the memory pool */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = list_first_entry_or_null(&mem_pool_free_list,
					  typeof(*object), object_list);
	if (object)
		list_del(&object->object_list);
	else if (mem_pool_free_count)
		object = &mem_pool[--mem_pool_free_count];
	else
		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{
	unsigned long flags;

	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
		kmem_cache_free(object_cache, object);
		return;
	}

	/* add the object to the memory pool free list */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	list_add(&object->object_list, &mem_pool_free_list);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	mem_pool_free(object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	/*
	 * It may be too early for the RCU callbacks, however, there is no
	 * concurrent object_list traversal when !object_cache and all objects
	 * came from the memory pool. Free the object directly.
	 */
	if (object_cache)
		call_rcu(&object->rcu, free_object_rcu);
	else
		free_object_rcu(&object->rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Remove an object from the object_tree_root and object_list. Must be called
 * with the kmemleak_lock held _if_ kmemleak is still enabled.
 */
static void __remove_object(struct kmemleak_object *object)
{
	rb_erase(&object->rb_node, &object_tree_root);
	list_del_rcu(&object->object_list);
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	if (object)
		__remove_object(object);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	return stack_trace_save(trace, MAX_TRACE, 2);
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object = mem_pool_alloc(gfp);
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	raw_spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = kfence_ksize((void *)ptr) ?: size;
	object->excess_ref = 0;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_hardirq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_serving_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	raw_spin_lock_irqsave(&kmemleak_lock, flags);

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	min_addr = min(min_addr, untagged_ptr);
	max_addr = max(max_addr, untagged_ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer);
		if (untagged_ptr + size <= untagged_objp)
			link = &parent->rb_node.rb_left;
		else if (untagged_objp + parent->size <= untagged_ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			object = NULL;
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	__delete_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area = NULL;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	if (scan_area_cache)
		area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));

	raw_spin_lock_irqsave(&object->lock, flags);
	if (!area) {
		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
		/* mark the object for full scan to avoid false positives */
		object->flags |= OBJECT_FULL_SCAN;
		goto out_unlock;
	}
	if (size == SIZE_MAX) {
		size = object->pointer + object->size - ptr;
	} else if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->excess_ref = excess_ref;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
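
/*
 * Illustrative sketch (not from this file) of how an allocator hooks into
 * kmemleak_alloc(); my_pool_alloc() and my_pool_take() are hypothetical:
 *
 *	void *my_pool_alloc(size_t size)
 *	{
 *		void *obj = my_pool_take(size);
 *
 *		if (obj)
 *			kmemleak_alloc(obj, size, 1, GFP_KERNEL);
 *		return obj;
 *	}
 *
 * With min_count == 1 the object is reported unless at least one pointer to
 * it is found during scanning.
 */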

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
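
/*
 * Sketch of the effect (illustrative): after
 *
 *	struct foo __percpu *p = alloc_percpu(struct foo);
 *
 * the percpu allocator ends up calling kmemleak_alloc_percpu(), so each
 * per_cpu_ptr(p, cpu) gets its own kmemleak_object. With min_count == 0
 * these objects are gray: scanned for pointers they contain, but never
 * themselves reported as leaks.
 */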

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:	pointer to vm_struct
 * @size:	size of the object
 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu)\n", __func__, area, size);

	/*
	 * A min_count = 2 is needed because vm_struct contains a reference to
	 * the virtual address of the vmalloc'ed block.
	 */
	if (kmemleak_enabled) {
		create_object((unsigned long)area->addr, size, 2, gfp);
		object_set_excess_ref((unsigned long)area,
				      (unsigned long)area->addr);
	}
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_not_leak);
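
/*
 * Typical (illustrative) use: silence a false positive for an object whose
 * only reference is kept somewhere kmemleak cannot see, e.g. a physical
 * address programmed into a device register (BUF_SIZE, dev_base and REG_DMA
 * are hypothetical):
 *
 *	buf = kmalloc(BUF_SIZE, GFP_KERNEL);
 *	writel(virt_to_phys(buf), dev_base + REG_DMA);
 *	kmemleak_not_leak(buf);
 *
 * The block is still scanned for pointers it may hold to other objects.
 */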

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_ignore);
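
/*
 * Illustrative contrast with kmemleak_not_leak() (sketch, names
 * hypothetical): a buffer holding only raw data, never kernel pointers, can
 * be painted black so the scanner skips it entirely:
 *
 *	fw_blob = kmalloc(blob_size, GFP_KERNEL);
 *	kmemleak_ignore(fw_blob);
 *
 * Prefer kmemleak_not_leak() if the block may contain references to other
 * allocations, since black objects are not scanned at all.
 */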

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
}
EXPORT_SYMBOL(kmemleak_scan_area);
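
/*
 * Illustrative sketch (not from this file): a module keeps live pointers
 * only in one member of a larger structure, so only that member is scanned
 * ("struct foo" and its fields are hypothetical):
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 *	kmemleak_scan_area(&f->ptr_table, sizeof(f->ptr_table), GFP_KERNEL);
 *
 * Random data in the rest of *f can then no longer be mistaken for
 * references, which would otherwise hide real leaks (false negatives).
 */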

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_no_scan);
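
/*
 * Unlike kmemleak_ignore(), an object marked with kmemleak_no_scan() keeps
 * its color and can still be reported as a leak; only the scanning of its
 * contents is skipped. Illustrative sketch (RING_BYTES is hypothetical; the
 * buffer holds DMA data, never kernel pointers):
 *
 *	ring = kmalloc(RING_BYTES, GFP_KERNEL);
 *	kmemleak_no_scan(ring);
 */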

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 * @phys:	physical address of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object.
 *		See kmemleak_alloc()
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
			       gfp_t gfp)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);
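
/*
 * The physical-address variants translate to the lowmem virtual alias and
 * reuse the virtual API; highmem physical ranges have no such alias and are
 * skipped. Illustrative sketch, modeled on how a boot-time allocator might
 * register a block (min_count 0: scanned but never reported):
 *
 *	phys_addr_t pa = memblock_phys_alloc(SZ_4K, SZ_4K);
 *
 *	if (pa)
 *		kmemleak_alloc_phys(pa, SZ_4K, 0, 0);
 */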

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 * @phys:	physical address of the beginning or inside an object. This
 *		also represents the start of the range to be freed
 * @size:	size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
 *			    address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	kasan_disable_current();
	kcsan_disable_current();
	object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
	kasan_enable_current();
	kcsan_enable_current();

	return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
	if (!color_white(object)) {
		/* non-orphan, ignored or new */
		return;
	}

	/*
	 * Increase the object's reference count (number of pointers to the
	 * memory block). If this count reaches the required minimum, the
	 * object's color will become gray and it will be added to the
	 * gray_list.
	 */
	object->count++;
	if (color_gray(object)) {
		/* put_object() called when removing from gray_list */
		WARN_ON(!get_object(object));
		list_add_tail(&object->gray_list, &gray_list);
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;
	unsigned long untagged_ptr;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long pointer;
		unsigned long excess_ref;

		if (scan_should_stop())
			break;

		kasan_disable_current();
		pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
		kasan_enable_current();

		untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
			continue;

		/*
		 * No need for get_object() here since we hold kmemleak_lock.
		 * object->use_count cannot be dropped to 0 while the object
		 * is still present in object_tree_root and object_list
		 * (with updates protected by kmemleak_lock).
		 */
		object = lookup_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned)
			/* self referenced, ignore */
			continue;

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		/* only pass surplus references (object already gray) */
		if (color_gray(object)) {
			excess_ref = object->excess_ref;
			/* no need for update_refs() if object already gray */
		} else {
			excess_ref = 0;
			update_refs(object);
		}
		raw_spin_unlock(&object->lock);

		if (excess_ref) {
			object = lookup_object(excess_ref, 0);
			if (!object)
				continue;
			if (object == scanned)
				/* circular reference, ignore */
				continue;
			raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
			update_refs(object);
			raw_spin_unlock(&object->lock);
		}
	}
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
#ifdef CONFIG_SMP
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}
#endif

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list) ||
	    object->flags & OBJECT_FULL_SCAN) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			raw_spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			raw_spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct zone *zone;
	int __maybe_unused i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_populated_zone(zone) {
		unsigned long start_pfn = zone->zone_start_pfn;
		unsigned long end_pfn = zone_end_pfn(zone);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!page)
				continue;

			/* only scan pages belonging to this zone */
			if (page_zone(page) != zone)
				continue;
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
			if (!(pfn & 63))
				cond_resched();
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		rcu_read_lock();
		for_each_process_thread(g, p) {
			void *stack = try_get_task_stack(p);

			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		}
		rcu_read_unlock();
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;

			if (kmemleak_verbose)
				print_unreferenced(NULL, object);

			new_leaks++;
		}
		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);

		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = READ_ONCE(jiffies_scan_wait);

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	raw_spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans these black objects could
 * potentially contain references to newly allocated objects in the future and
 * we'd end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the tasks stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all current reported unreferenced kmemleak objects as
 *		  grey to ignore printing them, or free all kmemleak objects
 *		  if kmemleak has been disabled.
 *   dump=...	- dump information about the object found at the given address
 */
1778 */
1779static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1780 size_t size, loff_t *ppos)
1781{
1782 char buf[64];
1783 int buf_size;
1784 int ret;
1785
1786 buf_size = min(size, (sizeof(buf) - 1));
1787 if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1788 return -EFAULT;
1789 buf[buf_size] = 0;
1790
1791 ret = mutex_lock_interruptible(&scan_mutex);
1792 if (ret < 0)
1793 return ret;
1794
1795 if (strncmp(buf, "clear", 5) == 0) {
1796 if (kmemleak_enabled)
1797 kmemleak_clear();
1798 else
1799 __kmemleak_do_cleanup();
1800 goto out;
1801 }
1802
1803 if (!kmemleak_enabled) {
1804 ret = -EPERM;
1805 goto out;
1806 }
1807
1808 if (strncmp(buf, "off", 3) == 0)
1809 kmemleak_disable();
1810 else if (strncmp(buf, "stack=on", 8) == 0)
1811 kmemleak_stack_scan = 1;
1812 else if (strncmp(buf, "stack=off", 9) == 0)
1813 kmemleak_stack_scan = 0;
1814 else if (strncmp(buf, "scan=on", 7) == 0)
1815 start_scan_thread();
1816 else if (strncmp(buf, "scan=off", 8) == 0)
1817 stop_scan_thread();
1818 else if (strncmp(buf, "scan=", 5) == 0) {
1819 unsigned secs;
1820 unsigned long msecs;
1821
1822 ret = kstrtouint(buf + 5, 0, &secs);
1823 if (ret < 0)
1824 goto out;
1825
1826 msecs = secs * MSEC_PER_SEC;
1827 if (msecs > UINT_MAX)
1828 msecs = UINT_MAX;
1829
1830 stop_scan_thread();
1831 if (msecs) {
1832 WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
1833 start_scan_thread();
1834 }
1835 } else if (strncmp(buf, "scan", 4) == 0)
1836 kmemleak_scan();
1837 else if (strncmp(buf, "dump=", 5) == 0)
1838 ret = dump_str_object_info(buf + 5);
1839 else
1840 ret = -EINVAL;
1841
1842out:
1843 mutex_unlock(&scan_mutex);
1844 if (ret < 0)
1845 return ret;
1846
1847 /* ignore the rest of the buffer, only one command at a time */
1848 *ppos += size;
1849 return size;
1850}
1851
1852static const struct file_operations kmemleak_fops = {
1853 .owner = THIS_MODULE,
1854 .open = kmemleak_open,
1855 .read = seq_read,
1856 .write = kmemleak_write,
1857 .llseek = seq_lseek,
1858 .release = seq_release,
1859};
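
/*
 * A user-space sketch of driving the interface above: trigger a scan, dump
 * any reports, then reset the baseline with "clear". Illustrative only and
 * not part of the kernel tree; it assumes debugfs is mounted at
 * /sys/kernel/debug and the kernel was built with CONFIG_DEBUG_KMEMLEAK=y.
 * The block is compiled out.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        ssize_t n;
        int fd = open("/sys/kernel/debug/kmemleak", O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* blocks until the scan completes, like "echo scan > kmemleak" */
        if (write(fd, "scan", 4) != 4)
                perror("write scan");
        /* print any reported leaks, like "cat kmemleak" */
        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, n, stdout);
        /* paint reported objects grey, like "echo clear > kmemleak" */
        if (write(fd, "clear", 5) != 5)
                perror("write clear");
        close(fd);
        return 0;
}
#endif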

static void __kmemleak_do_cleanup(void)
{
        struct kmemleak_object *object, *tmp;

        /*
         * Kmemleak has already been disabled, so there is no need for RCU
         * list traversal or for holding kmemleak_lock.
         */
        list_for_each_entry_safe(object, tmp, &object_list, object_list) {
                __remove_object(object);
                __delete_object(object);
        }
}

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no memory leaks were found (otherwise, kmemleak may still hold useful
 * information on the leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
        stop_scan_thread();

        mutex_lock(&scan_mutex);
        /*
         * Once the scan thread is guaranteed to have stopped, it is safe to
         * stop tracking object freeing. The ordering between the scan thread
         * stopping and the memory accesses below is guaranteed by the
         * kthread_stop() function.
         */
        kmemleak_free_enabled = 0;
        mutex_unlock(&scan_mutex);

        if (!kmemleak_found_leaks)
                __kmemleak_do_cleanup();
        else
                pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
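
/*
 * The DECLARE_WORK()/schedule_work() pairing above is the standard way to
 * push sleeping work (taking mutexes, kthread_stop(), freeing memory) out
 * of a caller that may run in atomic context. A minimal sketch of the same
 * shape; the demo_* names are hypothetical and the block is compiled out.
 */
#if 0
#include <linux/workqueue.h>

static void demo_teardown(struct work_struct *work)
{
        /* runs in process context: may sleep, take mutexes, stop kthreads */
}

static DECLARE_WORK(demo_teardown_work, demo_teardown);

static void demo_trigger_teardown(void)
{
        schedule_work(&demo_teardown_work);     /* safe from atomic context */
}
#endif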

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
        /* atomically check whether it was already invoked */
        if (cmpxchg(&kmemleak_error, 0, 1))
                return;

        /* stop any memory operation tracing */
        kmemleak_enabled = 0;

        /* check whether it is too early for a kernel thread */
        if (kmemleak_initialized)
                schedule_work(&cleanup_work);
        else
                kmemleak_free_enabled = 0;

        pr_info("Kernel memory leak detector disabled\n");
}
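
/*
 * The cmpxchg() at the top of kmemleak_disable() is a lock-free run-once
 * idiom: exactly one caller observes the 0 -> 1 transition and performs the
 * shutdown; every later or racing caller sees a non-zero return and bails
 * out. A minimal sketch of the pattern; the demo_* names are hypothetical
 * and the block is compiled out.
 */
#if 0
static int demo_shut_down;

static void demo_shutdown_once(void)
{
        /* cmpxchg() returns the old value: non-zero means we lost the race */
        if (cmpxchg(&demo_shut_down, 0, 1))
                return;

        /* ... one-time teardown, guaranteed to run exactly once ... */
}
#endif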

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int __init kmemleak_boot_config(char *str)
{
        if (!str)
                return -EINVAL;
        if (strcmp(str, "off") == 0)
                kmemleak_disable();
        else if (strcmp(str, "on") == 0)
                kmemleak_skip_disable = 1;
        else
                return -EINVAL;
        return 0;
}
early_param("kmemleak", kmemleak_boot_config);
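
/*
 * early_param() handlers run while the kernel command line is parsed, well
 * before initcalls, which is why the "kmemleak=off"/"kmemleak=on" decision
 * above can take effect before kmemleak_init(). A minimal sketch of the
 * same pattern for a hypothetical "demo_feature=" parameter; compiled out.
 */
#if 0
#include <linux/init.h>
#include <linux/string.h>

static bool demo_feature_off __initdata;

/* handles e.g. booting with "demo_feature=off" on the kernel command line */
static int __init demo_boot_config(char *str)
{
        if (!str)
                return -EINVAL;
        if (strcmp(str, "off") == 0)
                demo_feature_off = true;
        return 0;
}
early_param("demo_feature", demo_boot_config);
#endif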

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
        if (!kmemleak_skip_disable) {
                kmemleak_disable();
                return;
        }
#endif

        if (kmemleak_error)
                return;

        jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
        jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

        object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
        scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

        /* register the data/bss sections */
        create_object((unsigned long)_sdata, _edata - _sdata,
                      KMEMLEAK_GREY, GFP_ATOMIC);
        create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
                      KMEMLEAK_GREY, GFP_ATOMIC);
        /* only register .data..ro_after_init if not within .data */
        if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
                create_object((unsigned long)__start_ro_after_init,
                              __end_ro_after_init - __start_ro_after_init,
                              KMEMLEAK_GREY, GFP_ATOMIC);
}
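
/*
 * kmemleak_init() seeds the scanner with grey root objects for the
 * .data/.bss sections; allocators outside the slab and page allocators keep
 * kmemleak informed through the kmemleak_alloc()/kmemleak_free() hooks in
 * the same way. A minimal sketch for a hypothetical pool allocator;
 * demo_pool_carve()/demo_pool_release() are made-up back-ends and the
 * block is compiled out.
 */
#if 0
#include <linux/kmemleak.h>

static void *demo_pool_alloc(size_t size)
{
        void *ptr = demo_pool_carve(size);      /* hypothetical back-end */

        /* min_count = 1: report the block if no reference is ever found */
        if (ptr)
                kmemleak_alloc(ptr, size, 1, GFP_KERNEL);
        return ptr;
}

static void demo_pool_free(void *ptr)
{
        kmemleak_free(ptr);                     /* drop the metadata first */
        demo_pool_release(ptr);                 /* hypothetical back-end */
}
#endif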

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
        kmemleak_initialized = 1;

        debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);

        if (kmemleak_error) {
                /*
                 * Some error occurred and kmemleak was disabled. There is a
                 * small chance that kmemleak_disable() was called immediately
                 * after setting kmemleak_initialized and we may end up with
                 * two clean-up threads, but they are serialized by scan_mutex.
                 */
                schedule_work(&cleanup_work);
                return -ENOMEM;
        }

        if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
                mutex_lock(&scan_mutex);
                start_scan_thread();
                mutex_unlock(&scan_mutex);
        }

        pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
                mem_pool_free_count);

        return 0;
}
late_initcall(kmemleak_late_init);