/*
 * Source: Linux kernel mirror (for testing)
 * git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 */

// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS        14
#define ODEBUG_HASH_SIZE        (1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE        1024
#define ODEBUG_POOL_MIN_LEVEL   256
#define ODEBUG_POOL_PERCPU_SIZE 64
#define ODEBUG_BATCH_SIZE       16

#define ODEBUG_CHUNK_SHIFT      PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE       (1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK       (~(ODEBUG_CHUNK_SIZE - 1))
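
/*
 * Worked example (illustrative): with 4K pages (PAGE_SHIFT == 12),
 * ODEBUG_CHUNK_SIZE is 4096 and ODEBUG_CHUNK_MASK is ~0xfff, so all
 * addresses within one page-sized chunk share the same hash input.
 */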

/*
 * We limit the freeing of debug objects via workqueue to a maximum
 * frequency of 10Hz and about 1024 objects per freeing operation,
 * so at most roughly 10k debug objects are freed per second.
 */
#define ODEBUG_FREE_WORK_MAX    1024
#define ODEBUG_FREE_WORK_DELAY  DIV_ROUND_UP(HZ, 10)
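
/*
 * Worked example (illustrative): with HZ == 250, ODEBUG_FREE_WORK_DELAY
 * is DIV_ROUND_UP(250, 10) == 25 jiffies, i.e. roughly 100ms.
 */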

struct debug_bucket {
        struct hlist_head       list;
        raw_spinlock_t          lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
        struct hlist_head       free_objs;
        int                     obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket      obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj         obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
static int                      obj_pool_min_free = ODEBUG_POOL_SIZE;
static int                      obj_pool_free = ODEBUG_POOL_SIZE;
static int                      obj_pool_used;
static int                      obj_pool_max_used;
static bool                     obj_freeing;
/* The number of objs on the global free list */
static int                      obj_nr_tofree;

static int                      debug_objects_maxchain __read_mostly;
static int __maybe_unused       debug_objects_maxchecked __read_mostly;
static int                      debug_objects_fixups __read_mostly;
static int                      debug_objects_warnings __read_mostly;
static int                      debug_objects_enabled __read_mostly
                                = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int                      debug_objects_pool_size __read_mostly
                                = ODEBUG_POOL_SIZE;
static int                      debug_objects_pool_min_level __read_mostly
                                = ODEBUG_POOL_MIN_LEVEL;
static const struct debug_obj_descr *descr_test __read_mostly;
static struct kmem_cache        *obj_cache __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int                      debug_objects_allocated;
static int                      debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
        debug_objects_enabled = 1;
        return 0;
}

static int __init disable_object_debug(char *str)
{
        debug_objects_enabled = 0;
        return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
        [ODEBUG_STATE_NONE]             = "none",
        [ODEBUG_STATE_INIT]             = "initialized",
        [ODEBUG_STATE_INACTIVE]         = "inactive",
        [ODEBUG_STATE_ACTIVE]           = "active",
        [ODEBUG_STATE_DESTROYED]        = "destroyed",
        [ODEBUG_STATE_NOTAVAILABLE]     = "not available",
};

static void fill_pool(void)
{
        gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
        struct debug_obj *obj;
        unsigned long flags;

        if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
                return;

        /*
         * Reuse objs from the global free list; they will be reinitialized
         * when allocating.
         *
         * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
         * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
         * sections.
         */
        while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
                raw_spin_lock_irqsave(&pool_lock, flags);
                /*
                 * Recheck with the lock held as the worker thread might have
                 * won the race and freed the global free list already.
                 */
                while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
                        obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
                        hlist_del(&obj->node);
                        WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
                        hlist_add_head(&obj->node, &obj_pool);
                        WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                }
                raw_spin_unlock_irqrestore(&pool_lock, flags);
        }

        if (unlikely(!obj_cache))
                return;

        while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
                struct debug_obj *new[ODEBUG_BATCH_SIZE];
                int cnt;

                for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
                        new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
                        if (!new[cnt])
                                break;
                }
                if (!cnt)
                        return;

                raw_spin_lock_irqsave(&pool_lock, flags);
                while (cnt) {
                        hlist_add_head(&new[--cnt]->node, &obj_pool);
                        debug_objects_allocated++;
                        WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                }
                raw_spin_unlock_irqrestore(&pool_lock, flags);
        }
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
        struct debug_obj *obj;
        int cnt = 0;

        hlist_for_each_entry(obj, &b->list, node) {
                cnt++;
                if (obj->object == addr)
                        return obj;
        }
        if (cnt > debug_objects_maxchain)
                debug_objects_maxchain = cnt;

        return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
        struct debug_obj *obj = NULL;

        if (list->first) {
                obj = hlist_entry(list->first, typeof(*obj), node);
                hlist_del(&obj->node);
        }

        return obj;
}

static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
        struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
        struct debug_obj *obj;

        if (likely(obj_cache)) {
                obj = __alloc_object(&percpu_pool->free_objs);
                if (obj) {
                        percpu_pool->obj_free--;
                        goto init_obj;
                }
        }

        raw_spin_lock(&pool_lock);
        obj = __alloc_object(&obj_pool);
        if (obj) {
                obj_pool_used++;
                WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

                /*
                 * Looking ahead, allocate one batch of debug objects and
                 * put them into the percpu free pool.
                 */
                if (likely(obj_cache)) {
                        int i;

                        for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
                                struct debug_obj *obj2;

                                obj2 = __alloc_object(&obj_pool);
                                if (!obj2)
                                        break;
                                hlist_add_head(&obj2->node,
                                               &percpu_pool->free_objs);
                                percpu_pool->obj_free++;
                                obj_pool_used++;
                                WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
                        }
                }

                if (obj_pool_used > obj_pool_max_used)
                        obj_pool_max_used = obj_pool_used;

                if (obj_pool_free < obj_pool_min_free)
                        obj_pool_min_free = obj_pool_free;
        }
        raw_spin_unlock(&pool_lock);

init_obj:
        if (obj) {
                obj->object = addr;
                obj->descr  = descr;
                obj->state  = ODEBUG_STATE_NONE;
                obj->astate = 0;
                hlist_add_head(&obj->node, &b->list);
        }
        return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
        struct hlist_node *tmp;
        struct debug_obj *obj;
        unsigned long flags;
        HLIST_HEAD(tofree);

        WRITE_ONCE(obj_freeing, false);
        if (!raw_spin_trylock_irqsave(&pool_lock, flags))
                return;

        if (obj_pool_free >= debug_objects_pool_size)
                goto free_objs;

        /*
         * The objs on the pool list might be allocated before the work is
         * run, so recheck whether the pool list is full or not; if not,
         * fill the pool list from the global free list. As it is likely
         * that a workload may be gearing up to use more and more objects,
         * don't free any of them until the next round.
         */
        while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
                obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
                hlist_del(&obj->node);
                hlist_add_head(&obj->node, &obj_pool);
                WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
        }
        raw_spin_unlock_irqrestore(&pool_lock, flags);
        return;

free_objs:
        /*
         * Pool list is already full and there are still objs on the free
         * list. Move remaining free objs to a temporary list to free the
         * memory outside the pool_lock held region.
         */
        if (obj_nr_tofree) {
                hlist_move_list(&obj_to_free, &tofree);
                debug_objects_freed += obj_nr_tofree;
                WRITE_ONCE(obj_nr_tofree, 0);
        }
        raw_spin_unlock_irqrestore(&pool_lock, flags);

        hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
                hlist_del(&obj->node);
                kmem_cache_free(obj_cache, obj);
        }
}

static void __free_object(struct debug_obj *obj)
{
        struct debug_obj *objs[ODEBUG_BATCH_SIZE];
        struct debug_percpu_free *percpu_pool;
        int lookahead_count = 0;
        unsigned long flags;
        bool work;

        local_irq_save(flags);
        if (!obj_cache)
                goto free_to_obj_pool;

        /*
         * Try to free it into the percpu pool first.
         */
        percpu_pool = this_cpu_ptr(&percpu_obj_pool);
        if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
                hlist_add_head(&obj->node, &percpu_pool->free_objs);
                percpu_pool->obj_free++;
                local_irq_restore(flags);
                return;
        }

        /*
         * As the percpu pool is full, look ahead and pull out a batch
         * of objects from the percpu pool and free them as well.
         */
        for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
                objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
                if (!objs[lookahead_count])
                        break;
                percpu_pool->obj_free--;
        }

free_to_obj_pool:
        raw_spin_lock(&pool_lock);
        work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
               (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
        obj_pool_used--;

        if (work) {
                WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
                hlist_add_head(&obj->node, &obj_to_free);
                if (lookahead_count) {
                        WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
                        obj_pool_used -= lookahead_count;
                        while (lookahead_count) {
                                hlist_add_head(&objs[--lookahead_count]->node,
                                               &obj_to_free);
                        }
                }

                if ((obj_pool_free > debug_objects_pool_size) &&
                    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
                        int i;

                        /*
                         * Free one more batch of objects from obj_pool.
                         */
                        for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
                                obj = __alloc_object(&obj_pool);
                                hlist_add_head(&obj->node, &obj_to_free);
                                WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
                                WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
                        }
                }
        } else {
                WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                hlist_add_head(&obj->node, &obj_pool);
                if (lookahead_count) {
                        WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
                        obj_pool_used -= lookahead_count;
                        while (lookahead_count) {
                                hlist_add_head(&objs[--lookahead_count]->node,
                                               &obj_pool);
                        }
                }
        }
        raw_spin_unlock(&pool_lock);
        local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
        __free_object(obj);
        if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
                WRITE_ONCE(obj_freeing, true);
                schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
        }
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
        struct debug_percpu_free *percpu_pool;
        struct hlist_node *tmp;
        struct debug_obj *obj;
        unsigned long flags;

        /* Remote access is safe as the CPU is dead already */
        percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
        hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
                hlist_del(&obj->node);
                kmem_cache_free(obj_cache, obj);
        }

        raw_spin_lock_irqsave(&pool_lock, flags);
        obj_pool_used -= percpu_pool->obj_free;
        debug_objects_freed += percpu_pool->obj_free;
        raw_spin_unlock_irqrestore(&pool_lock, flags);

        percpu_pool->obj_free = 0;

        return 0;
}
#endif

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
        struct debug_bucket *db = obj_hash;
        struct hlist_node *tmp;
        HLIST_HEAD(freelist);
        struct debug_obj *obj;
        unsigned long flags;
        int i;

        pr_warn("Out of memory. ODEBUG disabled\n");

        for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                raw_spin_lock_irqsave(&db->lock, flags);
                hlist_move_list(&db->list, &freelist);
                raw_spin_unlock_irqrestore(&db->lock, flags);

                /* Now free them */
                hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
                        hlist_del(&obj->node);
                        free_object(obj);
                }
        }
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
        unsigned long hash;

        hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
        return &obj_hash[hash];
}
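
/*
 * Example (illustrative): two addresses inside the same chunk, e.g. an
 * object at addr and another at addr + 64 on the same page, share the
 * value of (addr >> ODEBUG_CHUNK_SHIFT) and therefore hash to the same
 * bucket. This is what allows __debug_check_no_obj_freed() to scan only
 * the buckets that can contain objects from a freed memory range.
 */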

static void debug_print_object(struct debug_obj *obj, char *msg)
{
        const struct debug_obj_descr *descr = obj->descr;
        static int limit;

        if (limit < 5 && descr != descr_test) {
                void *hint = descr->debug_hint ?
                        descr->debug_hint(obj->object) : NULL;
                limit++;
                WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
                                 "object: %p object type: %s hint: %pS\n",
                        msg, obj_states[obj->state], obj->astate,
                        obj->object, descr->name, hint);
        }
        debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
                   void * addr, enum debug_obj_state state)
{
        if (fixup && fixup(addr, state)) {
                debug_objects_fixups++;
                return true;
        }
        return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
        int is_on_stack;
        static int limit;

        if (limit > 4)
                return;

        is_on_stack = object_is_on_stack(addr);
        if (is_on_stack == onstack)
                return;

        limit++;
        if (is_on_stack)
                pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
                         task_stack_page(current));
        else
                pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
                         task_stack_page(current));

        WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
                                                const struct debug_obj_descr *descr,
                                                bool onstack, bool alloc_ifstatic)
{
        struct debug_obj *obj = lookup_object(addr, b);
        enum debug_obj_state state = ODEBUG_STATE_NONE;

        if (likely(obj))
                return obj;

        /*
         * debug_object_init() unconditionally allocates untracked
         * objects. It does not matter whether it is a static object or
         * not.
         *
         * debug_object_assert_init() and debug_object_activate() allow
         * allocation only if the descriptor callback confirms that the
         * object is static and considered initialized. For non-static
         * objects the allocation needs to be done from the fixup callback.
         */
        if (unlikely(alloc_ifstatic)) {
                if (!descr->is_static_object || !descr->is_static_object(addr))
                        return ERR_PTR(-ENOENT);
                /* Statically allocated objects are considered initialized */
                state = ODEBUG_STATE_INIT;
        }

        obj = alloc_object(addr, b, descr);
        if (likely(obj)) {
                obj->state = state;
                debug_object_is_on_stack(addr, onstack);
                return obj;
        }

        /* Out of memory. Do the cleanup outside of the locked region */
        debug_objects_enabled = 0;
        return NULL;
}

static void debug_objects_fill_pool(void)
{
        /*
         * On RT enabled kernels the pool refill must happen in preemptible
         * context -- for !RT kernels we rely on the fact that spinlock_t and
         * raw_spinlock_t are basically the same type and this lock-type
         * inversion works just fine.
         */
        if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
                /*
                 * Annotate away the spinlock_t inside raw_spinlock_t warning
                 * by temporarily raising the wait-type to WAIT_SLEEP, matching
                 * the preemptible() condition above.
                 */
                static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
                lock_map_acquire_try(&fill_pool_map);
                fill_pool();
                lock_map_release(&fill_pool_map);
        }
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        debug_objects_fill_pool();

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
        if (unlikely(!obj)) {
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_objects_oom();
                return;
        }

        switch (obj->state) {
        case ODEBUG_STATE_NONE:
        case ODEBUG_STATE_INIT:
        case ODEBUG_STATE_INACTIVE:
                obj->state = ODEBUG_STATE_INIT;
                break;

        case ODEBUG_STATE_ACTIVE:
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "init");
                debug_object_fixup(descr->fixup_init, addr, state);
                return;

        case ODEBUG_STATE_DESTROYED:
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "init");
                return;
        default:
                break;
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
        if (!debug_objects_enabled)
                return;

        __debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
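
/*
 * Usage sketch (illustrative; the struct and function names below are
 * hypothetical, not part of this file): a subsystem declares a
 * debug_obj_descr and brackets its object life cycle with these calls,
 * much like the selftest at the bottom of this file does.
 *
 *      static const struct debug_obj_descr my_work_debug_descr = {
 *              .name = "my_work",
 *      };
 *
 *      void my_work_prepare(struct my_work *w)
 *      {
 *              debug_object_init(w, &my_work_debug_descr);
 *      }
 *
 *      void my_work_queue(struct my_work *w)
 *      {
 *              debug_object_activate(w, &my_work_debug_descr);
 *      }
 */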

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *                              initialized
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
        if (!debug_objects_enabled)
                return;

        __debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL if a check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
        struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        int ret;

        if (!debug_objects_enabled)
                return 0;

        debug_objects_fill_pool();

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object_or_alloc(addr, db, descr, false, true);
        if (likely(!IS_ERR_OR_NULL(obj))) {
                bool print_object = false;

                switch (obj->state) {
                case ODEBUG_STATE_INIT:
                case ODEBUG_STATE_INACTIVE:
                        obj->state = ODEBUG_STATE_ACTIVE;
                        ret = 0;
                        break;

                case ODEBUG_STATE_ACTIVE:
                        state = obj->state;
                        raw_spin_unlock_irqrestore(&db->lock, flags);
                        debug_print_object(obj, "activate");
                        ret = debug_object_fixup(descr->fixup_activate, addr, state);
                        return ret ? 0 : -EINVAL;

                case ODEBUG_STATE_DESTROYED:
                        print_object = true;
                        ret = -EINVAL;
                        break;
                default:
                        ret = 0;
                        break;
                }
                raw_spin_unlock_irqrestore(&db->lock, flags);
                if (print_object)
                        debug_print_object(obj, "activate");
                return ret;
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);

        /* If NULL the allocation has hit OOM */
        if (!obj) {
                debug_objects_oom();
                return 0;
        }

        /* Object is neither static nor tracked. It's not initialized */
        debug_print_object(&o, "activate");
        ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
        return ret ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
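
/*
 * Illustrative call site (the descriptor name is hypothetical): unlike
 * most of this API, activation reports failure, so a caller can react:
 *
 *      if (debug_object_activate(obj, &my_debug_descr))
 *              pr_warn("object was in an unexpected state\n");
 */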

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        bool print_object = false;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                switch (obj->state) {
                case ODEBUG_STATE_INIT:
                case ODEBUG_STATE_INACTIVE:
                case ODEBUG_STATE_ACTIVE:
                        if (!obj->astate)
                                obj->state = ODEBUG_STATE_INACTIVE;
                        else
                                print_object = true;
                        break;

                case ODEBUG_STATE_DESTROYED:
                        print_object = true;
                        break;
                default:
                        break;
                }
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (!obj) {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };

                debug_print_object(&o, "deactivate");
        } else if (print_object) {
                debug_print_object(obj, "deactivate");
        }
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        bool print_object = false;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj)
                goto out_unlock;

        switch (obj->state) {
        case ODEBUG_STATE_NONE:
        case ODEBUG_STATE_INIT:
        case ODEBUG_STATE_INACTIVE:
                obj->state = ODEBUG_STATE_DESTROYED;
                break;
        case ODEBUG_STATE_ACTIVE:
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "destroy");
                debug_object_fixup(descr->fixup_destroy, addr, state);
                return;

        case ODEBUG_STATE_DESTROYED:
                print_object = true;
                break;
        default:
                break;
        }
out_unlock:
        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (print_object)
                debug_print_object(obj, "destroy");
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj)
                goto out_unlock;

        switch (obj->state) {
        case ODEBUG_STATE_ACTIVE:
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "free");
                debug_object_fixup(descr->fixup_free, addr, state);
                return;
        default:
                hlist_del(&obj->node);
                raw_spin_unlock_irqrestore(&db->lock, flags);
                free_object(obj);
                return;
        }
out_unlock:
        raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
        struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        debug_objects_fill_pool();

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);
        obj = lookup_object_or_alloc(addr, db, descr, false, true);
        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (likely(!IS_ERR_OR_NULL(obj)))
                return;

        /* If NULL the allocation has hit OOM */
        if (!obj) {
                debug_objects_oom();
                return;
        }

        /* Object is neither tracked nor static. It's not initialized. */
        debug_print_object(&o, "assert_init");
        debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 * @expect:     expected state
 * @next:       state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
                          unsigned int expect, unsigned int next)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        bool print_object = false;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                switch (obj->state) {
                case ODEBUG_STATE_ACTIVE:
                        if (obj->astate == expect)
                                obj->astate = next;
                        else
                                print_object = true;
                        break;

                default:
                        print_object = true;
                        break;
                }
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (!obj) {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };

                debug_print_object(&o, "active_state");
        } else if (print_object) {
                debug_print_object(obj, "active_state");
        }
}
EXPORT_SYMBOL_GPL(debug_object_active_state);

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
        unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
        const struct debug_obj_descr *descr;
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct hlist_node *tmp;
        struct debug_obj *obj;
        int cnt, objs_checked = 0;

        saddr = (unsigned long) address;
        eaddr = saddr + size;
        paddr = saddr & ODEBUG_CHUNK_MASK;
        chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
        chunks >>= ODEBUG_CHUNK_SHIFT;

        for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
                db = get_bucket(paddr);

repeat:
                cnt = 0;
                raw_spin_lock_irqsave(&db->lock, flags);
                hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
                        cnt++;
                        oaddr = (unsigned long) obj->object;
                        if (oaddr < saddr || oaddr >= eaddr)
                                continue;

                        switch (obj->state) {
                        case ODEBUG_STATE_ACTIVE:
                                descr = obj->descr;
                                state = obj->state;
                                raw_spin_unlock_irqrestore(&db->lock, flags);
                                debug_print_object(obj, "free");
                                debug_object_fixup(descr->fixup_free,
                                                   (void *) oaddr, state);
                                goto repeat;
                        default:
                                hlist_del(&obj->node);
                                __free_object(obj);
                                break;
                        }
                }
                raw_spin_unlock_irqrestore(&db->lock, flags);

                if (cnt > debug_objects_maxchain)
                        debug_objects_maxchain = cnt;

                objs_checked += cnt;
        }

        if (objs_checked > debug_objects_maxchecked)
                debug_objects_maxchecked = objs_checked;

        /* Schedule work to actually kmem_cache_free() objects */
        if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
                WRITE_ONCE(obj_freeing, true);
                schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
        }
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
        if (debug_objects_enabled)
                __debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
        int cpu, obj_percpu_free = 0;

        for_each_possible_cpu(cpu)
                obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

        seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
        seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
        seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
        seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
        seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
        seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
        seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
        seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
        seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
        seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
        seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
        seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

static int __init debug_objects_init_debugfs(void)
{
        struct dentry *dbgdir;

        if (!debug_objects_enabled)
                return 0;

        dbgdir = debugfs_create_dir("debug_objects", NULL);

        debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

        return 0;
}
__initcall(debug_objects_init_debugfs);
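
/*
 * Example (illustrative, counter values made up): reading the stats file
 * yields one counter per line in the format produced by debug_stats_show():
 *
 *      # cat /sys/kernel/debug/debug_objects/stats
 *      max_chain     :24
 *      max_checked   :10
 *      warnings      :0
 *      fixups        :0
 *      pool_free     :1336
 *      ...
 *
 * pool_free includes the percpu pools; see the comment above the
 * obj_pool_* counters for why the raw counters can be skewed.
 */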

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
        unsigned long   dummy1[6];
        int             static_init;
        unsigned long   dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
        struct self_test *obj = addr;

        return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_init(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
                return true;
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_activate(obj, &descr_type_test);
                return true;

        default:
                return false;
        }
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_destroy(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_free(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        int res = -EINVAL;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj && state != ODEBUG_STATE_NONE) {
                WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
                goto out;
        }
        if (obj && obj->state != state) {
                WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
                       obj->state, state);
                goto out;
        }
        if (fixups != debug_objects_fixups) {
                WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
                       fixups, debug_objects_fixups);
                goto out;
        }
        if (warnings != debug_objects_warnings) {
                WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
                       warnings, debug_objects_warnings);
                goto out;
        }
        res = 0;
out:
        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (res)
                debug_objects_enabled = 0;
        return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
        .name                   = "selftest",
        .is_static_object       = is_static_object,
        .fixup_init             = fixup_init,
        .fixup_activate         = fixup_activate,
        .fixup_destroy          = fixup_destroy,
        .fixup_free             = fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
        int fixups, oldfixups, warnings, oldwarnings;
        unsigned long flags;

        local_irq_save(flags);

        fixups = oldfixups = debug_objects_fixups;
        warnings = oldwarnings = debug_objects_warnings;
        descr_test = &descr_type_test;

        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
                goto out;
        debug_object_deactivate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
                goto out;
        debug_object_destroy(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
                goto out;
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_deactivate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_free(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
                goto out;

        obj.static_init = 1;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
                goto out;
        debug_object_free(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
                goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        __debug_check_no_obj_freed(&obj, sizeof(obj));
        if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
                goto out;
#endif
        pr_info("selftest passed\n");

out:
        debug_objects_fixups = oldfixups;
        debug_objects_warnings = oldwarnings;
        descr_test = NULL;

        local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
        int i;

        for (i = 0; i < ODEBUG_HASH_SIZE; i++)
                raw_spin_lock_init(&obj_hash[i].lock);

        for (i = 0; i < ODEBUG_POOL_SIZE; i++)
                hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
        struct debug_bucket *db = obj_hash;
        struct hlist_node *tmp;
        struct debug_obj *obj, *new;
        HLIST_HEAD(objects);
        int i, cnt = 0;

        for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
                obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
                if (!obj)
                        goto free;
                hlist_add_head(&obj->node, &objects);
        }

        debug_objects_allocated += i;

        /*
         * debug_objects_mem_init() is called early, while only one CPU is up
         * and interrupts are disabled, so it is safe to replace the active
         * object references.
         */

        /* Remove the statically allocated objects from the pool */
        hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
                hlist_del(&obj->node);
        /* Move the allocated objects to the pool */
        hlist_move_list(&objects, &obj_pool);

        /* Replace the active object references */
        for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                hlist_move_list(&db->list, &objects);

                hlist_for_each_entry(obj, &objects, node) {
                        new = hlist_entry(obj_pool.first, typeof(*obj), node);
                        hlist_del(&new->node);
                        /* copy object data */
                        *new = *obj;
                        hlist_add_head(&new->node, &db->list);
                        cnt++;
                }
        }

        pr_debug("%d of %d active objects replaced\n",
                 cnt, obj_pool_used);
        return 0;
free:
        hlist_for_each_entry_safe(obj, tmp, &objects, node) {
                hlist_del(&obj->node);
                kmem_cache_free(obj_cache, obj);
        }
        return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, avoiding recursive calls.
 */
void __init debug_objects_mem_init(void)
{
        int cpu, extras;

        if (!debug_objects_enabled)
                return;

        /*
         * Initialize the percpu object pools
         *
         * Initialization is not strictly necessary, but was done for
         * completeness.
         */
        for_each_possible_cpu(cpu)
                INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

        obj_cache = kmem_cache_create("debug_objects_cache",
                                      sizeof (struct debug_obj), 0,
                                      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
                                      NULL);

        if (!obj_cache || debug_objects_replace_static_objects()) {
                debug_objects_enabled = 0;
                kmem_cache_destroy(obj_cache);
                pr_warn("out of memory.\n");
                return;
        } else
                debug_objects_selftest();

#ifdef CONFIG_HOTPLUG_CPU
        cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
                                  object_cpu_offline);
#endif

        /*
         * Increase the thresholds for allocating and freeing objects
         * according to the number of possible CPUs available in the system.
         */
        extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
        debug_objects_pool_size += extras;
        debug_objects_pool_min_level += extras;
}