/* lib/debugobjects.c — Linux kernel, tag v2.6.26 (web-viewer header line removed) */
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licencing details see kernel-base/COPYING
 */
#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hash.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

/* Size of the static boot-time pool and the refill watermark. */
#define ODEBUG_POOL_SIZE	512
#define ODEBUG_POOL_MIN_LEVEL	256

/* Tracked addresses are hashed by their page-sized chunk. */
#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/* One hash bucket: list of tracker objects, protected by its own lock. */
struct debug_bucket {
	struct hlist_head	list;
	spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

/* Static pool used until the slab cache (obj_cache) is available. */
static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE];

/* Protects obj_pool and the obj_pool_* counters below. */
static DEFINE_SPINLOCK(pool_lock);

/* Free list of tracker objects. */
static HLIST_HEAD(obj_pool);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static struct kmem_cache	*obj_cache;

/* Statistics, exported via debugfs. */
static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly;
/* Points to the selftest descriptor while the selftest runs; used to
 * suppress warning output for selftest-owned objects. */
static struct debug_obj_descr	*descr_test __read_mostly;

/* Boot parameter "debug_objects" switches the infrastructure on. */
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}
early_param("debug_objects", enable_object_debug);

/* Human-readable state names for the warning messages. */
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

/*
 * Refill the free pool from the slab cache up to ODEBUG_POOL_MIN_LEVEL.
 * Uses GFP_ATOMIC (no retry, no warning) as it may be called from any
 * context. Does nothing before obj_cache exists. Returns the current
 * number of free pool objects.
 */
static int fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
		return obj_pool_free;

	if (unlikely(!obj_cache))
		return obj_pool_free;

	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {

		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return obj_pool_free;

		spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
		spin_unlock_irqrestore(&pool_lock, flags);
	}
	return obj_pool_free;
}

/*
 * Lookup an object in the hash bucket. Must be called with the bucket
 * lock held. Also records the longest chain walked, for the statistics.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct hlist_node *node;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, node, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}
/*
 * Allocate a new object from the pool and link it into the bucket. If
 * the pool is empty, NULL is returned and the caller switches off the
 * debugger. Must be called with the bucket lock held; pool_lock nests
 * inside it (interrupts already disabled by the caller's irqsave).
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr = descr;
		obj->state = ODEBUG_STATE_NONE;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		/* Track high-water marks for the debugfs statistics. */
		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	spin_unlock(&pool_lock);

	return obj;
}

/*
 * Put the object back into the pool or give it back to kmem_cache:
 * objects from the static pool (idx < ODEBUG_POOL_SIZE) and objects
 * needed to keep the pool at its nominal size go back on the free
 * list; the rest are returned to the slab cache.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long idx = (unsigned long)(obj - obj_static_pool);

	if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
		spin_lock(&pool_lock);
		hlist_add_head(&obj->node, &obj_pool);
		obj_pool_free++;
		obj_pool_used--;
		spin_unlock(&pool_lock);
	} else {
		spin_lock(&pool_lock);
		obj_pool_used--;
		spin_unlock(&pool_lock);
		kmem_cache_free(obj_cache, obj);
	}
}

/*
 * We run out of memory. That means we probably have tons of objects
 * allocated. Flush every tracked object back to the pool; the caller
 * (__debug_object_init) has already disabled the debugger.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
		spin_unlock_irqrestore(&db->lock, flags);
	}
}
/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}

/*
 * Emit a warning about an object in an unexpected state. Output is
 * rate-limited to 5 messages and suppressed for selftest objects;
 * the warning counter is bumped unconditionally.
 */
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	static int limit;

	if (limit < 5 && obj->descr != descr_test) {
		limit++;
		printk(KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
		       obj_states[obj->state], obj->descr->name);
		WARN_ON(1);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output. Invokes the type-specific fixup callback (if any) and
 * accumulates the number of successful fixups.
 */
static void
debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
		   void * addr, enum debug_obj_state state)
{
	if (fixup)
		debug_objects_fixups += fixup(addr, state);
}

/*
 * Warn (rate-limited to 5 messages) when an object's actual location
 * relative to the current task stack disagrees with the on-stack
 * annotation the caller used.
 */
static void debug_object_is_on_stack(void *addr, int onstack)
{
	void *stack = current->stack;
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = (addr >= stack && addr < (stack + THREAD_SIZE));

	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		printk(KERN_WARNING
		       "ODEBUG: object is on stack, but not annotated\n");
	else
		printk(KERN_WARNING
		       "ODEBUG: object is not on stack, but annotated\n");
	WARN_ON(1);
}

/*
 * Common init path: track the object and move it to INIT state. On pool
 * exhaustion the debugger is disabled and all buckets are flushed. For
 * an init on an ACTIVE object the bucket lock is dropped before calling
 * the fixup, as fixups call back into this code.
 */
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/* Top up the free pool before taking any bucket lock. */
	fill_pool();

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "init");
		state = obj->state;
		spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * An untracked object is assumed to be statically initialized; the
 * fixup callback decides with state ODEBUG_STATE_NOTAVAILABLE.
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			debug_print_object(obj, "activate");
			state = obj->state;
			/* Drop the lock: the fixup re-enters this code. */
			spin_unlock_irqrestore(&db->lock, flags);
			debug_object_fixup(descr->fixup_activate, addr, state);
			return;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			break;
		default:
			break;
		}
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * This happens when a static object is activated. We
	 * let the type specific code decide whether this is
	 * true or not.
	 */
	debug_object_fixup(descr->fixup_activate, addr,
			   ODEBUG_STATE_NOTAVAILABLE);
}

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			obj->state = ODEBUG_STATE_INACTIVE;
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		/* Untracked object: warn with a synthetic stack object. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Destroying an ACTIVE object triggers the fixup_destroy callback with
 * the bucket lock dropped, as fixups call back into this code.
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "destroy");
		state = obj->state;
		spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Freeing an ACTIVE object is a bug: warn and run fixup_free. Otherwise
 * the tracker object is unlinked and returned to the pool.
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		free_object(obj);
		break;
	}
out_unlock:
	spin_unlock_irqrestore(&db->lock, flags);
}
#ifdef CONFIG_DEBUG_OBJECTS_FREE
/*
 * Scan every chunk covered by the freed memory region [address,
 * address + size) and drop all tracker objects for addresses inside
 * it. ACTIVE objects are warned about and handed to fixup_free with
 * the bucket lock dropped; the scan is then restarted, because the
 * fixup may have modified the list.
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *node, *tmp;
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	/* Number of chunks touched by the region, rounded up. */
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				free_object(obj);
				break;
			}
		}
		spin_unlock_irqrestore(&db->lock, flags);
		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}

/* Public entry point: no-op unless the debugger is enabled. */
void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

/* Dump the counters for /sys/kernel/debug/debug_objects/stats. */
static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Create debug_objects/stats in debugfs; runs as an initcall. */
static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (obj->static_init == 1) {
			debug_object_init(obj, &descr_type_test);
			debug_object_activate(obj, &descr_type_test);
			/*
			 * Real code should return 0 here ! This is
			 * not a fixup of some bad behaviour. We
			 * merily call the debug_init function to keep
			 * track of the object.
			 */
			return 1;
		} else {
			/* Real code needs to emit a warning here */
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return 1;

	default:
		return 0;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static int __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * Verify that the test object is in the expected state and that the
 * fixup/warning counters match the expected values. On mismatch the
 * debugger is disabled and -EINVAL returned.
 */
static int
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		printk(KERN_ERR "ODEBUG: selftest object not found\n");
		WARN_ON(1);
		goto out;
	}
	if (obj && obj->state != state) {
		printk(KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		WARN_ON(1);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		printk(KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		WARN_ON(1);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		printk(KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		WARN_ON(1);
		goto out;
	}
	res = 0;
out:
	spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

/*
 * Walk the test object through every state transition, including the
 * illegal ones, and verify counters after each step. Counters and
 * descr_test are restored on exit so real usage is unaffected.
 */
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif
856 */ 857void __init debug_objects_early_init(void) 858{ 859 int i; 860 861 for (i = 0; i < ODEBUG_HASH_SIZE; i++) 862 spin_lock_init(&obj_hash[i].lock); 863 864 for (i = 0; i < ODEBUG_POOL_SIZE; i++) 865 hlist_add_head(&obj_static_pool[i].node, &obj_pool); 866} 867 868/* 869 * Called after the kmem_caches are functional to setup a dedicated 870 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag 871 * prevents that the debug code is called on kmem_cache_free() for the 872 * debug tracker objects to avoid recursive calls. 873 */ 874void __init debug_objects_mem_init(void) 875{ 876 if (!debug_objects_enabled) 877 return; 878 879 obj_cache = kmem_cache_create("debug_objects_cache", 880 sizeof (struct debug_obj), 0, 881 SLAB_DEBUG_OBJECTS, NULL); 882 883 if (!obj_cache) 884 debug_objects_enabled = 0; 885 else 886 debug_objects_selftest(); 887}