at v2.6.31

/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licencing details see kernel-base/COPYING
 */
#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hash.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	512
#define ODEBUG_POOL_MIN_LEVEL	256

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

struct debug_bucket {
	struct hlist_head	list;
	spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static struct kmem_cache	*obj_cache;

static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

static struct debug_obj_descr	*descr_test  __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static int fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
		return obj_pool_free;

	if (unlikely(!obj_cache))
		return obj_pool_free;

	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {

		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return obj_pool_free;

		spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
		spin_unlock_irqrestore(&pool_lock, flags);
	}
	return obj_pool_free;
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct hlist_node *node;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, node, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj	    = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	spin_unlock(&pool_lock);

	return obj;
}

/*
 * workqueue function to free objects.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		spin_lock_irqsave(&pool_lock, flags);
	}
	spin_unlock_irqrestore(&pool_lock, flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		sched = !work_pending(&debug_obj_work);
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	spin_unlock_irqrestore(&pool_lock, flags);
	if (sched)
		schedule_work(&debug_obj_work);
}

/*
 * We run out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	static int limit;

	if (limit < 5 && obj->descr != descr_test) {
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
		     obj_states[obj->state], obj->descr->name);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static void
debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
		   void * addr, enum debug_obj_state state)
{
	if (fixup)
		debug_objects_fixups += fixup(addr, state);
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		printk(KERN_WARNING
		       "ODEBUG: object is on stack, but not annotated\n");
	else
		printk(KERN_WARNING
		       "ODEBUG: object is not on stack, but annotated\n");
	WARN_ON(1);
}

static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	fill_pool();

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "init");
		state = obj->state;
		spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			debug_print_object(obj, "activate");
			state = obj->state;
			spin_unlock_irqrestore(&db->lock, flags);
			debug_object_fixup(descr->fixup_activate, addr, state);
			return;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			break;
		default:
			break;
		}
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * This happens when a static object is activated. We
	 * let the type specific code decide whether this is
	 * true or not.
	 */
	debug_object_fixup(descr->fixup_activate, addr,
			   ODEBUG_STATE_NOTAVAILABLE);
}

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			obj->state = ODEBUG_STATE_INACTIVE;
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "destroy");
		state = obj->state;
		spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	spin_unlock_irqrestore(&db->lock, flags);
}

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (obj->static_init == 1) {
			debug_object_init(obj, &descr_type_test);
			debug_object_activate(obj, &descr_type_test);
			/*
			 * Real code should return 0 here ! This is
			 * not a fixup of some bad behaviour. We
			 * merily call the debug_init function to keep
			 * track of the object.
			 */
			return 1;
		} else {
			/* Real code needs to emit a warning here */
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return 1;

	default:
		return 0;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static int __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

static int
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		     obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		     fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		     warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the poll list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, node, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
	       obj_pool_used);
	local_irq_enable();
	return 0;
free:
	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents that the debug code is called on kmem_cache_free() for the
 * debug tracker objects to avoid recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	if (!debug_objects_enabled)
		return;

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS, NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		if (obj_cache)
			kmem_cache_destroy(obj_cache);
		printk(KERN_WARNING "ODEBUG: out of memory.\n");
	} else
		debug_objects_selftest();
}
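

For readers unfamiliar with how a subsystem consumes this infrastructure, the sketch below shows the usual pattern: a static struct debug_obj_descr with fixup callbacks, and debug_object_*() calls placed at the object's lifetime transitions. It mirrors the selftest code in this file; struct my_widget, the my_widget_* helpers and my_widget_debug_descr are invented names for illustration and are not part of this file or of any in-tree user.

/*
 * Hypothetical consumer of the debugobjects API (illustration only,
 * not part of lib/debugobjects.c).
 */
#include <linux/kernel.h>
#include <linux/debugobjects.h>

struct my_widget {
	int armed;			/* invented example state */
};

/*
 * Fixup callback invoked by the core when activation hits an object in
 * an unexpected state. Return 1 if the situation was repaired, 0 if not.
 */
static int my_widget_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		/* Unknown object: it was never passed to debug_object_init() */
		WARN_ON(1);
		return 0;
	case ODEBUG_STATE_ACTIVE:
		/* Activated twice in a row: nothing sensible to repair */
		WARN_ON(1);
		return 0;
	default:
		return 0;
	}
}

static struct debug_obj_descr my_widget_debug_descr = {
	.name		= "my_widget",
	.fixup_activate	= my_widget_fixup_activate,
};

static void my_widget_setup(struct my_widget *w)
{
	/* Object becomes known to the tracker: NONE -> INIT */
	debug_object_init(w, &my_widget_debug_descr);
}

static void my_widget_arm(struct my_widget *w)
{
	/* INIT/INACTIVE -> ACTIVE; anything else triggers the fixup above */
	debug_object_activate(w, &my_widget_debug_descr);
	w->armed = 1;
}

static void my_widget_disarm(struct my_widget *w)
{
	w->armed = 0;
	/* ACTIVE -> INACTIVE */
	debug_object_deactivate(w, &my_widget_debug_descr);
}

static void my_widget_release(struct my_widget *w)
{
	/* Drop the tracking entry before the memory is reused */
	debug_object_free(w, &my_widget_debug_descr);
}

Annotating an object type this way is what gives the ODEBUG_STATE_* transitions above something to check against; when CONFIG_DEBUG_OBJECTS is disabled, the debug_object_*() calls in linux/debugobjects.h are empty inline stubs, so the annotations should cost nothing in production builds.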