/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	512
#define ODEBUG_POOL_MIN_LEVEL	256

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static struct kmem_cache	*obj_cache;

static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

static struct debug_obj_descr	*descr_test __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

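/*
 * Refill the object pool up to ODEBUG_POOL_MIN_LEVEL. The allocation
 * must not sleep, retry or warn (GFP_ATOMIC | __GFP_NORETRY |
 * __GFP_NOWARN), since this runs in whatever context the tracked
 * object is initialized in; running short here is tolerated and
 * handled by the callers.
 */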
static int fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
		return obj_pool_free;

	if (unlikely(!obj_cache))
		return obj_pool_free;

	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {

		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return obj_pool_free;

		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
	return obj_pool_free;
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct hlist_node *node;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, node, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object. If the pool is empty, the caller switches off
 * the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}

/*
 * workqueue function to free objects.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		sched = keventd_up() && !work_pending(&debug_obj_work);
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	if (sched)
		schedule_work(&debug_obj_work);
}

/*
 * We ran out of memory. That probably means we have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}

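/*
 * Complain about a state machine violation. To avoid flooding the log,
 * only the first five violations are reported; objects belonging to
 * the selftest descriptor are never reported. The warning counter is
 * bumped unconditionally.
 */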
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static void
debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup)
		debug_objects_fixups += fixup(addr, state);
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		printk(KERN_WARNING
		       "ODEBUG: object is on stack, but not annotated\n");
	else
		printk(KERN_WARNING
		       "ODEBUG: object is not on stack, but annotated\n");
	WARN_ON(1);
}

static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}

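/*
 * Typical usage (illustrative sketch, not part of this file): a user
 * such as the workqueue code describes its objects with a
 * struct debug_obj_descr and calls the hooks at the matching points
 * in the object's life time, roughly:
 *
 *	static struct debug_obj_descr work_debug_descr = {
 *		.name = "work_struct",
 *	};
 *
 *	debug_object_init(work, &work_debug_descr);	(object set up)
 *	debug_object_activate(work, &work_debug_descr);	(object queued)
 */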
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_object_fixup(descr->fixup_activate, addr, state);
			return;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			break;
		default:
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * This happens when a static object is activated. We let the
	 * type specific code decide whether this really is a statically
	 * initialized object or a genuine error.
	 */
	debug_object_fixup(descr->fixup_activate, addr,
			   ODEBUG_STATE_NOTAVAILABLE);
}

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}

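/*
 * The object's 'astate' (active state) lets a user track a private
 * state machine of its own while the object is ODEBUG_STATE_ACTIVE;
 * RCU's rcu_head debugging uses this, for instance. Note that
 * debug_object_deactivate() above refuses to deactivate an object
 * whose astate is non zero.
 */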
/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

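/*
 * Walk all hash chunks covered by a freed memory area and get rid of
 * the tracked objects inside it. Objects which are still active are
 * reported and handed to the fixup_free() callback; everything else
 * is unhashed and returned to the pool.
 */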
#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

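/*
 * With debugfs mounted the statistics above are readable under
 * /sys/kernel/debug/debug_objects/stats, e.g. (values illustrative):
 *
 *	# cat /sys/kernel/debug/debug_objects/stats
 *	max_chain     :5
 *	warnings      :0
 *	fixups        :0
 *	pool_free     :512
 *	pool_min_free :256
 *	pool_used     :23
 *	pool_max_used :61
 */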
#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (obj->static_init == 1) {
			debug_object_init(obj, &descr_type_test);
			debug_object_activate(obj, &descr_type_test);
			/*
			 * Real code should return 0 here ! This is
			 * not a fixup of some bad behaviour. We
			 * merely call debug_object_init() to keep
			 * track of the object.
			 */
			return 1;
		} else {
			/* Real code needs to emit a warning here */
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return 1;

	default:
		return 0;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static int __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		     obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		     fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		     warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

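/*
 * Exercise every transition of the object state machine, including
 * the fixup paths, and verify the resulting state and the fixup and
 * warning counters after each step. Any mismatch disables the
 * tracker (see check_results() above).
 */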
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

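/*
 * Boot sequence: debug_objects_early_init() above runs before any
 * allocator is available and lets the tracker work out of the static
 * pool; once the slab allocator is up, debug_objects_mem_init() below
 * switches over to dynamically allocated tracking objects.
 */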
/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, node, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
	       obj_pool_used);
	local_irq_enable();
	return 0;
free:
	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, which would cause recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	if (!debug_objects_enabled)
		return;

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS, NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		if (obj_cache)
			kmem_cache_destroy(obj_cache);
		printk(KERN_WARNING "ODEBUG: out of memory.\n");
	} else
		debug_objects_selftest();
}

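/*
 * Both entry points are invoked during boot from start_kernel() in
 * init/main.c: debug_objects_early_init() early on, and
 * debug_objects_mem_init() once the slab allocator is available.
 */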