Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: Add debugobjects support

4 files changed, 166 insertions(+), 15 deletions(-)
arch/x86/kernel/smpboot.c | +3 -1
···
                 .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
         };

-        INIT_WORK(&c_idle.work, do_fork_idle);
+        INIT_WORK_ON_STACK(&c_idle.work, do_fork_idle);

         alternatives_smp_switch(1);

···

         if (IS_ERR(c_idle.idle)) {
                 printk("failed fork for CPU %d\n", cpu);
+                destroy_work_on_stack(&c_idle.work);
                 return PTR_ERR(c_idle.idle);
         }

···
                 smpboot_restore_warm_reset_vector();
         }

+        destroy_work_on_stack(&c_idle.work);
         return boot_error;
 }
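The smpboot.c hunks follow the general rule for on-stack work items once debugobjects is in play: initialize with INIT_WORK_ON_STACK() so the object is registered as living on the stack, and call destroy_work_on_stack() on every exit path before the containing stack frame disappears. A minimal sketch of that pattern (illustrative only, not part of this commit; struct my_req, my_func() and my_run_and_wait() are made-up names):

        #include <linux/kernel.h>
        #include <linux/completion.h>
        #include <linux/workqueue.h>

        struct my_req {
                struct work_struct work;
                struct completion  done;
        };

        static void my_func(struct work_struct *work)
        {
                struct my_req *req = container_of(work, struct my_req, work);

                /* ... do the actual job ... */
                complete(&req->done);
        }

        static void my_run_and_wait(void)
        {
                struct my_req req = {
                        .done = COMPLETION_INITIALIZER_ONSTACK(req.done),
                };

                INIT_WORK_ON_STACK(&req.work, my_func);  /* registered as an on-stack object */
                schedule_work(&req.work);
                wait_for_completion(&req.done);
                destroy_work_on_stack(&req.work);        /* before req goes out of scope */
        }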
include/linux/workqueue.h | +27 -11
···
 struct work_struct {
         atomic_long_t data;
 #define WORK_STRUCT_PENDING 0           /* T if work item pending execution */
+#define WORK_STRUCT_STATIC  1           /* static initializer (debugobjects) */
 #define WORK_STRUCT_FLAG_MASK (3UL)
 #define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
         struct list_head entry;
···
 };

 #define WORK_DATA_INIT()        ATOMIC_LONG_INIT(0)
+#define WORK_DATA_STATIC_INIT() ATOMIC_LONG_INIT(2)

 struct delayed_work {
         struct work_struct work;
···
 #endif

 #define __WORK_INITIALIZER(n, f) {                              \
-        .data = WORK_DATA_INIT(),                               \
+        .data = WORK_DATA_STATIC_INIT(),                        \
         .entry  = { &(n).entry, &(n).entry },                   \
         .func = (f),                                            \
         __WORK_INIT_LOCKDEP_MAP(#n, &(n))                       \
···
 #define PREPARE_DELAYED_WORK(_work, _func)                      \
         PREPARE_WORK(&(_work)->work, (_func))

+#ifdef CONFIG_DEBUG_OBJECTS_WORK
+extern void __init_work(struct work_struct *work, int onstack);
+extern void destroy_work_on_stack(struct work_struct *work);
+#else
+static inline void __init_work(struct work_struct *work, int onstack) { }
+static inline void destroy_work_on_stack(struct work_struct *work) { }
+#endif
+
 /*
  * initialize all of a work item in one go
  *
···
  * to generate better code.
  */
 #ifdef CONFIG_LOCKDEP
-#define INIT_WORK(_work, _func)                                         \
+#define __INIT_WORK(_work, _func, _onstack)                             \
         do {                                                            \
                 static struct lock_class_key __key;                     \
                                                                         \
+                __init_work((_work), _onstack);                         \
                 (_work)->data = (atomic_long_t) WORK_DATA_INIT();       \
                 lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
                 INIT_LIST_HEAD(&(_work)->entry);                        \
                 PREPARE_WORK((_work), (_func));                         \
         } while (0)
 #else
-#define INIT_WORK(_work, _func)                                         \
+#define __INIT_WORK(_work, _func, _onstack)                             \
         do {                                                            \
+                __init_work((_work), _onstack);                         \
                 (_work)->data = (atomic_long_t) WORK_DATA_INIT();       \
                 INIT_LIST_HEAD(&(_work)->entry);                        \
                 PREPARE_WORK((_work), (_func));                         \
         } while (0)
 #endif
+
+#define INIT_WORK(_work, _func)                                 \
+        do {                                                    \
+                __INIT_WORK((_work), (_func), 0);               \
+        } while (0)
+
+#define INIT_WORK_ON_STACK(_work, _func)                        \
+        do {                                                    \
+                __INIT_WORK((_work), (_func), 1);               \
+        } while (0)

 #define INIT_DELAYED_WORK(_work, _func)                         \
         do {                                                    \
···

 #define INIT_DELAYED_WORK_ON_STACK(_work, _func)                \
         do {                                                    \
-                INIT_WORK(&(_work)->work, (_func));             \
+                INIT_WORK_ON_STACK(&(_work)->work, (_func));    \
                 init_timer_on_stack(&(_work)->timer);           \
         } while (0)

-#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)                      \
+#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)              \
         do {                                                    \
                 INIT_WORK(&(_work)->work, (_func));             \
                 init_timer_deferrable(&(_work)->timer);         \
-        } while (0)
-
-#define INIT_DELAYED_WORK_ON_STACK(_work, _func)                \
-        do {                                                    \
-                INIT_WORK(&(_work)->work, (_func));             \
-                init_timer_on_stack(&(_work)->timer);           \
         } while (0)

 /**
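The static-initializer plumbing above is what lets debugobjects cope with work items that never go through INIT_WORK(): __WORK_INITIALIZER() now seeds work->data with WORK_DATA_STATIC_INIT(), i.e. ATOMIC_LONG_INIT(2), which sets bit 1 (WORK_STRUCT_STATIC). The fixup code in kernel/workqueue.c below keys off that bit. A short sketch of the static case (illustrative only; my_handler, my_work and poke() are made-up names):

        #include <linux/workqueue.h>

        static void my_handler(struct work_struct *work)
        {
                /* ... */
        }

        /* work->data starts as WORK_DATA_STATIC_INIT(), so WORK_STRUCT_STATIC is set */
        static DECLARE_WORK(my_work, my_handler);

        static void poke(void)
        {
                /*
                 * The first activation finds an object debugobjects has never
                 * seen; work_fixup_activate() notices the static bit and
                 * quietly registers the work instead of warning about it.
                 */
                schedule_work(&my_work);
        }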
kernel/workqueue.c | +128 -3
···
 #endif
 };

+#ifdef CONFIG_DEBUG_OBJECTS_WORK
+
+static struct debug_obj_descr work_debug_descr;
+
+/*
+ * fixup_init is called when:
+ * - an active object is initialized
+ */
+static int work_fixup_init(void *addr, enum debug_obj_state state)
+{
+        struct work_struct *work = addr;
+
+        switch (state) {
+        case ODEBUG_STATE_ACTIVE:
+                cancel_work_sync(work);
+                debug_object_init(work, &work_debug_descr);
+                return 1;
+        default:
+                return 0;
+        }
+}
+
+/*
+ * fixup_activate is called when:
+ * - an active object is activated
+ * - an unknown object is activated (might be a statically initialized object)
+ */
+static int work_fixup_activate(void *addr, enum debug_obj_state state)
+{
+        struct work_struct *work = addr;
+
+        switch (state) {
+
+        case ODEBUG_STATE_NOTAVAILABLE:
+                /*
+                 * This is not really a fixup. The work struct was
+                 * statically initialized. We just make sure that it
+                 * is tracked in the object tracker.
+                 */
+                if (test_bit(WORK_STRUCT_STATIC, work_data_bits(work))) {
+                        debug_object_init(work, &work_debug_descr);
+                        debug_object_activate(work, &work_debug_descr);
+                        return 0;
+                }
+                WARN_ON_ONCE(1);
+                return 0;
+
+        case ODEBUG_STATE_ACTIVE:
+                WARN_ON(1);
+
+        default:
+                return 0;
+        }
+}
+
+/*
+ * fixup_free is called when:
+ * - an active object is freed
+ */
+static int work_fixup_free(void *addr, enum debug_obj_state state)
+{
+        struct work_struct *work = addr;
+
+        switch (state) {
+        case ODEBUG_STATE_ACTIVE:
+                cancel_work_sync(work);
+                debug_object_free(work, &work_debug_descr);
+                return 1;
+        default:
+                return 0;
+        }
+}
+
+static struct debug_obj_descr work_debug_descr = {
+        .name           = "work_struct",
+        .fixup_init     = work_fixup_init,
+        .fixup_activate = work_fixup_activate,
+        .fixup_free     = work_fixup_free,
+};
+
+static inline void debug_work_activate(struct work_struct *work)
+{
+        debug_object_activate(work, &work_debug_descr);
+}
+
+static inline void debug_work_deactivate(struct work_struct *work)
+{
+        debug_object_deactivate(work, &work_debug_descr);
+}
+
+void __init_work(struct work_struct *work, int onstack)
+{
+        if (onstack)
+                debug_object_init_on_stack(work, &work_debug_descr);
+        else
+                debug_object_init(work, &work_debug_descr);
+}
+EXPORT_SYMBOL_GPL(__init_work);
+
+void destroy_work_on_stack(struct work_struct *work)
+{
+        debug_object_free(work, &work_debug_descr);
+}
+EXPORT_SYMBOL_GPL(destroy_work_on_stack);
+
+#else
+static inline void debug_work_activate(struct work_struct *work) { }
+static inline void debug_work_deactivate(struct work_struct *work) { }
+#endif
+
 /* Serializes the accesses to the list of workqueues. */
 static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
···
 {
         unsigned long flags;

+        debug_work_activate(work);
         spin_lock_irqsave(&cwq->lock, flags);
         insert_work(cwq, work, &cwq->worklist);
         spin_unlock_irqrestore(&cwq->lock, flags);
···
                 struct lockdep_map lockdep_map = work->lockdep_map;
 #endif
                 trace_workqueue_execution(cwq->thread, work);
+                debug_work_deactivate(work);
                 cwq->current_work = work;
                 list_del_init(cwq->worklist.next);
                 spin_unlock_irq(&cwq->lock);
···
 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                         struct wq_barrier *barr, struct list_head *head)
 {
-        INIT_WORK(&barr->work, wq_barrier_func);
+        /*
+         * debugobject calls are safe here even with cwq->lock locked
+         * as we know for sure that this will not trigger any of the
+         * checks and call back into the fixup functions where we
+         * might deadlock.
+         */
+        INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
         __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

         init_completion(&barr->done);

+        debug_work_activate(&barr->work);
         insert_work(cwq, &barr->work, head);
 }
···
         }
         spin_unlock_irq(&cwq->lock);

-        if (active)
+        if (active) {
                 wait_for_completion(&barr.done);
+                destroy_work_on_stack(&barr.work);
+        }

         return active;
 }
···
                 return 0;

         wait_for_completion(&barr.done);
+        destroy_work_on_stack(&barr.work);
         return 1;
 }
 EXPORT_SYMBOL_GPL(flush_work);
···
          */
         smp_rmb();
         if (cwq == get_wq_data(work)) {
+                debug_work_deactivate(work);
                 list_del_init(&work->entry);
                 ret = 1;
         }
···
         }
         spin_unlock_irq(&cwq->lock);

-        if (unlikely(running))
+        if (unlikely(running)) {
                 wait_for_completion(&barr.done);
+                destroy_work_on_stack(&barr.work);
+        }
 }

 static void wait_on_work(struct work_struct *work)
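A fixup callback returns 1 when it could repair the situation (here by cancel_work_sync() on a still-active work item) and 0 otherwise; work_debug_descr ties the three callbacks to every tracked work_struct. The class of bug this is aimed at is freeing or re-initializing memory that still holds a pending work item. A sketch of that situation (illustrative only, not from this patch; struct my_dev and my_dev_release() are made-up names, and catching the kfree() case additionally needs CONFIG_DEBUG_OBJECTS_FREE):

        #include <linux/slab.h>
        #include <linux/workqueue.h>

        struct my_dev {
                struct work_struct work;
                /* ... */
        };

        static void my_dev_release(struct my_dev *dev)
        {
                /*
                 * Correct code cancels the work before freeing.  If this call
                 * is forgotten and the work is still pending, debugobjects
                 * reports it and work_fixup_free() cancels the work before the
                 * memory is reused, instead of letting the workqueue run over
                 * freed memory.
                 */
                cancel_work_sync(&dev->work);
                kfree(dev);
        }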
lib/Kconfig.debug | +8
···
           timer routines to track the life time of timer objects and
           validate the timer operations.

+config DEBUG_OBJECTS_WORK
+        bool "Debug work objects"
+        depends on DEBUG_OBJECTS
+        help
+          If you say Y here, additional code will be inserted into the
+          work queue routines to track the life time of work objects and
+          validate the work operations.

 config DEBUG_OBJECTS_ENABLE_DEFAULT
         int "debug_objects bootup default value (0-1)"
         range 0 1
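As with the other debugobjects users in this file, the new option is gated on CONFIG_DEBUG_OBJECTS itself; only with both enabled are the debug_work_activate()/debug_work_deactivate() hooks and the fixup callbacks added to kernel/workqueue.c above compiled in.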