···
 		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
 	};
 
-	INIT_WORK(&c_idle.work, do_fork_idle);
+	INIT_WORK_ON_STACK(&c_idle.work, do_fork_idle);
 
 	alternatives_smp_switch(1);
 
···
 
 	if (IS_ERR(c_idle.idle)) {
 		printk("failed fork for CPU %d\n", cpu);
+		destroy_work_on_stack(&c_idle.work);
 		return PTR_ERR(c_idle.idle);
 	}
 
···
 		smpboot_restore_warm_reset_vector();
 	}
 
+	destroy_work_on_stack(&c_idle.work);
 	return boot_error;
 }
include/linux/workqueue.h (+27, -11)
···
 struct work_struct {
 	atomic_long_t data;
 #define WORK_STRUCT_PENDING 0		/* T if work item pending execution */
+#define WORK_STRUCT_STATIC  1		/* static initializer (debugobjects) */
 #define WORK_STRUCT_FLAG_MASK (3UL)
 #define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
 	struct list_head entry;
···
 };
 
 #define WORK_DATA_INIT()	ATOMIC_LONG_INIT(0)
+#define WORK_DATA_STATIC_INIT()	ATOMIC_LONG_INIT(2)
 
 struct delayed_work {
 	struct work_struct work;
···
 #endif
 
 #define __WORK_INITIALIZER(n, f) {				\
-	.data = WORK_DATA_INIT(),				\
+	.data = WORK_DATA_STATIC_INIT(),			\
 	.entry	= { &(n).entry, &(n).entry },			\
 	.func = (f),						\
 	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
···
 #define PREPARE_DELAYED_WORK(_work, _func)			\
 	PREPARE_WORK(&(_work)->work, (_func))
 
+#ifdef CONFIG_DEBUG_OBJECTS_WORK
+extern void __init_work(struct work_struct *work, int onstack);
+extern void destroy_work_on_stack(struct work_struct *work);
+#else
+static inline void __init_work(struct work_struct *work, int onstack) { }
+static inline void destroy_work_on_stack(struct work_struct *work) { }
+#endif
+
 /*
  * initialize all of a work item in one go
  *
···
  * to generate better code.
  */
 #ifdef CONFIG_LOCKDEP
-#define INIT_WORK(_work, _func)						\
+#define __INIT_WORK(_work, _func, _onstack)				\
 	do {								\
 		static struct lock_class_key __key;			\
 									\
+		__init_work((_work), _onstack);				\
 		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
 		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
 		INIT_LIST_HEAD(&(_work)->entry);			\
 		PREPARE_WORK((_work), (_func));				\
 	} while (0)
 #else
-#define INIT_WORK(_work, _func)						\
+#define __INIT_WORK(_work, _func, _onstack)				\
 	do {								\
+		__init_work((_work), _onstack);				\
 		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
 		INIT_LIST_HEAD(&(_work)->entry);			\
 		PREPARE_WORK((_work), (_func));				\
 	} while (0)
 #endif
+
+#define INIT_WORK(_work, _func)					\
+	do {							\
+		__INIT_WORK((_work), (_func), 0);		\
+	} while (0)
+
+#define INIT_WORK_ON_STACK(_work, _func)			\
+	do {							\
+		__INIT_WORK((_work), (_func), 1);		\
+	} while (0)
 
 #define INIT_DELAYED_WORK(_work, _func)				\
 	do {							\
···
 
 #define INIT_DELAYED_WORK_ON_STACK(_work, _func)		\
 	do {							\
-		INIT_WORK(&(_work)->work, (_func));		\
+		INIT_WORK_ON_STACK(&(_work)->work, (_func));	\
 		init_timer_on_stack(&(_work)->timer);		\
 	} while (0)
 
-#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)			\
+#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)		\
 	do {							\
 		INIT_WORK(&(_work)->work, (_func));		\
 		init_timer_deferrable(&(_work)->timer);		\
-	} while (0)
-
-#define INIT_DELAYED_WORK_ON_STACK(_work, _func)		\
-	do {							\
-		INIT_WORK(&(_work)->work, (_func));		\
-		init_timer_on_stack(&(_work)->timer);		\
 	} while (0)
 
 /**
kernel/workqueue.c (+128, -3)
···
 #endif
 };
 
+#ifdef CONFIG_DEBUG_OBJECTS_WORK
+
+static struct debug_obj_descr work_debug_descr;
+
+/*
+ * fixup_init is called when:
+ * - an active object is initialized
+ */
+static int work_fixup_init(void *addr, enum debug_obj_state state)
+{
+	struct work_struct *work = addr;
+
+	switch (state) {
+	case ODEBUG_STATE_ACTIVE:
+		cancel_work_sync(work);
+		debug_object_init(work, &work_debug_descr);
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/*
+ * fixup_activate is called when:
+ * - an active object is activated
+ * - an unknown object is activated (might be a statically initialized object)
+ */
+static int work_fixup_activate(void *addr, enum debug_obj_state state)
+{
+	struct work_struct *work = addr;
+
+	switch (state) {
+
+	case ODEBUG_STATE_NOTAVAILABLE:
+		/*
+		 * This is not really a fixup. The work struct was
+		 * statically initialized. We just make sure that it
+		 * is tracked in the object tracker.
+		 */
+		if (test_bit(WORK_STRUCT_STATIC, work_data_bits(work))) {
+			debug_object_init(work, &work_debug_descr);
+			debug_object_activate(work, &work_debug_descr);
+			return 0;
+		}
+		WARN_ON_ONCE(1);
+		return 0;
+
+	case ODEBUG_STATE_ACTIVE:
+		WARN_ON(1);
+
+	default:
+		return 0;
+	}
+}
+
+/*
+ * fixup_free is called when:
+ * - an active object is freed
+ */
+static int work_fixup_free(void *addr, enum debug_obj_state state)
+{
+	struct work_struct *work = addr;
+
+	switch (state) {
+	case ODEBUG_STATE_ACTIVE:
+		cancel_work_sync(work);
+		debug_object_free(work, &work_debug_descr);
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+static struct debug_obj_descr work_debug_descr = {
+	.name		= "work_struct",
+	.fixup_init	= work_fixup_init,
+	.fixup_activate	= work_fixup_activate,
+	.fixup_free	= work_fixup_free,
+};
+
+static inline void debug_work_activate(struct work_struct *work)
+{
+	debug_object_activate(work, &work_debug_descr);
+}
+
+static inline void debug_work_deactivate(struct work_struct *work)
+{
+	debug_object_deactivate(work, &work_debug_descr);
+}
+
+void __init_work(struct work_struct *work, int onstack)
+{
+	if (onstack)
+		debug_object_init_on_stack(work, &work_debug_descr);
+	else
+		debug_object_init(work, &work_debug_descr);
+}
+EXPORT_SYMBOL_GPL(__init_work);
+
+void destroy_work_on_stack(struct work_struct *work)
+{
+	debug_object_free(work, &work_debug_descr);
+}
+EXPORT_SYMBOL_GPL(destroy_work_on_stack);
+
+#else
+static inline void debug_work_activate(struct work_struct *work) { }
+static inline void debug_work_deactivate(struct work_struct *work) { }
+#endif
+
 /* Serializes the accesses to the list of workqueues. */
 static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
···
 {
 	unsigned long flags;
 
+	debug_work_activate(work);
 	spin_lock_irqsave(&cwq->lock, flags);
 	insert_work(cwq, work, &cwq->worklist);
 	spin_unlock_irqrestore(&cwq->lock, flags);
···
 		struct lockdep_map lockdep_map = work->lockdep_map;
 #endif
 		trace_workqueue_execution(cwq->thread, work);
+		debug_work_deactivate(work);
 		cwq->current_work = work;
 		list_del_init(cwq->worklist.next);
 		spin_unlock_irq(&cwq->lock);
···
 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
 			struct wq_barrier *barr, struct list_head *head)
 {
-	INIT_WORK(&barr->work, wq_barrier_func);
+	/*
+	 * debugobject calls are safe here even with cwq->lock locked
+	 * as we know for sure that this will not trigger any of the
+	 * checks and call back into the fixup functions where we
+	 * might deadlock.
+	 */
+	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
 	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
 
 	init_completion(&barr->done);
 
+	debug_work_activate(&barr->work);
 	insert_work(cwq, &barr->work, head);
 }
···
 	}
 	spin_unlock_irq(&cwq->lock);
 
-	if (active)
+	if (active) {
 		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+	}
 
 	return active;
 }
···
 		return 0;
 
 	wait_for_completion(&barr.done);
+	destroy_work_on_stack(&barr.work);
 	return 1;
 }
 EXPORT_SYMBOL_GPL(flush_work);
···
 		 */
 		smp_rmb();
 		if (cwq == get_wq_data(work)) {
+			debug_work_deactivate(work);
 			list_del_init(&work->entry);
 			ret = 1;
 		}
···
 	}
 	spin_unlock_irq(&cwq->lock);
 
-	if (unlikely(running))
+	if (unlikely(running)) {
 		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+	}
 }
 
 static void wait_on_work(struct work_struct *work)
lib/Kconfig.debug (+8)
···
 	  timer routines to track the life time of timer objects and
 	  validate the timer operations.
 
+config DEBUG_OBJECTS_WORK
+	bool "Debug work objects"
+	depends on DEBUG_OBJECTS
+	help
+	  If you say Y here, additional code will be inserted into the
+	  work queue routines to track the life time of work objects and
+	  validate the work operations.
+
 config DEBUG_OBJECTS_ENABLE_DEFAULT
 	int "debug_objects bootup default value (0-1)"
 	range 0 1
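For reference, the on-stack usage pattern this change enforces (and which do_boot_cpu() in the first hunk now follows) looks roughly like the sketch below. The context struct, handler, and caller names are purely illustrative; only INIT_WORK_ON_STACK(), destroy_work_on_stack() and the existing completion/workqueue calls are the real API.

	#include <linux/workqueue.h>
	#include <linux/completion.h>
	#include <linux/kernel.h>

	/* Hypothetical caller-side context; the work item lives on the stack. */
	struct onstack_ctx {
		struct work_struct work;
		struct completion  done;
	};

	static void onstack_work_fn(struct work_struct *work)
	{
		struct onstack_ctx *ctx = container_of(work, struct onstack_ctx, work);

		/* ... do the actual work here ... */

		complete(&ctx->done);
	}

	static int run_onstack_work(void)
	{
		struct onstack_ctx ctx = {
			.done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
		};

		/* Registers the object with debugobjects as an on-stack work item. */
		INIT_WORK_ON_STACK(&ctx.work, onstack_work_fn);
		schedule_work(&ctx.work);

		/* Must not leave this frame while the work item may still run. */
		wait_for_completion(&ctx.done);

		/* Drop the object from the tracker before the stack frame is reused. */
		destroy_work_on_stack(&ctx.work);
		return 0;
	}

The destroy_work_on_stack() call matters on every exit path, including error paths, because debugobjects keeps tracking the address otherwise and will later report a stale object when the stack memory is reused.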