at v3.15-rc4 20 kB view raw
1/* 2 * workqueue.h --- work queue handling for Linux. 3 */ 4 5#ifndef _LINUX_WORKQUEUE_H 6#define _LINUX_WORKQUEUE_H 7 8#include <linux/timer.h> 9#include <linux/linkage.h> 10#include <linux/bitops.h> 11#include <linux/lockdep.h> 12#include <linux/threads.h> 13#include <linux/atomic.h> 14#include <linux/cpumask.h> 15 16struct workqueue_struct; 17 18struct work_struct; 19typedef void (*work_func_t)(struct work_struct *work); 20void delayed_work_timer_fn(unsigned long __data); 21 22/* 23 * The first word is the work queue pointer and the flags rolled into 24 * one 25 */ 26#define work_data_bits(work) ((unsigned long *)(&(work)->data)) 27 28enum { 29 WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ 30 WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */ 31 WORK_STRUCT_PWQ_BIT = 2, /* data points to pwq */ 32 WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */ 33#ifdef CONFIG_DEBUG_OBJECTS_WORK 34 WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */ 35 WORK_STRUCT_COLOR_SHIFT = 5, /* color for workqueue flushing */ 36#else 37 WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */ 38#endif 39 40 WORK_STRUCT_COLOR_BITS = 4, 41 42 WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, 43 WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT, 44 WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT, 45 WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, 46#ifdef CONFIG_DEBUG_OBJECTS_WORK 47 WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT, 48#else 49 WORK_STRUCT_STATIC = 0, 50#endif 51 52 /* 53 * The last color is no color used for works which don't 54 * participate in workqueue flushing. 55 */ 56 WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1, 57 WORK_NO_COLOR = WORK_NR_COLORS, 58 59 /* special cpu IDs */ 60 WORK_CPU_UNBOUND = NR_CPUS, 61 WORK_CPU_END = NR_CPUS + 1, 62 63 /* 64 * Reserve 7 bits off of pwq pointer w/ debugobjects turned off. 65 * This makes pwqs aligned to 256 bytes and allows 15 workqueue 66 * flush colors. 67 */ 68 WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + 69 WORK_STRUCT_COLOR_BITS, 70 71 /* data contains off-queue information when !WORK_STRUCT_PWQ */ 72 WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT, 73 74 WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE), 75 76 /* 77 * When a work item is off queue, its high bits point to the last 78 * pool it was on. Cap at 31 bits and use the highest number to 79 * indicate that no pool is associated. 80 */ 81 WORK_OFFQ_FLAG_BITS = 1, 82 WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS, 83 WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT, 84 WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31, 85 WORK_OFFQ_POOL_NONE = (1LU << WORK_OFFQ_POOL_BITS) - 1, 86 87 /* convenience constants */ 88 WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1, 89 WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK, 90 WORK_STRUCT_NO_POOL = (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT, 91 92 /* bit mask for work_busy() return values */ 93 WORK_BUSY_PENDING = 1 << 0, 94 WORK_BUSY_RUNNING = 1 << 1, 95 96 /* maximum string length for set_worker_desc() */ 97 WORKER_DESC_LEN = 24, 98}; 99 100struct work_struct { 101 atomic_long_t data; 102 struct list_head entry; 103 work_func_t func; 104#ifdef CONFIG_LOCKDEP 105 struct lockdep_map lockdep_map; 106#endif 107}; 108 109#define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL) 110#define WORK_DATA_STATIC_INIT() \ 111 ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC) 112 113struct delayed_work { 114 struct work_struct work; 115 struct timer_list timer; 116 117 /* target workqueue and CPU ->timer uses to queue ->work */ 118 struct workqueue_struct *wq; 119 int cpu; 120}; 121 122/* 123 * A struct for workqueue attributes. This can be used to change 124 * attributes of an unbound workqueue. 125 * 126 * Unlike other fields, ->no_numa isn't a property of a worker_pool. It 127 * only modifies how apply_workqueue_attrs() select pools and thus doesn't 128 * participate in pool hash calculations or equality comparisons. 129 */ 130struct workqueue_attrs { 131 int nice; /* nice level */ 132 cpumask_var_t cpumask; /* allowed CPUs */ 133 bool no_numa; /* disable NUMA affinity */ 134}; 135 136static inline struct delayed_work *to_delayed_work(struct work_struct *work) 137{ 138 return container_of(work, struct delayed_work, work); 139} 140 141struct execute_work { 142 struct work_struct work; 143}; 144 145#ifdef CONFIG_LOCKDEP 146/* 147 * NB: because we have to copy the lockdep_map, setting _key 148 * here is required, otherwise it could get initialised to the 149 * copy of the lockdep_map! 150 */ 151#define __WORK_INIT_LOCKDEP_MAP(n, k) \ 152 .lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k), 153#else 154#define __WORK_INIT_LOCKDEP_MAP(n, k) 155#endif 156 157#define __WORK_INITIALIZER(n, f) { \ 158 .data = WORK_DATA_STATIC_INIT(), \ 159 .entry = { &(n).entry, &(n).entry }, \ 160 .func = (f), \ 161 __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \ 162 } 163 164#define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \ 165 .work = __WORK_INITIALIZER((n).work, (f)), \ 166 .timer = __TIMER_INITIALIZER(delayed_work_timer_fn, \ 167 0, (unsigned long)&(n), \ 168 (tflags) | TIMER_IRQSAFE), \ 169 } 170 171#define DECLARE_WORK(n, f) \ 172 struct work_struct n = __WORK_INITIALIZER(n, f) 173 174#define DECLARE_DELAYED_WORK(n, f) \ 175 struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0) 176 177#define DECLARE_DEFERRABLE_WORK(n, f) \ 178 struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE) 179 180#ifdef CONFIG_DEBUG_OBJECTS_WORK 181extern void __init_work(struct work_struct *work, int onstack); 182extern void destroy_work_on_stack(struct work_struct *work); 183extern void destroy_delayed_work_on_stack(struct delayed_work *work); 184static inline unsigned int work_static(struct work_struct *work) 185{ 186 return *work_data_bits(work) & WORK_STRUCT_STATIC; 187} 188#else 189static inline void __init_work(struct work_struct *work, int onstack) { } 190static inline void destroy_work_on_stack(struct work_struct *work) { } 191static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { } 192static inline unsigned int work_static(struct work_struct *work) { return 0; } 193#endif 194 195/* 196 * initialize all of a work item in one go 197 * 198 * NOTE! No point in using "atomic_long_set()": using a direct 199 * assignment of the work data initializer allows the compiler 200 * to generate better code. 201 */ 202#ifdef CONFIG_LOCKDEP 203#define __INIT_WORK(_work, _func, _onstack) \ 204 do { \ 205 static struct lock_class_key __key; \ 206 \ 207 __init_work((_work), _onstack); \ 208 (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ 209 lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \ 210 INIT_LIST_HEAD(&(_work)->entry); \ 211 (_work)->func = (_func); \ 212 } while (0) 213#else 214#define __INIT_WORK(_work, _func, _onstack) \ 215 do { \ 216 __init_work((_work), _onstack); \ 217 (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ 218 INIT_LIST_HEAD(&(_work)->entry); \ 219 (_work)->func = (_func); \ 220 } while (0) 221#endif 222 223#define INIT_WORK(_work, _func) \ 224 do { \ 225 __INIT_WORK((_work), (_func), 0); \ 226 } while (0) 227 228#define INIT_WORK_ONSTACK(_work, _func) \ 229 do { \ 230 __INIT_WORK((_work), (_func), 1); \ 231 } while (0) 232 233#define __INIT_DELAYED_WORK(_work, _func, _tflags) \ 234 do { \ 235 INIT_WORK(&(_work)->work, (_func)); \ 236 __setup_timer(&(_work)->timer, delayed_work_timer_fn, \ 237 (unsigned long)(_work), \ 238 (_tflags) | TIMER_IRQSAFE); \ 239 } while (0) 240 241#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags) \ 242 do { \ 243 INIT_WORK_ONSTACK(&(_work)->work, (_func)); \ 244 __setup_timer_on_stack(&(_work)->timer, \ 245 delayed_work_timer_fn, \ 246 (unsigned long)(_work), \ 247 (_tflags) | TIMER_IRQSAFE); \ 248 } while (0) 249 250#define INIT_DELAYED_WORK(_work, _func) \ 251 __INIT_DELAYED_WORK(_work, _func, 0) 252 253#define INIT_DELAYED_WORK_ONSTACK(_work, _func) \ 254 __INIT_DELAYED_WORK_ONSTACK(_work, _func, 0) 255 256#define INIT_DEFERRABLE_WORK(_work, _func) \ 257 __INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE) 258 259#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func) \ 260 __INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE) 261 262/** 263 * work_pending - Find out whether a work item is currently pending 264 * @work: The work item in question 265 */ 266#define work_pending(work) \ 267 test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) 268 269/** 270 * delayed_work_pending - Find out whether a delayable work item is currently 271 * pending 272 * @work: The work item in question 273 */ 274#define delayed_work_pending(w) \ 275 work_pending(&(w)->work) 276 277/** 278 * work_clear_pending - for internal use only, mark a work item as not pending 279 * @work: The work item in question 280 */ 281#define work_clear_pending(work) \ 282 clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) 283 284/* 285 * Workqueue flags and constants. For details, please refer to 286 * Documentation/workqueue.txt. 287 */ 288enum { 289 WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ 290 WQ_FREEZABLE = 1 << 2, /* freeze during suspend */ 291 WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ 292 WQ_HIGHPRI = 1 << 4, /* high priority */ 293 WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */ 294 WQ_SYSFS = 1 << 6, /* visible in sysfs, see wq_sysfs_register() */ 295 296 /* 297 * Per-cpu workqueues are generally preferred because they tend to 298 * show better performance thanks to cache locality. Per-cpu 299 * workqueues exclude the scheduler from choosing the CPU to 300 * execute the worker threads, which has an unfortunate side effect 301 * of increasing power consumption. 302 * 303 * The scheduler considers a CPU idle if it doesn't have any task 304 * to execute and tries to keep idle cores idle to conserve power; 305 * however, for example, a per-cpu work item scheduled from an 306 * interrupt handler on an idle CPU will force the scheduler to 307 * excute the work item on that CPU breaking the idleness, which in 308 * turn may lead to more scheduling choices which are sub-optimal 309 * in terms of power consumption. 310 * 311 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default 312 * but become unbound if workqueue.power_efficient kernel param is 313 * specified. Per-cpu workqueues which are identified to 314 * contribute significantly to power-consumption are identified and 315 * marked with this flag and enabling the power_efficient mode 316 * leads to noticeable power saving at the cost of small 317 * performance disadvantage. 318 * 319 * http://thread.gmane.org/gmane.linux.kernel/1480396 320 */ 321 WQ_POWER_EFFICIENT = 1 << 7, 322 323 __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */ 324 __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */ 325 326 WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ 327 WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ 328 WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, 329}; 330 331/* unbound wq's aren't per-cpu, scale max_active according to #cpus */ 332#define WQ_UNBOUND_MAX_ACTIVE \ 333 max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU) 334 335/* 336 * System-wide workqueues which are always present. 337 * 338 * system_wq is the one used by schedule[_delayed]_work[_on](). 339 * Multi-CPU multi-threaded. There are users which expect relatively 340 * short queue flush time. Don't queue works which can run for too 341 * long. 342 * 343 * system_long_wq is similar to system_wq but may host long running 344 * works. Queue flushing might take relatively long. 345 * 346 * system_unbound_wq is unbound workqueue. Workers are not bound to 347 * any specific CPU, not concurrency managed, and all queued works are 348 * executed immediately as long as max_active limit is not reached and 349 * resources are available. 350 * 351 * system_freezable_wq is equivalent to system_wq except that it's 352 * freezable. 353 * 354 * *_power_efficient_wq are inclined towards saving power and converted 355 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise, 356 * they are same as their non-power-efficient counterparts - e.g. 357 * system_power_efficient_wq is identical to system_wq if 358 * 'wq_power_efficient' is disabled. See WQ_POWER_EFFICIENT for more info. 359 */ 360extern struct workqueue_struct *system_wq; 361extern struct workqueue_struct *system_long_wq; 362extern struct workqueue_struct *system_unbound_wq; 363extern struct workqueue_struct *system_freezable_wq; 364extern struct workqueue_struct *system_power_efficient_wq; 365extern struct workqueue_struct *system_freezable_power_efficient_wq; 366 367static inline struct workqueue_struct * __deprecated __system_nrt_wq(void) 368{ 369 return system_wq; 370} 371 372static inline struct workqueue_struct * __deprecated __system_nrt_freezable_wq(void) 373{ 374 return system_freezable_wq; 375} 376 377/* equivlalent to system_wq and system_freezable_wq, deprecated */ 378#define system_nrt_wq __system_nrt_wq() 379#define system_nrt_freezable_wq __system_nrt_freezable_wq() 380 381extern struct workqueue_struct * 382__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, 383 struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6); 384 385/** 386 * alloc_workqueue - allocate a workqueue 387 * @fmt: printf format for the name of the workqueue 388 * @flags: WQ_* flags 389 * @max_active: max in-flight work items, 0 for default 390 * @args: args for @fmt 391 * 392 * Allocate a workqueue with the specified parameters. For detailed 393 * information on WQ_* flags, please refer to Documentation/workqueue.txt. 394 * 395 * The __lock_name macro dance is to guarantee that single lock_class_key 396 * doesn't end up with different namesm, which isn't allowed by lockdep. 397 * 398 * RETURNS: 399 * Pointer to the allocated workqueue on success, %NULL on failure. 400 */ 401#ifdef CONFIG_LOCKDEP 402#define alloc_workqueue(fmt, flags, max_active, args...) \ 403({ \ 404 static struct lock_class_key __key; \ 405 const char *__lock_name; \ 406 \ 407 __lock_name = #fmt#args; \ 408 \ 409 __alloc_workqueue_key((fmt), (flags), (max_active), \ 410 &__key, __lock_name, ##args); \ 411}) 412#else 413#define alloc_workqueue(fmt, flags, max_active, args...) \ 414 __alloc_workqueue_key((fmt), (flags), (max_active), \ 415 NULL, NULL, ##args) 416#endif 417 418/** 419 * alloc_ordered_workqueue - allocate an ordered workqueue 420 * @fmt: printf format for the name of the workqueue 421 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful) 422 * @args: args for @fmt 423 * 424 * Allocate an ordered workqueue. An ordered workqueue executes at 425 * most one work item at any given time in the queued order. They are 426 * implemented as unbound workqueues with @max_active of one. 427 * 428 * RETURNS: 429 * Pointer to the allocated workqueue on success, %NULL on failure. 430 */ 431#define alloc_ordered_workqueue(fmt, flags, args...) \ 432 alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args) 433 434#define create_workqueue(name) \ 435 alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name)) 436#define create_freezable_workqueue(name) \ 437 alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \ 438 1, (name)) 439#define create_singlethread_workqueue(name) \ 440 alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, (name)) 441 442extern void destroy_workqueue(struct workqueue_struct *wq); 443 444struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask); 445void free_workqueue_attrs(struct workqueue_attrs *attrs); 446int apply_workqueue_attrs(struct workqueue_struct *wq, 447 const struct workqueue_attrs *attrs); 448 449extern bool queue_work_on(int cpu, struct workqueue_struct *wq, 450 struct work_struct *work); 451extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 452 struct delayed_work *work, unsigned long delay); 453extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, 454 struct delayed_work *dwork, unsigned long delay); 455 456extern void flush_workqueue(struct workqueue_struct *wq); 457extern void drain_workqueue(struct workqueue_struct *wq); 458extern void flush_scheduled_work(void); 459 460extern int schedule_on_each_cpu(work_func_t func); 461 462int execute_in_process_context(work_func_t fn, struct execute_work *); 463 464extern bool flush_work(struct work_struct *work); 465extern bool cancel_work_sync(struct work_struct *work); 466 467extern bool flush_delayed_work(struct delayed_work *dwork); 468extern bool cancel_delayed_work(struct delayed_work *dwork); 469extern bool cancel_delayed_work_sync(struct delayed_work *dwork); 470 471extern void workqueue_set_max_active(struct workqueue_struct *wq, 472 int max_active); 473extern bool current_is_workqueue_rescuer(void); 474extern bool workqueue_congested(int cpu, struct workqueue_struct *wq); 475extern unsigned int work_busy(struct work_struct *work); 476extern __printf(1, 2) void set_worker_desc(const char *fmt, ...); 477extern void print_worker_info(const char *log_lvl, struct task_struct *task); 478 479/** 480 * queue_work - queue work on a workqueue 481 * @wq: workqueue to use 482 * @work: work to queue 483 * 484 * Returns %false if @work was already on a queue, %true otherwise. 485 * 486 * We queue the work to the CPU on which it was submitted, but if the CPU dies 487 * it can be processed by another CPU. 488 */ 489static inline bool queue_work(struct workqueue_struct *wq, 490 struct work_struct *work) 491{ 492 return queue_work_on(WORK_CPU_UNBOUND, wq, work); 493} 494 495/** 496 * queue_delayed_work - queue work on a workqueue after delay 497 * @wq: workqueue to use 498 * @dwork: delayable work to queue 499 * @delay: number of jiffies to wait before queueing 500 * 501 * Equivalent to queue_delayed_work_on() but tries to use the local CPU. 502 */ 503static inline bool queue_delayed_work(struct workqueue_struct *wq, 504 struct delayed_work *dwork, 505 unsigned long delay) 506{ 507 return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); 508} 509 510/** 511 * mod_delayed_work - modify delay of or queue a delayed work 512 * @wq: workqueue to use 513 * @dwork: work to queue 514 * @delay: number of jiffies to wait before queueing 515 * 516 * mod_delayed_work_on() on local CPU. 517 */ 518static inline bool mod_delayed_work(struct workqueue_struct *wq, 519 struct delayed_work *dwork, 520 unsigned long delay) 521{ 522 return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); 523} 524 525/** 526 * schedule_work_on - put work task on a specific cpu 527 * @cpu: cpu to put the work task on 528 * @work: job to be done 529 * 530 * This puts a job on a specific cpu 531 */ 532static inline bool schedule_work_on(int cpu, struct work_struct *work) 533{ 534 return queue_work_on(cpu, system_wq, work); 535} 536 537/** 538 * schedule_work - put work task in global workqueue 539 * @work: job to be done 540 * 541 * Returns %false if @work was already on the kernel-global workqueue and 542 * %true otherwise. 543 * 544 * This puts a job in the kernel-global workqueue if it was not already 545 * queued and leaves it in the same position on the kernel-global 546 * workqueue otherwise. 547 */ 548static inline bool schedule_work(struct work_struct *work) 549{ 550 return queue_work(system_wq, work); 551} 552 553/** 554 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay 555 * @cpu: cpu to use 556 * @dwork: job to be done 557 * @delay: number of jiffies to wait 558 * 559 * After waiting for a given time this puts a job in the kernel-global 560 * workqueue on the specified CPU. 561 */ 562static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork, 563 unsigned long delay) 564{ 565 return queue_delayed_work_on(cpu, system_wq, dwork, delay); 566} 567 568/** 569 * schedule_delayed_work - put work task in global workqueue after delay 570 * @dwork: job to be done 571 * @delay: number of jiffies to wait or 0 for immediate execution 572 * 573 * After waiting for a given time this puts a job in the kernel-global 574 * workqueue. 575 */ 576static inline bool schedule_delayed_work(struct delayed_work *dwork, 577 unsigned long delay) 578{ 579 return queue_delayed_work(system_wq, dwork, delay); 580} 581 582/** 583 * keventd_up - is workqueue initialized yet? 584 */ 585static inline bool keventd_up(void) 586{ 587 return system_wq != NULL; 588} 589 590/* used to be different but now identical to flush_work(), deprecated */ 591static inline bool __deprecated flush_work_sync(struct work_struct *work) 592{ 593 return flush_work(work); 594} 595 596/* used to be different but now identical to flush_delayed_work(), deprecated */ 597static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwork) 598{ 599 return flush_delayed_work(dwork); 600} 601 602#ifndef CONFIG_SMP 603static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg) 604{ 605 return fn(arg); 606} 607#else 608long work_on_cpu(int cpu, long (*fn)(void *), void *arg); 609#endif /* CONFIG_SMP */ 610 611#ifdef CONFIG_FREEZER 612extern void freeze_workqueues_begin(void); 613extern bool freeze_workqueues_busy(void); 614extern void thaw_workqueues(void); 615#endif /* CONFIG_FREEZER */ 616 617#ifdef CONFIG_SYSFS 618int workqueue_sysfs_register(struct workqueue_struct *wq); 619#else /* CONFIG_SYSFS */ 620static inline int workqueue_sysfs_register(struct workqueue_struct *wq) 621{ return 0; } 622#endif /* CONFIG_SYSFS */ 623 624#endif