Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

unify flush_work/flush_work_keventd and rename it to cancel_work_sync

flush_work(wq, work) doesn't need the first parameter, we can use cwq->wq
(this was possible from the very beginning, I missed this). So we can unify
flush_work_keventd and flush_work.

Also, rename flush_work() to cancel_work_sync() and fix all callers.
Perhaps this is not the best name, but "flush_work" is really bad.

(akpm: this is why the earlier patches bypassed maintainers)

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Tejun Heo <htejun@gmail.com>
Cc: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Oleg Nesterov and committed by Linus Torvalds
28e53bdd 5830c590

+41 -40
+1 -1
block/ll_rw_blk.c
··· 3633 3633 3634 3634 void kblockd_flush_work(struct work_struct *work) 3635 3635 { 3636 - flush_work(kblockd_workqueue, work); 3636 + cancel_work_sync(work); 3637 3637 } 3638 3638 EXPORT_SYMBOL(kblockd_flush_work); 3639 3639
+4 -4
drivers/ata/libata-core.c
··· 1316 1316 spin_unlock_irqrestore(ap->lock, flags); 1317 1317 1318 1318 DPRINTK("flush #1\n"); 1319 - flush_work(ata_wq, &ap->port_task.work); /* akpm: seems unneeded */ 1319 + cancel_work_sync(&ap->port_task.work); /* akpm: seems unneeded */ 1320 1320 1321 1321 /* 1322 1322 * At this point, if a task is running, it's guaranteed to see ··· 1327 1327 if (ata_msg_ctl(ap)) 1328 1328 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n", 1329 1329 __FUNCTION__); 1330 - flush_work(ata_wq, &ap->port_task.work); 1330 + cancel_work_sync(&ap->port_task.work); 1331 1331 } 1332 1332 1333 1333 spin_lock_irqsave(ap->lock, flags); ··· 6475 6475 /* Flush hotplug task. The sequence is similar to 6476 6476 * ata_port_flush_task(). 6477 6477 */ 6478 - flush_work(ata_aux_wq, &ap->hotplug_task.work); /* akpm: why? */ 6478 + cancel_work_sync(&ap->hotplug_task.work); /* akpm: why? */ 6479 6479 cancel_delayed_work(&ap->hotplug_task); 6480 - flush_work(ata_aux_wq, &ap->hotplug_task.work); 6480 + cancel_work_sync(&ap->hotplug_task.work); 6481 6481 6482 6482 skip_eh: 6483 6483 /* remove the associated SCSI host */
+1 -1
drivers/net/e1000/e1000_main.c
··· 1214 1214 int i; 1215 1215 #endif 1216 1216 1217 - flush_work_keventd(&adapter->reset_task); 1217 + cancel_work_sync(&adapter->reset_task); 1218 1218 1219 1219 e1000_release_manageability(adapter); 1220 1220
+2 -2
drivers/net/phy/phy.c
··· 663 663 664 664 /* 665 665 * Finish any pending work; we might have been scheduled to be called 666 - * from keventd ourselves, but flush_work_keventd() handles that. 666 + * from keventd ourselves, but cancel_work_sync() handles that. 667 667 */ 668 - flush_work_keventd(&phydev->phy_queue); 668 + cancel_work_sync(&phydev->phy_queue); 669 669 670 670 free_irq(phydev->irq, phydev); 671 671
+1 -1
drivers/net/tg3.c
··· 7386 7386 { 7387 7387 struct tg3 *tp = netdev_priv(dev); 7388 7388 7389 - flush_work_keventd(&tp->reset_task); 7389 + cancel_work_sync(&tp->reset_task); 7390 7390 7391 7391 netif_stop_queue(dev); 7392 7392
+2 -2
fs/aio.c
··· 348 348 /* 349 349 * Ensure we don't leave the ctx on the aio_wq 350 350 */ 351 - flush_work(aio_wq, &ctx->wq.work); 351 + cancel_work_sync(&ctx->wq.work); 352 352 353 353 if (1 != atomic_read(&ctx->users)) 354 354 printk(KERN_DEBUG ··· 371 371 BUG_ON(ctx->reqs_active); 372 372 373 373 cancel_delayed_work(&ctx->wq); 374 - flush_work(aio_wq, &ctx->wq.work); 374 + cancel_work_sync(&ctx->wq.work); 375 375 aio_free_ring(ctx); 376 376 mmdrop(ctx->mm); 377 377 ctx->mm = NULL;
+12 -9
include/linux/workqueue.h
··· 128 128 extern void destroy_workqueue(struct workqueue_struct *wq); 129 129 130 130 extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work)); 131 - extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay)); 131 + extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, 132 + struct delayed_work *work, unsigned long delay)); 132 133 extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 133 - struct delayed_work *work, unsigned long delay); 134 + struct delayed_work *work, unsigned long delay); 135 + 134 136 extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq)); 135 - extern void flush_work(struct workqueue_struct *wq, struct work_struct *work); 136 - extern void flush_work_keventd(struct work_struct *work); 137 + extern void flush_scheduled_work(void); 137 138 138 139 extern int FASTCALL(schedule_work(struct work_struct *work)); 139 - extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay)); 140 - 141 - extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay); 140 + extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, 141 + unsigned long delay)); 142 + extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, 143 + unsigned long delay); 142 144 extern int schedule_on_each_cpu(work_func_t func); 143 - extern void flush_scheduled_work(void); 144 145 extern int current_is_keventd(void); 145 146 extern int keventd_up(void); 146 147 147 148 extern void init_workqueues(void); 148 149 int execute_in_process_context(work_func_t fn, struct execute_work *); 149 150 151 + extern void cancel_work_sync(struct work_struct *work); 152 + 150 153 /* 151 154 * Kill off a pending schedule_delayed_work(). 
Note that the work callback 152 155 * function may still be running on return from cancel_delayed_work(), unless 153 156 * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or 154 - * flush_work() or cancel_work_sync() to wait on it. 157 + * cancel_work_sync() to wait on it. 155 158 */ 156 159 static inline int cancel_delayed_work(struct delayed_work *work) 157 160 {
+17 -19
kernel/workqueue.c
··· 413 413 } 414 414 415 415 /** 416 - * flush_work - block until a work_struct's callback has terminated 417 - * @wq: the workqueue on which the work is queued 416 + * cancel_work_sync - block until a work_struct's callback has terminated 418 417 * @work: the work which is to be flushed 419 418 * 420 - * flush_work() will attempt to cancel the work if it is queued. If the work's 421 - * callback appears to be running, flush_work() will block until it has 422 - * completed. 419 + * cancel_work_sync() will attempt to cancel the work if it is queued. If the 420 + * work's callback appears to be running, cancel_work_sync() will block until 421 + * it has completed. 423 422 * 424 - * flush_work() is designed to be used when the caller is tearing down data 425 - * structures which the callback function operates upon. It is expected that, 426 - * prior to calling flush_work(), the caller has arranged for the work to not 427 - * be requeued. 423 + * cancel_work_sync() is designed to be used when the caller is tearing down 424 + * data structures which the callback function operates upon. It is expected 425 + * that, prior to calling cancel_work_sync(), the caller has arranged for the 426 + * work to not be requeued. 
428 427 */ 429 - void flush_work(struct workqueue_struct *wq, struct work_struct *work) 428 + void cancel_work_sync(struct work_struct *work) 430 429 { 431 - const cpumask_t *cpu_map = wq_cpu_map(wq); 432 430 struct cpu_workqueue_struct *cwq; 431 + struct workqueue_struct *wq; 432 + const cpumask_t *cpu_map; 433 433 int cpu; 434 434 435 435 might_sleep(); ··· 448 448 work_clear_pending(work); 449 449 spin_unlock_irq(&cwq->lock); 450 450 451 + wq = cwq->wq; 452 + cpu_map = wq_cpu_map(wq); 453 + 451 454 for_each_cpu_mask(cpu, *cpu_map) 452 455 wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work); 453 456 } 454 - EXPORT_SYMBOL_GPL(flush_work); 457 + EXPORT_SYMBOL_GPL(cancel_work_sync); 455 458 456 459 457 460 static struct workqueue_struct *keventd_wq; ··· 543 540 } 544 541 EXPORT_SYMBOL(flush_scheduled_work); 545 542 546 - void flush_work_keventd(struct work_struct *work) 547 - { 548 - flush_work(keventd_wq, work); 549 - } 550 - EXPORT_SYMBOL(flush_work_keventd); 551 - 552 543 /** 553 544 * cancel_rearming_delayed_work - kill off a delayed work whose handler rearms the delayed work. 554 545 * @dwork: the delayed work struct 555 546 * 556 547 * Note that the work callback function may still be running on return from 557 - * cancel_delayed_work(). Run flush_workqueue() or flush_work() to wait on it. 548 + * cancel_delayed_work(). Run flush_workqueue() or cancel_work_sync() to wait 549 + * on it. 558 550 */ 559 551 void cancel_rearming_delayed_work(struct delayed_work *dwork) 560 552 {
+1 -1
net/ipv4/ipvs/ip_vs_ctl.c
··· 2387 2387 EnterFunction(2); 2388 2388 ip_vs_trash_cleanup(); 2389 2389 cancel_rearming_delayed_work(&defense_work); 2390 - flush_work_keventd(&defense_work.work); 2390 + cancel_work_sync(&defense_work.work); 2391 2391 ip_vs_kill_estimator(&ip_vs_stats); 2392 2392 unregister_sysctl_table(sysctl_header); 2393 2393 proc_net_remove("ip_vs_stats");