Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue changes from Tejun Heo:
"This is workqueue updates for v3.7-rc1. A lot of activities this
round including considerable API and behavior cleanups.

* delayed_work combines a timer and a work item. The handling of the
timer part has always been a bit clunky leading to confusing
cancelation API with weird corner-case behaviors. delayed_work is
updated to use new IRQ safe timer and cancelation now works as
expected.

* Another deficiency of delayed_work was lack of the counterpart of
mod_timer() which led to cancel+queue combinations or open-coded
timer+work usages. mod_delayed_work[_on]() are added.

These two delayed_work changes make delayed_work provide interface
and behave like timer which is executed with process context.

* A work item could be executed concurrently on multiple CPUs, which
is rather unintuitive and made flush_work() behavior confusing and
half-broken under certain circumstances. This problem doesn't
exist for non-reentrant workqueues. While non-reentrancy check
isn't free, the overhead is incurred only when a work item bounces
across different CPUs and even in simulated pathological scenario
the overhead isn't too high.

All workqueues are made non-reentrant. This removes the
distinction between flush_[delayed_]work() and
flush_[delayed_]work_sync(). The former is now as strong as the
latter and the specified work item is guaranteed to have finished
execution of any previous queueing on return.

* In addition to the various bug fixes, Lai redid and simplified CPU
hotplug handling significantly.

* Joonsoo introduced system_highpri_wq and used it during CPU
hotplug.

There are two merge commits - one to pull in IRQ safe timer from
tip/timers/core and the other to pull in CPU hotplug fixes from
wq/for-3.6-fixes as Lai's hotplug restructuring depended on them."

Fixed a number of trivial conflicts, but the more interesting conflicts
were silent ones where the deprecated interfaces had been used by new
code in the merge window, and thus didn't cause any real data conflicts.

Tejun pointed out a few of them, I fixed a couple more.

* 'for-3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq: (46 commits)
workqueue: remove spurious WARN_ON_ONCE(in_irq()) from try_to_grab_pending()
workqueue: use cwq_set_max_active() helper for workqueue_set_max_active()
workqueue: introduce cwq_set_max_active() helper for thaw_workqueues()
workqueue: remove @delayed from cwq_dec_nr_in_flight()
workqueue: fix possible stall on try_to_grab_pending() of a delayed work item
workqueue: use hotcpu_notifier() for workqueue_cpu_down_callback()
workqueue: use __cpuinit instead of __devinit for cpu callbacks
workqueue: rename manager_mutex to assoc_mutex
workqueue: WORKER_REBIND is no longer necessary for idle rebinding
workqueue: WORKER_REBIND is no longer necessary for busy rebinding
workqueue: reimplement idle worker rebinding
workqueue: deprecate __cancel_delayed_work()
workqueue: reimplement cancel_delayed_work() using try_to_grab_pending()
workqueue: use mod_delayed_work() instead of __cancel + queue
workqueue: use irqsafe timer for delayed_work
workqueue: clean up delayed_work initializers and add missing one
workqueue: make deferrable delayed_work initializer names consistent
workqueue: cosmetic whitespace updates for macro definitions
workqueue: deprecate system_nrt[_freezable]_wq
workqueue: deprecate flush[_delayed]_work_sync()
...

+995 -1002
+2 -2
arch/arm/mach-pxa/sharpsl_pm.c
··· 579 579 static int sharpsl_pm_suspend(struct platform_device *pdev, pm_message_t state) 580 580 { 581 581 sharpsl_pm.flags |= SHARPSL_SUSPENDED; 582 - flush_delayed_work_sync(&toggle_charger); 583 - flush_delayed_work_sync(&sharpsl_bat); 582 + flush_delayed_work(&toggle_charger); 583 + flush_delayed_work(&sharpsl_bat); 584 584 585 585 if (sharpsl_pm.charge_mode == CHRG_ON) 586 586 sharpsl_pm.flags |= SHARPSL_DO_OFFLINE_CHRG;
+1 -1
arch/arm/plat-omap/mailbox.c
··· 310 310 omap_mbox_disable_irq(mbox, IRQ_RX); 311 311 free_irq(mbox->irq, mbox); 312 312 tasklet_kill(&mbox->txq->tasklet); 313 - flush_work_sync(&mbox->rxq->work); 313 + flush_work(&mbox->rxq->work); 314 314 mbox_queue_free(mbox->txq); 315 315 mbox_queue_free(mbox->rxq); 316 316 }
+1 -1
arch/powerpc/platforms/cell/cpufreq_spudemand.c
··· 76 76 static void spu_gov_init_work(struct spu_gov_info_struct *info) 77 77 { 78 78 int delay = usecs_to_jiffies(info->poll_int); 79 - INIT_DELAYED_WORK_DEFERRABLE(&info->work, spu_gov_work); 79 + INIT_DEFERRABLE_WORK(&info->work, spu_gov_work); 80 80 schedule_delayed_work_on(info->policy->cpu, &info->work, delay); 81 81 } 82 82
+1 -1
arch/sh/drivers/push-switch.c
··· 107 107 device_remove_file(&pdev->dev, &dev_attr_switch); 108 108 109 109 platform_set_drvdata(pdev, NULL); 110 - flush_work_sync(&psw->work); 110 + flush_work(&psw->work); 111 111 del_timer_sync(&psw->debounce); 112 112 free_irq(irq, pdev); 113 113
+3 -5
block/blk-core.c
··· 262 262 **/ 263 263 void blk_stop_queue(struct request_queue *q) 264 264 { 265 - __cancel_delayed_work(&q->delay_work); 265 + cancel_delayed_work(&q->delay_work); 266 266 queue_flag_set(QUEUE_FLAG_STOPPED, q); 267 267 } 268 268 EXPORT_SYMBOL(blk_stop_queue); ··· 319 319 */ 320 320 void blk_run_queue_async(struct request_queue *q) 321 321 { 322 - if (likely(!blk_queue_stopped(q))) { 323 - __cancel_delayed_work(&q->delay_work); 324 - queue_delayed_work(kblockd_workqueue, &q->delay_work, 0); 325 - } 322 + if (likely(!blk_queue_stopped(q))) 323 + mod_delayed_work(kblockd_workqueue, &q->delay_work, 0); 326 324 } 327 325 EXPORT_SYMBOL(blk_run_queue_async); 328 326
+4 -10
block/blk-throttle.c
··· 180 180 181 181 /* 182 182 * Worker for allocating per cpu stat for tgs. This is scheduled on the 183 - * system_nrt_wq once there are some groups on the alloc_list waiting for 183 + * system_wq once there are some groups on the alloc_list waiting for 184 184 * allocation. 185 185 */ 186 186 static void tg_stats_alloc_fn(struct work_struct *work) ··· 194 194 stats_cpu = alloc_percpu(struct tg_stats_cpu); 195 195 if (!stats_cpu) { 196 196 /* allocation failed, try again after some time */ 197 - queue_delayed_work(system_nrt_wq, dwork, 198 - msecs_to_jiffies(10)); 197 + schedule_delayed_work(dwork, msecs_to_jiffies(10)); 199 198 return; 200 199 } 201 200 } ··· 237 238 */ 238 239 spin_lock_irqsave(&tg_stats_alloc_lock, flags); 239 240 list_add(&tg->stats_alloc_node, &tg_stats_alloc_list); 240 - queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0); 241 + schedule_delayed_work(&tg_stats_alloc_work, 0); 241 242 spin_unlock_irqrestore(&tg_stats_alloc_lock, flags); 242 243 } 243 244 ··· 929 930 930 931 /* schedule work if limits changed even if no bio is queued */ 931 932 if (total_nr_queued(td) || td->limits_changed) { 932 - /* 933 - * We might have a work scheduled to be executed in future. 934 - * Cancel that and schedule a new one. 935 - */ 936 - __cancel_delayed_work(dwork); 937 - queue_delayed_work(kthrotld_workqueue, dwork, delay); 933 + mod_delayed_work(kthrotld_workqueue, dwork, delay); 938 934 throtl_log(td, "schedule work. delay=%lu jiffies=%lu", 939 935 delay, jiffies); 940 936 }
+6 -8
block/genhd.c
··· 1490 1490 intv = disk_events_poll_jiffies(disk); 1491 1491 set_timer_slack(&ev->dwork.timer, intv / 4); 1492 1492 if (check_now) 1493 - queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0); 1493 + queue_delayed_work(system_freezable_wq, &ev->dwork, 0); 1494 1494 else if (intv) 1495 - queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv); 1495 + queue_delayed_work(system_freezable_wq, &ev->dwork, intv); 1496 1496 out_unlock: 1497 1497 spin_unlock_irqrestore(&ev->lock, flags); 1498 1498 } ··· 1534 1534 1535 1535 spin_lock_irq(&ev->lock); 1536 1536 ev->clearing |= mask; 1537 - if (!ev->block) { 1538 - cancel_delayed_work(&ev->dwork); 1539 - queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0); 1540 - } 1537 + if (!ev->block) 1538 + mod_delayed_work(system_freezable_wq, &ev->dwork, 0); 1541 1539 spin_unlock_irq(&ev->lock); 1542 1540 } 1543 1541 ··· 1571 1573 1572 1574 /* uncondtionally schedule event check and wait for it to finish */ 1573 1575 disk_block_events(disk); 1574 - queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0); 1576 + queue_delayed_work(system_freezable_wq, &ev->dwork, 0); 1575 1577 flush_delayed_work(&ev->dwork); 1576 1578 __disk_unblock_events(disk, false); 1577 1579 ··· 1608 1610 1609 1611 intv = disk_events_poll_jiffies(disk); 1610 1612 if (!ev->block && intv) 1611 - queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv); 1613 + queue_delayed_work(system_freezable_wq, &ev->dwork, intv); 1612 1614 1613 1615 spin_unlock_irq(&ev->lock); 1614 1616
+2 -3
drivers/block/floppy.c
··· 672 672 673 673 if (drive == current_reqD) 674 674 drive = current_drive; 675 - __cancel_delayed_work(&fd_timeout); 676 675 677 676 if (drive < 0 || drive >= N_DRIVE) { 678 677 delay = 20UL * HZ; ··· 679 680 } else 680 681 delay = UDP->timeout; 681 682 682 - queue_delayed_work(floppy_wq, &fd_timeout, delay); 683 + mod_delayed_work(floppy_wq, &fd_timeout, delay); 683 684 if (UDP->flags & FD_DEBUG) 684 685 DPRINT("reschedule timeout %s\n", message); 685 686 timeout_message = message; ··· 890 891 891 892 raw_cmd = NULL; 892 893 command_status = FD_COMMAND_NONE; 893 - __cancel_delayed_work(&fd_timeout); 894 + cancel_delayed_work(&fd_timeout); 894 895 do_floppy = NULL; 895 896 cont = NULL; 896 897 clear_bit(0, &fdc_busy);
+2 -2
drivers/block/xen-blkfront.c
··· 670 670 spin_unlock_irqrestore(&info->io_lock, flags); 671 671 672 672 /* Flush gnttab callback work. Must be done with no locks held. */ 673 - flush_work_sync(&info->work); 673 + flush_work(&info->work); 674 674 675 675 del_gendisk(info->gd); 676 676 ··· 719 719 spin_unlock_irq(&info->io_lock); 720 720 721 721 /* Flush gnttab callback work. Must be done with no locks held. */ 722 - flush_work_sync(&info->work); 722 + flush_work(&info->work); 723 723 724 724 /* Free resources associated with old device channel. */ 725 725 if (info->ring_ref != GRANT_INVALID_REF) {
+1 -1
drivers/cdrom/gdrom.c
··· 840 840 841 841 static int __devexit remove_gdrom(struct platform_device *devptr) 842 842 { 843 - flush_work_sync(&work); 843 + flush_work(&work); 844 844 blk_cleanup_queue(gd.gdrom_rq); 845 845 free_irq(HW_EVENT_GDROM_CMD, &gd); 846 846 free_irq(HW_EVENT_GDROM_DMA, &gd);
+1 -1
drivers/char/sonypi.c
··· 1433 1433 sonypi_disable(); 1434 1434 1435 1435 synchronize_irq(sonypi_device.irq); 1436 - flush_work_sync(&sonypi_device.input_work); 1436 + flush_work(&sonypi_device.input_work); 1437 1437 1438 1438 if (useinput) { 1439 1439 input_unregister_device(sonypi_device.input_key_dev);
+2 -2
drivers/char/tpm/tpm.c
··· 1172 1172 struct tpm_chip *chip = file->private_data; 1173 1173 1174 1174 del_singleshot_timer_sync(&chip->user_read_timer); 1175 - flush_work_sync(&chip->work); 1175 + flush_work(&chip->work); 1176 1176 file->private_data = NULL; 1177 1177 atomic_set(&chip->data_pending, 0); 1178 1178 kfree(chip->data_buffer); ··· 1225 1225 int rc; 1226 1226 1227 1227 del_singleshot_timer_sync(&chip->user_read_timer); 1228 - flush_work_sync(&chip->work); 1228 + flush_work(&chip->work); 1229 1229 ret_size = atomic_read(&chip->data_pending); 1230 1230 atomic_set(&chip->data_pending, 0); 1231 1231 if (ret_size > 0) { /* relay data */
+1 -1
drivers/cpufreq/cpufreq_conservative.c
··· 466 466 delay -= jiffies % delay; 467 467 468 468 dbs_info->enable = 1; 469 - INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); 469 + INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer); 470 470 schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); 471 471 } 472 472
+1 -1
drivers/cpufreq/cpufreq_ondemand.c
··· 644 644 delay -= jiffies % delay; 645 645 646 646 dbs_info->sample_type = DBS_NORMAL_SAMPLE; 647 - INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); 647 + INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer); 648 648 schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); 649 649 } 650 650
+1 -1
drivers/devfreq/devfreq.c
··· 607 607 mutex_lock(&devfreq_list_lock); 608 608 polling = false; 609 609 devfreq_wq = create_freezable_workqueue("devfreq_wq"); 610 - INIT_DELAYED_WORK_DEFERRABLE(&devfreq_work, devfreq_monitor); 610 + INIT_DEFERRABLE_WORK(&devfreq_work, devfreq_monitor); 611 611 mutex_unlock(&devfreq_list_lock); 612 612 613 613 devfreq_monitor(&devfreq_work.work);
+1 -16
drivers/edac/edac_mc.c
··· 559 559 return; 560 560 561 561 INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function); 562 - queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec)); 562 + mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec)); 563 563 } 564 564 565 565 /* ··· 597 597 struct mem_ctl_info *mci; 598 598 struct list_head *item; 599 599 600 - mutex_lock(&mem_ctls_mutex); 601 - 602 - /* scan the list and turn off all workq timers, doing so under lock 603 - */ 604 - list_for_each(item, &mc_devices) { 605 - mci = list_entry(item, struct mem_ctl_info, link); 606 - 607 - if (mci->op_state == OP_RUNNING_POLL) 608 - cancel_delayed_work(&mci->work); 609 - } 610 - 611 - mutex_unlock(&mem_ctls_mutex); 612 - 613 - 614 - /* re-walk the list, and reset the poll delay */ 615 600 mutex_lock(&mem_ctls_mutex); 616 601 617 602 list_for_each(item, &mc_devices) {
+1 -1
drivers/extcon/extcon-adc-jack.c
··· 143 143 144 144 data->handling_delay = msecs_to_jiffies(pdata->handling_delay_ms); 145 145 146 - INIT_DELAYED_WORK_DEFERRABLE(&data->handler, adc_jack_handler); 146 + INIT_DEFERRABLE_WORK(&data->handler, adc_jack_handler); 147 147 148 148 platform_set_drvdata(pdev, data); 149 149
+3 -3
drivers/gpu/drm/drm_crtc_helper.c
··· 968 968 } 969 969 970 970 if (repoll) 971 - queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD); 971 + schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD); 972 972 } 973 973 974 974 void drm_kms_helper_poll_disable(struct drm_device *dev) ··· 993 993 } 994 994 995 995 if (poll) 996 - queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); 996 + schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); 997 997 } 998 998 EXPORT_SYMBOL(drm_kms_helper_poll_enable); 999 999 ··· 1020 1020 /* kill timer and schedule immediate execution, this doesn't block */ 1021 1021 cancel_delayed_work(&dev->mode_config.output_poll_work); 1022 1022 if (drm_kms_helper_poll) 1023 - queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0); 1023 + schedule_delayed_work(&dev->mode_config.output_poll_work, 0); 1024 1024 } 1025 1025 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
+1 -1
drivers/gpu/drm/exynos/exynos_drm_g2d.c
··· 878 878 /* FIXME: good range? */ 879 879 usleep_range(500, 1000); 880 880 881 - flush_work_sync(&g2d->runqueue_work); 881 + flush_work(&g2d->runqueue_work); 882 882 883 883 return 0; 884 884 }
+1 -1
drivers/gpu/drm/nouveau/nouveau_gpio.c
··· 302 302 spin_unlock_irqrestore(&pgpio->lock, flags); 303 303 304 304 list_for_each_entry_safe(isr, tmp, &tofree, head) { 305 - flush_work_sync(&isr->work); 305 + flush_work(&isr->work); 306 306 kfree(isr); 307 307 } 308 308 }
+1 -1
drivers/gpu/drm/radeon/radeon_irq_kms.c
··· 277 277 if (rdev->msi_enabled) 278 278 pci_disable_msi(rdev->pdev); 279 279 } 280 - flush_work_sync(&rdev->hotplug_work); 280 + flush_work(&rdev->hotplug_work); 281 281 } 282 282 283 283 /**
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
··· 594 594 par->dirty.active = false; 595 595 spin_unlock_irqrestore(&par->dirty.lock, flags); 596 596 597 - flush_delayed_work_sync(&info->deferred_work); 597 + flush_delayed_work(&info->deferred_work); 598 598 599 599 par->bo_ptr = NULL; 600 600 ttm_bo_kunmap(&par->map);
+1 -1
drivers/hid/hid-picolcd_fb.c
··· 608 608 /* make sure there is no running update - thus that fbdata->picolcd 609 609 * once obtained under lock is guaranteed not to get free() under 610 610 * the feet of the deferred work */ 611 - flush_delayed_work_sync(&info->deferred_work); 611 + flush_delayed_work(&info->deferred_work); 612 612 613 613 data->fb_info = NULL; 614 614 unregister_framebuffer(info);
+1 -1
drivers/hid/hid-wiimote-ext.c
··· 229 229 /* schedule work only once, otherwise mark for reschedule */ 230 230 static void wiiext_schedule(struct wiimote_ext *ext) 231 231 { 232 - queue_work(system_nrt_wq, &ext->worker); 232 + schedule_work(&ext->worker); 233 233 } 234 234 235 235 /*
+1 -3
drivers/infiniband/core/addr.c
··· 152 152 { 153 153 unsigned long delay; 154 154 155 - cancel_delayed_work(&work); 156 - 157 155 delay = time - jiffies; 158 156 if ((long)delay <= 0) 159 157 delay = 1; 160 158 161 - queue_delayed_work(addr_wq, &work, delay); 159 + mod_delayed_work(addr_wq, &work, delay); 162 160 } 163 161 164 162 static void queue_req(struct addr_req *req)
+6 -10
drivers/infiniband/core/mad.c
··· 2004 2004 unsigned long delay; 2005 2005 2006 2006 if (list_empty(&mad_agent_priv->wait_list)) { 2007 - __cancel_delayed_work(&mad_agent_priv->timed_work); 2007 + cancel_delayed_work(&mad_agent_priv->timed_work); 2008 2008 } else { 2009 2009 mad_send_wr = list_entry(mad_agent_priv->wait_list.next, 2010 2010 struct ib_mad_send_wr_private, ··· 2013 2013 if (time_after(mad_agent_priv->timeout, 2014 2014 mad_send_wr->timeout)) { 2015 2015 mad_agent_priv->timeout = mad_send_wr->timeout; 2016 - __cancel_delayed_work(&mad_agent_priv->timed_work); 2017 2016 delay = mad_send_wr->timeout - jiffies; 2018 2017 if ((long)delay <= 0) 2019 2018 delay = 1; 2020 - queue_delayed_work(mad_agent_priv->qp_info-> 2021 - port_priv->wq, 2022 - &mad_agent_priv->timed_work, delay); 2019 + mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, 2020 + &mad_agent_priv->timed_work, delay); 2023 2021 } 2024 2022 } 2025 2023 } ··· 2050 2052 list_add(&mad_send_wr->agent_list, list_item); 2051 2053 2052 2054 /* Reschedule a work item if we have a shorter timeout */ 2053 - if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) { 2054 - __cancel_delayed_work(&mad_agent_priv->timed_work); 2055 - queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq, 2056 - &mad_agent_priv->timed_work, delay); 2057 - } 2055 + if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) 2056 + mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, 2057 + &mad_agent_priv->timed_work, delay); 2058 2058 } 2059 2059 2060 2060 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
+2 -4
drivers/infiniband/hw/nes/nes_hw.c
··· 2679 2679 } 2680 2680 } 2681 2681 if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_SFP_D) { 2682 - if (nesdev->link_recheck) 2683 - cancel_delayed_work(&nesdev->work); 2684 2682 nesdev->link_recheck = 1; 2685 - schedule_delayed_work(&nesdev->work, 2686 - NES_LINK_RECHECK_DELAY); 2683 + mod_delayed_work(system_wq, &nesdev->work, 2684 + NES_LINK_RECHECK_DELAY); 2687 2685 } 2688 2686 } 2689 2687
+2 -3
drivers/infiniband/hw/nes/nes_nic.c
··· 243 243 244 244 spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags); 245 245 if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_SFP_D) { 246 - if (nesdev->link_recheck) 247 - cancel_delayed_work(&nesdev->work); 248 246 nesdev->link_recheck = 1; 249 - schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY); 247 + mod_delayed_work(system_wq, &nesdev->work, 248 + NES_LINK_RECHECK_DELAY); 250 249 } 251 250 spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags); 252 251
+1 -2
drivers/input/keyboard/qt2160.c
··· 156 156 157 157 spin_lock_irqsave(&qt2160->lock, flags); 158 158 159 - __cancel_delayed_work(&qt2160->dwork); 160 - schedule_delayed_work(&qt2160->dwork, 0); 159 + mod_delayed_work(system_wq, &qt2160->dwork, 0); 161 160 162 161 spin_unlock_irqrestore(&qt2160->lock, flags); 163 162
+1 -6
drivers/input/mouse/synaptics_i2c.c
··· 376 376 377 377 spin_lock_irqsave(&touch->lock, flags); 378 378 379 - /* 380 - * If work is already scheduled then subsequent schedules will not 381 - * change the scheduled time that's why we have to cancel it first. 382 - */ 383 - __cancel_delayed_work(&touch->dwork); 384 - schedule_delayed_work(&touch->dwork, delay); 379 + mod_delayed_work(system_wq, &touch->dwork, delay); 385 380 386 381 spin_unlock_irqrestore(&touch->lock, flags); 387 382 }
+1 -1
drivers/input/touchscreen/wm831x-ts.c
··· 221 221 synchronize_irq(wm831x_ts->pd_irq); 222 222 223 223 /* Make sure the IRQ completion work is quiesced */ 224 - flush_work_sync(&wm831x_ts->pd_data_work); 224 + flush_work(&wm831x_ts->pd_data_work); 225 225 226 226 /* If we ended up with the pen down then make sure we revert back 227 227 * to pen detection state for the next time we start up.
+1 -1
drivers/isdn/mISDN/hwchannel.c
··· 116 116 } 117 117 skb_queue_purge(&ch->squeue); 118 118 skb_queue_purge(&ch->rqueue); 119 - flush_work_sync(&ch->workq); 119 + flush_work(&ch->workq); 120 120 return 0; 121 121 } 122 122 EXPORT_SYMBOL(mISDN_freedchannel);
+3 -3
drivers/leds/leds-lm3533.c
··· 737 737 sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group); 738 738 err_unregister: 739 739 led_classdev_unregister(&led->cdev); 740 - flush_work_sync(&led->work); 740 + flush_work(&led->work); 741 741 742 742 return ret; 743 743 } ··· 751 751 lm3533_ctrlbank_disable(&led->cb); 752 752 sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group); 753 753 led_classdev_unregister(&led->cdev); 754 - flush_work_sync(&led->work); 754 + flush_work(&led->work); 755 755 756 756 return 0; 757 757 } ··· 765 765 766 766 lm3533_ctrlbank_disable(&led->cb); 767 767 lm3533_led_set(&led->cdev, LED_OFF); /* disable blink */ 768 - flush_work_sync(&led->work); 768 + flush_work(&led->work); 769 769 } 770 770 771 771 static struct platform_driver lm3533_led_driver = {
+1 -1
drivers/leds/leds-lp8788.c
··· 172 172 struct lp8788_led *led = platform_get_drvdata(pdev); 173 173 174 174 led_classdev_unregister(&led->led_dev); 175 - flush_work_sync(&led->work); 175 + flush_work(&led->work); 176 176 177 177 return 0; 178 178 }
+1 -1
drivers/leds/leds-wm8350.c
··· 275 275 struct wm8350_led *led = platform_get_drvdata(pdev); 276 276 277 277 led_classdev_unregister(&led->cdev); 278 - flush_work_sync(&led->work); 278 + flush_work(&led->work); 279 279 wm8350_led_disable(led); 280 280 regulator_put(led->dcdc); 281 281 regulator_put(led->isink);
+1 -1
drivers/macintosh/ams/ams-core.c
··· 226 226 * We do this after ams_info.exit(), because an interrupt might 227 227 * have arrived before disabling them. 228 228 */ 229 - flush_work_sync(&ams_info.worker); 229 + flush_work(&ams_info.worker); 230 230 231 231 /* Remove device */ 232 232 of_device_unregister(ams_info.of_dev);
+1 -1
drivers/md/dm-mpath.c
··· 944 944 flush_workqueue(kmpath_handlerd); 945 945 multipath_wait_for_pg_init_completion(m); 946 946 flush_workqueue(kmultipathd); 947 - flush_work_sync(&m->trigger_event); 947 + flush_work(&m->trigger_event); 948 948 } 949 949 950 950 static void multipath_dtr(struct dm_target *ti)
+1 -1
drivers/md/dm-raid1.c
··· 1146 1146 1147 1147 del_timer_sync(&ms->timer); 1148 1148 flush_workqueue(ms->kmirrord_wq); 1149 - flush_work_sync(&ms->trigger_event); 1149 + flush_work(&ms->trigger_event); 1150 1150 dm_kcopyd_client_destroy(ms->kcopyd_client); 1151 1151 destroy_workqueue(ms->kmirrord_wq); 1152 1152 free_context(ms, ti, ms->nr_mirrors);
+1 -1
drivers/md/dm-stripe.c
··· 199 199 for (i = 0; i < sc->stripes; i++) 200 200 dm_put_device(ti, sc->stripe[i].dev); 201 201 202 - flush_work_sync(&sc->trigger_event); 202 + flush_work(&sc->trigger_event); 203 203 kfree(sc); 204 204 } 205 205
+2 -2
drivers/media/dvb/dvb-core/dvb_net.c
··· 1329 1329 return -EBUSY; 1330 1330 1331 1331 dvb_net_stop(net); 1332 - flush_work_sync(&priv->set_multicast_list_wq); 1333 - flush_work_sync(&priv->restart_net_feed_wq); 1332 + flush_work(&priv->set_multicast_list_wq); 1333 + flush_work(&priv->restart_net_feed_wq); 1334 1334 printk("dvb_net: removed network interface %s\n", net->name); 1335 1335 unregister_netdev(net); 1336 1336 dvbnet->state[num]=0;
+1 -1
drivers/media/dvb/mantis/mantis_evm.c
··· 111 111 struct mantis_pci *mantis = ca->ca_priv; 112 112 113 113 dprintk(MANTIS_DEBUG, 1, "Mantis Host I/F Event manager exiting"); 114 - flush_work_sync(&ca->hif_evm_work); 114 + flush_work(&ca->hif_evm_work); 115 115 mantis_hif_exit(ca); 116 116 mantis_pcmcia_exit(ca); 117 117 }
+1 -1
drivers/media/dvb/mantis/mantis_uart.c
··· 183 183 { 184 184 /* disable interrupt */ 185 185 mmwrite(mmread(MANTIS_UART_CTL) & 0xffef, MANTIS_UART_CTL); 186 - flush_work_sync(&mantis->uart_work); 186 + flush_work(&mantis->uart_work); 187 187 } 188 188 EXPORT_SYMBOL_GPL(mantis_uart_exit);
+1 -1
drivers/media/video/bt8xx/bttv-driver.c
··· 196 196 197 197 static void flush_request_modules(struct bttv *dev) 198 198 { 199 - flush_work_sync(&dev->request_module_wk); 199 + flush_work(&dev->request_module_wk); 200 200 } 201 201 #else 202 202 #define request_modules(dev)
+1 -1
drivers/media/video/cx18/cx18-driver.c
··· 272 272 273 273 static void flush_request_modules(struct cx18 *dev) 274 274 { 275 - flush_work_sync(&dev->request_module_wk); 275 + flush_work(&dev->request_module_wk); 276 276 } 277 277 #else 278 278 #define request_modules(dev)
+1 -1
drivers/media/video/cx231xx/cx231xx-cards.c
··· 1002 1002 1003 1003 static void flush_request_modules(struct cx231xx *dev) 1004 1004 { 1005 - flush_work_sync(&dev->request_module_wk); 1005 + flush_work(&dev->request_module_wk); 1006 1006 } 1007 1007 #else 1008 1008 #define request_modules(dev)
+3 -3
drivers/media/video/cx23885/cx23885-input.c
··· 231 231 v4l2_subdev_call(dev->sd_ir, ir, rx_s_parameters, &params); 232 232 v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params); 233 233 } 234 - flush_work_sync(&dev->cx25840_work); 235 - flush_work_sync(&dev->ir_rx_work); 236 - flush_work_sync(&dev->ir_tx_work); 234 + flush_work(&dev->cx25840_work); 235 + flush_work(&dev->ir_rx_work); 236 + flush_work(&dev->ir_tx_work); 237 237 } 238 238 239 239 static void cx23885_input_ir_close(struct rc_dev *rc)
+1 -1
drivers/media/video/cx88/cx88-mpeg.c
··· 70 70 71 71 static void flush_request_modules(struct cx8802_dev *dev) 72 72 { 73 - flush_work_sync(&dev->request_module_wk); 73 + flush_work(&dev->request_module_wk); 74 74 } 75 75 #else 76 76 #define request_modules(dev)
+1 -1
drivers/media/video/em28xx/em28xx-cards.c
··· 2900 2900 2901 2901 static void flush_request_modules(struct em28xx *dev) 2902 2902 { 2903 - flush_work_sync(&dev->request_module_wk); 2903 + flush_work(&dev->request_module_wk); 2904 2904 } 2905 2905 #else 2906 2906 #define request_modules(dev)
+3 -3
drivers/media/video/omap24xxcam.c
··· 1198 1198 1199 1199 atomic_inc(&cam->reset_disable); 1200 1200 1201 - flush_work_sync(&cam->sensor_reset_work); 1201 + flush_work(&cam->sensor_reset_work); 1202 1202 1203 1203 rval = videobuf_streamoff(q); 1204 1204 if (!rval) { ··· 1512 1512 1513 1513 atomic_inc(&cam->reset_disable); 1514 1514 1515 - flush_work_sync(&cam->sensor_reset_work); 1515 + flush_work(&cam->sensor_reset_work); 1516 1516 1517 1517 /* stop streaming capture */ 1518 1518 videobuf_streamoff(&fh->vbq); ··· 1536 1536 * not be scheduled anymore since streaming is already 1537 1537 * disabled.) 1538 1538 */ 1539 - flush_work_sync(&cam->sensor_reset_work); 1539 + flush_work(&cam->sensor_reset_work); 1540 1540 1541 1541 mutex_lock(&cam->mutex); 1542 1542 if (atomic_dec_return(&cam->users) == 0) {
+1 -1
drivers/media/video/saa7134/saa7134-core.c
··· 170 170 171 171 static void flush_request_submodules(struct saa7134_dev *dev) 172 172 { 173 - flush_work_sync(&dev->request_module_wk); 173 + flush_work(&dev->request_module_wk); 174 174 } 175 175 176 176 #else
+1 -1
drivers/media/video/saa7134/saa7134-empress.c
··· 556 556 557 557 if (NULL == dev->empress_dev) 558 558 return 0; 559 - flush_work_sync(&dev->empress_workqueue); 559 + flush_work(&dev->empress_workqueue); 560 560 video_unregister_device(dev->empress_dev); 561 561 dev->empress_dev = NULL; 562 562 return 0;
+1 -1
drivers/media/video/tm6000/tm6000-cards.c
··· 1074 1074 1075 1075 static void flush_request_modules(struct tm6000_core *dev) 1076 1076 { 1077 - flush_work_sync(&dev->request_module_wk); 1077 + flush_work(&dev->request_module_wk); 1078 1078 } 1079 1079 #else 1080 1080 #define request_modules(dev)
+2 -2
drivers/mfd/menelaus.c
··· 1259 1259 return 0; 1260 1260 fail2: 1261 1261 free_irq(client->irq, menelaus); 1262 - flush_work_sync(&menelaus->work); 1262 + flush_work(&menelaus->work); 1263 1263 fail1: 1264 1264 kfree(menelaus); 1265 1265 return err; ··· 1270 1270 struct menelaus_chip *menelaus = i2c_get_clientdata(client); 1271 1271 1272 1272 free_irq(client->irq, menelaus); 1273 - flush_work_sync(&menelaus->work); 1273 + flush_work(&menelaus->work); 1274 1274 kfree(menelaus); 1275 1275 the_menelaus = NULL; 1276 1276 return 0;
+1 -1
drivers/misc/ioc4.c
··· 487 487 ioc4_exit(void) 488 488 { 489 489 /* Ensure ioc4_load_modules() has completed before exiting */ 490 - flush_work_sync(&ioc4_load_modules_work); 490 + flush_work(&ioc4_load_modules_work); 491 491 pci_unregister_driver(&ioc4_driver); 492 492 } 493 493
+2 -2
drivers/mmc/core/host.c
··· 204 204 host->clk_requests--; 205 205 if (mmc_host_may_gate_card(host->card) && 206 206 !host->clk_requests) 207 - queue_delayed_work(system_nrt_wq, &host->clk_gate_work, 208 - msecs_to_jiffies(host->clkgate_delay)); 207 + schedule_delayed_work(&host->clk_gate_work, 208 + msecs_to_jiffies(host->clkgate_delay)); 209 209 spin_unlock_irqrestore(&host->clk_lock, flags); 210 210 } 211 211
+2 -2
drivers/mtd/mtdoops.c
··· 387 387 printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n"); 388 388 389 389 cxt->mtd = NULL; 390 - flush_work_sync(&cxt->work_erase); 391 - flush_work_sync(&cxt->work_write); 390 + flush_work(&cxt->work_erase); 391 + flush_work(&cxt->work_write); 392 392 } 393 393 394 394
+1 -1
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
··· 1394 1394 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group); 1395 1395 1396 1396 /* Flush work scheduled while releasing TIDs */ 1397 - flush_work_sync(&td->tid_release_task); 1397 + flush_work(&td->tid_release_task); 1398 1398 1399 1399 tdev->lldev = NULL; 1400 1400 cxgb3_set_dummy_ops(tdev);
+1 -1
drivers/net/ethernet/mellanox/mlx4/sense.c
··· 139 139 for (port = 1; port <= dev->caps.num_ports; port++) 140 140 sense->do_sense_port[port] = 1; 141 141 142 - INIT_DELAYED_WORK_DEFERRABLE(&sense->sense_poll, mlx4_sense_port); 142 + INIT_DEFERRABLE_WORK(&sense->sense_poll, mlx4_sense_port); 143 143 }
+1 -1
drivers/net/ethernet/neterion/vxge/vxge-main.c
··· 3521 3521 3522 3522 strncpy(buf, dev->name, IFNAMSIZ); 3523 3523 3524 - flush_work_sync(&vdev->reset_task); 3524 + flush_work(&vdev->reset_task); 3525 3525 3526 3526 /* in 2.6 will call stop() if device is up */ 3527 3527 unregister_netdev(dev);
+1 -1
drivers/net/ethernet/sun/cassini.c
··· 3890 3890 schedule_work(&cp->reset_task); 3891 3891 #endif 3892 3892 3893 - flush_work_sync(&cp->reset_task); 3893 + flush_work(&cp->reset_task); 3894 3894 return 0; 3895 3895 } 3896 3896
+1 -1
drivers/net/ethernet/sun/niu.c
··· 9927 9927 if (!netif_running(dev)) 9928 9928 return 0; 9929 9929 9930 - flush_work_sync(&np->reset_task); 9930 + flush_work(&np->reset_task); 9931 9931 niu_netif_stop(np); 9932 9932 9933 9933 del_timer_sync(&np->timer);
+6 -6
drivers/net/virtio_net.c
··· 521 521 /* In theory, this can happen: if we don't get any buffers in 522 522 * we will *never* try to fill again. */ 523 523 if (still_empty) 524 - queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2); 524 + schedule_delayed_work(&vi->refill, HZ/2); 525 525 } 526 526 527 527 static int virtnet_poll(struct napi_struct *napi, int budget) ··· 540 540 541 541 if (vi->num < vi->max / 2) { 542 542 if (!try_fill_recv(vi, GFP_ATOMIC)) 543 - queue_delayed_work(system_nrt_wq, &vi->refill, 0); 543 + schedule_delayed_work(&vi->refill, 0); 544 544 } 545 545 546 546 /* Out of packets? */ ··· 745 745 746 746 /* Make sure we have some buffers: if oom use wq. */ 747 747 if (!try_fill_recv(vi, GFP_KERNEL)) 748 - queue_delayed_work(system_nrt_wq, &vi->refill, 0); 748 + schedule_delayed_work(&vi->refill, 0); 749 749 750 750 virtnet_napi_enable(vi); 751 751 return 0; ··· 1020 1020 { 1021 1021 struct virtnet_info *vi = vdev->priv; 1022 1022 1023 - queue_work(system_nrt_wq, &vi->config_work); 1023 + schedule_work(&vi->config_work); 1024 1024 } 1025 1025 1026 1026 static int init_vqs(struct virtnet_info *vi) ··· 1152 1152 otherwise get link status from config. */ 1153 1153 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { 1154 1154 netif_carrier_off(dev); 1155 - queue_work(system_nrt_wq, &vi->config_work); 1155 + schedule_work(&vi->config_work); 1156 1156 } else { 1157 1157 vi->status = VIRTIO_NET_S_LINK_UP; 1158 1158 netif_carrier_on(dev); ··· 1264 1264 netif_device_attach(vi->dev); 1265 1265 1266 1266 if (!try_fill_recv(vi, GFP_KERNEL)) 1267 - queue_delayed_work(system_nrt_wq, &vi->refill, 0); 1267 + schedule_delayed_work(&vi->refill, 0); 1268 1268 1269 1269 mutex_lock(&vi->config_lock); 1270 1270 vi->config_enable = true;
+2 -2
drivers/net/wireless/hostap/hostap_ap.c
··· 860 860 return; 861 861 } 862 862 863 - flush_work_sync(&ap->add_sta_proc_queue); 863 + flush_work(&ap->add_sta_proc_queue); 864 864 865 865 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT 866 - flush_work_sync(&ap->wds_oper_queue); 866 + flush_work(&ap->wds_oper_queue); 867 867 if (ap->crypt) 868 868 ap->crypt->deinit(ap->crypt_priv); 869 869 ap->crypt = ap->crypt_priv = NULL;
+5 -5
drivers/net/wireless/hostap/hostap_hw.c
··· 3311 3311 3312 3312 unregister_netdev(local->dev); 3313 3313 3314 - flush_work_sync(&local->reset_queue); 3315 - flush_work_sync(&local->set_multicast_list_queue); 3316 - flush_work_sync(&local->set_tim_queue); 3314 + flush_work(&local->reset_queue); 3315 + flush_work(&local->set_multicast_list_queue); 3316 + flush_work(&local->set_tim_queue); 3317 3317 #ifndef PRISM2_NO_STATION_MODES 3318 - flush_work_sync(&local->info_queue); 3318 + flush_work(&local->info_queue); 3319 3319 #endif 3320 - flush_work_sync(&local->comms_qual_update); 3320 + flush_work(&local->comms_qual_update); 3321 3321 3322 3322 lib80211_crypt_info_free(&local->crypt_info); 3323 3323
+3 -5
drivers/net/wireless/ipw2x00/ipw2100.c
··· 2181 2181 2182 2182 /* Make sure the RF Kill check timer is running */ 2183 2183 priv->stop_rf_kill = 0; 2184 - cancel_delayed_work(&priv->rf_kill); 2185 - schedule_delayed_work(&priv->rf_kill, round_jiffies_relative(HZ)); 2184 + mod_delayed_work(system_wq, &priv->rf_kill, round_jiffies_relative(HZ)); 2186 2185 } 2187 2186 2188 2187 static void send_scan_event(void *data) ··· 4321 4322 "disabled by HW switch\n"); 4322 4323 /* Make sure the RF_KILL check timer is running */ 4323 4324 priv->stop_rf_kill = 0; 4324 - cancel_delayed_work(&priv->rf_kill); 4325 - schedule_delayed_work(&priv->rf_kill, 4326 - round_jiffies_relative(HZ)); 4325 + mod_delayed_work(system_wq, &priv->rf_kill, 4326 + round_jiffies_relative(HZ)); 4327 4327 } else 4328 4328 schedule_reset(priv); 4329 4329 }
+1 -2
drivers/net/wireless/zd1211rw/zd_usb.c
··· 1164 1164 { 1165 1165 struct zd_usb_rx *rx = &usb->rx; 1166 1166 1167 - cancel_delayed_work(&rx->idle_work); 1168 - queue_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL); 1167 + mod_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL); 1169 1168 } 1170 1169 1171 1170 static inline void init_usb_interrupt(struct zd_usb *usb)
+5 -15
drivers/platform/x86/thinkpad_acpi.c
··· 7685 7685 7686 7686 static void fan_watchdog_reset(void) 7687 7687 { 7688 - static int fan_watchdog_active; 7689 - 7690 7688 if (fan_control_access_mode == TPACPI_FAN_WR_NONE) 7691 7689 return; 7692 7690 7693 - if (fan_watchdog_active) 7694 - cancel_delayed_work(&fan_watchdog_task); 7695 - 7696 7691 if (fan_watchdog_maxinterval > 0 && 7697 - tpacpi_lifecycle != TPACPI_LIFE_EXITING) { 7698 - fan_watchdog_active = 1; 7699 - if (!queue_delayed_work(tpacpi_wq, &fan_watchdog_task, 7700 - msecs_to_jiffies(fan_watchdog_maxinterval 7701 - * 1000))) { 7702 - pr_err("failed to queue the fan watchdog, " 7703 - "watchdog will not trigger\n"); 7704 - } 7705 - } else 7706 - fan_watchdog_active = 0; 7692 + tpacpi_lifecycle != TPACPI_LIFE_EXITING) 7693 + mod_delayed_work(tpacpi_wq, &fan_watchdog_task, 7694 + msecs_to_jiffies(fan_watchdog_maxinterval * 1000)); 7695 + else 7696 + cancel_delayed_work(&fan_watchdog_task); 7707 7697 } 7708 7698 7709 7699 static void fan_watchdog_fire(struct work_struct *ignored)
+1 -1
drivers/power/ab8500_btemp.c
··· 1018 1018 } 1019 1019 1020 1020 /* Init work for measuring temperature periodically */ 1021 - INIT_DELAYED_WORK_DEFERRABLE(&di->btemp_periodic_work, 1021 + INIT_DEFERRABLE_WORK(&di->btemp_periodic_work, 1022 1022 ab8500_btemp_periodic_work); 1023 1023 1024 1024 /* Identify the battery */
+4 -4
drivers/power/ab8500_charger.c
··· 2618 2618 } 2619 2619 2620 2620 /* Init work for HW failure check */ 2621 - INIT_DELAYED_WORK_DEFERRABLE(&di->check_hw_failure_work, 2621 + INIT_DEFERRABLE_WORK(&di->check_hw_failure_work, 2622 2622 ab8500_charger_check_hw_failure_work); 2623 - INIT_DELAYED_WORK_DEFERRABLE(&di->check_usbchgnotok_work, 2623 + INIT_DEFERRABLE_WORK(&di->check_usbchgnotok_work, 2624 2624 ab8500_charger_check_usbchargernotok_work); 2625 2625 2626 2626 /* ··· 2632 2632 * watchdog have to be kicked by the charger driver 2633 2633 * when the AC charger is disabled 2634 2634 */ 2635 - INIT_DELAYED_WORK_DEFERRABLE(&di->kick_wd_work, 2635 + INIT_DEFERRABLE_WORK(&di->kick_wd_work, 2636 2636 ab8500_charger_kick_watchdog_work); 2637 2637 2638 - INIT_DELAYED_WORK_DEFERRABLE(&di->check_vbat_work, 2638 + INIT_DEFERRABLE_WORK(&di->check_vbat_work, 2639 2639 ab8500_charger_check_vbat_work); 2640 2640 2641 2641 /* Init work for charger detection */
+4 -4
drivers/power/ab8500_fg.c
··· 2516 2516 INIT_WORK(&di->fg_acc_cur_work, ab8500_fg_acc_cur_work); 2517 2517 2518 2518 /* Init work for reinitialising the fg algorithm */ 2519 - INIT_DELAYED_WORK_DEFERRABLE(&di->fg_reinit_work, 2519 + INIT_DEFERRABLE_WORK(&di->fg_reinit_work, 2520 2520 ab8500_fg_reinit_work); 2521 2521 2522 2522 /* Work delayed Queue to run the state machine */ 2523 - INIT_DELAYED_WORK_DEFERRABLE(&di->fg_periodic_work, 2523 + INIT_DEFERRABLE_WORK(&di->fg_periodic_work, 2524 2524 ab8500_fg_periodic_work); 2525 2525 2526 2526 /* Work to check low battery condition */ 2527 - INIT_DELAYED_WORK_DEFERRABLE(&di->fg_low_bat_work, 2527 + INIT_DEFERRABLE_WORK(&di->fg_low_bat_work, 2528 2528 ab8500_fg_low_bat_work); 2529 2529 2530 2530 /* Init work for HW failure check */ 2531 - INIT_DELAYED_WORK_DEFERRABLE(&di->fg_check_hw_failure_work, 2531 + INIT_DEFERRABLE_WORK(&di->fg_check_hw_failure_work, 2532 2532 ab8500_fg_check_hw_failure_work); 2533 2533 2534 2534 /* Initialize OVV, and other registers */
+2 -2
drivers/power/abx500_chargalg.c
··· 1848 1848 } 1849 1849 1850 1850 /* Init work for chargalg */ 1851 - INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_periodic_work, 1851 + INIT_DEFERRABLE_WORK(&di->chargalg_periodic_work, 1852 1852 abx500_chargalg_periodic_work); 1853 - INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_wd_work, 1853 + INIT_DEFERRABLE_WORK(&di->chargalg_wd_work, 1854 1854 abx500_chargalg_wd_work); 1855 1855 1856 1856 /* Init work for chargalg */
+3 -6
drivers/power/charger-manager.c
··· 509 509 if (!delayed_work_pending(&cm_monitor_work) || 510 510 (delayed_work_pending(&cm_monitor_work) && 511 511 time_after(next_polling, _next_polling))) { 512 - cancel_delayed_work_sync(&cm_monitor_work); 513 512 next_polling = jiffies + polling_jiffy; 514 - queue_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy); 513 + mod_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy); 515 514 } 516 515 517 516 out: ··· 545 546 if (cm_suspended) 546 547 device_set_wakeup_capable(cm->dev, true); 547 548 548 - if (delayed_work_pending(&cm->fullbatt_vchk_work)) 549 - cancel_delayed_work(&cm->fullbatt_vchk_work); 550 - queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work, 551 - msecs_to_jiffies(desc->fullbatt_vchkdrop_ms)); 549 + mod_delayed_work(cm_wq, &cm->fullbatt_vchk_work, 550 + msecs_to_jiffies(desc->fullbatt_vchkdrop_ms)); 552 551 cm->fullbatt_vchk_jiffies_at = jiffies + msecs_to_jiffies( 553 552 desc->fullbatt_vchkdrop_ms); 554 553
+1 -1
drivers/power/collie_battery.c
··· 290 290 static int collie_bat_suspend(struct ucb1x00_dev *dev, pm_message_t state) 291 291 { 292 292 /* flush all pending status updates */ 293 - flush_work_sync(&bat_work); 293 + flush_work(&bat_work); 294 294 return 0; 295 295 } 296 296
+3 -6
drivers/power/ds2760_battery.c
··· 355 355 356 356 dev_dbg(di->dev, "%s\n", __func__); 357 357 358 - cancel_delayed_work(&di->monitor_work); 359 - queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10); 358 + mod_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10); 360 359 } 361 360 362 361 ··· 400 401 401 402 /* postpone the actual work by 20 secs. This is for debouncing GPIO 402 403 * signals and to let the current value settle. See AN4188. */ 403 - cancel_delayed_work(&di->set_charged_work); 404 - queue_delayed_work(di->monitor_wqueue, &di->set_charged_work, HZ * 20); 404 + mod_delayed_work(di->monitor_wqueue, &di->set_charged_work, HZ * 20); 405 405 } 406 406 407 407 static int ds2760_battery_get_property(struct power_supply *psy, ··· 614 616 di->charge_status = POWER_SUPPLY_STATUS_UNKNOWN; 615 617 power_supply_changed(&di->bat); 616 618 617 - cancel_delayed_work(&di->monitor_work); 618 - queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ); 619 + mod_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ); 619 620 620 621 return 0; 621 622 }
+2 -4
drivers/power/jz4740-battery.c
··· 173 173 { 174 174 struct jz_battery *jz_battery = psy_to_jz_battery(psy); 175 175 176 - cancel_delayed_work(&jz_battery->work); 177 - schedule_delayed_work(&jz_battery->work, 0); 176 + mod_delayed_work(system_wq, &jz_battery->work, 0); 178 177 } 179 178 180 179 static irqreturn_t jz_battery_charge_irq(int irq, void *data) 181 180 { 182 181 struct jz_battery *jz_battery = data; 183 182 184 - cancel_delayed_work(&jz_battery->work); 185 - schedule_delayed_work(&jz_battery->work, 0); 183 + mod_delayed_work(system_wq, &jz_battery->work, 0); 186 184 187 185 return IRQ_HANDLED; 188 186 }
+1 -1
drivers/power/max17040_battery.c
··· 232 232 max17040_reset(client); 233 233 max17040_get_version(client); 234 234 235 - INIT_DELAYED_WORK_DEFERRABLE(&chip->work, max17040_work); 235 + INIT_DEFERRABLE_WORK(&chip->work, max17040_work); 236 236 schedule_delayed_work(&chip->work, MAX17040_DELAY); 237 237 238 238 return 0;
+1 -1
drivers/power/tosa_battery.c
··· 327 327 static int tosa_bat_suspend(struct platform_device *dev, pm_message_t state) 328 328 { 329 329 /* flush all pending status updates */ 330 - flush_work_sync(&bat_work); 330 + flush_work(&bat_work); 331 331 return 0; 332 332 } 333 333
+1 -1
drivers/power/wm97xx_battery.c
··· 146 146 #ifdef CONFIG_PM 147 147 static int wm97xx_bat_suspend(struct device *dev) 148 148 { 149 - flush_work_sync(&bat_work); 149 + flush_work(&bat_work); 150 150 return 0; 151 151 } 152 152
+1 -1
drivers/power/z2_battery.c
··· 276 276 struct i2c_client *client = to_i2c_client(dev); 277 277 struct z2_charger *charger = i2c_get_clientdata(client); 278 278 279 - flush_work_sync(&charger->bat_work); 279 + flush_work(&charger->bat_work); 280 280 return 0; 281 281 } 282 282
+1 -1
drivers/regulator/core.c
··· 3476 3476 regulator_put(rdev->supply); 3477 3477 mutex_lock(&regulator_list_mutex); 3478 3478 debugfs_remove_recursive(rdev->debugfs); 3479 - flush_work_sync(&rdev->disable_work.work); 3479 + flush_work(&rdev->disable_work.work); 3480 3480 WARN_ON(rdev->open_count); 3481 3481 unset_regulator_supplies(rdev); 3482 3482 list_del(&rdev->list);
+2 -2
drivers/scsi/arcmsr/arcmsr_hba.c
··· 999 999 int poll_count = 0; 1000 1000 arcmsr_free_sysfs_attr(acb); 1001 1001 scsi_remove_host(host); 1002 - flush_work_sync(&acb->arcmsr_do_message_isr_bh); 1002 + flush_work(&acb->arcmsr_do_message_isr_bh); 1003 1003 del_timer_sync(&acb->eternal_timer); 1004 1004 arcmsr_disable_outbound_ints(acb); 1005 1005 arcmsr_stop_adapter_bgrb(acb); ··· 1045 1045 (struct AdapterControlBlock *)host->hostdata; 1046 1046 del_timer_sync(&acb->eternal_timer); 1047 1047 arcmsr_disable_outbound_ints(acb); 1048 - flush_work_sync(&acb->arcmsr_do_message_isr_bh); 1048 + flush_work(&acb->arcmsr_do_message_isr_bh); 1049 1049 arcmsr_stop_adapter_bgrb(acb); 1050 1050 arcmsr_flush_adapter_cache(acb); 1051 1051 }
+1 -1
drivers/scsi/ipr.c
··· 9020 9020 9021 9021 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); 9022 9022 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); 9023 - flush_work_sync(&ioa_cfg->work_q); 9023 + flush_work(&ioa_cfg->work_q); 9024 9024 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); 9025 9025 9026 9026 spin_lock(&ipr_driver_lock);
+1 -1
drivers/scsi/pmcraid.c
··· 5459 5459 pmcraid_shutdown(pdev); 5460 5460 5461 5461 pmcraid_disable_interrupts(pinstance, ~0); 5462 - flush_work_sync(&pinstance->worker_q); 5462 + flush_work(&pinstance->worker_q); 5463 5463 5464 5464 pmcraid_kill_tasklets(pinstance); 5465 5465 pmcraid_unregister_interrupt_handler(pinstance);
+1 -1
drivers/scsi/qla2xxx/qla_target.c
··· 969 969 spin_unlock_irqrestore(&ha->hardware_lock, flags); 970 970 mutex_unlock(&ha->tgt.tgt_mutex); 971 971 972 - flush_delayed_work_sync(&tgt->sess_del_work); 972 + flush_delayed_work(&tgt->sess_del_work); 973 973 974 974 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009, 975 975 "Waiting for sess works (tgt %p)", tgt);
+1 -1
drivers/staging/ccg/u_ether.c
··· 827 827 return; 828 828 829 829 unregister_netdev(the_dev->net); 830 - flush_work_sync(&the_dev->work); 830 + flush_work(&the_dev->work); 831 831 free_netdev(the_dev->net); 832 832 833 833 the_dev = NULL;
+2 -2
drivers/staging/nvec/nvec.c
··· 264 264 list_add_tail(&msg->node, &nvec->tx_data); 265 265 spin_unlock_irqrestore(&nvec->tx_lock, flags); 266 266 267 - queue_work(system_nrt_wq, &nvec->tx_work); 267 + schedule_work(&nvec->tx_work); 268 268 269 269 return 0; 270 270 } ··· 471 471 if (!nvec_msg_is_event(nvec->rx)) 472 472 complete(&nvec->ec_transfer); 473 473 474 - queue_work(system_nrt_wq, &nvec->rx_work); 474 + schedule_work(&nvec->rx_work); 475 475 } 476 476 477 477 /**
+6 -9
drivers/thermal/thermal_sys.c
··· 694 694 static void thermal_zone_device_set_polling(struct thermal_zone_device *tz, 695 695 int delay) 696 696 { 697 - cancel_delayed_work(&(tz->poll_queue)); 698 - 699 - if (!delay) 700 - return; 701 - 702 697 if (delay > 1000) 703 - queue_delayed_work(system_freezable_wq, &(tz->poll_queue), 704 - round_jiffies(msecs_to_jiffies(delay))); 698 + mod_delayed_work(system_freezable_wq, &tz->poll_queue, 699 + round_jiffies(msecs_to_jiffies(delay))); 700 + else if (delay) 701 + mod_delayed_work(system_freezable_wq, &tz->poll_queue, 702 + msecs_to_jiffies(delay)); 705 703 else 706 - queue_delayed_work(system_freezable_wq, &(tz->poll_queue), 707 - msecs_to_jiffies(delay)); 704 + cancel_delayed_work(&tz->poll_queue); 708 705 } 709 706 710 707 static void thermal_zone_device_passive(struct thermal_zone_device *tz,
+1 -1
drivers/tty/hvc/hvsi.c
··· 765 765 766 766 /* 'writer' could still be pending if it didn't see n_outbuf = 0 yet */ 767 767 cancel_delayed_work_sync(&hp->writer); 768 - flush_work_sync(&hp->handshaker); 768 + flush_work(&hp->handshaker); 769 769 770 770 /* 771 771 * it's also possible that our timeout expired and hvsi_write_worker
+1 -1
drivers/tty/ipwireless/hardware.c
··· 1729 1729 1730 1730 ipwireless_stop_interrupts(hw); 1731 1731 1732 - flush_work_sync(&hw->work_rx); 1732 + flush_work(&hw->work_rx); 1733 1733 1734 1734 for (i = 0; i < NL_NUM_OF_ADDRESSES; i++) 1735 1735 if (hw->packet_assembler[i] != NULL)
+2 -2
drivers/tty/ipwireless/network.c
··· 435 435 network->shutting_down = 1; 436 436 437 437 ipwireless_ppp_close(network); 438 - flush_work_sync(&network->work_go_online); 439 - flush_work_sync(&network->work_go_offline); 438 + flush_work(&network->work_go_online); 439 + flush_work(&network->work_go_offline); 440 440 441 441 ipwireless_stop_interrupts(network->hardware); 442 442 ipwireless_associate_network(network->hardware, NULL);
+1 -1
drivers/tty/serial/kgdboc.c
··· 122 122 i--; 123 123 } 124 124 } 125 - flush_work_sync(&kgdboc_restore_input_work); 125 + flush_work(&kgdboc_restore_input_work); 126 126 } 127 127 #else /* ! CONFIG_KDB_KEYBOARD */ 128 128 #define kgdboc_register_kbd(x) 0
+1 -1
drivers/tty/serial/omap-serial.c
··· 1227 1227 struct uart_omap_port *up = dev_get_drvdata(dev); 1228 1228 1229 1229 uart_suspend_port(&serial_omap_reg, &up->port); 1230 - flush_work_sync(&up->qos_work); 1230 + flush_work(&up->qos_work); 1231 1231 1232 1232 return 0; 1233 1233 }
+3 -3
drivers/tty/tty_ldisc.c
··· 523 523 */ 524 524 static void tty_ldisc_flush_works(struct tty_struct *tty) 525 525 { 526 - flush_work_sync(&tty->hangup_work); 527 - flush_work_sync(&tty->SAK_work); 528 - flush_work_sync(&tty->buf.work); 526 + flush_work(&tty->hangup_work); 527 + flush_work(&tty->SAK_work); 528 + flush_work(&tty->buf.work); 529 529 } 530 530 531 531 /**
+1 -1
drivers/usb/atm/speedtch.c
··· 718 718 del_timer_sync(&instance->resubmit_timer); 719 719 usb_free_urb(int_urb); 720 720 721 - flush_work_sync(&instance->status_check_work); 721 + flush_work(&instance->status_check_work); 722 722 } 723 723 724 724 static int speedtch_pre_reset(struct usb_interface *intf)
+1 -1
drivers/usb/atm/ueagle-atm.c
··· 2262 2262 usb_free_urb(sc->urb_int); 2263 2263 2264 2264 /* flush the work item, when no one can schedule it */ 2265 - flush_work_sync(&sc->task); 2265 + flush_work(&sc->task); 2266 2266 2267 2267 release_firmware(sc->dsp_firm); 2268 2268 uea_leaves(INS_TO_USBDEV(sc));
+1 -1
drivers/usb/gadget/u_ether.c
··· 834 834 return; 835 835 836 836 unregister_netdev(the_dev->net); 837 - flush_work_sync(&the_dev->work); 837 + flush_work(&the_dev->work); 838 838 free_netdev(the_dev->net); 839 839 840 840 the_dev = NULL;
+1 -1
drivers/usb/host/ohci-hcd.c
··· 893 893 ohci_dump (ohci, 1); 894 894 895 895 if (quirk_nec(ohci)) 896 - flush_work_sync(&ohci->nec_work); 896 + flush_work(&ohci->nec_work); 897 897 898 898 ohci_usb_reset (ohci); 899 899 ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
+1 -1
drivers/usb/otg/isp1301_omap.c
··· 1230 1230 isp->timer.data = 0; 1231 1231 set_bit(WORK_STOP, &isp->todo); 1232 1232 del_timer_sync(&isp->timer); 1233 - flush_work_sync(&isp->work); 1233 + flush_work(&isp->work); 1234 1234 1235 1235 put_device(&i2c->dev); 1236 1236 the_transceiver = NULL;
+3 -3
drivers/video/omap2/displays/panel-taal.c
··· 906 906 r = -ENOMEM; 907 907 goto err_wq; 908 908 } 909 - INIT_DELAYED_WORK_DEFERRABLE(&td->esd_work, taal_esd_work); 909 + INIT_DEFERRABLE_WORK(&td->esd_work, taal_esd_work); 910 910 INIT_DELAYED_WORK(&td->ulps_work, taal_ulps_work); 911 911 912 912 dev_set_drvdata(&dssdev->dev, td); ··· 962 962 goto err_irq; 963 963 } 964 964 965 - INIT_DELAYED_WORK_DEFERRABLE(&td->te_timeout_work, 966 - taal_te_timeout_work_callback); 965 + INIT_DEFERRABLE_WORK(&td->te_timeout_work, 966 + taal_te_timeout_work_callback); 967 967 968 968 dev_dbg(&dssdev->dev, "Using GPIO TE\n"); 969 969 }
+3 -3
drivers/video/omap2/dss/dsi.c
··· 4306 4306 * and is sending the data. 4307 4307 */ 4308 4308 4309 - __cancel_delayed_work(&dsi->framedone_timeout_work); 4309 + cancel_delayed_work(&dsi->framedone_timeout_work); 4310 4310 4311 4311 dsi_handle_framedone(dsidev, 0); 4312 4312 } ··· 4863 4863 mutex_init(&dsi->lock); 4864 4864 sema_init(&dsi->bus_lock, 1); 4865 4865 4866 - INIT_DELAYED_WORK_DEFERRABLE(&dsi->framedone_timeout_work, 4867 - dsi_framedone_timeout_work_callback); 4866 + INIT_DEFERRABLE_WORK(&dsi->framedone_timeout_work, 4867 + dsi_framedone_timeout_work_callback); 4868 4868 4869 4869 #ifdef DSI_CATCH_MISSING_TE 4870 4870 init_timer(&dsi->te_timer);
+1 -1
fs/affs/super.c
··· 551 551 return -EINVAL; 552 552 } 553 553 554 - flush_delayed_work_sync(&sbi->sb_work); 554 + flush_delayed_work(&sbi->sb_work); 555 555 replace_mount_options(sb, new_opts); 556 556 557 557 sbi->s_flags = mount_flags;
+1 -3
fs/afs/callback.c
··· 351 351 */ 352 352 void afs_flush_callback_breaks(struct afs_server *server) 353 353 { 354 - cancel_delayed_work(&server->cb_break_work); 355 - queue_delayed_work(afs_callback_update_worker, 356 - &server->cb_break_work, 0); 354 + mod_delayed_work(afs_callback_update_worker, &server->cb_break_work, 0); 357 355 } 358 356 359 357 #if 0
+2 -8
fs/afs/server.c
··· 285 285 expiry = server->time_of_death + afs_server_timeout; 286 286 if (expiry > now) { 287 287 delay = (expiry - now) * HZ; 288 - if (!queue_delayed_work(afs_wq, &afs_server_reaper, 289 - delay)) { 290 - cancel_delayed_work(&afs_server_reaper); 291 - queue_delayed_work(afs_wq, &afs_server_reaper, 292 - delay); 293 - } 288 + mod_delayed_work(afs_wq, &afs_server_reaper, delay); 294 289 break; 295 290 } 296 291 ··· 318 323 void __exit afs_purge_servers(void) 319 324 { 320 325 afs_server_timeout = 0; 321 - cancel_delayed_work(&afs_server_reaper); 322 - queue_delayed_work(afs_wq, &afs_server_reaper, 0); 326 + mod_delayed_work(afs_wq, &afs_server_reaper, 0); 323 327 }
+3 -11
fs/afs/vlocation.c
··· 561 561 if (expiry > now) { 562 562 delay = (expiry - now) * HZ; 563 563 _debug("delay %lu", delay); 564 - if (!queue_delayed_work(afs_wq, &afs_vlocation_reap, 565 - delay)) { 566 - cancel_delayed_work(&afs_vlocation_reap); 567 - queue_delayed_work(afs_wq, &afs_vlocation_reap, 568 - delay); 569 - } 564 + mod_delayed_work(afs_wq, &afs_vlocation_reap, delay); 570 565 break; 571 566 } 572 567 ··· 609 614 spin_lock(&afs_vlocation_updates_lock); 610 615 list_del_init(&afs_vlocation_updates); 611 616 spin_unlock(&afs_vlocation_updates_lock); 612 - cancel_delayed_work(&afs_vlocation_update); 613 - queue_delayed_work(afs_vlocation_update_worker, 614 - &afs_vlocation_update, 0); 617 + mod_delayed_work(afs_vlocation_update_worker, &afs_vlocation_update, 0); 615 618 destroy_workqueue(afs_vlocation_update_worker); 616 619 617 - cancel_delayed_work(&afs_vlocation_reap); 618 - queue_delayed_work(afs_wq, &afs_vlocation_reap, 0); 620 + mod_delayed_work(afs_wq, &afs_vlocation_reap, 0); 619 621 } 620 622 621 623 /*
+1 -1
fs/gfs2/lock_dlm.c
··· 1289 1289 spin_lock(&ls->ls_recover_spin); 1290 1290 set_bit(DFL_UNMOUNT, &ls->ls_recover_flags); 1291 1291 spin_unlock(&ls->ls_recover_spin); 1292 - flush_delayed_work_sync(&sdp->sd_control_work); 1292 + flush_delayed_work(&sdp->sd_control_work); 1293 1293 1294 1294 /* mounted_lock and control_lock will be purged in dlm recovery */ 1295 1295 release:
+1 -1
fs/gfs2/super.c
··· 1579 1579 clear_inode(inode); 1580 1580 gfs2_dir_hash_inval(ip); 1581 1581 ip->i_gl->gl_object = NULL; 1582 - flush_delayed_work_sync(&ip->i_gl->gl_work); 1582 + flush_delayed_work(&ip->i_gl->gl_work); 1583 1583 gfs2_glock_add_to_lru(ip->i_gl); 1584 1584 gfs2_glock_put(ip->i_gl); 1585 1585 ip->i_gl = NULL;
+1 -1
fs/hfs/inode.c
··· 644 644 645 645 /* sync the superblock to buffers */ 646 646 sb = inode->i_sb; 647 - flush_delayed_work_sync(&HFS_SB(sb)->mdb_work); 647 + flush_delayed_work(&HFS_SB(sb)->mdb_work); 648 648 /* .. finally sync the buffers to disk */ 649 649 err = sync_blockdev(sb->s_bdev); 650 650 if (!ret)
+3 -3
fs/ncpfs/inode.c
··· 314 314 release_sock(sk); 315 315 del_timer_sync(&server->timeout_tm); 316 316 317 - flush_work_sync(&server->rcv.tq); 317 + flush_work(&server->rcv.tq); 318 318 if (sk->sk_socket->type == SOCK_STREAM) 319 - flush_work_sync(&server->tx.tq); 319 + flush_work(&server->tx.tq); 320 320 else 321 - flush_work_sync(&server->timeout_tq); 321 + flush_work(&server->timeout_tq); 322 322 } 323 323 324 324 static int ncp_show_options(struct seq_file *seq, struct dentry *root)
+1 -2
fs/nfs/nfs4renewd.c
··· 117 117 timeout = 5 * HZ; 118 118 dprintk("%s: requeueing work. Lease period = %ld\n", 119 119 __func__, (timeout + HZ - 1) / HZ); 120 - cancel_delayed_work(&clp->cl_renewd); 121 - schedule_delayed_work(&clp->cl_renewd, timeout); 120 + mod_delayed_work(system_wq, &clp->cl_renewd, timeout); 122 121 set_bit(NFS_CS_RENEWD, &clp->cl_res_state); 123 122 spin_unlock(&clp->cl_lock); 124 123 }
+1 -1
fs/ocfs2/cluster/quorum.c
··· 327 327 { 328 328 struct o2quo_state *qs = &o2quo_state; 329 329 330 - flush_work_sync(&qs->qs_work); 330 + flush_work(&qs->qs_work); 331 331 }
+1 -1
fs/xfs/xfs_super.c
··· 954 954 * We schedule xfssyncd now (now that the disk is 955 955 * active) instead of later (when it might not be). 956 956 */ 957 - flush_delayed_work_sync(&mp->m_sync_work); 957 + flush_delayed_work(&mp->m_sync_work); 958 958 } 959 959 960 960 return 0;
+1 -1
fs/xfs/xfs_sync.c
··· 475 475 struct xfs_mount *mp = ip->i_mount; 476 476 477 477 queue_work(xfs_syncd_wq, &mp->m_flush_work); 478 - flush_work_sync(&mp->m_flush_work); 478 + flush_work(&mp->m_flush_work); 479 479 } 480 480 481 481 STATIC void
+122 -98
include/linux/workqueue.h
··· 16 16 17 17 struct work_struct; 18 18 typedef void (*work_func_t)(struct work_struct *work); 19 + void delayed_work_timer_fn(unsigned long __data); 19 20 20 21 /* 21 22 * The first word is the work queue pointer and the flags rolled into ··· 68 67 WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + 69 68 WORK_STRUCT_COLOR_BITS, 70 69 70 + /* data contains off-queue information when !WORK_STRUCT_CWQ */ 71 + WORK_OFFQ_FLAG_BASE = WORK_STRUCT_FLAG_BITS, 72 + 73 + WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE), 74 + 75 + WORK_OFFQ_FLAG_BITS = 1, 76 + WORK_OFFQ_CPU_SHIFT = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS, 77 + 78 + /* convenience constants */ 71 79 WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1, 72 80 WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK, 73 - WORK_STRUCT_NO_CPU = WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS, 81 + WORK_STRUCT_NO_CPU = (unsigned long)WORK_CPU_NONE << WORK_OFFQ_CPU_SHIFT, 74 82 75 83 /* bit mask for work_busy() return values */ 76 84 WORK_BUSY_PENDING = 1 << 0, ··· 102 92 struct delayed_work { 103 93 struct work_struct work; 104 94 struct timer_list timer; 95 + int cpu; 105 96 }; 106 97 107 98 static inline struct delayed_work *to_delayed_work(struct work_struct *work) ··· 126 115 #define __WORK_INIT_LOCKDEP_MAP(n, k) 127 116 #endif 128 117 129 - #define __WORK_INITIALIZER(n, f) { \ 130 - .data = WORK_DATA_STATIC_INIT(), \ 131 - .entry = { &(n).entry, &(n).entry }, \ 132 - .func = (f), \ 133 - __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \ 118 + #define __WORK_INITIALIZER(n, f) { \ 119 + .data = WORK_DATA_STATIC_INIT(), \ 120 + .entry = { &(n).entry, &(n).entry }, \ 121 + .func = (f), \ 122 + __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \ 134 123 } 135 124 136 - #define __DELAYED_WORK_INITIALIZER(n, f) { \ 137 - .work = __WORK_INITIALIZER((n).work, (f)), \ 138 - .timer = TIMER_INITIALIZER(NULL, 0, 0), \ 125 + #define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \ 126 + .work = __WORK_INITIALIZER((n).work, (f)), \ 127 + .timer = 
__TIMER_INITIALIZER(delayed_work_timer_fn, \ 128 + 0, (unsigned long)&(n), \ 129 + (tflags) | TIMER_IRQSAFE), \ 139 130 } 140 131 141 - #define __DEFERRED_WORK_INITIALIZER(n, f) { \ 142 - .work = __WORK_INITIALIZER((n).work, (f)), \ 143 - .timer = TIMER_DEFERRED_INITIALIZER(NULL, 0, 0), \ 144 - } 145 - 146 - #define DECLARE_WORK(n, f) \ 132 + #define DECLARE_WORK(n, f) \ 147 133 struct work_struct n = __WORK_INITIALIZER(n, f) 148 134 149 - #define DECLARE_DELAYED_WORK(n, f) \ 150 - struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f) 135 + #define DECLARE_DELAYED_WORK(n, f) \ 136 + struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0) 151 137 152 - #define DECLARE_DEFERRED_WORK(n, f) \ 153 - struct delayed_work n = __DEFERRED_WORK_INITIALIZER(n, f) 138 + #define DECLARE_DEFERRABLE_WORK(n, f) \ 139 + struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE) 154 140 155 141 /* 156 142 * initialize a work item's function pointer 157 143 */ 158 - #define PREPARE_WORK(_work, _func) \ 159 - do { \ 160 - (_work)->func = (_func); \ 144 + #define PREPARE_WORK(_work, _func) \ 145 + do { \ 146 + (_work)->func = (_func); \ 161 147 } while (0) 162 148 163 - #define PREPARE_DELAYED_WORK(_work, _func) \ 149 + #define PREPARE_DELAYED_WORK(_work, _func) \ 164 150 PREPARE_WORK(&(_work)->work, (_func)) 165 151 166 152 #ifdef CONFIG_DEBUG_OBJECTS_WORK ··· 187 179 \ 188 180 __init_work((_work), _onstack); \ 189 181 (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ 190 - lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\ 182 + lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \ 191 183 INIT_LIST_HEAD(&(_work)->entry); \ 192 184 PREPARE_WORK((_work), (_func)); \ 193 185 } while (0) ··· 201 193 } while (0) 202 194 #endif 203 195 204 - #define INIT_WORK(_work, _func) \ 205 - do { \ 206 - __INIT_WORK((_work), (_func), 0); \ 196 + #define INIT_WORK(_work, _func) \ 197 + do { \ 198 + __INIT_WORK((_work), (_func), 0); \ 207 199 } while (0) 208 
200 209 - #define INIT_WORK_ONSTACK(_work, _func) \ 210 - do { \ 211 - __INIT_WORK((_work), (_func), 1); \ 201 + #define INIT_WORK_ONSTACK(_work, _func) \ 202 + do { \ 203 + __INIT_WORK((_work), (_func), 1); \ 212 204 } while (0) 213 205 214 - #define INIT_DELAYED_WORK(_work, _func) \ 215 - do { \ 216 - INIT_WORK(&(_work)->work, (_func)); \ 217 - init_timer(&(_work)->timer); \ 206 + #define __INIT_DELAYED_WORK(_work, _func, _tflags) \ 207 + do { \ 208 + INIT_WORK(&(_work)->work, (_func)); \ 209 + __setup_timer(&(_work)->timer, delayed_work_timer_fn, \ 210 + (unsigned long)(_work), \ 211 + (_tflags) | TIMER_IRQSAFE); \ 218 212 } while (0) 219 213 220 - #define INIT_DELAYED_WORK_ONSTACK(_work, _func) \ 221 - do { \ 222 - INIT_WORK_ONSTACK(&(_work)->work, (_func)); \ 223 - init_timer_on_stack(&(_work)->timer); \ 214 + #define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags) \ 215 + do { \ 216 + INIT_WORK_ONSTACK(&(_work)->work, (_func)); \ 217 + __setup_timer_on_stack(&(_work)->timer, \ 218 + delayed_work_timer_fn, \ 219 + (unsigned long)(_work), \ 220 + (_tflags) | TIMER_IRQSAFE); \ 224 221 } while (0) 225 222 226 - #define INIT_DELAYED_WORK_DEFERRABLE(_work, _func) \ 227 - do { \ 228 - INIT_WORK(&(_work)->work, (_func)); \ 229 - init_timer_deferrable(&(_work)->timer); \ 230 - } while (0) 223 + #define INIT_DELAYED_WORK(_work, _func) \ 224 + __INIT_DELAYED_WORK(_work, _func, 0) 225 + 226 + #define INIT_DELAYED_WORK_ONSTACK(_work, _func) \ 227 + __INIT_DELAYED_WORK_ONSTACK(_work, _func, 0) 228 + 229 + #define INIT_DEFERRABLE_WORK(_work, _func) \ 230 + __INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE) 231 + 232 + #define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func) \ 233 + __INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE) 231 234 232 235 /** 233 236 * work_pending - Find out whether a work item is currently pending ··· 297 278 * system_long_wq is similar to system_wq but may host long running 298 279 * works. Queue flushing might take relatively long. 
299 280 * 300 - * system_nrt_wq is non-reentrant and guarantees that any given work 301 - * item is never executed in parallel by multiple CPUs. Queue 302 - * flushing might take relatively long. 303 - * 304 281 * system_unbound_wq is unbound workqueue. Workers are not bound to 305 282 * any specific CPU, not concurrency managed, and all queued works are 306 283 * executed immediately as long as max_active limit is not reached and ··· 304 289 * 305 290 * system_freezable_wq is equivalent to system_wq except that it's 306 291 * freezable. 307 - * 308 - * system_nrt_freezable_wq is equivalent to system_nrt_wq except that 309 - * it's freezable. 310 292 */ 311 293 extern struct workqueue_struct *system_wq; 312 294 extern struct workqueue_struct *system_long_wq; 313 - extern struct workqueue_struct *system_nrt_wq; 314 295 extern struct workqueue_struct *system_unbound_wq; 315 296 extern struct workqueue_struct *system_freezable_wq; 316 - extern struct workqueue_struct *system_nrt_freezable_wq; 297 + 298 + static inline struct workqueue_struct * __deprecated __system_nrt_wq(void) 299 + { 300 + return system_wq; 301 + } 302 + 303 + static inline struct workqueue_struct * __deprecated __system_nrt_freezable_wq(void) 304 + { 305 + return system_freezable_wq; 306 + } 307 + 308 + /* equivlalent to system_wq and system_freezable_wq, deprecated */ 309 + #define system_nrt_wq __system_nrt_wq() 310 + #define system_nrt_freezable_wq __system_nrt_freezable_wq() 317 311 318 312 extern struct workqueue_struct * 319 313 __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, ··· 345 321 * Pointer to the allocated workqueue on success, %NULL on failure. 346 322 */ 347 323 #ifdef CONFIG_LOCKDEP 348 - #define alloc_workqueue(fmt, flags, max_active, args...) 
\ 349 - ({ \ 350 - static struct lock_class_key __key; \ 351 - const char *__lock_name; \ 352 - \ 353 - if (__builtin_constant_p(fmt)) \ 354 - __lock_name = (fmt); \ 355 - else \ 356 - __lock_name = #fmt; \ 357 - \ 358 - __alloc_workqueue_key((fmt), (flags), (max_active), \ 359 - &__key, __lock_name, ##args); \ 324 + #define alloc_workqueue(fmt, flags, max_active, args...) \ 325 + ({ \ 326 + static struct lock_class_key __key; \ 327 + const char *__lock_name; \ 328 + \ 329 + if (__builtin_constant_p(fmt)) \ 330 + __lock_name = (fmt); \ 331 + else \ 332 + __lock_name = #fmt; \ 333 + \ 334 + __alloc_workqueue_key((fmt), (flags), (max_active), \ 335 + &__key, __lock_name, ##args); \ 360 336 }) 361 337 #else 362 - #define alloc_workqueue(fmt, flags, max_active, args...) \ 363 - __alloc_workqueue_key((fmt), (flags), (max_active), \ 338 + #define alloc_workqueue(fmt, flags, max_active, args...) \ 339 + __alloc_workqueue_key((fmt), (flags), (max_active), \ 364 340 NULL, NULL, ##args) 365 341 #endif 366 342 ··· 377 353 * RETURNS: 378 354 * Pointer to the allocated workqueue on success, %NULL on failure. 379 355 */ 380 - #define alloc_ordered_workqueue(fmt, flags, args...) \ 356 + #define alloc_ordered_workqueue(fmt, flags, args...) 
\ 381 357 alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args) 382 358 383 - #define create_workqueue(name) \ 359 + #define create_workqueue(name) \ 384 360 alloc_workqueue((name), WQ_MEM_RECLAIM, 1) 385 - #define create_freezable_workqueue(name) \ 361 + #define create_freezable_workqueue(name) \ 386 362 alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1) 387 - #define create_singlethread_workqueue(name) \ 363 + #define create_singlethread_workqueue(name) \ 388 364 alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1) 389 365 390 366 extern void destroy_workqueue(struct workqueue_struct *wq); 391 367 392 - extern int queue_work(struct workqueue_struct *wq, struct work_struct *work); 393 - extern int queue_work_on(int cpu, struct workqueue_struct *wq, 368 + extern bool queue_work_on(int cpu, struct workqueue_struct *wq, 394 369 struct work_struct *work); 395 - extern int queue_delayed_work(struct workqueue_struct *wq, 370 + extern bool queue_work(struct workqueue_struct *wq, struct work_struct *work); 371 + extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 396 372 struct delayed_work *work, unsigned long delay); 397 - extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 373 + extern bool queue_delayed_work(struct workqueue_struct *wq, 398 374 struct delayed_work *work, unsigned long delay); 375 + extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, 376 + struct delayed_work *dwork, unsigned long delay); 377 + extern bool mod_delayed_work(struct workqueue_struct *wq, 378 + struct delayed_work *dwork, unsigned long delay); 399 379 400 380 extern void flush_workqueue(struct workqueue_struct *wq); 401 381 extern void drain_workqueue(struct workqueue_struct *wq); 402 382 extern void flush_scheduled_work(void); 403 383 404 - extern int schedule_work(struct work_struct *work); 405 - extern int schedule_work_on(int cpu, struct work_struct *work); 406 - extern int schedule_delayed_work(struct 
delayed_work *work, unsigned long delay); 407 - extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, 408 - unsigned long delay); 384 + extern bool schedule_work_on(int cpu, struct work_struct *work); 385 + extern bool schedule_work(struct work_struct *work); 386 + extern bool schedule_delayed_work_on(int cpu, struct delayed_work *work, 387 + unsigned long delay); 388 + extern bool schedule_delayed_work(struct delayed_work *work, 389 + unsigned long delay); 409 390 extern int schedule_on_each_cpu(work_func_t func); 410 391 extern int keventd_up(void); 411 392 412 393 int execute_in_process_context(work_func_t fn, struct execute_work *); 413 394 414 395 extern bool flush_work(struct work_struct *work); 415 - extern bool flush_work_sync(struct work_struct *work); 416 396 extern bool cancel_work_sync(struct work_struct *work); 417 397 418 398 extern bool flush_delayed_work(struct delayed_work *dwork); 419 - extern bool flush_delayed_work_sync(struct delayed_work *work); 399 + extern bool cancel_delayed_work(struct delayed_work *dwork); 420 400 extern bool cancel_delayed_work_sync(struct delayed_work *dwork); 421 401 422 402 extern void workqueue_set_max_active(struct workqueue_struct *wq, ··· 430 402 extern unsigned int work_busy(struct work_struct *work); 431 403 432 404 /* 433 - * Kill off a pending schedule_delayed_work(). Note that the work callback 434 - * function may still be running on return from cancel_delayed_work(), unless 435 - * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or 436 - * cancel_work_sync() to wait on it. 437 - */ 438 - static inline bool cancel_delayed_work(struct delayed_work *work) 439 - { 440 - bool ret; 441 - 442 - ret = del_timer_sync(&work->timer); 443 - if (ret) 444 - work_clear_pending(&work->work); 445 - return ret; 446 - } 447 - 448 - /* 449 405 * Like above, but uses del_timer() instead of del_timer_sync(). 
This means, 450 406 * if it returns 0 the timer function may be running and the queueing is in 451 407 * progress. 452 408 */ 453 - static inline bool __cancel_delayed_work(struct delayed_work *work) 409 + static inline bool __deprecated __cancel_delayed_work(struct delayed_work *work) 454 410 { 455 411 bool ret; 456 412 ··· 442 430 if (ret) 443 431 work_clear_pending(&work->work); 444 432 return ret; 433 + } 434 + 435 + /* used to be different but now identical to flush_work(), deprecated */ 436 + static inline bool __deprecated flush_work_sync(struct work_struct *work) 437 + { 438 + return flush_work(work); 439 + } 440 + 441 + /* used to be different but now identical to flush_delayed_work(), deprecated */ 442 + static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwork) 443 + { 444 + return flush_delayed_work(dwork); 445 445 } 446 446 447 447 #ifndef CONFIG_SMP
+2 -2
kernel/srcu.c
··· 379 379 rcu_batch_queue(&sp->batch_queue, head); 380 380 if (!sp->running) { 381 381 sp->running = true; 382 - queue_delayed_work(system_nrt_wq, &sp->work, 0); 382 + schedule_delayed_work(&sp->work, 0); 383 383 } 384 384 spin_unlock_irqrestore(&sp->queue_lock, flags); 385 385 } ··· 631 631 } 632 632 633 633 if (pending) 634 - queue_delayed_work(system_nrt_wq, &sp->work, SRCU_INTERVAL); 634 + schedule_delayed_work(&sp->work, SRCU_INTERVAL); 635 635 } 636 636 637 637 /*
+646 -585
kernel/workqueue.c
··· 58 58 * be executing on any CPU. The gcwq behaves as an unbound one. 59 59 * 60 60 * Note that DISASSOCIATED can be flipped only while holding 61 - * managership of all pools on the gcwq to avoid changing binding 61 + * assoc_mutex of all pools on the gcwq to avoid changing binding 62 62 * state while create_worker() is in progress. 63 63 */ 64 64 GCWQ_DISASSOCIATED = 1 << 0, /* cpu can't serve workers */ ··· 73 73 WORKER_DIE = 1 << 1, /* die die die */ 74 74 WORKER_IDLE = 1 << 2, /* is idle */ 75 75 WORKER_PREP = 1 << 3, /* preparing to run works */ 76 - WORKER_REBIND = 1 << 5, /* mom is home, come back */ 77 76 WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */ 78 77 WORKER_UNBOUND = 1 << 7, /* worker is unbound */ 79 78 80 - WORKER_NOT_RUNNING = WORKER_PREP | WORKER_REBIND | WORKER_UNBOUND | 79 + WORKER_NOT_RUNNING = WORKER_PREP | WORKER_UNBOUND | 81 80 WORKER_CPU_INTENSIVE, 82 81 83 82 NR_WORKER_POOLS = 2, /* # worker pools per gcwq */ ··· 125 126 126 127 struct global_cwq; 127 128 struct worker_pool; 128 - struct idle_rebind; 129 129 130 130 /* 131 131 * The poor guys doing the actual heavy lifting. 
All on-duty workers ··· 148 150 int id; /* I: worker id */ 149 151 150 152 /* for rebinding worker to CPU */ 151 - struct idle_rebind *idle_rebind; /* L: for idle worker */ 152 153 struct work_struct rebind_work; /* L: for busy worker */ 153 154 }; 154 155 ··· 157 160 158 161 struct list_head worklist; /* L: list of pending works */ 159 162 int nr_workers; /* L: total number of workers */ 163 + 164 + /* nr_idle includes the ones off idle_list for rebinding */ 160 165 int nr_idle; /* L: currently idle ones */ 161 166 162 167 struct list_head idle_list; /* X: list of idle workers */ 163 168 struct timer_list idle_timer; /* L: worker idle timeout */ 164 169 struct timer_list mayday_timer; /* L: SOS timer for workers */ 165 170 166 - struct mutex manager_mutex; /* mutex manager should hold */ 171 + struct mutex assoc_mutex; /* protect GCWQ_DISASSOCIATED */ 167 172 struct ida worker_ida; /* L: for worker IDs */ 168 173 }; 169 174 ··· 183 184 struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE]; 184 185 /* L: hash of busy workers */ 185 186 186 - struct worker_pool pools[2]; /* normal and highpri pools */ 187 - 188 - wait_queue_head_t rebind_hold; /* rebind hold wait */ 187 + struct worker_pool pools[NR_WORKER_POOLS]; 188 + /* normal and highpri pools */ 189 189 } ____cacheline_aligned_in_smp; 190 190 191 191 /* ··· 267 269 }; 268 270 269 271 struct workqueue_struct *system_wq __read_mostly; 270 - struct workqueue_struct *system_long_wq __read_mostly; 271 - struct workqueue_struct *system_nrt_wq __read_mostly; 272 - struct workqueue_struct *system_unbound_wq __read_mostly; 273 - struct workqueue_struct *system_freezable_wq __read_mostly; 274 - struct workqueue_struct *system_nrt_freezable_wq __read_mostly; 275 272 EXPORT_SYMBOL_GPL(system_wq); 273 + struct workqueue_struct *system_highpri_wq __read_mostly; 274 + EXPORT_SYMBOL_GPL(system_highpri_wq); 275 + struct workqueue_struct *system_long_wq __read_mostly; 276 276 EXPORT_SYMBOL_GPL(system_long_wq); 277 - 
EXPORT_SYMBOL_GPL(system_nrt_wq); 277 + struct workqueue_struct *system_unbound_wq __read_mostly; 278 278 EXPORT_SYMBOL_GPL(system_unbound_wq); 279 + struct workqueue_struct *system_freezable_wq __read_mostly; 279 280 EXPORT_SYMBOL_GPL(system_freezable_wq); 280 - EXPORT_SYMBOL_GPL(system_nrt_freezable_wq); 281 281 282 282 #define CREATE_TRACE_POINTS 283 283 #include <trace/events/workqueue.h> ··· 530 534 } 531 535 532 536 /* 533 - * A work's data points to the cwq with WORK_STRUCT_CWQ set while the 534 - * work is on queue. Once execution starts, WORK_STRUCT_CWQ is 535 - * cleared and the work data contains the cpu number it was last on. 537 + * While queued, %WORK_STRUCT_CWQ is set and non flag bits of a work's data 538 + * contain the pointer to the queued cwq. Once execution starts, the flag 539 + * is cleared and the high bits contain OFFQ flags and CPU number. 536 540 * 537 - * set_work_{cwq|cpu}() and clear_work_data() can be used to set the 538 - * cwq, cpu or clear work->data. These functions should only be 539 - * called while the work is owned - ie. while the PENDING bit is set. 541 + * set_work_cwq(), set_work_cpu_and_clear_pending(), mark_work_canceling() 542 + * and clear_work_data() can be used to set the cwq, cpu or clear 543 + * work->data. These functions should only be called while the work is 544 + * owned - ie. while the PENDING bit is set. 540 545 * 541 - * get_work_[g]cwq() can be used to obtain the gcwq or cwq 542 - * corresponding to a work. gcwq is available once the work has been 543 - * queued anywhere after initialization. cwq is available only from 544 - * queueing until execution starts. 546 + * get_work_[g]cwq() can be used to obtain the gcwq or cwq corresponding to 547 + * a work. gcwq is available once the work has been queued anywhere after 548 + * initialization until it is sync canceled. cwq is available only while 549 + * the work item is queued. 
550 + * 551 + * %WORK_OFFQ_CANCELING is used to mark a work item which is being 552 + * canceled. While being canceled, a work item may have its PENDING set 553 + * but stay off timer and worklist for arbitrarily long and nobody should 554 + * try to steal the PENDING bit. 545 555 */ 546 556 static inline void set_work_data(struct work_struct *work, unsigned long data, 547 557 unsigned long flags) ··· 564 562 WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags); 565 563 } 566 564 567 - static void set_work_cpu(struct work_struct *work, unsigned int cpu) 565 + static void set_work_cpu_and_clear_pending(struct work_struct *work, 566 + unsigned int cpu) 568 567 { 569 - set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING); 568 + /* 569 + * The following wmb is paired with the implied mb in 570 + * test_and_set_bit(PENDING) and ensures all updates to @work made 571 + * here are visible to and precede any updates by the next PENDING 572 + * owner. 573 + */ 574 + smp_wmb(); 575 + set_work_data(work, (unsigned long)cpu << WORK_OFFQ_CPU_SHIFT, 0); 570 576 } 571 577 572 578 static void clear_work_data(struct work_struct *work) 573 579 { 580 + smp_wmb(); /* see set_work_cpu_and_clear_pending() */ 574 581 set_work_data(work, WORK_STRUCT_NO_CPU, 0); 575 582 } 576 583 ··· 602 591 return ((struct cpu_workqueue_struct *) 603 592 (data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq; 604 593 605 - cpu = data >> WORK_STRUCT_FLAG_BITS; 594 + cpu = data >> WORK_OFFQ_CPU_SHIFT; 606 595 if (cpu == WORK_CPU_NONE) 607 596 return NULL; 608 597 609 598 BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND); 610 599 return get_gcwq(cpu); 600 + } 601 + 602 + static void mark_work_canceling(struct work_struct *work) 603 + { 604 + struct global_cwq *gcwq = get_work_gcwq(work); 605 + unsigned long cpu = gcwq ? 
gcwq->cpu : WORK_CPU_NONE; 606 + 607 + set_work_data(work, (cpu << WORK_OFFQ_CPU_SHIFT) | WORK_OFFQ_CANCELING, 608 + WORK_STRUCT_PENDING); 609 + } 610 + 611 + static bool work_is_canceling(struct work_struct *work) 612 + { 613 + unsigned long data = atomic_long_read(&work->data); 614 + 615 + return !(data & WORK_STRUCT_CWQ) && (data & WORK_OFFQ_CANCELING); 611 616 } 612 617 613 618 /* ··· 683 656 bool managing = pool->flags & POOL_MANAGING_WORKERS; 684 657 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ 685 658 int nr_busy = pool->nr_workers - nr_idle; 659 + 660 + /* 661 + * nr_idle and idle_list may disagree if idle rebinding is in 662 + * progress. Never return %true if idle_list is empty. 663 + */ 664 + if (list_empty(&pool->idle_list)) 665 + return false; 686 666 687 667 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy; 688 668 } ··· 937 903 } 938 904 939 905 /** 906 + * move_linked_works - move linked works to a list 907 + * @work: start of series of works to be scheduled 908 + * @head: target list to append @work to 909 + * @nextp: out paramter for nested worklist walking 910 + * 911 + * Schedule linked works starting from @work to @head. Work series to 912 + * be scheduled starts at @work and includes any consecutive work with 913 + * WORK_STRUCT_LINKED set in its predecessor. 914 + * 915 + * If @nextp is not NULL, it's updated to point to the next work of 916 + * the last scheduled work. This allows move_linked_works() to be 917 + * nested inside outer list_for_each_entry_safe(). 918 + * 919 + * CONTEXT: 920 + * spin_lock_irq(gcwq->lock). 921 + */ 922 + static void move_linked_works(struct work_struct *work, struct list_head *head, 923 + struct work_struct **nextp) 924 + { 925 + struct work_struct *n; 926 + 927 + /* 928 + * Linked worklist will always end before the end of the list, 929 + * use NULL for list head. 
930 + */ 931 + list_for_each_entry_safe_from(work, n, NULL, entry) { 932 + list_move_tail(&work->entry, head); 933 + if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) 934 + break; 935 + } 936 + 937 + /* 938 + * If we're already inside safe list traversal and have moved 939 + * multiple works to the scheduled queue, the next position 940 + * needs to be updated. 941 + */ 942 + if (nextp) 943 + *nextp = n; 944 + } 945 + 946 + static void cwq_activate_delayed_work(struct work_struct *work) 947 + { 948 + struct cpu_workqueue_struct *cwq = get_work_cwq(work); 949 + 950 + trace_workqueue_activate_work(work); 951 + move_linked_works(work, &cwq->pool->worklist, NULL); 952 + __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); 953 + cwq->nr_active++; 954 + } 955 + 956 + static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) 957 + { 958 + struct work_struct *work = list_first_entry(&cwq->delayed_works, 959 + struct work_struct, entry); 960 + 961 + cwq_activate_delayed_work(work); 962 + } 963 + 964 + /** 965 + * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight 966 + * @cwq: cwq of interest 967 + * @color: color of work which left the queue 968 + * 969 + * A work either has completed or is removed from pending queue, 970 + * decrement nr_in_flight of its cwq and handle workqueue flushing. 971 + * 972 + * CONTEXT: 973 + * spin_lock_irq(gcwq->lock). 974 + */ 975 + static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) 976 + { 977 + /* ignore uncolored works */ 978 + if (color == WORK_NO_COLOR) 979 + return; 980 + 981 + cwq->nr_in_flight[color]--; 982 + 983 + cwq->nr_active--; 984 + if (!list_empty(&cwq->delayed_works)) { 985 + /* one down, submit a delayed one */ 986 + if (cwq->nr_active < cwq->max_active) 987 + cwq_activate_first_delayed(cwq); 988 + } 989 + 990 + /* is flush in progress and are we at the flushing tip? */ 991 + if (likely(cwq->flush_color != color)) 992 + return; 993 + 994 + /* are there still in-flight works? 
*/ 995 + if (cwq->nr_in_flight[color]) 996 + return; 997 + 998 + /* this cwq is done, clear flush_color */ 999 + cwq->flush_color = -1; 1000 + 1001 + /* 1002 + * If this was the last cwq, wake up the first flusher. It 1003 + * will handle the rest. 1004 + */ 1005 + if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush)) 1006 + complete(&cwq->wq->first_flusher->done); 1007 + } 1008 + 1009 + /** 1010 + * try_to_grab_pending - steal work item from worklist and disable irq 1011 + * @work: work item to steal 1012 + * @is_dwork: @work is a delayed_work 1013 + * @flags: place to store irq state 1014 + * 1015 + * Try to grab PENDING bit of @work. This function can handle @work in any 1016 + * stable state - idle, on timer or on worklist. Return values are 1017 + * 1018 + * 1 if @work was pending and we successfully stole PENDING 1019 + * 0 if @work was idle and we claimed PENDING 1020 + * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry 1021 + * -ENOENT if someone else is canceling @work, this state may persist 1022 + * for arbitrarily long 1023 + * 1024 + * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting 1025 + * interrupted while holding PENDING and @work off queue, irq must be 1026 + * disabled on entry. This, combined with delayed_work->timer being 1027 + * irqsafe, ensures that we return -EAGAIN for finite short period of time. 1028 + * 1029 + * On successful return, >= 0, irq is disabled and the caller is 1030 + * responsible for releasing it using local_irq_restore(*@flags). 1031 + * 1032 + * This function is safe to call from any context including IRQ handler. 
1033 + */ 1034 + static int try_to_grab_pending(struct work_struct *work, bool is_dwork, 1035 + unsigned long *flags) 1036 + { 1037 + struct global_cwq *gcwq; 1038 + 1039 + local_irq_save(*flags); 1040 + 1041 + /* try to steal the timer if it exists */ 1042 + if (is_dwork) { 1043 + struct delayed_work *dwork = to_delayed_work(work); 1044 + 1045 + /* 1046 + * dwork->timer is irqsafe. If del_timer() fails, it's 1047 + * guaranteed that the timer is not queued anywhere and not 1048 + * running on the local CPU. 1049 + */ 1050 + if (likely(del_timer(&dwork->timer))) 1051 + return 1; 1052 + } 1053 + 1054 + /* try to claim PENDING the normal way */ 1055 + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) 1056 + return 0; 1057 + 1058 + /* 1059 + * The queueing is in progress, or it is already queued. Try to 1060 + * steal it from ->worklist without clearing WORK_STRUCT_PENDING. 1061 + */ 1062 + gcwq = get_work_gcwq(work); 1063 + if (!gcwq) 1064 + goto fail; 1065 + 1066 + spin_lock(&gcwq->lock); 1067 + if (!list_empty(&work->entry)) { 1068 + /* 1069 + * This work is queued, but perhaps we locked the wrong gcwq. 1070 + * In that case we must see the new value after rmb(), see 1071 + * insert_work()->wmb(). 1072 + */ 1073 + smp_rmb(); 1074 + if (gcwq == get_work_gcwq(work)) { 1075 + debug_work_deactivate(work); 1076 + 1077 + /* 1078 + * A delayed work item cannot be grabbed directly 1079 + * because it might have linked NO_COLOR work items 1080 + * which, if left on the delayed_list, will confuse 1081 + * cwq->nr_active management later on and cause 1082 + * stall. Make sure the work item is activated 1083 + * before grabbing. 
1084 + */ 1085 + if (*work_data_bits(work) & WORK_STRUCT_DELAYED) 1086 + cwq_activate_delayed_work(work); 1087 + 1088 + list_del_init(&work->entry); 1089 + cwq_dec_nr_in_flight(get_work_cwq(work), 1090 + get_work_color(work)); 1091 + 1092 + spin_unlock(&gcwq->lock); 1093 + return 1; 1094 + } 1095 + } 1096 + spin_unlock(&gcwq->lock); 1097 + fail: 1098 + local_irq_restore(*flags); 1099 + if (work_is_canceling(work)) 1100 + return -ENOENT; 1101 + cpu_relax(); 1102 + return -EAGAIN; 1103 + } 1104 + 1105 + /** 940 1106 * insert_work - insert a work into gcwq 941 1107 * @cwq: cwq @work belongs to 942 1108 * @work: work to insert ··· 1216 982 struct cpu_workqueue_struct *cwq; 1217 983 struct list_head *worklist; 1218 984 unsigned int work_flags; 1219 - unsigned long flags; 985 + unsigned int req_cpu = cpu; 986 + 987 + /* 988 + * While a work item is PENDING && off queue, a task trying to 989 + * steal the PENDING will busy-loop waiting for it to either get 990 + * queued or lose PENDING. Grabbing PENDING and queueing should 991 + * happen with IRQ disabled. 992 + */ 993 + WARN_ON_ONCE(!irqs_disabled()); 1220 994 1221 995 debug_work_activate(work); 1222 996 ··· 1237 995 if (!(wq->flags & WQ_UNBOUND)) { 1238 996 struct global_cwq *last_gcwq; 1239 997 1240 - if (unlikely(cpu == WORK_CPU_UNBOUND)) 998 + if (cpu == WORK_CPU_UNBOUND) 1241 999 cpu = raw_smp_processor_id(); 1242 1000 1243 1001 /* 1244 - * It's multi cpu. If @wq is non-reentrant and @work 1245 - * was previously on a different cpu, it might still 1246 - * be running there, in which case the work needs to 1247 - * be queued on that cpu to guarantee non-reentrance. 1002 + * It's multi cpu. If @work was previously on a different 1003 + * cpu, it might still be running there, in which case the 1004 + * work needs to be queued on that cpu to guarantee 1005 + * non-reentrancy. 
1248 1006 */ 1249 1007 gcwq = get_gcwq(cpu); 1250 - if (wq->flags & WQ_NON_REENTRANT && 1251 - (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) { 1008 + last_gcwq = get_work_gcwq(work); 1009 + 1010 + if (last_gcwq && last_gcwq != gcwq) { 1252 1011 struct worker *worker; 1253 1012 1254 - spin_lock_irqsave(&last_gcwq->lock, flags); 1013 + spin_lock(&last_gcwq->lock); 1255 1014 1256 1015 worker = find_worker_executing_work(last_gcwq, work); 1257 1016 ··· 1260 1017 gcwq = last_gcwq; 1261 1018 else { 1262 1019 /* meh... not running there, queue here */ 1263 - spin_unlock_irqrestore(&last_gcwq->lock, flags); 1264 - spin_lock_irqsave(&gcwq->lock, flags); 1020 + spin_unlock(&last_gcwq->lock); 1021 + spin_lock(&gcwq->lock); 1265 1022 } 1266 - } else 1267 - spin_lock_irqsave(&gcwq->lock, flags); 1023 + } else { 1024 + spin_lock(&gcwq->lock); 1025 + } 1268 1026 } else { 1269 1027 gcwq = get_gcwq(WORK_CPU_UNBOUND); 1270 - spin_lock_irqsave(&gcwq->lock, flags); 1028 + spin_lock(&gcwq->lock); 1271 1029 } 1272 1030 1273 1031 /* gcwq determined, get cwq and queue */ 1274 1032 cwq = get_cwq(gcwq->cpu, wq); 1275 - trace_workqueue_queue_work(cpu, cwq, work); 1033 + trace_workqueue_queue_work(req_cpu, cwq, work); 1276 1034 1277 1035 if (WARN_ON(!list_empty(&work->entry))) { 1278 - spin_unlock_irqrestore(&gcwq->lock, flags); 1036 + spin_unlock(&gcwq->lock); 1279 1037 return; 1280 1038 } 1281 1039 ··· 1294 1050 1295 1051 insert_work(cwq, work, worklist, work_flags); 1296 1052 1297 - spin_unlock_irqrestore(&gcwq->lock, flags); 1053 + spin_unlock(&gcwq->lock); 1298 1054 } 1299 - 1300 - /** 1301 - * queue_work - queue work on a workqueue 1302 - * @wq: workqueue to use 1303 - * @work: work to queue 1304 - * 1305 - * Returns 0 if @work was already on a queue, non-zero otherwise. 1306 - * 1307 - * We queue the work to the CPU on which it was submitted, but if the CPU dies 1308 - * it can be processed by another CPU. 
1309 - */ 1310 - int queue_work(struct workqueue_struct *wq, struct work_struct *work) 1311 - { 1312 - int ret; 1313 - 1314 - ret = queue_work_on(get_cpu(), wq, work); 1315 - put_cpu(); 1316 - 1317 - return ret; 1318 - } 1319 - EXPORT_SYMBOL_GPL(queue_work); 1320 1055 1321 1056 /** 1322 1057 * queue_work_on - queue work on specific cpu ··· 1303 1080 * @wq: workqueue to use 1304 1081 * @work: work to queue 1305 1082 * 1306 - * Returns 0 if @work was already on a queue, non-zero otherwise. 1083 + * Returns %false if @work was already on a queue, %true otherwise. 1307 1084 * 1308 1085 * We queue the work to a specific CPU, the caller must ensure it 1309 1086 * can't go away. 1310 1087 */ 1311 - int 1312 - queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work) 1088 + bool queue_work_on(int cpu, struct workqueue_struct *wq, 1089 + struct work_struct *work) 1313 1090 { 1314 - int ret = 0; 1091 + bool ret = false; 1092 + unsigned long flags; 1093 + 1094 + local_irq_save(flags); 1315 1095 1316 1096 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 1317 1097 __queue_work(cpu, wq, work); 1318 - ret = 1; 1098 + ret = true; 1319 1099 } 1100 + 1101 + local_irq_restore(flags); 1320 1102 return ret; 1321 1103 } 1322 1104 EXPORT_SYMBOL_GPL(queue_work_on); 1323 1105 1324 - static void delayed_work_timer_fn(unsigned long __data) 1106 + /** 1107 + * queue_work - queue work on a workqueue 1108 + * @wq: workqueue to use 1109 + * @work: work to queue 1110 + * 1111 + * Returns %false if @work was already on a queue, %true otherwise. 1112 + * 1113 + * We queue the work to the CPU on which it was submitted, but if the CPU dies 1114 + * it can be processed by another CPU. 
1115 + */ 1116 + bool queue_work(struct workqueue_struct *wq, struct work_struct *work) 1117 + { 1118 + return queue_work_on(WORK_CPU_UNBOUND, wq, work); 1119 + } 1120 + EXPORT_SYMBOL_GPL(queue_work); 1121 + 1122 + void delayed_work_timer_fn(unsigned long __data) 1325 1123 { 1326 1124 struct delayed_work *dwork = (struct delayed_work *)__data; 1327 1125 struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work); 1328 1126 1329 - __queue_work(smp_processor_id(), cwq->wq, &dwork->work); 1127 + /* should have been called from irqsafe timer with irq already off */ 1128 + __queue_work(dwork->cpu, cwq->wq, &dwork->work); 1330 1129 } 1130 + EXPORT_SYMBOL_GPL(delayed_work_timer_fn); 1331 1131 1332 - /** 1333 - * queue_delayed_work - queue work on a workqueue after delay 1334 - * @wq: workqueue to use 1335 - * @dwork: delayable work to queue 1336 - * @delay: number of jiffies to wait before queueing 1337 - * 1338 - * Returns 0 if @work was already on a queue, non-zero otherwise. 1339 - */ 1340 - int queue_delayed_work(struct workqueue_struct *wq, 1341 - struct delayed_work *dwork, unsigned long delay) 1132 + static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, 1133 + struct delayed_work *dwork, unsigned long delay) 1342 1134 { 1343 - if (delay == 0) 1344 - return queue_work(wq, &dwork->work); 1135 + struct timer_list *timer = &dwork->timer; 1136 + struct work_struct *work = &dwork->work; 1137 + unsigned int lcpu; 1345 1138 1346 - return queue_delayed_work_on(-1, wq, dwork, delay); 1139 + WARN_ON_ONCE(timer->function != delayed_work_timer_fn || 1140 + timer->data != (unsigned long)dwork); 1141 + BUG_ON(timer_pending(timer)); 1142 + BUG_ON(!list_empty(&work->entry)); 1143 + 1144 + timer_stats_timer_set_start_info(&dwork->timer); 1145 + 1146 + /* 1147 + * This stores cwq for the moment, for the timer_fn. Note that the 1148 + * work's gcwq is preserved to allow reentrance detection for 1149 + * delayed works. 
1150 + */ 1151 + if (!(wq->flags & WQ_UNBOUND)) { 1152 + struct global_cwq *gcwq = get_work_gcwq(work); 1153 + 1154 + /* 1155 + * If we cannot get the last gcwq from @work directly, 1156 + * select the last CPU such that it avoids unnecessarily 1157 + * triggering non-reentrancy check in __queue_work(). 1158 + */ 1159 + lcpu = cpu; 1160 + if (gcwq) 1161 + lcpu = gcwq->cpu; 1162 + if (lcpu == WORK_CPU_UNBOUND) 1163 + lcpu = raw_smp_processor_id(); 1164 + } else { 1165 + lcpu = WORK_CPU_UNBOUND; 1166 + } 1167 + 1168 + set_work_cwq(work, get_cwq(lcpu, wq), 0); 1169 + 1170 + dwork->cpu = cpu; 1171 + timer->expires = jiffies + delay; 1172 + 1173 + if (unlikely(cpu != WORK_CPU_UNBOUND)) 1174 + add_timer_on(timer, cpu); 1175 + else 1176 + add_timer(timer); 1347 1177 } 1348 - EXPORT_SYMBOL_GPL(queue_delayed_work); 1349 1178 1350 1179 /** 1351 1180 * queue_delayed_work_on - queue work on specific CPU after delay ··· 1406 1131 * @dwork: work to queue 1407 1132 * @delay: number of jiffies to wait before queueing 1408 1133 * 1409 - * Returns 0 if @work was already on a queue, non-zero otherwise. 1134 + * Returns %false if @work was already on a queue, %true otherwise. If 1135 + * @delay is zero and @dwork is idle, it will be scheduled for immediate 1136 + * execution. 
1410 1137 */ 1411 - int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 1412 - struct delayed_work *dwork, unsigned long delay) 1138 + bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 1139 + struct delayed_work *dwork, unsigned long delay) 1413 1140 { 1414 - int ret = 0; 1415 - struct timer_list *timer = &dwork->timer; 1416 1141 struct work_struct *work = &dwork->work; 1142 + bool ret = false; 1143 + unsigned long flags; 1144 + 1145 + if (!delay) 1146 + return queue_work_on(cpu, wq, &dwork->work); 1147 + 1148 + /* read the comment in __queue_work() */ 1149 + local_irq_save(flags); 1417 1150 1418 1151 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 1419 - unsigned int lcpu; 1420 - 1421 - BUG_ON(timer_pending(timer)); 1422 - BUG_ON(!list_empty(&work->entry)); 1423 - 1424 - timer_stats_timer_set_start_info(&dwork->timer); 1425 - 1426 - /* 1427 - * This stores cwq for the moment, for the timer_fn. 1428 - * Note that the work's gcwq is preserved to allow 1429 - * reentrance detection for delayed works. 
1430 - */ 1431 - if (!(wq->flags & WQ_UNBOUND)) { 1432 - struct global_cwq *gcwq = get_work_gcwq(work); 1433 - 1434 - if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND) 1435 - lcpu = gcwq->cpu; 1436 - else 1437 - lcpu = raw_smp_processor_id(); 1438 - } else 1439 - lcpu = WORK_CPU_UNBOUND; 1440 - 1441 - set_work_cwq(work, get_cwq(lcpu, wq), 0); 1442 - 1443 - timer->expires = jiffies + delay; 1444 - timer->data = (unsigned long)dwork; 1445 - timer->function = delayed_work_timer_fn; 1446 - 1447 - if (unlikely(cpu >= 0)) 1448 - add_timer_on(timer, cpu); 1449 - else 1450 - add_timer(timer); 1451 - ret = 1; 1152 + __queue_delayed_work(cpu, wq, dwork, delay); 1153 + ret = true; 1452 1154 } 1155 + 1156 + local_irq_restore(flags); 1453 1157 return ret; 1454 1158 } 1455 1159 EXPORT_SYMBOL_GPL(queue_delayed_work_on); 1160 + 1161 + /** 1162 + * queue_delayed_work - queue work on a workqueue after delay 1163 + * @wq: workqueue to use 1164 + * @dwork: delayable work to queue 1165 + * @delay: number of jiffies to wait before queueing 1166 + * 1167 + * Equivalent to queue_delayed_work_on() but tries to use the local CPU. 1168 + */ 1169 + bool queue_delayed_work(struct workqueue_struct *wq, 1170 + struct delayed_work *dwork, unsigned long delay) 1171 + { 1172 + return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); 1173 + } 1174 + EXPORT_SYMBOL_GPL(queue_delayed_work); 1175 + 1176 + /** 1177 + * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU 1178 + * @cpu: CPU number to execute work on 1179 + * @wq: workqueue to use 1180 + * @dwork: work to queue 1181 + * @delay: number of jiffies to wait before queueing 1182 + * 1183 + * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise, 1184 + * modify @dwork's timer so that it expires after @delay. If @delay is 1185 + * zero, @work is guaranteed to be scheduled immediately regardless of its 1186 + * current state. 
1187 + * 1188 + * Returns %false if @dwork was idle and queued, %true if @dwork was 1189 + * pending and its timer was modified. 1190 + * 1191 + * This function is safe to call from any context including IRQ handler. 1192 + * See try_to_grab_pending() for details. 1193 + */ 1194 + bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, 1195 + struct delayed_work *dwork, unsigned long delay) 1196 + { 1197 + unsigned long flags; 1198 + int ret; 1199 + 1200 + do { 1201 + ret = try_to_grab_pending(&dwork->work, true, &flags); 1202 + } while (unlikely(ret == -EAGAIN)); 1203 + 1204 + if (likely(ret >= 0)) { 1205 + __queue_delayed_work(cpu, wq, dwork, delay); 1206 + local_irq_restore(flags); 1207 + } 1208 + 1209 + /* -ENOENT from try_to_grab_pending() becomes %true */ 1210 + return ret; 1211 + } 1212 + EXPORT_SYMBOL_GPL(mod_delayed_work_on); 1213 + 1214 + /** 1215 + * mod_delayed_work - modify delay of or queue a delayed work 1216 + * @wq: workqueue to use 1217 + * @dwork: work to queue 1218 + * @delay: number of jiffies to wait before queueing 1219 + * 1220 + * mod_delayed_work_on() on local CPU. 1221 + */ 1222 + bool mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork, 1223 + unsigned long delay) 1224 + { 1225 + return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); 1226 + } 1227 + EXPORT_SYMBOL_GPL(mod_delayed_work); 1456 1228 1457 1229 /** 1458 1230 * worker_enter_idle - enter idle state ··· 1627 1305 } 1628 1306 } 1629 1307 1630 - struct idle_rebind { 1631 - int cnt; /* # workers to be rebound */ 1632 - struct completion done; /* all workers rebound */ 1633 - }; 1634 - 1635 1308 /* 1636 - * Rebind an idle @worker to its CPU. During CPU onlining, this has to 1637 - * happen synchronously for idle workers. worker_thread() will test 1638 - * %WORKER_REBIND before leaving idle and call this function. 1309 + * Rebind an idle @worker to its CPU. 
worker_thread() will test 1310 + * list_empty(@worker->entry) before leaving idle and call this function. 1639 1311 */ 1640 1312 static void idle_worker_rebind(struct worker *worker) 1641 1313 { 1642 1314 struct global_cwq *gcwq = worker->pool->gcwq; 1643 1315 1644 - /* CPU must be online at this point */ 1645 - WARN_ON(!worker_maybe_bind_and_lock(worker)); 1646 - if (!--worker->idle_rebind->cnt) 1647 - complete(&worker->idle_rebind->done); 1648 - spin_unlock_irq(&worker->pool->gcwq->lock); 1316 + /* CPU may go down again inbetween, clear UNBOUND only on success */ 1317 + if (worker_maybe_bind_and_lock(worker)) 1318 + worker_clr_flags(worker, WORKER_UNBOUND); 1649 1319 1650 - /* we did our part, wait for rebind_workers() to finish up */ 1651 - wait_event(gcwq->rebind_hold, !(worker->flags & WORKER_REBIND)); 1652 - 1653 - /* 1654 - * rebind_workers() shouldn't finish until all workers passed the 1655 - * above WORKER_REBIND wait. Tell it when done. 1656 - */ 1657 - spin_lock_irq(&worker->pool->gcwq->lock); 1658 - if (!--worker->idle_rebind->cnt) 1659 - complete(&worker->idle_rebind->done); 1660 - spin_unlock_irq(&worker->pool->gcwq->lock); 1320 + /* rebind complete, become available again */ 1321 + list_add(&worker->entry, &worker->pool->idle_list); 1322 + spin_unlock_irq(&gcwq->lock); 1661 1323 } 1662 1324 1663 1325 /* ··· 1655 1349 struct worker *worker = container_of(work, struct worker, rebind_work); 1656 1350 struct global_cwq *gcwq = worker->pool->gcwq; 1657 1351 1658 - worker_maybe_bind_and_lock(worker); 1659 - 1660 - /* 1661 - * %WORKER_REBIND must be cleared even if the above binding failed; 1662 - * otherwise, we may confuse the next CPU_UP cycle or oops / get 1663 - * stuck by calling idle_worker_rebind() prematurely. If CPU went 1664 - * down again inbetween, %WORKER_UNBOUND would be set, so clearing 1665 - * %WORKER_REBIND is always safe. 
1666 - */ 1667 - worker_clr_flags(worker, WORKER_REBIND); 1352 + if (worker_maybe_bind_and_lock(worker)) 1353 + worker_clr_flags(worker, WORKER_UNBOUND); 1668 1354 1669 1355 spin_unlock_irq(&gcwq->lock); 1670 1356 } ··· 1668 1370 * @gcwq->cpu is coming online. Rebind all workers to the CPU. Rebinding 1669 1371 * is different for idle and busy ones. 1670 1372 * 1671 - * The idle ones should be rebound synchronously and idle rebinding should 1672 - * be complete before any worker starts executing work items with 1673 - * concurrency management enabled; otherwise, scheduler may oops trying to 1674 - * wake up non-local idle worker from wq_worker_sleeping(). 1373 + * Idle ones will be removed from the idle_list and woken up. They will 1374 + * add themselves back after completing rebind. This ensures that the 1375 + * idle_list doesn't contain any unbound workers when re-bound busy workers 1376 + * try to perform local wake-ups for concurrency management. 1675 1377 * 1676 - * This is achieved by repeatedly requesting rebinding until all idle 1677 - * workers are known to have been rebound under @gcwq->lock and holding all 1678 - * idle workers from becoming busy until idle rebinding is complete. 1378 + * Busy workers can rebind after they finish their current work items. 1379 + * Queueing the rebind work item at the head of the scheduled list is 1380 + * enough. Note that nr_running will be properly bumped as busy workers 1381 + * rebind. 1679 1382 * 1680 - * Once idle workers are rebound, busy workers can be rebound as they 1681 - * finish executing their current work items. Queueing the rebind work at 1682 - * the head of their scheduled lists is enough. Note that nr_running will 1683 - * be properbly bumped as busy workers rebind. 1684 - * 1685 - * On return, all workers are guaranteed to either be bound or have rebind 1686 - * work item scheduled. 
1383 + * On return, all non-manager workers are scheduled for rebind - see 1384 + * manage_workers() for the manager special case. Any idle worker 1385 + * including the manager will not appear on @idle_list until rebind is 1386 + * complete, making local wake-ups safe. 1687 1387 */ 1688 1388 static void rebind_workers(struct global_cwq *gcwq) 1689 - __releases(&gcwq->lock) __acquires(&gcwq->lock) 1690 1389 { 1691 - struct idle_rebind idle_rebind; 1692 1390 struct worker_pool *pool; 1693 - struct worker *worker; 1391 + struct worker *worker, *n; 1694 1392 struct hlist_node *pos; 1695 1393 int i; 1696 1394 1697 1395 lockdep_assert_held(&gcwq->lock); 1698 1396 1699 1397 for_each_worker_pool(pool, gcwq) 1700 - lockdep_assert_held(&pool->manager_mutex); 1398 + lockdep_assert_held(&pool->assoc_mutex); 1701 1399 1702 - /* 1703 - * Rebind idle workers. Interlocked both ways. We wait for 1704 - * workers to rebind via @idle_rebind.done. Workers will wait for 1705 - * us to finish up by watching %WORKER_REBIND. 1706 - */ 1707 - init_completion(&idle_rebind.done); 1708 - retry: 1709 - idle_rebind.cnt = 1; 1710 - INIT_COMPLETION(idle_rebind.done); 1711 - 1712 - /* set REBIND and kick idle ones, we'll wait for these later */ 1400 + /* dequeue and kick idle ones */ 1713 1401 for_each_worker_pool(pool, gcwq) { 1714 - list_for_each_entry(worker, &pool->idle_list, entry) { 1715 - unsigned long worker_flags = worker->flags; 1402 + list_for_each_entry_safe(worker, n, &pool->idle_list, entry) { 1403 + /* 1404 + * idle workers should be off @pool->idle_list 1405 + * until rebind is complete to avoid receiving 1406 + * premature local wake-ups. 
1407 + */ 1408 + list_del_init(&worker->entry); 1716 1409 1717 - if (worker->flags & WORKER_REBIND) 1718 - continue; 1719 - 1720 - /* morph UNBOUND to REBIND atomically */ 1721 - worker_flags &= ~WORKER_UNBOUND; 1722 - worker_flags |= WORKER_REBIND; 1723 - ACCESS_ONCE(worker->flags) = worker_flags; 1724 - 1725 - idle_rebind.cnt++; 1726 - worker->idle_rebind = &idle_rebind; 1727 - 1728 - /* worker_thread() will call idle_worker_rebind() */ 1410 + /* 1411 + * worker_thread() will see the above dequeuing 1412 + * and call idle_worker_rebind(). 1413 + */ 1729 1414 wake_up_process(worker->task); 1730 1415 } 1731 1416 } 1732 1417 1733 - if (--idle_rebind.cnt) { 1734 - spin_unlock_irq(&gcwq->lock); 1735 - wait_for_completion(&idle_rebind.done); 1736 - spin_lock_irq(&gcwq->lock); 1737 - /* busy ones might have become idle while waiting, retry */ 1738 - goto retry; 1739 - } 1740 - 1741 - /* all idle workers are rebound, rebind busy workers */ 1418 + /* rebind busy workers */ 1742 1419 for_each_busy_worker(worker, i, pos, gcwq) { 1743 1420 struct work_struct *rebind_work = &worker->rebind_work; 1744 - unsigned long worker_flags = worker->flags; 1745 - 1746 - /* morph UNBOUND to REBIND atomically */ 1747 - worker_flags &= ~WORKER_UNBOUND; 1748 - worker_flags |= WORKER_REBIND; 1749 - ACCESS_ONCE(worker->flags) = worker_flags; 1421 + struct workqueue_struct *wq; 1750 1422 1751 1423 if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, 1752 1424 work_data_bits(rebind_work))) 1753 1425 continue; 1754 1426 1755 - /* wq doesn't matter, use the default one */ 1756 1427 debug_work_activate(rebind_work); 1757 - insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work, 1758 - worker->scheduled.next, 1759 - work_color_to_flags(WORK_NO_COLOR)); 1760 - } 1761 1428 1762 - /* 1763 - * All idle workers are rebound and waiting for %WORKER_REBIND to 1764 - * be cleared inside idle_worker_rebind(). Clear and release. 
1765 - * Clearing %WORKER_REBIND from this foreign context is safe 1766 - * because these workers are still guaranteed to be idle. 1767 - * 1768 - * We need to make sure all idle workers passed WORKER_REBIND wait 1769 - * in idle_worker_rebind() before returning; otherwise, workers can 1770 - * get stuck at the wait if hotplug cycle repeats. 1771 - */ 1772 - idle_rebind.cnt = 1; 1773 - INIT_COMPLETION(idle_rebind.done); 1429 + /* 1430 + * wq doesn't really matter but let's keep @worker->pool 1431 + * and @cwq->pool consistent for sanity. 1432 + */ 1433 + if (worker_pool_pri(worker->pool)) 1434 + wq = system_highpri_wq; 1435 + else 1436 + wq = system_wq; 1774 1437 1775 - for_each_worker_pool(pool, gcwq) { 1776 - list_for_each_entry(worker, &pool->idle_list, entry) { 1777 - worker->flags &= ~WORKER_REBIND; 1778 - idle_rebind.cnt++; 1779 - } 1780 - } 1781 - 1782 - wake_up_all(&gcwq->rebind_hold); 1783 - 1784 - if (--idle_rebind.cnt) { 1785 - spin_unlock_irq(&gcwq->lock); 1786 - wait_for_completion(&idle_rebind.done); 1787 - spin_lock_irq(&gcwq->lock); 1438 + insert_work(get_cwq(gcwq->cpu, wq), rebind_work, 1439 + worker->scheduled.next, 1440 + work_color_to_flags(WORK_NO_COLOR)); 1788 1441 } 1789 1442 } 1790 1443 ··· 2093 1844 * grab %POOL_MANAGING_WORKERS to achieve this because that can 2094 1845 * lead to idle worker depletion (all become busy thinking someone 2095 1846 * else is managing) which in turn can result in deadlock under 2096 - * extreme circumstances. Use @pool->manager_mutex to synchronize 1847 + * extreme circumstances. Use @pool->assoc_mutex to synchronize 2097 1848 * manager against CPU hotplug. 2098 1849 * 2099 - * manager_mutex would always be free unless CPU hotplug is in 1850 + * assoc_mutex would always be free unless CPU hotplug is in 2100 1851 * progress. trylock first without dropping @gcwq->lock. 
2101 1852 */ 2102 - if (unlikely(!mutex_trylock(&pool->manager_mutex))) { 1853 + if (unlikely(!mutex_trylock(&pool->assoc_mutex))) { 2103 1854 spin_unlock_irq(&pool->gcwq->lock); 2104 - mutex_lock(&pool->manager_mutex); 1855 + mutex_lock(&pool->assoc_mutex); 2105 1856 /* 2106 1857 * CPU hotplug could have happened while we were waiting 2107 - * for manager_mutex. Hotplug itself can't handle us 1858 + * for assoc_mutex. Hotplug itself can't handle us 2108 1859 * because manager isn't either on idle or busy list, and 2109 1860 * @gcwq's state and ours could have deviated. 2110 1861 * 2111 - * As hotplug is now excluded via manager_mutex, we can 1862 + * As hotplug is now excluded via assoc_mutex, we can 2112 1863 * simply try to bind. It will succeed or fail depending 2113 1864 * on @gcwq's current state. Try it and adjust 2114 1865 * %WORKER_UNBOUND accordingly. ··· 2131 1882 ret |= maybe_create_worker(pool); 2132 1883 2133 1884 pool->flags &= ~POOL_MANAGING_WORKERS; 2134 - mutex_unlock(&pool->manager_mutex); 1885 + mutex_unlock(&pool->assoc_mutex); 2135 1886 return ret; 2136 - } 2137 - 2138 - /** 2139 - * move_linked_works - move linked works to a list 2140 - * @work: start of series of works to be scheduled 2141 - * @head: target list to append @work to 2142 - * @nextp: out paramter for nested worklist walking 2143 - * 2144 - * Schedule linked works starting from @work to @head. Work series to 2145 - * be scheduled starts at @work and includes any consecutive work with 2146 - * WORK_STRUCT_LINKED set in its predecessor. 2147 - * 2148 - * If @nextp is not NULL, it's updated to point to the next work of 2149 - * the last scheduled work. This allows move_linked_works() to be 2150 - * nested inside outer list_for_each_entry_safe(). 2151 - * 2152 - * CONTEXT: 2153 - * spin_lock_irq(gcwq->lock). 
2154 - */ 2155 - static void move_linked_works(struct work_struct *work, struct list_head *head, 2156 - struct work_struct **nextp) 2157 - { 2158 - struct work_struct *n; 2159 - 2160 - /* 2161 - * Linked worklist will always end before the end of the list, 2162 - * use NULL for list head. 2163 - */ 2164 - list_for_each_entry_safe_from(work, n, NULL, entry) { 2165 - list_move_tail(&work->entry, head); 2166 - if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) 2167 - break; 2168 - } 2169 - 2170 - /* 2171 - * If we're already inside safe list traversal and have moved 2172 - * multiple works to the scheduled queue, the next position 2173 - * needs to be updated. 2174 - */ 2175 - if (nextp) 2176 - *nextp = n; 2177 - } 2178 - 2179 - static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) 2180 - { 2181 - struct work_struct *work = list_first_entry(&cwq->delayed_works, 2182 - struct work_struct, entry); 2183 - 2184 - trace_workqueue_activate_work(work); 2185 - move_linked_works(work, &cwq->pool->worklist, NULL); 2186 - __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); 2187 - cwq->nr_active++; 2188 - } 2189 - 2190 - /** 2191 - * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight 2192 - * @cwq: cwq of interest 2193 - * @color: color of work which left the queue 2194 - * @delayed: for a delayed work 2195 - * 2196 - * A work either has completed or is removed from pending queue, 2197 - * decrement nr_in_flight of its cwq and handle workqueue flushing. 2198 - * 2199 - * CONTEXT: 2200 - * spin_lock_irq(gcwq->lock). 
2201 - */ 2202 - static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color, 2203 - bool delayed) 2204 - { 2205 - /* ignore uncolored works */ 2206 - if (color == WORK_NO_COLOR) 2207 - return; 2208 - 2209 - cwq->nr_in_flight[color]--; 2210 - 2211 - if (!delayed) { 2212 - cwq->nr_active--; 2213 - if (!list_empty(&cwq->delayed_works)) { 2214 - /* one down, submit a delayed one */ 2215 - if (cwq->nr_active < cwq->max_active) 2216 - cwq_activate_first_delayed(cwq); 2217 - } 2218 - } 2219 - 2220 - /* is flush in progress and are we at the flushing tip? */ 2221 - if (likely(cwq->flush_color != color)) 2222 - return; 2223 - 2224 - /* are there still in-flight works? */ 2225 - if (cwq->nr_in_flight[color]) 2226 - return; 2227 - 2228 - /* this cwq is done, clear flush_color */ 2229 - cwq->flush_color = -1; 2230 - 2231 - /* 2232 - * If this was the last cwq, wake up the first flusher. It 2233 - * will handle the rest. 2234 - */ 2235 - if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush)) 2236 - complete(&cwq->wq->first_flusher->done); 2237 1887 } 2238 1888 2239 1889 /** ··· 2178 2030 * necessary to avoid spurious warnings from rescuers servicing the 2179 2031 * unbound or a disassociated gcwq. 
2180 2032 */ 2181 - WARN_ON_ONCE(!(worker->flags & (WORKER_UNBOUND | WORKER_REBIND)) && 2033 + WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) && 2182 2034 !(gcwq->flags & GCWQ_DISASSOCIATED) && 2183 2035 raw_smp_processor_id() != gcwq->cpu); 2184 2036 ··· 2194 2046 return; 2195 2047 } 2196 2048 2197 - /* claim and process */ 2049 + /* claim and dequeue */ 2198 2050 debug_work_deactivate(work); 2199 2051 hlist_add_head(&worker->hentry, bwh); 2200 2052 worker->current_work = work; 2201 2053 worker->current_cwq = cwq; 2202 2054 work_color = get_work_color(work); 2203 2055 2204 - /* record the current cpu number in the work data and dequeue */ 2205 - set_work_cpu(work, gcwq->cpu); 2206 2056 list_del_init(&work->entry); 2207 2057 2208 2058 /* ··· 2217 2071 if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool)) 2218 2072 wake_up_worker(pool); 2219 2073 2074 + /* 2075 + * Record the last CPU and clear PENDING which should be the last 2076 + * update to @work. Also, do this inside @gcwq->lock so that 2077 + * PENDING and queued state changes happen together while IRQ is 2078 + * disabled. 
2079 + */ 2080 + set_work_cpu_and_clear_pending(work, gcwq->cpu); 2081 + 2220 2082 spin_unlock_irq(&gcwq->lock); 2221 2083 2222 - work_clear_pending(work); 2223 2084 lock_map_acquire_read(&cwq->wq->lockdep_map); 2224 2085 lock_map_acquire(&lockdep_map); 2225 2086 trace_workqueue_execute_start(work); ··· 2240 2087 lock_map_release(&cwq->wq->lockdep_map); 2241 2088 2242 2089 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 2243 - printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " 2244 - "%s/0x%08x/%d\n", 2245 - current->comm, preempt_count(), task_pid_nr(current)); 2246 - printk(KERN_ERR " last function: "); 2247 - print_symbol("%s\n", (unsigned long)f); 2090 + pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n" 2091 + " last function: %pf\n", 2092 + current->comm, preempt_count(), task_pid_nr(current), f); 2248 2093 debug_show_held_locks(current); 2249 2094 dump_stack(); 2250 2095 } ··· 2257 2106 hlist_del_init(&worker->hentry); 2258 2107 worker->current_work = NULL; 2259 2108 worker->current_cwq = NULL; 2260 - cwq_dec_nr_in_flight(cwq, work_color, false); 2109 + cwq_dec_nr_in_flight(cwq, work_color); 2261 2110 } 2262 2111 2263 2112 /** ··· 2302 2151 woke_up: 2303 2152 spin_lock_irq(&gcwq->lock); 2304 2153 2305 - /* 2306 - * DIE can be set only while idle and REBIND set while busy has 2307 - * @worker->rebind_work scheduled. Checking here is enough. 
2308 - */ 2309 - if (unlikely(worker->flags & (WORKER_REBIND | WORKER_DIE))) { 2154 + /* we are off idle list if destruction or rebind is requested */ 2155 + if (unlikely(list_empty(&worker->entry))) { 2310 2156 spin_unlock_irq(&gcwq->lock); 2311 2157 2158 + /* if DIE is set, destruction is requested */ 2312 2159 if (worker->flags & WORKER_DIE) { 2313 2160 worker->task->flags &= ~PF_WQ_WORKER; 2314 2161 return 0; 2315 2162 } 2316 2163 2164 + /* otherwise, rebind */ 2317 2165 idle_worker_rebind(worker); 2318 2166 goto woke_up; 2319 2167 } ··· 2795 2645 2796 2646 if (++flush_cnt == 10 || 2797 2647 (flush_cnt % 100 == 0 && flush_cnt <= 1000)) 2798 - pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n", 2799 - wq->name, flush_cnt); 2648 + pr_warn("workqueue %s: flush on destruction isn't complete after %u tries\n", 2649 + wq->name, flush_cnt); 2800 2650 goto reflush; 2801 2651 } 2802 2652 ··· 2807 2657 } 2808 2658 EXPORT_SYMBOL_GPL(drain_workqueue); 2809 2659 2810 - static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, 2811 - bool wait_executing) 2660 + static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr) 2812 2661 { 2813 2662 struct worker *worker = NULL; 2814 2663 struct global_cwq *gcwq; ··· 2829 2680 cwq = get_work_cwq(work); 2830 2681 if (unlikely(!cwq || gcwq != cwq->pool->gcwq)) 2831 2682 goto already_gone; 2832 - } else if (wait_executing) { 2683 + } else { 2833 2684 worker = find_worker_executing_work(gcwq, work); 2834 2685 if (!worker) 2835 2686 goto already_gone; 2836 2687 cwq = worker->current_cwq; 2837 - } else 2838 - goto already_gone; 2688 + } 2839 2689 2840 2690 insert_wq_barrier(cwq, barr, work, worker); 2841 2691 spin_unlock_irq(&gcwq->lock); ··· 2861 2713 * flush_work - wait for a work to finish executing the last queueing instance 2862 2714 * @work: the work to flush 2863 2715 * 2864 - * Wait until @work has finished execution. 
This function considers 2865 - * only the last queueing instance of @work. If @work has been 2866 - * enqueued across different CPUs on a non-reentrant workqueue or on 2867 - * multiple workqueues, @work might still be executing on return on 2868 - * some of the CPUs from earlier queueing. 2869 - * 2870 - * If @work was queued only on a non-reentrant, ordered or unbound 2871 - * workqueue, @work is guaranteed to be idle on return if it hasn't 2872 - * been requeued since flush started. 2716 + * Wait until @work has finished execution. @work is guaranteed to be idle 2717 + * on return if it hasn't been requeued since flush started. 2873 2718 * 2874 2719 * RETURNS: 2875 2720 * %true if flush_work() waited for the work to finish execution, ··· 2875 2734 lock_map_acquire(&work->lockdep_map); 2876 2735 lock_map_release(&work->lockdep_map); 2877 2736 2878 - if (start_flush_work(work, &barr, true)) { 2737 + if (start_flush_work(work, &barr)) { 2879 2738 wait_for_completion(&barr.done); 2880 2739 destroy_work_on_stack(&barr.work); 2881 2740 return true; 2882 - } else 2741 + } else { 2883 2742 return false; 2743 + } 2884 2744 } 2885 2745 EXPORT_SYMBOL_GPL(flush_work); 2886 2746 2887 - static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work) 2747 + static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) 2888 2748 { 2889 - struct wq_barrier barr; 2890 - struct worker *worker; 2891 - 2892 - spin_lock_irq(&gcwq->lock); 2893 - 2894 - worker = find_worker_executing_work(gcwq, work); 2895 - if (unlikely(worker)) 2896 - insert_wq_barrier(worker->current_cwq, &barr, work, worker); 2897 - 2898 - spin_unlock_irq(&gcwq->lock); 2899 - 2900 - if (unlikely(worker)) { 2901 - wait_for_completion(&barr.done); 2902 - destroy_work_on_stack(&barr.work); 2903 - return true; 2904 - } else 2905 - return false; 2906 - } 2907 - 2908 - static bool wait_on_work(struct work_struct *work) 2909 - { 2910 - bool ret = false; 2911 - int cpu; 2912 - 2913 - 
might_sleep(); 2914 - 2915 - lock_map_acquire(&work->lockdep_map); 2916 - lock_map_release(&work->lockdep_map); 2917 - 2918 - for_each_gcwq_cpu(cpu) 2919 - ret |= wait_on_cpu_work(get_gcwq(cpu), work); 2920 - return ret; 2921 - } 2922 - 2923 - /** 2924 - * flush_work_sync - wait until a work has finished execution 2925 - * @work: the work to flush 2926 - * 2927 - * Wait until @work has finished execution. On return, it's 2928 - * guaranteed that all queueing instances of @work which happened 2929 - * before this function is called are finished. In other words, if 2930 - * @work hasn't been requeued since this function was called, @work is 2931 - * guaranteed to be idle on return. 2932 - * 2933 - * RETURNS: 2934 - * %true if flush_work_sync() waited for the work to finish execution, 2935 - * %false if it was already idle. 2936 - */ 2937 - bool flush_work_sync(struct work_struct *work) 2938 - { 2939 - struct wq_barrier barr; 2940 - bool pending, waited; 2941 - 2942 - /* we'll wait for executions separately, queue barr only if pending */ 2943 - pending = start_flush_work(work, &barr, false); 2944 - 2945 - /* wait for executions to finish */ 2946 - waited = wait_on_work(work); 2947 - 2948 - /* wait for the pending one */ 2949 - if (pending) { 2950 - wait_for_completion(&barr.done); 2951 - destroy_work_on_stack(&barr.work); 2952 - } 2953 - 2954 - return pending || waited; 2955 - } 2956 - EXPORT_SYMBOL_GPL(flush_work_sync); 2957 - 2958 - /* 2959 - * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit, 2960 - * so this work can't be re-armed in any way. 2961 - */ 2962 - static int try_to_grab_pending(struct work_struct *work) 2963 - { 2964 - struct global_cwq *gcwq; 2965 - int ret = -1; 2966 - 2967 - if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) 2968 - return 0; 2969 - 2970 - /* 2971 - * The queueing is in progress, or it is already queued. Try to 2972 - * steal it from ->worklist without clearing WORK_STRUCT_PENDING. 
2973 - */ 2974 - gcwq = get_work_gcwq(work); 2975 - if (!gcwq) 2976 - return ret; 2977 - 2978 - spin_lock_irq(&gcwq->lock); 2979 - if (!list_empty(&work->entry)) { 2980 - /* 2981 - * This work is queued, but perhaps we locked the wrong gcwq. 2982 - * In that case we must see the new value after rmb(), see 2983 - * insert_work()->wmb(). 2984 - */ 2985 - smp_rmb(); 2986 - if (gcwq == get_work_gcwq(work)) { 2987 - debug_work_deactivate(work); 2988 - list_del_init(&work->entry); 2989 - cwq_dec_nr_in_flight(get_work_cwq(work), 2990 - get_work_color(work), 2991 - *work_data_bits(work) & WORK_STRUCT_DELAYED); 2992 - ret = 1; 2993 - } 2994 - } 2995 - spin_unlock_irq(&gcwq->lock); 2996 - 2997 - return ret; 2998 - } 2999 - 3000 - static bool __cancel_work_timer(struct work_struct *work, 3001 - struct timer_list* timer) 3002 - { 2749 + unsigned long flags; 3003 2750 int ret; 3004 2751 3005 2752 do { 3006 - ret = (timer && likely(del_timer(timer))); 3007 - if (!ret) 3008 - ret = try_to_grab_pending(work); 3009 - wait_on_work(work); 2753 + ret = try_to_grab_pending(work, is_dwork, &flags); 2754 + /* 2755 + * If someone else is canceling, wait for the same event it 2756 + * would be waiting for before retrying. 
2757 + */ 2758 + if (unlikely(ret == -ENOENT)) 2759 + flush_work(work); 3010 2760 } while (unlikely(ret < 0)); 3011 2761 2762 + /* tell other tasks trying to grab @work to back off */ 2763 + mark_work_canceling(work); 2764 + local_irq_restore(flags); 2765 + 2766 + flush_work(work); 3012 2767 clear_work_data(work); 3013 2768 return ret; 3014 2769 } ··· 2929 2892 */ 2930 2893 bool cancel_work_sync(struct work_struct *work) 2931 2894 { 2932 - return __cancel_work_timer(work, NULL); 2895 + return __cancel_work_timer(work, false); 2933 2896 } 2934 2897 EXPORT_SYMBOL_GPL(cancel_work_sync); 2935 2898 ··· 2947 2910 */ 2948 2911 bool flush_delayed_work(struct delayed_work *dwork) 2949 2912 { 2913 + local_irq_disable(); 2950 2914 if (del_timer_sync(&dwork->timer)) 2951 - __queue_work(raw_smp_processor_id(), 2915 + __queue_work(dwork->cpu, 2952 2916 get_work_cwq(&dwork->work)->wq, &dwork->work); 2917 + local_irq_enable(); 2953 2918 return flush_work(&dwork->work); 2954 2919 } 2955 2920 EXPORT_SYMBOL(flush_delayed_work); 2956 2921 2957 2922 /** 2958 - * flush_delayed_work_sync - wait for a dwork to finish 2959 - * @dwork: the delayed work to flush 2923 + * cancel_delayed_work - cancel a delayed work 2924 + * @dwork: delayed_work to cancel 2960 2925 * 2961 - * Delayed timer is cancelled and the pending work is queued for 2962 - * execution immediately. Other than timer handling, its behavior 2963 - * is identical to flush_work_sync(). 2926 + * Kill off a pending delayed_work. Returns %true if @dwork was pending 2927 + * and canceled; %false if wasn't pending. Note that the work callback 2928 + * function may still be running on return, unless it returns %true and the 2929 + * work doesn't re-arm itself. Explicitly flush or use 2930 + * cancel_delayed_work_sync() to wait on it. 2964 2931 * 2965 - * RETURNS: 2966 - * %true if flush_work_sync() waited for the work to finish execution, 2967 - * %false if it was already idle. 
2932 + * This function is safe to call from any context including IRQ handler. 2968 2933 */ 2969 - bool flush_delayed_work_sync(struct delayed_work *dwork) 2934 + bool cancel_delayed_work(struct delayed_work *dwork) 2970 2935 { 2971 - if (del_timer_sync(&dwork->timer)) 2972 - __queue_work(raw_smp_processor_id(), 2973 - get_work_cwq(&dwork->work)->wq, &dwork->work); 2974 - return flush_work_sync(&dwork->work); 2936 + unsigned long flags; 2937 + int ret; 2938 + 2939 + do { 2940 + ret = try_to_grab_pending(&dwork->work, true, &flags); 2941 + } while (unlikely(ret == -EAGAIN)); 2942 + 2943 + if (unlikely(ret < 0)) 2944 + return false; 2945 + 2946 + set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work)); 2947 + local_irq_restore(flags); 2948 + return true; 2975 2949 } 2976 - EXPORT_SYMBOL(flush_delayed_work_sync); 2950 + EXPORT_SYMBOL(cancel_delayed_work); 2977 2951 2978 2952 /** 2979 2953 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish ··· 2997 2949 */ 2998 2950 bool cancel_delayed_work_sync(struct delayed_work *dwork) 2999 2951 { 3000 - return __cancel_work_timer(&dwork->work, &dwork->timer); 2952 + return __cancel_work_timer(&dwork->work, true); 3001 2953 } 3002 2954 EXPORT_SYMBOL(cancel_delayed_work_sync); 3003 2955 3004 2956 /** 3005 - * schedule_work - put work task in global workqueue 3006 - * @work: job to be done 3007 - * 3008 - * Returns zero if @work was already on the kernel-global workqueue and 3009 - * non-zero otherwise. 3010 - * 3011 - * This puts a job in the kernel-global workqueue if it was not already 3012 - * queued and leaves it in the same position on the kernel-global 3013 - * workqueue otherwise. 
3014 - */ 3015 - int schedule_work(struct work_struct *work) 3016 - { 3017 - return queue_work(system_wq, work); 3018 - } 3019 - EXPORT_SYMBOL(schedule_work); 3020 - 3021 - /* 3022 2957 * schedule_work_on - put work task on a specific cpu 3023 2958 * @cpu: cpu to put the work task on 3024 2959 * @work: job to be done 3025 2960 * 3026 2961 * This puts a job on a specific cpu 3027 2962 */ 3028 - int schedule_work_on(int cpu, struct work_struct *work) 2963 + bool schedule_work_on(int cpu, struct work_struct *work) 3029 2964 { 3030 2965 return queue_work_on(cpu, system_wq, work); 3031 2966 } 3032 2967 EXPORT_SYMBOL(schedule_work_on); 3033 2968 3034 2969 /** 3035 - * schedule_delayed_work - put work task in global workqueue after delay 3036 - * @dwork: job to be done 3037 - * @delay: number of jiffies to wait or 0 for immediate execution 2970 + * schedule_work - put work task in global workqueue 2971 + * @work: job to be done 3038 2972 * 3039 - * After waiting for a given time this puts a job in the kernel-global 3040 - * workqueue. 2973 + * Returns %false if @work was already on the kernel-global workqueue and 2974 + * %true otherwise. 2975 + * 2976 + * This puts a job in the kernel-global workqueue if it was not already 2977 + * queued and leaves it in the same position on the kernel-global 2978 + * workqueue otherwise. 3041 2979 */ 3042 - int schedule_delayed_work(struct delayed_work *dwork, 3043 - unsigned long delay) 2980 + bool schedule_work(struct work_struct *work) 3044 2981 { 3045 - return queue_delayed_work(system_wq, dwork, delay); 2982 + return queue_work(system_wq, work); 3046 2983 } 3047 - EXPORT_SYMBOL(schedule_delayed_work); 2984 + EXPORT_SYMBOL(schedule_work); 3048 2985 3049 2986 /** 3050 2987 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay ··· 3040 3007 * After waiting for a given time this puts a job in the kernel-global 3041 3008 * workqueue on the specified CPU. 
3042 3009 */ 3043 - int schedule_delayed_work_on(int cpu, 3044 - struct delayed_work *dwork, unsigned long delay) 3010 + bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork, 3011 + unsigned long delay) 3045 3012 { 3046 3013 return queue_delayed_work_on(cpu, system_wq, dwork, delay); 3047 3014 } 3048 3015 EXPORT_SYMBOL(schedule_delayed_work_on); 3016 + 3017 + /** 3018 + * schedule_delayed_work - put work task in global workqueue after delay 3019 + * @dwork: job to be done 3020 + * @delay: number of jiffies to wait or 0 for immediate execution 3021 + * 3022 + * After waiting for a given time this puts a job in the kernel-global 3023 + * workqueue. 3024 + */ 3025 + bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay) 3026 + { 3027 + return queue_delayed_work(system_wq, dwork, delay); 3028 + } 3029 + EXPORT_SYMBOL(schedule_delayed_work); 3049 3030 3050 3031 /** 3051 3032 * schedule_on_each_cpu - execute a function synchronously on each online CPU ··· 3208 3161 int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE; 3209 3162 3210 3163 if (max_active < 1 || max_active > lim) 3211 - printk(KERN_WARNING "workqueue: max_active %d requested for %s " 3212 - "is out of range, clamping between %d and %d\n", 3213 - max_active, name, 1, lim); 3164 + pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n", 3165 + max_active, name, 1, lim); 3214 3166 3215 3167 return clamp_val(max_active, 1, lim); 3216 3168 } ··· 3365 3319 EXPORT_SYMBOL_GPL(destroy_workqueue); 3366 3320 3367 3321 /** 3322 + * cwq_set_max_active - adjust max_active of a cwq 3323 + * @cwq: target cpu_workqueue_struct 3324 + * @max_active: new max_active value. 3325 + * 3326 + * Set @cwq->max_active to @max_active and activate delayed works if 3327 + * increased. 3328 + * 3329 + * CONTEXT: 3330 + * spin_lock_irq(gcwq->lock). 
3331 + */ 3332 + static void cwq_set_max_active(struct cpu_workqueue_struct *cwq, int max_active) 3333 + { 3334 + cwq->max_active = max_active; 3335 + 3336 + while (!list_empty(&cwq->delayed_works) && 3337 + cwq->nr_active < cwq->max_active) 3338 + cwq_activate_first_delayed(cwq); 3339 + } 3340 + 3341 + /** 3368 3342 * workqueue_set_max_active - adjust max_active of a workqueue 3369 3343 * @wq: target workqueue 3370 3344 * @max_active: new max_active value. ··· 3411 3345 3412 3346 if (!(wq->flags & WQ_FREEZABLE) || 3413 3347 !(gcwq->flags & GCWQ_FREEZING)) 3414 - get_cwq(gcwq->cpu, wq)->max_active = max_active; 3348 + cwq_set_max_active(get_cwq(gcwq->cpu, wq), max_active); 3415 3349 3416 3350 spin_unlock_irq(&gcwq->lock); 3417 3351 } ··· 3506 3440 */ 3507 3441 3508 3442 /* claim manager positions of all pools */ 3509 - static void gcwq_claim_management_and_lock(struct global_cwq *gcwq) 3443 + static void gcwq_claim_assoc_and_lock(struct global_cwq *gcwq) 3510 3444 { 3511 3445 struct worker_pool *pool; 3512 3446 3513 3447 for_each_worker_pool(pool, gcwq) 3514 - mutex_lock_nested(&pool->manager_mutex, pool - gcwq->pools); 3448 + mutex_lock_nested(&pool->assoc_mutex, pool - gcwq->pools); 3515 3449 spin_lock_irq(&gcwq->lock); 3516 3450 } 3517 3451 3518 3452 /* release manager positions */ 3519 - static void gcwq_release_management_and_unlock(struct global_cwq *gcwq) 3453 + static void gcwq_release_assoc_and_unlock(struct global_cwq *gcwq) 3520 3454 { 3521 3455 struct worker_pool *pool; 3522 3456 3523 3457 spin_unlock_irq(&gcwq->lock); 3524 3458 for_each_worker_pool(pool, gcwq) 3525 - mutex_unlock(&pool->manager_mutex); 3459 + mutex_unlock(&pool->assoc_mutex); 3526 3460 } 3527 3461 3528 3462 static void gcwq_unbind_fn(struct work_struct *work) ··· 3535 3469 3536 3470 BUG_ON(gcwq->cpu != smp_processor_id()); 3537 3471 3538 - gcwq_claim_management_and_lock(gcwq); 3472 + gcwq_claim_assoc_and_lock(gcwq); 3539 3473 3540 3474 /* 3541 3475 * We've claimed all manager 
positions. Make all workers unbound ··· 3552 3486 3553 3487 gcwq->flags |= GCWQ_DISASSOCIATED; 3554 3488 3555 - gcwq_release_management_and_unlock(gcwq); 3489 + gcwq_release_assoc_and_unlock(gcwq); 3556 3490 3557 3491 /* 3558 3492 * Call schedule() so that we cross rq->lock and thus can guarantee ··· 3580 3514 * Workqueues should be brought up before normal priority CPU notifiers. 3581 3515 * This will be registered high priority CPU notifier. 3582 3516 */ 3583 - static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb, 3517 + static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb, 3584 3518 unsigned long action, 3585 3519 void *hcpu) 3586 3520 { ··· 3608 3542 3609 3543 case CPU_DOWN_FAILED: 3610 3544 case CPU_ONLINE: 3611 - gcwq_claim_management_and_lock(gcwq); 3545 + gcwq_claim_assoc_and_lock(gcwq); 3612 3546 gcwq->flags &= ~GCWQ_DISASSOCIATED; 3613 3547 rebind_workers(gcwq); 3614 - gcwq_release_management_and_unlock(gcwq); 3548 + gcwq_release_assoc_and_unlock(gcwq); 3615 3549 break; 3616 3550 } 3617 3551 return NOTIFY_OK; ··· 3621 3555 * Workqueues should be brought down after normal priority CPU notifiers. 3622 3556 * This will be registered as low priority CPU notifier. 
3623 3557 */ 3624 - static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb, 3558 + static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb, 3625 3559 unsigned long action, 3626 3560 void *hcpu) 3627 3561 { ··· 3632 3566 case CPU_DOWN_PREPARE: 3633 3567 /* unbinding should happen on the local CPU */ 3634 3568 INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn); 3635 - schedule_work_on(cpu, &unbind_work); 3569 + queue_work_on(cpu, system_highpri_wq, &unbind_work); 3636 3570 flush_work(&unbind_work); 3637 3571 break; 3638 3572 } ··· 3801 3735 continue; 3802 3736 3803 3737 /* restore max_active and repopulate worklist */ 3804 - cwq->max_active = wq->saved_max_active; 3805 - 3806 - while (!list_empty(&cwq->delayed_works) && 3807 - cwq->nr_active < cwq->max_active) 3808 - cwq_activate_first_delayed(cwq); 3738 + cwq_set_max_active(cwq, wq->saved_max_active); 3809 3739 } 3810 3740 3811 3741 for_each_worker_pool(pool, gcwq) ··· 3821 3759 unsigned int cpu; 3822 3760 int i; 3823 3761 3762 + /* make sure we have enough bits for OFFQ CPU number */ 3763 + BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_CPU_SHIFT)) < 3764 + WORK_CPU_LAST); 3765 + 3824 3766 cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP); 3825 - cpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN); 3767 + hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN); 3826 3768 3827 3769 /* initialize gcwqs */ 3828 3770 for_each_gcwq_cpu(cpu) { ··· 3852 3786 setup_timer(&pool->mayday_timer, gcwq_mayday_timeout, 3853 3787 (unsigned long)pool); 3854 3788 3855 - mutex_init(&pool->manager_mutex); 3789 + mutex_init(&pool->assoc_mutex); 3856 3790 ida_init(&pool->worker_ida); 3857 3791 } 3858 - 3859 - init_waitqueue_head(&gcwq->rebind_hold); 3860 3792 } 3861 3793 3862 3794 /* create the initial worker */ ··· 3877 3813 } 3878 3814 3879 3815 system_wq = alloc_workqueue("events", 0, 0); 3816 + system_highpri_wq = alloc_workqueue("events_highpri", 
WQ_HIGHPRI, 0); 3880 3817 system_long_wq = alloc_workqueue("events_long", 0, 0); 3881 - system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0); 3882 3818 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, 3883 3819 WQ_UNBOUND_MAX_ACTIVE); 3884 3820 system_freezable_wq = alloc_workqueue("events_freezable", 3885 3821 WQ_FREEZABLE, 0); 3886 - system_nrt_freezable_wq = alloc_workqueue("events_nrt_freezable", 3887 - WQ_NON_REENTRANT | WQ_FREEZABLE, 0); 3888 - BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq || 3889 - !system_unbound_wq || !system_freezable_wq || 3890 - !system_nrt_freezable_wq); 3822 + BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq || 3823 + !system_unbound_wq || !system_freezable_wq); 3891 3824 return 0; 3892 3825 } 3893 3826 early_initcall(init_workqueues);
+1 -1
mm/slab.c
··· 900 900 */ 901 901 if (keventd_up() && reap_work->work.func == NULL) { 902 902 init_reap_node(cpu); 903 - INIT_DELAYED_WORK_DEFERRABLE(reap_work, cache_reap); 903 + INIT_DEFERRABLE_WORK(reap_work, cache_reap); 904 904 schedule_delayed_work_on(cpu, reap_work, 905 905 __round_jiffies_relative(HZ, cpu)); 906 906 }
+1 -1
mm/vmstat.c
··· 1157 1157 { 1158 1158 struct delayed_work *work = &per_cpu(vmstat_work, cpu); 1159 1159 1160 - INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update); 1160 + INIT_DEFERRABLE_WORK(work, vmstat_update); 1161 1161 schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu)); 1162 1162 } 1163 1163
+1 -1
net/9p/trans_fd.c
··· 1083 1083 1084 1084 void p9_trans_fd_exit(void) 1085 1085 { 1086 - flush_work_sync(&p9_poll_work); 1086 + flush_work(&p9_poll_work); 1087 1087 v9fs_unregister_trans(&p9_tcp_trans); 1088 1088 v9fs_unregister_trans(&p9_unix_trans); 1089 1089 v9fs_unregister_trans(&p9_fd_trans);
+2 -2
net/core/dst.c
··· 222 222 if (dst_garbage.timer_inc > DST_GC_INC) { 223 223 dst_garbage.timer_inc = DST_GC_INC; 224 224 dst_garbage.timer_expires = DST_GC_MIN; 225 - cancel_delayed_work(&dst_gc_work); 226 - schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires); 225 + mod_delayed_work(system_wq, &dst_gc_work, 226 + dst_garbage.timer_expires); 227 227 } 228 228 spin_unlock_bh(&dst_garbage.lock); 229 229 }
+1 -1
net/core/neighbour.c
··· 1545 1545 panic("cannot allocate neighbour cache hashes"); 1546 1546 1547 1547 rwlock_init(&tbl->lock); 1548 - INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work); 1548 + INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work); 1549 1549 schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time); 1550 1550 setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl); 1551 1551 skb_queue_head_init_class(&tbl->proxy_queue,
+1 -1
net/dsa/dsa.c
··· 370 370 if (dst->link_poll_needed) 371 371 del_timer_sync(&dst->link_poll_timer); 372 372 373 - flush_work_sync(&dst->link_poll_work); 373 + flush_work(&dst->link_poll_work); 374 374 375 375 for (i = 0; i < dst->pd->nr_chips; i++) { 376 376 struct dsa_switch *ds = dst->ds[i];
+1 -1
net/ipv4/inetpeer.c
··· 194 194 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, 195 195 NULL); 196 196 197 - INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker); 197 + INIT_DEFERRABLE_WORK(&gc_work, inetpeer_gc_worker); 198 198 } 199 199 200 200 static int addr_compare(const struct inetpeer_addr *a,
+1 -2
net/rfkill/input.c
··· 164 164 rfkill_op_pending = true; 165 165 if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) { 166 166 /* bypass the limiter for EPO */ 167 - cancel_delayed_work(&rfkill_op_work); 168 - schedule_delayed_work(&rfkill_op_work, 0); 167 + mod_delayed_work(system_wq, &rfkill_op_work, 0); 169 168 rfkill_last_scheduled = jiffies; 170 169 } else 171 170 rfkill_schedule_ratelimited();
+1 -1
net/sunrpc/cache.c
··· 1635 1635 1636 1636 void __init cache_initialize(void) 1637 1637 { 1638 - INIT_DELAYED_WORK_DEFERRABLE(&cache_cleaner, do_cache_clean); 1638 + INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean); 1639 1639 } 1640 1640 1641 1641 int cache_register_net(struct cache_detail *cd, struct net *net)
+4 -4
security/keys/gc.c
··· 62 62 63 63 if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) { 64 64 kdebug("IMMEDIATE"); 65 - queue_work(system_nrt_wq, &key_gc_work); 65 + schedule_work(&key_gc_work); 66 66 } else if (gc_at < key_gc_next_run) { 67 67 kdebug("DEFERRED"); 68 68 key_gc_next_run = gc_at; ··· 77 77 void key_schedule_gc_links(void) 78 78 { 79 79 set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags); 80 - queue_work(system_nrt_wq, &key_gc_work); 80 + schedule_work(&key_gc_work); 81 81 } 82 82 83 83 /* ··· 120 120 set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags); 121 121 122 122 kdebug("schedule"); 123 - queue_work(system_nrt_wq, &key_gc_work); 123 + schedule_work(&key_gc_work); 124 124 125 125 kdebug("sleep"); 126 126 wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE, key_gc_wait_bit, ··· 369 369 } 370 370 371 371 if (gc_state & KEY_GC_REAP_AGAIN) 372 - queue_work(system_nrt_wq, &key_gc_work); 372 + schedule_work(&key_gc_work); 373 373 kleave(" [end %x]", gc_state); 374 374 return; 375 375
+1 -1
security/keys/key.c
··· 598 598 key_check(key); 599 599 600 600 if (atomic_dec_and_test(&key->usage)) 601 - queue_work(system_nrt_wq, &key_gc_work); 601 + schedule_work(&key_gc_work); 602 602 } 603 603 } 604 604 EXPORT_SYMBOL(key_put);
+1 -1
sound/i2c/other/ak4113.c
··· 141 141 { 142 142 chip->init = 1; 143 143 mb(); 144 - flush_delayed_work_sync(&chip->work); 144 + flush_delayed_work(&chip->work); 145 145 ak4113_init_regs(chip); 146 146 /* bring up statistics / event queing */ 147 147 chip->init = 0;
+1 -1
sound/i2c/other/ak4114.c
··· 154 154 { 155 155 chip->init = 1; 156 156 mb(); 157 - flush_delayed_work_sync(&chip->work); 157 + flush_delayed_work(&chip->work); 158 158 ak4114_init_regs(chip); 159 159 /* bring up statistics / event queing */ 160 160 chip->init = 0;
+4 -4
sound/pci/oxygen/oxygen_lib.c
··· 573 573 oxygen_shutdown(chip); 574 574 if (chip->irq >= 0) 575 575 free_irq(chip->irq, chip); 576 - flush_work_sync(&chip->spdif_input_bits_work); 577 - flush_work_sync(&chip->gpio_work); 576 + flush_work(&chip->spdif_input_bits_work); 577 + flush_work(&chip->gpio_work); 578 578 chip->model.cleanup(chip); 579 579 kfree(chip->model_data); 580 580 mutex_destroy(&chip->mutex); ··· 751 751 spin_unlock_irq(&chip->reg_lock); 752 752 753 753 synchronize_irq(chip->irq); 754 - flush_work_sync(&chip->spdif_input_bits_work); 755 - flush_work_sync(&chip->gpio_work); 754 + flush_work(&chip->spdif_input_bits_work); 755 + flush_work(&chip->gpio_work); 756 756 chip->interrupt_mask = saved_interrupt_mask; 757 757 758 758 pci_disable_device(pci);
+1 -1
sound/soc/codecs/wm8350.c
··· 1601 1601 1602 1602 /* if there was any work waiting then we run it now and 1603 1603 * wait for its completion */ 1604 - flush_delayed_work_sync(&codec->dapm.delayed_work); 1604 + flush_delayed_work(&codec->dapm.delayed_work); 1605 1605 1606 1606 wm8350_set_bias_level(codec, SND_SOC_BIAS_OFF); 1607 1607
+1 -1
sound/soc/codecs/wm8753.c
··· 1509 1509 /* power down chip */ 1510 1510 static int wm8753_remove(struct snd_soc_codec *codec) 1511 1511 { 1512 - flush_delayed_work_sync(&codec->dapm.delayed_work); 1512 + flush_delayed_work(&codec->dapm.delayed_work); 1513 1513 wm8753_set_bias_level(codec, SND_SOC_BIAS_OFF); 1514 1514 1515 1515 return 0;
+3 -3
sound/soc/soc-core.c
··· 591 591 592 592 /* close any waiting streams and save state */ 593 593 for (i = 0; i < card->num_rtd; i++) { 594 - flush_delayed_work_sync(&card->rtd[i].delayed_work); 594 + flush_delayed_work(&card->rtd[i].delayed_work); 595 595 card->rtd[i].codec->dapm.suspend_bias_level = card->rtd[i].codec->dapm.bias_level; 596 596 } 597 597 ··· 1848 1848 /* make sure any delayed work runs */ 1849 1849 for (i = 0; i < card->num_rtd; i++) { 1850 1850 struct snd_soc_pcm_runtime *rtd = &card->rtd[i]; 1851 - flush_delayed_work_sync(&rtd->delayed_work); 1851 + flush_delayed_work(&rtd->delayed_work); 1852 1852 } 1853 1853 1854 1854 /* remove auxiliary devices */ ··· 1892 1892 * now, we're shutting down so no imminent restart. */ 1893 1893 for (i = 0; i < card->num_rtd; i++) { 1894 1894 struct snd_soc_pcm_runtime *rtd = &card->rtd[i]; 1895 - flush_delayed_work_sync(&rtd->delayed_work); 1895 + flush_delayed_work(&rtd->delayed_work); 1896 1896 } 1897 1897 1898 1898 snd_soc_dapm_shutdown(card);
+1 -1
virt/kvm/eventfd.c
··· 90 90 * We know no new events will be scheduled at this point, so block 91 91 * until all previously outstanding events have completed 92 92 */ 93 - flush_work_sync(&irqfd->inject); 93 + flush_work(&irqfd->inject); 94 94 95 95 /* 96 96 * It is now safe to release the object's resources