Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

workqueue: use mod_delayed_work() instead of cancel + queue

Convert delayed_work users doing cancel_delayed_work() followed by
queue_delayed_work() to mod_delayed_work().

Most conversions are straightforward. Ones worth mentioning are,

* drivers/edac/edac_mc.c: edac_mc_workq_setup() converted to always
use mod_delayed_work() and cancel loop in
edac_mc_reset_delay_period() is dropped.

* drivers/platform/x86/thinkpad_acpi.c: No need to remember whether
watchdog is active or not. @fan_watchdog_active and related code
dropped.

* drivers/power/charger-manager.c: Seemingly a lot of
delayed_work_pending() abuse going on here.
[delayed_]work_pending() are unsynchronized and racy when used like
this. I converted one instance in fullbatt_handler(). Please
convert the rest so that it invokes workqueue APIs for the intended
target state rather than trying to game work item pending state
transitions. e.g. if timer should be modified - call
mod_delayed_work(), canceled - call cancel_delayed_work[_sync]().

* drivers/thermal/thermal_sys.c: thermal_zone_device_set_polling()
simplified. Note that round_jiffies() calls in this function are
meaningless. round_jiffies() works on absolute jiffies, not the delta
delay used by delayed_work.

v2: Tomi pointed out that __cancel_delayed_work() users can't be
safely converted to mod_delayed_work(). They could be calling it
from irq context and if that happens while delayed_work_timer_fn()
is running, it could deadlock. __cancel_delayed_work() users are
dropped.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Henrique de Moraes Holschuh <hmh@hmh.eng.br>
Acked-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
Acked-by: Anton Vorontsov <cbouatmailru@gmail.com>
Acked-by: David Howells <dhowells@redhat.com>
Cc: Tomi Valkeinen <tomi.valkeinen@ti.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Doug Thompson <dougthompson@xmission.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Roland Dreier <roland@kernel.org>
Cc: "John W. Linville" <linville@tuxdriver.com>
Cc: Zhang Rui <rui.zhang@intel.com>
Cc: Len Brown <len.brown@intel.com>
Cc: "J. Bruce Fields" <bfields@fieldses.org>
Cc: Johannes Berg <johannes@sipsolutions.net>

Tejun Heo 41f63c53 8376fe22

+41 -105
+2 -4
block/genhd.c
··· 1534 1534 1535 1535 spin_lock_irq(&ev->lock); 1536 1536 ev->clearing |= mask; 1537 - if (!ev->block) { 1538 - cancel_delayed_work(&ev->dwork); 1539 - queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0); 1540 - } 1537 + if (!ev->block) 1538 + mod_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0); 1541 1539 spin_unlock_irq(&ev->lock); 1542 1540 } 1543 1541
+1 -16
drivers/edac/edac_mc.c
··· 538 538 return; 539 539 540 540 INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function); 541 - queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec)); 541 + mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec)); 542 542 } 543 543 544 544 /* ··· 576 576 struct mem_ctl_info *mci; 577 577 struct list_head *item; 578 578 579 - mutex_lock(&mem_ctls_mutex); 580 - 581 - /* scan the list and turn off all workq timers, doing so under lock 582 - */ 583 - list_for_each(item, &mc_devices) { 584 - mci = list_entry(item, struct mem_ctl_info, link); 585 - 586 - if (mci->op_state == OP_RUNNING_POLL) 587 - cancel_delayed_work(&mci->work); 588 - } 589 - 590 - mutex_unlock(&mem_ctls_mutex); 591 - 592 - 593 - /* re-walk the list, and reset the poll delay */ 594 579 mutex_lock(&mem_ctls_mutex); 595 580 596 581 list_for_each(item, &mc_devices) {
+1 -3
drivers/infiniband/core/addr.c
··· 152 152 { 153 153 unsigned long delay; 154 154 155 - cancel_delayed_work(&work); 156 - 157 155 delay = time - jiffies; 158 156 if ((long)delay <= 0) 159 157 delay = 1; 160 158 161 - queue_delayed_work(addr_wq, &work, delay); 159 + mod_delayed_work(addr_wq, &work, delay); 162 160 } 163 161 164 162 static void queue_req(struct addr_req *req)
+2 -4
drivers/infiniband/hw/nes/nes_hw.c
··· 2679 2679 } 2680 2680 } 2681 2681 if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_SFP_D) { 2682 - if (nesdev->link_recheck) 2683 - cancel_delayed_work(&nesdev->work); 2684 2682 nesdev->link_recheck = 1; 2685 - schedule_delayed_work(&nesdev->work, 2686 - NES_LINK_RECHECK_DELAY); 2683 + mod_delayed_work(system_wq, &nesdev->work, 2684 + NES_LINK_RECHECK_DELAY); 2687 2685 } 2688 2686 } 2689 2687
+2 -3
drivers/infiniband/hw/nes/nes_nic.c
··· 243 243 244 244 spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags); 245 245 if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_SFP_D) { 246 - if (nesdev->link_recheck) 247 - cancel_delayed_work(&nesdev->work); 248 246 nesdev->link_recheck = 1; 249 - schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY); 247 + mod_delayed_work(system_wq, &nesdev->work, 248 + NES_LINK_RECHECK_DELAY); 250 249 } 251 250 spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags); 252 251
+3 -5
drivers/net/wireless/ipw2x00/ipw2100.c
··· 2180 2180 2181 2181 /* Make sure the RF Kill check timer is running */ 2182 2182 priv->stop_rf_kill = 0; 2183 - cancel_delayed_work(&priv->rf_kill); 2184 - schedule_delayed_work(&priv->rf_kill, round_jiffies_relative(HZ)); 2183 + mod_delayed_work(system_wq, &priv->rf_kill, round_jiffies_relative(HZ)); 2185 2184 } 2186 2185 2187 2186 static void send_scan_event(void *data) ··· 4320 4321 "disabled by HW switch\n"); 4321 4322 /* Make sure the RF_KILL check timer is running */ 4322 4323 priv->stop_rf_kill = 0; 4323 - cancel_delayed_work(&priv->rf_kill); 4324 - schedule_delayed_work(&priv->rf_kill, 4325 - round_jiffies_relative(HZ)); 4324 + mod_delayed_work(system_wq, &priv->rf_kill, 4325 + round_jiffies_relative(HZ)); 4326 4326 } else 4327 4327 schedule_reset(priv); 4328 4328 }
+1 -2
drivers/net/wireless/zd1211rw/zd_usb.c
··· 1164 1164 { 1165 1165 struct zd_usb_rx *rx = &usb->rx; 1166 1166 1167 - cancel_delayed_work(&rx->idle_work); 1168 - queue_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL); 1167 + mod_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL); 1169 1168 } 1170 1169 1171 1170 static inline void init_usb_interrupt(struct zd_usb *usb)
+5 -15
drivers/platform/x86/thinkpad_acpi.c
··· 7682 7682 7683 7683 static void fan_watchdog_reset(void) 7684 7684 { 7685 - static int fan_watchdog_active; 7686 - 7687 7685 if (fan_control_access_mode == TPACPI_FAN_WR_NONE) 7688 7686 return; 7689 7687 7690 - if (fan_watchdog_active) 7691 - cancel_delayed_work(&fan_watchdog_task); 7692 - 7693 7688 if (fan_watchdog_maxinterval > 0 && 7694 - tpacpi_lifecycle != TPACPI_LIFE_EXITING) { 7695 - fan_watchdog_active = 1; 7696 - if (!queue_delayed_work(tpacpi_wq, &fan_watchdog_task, 7697 - msecs_to_jiffies(fan_watchdog_maxinterval 7698 - * 1000))) { 7699 - pr_err("failed to queue the fan watchdog, " 7700 - "watchdog will not trigger\n"); 7701 - } 7702 - } else 7703 - fan_watchdog_active = 0; 7689 + tpacpi_lifecycle != TPACPI_LIFE_EXITING) 7690 + mod_delayed_work(tpacpi_wq, &fan_watchdog_task, 7691 + msecs_to_jiffies(fan_watchdog_maxinterval * 1000)); 7692 + else 7693 + cancel_delayed_work(&fan_watchdog_task); 7704 7694 } 7705 7695 7706 7696 static void fan_watchdog_fire(struct work_struct *ignored)
+3 -6
drivers/power/charger-manager.c
··· 509 509 if (!delayed_work_pending(&cm_monitor_work) || 510 510 (delayed_work_pending(&cm_monitor_work) && 511 511 time_after(next_polling, _next_polling))) { 512 - cancel_delayed_work_sync(&cm_monitor_work); 513 512 next_polling = jiffies + polling_jiffy; 514 - queue_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy); 513 + mod_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy); 515 514 } 516 515 517 516 out: ··· 545 546 if (cm_suspended) 546 547 device_set_wakeup_capable(cm->dev, true); 547 548 548 - if (delayed_work_pending(&cm->fullbatt_vchk_work)) 549 - cancel_delayed_work(&cm->fullbatt_vchk_work); 550 - queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work, 551 - msecs_to_jiffies(desc->fullbatt_vchkdrop_ms)); 549 + mod_delayed_work(cm_wq, &cm->fullbatt_vchk_work, 550 + msecs_to_jiffies(desc->fullbatt_vchkdrop_ms)); 552 551 cm->fullbatt_vchk_jiffies_at = jiffies + msecs_to_jiffies( 553 552 desc->fullbatt_vchkdrop_ms); 554 553
+3 -6
drivers/power/ds2760_battery.c
··· 355 355 356 356 dev_dbg(di->dev, "%s\n", __func__); 357 357 358 - cancel_delayed_work(&di->monitor_work); 359 - queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10); 358 + mod_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10); 360 359 } 361 360 362 361 ··· 400 401 401 402 /* postpone the actual work by 20 secs. This is for debouncing GPIO 402 403 * signals and to let the current value settle. See AN4188. */ 403 - cancel_delayed_work(&di->set_charged_work); 404 - queue_delayed_work(di->monitor_wqueue, &di->set_charged_work, HZ * 20); 404 + mod_delayed_work(di->monitor_wqueue, &di->set_charged_work, HZ * 20); 405 405 } 406 406 407 407 static int ds2760_battery_get_property(struct power_supply *psy, ··· 614 616 di->charge_status = POWER_SUPPLY_STATUS_UNKNOWN; 615 617 power_supply_changed(&di->bat); 616 618 617 - cancel_delayed_work(&di->monitor_work); 618 - queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ); 619 + mod_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ); 619 620 620 621 return 0; 621 622 }
+2 -4
drivers/power/jz4740-battery.c
··· 173 173 { 174 174 struct jz_battery *jz_battery = psy_to_jz_battery(psy); 175 175 176 - cancel_delayed_work(&jz_battery->work); 177 - schedule_delayed_work(&jz_battery->work, 0); 176 + mod_delayed_work(system_wq, &jz_battery->work, 0); 178 177 } 179 178 180 179 static irqreturn_t jz_battery_charge_irq(int irq, void *data) 181 180 { 182 181 struct jz_battery *jz_battery = data; 183 182 184 - cancel_delayed_work(&jz_battery->work); 185 - schedule_delayed_work(&jz_battery->work, 0); 183 + mod_delayed_work(system_wq, &jz_battery->work, 0); 186 184 187 185 return IRQ_HANDLED; 188 186 }
+6 -9
drivers/thermal/thermal_sys.c
··· 694 694 static void thermal_zone_device_set_polling(struct thermal_zone_device *tz, 695 695 int delay) 696 696 { 697 - cancel_delayed_work(&(tz->poll_queue)); 698 - 699 - if (!delay) 700 - return; 701 - 702 697 if (delay > 1000) 703 - queue_delayed_work(system_freezable_wq, &(tz->poll_queue), 704 - round_jiffies(msecs_to_jiffies(delay))); 698 + mod_delayed_work(system_freezable_wq, &tz->poll_queue, 699 + round_jiffies(msecs_to_jiffies(delay))); 700 + else if (delay) 701 + mod_delayed_work(system_freezable_wq, &tz->poll_queue, 702 + msecs_to_jiffies(delay)); 705 703 else 706 - queue_delayed_work(system_freezable_wq, &(tz->poll_queue), 707 - msecs_to_jiffies(delay)); 704 + cancel_delayed_work(&tz->poll_queue); 708 705 } 709 706 710 707 static void thermal_zone_device_passive(struct thermal_zone_device *tz,
+1 -3
fs/afs/callback.c
··· 351 351 */ 352 352 void afs_flush_callback_breaks(struct afs_server *server) 353 353 { 354 - cancel_delayed_work(&server->cb_break_work); 355 - queue_delayed_work(afs_callback_update_worker, 356 - &server->cb_break_work, 0); 354 + mod_delayed_work(afs_callback_update_worker, &server->cb_break_work, 0); 357 355 } 358 356 359 357 #if 0
+2 -8
fs/afs/server.c
··· 285 285 expiry = server->time_of_death + afs_server_timeout; 286 286 if (expiry > now) { 287 287 delay = (expiry - now) * HZ; 288 - if (!queue_delayed_work(afs_wq, &afs_server_reaper, 289 - delay)) { 290 - cancel_delayed_work(&afs_server_reaper); 291 - queue_delayed_work(afs_wq, &afs_server_reaper, 292 - delay); 293 - } 288 + mod_delayed_work(afs_wq, &afs_server_reaper, delay); 294 289 break; 295 290 } 296 291 ··· 318 323 void __exit afs_purge_servers(void) 319 324 { 320 325 afs_server_timeout = 0; 321 - cancel_delayed_work(&afs_server_reaper); 322 - queue_delayed_work(afs_wq, &afs_server_reaper, 0); 326 + mod_delayed_work(afs_wq, &afs_server_reaper, 0); 323 327 }
+3 -11
fs/afs/vlocation.c
··· 561 561 if (expiry > now) { 562 562 delay = (expiry - now) * HZ; 563 563 _debug("delay %lu", delay); 564 - if (!queue_delayed_work(afs_wq, &afs_vlocation_reap, 565 - delay)) { 566 - cancel_delayed_work(&afs_vlocation_reap); 567 - queue_delayed_work(afs_wq, &afs_vlocation_reap, 568 - delay); 569 - } 564 + mod_delayed_work(afs_wq, &afs_vlocation_reap, delay); 570 565 break; 571 566 } 572 567 ··· 609 614 spin_lock(&afs_vlocation_updates_lock); 610 615 list_del_init(&afs_vlocation_updates); 611 616 spin_unlock(&afs_vlocation_updates_lock); 612 - cancel_delayed_work(&afs_vlocation_update); 613 - queue_delayed_work(afs_vlocation_update_worker, 614 - &afs_vlocation_update, 0); 617 + mod_delayed_work(afs_vlocation_update_worker, &afs_vlocation_update, 0); 615 618 destroy_workqueue(afs_vlocation_update_worker); 616 619 617 - cancel_delayed_work(&afs_vlocation_reap); 618 - queue_delayed_work(afs_wq, &afs_vlocation_reap, 0); 620 + mod_delayed_work(afs_wq, &afs_vlocation_reap, 0); 619 621 } 620 622 621 623 /*
+1 -2
fs/nfs/nfs4renewd.c
··· 117 117 timeout = 5 * HZ; 118 118 dprintk("%s: requeueing work. Lease period = %ld\n", 119 119 __func__, (timeout + HZ - 1) / HZ); 120 - cancel_delayed_work(&clp->cl_renewd); 121 - schedule_delayed_work(&clp->cl_renewd, timeout); 120 + mod_delayed_work(system_wq, &clp->cl_renewd, timeout); 122 121 set_bit(NFS_CS_RENEWD, &clp->cl_res_state); 123 122 spin_unlock(&clp->cl_lock); 124 123 }
+2 -2
net/core/dst.c
··· 214 214 if (dst_garbage.timer_inc > DST_GC_INC) { 215 215 dst_garbage.timer_inc = DST_GC_INC; 216 216 dst_garbage.timer_expires = DST_GC_MIN; 217 - cancel_delayed_work(&dst_gc_work); 218 - schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires); 217 + mod_delayed_work(system_wq, &dst_gc_work, 218 + dst_garbage.timer_expires); 219 219 } 220 220 spin_unlock_bh(&dst_garbage.lock); 221 221 }
+1 -2
net/rfkill/input.c
··· 164 164 rfkill_op_pending = true; 165 165 if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) { 166 166 /* bypass the limiter for EPO */ 167 - cancel_delayed_work(&rfkill_op_work); 168 - schedule_delayed_work(&rfkill_op_work, 0); 167 + mod_delayed_work(system_wq, &rfkill_op_work, 0); 169 168 rfkill_last_scheduled = jiffies; 170 169 } else 171 170 rfkill_schedule_ratelimited();