Merge tag 'timers-cleanups-2025-04-06' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer cleanups from Thomas Gleixner:
"A set of final cleanups for the timer subsystem:

- Convert all del_timer[_sync]() instances over to the new
timer_delete[_sync]() API and remove the legacy wrappers.

Conversion was done with coccinelle plus some manual fixups as
coccinelle chokes on scoped_guard().

- The final cleanup of the hrtimer_init() to hrtimer_setup()
conversion.

This has been delayed to the end of the merge window, so that all
patches which have been merged through other trees are in mainline
and all new users are catched.

Doing this right before rc1 ensures that new code which is merged post
rc1 is not introducing new instances of the original functionality"

* tag 'timers-cleanups-2025-04-06' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
tracing/timers: Rename the hrtimer_init event to hrtimer_setup
hrtimers: Rename debug_init_on_stack() to debug_setup_on_stack()
hrtimers: Rename debug_init() to debug_setup()
hrtimers: Rename __hrtimer_init_sleeper() to __hrtimer_setup_sleeper()
hrtimers: Remove unnecessary NULL check in hrtimer_start_range_ns()
hrtimers: Make callback function pointer private
hrtimers: Merge __hrtimer_init() into __hrtimer_setup()
hrtimers: Switch to use __hrtimer_setup()
hrtimers: Delete hrtimer_init()
treewide: Convert new and leftover hrtimer_init() users
treewide: Switch/rename to timer_delete[_sync]()
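
Both conversions are mechanical at the call site; the hrtimer side additionally moves the callback assignment into the setup call, since the function pointer is now private. Below is a minimal sketch of a hypothetical driver showing the new calls — struct foo and the foo_* helpers are invented for illustration and do not correspond to any file touched by this series:

  /* Hypothetical example; names are illustrative only. */
  #include <linux/timer.h>
  #include <linux/hrtimer.h>

  struct foo {
      struct timer_list poll_timer;
      struct hrtimer tick;
  };

  static void foo_poll(struct timer_list *t)
  {
      /* classic timer callback, unchanged by this series */
  }

  static enum hrtimer_restart foo_tick(struct hrtimer *t)
  {
      return HRTIMER_NORESTART;
  }

  static void foo_init(struct foo *foo)
  {
      timer_setup(&foo->poll_timer, foo_poll, 0);

      /*
       * Old style, now removed:
       *     hrtimer_init(&foo->tick, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
       *     foo->tick.function = foo_tick;
       * The callback is now passed to hrtimer_setup() directly.
       */
      hrtimer_setup(&foo->tick, foo_tick, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  }

  static void foo_exit(struct foo *foo)
  {
      /*
       * del_timer_sync() becomes timer_delete_sync(); plain del_timer()
       * becomes timer_delete() the same way.
       */
      timer_delete_sync(&foo->poll_timer);
      hrtimer_cancel(&foo->tick);
  }

hrtimer_cancel() is unchanged; only hrtimer_init() and the open-coded .function assignment go away, which is what the treewide patches below implement.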

+1650 -1718
+2 -2
Documentation/trace/ftrace.rst
··· 3077 # cat set_ftrace_filter 3078 hrtimer_run_queues 3079 hrtimer_run_pending 3080 - hrtimer_init 3081 hrtimer_cancel 3082 hrtimer_try_to_cancel 3083 hrtimer_forward ··· 3115 # cat set_ftrace_filter 3116 hrtimer_run_queues 3117 hrtimer_run_pending 3118 - hrtimer_init 3119 hrtimer_cancel 3120 hrtimer_try_to_cancel 3121 hrtimer_forward
··· 3077 # cat set_ftrace_filter 3078 hrtimer_run_queues 3079 hrtimer_run_pending 3080 + hrtimer_setup 3081 hrtimer_cancel 3082 hrtimer_try_to_cancel 3083 hrtimer_forward ··· 3115 # cat set_ftrace_filter 3116 hrtimer_run_queues 3117 hrtimer_run_pending 3118 + hrtimer_setup 3119 hrtimer_cancel 3120 hrtimer_try_to_cancel 3121 hrtimer_forward
+1 -1
arch/alpha/kernel/srmcons.c
··· 177 178 if (tty->count == 1) { 179 port->tty = NULL; 180 - del_timer(&srmconsp->timer); 181 } 182 183 spin_unlock_irqrestore(&port->lock, flags);
··· 177 178 if (tty->count == 1) { 179 port->tty = NULL; 180 + timer_delete(&srmconsp->timer); 181 } 182 183 spin_unlock_irqrestore(&port->lock, flags);
+1 -1
arch/arm/mach-footbridge/dc21285.c
··· 135 136 static void dc21285_enable_error(struct timer_list *timer) 137 { 138 - del_timer(timer); 139 140 if (timer == &serr_timer) 141 enable_irq(IRQ_PCI_SERR);
··· 135 136 static void dc21285_enable_error(struct timer_list *timer) 137 { 138 + timer_delete(timer); 139 140 if (timer == &serr_timer) 141 enable_irq(IRQ_PCI_SERR);
+2 -2
arch/arm/mach-pxa/sharpsl_pm.c
··· 913 if (sharpsl_pm.machinfo->exit) 914 sharpsl_pm.machinfo->exit(); 915 916 - del_timer_sync(&sharpsl_pm.chrg_full_timer); 917 - del_timer_sync(&sharpsl_pm.ac_timer); 918 } 919 920 static struct platform_driver sharpsl_pm_driver = {
··· 913 if (sharpsl_pm.machinfo->exit) 914 sharpsl_pm.machinfo->exit(); 915 916 + timer_delete_sync(&sharpsl_pm.chrg_full_timer); 917 + timer_delete_sync(&sharpsl_pm.ac_timer); 918 } 919 920 static struct platform_driver sharpsl_pm_driver = {
+1 -1
arch/m68k/amiga/amisound.c
··· 78 return; 79 80 local_irq_save(flags); 81 - del_timer( &sound_timer ); 82 83 if (hz > 20 && hz < 32767) { 84 unsigned long period = (clock_constant / hz);
··· 78 return; 79 80 local_irq_save(flags); 81 + timer_delete(&sound_timer); 82 83 if (hz > 20 && hz < 32767) { 84 unsigned long period = (clock_constant / hz);
+2 -2
arch/m68k/mac/macboing.c
··· 183 184 local_irq_save(flags); 185 186 - del_timer( &mac_sound_timer ); 187 188 for ( i = 0; i < 0x800; i++ ) 189 mac_asc_regs[ i ] = 0; ··· 277 278 local_irq_save(flags); 279 280 - del_timer( &mac_sound_timer ); 281 282 if ( mac_bell_duration-- > 0 ) 283 {
··· 183 184 local_irq_save(flags); 185 186 + timer_delete(&mac_sound_timer); 187 188 for ( i = 0; i < 0x800; i++ ) 189 mac_asc_regs[ i ] = 0; ··· 277 278 local_irq_save(flags); 279 280 + timer_delete(&mac_sound_timer); 281 282 if ( mac_bell_duration-- > 0 ) 283 {
+1 -1
arch/mips/sgi-ip22/ip22-reset.c
··· 98 99 static void debounce(struct timer_list *unused) 100 { 101 - del_timer(&debounce_timer); 102 if (sgint->istat1 & SGINT_ISTAT1_PWR) { 103 /* Interrupt still being sent. */ 104 debounce_timer.expires = jiffies + (HZ / 20); /* 0.05s */
··· 98 99 static void debounce(struct timer_list *unused) 100 { 101 + timer_delete(&debounce_timer); 102 if (sgint->istat1 & SGINT_ISTAT1_PWR) { 103 /* Interrupt still being sent. */ 104 debounce_timer.expires = jiffies + (HZ / 20); /* 0.05s */
+2 -2
arch/powerpc/kvm/booke.c
··· 622 if (nr_jiffies < NEXT_TIMER_MAX_DELTA) 623 mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies); 624 else 625 - del_timer(&vcpu->arch.wdt_timer); 626 spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags); 627 } 628 ··· 1441 1442 void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu) 1443 { 1444 - del_timer_sync(&vcpu->arch.wdt_timer); 1445 } 1446 1447 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
··· 622 if (nr_jiffies < NEXT_TIMER_MAX_DELTA) 623 mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies); 624 else 625 + timer_delete(&vcpu->arch.wdt_timer); 626 spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags); 627 } 628 ··· 1441 1442 void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu) 1443 { 1444 + timer_delete_sync(&vcpu->arch.wdt_timer); 1445 } 1446 1447 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+3 -3
arch/powerpc/platforms/cell/spufs/sched.c
··· 508 509 if (!list_empty(&ctx->rq)) { 510 if (!--spu_prio->nr_waiting) 511 - del_timer(&spusched_timer); 512 list_del_init(&ctx->rq); 513 514 if (list_empty(&spu_prio->runq[prio])) ··· 1126 1127 remove_proc_entry("spu_loadavg", NULL); 1128 1129 - del_timer_sync(&spusched_timer); 1130 - del_timer_sync(&spuloadavg_timer); 1131 kthread_stop(spusched_task); 1132 1133 for (node = 0; node < MAX_NUMNODES; node++) {
··· 508 509 if (!list_empty(&ctx->rq)) { 510 if (!--spu_prio->nr_waiting) 511 + timer_delete(&spusched_timer); 512 list_del_init(&ctx->rq); 513 514 if (list_empty(&spu_prio->runq[prio])) ··· 1126 1127 remove_proc_entry("spu_loadavg", NULL); 1128 1129 + timer_delete_sync(&spusched_timer); 1130 + timer_delete_sync(&spuloadavg_timer); 1131 kthread_stop(spusched_task); 1132 1133 for (node = 0; node < MAX_NUMNODES; node++) {
+1 -1
arch/powerpc/platforms/powermac/low_i2c.c
··· 347 unsigned long flags; 348 349 spin_lock_irqsave(&host->lock, flags); 350 - del_timer(&host->timeout_timer); 351 kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr)); 352 if (host->state != state_idle) { 353 host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
··· 347 unsigned long flags; 348 349 spin_lock_irqsave(&host->lock, flags); 350 + timer_delete(&host->timeout_timer); 351 kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr)); 352 if (host->state != state_idle) { 353 host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
+1 -1
arch/s390/kernel/time.c
··· 680 681 if (!stp_online) { 682 chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL); 683 - del_timer_sync(&stp_timer); 684 goto out_unlock; 685 } 686
··· 680 681 if (!stp_online) { 682 chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL); 683 + timer_delete_sync(&stp_timer); 684 goto out_unlock; 685 } 686
+3 -3
arch/s390/mm/cmm.c
··· 201 { 202 if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) { 203 if (timer_pending(&cmm_timer)) 204 - del_timer(&cmm_timer); 205 return; 206 } 207 mod_timer(&cmm_timer, jiffies + secs_to_jiffies(cmm_timeout_seconds)); ··· 424 #endif 425 unregister_sysctl_table(cmm_sysctl_header); 426 out_sysctl: 427 - del_timer_sync(&cmm_timer); 428 return rc; 429 } 430 module_init(cmm_init); ··· 437 #endif 438 unregister_oom_notifier(&cmm_oom_nb); 439 kthread_stop(cmm_thread_ptr); 440 - del_timer_sync(&cmm_timer); 441 cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list); 442 cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list); 443 }
··· 201 { 202 if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) { 203 if (timer_pending(&cmm_timer)) 204 + timer_delete(&cmm_timer); 205 return; 206 } 207 mod_timer(&cmm_timer, jiffies + secs_to_jiffies(cmm_timeout_seconds)); ··· 424 #endif 425 unregister_sysctl_table(cmm_sysctl_header); 426 out_sysctl: 427 + timer_delete_sync(&cmm_timer); 428 return rc; 429 } 430 module_init(cmm_init); ··· 437 #endif 438 unregister_oom_notifier(&cmm_oom_nb); 439 kthread_stop(cmm_thread_ptr); 440 + timer_delete_sync(&cmm_timer); 441 cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list); 442 cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list); 443 }
+2 -2
arch/sh/drivers/pci/common.c
··· 90 { 91 struct pci_channel *hose = from_timer(hose, t, err_timer); 92 93 - del_timer(&hose->err_timer); 94 printk(KERN_DEBUG "PCI: re-enabling error IRQ.\n"); 95 enable_irq(hose->err_irq); 96 } ··· 99 { 100 struct pci_channel *hose = from_timer(hose, t, serr_timer); 101 102 - del_timer(&hose->serr_timer); 103 printk(KERN_DEBUG "PCI: re-enabling system error IRQ.\n"); 104 enable_irq(hose->serr_irq); 105 }
··· 90 { 91 struct pci_channel *hose = from_timer(hose, t, err_timer); 92 93 + timer_delete(&hose->err_timer); 94 printk(KERN_DEBUG "PCI: re-enabling error IRQ.\n"); 95 enable_irq(hose->err_irq); 96 } ··· 99 { 100 struct pci_channel *hose = from_timer(hose, t, serr_timer); 101 102 + timer_delete(&hose->serr_timer); 103 printk(KERN_DEBUG "PCI: re-enabling system error IRQ.\n"); 104 enable_irq(hose->serr_irq); 105 }
+2 -2
arch/sparc/kernel/led.c
··· 84 /* before we change anything we want to stop any running timers, 85 * otherwise calls such as on will have no persistent effect 86 */ 87 - del_timer_sync(&led_blink_timer); 88 89 if (!strcmp(buf, "on")) { 90 auxio_set_led(AUXIO_LED_ON); ··· 134 static void __exit led_exit(void) 135 { 136 remove_proc_entry("led", NULL); 137 - del_timer_sync(&led_blink_timer); 138 } 139 140 module_init(led_init);
··· 84 /* before we change anything we want to stop any running timers, 85 * otherwise calls such as on will have no persistent effect 86 */ 87 + timer_delete_sync(&led_blink_timer); 88 89 if (!strcmp(buf, "on")) { 90 auxio_set_led(AUXIO_LED_ON); ··· 134 static void __exit led_exit(void) 135 { 136 remove_proc_entry("led", NULL); 137 + timer_delete_sync(&led_blink_timer); 138 } 139 140 module_init(led_init);
+1 -1
arch/um/drivers/vector_kern.c
··· 1112 struct vector_private *vp = netdev_priv(dev); 1113 1114 netif_stop_queue(dev); 1115 - del_timer(&vp->tl); 1116 1117 vp->opened = false; 1118
··· 1112 struct vector_private *vp = netdev_priv(dev); 1113 1114 netif_stop_queue(dev); 1115 + timer_delete(&vp->tl); 1116 1117 vp->opened = false; 1118
+3 -3
arch/x86/kernel/cpu/mce/core.c
··· 1786 __this_cpu_write(mce_next_interval, check_interval * HZ); 1787 } 1788 1789 - /* Must not be called in IRQ context where del_timer_sync() can deadlock */ 1790 static void mce_timer_delete_all(void) 1791 { 1792 int cpu; 1793 1794 for_each_online_cpu(cpu) 1795 - del_timer_sync(&per_cpu(mce_timer, cpu)); 1796 } 1797 1798 static void __mcheck_cpu_mce_banks_init(void) ··· 2820 struct timer_list *t = this_cpu_ptr(&mce_timer); 2821 2822 mce_disable_cpu(); 2823 - del_timer_sync(t); 2824 mce_threshold_remove_device(cpu); 2825 mce_device_remove(cpu); 2826 return 0;
··· 1786 __this_cpu_write(mce_next_interval, check_interval * HZ); 1787 } 1788 1789 + /* Must not be called in IRQ context where timer_delete_sync() can deadlock */ 1790 static void mce_timer_delete_all(void) 1791 { 1792 int cpu; 1793 1794 for_each_online_cpu(cpu) 1795 + timer_delete_sync(&per_cpu(mce_timer, cpu)); 1796 } 1797 1798 static void __mcheck_cpu_mce_banks_init(void) ··· 2820 struct timer_list *t = this_cpu_ptr(&mce_timer); 2821 2822 mce_disable_cpu(); 2823 + timer_delete_sync(t); 2824 mce_threshold_remove_device(cpu); 2825 mce_device_remove(cpu); 2826 return 0;
+2 -2
arch/x86/kvm/xen.c
··· 1553 kvm_vcpu_halt(vcpu); 1554 1555 if (sched_poll.timeout) 1556 - del_timer(&vcpu->arch.xen.poll_timer); 1557 1558 kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE); 1559 } ··· 2308 kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache); 2309 kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache); 2310 2311 - del_timer_sync(&vcpu->arch.xen.poll_timer); 2312 } 2313 2314 void kvm_xen_init_vm(struct kvm *kvm)
··· 1553 kvm_vcpu_halt(vcpu); 1554 1555 if (sched_poll.timeout) 1556 + timer_delete(&vcpu->arch.xen.poll_timer); 1557 1558 kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE); 1559 } ··· 2308 kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache); 2309 kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache); 2310 2311 + timer_delete_sync(&vcpu->arch.xen.poll_timer); 2312 } 2313 2314 void kvm_xen_init_vm(struct kvm *kvm)
+1 -1
arch/xtensa/platforms/iss/console.c
··· 48 static void rs_close(struct tty_struct *tty, struct file * filp) 49 { 50 if (tty->count == 1) 51 - del_timer_sync(&serial_timer); 52 } 53 54
··· 48 static void rs_close(struct tty_struct *tty, struct file * filp) 49 { 50 if (tty->count == 1) 51 + timer_delete_sync(&serial_timer); 52 } 53 54
+1 -1
arch/xtensa/platforms/iss/network.c
··· 375 struct iss_net_private *lp = netdev_priv(dev); 376 377 netif_stop_queue(dev); 378 - del_timer_sync(&lp->timer); 379 lp->tp.net_ops->close(lp); 380 381 return 0;
··· 375 struct iss_net_private *lp = netdev_priv(dev); 376 377 netif_stop_queue(dev); 378 + timer_delete_sync(&lp->timer); 379 lp->tp.net_ops->close(lp); 380 381 return 0;
+1 -1
block/blk-core.c
··· 219 */ 220 void blk_sync_queue(struct request_queue *q) 221 { 222 - del_timer_sync(&q->timeout); 223 cancel_work_sync(&q->timeout_work); 224 } 225 EXPORT_SYMBOL(blk_sync_queue);
··· 219 */ 220 void blk_sync_queue(struct request_queue *q) 221 { 222 + timer_delete_sync(&q->timeout); 223 cancel_work_sync(&q->timeout_work); 224 } 225 EXPORT_SYMBOL(blk_sync_queue);
+1 -1
block/blk-stat.c
··· 162 blk_queue_flag_clear(QUEUE_FLAG_STATS, q); 163 spin_unlock_irqrestore(&q->stats->lock, flags); 164 165 - del_timer_sync(&cb->timer); 166 } 167 168 static void blk_stat_free_callback_rcu(struct rcu_head *head)
··· 162 blk_queue_flag_clear(QUEUE_FLAG_STATS, q); 163 spin_unlock_irqrestore(&q->stats->lock, flags); 164 165 + timer_delete_sync(&cb->timer); 166 } 167 168 static void blk_stat_free_callback_rcu(struct rcu_head *head)
+1 -1
block/blk-stat.h
··· 148 149 static inline void blk_stat_deactivate(struct blk_stat_callback *cb) 150 { 151 - del_timer_sync(&cb->timer); 152 } 153 154 /**
··· 148 149 static inline void blk_stat_deactivate(struct blk_stat_callback *cb) 150 { 151 + timer_delete_sync(&cb->timer); 152 } 153 154 /**
+2 -2
block/blk-throttle.c
··· 333 { 334 struct throtl_grp *tg = pd_to_tg(pd); 335 336 - del_timer_sync(&tg->service_queue.pending_timer); 337 blkg_rwstat_exit(&tg->stat_bytes); 338 blkg_rwstat_exit(&tg->stat_ios); 339 kfree(tg); ··· 1711 if (!blk_throtl_activated(q)) 1712 return; 1713 1714 - del_timer_sync(&q->td->service_queue.pending_timer); 1715 throtl_shutdown_wq(q); 1716 blkcg_deactivate_policy(disk, &blkcg_policy_throtl); 1717 kfree(q->td);
··· 333 { 334 struct throtl_grp *tg = pd_to_tg(pd); 335 336 + timer_delete_sync(&tg->service_queue.pending_timer); 337 blkg_rwstat_exit(&tg->stat_bytes); 338 blkg_rwstat_exit(&tg->stat_ios); 339 kfree(tg); ··· 1711 if (!blk_throtl_activated(q)) 1712 return; 1713 1714 + timer_delete_sync(&q->td->service_queue.pending_timer); 1715 throtl_shutdown_wq(q); 1716 blkcg_deactivate_policy(disk, &blkcg_policy_throtl); 1717 kfree(q->td);
+1 -1
drivers/accel/qaic/qaic_timesync.c
··· 221 { 222 struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev); 223 224 - del_timer_sync(&mqtsdev->timer); 225 mhi_unprepare_from_transfer(mqtsdev->mhi_dev); 226 kfree(mqtsdev->sync_msg); 227 kfree(mqtsdev);
··· 221 { 222 struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev); 223 224 + timer_delete_sync(&mqtsdev->timer); 225 mhi_unprepare_from_transfer(mqtsdev->mhi_dev); 226 kfree(mqtsdev->sync_msg); 227 kfree(mqtsdev);
+9 -9
drivers/accessibility/speakup/main.c
··· 1172 if (cursor_track == read_all_mode) { 1173 switch (value) { 1174 case KVAL(K_SHIFT): 1175 - del_timer(&cursor_timer); 1176 spk_shut_up &= 0xfe; 1177 spk_do_flush(); 1178 read_all_doc(vc); 1179 break; 1180 case KVAL(K_CTRL): 1181 - del_timer(&cursor_timer); 1182 cursor_track = prev_cursor_track; 1183 spk_shut_up &= 0xfe; 1184 spk_do_flush(); ··· 1399 1400 static void kbd_fakekey2(struct vc_data *vc, enum read_all_command command) 1401 { 1402 - del_timer(&cursor_timer); 1403 speakup_fake_down_arrow(); 1404 start_read_all_timer(vc, command); 1405 } ··· 1415 cursor_track = read_all_mode; 1416 spk_reset_index_count(0); 1417 if (get_sentence_buf(vc, 0) == -1) { 1418 - del_timer(&cursor_timer); 1419 if (!in_keyboard_notifier) 1420 speakup_fake_down_arrow(); 1421 start_read_all_timer(vc, RA_DOWN_ARROW); ··· 1428 1429 static void stop_read_all(struct vc_data *vc) 1430 { 1431 - del_timer(&cursor_timer); 1432 cursor_track = prev_cursor_track; 1433 spk_shut_up &= 0xfe; 1434 spk_do_flush(); ··· 1528 spin_unlock_irqrestore(&speakup_info.spinlock, flags); 1529 return NOTIFY_STOP; 1530 } 1531 - del_timer(&cursor_timer); 1532 spk_shut_up &= 0xfe; 1533 spk_do_flush(); 1534 start_read_all_timer(vc, value + 1); ··· 1692 struct vc_data *vc = vc_cons[cursor_con].d; 1693 unsigned long flags; 1694 1695 - del_timer(&cursor_timer); 1696 spin_lock_irqsave(&speakup_info.spinlock, flags); 1697 if (cursor_con != fg_console) { 1698 is_cursor = 0; ··· 2333 speakup_unregister_devsynth(); 2334 speakup_cancel_selection(); 2335 speakup_cancel_paste(); 2336 - del_timer_sync(&cursor_timer); 2337 kthread_stop(speakup_task); 2338 speakup_task = NULL; 2339 mutex_lock(&spk_mutex); ··· 2437 2438 error_vtnotifier: 2439 unregister_keyboard_notifier(&keyboard_notifier_block); 2440 - del_timer(&cursor_timer); 2441 2442 error_kbdnotifier: 2443 speakup_unregister_devsynth();
··· 1172 if (cursor_track == read_all_mode) { 1173 switch (value) { 1174 case KVAL(K_SHIFT): 1175 + timer_delete(&cursor_timer); 1176 spk_shut_up &= 0xfe; 1177 spk_do_flush(); 1178 read_all_doc(vc); 1179 break; 1180 case KVAL(K_CTRL): 1181 + timer_delete(&cursor_timer); 1182 cursor_track = prev_cursor_track; 1183 spk_shut_up &= 0xfe; 1184 spk_do_flush(); ··· 1399 1400 static void kbd_fakekey2(struct vc_data *vc, enum read_all_command command) 1401 { 1402 + timer_delete(&cursor_timer); 1403 speakup_fake_down_arrow(); 1404 start_read_all_timer(vc, command); 1405 } ··· 1415 cursor_track = read_all_mode; 1416 spk_reset_index_count(0); 1417 if (get_sentence_buf(vc, 0) == -1) { 1418 + timer_delete(&cursor_timer); 1419 if (!in_keyboard_notifier) 1420 speakup_fake_down_arrow(); 1421 start_read_all_timer(vc, RA_DOWN_ARROW); ··· 1428 1429 static void stop_read_all(struct vc_data *vc) 1430 { 1431 + timer_delete(&cursor_timer); 1432 cursor_track = prev_cursor_track; 1433 spk_shut_up &= 0xfe; 1434 spk_do_flush(); ··· 1528 spin_unlock_irqrestore(&speakup_info.spinlock, flags); 1529 return NOTIFY_STOP; 1530 } 1531 + timer_delete(&cursor_timer); 1532 spk_shut_up &= 0xfe; 1533 spk_do_flush(); 1534 start_read_all_timer(vc, value + 1); ··· 1692 struct vc_data *vc = vc_cons[cursor_con].d; 1693 unsigned long flags; 1694 1695 + timer_delete(&cursor_timer); 1696 spin_lock_irqsave(&speakup_info.spinlock, flags); 1697 if (cursor_con != fg_console) { 1698 is_cursor = 0; ··· 2333 speakup_unregister_devsynth(); 2334 speakup_cancel_selection(); 2335 speakup_cancel_paste(); 2336 + timer_delete_sync(&cursor_timer); 2337 kthread_stop(speakup_task); 2338 speakup_task = NULL; 2339 mutex_lock(&spk_mutex); ··· 2437 2438 error_vtnotifier: 2439 unregister_keyboard_notifier(&keyboard_notifier_block); 2440 + timer_delete(&cursor_timer); 2441 2442 error_kbdnotifier: 2443 speakup_unregister_devsynth();
+1 -1
drivers/accessibility/speakup/synth.c
··· 521 spin_lock_irqsave(&speakup_info.spinlock, flags); 522 pr_info("releasing synth %s\n", synth->name); 523 synth->alive = 0; 524 - del_timer(&thread_timer); 525 spin_unlock_irqrestore(&speakup_info.spinlock, flags); 526 if (synth->attributes.name) 527 sysfs_remove_group(speakup_kobj, &synth->attributes);
··· 521 spin_lock_irqsave(&speakup_info.spinlock, flags); 522 pr_info("releasing synth %s\n", synth->name); 523 synth->alive = 0; 524 + timer_delete(&thread_timer); 525 spin_unlock_irqrestore(&speakup_info.spinlock, flags); 526 if (synth->attributes.name) 527 sysfs_remove_group(speakup_kobj, &synth->attributes);
+1 -1
drivers/ata/libata-eh.c
··· 700 ata_eh_acquire(ap); 701 repeat: 702 /* kill fast drain timer */ 703 - del_timer_sync(&ap->fastdrain_timer); 704 705 /* process port resume request */ 706 ata_eh_handle_port_resume(ap);
··· 700 ata_eh_acquire(ap); 701 repeat: 702 /* kill fast drain timer */ 703 + timer_delete_sync(&ap->fastdrain_timer); 704 705 /* process port resume request */ 706 ata_eh_handle_port_resume(ap);
+2 -2
drivers/atm/idt77105.c
··· 366 static void __exit idt77105_exit(void) 367 { 368 /* turn off timers */ 369 - del_timer_sync(&stats_timer); 370 - del_timer_sync(&restart_timer); 371 } 372 373 module_exit(idt77105_exit);
··· 366 static void __exit idt77105_exit(void) 367 { 368 /* turn off timers */ 369 + timer_delete_sync(&stats_timer); 370 + timer_delete_sync(&restart_timer); 371 } 372 373 module_exit(idt77105_exit);
+1 -1
drivers/atm/iphase.c
··· 3283 { 3284 pci_unregister_driver(&ia_driver); 3285 3286 - del_timer_sync(&ia_timer); 3287 } 3288 3289 module_init(ia_module_init);
··· 3283 { 3284 pci_unregister_driver(&ia_driver); 3285 3286 + timer_delete_sync(&ia_timer); 3287 } 3288 3289 module_init(ia_module_init);
+1 -1
drivers/atm/lanai.c
··· 1792 1793 static inline void lanai_timed_poll_stop(struct lanai_dev *lanai) 1794 { 1795 - del_timer_sync(&lanai->timer); 1796 } 1797 1798 /* -------------------- INTERRUPT SERVICE: */
··· 1792 1793 static inline void lanai_timed_poll_stop(struct lanai_dev *lanai) 1794 { 1795 + timer_delete_sync(&lanai->timer); 1796 } 1797 1798 /* -------------------- INTERRUPT SERVICE: */
+1 -1
drivers/atm/nicstar.c
··· 300 { 301 XPRINTK("nicstar: nicstar_cleanup() called.\n"); 302 303 - del_timer_sync(&ns_timer); 304 305 pci_unregister_driver(&nicstar_driver); 306
··· 300 { 301 XPRINTK("nicstar: nicstar_cleanup() called.\n"); 302 303 + timer_delete_sync(&ns_timer); 304 305 pci_unregister_driver(&nicstar_driver); 306
+1 -1
drivers/atm/suni.c
··· 347 for (walk = &sunis; *walk != PRIV(dev); 348 walk = &PRIV((*walk)->dev)->next); 349 *walk = PRIV((*walk)->dev)->next; 350 - if (!sunis) del_timer_sync(&poll_timer); 351 spin_unlock_irqrestore(&sunis_lock,flags); 352 kfree(PRIV(dev)); 353
··· 347 for (walk = &sunis; *walk != PRIV(dev); 348 walk = &PRIV((*walk)->dev)->next); 349 *walk = PRIV((*walk)->dev)->next; 350 + if (!sunis) timer_delete_sync(&poll_timer); 351 spin_unlock_irqrestore(&sunis_lock,flags); 352 kfree(PRIV(dev)); 353
+4 -4
drivers/auxdisplay/line-display.c
··· 84 char *new_msg; 85 86 /* stop the scroll timer */ 87 - del_timer_sync(&linedisp->timer); 88 89 if (count == -1) 90 count = strlen(msg); ··· 183 184 linedisp->scroll_rate = msecs_to_jiffies(ms); 185 if (linedisp->message && linedisp->message_len > linedisp->num_chars) { 186 - del_timer_sync(&linedisp->timer); 187 if (linedisp->scroll_rate) 188 linedisp_scroll(&linedisp->timer); 189 } ··· 376 out_del_dev: 377 device_del(&linedisp->dev); 378 out_del_timer: 379 - del_timer_sync(&linedisp->timer); 380 out_put_device: 381 put_device(&linedisp->dev); 382 return err; ··· 391 void linedisp_unregister(struct linedisp *linedisp) 392 { 393 device_del(&linedisp->dev); 394 - del_timer_sync(&linedisp->timer); 395 put_device(&linedisp->dev); 396 } 397 EXPORT_SYMBOL_NS_GPL(linedisp_unregister, "LINEDISP");
··· 84 char *new_msg; 85 86 /* stop the scroll timer */ 87 + timer_delete_sync(&linedisp->timer); 88 89 if (count == -1) 90 count = strlen(msg); ··· 183 184 linedisp->scroll_rate = msecs_to_jiffies(ms); 185 if (linedisp->message && linedisp->message_len > linedisp->num_chars) { 186 + timer_delete_sync(&linedisp->timer); 187 if (linedisp->scroll_rate) 188 linedisp_scroll(&linedisp->timer); 189 } ··· 376 out_del_dev: 377 device_del(&linedisp->dev); 378 out_del_timer: 379 + timer_delete_sync(&linedisp->timer); 380 out_put_device: 381 put_device(&linedisp->dev); 382 return err; ··· 391 void linedisp_unregister(struct linedisp *linedisp) 392 { 393 device_del(&linedisp->dev); 394 + timer_delete_sync(&linedisp->timer); 395 put_device(&linedisp->dev); 396 } 397 EXPORT_SYMBOL_NS_GPL(linedisp_unregister, "LINEDISP");
+2 -2
drivers/auxdisplay/panel.c
··· 1654 1655 err_lcd_unreg: 1656 if (scan_timer.function) 1657 - del_timer_sync(&scan_timer); 1658 if (lcd.enabled) 1659 charlcd_unregister(lcd.charlcd); 1660 err_unreg_device: ··· 1675 return; 1676 } 1677 if (scan_timer.function) 1678 - del_timer_sync(&scan_timer); 1679 1680 if (keypad.enabled) { 1681 misc_deregister(&keypad_dev);
··· 1654 1655 err_lcd_unreg: 1656 if (scan_timer.function) 1657 + timer_delete_sync(&scan_timer); 1658 if (lcd.enabled) 1659 charlcd_unregister(lcd.charlcd); 1660 err_unreg_device: ··· 1675 return; 1676 } 1677 if (scan_timer.function) 1678 + timer_delete_sync(&scan_timer); 1679 1680 if (keypad.enabled) { 1681 misc_deregister(&keypad_dev);
+1 -1
drivers/base/devcoredump.c
··· 41 * devcd_data_write() 42 * mod_delayed_work() 43 * try_to_grab_pending() 44 - * del_timer() 45 * debug_assert_init() 46 * INIT_DELAYED_WORK() 47 * schedule_delayed_work()
··· 41 * devcd_data_write() 42 * mod_delayed_work() 43 * try_to_grab_pending() 44 + * timer_delete() 45 * debug_assert_init() 46 * INIT_DELAYED_WORK() 47 * schedule_delayed_work()
+1 -1
drivers/base/power/main.c
··· 559 { 560 struct timer_list *timer = &wd->timer; 561 562 - del_timer_sync(timer); 563 destroy_timer_on_stack(timer); 564 } 565 #else
··· 559 { 560 struct timer_list *timer = &wd->timer; 561 562 + timer_delete_sync(timer); 563 destroy_timer_on_stack(timer); 564 } 565 #else
+3 -3
drivers/base/power/wakeup.c
··· 197 raw_spin_unlock_irqrestore(&events_lock, flags); 198 synchronize_srcu(&wakeup_srcu); 199 200 - del_timer_sync(&ws->timer); 201 /* 202 * Clear timer.function to make wakeup_source_not_registered() treat 203 * this wakeup source as not registered. ··· 613 spin_lock_irqsave(&ws->lock, flags); 614 615 wakeup_source_report_event(ws, false); 616 - del_timer(&ws->timer); 617 ws->timer_expires = 0; 618 619 spin_unlock_irqrestore(&ws->lock, flags); ··· 693 ws->max_time = duration; 694 695 ws->last_time = now; 696 - del_timer(&ws->timer); 697 ws->timer_expires = 0; 698 699 if (ws->autosleep_enabled)
··· 197 raw_spin_unlock_irqrestore(&events_lock, flags); 198 synchronize_srcu(&wakeup_srcu); 199 200 + timer_delete_sync(&ws->timer); 201 /* 202 * Clear timer.function to make wakeup_source_not_registered() treat 203 * this wakeup source as not registered. ··· 613 spin_lock_irqsave(&ws->lock, flags); 614 615 wakeup_source_report_event(ws, false); 616 + timer_delete(&ws->timer); 617 ws->timer_expires = 0; 618 619 spin_unlock_irqrestore(&ws->lock, flags); ··· 693 ws->max_time = duration; 694 695 ws->last_time = now; 696 + timer_delete(&ws->timer); 697 ws->timer_expires = 0; 698 699 if (ws->autosleep_enabled)
+5 -5
drivers/block/amiflop.c
··· 457 { 458 nr &= 3; 459 460 - del_timer(motor_off_timer + nr); 461 462 if (!unit[nr].motor) { 463 unit[nr].motor = 1; ··· 1393 1394 nr&=3; 1395 writefromint = 0; 1396 - del_timer(&post_write_timer); 1397 get_fdc(nr); 1398 if (!fd_motor_on(nr)) { 1399 writepending = 0; ··· 1435 } 1436 1437 if (unit[drive].dirty == 1) { 1438 - del_timer (flush_track_timer + drive); 1439 non_int_flush_track (drive); 1440 } 1441 errcnt = 0; ··· 1591 case FDDEFPRM: 1592 return -EINVAL; 1593 case FDFLUSH: /* unconditionally, even if not needed */ 1594 - del_timer (flush_track_timer + drive); 1595 non_int_flush_track(drive); 1596 break; 1597 #ifdef RAW_IOCTL ··· 1714 1715 mutex_lock(&amiflop_mutex); 1716 if (unit[drive].dirty == 1) { 1717 - del_timer (flush_track_timer + drive); 1718 non_int_flush_track (drive); 1719 } 1720
··· 457 { 458 nr &= 3; 459 460 + timer_delete(motor_off_timer + nr); 461 462 if (!unit[nr].motor) { 463 unit[nr].motor = 1; ··· 1393 1394 nr&=3; 1395 writefromint = 0; 1396 + timer_delete(&post_write_timer); 1397 get_fdc(nr); 1398 if (!fd_motor_on(nr)) { 1399 writepending = 0; ··· 1435 } 1436 1437 if (unit[drive].dirty == 1) { 1438 + timer_delete(flush_track_timer + drive); 1439 non_int_flush_track (drive); 1440 } 1441 errcnt = 0; ··· 1591 case FDDEFPRM: 1592 return -EINVAL; 1593 case FDFLUSH: /* unconditionally, even if not needed */ 1594 + timer_delete(flush_track_timer + drive); 1595 non_int_flush_track(drive); 1596 break; 1597 #ifdef RAW_IOCTL ··· 1714 1715 mutex_lock(&amiflop_mutex); 1716 if (unit[drive].dirty == 1) { 1717 + timer_delete(flush_track_timer + drive); 1718 non_int_flush_track (drive); 1719 } 1720
+1 -1
drivers/block/aoe/aoedev.c
··· 274 if (!freeing) 275 return; 276 277 - del_timer_sync(&d->timer); 278 if (d->gd) { 279 aoedisk_rm_debugfs(d); 280 del_gendisk(d->gd);
··· 274 if (!freeing) 275 return; 276 277 + timer_delete_sync(&d->timer); 278 if (d->gd) { 279 aoedisk_rm_debugfs(d); 280 del_gendisk(d->gd);
+1 -1
drivers/block/aoe/aoemain.c
··· 28 static void __exit 29 aoe_exit(void) 30 { 31 - del_timer_sync(&timer); 32 33 aoenet_exit(); 34 unregister_blkdev(AOE_MAJOR, DEVICE_NAME);
··· 28 static void __exit 29 aoe_exit(void) 30 { 31 + timer_delete_sync(&timer); 32 33 aoenet_exit(); 34 unregister_blkdev(AOE_MAJOR, DEVICE_NAME);
+6 -6
drivers/block/ataflop.c
··· 494 495 static inline void stop_timeout(void) 496 { 497 - del_timer(&timeout_timer); 498 } 499 500 /* Select the side to use. */ ··· 784 contents become invalid! */ 785 BufferDrive = -1; 786 /* stop deselect timer */ 787 - del_timer( &motor_off_timer ); 788 789 FILL( 60 * (nsect / 9), 0x4e ); 790 for( sect = 0; sect < nsect; ++sect ) { ··· 1138 DPRINT(("fd_rwsec_done()\n")); 1139 1140 if (read_track) { 1141 - del_timer(&readtrack_timer); 1142 if (!MultReadInProgress) 1143 return; 1144 MultReadInProgress = 0; ··· 1356 /* If the timeout occurred while the readtrack_check timer was 1357 * active, we need to cancel it, else bad things will happen */ 1358 if (UseTrackbuffer) 1359 - del_timer( &readtrack_timer ); 1360 FDC_WRITE( FDCREG_CMD, FDCCMD_FORCI ); 1361 udelay( 25 ); 1362 ··· 1566 } 1567 1568 /* stop deselect timer */ 1569 - del_timer( &motor_off_timer ); 1570 1571 ReqCnt = 0; 1572 ReqCmd = rq_data_dir(fd_request); ··· 2055 blk_mq_free_tag_set(&unit[i].tag_set); 2056 } 2057 2058 - del_timer_sync(&fd_timer); 2059 atari_stram_free(DMABuffer); 2060 } 2061
··· 494 495 static inline void stop_timeout(void) 496 { 497 + timer_delete(&timeout_timer); 498 } 499 500 /* Select the side to use. */ ··· 784 contents become invalid! */ 785 BufferDrive = -1; 786 /* stop deselect timer */ 787 + timer_delete(&motor_off_timer); 788 789 FILL( 60 * (nsect / 9), 0x4e ); 790 for( sect = 0; sect < nsect; ++sect ) { ··· 1138 DPRINT(("fd_rwsec_done()\n")); 1139 1140 if (read_track) { 1141 + timer_delete(&readtrack_timer); 1142 if (!MultReadInProgress) 1143 return; 1144 MultReadInProgress = 0; ··· 1356 /* If the timeout occurred while the readtrack_check timer was 1357 * active, we need to cancel it, else bad things will happen */ 1358 if (UseTrackbuffer) 1359 + timer_delete(&readtrack_timer); 1360 FDC_WRITE( FDCREG_CMD, FDCCMD_FORCI ); 1361 udelay( 25 ); 1362 ··· 1566 } 1567 1568 /* stop deselect timer */ 1569 + timer_delete(&motor_off_timer); 1570 1571 ReqCnt = 0; 1572 ReqCmd = rq_data_dir(fd_request); ··· 2055 blk_mq_free_tag_set(&unit[i].tag_set); 2056 } 2057 2058 + timer_delete_sync(&fd_timer); 2059 atari_stram_free(DMABuffer); 2060 } 2061
+1 -1
drivers/block/drbd/drbd_main.c
··· 3034 BUILD_BUG_ON(UI_SIZE != 4); 3035 BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096); 3036 3037 - del_timer(&device->md_sync_timer); 3038 /* timer may be rearmed by drbd_md_mark_dirty() now. */ 3039 if (!test_and_clear_bit(MD_DIRTY, &device->flags)) 3040 return;
··· 3034 BUILD_BUG_ON(UI_SIZE != 4); 3035 BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096); 3036 3037 + timer_delete(&device->md_sync_timer); 3038 /* timer may be rearmed by drbd_md_mark_dirty() now. */ 3039 if (!test_and_clear_bit(MD_DIRTY, &device->flags)) 3040 return;
+1 -1
drivers/block/drbd/drbd_nl.c
··· 1033 /* We do some synchronous IO below, which may take some time. 1034 * Clear the timer, to avoid scary "timer expired!" messages, 1035 * "Superblock" is written out at least twice below, anyways. */ 1036 - del_timer(&device->md_sync_timer); 1037 1038 /* We won't change the "al-extents" setting, we just may need 1039 * to move the on-disk location of the activity log ringbuffer.
··· 1033 /* We do some synchronous IO below, which may take some time. 1034 * Clear the timer, to avoid scary "timer expired!" messages, 1035 * "Superblock" is written out at least twice below, anyways. */ 1036 + timer_delete(&device->md_sync_timer); 1037 1038 /* We won't change the "al-extents" setting, we just may need 1039 * to move the on-disk location of the activity log ringbuffer.
+1 -1
drivers/block/drbd/drbd_receiver.c
··· 5187 atomic_set(&device->rs_pending_cnt, 0); 5188 wake_up(&device->misc_wait); 5189 5190 - del_timer_sync(&device->resync_timer); 5191 resync_timer_fn(&device->resync_timer); 5192 5193 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
··· 5187 atomic_set(&device->rs_pending_cnt, 0); 5188 wake_up(&device->misc_wait); 5189 5190 + timer_delete_sync(&device->resync_timer); 5191 resync_timer_fn(&device->resync_timer); 5192 5193 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
+4 -4
drivers/block/floppy.c
··· 937 if (!(fdc_state[fdc].dor & (0x10 << UNIT(drive)))) 938 return; 939 940 - del_timer(motor_off_timer + drive); 941 942 /* make spindle stop in a position which minimizes spinup time 943 * next time */ ··· 1918 mask &= ~(0x10 << UNIT(current_drive)); 1919 1920 /* starts motor and selects floppy */ 1921 - del_timer(motor_off_timer + current_drive); 1922 set_dor(current_fdc, mask, data); 1923 1924 /* wait_for_completion also schedules reset if needed. */ ··· 4762 for (drive = 0; drive < N_DRIVE; drive++) { 4763 if (!disks[drive][0]) 4764 break; 4765 - del_timer_sync(&motor_off_timer[drive]); 4766 put_disk(disks[drive][0]); 4767 blk_mq_free_tag_set(&tag_sets[drive]); 4768 } ··· 4983 destroy_workqueue(floppy_wq); 4984 4985 for (drive = 0; drive < N_DRIVE; drive++) { 4986 - del_timer_sync(&motor_off_timer[drive]); 4987 4988 if (floppy_available(drive)) { 4989 for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
··· 937 if (!(fdc_state[fdc].dor & (0x10 << UNIT(drive)))) 938 return; 939 940 + timer_delete(motor_off_timer + drive); 941 942 /* make spindle stop in a position which minimizes spinup time 943 * next time */ ··· 1918 mask &= ~(0x10 << UNIT(current_drive)); 1919 1920 /* starts motor and selects floppy */ 1921 + timer_delete(motor_off_timer + current_drive); 1922 set_dor(current_fdc, mask, data); 1923 1924 /* wait_for_completion also schedules reset if needed. */ ··· 4762 for (drive = 0; drive < N_DRIVE; drive++) { 4763 if (!disks[drive][0]) 4764 break; 4765 + timer_delete_sync(&motor_off_timer[drive]); 4766 put_disk(disks[drive][0]); 4767 blk_mq_free_tag_set(&tag_sets[drive]); 4768 } ··· 4983 destroy_workqueue(floppy_wq); 4984 4985 for (drive = 0; drive < N_DRIVE; drive++) { 4986 + timer_delete_sync(&motor_off_timer[drive]); 4987 4988 if (floppy_available(drive)) { 4989 for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
+1 -1
drivers/block/sunvdc.c
··· 1070 1071 flush_work(&port->ldc_reset_work); 1072 cancel_delayed_work_sync(&port->ldc_reset_timer_work); 1073 - del_timer_sync(&port->vio.timer); 1074 1075 del_gendisk(port->disk); 1076 put_disk(port->disk);
··· 1070 1071 flush_work(&port->ldc_reset_work); 1072 cancel_delayed_work_sync(&port->ldc_reset_timer_work); 1073 + timer_delete_sync(&port->vio.timer); 1074 1075 del_gendisk(port->disk); 1076 put_disk(port->disk);
+5 -5
drivers/block/swim3.c
··· 362 void (*proc)(struct timer_list *t)) 363 { 364 if (fs->timeout_pending) 365 - del_timer(&fs->timeout); 366 fs->timeout.expires = jiffies + nticks; 367 fs->timeout.function = proc; 368 add_timer(&fs->timeout); ··· 677 out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS); 678 out_8(&sw->select, RELAX); 679 out_8(&sw->intr_enable, 0); 680 - del_timer(&fs->timeout); 681 fs->timeout_pending = 0; 682 if (sw->ctrack == 0xff) { 683 swim3_err("%s", "Seen sector but cyl=ff?\n"); ··· 706 out_8(&sw->control_bic, DO_SEEK); 707 out_8(&sw->select, RELAX); 708 out_8(&sw->intr_enable, 0); 709 - del_timer(&fs->timeout); 710 fs->timeout_pending = 0; 711 if (fs->state == seeking) 712 ++fs->retries; ··· 716 break; 717 case settling: 718 out_8(&sw->intr_enable, 0); 719 - del_timer(&fs->timeout); 720 fs->timeout_pending = 0; 721 act(fs); 722 break; ··· 726 out_8(&sw->intr_enable, 0); 727 out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION); 728 out_8(&sw->select, RELAX); 729 - del_timer(&fs->timeout); 730 fs->timeout_pending = 0; 731 dr = fs->dma; 732 cp = fs->dma_cmd;
··· 362 void (*proc)(struct timer_list *t)) 363 { 364 if (fs->timeout_pending) 365 + timer_delete(&fs->timeout); 366 fs->timeout.expires = jiffies + nticks; 367 fs->timeout.function = proc; 368 add_timer(&fs->timeout); ··· 677 out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS); 678 out_8(&sw->select, RELAX); 679 out_8(&sw->intr_enable, 0); 680 + timer_delete(&fs->timeout); 681 fs->timeout_pending = 0; 682 if (sw->ctrack == 0xff) { 683 swim3_err("%s", "Seen sector but cyl=ff?\n"); ··· 706 out_8(&sw->control_bic, DO_SEEK); 707 out_8(&sw->select, RELAX); 708 out_8(&sw->intr_enable, 0); 709 + timer_delete(&fs->timeout); 710 fs->timeout_pending = 0; 711 if (fs->state == seeking) 712 ++fs->retries; ··· 716 break; 717 case settling: 718 out_8(&sw->intr_enable, 0); 719 + timer_delete(&fs->timeout); 720 fs->timeout_pending = 0; 721 act(fs); 722 break; ··· 726 out_8(&sw->intr_enable, 0); 727 out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION); 728 out_8(&sw->select, RELAX); 729 + timer_delete(&fs->timeout); 730 fs->timeout_pending = 0; 731 dr = fs->dma; 732 cp = fs->dma_cmd;
+2 -2
drivers/bluetooth/bluecard_cs.c
··· 638 bluecard_hci_flush(hdev); 639 640 /* Stop LED timer */ 641 - del_timer_sync(&(info->timer)); 642 643 /* Disable power LED */ 644 outb(0x00, iobase + 0x30); ··· 885 886 bluecard_close(info); 887 888 - del_timer_sync(&(info->timer)); 889 890 pcmcia_disable_device(link); 891 }
··· 638 bluecard_hci_flush(hdev); 639 640 /* Stop LED timer */ 641 + timer_delete_sync(&(info->timer)); 642 643 /* Disable power LED */ 644 outb(0x00, iobase + 0x30); ··· 885 886 bluecard_close(info); 887 888 + timer_delete_sync(&(info->timer)); 889 890 pcmcia_disable_device(link); 891 }
+1 -1
drivers/bluetooth/hci_bcsp.c
··· 382 } 383 384 if (skb_queue_empty(&bcsp->unack)) 385 - del_timer(&bcsp->tbcsp); 386 387 spin_unlock_irqrestore(&bcsp->unack.lock, flags); 388
··· 382 } 383 384 if (skb_queue_empty(&bcsp->unack)) 385 + timer_delete(&bcsp->tbcsp); 386 387 spin_unlock_irqrestore(&bcsp->unack.lock, flags); 388
+3 -3
drivers/bluetooth/hci_h5.c
··· 197 198 h5->state = H5_UNINITIALIZED; 199 200 - del_timer(&h5->timer); 201 202 skb_queue_purge(&h5->rel); 203 skb_queue_purge(&h5->unrel); ··· 254 { 255 struct h5 *h5 = hu->priv; 256 257 - del_timer_sync(&h5->timer); 258 259 skb_queue_purge(&h5->unack); 260 skb_queue_purge(&h5->rel); ··· 318 } 319 320 if (skb_queue_empty(&h5->unack)) 321 - del_timer(&h5->timer); 322 323 unlock: 324 spin_unlock_irqrestore(&h5->unack.lock, flags);
··· 197 198 h5->state = H5_UNINITIALIZED; 199 200 + timer_delete(&h5->timer); 201 202 skb_queue_purge(&h5->rel); 203 skb_queue_purge(&h5->unrel); ··· 254 { 255 struct h5 *h5 = hu->priv; 256 257 + timer_delete_sync(&h5->timer); 258 259 skb_queue_purge(&h5->unack); 260 skb_queue_purge(&h5->rel); ··· 318 } 319 320 if (skb_queue_empty(&h5->unack)) 321 + timer_delete(&h5->timer); 322 323 unlock: 324 spin_unlock_irqrestore(&h5->unack.lock, flags);
+5 -5
drivers/bluetooth/hci_qca.c
··· 867 skb_queue_tail(&qca->txq, skb); 868 869 /* Switch timers and change state to HCI_IBS_TX_AWAKE */ 870 - del_timer(&qca->wake_retrans_timer); 871 idle_delay = msecs_to_jiffies(qca->tx_idle_delay); 872 mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); 873 qca->tx_ibs_state = HCI_IBS_TX_AWAKE; ··· 2239 hu->hdev->hw_error = NULL; 2240 hu->hdev->reset = NULL; 2241 2242 - del_timer_sync(&qca->wake_retrans_timer); 2243 - del_timer_sync(&qca->tx_idle_timer); 2244 2245 /* Stop sending shutdown command if soc crashes. */ 2246 if (soc_type != QCA_ROME ··· 2629 2630 switch (qca->tx_ibs_state) { 2631 case HCI_IBS_TX_WAKING: 2632 - del_timer(&qca->wake_retrans_timer); 2633 fallthrough; 2634 case HCI_IBS_TX_AWAKE: 2635 - del_timer(&qca->tx_idle_timer); 2636 2637 serdev_device_write_flush(hu->serdev); 2638 cmd = HCI_IBS_SLEEP_IND;
··· 867 skb_queue_tail(&qca->txq, skb); 868 869 /* Switch timers and change state to HCI_IBS_TX_AWAKE */ 870 + timer_delete(&qca->wake_retrans_timer); 871 idle_delay = msecs_to_jiffies(qca->tx_idle_delay); 872 mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); 873 qca->tx_ibs_state = HCI_IBS_TX_AWAKE; ··· 2239 hu->hdev->hw_error = NULL; 2240 hu->hdev->reset = NULL; 2241 2242 + timer_delete_sync(&qca->wake_retrans_timer); 2243 + timer_delete_sync(&qca->tx_idle_timer); 2244 2245 /* Stop sending shutdown command if soc crashes. */ 2246 if (soc_type != QCA_ROME ··· 2629 2630 switch (qca->tx_ibs_state) { 2631 case HCI_IBS_TX_WAKING: 2632 + timer_delete(&qca->wake_retrans_timer); 2633 fallthrough; 2634 case HCI_IBS_TX_AWAKE: 2635 + timer_delete(&qca->tx_idle_timer); 2636 2637 serdev_device_write_flush(hu->serdev); 2638 cmd = HCI_IBS_SLEEP_IND;
+4 -4
drivers/bus/mhi/host/pci_generic.c
··· 1096 1097 dev_warn(&pdev->dev, "device recovery started\n"); 1098 1099 - del_timer(&mhi_pdev->health_check_timer); 1100 pm_runtime_forbid(&pdev->dev); 1101 1102 /* Clean up MHI state */ ··· 1293 struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); 1294 struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; 1295 1296 - del_timer_sync(&mhi_pdev->health_check_timer); 1297 cancel_work_sync(&mhi_pdev->recovery_work); 1298 1299 if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { ··· 1321 1322 dev_info(&pdev->dev, "reset\n"); 1323 1324 - del_timer(&mhi_pdev->health_check_timer); 1325 1326 /* Clean up MHI state */ 1327 if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { ··· 1431 if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status)) 1432 return 0; 1433 1434 - del_timer(&mhi_pdev->health_check_timer); 1435 cancel_work_sync(&mhi_pdev->recovery_work); 1436 1437 if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
··· 1096 1097 dev_warn(&pdev->dev, "device recovery started\n"); 1098 1099 + timer_delete(&mhi_pdev->health_check_timer); 1100 pm_runtime_forbid(&pdev->dev); 1101 1102 /* Clean up MHI state */ ··· 1293 struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); 1294 struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; 1295 1296 + timer_delete_sync(&mhi_pdev->health_check_timer); 1297 cancel_work_sync(&mhi_pdev->recovery_work); 1298 1299 if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { ··· 1321 1322 dev_info(&pdev->dev, "reset\n"); 1323 1324 + timer_delete(&mhi_pdev->health_check_timer); 1325 1326 /* Clean up MHI state */ 1327 if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { ··· 1431 if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status)) 1432 return 0; 1433 1434 + timer_delete(&mhi_pdev->health_check_timer); 1435 cancel_work_sync(&mhi_pdev->recovery_work); 1436 1437 if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
+3 -3
drivers/char/dtlk.c
··· 243 poll_wait(file, &dtlk_process_list, wait); 244 245 if (dtlk_has_indexing && dtlk_readable()) { 246 - del_timer(&dtlk_timer); 247 mask = EPOLLIN | EPOLLRDNORM; 248 } 249 if (dtlk_writeable()) { 250 - del_timer(&dtlk_timer); 251 mask |= EPOLLOUT | EPOLLWRNORM; 252 } 253 /* there are no exception conditions */ ··· 322 } 323 TRACE_RET; 324 325 - del_timer_sync(&dtlk_timer); 326 327 return 0; 328 }
··· 243 poll_wait(file, &dtlk_process_list, wait); 244 245 if (dtlk_has_indexing && dtlk_readable()) { 246 + timer_delete(&dtlk_timer); 247 mask = EPOLLIN | EPOLLRDNORM; 248 } 249 if (dtlk_writeable()) { 250 + timer_delete(&dtlk_timer); 251 mask |= EPOLLOUT | EPOLLWRNORM; 252 } 253 /* there are no exception conditions */ ··· 322 } 323 TRACE_RET; 324 325 + timer_delete_sync(&dtlk_timer); 326 327 return 0; 328 }
+1 -1
drivers/char/hangcheck-timer.c
··· 167 168 static void __exit hangcheck_exit(void) 169 { 170 - del_timer_sync(&hangcheck_ticktock); 171 printk("Hangcheck: Stopped hangcheck timer.\n"); 172 } 173
··· 167 168 static void __exit hangcheck_exit(void) 169 { 170 + timer_delete_sync(&hangcheck_ticktock); 171 printk("Hangcheck: Stopped hangcheck timer.\n"); 172 } 173
+1 -1
drivers/char/hw_random/xgene-rng.c
··· 93 /* Clear failure counter as timer expired */ 94 disable_irq(ctx->irq); 95 ctx->failure_cnt = 0; 96 - del_timer(&ctx->failure_timer); 97 enable_irq(ctx->irq); 98 } 99
··· 93 /* Clear failure counter as timer expired */ 94 disable_irq(ctx->irq); 95 ctx->failure_cnt = 0; 96 + timer_delete(&ctx->failure_timer); 97 enable_irq(ctx->irq); 98 } 99
+1 -1
drivers/char/ipmi/bt-bmc.c
··· 465 466 misc_deregister(&bt_bmc->miscdev); 467 if (bt_bmc->irq < 0) 468 - del_timer_sync(&bt_bmc->poll_timer); 469 } 470 471 static const struct of_device_id bt_bmc_match[] = {
··· 465 466 misc_deregister(&bt_bmc->miscdev); 467 if (bt_bmc->irq < 0) 468 + timer_delete_sync(&bt_bmc->poll_timer); 469 } 470 471 static const struct of_device_id bt_bmc_match[] = {
+1 -1
drivers/char/ipmi/ipmi_msghandler.c
··· 5538 * here. 5539 */ 5540 atomic_set(&stop_operation, 1); 5541 - del_timer_sync(&ipmi_timer); 5542 5543 initialized = false; 5544
··· 5538 * here. 5539 */ 5540 atomic_set(&stop_operation, 1); 5541 + timer_delete_sync(&ipmi_timer); 5542 5543 initialized = false; 5544
+2 -2
drivers/char/ipmi/ipmi_si_intf.c
··· 859 860 if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) { 861 /* Ok it if fails, the timer will just go off. */ 862 - if (del_timer(&smi_info->si_timer)) 863 smi_info->timer_running = false; 864 } 865 ··· 1839 } 1840 1841 smi_info->timer_can_start = false; 1842 - del_timer_sync(&smi_info->si_timer); 1843 } 1844 1845 static struct smi_info *find_dup_si(struct smi_info *info)
··· 859 860 if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) { 861 /* Ok it if fails, the timer will just go off. */ 862 + if (timer_delete(&smi_info->si_timer)) 863 smi_info->timer_running = false; 864 } 865 ··· 1839 } 1840 1841 smi_info->timer_can_start = false; 1842 + timer_delete_sync(&smi_info->si_timer); 1843 } 1844 1845 static struct smi_info *find_dup_si(struct smi_info *info)
+3 -3
drivers/char/ipmi/ipmi_ssif.c
··· 599 flags = ipmi_ssif_lock_cond(ssif_info, &oflags); 600 if (ssif_info->waiting_alert) { 601 ssif_info->waiting_alert = false; 602 - del_timer(&ssif_info->retry_timer); 603 do_get = true; 604 } else if (ssif_info->curr_msg) { 605 ssif_info->got_alert = true; ··· 1268 schedule_timeout(1); 1269 1270 ssif_info->stopping = true; 1271 - del_timer_sync(&ssif_info->watch_timer); 1272 - del_timer_sync(&ssif_info->retry_timer); 1273 if (ssif_info->thread) { 1274 complete(&ssif_info->wake_thread); 1275 kthread_stop(ssif_info->thread);
··· 599 flags = ipmi_ssif_lock_cond(ssif_info, &oflags); 600 if (ssif_info->waiting_alert) { 601 ssif_info->waiting_alert = false; 602 + timer_delete(&ssif_info->retry_timer); 603 do_get = true; 604 } else if (ssif_info->curr_msg) { 605 ssif_info->got_alert = true; ··· 1268 schedule_timeout(1); 1269 1270 ssif_info->stopping = true; 1271 + timer_delete_sync(&ssif_info->watch_timer); 1272 + timer_delete_sync(&ssif_info->retry_timer); 1273 if (ssif_info->thread) { 1274 complete(&ssif_info->wake_thread); 1275 kthread_stop(ssif_info->thread);
+2 -2
drivers/char/ipmi/kcs_bmc_aspeed.c
··· 428 if (rc == -ETIMEDOUT) 429 mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD); 430 } else { 431 - del_timer(&priv->obe.timer); 432 } 433 } 434 ··· 655 spin_lock_irq(&priv->obe.lock); 656 priv->obe.remove = true; 657 spin_unlock_irq(&priv->obe.lock); 658 - del_timer_sync(&priv->obe.timer); 659 } 660 661 static const struct of_device_id ast_kcs_bmc_match[] = {
··· 428 if (rc == -ETIMEDOUT) 429 mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD); 430 } else { 431 + timer_delete(&priv->obe.timer); 432 } 433 } 434 ··· 655 spin_lock_irq(&priv->obe.lock); 656 priv->obe.remove = true; 657 spin_unlock_irq(&priv->obe.lock); 658 + timer_delete_sync(&priv->obe.timer); 659 } 660 661 static const struct of_device_id ast_kcs_bmc_match[] = {
+1 -1
drivers/char/ipmi/ssif_bmc.c
··· 209 if (ret) 210 goto exit; 211 212 - del_timer(&ssif_bmc->response_timer); 213 ssif_bmc->response_timer_inited = false; 214 215 memcpy(&ssif_bmc->response, &msg, count);
··· 209 if (ret) 210 goto exit; 211 212 + timer_delete(&ssif_bmc->response_timer); 213 ssif_bmc->response_timer_inited = false; 214 215 memcpy(&ssif_bmc->response, &msg, count);
+1 -1
drivers/char/random.c
··· 1352 } 1353 mix_pool_bytes(&stack->entropy, sizeof(stack->entropy)); 1354 1355 - del_timer_sync(&stack->timer); 1356 destroy_timer_on_stack(&stack->timer); 1357 } 1358
··· 1352 } 1353 mix_pool_bytes(&stack->entropy, sizeof(stack->entropy)); 1354 1355 + timer_delete_sync(&stack->timer); 1356 destroy_timer_on_stack(&stack->timer); 1357 } 1358
+2 -2
drivers/char/tlclk.c
··· 838 unregister_chrdev(tlclk_major, "telco_clock"); 839 840 release_region(TLCLK_BASE, 8); 841 - del_timer_sync(&switchover_timer); 842 kfree(alarm_events); 843 844 } ··· 856 } 857 858 /* Alarm processing is done, wake up read task */ 859 - del_timer(&switchover_timer); 860 got_event = 1; 861 wake_up(&wq); 862 }
··· 838 unregister_chrdev(tlclk_major, "telco_clock"); 839 840 release_region(TLCLK_BASE, 8); 841 + timer_delete_sync(&switchover_timer); 842 kfree(alarm_events); 843 844 } ··· 856 } 857 858 /* Alarm processing is done, wake up read task */ 859 + timer_delete(&switchover_timer); 860 got_event = 1; 861 wake_up(&wq); 862 }
+2 -2
drivers/char/tpm/tpm-dev-common.c
··· 160 out: 161 if (!priv->response_length) { 162 *off = 0; 163 - del_timer_sync(&priv->user_read_timer); 164 flush_work(&priv->timeout_work); 165 } 166 mutex_unlock(&priv->buffer_mutex); ··· 267 void tpm_common_release(struct file *file, struct file_priv *priv) 268 { 269 flush_work(&priv->async_work); 270 - del_timer_sync(&priv->user_read_timer); 271 flush_work(&priv->timeout_work); 272 file->private_data = NULL; 273 priv->response_length = 0;
··· 160 out: 161 if (!priv->response_length) { 162 *off = 0; 163 + timer_delete_sync(&priv->user_read_timer); 164 flush_work(&priv->timeout_work); 165 } 166 mutex_unlock(&priv->buffer_mutex); ··· 267 void tpm_common_release(struct file *file, struct file_priv *priv) 268 { 269 flush_work(&priv->async_work); 270 + timer_delete_sync(&priv->user_read_timer); 271 flush_work(&priv->timeout_work); 272 file->private_data = NULL; 273 priv->response_length = 0;
+6 -6
drivers/comedi/drivers/comedi_test.c
··· 418 spin_unlock_bh(&dev->spinlock); 419 if (in_softirq()) { 420 /* Assume we were called from the timer routine itself. */ 421 - del_timer(&devpriv->ai_timer); 422 } else { 423 - del_timer_sync(&devpriv->ai_timer); 424 } 425 return 0; 426 } ··· 628 spin_unlock_bh(&dev->spinlock); 629 if (in_softirq()) { 630 /* Assume we were called from the timer routine itself. */ 631 - del_timer(&devpriv->ao_timer); 632 } else { 633 - del_timer_sync(&devpriv->ao_timer); 634 } 635 return 0; 636 } ··· 791 struct waveform_private *devpriv = dev->private; 792 793 if (devpriv) { 794 - del_timer_sync(&devpriv->ai_timer); 795 - del_timer_sync(&devpriv->ao_timer); 796 } 797 } 798
··· 418 spin_unlock_bh(&dev->spinlock); 419 if (in_softirq()) { 420 /* Assume we were called from the timer routine itself. */ 421 + timer_delete(&devpriv->ai_timer); 422 } else { 423 + timer_delete_sync(&devpriv->ai_timer); 424 } 425 return 0; 426 } ··· 628 spin_unlock_bh(&dev->spinlock); 629 if (in_softirq()) { 630 /* Assume we were called from the timer routine itself. */ 631 + timer_delete(&devpriv->ao_timer); 632 } else { 633 + timer_delete_sync(&devpriv->ao_timer); 634 } 635 return 0; 636 } ··· 791 struct waveform_private *devpriv = dev->private; 792 793 if (devpriv) { 794 + timer_delete_sync(&devpriv->ai_timer); 795 + timer_delete_sync(&devpriv->ao_timer); 796 } 797 } 798
+2 -2
drivers/comedi/drivers/das16.c
··· 775 /* disable SW timer */ 776 if (devpriv->timer_running) { 777 devpriv->timer_running = 0; 778 - del_timer(&devpriv->timer); 779 } 780 781 if (devpriv->can_burst) ··· 940 struct das16_private_struct *devpriv = dev->private; 941 942 if (devpriv) { 943 - del_timer_sync(&devpriv->timer); 944 comedi_isadma_free(devpriv->dma); 945 } 946 }
··· 775 /* disable SW timer */ 776 if (devpriv->timer_running) { 777 devpriv->timer_running = 0; 778 + timer_delete(&devpriv->timer); 779 } 780 781 if (devpriv->can_burst) ··· 940 struct das16_private_struct *devpriv = dev->private; 941 942 if (devpriv) { 943 + timer_delete_sync(&devpriv->timer); 944 comedi_isadma_free(devpriv->dma); 945 } 946 }
+1 -1
drivers/comedi/drivers/jr3_pci.c
··· 758 struct jr3_pci_dev_private *devpriv = dev->private; 759 760 if (devpriv) 761 - del_timer_sync(&devpriv->timer); 762 763 comedi_pci_detach(dev); 764 }
··· 758 struct jr3_pci_dev_private *devpriv = dev->private; 759 760 if (devpriv) 761 + timer_delete_sync(&devpriv->timer); 762 763 comedi_pci_detach(dev); 764 }
+2 -2
drivers/cpufreq/powernv-cpufreq.c
··· 802 if (gpstate_idx != new_index) 803 queue_gpstate_timer(gpstates); 804 else 805 - del_timer_sync(&gpstates->timer); 806 807 gpstates_done: 808 freq_data.gpstate_id = idx_to_pstate(gpstate_idx); ··· 880 freq_data.gpstate_id = idx_to_pstate(powernv_pstate_info.min); 881 smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1); 882 if (gpstates) 883 - del_timer_sync(&gpstates->timer); 884 885 kfree(policy->driver_data); 886 }
··· 802 if (gpstate_idx != new_index) 803 queue_gpstate_timer(gpstates); 804 else 805 + timer_delete_sync(&gpstates->timer); 806 807 gpstates_done: 808 freq_data.gpstate_id = idx_to_pstate(gpstate_idx); ··· 880 freq_data.gpstate_id = idx_to_pstate(powernv_pstate_info.min); 881 smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1); 882 if (gpstates) 883 + timer_delete_sync(&gpstates->timer); 884 885 kfree(policy->driver_data); 886 }
+2 -2
drivers/crypto/axis/artpec6_crypto.c
··· 2067 if (ac->pending_count) 2068 mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100)); 2069 else 2070 - del_timer(&ac->timer); 2071 } 2072 2073 static void artpec6_crypto_timeout(struct timer_list *t) ··· 2963 tasklet_disable(&ac->task); 2964 devm_free_irq(&pdev->dev, irq, ac); 2965 tasklet_kill(&ac->task); 2966 - del_timer_sync(&ac->timer); 2967 2968 artpec6_crypto_disable_hw(ac); 2969
··· 2067 if (ac->pending_count) 2068 mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100)); 2069 else 2070 + timer_delete(&ac->timer); 2071 } 2072 2073 static void artpec6_crypto_timeout(struct timer_list *t) ··· 2963 tasklet_disable(&ac->task); 2964 devm_free_irq(&pdev->dev, irq, ac); 2965 tasklet_kill(&ac->task); 2966 + timer_delete_sync(&ac->timer); 2967 2968 artpec6_crypto_disable_hw(ac); 2969
+1 -1
drivers/dma-buf/st-dma-fence.c
··· 412 413 err = 0; 414 err_free: 415 - del_timer_sync(&wt.timer); 416 destroy_timer_on_stack(&wt.timer); 417 dma_fence_signal(wt.f); 418 dma_fence_put(wt.f);
··· 412 413 err = 0; 414 err_free: 415 + timer_delete_sync(&wt.timer); 416 destroy_timer_on_stack(&wt.timer); 417 dma_fence_signal(wt.f); 418 dma_fence_put(wt.f);
+2 -2
drivers/dma/imx-dma.c
··· 324 dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel); 325 326 if (imxdma_hw_chain(imxdmac)) 327 - del_timer(&imxdmac->watchdog); 328 329 local_irq_save(flags); 330 imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) | ··· 454 } 455 456 if (imxdma_hw_chain(imxdmac)) { 457 - del_timer(&imxdmac->watchdog); 458 return; 459 } 460 }
··· 324 dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel); 325 326 if (imxdma_hw_chain(imxdmac)) 327 + timer_delete(&imxdmac->watchdog); 328 329 local_irq_save(flags); 330 imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) | ··· 454 } 455 456 if (imxdma_hw_chain(imxdmac)) { 457 + timer_delete(&imxdmac->watchdog); 458 return; 459 } 460 }
+1 -1
drivers/dma/ioat/dma.c
··· 159 } 160 161 /* flush inflight timers */ 162 - del_timer_sync(&ioat_chan->timer); 163 164 /* flush inflight tasklet runs */ 165 tasklet_kill(&ioat_chan->cleanup_task);
··· 159 } 160 161 /* flush inflight timers */ 162 + timer_delete_sync(&ioat_chan->timer); 163 164 /* flush inflight tasklet runs */ 165 tasklet_kill(&ioat_chan->cleanup_task);
+2 -2
drivers/dma/ioat/init.c
··· 1224 set_bit(IOAT_CHAN_DOWN, &ioat_chan->state); 1225 spin_unlock_bh(&ioat_chan->prep_lock); 1226 /* 1227 - * Synchronization rule for del_timer_sync(): 1228 * - The caller must not hold locks which would prevent 1229 * completion of the timer's handler. 1230 * So prep_lock cannot be held before calling it. 1231 */ 1232 - del_timer_sync(&ioat_chan->timer); 1233 1234 /* this should quiesce then reset */ 1235 ioat_reset_hw(ioat_chan);
··· 1224 set_bit(IOAT_CHAN_DOWN, &ioat_chan->state); 1225 spin_unlock_bh(&ioat_chan->prep_lock); 1226 /* 1227 + * Synchronization rule for timer_delete_sync(): 1228 * - The caller must not hold locks which would prevent 1229 * completion of the timer's handler. 1230 * So prep_lock cannot be held before calling it. 1231 */ 1232 + timer_delete_sync(&ioat_chan->timer); 1233 1234 /* this should quiesce then reset */ 1235 ioat_reset_hw(ioat_chan);
+1 -1
drivers/firewire/core-transaction.c
··· 39 static int try_cancel_split_timeout(struct fw_transaction *t) 40 { 41 if (t->is_split_transaction) 42 - return del_timer(&t->split_timeout_timer); 43 else 44 return 1; 45 }
··· 39 static int try_cancel_split_timeout(struct fw_transaction *t) 40 { 41 if (t->is_split_transaction) 42 + return timer_delete(&t->split_timeout_timer); 43 else 44 return 1; 45 }
+1 -1
drivers/firmware/psci/psci_checker.c
··· 342 * Disable the timer to make sure that the timer will not trigger 343 * later. 344 */ 345 - del_timer(&wakeup_timer); 346 destroy_timer_on_stack(&wakeup_timer); 347 348 if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
··· 342 * Disable the timer to make sure that the timer will not trigger 343 * later. 344 */ 345 + timer_delete(&wakeup_timer); 346 destroy_timer_on_stack(&wakeup_timer); 347 348 if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
··· 280 281 } while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq); 282 283 - if (del_timer(&ring->fence_drv.fallback_timer) && 284 seq != ring->fence_drv.sync_seq) 285 amdgpu_fence_schedule_fallback(ring); 286 ··· 618 amdgpu_irq_put(adev, ring->fence_drv.irq_src, 619 ring->fence_drv.irq_type); 620 621 - del_timer_sync(&ring->fence_drv.fallback_timer); 622 } 623 } 624
··· 280 281 } while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq); 282 283 + if (timer_delete(&ring->fence_drv.fallback_timer) && 284 seq != ring->fence_drv.sync_seq) 285 amdgpu_fence_schedule_fallback(ring); 286 ··· 618 amdgpu_irq_put(adev, ring->fence_drv.irq_src, 619 ring->fence_drv.irq_type); 620 621 + timer_delete_sync(&ring->fence_drv.fallback_timer); 622 } 623 } 624
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
··· 1239 return; 1240 1241 amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id); 1242 - del_timer_sync(&ring->fence_drv.fallback_timer); 1243 amdgpu_ring_fini(ring); 1244 kfree(ring); 1245 }
··· 1239 return; 1240 1241 amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id); 1242 + timer_delete_sync(&ring->fence_drv.fallback_timer); 1243 amdgpu_ring_fini(ring); 1244 kfree(ring); 1245 }
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
··· 124 } 125 } 126 127 - del_timer(&mux->resubmit_timer); 128 mux->s_resubmit = false; 129 } 130
··· 124 } 125 } 126 127 + timer_delete(&mux->resubmit_timer); 128 mux->s_resubmit = false; 129 } 130
+1 -1
drivers/gpu/drm/bridge/tda998x_drv.c
··· 1763 if (priv->hdmi->irq) 1764 free_irq(priv->hdmi->irq, priv); 1765 1766 - del_timer_sync(&priv->edid_delay_timer); 1767 cancel_work_sync(&priv->detect_work); 1768 1769 i2c_unregister_device(priv->cec);
··· 1763 if (priv->hdmi->irq) 1764 free_irq(priv->hdmi->irq, priv); 1765 1766 + timer_delete_sync(&priv->edid_delay_timer); 1767 cancel_work_sync(&priv->detect_work); 1768 1769 i2c_unregister_device(priv->cec);
+1 -1
drivers/gpu/drm/drm_vblank.c
··· 508 drm_core_check_feature(dev, DRIVER_MODESET)); 509 510 drm_vblank_destroy_worker(vblank); 511 - del_timer_sync(&vblank->disable_timer); 512 } 513 514 /**
··· 508 drm_core_check_feature(dev, DRIVER_MODESET)); 509 510 drm_vblank_destroy_worker(vblank); 511 + timer_delete_sync(&vblank->disable_timer); 512 } 513 514 /**
+1 -1
drivers/gpu/drm/exynos/exynos_drm_vidi.c
··· 427 { 428 struct vidi_context *ctx = dev_get_drvdata(dev); 429 430 - del_timer_sync(&ctx->timer); 431 } 432 433 static const struct component_ops vidi_component_ops = {
··· 427 { 428 struct vidi_context *ctx = dev_get_drvdata(dev); 429 430 + timer_delete_sync(&ctx->timer); 431 } 432 433 static const struct component_ops vidi_component_ops = {
+1 -1
drivers/gpu/drm/gud/gud_pipe.c
··· 254 255 usb_sg_wait(&ctx.sgr); 256 257 - if (!del_timer_sync(&ctx.timer)) 258 ret = -ETIMEDOUT; 259 else if (ctx.sgr.status < 0) 260 ret = ctx.sgr.status;
··· 254 255 usb_sg_wait(&ctx.sgr); 256 257 + if (!timer_delete_sync(&ctx.timer)) 258 ret = -ETIMEDOUT; 259 else if (ctx.sgr.status < 0) 260 ret = ctx.sgr.status;
+3 -3
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
··· 2502 ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI)); 2503 ENGINE_TRACE(engine, "semaphore yield: %08x\n", 2504 engine->execlists.yield); 2505 - if (del_timer(&engine->execlists.timer)) 2506 tasklet = true; 2507 } 2508 ··· 3370 static void execlists_shutdown(struct intel_engine_cs *engine) 3371 { 3372 /* Synchronise with residual timers and any softirq they raise */ 3373 - del_timer_sync(&engine->execlists.timer); 3374 - del_timer_sync(&engine->execlists.preempt); 3375 tasklet_kill(&engine->sched_engine->tasklet); 3376 } 3377
··· 2502 ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI)); 2503 ENGINE_TRACE(engine, "semaphore yield: %08x\n", 2504 engine->execlists.yield); 2505 + if (timer_delete(&engine->execlists.timer)) 2506 tasklet = true; 2507 } 2508 ··· 3370 static void execlists_shutdown(struct intel_engine_cs *engine) 3371 { 3372 /* Synchronise with residual timers and any softirq they raise */ 3373 + timer_delete_sync(&engine->execlists.timer); 3374 + timer_delete_sync(&engine->execlists.preempt); 3375 tasklet_kill(&engine->sched_engine->tasklet); 3376 } 3377
+1 -1
drivers/gpu/drm/i915/gt/intel_rps.c
··· 161 162 static void rps_stop_timer(struct intel_rps *rps) 163 { 164 - del_timer_sync(&rps->timer); 165 rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp); 166 cancel_work_sync(&rps->work); 167 }
··· 161 162 static void rps_stop_timer(struct intel_rps *rps) 163 { 164 + timer_delete_sync(&rps->timer); 165 rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp); 166 cancel_work_sync(&rps->work); 167 }
+2 -2
drivers/gpu/drm/i915/gt/mock_engine.c
··· 297 struct i915_request *rq; 298 unsigned long flags; 299 300 - del_timer_sync(&mock->hw_delay); 301 302 spin_lock_irqsave(&engine->sched_engine->lock, flags); 303 ··· 432 container_of(engine, typeof(*mock), base); 433 struct i915_request *request, *rn; 434 435 - del_timer_sync(&mock->hw_delay); 436 437 spin_lock_irq(&mock->hw_lock); 438 list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link)
··· 297 struct i915_request *rq; 298 unsigned long flags; 299 300 + timer_delete_sync(&mock->hw_delay); 301 302 spin_lock_irqsave(&engine->sched_engine->lock, flags); 303 ··· 432 container_of(engine, typeof(*mock), base); 433 struct i915_request *request, *rn; 434 435 + timer_delete_sync(&mock->hw_delay); 436 437 spin_lock_irq(&mock->hw_lock); 438 list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link)
+2 -2
drivers/gpu/drm/i915/gt/selftest_execlists.c
··· 1198 ENGINE_TRACE(engine, "forcing tasklet for rewind\n"); 1199 while (i915_request_is_active(rq[A2])) { /* semaphore yield! */ 1200 /* Wait for the timeslice to kick in */ 1201 - del_timer(&engine->execlists.timer); 1202 tasklet_hi_schedule(&engine->sched_engine->tasklet); 1203 intel_engine_flush_submission(engine); 1204 } ··· 2357 /* force preempt reset [failure] */ 2358 while (!engine->execlists.pending[0]) 2359 intel_engine_flush_submission(engine); 2360 - del_timer_sync(&engine->execlists.preempt); 2361 intel_engine_flush_submission(engine); 2362 2363 cancel_reset_timeout(engine);
··· 1198 ENGINE_TRACE(engine, "forcing tasklet for rewind\n"); 1199 while (i915_request_is_active(rq[A2])) { /* semaphore yield! */ 1200 /* Wait for the timeslice to kick in */ 1201 + timer_delete(&engine->execlists.timer); 1202 tasklet_hi_schedule(&engine->sched_engine->tasklet); 1203 intel_engine_flush_submission(engine); 1204 } ··· 2357 /* force preempt reset [failure] */ 2358 while (!engine->execlists.pending[0]) 2359 intel_engine_flush_submission(engine); 2360 + timer_delete_sync(&engine->execlists.preempt); 2361 intel_engine_flush_submission(engine); 2362 2363 cancel_reset_timeout(engine);
+1 -1
drivers/gpu/drm/i915/gt/selftest_migrate.c
··· 660 661 out_rq: 662 i915_request_add(rq); /* GEM_BUG_ON(rq->reserved_space > ring->space)? */ 663 - del_timer_sync(&st.timer); 664 destroy_timer_on_stack(&st.timer); 665 out_unpin: 666 intel_context_unpin(ce);
··· 660 661 out_rq: 662 i915_request_add(rq); /* GEM_BUG_ON(rq->reserved_space > ring->space)? */ 663 + timer_delete_sync(&st.timer); 664 destroy_timer_on_stack(&st.timer); 665 out_unpin: 666 intel_context_unpin(ce);
+1 -1
drivers/gpu/drm/i915/i915_utils.c
··· 52 if (!timer_active(t)) 53 return; 54 55 - del_timer(t); 56 WRITE_ONCE(t->expires, 0); 57 } 58
··· 52 if (!timer_active(t)) 53 return; 54 55 + timer_delete(t); 56 WRITE_ONCE(t->expires, 0); 57 } 58
+1 -1
drivers/gpu/drm/i915/intel_wakeref.c
··· 163 unsigned long flags; 164 165 if (!timeout) { 166 - if (del_timer_sync(&wf->timer)) 167 wakeref_auto_timeout(&wf->timer); 168 return; 169 }
··· 163 unsigned long flags; 164 165 if (!timeout) { 166 + if (timer_delete_sync(&wf->timer)) 167 wakeref_auto_timeout(&wf->timer); 168 return; 169 }
+1 -1
drivers/gpu/drm/i915/selftests/lib_sw_fence.c
··· 74 75 void timed_fence_fini(struct timed_fence *tf) 76 { 77 - if (del_timer_sync(&tf->timer)) 78 i915_sw_fence_commit(&tf->fence); 79 80 destroy_timer_on_stack(&tf->timer);
··· 74 75 void timed_fence_fini(struct timed_fence *tf) 76 { 77 + if (timer_delete_sync(&tf->timer)) 78 i915_sw_fence_commit(&tf->fence); 79 80 destroy_timer_on_stack(&tf->timer);
+1 -1
drivers/gpu/drm/mediatek/mtk_dp.c
··· 2847 pm_runtime_put(&pdev->dev); 2848 pm_runtime_disable(&pdev->dev); 2849 if (mtk_dp->data->bridge_type != DRM_MODE_CONNECTOR_eDP) 2850 - del_timer_sync(&mtk_dp->debounce_timer); 2851 platform_device_unregister(mtk_dp->phy_dev); 2852 if (mtk_dp->audio_pdev) 2853 platform_device_unregister(mtk_dp->audio_pdev);
··· 2847 pm_runtime_put(&pdev->dev); 2848 pm_runtime_disable(&pdev->dev); 2849 if (mtk_dp->data->bridge_type != DRM_MODE_CONNECTOR_eDP) 2850 + timer_delete_sync(&mtk_dp->debounce_timer); 2851 platform_device_unregister(mtk_dp->phy_dev); 2852 if (mtk_dp->audio_pdev) 2853 platform_device_unregister(mtk_dp->audio_pdev);
+1 -1
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
··· 1253 gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ)); 1254 1255 /* Turn off the hangcheck timer to keep it from bothering us */ 1256 - del_timer(&gpu->hangcheck_timer); 1257 1258 kthread_queue_work(gpu->worker, &gpu->recover_work); 1259 }
··· 1253 gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ)); 1254 1255 /* Turn off the hangcheck timer to keep it from bothering us */ 1256 + timer_delete(&gpu->hangcheck_timer); 1257 1258 kthread_queue_work(gpu->worker, &gpu->recover_work); 1259 }
+1 -1
drivers/gpu/drm/msm/adreno/a5xx_preempt.c
··· 182 return; 183 184 /* Delete the preemption watchdog timer */ 185 - del_timer(&a5xx_gpu->preempt_timer); 186 187 /* 188 * The hardware should be setting CP_CONTEXT_SWITCH_CNTL to zero before
··· 182 return; 183 184 /* Delete the preemption watchdog timer */ 185 + timer_delete(&a5xx_gpu->preempt_timer); 186 187 /* 188 * The hardware should be setting CP_CONTEXT_SWITCH_CNTL to zero before
+1 -1
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
··· 28 gmu->hung = true; 29 30 /* Turn off the hangcheck timer while we are resetting */ 31 - del_timer(&gpu->hangcheck_timer); 32 33 /* Queue the GPU handler because we need to treat this as a recovery */ 34 kthread_queue_work(gpu->worker, &gpu->recover_work);
··· 28 gmu->hung = true; 29 30 /* Turn off the hangcheck timer while we are resetting */ 31 + timer_delete(&gpu->hangcheck_timer); 32 33 /* Queue the GPU handler because we need to treat this as a recovery */ 34 kthread_queue_work(gpu->worker, &gpu->recover_work);
+2 -2
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
··· 1706 gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE)); 1707 1708 /* Turn off the hangcheck timer to keep it from bothering us */ 1709 - del_timer(&gpu->hangcheck_timer); 1710 1711 kthread_queue_work(gpu->worker, &gpu->recover_work); 1712 } ··· 1726 */ 1727 if (status & (A7XX_CX_MISC_SW_FUSE_VALUE_RAYTRACING | 1728 A7XX_CX_MISC_SW_FUSE_VALUE_LPAC)) { 1729 - del_timer(&gpu->hangcheck_timer); 1730 1731 kthread_queue_work(gpu->worker, &gpu->recover_work); 1732 }
··· 1706 gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE)); 1707 1708 /* Turn off the hangcheck timer to keep it from bothering us */ 1709 + timer_delete(&gpu->hangcheck_timer); 1710 1711 kthread_queue_work(gpu->worker, &gpu->recover_work); 1712 } ··· 1726 */ 1727 if (status & (A7XX_CX_MISC_SW_FUSE_VALUE_RAYTRACING | 1728 A7XX_CX_MISC_SW_FUSE_VALUE_LPAC)) { 1729 + timer_delete(&gpu->hangcheck_timer); 1730 1731 kthread_queue_work(gpu->worker, &gpu->recover_work); 1732 }
+1 -1
drivers/gpu/drm/msm/adreno/a6xx_preempt.c
··· 146 return; 147 148 /* Delete the preemption watchdog timer */ 149 - del_timer(&a6xx_gpu->preempt_timer); 150 151 /* 152 * The hardware should be setting the stop bit of CP_CONTEXT_SWITCH_CNTL
··· 146 return; 147 148 /* Delete the preemption watchdog timer */ 149 + timer_delete(&a6xx_gpu->preempt_timer); 150 151 /* 152 * The hardware should be setting the stop bit of CP_CONTEXT_SWITCH_CNTL
+1 -1
drivers/gpu/drm/msm/adreno/adreno_gpu.c
··· 292 293 if (do_devcoredump) { 294 /* Turn off the hangcheck timer to keep it from bothering us */ 295 - del_timer(&gpu->hangcheck_timer); 296 297 gpu->fault_info.ttbr0 = info->ttbr0; 298 gpu->fault_info.iova = iova;
··· 292 293 if (do_devcoredump) { 294 /* Turn off the hangcheck timer to keep it from bothering us */ 295 + timer_delete(&gpu->hangcheck_timer); 296 297 gpu->fault_info.ttbr0 = info->ttbr0; 298 gpu->fault_info.iova = iova;
+2 -2
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
··· 1410 /* after phys waits for frame-done, should be no more frames pending */ 1411 if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) { 1412 DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id); 1413 - del_timer_sync(&dpu_enc->frame_done_timer); 1414 } 1415 1416 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP); ··· 1582 1583 if (!dpu_enc->frame_busy_mask[0]) { 1584 atomic_set(&dpu_enc->frame_done_timeout_ms, 0); 1585 - del_timer(&dpu_enc->frame_done_timer); 1586 1587 dpu_encoder_resource_control(drm_enc, 1588 DPU_ENC_RC_EVENT_FRAME_DONE);
··· 1410 /* after phys waits for frame-done, should be no more frames pending */ 1411 if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) { 1412 DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id); 1413 + timer_delete_sync(&dpu_enc->frame_done_timer); 1414 } 1415 1416 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP); ··· 1582 1583 if (!dpu_enc->frame_busy_mask[0]) { 1584 atomic_set(&dpu_enc->frame_done_timeout_ms, 0); 1585 + timer_delete(&dpu_enc->frame_done_timer); 1586 1587 dpu_encoder_resource_control(drm_enc, 1588 DPU_ENC_RC_EVENT_FRAME_DONE);
+1 -1
drivers/gpu/drm/omapdrm/dss/dsi.c
··· 452 453 #ifdef DSI_CATCH_MISSING_TE 454 if (irqstatus & DSI_IRQ_TE_TRIGGER) 455 - del_timer(&dsi->te_timer); 456 #endif 457 458 /* make a copy and unlock, so that isrs can unregister
··· 452 453 #ifdef DSI_CATCH_MISSING_TE 454 if (irqstatus & DSI_IRQ_TE_TRIGGER) 455 + timer_delete(&dsi->te_timer); 456 #endif 457 458 /* make a copy and unlock, so that isrs can unregister
+1 -1
drivers/gpu/drm/vc4/vc4_bo.c
··· 1043 struct vc4_dev *vc4 = to_vc4_dev(dev); 1044 int i; 1045 1046 - del_timer(&vc4->bo_cache.time_timer); 1047 cancel_work_sync(&vc4->bo_cache.time_work); 1048 1049 vc4_bo_cache_purge(dev);
··· 1043 struct vc4_dev *vc4 = to_vc4_dev(dev); 1044 int i; 1045 1046 + timer_delete(&vc4->bo_cache.time_timer); 1047 cancel_work_sync(&vc4->bo_cache.time_work); 1048 1049 vc4_bo_cache_purge(dev);
+1 -1
drivers/gpu/drm/vgem/vgem_fence.c
··· 49 { 50 struct vgem_fence *fence = container_of(base, typeof(*fence), base); 51 52 - del_timer_sync(&fence->timer); 53 dma_fence_free(&fence->base); 54 } 55
··· 49 { 50 struct vgem_fence *fence = container_of(base, typeof(*fence), base); 51 52 + timer_delete_sync(&fence->timer); 53 dma_fence_free(&fence->base); 54 } 55
+1 -1
drivers/gpu/drm/xe/xe_execlist.c
··· 297 298 void xe_execlist_port_destroy(struct xe_execlist_port *port) 299 { 300 - del_timer(&port->irq_fail); 301 302 /* Prevent an interrupt while we're destroying */ 303 spin_lock_irq(&gt_to_xe(port->hwe->gt)->irq.lock);
··· 297 298 void xe_execlist_port_destroy(struct xe_execlist_port *port) 299 { 300 + timer_delete(&port->irq_fail); 301 302 /* Prevent an interrupt while we're destroying */ 303 spin_lock_irq(&gt_to_xe(port->hwe->gt)->irq.lock);
+1 -1
drivers/greybus/operation.c
··· 279 if (gb_operation_is_incoming(operation)) { 280 gb_operation_request_handle(operation); 281 } else { 282 - ret = del_timer_sync(&operation->timer); 283 if (!ret) { 284 /* Cancel request message if scheduled by timeout. */ 285 if (gb_operation_result(operation) == -ETIMEDOUT)
··· 279 if (gb_operation_is_incoming(operation)) { 280 gb_operation_request_handle(operation); 281 } else { 282 + ret = timer_delete_sync(&operation->timer); 283 if (!ret) { 284 /* Cancel request message if scheduled by timeout. */ 285 if (gb_operation_result(operation) == -ETIMEDOUT)
+2 -2
drivers/hid/hid-apple.c
··· 950 return 0; 951 952 out_err: 953 - del_timer_sync(&asc->battery_timer); 954 hid_hw_stop(hdev); 955 return ret; 956 } ··· 959 { 960 struct apple_sc *asc = hid_get_drvdata(hdev); 961 962 - del_timer_sync(&asc->battery_timer); 963 964 hid_hw_stop(hdev); 965 }
··· 950 return 0; 951 952 out_err: 953 + timer_delete_sync(&asc->battery_timer); 954 hid_hw_stop(hdev); 955 return ret; 956 } ··· 959 { 960 struct apple_sc *asc = hid_get_drvdata(hdev); 961 962 + timer_delete_sync(&asc->battery_timer); 963 964 hid_hw_stop(hdev); 965 }
+1 -1
drivers/hid/hid-appleir.c
··· 319 { 320 struct appleir *appleir = hid_get_drvdata(hid); 321 hid_hw_stop(hid); 322 - del_timer_sync(&appleir->key_up_timer); 323 } 324 325 static const struct hid_device_id appleir_devices[] = {
··· 319 { 320 struct appleir *appleir = hid_get_drvdata(hid); 321 hid_hw_stop(hid); 322 + timer_delete_sync(&appleir->key_up_timer); 323 } 324 325 static const struct hid_device_id appleir_devices[] = {
+1 -1
drivers/hid/hid-appletb-kbd.c
··· 448 appletb_kbd_set_mode(kbd, APPLETB_KBD_MODE_OFF); 449 450 input_unregister_handler(&kbd->inp_handler); 451 - del_timer_sync(&kbd->inactivity_timer); 452 453 hid_hw_close(hdev); 454 hid_hw_stop(hdev);
··· 448 appletb_kbd_set_mode(kbd, APPLETB_KBD_MODE_OFF); 449 450 input_unregister_handler(&kbd->inp_handler); 451 + timer_delete_sync(&kbd->inactivity_timer); 452 453 hid_hw_close(hdev); 454 hid_hw_stop(hdev);
+2 -2
drivers/hid/hid-magicmouse.c
··· 915 916 return 0; 917 err_stop_hw: 918 - del_timer_sync(&msc->battery_timer); 919 hid_hw_stop(hdev); 920 return ret; 921 } ··· 926 927 if (msc) { 928 cancel_delayed_work_sync(&msc->work); 929 - del_timer_sync(&msc->battery_timer); 930 } 931 932 hid_hw_stop(hdev);
··· 915 916 return 0; 917 err_stop_hw: 918 + timer_delete_sync(&msc->battery_timer); 919 hid_hw_stop(hdev); 920 return ret; 921 } ··· 926 927 if (msc) { 928 cancel_delayed_work_sync(&msc->work); 929 + timer_delete_sync(&msc->battery_timer); 930 } 931 932 hid_hw_stop(hdev);
+2 -2
drivers/hid/hid-multitouch.c
··· 1299 mod_timer(&td->release_timer, 1300 jiffies + msecs_to_jiffies(100)); 1301 else 1302 - del_timer(&td->release_timer); 1303 } 1304 1305 clear_bit_unlock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags); ··· 1881 { 1882 struct mt_device *td = hid_get_drvdata(hdev); 1883 1884 - del_timer_sync(&td->release_timer); 1885 1886 sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group); 1887 hid_hw_stop(hdev);
··· 1299 mod_timer(&td->release_timer, 1300 jiffies + msecs_to_jiffies(100)); 1301 else 1302 + timer_delete(&td->release_timer); 1303 } 1304 1305 clear_bit_unlock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags); ··· 1881 { 1882 struct mt_device *td = hid_get_drvdata(hdev); 1883 1884 + timer_delete_sync(&td->release_timer); 1885 1886 sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group); 1887 hid_hw_stop(hdev);
+1 -1
drivers/hid/hid-nvidia-shield.c
··· 1102 1103 hid_hw_close(hdev); 1104 thunderstrike_destroy(ts); 1105 - del_timer_sync(&ts->psy_stats_timer); 1106 cancel_work_sync(&ts->hostcmd_req_work); 1107 hid_hw_stop(hdev); 1108 }
··· 1102 1103 hid_hw_close(hdev); 1104 thunderstrike_destroy(ts); 1105 + timer_delete_sync(&ts->psy_stats_timer); 1106 cancel_work_sync(&ts->hostcmd_req_work); 1107 hid_hw_stop(hdev); 1108 }
+1 -1
drivers/hid/hid-prodikeys.c
··· 254 for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) { 255 pms = &pm->sustained_notes[i]; 256 pms->in_use = 1; 257 - del_timer_sync(&pms->timer); 258 } 259 } 260
··· 254 for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) { 255 pms = &pm->sustained_notes[i]; 256 pms->in_use = 1; 257 + timer_delete_sync(&pms->timer); 258 } 259 } 260
+1 -1
drivers/hid/hid-sony.c
··· 2164 struct sony_sc *sc = hid_get_drvdata(hdev); 2165 2166 if (sc->quirks & (GHL_GUITAR_PS3WIIU | GHL_GUITAR_PS4)) { 2167 - del_timer_sync(&sc->ghl_poke_timer); 2168 usb_free_urb(sc->ghl_urb); 2169 } 2170
··· 2164 struct sony_sc *sc = hid_get_drvdata(hdev); 2165 2166 if (sc->quirks & (GHL_GUITAR_PS3WIIU | GHL_GUITAR_PS4)) { 2167 + timer_delete_sync(&sc->ghl_poke_timer); 2168 usb_free_urb(sc->ghl_urb); 2169 } 2170
+1 -1
drivers/hid/hid-uclogic-core.c
··· 474 { 475 struct uclogic_drvdata *drvdata = hid_get_drvdata(hdev); 476 477 - del_timer_sync(&drvdata->inrange_timer); 478 hid_hw_stop(hdev); 479 kfree(drvdata->desc_ptr); 480 uclogic_params_cleanup(&drvdata->params);
··· 474 { 475 struct uclogic_drvdata *drvdata = hid_get_drvdata(hdev); 476 477 + timer_delete_sync(&drvdata->inrange_timer); 478 hid_hw_stop(hdev); 479 kfree(drvdata->desc_ptr); 480 uclogic_params_cleanup(&drvdata->params);
+1 -1
drivers/hid/hid-wiimote-core.c
··· 1171 wiimote_cmd_release(wdata); 1172 1173 /* delete MP hotplug timer */ 1174 - del_timer_sync(&wdata->timer); 1175 } else { 1176 /* reschedule MP hotplug timer */ 1177 if (!(flags & WIIPROTO_FLAG_BUILTIN_MP) &&
··· 1171 wiimote_cmd_release(wdata); 1172 1173 /* delete MP hotplug timer */ 1174 + timer_delete_sync(&wdata->timer); 1175 } else { 1176 /* reschedule MP hotplug timer */ 1177 if (!(flags & WIIPROTO_FLAG_BUILTIN_MP) &&
+2 -2
drivers/hid/usbhid/hid-core.c
··· 1462 1463 static void hid_cancel_delayed_stuff(struct usbhid_device *usbhid) 1464 { 1465 - del_timer_sync(&usbhid->io_retry); 1466 cancel_work_sync(&usbhid->reset_work); 1467 } 1468 1469 static void hid_cease_io(struct usbhid_device *usbhid) 1470 { 1471 - del_timer_sync(&usbhid->io_retry); 1472 usb_kill_urb(usbhid->urbin); 1473 usb_kill_urb(usbhid->urbctrl); 1474 usb_kill_urb(usbhid->urbout);
··· 1462 1463 static void hid_cancel_delayed_stuff(struct usbhid_device *usbhid) 1464 { 1465 + timer_delete_sync(&usbhid->io_retry); 1466 cancel_work_sync(&usbhid->reset_work); 1467 } 1468 1469 static void hid_cease_io(struct usbhid_device *usbhid) 1470 { 1471 + timer_delete_sync(&usbhid->io_retry); 1472 usb_kill_urb(usbhid->urbin); 1473 usb_kill_urb(usbhid->urbctrl); 1474 usb_kill_urb(usbhid->urbout);
+1 -1
drivers/hid/wacom_sys.c
··· 2896 cancel_work_sync(&wacom->battery_work); 2897 cancel_work_sync(&wacom->remote_work); 2898 cancel_work_sync(&wacom->mode_change_work); 2899 - del_timer_sync(&wacom->idleprox_timer); 2900 if (hdev->bus == BUS_BLUETOOTH) 2901 device_remove_file(&hdev->dev, &dev_attr_speed); 2902
··· 2896 cancel_work_sync(&wacom->battery_work); 2897 cancel_work_sync(&wacom->remote_work); 2898 cancel_work_sync(&wacom->mode_change_work); 2899 + timer_delete_sync(&wacom->idleprox_timer); 2900 if (hdev->bus == BUS_BLUETOOTH) 2901 device_remove_file(&hdev->dev, &dev_attr_speed); 2902
+9 -9
drivers/hsi/clients/ssi_protocol.c
··· 281 ssi->recv_state = state; 282 switch (state) { 283 case RECV_IDLE: 284 - del_timer(&ssi->rx_wd); 285 if (ssi->send_state == SEND_IDLE) 286 - del_timer(&ssi->keep_alive); 287 break; 288 case RECV_READY: 289 /* CMT speech workaround */ ··· 306 switch (state) { 307 case SEND_IDLE: 308 case SEND_READY: 309 - del_timer(&ssi->tx_wd); 310 if (ssi->recv_state == RECV_IDLE) 311 - del_timer(&ssi->keep_alive); 312 break; 313 case WAIT4READY: 314 case SENDING: ··· 398 if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags)) 399 ssi_waketest(cl, 0); /* FIXME: To be removed */ 400 spin_lock_bh(&ssi->lock); 401 - del_timer(&ssi->rx_wd); 402 - del_timer(&ssi->tx_wd); 403 - del_timer(&ssi->keep_alive); 404 cancel_work_sync(&ssi->work); 405 ssi->main_state = 0; 406 ssi->send_state = 0; ··· 648 ssip_error(cl); 649 return; 650 } 651 - del_timer(&ssi->rx_wd); /* FIXME: Revisit */ 652 skb = msg->context; 653 ssip_pn_rx(skb); 654 hsi_free_msg(msg); ··· 731 732 spin_lock_bh(&ssi->lock); 733 ssi->main_state = ACTIVE; 734 - del_timer(&ssi->tx_wd); /* Stop boot handshake timer */ 735 spin_unlock_bh(&ssi->lock); 736 737 dev_notice(&cl->device, "WAKELINES TEST %s\n",
··· 281 ssi->recv_state = state; 282 switch (state) { 283 case RECV_IDLE: 284 + timer_delete(&ssi->rx_wd); 285 if (ssi->send_state == SEND_IDLE) 286 + timer_delete(&ssi->keep_alive); 287 break; 288 case RECV_READY: 289 /* CMT speech workaround */ ··· 306 switch (state) { 307 case SEND_IDLE: 308 case SEND_READY: 309 + timer_delete(&ssi->tx_wd); 310 if (ssi->recv_state == RECV_IDLE) 311 + timer_delete(&ssi->keep_alive); 312 break; 313 case WAIT4READY: 314 case SENDING: ··· 398 if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags)) 399 ssi_waketest(cl, 0); /* FIXME: To be removed */ 400 spin_lock_bh(&ssi->lock); 401 + timer_delete(&ssi->rx_wd); 402 + timer_delete(&ssi->tx_wd); 403 + timer_delete(&ssi->keep_alive); 404 cancel_work_sync(&ssi->work); 405 ssi->main_state = 0; 406 ssi->send_state = 0; ··· 648 ssip_error(cl); 649 return; 650 } 651 + timer_delete(&ssi->rx_wd); /* FIXME: Revisit */ 652 skb = msg->context; 653 ssip_pn_rx(skb); 654 hsi_free_msg(msg); ··· 731 732 spin_lock_bh(&ssi->lock); 733 ssi->main_state = ACTIVE; 734 + timer_delete(&ssi->tx_wd); /* Stop boot handshake timer */ 735 spin_unlock_bh(&ssi->lock); 736 737 dev_notice(&cl->device, "WAKELINES TEST %s\n",
+1 -1
drivers/hte/hte-tegra194-test.c
··· 221 free_irq(hte.gpio_in_irq, &hte); 222 gpiod_put(hte.gpio_in); 223 gpiod_put(hte.gpio_out); 224 - del_timer_sync(&hte.timer); 225 } 226 227 static struct platform_driver tegra_hte_test_driver = {
··· 221 free_irq(hte.gpio_in_irq, &hte); 222 gpiod_put(hte.gpio_in); 223 gpiod_put(hte.gpio_out); 224 + timer_delete_sync(&hte.timer); 225 } 226 227 static struct platform_driver tegra_hte_test_driver = {
+1 -1
drivers/hwmon/pwm-fan.c
··· 483 { 484 struct pwm_fan_ctx *ctx = __ctx; 485 486 - del_timer_sync(&ctx->rpm_timer); 487 /* Switch off everything */ 488 ctx->enable_mode = pwm_disable_reg_disable; 489 pwm_fan_power_off(ctx, true);
··· 483 { 484 struct pwm_fan_ctx *ctx = __ctx; 485 486 + timer_delete_sync(&ctx->rpm_timer); 487 /* Switch off everything */ 488 ctx->enable_mode = pwm_disable_reg_disable; 489 pwm_fan_power_off(ctx, true);
+1 -1
drivers/i2c/busses/i2c-img-scb.c
··· 1122 1123 time_left = wait_for_completion_timeout(&i2c->msg_complete, 1124 IMG_I2C_TIMEOUT); 1125 - del_timer_sync(&i2c->check_timer); 1126 1127 if (time_left == 0) 1128 i2c->msg_status = -ETIMEDOUT;
··· 1122 1123 time_left = wait_for_completion_timeout(&i2c->msg_complete, 1124 IMG_I2C_TIMEOUT); 1125 + timer_delete_sync(&i2c->check_timer); 1126 1127 if (time_left == 0) 1128 i2c->msg_status = -ETIMEDOUT;
+2 -2
drivers/iio/common/ssp_sensors/ssp_dev.c
··· 190 191 static void ssp_disable_wdt_timer(struct ssp_data *data) 192 { 193 - del_timer_sync(&data->wdt_timer); 194 cancel_work_sync(&data->work_wdt); 195 } 196 ··· 589 590 free_irq(data->spi->irq, data); 591 592 - del_timer_sync(&data->wdt_timer); 593 cancel_work_sync(&data->work_wdt); 594 595 mutex_destroy(&data->comm_lock);
··· 190 191 static void ssp_disable_wdt_timer(struct ssp_data *data) 192 { 193 + timer_delete_sync(&data->wdt_timer); 194 cancel_work_sync(&data->work_wdt); 195 } 196 ··· 589 590 free_irq(data->spi->irq, data); 591 592 + timer_delete_sync(&data->wdt_timer); 593 cancel_work_sync(&data->work_wdt); 594 595 mutex_destroy(&data->comm_lock);
+1 -1
drivers/infiniband/hw/cxgb4/cm.c
··· 191 static int stop_ep_timer(struct c4iw_ep *ep) 192 { 193 pr_debug("ep %p stopping\n", ep); 194 - del_timer_sync(&ep->timer); 195 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { 196 c4iw_put_ep(&ep->com); 197 return 0;
··· 191 static int stop_ep_timer(struct c4iw_ep *ep) 192 { 193 pr_debug("ep %p stopping\n", ep); 194 + timer_delete_sync(&ep->timer); 195 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { 196 c4iw_put_ep(&ep->com); 197 return 0;
+1 -1
drivers/infiniband/hw/hfi1/aspm.c
··· 191 for (i = 0; i < dd->first_dyn_alloc_ctxt; i++) { 192 rcd = hfi1_rcd_get_by_index(dd, i); 193 if (rcd) { 194 - del_timer_sync(&rcd->aspm_timer); 195 spin_lock_irqsave(&rcd->aspm_lock, flags); 196 rcd->aspm_intr_enable = false; 197 spin_unlock_irqrestore(&rcd->aspm_lock, flags);
··· 191 for (i = 0; i < dd->first_dyn_alloc_ctxt; i++) { 192 rcd = hfi1_rcd_get_by_index(dd, i); 193 if (rcd) { 194 + timer_delete_sync(&rcd->aspm_timer); 195 spin_lock_irqsave(&rcd->aspm_lock, flags); 196 rcd->aspm_intr_enable = false; 197 spin_unlock_irqrestore(&rcd->aspm_lock, flags);
+2 -2
drivers/infiniband/hw/hfi1/chip.c
··· 5576 static void free_rcverr(struct hfi1_devdata *dd) 5577 { 5578 if (dd->rcverr_timer.function) 5579 - del_timer_sync(&dd->rcverr_timer); 5580 } 5581 5582 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg) ··· 12308 int i; 12309 12310 if (dd->synth_stats_timer.function) 12311 - del_timer_sync(&dd->synth_stats_timer); 12312 cancel_work_sync(&dd->update_cntr_work); 12313 ppd = (struct hfi1_pportdata *)(dd + 1); 12314 for (i = 0; i < dd->num_pports; i++, ppd++) {
··· 5576 static void free_rcverr(struct hfi1_devdata *dd) 5577 { 5578 if (dd->rcverr_timer.function) 5579 + timer_delete_sync(&dd->rcverr_timer); 5580 } 5581 5582 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg) ··· 12308 int i; 12309 12310 if (dd->synth_stats_timer.function) 12311 + timer_delete_sync(&dd->synth_stats_timer); 12312 cancel_work_sync(&dd->update_cntr_work); 12313 ppd = (struct hfi1_pportdata *)(dd + 1); 12314 for (i = 0; i < dd->num_pports; i++, ppd++) {
+1 -1
drivers/infiniband/hw/hfi1/driver.c
··· 1303 */ 1304 smp_rmb(); 1305 if (atomic_read(&ppd->led_override_timer_active)) { 1306 - del_timer_sync(&ppd->led_override_timer); 1307 atomic_set(&ppd->led_override_timer_active, 0); 1308 /* Ensure the atomic_set is visible to all CPUs */ 1309 smp_wmb();
··· 1303 */ 1304 smp_rmb(); 1305 if (atomic_read(&ppd->led_override_timer_active)) { 1306 + timer_delete_sync(&ppd->led_override_timer); 1307 atomic_set(&ppd->led_override_timer_active, 0); 1308 /* Ensure the atomic_set is visible to all CPUs */ 1309 smp_wmb();
+1 -1
drivers/infiniband/hw/hfi1/init.c
··· 985 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 986 ppd = dd->pport + pidx; 987 if (ppd->led_override_timer.function) { 988 - del_timer_sync(&ppd->led_override_timer); 989 atomic_set(&ppd->led_override_timer_active, 0); 990 } 991 }
··· 985 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 986 ppd = dd->pport + pidx; 987 if (ppd->led_override_timer.function) { 988 + timer_delete_sync(&ppd->led_override_timer); 989 atomic_set(&ppd->led_override_timer_active, 0); 990 } 991 }
+1 -1
drivers/infiniband/hw/hfi1/sdma.c
··· 1575 sde->this_idx); 1576 sdma_process_event(sde, sdma_event_e00_go_hw_down); 1577 1578 - del_timer_sync(&sde->err_progress_check_timer); 1579 1580 /* 1581 * This waits for the state machine to exit so it is not
··· 1575 sde->this_idx); 1576 sdma_process_event(sde, sdma_event_e00_go_hw_down); 1577 1578 + timer_delete_sync(&sde->err_progress_check_timer); 1579 1580 /* 1581 * This waits for the state machine to exit so it is not
+4 -4
drivers/infiniband/hw/hfi1/tid_rdma.c
··· 3965 3966 lockdep_assert_held(&qp->s_lock); 3967 if (qpriv->s_flags & HFI1_R_TID_RSC_TIMER) { 3968 - rval = del_timer(&qpriv->s_tid_timer); 3969 qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER; 3970 } 3971 return rval; ··· 3975 { 3976 struct hfi1_qp_priv *qpriv = qp->priv; 3977 3978 - del_timer_sync(&qpriv->s_tid_timer); 3979 qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER; 3980 } 3981 ··· 4781 4782 lockdep_assert_held(&qp->s_lock); 4783 if (priv->s_flags & HFI1_S_TID_RETRY_TIMER) { 4784 - rval = del_timer(&priv->s_tid_retry_timer); 4785 priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER; 4786 } 4787 return rval; ··· 4791 { 4792 struct hfi1_qp_priv *priv = qp->priv; 4793 4794 - del_timer_sync(&priv->s_tid_retry_timer); 4795 priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER; 4796 } 4797
··· 3965 3966 lockdep_assert_held(&qp->s_lock); 3967 if (qpriv->s_flags & HFI1_R_TID_RSC_TIMER) { 3968 + rval = timer_delete(&qpriv->s_tid_timer); 3969 qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER; 3970 } 3971 return rval; ··· 3975 { 3976 struct hfi1_qp_priv *qpriv = qp->priv; 3977 3978 + timer_delete_sync(&qpriv->s_tid_timer); 3979 qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER; 3980 } 3981 ··· 4781 4782 lockdep_assert_held(&qp->s_lock); 4783 if (priv->s_flags & HFI1_S_TID_RETRY_TIMER) { 4784 + rval = timer_delete(&priv->s_tid_retry_timer); 4785 priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER; 4786 } 4787 return rval; ··· 4791 { 4792 struct hfi1_qp_priv *priv = qp->priv; 4793 4794 + timer_delete_sync(&priv->s_tid_retry_timer); 4795 priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER; 4796 } 4797
+1 -1
drivers/infiniband/hw/hfi1/verbs.c
··· 1900 if (!list_empty(&dev->memwait)) 1901 dd_dev_err(dd, "memwait list not empty!\n"); 1902 1903 - del_timer_sync(&dev->mem_timer); 1904 verbs_txreq_exit(dev); 1905 1906 kfree(dev_cntr_descs);
··· 1900 if (!list_empty(&dev->memwait)) 1901 dd_dev_err(dd, "memwait list not empty!\n"); 1902 1903 + timer_delete_sync(&dev->mem_timer); 1904 verbs_txreq_exit(dev); 1905 1906 kfree(dev_cntr_descs);
+1 -1
drivers/infiniband/hw/irdma/cm.c
··· 3303 if (!cm_core) 3304 return; 3305 3306 - del_timer_sync(&cm_core->tcp_timer); 3307 3308 destroy_workqueue(cm_core->event_wq); 3309 cm_core->dev->ws_reset(&cm_core->iwdev->vsi);
··· 3303 if (!cm_core) 3304 return; 3305 3306 + timer_delete_sync(&cm_core->tcp_timer); 3307 3308 destroy_workqueue(cm_core->event_wq); 3309 cm_core->dev->ws_reset(&cm_core->iwdev->vsi);
+2 -2
drivers/infiniband/hw/irdma/utils.c
··· 963 int ret; 964 965 iwqp = qp->qp_uk.back_qp; 966 - ret = del_timer(&iwqp->terminate_timer); 967 if (ret) 968 irdma_qp_rem_ref(&iwqp->ibqp); 969 } ··· 1570 { 1571 struct irdma_vsi_pestat *devstat = vsi->pestat; 1572 1573 - del_timer_sync(&devstat->stats_timer); 1574 } 1575 1576 /**
··· 963 int ret; 964 965 iwqp = qp->qp_uk.back_qp; 966 + ret = timer_delete(&iwqp->terminate_timer); 967 if (ret) 968 irdma_qp_rem_ref(&iwqp->ibqp); 969 } ··· 1570 { 1571 struct irdma_vsi_pestat *devstat = vsi->pestat; 1572 1573 + timer_delete_sync(&devstat->stats_timer); 1574 } 1575 1576 /**
+1 -1
drivers/infiniband/hw/mlx5/mr.c
··· 1026 mlx5r_destroy_cache_entries(dev); 1027 1028 destroy_workqueue(dev->cache.wq); 1029 - del_timer_sync(&dev->delay_timer); 1030 } 1031 1032 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
··· 1026 mlx5r_destroy_cache_entries(dev); 1027 1028 destroy_workqueue(dev->cache.wq); 1029 + timer_delete_sync(&dev->delay_timer); 1030 } 1031 1032 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
+1 -1
drivers/infiniband/hw/mthca/mthca_catas.c
··· 171 172 void mthca_stop_catas_poll(struct mthca_dev *dev) 173 { 174 - del_timer_sync(&dev->catas_err.timer); 175 176 if (dev->catas_err.map) 177 iounmap(dev->catas_err.map);
··· 171 172 void mthca_stop_catas_poll(struct mthca_dev *dev) 173 { 174 + timer_delete_sync(&dev->catas_err.timer); 175 176 if (dev->catas_err.map) 177 iounmap(dev->catas_err.map);
+1 -1
drivers/infiniband/hw/qib/qib_driver.c
··· 768 ppd = dd->pport + pidx; 769 if (atomic_read(&ppd->led_override_timer_active)) { 770 /* Need to stop LED timer, _then_ shut off LEDs */ 771 - del_timer_sync(&ppd->led_override_timer); 772 atomic_set(&ppd->led_override_timer_active, 0); 773 } 774
··· 768 ppd = dd->pport + pidx; 769 if (atomic_read(&ppd->led_override_timer_active)) { 770 /* Need to stop LED timer, _then_ shut off LEDs */ 771 + timer_delete_sync(&ppd->led_override_timer); 772 atomic_set(&ppd->led_override_timer_active, 0); 773 } 774
+2 -2
drivers/infiniband/hw/qib/qib_iba7220.c
··· 1656 1657 ppd->cpspec->chase_end = 0; 1658 if (ppd->cpspec->chase_timer.function) /* if initted */ 1659 - del_timer_sync(&ppd->cpspec->chase_timer); 1660 1661 if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta || 1662 ppd->cpspec->ibdeltainprog) { ··· 2605 * wait forpending timer, but don't clear .data (ppd)! 2606 */ 2607 if (ppd->cpspec->chase_timer.expires) { 2608 - del_timer_sync(&ppd->cpspec->chase_timer); 2609 ppd->cpspec->chase_timer.expires = 0; 2610 } 2611 break;
··· 1656 1657 ppd->cpspec->chase_end = 0; 1658 if (ppd->cpspec->chase_timer.function) /* if initted */ 1659 + timer_delete_sync(&ppd->cpspec->chase_timer); 1660 1661 if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta || 1662 ppd->cpspec->ibdeltainprog) { ··· 2605 * wait forpending timer, but don't clear .data (ppd)! 2606 */ 2607 if (ppd->cpspec->chase_timer.expires) { 2608 + timer_delete_sync(&ppd->cpspec->chase_timer); 2609 ppd->cpspec->chase_timer.expires = 0; 2610 } 2611 break;
+2 -2
drivers/infiniband/hw/qib/qib_iba7322.c
··· 2512 2513 ppd->cpspec->chase_end = 0; 2514 if (ppd->cpspec->chase_timer.function) /* if initted */ 2515 - del_timer_sync(&ppd->cpspec->chase_timer); 2516 2517 /* 2518 * Despite the name, actually disables IBC as well. Do it when ··· 4239 * wait forpending timer, but don't clear .data (ppd)! 4240 */ 4241 if (ppd->cpspec->chase_timer.expires) { 4242 - del_timer_sync(&ppd->cpspec->chase_timer); 4243 ppd->cpspec->chase_timer.expires = 0; 4244 } 4245 break;
··· 2512 2513 ppd->cpspec->chase_end = 0; 2514 if (ppd->cpspec->chase_timer.function) /* if initted */ 2515 + timer_delete_sync(&ppd->cpspec->chase_timer); 2516 2517 /* 2518 * Despite the name, actually disables IBC as well. Do it when ··· 4239 * wait forpending timer, but don't clear .data (ppd)! 4240 */ 4241 if (ppd->cpspec->chase_timer.expires) { 4242 + timer_delete_sync(&ppd->cpspec->chase_timer); 4243 ppd->cpspec->chase_timer.expires = 0; 4244 } 4245 break;
+5 -5
drivers/infiniband/hw/qib/qib_init.c
··· 796 int pidx; 797 798 if (dd->stats_timer.function) 799 - del_timer_sync(&dd->stats_timer); 800 if (dd->intrchk_timer.function) 801 - del_timer_sync(&dd->intrchk_timer); 802 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 803 ppd = dd->pport + pidx; 804 if (ppd->hol_timer.function) 805 - del_timer_sync(&ppd->hol_timer); 806 if (ppd->led_override_timer.function) { 807 - del_timer_sync(&ppd->led_override_timer); 808 atomic_set(&ppd->led_override_timer_active, 0); 809 } 810 if (ppd->symerr_clear_timer.function) 811 - del_timer_sync(&ppd->symerr_clear_timer); 812 } 813 } 814
··· 796 int pidx; 797 798 if (dd->stats_timer.function) 799 + timer_delete_sync(&dd->stats_timer); 800 if (dd->intrchk_timer.function) 801 + timer_delete_sync(&dd->intrchk_timer); 802 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 803 ppd = dd->pport + pidx; 804 if (ppd->hol_timer.function) 805 + timer_delete_sync(&ppd->hol_timer); 806 if (ppd->led_override_timer.function) { 807 + timer_delete_sync(&ppd->led_override_timer); 808 atomic_set(&ppd->led_override_timer_active, 0); 809 } 810 if (ppd->symerr_clear_timer.function) 811 + timer_delete_sync(&ppd->symerr_clear_timer); 812 } 813 } 814
+1 -1
drivers/infiniband/hw/qib/qib_mad.c
··· 2441 struct qib_devdata, verbs_dev); 2442 2443 if (dd->pport[port_idx].cong_stats.timer.function) 2444 - del_timer_sync(&dd->pport[port_idx].cong_stats.timer); 2445 2446 if (dd->pport[port_idx].ibport_data.smi_ah) 2447 rdma_destroy_ah(&dd->pport[port_idx].ibport_data.smi_ah->ibah,
··· 2441 struct qib_devdata, verbs_dev); 2442 2443 if (dd->pport[port_idx].cong_stats.timer.function) 2444 + timer_delete_sync(&dd->pport[port_idx].cong_stats.timer); 2445 2446 if (dd->pport[port_idx].ibport_data.smi_ah) 2447 rdma_destroy_ah(&dd->pport[port_idx].ibport_data.smi_ah->ibah,
+1 -1
drivers/infiniband/hw/qib/qib_sd7220.c
··· 1375 void shutdown_7220_relock_poll(struct qib_devdata *dd) 1376 { 1377 if (dd->cspec->relock_timer_active) 1378 - del_timer_sync(&dd->cspec->relock_timer); 1379 } 1380 1381 static unsigned qib_relock_by_timer = 1;
··· 1375 void shutdown_7220_relock_poll(struct qib_devdata *dd) 1376 { 1377 if (dd->cspec->relock_timer_active) 1378 + timer_delete_sync(&dd->cspec->relock_timer); 1379 } 1380 1381 static unsigned qib_relock_by_timer = 1;
+1 -1
drivers/infiniband/hw/qib/qib_verbs.c
··· 1655 if (!list_empty(&dev->memwait)) 1656 qib_dev_err(dd, "memwait list not empty!\n"); 1657 1658 - del_timer_sync(&dev->mem_timer); 1659 while (!list_empty(&dev->txreq_free)) { 1660 struct list_head *l = dev->txreq_free.next; 1661 struct qib_verbs_txreq *tx;
··· 1655 if (!list_empty(&dev->memwait)) 1656 qib_dev_err(dd, "memwait list not empty!\n"); 1657 1658 + timer_delete_sync(&dev->mem_timer); 1659 while (!list_empty(&dev->txreq_free)) { 1660 struct list_head *l = dev->txreq_free.next; 1661 struct qib_verbs_txreq *tx;
+4 -4
drivers/infiniband/sw/rdmavt/qp.c
··· 1297 1298 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) { 1299 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR); 1300 - del_timer(&qp->s_timer); 1301 } 1302 1303 if (qp->s_flags & RVT_S_ANY_WAIT_SEND) ··· 2546 /* Remove QP from all timers */ 2547 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) { 2548 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR); 2549 - del_timer(&qp->s_timer); 2550 hrtimer_try_to_cancel(&qp->s_rnr_timer); 2551 } 2552 } ··· 2575 */ 2576 void rvt_del_timers_sync(struct rvt_qp *qp) 2577 { 2578 - del_timer_sync(&qp->s_timer); 2579 hrtimer_cancel(&qp->s_rnr_timer); 2580 } 2581 EXPORT_SYMBOL(rvt_del_timers_sync); ··· 2596 2597 qp->s_flags &= ~RVT_S_TIMER; 2598 rvp->n_rc_timeouts++; 2599 - del_timer(&qp->s_timer); 2600 trace_rvt_rc_timeout(qp, qp->s_last_psn + 1); 2601 if (rdi->driver_f.notify_restart_rc) 2602 rdi->driver_f.notify_restart_rc(qp,
··· 1297 1298 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) { 1299 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR); 1300 + timer_delete(&qp->s_timer); 1301 } 1302 1303 if (qp->s_flags & RVT_S_ANY_WAIT_SEND) ··· 2546 /* Remove QP from all timers */ 2547 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) { 2548 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR); 2549 + timer_delete(&qp->s_timer); 2550 hrtimer_try_to_cancel(&qp->s_rnr_timer); 2551 } 2552 } ··· 2575 */ 2576 void rvt_del_timers_sync(struct rvt_qp *qp) 2577 { 2578 + timer_delete_sync(&qp->s_timer); 2579 hrtimer_cancel(&qp->s_rnr_timer); 2580 } 2581 EXPORT_SYMBOL(rvt_del_timers_sync); ··· 2596 2597 qp->s_flags &= ~RVT_S_TIMER; 2598 rvp->n_rc_timeouts++; 2599 + timer_delete(&qp->s_timer); 2600 trace_rvt_rc_timeout(qp, qp->s_last_psn + 1); 2601 if (rdi->driver_f.notify_restart_rc) 2602 rdi->driver_f.notify_restart_rc(qp,
+2 -2
drivers/infiniband/sw/rxe/rxe_qp.c
··· 812 qp->qp_timeout_jiffies = 0; 813 814 if (qp_type(qp) == IB_QPT_RC) { 815 - del_timer_sync(&qp->retrans_timer); 816 - del_timer_sync(&qp->rnr_nak_timer); 817 } 818 819 if (qp->recv_task.func)
··· 812 qp->qp_timeout_jiffies = 0; 813 814 if (qp_type(qp) == IB_QPT_RC) { 815 + timer_delete_sync(&qp->retrans_timer); 816 + timer_delete_sync(&qp->rnr_nak_timer); 817 } 818 819 if (qp->recv_task.func)
+2 -2
drivers/input/ff-memless.c
··· 136 137 if (!events) { 138 pr_debug("no actions\n"); 139 - del_timer(&ml->timer); 140 } else { 141 pr_debug("timer set\n"); 142 mod_timer(&ml->timer, earliest); ··· 489 * do not actually stop the timer, and therefore we should 490 * do it here. 491 */ 492 - del_timer_sync(&ml->timer); 493 494 kfree(ml->private); 495 }
··· 136 137 if (!events) { 138 pr_debug("no actions\n"); 139 + timer_delete(&ml->timer); 140 } else { 141 pr_debug("timer set\n"); 142 mod_timer(&ml->timer, earliest); ··· 489 * do not actually stop the timer, and therefore we should 490 * do it here. 491 */ 492 + timer_delete_sync(&ml->timer); 493 494 kfree(ml->private); 495 }
+2 -2
drivers/input/gameport/gameport.c
··· 191 spin_lock(&gameport->timer_lock); 192 193 if (!--gameport->poll_cnt) 194 - del_timer(&gameport->poll_timer); 195 196 spin_unlock(&gameport->timer_lock); 197 } ··· 847 848 void gameport_close(struct gameport *gameport) 849 { 850 - del_timer_sync(&gameport->poll_timer); 851 gameport->poll_handler = NULL; 852 gameport->poll_interval = 0; 853 gameport_set_drv(gameport, NULL);
··· 191 spin_lock(&gameport->timer_lock); 192 193 if (!--gameport->poll_cnt) 194 + timer_delete(&gameport->poll_timer); 195 196 spin_unlock(&gameport->timer_lock); 197 } ··· 847 848 void gameport_close(struct gameport *gameport) 849 { 850 + timer_delete_sync(&gameport->poll_timer); 851 gameport->poll_handler = NULL; 852 gameport->poll_interval = 0; 853 gameport_set_drv(gameport, NULL);
+2 -2
drivers/input/input.c
··· 96 97 static void input_stop_autorepeat(struct input_dev *dev) 98 { 99 - del_timer(&dev->timer); 100 } 101 102 /* ··· 2223 handle->handler->disconnect(handle); 2224 WARN_ON(!list_empty(&dev->h_list)); 2225 2226 - del_timer_sync(&dev->timer); 2227 list_del_init(&dev->node); 2228 2229 input_wakeup_procfs_readers();
··· 96 97 static void input_stop_autorepeat(struct input_dev *dev) 98 { 99 + timer_delete(&dev->timer); 100 } 101 102 /* ··· 2223 handle->handler->disconnect(handle); 2224 WARN_ON(!list_empty(&dev->h_list)); 2225 2226 + timer_delete_sync(&dev->timer); 2227 list_del_init(&dev->node); 2228 2229 input_wakeup_procfs_readers();
+1 -1
drivers/input/joystick/db9.c
··· 531 guard(mutex)(&db9->mutex); 532 533 if (!--db9->used) { 534 - del_timer_sync(&db9->timer); 535 parport_write_control(port, 0x00); 536 parport_data_forward(port); 537 parport_release(db9->pd);
··· 531 guard(mutex)(&db9->mutex); 532 533 if (!--db9->used) { 534 + timer_delete_sync(&db9->timer); 535 parport_write_control(port, 0x00); 536 parport_data_forward(port); 537 parport_release(db9->pd);
+1 -1
drivers/input/joystick/gamecon.c
··· 786 guard(mutex)(&gc->mutex); 787 788 if (!--gc->used) { 789 - del_timer_sync(&gc->timer); 790 parport_write_control(gc->pd->port, 0x00); 791 parport_release(gc->pd); 792 }
··· 786 guard(mutex)(&gc->mutex); 787 788 if (!--gc->used) { 789 + timer_delete_sync(&gc->timer); 790 parport_write_control(gc->pd->port, 0x00); 791 parport_release(gc->pd); 792 }
+1 -1
drivers/input/joystick/n64joy.c
··· 216 guard(mutex)(&priv->n64joy_mutex); 217 218 if (!--priv->n64joy_opened) 219 - del_timer_sync(&priv->timer); 220 } 221 222 static const u64 __initconst scandata[] ____cacheline_aligned = {
··· 216 guard(mutex)(&priv->n64joy_mutex); 217 218 if (!--priv->n64joy_opened) 219 + timer_delete_sync(&priv->timer); 220 } 221 222 static const u64 __initconst scandata[] ____cacheline_aligned = {
+1 -1
drivers/input/joystick/turbografx.c
··· 124 guard(mutex)(&tgfx->sem); 125 126 if (!--tgfx->used) { 127 - del_timer_sync(&tgfx->timer); 128 parport_write_control(tgfx->pd->port, 0x00); 129 parport_release(tgfx->pd); 130 }
··· 124 guard(mutex)(&tgfx->sem); 125 126 if (!--tgfx->used) { 127 + timer_delete_sync(&tgfx->timer); 128 parport_write_control(tgfx->pd->port, 0x00); 129 parport_release(tgfx->pd); 130 }
+4 -4
drivers/input/keyboard/gpio_keys.c
··· 590 591 INIT_DELAYED_WORK(&bdata->work, gpio_keys_gpio_work_func); 592 593 - hrtimer_setup(&bdata->debounce_timer, gpio_keys_debounce_timer, CLOCK_REALTIME, 594 - HRTIMER_MODE_REL); 595 596 isr = gpio_keys_gpio_isr; 597 irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; ··· 627 } 628 629 bdata->release_delay = button->debounce_interval; 630 - hrtimer_setup(&bdata->release_timer, gpio_keys_irq_timer, CLOCK_REALTIME, 631 - HRTIMER_MODE_REL_HARD); 632 633 isr = gpio_keys_irq_isr; 634 irqflags = 0;
··· 590 591 INIT_DELAYED_WORK(&bdata->work, gpio_keys_gpio_work_func); 592 593 + hrtimer_setup(&bdata->debounce_timer, gpio_keys_debounce_timer, 594 + CLOCK_REALTIME, HRTIMER_MODE_REL); 595 596 isr = gpio_keys_gpio_isr; 597 irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; ··· 627 } 628 629 bdata->release_delay = button->debounce_interval; 630 + hrtimer_setup(&bdata->release_timer, gpio_keys_irq_timer, 631 + CLOCK_REALTIME, HRTIMER_MODE_REL_HARD); 632 633 isr = gpio_keys_irq_isr; 634 irqflags = 0;
+1 -1
drivers/input/keyboard/imx_keypad.c
··· 370 /* Mark keypad as being inactive */ 371 keypad->enabled = false; 372 synchronize_irq(keypad->irq); 373 - del_timer_sync(&keypad->check_matrix_timer); 374 375 imx_keypad_inhibit(keypad); 376
··· 370 /* Mark keypad as being inactive */ 371 keypad->enabled = false; 372 synchronize_irq(keypad->irq); 373 + timer_delete_sync(&keypad->check_matrix_timer); 374 375 imx_keypad_inhibit(keypad); 376
+1 -1
drivers/input/keyboard/snvs_pwrkey.c
··· 104 { 105 struct pwrkey_drv_data *pd = pdata; 106 107 - del_timer_sync(&pd->check_timer); 108 } 109 110 static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
··· 104 { 105 struct pwrkey_drv_data *pd = pdata; 106 107 + timer_delete_sync(&pd->check_timer); 108 } 109 110 static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
+2 -2
drivers/input/keyboard/tegra-kbc.c
··· 416 } 417 418 disable_irq(kbc->irq); 419 - del_timer_sync(&kbc->timer); 420 421 clk_disable_unprepare(kbc->clk); 422 } ··· 703 704 if (device_may_wakeup(&pdev->dev)) { 705 disable_irq(kbc->irq); 706 - del_timer_sync(&kbc->timer); 707 tegra_kbc_set_fifo_interrupt(kbc, false); 708 709 /* Forcefully clear the interrupt status */
··· 416 } 417 418 disable_irq(kbc->irq); 419 + timer_delete_sync(&kbc->timer); 420 421 clk_disable_unprepare(kbc->clk); 422 } ··· 703 704 if (device_may_wakeup(&pdev->dev)) { 705 disable_irq(kbc->irq); 706 + timer_delete_sync(&kbc->timer); 707 tegra_kbc_set_fifo_interrupt(kbc, false); 708 709 /* Forcefully clear the interrupt status */
+1 -1
drivers/input/mouse/alps.c
··· 1519 return PSMOUSE_GOOD_DATA; 1520 } 1521 1522 - del_timer(&priv->timer); 1523 1524 if (psmouse->packet[6] & 0x80) { 1525
··· 1519 return PSMOUSE_GOOD_DATA; 1520 } 1521 1522 + timer_delete(&priv->timer); 1523 1524 if (psmouse->packet[6] & 0x80) { 1525
+1 -1
drivers/input/mouse/byd.c
··· 425 struct byd_data *priv = psmouse->private; 426 427 if (priv) { 428 - del_timer(&priv->timer); 429 kfree(psmouse->private); 430 psmouse->private = NULL; 431 }
··· 425 struct byd_data *priv = psmouse->private; 426 427 if (priv) { 428 + timer_delete(&priv->timer); 429 kfree(psmouse->private); 430 psmouse->private = NULL; 431 }
+1 -1
drivers/input/serio/hil_mlc.c
··· 1017 1018 static void __exit hil_mlc_exit(void) 1019 { 1020 - del_timer_sync(&hil_mlcs_kicker); 1021 tasklet_kill(&hil_mlcs_tasklet); 1022 } 1023
··· 1017 1018 static void __exit hil_mlc_exit(void) 1019 { 1020 + timer_delete_sync(&hil_mlcs_kicker); 1021 tasklet_kill(&hil_mlcs_tasklet); 1022 } 1023
+1 -1
drivers/input/serio/hp_sdc.c
··· 980 free_irq(hp_sdc.irq, &hp_sdc); 981 write_unlock_irq(&hp_sdc.lock); 982 983 - del_timer_sync(&hp_sdc.kicker); 984 985 tasklet_kill(&hp_sdc.task); 986
··· 980 free_irq(hp_sdc.irq, &hp_sdc); 981 write_unlock_irq(&hp_sdc.lock); 982 983 + timer_delete_sync(&hp_sdc.kicker); 984 985 tasklet_kill(&hp_sdc.task); 986
+1 -1
drivers/input/touchscreen/ad7877.c
··· 415 ts->disabled = true; 416 disable_irq(ts->spi->irq); 417 418 - if (del_timer_sync(&ts->timer)) 419 ad7877_ts_event_release(ts); 420 } 421
··· 415 ts->disabled = true; 416 disable_irq(ts->spi->irq); 417 418 + if (timer_delete_sync(&ts->timer)) 419 ad7877_ts_event_release(ts); 420 } 421
+1 -1
drivers/input/touchscreen/ad7879.c
··· 273 AD7879_PM(AD7879_PM_SHUTDOWN); 274 disable_irq(ts->irq); 275 276 - if (del_timer_sync(&ts->timer)) 277 ad7879_ts_event_release(ts); 278 279 ad7879_write(ts, AD7879_REG_CTRL2, reg);
··· 273 AD7879_PM(AD7879_PM_SHUTDOWN); 274 disable_irq(ts->irq); 275 276 + if (timer_delete_sync(&ts->timer)) 277 ad7879_ts_event_release(ts); 278 279 ad7879_write(ts, AD7879_REG_CTRL2, reg);
+1 -1
drivers/input/touchscreen/bu21029_ts.c
··· 325 struct bu21029_ts_data *bu21029 = input_get_drvdata(dev); 326 327 disable_irq(bu21029->client->irq); 328 - del_timer_sync(&bu21029->timer); 329 330 bu21029_put_chip_in_reset(bu21029); 331 regulator_disable(bu21029->vdd);
··· 325 struct bu21029_ts_data *bu21029 = input_get_drvdata(dev); 326 327 disable_irq(bu21029->client->irq); 328 + timer_delete_sync(&bu21029->timer); 329 330 bu21029_put_chip_in_reset(bu21029); 331 regulator_disable(bu21029->vdd);
+1 -1
drivers/input/touchscreen/exc3000.c
··· 174 /* 175 * We read full state successfully, no contacts will be "stuck". 176 */ 177 - del_timer_sync(&data->timer); 178 179 while (total_slots > 0) { 180 int slots = min(total_slots, EXC3000_SLOTS_PER_FRAME);
··· 174 /* 175 * We read full state successfully, no contacts will be "stuck". 176 */ 177 + timer_delete_sync(&data->timer); 178 179 while (total_slots > 0) { 180 int slots = min(total_slots, EXC3000_SLOTS_PER_FRAME);
+1 -1
drivers/input/touchscreen/sx8654.c
··· 290 disable_irq(client->irq); 291 292 if (!sx8654->data->has_irq_penrelease) 293 - del_timer_sync(&sx8654->timer); 294 295 /* enable manual mode mode */ 296 error = i2c_smbus_write_byte(client, sx8654->data->cmd_manual);
··· 290 disable_irq(client->irq); 291 292 if (!sx8654->data->has_irq_penrelease) 293 + timer_delete_sync(&sx8654->timer); 294 295 /* enable manual mode mode */ 296 error = i2c_smbus_write_byte(client, sx8654->data->cmd_manual);
+2 -2
drivers/input/touchscreen/tsc200x-core.c
··· 229 230 guard(disable_irq)(&ts->irq); 231 232 - del_timer_sync(&ts->penup_timer); 233 cancel_delayed_work_sync(&ts->esd_work); 234 } 235 ··· 388 dev_info(ts->dev, "TSC200X not responding - resetting\n"); 389 390 scoped_guard(disable_irq, &ts->irq) { 391 - del_timer_sync(&ts->penup_timer); 392 tsc200x_update_pen_state(ts, 0, 0, 0); 393 tsc200x_reset(ts); 394 }
··· 229 230 guard(disable_irq)(&ts->irq); 231 232 + timer_delete_sync(&ts->penup_timer); 233 cancel_delayed_work_sync(&ts->esd_work); 234 } 235 ··· 388 dev_info(ts->dev, "TSC200X not responding - resetting\n"); 389 390 scoped_guard(disable_irq, &ts->irq) { 391 + timer_delete_sync(&ts->penup_timer); 392 tsc200x_update_pen_state(ts, 0, 0, 0); 393 tsc200x_reset(ts); 394 }
+1 -1
drivers/iommu/dma-iommu.c
··· 271 if (!cookie->fq_domain) 272 return; 273 274 - del_timer_sync(&cookie->fq_timer); 275 if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) 276 iommu_dma_free_fq_single(cookie->single_fq); 277 else
··· 271 if (!cookie->fq_domain) 272 return; 273 274 + timer_delete_sync(&cookie->fq_timer); 275 if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) 276 iommu_dma_free_fq_single(cookie->single_fq); 277 else
+3 -3
drivers/isdn/hardware/mISDN/hfcmulti.c
··· 3249 } 3250 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); 3251 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags)) 3252 - del_timer(&dch->timer); 3253 spin_unlock_irqrestore(&hc->lock, flags); 3254 __skb_queue_purge(&free_queue); 3255 break; ··· 3394 } 3395 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); 3396 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags)) 3397 - del_timer(&dch->timer); 3398 #ifdef FIXME 3399 if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags)) 3400 dchannel_sched_event(&hc->dch, D_CLEARBUSY); ··· 4522 spin_lock_irqsave(&hc->lock, flags); 4523 4524 if (dch->timer.function) { 4525 - del_timer(&dch->timer); 4526 dch->timer.function = NULL; 4527 } 4528
··· 3249 } 3250 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); 3251 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags)) 3252 + timer_delete(&dch->timer); 3253 spin_unlock_irqrestore(&hc->lock, flags); 3254 __skb_queue_purge(&free_queue); 3255 break; ··· 3394 } 3395 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); 3396 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags)) 3397 + timer_delete(&dch->timer); 3398 #ifdef FIXME 3399 if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags)) 3400 dchannel_sched_event(&hc->dch, D_CLEARBUSY); ··· 4522 spin_lock_irqsave(&hc->lock, flags); 4523 4524 if (dch->timer.function) { 4525 + timer_delete(&dch->timer); 4526 dch->timer.function = NULL; 4527 } 4528
+7 -7
drivers/isdn/hardware/mISDN/hfcpci.c
··· 158 { 159 /* disable memory mapped ports + busmaster */ 160 pci_write_config_word(hc->pdev, PCI_COMMAND, 0); 161 - del_timer(&hc->hw.timer); 162 dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos, 163 hc->hw.dmahandle); 164 iounmap(hc->hw.pci_io); ··· 1087 } 1088 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); 1089 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags)) 1090 - del_timer(&dch->timer); 1091 break; 1092 case HW_POWERUP_REQ: 1093 Write_hfc(hc, HFCPCI_STATES, HFCPCI_DO_ACTION); ··· 1216 receive_dmsg(hc); 1217 if (val & 0x04) { /* D tx */ 1218 if (test_and_clear_bit(FLG_BUSY_TIMER, &hc->dch.Flags)) 1219 - del_timer(&hc->dch.timer); 1220 tx_dirq(&hc->dch); 1221 } 1222 spin_unlock(&hc->lock); ··· 1635 } 1636 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); 1637 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags)) 1638 - del_timer(&dch->timer); 1639 #ifdef FIXME 1640 if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags)) 1641 dchannel_sched_event(&hc->dch, D_CLEARBUSY); ··· 2064 mode_hfcpci(&hc->bch[0], 1, ISDN_P_NONE); 2065 mode_hfcpci(&hc->bch[1], 2, ISDN_P_NONE); 2066 if (hc->dch.timer.function != NULL) { 2067 - del_timer(&hc->dch.timer); 2068 hc->dch.timer.function = NULL; 2069 } 2070 spin_unlock_irqrestore(&hc->lock, flags); ··· 2342 err = pci_register_driver(&hfc_driver); 2343 if (err) { 2344 if (timer_pending(&hfc_tl)) 2345 - del_timer(&hfc_tl); 2346 } 2347 2348 return err; ··· 2351 static void __exit 2352 HFC_cleanup(void) 2353 { 2354 - del_timer_sync(&hfc_tl); 2355 2356 pci_unregister_driver(&hfc_driver); 2357 }
··· 158 { 159 /* disable memory mapped ports + busmaster */ 160 pci_write_config_word(hc->pdev, PCI_COMMAND, 0); 161 + timer_delete(&hc->hw.timer); 162 dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos, 163 hc->hw.dmahandle); 164 iounmap(hc->hw.pci_io); ··· 1087 } 1088 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); 1089 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags)) 1090 + timer_delete(&dch->timer); 1091 break; 1092 case HW_POWERUP_REQ: 1093 Write_hfc(hc, HFCPCI_STATES, HFCPCI_DO_ACTION); ··· 1216 receive_dmsg(hc); 1217 if (val & 0x04) { /* D tx */ 1218 if (test_and_clear_bit(FLG_BUSY_TIMER, &hc->dch.Flags)) 1219 + timer_delete(&hc->dch.timer); 1220 tx_dirq(&hc->dch); 1221 } 1222 spin_unlock(&hc->lock); ··· 1635 } 1636 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); 1637 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags)) 1638 + timer_delete(&dch->timer); 1639 #ifdef FIXME 1640 if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags)) 1641 dchannel_sched_event(&hc->dch, D_CLEARBUSY); ··· 2064 mode_hfcpci(&hc->bch[0], 1, ISDN_P_NONE); 2065 mode_hfcpci(&hc->bch[1], 2, ISDN_P_NONE); 2066 if (hc->dch.timer.function != NULL) { 2067 + timer_delete(&hc->dch.timer); 2068 hc->dch.timer.function = NULL; 2069 } 2070 spin_unlock_irqrestore(&hc->lock, flags); ··· 2342 err = pci_register_driver(&hfc_driver); 2343 if (err) { 2344 if (timer_pending(&hfc_tl)) 2345 + timer_delete(&hfc_tl); 2346 } 2347 2348 return err; ··· 2351 static void __exit 2352 HFC_cleanup(void) 2353 { 2354 + timer_delete_sync(&hfc_tl); 2355 2356 pci_unregister_driver(&hfc_driver); 2357 }
+5 -5
drivers/isdn/hardware/mISDN/mISDNipac.c
··· 158 WriteISAC(isac, ISAC_CMDR, more ? 0x8 : 0xa); 159 if (test_and_set_bit(FLG_BUSY_TIMER, &isac->dch.Flags)) { 160 pr_debug("%s: %s dbusytimer running\n", isac->name, __func__); 161 - del_timer(&isac->dch.timer); 162 } 163 isac->dch.timer.expires = jiffies + ((DBUSY_TIMER_VALUE * HZ)/1000); 164 add_timer(&isac->dch.timer); ··· 206 isac_xpr_irq(struct isac_hw *isac) 207 { 208 if (test_and_clear_bit(FLG_BUSY_TIMER, &isac->dch.Flags)) 209 - del_timer(&isac->dch.timer); 210 if (isac->dch.tx_skb && isac->dch.tx_idx < isac->dch.tx_skb->len) { 211 isac_fill_fifo(isac); 212 } else { ··· 220 isac_retransmit(struct isac_hw *isac) 221 { 222 if (test_and_clear_bit(FLG_BUSY_TIMER, &isac->dch.Flags)) 223 - del_timer(&isac->dch.timer); 224 if (test_bit(FLG_TX_BUSY, &isac->dch.Flags)) { 225 /* Restart frame */ 226 isac->dch.tx_idx = 0; ··· 665 } 666 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); 667 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags)) 668 - del_timer(&dch->timer); 669 break; 670 case HW_POWERUP_REQ: 671 spin_lock_irqsave(isac->hwlock, flags); ··· 698 else if (isac->type != 0) 699 WriteISAC(isac, ISAC_MASK, 0xff); 700 if (isac->dch.timer.function != NULL) { 701 - del_timer(&isac->dch.timer); 702 isac->dch.timer.function = NULL; 703 } 704 kfree(isac->mon_rx);
··· 158 WriteISAC(isac, ISAC_CMDR, more ? 0x8 : 0xa); 159 if (test_and_set_bit(FLG_BUSY_TIMER, &isac->dch.Flags)) { 160 pr_debug("%s: %s dbusytimer running\n", isac->name, __func__); 161 + timer_delete(&isac->dch.timer); 162 } 163 isac->dch.timer.expires = jiffies + ((DBUSY_TIMER_VALUE * HZ)/1000); 164 add_timer(&isac->dch.timer); ··· 206 isac_xpr_irq(struct isac_hw *isac) 207 { 208 if (test_and_clear_bit(FLG_BUSY_TIMER, &isac->dch.Flags)) 209 + timer_delete(&isac->dch.timer); 210 if (isac->dch.tx_skb && isac->dch.tx_idx < isac->dch.tx_skb->len) { 211 isac_fill_fifo(isac); 212 } else { ··· 220 isac_retransmit(struct isac_hw *isac) 221 { 222 if (test_and_clear_bit(FLG_BUSY_TIMER, &isac->dch.Flags)) 223 + timer_delete(&isac->dch.timer); 224 if (test_bit(FLG_TX_BUSY, &isac->dch.Flags)) { 225 /* Restart frame */ 226 isac->dch.tx_idx = 0; ··· 665 } 666 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); 667 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags)) 668 + timer_delete(&dch->timer); 669 break; 670 case HW_POWERUP_REQ: 671 spin_lock_irqsave(isac->hwlock, flags); ··· 698 else if (isac->type != 0) 699 WriteISAC(isac, ISAC_MASK, 0xff); 700 if (isac->dch.timer.function != NULL) { 701 + timer_delete(&isac->dch.timer); 702 isac->dch.timer.function = NULL; 703 } 704 kfree(isac->mon_rx);
+3 -3
drivers/isdn/hardware/mISDN/mISDNisar.c
··· 930 /* 1s (200 ms) Flags before data */ 931 if (test_and_set_bit(FLG_FTI_RUN, 932 &ch->bch.Flags)) 933 - del_timer(&ch->ftimer); 934 ch->ftimer.expires = 935 jiffies + ((delay * HZ) / 1000); 936 test_and_set_bit(FLG_LL_CONN, ··· 1603 { 1604 modeisar(&isar->ch[0], ISDN_P_NONE); 1605 modeisar(&isar->ch[1], ISDN_P_NONE); 1606 - del_timer(&isar->ch[0].ftimer); 1607 - del_timer(&isar->ch[1].ftimer); 1608 test_and_clear_bit(FLG_INITIALIZED, &isar->ch[0].bch.Flags); 1609 test_and_clear_bit(FLG_INITIALIZED, &isar->ch[1].bch.Flags); 1610 }
··· 930 /* 1s (200 ms) Flags before data */ 931 if (test_and_set_bit(FLG_FTI_RUN, 932 &ch->bch.Flags)) 933 + timer_delete(&ch->ftimer); 934 ch->ftimer.expires = 935 jiffies + ((delay * HZ) / 1000); 936 test_and_set_bit(FLG_LL_CONN, ··· 1603 { 1604 modeisar(&isar->ch[0], ISDN_P_NONE); 1605 modeisar(&isar->ch[1], ISDN_P_NONE); 1606 + timer_delete(&isar->ch[0].ftimer); 1607 + timer_delete(&isar->ch[1].ftimer); 1608 test_and_clear_bit(FLG_INITIALIZED, &isar->ch[0].bch.Flags); 1609 test_and_clear_bit(FLG_INITIALIZED, &isar->ch[1].bch.Flags); 1610 }
+4 -4
drivers/isdn/hardware/mISDN/w6692.c
··· 294 WriteW6692(card, W_D_CMDR, cmd); 295 if (test_and_set_bit(FLG_BUSY_TIMER, &dch->Flags)) { 296 pr_debug("%s: fill_Dfifo dbusytimer running\n", card->name); 297 - del_timer(&dch->timer); 298 } 299 dch->timer.expires = jiffies + ((DBUSY_TIMER_VALUE * HZ) / 1000); 300 add_timer(&dch->timer); ··· 311 struct dchannel *dch = &card->dch; 312 313 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags)) 314 - del_timer(&dch->timer); 315 #ifdef FIXME 316 if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags)) 317 dchannel_sched_event(dch, D_CLEARBUSY); ··· 372 static void 373 handle_txD(struct w6692_hw *card) { 374 if (test_and_clear_bit(FLG_BUSY_TIMER, &card->dch.Flags)) 375 - del_timer(&card->dch.timer); 376 if (card->dch.tx_skb && card->dch.tx_idx < card->dch.tx_skb->len) { 377 W6692_fill_Dfifo(card); 378 } else { ··· 1130 } 1131 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); 1132 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags)) 1133 - del_timer(&dch->timer); 1134 break; 1135 case HW_POWERUP_REQ: 1136 spin_lock_irqsave(&card->lock, flags);
··· 294 WriteW6692(card, W_D_CMDR, cmd); 295 if (test_and_set_bit(FLG_BUSY_TIMER, &dch->Flags)) { 296 pr_debug("%s: fill_Dfifo dbusytimer running\n", card->name); 297 + timer_delete(&dch->timer); 298 } 299 dch->timer.expires = jiffies + ((DBUSY_TIMER_VALUE * HZ) / 1000); 300 add_timer(&dch->timer); ··· 311 struct dchannel *dch = &card->dch; 312 313 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags)) 314 + timer_delete(&dch->timer); 315 #ifdef FIXME 316 if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags)) 317 dchannel_sched_event(dch, D_CLEARBUSY); ··· 372 static void 373 handle_txD(struct w6692_hw *card) { 374 if (test_and_clear_bit(FLG_BUSY_TIMER, &card->dch.Flags)) 375 + timer_delete(&card->dch.timer); 376 if (card->dch.tx_skb && card->dch.tx_idx < card->dch.tx_skb->len) { 377 W6692_fill_Dfifo(card); 378 } else { ··· 1130 } 1131 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); 1132 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags)) 1133 + timer_delete(&dch->timer); 1134 break; 1135 case HW_POWERUP_REQ: 1136 spin_lock_irqsave(&card->lock, flags);
+3 -3
drivers/isdn/mISDN/dsp_core.c
··· 928 dsp->tone.hardware = 0; 929 dsp->tone.software = 0; 930 if (timer_pending(&dsp->tone.tl)) 931 - del_timer(&dsp->tone.tl); 932 if (dsp->conf) 933 dsp_cmx_conf(dsp, 0); /* dsp_cmx_hardware will also be 934 called here */ ··· 975 cancel_work_sync(&dsp->workq); 976 spin_lock_irqsave(&dsp_lock, flags); 977 if (timer_pending(&dsp->tone.tl)) 978 - del_timer(&dsp->tone.tl); 979 skb_queue_purge(&dsp->sendq); 980 if (dsp_debug & DEBUG_DSP_CTRL) 981 printk(KERN_DEBUG "%s: releasing member %s\n", ··· 1209 { 1210 mISDN_unregister_Bprotocol(&DSP); 1211 1212 - del_timer_sync(&dsp_spl_tl); 1213 1214 if (!list_empty(&dsp_ilist)) { 1215 printk(KERN_ERR "mISDN_dsp: Audio DSP object inst list not "
··· 928 dsp->tone.hardware = 0; 929 dsp->tone.software = 0; 930 if (timer_pending(&dsp->tone.tl)) 931 + timer_delete(&dsp->tone.tl); 932 if (dsp->conf) 933 dsp_cmx_conf(dsp, 0); /* dsp_cmx_hardware will also be 934 called here */ ··· 975 cancel_work_sync(&dsp->workq); 976 spin_lock_irqsave(&dsp_lock, flags); 977 if (timer_pending(&dsp->tone.tl)) 978 + timer_delete(&dsp->tone.tl); 979 skb_queue_purge(&dsp->sendq); 980 if (dsp_debug & DEBUG_DSP_CTRL) 981 printk(KERN_DEBUG "%s: releasing member %s\n", ··· 1209 { 1210 mISDN_unregister_Bprotocol(&DSP); 1211 1212 + timer_delete_sync(&dsp_spl_tl); 1213 1214 if (!list_empty(&dsp_ilist)) { 1215 printk(KERN_ERR "mISDN_dsp: Audio DSP object inst list not "
+2 -2
drivers/isdn/mISDN/dsp_tones.c
··· 505 /* we turn off the tone */ 506 if (!tone) { 507 if (dsp->features.hfc_loops && timer_pending(&tonet->tl)) 508 - del_timer(&tonet->tl); 509 if (dsp->features.hfc_loops) 510 dsp_tone_hw_message(dsp, NULL, 0); 511 tonet->tone = 0; ··· 539 dsp_tone_hw_message(dsp, pat->data[0], *(pat->siz[0])); 540 /* set timer */ 541 if (timer_pending(&tonet->tl)) 542 - del_timer(&tonet->tl); 543 tonet->tl.expires = jiffies + (pat->seq[0] * HZ) / 8000; 544 add_timer(&tonet->tl); 545 } else {
··· 505 /* we turn off the tone */ 506 if (!tone) { 507 if (dsp->features.hfc_loops && timer_pending(&tonet->tl)) 508 + timer_delete(&tonet->tl); 509 if (dsp->features.hfc_loops) 510 dsp_tone_hw_message(dsp, NULL, 0); 511 tonet->tone = 0; ··· 539 dsp_tone_hw_message(dsp, pat->data[0], *(pat->siz[0])); 540 /* set timer */ 541 if (timer_pending(&tonet->tl)) 542 + timer_delete(&tonet->tl); 543 tonet->tl.expires = jiffies + (pat->seq[0] * HZ) / 8000; 544 add_timer(&tonet->tl); 545 } else {
+2 -2
drivers/isdn/mISDN/fsm.c
··· 123 ft->fi->printdebug(ft->fi, "mISDN_FsmDelTimer %lx %d", 124 (long) ft, where); 125 #endif 126 - del_timer(&ft->tl); 127 } 128 EXPORT_SYMBOL(mISDN_FsmDelTimer); 129 ··· 167 #endif 168 169 if (timer_pending(&ft->tl)) 170 - del_timer(&ft->tl); 171 ft->event = event; 172 ft->arg = arg; 173 ft->tl.expires = jiffies + (millisec * HZ) / 1000;
··· 123 ft->fi->printdebug(ft->fi, "mISDN_FsmDelTimer %lx %d", 124 (long) ft, where); 125 #endif 126 + timer_delete(&ft->tl); 127 } 128 EXPORT_SYMBOL(mISDN_FsmDelTimer); 129 ··· 167 #endif 168 169 if (timer_pending(&ft->tl)) 170 + timer_delete(&ft->tl); 171 ft->event = event; 172 ft->arg = arg; 173 ft->tl.expires = jiffies + (millisec * HZ) / 1000;
+2 -2
drivers/leds/flash/leds-rt8515.c
··· 127 mod_timer(&rt->powerdown_timer, 128 jiffies + usecs_to_jiffies(timeout->val)); 129 } else { 130 - del_timer_sync(&rt->powerdown_timer); 131 /* Turn the LED off */ 132 rt8515_gpio_led_off(rt); 133 } ··· 372 struct rt8515 *rt = platform_get_drvdata(pdev); 373 374 rt8515_v4l2_flash_release(rt); 375 - del_timer_sync(&rt->powerdown_timer); 376 mutex_destroy(&rt->lock); 377 } 378
··· 127 mod_timer(&rt->powerdown_timer, 128 jiffies + usecs_to_jiffies(timeout->val)); 129 } else { 130 + timer_delete_sync(&rt->powerdown_timer); 131 /* Turn the LED off */ 132 rt8515_gpio_led_off(rt); 133 } ··· 372 struct rt8515 *rt = platform_get_drvdata(pdev); 373 374 rt8515_v4l2_flash_release(rt); 375 + timer_delete_sync(&rt->powerdown_timer); 376 mutex_destroy(&rt->lock); 377 } 378
+3 -3
drivers/leds/flash/leds-sgm3140.c
··· 55 mod_timer(&priv->powerdown_timer, 56 jiffies + usecs_to_jiffies(priv->timeout)); 57 } else { 58 - del_timer_sync(&priv->powerdown_timer); 59 gpiod_set_value_cansleep(priv->enable_gpio, 0); 60 gpiod_set_value_cansleep(priv->flash_gpio, 0); 61 ret = regulator_disable(priv->vin_regulator); ··· 117 gpiod_set_value_cansleep(priv->flash_gpio, 0); 118 gpiod_set_value_cansleep(priv->enable_gpio, 1); 119 } else { 120 - del_timer_sync(&priv->powerdown_timer); 121 gpiod_set_value_cansleep(priv->flash_gpio, 0); 122 gpiod_set_value_cansleep(priv->enable_gpio, 0); 123 ret = regulator_disable(priv->vin_regulator); ··· 285 { 286 struct sgm3140 *priv = platform_get_drvdata(pdev); 287 288 - del_timer_sync(&priv->powerdown_timer); 289 290 v4l2_flash_release(priv->v4l2_flash); 291 }
··· 55 mod_timer(&priv->powerdown_timer, 56 jiffies + usecs_to_jiffies(priv->timeout)); 57 } else { 58 + timer_delete_sync(&priv->powerdown_timer); 59 gpiod_set_value_cansleep(priv->enable_gpio, 0); 60 gpiod_set_value_cansleep(priv->flash_gpio, 0); 61 ret = regulator_disable(priv->vin_regulator); ··· 117 gpiod_set_value_cansleep(priv->flash_gpio, 0); 118 gpiod_set_value_cansleep(priv->enable_gpio, 1); 119 } else { 120 + timer_delete_sync(&priv->powerdown_timer); 121 gpiod_set_value_cansleep(priv->flash_gpio, 0); 122 gpiod_set_value_cansleep(priv->enable_gpio, 0); 123 ret = regulator_disable(priv->vin_regulator); ··· 285 { 286 struct sgm3140 *priv = platform_get_drvdata(pdev); 287 288 + timer_delete_sync(&priv->powerdown_timer); 289 290 v4l2_flash_release(priv->v4l2_flash); 291 }
+2 -2
drivers/leds/led-core.c
··· 245 unsigned long *delay_on, 246 unsigned long *delay_off) 247 { 248 - del_timer_sync(&led_cdev->blink_timer); 249 250 clear_bit(LED_BLINK_SW, &led_cdev->work_flags); 251 clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags); ··· 294 295 void led_stop_software_blink(struct led_classdev *led_cdev) 296 { 297 - del_timer_sync(&led_cdev->blink_timer); 298 led_cdev->blink_delay_on = 0; 299 led_cdev->blink_delay_off = 0; 300 clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
··· 245 unsigned long *delay_on, 246 unsigned long *delay_off) 247 { 248 + timer_delete_sync(&led_cdev->blink_timer); 249 250 clear_bit(LED_BLINK_SW, &led_cdev->work_flags); 251 clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags); ··· 294 295 void led_stop_software_blink(struct led_classdev *led_cdev) 296 { 297 + timer_delete_sync(&led_cdev->blink_timer); 298 led_cdev->blink_delay_on = 0; 299 led_cdev->blink_delay_off = 0; 300 clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
+1 -1
drivers/leds/trigger/ledtrig-pattern.c
··· 94 if (data->type == PATTERN_TYPE_HR) 95 hrtimer_cancel(&data->hrtimer); 96 else 97 - del_timer_sync(&data->timer); 98 } 99 100 static void pattern_trig_timer_restart(struct pattern_trig_data *data,
··· 94 if (data->type == PATTERN_TYPE_HR) 95 hrtimer_cancel(&data->hrtimer); 96 else 97 + timer_delete_sync(&data->timer); 98 } 99 100 static void pattern_trig_timer_restart(struct pattern_trig_data *data,
+1 -1
drivers/leds/trigger/ledtrig-transient.c
··· 66 67 /* cancel the running timer */ 68 if (state == 0 && transient_data->activate == 1) { 69 - del_timer(&transient_data->timer); 70 transient_data->activate = state; 71 led_set_brightness_nosleep(led_cdev, 72 transient_data->restore_state);
··· 66 67 /* cancel the running timer */ 68 if (state == 0 && transient_data->activate == 1) { 69 + timer_delete(&transient_data->timer); 70 transient_data->activate = state; 71 led_set_brightness_nosleep(led_cdev, 72 transient_data->restore_state);
+1 -1
drivers/macintosh/adbhid.c
··· 724 int i; 725 for (i = 1; i < 16; i++) { 726 if (adbhid[i]) 727 - del_timer_sync(&adbhid[i]->input->timer); 728 } 729 } 730
··· 724 int i; 725 for (i = 1; i < 16; i++) { 726 if (adbhid[i]) 727 + timer_delete_sync(&adbhid[i]->input->timer); 728 } 729 } 730
+1 -1
drivers/mailbox/mailbox-altera.c
··· 270 writel_relaxed(~0, mbox->mbox_base + MAILBOX_INTMASK_REG); 271 free_irq(mbox->irq, chan); 272 } else if (!mbox->is_sender) { 273 - del_timer_sync(&mbox->rxpoll_timer); 274 } 275 } 276
··· 270 writel_relaxed(~0, mbox->mbox_base + MAILBOX_INTMASK_REG); 271 free_irq(mbox->irq, chan); 272 } else if (!mbox->is_sender) { 273 + timer_delete_sync(&mbox->rxpoll_timer); 274 } 275 } 276
+1 -1
drivers/md/bcache/stats.c
··· 123 kobject_put(&acc->day.kobj); 124 125 atomic_set(&acc->closing, 1); 126 - if (del_timer_sync(&acc->timer)) 127 closure_return(&acc->cl); 128 } 129
··· 123 kobject_put(&acc->day.kobj); 124 125 atomic_set(&acc->closing, 1); 126 + if (timer_delete_sync(&acc->timer)) 127 closure_return(&acc->cl); 128 } 129
+2 -2
drivers/md/dm-integrity.c
··· 2707 unsigned int i, j, n; 2708 struct bio *flushes; 2709 2710 - del_timer(&ic->autocommit_timer); 2711 2712 if (ic->mode == 'I') 2713 return; ··· 3606 3607 WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier)); 3608 3609 - del_timer_sync(&ic->autocommit_timer); 3610 3611 if (ic->recalc_wq) 3612 drain_workqueue(ic->recalc_wq);
··· 2707 unsigned int i, j, n; 2708 struct bio *flushes; 2709 2710 + timer_delete(&ic->autocommit_timer); 2711 2712 if (ic->mode == 'I') 2713 return; ··· 3606 3607 WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier)); 3608 3609 + timer_delete_sync(&ic->autocommit_timer); 3610 3611 if (ic->recalc_wq) 3612 drain_workqueue(ic->recalc_wq);
+1 -1
drivers/md/dm-mpath.c
··· 815 816 static void disable_nopath_timeout(struct multipath *m) 817 { 818 - del_timer_sync(&m->nopath_timer); 819 } 820 821 /*
··· 815 816 static void disable_nopath_timeout(struct multipath *m) 817 { 818 + timer_delete_sync(&m->nopath_timer); 819 } 820 821 /*
+1 -1
drivers/md/dm-raid1.c
··· 1182 { 1183 struct mirror_set *ms = ti->private; 1184 1185 - del_timer_sync(&ms->timer); 1186 flush_workqueue(ms->kmirrord_wq); 1187 flush_work(&ms->trigger_event); 1188 dm_kcopyd_client_destroy(ms->kcopyd_client);
··· 1182 { 1183 struct mirror_set *ms = ti->private; 1184 1185 + timer_delete_sync(&ms->timer); 1186 flush_workqueue(ms->kmirrord_wq); 1187 flush_work(&ms->trigger_event); 1188 dm_kcopyd_client_destroy(ms->kcopyd_client);
+1 -1
drivers/md/dm-vdo/dedupe.c
··· 2261 if ((atomic_read(&zone->timer_state) == DEDUPE_QUERY_TIMER_IDLE) || 2262 change_timer_state(zone, DEDUPE_QUERY_TIMER_RUNNING, 2263 DEDUPE_QUERY_TIMER_IDLE)) { 2264 - del_timer_sync(&zone->timer); 2265 } else { 2266 /* 2267 * There is an in flight time-out, which must get processed before we can continue.
··· 2261 if ((atomic_read(&zone->timer_state) == DEDUPE_QUERY_TIMER_IDLE) || 2262 change_timer_state(zone, DEDUPE_QUERY_TIMER_RUNNING, 2263 DEDUPE_QUERY_TIMER_IDLE)) { 2264 + timer_delete_sync(&zone->timer); 2265 } else { 2266 /* 2267 * There is an in flight time-out, which must get processed before we can continue.
+3 -3
drivers/md/dm-writecache.c
··· 797 bool need_flush_after_free; 798 799 wc->uncommitted_blocks = 0; 800 - del_timer(&wc->autocommit_timer); 801 802 if (list_empty(&wc->lru)) 803 return; ··· 927 struct dm_writecache *wc = ti->private; 928 bool flush_on_suspend; 929 930 - del_timer_sync(&wc->autocommit_timer); 931 - del_timer_sync(&wc->max_age_timer); 932 933 wc_lock(wc); 934 writecache_flush(wc);
··· 797 bool need_flush_after_free; 798 799 wc->uncommitted_blocks = 0; 800 + timer_delete(&wc->autocommit_timer); 801 802 if (list_empty(&wc->lru)) 803 return; ··· 927 struct dm_writecache *wc = ti->private; 928 bool flush_on_suspend; 929 930 + timer_delete_sync(&wc->autocommit_timer); 931 + timer_delete_sync(&wc->max_age_timer); 932 933 wc_lock(wc); 934 writecache_flush(wc);
+2 -2
drivers/md/md.c
··· 4064 * it must always be in_sync 4065 */ 4066 mddev->in_sync = 1; 4067 - del_timer_sync(&mddev->safemode_timer); 4068 } 4069 pers->run(mddev); 4070 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); ··· 6405 6406 static void __md_stop_writes(struct mddev *mddev) 6407 { 6408 - del_timer_sync(&mddev->safemode_timer); 6409 6410 if (mddev->pers && mddev->pers->quiesce) { 6411 mddev->pers->quiesce(mddev, 1);
··· 4064 * it must always be in_sync 4065 */ 4066 mddev->in_sync = 1; 4067 + timer_delete_sync(&mddev->safemode_timer); 4068 } 4069 pers->run(mddev); 4070 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); ··· 6405 6406 static void __md_stop_writes(struct mddev *mddev) 6407 { 6408 + timer_delete_sync(&mddev->safemode_timer); 6409 6410 if (mddev->pers && mddev->pers->quiesce) { 6411 mddev->pers->quiesce(mddev, 1);
+1 -1
drivers/media/common/saa7146/saa7146_fops.c
··· 147 printk("vdma%d.num_line_byte: 0x%08x\n", 1,saa7146_read(dev,NUM_LINE_BYTE1)); 148 */ 149 } 150 - del_timer(&q->timeout); 151 } 152 } 153
··· 147 printk("vdma%d.num_line_byte: 0x%08x\n", 1,saa7146_read(dev,NUM_LINE_BYTE1)); 148 */ 149 } 150 + timer_delete(&q->timeout); 151 } 152 } 153
+2 -2
drivers/media/common/saa7146/saa7146_vbi.c
··· 322 /* shut down dma 3 transfers */ 323 saa7146_write(dev, MC1, MASK_20); 324 325 - del_timer(&vv->vbi_dmaq.timeout); 326 - del_timer(&vv->vbi_read_timeout); 327 328 spin_unlock_irqrestore(&dev->slock, flags); 329 }
··· 322 /* shut down dma 3 transfers */ 323 saa7146_write(dev, MC1, MASK_20); 324 325 + timer_delete(&vv->vbi_dmaq.timeout); 326 + timer_delete(&vv->vbi_read_timeout); 327 328 spin_unlock_irqrestore(&dev->slock, flags); 329 }
+1 -1
drivers/media/common/saa7146/saa7146_video.c
··· 668 struct saa7146_dev *dev = vb2_get_drv_priv(q); 669 struct saa7146_dmaqueue *dq = &dev->vv_data->video_dmaq; 670 671 - del_timer(&dq->timeout); 672 video_end(dev); 673 return_buffers(q, VB2_BUF_STATE_ERROR); 674 }
··· 668 struct saa7146_dev *dev = vb2_get_drv_priv(q); 669 struct saa7146_dmaqueue *dq = &dev->vv_data->video_dmaq; 670 671 + timer_delete(&dq->timeout); 672 video_end(dev); 673 return_buffers(q, VB2_BUF_STATE_ERROR); 674 }
+3 -3
drivers/media/dvb-core/dmxdev.c
··· 365 { 366 struct dmx_sct_filter_params *para = &dmxdevfilter->params.sec; 367 368 - del_timer(&dmxdevfilter->timer); 369 if (para->timeout) { 370 dmxdevfilter->timer.expires = 371 jiffies + 1 + (HZ / 2 + HZ * para->timeout) / 1000; ··· 391 spin_unlock(&dmxdevfilter->dev->lock); 392 return 0; 393 } 394 - del_timer(&dmxdevfilter->timer); 395 dprintk("section callback %*ph\n", 6, buffer1); 396 if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx)) { 397 ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx, ··· 482 483 switch (dmxdevfilter->type) { 484 case DMXDEV_TYPE_SEC: 485 - del_timer(&dmxdevfilter->timer); 486 dmxdevfilter->feed.sec->stop_filtering(dmxdevfilter->feed.sec); 487 break; 488 case DMXDEV_TYPE_PES:
··· 365 { 366 struct dmx_sct_filter_params *para = &dmxdevfilter->params.sec; 367 368 + timer_delete(&dmxdevfilter->timer); 369 if (para->timeout) { 370 dmxdevfilter->timer.expires = 371 jiffies + 1 + (HZ / 2 + HZ * para->timeout) / 1000; ··· 391 spin_unlock(&dmxdevfilter->dev->lock); 392 return 0; 393 } 394 + timer_delete(&dmxdevfilter->timer); 395 dprintk("section callback %*ph\n", 6, buffer1); 396 if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx)) { 397 ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx, ··· 482 483 switch (dmxdevfilter->type) { 484 case DMXDEV_TYPE_SEC: 485 + timer_delete(&dmxdevfilter->timer); 486 dmxdevfilter->feed.sec->stop_filtering(dmxdevfilter->feed.sec); 487 break; 488 case DMXDEV_TYPE_PES:
+2 -2
drivers/media/i2c/tc358743.c
··· 2201 err_work_queues: 2202 cec_unregister_adapter(state->cec_adap); 2203 if (!state->i2c_client->irq) { 2204 - del_timer(&state->timer); 2205 flush_work(&state->work_i2c_poll); 2206 } 2207 cancel_delayed_work(&state->delayed_work_enable_hotplug); ··· 2218 struct tc358743_state *state = to_state(sd); 2219 2220 if (!state->i2c_client->irq) { 2221 - del_timer_sync(&state->timer); 2222 flush_work(&state->work_i2c_poll); 2223 } 2224 cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
··· 2201 err_work_queues: 2202 cec_unregister_adapter(state->cec_adap); 2203 if (!state->i2c_client->irq) { 2204 + timer_delete(&state->timer); 2205 flush_work(&state->work_i2c_poll); 2206 } 2207 cancel_delayed_work(&state->delayed_work_enable_hotplug); ··· 2218 struct tc358743_state *state = to_state(sd); 2219 2220 if (!state->i2c_client->irq) { 2221 + timer_delete_sync(&state->timer); 2222 flush_work(&state->work_i2c_poll); 2223 } 2224 cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
+2 -2
drivers/media/i2c/tvaudio.c
··· 1787 struct CHIPSTATE *chip = to_state(sd); 1788 1789 chip->radio = 1; 1790 - /* del_timer(&chip->wt); */ 1791 return 0; 1792 } 1793 ··· 2071 struct v4l2_subdev *sd = i2c_get_clientdata(client); 2072 struct CHIPSTATE *chip = to_state(sd); 2073 2074 - del_timer_sync(&chip->wt); 2075 if (chip->thread) { 2076 /* shutdown async thread */ 2077 kthread_stop(chip->thread);
··· 1787 struct CHIPSTATE *chip = to_state(sd); 1788 1789 chip->radio = 1; 1790 + /* timer_delete(&chip->wt); */ 1791 return 0; 1792 } 1793 ··· 2071 struct v4l2_subdev *sd = i2c_get_clientdata(client); 2072 struct CHIPSTATE *chip = to_state(sd); 2073 2074 + timer_delete_sync(&chip->wt); 2075 if (chip->thread) { 2076 /* shutdown async thread */ 2077 kthread_stop(chip->thread);
+1 -1
drivers/media/pci/bt8xx/bttv-driver.c
··· 3491 3492 /* free resources */ 3493 free_irq(btv->c.pci->irq,btv); 3494 - del_timer_sync(&btv->timeout); 3495 iounmap(btv->bt848_mmio); 3496 release_mem_region(pci_resource_start(btv->c.pci,0), 3497 pci_resource_len(btv->c.pci,0));
··· 3491 3492 /* free resources */ 3493 free_irq(btv->c.pci->irq,btv); 3494 + timer_delete_sync(&btv->timeout); 3495 iounmap(btv->bt848_mmio); 3496 release_mem_region(pci_resource_start(btv->c.pci,0), 3497 pci_resource_len(btv->c.pci,0));
+2 -2
drivers/media/pci/bt8xx/bttv-input.c
··· 304 static void bttv_ir_stop(struct bttv *btv) 305 { 306 if (btv->remote->polling) 307 - del_timer_sync(&btv->remote->timer); 308 309 if (btv->remote->rc5_gpio) { 310 u32 gpio; 311 312 - del_timer_sync(&btv->remote->timer); 313 314 gpio = bttv_gpio_read(&btv->c); 315 bttv_gpio_write(&btv->c, gpio & ~(1 << 4));
··· 304 static void bttv_ir_stop(struct bttv *btv) 305 { 306 if (btv->remote->polling) 307 + timer_delete_sync(&btv->remote->timer); 308 309 if (btv->remote->rc5_gpio) { 310 u32 gpio; 311 312 + timer_delete_sync(&btv->remote->timer); 313 314 gpio = bttv_gpio_read(&btv->c); 315 bttv_gpio_write(&btv->c, gpio & ~(1 << 4));
+1 -1
drivers/media/pci/bt8xx/bttv-risc.c
··· 376 if (btv->curr.frame_irq || btv->loop_irq || btv->cvbi) 377 mod_timer(&btv->timeout, jiffies + BTTV_TIMEOUT); 378 else 379 - del_timer(&btv->timeout); 380 } 381 382 static int bttv_set_capture_control(struct bttv *btv, int start_capture)
··· 376 if (btv->curr.frame_irq || btv->loop_irq || btv->cvbi) 377 mod_timer(&btv->timeout, jiffies + BTTV_TIMEOUT); 378 else 379 + timer_delete(&btv->timeout); 380 } 381 382 static int bttv_set_capture_control(struct bttv *btv, int start_capture)
+3 -3
drivers/media/pci/ivtv/ivtv-irq.c
··· 532 533 IVTV_DEBUG_HI_IRQ("DEC DMA READ\n"); 534 535 - del_timer(&itv->dma_timer); 536 537 if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0) 538 return; ··· 597 ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data); 598 IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream); 599 600 - del_timer(&itv->dma_timer); 601 602 if (itv->cur_dma_stream < 0) 603 return; ··· 670 u32 data[CX2341X_MBOX_MAX_DATA]; 671 u32 status; 672 673 - del_timer(&itv->dma_timer); 674 675 ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data); 676 status = read_reg(IVTV_REG_DMASTATUS);
··· 532 533 IVTV_DEBUG_HI_IRQ("DEC DMA READ\n"); 534 535 + timer_delete(&itv->dma_timer); 536 537 if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0) 538 return; ··· 597 ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data); 598 IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream); 599 600 + timer_delete(&itv->dma_timer); 601 602 if (itv->cur_dma_stream < 0) 603 return; ··· 670 u32 data[CX2341X_MBOX_MAX_DATA]; 671 u32 status; 672 673 + timer_delete(&itv->dma_timer); 674 675 ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data); 676 status = read_reg(IVTV_REG_DMASTATUS);
+2 -2
drivers/media/pci/ivtv/ivtv-streams.c
··· 891 892 /* Set the following Interrupt mask bits for capture */ 893 ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE); 894 - del_timer(&itv->dma_timer); 895 896 /* event notification (off) */ 897 if (test_and_clear_bit(IVTV_F_I_DIG_RST, &itv->i_flags)) { ··· 956 ivtv_vapi(itv, CX2341X_DEC_SET_EVENT_NOTIFICATION, 4, 0, 0, IVTV_IRQ_DEC_AUD_MODE_CHG, -1); 957 958 ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_DECODE); 959 - del_timer(&itv->dma_timer); 960 961 clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags); 962 clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
··· 891 892 /* Set the following Interrupt mask bits for capture */ 893 ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE); 894 + timer_delete(&itv->dma_timer); 895 896 /* event notification (off) */ 897 if (test_and_clear_bit(IVTV_F_I_DIG_RST, &itv->i_flags)) { ··· 956 ivtv_vapi(itv, CX2341X_DEC_SET_EVENT_NOTIFICATION, 4, 0, 0, IVTV_IRQ_DEC_AUD_MODE_CHG, -1); 957 958 ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_DECODE); 959 + timer_delete(&itv->dma_timer); 960 961 clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags); 962 clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
+1 -1
drivers/media/pci/netup_unidvb/netup_unidvb_core.c
··· 698 netup_unidvb_dma_enable(dma, 0); 699 msleep(50); 700 cancel_work_sync(&dma->work); 701 - del_timer_sync(&dma->timeout); 702 } 703 704 static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev)
··· 698 netup_unidvb_dma_enable(dma, 0); 699 msleep(50); 700 cancel_work_sync(&dma->work); 701 + timer_delete_sync(&dma->timeout); 702 } 703 704 static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev)
+5 -5
drivers/media/pci/saa7134/saa7134-core.c
··· 322 /* nothing to do -- just stop DMA */ 323 core_dbg("buffer_next %p\n", NULL); 324 saa7134_set_dmabits(dev); 325 - del_timer(&q->timeout); 326 } 327 } 328 ··· 364 tmp = NULL; 365 } 366 spin_unlock_irqrestore(&dev->slock, flags); 367 - saa7134_buffer_timeout(&q->timeout); /* also calls del_timer(&q->timeout) */ 368 } 369 EXPORT_SYMBOL_GPL(saa7134_stop_streaming); 370 ··· 1390 /* Disable timeout timers - if we have active buffers, we will 1391 fill them on resume*/ 1392 1393 - del_timer(&dev->video_q.timeout); 1394 - del_timer(&dev->vbi_q.timeout); 1395 - del_timer(&dev->ts_q.timeout); 1396 1397 if (dev->remote && dev->remote->dev->users) 1398 saa7134_ir_close(dev->remote->dev);
··· 322 /* nothing to do -- just stop DMA */ 323 core_dbg("buffer_next %p\n", NULL); 324 saa7134_set_dmabits(dev); 325 + timer_delete(&q->timeout); 326 } 327 } 328 ··· 364 tmp = NULL; 365 } 366 spin_unlock_irqrestore(&dev->slock, flags); 367 + saa7134_buffer_timeout(&q->timeout); /* also calls timer_delete(&q->timeout) */ 368 } 369 EXPORT_SYMBOL_GPL(saa7134_stop_streaming); 370 ··· 1390 /* Disable timeout timers - if we have active buffers, we will 1391 fill them on resume*/ 1392 1393 + timer_delete(&dev->video_q.timeout); 1394 + timer_delete(&dev->vbi_q.timeout); 1395 + timer_delete(&dev->ts_q.timeout); 1396 1397 if (dev->remote && dev->remote->dev->users) 1398 saa7134_ir_close(dev->remote->dev);
+1 -1
drivers/media/pci/saa7134/saa7134-input.c
··· 496 struct saa7134_card_ir *ir = dev->remote; 497 498 if (ir->polling) 499 - del_timer_sync(&ir->timer); 500 501 ir->running = false; 502 }
··· 496 struct saa7134_card_ir *ir = dev->remote; 497 498 if (ir->polling) 499 + timer_delete_sync(&ir->timer); 500 501 ir->running = false; 502 }
+1 -1
drivers/media/pci/saa7134/saa7134-ts.c
··· 298 299 int saa7134_ts_fini(struct saa7134_dev *dev) 300 { 301 - del_timer_sync(&dev->ts_q.timeout); 302 saa7134_pgtable_free(dev->pci, &dev->ts_q.pt); 303 return 0; 304 }
··· 298 299 int saa7134_ts_fini(struct saa7134_dev *dev) 300 { 301 + timer_delete_sync(&dev->ts_q.timeout); 302 saa7134_pgtable_free(dev->pci, &dev->ts_q.pt); 303 return 0; 304 }
+1 -1
drivers/media/pci/saa7134/saa7134-vbi.c
··· 183 int saa7134_vbi_fini(struct saa7134_dev *dev) 184 { 185 /* nothing */ 186 - del_timer_sync(&dev->vbi_q.timeout); 187 return 0; 188 } 189
··· 183 int saa7134_vbi_fini(struct saa7134_dev *dev) 184 { 185 /* nothing */ 186 + timer_delete_sync(&dev->vbi_q.timeout); 187 return 0; 188 } 189
+1 -1
drivers/media/pci/saa7134/saa7134-video.c
··· 1741 1742 void saa7134_video_fini(struct saa7134_dev *dev) 1743 { 1744 - del_timer_sync(&dev->video_q.timeout); 1745 /* free stuff */ 1746 saa7134_pgtable_free(dev->pci, &dev->video_q.pt); 1747 saa7134_pgtable_free(dev->pci, &dev->vbi_q.pt);
··· 1741 1742 void saa7134_video_fini(struct saa7134_dev *dev) 1743 { 1744 + timer_delete_sync(&dev->video_q.timeout); 1745 /* free stuff */ 1746 saa7134_pgtable_free(dev->pci, &dev->video_q.pt); 1747 saa7134_pgtable_free(dev->pci, &dev->vbi_q.pt);
+1 -1
drivers/media/pci/tw686x/tw686x-core.c
··· 373 374 tw686x_video_free(dev); 375 tw686x_audio_free(dev); 376 - del_timer_sync(&dev->dma_delay_timer); 377 378 pci_iounmap(pci_dev, dev->mmio); 379 pci_release_regions(pci_dev);
··· 373 374 tw686x_video_free(dev); 375 tw686x_audio_free(dev); 376 + timer_delete_sync(&dev->dma_delay_timer); 377 378 pci_iounmap(pci_dev, dev->mmio); 379 pci_release_regions(pci_dev);
+3 -3
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
··· 935 if (dev->num_inst == 1) { 936 if (s5p_mfc_power_off(dev) < 0) 937 mfc_err("power off failed\n"); 938 - del_timer_sync(&dev->watchdog_timer); 939 } 940 err_ctrls_setup: 941 s5p_mfc_dec_ctrls_delete(ctx); ··· 985 if (dev->num_inst == 0) { 986 mfc_debug(2, "Last instance\n"); 987 s5p_mfc_deinit_hw(dev); 988 - del_timer_sync(&dev->watchdog_timer); 989 s5p_mfc_clock_off(dev); 990 if (s5p_mfc_power_off(dev) < 0) 991 mfc_err("Power off failed\n"); ··· 1461 } 1462 mutex_unlock(&dev->mfc_mutex); 1463 1464 - del_timer_sync(&dev->watchdog_timer); 1465 flush_work(&dev->watchdog_work); 1466 1467 video_unregister_device(dev->vfd_enc);
··· 935 if (dev->num_inst == 1) { 936 if (s5p_mfc_power_off(dev) < 0) 937 mfc_err("power off failed\n"); 938 + timer_delete_sync(&dev->watchdog_timer); 939 } 940 err_ctrls_setup: 941 s5p_mfc_dec_ctrls_delete(ctx); ··· 985 if (dev->num_inst == 0) { 986 mfc_debug(2, "Last instance\n"); 987 s5p_mfc_deinit_hw(dev); 988 + timer_delete_sync(&dev->watchdog_timer); 989 s5p_mfc_clock_off(dev); 990 if (s5p_mfc_power_off(dev) < 0) 991 mfc_err("Power off failed\n"); ··· 1461 } 1462 mutex_unlock(&dev->mfc_mutex); 1463 1464 + timer_delete_sync(&dev->watchdog_timer); 1465 flush_work(&dev->watchdog_work); 1466 1467 video_unregister_device(dev->vfd_enc);
+1 -1
drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
··· 351 dev_dbg(fei->dev, "%s:%d global_feed_count=%d\n" 352 , __func__, __LINE__, fei->global_feed_count); 353 354 - del_timer(&fei->timer); 355 } 356 357 mutex_unlock(&fei->lock);
··· 351 dev_dbg(fei->dev, "%s:%d global_feed_count=%d\n" 352 , __func__, __LINE__, fei->global_feed_count); 353 354 + timer_delete(&fei->timer); 355 } 356 357 mutex_unlock(&fei->lock);
+1 -1
drivers/media/radio/radio-cadet.c
··· 471 472 mutex_lock(&dev->lock); 473 if (v4l2_fh_is_singular_file(file) && dev->rdsstat) { 474 - del_timer_sync(&dev->readtimer); 475 dev->rdsstat = 0; 476 } 477 v4l2_fh_release(file);
··· 471 472 mutex_lock(&dev->lock); 473 if (v4l2_fh_is_singular_file(file) && dev->rdsstat) { 474 + timer_delete_sync(&dev->readtimer); 475 dev->rdsstat = 0; 476 } 477 v4l2_fh_release(file);
+1 -1
drivers/media/rc/ene_ir.c
··· 1104 unsigned long flags; 1105 1106 rc_unregister_device(dev->rdev); 1107 - del_timer_sync(&dev->tx_sim_timer); 1108 spin_lock_irqsave(&dev->hw_lock, flags); 1109 ene_rx_disable(dev); 1110 ene_rx_restore_hw_buffer(dev);
··· 1104 unsigned long flags; 1105 1106 rc_unregister_device(dev->rdev); 1107 + timer_delete_sync(&dev->tx_sim_timer); 1108 spin_lock_irqsave(&dev->hw_lock, flags); 1109 ene_rx_disable(dev); 1110 ene_rx_restore_hw_buffer(dev);
+2 -2
drivers/media/rc/igorplugusb.c
··· 223 return 0; 224 fail: 225 usb_poison_urb(ir->urb); 226 - del_timer(&ir->timer); 227 usb_unpoison_urb(ir->urb); 228 usb_free_urb(ir->urb); 229 rc_free_device(ir->rc); ··· 238 239 rc_unregister_device(ir->rc); 240 usb_poison_urb(ir->urb); 241 - del_timer_sync(&ir->timer); 242 usb_set_intfdata(intf, NULL); 243 usb_unpoison_urb(ir->urb); 244 usb_free_urb(ir->urb);
··· 223 return 0; 224 fail: 225 usb_poison_urb(ir->urb); 226 + timer_delete(&ir->timer); 227 usb_unpoison_urb(ir->urb); 228 usb_free_urb(ir->urb); 229 rc_free_device(ir->rc); ··· 238 239 rc_unregister_device(ir->rc); 240 usb_poison_urb(ir->urb); 241 + timer_delete_sync(&ir->timer); 242 usb_set_intfdata(intf, NULL); 243 usb_unpoison_urb(ir->urb); 244 usb_free_urb(ir->urb);
+2 -2
drivers/media/rc/img-ir/img-ir-hw.c
··· 556 * acquires the lock and we don't want to deadlock waiting for it. 557 */ 558 spin_unlock_irq(&priv->lock); 559 - del_timer_sync(&hw->end_timer); 560 - del_timer_sync(&hw->suspend_timer); 561 spin_lock_irq(&priv->lock); 562 563 hw->stopping = false;
··· 556 * acquires the lock and we don't want to deadlock waiting for it. 557 */ 558 spin_unlock_irq(&priv->lock); 559 + timer_delete_sync(&hw->end_timer); 560 + timer_delete_sync(&hw->suspend_timer); 561 spin_lock_irq(&priv->lock); 562 563 hw->stopping = false;
+1 -1
drivers/media/rc/img-ir/img-ir-raw.c
··· 147 148 rc_unregister_device(rdev); 149 150 - del_timer_sync(&raw->timer); 151 }
··· 147 148 rc_unregister_device(rdev); 149 150 + timer_delete_sync(&raw->timer); 151 }
+1 -1
drivers/media/rc/imon.c
··· 2534 ictx->dev_present_intf1 = false; 2535 usb_kill_urb(ictx->rx_urb_intf1); 2536 if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) { 2537 - del_timer_sync(&ictx->ttimer); 2538 input_unregister_device(ictx->touch); 2539 } 2540 usb_put_dev(ictx->usbdev_intf1);
··· 2534 ictx->dev_present_intf1 = false; 2535 usb_kill_urb(ictx->rx_urb_intf1); 2536 if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) { 2537 + timer_delete_sync(&ictx->ttimer); 2538 input_unregister_device(ictx->touch); 2539 } 2540 usb_put_dev(ictx->usbdev_intf1);
+2 -2
drivers/media/rc/ir-mce_kbd-decoder.c
··· 324 msecs_to_jiffies(100); 325 mod_timer(&data->rx_timeout, jiffies + delay); 326 } else { 327 - del_timer(&data->rx_timeout); 328 } 329 /* Pass data to keyboard buffer parser */ 330 ir_mce_kbd_process_keyboard_data(dev, scancode); ··· 372 { 373 struct mce_kbd_dec *mce_kbd = &dev->raw->mce_kbd; 374 375 - del_timer_sync(&mce_kbd->rx_timeout); 376 377 return 0; 378 }
··· 324 msecs_to_jiffies(100); 325 mod_timer(&data->rx_timeout, jiffies + delay); 326 } else { 327 + timer_delete(&data->rx_timeout); 328 } 329 /* Pass data to keyboard buffer parser */ 330 ir_mce_kbd_process_keyboard_data(dev, scancode); ··· 372 { 373 struct mce_kbd_dec *mce_kbd = &dev->raw->mce_kbd; 374 375 + timer_delete_sync(&mce_kbd->rx_timeout); 376 377 return 0; 378 }
+1 -1
drivers/media/rc/rc-ir-raw.c
··· 662 return; 663 664 kthread_stop(dev->raw->thread); 665 - del_timer_sync(&dev->raw->edge_handle); 666 667 mutex_lock(&ir_raw_handler_lock); 668 list_del(&dev->raw->list);
··· 662 return; 663 664 kthread_stop(dev->raw->thread); 665 + timer_delete_sync(&dev->raw->edge_handle); 666 667 mutex_lock(&ir_raw_handler_lock); 668 list_del(&dev->raw->list);
+3 -3
drivers/media/rc/rc-main.c
··· 639 return; 640 641 dev_dbg(&dev->dev, "keyup key 0x%04x\n", dev->last_keycode); 642 - del_timer(&dev->timer_repeat); 643 input_report_key(dev->input_dev, dev->last_keycode, 0); 644 led_trigger_event(led_feedback, LED_OFF); 645 if (sync) ··· 2021 if (dev->driver_type == RC_DRIVER_IR_RAW) 2022 ir_raw_event_unregister(dev); 2023 2024 - del_timer_sync(&dev->timer_keyup); 2025 - del_timer_sync(&dev->timer_repeat); 2026 2027 mutex_lock(&dev->lock); 2028 if (dev->users && dev->close)
··· 639 return; 640 641 dev_dbg(&dev->dev, "keyup key 0x%04x\n", dev->last_keycode); 642 + timer_delete(&dev->timer_repeat); 643 input_report_key(dev->input_dev, dev->last_keycode, 0); 644 led_trigger_event(led_feedback, LED_OFF); 645 if (sync) ··· 2021 if (dev->driver_type == RC_DRIVER_IR_RAW) 2022 ir_raw_event_unregister(dev); 2023 2024 + timer_delete_sync(&dev->timer_keyup); 2025 + timer_delete_sync(&dev->timer_repeat); 2026 2027 mutex_lock(&dev->lock); 2028 if (dev->users && dev->close)
+1 -1
drivers/media/rc/serial_ir.c
··· 798 799 static void __exit serial_ir_exit_module(void) 800 { 801 - del_timer_sync(&serial_ir.timeout_timer); 802 serial_ir_exit(); 803 } 804
··· 798 799 static void __exit serial_ir_exit_module(void) 800 { 801 + timer_delete_sync(&serial_ir.timeout_timer); 802 serial_ir_exit(); 803 } 804
+2 -2
drivers/media/usb/au0828/au0828-dvb.c
··· 143 */ 144 dprintk(1, "%s cancelling bulk timeout\n", __func__); 145 dev->bulk_timeout_running = 0; 146 - del_timer(&dev->bulk_timeout); 147 } 148 149 /* Feed the transport payload into the kernel demux */ ··· 168 169 if (dev->bulk_timeout_running == 1) { 170 dev->bulk_timeout_running = 0; 171 - del_timer(&dev->bulk_timeout); 172 } 173 174 dev->urb_streaming = false;
··· 143 */ 144 dprintk(1, "%s cancelling bulk timeout\n", __func__); 145 dev->bulk_timeout_running = 0; 146 + timer_delete(&dev->bulk_timeout); 147 } 148 149 /* Feed the transport payload into the kernel demux */ ··· 168 169 if (dev->bulk_timeout_running == 1) { 170 dev->bulk_timeout_running = 0; 171 + timer_delete(&dev->bulk_timeout); 172 } 173 174 dev->urb_streaming = false;
+6 -6
drivers/media/usb/au0828/au0828-video.c
··· 857 } 858 859 dev->vid_timeout_running = 0; 860 - del_timer_sync(&dev->vid_timeout); 861 862 spin_lock_irqsave(&dev->slock, flags); 863 if (dev->isoc_ctl.buf != NULL) { ··· 905 spin_unlock_irqrestore(&dev->slock, flags); 906 907 dev->vbi_timeout_running = 0; 908 - del_timer_sync(&dev->vbi_timeout); 909 } 910 911 static const struct vb2_ops au0828_video_qops = { ··· 1040 if (vdev->vfl_type == VFL_TYPE_VIDEO && dev->vid_timeout_running) { 1041 /* Cancel timeout thread in case they didn't call streamoff */ 1042 dev->vid_timeout_running = 0; 1043 - del_timer_sync(&dev->vid_timeout); 1044 } else if (vdev->vfl_type == VFL_TYPE_VBI && 1045 dev->vbi_timeout_running) { 1046 /* Cancel timeout thread in case they didn't call streamoff */ 1047 dev->vbi_timeout_running = 0; 1048 - del_timer_sync(&dev->vbi_timeout); 1049 } 1050 1051 if (test_bit(DEV_DISCONNECTED, &dev->dev_state)) ··· 1694 } 1695 1696 if (dev->vid_timeout_running) 1697 - del_timer_sync(&dev->vid_timeout); 1698 if (dev->vbi_timeout_running) 1699 - del_timer_sync(&dev->vbi_timeout); 1700 } 1701 1702 void au0828_v4l2_resume(struct au0828_dev *dev)
··· 857 } 858 859 dev->vid_timeout_running = 0; 860 + timer_delete_sync(&dev->vid_timeout); 861 862 spin_lock_irqsave(&dev->slock, flags); 863 if (dev->isoc_ctl.buf != NULL) { ··· 905 spin_unlock_irqrestore(&dev->slock, flags); 906 907 dev->vbi_timeout_running = 0; 908 + timer_delete_sync(&dev->vbi_timeout); 909 } 910 911 static const struct vb2_ops au0828_video_qops = { ··· 1040 if (vdev->vfl_type == VFL_TYPE_VIDEO && dev->vid_timeout_running) { 1041 /* Cancel timeout thread in case they didn't call streamoff */ 1042 dev->vid_timeout_running = 0; 1043 + timer_delete_sync(&dev->vid_timeout); 1044 } else if (vdev->vfl_type == VFL_TYPE_VBI && 1045 dev->vbi_timeout_running) { 1046 /* Cancel timeout thread in case they didn't call streamoff */ 1047 dev->vbi_timeout_running = 0; 1048 + timer_delete_sync(&dev->vbi_timeout); 1049 } 1050 1051 if (test_bit(DEV_DISCONNECTED, &dev->dev_state)) ··· 1694 } 1695 1696 if (dev->vid_timeout_running) 1697 + timer_delete_sync(&dev->vid_timeout); 1698 if (dev->vbi_timeout_running) 1699 + timer_delete_sync(&dev->vbi_timeout); 1700 } 1701 1702 void au0828_v4l2_resume(struct au0828_dev *dev)
+1 -1
drivers/media/usb/pvrusb2/pvrusb2-encoder.c
··· 257 ret = -EBUSY; 258 } 259 if (ret) { 260 - del_timer_sync(&hdw->encoder_run_timer); 261 hdw->state_encoder_ok = 0; 262 pvr2_trace(PVR2_TRACE_STBITS, 263 "State bit %s <-- %s",
··· 257 ret = -EBUSY; 258 } 259 if (ret) { 260 + timer_delete_sync(&hdw->encoder_run_timer); 261 hdw->state_encoder_ok = 0; 262 pvr2_trace(PVR2_TRACE_STBITS, 263 "State bit %s <-- %s",
+8 -8
drivers/media/usb/pvrusb2/pvrusb2-hdw.c
··· 1527 1528 /* Encoder is about to be reset so note that as far as we're 1529 concerned now, the encoder has never been run. */ 1530 - del_timer_sync(&hdw->encoder_run_timer); 1531 if (hdw->state_encoder_runok) { 1532 hdw->state_encoder_runok = 0; 1533 trace_stbit("state_encoder_runok",hdw->state_encoder_runok); ··· 3724 hdw->cmd_debug_state = 5; 3725 3726 /* Stop timer */ 3727 - del_timer_sync(&timer.timer); 3728 3729 hdw->cmd_debug_state = 6; 3730 status = 0; ··· 4248 hdw->state_encoder_waitok = 0; 4249 trace_stbit("state_encoder_waitok",hdw->state_encoder_waitok); 4250 /* paranoia - solve race if timer just completed */ 4251 - del_timer_sync(&hdw->encoder_wait_timer); 4252 } else { 4253 if (!hdw->state_pathway_ok || 4254 (hdw->pathway_state != PVR2_PATHWAY_ANALOG) || ··· 4261 anything has happened that might have disturbed 4262 the encoder. This should be a rare case. */ 4263 if (timer_pending(&hdw->encoder_wait_timer)) { 4264 - del_timer_sync(&hdw->encoder_wait_timer); 4265 } 4266 if (hdw->state_encoder_waitok) { 4267 /* Must clear the state - therefore we did ··· 4399 if (hdw->state_encoder_run) { 4400 if (!state_check_disable_encoder_run(hdw)) return 0; 4401 if (hdw->state_encoder_ok) { 4402 - del_timer_sync(&hdw->encoder_run_timer); 4403 if (pvr2_encoder_stop(hdw) < 0) return !0; 4404 } 4405 hdw->state_encoder_run = 0; ··· 4479 hdw->state_decoder_quiescent = 0; 4480 hdw->state_decoder_run = 0; 4481 /* paranoia - solve race if timer(s) just completed */ 4482 - del_timer_sync(&hdw->quiescent_timer); 4483 /* Kill the stabilization timer, in case we're killing the 4484 encoder before the previous stabilization interval has 4485 been properly timed. */ 4486 - del_timer_sync(&hdw->decoder_stabilization_timer); 4487 hdw->state_decoder_ready = 0; 4488 } else { 4489 if (!hdw->state_decoder_quiescent) { ··· 4517 !hdw->state_pipeline_config || 4518 !hdw->state_encoder_config || 4519 !hdw->state_encoder_ok) return 0; 4520 - del_timer_sync(&hdw->quiescent_timer); 4521 if (hdw->flag_decoder_missed) return 0; 4522 if (pvr2_decoder_enable(hdw,!0) < 0) return 0; 4523 hdw->state_decoder_quiescent = 0;
··· 1527 1528 /* Encoder is about to be reset so note that as far as we're 1529 concerned now, the encoder has never been run. */ 1530 + timer_delete_sync(&hdw->encoder_run_timer); 1531 if (hdw->state_encoder_runok) { 1532 hdw->state_encoder_runok = 0; 1533 trace_stbit("state_encoder_runok",hdw->state_encoder_runok); ··· 3724 hdw->cmd_debug_state = 5; 3725 3726 /* Stop timer */ 3727 + timer_delete_sync(&timer.timer); 3728 3729 hdw->cmd_debug_state = 6; 3730 status = 0; ··· 4248 hdw->state_encoder_waitok = 0; 4249 trace_stbit("state_encoder_waitok",hdw->state_encoder_waitok); 4250 /* paranoia - solve race if timer just completed */ 4251 + timer_delete_sync(&hdw->encoder_wait_timer); 4252 } else { 4253 if (!hdw->state_pathway_ok || 4254 (hdw->pathway_state != PVR2_PATHWAY_ANALOG) || ··· 4261 anything has happened that might have disturbed 4262 the encoder. This should be a rare case. */ 4263 if (timer_pending(&hdw->encoder_wait_timer)) { 4264 + timer_delete_sync(&hdw->encoder_wait_timer); 4265 } 4266 if (hdw->state_encoder_waitok) { 4267 /* Must clear the state - therefore we did ··· 4399 if (hdw->state_encoder_run) { 4400 if (!state_check_disable_encoder_run(hdw)) return 0; 4401 if (hdw->state_encoder_ok) { 4402 + timer_delete_sync(&hdw->encoder_run_timer); 4403 if (pvr2_encoder_stop(hdw) < 0) return !0; 4404 } 4405 hdw->state_encoder_run = 0; ··· 4479 hdw->state_decoder_quiescent = 0; 4480 hdw->state_decoder_run = 0; 4481 /* paranoia - solve race if timer(s) just completed */ 4482 + timer_delete_sync(&hdw->quiescent_timer); 4483 /* Kill the stabilization timer, in case we're killing the 4484 encoder before the previous stabilization interval has 4485 been properly timed. */ 4486 + timer_delete_sync(&hdw->decoder_stabilization_timer); 4487 hdw->state_decoder_ready = 0; 4488 } else { 4489 if (!hdw->state_decoder_quiescent) { ··· 4517 !hdw->state_pipeline_config || 4518 !hdw->state_encoder_config || 4519 !hdw->state_encoder_ok) return 0; 4520 + timer_delete_sync(&hdw->quiescent_timer); 4521 if (hdw->flag_decoder_missed) return 0; 4522 if (pvr2_decoder_enable(hdw,!0) < 0) return 0; 4523 hdw->state_decoder_quiescent = 0;
+2 -2
drivers/memory/tegra/tegra210-emc-core.c
··· 583 584 static void tegra210_emc_training_stop(struct tegra210_emc *emc) 585 { 586 - del_timer(&emc->training); 587 } 588 589 static unsigned int tegra210_emc_get_temperature(struct tegra210_emc *emc) ··· 666 static void tegra210_emc_poll_refresh_stop(struct tegra210_emc *emc) 667 { 668 atomic_set(&emc->refresh_poll, 0); 669 - del_timer_sync(&emc->refresh_timer); 670 } 671 672 static void tegra210_emc_poll_refresh_start(struct tegra210_emc *emc)
··· 583 584 static void tegra210_emc_training_stop(struct tegra210_emc *emc) 585 { 586 + timer_delete(&emc->training); 587 } 588 589 static unsigned int tegra210_emc_get_temperature(struct tegra210_emc *emc) ··· 666 static void tegra210_emc_poll_refresh_stop(struct tegra210_emc *emc) 667 { 668 atomic_set(&emc->refresh_poll, 0); 669 + timer_delete_sync(&emc->refresh_timer); 670 } 671 672 static void tegra210_emc_poll_refresh_start(struct tegra210_emc *emc)
+2 -2
drivers/memstick/core/ms_block.c
··· 1510 if (msb->cache_block_lba == MS_BLOCK_INVALID) 1511 return; 1512 1513 - del_timer_sync(&msb->cache_flush_timer); 1514 1515 dbg_verbose("Discarding the write cache"); 1516 msb->cache_block_lba = MS_BLOCK_INVALID; ··· 2027 msb->io_queue_stopped = true; 2028 spin_unlock_irqrestore(&msb->q_lock, flags); 2029 2030 - del_timer_sync(&msb->cache_flush_timer); 2031 flush_workqueue(msb->io_queue); 2032 2033 spin_lock_irqsave(&msb->q_lock, flags);
··· 1510 if (msb->cache_block_lba == MS_BLOCK_INVALID) 1511 return; 1512 1513 + timer_delete_sync(&msb->cache_flush_timer); 1514 1515 dbg_verbose("Discarding the write cache"); 1516 msb->cache_block_lba = MS_BLOCK_INVALID; ··· 2027 msb->io_queue_stopped = true; 2028 spin_unlock_irqrestore(&msb->q_lock, flags); 2029 2030 + timer_delete_sync(&msb->cache_flush_timer); 2031 flush_workqueue(msb->io_queue); 2032 2033 spin_lock_irqsave(&msb->q_lock, flags);
+1 -1
drivers/memstick/host/jmb38x_ms.c
··· 469 unsigned int t_val = 0; 470 int rc; 471 472 - del_timer(&host->timer); 473 474 dev_dbg(&msh->dev, "c control %08x\n", 475 readl(host->addr + HOST_CONTROL));
··· 469 unsigned int t_val = 0; 470 int rc; 471 472 + timer_delete(&host->timer); 473 474 dev_dbg(&msh->dev, "c control %08x\n", 475 readl(host->addr + HOST_CONTROL));
+2 -2
drivers/memstick/host/r592.c
··· 827 /* Stop the processing thread. 828 That ensures that we won't take any more requests */ 829 kthread_stop(dev->io_thread); 830 - del_timer_sync(&dev->detect_timer); 831 r592_enable_device(dev, false); 832 833 while (!error && dev->req) { ··· 854 855 r592_clear_interrupts(dev); 856 memstick_suspend_host(dev->host); 857 - del_timer_sync(&dev->detect_timer); 858 return 0; 859 } 860
··· 827 /* Stop the processing thread. 828 That ensures that we won't take any more requests */ 829 kthread_stop(dev->io_thread); 830 + timer_delete_sync(&dev->detect_timer); 831 r592_enable_device(dev, false); 832 833 while (!error && dev->req) { ··· 854 855 r592_clear_interrupts(dev); 856 memstick_suspend_host(dev->host); 857 + timer_delete_sync(&dev->detect_timer); 858 return 0; 859 } 860
+2 -2
drivers/memstick/host/tifm_ms.c
··· 337 struct memstick_host *msh = tifm_get_drvdata(sock); 338 int rc; 339 340 - del_timer(&host->timer); 341 342 host->req->int_reg = readl(sock->addr + SOCK_MS_STATUS) & 0xff; 343 host->req->int_reg = (host->req->int_reg & 1) ··· 600 spin_lock_irqsave(&sock->lock, flags); 601 host->eject = 1; 602 if (host->req) { 603 - del_timer(&host->timer); 604 writel(TIFM_FIFO_INT_SETALL, 605 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); 606 writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL);
··· 337 struct memstick_host *msh = tifm_get_drvdata(sock); 338 int rc; 339 340 + timer_delete(&host->timer); 341 342 host->req->int_reg = readl(sock->addr + SOCK_MS_STATUS) & 0xff; 343 host->req->int_reg = (host->req->int_reg & 1) ··· 600 spin_lock_irqsave(&sock->lock, flags); 601 host->eject = 1; 602 if (host->req) { 603 + timer_delete(&host->timer); 604 writel(TIFM_FIFO_INT_SETALL, 605 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); 606 writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL);
+2 -2
drivers/misc/bcm-vk/bcm_vk_tty.c
··· 177 vk->tty[tty->index].is_opened = false; 178 179 if (tty->count == 1) 180 - del_timer_sync(&vk->serial_timer); 181 } 182 183 static void bcm_vk_tty_doorbell(struct bcm_vk *vk, u32 db_val) ··· 304 { 305 int i; 306 307 - del_timer_sync(&vk->serial_timer); 308 for (i = 0; i < BCM_VK_NUM_TTY; ++i) { 309 tty_port_unregister_device(&vk->tty[i].port, 310 vk->tty_drv,
··· 177 vk->tty[tty->index].is_opened = false; 178 179 if (tty->count == 1) 180 + timer_delete_sync(&vk->serial_timer); 181 } 182 183 static void bcm_vk_tty_doorbell(struct bcm_vk *vk, u32 db_val) ··· 304 { 305 int i; 306 307 + timer_delete_sync(&vk->serial_timer); 308 for (i = 0; i < BCM_VK_NUM_TTY; ++i) { 309 tty_port_unregister_device(&vk->tty[i].port, 310 vk->tty_drv,
+1 -1
drivers/misc/cardreader/rtsx_usb.c
··· 53 ucr->sg_timer.expires = jiffies + msecs_to_jiffies(timeout); 54 add_timer(&ucr->sg_timer); 55 usb_sg_wait(&ucr->current_sg); 56 - if (!del_timer_sync(&ucr->sg_timer)) 57 ret = -ETIMEDOUT; 58 else 59 ret = ucr->current_sg.status;
··· 53 ucr->sg_timer.expires = jiffies + msecs_to_jiffies(timeout); 54 add_timer(&ucr->sg_timer); 55 usb_sg_wait(&ucr->current_sg); 56 + if (!timer_delete_sync(&ucr->sg_timer)) 57 ret = -ETIMEDOUT; 58 else 59 ret = ucr->current_sg.status;
+1 -1
drivers/misc/sgi-xp/xpc_main.c
··· 202 static void 203 xpc_stop_hb_beater(void) 204 { 205 - del_timer_sync(&xpc_hb_timer); 206 xpc_arch_ops.heartbeat_exit(); 207 } 208
··· 202 static void 203 xpc_stop_hb_beater(void) 204 { 205 + timer_delete_sync(&xpc_hb_timer); 206 xpc_arch_ops.heartbeat_exit(); 207 } 208
+1 -1
drivers/misc/sgi-xp/xpc_partition.c
··· 291 292 /* Cancel the timer function if not called from it */ 293 if (!from_timer) 294 - del_timer_sync(&part->disengage_timer); 295 296 DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING && 297 part->act_state != XPC_P_AS_INACTIVE);
··· 291 292 /* Cancel the timer function if not called from it */ 293 if (!from_timer) 294 + timer_delete_sync(&part->disengage_timer); 295 296 DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING && 297 part->act_state != XPC_P_AS_INACTIVE);
+2 -2
drivers/mmc/core/host.c
··· 147 { 148 mmc_retune_unpause(host); 149 host->can_retune = 0; 150 - del_timer_sync(&host->retune_timer); 151 mmc_retune_clear(host); 152 } 153 154 void mmc_retune_timer_stop(struct mmc_host *host) 155 { 156 - del_timer_sync(&host->retune_timer); 157 } 158 EXPORT_SYMBOL(mmc_retune_timer_stop); 159
··· 147 { 148 mmc_retune_unpause(host); 149 host->can_retune = 0; 150 + timer_delete_sync(&host->retune_timer); 151 mmc_retune_clear(host); 152 } 153 154 void mmc_retune_timer_stop(struct mmc_host *host) 155 { 156 + timer_delete_sync(&host->retune_timer); 157 } 158 EXPORT_SYMBOL(mmc_retune_timer_stop); 159
+4 -4
drivers/mmc/host/atmel-mci.c
··· 1592 1593 WARN_ON(host->cmd || host->data); 1594 1595 - del_timer(&host->timer); 1596 1597 /* 1598 * Update the MMC clock rate if necessary. This may be ··· 2357 2358 if (slot->detect_pin) { 2359 free_irq(gpiod_to_irq(slot->detect_pin), slot); 2360 - del_timer_sync(&slot->detect_timer); 2361 } 2362 2363 slot->host->slot[id] = NULL; ··· 2585 pm_runtime_disable(dev); 2586 pm_runtime_put_noidle(dev); 2587 2588 - del_timer_sync(&host->timer); 2589 if (!IS_ERR(host->dma.chan)) 2590 dma_release_channel(host->dma.chan); 2591 err_dma_probe_defer: ··· 2613 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS); 2614 atmci_readl(host, ATMCI_SR); 2615 2616 - del_timer_sync(&host->timer); 2617 if (!IS_ERR(host->dma.chan)) 2618 dma_release_channel(host->dma.chan); 2619
··· 1592 1593 WARN_ON(host->cmd || host->data); 1594 1595 + timer_delete(&host->timer); 1596 1597 /* 1598 * Update the MMC clock rate if necessary. This may be ··· 2357 2358 if (slot->detect_pin) { 2359 free_irq(gpiod_to_irq(slot->detect_pin), slot); 2360 + timer_delete_sync(&slot->detect_timer); 2361 } 2362 2363 slot->host->slot[id] = NULL; ··· 2585 pm_runtime_disable(dev); 2586 pm_runtime_put_noidle(dev); 2587 2588 + timer_delete_sync(&host->timer); 2589 if (!IS_ERR(host->dma.chan)) 2590 dma_release_channel(host->dma.chan); 2591 err_dma_probe_defer: ··· 2613 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS); 2614 atmci_readl(host, ATMCI_SR); 2615 2616 + timer_delete_sync(&host->timer); 2617 if (!IS_ERR(host->dma.chan)) 2618 dma_release_channel(host->dma.chan); 2619
+8 -8
drivers/mmc/host/dw_mmc.c
··· 2040 * Really be certain that the timer has stopped. This is a bit of 2041 * paranoia and could only really happen if we had really bad 2042 * interrupt latency and the interrupt routine and timeout were 2043 - * running concurrently so that the del_timer() in the interrupt 2044 * handler couldn't run. 2045 */ 2046 - WARN_ON(del_timer_sync(&host->cto_timer)); 2047 clear_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2048 2049 return true; ··· 2055 return false; 2056 2057 /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */ 2058 - WARN_ON(del_timer_sync(&host->dto_timer)); 2059 clear_bit(EVENT_DATA_COMPLETE, &host->pending_events); 2060 2061 return true; ··· 2788 2789 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) 2790 { 2791 - del_timer(&host->cto_timer); 2792 2793 if (!host->cmd_status) 2794 host->cmd_status = status; ··· 2832 dw_mci_cmd_interrupt(host, pending); 2833 spin_unlock(&host->irq_lock); 2834 2835 - del_timer(&host->cmd11_timer); 2836 } 2837 2838 if (pending & DW_MCI_CMD_ERROR_FLAGS) { 2839 spin_lock(&host->irq_lock); 2840 2841 - del_timer(&host->cto_timer); 2842 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); 2843 host->cmd_status = pending; 2844 smp_wmb(); /* drain writebuffer */ ··· 2851 spin_lock(&host->irq_lock); 2852 2853 if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT) 2854 - del_timer(&host->dto_timer); 2855 2856 /* if there is an error report DATA_ERROR */ 2857 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); ··· 2872 if (pending & SDMMC_INT_DATA_OVER) { 2873 spin_lock(&host->irq_lock); 2874 2875 - del_timer(&host->dto_timer); 2876 2877 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); 2878 if (!host->data_status)
··· 2040 * Really be certain that the timer has stopped. This is a bit of 2041 * paranoia and could only really happen if we had really bad 2042 * interrupt latency and the interrupt routine and timeout were 2043 + * running concurrently so that the timer_delete() in the interrupt 2044 * handler couldn't run. 2045 */ 2046 + WARN_ON(timer_delete_sync(&host->cto_timer)); 2047 clear_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2048 2049 return true; ··· 2055 return false; 2056 2057 /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */ 2058 + WARN_ON(timer_delete_sync(&host->dto_timer)); 2059 clear_bit(EVENT_DATA_COMPLETE, &host->pending_events); 2060 2061 return true; ··· 2788 2789 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) 2790 { 2791 + timer_delete(&host->cto_timer); 2792 2793 if (!host->cmd_status) 2794 host->cmd_status = status; ··· 2832 dw_mci_cmd_interrupt(host, pending); 2833 spin_unlock(&host->irq_lock); 2834 2835 + timer_delete(&host->cmd11_timer); 2836 } 2837 2838 if (pending & DW_MCI_CMD_ERROR_FLAGS) { 2839 spin_lock(&host->irq_lock); 2840 2841 + timer_delete(&host->cto_timer); 2842 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); 2843 host->cmd_status = pending; 2844 smp_wmb(); /* drain writebuffer */ ··· 2851 spin_lock(&host->irq_lock); 2852 2853 if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT) 2854 + timer_delete(&host->dto_timer); 2855 2856 /* if there is an error report DATA_ERROR */ 2857 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); ··· 2872 if (pending & SDMMC_INT_DATA_OVER) { 2873 spin_lock(&host->irq_lock); 2874 2875 + timer_delete(&host->dto_timer); 2876 2877 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); 2878 if (!host->data_status)
+2 -2
drivers/mmc/host/jz4740_mmc.c
··· 862 863 if (host->req && cmd && irq_reg) { 864 if (test_and_clear_bit(0, &host->waiting)) { 865 - del_timer(&host->timeout_timer); 866 867 if (status & JZ_MMC_STATUS_TIMEOUT_RES) { 868 cmd->error = -ETIMEDOUT; ··· 1162 { 1163 struct jz4740_mmc_host *host = platform_get_drvdata(pdev); 1164 1165 - del_timer_sync(&host->timeout_timer); 1166 jz4740_mmc_set_irq_enabled(host, 0xff, false); 1167 jz4740_mmc_reset(host); 1168
··· 862 863 if (host->req && cmd && irq_reg) { 864 if (test_and_clear_bit(0, &host->waiting)) { 865 + timer_delete(&host->timeout_timer); 866 867 if (status & JZ_MMC_STATUS_TIMEOUT_RES) { 868 cmd->error = -ETIMEDOUT; ··· 1162 { 1163 struct jz4740_mmc_host *host = platform_get_drvdata(pdev); 1164 1165 + timer_delete_sync(&host->timeout_timer); 1166 jz4740_mmc_set_irq_enabled(host, 0xff, false); 1167 jz4740_mmc_reset(host); 1168
+2 -2
drivers/mmc/host/meson-mx-sdio.c
··· 446 if (WARN_ON(!cmd)) 447 return IRQ_HANDLED; 448 449 - del_timer_sync(&host->cmd_timeout); 450 451 if (cmd->data) { 452 dma_unmap_sg(mmc_dev(host->mmc), cmd->data->sg, ··· 733 struct meson_mx_mmc_host *host = platform_get_drvdata(pdev); 734 struct device *slot_dev = mmc_dev(host->mmc); 735 736 - del_timer_sync(&host->cmd_timeout); 737 738 mmc_remove_host(host->mmc); 739
··· 446 if (WARN_ON(!cmd)) 447 return IRQ_HANDLED; 448 449 + timer_delete_sync(&host->cmd_timeout); 450 451 if (cmd->data) { 452 dma_unmap_sg(mmc_dev(host->mmc), cmd->data->sg, ··· 733 struct meson_mx_mmc_host *host = platform_get_drvdata(pdev); 734 struct device *slot_dev = mmc_dev(host->mmc); 735 736 + timer_delete_sync(&host->cmd_timeout); 737 738 mmc_remove_host(host->mmc); 739
+2 -2
drivers/mmc/host/mvsdio.c
··· 464 struct mmc_command *cmd = mrq->cmd; 465 u32 err_status = 0; 466 467 - del_timer(&host->timer); 468 host->mrq = NULL; 469 470 host->intr_en &= MVSD_NOR_CARD_INT; ··· 803 struct mvsd_host *host = mmc_priv(mmc); 804 805 mmc_remove_host(mmc); 806 - del_timer_sync(&host->timer); 807 mvsd_power_down(host); 808 809 if (!IS_ERR(host->clk))
··· 464 struct mmc_command *cmd = mrq->cmd; 465 u32 err_status = 0; 466 467 + timer_delete(&host->timer); 468 host->mrq = NULL; 469 470 host->intr_en &= MVSD_NOR_CARD_INT; ··· 803 struct mvsd_host *host = mmc_priv(mmc); 804 805 mmc_remove_host(mmc); 806 + timer_delete_sync(&host->timer); 807 mvsd_power_down(host); 808 809 if (!IS_ERR(host->clk))
+2 -2
drivers/mmc/host/mxcmmc.c
··· 352 struct mxcmci_host *host = data; 353 u32 stat; 354 355 - del_timer(&host->watchdog); 356 357 stat = mxcmci_readl(host, MMC_REG_STATUS); 358 ··· 737 mxcmci_cmd_done(host, stat); 738 739 if (mxcmci_use_dma(host) && (stat & STATUS_WRITE_OP_DONE)) { 740 - del_timer(&host->watchdog); 741 mxcmci_data_done(host, stat); 742 } 743
··· 352 struct mxcmci_host *host = data; 353 u32 stat; 354 355 + timer_delete(&host->watchdog); 356 357 stat = mxcmci_readl(host, MMC_REG_STATUS); 358 ··· 737 mxcmci_cmd_done(host, stat); 738 739 if (mxcmci_use_dma(host) && (stat & STATUS_WRITE_OP_DONE)) { 740 + timer_delete(&host->watchdog); 741 mxcmci_data_done(host, stat); 742 } 743
+5 -5
drivers/mmc/host/omap.c
··· 214 host->mmc = slot->mmc; 215 spin_unlock_irqrestore(&host->slot_lock, flags); 216 no_claim: 217 - del_timer(&host->clk_timer); 218 if (host->current_slot != slot || !claimed) 219 mmc_omap_fclk_offdelay(host->current_slot); 220 ··· 273 /* Keeps clock running for at least 8 cycles on valid freq */ 274 mod_timer(&host->clk_timer, jiffies + HZ/10); 275 else { 276 - del_timer(&host->clk_timer); 277 mmc_omap_fclk_offdelay(slot); 278 mmc_omap_fclk_enable(host, 0); 279 } ··· 564 { 565 host->cmd = NULL; 566 567 - del_timer(&host->cmd_abort_timer); 568 569 if (cmd->flags & MMC_RSP_PRESENT) { 570 if (cmd->flags & MMC_RSP_136) { ··· 836 } 837 838 if (cmd_error && host->data) { 839 - del_timer(&host->cmd_abort_timer); 840 host->abort = 1; 841 OMAP_MMC_WRITE(host, IE, 0); 842 disable_irq_nosync(host->irq); ··· 1365 device_remove_file(&mmc->class_dev, &dev_attr_cover_switch); 1366 1367 cancel_work_sync(&slot->cover_bh_work); 1368 - del_timer_sync(&slot->cover_timer); 1369 flush_workqueue(slot->host->mmc_omap_wq); 1370 1371 mmc_remove_host(mmc);
··· 214 host->mmc = slot->mmc; 215 spin_unlock_irqrestore(&host->slot_lock, flags); 216 no_claim: 217 + timer_delete(&host->clk_timer); 218 if (host->current_slot != slot || !claimed) 219 mmc_omap_fclk_offdelay(host->current_slot); 220 ··· 273 /* Keeps clock running for at least 8 cycles on valid freq */ 274 mod_timer(&host->clk_timer, jiffies + HZ/10); 275 else { 276 + timer_delete(&host->clk_timer); 277 mmc_omap_fclk_offdelay(slot); 278 mmc_omap_fclk_enable(host, 0); 279 } ··· 564 { 565 host->cmd = NULL; 566 567 + timer_delete(&host->cmd_abort_timer); 568 569 if (cmd->flags & MMC_RSP_PRESENT) { 570 if (cmd->flags & MMC_RSP_136) { ··· 836 } 837 838 if (cmd_error && host->data) { 839 + timer_delete(&host->cmd_abort_timer); 840 host->abort = 1; 841 OMAP_MMC_WRITE(host, IE, 0); 842 disable_irq_nosync(host->irq); ··· 1365 device_remove_file(&mmc->class_dev, &dev_attr_cover_switch); 1366 1367 cancel_work_sync(&slot->cover_bh_work); 1368 + timer_delete_sync(&slot->cover_timer); 1369 flush_workqueue(slot->host->mmc_omap_wq); 1370 1371 mmc_remove_host(mmc);
+4 -4
drivers/mmc/host/sdhci.c
··· 517 static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq) 518 { 519 if (sdhci_data_line_cmd(mrq->cmd)) 520 - del_timer(&host->data_timer); 521 else 522 - del_timer(&host->timer); 523 } 524 525 static inline bool sdhci_has_requests(struct sdhci_host *host) ··· 4976 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4977 free_irq(host->irq, host); 4978 4979 - del_timer_sync(&host->timer); 4980 - del_timer_sync(&host->data_timer); 4981 4982 destroy_workqueue(host->complete_wq); 4983
··· 517 static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq) 518 { 519 if (sdhci_data_line_cmd(mrq->cmd)) 520 + timer_delete(&host->data_timer); 521 else 522 + timer_delete(&host->timer); 523 } 524 525 static inline bool sdhci_has_requests(struct sdhci_host *host) ··· 4976 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4977 free_irq(host->irq, host); 4978 4979 + timer_delete_sync(&host->timer); 4980 + timer_delete_sync(&host->data_timer); 4981 4982 destroy_workqueue(host->complete_wq); 4983
+1 -1
drivers/mmc/host/tifm_sd.c
··· 735 736 spin_lock_irqsave(&sock->lock, flags); 737 738 - del_timer(&host->timer); 739 mrq = host->req; 740 host->req = NULL; 741
··· 735 736 spin_lock_irqsave(&sock->lock, flags); 737 738 + timer_delete(&host->timer); 739 mrq = host->req; 740 host->req = NULL; 741
+2 -2
drivers/mmc/host/via-sdmmc.c
··· 971 972 spin_lock_irqsave(&host->lock, flags); 973 974 - del_timer(&host->timer); 975 mrq = host->mrq; 976 host->mrq = NULL; 977 host->cmd = NULL; ··· 1202 1203 free_irq(pcidev->irq, sdhost); 1204 1205 - del_timer_sync(&sdhost->timer); 1206 1207 cancel_work_sync(&sdhost->finish_bh_work); 1208
··· 971 972 spin_lock_irqsave(&host->lock, flags); 973 974 + timer_delete(&host->timer); 975 mrq = host->mrq; 976 host->mrq = NULL; 977 host->cmd = NULL; ··· 1202 1203 free_irq(pcidev->irq, sdhost); 1204 1205 + timer_delete_sync(&sdhost->timer); 1206 1207 cancel_work_sync(&sdhost->finish_bh_work); 1208
+3 -3
drivers/mmc/host/vub300.c
··· 1452 (linear_length / 16384)); 1453 add_timer(&vub300->sg_transfer_timer); 1454 usb_sg_wait(&vub300->sg_request); 1455 - del_timer(&vub300->sg_transfer_timer); 1456 if (vub300->sg_request.status < 0) { 1457 cmd->error = vub300->sg_request.status; 1458 data->bytes_xfered = 0; ··· 1572 if (cmd->error) { 1573 data->bytes_xfered = 0; 1574 } else { 1575 - del_timer(&vub300->sg_transfer_timer); 1576 if (vub300->sg_request.status < 0) { 1577 cmd->error = vub300->sg_request.status; 1578 data->bytes_xfered = 0; ··· 2339 2340 return 0; 2341 error6: 2342 - del_timer_sync(&vub300->inactivity_timer); 2343 error5: 2344 mmc_free_host(mmc); 2345 /*
··· 1452 (linear_length / 16384)); 1453 add_timer(&vub300->sg_transfer_timer); 1454 usb_sg_wait(&vub300->sg_request); 1455 + timer_delete(&vub300->sg_transfer_timer); 1456 if (vub300->sg_request.status < 0) { 1457 cmd->error = vub300->sg_request.status; 1458 data->bytes_xfered = 0; ··· 1572 if (cmd->error) { 1573 data->bytes_xfered = 0; 1574 } else { 1575 + timer_delete(&vub300->sg_transfer_timer); 1576 if (vub300->sg_request.status < 0) { 1577 cmd->error = vub300->sg_request.status; 1578 data->bytes_xfered = 0; ··· 2339 2340 return 0; 2341 error6: 2342 + timer_delete_sync(&vub300->inactivity_timer); 2343 error5: 2344 mmc_free_host(mmc); 2345 /*
+1 -1
drivers/mmc/host/wbsd.c
··· 1261 host = mmc_priv(mmc); 1262 BUG_ON(host == NULL); 1263 1264 - del_timer_sync(&host->ignore_timer); 1265 1266 mmc_free_host(mmc); 1267 }
··· 1261 host = mmc_priv(mmc); 1262 BUG_ON(host == NULL); 1263 1264 + timer_delete_sync(&host->ignore_timer); 1265 1266 mmc_free_host(mmc); 1267 }
+2 -2
drivers/most/most_usb.c
··· 257 mdev->padding_active[channel] = false; 258 259 if (mdev->conf[channel].data_type == MOST_CH_ASYNC) { 260 - del_timer_sync(&mdev->link_stat_timer); 261 cancel_work_sync(&mdev->poll_work_obj); 262 } 263 mutex_unlock(&mdev->io_mutex); ··· 1115 mdev->usb_device = NULL; 1116 mutex_unlock(&mdev->io_mutex); 1117 1118 - del_timer_sync(&mdev->link_stat_timer); 1119 cancel_work_sync(&mdev->poll_work_obj); 1120 1121 if (mdev->dci)
··· 257 mdev->padding_active[channel] = false; 258 259 if (mdev->conf[channel].data_type == MOST_CH_ASYNC) { 260 + timer_delete_sync(&mdev->link_stat_timer); 261 cancel_work_sync(&mdev->poll_work_obj); 262 } 263 mutex_unlock(&mdev->io_mutex); ··· 1115 mdev->usb_device = NULL; 1116 mutex_unlock(&mdev->io_mutex); 1117 1118 + timer_delete_sync(&mdev->link_stat_timer); 1119 cancel_work_sync(&mdev->poll_work_obj); 1120 1121 if (mdev->dci)
+2 -2
drivers/mtd/sm_ftl.c
··· 1067 sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset); 1068 1069 /* No need in flush thread running now */ 1070 - del_timer(&ftl->timer); 1071 mutex_lock(&ftl->mutex); 1072 1073 zone = sm_get_zone(ftl, zone_num); ··· 1111 { 1112 struct sm_ftl *ftl = dev->priv; 1113 1114 - del_timer_sync(&ftl->timer); 1115 cancel_work_sync(&ftl->flush_work); 1116 mutex_lock(&ftl->mutex); 1117 sm_cache_flush(ftl);
··· 1067 sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset); 1068 1069 /* No need in flush thread running now */ 1070 + timer_delete(&ftl->timer); 1071 mutex_lock(&ftl->mutex); 1072 1073 zone = sm_get_zone(ftl, zone_num); ··· 1111 { 1112 struct sm_ftl *ftl = dev->priv; 1113 1114 + timer_delete_sync(&ftl->timer); 1115 cancel_work_sync(&ftl->flush_work); 1116 mutex_lock(&ftl->mutex); 1117 sm_cache_flush(ftl);
+1 -1
drivers/net/arcnet/arcnet.c
··· 616 struct arcnet_local *lp = netdev_priv(dev); 617 618 arcnet_led_event(dev, ARCNET_LED_EVENT_STOP); 619 - del_timer_sync(&lp->timer); 620 621 netif_stop_queue(dev); 622 netif_carrier_off(dev);
··· 616 struct arcnet_local *lp = netdev_priv(dev); 617 618 arcnet_led_event(dev, ARCNET_LED_EVENT_STOP); 619 + timer_delete_sync(&lp->timer); 620 621 netif_stop_queue(dev); 622 netif_carrier_off(dev);
+6 -6
drivers/net/can/grcan.c
··· 778 */ 779 if (priv->need_txbug_workaround && 780 (sources & (GRCAN_IRQ_TX | GRCAN_IRQ_TXLOSS))) { 781 - del_timer(&priv->hang_timer); 782 } 783 784 /* Frame(s) received or transmitted */ ··· 817 spin_lock_irqsave(&priv->lock, flags); 818 819 priv->resetting = false; 820 - del_timer(&priv->hang_timer); 821 - del_timer(&priv->rr_timer); 822 823 if (!priv->closing) { 824 /* Save and reset - config register preserved by grcan_reset */ ··· 1108 priv->closing = true; 1109 if (priv->need_txbug_workaround) { 1110 spin_unlock_irqrestore(&priv->lock, flags); 1111 - del_timer_sync(&priv->hang_timer); 1112 - del_timer_sync(&priv->rr_timer); 1113 spin_lock_irqsave(&priv->lock, flags); 1114 } 1115 netif_stop_queue(dev); ··· 1147 * so prevent a running reset while catching up 1148 */ 1149 if (priv->need_txbug_workaround) 1150 - del_timer(&priv->hang_timer); 1151 } 1152 1153 spin_unlock_irqrestore(&priv->lock, flags);
··· 778 */ 779 if (priv->need_txbug_workaround && 780 (sources & (GRCAN_IRQ_TX | GRCAN_IRQ_TXLOSS))) { 781 + timer_delete(&priv->hang_timer); 782 } 783 784 /* Frame(s) received or transmitted */ ··· 817 spin_lock_irqsave(&priv->lock, flags); 818 819 priv->resetting = false; 820 + timer_delete(&priv->hang_timer); 821 + timer_delete(&priv->rr_timer); 822 823 if (!priv->closing) { 824 /* Save and reset - config register preserved by grcan_reset */ ··· 1108 priv->closing = true; 1109 if (priv->need_txbug_workaround) { 1110 spin_unlock_irqrestore(&priv->lock, flags); 1111 + timer_delete_sync(&priv->hang_timer); 1112 + timer_delete_sync(&priv->rr_timer); 1113 spin_lock_irqsave(&priv->lock, flags); 1114 } 1115 netif_stop_queue(dev); ··· 1147 * so prevent a running reset while catching up 1148 */ 1149 if (priv->need_txbug_workaround) 1150 + timer_delete(&priv->hang_timer); 1151 } 1152 1153 spin_unlock_irqrestore(&priv->lock, flags);
+3 -3
drivers/net/can/kvaser_pciefd.c
··· 631 u32 mode; 632 unsigned long irq; 633 634 - del_timer(&can->bec_poll_timer); 635 if (!completion_done(&can->flush_comp)) 636 kvaser_pciefd_start_controller_flush(can); 637 ··· 742 ret = -ETIMEDOUT; 743 } else { 744 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 745 - del_timer(&can->bec_poll_timer); 746 } 747 can->can.state = CAN_STATE_STOPPED; 748 close_candev(netdev); ··· 1854 if (can) { 1855 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 1856 unregister_candev(can->can.dev); 1857 - del_timer(&can->bec_poll_timer); 1858 kvaser_pciefd_pwm_stop(can); 1859 free_candev(can->can.dev); 1860 }
··· 631 u32 mode; 632 unsigned long irq; 633 634 + timer_delete(&can->bec_poll_timer); 635 if (!completion_done(&can->flush_comp)) 636 kvaser_pciefd_start_controller_flush(can); 637 ··· 742 ret = -ETIMEDOUT; 743 } else { 744 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 745 + timer_delete(&can->bec_poll_timer); 746 } 747 can->can.state = CAN_STATE_STOPPED; 748 close_candev(netdev); ··· 1854 if (can) { 1855 iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); 1856 unregister_candev(can->can.dev); 1857 + timer_delete(&can->bec_poll_timer); 1858 kvaser_pciefd_pwm_stop(can); 1859 free_candev(can->can.dev); 1860 }
+1 -1
drivers/net/can/sja1000/peak_pcmcia.c
··· 167 */ 168 static void pcan_stop_led_timer(struct pcan_pccard *card) 169 { 170 - del_timer_sync(&card->led_timer); 171 } 172 173 /*
··· 167 */ 168 static void pcan_stop_led_timer(struct pcan_pccard *card) 169 { 170 + timer_delete_sync(&card->led_timer); 171 } 172 173 /*
+2 -2
drivers/net/dsa/mv88e6xxx/phy.c
··· 206 } 207 chip->ppu_disabled = 1; 208 } else { 209 - del_timer(&chip->ppu_timer); 210 ret = 0; 211 } 212 ··· 230 static void mv88e6xxx_phy_ppu_state_destroy(struct mv88e6xxx_chip *chip) 231 { 232 mutex_lock(&chip->ppu_mutex); 233 - del_timer_sync(&chip->ppu_timer); 234 cancel_work_sync(&chip->ppu_work); 235 mutex_unlock(&chip->ppu_mutex); 236 }
··· 206 } 207 chip->ppu_disabled = 1; 208 } else { 209 + timer_delete(&chip->ppu_timer); 210 ret = 0; 211 } 212 ··· 230 static void mv88e6xxx_phy_ppu_state_destroy(struct mv88e6xxx_chip *chip) 231 { 232 mutex_lock(&chip->ppu_mutex); 233 + timer_delete_sync(&chip->ppu_timer); 234 cancel_work_sync(&chip->ppu_work); 235 mutex_unlock(&chip->ppu_mutex); 236 }
+2 -2
drivers/net/dsa/sja1105/sja1105_ptp.c
··· 842 if (on) 843 sja1105_ptp_extts_setup_timer(&priv->ptp_data); 844 else 845 - del_timer_sync(&priv->ptp_data.extts_timer); 846 847 return 0; 848 } ··· 939 if (IS_ERR_OR_NULL(ptp_data->clock)) 940 return; 941 942 - del_timer_sync(&ptp_data->extts_timer); 943 ptp_cancel_worker_sync(ptp_data->clock); 944 skb_queue_purge(&ptp_data->skb_txtstamp_queue); 945 skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
··· 842 if (on) 843 sja1105_ptp_extts_setup_timer(&priv->ptp_data); 844 else 845 + timer_delete_sync(&priv->ptp_data.extts_timer); 846 847 return 0; 848 } ··· 939 if (IS_ERR_OR_NULL(ptp_data->clock)) 940 return; 941 942 + timer_delete_sync(&ptp_data->extts_timer); 943 ptp_cancel_worker_sync(ptp_data->clock); 944 skb_queue_purge(&ptp_data->skb_txtstamp_queue); 945 skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
+1 -1
drivers/net/eql.c
··· 254 * at the data structure it scans every so often... 255 */ 256 257 - del_timer_sync(&eql->timer); 258 259 eql_kill_slave_queue(&eql->queue); 260
··· 254 * at the data structure it scans every so often... 255 */ 256 257 + timer_delete_sync(&eql->timer); 258 259 eql_kill_slave_queue(&eql->queue); 260
+1 -1
drivers/net/ethernet/3com/3c515.c
··· 1414 dev->name, rx_nocopy, rx_copy, queued_packet); 1415 } 1416 1417 - del_timer_sync(&vp->timer); 1418 1419 /* Turn off statistics ASAP. We update lp->stats below. */ 1420 outw(StatsDisable, ioaddr + EL3_CMD);
··· 1414 dev->name, rx_nocopy, rx_copy, queued_packet); 1415 } 1416 1417 + timer_delete_sync(&vp->timer); 1418 1419 /* Turn off statistics ASAP. We update lp->stats below. */ 1420 outw(StatsDisable, ioaddr + EL3_CMD);
+1 -1
drivers/net/ethernet/3com/3c574_cs.c
··· 1140 1141 link->open--; 1142 netif_stop_queue(dev); 1143 - del_timer_sync(&lp->media); 1144 1145 return 0; 1146 }
··· 1140 1141 link->open--; 1142 netif_stop_queue(dev); 1143 + timer_delete_sync(&lp->media); 1144 1145 return 0; 1146 }
+1 -1
drivers/net/ethernet/3com/3c589_cs.c
··· 946 947 link->open--; 948 netif_stop_queue(dev); 949 - del_timer_sync(&lp->media); 950 951 return 0; 952 }
··· 946 947 link->open--; 948 netif_stop_queue(dev); 949 + timer_delete_sync(&lp->media); 950 951 return 0; 952 }
+1 -1
drivers/net/ethernet/3com/3c59x.c
··· 2691 netdev_reset_queue(dev); 2692 netif_stop_queue(dev); 2693 2694 - del_timer_sync(&vp->timer); 2695 2696 /* Turn off statistics ASAP. We update dev->stats below. */ 2697 iowrite16(StatsDisable, ioaddr + EL3_CMD);
··· 2691 netdev_reset_queue(dev); 2692 netif_stop_queue(dev); 2693 2694 + timer_delete_sync(&vp->timer); 2695 2696 /* Turn off statistics ASAP. We update dev->stats below. */ 2697 iowrite16(StatsDisable, ioaddr + EL3_CMD);
+1 -1
drivers/net/ethernet/8390/axnet_cs.c
··· 504 505 link->open--; 506 netif_stop_queue(dev); 507 - del_timer_sync(&info->watchdog); 508 509 return 0; 510 } /* axnet_close */
··· 504 505 link->open--; 506 netif_stop_queue(dev); 507 + timer_delete_sync(&info->watchdog); 508 509 return 0; 510 } /* axnet_close */
+1 -1
drivers/net/ethernet/8390/pcnet_cs.c
··· 947 948 link->open--; 949 netif_stop_queue(dev); 950 - del_timer_sync(&info->watchdog); 951 952 return 0; 953 } /* pcnet_close */
··· 947 948 link->open--; 949 netif_stop_queue(dev); 950 + timer_delete_sync(&info->watchdog); 951 952 return 0; 953 } /* pcnet_close */
+1 -1
drivers/net/ethernet/agere/et131x.c
··· 3639 free_irq(adapter->pdev->irq, netdev); 3640 3641 /* Stop the error timer */ 3642 - return del_timer_sync(&adapter->error_timer); 3643 } 3644 3645 /* et131x_set_packet_filter - Configures the Rx Packet filtering */
··· 3639 free_irq(adapter->pdev->irq, netdev); 3640 3641 /* Stop the error timer */ 3642 + return timer_delete_sync(&adapter->error_timer); 3643 } 3644 3645 /* et131x_set_packet_filter - Configures the Rx Packet filtering */
+3 -3
drivers/net/ethernet/amazon/ena/ena_netdev.c
··· 3245 3246 netif_carrier_off(netdev); 3247 3248 - del_timer_sync(&adapter->timer_service); 3249 3250 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); 3251 adapter->dev_up_before_reset = dev_up; ··· 4065 ena_free_mgmnt_irq(adapter); 4066 ena_disable_msix(adapter); 4067 err_worker_destroy: 4068 - del_timer(&adapter->timer_service); 4069 err_device_destroy: 4070 ena_com_delete_host_info(ena_dev); 4071 ena_com_admin_destroy(ena_dev); ··· 4104 /* Make sure timer and reset routine won't be called after 4105 * freeing device resources. 4106 */ 4107 - del_timer_sync(&adapter->timer_service); 4108 cancel_work_sync(&adapter->reset_task); 4109 4110 rtnl_lock(); /* lock released inside the below if-else block */
··· 3245 3246 netif_carrier_off(netdev); 3247 3248 + timer_delete_sync(&adapter->timer_service); 3249 3250 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); 3251 adapter->dev_up_before_reset = dev_up; ··· 4065 ena_free_mgmnt_irq(adapter); 4066 ena_disable_msix(adapter); 4067 err_worker_destroy: 4068 + timer_delete(&adapter->timer_service); 4069 err_device_destroy: 4070 ena_com_delete_host_info(ena_dev); 4071 ena_com_admin_destroy(ena_dev); ··· 4104 /* Make sure timer and reset routine won't be called after 4105 * freeing device resources. 4106 */ 4107 + timer_delete_sync(&adapter->timer_service); 4108 cancel_work_sync(&adapter->reset_task); 4109 4110 rtnl_lock(); /* lock released inside the below if-else block */
+1 -1
drivers/net/ethernet/amd/a2065.c
··· 486 volatile struct lance_regs *ll = lp->ll; 487 488 netif_stop_queue(dev); 489 - del_timer_sync(&lp->multicast_timer); 490 491 /* Stop the card */ 492 ll->rap = LE_CSR0;
··· 486 volatile struct lance_regs *ll = lp->ll; 487 488 netif_stop_queue(dev); 489 + timer_delete_sync(&lp->multicast_timer); 490 491 /* Stop the card */ 492 ll->rap = LE_CSR0;
+2 -2
drivers/net/ethernet/amd/amd8111e.c
··· 1173 1174 /* Delete ipg timer */ 1175 if (lp->options & OPTION_DYN_IPG_ENABLE) 1176 - del_timer_sync(&lp->ipg_data.ipg_timer); 1177 1178 spin_unlock_irq(&lp->lock); 1179 free_irq(dev->irq, dev); ··· 1598 /* stop chip */ 1599 spin_lock_irq(&lp->lock); 1600 if (lp->options & OPTION_DYN_IPG_ENABLE) 1601 - del_timer_sync(&lp->ipg_data.ipg_timer); 1602 amd8111e_stop_chip(lp); 1603 spin_unlock_irq(&lp->lock); 1604
··· 1173 1174 /* Delete ipg timer */ 1175 if (lp->options & OPTION_DYN_IPG_ENABLE) 1176 + timer_delete_sync(&lp->ipg_data.ipg_timer); 1177 1178 spin_unlock_irq(&lp->lock); 1179 free_irq(dev->irq, dev); ··· 1598 /* stop chip */ 1599 spin_lock_irq(&lp->lock); 1600 if (lp->options & OPTION_DYN_IPG_ENABLE) 1601 + timer_delete_sync(&lp->ipg_data.ipg_timer); 1602 amd8111e_stop_chip(lp); 1603 spin_unlock_irq(&lp->lock); 1604
+1 -1
drivers/net/ethernet/amd/declance.c
··· 842 volatile struct lance_regs *ll = lp->ll; 843 844 netif_stop_queue(dev); 845 - del_timer_sync(&lp->multicast_timer); 846 847 /* Stop the card */ 848 writereg(&ll->rap, LE_CSR0);
··· 842 volatile struct lance_regs *ll = lp->ll; 843 844 netif_stop_queue(dev); 845 + timer_delete_sync(&lp->multicast_timer); 846 847 /* Stop the card */ 848 writereg(&ll->rap, LE_CSR0);
+1 -1
drivers/net/ethernet/amd/pcnet32.c
··· 2630 struct pcnet32_private *lp = netdev_priv(dev); 2631 unsigned long flags; 2632 2633 - del_timer_sync(&lp->watchdog_timer); 2634 2635 netif_stop_queue(dev); 2636 napi_disable(&lp->napi);
··· 2630 struct pcnet32_private *lp = netdev_priv(dev); 2631 unsigned long flags; 2632 2633 + timer_delete_sync(&lp->watchdog_timer); 2634 2635 netif_stop_queue(dev); 2636 napi_disable(&lp->napi);
+1 -1
drivers/net/ethernet/amd/sunlance.c
··· 963 struct lance_private *lp = netdev_priv(dev); 964 965 netif_stop_queue(dev); 966 - del_timer_sync(&lp->multicast_timer); 967 968 STOP_LANCE(lp); 969
··· 963 struct lance_private *lp = netdev_priv(dev); 964 965 netif_stop_queue(dev); 966 + timer_delete_sync(&lp->multicast_timer); 967 968 STOP_LANCE(lp); 969
+2 -2
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
··· 728 struct xgbe_channel *channel; 729 unsigned int i; 730 731 - del_timer_sync(&pdata->service_timer); 732 733 for (i = 0; i < pdata->channel_count; i++) { 734 channel = pdata->channel[i]; ··· 736 break; 737 738 /* Deactivate the Tx timer */ 739 - del_timer_sync(&channel->tx_timer); 740 channel->tx_timer_active = 0; 741 } 742 }
··· 728 struct xgbe_channel *channel; 729 unsigned int i; 730 731 + timer_delete_sync(&pdata->service_timer); 732 733 for (i = 0; i < pdata->channel_count; i++) { 734 channel = pdata->channel[i]; ··· 736 break; 737 738 /* Deactivate the Tx timer */ 739 + timer_delete_sync(&channel->tx_timer); 740 channel->tx_timer_active = 0; 741 } 742 }
+3 -3
drivers/net/ethernet/apple/bmac.c
··· 461 /* prolly should wait for dma to finish & turn off the chip */ 462 spin_lock_irqsave(&bp->lock, flags); 463 if (bp->timeout_active) { 464 - del_timer(&bp->tx_timeout); 465 bp->timeout_active = 0; 466 } 467 disable_irq(dev->irq); ··· 546 547 spin_lock_irqsave(&bp->lock, flags); 548 if (bp->timeout_active) 549 - del_timer(&bp->tx_timeout); 550 bp->tx_timeout.expires = jiffies + TX_TIMEOUT; 551 add_timer(&bp->tx_timeout); 552 bp->timeout_active = 1; ··· 755 XXDEBUG(("bmac_txdma_intr\n")); 756 } 757 758 - /* del_timer(&bp->tx_timeout); */ 759 /* bp->timeout_active = 0; */ 760 761 while (1) {
··· 461 /* prolly should wait for dma to finish & turn off the chip */ 462 spin_lock_irqsave(&bp->lock, flags); 463 if (bp->timeout_active) { 464 + timer_delete(&bp->tx_timeout); 465 bp->timeout_active = 0; 466 } 467 disable_irq(dev->irq); ··· 546 547 spin_lock_irqsave(&bp->lock, flags); 548 if (bp->timeout_active) 549 + timer_delete(&bp->tx_timeout); 550 bp->tx_timeout.expires = jiffies + TX_TIMEOUT; 551 add_timer(&bp->tx_timeout); 552 bp->timeout_active = 1; ··· 755 XXDEBUG(("bmac_txdma_intr\n")); 756 } 757 758 + /* timer_delete(&bp->tx_timeout); */ 759 /* bp->timeout_active = 0; */ 760 761 while (1) {
+2 -2
drivers/net/ethernet/apple/mace.c
··· 523 struct mace_data *mp = netdev_priv(dev); 524 525 if (mp->timeout_active) 526 - del_timer(&mp->tx_timeout); 527 mp->tx_timeout.expires = jiffies + TX_TIMEOUT; 528 add_timer(&mp->tx_timeout); 529 mp->timeout_active = 1; ··· 676 677 i = mp->tx_empty; 678 while (in_8(&mb->pr) & XMTSV) { 679 - del_timer(&mp->tx_timeout); 680 mp->timeout_active = 0; 681 /* 682 * Clear any interrupt indication associated with this status
··· 523 struct mace_data *mp = netdev_priv(dev); 524 525 if (mp->timeout_active) 526 + timer_delete(&mp->tx_timeout); 527 mp->tx_timeout.expires = jiffies + TX_TIMEOUT; 528 add_timer(&mp->tx_timeout); 529 mp->timeout_active = 1; ··· 676 677 i = mp->tx_empty; 678 while (in_8(&mb->pr) & XMTSV) { 679 + timer_delete(&mp->tx_timeout); 680 mp->timeout_active = 0; 681 /* 682 * Clear any interrupt indication associated with this status
+2 -2
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
··· 1389 netif_tx_disable(self->ndev); 1390 netif_carrier_off(self->ndev); 1391 1392 - del_timer_sync(&self->service_timer); 1393 cancel_work_sync(&self->service_task); 1394 1395 self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK); 1396 1397 if (self->aq_nic_cfg.is_polling) 1398 - del_timer_sync(&self->polling_timer); 1399 else 1400 aq_pci_func_free_irqs(self); 1401
··· 1389 netif_tx_disable(self->ndev); 1390 netif_carrier_off(self->ndev); 1391 1392 + timer_delete_sync(&self->service_timer); 1393 cancel_work_sync(&self->service_task); 1394 1395 self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK); 1396 1397 if (self->aq_nic_cfg.is_polling) 1398 + timer_delete_sync(&self->polling_timer); 1399 else 1400 aq_pci_func_free_irqs(self); 1401
+1 -1
drivers/net/ethernet/atheros/ag71xx.c
··· 1391 ag71xx_dma_reset(ag); 1392 1393 napi_disable(&ag->napi); 1394 - del_timer_sync(&ag->oom_timer); 1395 1396 ag71xx_rings_cleanup(ag); 1397 }
··· 1391 ag71xx_dma_reset(ag); 1392 1393 napi_disable(&ag->napi); 1394 + timer_delete_sync(&ag->oom_timer); 1395 1396 ag71xx_rings_cleanup(ag); 1397 }
+1 -1
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
··· 357 358 static void atl1c_del_timer(struct atl1c_adapter *adapter) 359 { 360 - del_timer_sync(&adapter->phy_config_timer); 361 } 362 363
··· 357 358 static void atl1c_del_timer(struct atl1c_adapter *adapter) 359 { 360 + timer_delete_sync(&adapter->phy_config_timer); 361 } 362 363
+1 -1
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
··· 232 233 static void atl1e_del_timer(struct atl1e_adapter *adapter) 234 { 235 - del_timer_sync(&adapter->phy_config_timer); 236 } 237 238 static void atl1e_cancel_work(struct atl1e_adapter *adapter)
··· 232 233 static void atl1e_del_timer(struct atl1e_adapter *adapter) 234 { 235 + timer_delete_sync(&adapter->phy_config_timer); 236 } 237 238 static void atl1e_cancel_work(struct atl1e_adapter *adapter)
+1 -1
drivers/net/ethernet/atheros/atlx/atl1.c
··· 2641 2642 napi_disable(&adapter->napi); 2643 netif_stop_queue(netdev); 2644 - del_timer_sync(&adapter->phy_config_timer); 2645 adapter->phy_timer_pending = false; 2646 2647 atlx_irq_disable(adapter);
··· 2641 2642 napi_disable(&adapter->napi); 2643 netif_stop_queue(netdev); 2644 + timer_delete_sync(&adapter->phy_config_timer); 2645 adapter->phy_timer_pending = false; 2646 2647 atlx_irq_disable(adapter);
+4 -4
drivers/net/ethernet/atheros/atlx/atl2.c
··· 752 753 atl2_irq_disable(adapter); 754 755 - del_timer_sync(&adapter->watchdog_timer); 756 - del_timer_sync(&adapter->phy_config_timer); 757 clear_bit(0, &adapter->cfg_phy); 758 759 netif_carrier_off(netdev); ··· 1468 * explicitly disable watchdog tasks from being rescheduled */ 1469 set_bit(__ATL2_DOWN, &adapter->flags); 1470 1471 - del_timer_sync(&adapter->watchdog_timer); 1472 - del_timer_sync(&adapter->phy_config_timer); 1473 cancel_work_sync(&adapter->reset_task); 1474 cancel_work_sync(&adapter->link_chg_task); 1475
··· 752 753 atl2_irq_disable(adapter); 754 755 + timer_delete_sync(&adapter->watchdog_timer); 756 + timer_delete_sync(&adapter->phy_config_timer); 757 clear_bit(0, &adapter->cfg_phy); 758 759 netif_carrier_off(netdev); ··· 1468 * explicitly disable watchdog tasks from being rescheduled */ 1469 set_bit(__ATL2_DOWN, &adapter->flags); 1470 1471 + timer_delete_sync(&adapter->watchdog_timer); 1472 + timer_delete_sync(&adapter->phy_config_timer); 1473 cancel_work_sync(&adapter->reset_task); 1474 cancel_work_sync(&adapter->link_chg_task); 1475
+2 -2
drivers/net/ethernet/broadcom/b44.c
··· 1628 1629 napi_disable(&bp->napi); 1630 1631 - del_timer_sync(&bp->timer); 1632 1633 spin_lock_irq(&bp->lock); 1634 ··· 2473 if (!netif_running(dev)) 2474 return 0; 2475 2476 - del_timer_sync(&bp->timer); 2477 2478 spin_lock_irq(&bp->lock); 2479
··· 1628 1629 napi_disable(&bp->napi); 1630 1631 + timer_delete_sync(&bp->timer); 1632 1633 spin_lock_irq(&bp->lock); 1634 ··· 2473 if (!netif_running(dev)) 2474 return 0; 2475 2476 + timer_delete_sync(&bp->timer); 2477 2478 spin_lock_irq(&bp->lock); 2479
+3 -3
drivers/net/ethernet/broadcom/bcm63xx_enet.c
··· 1195 napi_disable(&priv->napi); 1196 if (priv->has_phy) 1197 phy_stop(dev->phydev); 1198 - del_timer_sync(&priv->rx_timeout); 1199 1200 /* mask all interrupts */ 1201 enet_writel(priv, 0, ENET_IRMASK_REG); ··· 2346 priv = netdev_priv(dev); 2347 kdev = &priv->pdev->dev; 2348 2349 - del_timer_sync(&priv->swphy_poll); 2350 netif_stop_queue(dev); 2351 napi_disable(&priv->napi); 2352 - del_timer_sync(&priv->rx_timeout); 2353 2354 /* mask all interrupts */ 2355 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
··· 1195 napi_disable(&priv->napi); 1196 if (priv->has_phy) 1197 phy_stop(dev->phydev); 1198 + timer_delete_sync(&priv->rx_timeout); 1199 1200 /* mask all interrupts */ 1201 enet_writel(priv, 0, ENET_IRMASK_REG); ··· 2346 priv = netdev_priv(dev); 2347 kdev = &priv->pdev->dev; 2348 2349 + timer_delete_sync(&priv->swphy_poll); 2350 netif_stop_queue(dev); 2351 napi_disable(&priv->napi); 2352 + timer_delete_sync(&priv->rx_timeout); 2353 2354 /* mask all interrupts */ 2355 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
+5 -5
drivers/net/ethernet/broadcom/bnx2.c
··· 6400 rc = bnx2_request_irq(bp); 6401 6402 if (rc) { 6403 - del_timer_sync(&bp->timer); 6404 goto open_err; 6405 } 6406 bnx2_enable_int(bp); ··· 6752 bnx2_disable_int_sync(bp); 6753 bnx2_napi_disable(bp); 6754 netif_tx_disable(dev); 6755 - del_timer_sync(&bp->timer); 6756 bnx2_shutdown_chip(bp); 6757 bnx2_free_irq(bp); 6758 bnx2_free_skbs(bp); ··· 8602 8603 unregister_netdev(dev); 8604 8605 - del_timer_sync(&bp->timer); 8606 cancel_work_sync(&bp->reset_task); 8607 8608 pci_iounmap(bp->pdev, bp->regview); ··· 8629 cancel_work_sync(&bp->reset_task); 8630 bnx2_netif_stop(bp, true); 8631 netif_device_detach(dev); 8632 - del_timer_sync(&bp->timer); 8633 bnx2_shutdown_chip(bp); 8634 __bnx2_free_irq(bp); 8635 bnx2_free_skbs(bp); ··· 8687 8688 if (netif_running(dev)) { 8689 bnx2_netif_stop(bp, true); 8690 - del_timer_sync(&bp->timer); 8691 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); 8692 } 8693
··· 6400 rc = bnx2_request_irq(bp); 6401 6402 if (rc) { 6403 + timer_delete_sync(&bp->timer); 6404 goto open_err; 6405 } 6406 bnx2_enable_int(bp); ··· 6752 bnx2_disable_int_sync(bp); 6753 bnx2_napi_disable(bp); 6754 netif_tx_disable(dev); 6755 + timer_delete_sync(&bp->timer); 6756 bnx2_shutdown_chip(bp); 6757 bnx2_free_irq(bp); 6758 bnx2_free_skbs(bp); ··· 8602 8603 unregister_netdev(dev); 8604 8605 + timer_delete_sync(&bp->timer); 8606 cancel_work_sync(&bp->reset_task); 8607 8608 pci_iounmap(bp->pdev, bp->regview); ··· 8629 cancel_work_sync(&bp->reset_task); 8630 bnx2_netif_stop(bp, true); 8631 netif_device_detach(dev); 8632 + timer_delete_sync(&bp->timer); 8633 bnx2_shutdown_chip(bp); 8634 __bnx2_free_irq(bp); 8635 bnx2_free_skbs(bp); ··· 8687 8688 if (netif_running(dev)) { 8689 bnx2_netif_stop(bp, true); 8690 + timer_delete_sync(&bp->timer); 8691 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); 8692 } 8693
+1 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
··· 3059 3060 bp->rx_mode = BNX2X_RX_MODE_NONE; 3061 3062 - del_timer_sync(&bp->timer); 3063 3064 if (IS_PF(bp) && !BP_NOMCP(bp)) { 3065 /* Set ALWAYS_ALIVE bit in shmem */
··· 3059 3060 bp->rx_mode = BNX2X_RX_MODE_NONE; 3061 3062 + timer_delete_sync(&bp->timer); 3063 3064 if (IS_PF(bp) && !BP_NOMCP(bp)) { 3065 /* Set ALWAYS_ALIVE bit in shmem */
+1 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
··· 14140 bnx2x_tx_disable(bp); 14141 netdev_reset_tc(bp->dev); 14142 14143 - del_timer_sync(&bp->timer); 14144 cancel_delayed_work_sync(&bp->sp_task); 14145 cancel_delayed_work_sync(&bp->period_task); 14146
··· 14140 bnx2x_tx_disable(bp); 14141 netdev_reset_tc(bp->dev); 14142 14143 + timer_delete_sync(&bp->timer); 14144 cancel_delayed_work_sync(&bp->sp_task); 14145 cancel_delayed_work_sync(&bp->period_task); 14146
+1 -1
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 12958 12959 bnxt_debug_dev_exit(bp); 12960 bnxt_disable_napi(bp); 12961 - del_timer_sync(&bp->timer); 12962 bnxt_free_skbs(bp); 12963 12964 /* Save ring stats before shutdown */
··· 12958 12959 bnxt_debug_dev_exit(bp); 12960 bnxt_disable_napi(bp); 12961 + timer_delete_sync(&bp->timer); 12962 bnxt_free_skbs(bp); 12963 12964 /* Save ring stats before shutdown */
+1 -1
drivers/net/ethernet/broadcom/tg3.c
··· 11252 11253 static void tg3_timer_stop(struct tg3 *tp) 11254 { 11255 - del_timer_sync(&tp->timer); 11256 } 11257 11258 /* Restart hardware after configuration changes, self-test, etc.
··· 11252 11253 static void tg3_timer_stop(struct tg3 *tp) 11254 { 11255 + timer_delete_sync(&tp->timer); 11256 } 11257 11258 /* Restart hardware after configuration changes, self-test, etc.
+13 -13
drivers/net/ethernet/brocade/bna/bfa_ioc.c
··· 314 { 315 switch (event) { 316 case IOC_E_FWRSP_GETATTR: 317 - del_timer(&ioc->ioc_timer); 318 bfa_fsm_set_state(ioc, bfa_ioc_sm_op); 319 break; 320 321 case IOC_E_PFFAILED: 322 case IOC_E_HWERROR: 323 - del_timer(&ioc->ioc_timer); 324 fallthrough; 325 case IOC_E_TIMEOUT: 326 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); ··· 330 break; 331 332 case IOC_E_DISABLE: 333 - del_timer(&ioc->ioc_timer); 334 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); 335 break; 336 ··· 659 break; 660 661 case IOCPF_E_DISABLE: 662 - del_timer(&ioc->iocpf_timer); 663 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); 664 bfa_ioc_pf_disabled(ioc); 665 break; 666 667 case IOCPF_E_STOP: 668 - del_timer(&ioc->iocpf_timer); 669 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); 670 break; 671 ··· 741 break; 742 743 case IOCPF_E_DISABLE: 744 - del_timer(&ioc->iocpf_timer); 745 bfa_ioc_sync_leave(ioc); 746 bfa_nw_ioc_hw_sem_release(ioc); 747 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); ··· 774 775 switch (event) { 776 case IOCPF_E_FWRSP_ENABLE: 777 - del_timer(&ioc->iocpf_timer); 778 bfa_nw_ioc_hw_sem_release(ioc); 779 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready); 780 break; 781 782 case IOCPF_E_INITFAIL: 783 - del_timer(&ioc->iocpf_timer); 784 fallthrough; 785 786 case IOCPF_E_TIMEOUT: ··· 791 break; 792 793 case IOCPF_E_DISABLE: 794 - del_timer(&ioc->iocpf_timer); 795 bfa_nw_ioc_hw_sem_release(ioc); 796 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); 797 break; ··· 844 845 switch (event) { 846 case IOCPF_E_FWRSP_DISABLE: 847 - del_timer(&ioc->iocpf_timer); 848 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); 849 break; 850 851 case IOCPF_E_FAIL: 852 - del_timer(&ioc->iocpf_timer); 853 fallthrough; 854 855 case IOCPF_E_TIMEOUT: ··· 1210 static void 1211 bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc) 1212 { 1213 - del_timer(&ioc->sem_timer); 1214 } 1215 1216 /* Initialize LPU local memory (aka secondary memory / SRAM) */ ··· 1982 static void 1983 bfa_ioc_hb_stop(struct bfa_ioc *ioc) 1984 { 1985 - del_timer(&ioc->hb_timer); 1986 } 1987 1988 /* Initiate a full firmware download. */
··· 314 { 315 switch (event) { 316 case IOC_E_FWRSP_GETATTR: 317 + timer_delete(&ioc->ioc_timer); 318 bfa_fsm_set_state(ioc, bfa_ioc_sm_op); 319 break; 320 321 case IOC_E_PFFAILED: 322 case IOC_E_HWERROR: 323 + timer_delete(&ioc->ioc_timer); 324 fallthrough; 325 case IOC_E_TIMEOUT: 326 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); ··· 330 break; 331 332 case IOC_E_DISABLE: 333 + timer_delete(&ioc->ioc_timer); 334 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); 335 break; 336 ··· 659 break; 660 661 case IOCPF_E_DISABLE: 662 + timer_delete(&ioc->iocpf_timer); 663 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); 664 bfa_ioc_pf_disabled(ioc); 665 break; 666 667 case IOCPF_E_STOP: 668 + timer_delete(&ioc->iocpf_timer); 669 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); 670 break; 671 ··· 741 break; 742 743 case IOCPF_E_DISABLE: 744 + timer_delete(&ioc->iocpf_timer); 745 bfa_ioc_sync_leave(ioc); 746 bfa_nw_ioc_hw_sem_release(ioc); 747 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); ··· 774 775 switch (event) { 776 case IOCPF_E_FWRSP_ENABLE: 777 + timer_delete(&ioc->iocpf_timer); 778 bfa_nw_ioc_hw_sem_release(ioc); 779 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready); 780 break; 781 782 case IOCPF_E_INITFAIL: 783 + timer_delete(&ioc->iocpf_timer); 784 fallthrough; 785 786 case IOCPF_E_TIMEOUT: ··· 791 break; 792 793 case IOCPF_E_DISABLE: 794 + timer_delete(&ioc->iocpf_timer); 795 bfa_nw_ioc_hw_sem_release(ioc); 796 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); 797 break; ··· 844 845 switch (event) { 846 case IOCPF_E_FWRSP_DISABLE: 847 + timer_delete(&ioc->iocpf_timer); 848 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); 849 break; 850 851 case IOCPF_E_FAIL: 852 + timer_delete(&ioc->iocpf_timer); 853 fallthrough; 854 855 case IOCPF_E_TIMEOUT: ··· 1210 static void 1211 bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc) 1212 { 1213 + timer_delete(&ioc->sem_timer); 1214 } 1215 1216 /* Initialize LPU local memory (aka secondary memory / SRAM) */ ··· 1982 static void 1983 bfa_ioc_hb_stop(struct bfa_ioc *ioc) 1984 { 1985 + timer_delete(&ioc->hb_timer); 1986 } 1987 1988 /* Initiate a full firmware download. */
+8 -8
drivers/net/ethernet/brocade/bna/bnad.c
··· 1837 to_del = 1; 1838 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1839 if (to_del) 1840 - del_timer_sync(&bnad->stats_timer); 1841 } 1842 1843 /* Utilities */ ··· 2160 } 2161 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2162 if (to_del) 2163 - del_timer_sync(&bnad->dim_timer); 2164 } 2165 2166 init_completion(&bnad->bnad_completions.rx_comp); ··· 3726 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); 3727 disable_ioceth: 3728 bnad_ioceth_disable(bnad); 3729 - del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer); 3730 - del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer); 3731 - del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer); 3732 spin_lock_irqsave(&bnad->bna_lock, flags); 3733 bna_uninit(bna); 3734 spin_unlock_irqrestore(&bnad->bna_lock, flags); ··· 3769 3770 mutex_lock(&bnad->conf_mutex); 3771 bnad_ioceth_disable(bnad); 3772 - del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer); 3773 - del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer); 3774 - del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer); 3775 spin_lock_irqsave(&bnad->bna_lock, flags); 3776 bna_uninit(bna); 3777 spin_unlock_irqrestore(&bnad->bna_lock, flags);
··· 1837 to_del = 1; 1838 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1839 if (to_del) 1840 + timer_delete_sync(&bnad->stats_timer); 1841 } 1842 1843 /* Utilities */ ··· 2160 } 2161 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2162 if (to_del) 2163 + timer_delete_sync(&bnad->dim_timer); 2164 } 2165 2166 init_completion(&bnad->bnad_completions.rx_comp); ··· 3726 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); 3727 disable_ioceth: 3728 bnad_ioceth_disable(bnad); 3729 + timer_delete_sync(&bnad->bna.ioceth.ioc.ioc_timer); 3730 + timer_delete_sync(&bnad->bna.ioceth.ioc.sem_timer); 3731 + timer_delete_sync(&bnad->bna.ioceth.ioc.hb_timer); 3732 spin_lock_irqsave(&bnad->bna_lock, flags); 3733 bna_uninit(bna); 3734 spin_unlock_irqrestore(&bnad->bna_lock, flags); ··· 3769 3770 mutex_lock(&bnad->conf_mutex); 3771 bnad_ioceth_disable(bnad); 3772 + timer_delete_sync(&bnad->bna.ioceth.ioc.ioc_timer); 3773 + timer_delete_sync(&bnad->bna.ioceth.ioc.sem_timer); 3774 + timer_delete_sync(&bnad->bna.ioceth.ioc.hb_timer); 3775 spin_lock_irqsave(&bnad->bna_lock, flags); 3776 bna_uninit(bna); 3777 spin_unlock_irqrestore(&bnad->bna_lock, flags);
+1 -1
drivers/net/ethernet/brocade/bna/bnad_ethtool.c
··· 373 } 374 spin_unlock_irqrestore(&bnad->bna_lock, flags); 375 if (to_del) 376 - del_timer_sync(&bnad->dim_timer); 377 spin_lock_irqsave(&bnad->bna_lock, flags); 378 bnad_rx_coalescing_timeo_set(bnad); 379 }
··· 373 } 374 spin_unlock_irqrestore(&bnad->bna_lock, flags); 375 if (to_del) 376 + timer_delete_sync(&bnad->dim_timer); 377 spin_lock_irqsave(&bnad->bna_lock, flags); 378 bnad_rx_coalescing_timeo_set(bnad); 379 }
+2 -2
drivers/net/ethernet/chelsio/cxgb/sge.c
··· 1984 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ 1985 1986 if (is_T2(sge->adapter)) 1987 - del_timer_sync(&sge->espibug_timer); 1988 1989 - del_timer_sync(&sge->tx_reclaim_timer); 1990 if (sge->tx_sched) 1991 tx_sched_stop(sge); 1992
··· 1984 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ 1985 1986 if (is_T2(sge->adapter)) 1987 + timer_delete_sync(&sge->espibug_timer); 1988 1989 + timer_delete_sync(&sge->tx_reclaim_timer); 1990 if (sge->tx_sched) 1991 tx_sched_stop(sge); 1992
+2 -2
drivers/net/ethernet/chelsio/cxgb3/sge.c
··· 3223 struct sge_qset *q = &adap->sge.qs[i]; 3224 3225 if (q->tx_reclaim_timer.function) 3226 - del_timer_sync(&q->tx_reclaim_timer); 3227 if (q->rx_reclaim_timer.function) 3228 - del_timer_sync(&q->rx_reclaim_timer); 3229 } 3230 } 3231
··· 3223 struct sge_qset *q = &adap->sge.qs[i]; 3224 3225 if (q->tx_reclaim_timer.function) 3226 + timer_delete_sync(&q->tx_reclaim_timer); 3227 if (q->rx_reclaim_timer.function) 3228 + timer_delete_sync(&q->rx_reclaim_timer); 3229 } 3230 } 3231
+2 -2
drivers/net/ethernet/chelsio/cxgb4/sge.c
··· 4996 struct sge *s = &adap->sge; 4997 4998 if (s->rx_timer.function) 4999 - del_timer_sync(&s->rx_timer); 5000 if (s->tx_timer.function) 5001 - del_timer_sync(&s->tx_timer); 5002 5003 if (is_offload(adap)) { 5004 struct sge_uld_txq_info *txq_info;
··· 4996 struct sge *s = &adap->sge; 4997 4998 if (s->rx_timer.function) 4999 + timer_delete_sync(&s->rx_timer); 5000 if (s->tx_timer.function) 5001 + timer_delete_sync(&s->tx_timer); 5002 5003 if (is_offload(adap)) { 5004 struct sge_uld_txq_info *txq_info;
+2 -2
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
··· 2609 struct sge *s = &adapter->sge; 2610 2611 if (s->rx_timer.function) 2612 - del_timer_sync(&s->rx_timer); 2613 if (s->tx_timer.function) 2614 - del_timer_sync(&s->tx_timer); 2615 } 2616 2617 /**
··· 2609 struct sge *s = &adapter->sge; 2610 2611 if (s->rx_timer.function) 2612 + timer_delete_sync(&s->rx_timer); 2613 if (s->tx_timer.function) 2614 + timer_delete_sync(&s->tx_timer); 2615 } 2616 2617 /**
+1 -1
drivers/net/ethernet/cisco/enic/enic_clsf.h
··· 26 27 static inline void enic_rfs_timer_stop(struct enic *enic) 28 { 29 - del_timer_sync(&enic->rfs_h.rfs_may_expire); 30 } 31 #else 32 static inline void enic_rfs_timer_start(struct enic *enic) {}
··· 26 27 static inline void enic_rfs_timer_stop(struct enic *enic) 28 { 29 + timer_delete_sync(&enic->rfs_h.rfs_may_expire); 30 } 31 #else 32 static inline void enic_rfs_timer_start(struct enic *enic) {}
+1 -1
drivers/net/ethernet/cisco/enic/enic_main.c
··· 1787 1788 enic_synchronize_irqs(enic); 1789 1790 - del_timer_sync(&enic->notify_timer); 1791 enic_rfs_flw_tbl_free(enic); 1792 1793 enic_dev_disable(enic);
··· 1787 1788 enic_synchronize_irqs(enic); 1789 1790 + timer_delete_sync(&enic->notify_timer); 1791 enic_rfs_flw_tbl_free(enic); 1792 1793 enic_dev_disable(enic);
+2 -2
drivers/net/ethernet/dec/tulip/21142.c
··· 216 (csr12 & 2) == 2) || 217 (tp->nway && (csr5 & (TPLnkFail)))) { 218 /* Link blew? Maybe restart NWay. */ 219 - del_timer_sync(&tp->timer); 220 t21142_start_nway(dev); 221 tp->timer.expires = RUN_AT(3*HZ); 222 add_timer(&tp->timer); ··· 226 medianame[dev->if_port], 227 (csr12 & 2) ? "failed" : "good"); 228 if ((csr12 & 2) && ! tp->medialock) { 229 - del_timer_sync(&tp->timer); 230 t21142_start_nway(dev); 231 tp->timer.expires = RUN_AT(3*HZ); 232 add_timer(&tp->timer);
··· 216 (csr12 & 2) == 2) || 217 (tp->nway && (csr5 & (TPLnkFail)))) { 218 /* Link blew? Maybe restart NWay. */ 219 + timer_delete_sync(&tp->timer); 220 t21142_start_nway(dev); 221 tp->timer.expires = RUN_AT(3*HZ); 222 add_timer(&tp->timer); ··· 226 medianame[dev->if_port], 227 (csr12 & 2) ? "failed" : "good"); 228 if ((csr12 & 2) && ! tp->medialock) { 229 + timer_delete_sync(&tp->timer); 230 t21142_start_nway(dev); 231 tp->timer.expires = RUN_AT(3*HZ); 232 add_timer(&tp->timer);
+3 -3
drivers/net/ethernet/dec/tulip/de2104x.c
··· 1428 1429 netif_dbg(de, ifdown, dev, "disabling interface\n"); 1430 1431 - del_timer_sync(&de->media_timer); 1432 1433 spin_lock_irqsave(&de->lock, flags); 1434 de_stop_hw(de); ··· 1452 dr32(MacStatus), dr32(MacMode), dr32(SIAStatus), 1453 de->rx_tail, de->tx_head, de->tx_tail); 1454 1455 - del_timer_sync(&de->media_timer); 1456 1457 disable_irq(irq); 1458 spin_lock_irq(&de->lock); ··· 2126 if (netif_running (dev)) { 2127 const int irq = pdev->irq; 2128 2129 - del_timer_sync(&de->media_timer); 2130 2131 disable_irq(irq); 2132 spin_lock_irq(&de->lock);
··· 1428 1429 netif_dbg(de, ifdown, dev, "disabling interface\n"); 1430 1431 + timer_delete_sync(&de->media_timer); 1432 1433 spin_lock_irqsave(&de->lock, flags); 1434 de_stop_hw(de); ··· 1452 dr32(MacStatus), dr32(MacMode), dr32(SIAStatus), 1453 de->rx_tail, de->tx_head, de->tx_tail); 1454 1455 + timer_delete_sync(&de->media_timer); 1456 1457 disable_irq(irq); 1458 spin_lock_irq(&de->lock); ··· 2126 if (netif_running (dev)) { 2127 const int irq = pdev->irq; 2128 2129 + timer_delete_sync(&de->media_timer); 2130 2131 disable_irq(irq); 2132 spin_lock_irq(&de->lock);
+1 -1
drivers/net/ethernet/dec/tulip/dmfe.c
··· 745 netif_stop_queue(dev); 746 747 /* deleted timer */ 748 - del_timer_sync(&db->timer); 749 750 /* Reset & stop DM910X board */ 751 dw32(DCR0, DM910X_RESET);
··· 745 netif_stop_queue(dev); 746 747 /* deleted timer */ 748 + timer_delete_sync(&db->timer); 749 750 /* Reset & stop DM910X board */ 751 dw32(DCR0, DM910X_RESET);
+2 -2
drivers/net/ethernet/dec/tulip/interrupt.c
··· 699 tulip_start_rxtx(tp); 700 } 701 /* 702 - * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this 703 - * call is ever done under the spinlock 704 */ 705 if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) { 706 if (tp->link_change)
··· 699 tulip_start_rxtx(tp); 700 } 701 /* 702 + * NB: t21142_lnk_change() does a timer_delete_sync(), so be careful 703 + * if this call is ever done under the spinlock 704 */ 705 if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) { 706 if (tp->link_change)
+3 -3
drivers/net/ethernet/dec/tulip/pnic2.c
··· 323 if (tulip_debug > 2) 324 netdev_dbg(dev, "Ugh! Link blew?\n"); 325 326 - del_timer_sync(&tp->timer); 327 pnic2_start_nway(dev); 328 tp->timer.expires = RUN_AT(3*HZ); 329 add_timer(&tp->timer); ··· 348 349 /* if failed then try doing an nway to get in sync */ 350 if ((csr12 & 2) && ! tp->medialock) { 351 - del_timer_sync(&tp->timer); 352 pnic2_start_nway(dev); 353 tp->timer.expires = RUN_AT(3*HZ); 354 add_timer(&tp->timer); ··· 372 373 /* if failed, try doing an nway to get in sync */ 374 if ((csr12 & 4) && ! tp->medialock) { 375 - del_timer_sync(&tp->timer); 376 pnic2_start_nway(dev); 377 tp->timer.expires = RUN_AT(3*HZ); 378 add_timer(&tp->timer);
··· 323 if (tulip_debug > 2) 324 netdev_dbg(dev, "Ugh! Link blew?\n"); 325 326 + timer_delete_sync(&tp->timer); 327 pnic2_start_nway(dev); 328 tp->timer.expires = RUN_AT(3*HZ); 329 add_timer(&tp->timer); ··· 348 349 /* if failed then try doing an nway to get in sync */ 350 if ((csr12 & 2) && ! tp->medialock) { 351 + timer_delete_sync(&tp->timer); 352 pnic2_start_nway(dev); 353 tp->timer.expires = RUN_AT(3*HZ); 354 add_timer(&tp->timer); ··· 372 373 /* if failed, try doing an nway to get in sync */ 374 if ((csr12 & 4) && ! tp->medialock) { 375 + timer_delete_sync(&tp->timer); 376 pnic2_start_nway(dev); 377 tp->timer.expires = RUN_AT(3*HZ); 378 add_timer(&tp->timer);
+2 -2
drivers/net/ethernet/dec/tulip/tulip_core.c
··· 747 napi_disable(&tp->napi); 748 #endif 749 750 - del_timer_sync (&tp->timer); 751 #ifdef CONFIG_TULIP_NAPI 752 - del_timer_sync (&tp->oom_timer); 753 #endif 754 spin_lock_irqsave (&tp->lock, flags); 755
··· 747 napi_disable(&tp->napi); 748 #endif 749 750 + timer_delete_sync(&tp->timer); 751 #ifdef CONFIG_TULIP_NAPI 752 + timer_delete_sync(&tp->oom_timer); 753 #endif 754 spin_lock_irqsave (&tp->lock, flags); 755
+1 -1
drivers/net/ethernet/dec/tulip/uli526x.c
··· 656 netif_stop_queue(dev); 657 658 /* deleted timer */ 659 - del_timer_sync(&db->timer); 660 661 /* Reset & stop ULI526X board */ 662 uw32(DCR0, ULI526X_RESET);
··· 656 netif_stop_queue(dev); 657 658 /* deleted timer */ 659 + timer_delete_sync(&db->timer); 660 661 /* Reset & stop ULI526X board */ 662 uw32(DCR0, ULI526X_RESET);
+2 -2
drivers/net/ethernet/dec/tulip/winbond-840.c
··· 1509 } 1510 #endif /* __i386__ debugging only */ 1511 1512 - del_timer_sync(&np->timer); 1513 1514 free_rxtx_rings(np); 1515 free_ringdesc(np); ··· 1560 1561 rtnl_lock(); 1562 if (netif_running (dev)) { 1563 - del_timer_sync(&np->timer); 1564 1565 spin_lock_irq(&np->lock); 1566 netif_device_detach(dev);
··· 1509 } 1510 #endif /* __i386__ debugging only */ 1511 1512 + timer_delete_sync(&np->timer); 1513 1514 free_rxtx_rings(np); 1515 free_ringdesc(np); ··· 1560 1561 rtnl_lock(); 1562 if (netif_running (dev)) { 1563 + timer_delete_sync(&np->timer); 1564 1565 spin_lock_irq(&np->lock); 1566 netif_device_detach(dev);
+2 -2
drivers/net/ethernet/dlink/dl2k.c
··· 1778 rio_hw_stop(dev); 1779 1780 free_irq(pdev->irq, dev); 1781 - del_timer_sync (&np->timer); 1782 1783 free_list(dev); 1784 ··· 1818 return 0; 1819 1820 netif_device_detach(dev); 1821 - del_timer_sync(&np->timer); 1822 rio_hw_stop(dev); 1823 1824 return 0;
··· 1778 rio_hw_stop(dev); 1779 1780 free_irq(pdev->irq, dev); 1781 + timer_delete_sync(&np->timer); 1782 1783 free_list(dev); 1784 ··· 1818 return 0; 1819 1820 netif_device_detach(dev); 1821 + timer_delete_sync(&np->timer); 1822 rio_hw_stop(dev); 1823 1824 return 0;
+2 -2
drivers/net/ethernet/fealnx.c
··· 1900 /* Stop the chip's Tx and Rx processes. */ 1901 stop_nic_rxtx(ioaddr, 0); 1902 1903 - del_timer_sync(&np->timer); 1904 - del_timer_sync(&np->reset_timer); 1905 1906 free_irq(np->pci_dev->irq, dev); 1907
··· 1900 /* Stop the chip's Tx and Rx processes. */ 1901 stop_nic_rxtx(ioaddr, 0); 1902 1903 + timer_delete_sync(&np->timer); 1904 + timer_delete_sync(&np->reset_timer); 1905 1906 free_irq(np->pci_dev->irq, dev); 1907
+1 -1
drivers/net/ethernet/google/gve/gve_ethtool.c
··· 705 706 memset(priv->stats_report->stats, 0, (tx_stats_num + rx_stats_num) * 707 sizeof(struct stats)); 708 - del_timer_sync(&priv->stats_report_timer); 709 } 710 return 0; 711 }
··· 705 706 memset(priv->stats_report->stats, 0, (tx_stats_num + rx_stats_num) * 707 sizeof(struct stats)); 708 + timer_delete_sync(&priv->stats_report_timer); 709 } 710 return 0; 711 }
+2 -2
drivers/net/ethernet/google/gve/gve_main.c
··· 302 if (!priv->stats_report) 303 return; 304 305 - del_timer_sync(&priv->stats_report_timer); 306 dma_free_coherent(&priv->pdev->dev, priv->stats_report_len, 307 priv->stats_report, priv->stats_report_bus); 308 priv->stats_report = NULL; ··· 1408 goto err; 1409 gve_clear_device_rings_ok(priv); 1410 } 1411 - del_timer_sync(&priv->stats_report_timer); 1412 1413 gve_unreg_xdp_info(priv); 1414
··· 302 if (!priv->stats_report) 303 return; 304 305 + timer_delete_sync(&priv->stats_report_timer); 306 dma_free_coherent(&priv->pdev->dev, priv->stats_report_len, 307 priv->stats_report, priv->stats_report_bus); 308 priv->stats_report = NULL; ··· 1408 goto err; 1409 gve_clear_device_rings_ok(priv); 1410 } 1411 + timer_delete_sync(&priv->stats_report_timer); 1412 1413 gve_unreg_xdp_info(priv); 1414
+1 -1
drivers/net/ethernet/hisilicon/hns/hns_enet.c
··· 1402 if (test_and_set_bit(NIC_STATE_DOWN, &priv->state)) 1403 return; 1404 1405 - (void)del_timer_sync(&priv->service_timer); 1406 netif_tx_stop_all_queues(ndev); 1407 netif_carrier_off(ndev); 1408 netif_tx_disable(ndev);
··· 1402 if (test_and_set_bit(NIC_STATE_DOWN, &priv->state)) 1403 return; 1404 1405 + (void) timer_delete_sync(&priv->service_timer); 1406 netif_tx_stop_all_queues(ndev); 1407 netif_carrier_off(ndev); 1408 netif_tx_disable(ndev);
+1 -1
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
··· 11492 set_bit(HCLGE_STATE_REMOVING, &hdev->state); 11493 11494 if (hdev->reset_timer.function) 11495 - del_timer_sync(&hdev->reset_timer); 11496 if (hdev->service_task.work.func) 11497 cancel_delayed_work_sync(&hdev->service_task); 11498 }
··· 11492 set_bit(HCLGE_STATE_REMOVING, &hdev->state); 11493 11494 if (hdev->reset_timer.function) 11495 + timer_delete_sync(&hdev->reset_timer); 11496 if (hdev->service_task.work.func) 11497 cancel_delayed_work_sync(&hdev->service_task); 11498 }
+2 -2
drivers/net/ethernet/intel/e100.c
··· 2293 return 0; 2294 2295 err_no_irq: 2296 - del_timer_sync(&nic->watchdog); 2297 err_clean_cbs: 2298 e100_clean_cbs(nic); 2299 err_rx_clean_list: ··· 2308 netif_stop_queue(nic->netdev); 2309 e100_hw_reset(nic); 2310 free_irq(nic->pdev->irq, nic->netdev); 2311 - del_timer_sync(&nic->watchdog); 2312 netif_carrier_off(nic->netdev); 2313 e100_clean_cbs(nic); 2314 e100_rx_clean_list(nic);
··· 2293 return 0; 2294 2295 err_no_irq: 2296 + timer_delete_sync(&nic->watchdog); 2297 err_clean_cbs: 2298 e100_clean_cbs(nic); 2299 err_rx_clean_list: ··· 2308 netif_stop_queue(nic->netdev); 2309 e100_hw_reset(nic); 2310 free_irq(nic->pdev->irq, nic->netdev); 2311 + timer_delete_sync(&nic->watchdog); 2312 netif_carrier_off(nic->netdev); 2313 e100_clean_cbs(nic); 2314 e100_rx_clean_list(nic);
+4 -4
drivers/net/ethernet/intel/e1000e/netdev.c
··· 4287 4288 napi_synchronize(&adapter->napi); 4289 4290 - del_timer_sync(&adapter->watchdog_timer); 4291 - del_timer_sync(&adapter->phy_info_timer); 4292 4293 spin_lock(&adapter->stats64_lock); 4294 e1000e_update_stats(adapter); ··· 7741 * from being rescheduled. 7742 */ 7743 set_bit(__E1000_DOWN, &adapter->state); 7744 - del_timer_sync(&adapter->watchdog_timer); 7745 - del_timer_sync(&adapter->phy_info_timer); 7746 7747 cancel_work_sync(&adapter->reset_task); 7748 cancel_work_sync(&adapter->watchdog_task);
··· 4287 4288 napi_synchronize(&adapter->napi); 4289 4290 + timer_delete_sync(&adapter->watchdog_timer); 4291 + timer_delete_sync(&adapter->phy_info_timer); 4292 4293 spin_lock(&adapter->stats64_lock); 4294 e1000e_update_stats(adapter); ··· 7741 * from being rescheduled. 7742 */ 7743 set_bit(__E1000_DOWN, &adapter->state); 7744 + timer_delete_sync(&adapter->watchdog_timer); 7745 + timer_delete_sync(&adapter->phy_info_timer); 7746 7747 cancel_work_sync(&adapter->reset_task); 7748 cancel_work_sync(&adapter->watchdog_task);
+1 -1
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
··· 2245 struct fm10k_intfc *interface = pci_get_drvdata(pdev); 2246 struct net_device *netdev = interface->netdev; 2247 2248 - del_timer_sync(&interface->service_timer); 2249 2250 fm10k_stop_service_event(interface); 2251 fm10k_stop_macvlan_task(interface);
··· 2245 struct fm10k_intfc *interface = pci_get_drvdata(pdev); 2246 struct net_device *netdev = interface->netdev; 2247 2248 + timer_delete_sync(&interface->service_timer); 2249 2250 fm10k_stop_service_event(interface); 2251 fm10k_stop_macvlan_task(interface);
+2 -2
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 16382 set_bit(__I40E_DOWN, pf->state); 16383 16384 /* Ensure service task will not be running */ 16385 - del_timer_sync(&pf->service_timer); 16386 cancel_work_sync(&pf->service_task); 16387 16388 /* Client close must be called explicitly here because the timer ··· 16581 set_bit(__I40E_SUSPENDED, pf->state); 16582 set_bit(__I40E_DOWN, pf->state); 16583 16584 - del_timer_sync(&pf->service_timer); 16585 cancel_work_sync(&pf->service_task); 16586 i40e_cloud_filter_exit(pf); 16587 i40e_fdir_teardown(pf);
··· 16382 set_bit(__I40E_DOWN, pf->state); 16383 16384 /* Ensure service task will not be running */ 16385 + timer_delete_sync(&pf->service_timer); 16386 cancel_work_sync(&pf->service_task); 16387 16388 /* Client close must be called explicitly here because the timer ··· 16581 set_bit(__I40E_SUSPENDED, pf->state); 16582 set_bit(__I40E_DOWN, pf->state); 16583 16584 + timer_delete_sync(&pf->service_timer); 16585 cancel_work_sync(&pf->service_task); 16586 i40e_cloud_filter_exit(pf); 16587 i40e_fdir_teardown(pf);
+1 -1
drivers/net/ethernet/intel/ice/ice_main.c
··· 1717 ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state); 1718 1719 if (pf->serv_tmr.function) 1720 - del_timer_sync(&pf->serv_tmr); 1721 if (pf->serv_task.func) 1722 cancel_work_sync(&pf->serv_task); 1723
··· 1717 ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state); 1718 1719 if (pf->serv_tmr.function) 1720 + timer_delete_sync(&pf->serv_tmr); 1721 if (pf->serv_task.func) 1722 cancel_work_sync(&pf->serv_task); 1723
+2 -2
drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
··· 1521 memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc)); 1522 spin_unlock_irqrestore(&fdir->ctx_lock, flags); 1523 1524 - ret = del_timer(&ctx_irq->rx_tmr); 1525 if (!ret) 1526 dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id); 1527 ··· 1916 struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq; 1917 unsigned long flags; 1918 1919 - del_timer(&ctx->rx_tmr); 1920 spin_lock_irqsave(&vf->fdir.ctx_lock, flags); 1921 ctx->flags &= ~ICE_VF_FDIR_CTX_VALID; 1922 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
··· 1521 memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc)); 1522 spin_unlock_irqrestore(&fdir->ctx_lock, flags); 1523 1524 + ret = timer_delete(&ctx_irq->rx_tmr); 1525 if (!ret) 1526 dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id); 1527 ··· 1916 struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq; 1917 unsigned long flags; 1918 1919 + timer_delete(&ctx->rx_tmr); 1920 spin_lock_irqsave(&vf->fdir.ctx_lock, flags); 1921 ctx->flags &= ~ICE_VF_FDIR_CTX_VALID; 1922 spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
+4 -4
drivers/net/ethernet/intel/igb/igb_main.c
··· 2185 } 2186 } 2187 2188 - del_timer_sync(&adapter->watchdog_timer); 2189 - del_timer_sync(&adapter->phy_info_timer); 2190 2191 /* record the stats before reset*/ 2192 spin_lock(&adapter->stats64_lock); ··· 3860 * disable watchdog from being rescheduled. 3861 */ 3862 set_bit(__IGB_DOWN, &adapter->state); 3863 - del_timer_sync(&adapter->watchdog_timer); 3864 - del_timer_sync(&adapter->phy_info_timer); 3865 3866 cancel_work_sync(&adapter->reset_task); 3867 cancel_work_sync(&adapter->watchdog_task);
··· 2185 } 2186 } 2187 2188 + timer_delete_sync(&adapter->watchdog_timer); 2189 + timer_delete_sync(&adapter->phy_info_timer); 2190 2191 /* record the stats before reset*/ 2192 spin_lock(&adapter->stats64_lock); ··· 3860 * disable watchdog from being rescheduled. 3861 */ 3862 set_bit(__IGB_DOWN, &adapter->state); 3863 + timer_delete_sync(&adapter->watchdog_timer); 3864 + timer_delete_sync(&adapter->phy_info_timer); 3865 3866 cancel_work_sync(&adapter->reset_task); 3867 cancel_work_sync(&adapter->watchdog_task);
+2 -2
drivers/net/ethernet/intel/igbvf/netdev.c
··· 1592 1593 igbvf_irq_disable(adapter); 1594 1595 - del_timer_sync(&adapter->watchdog_timer); 1596 1597 /* record the stats before reset*/ 1598 igbvf_update_stats(adapter); ··· 2912 * disable it from being rescheduled. 2913 */ 2914 set_bit(__IGBVF_DOWN, &adapter->state); 2915 - del_timer_sync(&adapter->watchdog_timer); 2916 2917 cancel_work_sync(&adapter->reset_task); 2918 cancel_work_sync(&adapter->watchdog_task);
··· 1592 1593 igbvf_irq_disable(adapter); 1594 1595 + timer_delete_sync(&adapter->watchdog_timer); 1596 1597 /* record the stats before reset*/ 1598 igbvf_update_stats(adapter); ··· 2912 * disable it from being rescheduled. 2913 */ 2914 set_bit(__IGBVF_DOWN, &adapter->state); 2915 + timer_delete_sync(&adapter->watchdog_timer); 2916 2917 cancel_work_sync(&adapter->reset_task); 2918 cancel_work_sync(&adapter->watchdog_task);
+4 -4
drivers/net/ethernet/intel/igc/igc_main.c
··· 5291 } 5292 } 5293 5294 - del_timer_sync(&adapter->watchdog_timer); 5295 - del_timer_sync(&adapter->phy_info_timer); 5296 5297 /* record the stats before reset*/ 5298 spin_lock(&adapter->stats64_lock); ··· 7272 7273 set_bit(__IGC_DOWN, &adapter->state); 7274 7275 - del_timer_sync(&adapter->watchdog_timer); 7276 - del_timer_sync(&adapter->phy_info_timer); 7277 7278 cancel_work_sync(&adapter->reset_task); 7279 cancel_work_sync(&adapter->watchdog_task);
··· 5291 } 5292 } 5293 5294 + timer_delete_sync(&adapter->watchdog_timer); 5295 + timer_delete_sync(&adapter->phy_info_timer); 5296 5297 /* record the stats before reset*/ 5298 spin_lock(&adapter->stats64_lock); ··· 7272 7273 set_bit(__IGC_DOWN, &adapter->state); 7274 7275 + timer_delete_sync(&adapter->watchdog_timer); 7276 + timer_delete_sync(&adapter->phy_info_timer); 7277 7278 cancel_work_sync(&adapter->reset_task); 7279 cancel_work_sync(&adapter->watchdog_task);
+1 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 6538 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; 6539 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; 6540 6541 - del_timer_sync(&adapter->service_timer); 6542 6543 if (adapter->num_vfs) { 6544 /* Clear EITR Select mapping */
··· 6538 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; 6539 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; 6540 6541 + timer_delete_sync(&adapter->service_timer); 6542 6543 if (adapter->num_vfs) { 6544 /* Clear EITR Select mapping */
+1 -1
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
··· 2514 2515 ixgbevf_napi_disable_all(adapter); 2516 2517 - del_timer_sync(&adapter->service_timer); 2518 2519 /* disable transmits in the hardware now that interrupts are off */ 2520 for (i = 0; i < adapter->num_tx_queues; i++) {
··· 2514 2515 ixgbevf_napi_disable_all(adapter); 2516 2517 + timer_delete_sync(&adapter->service_timer); 2518 2519 /* disable transmits in the hardware now that interrupts are off */ 2520 for (i = 0; i < adapter->num_tx_queues; i++) {
+1 -1
drivers/net/ethernet/korina.c
··· 1239 struct korina_private *lp = netdev_priv(dev); 1240 u32 tmp; 1241 1242 - del_timer(&lp->media_check_timer); 1243 1244 /* Disable interrupts */ 1245 disable_irq(lp->rx_irq);
··· 1239 struct korina_private *lp = netdev_priv(dev); 1240 u32 tmp; 1241 1242 + timer_delete(&lp->media_check_timer); 1243 1244 /* Disable interrupts */ 1245 disable_irq(lp->rx_irq);
+3 -3
drivers/net/ethernet/marvell/mv643xx_eth.c
··· 2247 2248 if (unlikely(mp->oom)) { 2249 mp->oom = 0; 2250 - del_timer(&mp->rx_oom); 2251 } 2252 2253 work_done = 0; ··· 2521 2522 napi_disable(&mp->napi); 2523 2524 - del_timer_sync(&mp->rx_oom); 2525 2526 netif_carrier_off(dev); 2527 if (dev->phydev) ··· 2531 port_reset(mp); 2532 mv643xx_eth_get_stats(dev); 2533 mib_counters_update(mp); 2534 - del_timer_sync(&mp->mib_counters_timer); 2535 2536 for (i = 0; i < mp->rxq_count; i++) 2537 rxq_deinit(mp->rxq + i);
··· 2247 2248 if (unlikely(mp->oom)) { 2249 mp->oom = 0; 2250 + timer_delete(&mp->rx_oom); 2251 } 2252 2253 work_done = 0; ··· 2521 2522 napi_disable(&mp->napi); 2523 2524 + timer_delete_sync(&mp->rx_oom); 2525 2526 netif_carrier_off(dev); 2527 if (dev->phydev) ··· 2531 port_reset(mp); 2532 mv643xx_eth_get_stats(dev); 2533 mib_counters_update(mp); 2534 + timer_delete_sync(&mp->mib_counters_timer); 2535 2536 for (i = 0; i < mp->rxq_count; i++) 2537 rxq_deinit(mp->rxq + i);
+1 -1
drivers/net/ethernet/marvell/pxa168_eth.c
··· 1175 /* Write to ICR to clear interrupts. */ 1176 wrl(pep, INT_W_CLEAR, 0); 1177 napi_disable(&pep->napi); 1178 - del_timer_sync(&pep->timeout); 1179 netif_carrier_off(dev); 1180 free_irq(dev->irq, dev); 1181 rxq_deinit(dev);
··· 1175 /* Write to ICR to clear interrupts. */ 1176 wrl(pep, INT_W_CLEAR, 0); 1177 napi_disable(&pep->napi); 1178 + timer_delete_sync(&pep->timeout); 1179 netif_carrier_off(dev); 1180 free_irq(dev->irq, dev); 1181 rxq_deinit(dev);
+1 -1
drivers/net/ethernet/marvell/skge.c
··· 2662 netif_tx_disable(dev); 2663 2664 if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC) 2665 - del_timer_sync(&skge->link_timer); 2666 2667 napi_disable(&skge->napi); 2668 netif_carrier_off(dev);
··· 2662 netif_tx_disable(dev); 2663 2664 if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC) 2665 + timer_delete_sync(&skge->link_timer); 2666 2667 napi_disable(&skge->napi); 2668 netif_carrier_off(dev);
+1 -1
drivers/net/ethernet/marvell/sky2.c
··· 5052 if (!hw) 5053 return 0; 5054 5055 - del_timer_sync(&hw->watchdog_timer); 5056 cancel_work_sync(&hw->restart_work); 5057 5058 rtnl_lock();
··· 5052 if (!hw) 5053 return 0; 5054 5055 + timer_delete_sync(&hw->watchdog_timer); 5056 cancel_work_sync(&hw->restart_work); 5057 5058 rtnl_lock();
+1 -1
drivers/net/ethernet/mellanox/mlx4/catas.c
··· 305 { 306 struct mlx4_priv *priv = mlx4_priv(dev); 307 308 - del_timer_sync(&priv->catas_err.timer); 309 310 if (priv->catas_err.map) { 311 iounmap(priv->catas_err.map);
··· 305 { 306 struct mlx4_priv *priv = mlx4_priv(dev); 307 308 + timer_delete_sync(&priv->catas_err.timer); 309 310 if (priv->catas_err.map) { 311 iounmap(priv->catas_err.map);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
··· 246 { 247 struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; 248 249 - del_timer_sync(&fw_reset->timer); 250 } 251 252 static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
··· 246 { 247 struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; 248 249 + timer_delete_sync(&fw_reset->timer); 250 } 251 252 static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/health.c
··· 847 if (disable_health) 848 set_bit(MLX5_DROP_HEALTH_WORK, &health->flags); 849 850 - del_timer_sync(&health->timer); 851 } 852 853 void mlx5_start_health_fw_log_up(struct mlx5_core_dev *dev)
··· 847 if (disable_health) 848 set_bit(MLX5_DROP_HEALTH_WORK, &health->flags); 849 850 + timer_delete_sync(&health->timer); 851 } 852 853 void mlx5_start_health_fw_log_up(struct mlx5_core_dev *dev)
+1 -1
drivers/net/ethernet/micrel/ksz884x.c
··· 3951 { 3952 if (info->max) { 3953 info->max = 0; 3954 - del_timer_sync(&info->timer); 3955 } 3956 } 3957
··· 3951 { 3952 if (info->max) { 3953 info->max = 0; 3954 + timer_delete_sync(&info->timer); 3955 } 3956 } 3957
+1 -1
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
··· 2482 if (mgp->ss[0].tx.req_bytes == NULL) 2483 return 0; 2484 2485 - del_timer_sync(&mgp->watchdog_timer); 2486 mgp->running = MYRI10GE_ETH_STOPPING; 2487 for (i = 0; i < mgp->num_slices; i++) 2488 napi_disable(&mgp->ss[i].napi);
··· 2482 if (mgp->ss[0].tx.req_bytes == NULL) 2483 return 0; 2484 2485 + timer_delete_sync(&mgp->watchdog_timer); 2486 mgp->running = MYRI10GE_ETH_STOPPING; 2487 for (i = 0; i < mgp->num_slices; i++) 2488 napi_disable(&mgp->ss[i].napi);
+2 -2
drivers/net/ethernet/natsemi/natsemi.c
··· 3179 * the final WOL settings? 3180 */ 3181 3182 - del_timer_sync(&np->timer); 3183 disable_irq(irq); 3184 spin_lock_irq(&np->lock); 3185 natsemi_irq_disable(dev); ··· 3278 if (netif_running (dev)) { 3279 const int irq = np->pci_dev->irq; 3280 3281 - del_timer_sync(&np->timer); 3282 3283 disable_irq(irq); 3284 spin_lock_irq(&np->lock);
··· 3179 * the final WOL settings? 3180 */ 3181 3182 + timer_delete_sync(&np->timer); 3183 disable_irq(irq); 3184 spin_lock_irq(&np->lock); 3185 natsemi_irq_disable(dev); ··· 3278 if (netif_running (dev)) { 3279 const int irq = np->pci_dev->irq; 3280 3281 + timer_delete_sync(&np->timer); 3282 3283 disable_irq(irq); 3284 spin_lock_irq(&np->lock);
+1 -1
drivers/net/ethernet/natsemi/ns83820.c
··· 1527 struct ns83820 *dev = PRIV(ndev); 1528 1529 /* FIXME: protect against interrupt handler? */ 1530 - del_timer_sync(&dev->tx_watchdog); 1531 1532 ns83820_disable_interrupts(dev); 1533
··· 1527 struct ns83820 *dev = PRIV(ndev); 1528 1529 /* FIXME: protect against interrupt handler? */ 1530 + timer_delete_sync(&dev->tx_watchdog); 1531 1532 ns83820_disable_interrupts(dev); 1533
+1 -1
drivers/net/ethernet/neterion/s2io.c
··· 7019 if (!is_s2io_card_up(sp)) 7020 return; 7021 7022 - del_timer_sync(&sp->alarm_timer); 7023 /* If s2io_set_link task is executing, wait till it completes. */ 7024 while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) 7025 msleep(50);
··· 7019 if (!is_s2io_card_up(sp)) 7020 return; 7021 7022 + timer_delete_sync(&sp->alarm_timer); 7023 /* If s2io_set_link task is executing, wait till it completes. */ 7024 while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) 7025 msleep(50);
+1 -1
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
··· 227 spin_unlock_bh(&nn->reconfig_lock); 228 229 if (cancelled_timer) { 230 - del_timer_sync(&nn->reconfig_timer); 231 nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires); 232 } 233
··· 227 spin_unlock_bh(&nn->reconfig_lock); 228 229 if (cancelled_timer) { 230 + timer_delete_sync(&nn->reconfig_timer); 231 nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires); 232 } 233
+3 -3
drivers/net/ethernet/nvidia/forcedeth.c
··· 5623 napi_disable(&np->napi); 5624 synchronize_irq(np->pci_dev->irq); 5625 5626 - del_timer_sync(&np->oom_kick); 5627 - del_timer_sync(&np->nic_poll); 5628 - del_timer_sync(&np->stats_poll); 5629 5630 netif_stop_queue(dev); 5631 spin_lock_irq(&np->lock);
··· 5623 napi_disable(&np->napi); 5624 synchronize_irq(np->pci_dev->irq); 5625 5626 + timer_delete_sync(&np->oom_kick); 5627 + timer_delete_sync(&np->nic_poll); 5628 + timer_delete_sync(&np->stats_poll); 5629 5630 netif_stop_queue(dev); 5631 spin_lock_irq(&np->lock);
+1 -1
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
··· 1916 pch_gbe_irq_disable(adapter); 1917 pch_gbe_free_irq(adapter); 1918 1919 - del_timer_sync(&adapter->watchdog_timer); 1920 1921 netdev->tx_queue_len = adapter->tx_queue_len; 1922 netif_carrier_off(netdev);
··· 1916 pch_gbe_irq_disable(adapter); 1917 pch_gbe_free_irq(adapter); 1918 1919 + timer_delete_sync(&adapter->watchdog_timer); 1920 1921 netdev->tx_queue_len = adapter->tx_queue_len; 1922 netif_carrier_off(netdev);
+1 -1
drivers/net/ethernet/packetengines/hamachi.c
··· 1712 1713 free_irq(hmp->pci_dev->irq, dev); 1714 1715 - del_timer_sync(&hmp->timer); 1716 1717 /* Free all the skbuffs in the Rx queue. */ 1718 for (i = 0; i < RX_RING_SIZE; i++) {
··· 1712 1713 free_irq(hmp->pci_dev->irq, dev); 1714 1715 + timer_delete_sync(&hmp->timer); 1716 1717 /* Free all the skbuffs in the Rx queue. */ 1718 for (i = 0; i < RX_RING_SIZE; i++) {
+1 -1
drivers/net/ethernet/packetengines/yellowfin.c
··· 1222 iowrite32(0x80000000, ioaddr + RxCtrl); 1223 iowrite32(0x80000000, ioaddr + TxCtrl); 1224 1225 - del_timer(&yp->timer); 1226 1227 #if defined(__i386__) 1228 if (yellowfin_debug > 2) {
··· 1222 iowrite32(0x80000000, ioaddr + RxCtrl); 1223 iowrite32(0x80000000, ioaddr + TxCtrl); 1224 1225 + timer_delete(&yp->timer); 1226 1227 #if defined(__i386__) 1228 if (yellowfin_debug > 2) {
+1 -1
drivers/net/ethernet/pasemi/pasemi_mac.c
··· 1288 phy_disconnect(dev->phydev); 1289 } 1290 1291 - del_timer_sync(&mac->tx->clean_timer); 1292 1293 netif_stop_queue(dev); 1294 napi_disable(&mac->napi);
··· 1288 phy_disconnect(dev->phydev); 1289 } 1290 1291 + timer_delete_sync(&mac->tx->clean_timer); 1292 1293 netif_stop_queue(dev); 1294 napi_disable(&mac->napi);
+1 -1
drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
··· 441 442 set_bit(IONIC_LIF_F_FW_RESET, lif->state); 443 444 - del_timer_sync(&ionic->watchdog_timer); 445 cancel_work_sync(&lif->deferred.work); 446 447 mutex_lock(&lif->queue_lock);
··· 441 442 set_bit(IONIC_LIF_F_FW_RESET, lif->state); 443 444 + timer_delete_sync(&ionic->watchdog_timer); 445 cancel_work_sync(&lif->deferred.work); 446 447 mutex_lock(&lif->queue_lock);
+1 -1
drivers/net/ethernet/qlogic/qla3xxx.c
··· 3420 pci_disable_msi(qdev->pdev); 3421 } 3422 3423 - del_timer_sync(&qdev->adapter_timer); 3424 3425 napi_disable(&qdev->napi); 3426
··· 3420 pci_disable_msi(qdev->pdev); 3421 } 3422 3423 + timer_delete_sync(&qdev->adapter_timer); 3424 3425 napi_disable(&qdev->napi); 3426
+1 -1
drivers/net/ethernet/realtek/atp.c
··· 832 833 netif_stop_queue(dev); 834 835 - del_timer_sync(&lp->timer); 836 837 /* Flush the Tx and disable Rx here. */ 838 lp->addr_mode = CMR2h_OFF;
··· 832 833 netif_stop_queue(dev); 834 835 + timer_delete_sync(&lp->timer); 836 837 /* Flush the Tx and disable Rx here. */ 838 lp->addr_mode = CMR2h_OFF;
+1 -1
drivers/net/ethernet/rocker/rocker_ofdpa.c
··· 2386 struct hlist_node *tmp; 2387 int bkt; 2388 2389 - del_timer_sync(&ofdpa->fdb_cleanup_timer); 2390 flush_workqueue(rocker->rocker_owq); 2391 2392 spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
··· 2386 struct hlist_node *tmp; 2387 int bkt; 2388 2389 + timer_delete_sync(&ofdpa->fdb_cleanup_timer); 2390 flush_workqueue(rocker->rocker_owq); 2391 2392 spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
+3 -3
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
··· 91 { 92 /* Exit and disable EEE in case of we are in LPI state. */ 93 priv->hw->mac->reset_eee_mode(priv->ioaddr); 94 - del_timer_sync(&priv->eee_ctrl_timer); 95 priv->tx_path_in_lpi_mode = false; 96 } 97 ··· 1044 1045 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { 1046 struct sxgbe_tx_queue *p = priv->txq[queue_num]; 1047 - del_timer_sync(&p->txtimer); 1048 } 1049 } 1050 ··· 1208 struct sxgbe_priv_data *priv = netdev_priv(dev); 1209 1210 if (priv->eee_enabled) 1211 - del_timer_sync(&priv->eee_ctrl_timer); 1212 1213 /* Stop and disconnect the PHY */ 1214 if (dev->phydev) {
··· 91 { 92 /* Exit and disable EEE in case of we are in LPI state. */ 93 priv->hw->mac->reset_eee_mode(priv->ioaddr); 94 + timer_delete_sync(&priv->eee_ctrl_timer); 95 priv->tx_path_in_lpi_mode = false; 96 } 97 ··· 1044 1045 SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { 1046 struct sxgbe_tx_queue *p = priv->txq[queue_num]; 1047 + timer_delete_sync(&p->txtimer); 1048 } 1049 } 1050 ··· 1208 struct sxgbe_priv_data *priv = netdev_priv(dev); 1209 1210 if (priv->eee_enabled) 1211 + timer_delete_sync(&priv->eee_ctrl_timer); 1212 1213 /* Stop and disconnect the PHY */ 1214 if (dev->phydev) {
+3 -3
drivers/net/ethernet/seeq/ether3.c
··· 181 */ 182 static inline void ether3_ledon(struct net_device *dev) 183 { 184 - del_timer(&priv(dev)->timer); 185 priv(dev)->timer.expires = jiffies + HZ / 50; /* leave on for 1/50th second */ 186 add_timer(&priv(dev)->timer); 187 if (priv(dev)->regs.config2 & CFG2_CTRLO) ··· 454 { 455 unsigned long flags; 456 457 - del_timer(&priv(dev)->timer); 458 459 local_irq_save(flags); 460 printk(KERN_ERR "%s: transmit timed out, network cable problem?\n", dev->name); ··· 851 ecard_set_drvdata(ec, NULL); 852 853 unregister_netdev(dev); 854 - del_timer_sync(&priv(dev)->timer); 855 free_netdev(dev); 856 ecard_release_resources(ec); 857 }
··· 181 */ 182 static inline void ether3_ledon(struct net_device *dev) 183 { 184 + timer_delete(&priv(dev)->timer); 185 priv(dev)->timer.expires = jiffies + HZ / 50; /* leave on for 1/50th second */ 186 add_timer(&priv(dev)->timer); 187 if (priv(dev)->regs.config2 & CFG2_CTRLO) ··· 454 { 455 unsigned long flags; 456 457 + timer_delete(&priv(dev)->timer); 458 459 local_irq_save(flags); 460 printk(KERN_ERR "%s: transmit timed out, network cable problem?\n", dev->name); ··· 851 ecard_set_drvdata(ec, NULL); 852 853 unregister_netdev(dev); 854 + timer_delete_sync(&priv(dev)->timer); 855 free_netdev(dev); 856 ecard_release_resources(ec); 857 }
+1 -1
drivers/net/ethernet/sfc/falcon/falcon.c
··· 2657 ++nic_data->stats_disable_count; 2658 spin_unlock_bh(&efx->stats_lock); 2659 2660 - del_timer_sync(&nic_data->stats_timer); 2661 2662 /* Wait enough time for the most recent transfer to 2663 * complete. */
··· 2657 ++nic_data->stats_disable_count; 2658 spin_unlock_bh(&efx->stats_lock); 2659 2660 + timer_delete_sync(&nic_data->stats_timer); 2661 2662 /* Wait enough time for the most recent transfer to 2663 * complete. */
+1 -1
drivers/net/ethernet/sfc/falcon/rx.c
··· 791 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 792 "shutting down RX queue %d\n", ef4_rx_queue_index(rx_queue)); 793 794 - del_timer_sync(&rx_queue->slow_fill); 795 796 /* Release RX buffers from the current read ptr to the write ptr */ 797 if (rx_queue->buffer) {
··· 791 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 792 "shutting down RX queue %d\n", ef4_rx_queue_index(rx_queue)); 793 794 + timer_delete_sync(&rx_queue->slow_fill); 795 796 /* Release RX buffers from the current read ptr to the write ptr */ 797 if (rx_queue->buffer) {
+2 -2
drivers/net/ethernet/sfc/mcdi.c
··· 530 * of it aborting the next request. 531 */ 532 if (!timeout) 533 - del_timer_sync(&mcdi->async_timer); 534 535 spin_lock(&mcdi->async_lock); 536 async = list_first_entry(&mcdi->async_list, ··· 1122 /* We must be in poll or fail mode so no more requests can be queued */ 1123 BUG_ON(mcdi->mode == MCDI_MODE_EVENTS); 1124 1125 - del_timer_sync(&mcdi->async_timer); 1126 1127 /* If a request is still running, make sure we give the MC 1128 * time to complete it so that the response won't overwrite our
··· 530 * of it aborting the next request. 531 */ 532 if (!timeout) 533 + timer_delete_sync(&mcdi->async_timer); 534 535 spin_lock(&mcdi->async_lock); 536 async = list_first_entry(&mcdi->async_list, ··· 1122 /* We must be in poll or fail mode so no more requests can be queued */ 1123 BUG_ON(mcdi->mode == MCDI_MODE_EVENTS); 1124 1125 + timer_delete_sync(&mcdi->async_timer); 1126 1127 /* If a request is still running, make sure we give the MC 1128 * time to complete it so that the response won't overwrite our
+1 -1
drivers/net/ethernet/sfc/rx_common.c
··· 285 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 286 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue)); 287 288 - del_timer_sync(&rx_queue->slow_fill); 289 if (rx_queue->grant_credits) 290 flush_work(&rx_queue->grant_work); 291
··· 285 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 286 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue)); 287 288 + timer_delete_sync(&rx_queue->slow_fill); 289 if (rx_queue->grant_credits) 290 flush_work(&rx_queue->grant_work); 291
+2 -2
drivers/net/ethernet/sfc/siena/mcdi.c
··· 534 * of it aborting the next request. 535 */ 536 if (!timeout) 537 - del_timer_sync(&mcdi->async_timer); 538 539 spin_lock(&mcdi->async_lock); 540 async = list_first_entry(&mcdi->async_list, ··· 1145 /* We must be in poll or fail mode so no more requests can be queued */ 1146 BUG_ON(mcdi->mode == MCDI_MODE_EVENTS); 1147 1148 - del_timer_sync(&mcdi->async_timer); 1149 1150 /* If a request is still running, make sure we give the MC 1151 * time to complete it so that the response won't overwrite our
··· 534 * of it aborting the next request. 535 */ 536 if (!timeout) 537 + timer_delete_sync(&mcdi->async_timer); 538 539 spin_lock(&mcdi->async_lock); 540 async = list_first_entry(&mcdi->async_list, ··· 1145 /* We must be in poll or fail mode so no more requests can be queued */ 1146 BUG_ON(mcdi->mode == MCDI_MODE_EVENTS); 1147 1148 + timer_delete_sync(&mcdi->async_timer); 1149 1150 /* If a request is still running, make sure we give the MC 1151 * time to complete it so that the response won't overwrite our
+1 -1
drivers/net/ethernet/sfc/siena/rx_common.c
··· 284 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 285 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue)); 286 287 - del_timer_sync(&rx_queue->slow_fill); 288 289 /* Release RX buffers from the current read ptr to the write ptr */ 290 if (rx_queue->buffer) {
··· 284 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 285 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue)); 286 287 + timer_delete_sync(&rx_queue->slow_fill); 288 289 /* Release RX buffers from the current read ptr to the write ptr */ 290 if (rx_queue->buffer) {
+4 -4
drivers/net/ethernet/sgi/ioc3-eth.c
··· 718 struct ioc3_private *ip = netdev_priv(dev); 719 struct ioc3_ethregs *regs = ip->regs; 720 721 - del_timer_sync(&ip->ioc3_timer); /* Kill if running */ 722 723 writel(EMCR_RST, &regs->emcr); /* Reset */ 724 readl(&regs->emcr); /* Flush WB */ ··· 801 { 802 struct ioc3_private *ip = netdev_priv(dev); 803 804 - del_timer_sync(&ip->ioc3_timer); 805 806 netif_stop_queue(dev); 807 ··· 950 return 0; 951 952 out_stop: 953 - del_timer_sync(&ip->ioc3_timer); 954 if (ip->rxr) 955 dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, 956 ip->rxr_dma); ··· 971 dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, ip->tx_ring, ip->txr_dma); 972 973 unregister_netdev(dev); 974 - del_timer_sync(&ip->ioc3_timer); 975 free_netdev(dev); 976 } 977
··· 718 struct ioc3_private *ip = netdev_priv(dev); 719 struct ioc3_ethregs *regs = ip->regs; 720 721 + timer_delete_sync(&ip->ioc3_timer); /* Kill if running */ 722 723 writel(EMCR_RST, &regs->emcr); /* Reset */ 724 readl(&regs->emcr); /* Flush WB */ ··· 801 { 802 struct ioc3_private *ip = netdev_priv(dev); 803 804 + timer_delete_sync(&ip->ioc3_timer); 805 806 netif_stop_queue(dev); 807 ··· 950 return 0; 951 952 out_stop: 953 + timer_delete_sync(&ip->ioc3_timer); 954 if (ip->rxr) 955 dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, 956 ip->rxr_dma); ··· 971 dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, ip->tx_ring, ip->txr_dma); 972 973 unregister_netdev(dev); 974 + timer_delete_sync(&ip->ioc3_timer); 975 free_netdev(dev); 976 } 977
+2 -2
drivers/net/ethernet/sis/sis190.c
··· 758 759 if (status & LinkChange) { 760 netif_info(tp, intr, dev, "link change\n"); 761 - del_timer(&tp->timer); 762 schedule_work(&tp->phy_task); 763 } 764 ··· 1034 { 1035 struct sis190_private *tp = netdev_priv(dev); 1036 1037 - del_timer_sync(&tp->timer); 1038 } 1039 1040 static inline void sis190_request_timer(struct net_device *dev)
··· 758 759 if (status & LinkChange) { 760 netif_info(tp, intr, dev, "link change\n"); 761 + timer_delete(&tp->timer); 762 schedule_work(&tp->phy_task); 763 } 764 ··· 1034 { 1035 struct sis190_private *tp = netdev_priv(dev); 1036 1037 + timer_delete_sync(&tp->timer); 1038 } 1039 1040 static inline void sis190_request_timer(struct net_device *dev)
+1 -1
drivers/net/ethernet/sis/sis900.c
··· 1983 /* Stop the chip's Tx and Rx Status Machine */ 1984 sw32(cr, RxDIS | TxDIS | sr32(cr)); 1985 1986 - del_timer(&sis_priv->timer); 1987 1988 free_irq(pdev->irq, net_dev); 1989
··· 1983 /* Stop the chip's Tx and Rx Status Machine */ 1984 sw32(cr, RxDIS | TxDIS | sr32(cr)); 1985 1986 + timer_delete(&sis_priv->timer); 1987 1988 free_irq(pdev->irq, net_dev); 1989
+1 -1
drivers/net/ethernet/smsc/epic100.c
··· 1292 netdev_dbg(dev, "Shutting down ethercard, status was %2.2x.\n", 1293 er32(INTSTAT)); 1294 1295 - del_timer_sync(&ep->timer); 1296 1297 epic_disable_int(dev, ep); 1298
··· 1292 netdev_dbg(dev, "Shutting down ethercard, status was %2.2x.\n", 1293 er32(INTSTAT)); 1294 1295 + timer_delete_sync(&ep->timer); 1296 1297 epic_disable_int(dev, ep); 1298
+1 -1
drivers/net/ethernet/smsc/smc91c92_cs.c
··· 1105 outw(CTL_POWERDOWN, ioaddr + CONTROL ); 1106 1107 link->open--; 1108 - del_timer_sync(&smc->media); 1109 1110 return 0; 1111 } /* smc_close */
··· 1105 outw(CTL_POWERDOWN, ioaddr + CONTROL ); 1106 1107 link->open--; 1108 + timer_delete_sync(&smc->media); 1109 1110 return 0; 1111 } /* smc_close */
+3 -3
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 467 */ 468 static void stmmac_stop_sw_lpi(struct stmmac_priv *priv) 469 { 470 - del_timer_sync(&priv->eee_ctrl_timer); 471 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0); 472 priv->tx_path_in_lpi_mode = false; 473 } ··· 1082 1083 netdev_dbg(priv->dev, "disable EEE\n"); 1084 priv->eee_sw_timer_en = false; 1085 - del_timer_sync(&priv->eee_ctrl_timer); 1086 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0); 1087 priv->tx_path_in_lpi_mode = false; 1088 ··· 7842 7843 if (priv->eee_sw_timer_en) { 7844 priv->tx_path_in_lpi_mode = false; 7845 - del_timer_sync(&priv->eee_ctrl_timer); 7846 } 7847 7848 /* Stop TX/RX DMA */
··· 467 */ 468 static void stmmac_stop_sw_lpi(struct stmmac_priv *priv) 469 { 470 + timer_delete_sync(&priv->eee_ctrl_timer); 471 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0); 472 priv->tx_path_in_lpi_mode = false; 473 } ··· 1082 1083 netdev_dbg(priv->dev, "disable EEE\n"); 1084 priv->eee_sw_timer_en = false; 1085 + timer_delete_sync(&priv->eee_ctrl_timer); 1086 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0); 1087 priv->tx_path_in_lpi_mode = false; 1088 ··· 7842 7843 if (priv->eee_sw_timer_en) { 7844 priv->tx_path_in_lpi_mode = false; 7845 + timer_delete_sync(&priv->eee_ctrl_timer); 7846 } 7847 7848 /* Stop TX/RX DMA */
+1 -1
drivers/net/ethernet/sun/cassini.c
··· 3779 /* Make us not-running to avoid timers respawning */ 3780 cp->hw_running = 0; 3781 3782 - del_timer_sync(&cp->link_timer); 3783 3784 /* Stop the reset task */ 3785 #if 0
··· 3779 /* Make us not-running to avoid timers respawning */ 3780 cp->hw_running = 0; 3781 3782 + timer_delete_sync(&cp->link_timer); 3783 3784 /* Stop the reset task */ 3785 #if 0
+3 -3
drivers/net/ethernet/sun/ldmvsw.c
··· 390 return 0; 391 392 err_out_del_timer: 393 - del_timer_sync(&port->clean_timer); 394 list_del_rcu(&port->list); 395 synchronize_rcu(); 396 netif_napi_del(&port->napi); ··· 408 unsigned long flags; 409 410 if (port) { 411 - del_timer_sync(&port->vio.timer); 412 - del_timer_sync(&port->clean_timer); 413 414 napi_disable(&port->napi); 415 unregister_netdev(port->dev);
··· 390 return 0; 391 392 err_out_del_timer: 393 + timer_delete_sync(&port->clean_timer); 394 list_del_rcu(&port->list); 395 synchronize_rcu(); 396 netif_napi_del(&port->napi); ··· 408 unsigned long flags; 409 410 if (port) { 411 + timer_delete_sync(&port->vio.timer); 412 + timer_delete_sync(&port->clean_timer); 413 414 napi_disable(&port->napi); 415 unregister_netdev(port->dev);
+3 -3
drivers/net/ethernet/sun/niu.c
··· 6165 niu_disable_napi(np); 6166 netif_tx_stop_all_queues(dev); 6167 6168 - del_timer_sync(&np->timer); 6169 6170 spin_lock_irq(&np->lock); 6171 ··· 6511 6512 spin_unlock_irqrestore(&np->lock, flags); 6513 6514 - del_timer_sync(&np->timer); 6515 6516 niu_netif_stop(np); 6517 ··· 9914 flush_work(&np->reset_task); 9915 niu_netif_stop(np); 9916 9917 - del_timer_sync(&np->timer); 9918 9919 spin_lock_irqsave(&np->lock, flags); 9920 niu_enable_interrupts(np, 0);
··· 6165 niu_disable_napi(np); 6166 netif_tx_stop_all_queues(dev); 6167 6168 + timer_delete_sync(&np->timer); 6169 6170 spin_lock_irq(&np->lock); 6171 ··· 6511 6512 spin_unlock_irqrestore(&np->lock, flags); 6513 6514 + timer_delete_sync(&np->timer); 6515 6516 niu_netif_stop(np); 6517 ··· 9914 flush_work(&np->reset_task); 9915 niu_netif_stop(np); 9916 9917 + timer_delete_sync(&np->timer); 9918 9919 spin_lock_irqsave(&np->lock, flags); 9920 niu_enable_interrupts(np, 0);
+1 -1
drivers/net/ethernet/sun/sunbmac.c
··· 931 { 932 struct bigmac *bp = netdev_priv(dev); 933 934 - del_timer(&bp->bigmac_timer); 935 bp->timer_state = asleep; 936 bp->timer_ticks = 0; 937
··· 931 { 932 struct bigmac *bp = netdev_priv(dev); 933 934 + timer_delete(&bp->bigmac_timer); 935 bp->timer_state = asleep; 936 bp->timer_ticks = 0; 937
+4 -4
drivers/net/ethernet/sun/sungem.c
··· 2180 gem_disable_ints(gp); 2181 2182 /* Stop the link timer */ 2183 - del_timer_sync(&gp->link_timer); 2184 2185 /* We cannot cancel the reset task while holding the 2186 * rtnl lock, we'd get an A->B / B->A deadlock stituation ··· 2230 } 2231 2232 /* Stop the link timer */ 2233 - del_timer_sync(&gp->link_timer); 2234 2235 /* Stop NAPI and tx */ 2236 gem_netif_stop(gp); ··· 2610 2611 /* Apply settings and restart link process. */ 2612 if (netif_device_present(gp->dev)) { 2613 - del_timer_sync(&gp->link_timer); 2614 gem_begin_auto_negotiation(gp, cmd); 2615 } 2616 ··· 2626 2627 /* Restart link process */ 2628 if (netif_device_present(gp->dev)) { 2629 - del_timer_sync(&gp->link_timer); 2630 gem_begin_auto_negotiation(gp, NULL); 2631 } 2632
··· 2180 gem_disable_ints(gp); 2181 2182 /* Stop the link timer */ 2183 + timer_delete_sync(&gp->link_timer); 2184 2185 /* We cannot cancel the reset task while holding the 2186 * rtnl lock, we'd get an A->B / B->A deadlock stituation ··· 2230 } 2231 2232 /* Stop the link timer */ 2233 + timer_delete_sync(&gp->link_timer); 2234 2235 /* Stop NAPI and tx */ 2236 gem_netif_stop(gp); ··· 2610 2611 /* Apply settings and restart link process. */ 2612 if (netif_device_present(gp->dev)) { 2613 + timer_delete_sync(&gp->link_timer); 2614 gem_begin_auto_negotiation(gp, cmd); 2615 } 2616 ··· 2626 2627 /* Restart link process */ 2628 if (netif_device_present(gp->dev)) { 2629 + timer_delete_sync(&gp->link_timer); 2630 gem_begin_auto_negotiation(gp, NULL); 2631 } 2632
+3 -3
drivers/net/ethernet/sun/sunhme.c
··· 1265 u32 regtmp, rxcfg; 1266 1267 /* If auto-negotiation timer is running, kill it. */ 1268 - del_timer(&hp->happy_timer); 1269 1270 HMD("happy_flags[%08x]\n", hp->happy_flags); 1271 if (!(hp->happy_flags & HFLAG_INIT)) { ··· 1922 happy_meal_clean_rings(hp); 1923 1924 /* If auto-negotiation timer is running, kill it. */ 1925 - del_timer(&hp->happy_timer); 1926 1927 spin_unlock_irq(&hp->happy_lock); 1928 ··· 2184 2185 /* Ok, do it to it. */ 2186 spin_lock_irq(&hp->happy_lock); 2187 - del_timer(&hp->happy_timer); 2188 happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd); 2189 spin_unlock_irq(&hp->happy_lock); 2190
··· 1265 u32 regtmp, rxcfg; 1266 1267 /* If auto-negotiation timer is running, kill it. */ 1268 + timer_delete(&hp->happy_timer); 1269 1270 HMD("happy_flags[%08x]\n", hp->happy_flags); 1271 if (!(hp->happy_flags & HFLAG_INIT)) { ··· 1922 happy_meal_clean_rings(hp); 1923 1924 /* If auto-negotiation timer is running, kill it. */ 1925 + timer_delete(&hp->happy_timer); 1926 1927 spin_unlock_irq(&hp->happy_lock); 1928 ··· 2184 2185 /* Ok, do it to it. */ 2186 spin_lock_irq(&hp->happy_lock); 2187 + timer_delete(&hp->happy_timer); 2188 happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd); 2189 spin_unlock_irq(&hp->happy_lock); 2190
+1 -1
drivers/net/ethernet/sun/sunvnet.c
··· 505 struct vnet_port *port = dev_get_drvdata(&vdev->dev); 506 507 if (port) { 508 - del_timer_sync(&port->vio.timer); 509 510 napi_disable(&port->napi); 511
··· 505 struct vnet_port *port = dev_get_drvdata(&vdev->dev); 506 507 if (port) { 508 + timer_delete_sync(&port->vio.timer); 509 510 napi_disable(&port->napi); 511
+3 -3
drivers/net/ethernet/sun/sunvnet_common.c
··· 1058 (void)mod_timer(&port->clean_timer, 1059 jiffies + VNET_CLEAN_TIMEOUT); 1060 else 1061 - del_timer(&port->clean_timer); 1062 } 1063 EXPORT_SYMBOL_GPL(sunvnet_clean_timer_expire_common); 1064 ··· 1513 (void)mod_timer(&port->clean_timer, 1514 jiffies + VNET_CLEAN_TIMEOUT); 1515 else if (port) 1516 - del_timer(&port->clean_timer); 1517 rcu_read_unlock(); 1518 dev_kfree_skb(skb); 1519 vnet_free_skbs(freeskbs); ··· 1707 1708 void vnet_port_reset(struct vnet_port *port) 1709 { 1710 - del_timer(&port->clean_timer); 1711 sunvnet_port_free_tx_bufs_common(port); 1712 port->rmtu = 0; 1713 port->tso = (port->vsw == 0); /* no tso in vsw, misbehaves in bridge */
··· 1058 (void)mod_timer(&port->clean_timer, 1059 jiffies + VNET_CLEAN_TIMEOUT); 1060 else 1061 + timer_delete(&port->clean_timer); 1062 } 1063 EXPORT_SYMBOL_GPL(sunvnet_clean_timer_expire_common); 1064 ··· 1513 (void)mod_timer(&port->clean_timer, 1514 jiffies + VNET_CLEAN_TIMEOUT); 1515 else if (port) 1516 + timer_delete(&port->clean_timer); 1517 rcu_read_unlock(); 1518 dev_kfree_skb(skb); 1519 vnet_free_skbs(freeskbs); ··· 1707 1708 void vnet_port_reset(struct vnet_port *port) 1709 { 1710 + timer_delete(&port->clean_timer); 1711 sunvnet_port_free_tx_bufs_common(port); 1712 port->rmtu = 0; 1713 port->tso = (port->vsw == 0); /* no tso in vsw, misbehaves in bridge */
+1 -1
drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
··· 405 if (!channel->tx_ring) 406 break; 407 408 - del_timer_sync(&channel->tx_timer); 409 } 410 } 411
··· 405 if (!channel->tx_ring) 406 break; 407 408 + timer_delete_sync(&channel->tx_timer); 409 } 410 } 411
+1 -1
drivers/net/ethernet/ti/cpsw_ale.c
··· 1287 return; 1288 } 1289 1290 - del_timer_sync(&ale->timer); 1291 } 1292 1293 void cpsw_ale_start(struct cpsw_ale *ale)
··· 1287 return; 1288 } 1289 1290 + timer_delete_sync(&ale->timer); 1291 } 1292 1293 void cpsw_ale_start(struct cpsw_ale *ale)
+1 -1
drivers/net/ethernet/ti/netcp_ethss.c
··· 3796 { 3797 struct gbe_priv *gbe_dev = inst_priv; 3798 3799 - del_timer_sync(&gbe_dev->timer); 3800 cpts_release(gbe_dev->cpts); 3801 cpsw_ale_stop(gbe_dev->ale); 3802 netcp_txpipe_close(&gbe_dev->tx_pipe);
··· 3796 { 3797 struct gbe_priv *gbe_dev = inst_priv; 3798 3799 + timer_delete_sync(&gbe_dev->timer); 3800 cpts_release(gbe_dev->cpts); 3801 cpsw_ale_stop(gbe_dev->ale); 3802 netcp_txpipe_close(&gbe_dev->tx_pipe);
+2 -2
drivers/net/ethernet/ti/tlan.c
··· 332 { 333 struct tlan_priv *priv = netdev_priv(dev); 334 335 - del_timer_sync(&priv->media_timer); 336 tlan_read_and_clear_stats(dev, TLAN_RECORD); 337 outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD); 338 /* Reset and power down phy */ 339 tlan_reset_adapter(dev); 340 if (priv->timer.function != NULL) { 341 - del_timer_sync(&priv->timer); 342 priv->timer.function = NULL; 343 } 344 }
··· 332 { 333 struct tlan_priv *priv = netdev_priv(dev); 334 335 + timer_delete_sync(&priv->media_timer); 336 tlan_read_and_clear_stats(dev, TLAN_RECORD); 337 outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD); 338 /* Reset and power down phy */ 339 tlan_reset_adapter(dev); 340 if (priv->timer.function != NULL) { 341 + timer_delete_sync(&priv->timer); 342 priv->timer.function = NULL; 343 } 344 }
+1 -1
drivers/net/ethernet/tundra/tsi108_eth.c
··· 1379 netif_stop_queue(dev); 1380 napi_disable(&data->napi); 1381 1382 - del_timer_sync(&data->timer); 1383 1384 tsi108_stop_ethernet(dev); 1385 tsi108_kill_phy(dev);
··· 1379 netif_stop_queue(dev); 1380 napi_disable(&data->napi); 1381 1382 + timer_delete_sync(&data->timer); 1383 1384 tsi108_stop_ethernet(dev); 1385 tsi108_kill_phy(dev);
+5 -5
drivers/net/fddi/defza.c
··· 983 984 case FZA_STATE_UNINITIALIZED: 985 netif_carrier_off(dev); 986 - del_timer_sync(&fp->reset_timer); 987 fp->ring_cmd_index = 0; 988 fp->ring_uns_index = 0; 989 fp->ring_rmc_tx_index = 0; ··· 1017 fp->queue_active = 0; 1018 netif_stop_queue(dev); 1019 pr_debug("%s: queue stopped\n", fp->name); 1020 - del_timer_sync(&fp->reset_timer); 1021 pr_warn("%s: halted, reason: %x\n", fp->name, 1022 FZA_STATUS_GET_HALT(status)); 1023 fza_regs_dump(fp); ··· 1227 netif_stop_queue(dev); 1228 pr_debug("%s: queue stopped\n", fp->name); 1229 1230 - del_timer_sync(&fp->reset_timer); 1231 spin_lock_irqsave(&fp->lock, flags); 1232 fp->state = FZA_STATE_UNINITIALIZED; 1233 fp->state_chg_flag = 0; ··· 1493 return 0; 1494 1495 err_out_irq: 1496 - del_timer_sync(&fp->reset_timer); 1497 fza_do_shutdown(fp); 1498 free_irq(dev->irq, dev); 1499 ··· 1520 1521 unregister_netdev(dev); 1522 1523 - del_timer_sync(&fp->reset_timer); 1524 fza_do_shutdown(fp); 1525 free_irq(dev->irq, dev); 1526
··· 983 984 case FZA_STATE_UNINITIALIZED: 985 netif_carrier_off(dev); 986 + timer_delete_sync(&fp->reset_timer); 987 fp->ring_cmd_index = 0; 988 fp->ring_uns_index = 0; 989 fp->ring_rmc_tx_index = 0; ··· 1017 fp->queue_active = 0; 1018 netif_stop_queue(dev); 1019 pr_debug("%s: queue stopped\n", fp->name); 1020 + timer_delete_sync(&fp->reset_timer); 1021 pr_warn("%s: halted, reason: %x\n", fp->name, 1022 FZA_STATUS_GET_HALT(status)); 1023 fza_regs_dump(fp); ··· 1227 netif_stop_queue(dev); 1228 pr_debug("%s: queue stopped\n", fp->name); 1229 1230 + timer_delete_sync(&fp->reset_timer); 1231 spin_lock_irqsave(&fp->lock, flags); 1232 fp->state = FZA_STATE_UNINITIALIZED; 1233 fp->state_chg_flag = 0; ··· 1493 return 0; 1494 1495 err_out_irq: 1496 + timer_delete_sync(&fp->reset_timer); 1497 fza_do_shutdown(fp); 1498 free_irq(dev->irq, dev); 1499 ··· 1520 1521 unregister_netdev(dev); 1522 1523 + timer_delete_sync(&fp->reset_timer); 1524 fza_do_shutdown(fp); 1525 free_irq(dev->irq, dev); 1526
+3 -3
drivers/net/hamradio/6pack.c
··· 660 661 unregister_netdev(sp->dev); 662 663 - del_timer_sync(&sp->tx_t); 664 - del_timer_sync(&sp->resync_t); 665 666 /* Free all 6pack frame buffers after unreg. */ 667 kfree(sp->xbuff); ··· 937 inbyte = pre_rbuff[count1]; 938 if (inbyte == SIXP_FOUND_TNC) { 939 tnc_set_sync_state(sp, TNC_IN_SYNC); 940 - del_timer(&sp->resync_t); 941 } 942 if ((inbyte & SIXP_PRIO_CMD_MASK) != 0) 943 decode_prio_command(sp, inbyte);
··· 660 661 unregister_netdev(sp->dev); 662 663 + timer_delete_sync(&sp->tx_t); 664 + timer_delete_sync(&sp->resync_t); 665 666 /* Free all 6pack frame buffers after unreg. */ 667 kfree(sp->xbuff); ··· 937 inbyte = pre_rbuff[count1]; 938 if (inbyte == SIXP_FOUND_TNC) { 939 tnc_set_sync_state(sp, TNC_IN_SYNC); 940 + timer_delete(&sp->resync_t); 941 } 942 if ((inbyte & SIXP_PRIO_CMD_MASK) != 0) 943 decode_prio_command(sp, inbyte);
+13 -13
drivers/net/hamradio/scc.c
··· 794 795 static void init_channel(struct scc_channel *scc) 796 { 797 - del_timer(&scc->tx_t); 798 - del_timer(&scc->tx_wdog); 799 800 disable_irq(scc->irq); 801 ··· 999 void (*handler)(struct timer_list *t), 1000 unsigned long when) 1001 { 1002 - del_timer(&scc->tx_t); 1003 1004 if (when == 0) 1005 { ··· 1029 unsigned long flags; 1030 1031 spin_lock_irqsave(&scc->lock, flags); 1032 - del_timer(&scc->tx_wdog); 1033 1034 if (scc->kiss.maxdefer != 0 && scc->kiss.maxdefer != TIMER_OFF) 1035 { ··· 1045 unsigned long flags; 1046 1047 spin_lock_irqsave(&scc->lock, flags); 1048 - del_timer(&scc->tx_wdog); 1049 1050 if (scc->kiss.maxkeyup != 0 && scc->kiss.maxkeyup != TIMER_OFF) 1051 { ··· 1194 unsigned long flags; 1195 1196 spin_lock_irqsave(&scc->lock, flags); 1197 - del_timer(&scc->tx_wdog); 1198 scc_key_trx(scc, TX_OFF); 1199 spin_unlock_irqrestore(&scc->lock, flags); 1200 ··· 1219 { 1220 struct scc_channel *scc = from_timer(scc, t, tx_wdog); 1221 1222 - del_timer(&scc->tx_t); 1223 netif_stop_queue(scc->dev); /* don't pile on the wabbit! */ 1224 1225 scc_discard_buffers(scc); ··· 1248 netif_stop_queue(scc->dev); 1249 scc_discard_buffers(scc); 1250 1251 - del_timer(&scc->tx_t); 1252 1253 cl(scc, R1, TxINT_ENAB); /* force an ABORT, but don't */ 1254 cl(scc, R15, TxUIE); /* count it. */ ··· 1272 { 1273 struct scc_channel *scc = from_timer(scc, t, tx_t); 1274 1275 - del_timer(&scc->tx_wdog); 1276 1277 scc_key_trx(scc, TX_OFF); 1278 if(scc->kiss.mintime) ··· 1407 unsigned long flags; 1408 1409 spin_lock_irqsave(&scc->lock, flags); 1410 - del_timer(&scc->tx_wdog); 1411 scc_key_trx(scc, TX_OFF); 1412 wr(scc, R6, 0); 1413 wr(scc, R7, FLAG); ··· 1428 netif_stop_queue(scc->dev); 1429 scc_discard_buffers(scc); 1430 1431 - del_timer(&scc->tx_wdog); 1432 1433 scc->tx_wdog.function = scc_stop_calibrate; 1434 scc->tx_wdog.expires = jiffies + HZ*duration; ··· 1609 wr(scc,R3,0); 1610 spin_unlock_irqrestore(&scc->lock, flags); 1611 1612 - del_timer_sync(&scc->tx_t); 1613 - del_timer_sync(&scc->tx_wdog); 1614 1615 scc_discard_buffers(scc); 1616
··· 794 795 static void init_channel(struct scc_channel *scc) 796 { 797 + timer_delete(&scc->tx_t); 798 + timer_delete(&scc->tx_wdog); 799 800 disable_irq(scc->irq); 801 ··· 999 void (*handler)(struct timer_list *t), 1000 unsigned long when) 1001 { 1002 + timer_delete(&scc->tx_t); 1003 1004 if (when == 0) 1005 { ··· 1029 unsigned long flags; 1030 1031 spin_lock_irqsave(&scc->lock, flags); 1032 + timer_delete(&scc->tx_wdog); 1033 1034 if (scc->kiss.maxdefer != 0 && scc->kiss.maxdefer != TIMER_OFF) 1035 { ··· 1045 unsigned long flags; 1046 1047 spin_lock_irqsave(&scc->lock, flags); 1048 + timer_delete(&scc->tx_wdog); 1049 1050 if (scc->kiss.maxkeyup != 0 && scc->kiss.maxkeyup != TIMER_OFF) 1051 { ··· 1194 unsigned long flags; 1195 1196 spin_lock_irqsave(&scc->lock, flags); 1197 + timer_delete(&scc->tx_wdog); 1198 scc_key_trx(scc, TX_OFF); 1199 spin_unlock_irqrestore(&scc->lock, flags); 1200 ··· 1219 { 1220 struct scc_channel *scc = from_timer(scc, t, tx_wdog); 1221 1222 + timer_delete(&scc->tx_t); 1223 netif_stop_queue(scc->dev); /* don't pile on the wabbit! */ 1224 1225 scc_discard_buffers(scc); ··· 1248 netif_stop_queue(scc->dev); 1249 scc_discard_buffers(scc); 1250 1251 + timer_delete(&scc->tx_t); 1252 1253 cl(scc, R1, TxINT_ENAB); /* force an ABORT, but don't */ 1254 cl(scc, R15, TxUIE); /* count it. */ ··· 1272 { 1273 struct scc_channel *scc = from_timer(scc, t, tx_t); 1274 1275 + timer_delete(&scc->tx_wdog); 1276 1277 scc_key_trx(scc, TX_OFF); 1278 if(scc->kiss.mintime) ··· 1407 unsigned long flags; 1408 1409 spin_lock_irqsave(&scc->lock, flags); 1410 + timer_delete(&scc->tx_wdog); 1411 scc_key_trx(scc, TX_OFF); 1412 wr(scc, R6, 0); 1413 wr(scc, R7, FLAG); ··· 1428 netif_stop_queue(scc->dev); 1429 scc_discard_buffers(scc); 1430 1431 + timer_delete(&scc->tx_wdog); 1432 1433 scc->tx_wdog.function = scc_stop_calibrate; 1434 scc->tx_wdog.expires = jiffies + HZ*duration; ··· 1609 wr(scc,R3,0); 1610 spin_unlock_irqrestore(&scc->lock, flags); 1611 1612 + timer_delete_sync(&scc->tx_t); 1613 + timer_delete_sync(&scc->tx_wdog); 1614 1615 scc_discard_buffers(scc); 1616
+1 -1
drivers/net/hamradio/yam.c
··· 1158 struct yam_mcs *p; 1159 int i; 1160 1161 - del_timer_sync(&yam_timer); 1162 for (i = 0; i < NR_PORTS; i++) { 1163 struct net_device *dev = yam_devs[i]; 1164 if (dev) {
··· 1158 struct yam_mcs *p; 1159 int i; 1160 1161 + timer_delete_sync(&yam_timer); 1162 for (i = 0; i < NR_PORTS; i++) { 1163 struct net_device *dev = yam_devs[i]; 1164 if (dev) {
+1 -1
drivers/net/hippi/rrunner.c
··· 1357 rrpriv->fw_running = 0; 1358 1359 spin_unlock_irqrestore(&rrpriv->lock, flags); 1360 - del_timer_sync(&rrpriv->timer); 1361 spin_lock_irqsave(&rrpriv->lock, flags); 1362 1363 writel(0, &regs->TxPi);
··· 1357 rrpriv->fw_running = 0; 1358 1359 spin_unlock_irqrestore(&rrpriv->lock, flags); 1360 + timer_delete_sync(&rrpriv->timer); 1361 spin_lock_irqsave(&rrpriv->lock, flags); 1362 1363 writel(0, &regs->TxPi);
+2 -2
drivers/net/netdevsim/netdev.c
··· 441 442 static void nsim_rq_timer_init(struct nsim_rq *rq) 443 { 444 - hrtimer_init(&rq->napi_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 445 - rq->napi_timer.function = nsim_napi_schedule; 446 } 447 448 static void nsim_enable_napi(struct netdevsim *ns)
··· 441 442 static void nsim_rq_timer_init(struct nsim_rq *rq) 443 { 444 + hrtimer_setup(&rq->napi_timer, nsim_napi_schedule, CLOCK_MONOTONIC, 445 + HRTIMER_MODE_REL); 446 } 447 448 static void nsim_enable_napi(struct netdevsim *ns)
+1 -1
drivers/net/ntb_netdev.c
··· 291 while ((skb = ntb_transport_rx_remove(dev->qp, &len))) 292 dev_kfree_skb(skb); 293 294 - del_timer_sync(&dev->tx_timer); 295 296 return 0; 297 }
··· 291 while ((skb = ntb_transport_rx_remove(dev->qp, &len))) 292 dev_kfree_skb(skb); 293 294 + timer_delete_sync(&dev->tx_timer); 295 296 return 0; 297 }
+2 -2
drivers/net/phy/phylink.c
··· 952 static void phylink_pcs_poll_stop(struct phylink *pl) 953 { 954 if (pl->cfg_link_an_mode == MLO_AN_INBAND) 955 - del_timer(&pl->link_poll); 956 } 957 958 static void phylink_pcs_poll_start(struct phylink *pl) ··· 2448 sfp_upstream_stop(pl->sfp_bus); 2449 if (pl->phydev) 2450 phy_stop(pl->phydev); 2451 - del_timer_sync(&pl->link_poll); 2452 if (pl->link_irq) { 2453 free_irq(pl->link_irq, pl); 2454 pl->link_irq = 0;
··· 952 static void phylink_pcs_poll_stop(struct phylink *pl) 953 { 954 if (pl->cfg_link_an_mode == MLO_AN_INBAND) 955 + timer_delete(&pl->link_poll); 956 } 957 958 static void phylink_pcs_poll_start(struct phylink *pl) ··· 2448 sfp_upstream_stop(pl->sfp_bus); 2449 if (pl->phydev) 2450 phy_stop(pl->phydev); 2451 + timer_delete_sync(&pl->link_poll); 2452 if (pl->link_irq) { 2453 free_irq(pl->link_irq, pl); 2454 pl->link_irq = 0;
+7 -7
drivers/net/slip/slip.c
··· 899 900 /* VSV = very important to remove timers */ 901 #ifdef CONFIG_SLIP_SMART 902 - del_timer_sync(&sl->keepalive_timer); 903 - del_timer_sync(&sl->outfill_timer); 904 #endif 905 /* Flush network side */ 906 unregister_netdev(sl->dev); ··· 1137 jiffies + sl->keepalive * HZ); 1138 set_bit(SLF_KEEPTEST, &sl->flags); 1139 } else 1140 - del_timer(&sl->keepalive_timer); 1141 spin_unlock_bh(&sl->lock); 1142 return 0; 1143 ··· 1162 jiffies + sl->outfill * HZ); 1163 set_bit(SLF_OUTWAIT, &sl->flags); 1164 } else 1165 - del_timer(&sl->outfill_timer); 1166 spin_unlock_bh(&sl->lock); 1167 return 0; 1168 ··· 1217 jiffies + sl->keepalive * HZ); 1218 set_bit(SLF_KEEPTEST, &sl->flags); 1219 } else 1220 - del_timer(&sl->keepalive_timer); 1221 break; 1222 1223 case SIOCGKEEPALIVE: ··· 1235 jiffies + sl->outfill * HZ); 1236 set_bit(SLF_OUTWAIT, &sl->flags); 1237 } else 1238 - del_timer(&sl->outfill_timer); 1239 break; 1240 1241 case SIOCGOUTFILL: ··· 1421 /* keepalive still high :(, we must hangup */ 1422 if (sl->outfill) 1423 /* outfill timer must be deleted too */ 1424 - (void)del_timer(&sl->outfill_timer); 1425 printk(KERN_DEBUG "%s: no packets received during keepalive timeout, hangup.\n", sl->dev->name); 1426 /* this must hangup tty & close slip */ 1427 tty_hangup(sl->tty);
··· 899 900 /* VSV = very important to remove timers */ 901 #ifdef CONFIG_SLIP_SMART 902 + timer_delete_sync(&sl->keepalive_timer); 903 + timer_delete_sync(&sl->outfill_timer); 904 #endif 905 /* Flush network side */ 906 unregister_netdev(sl->dev); ··· 1137 jiffies + sl->keepalive * HZ); 1138 set_bit(SLF_KEEPTEST, &sl->flags); 1139 } else 1140 + timer_delete(&sl->keepalive_timer); 1141 spin_unlock_bh(&sl->lock); 1142 return 0; 1143 ··· 1162 jiffies + sl->outfill * HZ); 1163 set_bit(SLF_OUTWAIT, &sl->flags); 1164 } else 1165 + timer_delete(&sl->outfill_timer); 1166 spin_unlock_bh(&sl->lock); 1167 return 0; 1168 ··· 1217 jiffies + sl->keepalive * HZ); 1218 set_bit(SLF_KEEPTEST, &sl->flags); 1219 } else 1220 + timer_delete(&sl->keepalive_timer); 1221 break; 1222 1223 case SIOCGKEEPALIVE: ··· 1235 jiffies + sl->outfill * HZ); 1236 set_bit(SLF_OUTWAIT, &sl->flags); 1237 } else 1238 + timer_delete(&sl->outfill_timer); 1239 break; 1240 1241 case SIOCGOUTFILL: ··· 1421 /* keepalive still high :(, we must hangup */ 1422 if (sl->outfill) 1423 /* outfill timer must be deleted too */ 1424 + (void) timer_delete(&sl->outfill_timer); 1425 printk(KERN_DEBUG "%s: no packets received during keepalive timeout, hangup.\n", sl->dev->name); 1426 /* this must hangup tty & close slip */ 1427 tty_hangup(sl->tty);
+1 -1
drivers/net/tun.c
··· 1295 1296 static void tun_flow_uninit(struct tun_struct *tun) 1297 { 1298 - del_timer_sync(&tun->flow_gc_timer); 1299 tun_flow_flush(tun); 1300 } 1301
··· 1295 1296 static void tun_flow_uninit(struct tun_struct *tun) 1297 { 1298 + timer_delete_sync(&tun->flow_gc_timer); 1299 tun_flow_flush(tun); 1300 } 1301
+1 -1
drivers/net/usb/catc.c
··· 738 netif_stop_queue(netdev); 739 740 if (!catc->is_f5u011) 741 - del_timer_sync(&catc->timer); 742 743 usb_kill_urb(catc->rx_urb); 744 usb_kill_urb(catc->tx_urb);
··· 738 netif_stop_queue(netdev); 739 740 if (!catc->is_f5u011) 741 + timer_delete_sync(&catc->timer); 742 743 usb_kill_urb(catc->rx_urb); 744 usb_kill_urb(catc->tx_urb);
+3 -3
drivers/net/usb/lan78xx.c
··· 1661 if (ret < 0) 1662 return ret; 1663 1664 - del_timer(&dev->stat_monitor); 1665 } else if (link && !dev->link_on) { 1666 dev->link_on = true; 1667 ··· 3304 mutex_lock(&dev->dev_mutex); 3305 3306 if (timer_pending(&dev->stat_monitor)) 3307 - del_timer_sync(&dev->stat_monitor); 3308 3309 clear_bit(EVENT_DEV_OPEN, &dev->flags); 3310 netif_stop_queue(net); ··· 4938 /* reattach */ 4939 netif_device_attach(dev->net); 4940 4941 - del_timer(&dev->stat_monitor); 4942 4943 if (PMSG_IS_AUTO(message)) { 4944 ret = lan78xx_set_auto_suspend(dev);
··· 1661 if (ret < 0) 1662 return ret; 1663 1664 + timer_delete(&dev->stat_monitor); 1665 } else if (link && !dev->link_on) { 1666 dev->link_on = true; 1667 ··· 3304 mutex_lock(&dev->dev_mutex); 3305 3306 if (timer_pending(&dev->stat_monitor)) 3307 + timer_delete_sync(&dev->stat_monitor); 3308 3309 clear_bit(EVENT_DEV_OPEN, &dev->flags); 3310 netif_stop_queue(net); ··· 4938 /* reattach */ 4939 netif_device_attach(dev->net); 4940 4941 + timer_delete(&dev->stat_monitor); 4942 4943 if (PMSG_IS_AUTO(message)) { 4944 ret = lan78xx_set_auto_suspend(dev);
+1 -1
drivers/net/usb/sierra_net.c
··· 522 " stopping sync timer", 523 hh.msgspecific.byte); 524 /* Got sync resp - stop timer & clear mask */ 525 - del_timer_sync(&priv->sync_timer); 526 clear_bit(SIERRA_NET_TIMER_EXPIRY, 527 &priv->kevent_flags); 528 break;
··· 522 " stopping sync timer", 523 hh.msgspecific.byte); 524 /* Got sync resp - stop timer & clear mask */ 525 + timer_delete_sync(&priv->sync_timer); 526 clear_bit(SIERRA_NET_TIMER_EXPIRY, 527 &priv->kevent_flags); 528 break;
+3 -3
drivers/net/usb/usbnet.c
··· 860 861 /* deferred work (timer, softirq, task) must also stop */ 862 dev->flags = 0; 863 - del_timer_sync(&dev->delay); 864 tasklet_kill(&dev->bh); 865 cancel_work_sync(&dev->kevent); 866 ··· 869 * we have a flag 870 */ 871 tasklet_kill(&dev->bh); 872 - del_timer_sync(&dev->delay); 873 cancel_work_sync(&dev->kevent); 874 875 if (!pm) ··· 1882 */ 1883 usbnet_mark_going_away(dev); 1884 cancel_work_sync(&dev->kevent); 1885 - del_timer_sync(&dev->delay); 1886 free_netdev(net); 1887 out: 1888 return status;
··· 860 861 /* deferred work (timer, softirq, task) must also stop */ 862 dev->flags = 0; 863 + timer_delete_sync(&dev->delay); 864 tasklet_kill(&dev->bh); 865 cancel_work_sync(&dev->kevent); 866 ··· 869 * we have a flag 870 */ 871 tasklet_kill(&dev->bh); 872 + timer_delete_sync(&dev->delay); 873 cancel_work_sync(&dev->kevent); 874 875 if (!pm) ··· 1882 */ 1883 usbnet_mark_going_away(dev); 1884 cancel_work_sync(&dev->kevent); 1885 + timer_delete_sync(&dev->delay); 1886 free_netdev(net); 1887 out: 1888 return status;
+1 -1
drivers/net/vxlan/vxlan_core.c
··· 3193 3194 vxlan_multicast_leave(vxlan); 3195 3196 - del_timer_sync(&vxlan->age_timer); 3197 3198 vxlan_flush(vxlan, &desc); 3199 vxlan_sock_release(vxlan);
··· 3193 3194 vxlan_multicast_leave(vxlan); 3195 3196 + timer_delete_sync(&vxlan->age_timer); 3197 3198 vxlan_flush(vxlan, &desc); 3199 vxlan_sock_release(vxlan);
+1 -1
drivers/net/wan/hdlc_cisco.c
··· 285 struct cisco_state *st = state(hdlc); 286 unsigned long flags; 287 288 - del_timer_sync(&st->timer); 289 290 spin_lock_irqsave(&st->lock, flags); 291 netif_dormant_on(dev);
··· 285 struct cisco_state *st = state(hdlc); 286 unsigned long flags; 287 288 + timer_delete_sync(&st->timer); 289 290 spin_lock_irqsave(&st->lock, flags); 291 netif_dormant_on(dev);
+1 -1
drivers/net/wan/hdlc_fr.c
··· 1025 printk(KERN_DEBUG "fr_stop\n"); 1026 #endif 1027 if (state(hdlc)->settings.lmi != LMI_NONE) 1028 - del_timer_sync(&state(hdlc)->timer); 1029 fr_set_link_state(0, dev); 1030 } 1031
··· 1025 printk(KERN_DEBUG "fr_stop\n"); 1026 #endif 1027 if (state(hdlc)->settings.lmi != LMI_NONE) 1028 + timer_delete_sync(&state(hdlc)->timer); 1029 fr_set_link_state(0, dev); 1030 } 1031
+1 -1
drivers/net/wan/hdlc_ppp.c
··· 358 } 359 } 360 if (old_state != CLOSED && proto->state == CLOSED) 361 - del_timer(&proto->timer); 362 363 #if DEBUG_STATE 364 printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) ... %s\n", dev->name,
··· 358 } 359 } 360 if (old_state != CLOSED && proto->state == CLOSED) 361 + timer_delete(&proto->timer); 362 363 #if DEBUG_STATE 364 printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) ... %s\n", dev->name,
+1 -1
drivers/net/wireguard/device.c
··· 81 list_for_each_entry(wg, &device_list, device_list) { 82 mutex_lock(&wg->device_update_lock); 83 list_for_each_entry(peer, &wg->peer_list, peer_list) { 84 - del_timer(&peer->timer_zero_key_material); 85 wg_noise_handshake_clear(&peer->handshake); 86 wg_noise_keypairs_clear(&peer->keypairs); 87 }
··· 81 list_for_each_entry(wg, &device_list, device_list) { 82 mutex_lock(&wg->device_update_lock); 83 list_for_each_entry(peer, &wg->peer_list, peer_list) { 84 + timer_delete(&peer->timer_zero_key_material); 85 wg_noise_handshake_clear(&peer->handshake); 86 wg_noise_keypairs_clear(&peer->keypairs); 87 }
+4 -4
drivers/net/wireguard/timers.c
··· 48 peer->device->dev->name, peer->internal_id, 49 &peer->endpoint.addr, (int)MAX_TIMER_HANDSHAKES + 2); 50 51 - del_timer(&peer->timer_send_keepalive); 52 /* We drop all packets without a keypair and don't try again, 53 * if we try unsuccessfully for too long to make a handshake. 54 */ ··· 167 */ 168 void wg_timers_any_authenticated_packet_sent(struct wg_peer *peer) 169 { 170 - del_timer(&peer->timer_send_keepalive); 171 } 172 173 /* Should be called after any type of authenticated packet is received, whether ··· 175 */ 176 void wg_timers_any_authenticated_packet_received(struct wg_peer *peer) 177 { 178 - del_timer(&peer->timer_new_handshake); 179 } 180 181 /* Should be called after a handshake initiation message is sent. */ ··· 191 */ 192 void wg_timers_handshake_complete(struct wg_peer *peer) 193 { 194 - del_timer(&peer->timer_retransmit_handshake); 195 peer->timer_handshake_attempts = 0; 196 peer->sent_lastminute_handshake = false; 197 ktime_get_real_ts64(&peer->walltime_last_handshake);
··· 48 peer->device->dev->name, peer->internal_id, 49 &peer->endpoint.addr, (int)MAX_TIMER_HANDSHAKES + 2); 50 51 + timer_delete(&peer->timer_send_keepalive); 52 /* We drop all packets without a keypair and don't try again, 53 * if we try unsuccessfully for too long to make a handshake. 54 */ ··· 167 */ 168 void wg_timers_any_authenticated_packet_sent(struct wg_peer *peer) 169 { 170 + timer_delete(&peer->timer_send_keepalive); 171 } 172 173 /* Should be called after any type of authenticated packet is received, whether ··· 175 */ 176 void wg_timers_any_authenticated_packet_received(struct wg_peer *peer) 177 { 178 + timer_delete(&peer->timer_new_handshake); 179 } 180 181 /* Should be called after a handshake initiation message is sent. */ ··· 191 */ 192 void wg_timers_handshake_complete(struct wg_peer *peer) 193 { 194 + timer_delete(&peer->timer_retransmit_handshake); 195 peer->timer_handshake_attempts = 0; 196 peer->sent_lastminute_handshake = false; 197 ktime_get_real_ts64(&peer->walltime_last_handshake);
+2 -2
drivers/net/wireless/ath/ar5523/ar5523.c
··· 733 { 734 atomic_dec(&ar->tx_nr_total); 735 if (!atomic_dec_return(&ar->tx_nr_pending)) { 736 - del_timer(&ar->tx_wd_timer); 737 wake_up(&ar->tx_flush_waitq); 738 } 739 ··· 1076 1077 ar5523_cmd_write(ar, WDCMSG_TARGET_STOP, NULL, 0, 0); 1078 1079 - del_timer_sync(&ar->tx_wd_timer); 1080 cancel_work_sync(&ar->tx_wd_work); 1081 cancel_work_sync(&ar->rx_refill_work); 1082 ar5523_cancel_rx_bufs(ar);
··· 733 { 734 atomic_dec(&ar->tx_nr_total); 735 if (!atomic_dec_return(&ar->tx_nr_pending)) { 736 + timer_delete(&ar->tx_wd_timer); 737 wake_up(&ar->tx_flush_waitq); 738 } 739 ··· 1076 1077 ar5523_cmd_write(ar, WDCMSG_TARGET_STOP, NULL, 0, 0); 1078 1079 + timer_delete_sync(&ar->tx_wd_timer); 1080 cancel_work_sync(&ar->tx_wd_work); 1081 cancel_work_sync(&ar->rx_refill_work); 1082 ar5523_cancel_rx_bufs(ar);
+1 -1
drivers/net/wireless/ath/ath10k/debug.c
··· 1751 1752 /* Must not use _sync to avoid deadlock, we do that in 1753 * ath10k_debug_destroy(). The check for htt_stats_mask is to avoid 1754 - * warning from del_timer(). 1755 */ 1756 if (ar->debug.htt_stats_mask != 0) 1757 cancel_delayed_work(&ar->debug.htt_stats_dwork);
··· 1751 1752 /* Must not use _sync to avoid deadlock, we do that in 1753 * ath10k_debug_destroy(). The check for htt_stats_mask is to avoid 1754 + * warning from timer_delete(). 1755 */ 1756 if (ar->debug.htt_stats_mask != 0) 1757 cancel_delayed_work(&ar->debug.htt_stats_dwork);
+1 -1
drivers/net/wireless/ath/ath10k/htt_rx.c
··· 287 if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) 288 return; 289 290 - del_timer_sync(&htt->rx_ring.refill_retry_timer); 291 292 skb_queue_purge(&htt->rx_msdus_q); 293 skb_queue_purge(&htt->rx_in_ord_compl_q);
··· 287 if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) 288 return; 289 290 + timer_delete_sync(&htt->rx_ring.refill_retry_timer); 291 292 skb_queue_purge(&htt->rx_msdus_q); 293 skb_queue_purge(&htt->rx_in_ord_compl_q);
+2 -2
drivers/net/wireless/ath/ath10k/pci.c
··· 619 return; 620 } 621 622 - del_timer_sync(&ar_pci->ps_timer); 623 624 spin_lock_irqsave(&ar_pci->ps_lock, flags); 625 WARN_ON(ar_pci->ps_wake_refcount > 0); ··· 1817 { 1818 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1819 1820 - del_timer_sync(&ar_pci->rx_post_retry); 1821 } 1822 1823 int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
··· 619 return; 620 } 621 622 + timer_delete_sync(&ar_pci->ps_timer); 623 624 spin_lock_irqsave(&ar_pci->ps_lock, flags); 625 WARN_ON(ar_pci->ps_wake_refcount > 0); ··· 1817 { 1818 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1819 1820 + timer_delete_sync(&ar_pci->rx_post_retry); 1821 } 1822 1823 int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
+1 -1
drivers/net/wireless/ath/ath10k/sdio.c
··· 1621 1622 ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n"); 1623 1624 - del_timer_sync(&ar_sdio->sleep_timer); 1625 ath10k_sdio_set_mbox_sleep(ar, true); 1626 1627 /* Disable the card */
··· 1621 1622 ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n"); 1623 1624 + timer_delete_sync(&ar_sdio->sleep_timer); 1625 ath10k_sdio_set_mbox_sleep(ar, true); 1626 1627 /* Disable the card */
+1 -1
drivers/net/wireless/ath/ath10k/snoc.c
··· 911 struct ath10k_snoc_pipe *pipe_info; 912 int pipe_num; 913 914 - del_timer_sync(&ar_snoc->rx_post_retry); 915 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { 916 pipe_info = &ar_snoc->pipe_info[pipe_num]; 917 ath10k_snoc_rx_pipe_cleanup(pipe_info);
··· 911 struct ath10k_snoc_pipe *pipe_info; 912 int pipe_num; 913 914 + timer_delete_sync(&ar_snoc->rx_post_retry); 915 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { 916 pipe_info = &ar_snoc->pipe_info[pipe_num]; 917 ath10k_snoc_rx_pipe_cleanup(pipe_info);
+1 -1
drivers/net/wireless/ath/ath11k/ahb.c
··· 397 ath11k_ahb_ce_irqs_disable(ab); 398 ath11k_ahb_sync_ce_irqs(ab); 399 ath11k_ahb_kill_tasklets(ab); 400 - del_timer_sync(&ab->rx_replenish_retry); 401 ath11k_ce_cleanup_pipes(ab); 402 } 403
··· 397 ath11k_ahb_ce_irqs_disable(ab); 398 ath11k_ahb_sync_ce_irqs(ab); 399 ath11k_ahb_kill_tasklets(ab); 400 + timer_delete_sync(&ab->rx_replenish_retry); 401 ath11k_ce_cleanup_pipes(ab); 402 } 403
+2 -2
drivers/net/wireless/ath/ath11k/dp.c
··· 875 struct ath11k *ar; 876 int i; 877 878 - del_timer_sync(&ab->mon_reap_timer); 879 880 for (i = 0; i < ab->num_radios; i++) { 881 ar = ab->pdevs[i].ar; ··· 1170 if (!update_timer->init) 1171 return; 1172 1173 - del_timer_sync(&update_timer->timer); 1174 } 1175 1176 void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
··· 875 struct ath11k *ar; 876 int i; 877 878 + timer_delete_sync(&ab->mon_reap_timer); 879 880 for (i = 0; i < ab->num_radios; i++) { 881 ar = ab->pdevs[i].ar; ··· 1170 if (!update_timer->init) 1171 return; 1172 1173 + timer_delete_sync(&update_timer->timer); 1174 } 1175 1176 void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
+4 -4
drivers/net/wireless/ath/ath11k/dp_rx.c
··· 906 rx_tid = &peer->rx_tid[i]; 907 908 spin_unlock_bh(&ar->ab->base_lock); 909 - del_timer_sync(&rx_tid->frag_timer); 910 spin_lock_bh(&ar->ab->base_lock); 911 912 ath11k_dp_rx_frags_cleanup(rx_tid, true); ··· 927 ath11k_dp_rx_frags_cleanup(rx_tid, true); 928 929 spin_unlock_bh(&ar->ab->base_lock); 930 - del_timer_sync(&rx_tid->frag_timer); 931 spin_lock_bh(&ar->ab->base_lock); 932 } 933 } ··· 3710 } 3711 3712 spin_unlock_bh(&ab->base_lock); 3713 - del_timer_sync(&rx_tid->frag_timer); 3714 spin_lock_bh(&ab->base_lock); 3715 3716 peer = ath11k_peer_find_by_id(ab, peer_id); ··· 5781 int ret; 5782 5783 if (stop_timer) 5784 - del_timer_sync(&ab->mon_reap_timer); 5785 5786 /* reap all the monitor related rings */ 5787 ret = ath11k_dp_purge_mon_ring(ab);
··· 906 rx_tid = &peer->rx_tid[i]; 907 908 spin_unlock_bh(&ar->ab->base_lock); 909 + timer_delete_sync(&rx_tid->frag_timer); 910 spin_lock_bh(&ar->ab->base_lock); 911 912 ath11k_dp_rx_frags_cleanup(rx_tid, true); ··· 927 ath11k_dp_rx_frags_cleanup(rx_tid, true); 928 929 spin_unlock_bh(&ar->ab->base_lock); 930 + timer_delete_sync(&rx_tid->frag_timer); 931 spin_lock_bh(&ar->ab->base_lock); 932 } 933 } ··· 3710 } 3711 3712 spin_unlock_bh(&ab->base_lock); 3713 + timer_delete_sync(&rx_tid->frag_timer); 3714 spin_lock_bh(&ab->base_lock); 3715 3716 peer = ath11k_peer_find_by_id(ab, peer_id); ··· 5781 int ret; 5782 5783 if (stop_timer) 5784 + timer_delete_sync(&ab->mon_reap_timer); 5785 5786 /* reap all the monitor related rings */ 5787 ret = ath11k_dp_purge_mon_ring(ab);
+1 -1
drivers/net/wireless/ath/ath12k/dp.c
··· 985 if (!ab->mon_reap_timer.function) 986 return; 987 988 - del_timer_sync(&ab->mon_reap_timer); 989 990 for (i = 0; i < ab->num_radios; i++) 991 ath12k_dp_rx_pdev_free(ab, i);
··· 985 if (!ab->mon_reap_timer.function) 986 return; 987 988 + timer_delete_sync(&ab->mon_reap_timer); 989 990 for (i = 0; i < ab->num_radios; i++) 991 ath12k_dp_rx_pdev_free(ab, i);
+2 -2
drivers/net/wireless/ath/ath12k/dp_rx.c
··· 895 ath12k_dp_rx_frags_cleanup(rx_tid, true); 896 897 spin_unlock_bh(&ar->ab->base_lock); 898 - del_timer_sync(&rx_tid->frag_timer); 899 spin_lock_bh(&ar->ab->base_lock); 900 } 901 } ··· 3451 } 3452 3453 spin_unlock_bh(&ab->base_lock); 3454 - del_timer_sync(&rx_tid->frag_timer); 3455 spin_lock_bh(&ab->base_lock); 3456 3457 peer = ath12k_peer_find_by_id(ab, peer_id);
··· 895 ath12k_dp_rx_frags_cleanup(rx_tid, true); 896 897 spin_unlock_bh(&ar->ab->base_lock); 898 + timer_delete_sync(&rx_tid->frag_timer); 899 spin_lock_bh(&ar->ab->base_lock); 900 } 901 } ··· 3451 } 3452 3453 spin_unlock_bh(&ab->base_lock); 3454 + timer_delete_sync(&rx_tid->frag_timer); 3455 spin_lock_bh(&ab->base_lock); 3456 3457 peer = ath12k_peer_find_by_id(ab, peer_id);
+3 -3
drivers/net/wireless/ath/ath6kl/cfg80211.c
··· 149 if (!test_and_clear_bit(SCHED_SCANNING, &vif->flags)) 150 return false; 151 152 - del_timer_sync(&vif->sched_scan_timer); 153 154 if (ar->state == ATH6KL_STATE_RECOVERY) 155 return true; ··· 1200 if (((vif->auth_mode == WPA_PSK_AUTH) || 1201 (vif->auth_mode == WPA2_PSK_AUTH)) && 1202 (key_usage & GROUP_USAGE)) 1203 - del_timer(&vif->disconnect_timer); 1204 1205 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, 1206 "%s: index %d, key_len %d, key_type 0x%x, key_usage 0x%x, seq_len %d\n", ··· 3612 discon_issued = test_bit(CONNECTED, &vif->flags) || 3613 test_bit(CONNECT_PEND, &vif->flags); 3614 ath6kl_disconnect(vif); 3615 - del_timer(&vif->disconnect_timer); 3616 3617 if (discon_issued) 3618 ath6kl_disconnect_event(vif, DISCONNECT_CMD,
··· 149 if (!test_and_clear_bit(SCHED_SCANNING, &vif->flags)) 150 return false; 151 152 + timer_delete_sync(&vif->sched_scan_timer); 153 154 if (ar->state == ATH6KL_STATE_RECOVERY) 155 return true; ··· 1200 if (((vif->auth_mode == WPA_PSK_AUTH) || 1201 (vif->auth_mode == WPA2_PSK_AUTH)) && 1202 (key_usage & GROUP_USAGE)) 1203 + timer_delete(&vif->disconnect_timer); 1204 1205 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, 1206 "%s: index %d, key_len %d, key_type 0x%x, key_usage 0x%x, seq_len %d\n", ··· 3612 discon_issued = test_bit(CONNECTED, &vif->flags) || 3613 test_bit(CONNECT_PEND, &vif->flags); 3614 ath6kl_disconnect(vif); 3615 + timer_delete(&vif->disconnect_timer); 3616 3617 if (discon_issued) 3618 ath6kl_disconnect_event(vif, DISCONNECT_CMD,
+1 -1
drivers/net/wireless/ath/ath6kl/init.c
··· 1915 clear_bit(WMI_READY, &ar->flag); 1916 1917 if (ar->fw_recovery.enable) 1918 - del_timer_sync(&ar->fw_recovery.hb_timer); 1919 1920 /* 1921 * After wmi_shudown all WMI events will be dropped. We
··· 1915 clear_bit(WMI_READY, &ar->flag); 1916 1917 if (ar->fw_recovery.enable) 1918 + timer_delete_sync(&ar->fw_recovery.hb_timer); 1919 1920 /* 1921 * After wmi_shudown all WMI events will be dropped. We
+1 -1
drivers/net/wireless/ath/ath6kl/main.c
··· 1027 1028 aggr_reset_state(vif->aggr_cntxt->aggr_conn); 1029 1030 - del_timer(&vif->disconnect_timer); 1031 1032 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "disconnect reason is %d\n", reason); 1033
··· 1027 1028 aggr_reset_state(vif->aggr_cntxt->aggr_conn); 1029 1030 + timer_delete(&vif->disconnect_timer); 1031 1032 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "disconnect reason is %d\n", reason); 1033
+2 -2
drivers/net/wireless/ath/ath6kl/recovery.c
··· 25 26 ar->state = ATH6KL_STATE_RECOVERY; 27 28 - del_timer_sync(&ar->fw_recovery.hb_timer); 29 30 ath6kl_init_hw_restart(ar); 31 ··· 119 120 set_bit(RECOVERY_CLEANUP, &ar->flag); 121 122 - del_timer_sync(&ar->fw_recovery.hb_timer); 123 cancel_work_sync(&ar->fw_recovery.recovery_work); 124 } 125
··· 25 26 ar->state = ATH6KL_STATE_RECOVERY; 27 28 + timer_delete_sync(&ar->fw_recovery.hb_timer); 29 30 ath6kl_init_hw_restart(ar); 31 ··· 119 120 set_bit(RECOVERY_CLEANUP, &ar->flag); 121 122 + timer_delete_sync(&ar->fw_recovery.hb_timer); 123 cancel_work_sync(&ar->fw_recovery.recovery_work); 124 } 125
+1 -1
drivers/net/wireless/ath/ath6kl/txrx.c
··· 1827 return; 1828 1829 if (aggr_conn->timer_scheduled) { 1830 - del_timer(&aggr_conn->timer); 1831 aggr_conn->timer_scheduled = false; 1832 } 1833
··· 1827 return; 1828 1829 if (aggr_conn->timer_scheduled) { 1830 + timer_delete(&aggr_conn->timer); 1831 aggr_conn->timer_scheduled = false; 1832 } 1833
+1 -1
drivers/net/wireless/ath/ath9k/channel.c
··· 1556 struct ath_node *an; 1557 u32 tsf; 1558 1559 - del_timer_sync(&sc->sched.timer); 1560 ath9k_hw_gen_timer_stop(sc->sc_ah, sc->p2p_ps_timer); 1561 ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_TSF_TIMER); 1562
··· 1556 struct ath_node *an; 1557 u32 tsf; 1558 1559 + timer_delete_sync(&sc->sched.timer); 1560 ath9k_hw_gen_timer_stop(sc->sc_ah, sc->p2p_ps_timer); 1561 ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_TSF_TIMER); 1562
+4 -4
drivers/net/wireless/ath/ath9k/gpio.c
··· 305 ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n"); 306 307 /* make sure duty cycle timer is also stopped when resuming */ 308 - del_timer_sync(&btcoex->no_stomp_timer); 309 310 btcoex->bt_priority_cnt = 0; 311 btcoex->bt_priority_time = jiffies; ··· 329 330 ath_dbg(ath9k_hw_common(ah), BTCOEX, "Stopping btcoex timers\n"); 331 332 - del_timer_sync(&btcoex->period_timer); 333 - del_timer_sync(&btcoex->no_stomp_timer); 334 } 335 336 void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc) 337 { 338 struct ath_btcoex *btcoex = &sc->btcoex; 339 340 - del_timer_sync(&btcoex->no_stomp_timer); 341 } 342 343 u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen)
··· 305 ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n"); 306 307 /* make sure duty cycle timer is also stopped when resuming */ 308 + timer_delete_sync(&btcoex->no_stomp_timer); 309 310 btcoex->bt_priority_cnt = 0; 311 btcoex->bt_priority_time = jiffies; ··· 329 330 ath_dbg(ath9k_hw_common(ah), BTCOEX, "Stopping btcoex timers\n"); 331 332 + timer_delete_sync(&btcoex->period_timer); 333 + timer_delete_sync(&btcoex->no_stomp_timer); 334 } 335 336 void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc) 337 { 338 struct ath_btcoex *btcoex = &sc->btcoex; 339 340 + timer_delete_sync(&btcoex->no_stomp_timer); 341 } 342 343 u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen)
+3 -3
drivers/net/wireless/ath/ath9k/htc_drv_main.c
··· 198 ath9k_htc_stop_ani(priv); 199 ieee80211_stop_queues(priv->hw); 200 201 - del_timer_sync(&priv->tx.cleanup_timer); 202 ath9k_htc_tx_drain(priv); 203 204 WMI_CMD(WMI_DISABLE_INTR_CMDID); ··· 260 ath9k_htc_ps_wakeup(priv); 261 262 ath9k_htc_stop_ani(priv); 263 - del_timer_sync(&priv->tx.cleanup_timer); 264 ath9k_htc_tx_drain(priv); 265 266 WMI_CMD(WMI_DISABLE_INTR_CMDID); ··· 997 998 tasklet_kill(&priv->rx_tasklet); 999 1000 - del_timer_sync(&priv->tx.cleanup_timer); 1001 ath9k_htc_tx_drain(priv); 1002 ath9k_wmi_event_drain(priv); 1003
··· 198 ath9k_htc_stop_ani(priv); 199 ieee80211_stop_queues(priv->hw); 200 201 + timer_delete_sync(&priv->tx.cleanup_timer); 202 ath9k_htc_tx_drain(priv); 203 204 WMI_CMD(WMI_DISABLE_INTR_CMDID); ··· 260 ath9k_htc_ps_wakeup(priv); 261 262 ath9k_htc_stop_ani(priv); 263 + timer_delete_sync(&priv->tx.cleanup_timer); 264 ath9k_htc_tx_drain(priv); 265 266 WMI_CMD(WMI_DISABLE_INTR_CMDID); ··· 997 998 tasklet_kill(&priv->rx_tasklet); 999 1000 + timer_delete_sync(&priv->tx.cleanup_timer); 1001 ath9k_htc_tx_drain(priv); 1002 ath9k_wmi_event_drain(priv); 1003
+1 -1
drivers/net/wireless/ath/ath9k/init.c
··· 1099 if (ATH_TXQ_SETUP(sc, i)) 1100 ath_tx_cleanupq(sc, &sc->tx.txq[i]); 1101 1102 - del_timer_sync(&sc->sleep_timer); 1103 ath9k_hw_deinit(sc->sc_ah); 1104 if (sc->dfs_detector != NULL) 1105 sc->dfs_detector->exit(sc->dfs_detector);
··· 1099 if (ATH_TXQ_SETUP(sc, i)) 1100 ath_tx_cleanupq(sc, &sc->tx.txq[i]); 1101 1102 + timer_delete_sync(&sc->sleep_timer); 1103 ath9k_hw_deinit(sc->sc_ah); 1104 if (sc->dfs_detector != NULL) 1105 sc->dfs_detector->exit(sc->dfs_detector);
+1 -1
drivers/net/wireless/ath/ath9k/link.c
··· 472 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 473 474 ath_dbg(common, ANI, "Stopping ANI\n"); 475 - del_timer_sync(&common->ani.timer); 476 } 477 478 void ath_check_ani(struct ath_softc *sc)
··· 472 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 473 474 ath_dbg(common, ANI, "Stopping ANI\n"); 475 + timer_delete_sync(&common->ani.timer); 476 } 477 478 void ath_check_ani(struct ath_softc *sc)
+5 -5
drivers/net/wireless/ath/ath9k/main.c
··· 123 if (++sc->ps_usecount != 1) 124 goto unlock; 125 126 - del_timer_sync(&sc->sleep_timer); 127 power_mode = sc->sc_ah->power_mode; 128 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); 129 ··· 2418 ath_dbg(common, CHAN_CTX, 2419 "%s: Aborting RoC\n", __func__); 2420 2421 - del_timer_sync(&sc->offchannel.timer); 2422 if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START) 2423 ath_roc_complete(sc, ATH_ROC_COMPLETE_ABORT); 2424 } ··· 2427 ath_dbg(common, CHAN_CTX, 2428 "%s: Aborting HW scan\n", __func__); 2429 2430 - del_timer_sync(&sc->offchannel.timer); 2431 ath_scan_complete(sc, true); 2432 } 2433 } ··· 2476 ath_dbg(common, CHAN_CTX, "Cancel HW scan on vif: %pM\n", vif->addr); 2477 2478 mutex_lock(&sc->mutex); 2479 - del_timer_sync(&sc->offchannel.timer); 2480 ath_scan_complete(sc, true); 2481 mutex_unlock(&sc->mutex); 2482 } ··· 2526 mutex_lock(&sc->mutex); 2527 2528 ath_dbg(common, CHAN_CTX, "Cancel RoC\n"); 2529 - del_timer_sync(&sc->offchannel.timer); 2530 2531 if (sc->offchannel.roc_vif) { 2532 if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
··· 123 if (++sc->ps_usecount != 1) 124 goto unlock; 125 126 + timer_delete_sync(&sc->sleep_timer); 127 power_mode = sc->sc_ah->power_mode; 128 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); 129 ··· 2418 ath_dbg(common, CHAN_CTX, 2419 "%s: Aborting RoC\n", __func__); 2420 2421 + timer_delete_sync(&sc->offchannel.timer); 2422 if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START) 2423 ath_roc_complete(sc, ATH_ROC_COMPLETE_ABORT); 2424 } ··· 2427 ath_dbg(common, CHAN_CTX, 2428 "%s: Aborting HW scan\n", __func__); 2429 2430 + timer_delete_sync(&sc->offchannel.timer); 2431 ath_scan_complete(sc, true); 2432 } 2433 } ··· 2476 ath_dbg(common, CHAN_CTX, "Cancel HW scan on vif: %pM\n", vif->addr); 2477 2478 mutex_lock(&sc->mutex); 2479 + timer_delete_sync(&sc->offchannel.timer); 2480 ath_scan_complete(sc, true); 2481 mutex_unlock(&sc->mutex); 2482 } ··· 2526 mutex_lock(&sc->mutex); 2527 2528 ath_dbg(common, CHAN_CTX, "Cancel RoC\n"); 2529 + timer_delete_sync(&sc->offchannel.timer); 2530 2531 if (sc->offchannel.roc_vif) { 2532 if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
+1 -1
drivers/net/wireless/ath/ath9k/pci.c
··· 1029 */ 1030 ath9k_stop_btcoex(sc); 1031 ath9k_hw_disable(sc->sc_ah); 1032 - del_timer_sync(&sc->sleep_timer); 1033 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP); 1034 1035 return 0;
··· 1029 */ 1030 ath9k_stop_btcoex(sc); 1031 ath9k_hw_disable(sc->sc_ah); 1032 + timer_delete_sync(&sc->sleep_timer); 1033 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP); 1034 1035 return 0;
+2 -2
drivers/net/wireless/ath/wcn36xx/dxe.c
··· 350 spin_lock_irqsave(&wcn->dxe_lock, flags); 351 skb = wcn->tx_ack_skb; 352 wcn->tx_ack_skb = NULL; 353 - del_timer(&wcn->tx_ack_timer); 354 spin_unlock_irqrestore(&wcn->dxe_lock, flags); 355 356 if (!skb) { ··· 1055 1056 free_irq(wcn->tx_irq, wcn); 1057 free_irq(wcn->rx_irq, wcn); 1058 - del_timer(&wcn->tx_ack_timer); 1059 1060 if (wcn->tx_ack_skb) { 1061 ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
··· 350 spin_lock_irqsave(&wcn->dxe_lock, flags); 351 skb = wcn->tx_ack_skb; 352 wcn->tx_ack_skb = NULL; 353 + timer_delete(&wcn->tx_ack_timer); 354 spin_unlock_irqrestore(&wcn->dxe_lock, flags); 355 356 if (!skb) { ··· 1055 1056 free_irq(wcn->tx_irq, wcn); 1057 free_irq(wcn->rx_irq, wcn); 1058 + timer_delete(&wcn->tx_ack_timer); 1059 1060 if (wcn->tx_ack_skb) { 1061 ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
+1 -1
drivers/net/wireless/ath/wil6210/cfg80211.c
··· 1017 1018 out_restore: 1019 if (rc) { 1020 - del_timer_sync(&vif->scan_timer); 1021 if (vif->mid == 0) 1022 wil->radio_wdev = wil->main_ndev->ieee80211_ptr; 1023 vif->scan_request = NULL;
··· 1017 1018 out_restore: 1019 if (rc) { 1020 + timer_delete_sync(&vif->scan_timer); 1021 if (vif->mid == 0) 1022 wil->radio_wdev = wil->main_ndev->ieee80211_ptr; 1023 vif->scan_request = NULL;
+3 -3
drivers/net/wireless/ath/wil6210/main.c
··· 798 799 wil_dbg_misc(wil, "disconnecting\n"); 800 801 - del_timer_sync(&vif->connect_timer); 802 _wil6210_disconnect(vif, bssid, reason_code); 803 } 804 ··· 818 819 wil_dbg_misc(wil, "got disconnect\n"); 820 821 - del_timer_sync(&vif->connect_timer); 822 _wil6210_disconnect_complete(vif, bssid, reason_code); 823 } 824 ··· 1465 return; 1466 1467 wil_dbg_misc(wil, "Abort scan_request 0x%p\n", vif->scan_request); 1468 - del_timer_sync(&vif->scan_timer); 1469 mutex_unlock(&wil->vif_mutex); 1470 rc = wmi_abort_scan(vif); 1471 if (!rc && sync)
··· 798 799 wil_dbg_misc(wil, "disconnecting\n"); 800 801 + timer_delete_sync(&vif->connect_timer); 802 _wil6210_disconnect(vif, bssid, reason_code); 803 } 804 ··· 818 819 wil_dbg_misc(wil, "got disconnect\n"); 820 821 + timer_delete_sync(&vif->connect_timer); 822 _wil6210_disconnect_complete(vif, bssid, reason_code); 823 } 824 ··· 1465 return; 1466 1467 wil_dbg_misc(wil, "Abort scan_request 0x%p\n", vif->scan_request); 1468 + timer_delete_sync(&vif->scan_timer); 1469 mutex_unlock(&wil->vif_mutex); 1470 rc = wmi_abort_scan(vif); 1471 if (!rc && sync)
+3 -3
drivers/net/wireless/ath/wil6210/netdev.c
··· 200 201 static void wil_vif_deinit(struct wil6210_vif *vif) 202 { 203 - del_timer_sync(&vif->scan_timer); 204 - del_timer_sync(&vif->p2p.discovery_timer); 205 cancel_work_sync(&vif->disconnect_worker); 206 cancel_work_sync(&vif->p2p.discovery_expired_work); 207 cancel_work_sync(&vif->p2p.delayed_listen_work); ··· 533 mutex_unlock(&wil->vif_mutex); 534 535 flush_work(&wil->wmi_event_worker); 536 - del_timer_sync(&vif->connect_timer); 537 cancel_work_sync(&vif->disconnect_worker); 538 wil_probe_client_flush(vif); 539 cancel_work_sync(&vif->probe_client_worker);
··· 200 201 static void wil_vif_deinit(struct wil6210_vif *vif) 202 { 203 + timer_delete_sync(&vif->scan_timer); 204 + timer_delete_sync(&vif->p2p.discovery_timer); 205 cancel_work_sync(&vif->disconnect_worker); 206 cancel_work_sync(&vif->p2p.discovery_expired_work); 207 cancel_work_sync(&vif->p2p.delayed_listen_work); ··· 533 mutex_unlock(&wil->vif_mutex); 534 535 flush_work(&wil->wmi_event_worker); 536 + timer_delete_sync(&vif->connect_timer); 537 cancel_work_sync(&vif->disconnect_worker); 538 wil_probe_client_flush(vif); 539 cancel_work_sync(&vif->probe_client_worker);
+1 -1
drivers/net/wireless/ath/wil6210/p2p.c
··· 184 /* discovery not really started, only pending */ 185 p2p->pending_listen_wdev = NULL; 186 } else { 187 - del_timer_sync(&p2p->discovery_timer); 188 wmi_stop_discovery(vif); 189 } 190 p2p->discovery_started = 0;
··· 184 /* discovery not really started, only pending */ 185 p2p->pending_listen_wdev = NULL; 186 } else { 187 + timer_delete_sync(&p2p->discovery_timer); 188 wmi_stop_discovery(vif); 189 } 190 p2p->discovery_started = 0;
+3 -3
drivers/net/wireless/ath/wil6210/wmi.c
··· 933 wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", status); 934 wil_dbg_misc(wil, "Complete scan_request 0x%p aborted %d\n", 935 vif->scan_request, info.aborted); 936 - del_timer_sync(&vif->scan_timer); 937 cfg80211_scan_done(vif->scan_request, &info); 938 if (vif->mid == 0) 939 wil->radio_wdev = wil->main_ndev->ieee80211_ptr; ··· 1023 mutex_unlock(&wil->mutex); 1024 return; 1025 } 1026 - del_timer_sync(&vif->connect_timer); 1027 } else if ((wdev->iftype == NL80211_IFTYPE_AP) || 1028 (wdev->iftype == NL80211_IFTYPE_P2P_GO)) { 1029 if (wil->sta[evt->cid].status != wil_sta_unused) { ··· 1814 wil->sta[cid].stats.ft_roams++; 1815 ether_addr_copy(wil->sta[cid].addr, vif->bss->bssid); 1816 mutex_unlock(&wil->mutex); 1817 - del_timer_sync(&vif->connect_timer); 1818 1819 cfg80211_ref_bss(wiphy, vif->bss); 1820 freq = ieee80211_channel_to_frequency(ch, NL80211_BAND_60GHZ);
··· 933 wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", status); 934 wil_dbg_misc(wil, "Complete scan_request 0x%p aborted %d\n", 935 vif->scan_request, info.aborted); 936 + timer_delete_sync(&vif->scan_timer); 937 cfg80211_scan_done(vif->scan_request, &info); 938 if (vif->mid == 0) 939 wil->radio_wdev = wil->main_ndev->ieee80211_ptr; ··· 1023 mutex_unlock(&wil->mutex); 1024 return; 1025 } 1026 + timer_delete_sync(&vif->connect_timer); 1027 } else if ((wdev->iftype == NL80211_IFTYPE_AP) || 1028 (wdev->iftype == NL80211_IFTYPE_P2P_GO)) { 1029 if (wil->sta[evt->cid].status != wil_sta_unused) { ··· 1814 wil->sta[cid].stats.ft_roams++; 1815 ether_addr_copy(wil->sta[cid].addr, vif->bss->bssid); 1816 mutex_unlock(&wil->mutex); 1817 + timer_delete_sync(&vif->connect_timer); 1818 1819 cfg80211_ref_bss(wiphy, vif->bss); 1820 freq = ieee80211_channel_to_frequency(ch, NL80211_BAND_60GHZ);
+1 -1
drivers/net/wireless/atmel/at76c50x-usb.c
··· 2417 2418 kfree(priv->bulk_out_buffer); 2419 2420 - del_timer_sync(&ledtrig_tx_timer); 2421 2422 kfree_skb(priv->rx_skb); 2423
··· 2417 2418 kfree(priv->bulk_out_buffer); 2419 2420 + timer_delete_sync(&ledtrig_tx_timer); 2421 2422 kfree_skb(priv->rx_skb); 2423
+2 -2
drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
··· 289 btci = container_of(work, struct brcmf_btcoex_info, work); 290 if (btci->timer_on) { 291 btci->timer_on = false; 292 - del_timer_sync(&btci->timer); 293 } 294 295 switch (btci->bt_state) { ··· 428 if (btci->timer_on) { 429 brcmf_dbg(INFO, "disable BT DHCP Timer\n"); 430 btci->timer_on = false; 431 - del_timer_sync(&btci->timer); 432 433 /* schedule worker if transition to IDLE is needed */ 434 if (btci->bt_state != BRCMF_BT_DHCP_IDLE) {
··· 289 btci = container_of(work, struct brcmf_btcoex_info, work); 290 if (btci->timer_on) { 291 btci->timer_on = false; 292 + timer_delete_sync(&btci->timer); 293 } 294 295 switch (btci->bt_state) { ··· 428 if (btci->timer_on) { 429 brcmf_dbg(INFO, "disable BT DHCP Timer\n"); 430 btci->timer_on = false; 431 + timer_delete_sync(&btci->timer); 432 433 /* schedule worker if transition to IDLE is needed */ 434 if (btci->bt_state != BRCMF_BT_DHCP_IDLE) {
+1 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
··· 2304 { 2305 if (!active) { 2306 if (devinfo->console_active) { 2307 - del_timer_sync(&devinfo->timer); 2308 devinfo->console_active = false; 2309 } 2310 return;
··· 2304 { 2305 if (!active) { 2306 if (devinfo->console_active) { 2307 + timer_delete_sync(&devinfo->timer); 2308 devinfo->console_active = false; 2309 } 2310 return;
+1 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
··· 4611 { 4612 /* Totally stop the timer */ 4613 if (!active && bus->wd_active) { 4614 - del_timer_sync(&bus->timer); 4615 bus->wd_active = false; 4616 return; 4617 }
··· 4611 { 4612 /* Totally stop the timer */ 4613 if (!active && bus->wd_active) { 4614 + timer_delete_sync(&bus->timer); 4615 bus->wd_active = false; 4616 return; 4617 }
+1 -1
drivers/net/wireless/intel/ipw2x00/libipw_crypto.c
··· 59 int i; 60 61 libipw_crypt_quiescing(info); 62 - del_timer_sync(&info->crypt_deinit_timer); 63 libipw_crypt_deinit_entries(info, 1); 64 65 for (i = 0; i < NUM_WEP_KEYS; i++) {
··· 59 int i; 60 61 libipw_crypt_quiescing(info); 62 + timer_delete_sync(&info->crypt_deinit_timer); 63 libipw_crypt_deinit_entries(info, 1); 64 65 for (i = 0; i < NUM_WEP_KEYS; i++) {
+1 -1
drivers/net/wireless/intel/iwlegacy/3945-mac.c
··· 2188 2189 /* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set 2190 * to prevent rearm timer */ 2191 - del_timer_sync(&il->watchdog); 2192 2193 /* Station information will now be cleared in device */ 2194 il_clear_ucode_stations(il);
··· 2188 2189 /* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set 2190 * to prevent rearm timer */ 2191 + timer_delete_sync(&il->watchdog); 2192 2193 /* Station information will now be cleared in device */ 2194 il_clear_ucode_stations(il);
+1 -1
drivers/net/wireless/intel/iwlegacy/3945-rs.c
··· 413 * to use il_priv to print out debugging) since it may not be fully 414 * initialized at this point. 415 */ 416 - del_timer_sync(&rs_sta->rate_scale_flush); 417 } 418 419 /*
··· 413 * to use il_priv to print out debugging) since it may not be fully 414 * initialized at this point. 415 */ 416 + timer_delete_sync(&rs_sta->rate_scale_flush); 417 } 418 419 /*
+2 -2
drivers/net/wireless/intel/iwlegacy/4965-mac.c
··· 5350 5351 /* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set 5352 * to prevent rearm timer */ 5353 - del_timer_sync(&il->watchdog); 5354 5355 il_clear_ucode_stations(il); 5356 ··· 6243 6244 il_cancel_scan_deferred_work(il); 6245 6246 - del_timer_sync(&il->stats_periodic); 6247 } 6248 6249 static void
··· 5350 5351 /* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set 5352 * to prevent rearm timer */ 5353 + timer_delete_sync(&il->watchdog); 5354 5355 il_clear_ucode_stations(il); 5356 ··· 6243 6244 il_cancel_scan_deferred_work(il); 6245 6246 + timer_delete_sync(&il->stats_periodic); 6247 } 6248 6249 static void
+1 -1
drivers/net/wireless/intel/iwlegacy/common.c
··· 4842 mod_timer(&il->watchdog, 4843 jiffies + msecs_to_jiffies(IL_WD_TICK(timeout))); 4844 else 4845 - del_timer(&il->watchdog); 4846 } 4847 EXPORT_SYMBOL(il_setup_watchdog); 4848
··· 4842 mod_timer(&il->watchdog, 4843 jiffies + msecs_to_jiffies(IL_WD_TICK(timeout))); 4844 else 4845 + timer_delete(&il->watchdog); 4846 } 4847 EXPORT_SYMBOL(il_setup_watchdog); 4848
+1 -1
drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
··· 1870 } 1871 } else { 1872 priv->event_log.ucode_trace = false; 1873 - del_timer_sync(&priv->ucode_trace); 1874 } 1875 1876 return count;
··· 1870 } 1871 } else { 1872 priv->event_log.ucode_trace = false; 1873 + timer_delete_sync(&priv->ucode_trace); 1874 } 1875 1876 return count;
+2 -2
drivers/net/wireless/intel/iwlwifi/dvm/main.c
··· 1082 cancel_work_sync(&priv->bt_full_concurrency); 1083 cancel_work_sync(&priv->bt_runtime_config); 1084 1085 - del_timer_sync(&priv->statistics_periodic); 1086 - del_timer_sync(&priv->ucode_trace); 1087 } 1088 1089 static int iwl_init_drv(struct iwl_priv *priv)
··· 1082 cancel_work_sync(&priv->bt_full_concurrency); 1083 cancel_work_sync(&priv->bt_runtime_config); 1084 1085 + timer_delete_sync(&priv->statistics_periodic); 1086 + timer_delete_sync(&priv->ucode_trace); 1087 } 1088 1089 static int iwl_init_drv(struct iwl_priv *priv)
+5 -5
drivers/net/wireless/intel/iwlwifi/dvm/tt.c
··· 257 tt->tt_previous_temp = temp; 258 #endif 259 /* stop ct_kill_waiting_tm timer */ 260 - del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm); 261 if (tt->state != old_state) { 262 switch (tt->state) { 263 case IWL_TI_0: ··· 378 } 379 } 380 /* stop ct_kill_waiting_tm timer */ 381 - del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm); 382 if (changed) { 383 if (tt->state >= IWL_TI_1) { 384 /* force PI = IWL_POWER_INDEX_5 in the case of TI > 0 */ ··· 506 return; 507 508 /* stop ct_kill_exit_tm timer */ 509 - del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm); 510 511 if (tt->state == IWL_TI_CT_KILL) { 512 IWL_ERR(priv, ··· 640 struct iwl_tt_mgmt *tt = &priv->thermal_throttle; 641 642 /* stop ct_kill_exit_tm timer if activated */ 643 - del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm); 644 /* stop ct_kill_waiting_tm timer if activated */ 645 - del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm); 646 cancel_work_sync(&priv->tt_work); 647 cancel_work_sync(&priv->ct_enter); 648 cancel_work_sync(&priv->ct_exit);
··· 257 tt->tt_previous_temp = temp; 258 #endif 259 /* stop ct_kill_waiting_tm timer */ 260 + timer_delete_sync(&priv->thermal_throttle.ct_kill_waiting_tm); 261 if (tt->state != old_state) { 262 switch (tt->state) { 263 case IWL_TI_0: ··· 378 } 379 } 380 /* stop ct_kill_waiting_tm timer */ 381 + timer_delete_sync(&priv->thermal_throttle.ct_kill_waiting_tm); 382 if (changed) { 383 if (tt->state >= IWL_TI_1) { 384 /* force PI = IWL_POWER_INDEX_5 in the case of TI > 0 */ ··· 506 return; 507 508 /* stop ct_kill_exit_tm timer */ 509 + timer_delete_sync(&priv->thermal_throttle.ct_kill_exit_tm); 510 511 if (tt->state == IWL_TI_CT_KILL) { 512 IWL_ERR(priv, ··· 640 struct iwl_tt_mgmt *tt = &priv->thermal_throttle; 641 642 /* stop ct_kill_exit_tm timer if activated */ 643 + timer_delete_sync(&priv->thermal_throttle.ct_kill_exit_tm); 644 /* stop ct_kill_waiting_tm timer if activated */ 645 + timer_delete_sync(&priv->thermal_throttle.ct_kill_waiting_tm); 646 cancel_work_sync(&priv->tt_work); 647 cancel_work_sync(&priv->ct_enter); 648 cancel_work_sync(&priv->ct_exit);
+1 -1
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
··· 1697 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { 1698 if (!trans_pcie->txqs.txq[i]) 1699 continue; 1700 - del_timer(&trans_pcie->txqs.txq[i]->stuck_timer); 1701 } 1702 1703 /* The STATUS_FW_ERROR bit is set in this function. This must happen
··· 1697 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { 1698 if (!trans_pcie->txqs.txq[i]) 1699 continue; 1700 + timer_delete(&trans_pcie->txqs.txq[i]->stuck_timer); 1701 } 1702 1703 /* The STATUS_FW_ERROR bit is set in this function. This must happen
+1 -1
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
··· 911 kfree_sensitive(txq->entries[i].cmd); 912 kfree_sensitive(txq->entries[i].free_buf); 913 } 914 - del_timer_sync(&txq->stuck_timer); 915 916 iwl_txq_gen2_free_memory(trans, txq); 917
··· 911 kfree_sensitive(txq->entries[i].cmd); 912 kfree_sensitive(txq->entries[i].free_buf); 913 } 914 + timer_delete_sync(&txq->stuck_timer); 915 916 iwl_txq_gen2_free_memory(trans, txq); 917
+3 -3
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
··· 469 kfree(txq->entries); 470 txq->entries = NULL; 471 472 - del_timer_sync(&txq->stuck_timer); 473 474 /* 0-fill queue descriptor structure */ 475 memset(txq, 0, sizeof(*txq)); ··· 1054 * since we're making progress on this queue 1055 */ 1056 if (txq->read_ptr == txq->write_ptr) 1057 - del_timer(&txq->stuck_timer); 1058 else 1059 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); 1060 } ··· 2529 /* remember how long until the timer fires */ 2530 txq->frozen_expiry_remainder = 2531 txq->stuck_timer.expires - now; 2532 - del_timer(&txq->stuck_timer); 2533 goto next_queue; 2534 } 2535
··· 469 kfree(txq->entries); 470 txq->entries = NULL; 471 472 + timer_delete_sync(&txq->stuck_timer); 473 474 /* 0-fill queue descriptor structure */ 475 memset(txq, 0, sizeof(*txq)); ··· 1054 * since we're making progress on this queue 1055 */ 1056 if (txq->read_ptr == txq->write_ptr) 1057 + timer_delete(&txq->stuck_timer); 1058 else 1059 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); 1060 } ··· 2529 /* remember how long until the timer fires */ 2530 txq->frozen_expiry_remainder = 2531 txq->stuck_timer.expires - now; 2532 + timer_delete(&txq->stuck_timer); 2533 goto next_queue; 2534 } 2535
+1 -1
drivers/net/wireless/marvell/libertas/cmdresp.c
··· 119 } 120 121 /* Now we got response from FW, cancel the command timer */ 122 - del_timer(&priv->command_timer); 123 priv->cmd_timed_out = 0; 124 125 if (respcmd == CMD_RET(CMD_802_11_PS_MODE)) {
··· 119 } 120 121 /* Now we got response from FW, cancel the command timer */ 122 + timer_delete(&priv->command_timer); 123 priv->cmd_timed_out = 0; 124 125 if (respcmd == CMD_RET(CMD_802_11_PS_MODE)) {
+1 -1
drivers/net/wireless/marvell/libertas/if_usb.c
··· 897 /* ... and wait for the process to complete */ 898 wait_event_interruptible(cardp->fw_wq, cardp->surprise_removed || cardp->fwdnldover); 899 900 - del_timer_sync(&cardp->fw_timeout); 901 usb_kill_urb(cardp->rx_urb); 902 903 if (!cardp->fwdnldover) {
··· 897 /* ... and wait for the process to complete */ 898 wait_event_interruptible(cardp->fw_wq, cardp->surprise_removed || cardp->fwdnldover); 899 900 + timer_delete_sync(&cardp->fw_timeout); 901 usb_kill_urb(cardp->rx_urb); 902 903 if (!cardp->fwdnldover) {
+6 -6
drivers/net/wireless/marvell/libertas/main.c
··· 202 spin_unlock_irqrestore(&priv->driver_lock, flags); 203 204 cancel_work_sync(&priv->mcast_work); 205 - del_timer_sync(&priv->tx_lockup_timer); 206 207 /* Disable command processing, and wait for all commands to complete */ 208 lbs_deb_main("waiting for commands to complete\n"); ··· 250 unsigned long flags; 251 252 spin_lock_irqsave(&priv->driver_lock, flags); 253 - del_timer(&priv->tx_lockup_timer); 254 255 priv->dnld_sent = DNLD_RES_RECEIVED; 256 ··· 594 spin_unlock_irq(&priv->driver_lock); 595 } 596 597 - del_timer(&priv->command_timer); 598 - del_timer(&priv->tx_lockup_timer); 599 600 return 0; 601 } ··· 798 { 799 lbs_free_cmd_buffer(priv); 800 kfifo_free(&priv->event_fifo); 801 - del_timer(&priv->command_timer); 802 - del_timer(&priv->tx_lockup_timer); 803 } 804 805 static const struct net_device_ops lbs_netdev_ops = {
··· 202 spin_unlock_irqrestore(&priv->driver_lock, flags); 203 204 cancel_work_sync(&priv->mcast_work); 205 + timer_delete_sync(&priv->tx_lockup_timer); 206 207 /* Disable command processing, and wait for all commands to complete */ 208 lbs_deb_main("waiting for commands to complete\n"); ··· 250 unsigned long flags; 251 252 spin_lock_irqsave(&priv->driver_lock, flags); 253 + timer_delete(&priv->tx_lockup_timer); 254 255 priv->dnld_sent = DNLD_RES_RECEIVED; 256 ··· 594 spin_unlock_irq(&priv->driver_lock); 595 } 596 597 + timer_delete(&priv->command_timer); 598 + timer_delete(&priv->tx_lockup_timer); 599 600 return 0; 601 } ··· 798 { 799 lbs_free_cmd_buffer(priv); 800 kfifo_free(&priv->event_fifo); 801 + timer_delete(&priv->command_timer); 802 + timer_delete(&priv->tx_lockup_timer); 803 } 804 805 static const struct net_device_ops lbs_netdev_ops = {
+1 -1
drivers/net/wireless/marvell/libertas_tf/cmd.c
··· 757 } 758 759 /* Now we got response from FW, cancel the command timer */ 760 - del_timer(&priv->command_timer); 761 priv->cmd_timed_out = 0; 762 if (priv->nr_retries) 763 priv->nr_retries = 0;
··· 757 } 758 759 /* Now we got response from FW, cancel the command timer */ 760 + timer_delete(&priv->command_timer); 761 priv->cmd_timed_out = 0; 762 if (priv->nr_retries) 763 priv->nr_retries = 0;
+1 -1
drivers/net/wireless/marvell/libertas_tf/if_usb.c
··· 875 wait_event_interruptible(cardp->fw_wq, cardp->priv->surpriseremoved || 876 cardp->fwdnldover); 877 878 - del_timer_sync(&cardp->fw_timeout); 879 usb_kill_urb(cardp->rx_urb); 880 881 if (!cardp->fwdnldover) {
··· 875 wait_event_interruptible(cardp->fw_wq, cardp->priv->surpriseremoved || 876 cardp->fwdnldover); 877 878 + timer_delete_sync(&cardp->fw_timeout); 879 usb_kill_urb(cardp->rx_urb); 880 881 if (!cardp->fwdnldover) {
+2 -2
drivers/net/wireless/marvell/libertas_tf/main.c
··· 174 { 175 lbtf_deb_enter(LBTF_DEB_MAIN); 176 lbtf_free_cmd_buffer(priv); 177 - del_timer(&priv->command_timer); 178 lbtf_deb_leave(LBTF_DEB_MAIN); 179 } 180 ··· 642 lbtf_deb_enter(LBTF_DEB_MAIN); 643 644 priv->surpriseremoved = 1; 645 - del_timer(&priv->command_timer); 646 lbtf_free_adapter(priv); 647 priv->hw = NULL; 648 ieee80211_unregister_hw(hw);
··· 174 { 175 lbtf_deb_enter(LBTF_DEB_MAIN); 176 lbtf_free_cmd_buffer(priv); 177 + timer_delete(&priv->command_timer); 178 lbtf_deb_leave(LBTF_DEB_MAIN); 179 } 180 ··· 642 lbtf_deb_enter(LBTF_DEB_MAIN); 643 644 priv->surpriseremoved = 1; 645 + timer_delete(&priv->command_timer); 646 lbtf_free_adapter(priv); 647 priv->hw = NULL; 648 ieee80211_unregister_hw(hw);
+1 -1
drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
··· 206 start_win = (tbl->start_win + tbl->win_size) & (MAX_TID_VALUE - 1); 207 mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win); 208 209 - del_timer_sync(&tbl->timer_context.timer); 210 tbl->timer_context.timer_is_set = false; 211 212 spin_lock_bh(&priv->rx_reorder_tbl_lock);
··· 206 start_win = (tbl->start_win + tbl->win_size) & (MAX_TID_VALUE - 1); 207 mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win); 208 209 + timer_delete_sync(&tbl->timer_context.timer); 210 tbl->timer_context.timer_is_set = false; 211 212 spin_lock_bh(&priv->rx_reorder_tbl_lock);
+1 -1
drivers/net/wireless/marvell/mwifiex/cmdevt.c
··· 836 return -1; 837 } 838 /* Now we got response from FW, cancel the command timer */ 839 - del_timer_sync(&adapter->cmd_timer); 840 clear_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags); 841 842 if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
··· 836 return -1; 837 } 838 /* Now we got response from FW, cancel the command timer */ 839 + timer_delete_sync(&adapter->cmd_timer); 840 clear_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags); 841 842 if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
+2 -2
drivers/net/wireless/marvell/mwifiex/init.c
··· 390 static void 391 mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter) 392 { 393 - del_timer(&adapter->wakeup_timer); 394 cancel_delayed_work_sync(&adapter->devdump_work); 395 mwifiex_cancel_all_pending_cmd(adapter); 396 wake_up_interruptible(&adapter->cmd_wait_q.wait); ··· 613 if (adapter->curr_cmd) { 614 mwifiex_dbg(adapter, WARN, 615 "curr_cmd is still in processing\n"); 616 - del_timer_sync(&adapter->cmd_timer); 617 mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd); 618 adapter->curr_cmd = NULL; 619 }
··· 390 static void 391 mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter) 392 { 393 + timer_delete(&adapter->wakeup_timer); 394 cancel_delayed_work_sync(&adapter->devdump_work); 395 mwifiex_cancel_all_pending_cmd(adapter); 396 wake_up_interruptible(&adapter->cmd_wait_q.wait); ··· 613 if (adapter->curr_cmd) { 614 mwifiex_dbg(adapter, WARN, 615 "curr_cmd is still in processing\n"); 616 + timer_delete_sync(&adapter->cmd_timer); 617 mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd); 618 adapter->curr_cmd = NULL; 619 }
+1 -1
drivers/net/wireless/marvell/mwifiex/main.c
··· 307 if (IS_CARD_RX_RCVD(adapter)) { 308 adapter->data_received = false; 309 adapter->pm_wakeup_fw_try = false; 310 - del_timer(&adapter->wakeup_timer); 311 if (adapter->ps_state == PS_STATE_SLEEP) 312 adapter->ps_state = PS_STATE_AWAKE; 313 } else {
··· 307 if (IS_CARD_RX_RCVD(adapter)) { 308 adapter->data_received = false; 309 adapter->pm_wakeup_fw_try = false; 310 + timer_delete(&adapter->wakeup_timer); 311 if (adapter->ps_state == PS_STATE_SLEEP) 312 adapter->ps_state = PS_STATE_AWAKE; 313 } else {
+2 -2
drivers/net/wireless/marvell/mwifiex/pcie.c
··· 2437 */ 2438 adapter->ps_state = PS_STATE_AWAKE; 2439 adapter->pm_wakeup_fw_try = false; 2440 - del_timer(&adapter->wakeup_timer); 2441 } 2442 2443 spin_lock_irqsave(&adapter->int_lock, flags); ··· 2527 adapter->ps_state == PS_STATE_SLEEP) { 2528 adapter->ps_state = PS_STATE_AWAKE; 2529 adapter->pm_wakeup_fw_try = false; 2530 - del_timer(&adapter->wakeup_timer); 2531 } 2532 } 2533 }
··· 2437 */ 2438 adapter->ps_state = PS_STATE_AWAKE; 2439 adapter->pm_wakeup_fw_try = false; 2440 + timer_delete(&adapter->wakeup_timer); 2441 } 2442 2443 spin_lock_irqsave(&adapter->int_lock, flags); ··· 2527 adapter->ps_state == PS_STATE_SLEEP) { 2528 adapter->ps_state = PS_STATE_AWAKE; 2529 adapter->pm_wakeup_fw_try = false; 2530 + timer_delete(&adapter->wakeup_timer); 2531 } 2532 } 2533 }
+2 -2
drivers/net/wireless/marvell/mwifiex/sta_event.c
··· 789 adapter->ps_state = PS_STATE_AWAKE; 790 adapter->pm_wakeup_card_req = false; 791 adapter->pm_wakeup_fw_try = false; 792 - del_timer(&adapter->wakeup_timer); 793 break; 794 } 795 if (!mwifiex_send_null_packet ··· 804 adapter->ps_state = PS_STATE_AWAKE; 805 adapter->pm_wakeup_card_req = false; 806 adapter->pm_wakeup_fw_try = false; 807 - del_timer(&adapter->wakeup_timer); 808 809 break; 810
··· 789 adapter->ps_state = PS_STATE_AWAKE; 790 adapter->pm_wakeup_card_req = false; 791 adapter->pm_wakeup_fw_try = false; 792 + timer_delete(&adapter->wakeup_timer); 793 break; 794 } 795 if (!mwifiex_send_null_packet ··· 804 adapter->ps_state = PS_STATE_AWAKE; 805 adapter->pm_wakeup_card_req = false; 806 adapter->pm_wakeup_fw_try = false; 807 + timer_delete(&adapter->wakeup_timer); 808 809 break; 810
+1 -1
drivers/net/wireless/marvell/mwifiex/tdls.c
··· 1490 priv->adapter->auto_tdls && 1491 priv->bss_type == MWIFIEX_BSS_TYPE_STA) { 1492 priv->auto_tdls_timer_active = false; 1493 - del_timer(&priv->auto_tdls_timer); 1494 mwifiex_flush_auto_tdls_list(priv); 1495 } 1496 }
··· 1490 priv->adapter->auto_tdls && 1491 priv->bss_type == MWIFIEX_BSS_TYPE_STA) { 1492 priv->auto_tdls_timer_active = false; 1493 + timer_delete(&priv->auto_tdls_timer); 1494 mwifiex_flush_auto_tdls_list(priv); 1495 } 1496 }
+3 -3
drivers/net/wireless/marvell/mwifiex/usb.c
··· 877 * write complete, delete the tx_aggr timer 878 */ 879 if (port->tx_aggr.timer_cnxt.is_hold_timer_set) { 880 - del_timer(&port->tx_aggr.timer_cnxt.hold_timer); 881 port->tx_aggr.timer_cnxt.is_hold_timer_set = false; 882 port->tx_aggr.timer_cnxt.hold_tmo_msecs = 0; 883 } ··· 1354 mwifiex_write_data_complete(adapter, skb_tmp, 1355 0, -1); 1356 if (port->tx_aggr.timer_cnxt.hold_timer.function) 1357 - del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer); 1358 port->tx_aggr.timer_cnxt.is_hold_timer_set = false; 1359 port->tx_aggr.timer_cnxt.hold_tmo_msecs = 0; 1360 } ··· 1557 { 1558 /* Simulation of HS_AWAKE event */ 1559 adapter->pm_wakeup_fw_try = false; 1560 - del_timer(&adapter->wakeup_timer); 1561 adapter->pm_wakeup_card_req = false; 1562 adapter->ps_state = PS_STATE_AWAKE; 1563
··· 877 * write complete, delete the tx_aggr timer 878 */ 879 if (port->tx_aggr.timer_cnxt.is_hold_timer_set) { 880 + timer_delete(&port->tx_aggr.timer_cnxt.hold_timer); 881 port->tx_aggr.timer_cnxt.is_hold_timer_set = false; 882 port->tx_aggr.timer_cnxt.hold_tmo_msecs = 0; 883 } ··· 1354 mwifiex_write_data_complete(adapter, skb_tmp, 1355 0, -1); 1356 if (port->tx_aggr.timer_cnxt.hold_timer.function) 1357 + timer_delete_sync(&port->tx_aggr.timer_cnxt.hold_timer); 1358 port->tx_aggr.timer_cnxt.is_hold_timer_set = false; 1359 port->tx_aggr.timer_cnxt.hold_tmo_msecs = 0; 1360 } ··· 1557 { 1558 /* Simulation of HS_AWAKE event */ 1559 adapter->pm_wakeup_fw_try = false; 1560 + timer_delete(&adapter->wakeup_timer); 1561 adapter->pm_wakeup_card_req = false; 1562 adapter->ps_state = PS_STATE_AWAKE; 1563
+2 -2
drivers/net/wireless/mediatek/mt76/mt7615/main.c
··· 97 struct mt7615_phy *phy = mt7615_hw_phy(hw); 98 99 cancel_delayed_work_sync(&phy->mt76->mac_work); 100 - del_timer_sync(&phy->roc_timer); 101 cancel_work_sync(&phy->roc_work); 102 103 cancel_delayed_work_sync(&dev->pm.ps_work); ··· 1194 if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) 1195 return 0; 1196 1197 - del_timer_sync(&phy->roc_timer); 1198 cancel_work_sync(&phy->roc_work); 1199 1200 mt7615_mutex_acquire(phy->dev);
··· 97 struct mt7615_phy *phy = mt7615_hw_phy(hw); 98 99 cancel_delayed_work_sync(&phy->mt76->mac_work); 100 + timer_delete_sync(&phy->roc_timer); 101 cancel_work_sync(&phy->roc_work); 102 103 cancel_delayed_work_sync(&dev->pm.ps_work); ··· 1194 if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) 1195 return 0; 1196 1197 + timer_delete_sync(&phy->roc_timer); 1198 cancel_work_sync(&phy->roc_work); 1199 1200 mt7615_mutex_acquire(phy->dev);
+2 -2
drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
··· 220 set_bit(MT76_MCU_RESET, &dev->mphy.state); 221 wake_up(&dev->mt76.mcu.wait); 222 cancel_delayed_work_sync(&dev->mphy.mac_work); 223 - del_timer_sync(&dev->phy.roc_timer); 224 cancel_work_sync(&dev->phy.roc_work); 225 if (phy2) { 226 set_bit(MT76_RESET, &phy2->mt76->state); 227 cancel_delayed_work_sync(&phy2->mt76->mac_work); 228 - del_timer_sync(&phy2->roc_timer); 229 cancel_work_sync(&phy2->roc_work); 230 } 231
··· 220 set_bit(MT76_MCU_RESET, &dev->mphy.state); 221 wake_up(&dev->mt76.mcu.wait); 222 cancel_delayed_work_sync(&dev->mphy.mac_work); 223 + timer_delete_sync(&dev->phy.roc_timer); 224 cancel_work_sync(&dev->phy.roc_work); 225 if (phy2) { 226 set_bit(MT76_RESET, &phy2->mt76->state); 227 cancel_delayed_work_sync(&phy2->mt76->mac_work); 228 + timer_delete_sync(&phy2->roc_timer); 229 cancel_work_sync(&phy2->roc_work); 230 } 231
+1 -1
drivers/net/wireless/mediatek/mt76/mt7615/usb.c
··· 85 struct mt7615_dev *dev = hw->priv; 86 87 clear_bit(MT76_STATE_RUNNING, &dev->mphy.state); 88 - del_timer_sync(&phy->roc_timer); 89 cancel_work_sync(&phy->roc_work); 90 cancel_delayed_work_sync(&phy->scan_work); 91 cancel_delayed_work_sync(&phy->mt76->mac_work);
··· 85 struct mt7615_dev *dev = hw->priv; 86 87 clear_bit(MT76_STATE_RUNNING, &dev->mphy.state); 88 + timer_delete_sync(&phy->roc_timer); 89 cancel_work_sync(&phy->roc_work); 90 cancel_delayed_work_sync(&phy->scan_work); 91 cancel_delayed_work_sync(&phy->mt76->mac_work);
+3 -3
drivers/net/wireless/mediatek/mt76/mt7921/main.c
··· 364 { 365 struct mt792x_phy *phy = &dev->phy; 366 367 - del_timer_sync(&phy->roc_timer); 368 cancel_work_sync(&phy->roc_work); 369 if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) 370 ieee80211_iterate_interfaces(mt76_hw(dev), ··· 395 { 396 int err = 0; 397 398 - del_timer_sync(&phy->roc_timer); 399 cancel_work_sync(&phy->roc_work); 400 401 mt792x_mutex_acquire(phy->dev); ··· 1476 { 1477 struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; 1478 1479 - del_timer_sync(&mvif->csa_timer); 1480 cancel_work_sync(&mvif->csa_work); 1481 } 1482
··· 364 { 365 struct mt792x_phy *phy = &dev->phy; 366 367 + timer_delete_sync(&phy->roc_timer); 368 cancel_work_sync(&phy->roc_work); 369 if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) 370 ieee80211_iterate_interfaces(mt76_hw(dev), ··· 395 { 396 int err = 0; 397 398 + timer_delete_sync(&phy->roc_timer); 399 cancel_work_sync(&phy->roc_work); 400 401 mt792x_mutex_acquire(phy->dev); ··· 1476 { 1477 struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; 1478 1479 + timer_delete_sync(&mvif->csa_timer); 1480 cancel_work_sync(&mvif->csa_work); 1481 } 1482
+2 -2
drivers/net/wireless/mediatek/mt76/mt7925/main.c
··· 453 { 454 struct mt792x_phy *phy = &dev->phy; 455 456 - del_timer_sync(&phy->roc_timer); 457 cancel_work_sync(&phy->roc_work); 458 if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) 459 ieee80211_iterate_interfaces(mt76_hw(dev), ··· 485 { 486 int err = 0; 487 488 - del_timer_sync(&phy->roc_timer); 489 cancel_work_sync(&phy->roc_work); 490 491 mt792x_mutex_acquire(phy->dev);
··· 453 { 454 struct mt792x_phy *phy = &dev->phy; 455 456 + timer_delete_sync(&phy->roc_timer); 457 cancel_work_sync(&phy->roc_work); 458 if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) 459 ieee80211_iterate_interfaces(mt76_hw(dev), ··· 485 { 486 int err = 0; 487 488 + timer_delete_sync(&phy->roc_timer); 489 cancel_work_sync(&phy->roc_work); 490 491 mt792x_mutex_acquire(phy->dev);
+1 -1
drivers/net/wireless/mediatek/mt76/mt792x_core.c
··· 340 mutex_unlock(&dev->mt76.mutex); 341 342 if (vif->bss_conf.csa_active) { 343 - del_timer_sync(&mvif->csa_timer); 344 cancel_work_sync(&mvif->csa_work); 345 } 346 }
··· 340 mutex_unlock(&dev->mt76.mutex); 341 342 if (vif->bss_conf.csa_active) { 343 + timer_delete_sync(&mvif->csa_timer); 344 cancel_work_sync(&mvif->csa_work); 345 } 346 }
+9 -9
drivers/net/wireless/microchip/wilc1000/hif.c
··· 643 } 644 } 645 646 - del_timer(&hif_drv->connect_timer); 647 conn_info->conn_result(CONN_DISCONN_EVENT_CONN_RESP, mac_status, 648 hif_drv->conn_info.priv); 649 ··· 669 struct host_if_drv *hif_drv = vif->hif_drv; 670 671 if (hif_drv->usr_scan_req.scan_result) { 672 - del_timer(&hif_drv->scan_timer); 673 handle_scan_done(vif, SCAN_EVENT_ABORTED); 674 } 675 ··· 713 if (hif_drv->hif_state == HOST_IF_CONNECTED) { 714 wilc_handle_disconnect(vif); 715 } else if (hif_drv->usr_scan_req.scan_result) { 716 - del_timer(&hif_drv->scan_timer); 717 handle_scan_done(vif, SCAN_EVENT_ABORTED); 718 } 719 } ··· 746 conn_info = &hif_drv->conn_info; 747 748 if (scan_req->scan_result) { 749 - del_timer(&hif_drv->scan_timer); 750 scan_req->scan_result(SCAN_EVENT_ABORTED, NULL, scan_req->priv); 751 scan_req->scan_result = NULL; 752 } ··· 754 if (conn_info->conn_result) { 755 if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP || 756 hif_drv->hif_state == HOST_IF_EXTERNAL_AUTH) 757 - del_timer(&hif_drv->connect_timer); 758 759 conn_info->conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF, 0, 760 conn_info->priv); ··· 959 int result; 960 struct host_if_msg *msg; 961 962 - del_timer(&vif->hif_drv->remain_on_ch_timer); 963 964 msg = wilc_alloc_work(vif, wilc_handle_listen_state_expired, false); 965 if (IS_ERR(msg)) ··· 1066 { 1067 struct host_if_msg *msg = container_of(work, struct host_if_msg, work); 1068 1069 - del_timer(&msg->vif->hif_drv->scan_timer); 1070 1071 handle_scan_done(msg->vif, SCAN_EVENT_DONE); 1072 ··· 1551 1552 timer_shutdown_sync(&hif_drv->scan_timer); 1553 timer_shutdown_sync(&hif_drv->connect_timer); 1554 - del_timer_sync(&vif->periodic_rssi); 1555 timer_shutdown_sync(&hif_drv->remain_on_ch_timer); 1556 1557 if (hif_drv->usr_scan_req.scan_result) { ··· 1718 return -EFAULT; 1719 } 1720 1721 - del_timer(&vif->hif_drv->remain_on_ch_timer); 1722 1723 return wilc_handle_roc_expired(vif, cookie); 1724 }
··· 643 } 644 } 645 646 + timer_delete(&hif_drv->connect_timer); 647 conn_info->conn_result(CONN_DISCONN_EVENT_CONN_RESP, mac_status, 648 hif_drv->conn_info.priv); 649 ··· 669 struct host_if_drv *hif_drv = vif->hif_drv; 670 671 if (hif_drv->usr_scan_req.scan_result) { 672 + timer_delete(&hif_drv->scan_timer); 673 handle_scan_done(vif, SCAN_EVENT_ABORTED); 674 } 675 ··· 713 if (hif_drv->hif_state == HOST_IF_CONNECTED) { 714 wilc_handle_disconnect(vif); 715 } else if (hif_drv->usr_scan_req.scan_result) { 716 + timer_delete(&hif_drv->scan_timer); 717 handle_scan_done(vif, SCAN_EVENT_ABORTED); 718 } 719 } ··· 746 conn_info = &hif_drv->conn_info; 747 748 if (scan_req->scan_result) { 749 + timer_delete(&hif_drv->scan_timer); 750 scan_req->scan_result(SCAN_EVENT_ABORTED, NULL, scan_req->priv); 751 scan_req->scan_result = NULL; 752 } ··· 754 if (conn_info->conn_result) { 755 if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP || 756 hif_drv->hif_state == HOST_IF_EXTERNAL_AUTH) 757 + timer_delete(&hif_drv->connect_timer); 758 759 conn_info->conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF, 0, 760 conn_info->priv); ··· 959 int result; 960 struct host_if_msg *msg; 961 962 + timer_delete(&vif->hif_drv->remain_on_ch_timer); 963 964 msg = wilc_alloc_work(vif, wilc_handle_listen_state_expired, false); 965 if (IS_ERR(msg)) ··· 1066 { 1067 struct host_if_msg *msg = container_of(work, struct host_if_msg, work); 1068 1069 + timer_delete(&msg->vif->hif_drv->scan_timer); 1070 1071 handle_scan_done(msg->vif, SCAN_EVENT_DONE); 1072 ··· 1551 1552 timer_shutdown_sync(&hif_drv->scan_timer); 1553 timer_shutdown_sync(&hif_drv->connect_timer); 1554 + timer_delete_sync(&vif->periodic_rssi); 1555 timer_shutdown_sync(&hif_drv->remain_on_ch_timer); 1556 1557 if (hif_drv->usr_scan_req.scan_result) { ··· 1718 return -EFAULT; 1719 } 1720 1721 + timer_delete(&vif->hif_drv->remain_on_ch_timer); 1722 1723 return wilc_handle_roc_expired(vif, cookie); 1724 }
+2 -2
drivers/net/wireless/purelifi/plfxlc/usb.c
··· 714 mac = plfxlc_hw_mac(hw); 715 usb = &mac->chip.usb; 716 717 - del_timer_sync(&usb->tx.tx_retry_timer); 718 - del_timer_sync(&usb->sta_queue_cleanup); 719 720 ieee80211_unregister_hw(hw); 721
··· 714 mac = plfxlc_hw_mac(hw); 715 usb = &mac->chip.usb; 716 717 + timer_delete_sync(&usb->tx.tx_retry_timer); 718 + timer_delete_sync(&usb->sta_queue_cleanup); 719 720 ieee80211_unregister_hw(hw); 721
+1 -1
drivers/net/wireless/realtek/rtlwifi/base.c
··· 473 { 474 struct rtl_priv *rtlpriv = rtl_priv(hw); 475 476 - del_timer_sync(&rtlpriv->works.watchdog_timer); 477 478 cancel_delayed_work_sync(&rtlpriv->works.watchdog_wq); 479 if (ips_wq)
··· 473 { 474 struct rtl_priv *rtlpriv = rtl_priv(hw); 475 476 + timer_delete_sync(&rtlpriv->works.watchdog_timer); 477 478 cancel_delayed_work_sync(&rtlpriv->works.watchdog_wq); 479 if (ips_wq)
+2 -2
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
··· 179 } 180 181 if (rtlpriv->psc.low_power_enable) 182 - del_timer_sync(&rtlpriv->works.fw_clockoff_timer); 183 184 - del_timer_sync(&rtlpriv->works.fast_antenna_training_timer); 185 } 186 187 /* get bt coexist status */
··· 179 } 180 181 if (rtlpriv->psc.low_power_enable) 182 + timer_delete_sync(&rtlpriv->works.fw_clockoff_timer); 183 184 + timer_delete_sync(&rtlpriv->works.fast_antenna_training_timer); 185 } 186 187 /* get bt coexist status */
+2 -2
drivers/net/wireless/rsi/rsi_91x_hal.c
··· 493 struct rsi_hw *adapter = from_timer(adapter, t, bl_cmd_timer); 494 495 adapter->blcmd_timer_expired = true; 496 - del_timer(&adapter->bl_cmd_timer); 497 } 498 499 static int bl_start_cmd_timer(struct rsi_hw *adapter, u32 timeout) ··· 511 { 512 adapter->blcmd_timer_expired = false; 513 if (timer_pending(&adapter->bl_cmd_timer)) 514 - del_timer(&adapter->bl_cmd_timer); 515 516 return 0; 517 }
··· 493 struct rsi_hw *adapter = from_timer(adapter, t, bl_cmd_timer); 494 495 adapter->blcmd_timer_expired = true; 496 + timer_delete(&adapter->bl_cmd_timer); 497 } 498 499 static int bl_start_cmd_timer(struct rsi_hw *adapter, u32 timeout) ··· 511 { 512 adapter->blcmd_timer_expired = false; 513 if (timer_pending(&adapter->bl_cmd_timer)) 514 + timer_delete(&adapter->bl_cmd_timer); 515 516 return 0; 517 }
+3 -3
drivers/net/wireless/rsi/rsi_91x_mac80211.c
··· 1754 ieee80211_remain_on_channel_expired(common->priv->hw); 1755 1756 if (timer_pending(&common->roc_timer)) 1757 - del_timer(&common->roc_timer); 1758 1759 rsi_resume_conn_channel(common); 1760 mutex_unlock(&common->mutex); ··· 1776 1777 if (timer_pending(&common->roc_timer)) { 1778 rsi_dbg(INFO_ZONE, "Stop on-going ROC\n"); 1779 - del_timer(&common->roc_timer); 1780 } 1781 common->roc_timer.expires = msecs_to_jiffies(duration) + jiffies; 1782 add_timer(&common->roc_timer); ··· 1820 return 0; 1821 } 1822 1823 - del_timer(&common->roc_timer); 1824 1825 rsi_resume_conn_channel(common); 1826 mutex_unlock(&common->mutex);
··· 1754 ieee80211_remain_on_channel_expired(common->priv->hw); 1755 1756 if (timer_pending(&common->roc_timer)) 1757 + timer_delete(&common->roc_timer); 1758 1759 rsi_resume_conn_channel(common); 1760 mutex_unlock(&common->mutex); ··· 1776 1777 if (timer_pending(&common->roc_timer)) { 1778 rsi_dbg(INFO_ZONE, "Stop on-going ROC\n"); 1779 + timer_delete(&common->roc_timer); 1780 } 1781 common->roc_timer.expires = msecs_to_jiffies(duration) + jiffies; 1782 add_timer(&common->roc_timer); ··· 1820 return 0; 1821 } 1822 1823 + timer_delete(&common->roc_timer); 1824 1825 rsi_resume_conn_channel(common); 1826 mutex_unlock(&common->mutex);
+1 -1
drivers/net/wireless/st/cw1200/main.c
··· 458 459 ieee80211_unregister_hw(dev); 460 461 - del_timer_sync(&priv->mcast_timeout); 462 cw1200_unregister_bh(priv); 463 464 cw1200_debug_release(priv);
··· 458 459 ieee80211_unregister_hw(dev); 460 461 + timer_delete_sync(&priv->mcast_timeout); 462 cw1200_unregister_bh(priv); 463 464 cw1200_debug_release(priv);
+1 -1
drivers/net/wireless/st/cw1200/pm.c
··· 105 106 void cw1200_pm_deinit(struct cw1200_pm_state *pm) 107 { 108 - del_timer_sync(&pm->stay_awake); 109 } 110 111 void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
··· 105 106 void cw1200_pm_deinit(struct cw1200_pm_state *pm) 107 { 108 + timer_delete_sync(&pm->stay_awake); 109 } 110 111 void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
+1 -1
drivers/net/wireless/st/cw1200/queue.c
··· 244 void cw1200_queue_deinit(struct cw1200_queue *queue) 245 { 246 cw1200_queue_clear(queue); 247 - del_timer_sync(&queue->gc); 248 INIT_LIST_HEAD(&queue->free_pool); 249 kfree(queue->pool); 250 kfree(queue->link_map_cache);
··· 244 void cw1200_queue_deinit(struct cw1200_queue *queue) 245 { 246 cw1200_queue_clear(queue); 247 + timer_delete_sync(&queue->gc); 248 INIT_LIST_HEAD(&queue->free_pool); 249 kfree(queue->pool); 250 kfree(queue->link_map_cache);
+3 -3
drivers/net/wireless/st/cw1200/sta.c
··· 113 cancel_work_sync(&priv->unjoin_work); 114 cancel_delayed_work_sync(&priv->link_id_gc_work); 115 flush_workqueue(priv->workqueue); 116 - del_timer_sync(&priv->mcast_timeout); 117 mutex_lock(&priv->conf_mutex); 118 priv->mode = NL80211_IFTYPE_UNSPECIFIED; 119 priv->listening = false; ··· 2102 container_of(work, struct cw1200_common, multicast_stop_work); 2103 2104 if (priv->aid0_bit_set) { 2105 - del_timer_sync(&priv->mcast_timeout); 2106 wsm_lock_tx(priv); 2107 priv->aid0_bit_set = false; 2108 cw1200_set_tim_impl(priv, false); ··· 2170 } 2171 spin_unlock_bh(&priv->ps_state_lock); 2172 if (cancel_tmo) 2173 - del_timer_sync(&priv->mcast_timeout); 2174 } else { 2175 spin_lock_bh(&priv->ps_state_lock); 2176 cw1200_ps_notify(priv, arg->link_id, arg->stop);
··· 113 cancel_work_sync(&priv->unjoin_work); 114 cancel_delayed_work_sync(&priv->link_id_gc_work); 115 flush_workqueue(priv->workqueue); 116 + timer_delete_sync(&priv->mcast_timeout); 117 mutex_lock(&priv->conf_mutex); 118 priv->mode = NL80211_IFTYPE_UNSPECIFIED; 119 priv->listening = false; ··· 2102 container_of(work, struct cw1200_common, multicast_stop_work); 2103 2104 if (priv->aid0_bit_set) { 2105 + timer_delete_sync(&priv->mcast_timeout); 2106 wsm_lock_tx(priv); 2107 priv->aid0_bit_set = false; 2108 cw1200_set_tim_impl(priv, false); ··· 2170 } 2171 spin_unlock_bh(&priv->ps_state_lock); 2172 if (cancel_tmo) 2173 + timer_delete_sync(&priv->mcast_timeout); 2174 } else { 2175 spin_lock_bh(&priv->ps_state_lock); 2176 cw1200_ps_notify(priv, arg->link_id, arg->stop);
+2 -2
drivers/net/wireless/ti/wlcore/main.c
··· 117 else { 118 ret = wl1271_set_rx_streaming(wl, wlvif, false); 119 /* don't cancel_work_sync since we might deadlock */ 120 - del_timer_sync(&wlvif->rx_streaming_timer); 121 } 122 out: 123 return ret; ··· 2841 unlock: 2842 mutex_unlock(&wl->mutex); 2843 2844 - del_timer_sync(&wlvif->rx_streaming_timer); 2845 cancel_work_sync(&wlvif->rx_streaming_enable_work); 2846 cancel_work_sync(&wlvif->rx_streaming_disable_work); 2847 cancel_work_sync(&wlvif->rc_update_work);
··· 117 else { 118 ret = wl1271_set_rx_streaming(wl, wlvif, false); 119 /* don't cancel_work_sync since we might deadlock */ 120 + timer_delete_sync(&wlvif->rx_streaming_timer); 121 } 122 out: 123 return ret; ··· 2841 unlock: 2842 mutex_unlock(&wl->mutex); 2843 2844 + timer_delete_sync(&wlvif->rx_streaming_timer); 2845 cancel_work_sync(&wlvif->rx_streaming_enable_work); 2846 cancel_work_sync(&wlvif->rx_streaming_disable_work); 2847 cancel_work_sync(&wlvif->rc_update_work);
+1 -1
drivers/net/xen-netback/interface.c
··· 329 if (queue->tx_irq != queue->rx_irq) 330 disable_irq(queue->rx_irq); 331 napi_disable(&queue->napi); 332 - del_timer_sync(&queue->credit_timeout); 333 } 334 } 335
··· 329 if (queue->tx_irq != queue->rx_irq) 330 disable_irq(queue->rx_irq); 331 napi_disable(&queue->napi); 332 + timer_delete_sync(&queue->credit_timeout); 333 } 334 } 335
+1 -1
drivers/net/xen-netfront.c
··· 1819 for (i = 0; i < num_queues && info->queues; ++i) { 1820 struct netfront_queue *queue = &info->queues[i]; 1821 1822 - del_timer_sync(&queue->rx_refill_timer); 1823 1824 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) 1825 unbind_from_irqhandler(queue->tx_irq, queue);
··· 1819 for (i = 0; i < num_queues && info->queues; ++i) { 1820 struct netfront_queue *queue = &info->queues[i]; 1821 1822 + timer_delete_sync(&queue->rx_refill_timer); 1823 1824 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) 1825 unbind_from_irqhandler(queue->tx_irq, queue);
+3 -3
drivers/nfc/nfcmrvl/fw_dnld.c
··· 102 atomic_set(&priv->ndev->cmd_cnt, 0); 103 104 if (timer_pending(&priv->ndev->cmd_timer)) 105 - del_timer_sync(&priv->ndev->cmd_timer); 106 107 if (timer_pending(&priv->fw_dnld.timer)) 108 - del_timer_sync(&priv->fw_dnld.timer); 109 110 nfc_info(priv->dev, "FW loading over (%d)]\n", error); 111 ··· 464 { 465 /* Discard command timer */ 466 if (timer_pending(&priv->ndev->cmd_timer)) 467 - del_timer_sync(&priv->ndev->cmd_timer); 468 469 /* Allow next command */ 470 atomic_set(&priv->ndev->cmd_cnt, 1);
··· 102 atomic_set(&priv->ndev->cmd_cnt, 0); 103 104 if (timer_pending(&priv->ndev->cmd_timer)) 105 + timer_delete_sync(&priv->ndev->cmd_timer); 106 107 if (timer_pending(&priv->fw_dnld.timer)) 108 + timer_delete_sync(&priv->fw_dnld.timer); 109 110 nfc_info(priv->dev, "FW loading over (%d)]\n", error); 111 ··· 464 { 465 /* Discard command timer */ 466 if (timer_pending(&priv->ndev->cmd_timer)) 467 + timer_delete_sync(&priv->ndev->cmd_timer); 468 469 /* Allow next command */ 470 atomic_set(&priv->ndev->cmd_cnt, 1);
+2 -2
drivers/nfc/pn533/pn533.c
··· 1515 cur_mod = dev->poll_mod_active[dev->poll_mod_curr]; 1516 1517 if (cur_mod->len == 0) { /* Target mode */ 1518 - del_timer(&dev->listen_timer); 1519 rc = pn533_init_target_complete(dev, resp); 1520 goto done; 1521 } ··· 1749 { 1750 struct pn533 *dev = nfc_get_drvdata(nfc_dev); 1751 1752 - del_timer(&dev->listen_timer); 1753 1754 if (!dev->poll_mod_count) { 1755 dev_dbg(dev->dev,
··· 1515 cur_mod = dev->poll_mod_active[dev->poll_mod_curr]; 1516 1517 if (cur_mod->len == 0) { /* Target mode */ 1518 + timer_delete(&dev->listen_timer); 1519 rc = pn533_init_target_complete(dev, resp); 1520 goto done; 1521 } ··· 1749 { 1750 struct pn533 *dev = nfc_get_drvdata(nfc_dev); 1751 1752 + timer_delete(&dev->listen_timer); 1753 1754 if (!dev->poll_mod_count) { 1755 dev_dbg(dev->dev,
+1 -1
drivers/nfc/pn533/uart.c
··· 209 struct pn532_uart_phy *dev = serdev_device_get_drvdata(serdev); 210 size_t i; 211 212 - del_timer(&dev->cmd_timeout); 213 for (i = 0; i < count; i++) { 214 skb_put_u8(dev->recv_skb, *data++); 215 if (!pn532_uart_rx_is_frame(dev->recv_skb))
··· 209 struct pn532_uart_phy *dev = serdev_device_get_drvdata(serdev); 210 size_t i; 211 212 + timer_delete(&dev->cmd_timeout); 213 for (i = 0; i < count; i++) { 214 skb_put_u8(dev->recv_skb, *data++); 215 if (!pn532_uart_rx_is_frame(dev->recv_skb))
+6 -6
drivers/nfc/st-nci/ndlc.c
··· 161 case PCB_SYNC_ACK: 162 skb = skb_dequeue(&ndlc->ack_pending_q); 163 kfree_skb(skb); 164 - del_timer_sync(&ndlc->t1_timer); 165 - del_timer_sync(&ndlc->t2_timer); 166 ndlc->t2_active = false; 167 ndlc->t1_active = false; 168 break; ··· 213 pr_debug("Handle T2(recv DATA) elapsed (T2 now inactive)\n"); 214 ndlc->t2_active = false; 215 ndlc->t1_active = false; 216 - del_timer_sync(&ndlc->t1_timer); 217 - del_timer_sync(&ndlc->t2_timer); 218 ndlc_close(ndlc); 219 ndlc->hard_fault = -EREMOTEIO; 220 } ··· 283 void ndlc_remove(struct llt_ndlc *ndlc) 284 { 285 /* cancel timers */ 286 - del_timer_sync(&ndlc->t1_timer); 287 - del_timer_sync(&ndlc->t2_timer); 288 ndlc->t2_active = false; 289 ndlc->t1_active = false; 290 /* cancel work */
··· 161 case PCB_SYNC_ACK: 162 skb = skb_dequeue(&ndlc->ack_pending_q); 163 kfree_skb(skb); 164 + timer_delete_sync(&ndlc->t1_timer); 165 + timer_delete_sync(&ndlc->t2_timer); 166 ndlc->t2_active = false; 167 ndlc->t1_active = false; 168 break; ··· 213 pr_debug("Handle T2(recv DATA) elapsed (T2 now inactive)\n"); 214 ndlc->t2_active = false; 215 ndlc->t1_active = false; 216 + timer_delete_sync(&ndlc->t1_timer); 217 + timer_delete_sync(&ndlc->t2_timer); 218 ndlc_close(ndlc); 219 ndlc->hard_fault = -EREMOTEIO; 220 } ··· 283 void ndlc_remove(struct llt_ndlc *ndlc) 284 { 285 /* cancel timers */ 286 + timer_delete_sync(&ndlc->t1_timer); 287 + timer_delete_sync(&ndlc->t2_timer); 288 ndlc->t2_active = false; 289 ndlc->t1_active = false; 290 /* cancel work */
+5 -5
drivers/nfc/st-nci/se.c
··· 257 case ST_NCI_EVT_HOT_PLUG: 258 if (info->se_info.se_active) { 259 if (!ST_NCI_EVT_HOT_PLUG_IS_INHIBITED(skb)) { 260 - del_timer_sync(&info->se_info.se_active_timer); 261 info->se_info.se_active = false; 262 complete(&info->se_info.req_completion); 263 } else { ··· 282 283 switch (event) { 284 case ST_NCI_EVT_TRANSMIT_DATA: 285 - del_timer_sync(&info->se_info.bwi_timer); 286 info->se_info.bwi_active = false; 287 info->se_info.cb(info->se_info.cb_context, 288 skb->data, skb->len, 0); ··· 415 416 if (ndev->hci_dev->count_pipes == 417 ndev->hci_dev->expected_pipes) { 418 - del_timer_sync(&info->se_info.se_active_timer); 419 info->se_info.se_active = false; 420 ndev->hci_dev->count_pipes = 0; 421 complete(&info->se_info.req_completion); ··· 751 struct st_nci_info *info = nci_get_drvdata(ndev); 752 753 if (info->se_info.bwi_active) 754 - del_timer_sync(&info->se_info.bwi_timer); 755 if (info->se_info.se_active) 756 - del_timer_sync(&info->se_info.se_active_timer); 757 758 info->se_info.se_active = false; 759 info->se_info.bwi_active = false;
··· 257 case ST_NCI_EVT_HOT_PLUG: 258 if (info->se_info.se_active) { 259 if (!ST_NCI_EVT_HOT_PLUG_IS_INHIBITED(skb)) { 260 + timer_delete_sync(&info->se_info.se_active_timer); 261 info->se_info.se_active = false; 262 complete(&info->se_info.req_completion); 263 } else { ··· 282 283 switch (event) { 284 case ST_NCI_EVT_TRANSMIT_DATA: 285 + timer_delete_sync(&info->se_info.bwi_timer); 286 info->se_info.bwi_active = false; 287 info->se_info.cb(info->se_info.cb_context, 288 skb->data, skb->len, 0); ··· 415 416 if (ndev->hci_dev->count_pipes == 417 ndev->hci_dev->expected_pipes) { 418 + timer_delete_sync(&info->se_info.se_active_timer); 419 info->se_info.se_active = false; 420 ndev->hci_dev->count_pipes = 0; 421 complete(&info->se_info.req_completion); ··· 751 struct st_nci_info *info = nci_get_drvdata(ndev); 752 753 if (info->se_info.bwi_active) 754 + timer_delete_sync(&info->se_info.bwi_timer); 755 if (info->se_info.se_active) 756 + timer_delete_sync(&info->se_info.se_active_timer); 757 758 info->se_info.se_active = false; 759 info->se_info.bwi_active = false;
+2 -2
drivers/nfc/st21nfca/core.c
··· 844 info->se_info.count_pipes++; 845 846 if (info->se_info.count_pipes == info->se_info.expected_pipes) { 847 - del_timer_sync(&info->se_info.se_active_timer); 848 info->se_info.se_active = false; 849 info->se_info.count_pipes = 0; 850 complete(&info->se_info.req_completion); ··· 864 case ST21NFCA_EVT_HOT_PLUG: 865 if (info->se_info.se_active) { 866 if (!ST21NFCA_EVT_HOT_PLUG_IS_INHIBITED(skb)) { 867 - del_timer_sync(&info->se_info.se_active_timer); 868 info->se_info.se_active = false; 869 complete(&info->se_info.req_completion); 870 } else {
··· 844 info->se_info.count_pipes++; 845 846 if (info->se_info.count_pipes == info->se_info.expected_pipes) { 847 + timer_delete_sync(&info->se_info.se_active_timer); 848 info->se_info.se_active = false; 849 info->se_info.count_pipes = 0; 850 complete(&info->se_info.req_completion); ··· 864 case ST21NFCA_EVT_HOT_PLUG: 865 if (info->se_info.se_active) { 866 if (!ST21NFCA_EVT_HOT_PLUG_IS_INHIBITED(skb)) { 867 + timer_delete_sync(&info->se_info.se_active_timer); 868 info->se_info.se_active = false; 869 complete(&info->se_info.req_completion); 870 } else {
+3 -3
drivers/nfc/st21nfca/se.c
··· 380 381 switch (event) { 382 case ST21NFCA_EVT_TRANSMIT_DATA: 383 - del_timer_sync(&info->se_info.bwi_timer); 384 cancel_work_sync(&info->se_info.timeout_work); 385 info->se_info.bwi_active = false; 386 r = nfc_hci_send_event(hdev, ST21NFCA_DEVICE_MGNT_GATE, ··· 435 struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); 436 437 if (info->se_info.bwi_active) 438 - del_timer_sync(&info->se_info.bwi_timer); 439 if (info->se_info.se_active) 440 - del_timer_sync(&info->se_info.se_active_timer); 441 442 cancel_work_sync(&info->se_info.timeout_work); 443 info->se_info.bwi_active = false;
··· 380 381 switch (event) { 382 case ST21NFCA_EVT_TRANSMIT_DATA: 383 + timer_delete_sync(&info->se_info.bwi_timer); 384 cancel_work_sync(&info->se_info.timeout_work); 385 info->se_info.bwi_active = false; 386 r = nfc_hci_send_event(hdev, ST21NFCA_DEVICE_MGNT_GATE, ··· 435 struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); 436 437 if (info->se_info.bwi_active) 438 + timer_delete_sync(&info->se_info.bwi_timer); 439 if (info->se_info.se_active) 440 + timer_delete_sync(&info->se_info.se_active_timer); 441 442 cancel_work_sync(&info->se_info.timeout_work); 443 info->se_info.bwi_active = false;
+2 -2
drivers/nvme/host/multipath.c
··· 860 if (nr_change_groups) 861 mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies); 862 else 863 - del_timer_sync(&ctrl->anatt_timer); 864 out_unlock: 865 mutex_unlock(&ctrl->ana_lock); 866 return error; ··· 900 { 901 if (!nvme_ctrl_use_ana(ctrl)) 902 return; 903 - del_timer_sync(&ctrl->anatt_timer); 904 cancel_work_sync(&ctrl->ana_work); 905 } 906
··· 860 if (nr_change_groups) 861 mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies); 862 else 863 + timer_delete_sync(&ctrl->anatt_timer); 864 out_unlock: 865 mutex_unlock(&ctrl->ana_lock); 866 return error; ··· 900 { 901 if (!nvme_ctrl_use_ana(ctrl)) 902 return; 903 + timer_delete_sync(&ctrl->anatt_timer); 904 cancel_work_sync(&ctrl->ana_work); 905 } 906
+1 -1
drivers/parport/ieee1284.c
··· 73 timer_setup(&port->timer, timeout_waiting_on_port, 0); 74 mod_timer(&port->timer, jiffies + timeout); 75 ret = down_interruptible (&port->physport->ieee1284.irq); 76 - if (!del_timer_sync(&port->timer) && !ret) 77 /* Timed out. */ 78 ret = 1; 79
··· 73 timer_setup(&port->timer, timeout_waiting_on_port, 0); 74 mod_timer(&port->timer, jiffies + timeout); 75 ret = down_interruptible (&port->physport->ieee1284.irq); 76 + if (!timer_delete_sync(&port->timer) && !ret) 77 /* Timed out. */ 78 ret = 1; 79
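The ieee1284 hunk above depends on the return value, which the rename keeps: as with del_timer_sync(), timer_delete_sync() returns nonzero when it deactivates a timer that was still pending and zero when the timer was not queued (already expired or never armed). A hedged sketch of that idiom, with hypothetical names:

	#include <linux/errno.h>
	#include <linux/timer.h>

	/* Hypothetical helper: the caller armed @watchdog before sleeping and
	 * calls this after waking up to find out whether it woke in time.
	 */
	static int foo_check_timed_out(struct timer_list *watchdog)
	{
		if (!timer_delete_sync(watchdog))
			return -ETIMEDOUT;	/* timer already fired: timed out */
		return 0;			/* timer was still pending: in time */
	}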
+1 -1
drivers/pci/hotplug/cpqphp_ctrl.c
··· 1794 } else if (ctrl->event_queue[loop].event_type == 1795 INT_BUTTON_CANCEL) { 1796 dbg("button cancel\n"); 1797 - del_timer(&p_slot->task_event); 1798 1799 mutex_lock(&ctrl->crit_sect); 1800
··· 1794 } else if (ctrl->event_queue[loop].event_type == 1795 INT_BUTTON_CANCEL) { 1796 dbg("button cancel\n"); 1797 + timer_delete(&p_slot->task_event); 1798 1799 mutex_lock(&ctrl->crit_sect); 1800
+1 -1
drivers/pci/hotplug/shpchp_hpc.c
··· 564 shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int); 565 566 if (shpchp_poll_mode) 567 - del_timer(&ctrl->poll_timer); 568 else { 569 free_irq(ctrl->pci_dev->irq, ctrl); 570 pci_disable_msi(ctrl->pci_dev);
··· 564 shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int); 565 566 if (shpchp_poll_mode) 567 + timer_delete(&ctrl->poll_timer); 568 else { 569 free_irq(ctrl->pci_dev->irq, ctrl); 570 pci_disable_msi(ctrl->pci_dev);
+1 -1
drivers/pcmcia/i82365.c
··· 1324 } 1325 platform_device_unregister(i82365_device); 1326 if (poll_interval != 0) 1327 - del_timer_sync(&poll_timer); 1328 if (grab_irq != 0) 1329 free_irq(cs_irq, pcic_interrupt); 1330 for (i = 0; i < sockets; i++) {
··· 1324 } 1325 platform_device_unregister(i82365_device); 1326 if (poll_interval != 0) 1327 + timer_delete_sync(&poll_timer); 1328 if (grab_irq != 0) 1329 free_irq(cs_irq, pcic_interrupt); 1330 for (i = 0; i < sockets; i++) {
+2 -2
drivers/pcmcia/soc_common.c
··· 766 767 void soc_pcmcia_remove_one(struct soc_pcmcia_socket *skt) 768 { 769 - del_timer_sync(&skt->poll_timer); 770 771 pcmcia_unregister_socket(&skt->socket); 772 ··· 865 return ret; 866 867 out_err_8: 868 - del_timer_sync(&skt->poll_timer); 869 pcmcia_unregister_socket(&skt->socket); 870 871 out_err_7:
··· 766 767 void soc_pcmcia_remove_one(struct soc_pcmcia_socket *skt) 768 { 769 + timer_delete_sync(&skt->poll_timer); 770 771 pcmcia_unregister_socket(&skt->socket); 772 ··· 865 return ret; 866 867 out_err_8: 868 + timer_delete_sync(&skt->poll_timer); 869 pcmcia_unregister_socket(&skt->socket); 870 871 out_err_7:
+1 -1
drivers/pcmcia/tcic.c
··· 509 { 510 int i; 511 512 - del_timer_sync(&poll_timer); 513 if (cs_irq != 0) { 514 tcic_aux_setw(TCIC_AUX_SYSCFG, TCIC_SYSCFG_AUTOBUSY|0x0a00); 515 free_irq(cs_irq, tcic_interrupt);
··· 509 { 510 int i; 511 512 + timer_delete_sync(&poll_timer); 513 if (cs_irq != 0) { 514 tcic_aux_setw(TCIC_AUX_SYSCFG, TCIC_SYSCFG_AUTOBUSY|0x0a00); 515 free_irq(cs_irq, tcic_interrupt);
+1 -1
drivers/platform/mellanox/mlxbf-tmfifo.c
··· 1320 int i; 1321 1322 fifo->is_ready = false; 1323 - del_timer_sync(&fifo->timer); 1324 mlxbf_tmfifo_disable_irqs(fifo); 1325 cancel_work_sync(&fifo->work); 1326 for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++)
··· 1320 int i; 1321 1322 fifo->is_ready = false; 1323 + timer_delete_sync(&fifo->timer); 1324 mlxbf_tmfifo_disable_irqs(fifo); 1325 cancel_work_sync(&fifo->work); 1326 for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++)
+1 -1
drivers/platform/x86/intel_ips.c
··· 1108 last_sample_period = 1; 1109 } while (!kthread_should_stop()); 1110 1111 - del_timer_sync(&ips->timer); 1112 1113 dev_dbg(ips->dev, "ips-monitor thread stopped\n"); 1114
··· 1108 last_sample_period = 1; 1109 } while (!kthread_should_stop()); 1110 1111 + timer_delete_sync(&ips->timer); 1112 1113 dev_dbg(ips->dev, "ips-monitor thread stopped\n"); 1114
+1 -1
drivers/platform/x86/sony-laptop.c
··· 538 if (!atomic_dec_and_test(&sony_laptop_input.users)) 539 return; 540 541 - del_timer_sync(&sony_laptop_input.release_key_timer); 542 543 /* 544 * Generate key-up events for remaining keys. Note that we don't
··· 538 if (!atomic_dec_and_test(&sony_laptop_input.users)) 539 return; 540 541 + timer_delete_sync(&sony_laptop_input.release_key_timer); 542 543 /* 544 * Generate key-up events for remaining keys. Note that we don't
+1 -1
drivers/pps/clients/pps-gpio.c
··· 229 struct pps_gpio_device_data *data = platform_get_drvdata(pdev); 230 231 pps_unregister_source(data->pps); 232 - del_timer_sync(&data->echo_timer); 233 /* reset echo pin in any case */ 234 gpiod_set_value(data->echo_pin, 0); 235 dev_info(&pdev->dev, "removed IRQ %d as PPS source\n", data->irq);
··· 229 struct pps_gpio_device_data *data = platform_get_drvdata(pdev); 230 231 pps_unregister_source(data->pps); 232 + timer_delete_sync(&data->echo_timer); 233 /* reset echo pin in any case */ 234 gpiod_set_value(data->echo_pin, 0); 235 dev_info(&pdev->dev, "removed IRQ %d as PPS source\n", data->irq);
+1 -1
drivers/pps/clients/pps-ktimer.c
··· 58 { 59 dev_dbg(&pps->dev, "ktimer PPS source unregistered\n"); 60 61 - del_timer_sync(&ktimer); 62 pps_unregister_source(pps); 63 } 64
··· 58 { 59 dev_dbg(&pps->dev, "ktimer PPS source unregistered\n"); 60 61 + timer_delete_sync(&ktimer); 62 pps_unregister_source(pps); 63 } 64
+2 -2
drivers/pps/generators/pps_gen-dummy.c
··· 52 if (enable) 53 mod_timer(&ktimer, jiffies + get_random_delay()); 54 else 55 - del_timer_sync(&ktimer); 56 57 return 0; 58 } ··· 73 74 static void __exit pps_gen_dummy_exit(void) 75 { 76 - del_timer_sync(&ktimer); 77 pps_gen_unregister_source(pps_gen); 78 } 79
··· 52 if (enable) 53 mod_timer(&ktimer, jiffies + get_random_delay()); 54 else 55 + timer_delete_sync(&ktimer); 56 57 return 0; 58 } ··· 73 74 static void __exit pps_gen_dummy_exit(void) 75 { 76 + timer_delete_sync(&ktimer); 77 pps_gen_unregister_source(pps_gen); 78 } 79
+2 -2
drivers/pps/generators/pps_gen_tio.c
··· 227 return PTR_ERR(tio->base); 228 229 pps_tio_disable(tio); 230 - hrtimer_init(&tio->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); 231 - tio->timer.function = hrtimer_callback; 232 spin_lock_init(&tio->lock); 233 platform_set_drvdata(pdev, &tio); 234
··· 227 return PTR_ERR(tio->base); 228 229 pps_tio_disable(tio); 230 + hrtimer_setup(&tio->timer, hrtimer_callback, CLOCK_REALTIME, 231 + HRTIMER_MODE_ABS); 232 spin_lock_init(&tio->lock); 233 platform_set_drvdata(pdev, &tio); 234
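The pps_gen_tio hunk above converts an hrtimer_init() call: hrtimer_setup() takes the callback as its second argument, replacing the separate assignment to tio->timer.function. A minimal sketch of the new-style setup, using a hypothetical callback rather than the driver's real one:

	#include <linux/hrtimer.h>

	static enum hrtimer_restart foo_hrtimer_cb(struct hrtimer *t)
	{
		return HRTIMER_NORESTART;	/* one-shot example callback */
	}

	static void foo_hrtimer_prepare(struct hrtimer *t)
	{
		/* was: hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		 *      followed by assigning the callback by hand
		 */
		hrtimer_setup(t, foo_hrtimer_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	}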
+1 -1
drivers/ptp/ptp_ocp.c
··· 4499 ptp_ocp_detach_sysfs(bp); 4500 ptp_ocp_attr_group_del(bp); 4501 if (timer_pending(&bp->watchdog)) 4502 - del_timer_sync(&bp->watchdog); 4503 if (bp->ts0) 4504 ptp_ocp_unregister_ext(bp->ts0); 4505 if (bp->ts1)
··· 4499 ptp_ocp_detach_sysfs(bp); 4500 ptp_ocp_attr_group_del(bp); 4501 if (timer_pending(&bp->watchdog)) 4502 + timer_delete_sync(&bp->watchdog); 4503 if (bp->ts0) 4504 ptp_ocp_unregister_ext(bp->ts0); 4505 if (bp->ts1)
+1 -1
drivers/rtc/dev.c
··· 90 rtc->stop_uie_polling = 1; 91 if (rtc->uie_timer_active) { 92 spin_unlock_irq(&rtc->irq_lock); 93 - del_timer_sync(&rtc->uie_timer); 94 spin_lock_irq(&rtc->irq_lock); 95 rtc->uie_timer_active = 0; 96 }
··· 90 rtc->stop_uie_polling = 1; 91 if (rtc->uie_timer_active) { 92 spin_unlock_irq(&rtc->irq_lock); 93 + timer_delete_sync(&rtc->uie_timer); 94 spin_lock_irq(&rtc->irq_lock); 95 rtc->uie_timer_active = 0; 96 }
+2 -2
drivers/rtc/rtc-test.c
··· 44 timeout = rtc_tm_to_time64(&alrm->time) - ktime_get_real_seconds(); 45 timeout -= rtd->offset; 46 47 - del_timer(&rtd->alarm); 48 49 expires = jiffies + timeout * HZ; 50 if (expires > U32_MAX) ··· 86 if (enable) 87 add_timer(&rtd->alarm); 88 else 89 - del_timer(&rtd->alarm); 90 91 return 0; 92 }
··· 44 timeout = rtc_tm_to_time64(&alrm->time) - ktime_get_real_seconds(); 45 timeout -= rtd->offset; 46 47 + timer_delete(&rtd->alarm); 48 49 expires = jiffies + timeout * HZ; 50 if (expires > U32_MAX) ··· 86 if (enable) 87 add_timer(&rtd->alarm); 88 else 89 + timer_delete(&rtd->alarm); 90 91 return 0; 92 }
+4 -4
drivers/s390/block/dasd.c
··· 1507 void dasd_device_set_timer(struct dasd_device *device, int expires) 1508 { 1509 if (expires == 0) 1510 - del_timer(&device->timer); 1511 else 1512 mod_timer(&device->timer, jiffies + expires); 1513 } ··· 1518 */ 1519 void dasd_device_clear_timer(struct dasd_device *device) 1520 { 1521 - del_timer(&device->timer); 1522 } 1523 EXPORT_SYMBOL(dasd_device_clear_timer); 1524 ··· 2692 void dasd_block_set_timer(struct dasd_block *block, int expires) 2693 { 2694 if (expires == 0) 2695 - del_timer(&block->timer); 2696 else 2697 mod_timer(&block->timer, jiffies + expires); 2698 } ··· 2703 */ 2704 void dasd_block_clear_timer(struct dasd_block *block) 2705 { 2706 - del_timer(&block->timer); 2707 } 2708 EXPORT_SYMBOL(dasd_block_clear_timer); 2709
··· 1507 void dasd_device_set_timer(struct dasd_device *device, int expires) 1508 { 1509 if (expires == 0) 1510 + timer_delete(&device->timer); 1511 else 1512 mod_timer(&device->timer, jiffies + expires); 1513 } ··· 1518 */ 1519 void dasd_device_clear_timer(struct dasd_device *device) 1520 { 1521 + timer_delete(&device->timer); 1522 } 1523 EXPORT_SYMBOL(dasd_device_clear_timer); 1524 ··· 2692 void dasd_block_set_timer(struct dasd_block *block, int expires) 2693 { 2694 if (expires == 0) 2695 + timer_delete(&block->timer); 2696 else 2697 mod_timer(&block->timer, jiffies + expires); 2698 } ··· 2703 */ 2704 void dasd_block_clear_timer(struct dasd_block *block) 2705 { 2706 + timer_delete(&block->timer); 2707 } 2708 EXPORT_SYMBOL(dasd_block_clear_timer); 2709
+2 -2
drivers/s390/char/con3270.c
··· 793 { 794 struct tty3270 *tp = container_of(view, struct tty3270, view); 795 796 - del_timer(&tp->timer); 797 } 798 799 static void tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb) ··· 1060 { 1061 struct tty3270 *tp = container_of(view, struct tty3270, view); 1062 1063 - del_timer_sync(&tp->timer); 1064 tty3270_free_screen(tp->screen, tp->allocated_lines); 1065 free_page((unsigned long)tp->converted_line); 1066 kfree(tp->input);
··· 793 { 794 struct tty3270 *tp = container_of(view, struct tty3270, view); 795 796 + timer_delete(&tp->timer); 797 } 798 799 static void tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb) ··· 1060 { 1061 struct tty3270 *tp = container_of(view, struct tty3270, view); 1062 1063 + timer_delete_sync(&tp->timer); 1064 tty3270_free_screen(tp->screen, tp->allocated_lines); 1065 free_page((unsigned long)tp->converted_line); 1066 kfree(tp->input);
+6 -6
drivers/s390/char/sclp.c
··· 261 static inline void 262 __sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *)) 263 { 264 - del_timer(&sclp_request_timer); 265 sclp_request_timer.function = cb; 266 sclp_request_timer.expires = jiffies + time; 267 add_timer(&sclp_request_timer); ··· 407 408 if (sclp_running_state != sclp_running_state_idle) 409 return 0; 410 - del_timer(&sclp_request_timer); 411 rc = sclp_service_call_trace(req->command, req->sccb); 412 req->start_count++; 413 ··· 442 spin_unlock_irqrestore(&sclp_lock, flags); 443 return; 444 } 445 - del_timer(&sclp_request_timer); 446 while (!list_empty(&sclp_req_queue)) { 447 req = list_entry(sclp_req_queue.next, struct sclp_req, list); 448 rc = __sclp_start_request(req); ··· 662 !ok_response(finished_sccb, active_cmd)); 663 664 if (finished_sccb) { 665 - del_timer(&sclp_request_timer); 666 sclp_running_state = sclp_running_state_reset_pending; 667 req = __sclp_find_req(finished_sccb); 668 if (req) { ··· 739 /* Loop until driver state indicates finished request */ 740 while (sclp_running_state != sclp_running_state_idle) { 741 /* Check for expired request timer */ 742 - if (get_tod_clock_fast() > timeout && del_timer(&sclp_request_timer)) 743 sclp_request_timer.function(&sclp_request_timer); 744 cpu_relax(); 745 } ··· 1165 * with IRQs enabled. */ 1166 irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL); 1167 spin_lock_irqsave(&sclp_lock, flags); 1168 - del_timer(&sclp_request_timer); 1169 rc = -EBUSY; 1170 if (sclp_init_req.status == SCLP_REQ_DONE) { 1171 if (sccb->header.response_code == 0x20) {
··· 261 static inline void 262 __sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *)) 263 { 264 + timer_delete(&sclp_request_timer); 265 sclp_request_timer.function = cb; 266 sclp_request_timer.expires = jiffies + time; 267 add_timer(&sclp_request_timer); ··· 407 408 if (sclp_running_state != sclp_running_state_idle) 409 return 0; 410 + timer_delete(&sclp_request_timer); 411 rc = sclp_service_call_trace(req->command, req->sccb); 412 req->start_count++; 413 ··· 442 spin_unlock_irqrestore(&sclp_lock, flags); 443 return; 444 } 445 + timer_delete(&sclp_request_timer); 446 while (!list_empty(&sclp_req_queue)) { 447 req = list_entry(sclp_req_queue.next, struct sclp_req, list); 448 rc = __sclp_start_request(req); ··· 662 !ok_response(finished_sccb, active_cmd)); 663 664 if (finished_sccb) { 665 + timer_delete(&sclp_request_timer); 666 sclp_running_state = sclp_running_state_reset_pending; 667 req = __sclp_find_req(finished_sccb); 668 if (req) { ··· 739 /* Loop until driver state indicates finished request */ 740 while (sclp_running_state != sclp_running_state_idle) { 741 /* Check for expired request timer */ 742 + if (get_tod_clock_fast() > timeout && timer_delete(&sclp_request_timer)) 743 sclp_request_timer.function(&sclp_request_timer); 744 cpu_relax(); 745 } ··· 1165 * with IRQs enabled. */ 1166 irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL); 1167 spin_lock_irqsave(&sclp_lock, flags); 1168 + timer_delete(&sclp_request_timer); 1169 rc = -EBUSY; 1170 if (sclp_init_req.status == SCLP_REQ_DONE) { 1171 if (sccb->header.response_code == 0x20) {
+1 -1
drivers/s390/char/sclp_con.c
··· 109 unsigned long flags; 110 111 spin_lock_irqsave(&sclp_con_lock, flags); 112 - del_timer(&sclp_con_timer); 113 while (sclp_con_queue_running) { 114 spin_unlock_irqrestore(&sclp_con_lock, flags); 115 sclp_sync_wait();
··· 109 unsigned long flags; 110 111 spin_lock_irqsave(&sclp_con_lock, flags); 112 + timer_delete(&sclp_con_timer); 113 while (sclp_con_queue_running) { 114 spin_unlock_irqrestore(&sclp_con_lock, flags); 115 sclp_sync_wait();
+2 -2
drivers/s390/char/sclp_vt220.c
··· 231 list_add_tail(&sclp_vt220_current_request->list, 232 &sclp_vt220_outqueue); 233 sclp_vt220_current_request = NULL; 234 - del_timer(&sclp_vt220_timer); 235 } 236 sclp_vt220_flush_later = 0; 237 } ··· 798 sclp_vt220_emit_current(); 799 800 spin_lock_irqsave(&sclp_vt220_lock, flags); 801 - del_timer(&sclp_vt220_timer); 802 while (sclp_vt220_queue_running) { 803 spin_unlock_irqrestore(&sclp_vt220_lock, flags); 804 sclp_sync_wait();
··· 231 list_add_tail(&sclp_vt220_current_request->list, 232 &sclp_vt220_outqueue); 233 sclp_vt220_current_request = NULL; 234 + timer_delete(&sclp_vt220_timer); 235 } 236 sclp_vt220_flush_later = 0; 237 } ··· 798 sclp_vt220_emit_current(); 799 800 spin_lock_irqsave(&sclp_vt220_lock, flags); 801 + timer_delete(&sclp_vt220_timer); 802 while (sclp_vt220_queue_running) { 803 spin_unlock_irqrestore(&sclp_vt220_lock, flags); 804 sclp_sync_wait();
+1 -1
drivers/s390/char/tape_core.c
··· 1108 struct tape_request, list); 1109 if (req->status == TAPE_REQUEST_LONG_BUSY) { 1110 DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id); 1111 - if (del_timer(&device->lb_timeout)) { 1112 tape_put_device(device); 1113 __tape_start_next_request(device); 1114 }
··· 1108 struct tape_request, list); 1109 if (req->status == TAPE_REQUEST_LONG_BUSY) { 1110 DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id); 1111 + if (timer_delete(&device->lb_timeout)) { 1112 tape_put_device(device); 1113 __tape_start_next_request(device); 1114 }
+1 -1
drivers/s390/char/tape_std.c
··· 73 74 rc = tape_do_io_interruptible(device, request); 75 76 - del_timer_sync(&request->timer); 77 78 if (rc != 0) { 79 DBF_EVENT(3, "%08x: assign failed - device might be busy\n",
··· 73 74 rc = tape_do_io_interruptible(device, request); 75 76 + timer_delete_sync(&request->timer); 77 78 if (rc != 0) { 79 DBF_EVENT(3, "%08x: assign failed - device might be busy\n",
+1 -1
drivers/s390/cio/device_fsm.c
··· 115 ccw_device_set_timeout(struct ccw_device *cdev, int expires) 116 { 117 if (expires == 0) 118 - del_timer(&cdev->private->timer); 119 else 120 mod_timer(&cdev->private->timer, jiffies + expires); 121 }
··· 115 ccw_device_set_timeout(struct ccw_device *cdev, int expires) 116 { 117 if (expires == 0) 118 + timer_delete(&cdev->private->timer); 119 else 120 mod_timer(&cdev->private->timer, jiffies + expires); 121 }
+1 -1
drivers/s390/cio/eadm_sch.c
··· 114 struct eadm_private *private = get_eadm_private(sch); 115 116 if (expires == 0) 117 - del_timer(&private->timer); 118 else 119 mod_timer(&private->timer, jiffies + expires); 120 }
··· 114 struct eadm_private *private = get_eadm_private(sch); 115 116 if (expires == 0) 117 + timer_delete(&private->timer); 118 else 119 mod_timer(&private->timer, jiffies + expires); 120 }
+1 -1
drivers/s390/crypto/ap_queue.c
··· 1289 /* move queue device state to SHUTDOWN in progress */ 1290 aq->dev_state = AP_DEV_STATE_SHUTDOWN; 1291 spin_unlock_bh(&aq->lock); 1292 - del_timer_sync(&aq->timeout); 1293 } 1294 1295 void ap_queue_remove(struct ap_queue *aq)
··· 1289 /* move queue device state to SHUTDOWN in progress */ 1290 aq->dev_state = AP_DEV_STATE_SHUTDOWN; 1291 spin_unlock_bh(&aq->lock); 1292 + timer_delete_sync(&aq->timeout); 1293 } 1294 1295 void ap_queue_remove(struct ap_queue *aq)
+2 -2
drivers/s390/net/fsm.c
··· 158 printk(KERN_DEBUG "fsm(%s): Delete timer %p\n", this->fi->name, 159 this); 160 #endif 161 - del_timer(&this->tl); 162 } 163 164 int ··· 188 this->fi->name, this, millisec); 189 #endif 190 191 - del_timer(&this->tl); 192 timer_setup(&this->tl, fsm_expire_timer, 0); 193 this->expire_event = event; 194 this->event_arg = arg;
··· 158 printk(KERN_DEBUG "fsm(%s): Delete timer %p\n", this->fi->name, 159 this); 160 #endif 161 + timer_delete(&this->tl); 162 } 163 164 int ··· 188 this->fi->name, this, millisec); 189 #endif 190 191 + timer_delete(&this->tl); 192 timer_setup(&this->tl, fsm_expire_timer, 0); 193 this->expire_event = event; 194 this->event_arg = arg;
+1 -1
drivers/s390/net/qeth_core_main.c
··· 7088 netif_tx_disable(dev); 7089 7090 qeth_for_each_output_queue(card, queue, i) { 7091 - del_timer_sync(&queue->timer); 7092 /* Queues may get re-allocated, so remove the NAPIs. */ 7093 netif_napi_del(&queue->napi); 7094 }
··· 7088 netif_tx_disable(dev); 7089 7090 qeth_for_each_output_queue(card, queue, i) { 7091 + timer_delete_sync(&queue->timer); 7092 /* Queues may get re-allocated, so remove the NAPIs. */ 7093 netif_napi_del(&queue->napi); 7094 }
+2 -2
drivers/s390/scsi/zfcp_fsf.c
··· 458 return; 459 } 460 461 - del_timer_sync(&req->timer); 462 zfcp_fsf_protstatus_eval(req); 463 zfcp_fsf_fsfstatus_eval(req); 464 req->handler(req); ··· 891 req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free); 892 req->issued = get_tod_clock(); 893 if (zfcp_qdio_send(qdio, &req->qdio_req)) { 894 - del_timer_sync(&req->timer); 895 896 /* lookup request again, list might have changed */ 897 if (zfcp_reqlist_find_rm(adapter->req_list, req_id) == NULL)
··· 458 return; 459 } 460 461 + timer_delete_sync(&req->timer); 462 zfcp_fsf_protstatus_eval(req); 463 zfcp_fsf_fsfstatus_eval(req); 464 req->handler(req); ··· 891 req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free); 892 req->issued = get_tod_clock(); 893 if (zfcp_qdio_send(qdio, &req->qdio_req)) { 894 + timer_delete_sync(&req->timer); 895 896 /* lookup request again, list might have changed */ 897 if (zfcp_reqlist_find_rm(adapter->req_list, req_id) == NULL)
+1 -1
drivers/s390/scsi/zfcp_qdio.c
··· 408 409 tasklet_disable(&qdio->irq_tasklet); 410 tasklet_disable(&qdio->request_tasklet); 411 - del_timer_sync(&qdio->request_timer); 412 qdio_stop_irq(adapter->ccw_device); 413 qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); 414
··· 408 409 tasklet_disable(&qdio->irq_tasklet); 410 tasklet_disable(&qdio->request_tasklet); 411 + timer_delete_sync(&qdio->request_timer); 412 qdio_stop_irq(adapter->ccw_device); 413 qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); 414
+2 -2
drivers/scsi/aic7xxx/aic79xx_core.c
··· 6181 /* 6182 * Stop periodic timer callbacks. 6183 */ 6184 - del_timer_sync(&ahd->stat_timer); 6185 6186 /* This will reset most registers to 0, but not all */ 6187 ahd_reset(ahd, /*reinit*/FALSE); ··· 6975 static void 6976 ahd_timer_reset(struct timer_list *timer, int usec) 6977 { 6978 - del_timer(timer); 6979 timer->expires = jiffies + (usec * HZ)/1000000; 6980 add_timer(timer); 6981 }
··· 6181 /* 6182 * Stop periodic timer callbacks. 6183 */ 6184 + timer_delete_sync(&ahd->stat_timer); 6185 6186 /* This will reset most registers to 0, but not all */ 6187 ahd_reset(ahd, /*reinit*/FALSE); ··· 6975 static void 6976 ahd_timer_reset(struct timer_list *timer, int usec) 6977 { 6978 + timer_delete(timer); 6979 timer->expires = jiffies + (usec * HZ)/1000000; 6980 add_timer(timer); 6981 }
+1 -1
drivers/scsi/aic94xx/aic94xx_hwi.c
··· 731 goto next_1; 732 } else if (ascb->scb->header.opcode == EMPTY_SCB) { 733 goto out; 734 - } else if (!ascb->uldd_timer && !del_timer(&ascb->timer)) { 735 goto next_1; 736 } 737 spin_lock_irqsave(&seq->pend_q_lock, flags);
··· 731 goto next_1; 732 } else if (ascb->scb->header.opcode == EMPTY_SCB) { 733 goto out; 734 + } else if (!ascb->uldd_timer && !timer_delete(&ascb->timer)) { 735 goto next_1; 736 } 737 spin_lock_irqsave(&seq->pend_q_lock, flags);
+1 -1
drivers/scsi/aic94xx/aic94xx_init.c
··· 851 * times out. Apparently we don't wait for the CONTROL PHY 852 * to complete, so it doesn't matter if we kill the timer. 853 */ 854 - del_timer_sync(&ascb->timer); 855 WARN_ON(ascb->scb->header.opcode != CONTROL_PHY); 856 857 list_del_init(pos);
··· 851 * times out. Apparently we don't wait for the CONTROL PHY 852 * to complete, so it doesn't matter if we kill the timer. 853 */ 854 + timer_delete_sync(&ascb->timer); 855 WARN_ON(ascb->scb->header.opcode != CONTROL_PHY); 856 857 list_del_init(pos);
+3 -3
drivers/scsi/aic94xx/aic94xx_tmf.c
··· 31 32 res = asd_post_ascb_list(ascb->ha, ascb, 1); 33 if (unlikely(res)) 34 - del_timer(&ascb->timer); 35 return res; 36 } 37 ··· 58 { 59 struct tasklet_completion_status *tcs = ascb->uldd_task; 60 ASD_DPRINTK("%s: here\n", __func__); 61 - if (!del_timer(&ascb->timer)) { 62 ASD_DPRINTK("%s: couldn't delete timer\n", __func__); 63 return; 64 } ··· 303 { 304 struct tasklet_completion_status *tcs; 305 306 - if (!del_timer(&ascb->timer)) 307 return; 308 309 tcs = ascb->uldd_task;
··· 31 32 res = asd_post_ascb_list(ascb->ha, ascb, 1); 33 if (unlikely(res)) 34 + timer_delete(&ascb->timer); 35 return res; 36 } 37 ··· 58 { 59 struct tasklet_completion_status *tcs = ascb->uldd_task; 60 ASD_DPRINTK("%s: here\n", __func__); 61 + if (!timer_delete(&ascb->timer)) { 62 ASD_DPRINTK("%s: couldn't delete timer\n", __func__); 63 return; 64 } ··· 303 { 304 struct tasklet_completion_status *tcs; 305 306 + if (!timer_delete(&ascb->timer)) 307 return; 308 309 tcs = ascb->uldd_task;
+10 -10
drivers/scsi/arcmsr/arcmsr_hba.c
··· 1161 return 0; 1162 out_free_sysfs: 1163 if (set_date_time) 1164 - del_timer_sync(&acb->refresh_timer); 1165 - del_timer_sync(&acb->eternal_timer); 1166 flush_work(&acb->arcmsr_do_message_isr_bh); 1167 arcmsr_stop_adapter_bgrb(acb); 1168 arcmsr_flush_adapter_cache(acb); ··· 1204 1205 arcmsr_disable_outbound_ints(acb); 1206 arcmsr_free_irq(pdev, acb); 1207 - del_timer_sync(&acb->eternal_timer); 1208 if (set_date_time) 1209 - del_timer_sync(&acb->refresh_timer); 1210 flush_work(&acb->arcmsr_do_message_isr_bh); 1211 arcmsr_stop_adapter_bgrb(acb); 1212 arcmsr_flush_adapter_cache(acb); ··· 1685 arcmsr_free_sysfs_attr(acb); 1686 scsi_remove_host(host); 1687 flush_work(&acb->arcmsr_do_message_isr_bh); 1688 - del_timer_sync(&acb->eternal_timer); 1689 if (set_date_time) 1690 - del_timer_sync(&acb->refresh_timer); 1691 pdev = acb->pdev; 1692 arcmsr_free_irq(pdev, acb); 1693 arcmsr_free_ccb_pool(acb); ··· 1718 arcmsr_free_sysfs_attr(acb); 1719 scsi_remove_host(host); 1720 flush_work(&acb->arcmsr_do_message_isr_bh); 1721 - del_timer_sync(&acb->eternal_timer); 1722 if (set_date_time) 1723 - del_timer_sync(&acb->refresh_timer); 1724 arcmsr_disable_outbound_ints(acb); 1725 arcmsr_stop_adapter_bgrb(acb); 1726 arcmsr_flush_adapter_cache(acb); ··· 1765 (struct AdapterControlBlock *)host->hostdata; 1766 if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) 1767 return; 1768 - del_timer_sync(&acb->eternal_timer); 1769 if (set_date_time) 1770 - del_timer_sync(&acb->refresh_timer); 1771 arcmsr_disable_outbound_ints(acb); 1772 arcmsr_free_irq(pdev, acb); 1773 flush_work(&acb->arcmsr_do_message_isr_bh);
··· 1161 return 0; 1162 out_free_sysfs: 1163 if (set_date_time) 1164 + timer_delete_sync(&acb->refresh_timer); 1165 + timer_delete_sync(&acb->eternal_timer); 1166 flush_work(&acb->arcmsr_do_message_isr_bh); 1167 arcmsr_stop_adapter_bgrb(acb); 1168 arcmsr_flush_adapter_cache(acb); ··· 1204 1205 arcmsr_disable_outbound_ints(acb); 1206 arcmsr_free_irq(pdev, acb); 1207 + timer_delete_sync(&acb->eternal_timer); 1208 if (set_date_time) 1209 + timer_delete_sync(&acb->refresh_timer); 1210 flush_work(&acb->arcmsr_do_message_isr_bh); 1211 arcmsr_stop_adapter_bgrb(acb); 1212 arcmsr_flush_adapter_cache(acb); ··· 1685 arcmsr_free_sysfs_attr(acb); 1686 scsi_remove_host(host); 1687 flush_work(&acb->arcmsr_do_message_isr_bh); 1688 + timer_delete_sync(&acb->eternal_timer); 1689 if (set_date_time) 1690 + timer_delete_sync(&acb->refresh_timer); 1691 pdev = acb->pdev; 1692 arcmsr_free_irq(pdev, acb); 1693 arcmsr_free_ccb_pool(acb); ··· 1718 arcmsr_free_sysfs_attr(acb); 1719 scsi_remove_host(host); 1720 flush_work(&acb->arcmsr_do_message_isr_bh); 1721 + timer_delete_sync(&acb->eternal_timer); 1722 if (set_date_time) 1723 + timer_delete_sync(&acb->refresh_timer); 1724 arcmsr_disable_outbound_ints(acb); 1725 arcmsr_stop_adapter_bgrb(acb); 1726 arcmsr_flush_adapter_cache(acb); ··· 1765 (struct AdapterControlBlock *)host->hostdata; 1766 if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) 1767 return; 1768 + timer_delete_sync(&acb->eternal_timer); 1769 if (set_date_time) 1770 + timer_delete_sync(&acb->refresh_timer); 1771 arcmsr_disable_outbound_ints(acb); 1772 arcmsr_free_irq(pdev, acb); 1773 flush_work(&acb->arcmsr_do_message_isr_bh);
+3 -3
drivers/scsi/arm/fas216.c
··· 2331 2332 fas216_log(info, LOG_ERROR, "error handling timed out\n"); 2333 2334 - del_timer(&info->eh_timer); 2335 2336 if (info->rst_bus_status == 0) 2337 info->rst_bus_status = -1; ··· 2532 */ 2533 wait_event(info->eh_wait, info->rst_dev_status); 2534 2535 - del_timer_sync(&info->eh_timer); 2536 spin_lock_irqsave(&info->host_lock, flags); 2537 info->rstSCpnt = NULL; 2538 ··· 2622 * Wait one second for the interrupt. 2623 */ 2624 wait_event(info->eh_wait, info->rst_bus_status); 2625 - del_timer_sync(&info->eh_timer); 2626 2627 fas216_log(info, LOG_ERROR, "bus reset complete: %s\n", 2628 info->rst_bus_status == 1 ? "success" : "failed");
··· 2331 2332 fas216_log(info, LOG_ERROR, "error handling timed out\n"); 2333 2334 + timer_delete(&info->eh_timer); 2335 2336 if (info->rst_bus_status == 0) 2337 info->rst_bus_status = -1; ··· 2532 */ 2533 wait_event(info->eh_wait, info->rst_dev_status); 2534 2535 + timer_delete_sync(&info->eh_timer); 2536 spin_lock_irqsave(&info->host_lock, flags); 2537 info->rstSCpnt = NULL; 2538 ··· 2622 * Wait one second for the interrupt. 2623 */ 2624 wait_event(info->eh_wait, info->rst_bus_status); 2625 + timer_delete_sync(&info->eh_timer); 2626 2627 fas216_log(info, LOG_ERROR, "bus reset complete: %s\n", 2628 info->rst_bus_status == 1 ? "success" : "failed");
+2 -2
drivers/scsi/be2iscsi/be_main.c
··· 5448 "BM_%d : EEH error detected\n"); 5449 5450 /* first stop UE detection when PCI error detected */ 5451 - del_timer_sync(&phba->hw_check); 5452 cancel_delayed_work_sync(&phba->recover_port); 5453 5454 /* sessions are no longer valid, so first fail the sessions */ ··· 5746 } 5747 5748 /* first stop UE detection before unloading */ 5749 - del_timer_sync(&phba->hw_check); 5750 cancel_delayed_work_sync(&phba->recover_port); 5751 cancel_work_sync(&phba->sess_work); 5752
··· 5448 "BM_%d : EEH error detected\n"); 5449 5450 /* first stop UE detection when PCI error detected */ 5451 + timer_delete_sync(&phba->hw_check); 5452 cancel_delayed_work_sync(&phba->recover_port); 5453 5454 /* sessions are no longer valid, so first fail the sessions */ ··· 5746 } 5747 5748 /* first stop UE detection before unloading */ 5749 + timer_delete_sync(&phba->hw_check); 5750 cancel_delayed_work_sync(&phba->recover_port); 5751 cancel_work_sync(&phba->sess_work); 5752
+5 -5
drivers/scsi/bfa/bfad.c
··· 327 case BFAD_E_EXIT_COMP: 328 bfa_sm_set_state(bfad, bfad_sm_uninit); 329 bfad_remove_intr(bfad); 330 - del_timer_sync(&bfad->hal_tmo); 331 break; 332 333 default: ··· 376 case BFAD_E_EXIT_COMP: 377 bfa_sm_set_state(bfad, bfad_sm_uninit); 378 bfad_remove_intr(bfad); 379 - del_timer_sync(&bfad->hal_tmo); 380 bfad_im_probe_undo(bfad); 381 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; 382 bfad_uncfg_pport(bfad); ··· 1421 /* Suspend/fail all bfa operations */ 1422 bfa_ioc_suspend(&bfad->bfa.ioc); 1423 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1424 - del_timer_sync(&bfad->hal_tmo); 1425 ret = PCI_ERS_RESULT_CAN_RECOVER; 1426 break; 1427 case pci_channel_io_frozen: /* fatal error */ ··· 1435 wait_for_completion(&bfad->comp); 1436 1437 bfad_remove_intr(bfad); 1438 - del_timer_sync(&bfad->hal_tmo); 1439 pci_disable_device(pdev); 1440 ret = PCI_ERS_RESULT_NEED_RESET; 1441 break; ··· 1566 wait_for_completion(&bfad->comp); 1567 1568 bfad_remove_intr(bfad); 1569 - del_timer_sync(&bfad->hal_tmo); 1570 pci_disable_device(pdev); 1571 1572 return PCI_ERS_RESULT_NEED_RESET;
··· 327 case BFAD_E_EXIT_COMP: 328 bfa_sm_set_state(bfad, bfad_sm_uninit); 329 bfad_remove_intr(bfad); 330 + timer_delete_sync(&bfad->hal_tmo); 331 break; 332 333 default: ··· 376 case BFAD_E_EXIT_COMP: 377 bfa_sm_set_state(bfad, bfad_sm_uninit); 378 bfad_remove_intr(bfad); 379 + timer_delete_sync(&bfad->hal_tmo); 380 bfad_im_probe_undo(bfad); 381 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; 382 bfad_uncfg_pport(bfad); ··· 1421 /* Suspend/fail all bfa operations */ 1422 bfa_ioc_suspend(&bfad->bfa.ioc); 1423 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1424 + timer_delete_sync(&bfad->hal_tmo); 1425 ret = PCI_ERS_RESULT_CAN_RECOVER; 1426 break; 1427 case pci_channel_io_frozen: /* fatal error */ ··· 1435 wait_for_completion(&bfad->comp); 1436 1437 bfad_remove_intr(bfad); 1438 + timer_delete_sync(&bfad->hal_tmo); 1439 pci_disable_device(pdev); 1440 ret = PCI_ERS_RESULT_NEED_RESET; 1441 break; ··· 1566 wait_for_completion(&bfad->comp); 1567 1568 bfad_remove_intr(bfad); 1569 + timer_delete_sync(&bfad->hal_tmo); 1570 pci_disable_device(pdev); 1571 1572 return PCI_ERS_RESULT_NEED_RESET;
+2 -2
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
··· 1599 struct bnx2fc_hba *hba = interface->hba; 1600 1601 /* Stop the transmit retry timer */ 1602 - del_timer_sync(&port->timer); 1603 1604 /* Free existing transmit skbs */ 1605 fcoe_clean_pending_queue(lport); ··· 1938 if (signal_pending(current)) 1939 flush_signals(current); 1940 1941 - del_timer_sync(&hba->destroy_timer); 1942 } 1943 bnx2fc_unbind_adapter_devices(hba); 1944 }
··· 1599 struct bnx2fc_hba *hba = interface->hba; 1600 1601 /* Stop the transmit retry timer */ 1602 + timer_delete_sync(&port->timer); 1603 1604 /* Free existing transmit skbs */ 1605 fcoe_clean_pending_queue(lport); ··· 1938 if (signal_pending(current)) 1939 flush_signals(current); 1940 1941 + timer_delete_sync(&hba->destroy_timer); 1942 } 1943 bnx2fc_unbind_adapter_devices(hba); 1944 }
+2 -2
drivers/scsi/bnx2fc/bnx2fc_tgt.c
··· 74 &tgt->flags))); 75 if (signal_pending(current)) 76 flush_signals(current); 77 - del_timer_sync(&tgt->ofld_timer); 78 } 79 80 static void bnx2fc_offload_session(struct fcoe_port *port, ··· 283 &tgt->flags))); 284 if (signal_pending(current)) 285 flush_signals(current); 286 - del_timer_sync(&tgt->upld_timer); 287 } 288 289 static void bnx2fc_upload_session(struct fcoe_port *port,
··· 74 &tgt->flags))); 75 if (signal_pending(current)) 76 flush_signals(current); 77 + timer_delete_sync(&tgt->ofld_timer); 78 } 79 80 static void bnx2fc_offload_session(struct fcoe_port *port, ··· 283 &tgt->flags))); 284 if (signal_pending(current)) 285 flush_signals(current); 286 + timer_delete_sync(&tgt->upld_timer); 287 } 288 289 static void bnx2fc_upload_session(struct fcoe_port *port,
+4 -4
drivers/scsi/bnx2i/bnx2i_iscsi.c
··· 1626 1627 if (signal_pending(current)) 1628 flush_signals(current); 1629 - del_timer_sync(&bnx2i_conn->ep->ofld_timer); 1630 1631 iscsi_conn_start(cls_conn); 1632 return 0; ··· 1749 1750 if (signal_pending(current)) 1751 flush_signals(current); 1752 - del_timer_sync(&ep->ofld_timer); 1753 1754 bnx2i_ep_destroy_list_del(hba, ep); 1755 ··· 1861 1862 if (signal_pending(current)) 1863 flush_signals(current); 1864 - del_timer_sync(&bnx2i_ep->ofld_timer); 1865 1866 bnx2i_ep_ofld_list_del(hba, bnx2i_ep); 1867 ··· 2100 2101 if (signal_pending(current)) 2102 flush_signals(current); 2103 - del_timer_sync(&bnx2i_ep->ofld_timer); 2104 2105 destroy_conn: 2106 bnx2i_ep_active_list_del(hba, bnx2i_ep);
··· 1626 1627 if (signal_pending(current)) 1628 flush_signals(current); 1629 + timer_delete_sync(&bnx2i_conn->ep->ofld_timer); 1630 1631 iscsi_conn_start(cls_conn); 1632 return 0; ··· 1749 1750 if (signal_pending(current)) 1751 flush_signals(current); 1752 + timer_delete_sync(&ep->ofld_timer); 1753 1754 bnx2i_ep_destroy_list_del(hba, ep); 1755 ··· 1861 1862 if (signal_pending(current)) 1863 flush_signals(current); 1864 + timer_delete_sync(&bnx2i_ep->ofld_timer); 1865 1866 bnx2i_ep_ofld_list_del(hba, bnx2i_ep); 1867 ··· 2100 2101 if (signal_pending(current)) 2102 flush_signals(current); 2103 + timer_delete_sync(&bnx2i_ep->ofld_timer); 2104 2105 destroy_conn: 2106 bnx2i_ep_active_list_del(hba, bnx2i_ep);
+2 -2
drivers/scsi/csiostor/csio_hw.c
··· 3701 struct csio_mb *mbp_next; 3702 int rv; 3703 3704 - del_timer_sync(&mbm->timer); 3705 3706 spin_lock_irq(&hw->lock); 3707 if (list_empty(&mbm->cbfn_q)) { ··· 4210 static void 4211 csio_mgmtm_exit(struct csio_mgmtm *mgmtm) 4212 { 4213 - del_timer_sync(&mgmtm->mgmt_timer); 4214 } 4215 4216
··· 3701 struct csio_mb *mbp_next; 3702 int rv; 3703 3704 + timer_delete_sync(&mbm->timer); 3705 3706 spin_lock_irq(&hw->lock); 3707 if (list_empty(&mbm->cbfn_q)) { ··· 4210 static void 4211 csio_mgmtm_exit(struct csio_mgmtm *mgmtm) 4212 { 4213 + timer_delete_sync(&mgmtm->mgmt_timer); 4214 } 4215 4216
+2 -2
drivers/scsi/csiostor/csio_mb.c
··· 1619 mbp = mbm->mcurrent; 1620 1621 /* Stop mailbox completion timer */ 1622 - del_timer_sync(&mbm->timer); 1623 1624 /* Add completion to tail of cbfn queue */ 1625 list_add_tail(&mbp->list, cbfn_q); ··· 1682 void 1683 csio_mbm_exit(struct csio_mbm *mbm) 1684 { 1685 - del_timer_sync(&mbm->timer); 1686 1687 CSIO_DB_ASSERT(mbm->mcurrent == NULL); 1688 CSIO_DB_ASSERT(list_empty(&mbm->req_q));
··· 1619 mbp = mbm->mcurrent; 1620 1621 /* Stop mailbox completion timer */ 1622 + timer_delete_sync(&mbm->timer); 1623 1624 /* Add completion to tail of cbfn queue */ 1625 list_add_tail(&mbp->list, cbfn_q); ··· 1682 void 1683 csio_mbm_exit(struct csio_mbm *mbm) 1684 { 1685 + timer_delete_sync(&mbm->timer); 1686 1687 CSIO_DB_ASSERT(mbm->mcurrent == NULL); 1688 CSIO_DB_ASSERT(list_empty(&mbm->req_q));
+1 -1
drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
··· 495 496 spin_lock_bh(&csk->lock); 497 if (csk->retry_timer.function) { 498 - del_timer(&csk->retry_timer); 499 csk->retry_timer.function = NULL; 500 } 501
··· 495 496 spin_lock_bh(&csk->lock); 497 if (csk->retry_timer.function) { 498 + timer_delete(&csk->retry_timer); 499 csk->retry_timer.function = NULL; 500 } 501
+1 -1
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
··· 930 csk, csk->state, csk->flags, csk->tid); 931 932 if (csk->retry_timer.function) { 933 - del_timer(&csk->retry_timer); 934 csk->retry_timer.function = NULL; 935 } 936
··· 930 csk, csk->state, csk->flags, csk->tid); 931 932 if (csk->retry_timer.function) { 933 + timer_delete(&csk->retry_timer); 934 csk->retry_timer.function = NULL; 935 } 936
+6 -6
drivers/scsi/dc395x.c
··· 765 return; 766 767 if (timer_pending(&acb->waiting_timer)) 768 - del_timer(&acb->waiting_timer); 769 770 if (list_empty(dcb_list_head)) 771 return; ··· 1153 cmd, cmd->device->id, (u8)cmd->device->lun, cmd); 1154 1155 if (timer_pending(&acb->waiting_timer)) 1156 - del_timer(&acb->waiting_timer); 1157 1158 /* 1159 * disable interrupt ··· 1561 /*dprintkl(KERN_DEBUG, "handle_interrupt: intstatus = 0x%02x ", scsi_intstatus); */ 1562 1563 if (timer_pending(&acb->selto_timer)) 1564 - del_timer(&acb->selto_timer); 1565 1566 if (scsi_intstatus & (INT_SELTIMEOUT | INT_DISCONNECT)) { 1567 disconnect(acb); /* bus free interrupt */ ··· 3454 dprintkl(KERN_INFO, "scsi_reset_detect: acb=%p\n", acb); 3455 /* delay half a second */ 3456 if (timer_pending(&acb->waiting_timer)) 3457 - del_timer(&acb->waiting_timer); 3458 3459 DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE); 3460 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE); ··· 4415 4416 /* remove timers */ 4417 if (timer_pending(&acb->waiting_timer)) 4418 - del_timer(&acb->waiting_timer); 4419 if (timer_pending(&acb->selto_timer)) 4420 - del_timer(&acb->selto_timer); 4421 4422 adapter_uninit_chip(acb); 4423 adapter_remove_and_free_all_devices(acb);
··· 765 return; 766 767 if (timer_pending(&acb->waiting_timer)) 768 + timer_delete(&acb->waiting_timer); 769 770 if (list_empty(dcb_list_head)) 771 return; ··· 1153 cmd, cmd->device->id, (u8)cmd->device->lun, cmd); 1154 1155 if (timer_pending(&acb->waiting_timer)) 1156 + timer_delete(&acb->waiting_timer); 1157 1158 /* 1159 * disable interrupt ··· 1561 /*dprintkl(KERN_DEBUG, "handle_interrupt: intstatus = 0x%02x ", scsi_intstatus); */ 1562 1563 if (timer_pending(&acb->selto_timer)) 1564 + timer_delete(&acb->selto_timer); 1565 1566 if (scsi_intstatus & (INT_SELTIMEOUT | INT_DISCONNECT)) { 1567 disconnect(acb); /* bus free interrupt */ ··· 3454 dprintkl(KERN_INFO, "scsi_reset_detect: acb=%p\n", acb); 3455 /* delay half a second */ 3456 if (timer_pending(&acb->waiting_timer)) 3457 + timer_delete(&acb->waiting_timer); 3458 3459 DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE); 3460 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE); ··· 4415 4416 /* remove timers */ 4417 if (timer_pending(&acb->waiting_timer)) 4418 + timer_delete(&acb->waiting_timer); 4419 if (timer_pending(&acb->selto_timer)) 4420 + timer_delete(&acb->selto_timer); 4421 4422 adapter_uninit_chip(acb); 4423 adapter_remove_and_free_all_devices(acb);
+1 -1
drivers/scsi/elx/efct/efct_driver.c
··· 310 * during attach. 311 */ 312 if (timer_pending(&efct->xport->stats_timer)) 313 - del_timer(&efct->xport->stats_timer); 314 315 if (efct_hw_reset(&efct->hw, EFCT_HW_RESET_FIRMWARE)) { 316 efc_log_info(efct, "failed to reset firmware\n");
··· 310 * during attach. 311 */ 312 if (timer_pending(&efct->xport->stats_timer)) 313 + timer_delete(&efct->xport->stats_timer); 314 315 if (efct_hw_reset(&efct->hw, EFCT_HW_RESET_FIRMWARE)) { 316 efc_log_info(efct, "failed to reset firmware\n");
+1 -1
drivers/scsi/elx/efct/efct_xport.c
··· 508 509 /*Shutdown FC Statistics timer*/ 510 if (timer_pending(&xport->stats_timer)) 511 - del_timer(&xport->stats_timer); 512 513 efct_hw_teardown(&efct->hw); 514
··· 508 509 /*Shutdown FC Statistics timer*/ 510 if (timer_pending(&xport->stats_timer)) 511 + timer_delete(&xport->stats_timer); 512 513 efct_hw_teardown(&efct->hw); 514
+1 -1
drivers/scsi/elx/libefc/efc_fabric.c
··· 888 { 889 struct efc_node *node = from_timer(node, t, gidpt_delay_timer); 890 891 - del_timer(&node->gidpt_delay_timer); 892 893 efc_node_post_event(node, EFC_EVT_GIDPT_DELAY_EXPIRED, NULL); 894 }
··· 888 { 889 struct efc_node *node = from_timer(node, t, gidpt_delay_timer); 890 891 + timer_delete(&node->gidpt_delay_timer); 892 893 efc_node_post_event(node, EFC_EVT_GIDPT_DELAY_EXPIRED, NULL); 894 }
+1 -1
drivers/scsi/elx/libefc/efc_node.c
··· 149 150 /* if the gidpt_delay_timer is still running, then delete it */ 151 if (timer_pending(&node->gidpt_delay_timer)) 152 - del_timer(&node->gidpt_delay_timer); 153 154 xa_erase(&nport->lookup, node->rnode.fc_id); 155
··· 149 150 /* if the gidpt_delay_timer is still running, then delete it */ 151 if (timer_pending(&node->gidpt_delay_timer)) 152 + timer_delete(&node->gidpt_delay_timer); 153 154 xa_erase(&nport->lookup, node->rnode.fc_id); 155
+1 -1
drivers/scsi/esas2r/esas2r_init.c
··· 439 if ((test_bit(AF2_INIT_DONE, &a->flags2)) 440 && (!test_bit(AF_DEGRADED_MODE, &a->flags))) { 441 if (!power_management) { 442 - del_timer_sync(&a->timer); 443 tasklet_kill(&a->tasklet); 444 } 445 esas2r_power_down(a);
··· 439 if ((test_bit(AF2_INIT_DONE, &a->flags2)) 440 && (!test_bit(AF_DEGRADED_MODE, &a->flags))) { 441 if (!power_management) { 442 + timer_delete_sync(&a->timer); 443 tasklet_kill(&a->tasklet); 444 } 445 esas2r_power_down(a);
+1 -1
drivers/scsi/fcoe/fcoe.c
··· 1013 fc_lport_destroy(lport); 1014 1015 /* Stop the transmit retry timer */ 1016 - del_timer_sync(&port->timer); 1017 1018 /* Free existing transmit skbs */ 1019 fcoe_clean_pending_queue(lport);
··· 1013 fc_lport_destroy(lport); 1014 1015 /* Stop the transmit retry timer */ 1016 + timer_delete_sync(&port->timer); 1017 1018 /* Free existing transmit skbs */ 1019 fcoe_clean_pending_queue(lport);
+2 -2
drivers/scsi/fcoe/fcoe_ctlr.c
··· 302 fcoe_ctlr_set_state(fip, FIP_ST_DISABLED); 303 fcoe_ctlr_reset_fcfs(fip); 304 mutex_unlock(&fip->ctlr_mutex); 305 - del_timer_sync(&fip->timer); 306 cancel_work_sync(&fip->timer_work); 307 } 308 EXPORT_SYMBOL(fcoe_ctlr_destroy); ··· 478 static void fcoe_ctlr_reset(struct fcoe_ctlr *fip) 479 { 480 fcoe_ctlr_reset_fcfs(fip); 481 - del_timer(&fip->timer); 482 fip->ctlr_ka_time = 0; 483 fip->port_ka_time = 0; 484 fip->sol_time = 0;
··· 302 fcoe_ctlr_set_state(fip, FIP_ST_DISABLED); 303 fcoe_ctlr_reset_fcfs(fip); 304 mutex_unlock(&fip->ctlr_mutex); 305 + timer_delete_sync(&fip->timer); 306 cancel_work_sync(&fip->timer_work); 307 } 308 EXPORT_SYMBOL(fcoe_ctlr_destroy); ··· 478 static void fcoe_ctlr_reset(struct fcoe_ctlr *fip) 479 { 480 fcoe_ctlr_reset_fcfs(fip); 481 + timer_delete(&fip->timer); 482 fip->ctlr_ka_time = 0; 483 fip->port_ka_time = 0; 484 fip->sol_time = 0;
+6 -6
drivers/scsi/fnic/fdls_disc.c
··· 394 { 395 fnic->iport.fabric.del_timer_inprogress = 1; 396 spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); 397 - del_timer_sync(&fnic->iport.fabric.retry_timer); 398 spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); 399 fnic->iport.fabric.del_timer_inprogress = 0; 400 } ··· 404 { 405 tport->del_timer_inprogress = 1; 406 spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); 407 - del_timer_sync(&tport->retry_timer); 408 spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); 409 tport->del_timer_inprogress = 0; 410 } ··· 3617 fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_plogi); 3618 3619 if (ntoh24(fchdr->fh_s_id) == FC_FID_MGMT_SERV) { 3620 - del_timer_sync(&iport->fabric.fdmi_timer); 3621 iport->fabric.fdmi_pending = 0; 3622 switch (plogi_rsp->els.fl_cmd) { 3623 case ELS_LS_ACC: ··· 3686 iport->fcid); 3687 3688 if (!iport->fabric.fdmi_pending) { 3689 - del_timer_sync(&iport->fabric.fdmi_timer); 3690 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, 3691 "iport fcid: 0x%x: Canceling FDMI timer\n", 3692 iport->fcid); ··· 3728 break; 3729 } 3730 3731 - del_timer_sync(&iport->fabric.fdmi_timer); 3732 iport->fabric.fdmi_pending &= ~FDLS_FDMI_ABORT_PENDING; 3733 3734 fdls_send_fdmi_plogi(iport); ··· 4971 } 4972 4973 if ((fnic_fdmi_support == 1) && (iport->fabric.fdmi_pending > 0)) { 4974 - del_timer_sync(&iport->fabric.fdmi_timer); 4975 iport->fabric.fdmi_pending = 0; 4976 } 4977
··· 394 { 395 fnic->iport.fabric.del_timer_inprogress = 1; 396 spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); 397 + timer_delete_sync(&fnic->iport.fabric.retry_timer); 398 spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); 399 fnic->iport.fabric.del_timer_inprogress = 0; 400 } ··· 404 { 405 tport->del_timer_inprogress = 1; 406 spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); 407 + timer_delete_sync(&tport->retry_timer); 408 spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); 409 tport->del_timer_inprogress = 0; 410 } ··· 3617 fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_plogi); 3618 3619 if (ntoh24(fchdr->fh_s_id) == FC_FID_MGMT_SERV) { 3620 + timer_delete_sync(&iport->fabric.fdmi_timer); 3621 iport->fabric.fdmi_pending = 0; 3622 switch (plogi_rsp->els.fl_cmd) { 3623 case ELS_LS_ACC: ··· 3686 iport->fcid); 3687 3688 if (!iport->fabric.fdmi_pending) { 3689 + timer_delete_sync(&iport->fabric.fdmi_timer); 3690 FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, 3691 "iport fcid: 0x%x: Canceling FDMI timer\n", 3692 iport->fcid); ··· 3728 break; 3729 } 3730 3731 + timer_delete_sync(&iport->fabric.fdmi_timer); 3732 iport->fabric.fdmi_pending &= ~FDLS_FDMI_ABORT_PENDING; 3733 3734 fdls_send_fdmi_plogi(iport); ··· 4971 } 4972 4973 if ((fnic_fdmi_support == 1) && (iport->fabric.fdmi_pending > 0)) { 4974 + timer_delete_sync(&iport->fabric.fdmi_timer); 4975 iport->fabric.fdmi_pending = 0; 4976 } 4977
+6 -6
drivers/scsi/fnic/fip.c
··· 319 round_jiffies(fcs_ka_tov)); 320 } else { 321 if (timer_pending(&fnic->fcs_ka_timer)) 322 - del_timer_sync(&fnic->fcs_ka_timer); 323 } 324 325 if (fka_has_changed) { ··· 497 498 oxid = FNIC_STD_GET_OX_ID(fchdr); 499 fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); 500 - del_timer_sync(&fnic->retry_fip_timer); 501 502 if ((be16_to_cpu(flogi_rsp->fip.fip_dl_len) == FIP_FLOGI_LEN) 503 && (flogi_rsp->rsp_desc.flogi.els.fl_cmd == ELS_LS_ACC)) { ··· 580 581 iport->fip.state = FDLS_FIP_INIT; 582 583 - del_timer_sync(&fnic->retry_fip_timer); 584 - del_timer_sync(&fnic->fcs_ka_timer); 585 - del_timer_sync(&fnic->enode_ka_timer); 586 - del_timer_sync(&fnic->vn_ka_timer); 587 588 if (!is_zero_ether_addr(iport->fpma)) 589 vnic_dev_del_addr(fnic->vdev, iport->fpma);
··· 319 round_jiffies(fcs_ka_tov)); 320 } else { 321 if (timer_pending(&fnic->fcs_ka_timer)) 322 + timer_delete_sync(&fnic->fcs_ka_timer); 323 } 324 325 if (fka_has_changed) { ··· 497 498 oxid = FNIC_STD_GET_OX_ID(fchdr); 499 fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); 500 + timer_delete_sync(&fnic->retry_fip_timer); 501 502 if ((be16_to_cpu(flogi_rsp->fip.fip_dl_len) == FIP_FLOGI_LEN) 503 && (flogi_rsp->rsp_desc.flogi.els.fl_cmd == ELS_LS_ACC)) { ··· 580 581 iport->fip.state = FDLS_FIP_INIT; 582 583 + timer_delete_sync(&fnic->retry_fip_timer); 584 + timer_delete_sync(&fnic->fcs_ka_timer); 585 + timer_delete_sync(&fnic->enode_ka_timer); 586 + timer_delete_sync(&fnic->vn_ka_timer); 587 588 if (!is_zero_ether_addr(iport->fpma)) 589 vnic_dev_del_addr(fnic->vdev, iport->fpma);
+6 -6
drivers/scsi/fnic/fnic_main.c
··· 1149 fnic_scsi_unload(fnic); 1150 1151 if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI) 1152 - del_timer_sync(&fnic->notify_timer); 1153 1154 if (fnic->config.flags & VFCF_FIP_CAPABLE) { 1155 - del_timer_sync(&fnic->retry_fip_timer); 1156 - del_timer_sync(&fnic->fcs_ka_timer); 1157 - del_timer_sync(&fnic->enode_ka_timer); 1158 - del_timer_sync(&fnic->vn_ka_timer); 1159 1160 fnic_free_txq(&fnic->fip_frame_queue); 1161 fnic_fcoe_reset_vlans(fnic); 1162 } 1163 1164 if ((fnic_fdmi_support == 1) && (fnic->iport.fabric.fdmi_pending > 0)) 1165 - del_timer_sync(&fnic->iport.fabric.fdmi_timer); 1166 1167 fnic_stats_debugfs_remove(fnic); 1168
··· 1149 fnic_scsi_unload(fnic); 1150 1151 if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI) 1152 + timer_delete_sync(&fnic->notify_timer); 1153 1154 if (fnic->config.flags & VFCF_FIP_CAPABLE) { 1155 + timer_delete_sync(&fnic->retry_fip_timer); 1156 + timer_delete_sync(&fnic->fcs_ka_timer); 1157 + timer_delete_sync(&fnic->enode_ka_timer); 1158 + timer_delete_sync(&fnic->vn_ka_timer); 1159 1160 fnic_free_txq(&fnic->fip_frame_queue); 1161 fnic_fcoe_reset_vlans(fnic); 1162 } 1163 1164 if ((fnic_fdmi_support == 1) && (fnic->iport.fabric.fdmi_pending > 0)) 1165 + timer_delete_sync(&fnic->iport.fabric.fdmi_timer); 1166 1167 fnic_stats_debugfs_remove(fnic); 1168
+3 -3
drivers/scsi/hisi_sas/hisi_sas_main.c
··· 1548 * which is also only used for v1/v2 hw to skip it for v3 hw 1549 */ 1550 if (hisi_hba->hw->sht) 1551 - del_timer_sync(&hisi_hba->timer); 1552 1553 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1554 } ··· 2363 for (i = 0; i < hisi_hba->n_phy; i++) { 2364 struct hisi_sas_phy *phy = &hisi_hba->phy[i]; 2365 2366 - del_timer_sync(&phy->timer); 2367 } 2368 2369 if (hisi_hba->wq) ··· 2625 struct hisi_hba *hisi_hba = sha->lldd_ha; 2626 struct Scsi_Host *shost = sha->shost; 2627 2628 - del_timer_sync(&hisi_hba->timer); 2629 2630 sas_unregister_ha(sha); 2631 sas_remove_host(shost);
··· 1548 * which is also only used for v1/v2 hw to skip it for v3 hw 1549 */ 1550 if (hisi_hba->hw->sht) 1551 + timer_delete_sync(&hisi_hba->timer); 1552 1553 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1554 } ··· 2363 for (i = 0; i < hisi_hba->n_phy; i++) { 2364 struct hisi_sas_phy *phy = &hisi_hba->phy[i]; 2365 2366 + timer_delete_sync(&phy->timer); 2367 } 2368 2369 if (hisi_hba->wq) ··· 2625 struct hisi_hba *hisi_hba = sha->lldd_ha; 2626 struct Scsi_Host *shost = sha->shost; 2627 2628 + timer_delete_sync(&hisi_hba->timer); 2629 2630 sas_unregister_ha(sha); 2631 sas_remove_host(shost);
+7 -7
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
··· 2372 case STAT_IO_COMPLETE: 2373 /* internal abort command complete */ 2374 ts->stat = TMF_RESP_FUNC_SUCC; 2375 - del_timer_sync(&slot->internal_abort_timer); 2376 goto out; 2377 case STAT_IO_NO_DEVICE: 2378 ts->stat = TMF_RESP_FUNC_COMPLETE; 2379 - del_timer_sync(&slot->internal_abort_timer); 2380 goto out; 2381 case STAT_IO_NOT_VALID: 2382 /* abort single io, controller don't find 2383 * the io need to abort 2384 */ 2385 ts->stat = TMF_RESP_FUNC_FAILED; 2386 - del_timer_sync(&slot->internal_abort_timer); 2387 goto out; 2388 default: 2389 break; ··· 2654 if (is_sata_phy_v2_hw(hisi_hba, phy_no)) 2655 goto end; 2656 2657 - del_timer(&phy->timer); 2658 2659 if (phy_no == 8) { 2660 u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE); ··· 2730 struct hisi_sas_port *port = phy->port; 2731 struct device *dev = hisi_hba->dev; 2732 2733 - del_timer(&phy->timer); 2734 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1); 2735 2736 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); ··· 2744 if (port && !get_wideport_bitmap_v2_hw(hisi_hba, port->id)) 2745 if (!check_any_wideports_v2_hw(hisi_hba) && 2746 timer_pending(&hisi_hba->timer)) 2747 - del_timer(&hisi_hba->timer); 2748 2749 txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); 2750 hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, ··· 3204 u8 attached_sas_addr[SAS_ADDR_SIZE] = {0}; 3205 int phy_no, offset; 3206 3207 - del_timer(&phy->timer); 3208 3209 phy_no = sas_phy->id; 3210 initial_fis = &hisi_hba->initial_fis[phy_no];
··· 2372 case STAT_IO_COMPLETE: 2373 /* internal abort command complete */ 2374 ts->stat = TMF_RESP_FUNC_SUCC; 2375 + timer_delete_sync(&slot->internal_abort_timer); 2376 goto out; 2377 case STAT_IO_NO_DEVICE: 2378 ts->stat = TMF_RESP_FUNC_COMPLETE; 2379 + timer_delete_sync(&slot->internal_abort_timer); 2380 goto out; 2381 case STAT_IO_NOT_VALID: 2382 /* abort single io, controller don't find 2383 * the io need to abort 2384 */ 2385 ts->stat = TMF_RESP_FUNC_FAILED; 2386 + timer_delete_sync(&slot->internal_abort_timer); 2387 goto out; 2388 default: 2389 break; ··· 2654 if (is_sata_phy_v2_hw(hisi_hba, phy_no)) 2655 goto end; 2656 2657 + timer_delete(&phy->timer); 2658 2659 if (phy_no == 8) { 2660 u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE); ··· 2730 struct hisi_sas_port *port = phy->port; 2731 struct device *dev = hisi_hba->dev; 2732 2733 + timer_delete(&phy->timer); 2734 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1); 2735 2736 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); ··· 2744 if (port && !get_wideport_bitmap_v2_hw(hisi_hba, port->id)) 2745 if (!check_any_wideports_v2_hw(hisi_hba) && 2746 timer_pending(&hisi_hba->timer)) 2747 + timer_delete(&hisi_hba->timer); 2748 2749 txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO); 2750 hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, ··· 3204 u8 attached_sas_addr[SAS_ADDR_SIZE] = {0}; 3205 int phy_no, offset; 3206 3207 + timer_delete(&phy->timer); 3208 3209 phy_no = sas_phy->id; 3210 initial_fis = &hisi_hba->initial_fis[phy_no];
+2 -2
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
··· 1609 phy->port_id = port_id; 1610 spin_lock(&phy->lock); 1611 /* Delete timer and set phy_attached atomically */ 1612 - del_timer(&phy->timer); 1613 phy->phy_attached = 1; 1614 spin_unlock(&phy->lock); 1615 ··· 1643 1644 atomic_inc(&phy->down_cnt); 1645 1646 - del_timer(&phy->timer); 1647 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1); 1648 1649 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
··· 1609 phy->port_id = port_id; 1610 spin_lock(&phy->lock); 1611 /* Delete timer and set phy_attached atomically */ 1612 + timer_delete(&phy->timer); 1613 phy->phy_attached = 1; 1614 spin_unlock(&phy->lock); 1615 ··· 1643 1644 atomic_inc(&phy->down_cnt); 1645 1646 + timer_delete(&phy->timer); 1647 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1); 1648 1649 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
+8 -8
drivers/scsi/ibmvscsi/ibmvfc.c
··· 1110 } else 1111 evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED); 1112 1113 - del_timer(&evt->timer); 1114 } 1115 1116 /** ··· 1754 atomic_set(&evt->active, 0); 1755 list_del(&evt->queue_list); 1756 spin_unlock_irqrestore(&evt->queue->l_lock, flags); 1757 - del_timer(&evt->timer); 1758 1759 /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY. 1760 * Firmware will send a CRQ with a transport event (0xFF) to ··· 3832 spin_unlock_irqrestore(vhost->host->host_lock, flags); 3833 3834 list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) { 3835 - del_timer(&evt->timer); 3836 list_del(&evt->queue_list); 3837 ibmvfc_trc_end(evt); 3838 evt->done(evt); ··· 3938 spin_unlock_irqrestore(scrq->q_lock, flags); 3939 3940 list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) { 3941 - del_timer(&evt->timer); 3942 list_del(&evt->queue_list); 3943 ibmvfc_trc_end(evt); 3944 evt->done(evt); ··· 4542 4543 vhost->discovery_threads--; 4544 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 4545 - del_timer(&tgt->timer); 4546 4547 switch (status) { 4548 case IBMVFC_MAD_SUCCESS: ··· 4741 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); 4742 if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) { 4743 vhost->discovery_threads--; 4744 - del_timer(&tgt->timer); 4745 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 4746 kref_put(&tgt->kref, ibmvfc_release_tgt); 4747 } else ··· 5519 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT); 5520 spin_unlock_irqrestore(vhost->host->host_lock, flags); 5521 fc_remote_port_delete(rport); 5522 - del_timer_sync(&tgt->timer); 5523 kref_put(&tgt->kref, ibmvfc_release_tgt); 5524 return; 5525 } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) { ··· 5672 spin_unlock_irqrestore(vhost->host->host_lock, flags); 5673 if (rport) 5674 fc_remote_port_delete(rport); 5675 - del_timer_sync(&tgt->timer); 5676 kref_put(&tgt->kref, ibmvfc_release_tgt); 5677 return; 5678 } else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
··· 1110 } else 1111 evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED); 1112 1113 + timer_delete(&evt->timer); 1114 } 1115 1116 /** ··· 1754 atomic_set(&evt->active, 0); 1755 list_del(&evt->queue_list); 1756 spin_unlock_irqrestore(&evt->queue->l_lock, flags); 1757 + timer_delete(&evt->timer); 1758 1759 /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY. 1760 * Firmware will send a CRQ with a transport event (0xFF) to ··· 3832 spin_unlock_irqrestore(vhost->host->host_lock, flags); 3833 3834 list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) { 3835 + timer_delete(&evt->timer); 3836 list_del(&evt->queue_list); 3837 ibmvfc_trc_end(evt); 3838 evt->done(evt); ··· 3938 spin_unlock_irqrestore(scrq->q_lock, flags); 3939 3940 list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) { 3941 + timer_delete(&evt->timer); 3942 list_del(&evt->queue_list); 3943 ibmvfc_trc_end(evt); 3944 evt->done(evt); ··· 4542 4543 vhost->discovery_threads--; 4544 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 4545 + timer_delete(&tgt->timer); 4546 4547 switch (status) { 4548 case IBMVFC_MAD_SUCCESS: ··· 4741 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); 4742 if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) { 4743 vhost->discovery_threads--; 4744 + timer_delete(&tgt->timer); 4745 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); 4746 kref_put(&tgt->kref, ibmvfc_release_tgt); 4747 } else ··· 5519 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT); 5520 spin_unlock_irqrestore(vhost->host->host_lock, flags); 5521 fc_remote_port_delete(rport); 5522 + timer_delete_sync(&tgt->timer); 5523 kref_put(&tgt->kref, ibmvfc_release_tgt); 5524 return; 5525 } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) { ··· 5672 spin_unlock_irqrestore(vhost->host->host_lock, flags); 5673 if (rport) 5674 fc_remote_port_delete(rport); 5675 + timer_delete_sync(&tgt->timer); 5676 kref_put(&tgt->kref, ibmvfc_release_tgt); 5677 return; 5678 } else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
+3 -3
drivers/scsi/ibmvscsi/ibmvscsi.c
··· 789 while (!list_empty(&hostdata->sent)) { 790 evt = list_first_entry(&hostdata->sent, struct srp_event_struct, list); 791 list_del(&evt->list); 792 - del_timer(&evt->timer); 793 794 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 795 if (evt->cmnd) { ··· 944 be64_to_cpu(crq_as_u64[1])); 945 if (rc != 0) { 946 list_del(&evt_struct->list); 947 - del_timer(&evt_struct->timer); 948 949 /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY. 950 * Firmware will send a CRQ with a transport event (0xFF) to ··· 1840 atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta), 1841 &hostdata->request_limit); 1842 1843 - del_timer(&evt_struct->timer); 1844 1845 if ((crq->status != VIOSRP_OK && crq->status != VIOSRP_OK2) && evt_struct->cmnd) 1846 evt_struct->cmnd->result = DID_ERROR << 16;
··· 789 while (!list_empty(&hostdata->sent)) { 790 evt = list_first_entry(&hostdata->sent, struct srp_event_struct, list); 791 list_del(&evt->list); 792 + timer_delete(&evt->timer); 793 794 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 795 if (evt->cmnd) { ··· 944 be64_to_cpu(crq_as_u64[1])); 945 if (rc != 0) { 946 list_del(&evt_struct->list); 947 + timer_delete(&evt_struct->timer); 948 949 /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY. 950 * Firmware will send a CRQ with a transport event (0xFF) to ··· 1840 atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta), 1841 &hostdata->request_limit); 1842 1843 + timer_delete(&evt_struct->timer); 1844 1845 if ((crq->status != VIOSRP_OK && crq->status != VIOSRP_OK2) && evt_struct->cmnd) 1846 evt_struct->cmnd->result = DID_ERROR << 16;
+6 -6
drivers/scsi/ipr.c
··· 873 874 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, 875 IPR_IOASC_IOA_WAS_RESET); 876 - del_timer(&ipr_cmd->timer); 877 ipr_cmd->done(ipr_cmd); 878 } 879 spin_unlock(&hrrq->_lock); ··· 5347 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); 5348 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; 5349 list_del(&ioa_cfg->reset_cmd->queue); 5350 - del_timer(&ioa_cfg->reset_cmd->timer); 5351 ipr_reset_ioa_job(ioa_cfg->reset_cmd); 5352 return IRQ_HANDLED; 5353 } ··· 5362 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 5363 5364 list_del(&ioa_cfg->reset_cmd->queue); 5365 - del_timer(&ioa_cfg->reset_cmd->timer); 5366 ipr_reset_ioa_job(ioa_cfg->reset_cmd); 5367 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) { 5368 if (ioa_cfg->clear_isr) { ··· 5481 5482 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { 5483 list_del(&ipr_cmd->queue); 5484 - del_timer(&ipr_cmd->timer); 5485 ipr_cmd->fast_done(ipr_cmd); 5486 } 5487 ··· 5550 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5551 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { 5552 list_del(&ipr_cmd->queue); 5553 - del_timer(&ipr_cmd->timer); 5554 ipr_cmd->fast_done(ipr_cmd); 5555 } 5556 return rc; ··· 5600 5601 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { 5602 list_del(&ipr_cmd->queue); 5603 - del_timer(&ipr_cmd->timer); 5604 ipr_cmd->fast_done(ipr_cmd); 5605 } 5606 return rc;
··· 873 874 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, 875 IPR_IOASC_IOA_WAS_RESET); 876 + timer_delete(&ipr_cmd->timer); 877 ipr_cmd->done(ipr_cmd); 878 } 879 spin_unlock(&hrrq->_lock); ··· 5347 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); 5348 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; 5349 list_del(&ioa_cfg->reset_cmd->queue); 5350 + timer_delete(&ioa_cfg->reset_cmd->timer); 5351 ipr_reset_ioa_job(ioa_cfg->reset_cmd); 5352 return IRQ_HANDLED; 5353 } ··· 5362 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 5363 5364 list_del(&ioa_cfg->reset_cmd->queue); 5365 + timer_delete(&ioa_cfg->reset_cmd->timer); 5366 ipr_reset_ioa_job(ioa_cfg->reset_cmd); 5367 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) { 5368 if (ioa_cfg->clear_isr) { ··· 5481 5482 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { 5483 list_del(&ipr_cmd->queue); 5484 + timer_delete(&ipr_cmd->timer); 5485 ipr_cmd->fast_done(ipr_cmd); 5486 } 5487 ··· 5550 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); 5551 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { 5552 list_del(&ipr_cmd->queue); 5553 + timer_delete(&ipr_cmd->timer); 5554 ipr_cmd->fast_done(ipr_cmd); 5555 } 5556 return rc; ··· 5600 5601 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) { 5602 list_del(&ipr_cmd->queue); 5603 + timer_delete(&ipr_cmd->timer); 5604 ipr_cmd->fast_done(ipr_cmd); 5605 } 5606 return rc;
+6 -6
drivers/scsi/isci/host.c
··· 1271 /* Cancel any/all outstanding port timers */ 1272 for (i = 0; i < ihost->logical_port_entries; i++) { 1273 struct isci_port *iport = &ihost->ports[i]; 1274 - del_timer_sync(&iport->timer.timer); 1275 } 1276 1277 /* Cancel any/all outstanding phy timers */ 1278 for (i = 0; i < SCI_MAX_PHYS; i++) { 1279 struct isci_phy *iphy = &ihost->phys[i]; 1280 - del_timer_sync(&iphy->sata_timer.timer); 1281 } 1282 1283 - del_timer_sync(&ihost->port_agent.timer.timer); 1284 1285 - del_timer_sync(&ihost->power_control.timer.timer); 1286 1287 - del_timer_sync(&ihost->timer.timer); 1288 1289 - del_timer_sync(&ihost->phy_timer.timer); 1290 } 1291 1292 static void __iomem *scu_base(struct isci_host *isci_host)
··· 1271 /* Cancel any/all outstanding port timers */ 1272 for (i = 0; i < ihost->logical_port_entries; i++) { 1273 struct isci_port *iport = &ihost->ports[i]; 1274 + timer_delete_sync(&iport->timer.timer); 1275 } 1276 1277 /* Cancel any/all outstanding phy timers */ 1278 for (i = 0; i < SCI_MAX_PHYS; i++) { 1279 struct isci_phy *iphy = &ihost->phys[i]; 1280 + timer_delete_sync(&iphy->sata_timer.timer); 1281 } 1282 1283 + timer_delete_sync(&ihost->port_agent.timer.timer); 1284 1285 + timer_delete_sync(&ihost->power_control.timer.timer); 1286 1287 + timer_delete_sync(&ihost->timer.timer); 1288 1289 + timer_delete_sync(&ihost->phy_timer.timer); 1290 } 1291 1292 static void __iomem *scu_base(struct isci_host *isci_host)
+4 -4
drivers/scsi/isci/isci.h
··· 481 482 /* 483 * Each timer is associated with a cancellation flag that is set when 484 - * del_timer() is called and checked in the timer callback function. This 485 - * is needed since del_timer_sync() cannot be called with sci_lock held. 486 - * For deinit however, del_timer_sync() is used without holding the lock. 487 */ 488 struct sci_timer { 489 struct timer_list timer; ··· 506 static inline void sci_del_timer(struct sci_timer *tmr) 507 { 508 tmr->cancel = true; 509 - del_timer(&tmr->timer); 510 } 511 512 struct sci_base_state_machine {
··· 481 482 /* 483 * Each timer is associated with a cancellation flag that is set when 484 + * timer_delete() is called and checked in the timer callback function. This 485 + * is needed since timer_delete_sync() cannot be called with sci_lock held. 486 + * For deinit however, timer_delete_sync() is used without holding the lock. 487 */ 488 struct sci_timer { 489 struct timer_list timer; ··· 506 static inline void sci_del_timer(struct sci_timer *tmr) 507 { 508 tmr->cancel = true; 509 + timer_delete(&tmr->timer); 510 } 511 512 struct sci_base_state_machine {
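The isci.h comment above spells out why this driver mixes both flavours: the expiry callback takes the same lock as the cancelling path, so timer_delete_sync() cannot be called with that lock held and a cancel flag covers the race instead. A minimal sketch of that pattern with the renamed API (all names here are illustrative, not taken from isci):

#include <linux/timer.h>
#include <linux/spinlock.h>

struct example_timer {
        struct timer_list timer;
        bool cancel;            /* set under lock before timer_delete() */
        spinlock_t lock;        /* also taken by the expiry callback */
};

static void example_expiry(struct timer_list *t)
{
        struct example_timer *et = from_timer(et, t, timer);
        unsigned long flags;

        spin_lock_irqsave(&et->lock, flags);
        if (!et->cancel) {
                /* ... handle the timeout under the lock ... */
        }
        spin_unlock_irqrestore(&et->lock, flags);
}

/* Called with et->lock held: timer_delete_sync() would deadlock here
 * because it waits for example_expiry(), which needs the same lock,
 * so delete asynchronously and let the cancel flag absorb the race.
 */
static void example_cancel(struct example_timer *et)
{
        et->cancel = true;
        timer_delete(&et->timer);
}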
+2 -2
drivers/scsi/libfc/fc_fcp.c
··· 1329 fsp->state |= FC_SRB_COMPL; 1330 spin_unlock_bh(&fsp->scsi_pkt_lock); 1331 1332 - del_timer_sync(&fsp->timer); 1333 1334 spin_lock_bh(&fsp->scsi_pkt_lock); 1335 if (fsp->seq_ptr) { ··· 1961 fsp->state |= FC_SRB_COMPL; 1962 if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) { 1963 spin_unlock_bh(&fsp->scsi_pkt_lock); 1964 - del_timer_sync(&fsp->timer); 1965 spin_lock_bh(&fsp->scsi_pkt_lock); 1966 } 1967
··· 1329 fsp->state |= FC_SRB_COMPL; 1330 spin_unlock_bh(&fsp->scsi_pkt_lock); 1331 1332 + timer_delete_sync(&fsp->timer); 1333 1334 spin_lock_bh(&fsp->scsi_pkt_lock); 1335 if (fsp->seq_ptr) { ··· 1961 fsp->state |= FC_SRB_COMPL; 1962 if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) { 1963 spin_unlock_bh(&fsp->scsi_pkt_lock); 1964 + timer_delete_sync(&fsp->timer); 1965 spin_lock_bh(&fsp->scsi_pkt_lock); 1966 } 1967
+3 -3
drivers/scsi/libiscsi.c
··· 1945 session->tmf_state != TMF_QUEUED); 1946 if (signal_pending(current)) 1947 flush_signals(current); 1948 - del_timer_sync(&session->tmf_timer); 1949 1950 mutex_lock(&session->eh_mutex); 1951 spin_lock_bh(&session->frwd_lock); ··· 3247 3248 iscsi_remove_conn(cls_conn); 3249 3250 - del_timer_sync(&conn->transport_timer); 3251 3252 mutex_lock(&session->eh_mutex); 3253 spin_lock_bh(&session->frwd_lock); ··· 3411 conn->stop_stage = flag; 3412 spin_unlock_bh(&session->frwd_lock); 3413 3414 - del_timer_sync(&conn->transport_timer); 3415 iscsi_suspend_tx(conn); 3416 3417 spin_lock_bh(&session->frwd_lock);
··· 1945 session->tmf_state != TMF_QUEUED); 1946 if (signal_pending(current)) 1947 flush_signals(current); 1948 + timer_delete_sync(&session->tmf_timer); 1949 1950 mutex_lock(&session->eh_mutex); 1951 spin_lock_bh(&session->frwd_lock); ··· 3247 3248 iscsi_remove_conn(cls_conn); 3249 3250 + timer_delete_sync(&conn->transport_timer); 3251 3252 mutex_lock(&session->eh_mutex); 3253 spin_lock_bh(&session->frwd_lock); ··· 3411 conn->stop_stage = flag; 3412 spin_unlock_bh(&session->frwd_lock); 3413 3414 + timer_delete_sync(&conn->transport_timer); 3415 iscsi_suspend_tx(conn); 3416 3417 spin_lock_bh(&session->frwd_lock);
+1 -1
drivers/scsi/libsas/sas_expander.c
··· 89 res = i->dft->lldd_execute_task(task, GFP_KERNEL); 90 91 if (res) { 92 - del_timer_sync(&task->slow_task->timer); 93 pr_notice("executing SMP task failed:%d\n", res); 94 break; 95 }
··· 89 res = i->dft->lldd_execute_task(task, GFP_KERNEL); 90 91 if (res) { 92 + timer_delete_sync(&task->slow_task->timer); 93 pr_notice("executing SMP task failed:%d\n", res); 94 break; 95 }
+4 -4
drivers/scsi/libsas/sas_scsi_host.c
··· 859 860 void sas_task_internal_done(struct sas_task *task) 861 { 862 - del_timer(&task->slow_task->timer); 863 complete(&task->slow_task->completion); 864 } 865 ··· 911 912 res = i->dft->lldd_execute_task(task, GFP_KERNEL); 913 if (res) { 914 - del_timer_sync(&task->slow_task->timer); 915 pr_err("Executing internal abort failed %016llx (%d)\n", 916 SAS_ADDR(device->sas_addr), res); 917 break; ··· 1010 1011 res = i->dft->lldd_execute_task(task, GFP_KERNEL); 1012 if (res) { 1013 - del_timer_sync(&task->slow_task->timer); 1014 pr_err("executing TMF task failed %016llx (%d)\n", 1015 SAS_ADDR(device->sas_addr), res); 1016 break; ··· 1180 1181 if (!slow) 1182 return; 1183 - if (!del_timer(&slow->timer)) 1184 return; 1185 slow->timer.function(&slow->timer); 1186 return;
··· 859 860 void sas_task_internal_done(struct sas_task *task) 861 { 862 + timer_delete(&task->slow_task->timer); 863 complete(&task->slow_task->completion); 864 } 865 ··· 911 912 res = i->dft->lldd_execute_task(task, GFP_KERNEL); 913 if (res) { 914 + timer_delete_sync(&task->slow_task->timer); 915 pr_err("Executing internal abort failed %016llx (%d)\n", 916 SAS_ADDR(device->sas_addr), res); 917 break; ··· 1010 1011 res = i->dft->lldd_execute_task(task, GFP_KERNEL); 1012 if (res) { 1013 + timer_delete_sync(&task->slow_task->timer); 1014 pr_err("executing TMF task failed %016llx (%d)\n", 1015 SAS_ADDR(device->sas_addr), res); 1016 break; ··· 1180 1181 if (!slow) 1182 return; 1183 + if (!timer_delete(&slow->timer)) 1184 return; 1185 slow->timer.function(&slow->timer); 1186 return;
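The tail of the sas_scsi_host hunk relies on timer_delete()'s return value: nonzero means a pending timer was deactivated, which lets the caller invoke the expiry function directly and know it runs exactly once. A small sketch of that contract (hypothetical names, not libsas code):

#include <linux/timer.h>

/* Force an armed watchdog to expire immediately.  Because timer_delete()
 * reports whether the timer was still pending, the handler fires exactly
 * once: either here, or already via the timer core.
 */
static void example_force_expire(struct timer_list *watchdog)
{
        if (timer_delete(watchdog))
                watchdog->function(watchdog);
}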
+1 -1
drivers/scsi/lpfc/lpfc_attr.c
··· 2578 (old_val & DISABLE_FCP_RING_INT)) 2579 { 2580 spin_unlock_irq(&phba->hbalock); 2581 - del_timer(&phba->fcp_poll_timer); 2582 spin_lock_irq(&phba->hbalock); 2583 if (lpfc_readl(phba->HCregaddr, &creg_val)) { 2584 spin_unlock_irq(&phba->hbalock);
··· 2578 (old_val & DISABLE_FCP_RING_INT)) 2579 { 2580 spin_unlock_irq(&phba->hbalock); 2581 + timer_delete(&phba->fcp_poll_timer); 2582 spin_lock_irq(&phba->hbalock); 2583 if (lpfc_readl(phba->HCregaddr, &creg_val)) { 2584 spin_unlock_irq(&phba->hbalock);
+2 -2
drivers/scsi/lpfc/lpfc_els.c
··· 4333 4334 if (!test_and_clear_bit(NLP_DELAY_TMO, &nlp->nlp_flag)) 4335 return; 4336 - del_timer_sync(&nlp->nlp_delayfunc); 4337 nlp->nlp_last_elscmd = 0; 4338 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 4339 list_del_init(&nlp->els_retry_evt.evt_listp); ··· 4431 * firing and before processing the timer, cancel the 4432 * nlp_delayfunc. 4433 */ 4434 - del_timer_sync(&ndlp->nlp_delayfunc); 4435 retry = ndlp->nlp_retry; 4436 ndlp->nlp_retry = 0; 4437
··· 4333 4334 if (!test_and_clear_bit(NLP_DELAY_TMO, &nlp->nlp_flag)) 4335 return; 4336 + timer_delete_sync(&nlp->nlp_delayfunc); 4337 nlp->nlp_last_elscmd = 0; 4338 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 4339 list_del_init(&nlp->els_retry_evt.evt_listp); ··· 4431 * firing and before processing the timer, cancel the 4432 * nlp_delayfunc. 4433 */ 4434 + timer_delete_sync(&ndlp->nlp_delayfunc); 4435 retry = ndlp->nlp_retry; 4436 ndlp->nlp_retry = 0; 4437
+4 -4
drivers/scsi/lpfc/lpfc_hbadisc.c
··· 1228 1229 /* Stop delayed Nport discovery */ 1230 clear_bit(FC_DISC_DELAYED, &vport->fc_flag); 1231 - del_timer_sync(&vport->delayed_disc_tmo); 1232 1233 if (phba->sli_rev == LPFC_SLI_REV4 && 1234 vport->port_type == LPFC_PHYSICAL_PORT && ··· 1418 1419 /* Unblock fabric iocbs if they are blocked */ 1420 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 1421 - del_timer_sync(&phba->fabric_block_timer); 1422 1423 vports = lpfc_create_vport_work_array(phba); 1424 if (vports != NULL) ··· 5010 if (test_bit(FC_DISC_TMO, &vport->fc_flag) || 5011 timer_pending(&vport->fc_disctmo)) { 5012 clear_bit(FC_DISC_TMO, &vport->fc_flag); 5013 - del_timer_sync(&vport->fc_disctmo); 5014 spin_lock_irqsave(&vport->work_port_lock, iflags); 5015 vport->work_port_events &= ~WORKER_DISC_TMO; 5016 spin_unlock_irqrestore(&vport->work_port_lock, iflags); ··· 5501 clear_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); 5502 5503 ndlp->nlp_last_elscmd = 0; 5504 - del_timer_sync(&ndlp->nlp_delayfunc); 5505 5506 list_del_init(&ndlp->els_retry_evt.evt_listp); 5507 list_del_init(&ndlp->dev_loss_evt.evt_listp);
··· 1228 1229 /* Stop delayed Nport discovery */ 1230 clear_bit(FC_DISC_DELAYED, &vport->fc_flag); 1231 + timer_delete_sync(&vport->delayed_disc_tmo); 1232 1233 if (phba->sli_rev == LPFC_SLI_REV4 && 1234 vport->port_type == LPFC_PHYSICAL_PORT && ··· 1418 1419 /* Unblock fabric iocbs if they are blocked */ 1420 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 1421 + timer_delete_sync(&phba->fabric_block_timer); 1422 1423 vports = lpfc_create_vport_work_array(phba); 1424 if (vports != NULL) ··· 5010 if (test_bit(FC_DISC_TMO, &vport->fc_flag) || 5011 timer_pending(&vport->fc_disctmo)) { 5012 clear_bit(FC_DISC_TMO, &vport->fc_flag); 5013 + timer_delete_sync(&vport->fc_disctmo); 5014 spin_lock_irqsave(&vport->work_port_lock, iflags); 5015 vport->work_port_events &= ~WORKER_DISC_TMO; 5016 spin_unlock_irqrestore(&vport->work_port_lock, iflags); ··· 5501 clear_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); 5502 5503 ndlp->nlp_last_elscmd = 0; 5504 + timer_delete_sync(&ndlp->nlp_delayfunc); 5505 5506 list_del_init(&ndlp->els_retry_evt.evt_listp); 5507 list_del_init(&ndlp->dev_loss_evt.evt_listp);
+10 -10
drivers/scsi/lpfc/lpfc_init.c
··· 3120 void 3121 lpfc_stop_vport_timers(struct lpfc_vport *vport) 3122 { 3123 - del_timer_sync(&vport->els_tmofunc); 3124 - del_timer_sync(&vport->delayed_disc_tmo); 3125 lpfc_can_disctmo(vport); 3126 return; 3127 } ··· 3140 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 3141 3142 /* Now, try to stop the timer */ 3143 - del_timer(&phba->fcf.redisc_wait); 3144 } 3145 3146 /** ··· 3302 lpfc_stop_vport_timers(phba->pport); 3303 cancel_delayed_work_sync(&phba->eq_delay_work); 3304 cancel_delayed_work_sync(&phba->idle_stat_delay_work); 3305 - del_timer_sync(&phba->sli.mbox_tmo); 3306 - del_timer_sync(&phba->fabric_block_timer); 3307 - del_timer_sync(&phba->eratt_poll); 3308 - del_timer_sync(&phba->hb_tmofunc); 3309 if (phba->sli_rev == LPFC_SLI_REV4) { 3310 - del_timer_sync(&phba->rrq_tmr); 3311 clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag); 3312 } 3313 clear_bit(HBA_HBEAT_INP, &phba->hba_flag); ··· 3316 switch (phba->pci_dev_grp) { 3317 case LPFC_PCI_DEV_LP: 3318 /* Stop any LightPulse device specific driver timers */ 3319 - del_timer_sync(&phba->fcp_poll_timer); 3320 break; 3321 case LPFC_PCI_DEV_OC: 3322 /* Stop any OneConnect device specific driver timers */ ··· 12761 * timer. Wait for the poll timer to retire. 12762 */ 12763 synchronize_rcu(); 12764 - del_timer_sync(&phba->cpuhp_poll_timer); 12765 } 12766 12767 static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
··· 3120 void 3121 lpfc_stop_vport_timers(struct lpfc_vport *vport) 3122 { 3123 + timer_delete_sync(&vport->els_tmofunc); 3124 + timer_delete_sync(&vport->delayed_disc_tmo); 3125 lpfc_can_disctmo(vport); 3126 return; 3127 } ··· 3140 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 3141 3142 /* Now, try to stop the timer */ 3143 + timer_delete(&phba->fcf.redisc_wait); 3144 } 3145 3146 /** ··· 3302 lpfc_stop_vport_timers(phba->pport); 3303 cancel_delayed_work_sync(&phba->eq_delay_work); 3304 cancel_delayed_work_sync(&phba->idle_stat_delay_work); 3305 + timer_delete_sync(&phba->sli.mbox_tmo); 3306 + timer_delete_sync(&phba->fabric_block_timer); 3307 + timer_delete_sync(&phba->eratt_poll); 3308 + timer_delete_sync(&phba->hb_tmofunc); 3309 if (phba->sli_rev == LPFC_SLI_REV4) { 3310 + timer_delete_sync(&phba->rrq_tmr); 3311 clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag); 3312 } 3313 clear_bit(HBA_HBEAT_INP, &phba->hba_flag); ··· 3316 switch (phba->pci_dev_grp) { 3317 case LPFC_PCI_DEV_LP: 3318 /* Stop any LightPulse device specific driver timers */ 3319 + timer_delete_sync(&phba->fcp_poll_timer); 3320 break; 3321 case LPFC_PCI_DEV_OC: 3322 /* Stop any OneConnect device specific driver timers */ ··· 12761 * timer. Wait for the poll timer to retire. 12762 */ 12763 synchronize_rcu(); 12764 + timer_delete_sync(&phba->cpuhp_poll_timer); 12765 } 12766 12767 static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
+1 -1
drivers/scsi/lpfc/lpfc_scsi.c
··· 5488 struct lpfc_vmid *cur; 5489 5490 if (vport->port_type == LPFC_PHYSICAL_PORT) 5491 - del_timer_sync(&vport->phba->inactive_vmid_poll); 5492 5493 kfree(vport->qfpa_res); 5494 kfree(vport->vmid_priority.vmid_range);
··· 5488 struct lpfc_vmid *cur; 5489 5490 if (vport->port_type == LPFC_PHYSICAL_PORT) 5491 + timer_delete_sync(&vport->phba->inactive_vmid_poll); 5492 5493 kfree(vport->qfpa_res); 5494 kfree(vport->vmid_priority.vmid_range);
+5 -5
drivers/scsi/lpfc/lpfc_sli.c
··· 5041 return 1; 5042 } 5043 5044 - del_timer_sync(&psli->mbox_tmo); 5045 if (ha_copy & HA_ERATT) { 5046 writel(HA_ERATT, phba->HAregaddr); 5047 phba->pport->stopped = 1; ··· 12076 local_bh_enable(); 12077 12078 /* Return any active mbox cmds */ 12079 - del_timer_sync(&psli->mbox_tmo); 12080 12081 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 12082 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; ··· 13802 phba->sli.mbox_active = NULL; 13803 spin_unlock_irqrestore(&phba->hbalock, iflag); 13804 phba->last_completion_time = jiffies; 13805 - del_timer(&phba->sli.mbox_tmo); 13806 if (pmb->mbox_cmpl) { 13807 lpfc_sli_pcimem_bcopy(mbox, pmbox, 13808 MAILBOX_CMD_SIZE); ··· 14302 14303 /* Reset heartbeat timer */ 14304 phba->last_completion_time = jiffies; 14305 - del_timer(&phba->sli.mbox_tmo); 14306 14307 /* Move mbox data to caller's mailbox region, do endian swapping */ 14308 if (pmb->mbox_cmpl && mbox) ··· 15689 synchronize_rcu(); 15690 15691 if (list_empty(&phba->poll_list)) 15692 - del_timer_sync(&phba->cpuhp_poll_timer); 15693 } 15694 15695 void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
··· 5041 return 1; 5042 } 5043 5044 + timer_delete_sync(&psli->mbox_tmo); 5045 if (ha_copy & HA_ERATT) { 5046 writel(HA_ERATT, phba->HAregaddr); 5047 phba->pport->stopped = 1; ··· 12076 local_bh_enable(); 12077 12078 /* Return any active mbox cmds */ 12079 + timer_delete_sync(&psli->mbox_tmo); 12080 12081 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 12082 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; ··· 13802 phba->sli.mbox_active = NULL; 13803 spin_unlock_irqrestore(&phba->hbalock, iflag); 13804 phba->last_completion_time = jiffies; 13805 + timer_delete(&phba->sli.mbox_tmo); 13806 if (pmb->mbox_cmpl) { 13807 lpfc_sli_pcimem_bcopy(mbox, pmbox, 13808 MAILBOX_CMD_SIZE); ··· 14302 14303 /* Reset heartbeat timer */ 14304 phba->last_completion_time = jiffies; 14305 + timer_delete(&phba->sli.mbox_tmo); 14306 14307 /* Move mbox data to caller's mailbox region, do endian swapping */ 14308 if (pmb->mbox_cmpl && mbox) ··· 15689 synchronize_rcu(); 15690 15691 if (list_empty(&phba->poll_list)) 15692 + timer_delete_sync(&phba->cpuhp_poll_timer); 15693 } 15694 15695 void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
+1 -1
drivers/scsi/megaraid/megaraid_mbox.c
··· 3951 } 3952 3953 3954 - del_timer_sync(&timeout.timer); 3955 destroy_timer_on_stack(&timeout.timer); 3956 3957 mutex_unlock(&raid_dev->sysfs_mtx);
··· 3951 } 3952 3953 3954 + timer_delete_sync(&timeout.timer); 3955 destroy_timer_on_stack(&timeout.timer); 3956 3957 mutex_unlock(&raid_dev->sysfs_mtx);
+1 -1
drivers/scsi/megaraid/megaraid_mm.c
··· 703 */ 704 wait_event(wait_q, (kioc->status != -ENODATA)); 705 if (timeout.timer.function) { 706 - del_timer_sync(&timeout.timer); 707 destroy_timer_on_stack(&timeout.timer); 708 } 709
··· 703 */ 704 wait_event(wait_q, (kioc->status != -ENODATA)); 705 if (timeout.timer.function) { 706 + timer_delete_sync(&timeout.timer); 707 destroy_timer_on_stack(&timeout.timer); 708 } 709
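Both megaraid hunks above tear down an on-stack timer, which takes the synchronous delete followed by destroy_timer_on_stack() before the stack frame goes away. A minimal sketch of that lifecycle (the callback and timeout value are made up for illustration):

#include <linux/timer.h>
#include <linux/jiffies.h>

static void example_stack_timeout(struct timer_list *t)
{
        /* timeout handling would go here */
}

static void example_wait_with_timeout(void)
{
        struct timer_list timeout;

        timer_setup_on_stack(&timeout, example_stack_timeout, 0);
        mod_timer(&timeout, jiffies + 10 * HZ);

        /* ... wait for the event being timed out ... */

        timer_delete_sync(&timeout);      /* ensure the callback has finished */
        destroy_timer_on_stack(&timeout); /* required for on-stack timers (debugobjects) */
}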
+5 -5
drivers/scsi/megaraid/megaraid_sas_base.c
··· 6521 6522 fail_start_watchdog: 6523 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 6524 - del_timer_sync(&instance->sriov_heartbeat_timer); 6525 fail_get_ld_pd_list: 6526 instance->instancet->disable_intr(instance); 6527 megasas_destroy_irqs(instance); ··· 7603 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; 7604 7605 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7606 - del_timer_sync(&instance->sriov_heartbeat_timer); 7607 7608 instance->instancet->disable_intr(instance); 7609 megasas_destroy_irqs(instance); ··· 7743 7744 /* Shutdown SR-IOV heartbeat timer */ 7745 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7746 - del_timer_sync(&instance->sriov_heartbeat_timer); 7747 7748 /* Stop the FW fault detection watchdog */ 7749 if (instance->adapter_type != MFI_SERIES) ··· 7907 7908 fail_start_watchdog: 7909 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7910 - del_timer_sync(&instance->sriov_heartbeat_timer); 7911 fail_init_mfi: 7912 megasas_free_ctrl_dma_buffers(instance); 7913 megasas_free_ctrl_mem(instance); ··· 7971 7972 /* Shutdown SR-IOV heartbeat timer */ 7973 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7974 - del_timer_sync(&instance->sriov_heartbeat_timer); 7975 7976 /* Stop the FW fault detection watchdog */ 7977 if (instance->adapter_type != MFI_SERIES)
··· 6521 6522 fail_start_watchdog: 6523 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 6524 + timer_delete_sync(&instance->sriov_heartbeat_timer); 6525 fail_get_ld_pd_list: 6526 instance->instancet->disable_intr(instance); 6527 megasas_destroy_irqs(instance); ··· 7603 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; 7604 7605 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7606 + timer_delete_sync(&instance->sriov_heartbeat_timer); 7607 7608 instance->instancet->disable_intr(instance); 7609 megasas_destroy_irqs(instance); ··· 7743 7744 /* Shutdown SR-IOV heartbeat timer */ 7745 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7746 + timer_delete_sync(&instance->sriov_heartbeat_timer); 7747 7748 /* Stop the FW fault detection watchdog */ 7749 if (instance->adapter_type != MFI_SERIES) ··· 7907 7908 fail_start_watchdog: 7909 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7910 + timer_delete_sync(&instance->sriov_heartbeat_timer); 7911 fail_init_mfi: 7912 megasas_free_ctrl_dma_buffers(instance); 7913 megasas_free_ctrl_mem(instance); ··· 7971 7972 /* Shutdown SR-IOV heartbeat timer */ 7973 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 7974 + timer_delete_sync(&instance->sriov_heartbeat_timer); 7975 7976 /* Stop the FW fault detection watchdog */ 7977 if (instance->adapter_type != MFI_SERIES)
+1 -1
drivers/scsi/megaraid/megaraid_sas_fusion.c
··· 4969 } 4970 4971 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 4972 - del_timer_sync(&instance->sriov_heartbeat_timer); 4973 set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); 4974 set_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags); 4975 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING);
··· 4969 } 4970 4971 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 4972 + timer_delete_sync(&instance->sriov_heartbeat_timer); 4973 set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); 4974 set_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags); 4975 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING);
+1 -1
drivers/scsi/mvsas/mv_sas.c
··· 976 static void mvs_sig_remove_timer(struct mvs_phy *phy) 977 { 978 if (phy->timer.function) 979 - del_timer(&phy->timer); 980 phy->timer.function = NULL; 981 } 982
··· 976 static void mvs_sig_remove_timer(struct mvs_phy *phy) 977 { 978 if (phy->timer.function) 979 + timer_delete(&phy->timer); 980 phy->timer.function = NULL; 981 } 982
+3 -3
drivers/scsi/pmcraid.c
··· 495 } 496 497 if (pinstance->reset_cmd != NULL) { 498 - del_timer(&pinstance->reset_cmd->timer); 499 spin_lock_irqsave( 500 pinstance->host->host_lock, lock_flags); 501 pinstance->reset_cmd->cmd_done(pinstance->reset_cmd); ··· 1999 cpu_to_le32(PMCRAID_DRIVER_ILID); 2000 2001 /* In case the command timer is still running */ 2002 - del_timer(&cmd->timer); 2003 2004 /* If this is an IO command, complete it by invoking scsi_done 2005 * function. If this is one of the internal commands other ··· 3982 list_del(&cmd->free_list); 3983 spin_unlock_irqrestore(&pinstance->pending_pool_lock, 3984 pending_lock_flags); 3985 - del_timer(&cmd->timer); 3986 atomic_dec(&pinstance->outstanding_cmds); 3987 3988 if (cmd->cmd_done == pmcraid_ioa_reset) {
··· 495 } 496 497 if (pinstance->reset_cmd != NULL) { 498 + timer_delete(&pinstance->reset_cmd->timer); 499 spin_lock_irqsave( 500 pinstance->host->host_lock, lock_flags); 501 pinstance->reset_cmd->cmd_done(pinstance->reset_cmd); ··· 1999 cpu_to_le32(PMCRAID_DRIVER_ILID); 2000 2001 /* In case the command timer is still running */ 2002 + timer_delete(&cmd->timer); 2003 2004 /* If this is an IO command, complete it by invoking scsi_done 2005 * function. If this is one of the internal commands other ··· 3982 list_del(&cmd->free_list); 3983 spin_unlock_irqrestore(&pinstance->pending_pool_lock, 3984 pending_lock_flags); 3985 + timer_delete(&cmd->timer); 3986 atomic_dec(&pinstance->outstanding_cmds); 3987 3988 if (cmd->cmd_done == pmcraid_ioa_reset) {
+1 -1
drivers/scsi/qla1280.c
··· 2454 qla1280_debounce_register(&reg->istatus); 2455 2456 wait_for_completion(&wait); 2457 - del_timer_sync(&ha->mailbox_timer); 2458 2459 spin_lock_irq(ha->host->host_lock); 2460
··· 2454 qla1280_debounce_register(&reg->istatus); 2455 2456 wait_for_completion(&wait); 2457 + timer_delete_sync(&ha->mailbox_timer); 2458 2459 spin_lock_irq(ha->host->host_lock); 2460
+1 -1
drivers/scsi/qla2xxx/qla_init.c
··· 67 { 68 struct srb_iocb *iocb = &sp->u.iocb_cmd; 69 70 - del_timer(&iocb->timer); 71 qla2x00_rel_sp(sp); 72 } 73
··· 67 { 68 struct srb_iocb *iocb = &sp->u.iocb_cmd; 69 70 + timer_delete(&iocb->timer); 71 qla2x00_rel_sp(sp); 72 } 73
+2 -2
drivers/scsi/qla2xxx/qla_iocb.c
··· 2572 static void 2573 qla2x00_async_done(struct srb *sp, int res) 2574 { 2575 - if (del_timer(&sp->u.iocb_cmd.timer)) { 2576 /* 2577 * Successfully cancelled the timeout handler 2578 * ref: TMR ··· 2645 elsio->u.els_logo.els_logo_pyld, 2646 elsio->u.els_logo.els_logo_pyld_dma); 2647 2648 - del_timer(&elsio->timer); 2649 qla2x00_rel_sp(sp); 2650 } 2651
··· 2572 static void 2573 qla2x00_async_done(struct srb *sp, int res) 2574 { 2575 + if (timer_delete(&sp->u.iocb_cmd.timer)) { 2576 /* 2577 * Successfully cancelled the timeout handler 2578 * ref: TMR ··· 2645 elsio->u.els_logo.els_logo_pyld, 2646 elsio->u.els_logo.els_logo_pyld_dma); 2647 2648 + timer_delete(&elsio->timer); 2649 qla2x00_rel_sp(sp); 2650 } 2651
+1 -1
drivers/scsi/qla2xxx/qla_mid.c
··· 20 qla2x00_vp_stop_timer(scsi_qla_host_t *vha) 21 { 22 if (vha->vp_idx && vha->timer_active) { 23 - del_timer_sync(&vha->timer); 24 vha->timer_active = 0; 25 } 26 }
··· 20 qla2x00_vp_stop_timer(scsi_qla_host_t *vha) 21 { 22 if (vha->vp_idx && vha->timer_active) { 23 + timer_delete_sync(&vha->timer); 24 vha->timer_active = 0; 25 } 26 }
+1 -1
drivers/scsi/qla2xxx/qla_os.c
··· 401 static __inline__ void 402 qla2x00_stop_timer(scsi_qla_host_t *vha) 403 { 404 - del_timer_sync(&vha->timer); 405 vha->timer_active = 0; 406 } 407
··· 401 static __inline__ void 402 qla2x00_stop_timer(scsi_qla_host_t *vha) 403 { 404 + timer_delete_sync(&vha->timer); 405 vha->timer_active = 0; 406 } 407
+1 -1
drivers/scsi/qla4xxx/ql4_os.c
··· 4021 4022 static void qla4xxx_stop_timer(struct scsi_qla_host *ha) 4023 { 4024 - del_timer_sync(&ha->timer); 4025 ha->timer_active = 0; 4026 } 4027
··· 4021 4022 static void qla4xxx_stop_timer(struct scsi_qla_host *ha) 4023 { 4024 + timer_delete_sync(&ha->timer); 4025 ha->timer_active = 0; 4026 } 4027
+1 -1
drivers/scsi/smartpqi/smartpqi_init.c
··· 3853 3854 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 3855 { 3856 - del_timer_sync(&ctrl_info->heartbeat_timer); 3857 } 3858 3859 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
··· 3853 3854 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 3855 { 3856 + timer_delete_sync(&ctrl_info->heartbeat_timer); 3857 } 3858 3859 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
+1 -1
drivers/scsi/sym53c8xx_2/sym_glue.c
··· 1657 struct sym_hcb *np = sym_get_hcb(shost); 1658 printk("%s: detaching ...\n", sym_name(np)); 1659 1660 - del_timer_sync(&np->s.timer); 1661 1662 /* 1663 * Reset NCR chip.
··· 1657 struct sym_hcb *np = sym_get_hcb(shost); 1658 printk("%s: detaching ...\n", sym_name(np)); 1659 1660 + timer_delete_sync(&np->s.timer); 1661 1662 /* 1663 * Reset NCR chip.
+2 -2
drivers/staging/gpib/agilent_82357a/agilent_82357a.c
··· 102 cleanup: 103 if (timeout_msecs) { 104 if (timer_pending(&a_priv->bulk_timer)) 105 - del_timer_sync(&a_priv->bulk_timer); 106 } 107 mutex_lock(&a_priv->bulk_alloc_lock); 108 if (a_priv->bulk_urb) { ··· 169 *actual_data_length = a_priv->bulk_urb->actual_length; 170 cleanup: 171 if (timeout_msecs) 172 - del_timer_sync(&a_priv->bulk_timer); 173 174 mutex_lock(&a_priv->bulk_alloc_lock); 175 if (a_priv->bulk_urb) {
··· 102 cleanup: 103 if (timeout_msecs) { 104 if (timer_pending(&a_priv->bulk_timer)) 105 + timer_delete_sync(&a_priv->bulk_timer); 106 } 107 mutex_lock(&a_priv->bulk_alloc_lock); 108 if (a_priv->bulk_urb) { ··· 169 *actual_data_length = a_priv->bulk_urb->actual_length; 170 cleanup: 171 if (timeout_msecs) 172 + timer_delete_sync(&a_priv->bulk_timer); 173 174 mutex_lock(&a_priv->bulk_alloc_lock); 175 if (a_priv->bulk_urb) {
+2 -2
drivers/staging/gpib/common/gpib_os.c
··· 109 /* Removes the timeout task */ 110 { 111 if (timer_pending(&board->timer)) 112 - del_timer_sync(&board->timer); 113 } 114 115 int io_timed_out(struct gpib_board *board) ··· 163 { 164 atomic_set(&board->pseudo_irq.active, 0); 165 166 - del_timer_sync(&board->pseudo_irq.timer); 167 board->pseudo_irq.handler = NULL; 168 } 169 EXPORT_SYMBOL(gpib_free_pseudo_irq);
··· 109 /* Removes the timeout task */ 110 { 111 if (timer_pending(&board->timer)) 112 + timer_delete_sync(&board->timer); 113 } 114 115 int io_timed_out(struct gpib_board *board) ··· 163 { 164 atomic_set(&board->pseudo_irq.active, 0); 165 166 + timer_delete_sync(&board->pseudo_irq.timer); 167 board->pseudo_irq.handler = NULL; 168 } 169 EXPORT_SYMBOL(gpib_free_pseudo_irq);
+1 -1
drivers/staging/gpib/common/iblib.c
··· 610 611 static void remove_wait_timer(struct wait_info *winfo) 612 { 613 - del_timer_sync(&winfo->timer); 614 destroy_timer_on_stack(&winfo->timer); 615 } 616
··· 610 611 static void remove_wait_timer(struct wait_info *winfo) 612 { 613 + timer_delete_sync(&winfo->timer); 614 destroy_timer_on_stack(&winfo->timer); 615 } 616
+4 -4
drivers/staging/gpib/ni_usb/ni_usb_gpib.c
··· 136 137 retval = usb_submit_urb(ni_priv->bulk_urb, GFP_KERNEL); 138 if (retval) { 139 - del_timer_sync(&ni_priv->bulk_timer); 140 usb_free_urb(ni_priv->bulk_urb); 141 ni_priv->bulk_urb = NULL; 142 dev_err(&usb_dev->dev, "failed to submit bulk out urb, retval=%i\n", ··· 154 retval = ni_priv->bulk_urb->status; 155 } 156 157 - del_timer_sync(&ni_priv->bulk_timer); 158 *actual_data_length = ni_priv->bulk_urb->actual_length; 159 mutex_lock(&ni_priv->bulk_transfer_lock); 160 usb_free_urb(ni_priv->bulk_urb); ··· 222 223 retval = usb_submit_urb(ni_priv->bulk_urb, GFP_KERNEL); 224 if (retval) { 225 - del_timer_sync(&ni_priv->bulk_timer); 226 usb_free_urb(ni_priv->bulk_urb); 227 ni_priv->bulk_urb = NULL; 228 dev_err(&usb_dev->dev, "failed to submit bulk in urb, retval=%i\n", retval); ··· 256 if (ni_priv->bulk_urb->status) 257 retval = ni_priv->bulk_urb->status; 258 } 259 - del_timer_sync(&ni_priv->bulk_timer); 260 *actual_data_length = ni_priv->bulk_urb->actual_length; 261 mutex_lock(&ni_priv->bulk_transfer_lock); 262 usb_free_urb(ni_priv->bulk_urb);
··· 136 137 retval = usb_submit_urb(ni_priv->bulk_urb, GFP_KERNEL); 138 if (retval) { 139 + timer_delete_sync(&ni_priv->bulk_timer); 140 usb_free_urb(ni_priv->bulk_urb); 141 ni_priv->bulk_urb = NULL; 142 dev_err(&usb_dev->dev, "failed to submit bulk out urb, retval=%i\n", ··· 154 retval = ni_priv->bulk_urb->status; 155 } 156 157 + timer_delete_sync(&ni_priv->bulk_timer); 158 *actual_data_length = ni_priv->bulk_urb->actual_length; 159 mutex_lock(&ni_priv->bulk_transfer_lock); 160 usb_free_urb(ni_priv->bulk_urb); ··· 222 223 retval = usb_submit_urb(ni_priv->bulk_urb, GFP_KERNEL); 224 if (retval) { 225 + timer_delete_sync(&ni_priv->bulk_timer); 226 usb_free_urb(ni_priv->bulk_urb); 227 ni_priv->bulk_urb = NULL; 228 dev_err(&usb_dev->dev, "failed to submit bulk in urb, retval=%i\n", retval); ··· 256 if (ni_priv->bulk_urb->status) 257 retval = ni_priv->bulk_urb->status; 258 } 259 + timer_delete_sync(&ni_priv->bulk_timer); 260 *actual_data_length = ni_priv->bulk_urb->actual_length; 261 mutex_lock(&ni_priv->bulk_transfer_lock); 262 usb_free_urb(ni_priv->bulk_urb);
+1 -1
drivers/staging/media/imx/imx-ic-prpencvf.c
··· 781 imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->underrun_buf); 782 783 /* cancel the EOF timeout timer */ 784 - del_timer_sync(&priv->eof_timeout_timer); 785 786 prp_put_ipu_resources(priv); 787 }
··· 781 imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->underrun_buf); 782 783 /* cancel the EOF timeout timer */ 784 + timer_delete_sync(&priv->eof_timeout_timer); 785 786 prp_put_ipu_resources(priv); 787 }
+1 -1
drivers/staging/media/imx/imx-media-csi.c
··· 695 imx_media_free_dma_buf(priv->dev, &priv->underrun_buf); 696 697 /* cancel the EOF timeout timer */ 698 - del_timer_sync(&priv->eof_timeout_timer); 699 700 csi_idmac_put_ipu_resources(priv); 701 }
··· 695 imx_media_free_dma_buf(priv->dev, &priv->underrun_buf); 696 697 /* cancel the EOF timeout timer */ 698 + timer_delete_sync(&priv->eof_timeout_timer); 699 700 csi_idmac_put_ipu_resources(priv); 701 }
+1 -1
drivers/staging/rtl8723bs/core/rtw_cmd.c
··· 1846 if (pcmd->res != H2C_SUCCESS) 1847 _set_timer(&pmlmepriv->assoc_timer, 1); 1848 1849 - del_timer_sync(&pmlmepriv->assoc_timer); 1850 1851 spin_lock_bh(&pmlmepriv->lock); 1852
··· 1846 if (pcmd->res != H2C_SUCCESS) 1847 _set_timer(&pmlmepriv->assoc_timer, 1); 1848 1849 + timer_delete_sync(&pmlmepriv->assoc_timer); 1850 1851 spin_lock_bh(&pmlmepriv->lock); 1852
+2 -2
drivers/staging/rtl8723bs/core/rtw_mlme.c
··· 681 682 if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) { 683 spin_unlock_bh(&pmlmepriv->lock); 684 - del_timer_sync(&pmlmepriv->scan_to_timer); 685 spin_lock_bh(&pmlmepriv->lock); 686 _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY); 687 } ··· 1166 1167 spin_unlock_bh(&pmlmepriv->lock); 1168 /* s5. Cancel assoc_timer */ 1169 - del_timer_sync(&pmlmepriv->assoc_timer); 1170 spin_lock_bh(&pmlmepriv->lock); 1171 } else { 1172 spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
··· 681 682 if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) { 683 spin_unlock_bh(&pmlmepriv->lock); 684 + timer_delete_sync(&pmlmepriv->scan_to_timer); 685 spin_lock_bh(&pmlmepriv->lock); 686 _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY); 687 } ··· 1166 1167 spin_unlock_bh(&pmlmepriv->lock); 1168 /* s5. Cancel assoc_timer */ 1169 + timer_delete_sync(&pmlmepriv->assoc_timer); 1170 spin_lock_bh(&pmlmepriv->lock); 1171 } else { 1172 spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+11 -11
drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
··· 412 return; 413 414 if (padapter->bDriverStopped) { 415 - del_timer_sync(&pmlmeext->survey_timer); 416 - del_timer_sync(&pmlmeext->link_timer); 417 - /* del_timer_sync(&pmlmeext->ADDBA_timer); */ 418 } 419 } 420 ··· 1390 if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) 1391 return _SUCCESS; 1392 1393 - del_timer_sync(&pmlmeext->link_timer); 1394 1395 /* status */ 1396 status = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN + 2)); ··· 1862 break; 1863 1864 case 1: /* SA Query rsp */ 1865 - del_timer_sync(&pmlmeext->sa_query_timer); 1866 break; 1867 default: 1868 break; ··· 4185 struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; 4186 struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); 4187 4188 - del_timer_sync(&pmlmeext->link_timer); 4189 4190 pmlmeinfo->state &= (~WIFI_FW_AUTH_NULL); 4191 pmlmeinfo->state |= WIFI_FW_AUTH_STATE; ··· 4210 struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; 4211 struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); 4212 4213 - del_timer_sync(&pmlmeext->link_timer); 4214 4215 pmlmeinfo->state &= (~(WIFI_FW_AUTH_NULL | WIFI_FW_AUTH_STATE)); 4216 pmlmeinfo->state |= (WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE); ··· 4792 4793 flush_all_cam_entry(padapter); 4794 4795 - del_timer_sync(&pmlmeext->link_timer); 4796 4797 /* pmlmepriv->LinkDetectInfo.TrafficBusyState = false; */ 4798 pmlmepriv->LinkDetectInfo.TrafficTransitionCount = 0; ··· 5268 /* rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain)); */ 5269 5270 /* cancel link timer */ 5271 - del_timer_sync(&pmlmeext->link_timer); 5272 5273 /* clear CAM */ 5274 flush_all_cam_entry(padapter); ··· 5312 /* clear CAM */ 5313 flush_all_cam_entry(padapter); 5314 5315 - del_timer_sync(&pmlmeext->link_timer); 5316 5317 /* set MSR to nolink -> infra. mode */ 5318 /* Set_MSR(padapter, _HW_STATE_NOLINK_); */ ··· 5425 set_channel_bwmode(padapter, ch, offset, bw); 5426 5427 /* cancel link timer */ 5428 - del_timer_sync(&pmlmeext->link_timer); 5429 5430 start_clnt_join(padapter); 5431
··· 412 return; 413 414 if (padapter->bDriverStopped) { 415 + timer_delete_sync(&pmlmeext->survey_timer); 416 + timer_delete_sync(&pmlmeext->link_timer); 417 + /* timer_delete_sync(&pmlmeext->ADDBA_timer); */ 418 } 419 } 420 ··· 1390 if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) 1391 return _SUCCESS; 1392 1393 + timer_delete_sync(&pmlmeext->link_timer); 1394 1395 /* status */ 1396 status = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN + 2)); ··· 1862 break; 1863 1864 case 1: /* SA Query rsp */ 1865 + timer_delete_sync(&pmlmeext->sa_query_timer); 1866 break; 1867 default: 1868 break; ··· 4185 struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; 4186 struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); 4187 4188 + timer_delete_sync(&pmlmeext->link_timer); 4189 4190 pmlmeinfo->state &= (~WIFI_FW_AUTH_NULL); 4191 pmlmeinfo->state |= WIFI_FW_AUTH_STATE; ··· 4210 struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; 4211 struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); 4212 4213 + timer_delete_sync(&pmlmeext->link_timer); 4214 4215 pmlmeinfo->state &= (~(WIFI_FW_AUTH_NULL | WIFI_FW_AUTH_STATE)); 4216 pmlmeinfo->state |= (WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE); ··· 4792 4793 flush_all_cam_entry(padapter); 4794 4795 + timer_delete_sync(&pmlmeext->link_timer); 4796 4797 /* pmlmepriv->LinkDetectInfo.TrafficBusyState = false; */ 4798 pmlmepriv->LinkDetectInfo.TrafficTransitionCount = 0; ··· 5268 /* rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain)); */ 5269 5270 /* cancel link timer */ 5271 + timer_delete_sync(&pmlmeext->link_timer); 5272 5273 /* clear CAM */ 5274 flush_all_cam_entry(padapter); ··· 5312 /* clear CAM */ 5313 flush_all_cam_entry(padapter); 5314 5315 + timer_delete_sync(&pmlmeext->link_timer); 5316 5317 /* set MSR to nolink -> infra. mode */ 5318 /* Set_MSR(padapter, _HW_STATE_NOLINK_); */ ··· 5425 set_channel_bwmode(padapter, ch, offset, bw); 5426 5427 /* cancel link timer */ 5428 + timer_delete_sync(&pmlmeext->link_timer); 5429 5430 start_clnt_join(padapter); 5431
+1 -1
drivers/staging/rtl8723bs/core/rtw_recv.c
··· 1893 spin_unlock_bh(&ppending_recvframe_queue->lock); 1894 } else { 1895 spin_unlock_bh(&ppending_recvframe_queue->lock); 1896 - del_timer_sync(&preorder_ctrl->reordering_ctrl_timer); 1897 } 1898 1899 return _SUCCESS;
··· 1893 spin_unlock_bh(&ppending_recvframe_queue->lock); 1894 } else { 1895 spin_unlock_bh(&ppending_recvframe_queue->lock); 1896 + timer_delete_sync(&preorder_ctrl->reordering_ctrl_timer); 1897 } 1898 1899 return _SUCCESS;
+3 -3
drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
··· 158 159 for (i = 0; i < 16 ; i++) { 160 preorder_ctrl = &psta->recvreorder_ctrl[i]; 161 - del_timer_sync(&preorder_ctrl->reordering_ctrl_timer); 162 } 163 } 164 } ··· 343 /* _rtw_init_sta_xmit_priv(&psta->sta_xmitpriv); */ 344 /* _rtw_init_sta_recv_priv(&psta->sta_recvpriv); */ 345 346 - del_timer_sync(&psta->addba_retry_timer); 347 348 /* for A-MPDU Rx reordering buffer control, cancel reordering_ctrl_timer */ 349 for (i = 0; i < 16 ; i++) { ··· 354 355 preorder_ctrl = &psta->recvreorder_ctrl[i]; 356 357 - del_timer_sync(&preorder_ctrl->reordering_ctrl_timer); 358 359 ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue; 360
··· 158 159 for (i = 0; i < 16 ; i++) { 160 preorder_ctrl = &psta->recvreorder_ctrl[i]; 161 + timer_delete_sync(&preorder_ctrl->reordering_ctrl_timer); 162 } 163 } 164 } ··· 343 /* _rtw_init_sta_xmit_priv(&psta->sta_xmitpriv); */ 344 /* _rtw_init_sta_recv_priv(&psta->sta_recvpriv); */ 345 346 + timer_delete_sync(&psta->addba_retry_timer); 347 348 /* for A-MPDU Rx reordering buffer control, cancel reordering_ctrl_timer */ 349 for (i = 0; i < 16 ; i++) { ··· 354 355 preorder_ctrl = &psta->recvreorder_ctrl[i]; 356 357 + timer_delete_sync(&preorder_ctrl->reordering_ctrl_timer); 358 359 ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue; 360
+1 -1
drivers/staging/rtl8723bs/hal/sdio_ops.c
··· 871 } 872 873 if (hal->sdio_hisr & SDIO_HISR_CPWM1) { 874 - del_timer_sync(&(pwrctl->pwr_rpwm_timer)); 875 876 SdioLocalCmd52Read1Byte(adapter, SDIO_REG_HCPWM1_8723B); 877
··· 871 } 872 873 if (hal->sdio_hisr & SDIO_HISR_CPWM1) { 874 + timer_delete_sync(&(pwrctl->pwr_rpwm_timer)); 875 876 SdioLocalCmd52Read1Byte(adapter, SDIO_REG_HCPWM1_8723B); 877
+6 -6
drivers/staging/rtl8723bs/os_dep/os_intfs.c
··· 697 698 void rtw_cancel_all_timer(struct adapter *padapter) 699 { 700 - del_timer_sync(&padapter->mlmepriv.assoc_timer); 701 702 - del_timer_sync(&padapter->mlmepriv.scan_to_timer); 703 704 - del_timer_sync(&padapter->mlmepriv.dynamic_chk_timer); 705 706 - del_timer_sync(&(adapter_to_pwrctl(padapter)->pwr_state_check_timer)); 707 708 - del_timer_sync(&padapter->mlmepriv.set_scan_deny_timer); 709 rtw_clear_scan_deny(padapter); 710 711 - del_timer_sync(&padapter->recvpriv.signal_stat_timer); 712 713 /* cancel dm timer */ 714 rtw_hal_dm_deinit(padapter);
··· 697 698 void rtw_cancel_all_timer(struct adapter *padapter) 699 { 700 + timer_delete_sync(&padapter->mlmepriv.assoc_timer); 701 702 + timer_delete_sync(&padapter->mlmepriv.scan_to_timer); 703 704 + timer_delete_sync(&padapter->mlmepriv.dynamic_chk_timer); 705 706 + timer_delete_sync(&(adapter_to_pwrctl(padapter)->pwr_state_check_timer)); 707 708 + timer_delete_sync(&padapter->mlmepriv.set_scan_deny_timer); 709 rtw_clear_scan_deny(padapter); 710 711 + timer_delete_sync(&padapter->recvpriv.signal_stat_timer); 712 713 /* cancel dm timer */ 714 rtw_hal_dm_deinit(padapter);
+1 -1
drivers/target/iscsi/iscsi_target_erl0.c
··· 810 sess->time2retain_timer_flags |= ISCSI_TF_STOP; 811 spin_unlock(&se_tpg->session_lock); 812 813 - del_timer_sync(&sess->time2retain_timer); 814 815 spin_lock(&se_tpg->session_lock); 816 sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING;
··· 810 sess->time2retain_timer_flags |= ISCSI_TF_STOP; 811 spin_unlock(&se_tpg->session_lock); 812 813 + timer_delete_sync(&sess->time2retain_timer); 814 815 spin_lock(&se_tpg->session_lock); 816 sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING;
+1 -1
drivers/target/iscsi/iscsi_target_erl1.c
··· 1227 cmd->dataout_timer_flags |= ISCSI_TF_STOP; 1228 spin_unlock_bh(&cmd->dataout_timeout_lock); 1229 1230 - del_timer_sync(&cmd->dataout_timer); 1231 1232 spin_lock_bh(&cmd->dataout_timeout_lock); 1233 cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING;
··· 1227 cmd->dataout_timer_flags |= ISCSI_TF_STOP; 1228 spin_unlock_bh(&cmd->dataout_timeout_lock); 1229 1230 + timer_delete_sync(&cmd->dataout_timer); 1231 1232 spin_lock_bh(&cmd->dataout_timeout_lock); 1233 cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING;
+2 -2
drivers/target/iscsi/iscsi_target_util.c
··· 922 conn->nopin_response_timer_flags |= ISCSI_TF_STOP; 923 spin_unlock_bh(&conn->nopin_timer_lock); 924 925 - del_timer_sync(&conn->nopin_response_timer); 926 927 spin_lock_bh(&conn->nopin_timer_lock); 928 conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING; ··· 989 conn->nopin_timer_flags |= ISCSI_TF_STOP; 990 spin_unlock_bh(&conn->nopin_timer_lock); 991 992 - del_timer_sync(&conn->nopin_timer); 993 994 spin_lock_bh(&conn->nopin_timer_lock); 995 conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
··· 922 conn->nopin_response_timer_flags |= ISCSI_TF_STOP; 923 spin_unlock_bh(&conn->nopin_timer_lock); 924 925 + timer_delete_sync(&conn->nopin_response_timer); 926 927 spin_lock_bh(&conn->nopin_timer_lock); 928 conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING; ··· 989 conn->nopin_timer_flags |= ISCSI_TF_STOP; 990 spin_unlock_bh(&conn->nopin_timer_lock); 991 992 + timer_delete_sync(&conn->nopin_timer); 993 994 spin_lock_bh(&conn->nopin_timer_lock); 995 conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
+4 -4
drivers/target/target_core_user.c
··· 1232 cmd = list_first_entry(queue, struct tcmu_cmd, queue_entry); 1233 mod_timer(timer, cmd->deadline); 1234 } else 1235 - del_timer(timer); 1236 } 1237 1238 static int ··· 2321 { 2322 struct tcmu_dev *udev = TCMU_DEV(dev); 2323 2324 - del_timer_sync(&udev->cmd_timer); 2325 - del_timer_sync(&udev->qfull_timer); 2326 2327 mutex_lock(&root_udev_mutex); 2328 list_del(&udev->node); ··· 2408 tcmu_flush_dcache_range(mb, sizeof(*mb)); 2409 clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); 2410 2411 - del_timer(&udev->cmd_timer); 2412 2413 /* 2414 * ring is empty and qfull queue never contains aborted commands.
··· 1232 cmd = list_first_entry(queue, struct tcmu_cmd, queue_entry); 1233 mod_timer(timer, cmd->deadline); 1234 } else 1235 + timer_delete(timer); 1236 } 1237 1238 static int ··· 2321 { 2322 struct tcmu_dev *udev = TCMU_DEV(dev); 2323 2324 + timer_delete_sync(&udev->cmd_timer); 2325 + timer_delete_sync(&udev->qfull_timer); 2326 2327 mutex_lock(&root_udev_mutex); 2328 list_del(&udev->node); ··· 2408 tcmu_flush_dcache_range(mb, sizeof(*mb)); 2409 clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); 2410 2411 + timer_delete(&udev->cmd_timer); 2412 2413 /* 2414 * ring is empty and qfull queue never contains aborted commands.
+2 -2
drivers/tty/ipwireless/hardware.c
··· 1496 static void handle_setup_get_version_rsp(struct ipw_hardware *hw, 1497 unsigned char vers_no) 1498 { 1499 - del_timer(&hw->setup_timer); 1500 hw->initializing = 0; 1501 printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": card is ready.\n"); 1502 ··· 1721 if (!hw->shutting_down) { 1722 /* Tell everyone we are going down. */ 1723 hw->shutting_down = 1; 1724 - del_timer(&hw->setup_timer); 1725 1726 /* Prevent the hardware from sending any more interrupts */ 1727 do_close_hardware(hw);
··· 1496 static void handle_setup_get_version_rsp(struct ipw_hardware *hw, 1497 unsigned char vers_no) 1498 { 1499 + timer_delete(&hw->setup_timer); 1500 hw->initializing = 0; 1501 printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": card is ready.\n"); 1502 ··· 1721 if (!hw->shutting_down) { 1722 /* Tell everyone we are going down. */ 1723 hw->shutting_down = 1; 1724 + timer_delete(&hw->setup_timer); 1725 1726 /* Prevent the hardware from sending any more interrupts */ 1727 do_close_hardware(hw);
+2 -2
drivers/tty/mips_ejtag_fdc.c
··· 1031 raw_spin_unlock_irq(&priv->lock); 1032 } else { 1033 priv->removing = true; 1034 - del_timer_sync(&priv->poll_timer); 1035 } 1036 kthread_stop(priv->thread); 1037 err_destroy_ports: ··· 1061 raw_spin_unlock_irq(&priv->lock); 1062 } else { 1063 priv->removing = true; 1064 - del_timer_sync(&priv->poll_timer); 1065 } 1066 kthread_stop(priv->thread); 1067
··· 1031 raw_spin_unlock_irq(&priv->lock); 1032 } else { 1033 priv->removing = true; 1034 + timer_delete_sync(&priv->poll_timer); 1035 } 1036 kthread_stop(priv->thread); 1037 err_destroy_ports: ··· 1061 raw_spin_unlock_irq(&priv->lock); 1062 } else { 1063 priv->removing = true; 1064 + timer_delete_sync(&priv->poll_timer); 1065 } 1066 kthread_stop(priv->thread); 1067
+1 -1
drivers/tty/moxa.c
··· 1187 { 1188 pci_unregister_driver(&moxa_pci_driver); 1189 1190 - del_timer_sync(&moxaTimer); 1191 1192 tty_unregister_driver(moxaDriver); 1193 tty_driver_kref_put(moxaDriver);
··· 1187 { 1188 pci_unregister_driver(&moxa_pci_driver); 1189 1190 + timer_delete_sync(&moxaTimer); 1191 1192 tty_unregister_driver(moxaDriver); 1193 tty_driver_kref_put(moxaDriver);
+7 -7
drivers/tty/n_gsm.c
··· 1941 /* Does the reply match our command */ 1942 if (ctrl != NULL && (command == ctrl->cmd || command == CMD_NSC)) { 1943 /* Our command was replied to, kill the retry timer */ 1944 - del_timer(&gsm->t2_timer); 1945 gsm->pending_cmd = NULL; 1946 /* Rejected by the other end */ 1947 if (command == CMD_NSC) ··· 2131 2132 static void gsm_dlci_close(struct gsm_dlci *dlci) 2133 { 2134 - del_timer(&dlci->t1); 2135 if (debug & DBG_ERRORS) 2136 pr_debug("DLCI %d goes closed.\n", dlci->addr); 2137 dlci->state = DLCI_CLOSED; ··· 2144 tty_port_set_initialized(&dlci->port, false); 2145 wake_up_interruptible(&dlci->port.open_wait); 2146 } else { 2147 - del_timer(&dlci->gsm->ka_timer); 2148 dlci->gsm->dead = true; 2149 } 2150 /* A DLCI 0 close is a MUX termination so we need to kick that ··· 2166 2167 /* Note that SABM UA .. SABM UA first UA lost can mean that we go 2168 open -> open */ 2169 - del_timer(&dlci->t1); 2170 /* This will let a tty open continue */ 2171 dlci->state = DLCI_OPEN; 2172 dlci->constipated = false; ··· 3144 } 3145 3146 /* Finish outstanding timers, making sure they are done */ 3147 - del_timer_sync(&gsm->kick_timer); 3148 - del_timer_sync(&gsm->t2_timer); 3149 - del_timer_sync(&gsm->ka_timer); 3150 3151 /* Finish writing to ldisc */ 3152 flush_work(&gsm->tx_work);
··· 1941 /* Does the reply match our command */ 1942 if (ctrl != NULL && (command == ctrl->cmd || command == CMD_NSC)) { 1943 /* Our command was replied to, kill the retry timer */ 1944 + timer_delete(&gsm->t2_timer); 1945 gsm->pending_cmd = NULL; 1946 /* Rejected by the other end */ 1947 if (command == CMD_NSC) ··· 2131 2132 static void gsm_dlci_close(struct gsm_dlci *dlci) 2133 { 2134 + timer_delete(&dlci->t1); 2135 if (debug & DBG_ERRORS) 2136 pr_debug("DLCI %d goes closed.\n", dlci->addr); 2137 dlci->state = DLCI_CLOSED; ··· 2144 tty_port_set_initialized(&dlci->port, false); 2145 wake_up_interruptible(&dlci->port.open_wait); 2146 } else { 2147 + timer_delete(&dlci->gsm->ka_timer); 2148 dlci->gsm->dead = true; 2149 } 2150 /* A DLCI 0 close is a MUX termination so we need to kick that ··· 2166 2167 /* Note that SABM UA .. SABM UA first UA lost can mean that we go 2168 open -> open */ 2169 + timer_delete(&dlci->t1); 2170 /* This will let a tty open continue */ 2171 dlci->state = DLCI_OPEN; 2172 dlci->constipated = false; ··· 3144 } 3145 3146 /* Finish outstanding timers, making sure they are done */ 3147 + timer_delete_sync(&gsm->kick_timer); 3148 + timer_delete_sync(&gsm->t2_timer); 3149 + timer_delete_sync(&gsm->ka_timer); 3150 3151 /* Finish writing to ldisc */ 3152 flush_work(&gsm->tx_work);
+1 -1
drivers/tty/serial/8250/8250_aspeed_vuart.c
··· 550 { 551 struct aspeed_vuart *vuart = platform_get_drvdata(pdev); 552 553 - del_timer_sync(&vuart->unthrottle_timer); 554 aspeed_vuart_set_enabled(vuart, false); 555 serial8250_unregister_port(vuart->line); 556 sysfs_remove_group(&vuart->dev->kobj, &aspeed_vuart_attr_group);
··· 550 { 551 struct aspeed_vuart *vuart = platform_get_drvdata(pdev); 552 553 + timer_delete_sync(&vuart->unthrottle_timer); 554 aspeed_vuart_set_enabled(vuart, false); 555 serial8250_unregister_port(vuart->line); 556 sysfs_remove_group(&vuart->dev->kobj, &aspeed_vuart_attr_group);
+1 -1
drivers/tty/serial/8250/8250_core.c
··· 298 { 299 struct uart_port *port = &up->port; 300 301 - del_timer_sync(&up->timer); 302 up->timer.function = serial8250_timeout; 303 if (port->irq) 304 serial_unlink_irq_chain(up);
··· 298 { 299 struct uart_port *port = &up->port; 300 301 + timer_delete_sync(&up->timer); 302 up->timer.function = serial8250_timeout; 303 if (port->irq) 304 serial_unlink_irq_chain(up);
+1 -1
drivers/tty/serial/altera_uart.c
··· 339 if (port->irq) 340 free_irq(port->irq, port); 341 else 342 - del_timer_sync(&pp->tmr); 343 } 344 345 static const char *altera_uart_type(struct uart_port *port)
··· 339 if (port->irq) 340 free_irq(port->irq, port); 341 else 342 + timer_delete_sync(&pp->tmr); 343 } 344 345 static const char *altera_uart_type(struct uart_port *port)
+2 -2
drivers/tty/serial/amba-pl011.c
··· 1084 1085 uap->dmarx.running = false; 1086 dmaengine_terminate_all(rxchan); 1087 - del_timer(&uap->dmarx.timer); 1088 } else { 1089 mod_timer(&uap->dmarx.timer, 1090 jiffies + msecs_to_jiffies(uap->dmarx.poll_rate)); ··· 1199 pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE); 1200 pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE); 1201 if (uap->dmarx.poll_rate) 1202 - del_timer_sync(&uap->dmarx.timer); 1203 uap->using_rx_dma = false; 1204 } 1205 }
··· 1084 1085 uap->dmarx.running = false; 1086 dmaengine_terminate_all(rxchan); 1087 + timer_delete(&uap->dmarx.timer); 1088 } else { 1089 mod_timer(&uap->dmarx.timer, 1090 jiffies + msecs_to_jiffies(uap->dmarx.poll_rate)); ··· 1199 pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE); 1200 pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE); 1201 if (uap->dmarx.poll_rate) 1202 + timer_delete_sync(&uap->dmarx.timer); 1203 uap->using_rx_dma = false; 1204 } 1205 }
+1 -1
drivers/tty/serial/atmel_serial.c
··· 2017 * Prevent any tasklets being scheduled during 2018 * cleanup 2019 */ 2020 - del_timer_sync(&atmel_port->uart_timer); 2021 2022 /* Make sure that no interrupt is on the fly */ 2023 synchronize_irq(port->irq);
··· 2017 * Prevent any tasklets being scheduled during 2018 * cleanup 2019 */ 2020 + timer_delete_sync(&atmel_port->uart_timer); 2021 2022 /* Make sure that no interrupt is on the fly */ 2023 synchronize_irq(port->irq);
+3 -3
drivers/tty/serial/fsl_lpuart.c
··· 1433 1434 dmaengine_terminate_sync(chan); 1435 if (!sport->dma_idle_int) 1436 - del_timer_sync(&sport->lpuart_timer); 1437 1438 dma_unmap_sg(chan->device->dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE); 1439 kfree(sport->rx_ring.buf); ··· 2071 * baud rate and restart Rx DMA path. 2072 * 2073 * Since timer function acqures port->lock, need to stop before 2074 - * acquring same lock because otherwise del_timer_sync() can deadlock. 2075 */ 2076 if (old && sport->lpuart_dma_rx_use) 2077 lpuart_dma_rx_free(port); ··· 2316 * baud rate and restart Rx DMA path. 2317 * 2318 * Since timer function acqures port->lock, need to stop before 2319 - * acquring same lock because otherwise del_timer_sync() can deadlock. 2320 */ 2321 if (old && sport->lpuart_dma_rx_use) 2322 lpuart_dma_rx_free(port);
··· 1433 1434 dmaengine_terminate_sync(chan); 1435 if (!sport->dma_idle_int) 1436 + timer_delete_sync(&sport->lpuart_timer); 1437 1438 dma_unmap_sg(chan->device->dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE); 1439 kfree(sport->rx_ring.buf); ··· 2071 * baud rate and restart Rx DMA path. 2072 * 2073 * Since timer function acqures port->lock, need to stop before 2074 + * acquring same lock because otherwise timer_delete_sync() can deadlock. 2075 */ 2076 if (old && sport->lpuart_dma_rx_use) 2077 lpuart_dma_rx_free(port); ··· 2316 * baud rate and restart Rx DMA path. 2317 * 2318 * Since timer function acqures port->lock, need to stop before 2319 + * acquring same lock because otherwise timer_delete_sync() can deadlock. 2320 */ 2321 if (old && sport->lpuart_dma_rx_use) 2322 lpuart_dma_rx_free(port);
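Reviewer note, not part of the patch: the comment retained in this hunk (only the API name changes) captures the one real hazard with the synchronous variant — timer_delete_sync() waits for a running callback, so it must not be called while holding a lock that the callback itself takes. A minimal sketch of that ordering, using a hypothetical my_dev/poll_timer pair rather than the lpuart types:

	#include <linux/spinlock.h>
	#include <linux/timer.h>

	struct my_dev {				/* hypothetical driver state */
		spinlock_t lock;
		struct timer_list poll_timer;
	};

	static void my_poll(struct timer_list *t)
	{
		struct my_dev *dev = from_timer(dev, t, poll_timer);

		spin_lock(&dev->lock);		/* the callback takes dev->lock ... */
		/* ... poll the hardware, rearm if needed ... */
		spin_unlock(&dev->lock);
	}

	/* caller holds dev->lock; drop it around the synchronous cancel */
	static void my_stop_polling(struct my_dev *dev)
	{
		spin_unlock(&dev->lock);
		timer_delete_sync(&dev->poll_timer);	/* waits for a running my_poll() */
		spin_lock(&dev->lock);
	}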
+2 -2
drivers/tty/serial/imx.c
··· 1619 /* 1620 * Stop our timer. 1621 */ 1622 - del_timer_sync(&sport->timer); 1623 1624 /* 1625 * Disable all interrupts, port and break condition. ··· 1752 old_csize = CS8; 1753 } 1754 1755 - del_timer_sync(&sport->timer); 1756 1757 /* 1758 * Ask the core to calculate the divisor for us.
··· 1619 /* 1620 * Stop our timer. 1621 */ 1622 + timer_delete_sync(&sport->timer); 1623 1624 /* 1625 * Disable all interrupts, port and break condition. ··· 1752 old_csize = CS8; 1753 } 1754 1755 + timer_delete_sync(&sport->timer); 1756 1757 /* 1758 * Ask the core to calculate the divisor for us.
+2 -2
drivers/tty/serial/liteuart.c
··· 96 struct liteuart_port *uart = to_liteuart_port(port); 97 98 /* just delete timer */ 99 - del_timer(&uart->timer); 100 } 101 102 static void liteuart_rx_chars(struct uart_port *port) ··· 220 if (port->irq) 221 free_irq(port->irq, port); 222 else 223 - del_timer_sync(&uart->timer); 224 } 225 226 static void liteuart_set_termios(struct uart_port *port, struct ktermios *new,
··· 96 struct liteuart_port *uart = to_liteuart_port(port); 97 98 /* just delete timer */ 99 + timer_delete(&uart->timer); 100 } 101 102 static void liteuart_rx_chars(struct uart_port *port) ··· 220 if (port->irq) 221 free_irq(port->irq, port); 222 else 223 + timer_delete_sync(&uart->timer); 224 } 225 226 static void liteuart_set_termios(struct uart_port *port, struct ktermios *new,
+2 -2
drivers/tty/serial/max3100.c
··· 506 MAX3100_STATUS_PE | MAX3100_STATUS_FE | 507 MAX3100_STATUS_OE; 508 509 - del_timer_sync(&s->timer); 510 uart_update_timeout(port, termios->c_cflag, baud); 511 512 spin_lock(&s->conf_lock); ··· 532 533 s->force_end_work = 1; 534 535 - del_timer_sync(&s->timer); 536 537 if (s->workqueue) { 538 destroy_workqueue(s->workqueue);
··· 506 MAX3100_STATUS_PE | MAX3100_STATUS_FE | 507 MAX3100_STATUS_OE; 508 509 + timer_delete_sync(&s->timer); 510 uart_update_timeout(port, termios->c_cflag, baud); 511 512 spin_lock(&s->conf_lock); ··· 532 533 s->force_end_work = 1; 534 535 + timer_delete_sync(&s->timer); 536 537 if (s->workqueue) { 538 destroy_workqueue(s->workqueue);
+1 -1
drivers/tty/serial/mux.c
··· 563 { 564 /* Delete the Mux timer. */ 565 if(port_cnt > 0) { 566 - del_timer_sync(&mux_timer); 567 #ifdef CONFIG_SERIAL_MUX_CONSOLE 568 unregister_console(&mux_console); 569 #endif
··· 563 { 564 /* Delete the Mux timer. */ 565 if(port_cnt > 0) { 566 + timer_delete_sync(&mux_timer); 567 #ifdef CONFIG_SERIAL_MUX_CONSOLE 568 unregister_console(&mux_console); 569 #endif
+2 -2
drivers/tty/serial/sa1100.c
··· 369 /* 370 * Stop our timer. 371 */ 372 - del_timer_sync(&sport->timer); 373 374 /* 375 * Free the interrupt ··· 421 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); 422 quot = uart_get_divisor(port, baud); 423 424 - del_timer_sync(&sport->timer); 425 426 uart_port_lock_irqsave(&sport->port, &flags); 427
··· 369 /* 370 * Stop our timer. 371 */ 372 + timer_delete_sync(&sport->timer); 373 374 /* 375 * Free the interrupt ··· 421 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); 422 quot = uart_get_divisor(port, baud); 423 424 + timer_delete_sync(&sport->timer); 425 426 uart_port_lock_irqsave(&sport->port, &flags); 427
+1 -1
drivers/tty/serial/sccnxp.c
··· 1033 if (!s->poll) 1034 devm_free_irq(&pdev->dev, s->irq, s); 1035 else 1036 - del_timer_sync(&s->timer); 1037 1038 for (i = 0; i < s->uart.nr; i++) 1039 uart_remove_one_port(&s->uart, &s->port[i]);
··· 1033 if (!s->poll) 1034 devm_free_irq(&pdev->dev, s->irq, s); 1035 else 1036 + timer_delete_sync(&s->timer); 1037 1038 for (i = 0; i < s->uart.nr; i++) 1039 uart_remove_one_port(&s->uart, &s->port[i]);
+1 -1
drivers/tty/serial/sh-sci.c
··· 2337 #endif 2338 2339 if (s->rx_trigger > 1 && s->rx_fifo_timeout > 0) 2340 - del_timer_sync(&s->rx_fifo_timer); 2341 sci_free_irq(s); 2342 sci_free_dma(port); 2343 }
··· 2337 #endif 2338 2339 if (s->rx_trigger > 1 && s->rx_fifo_timeout > 0) 2340 + timer_delete_sync(&s->rx_fifo_timer); 2341 sci_free_irq(s); 2342 sci_free_dma(port); 2343 }
+2 -2
drivers/tty/sysrq.c
··· 743 */ 744 if (value && state->reset_seq_cnt) { 745 state->reset_canceled = true; 746 - del_timer(&state->keyreset_timer); 747 } 748 } else if (value == 0) { 749 /* ··· 751 * to be pressed and held for the reset timeout 752 * to hold. 753 */ 754 - del_timer(&state->keyreset_timer); 755 756 if (--state->reset_seq_cnt == 0) 757 state->reset_canceled = false;
··· 743 */ 744 if (value && state->reset_seq_cnt) { 745 state->reset_canceled = true; 746 + timer_delete(&state->keyreset_timer); 747 } 748 } else if (value == 0) { 749 /* ··· 751 * to be pressed and held for the reset timeout 752 * to hold. 753 */ 754 + timer_delete(&state->keyreset_timer); 755 756 if (--state->reset_seq_cnt == 0) 757 state->reset_canceled = false;
+3 -3
drivers/tty/vcc.c
··· 683 { 684 struct vcc_port *port = dev_get_drvdata(&vdev->dev); 685 686 - del_timer_sync(&port->rx_timer); 687 - del_timer_sync(&port->tx_timer); 688 689 /* If there's a process with the device open, do a synchronous 690 * hangup of the TTY. This *may* cause the process to call close ··· 700 701 tty_unregister_device(vcc_tty_driver, port->index); 702 703 - del_timer_sync(&port->vio.timer); 704 vio_ldc_free(&port->vio); 705 sysfs_remove_group(&vdev->dev.kobj, &vcc_attribute_group); 706 dev_set_drvdata(&vdev->dev, NULL);
··· 683 { 684 struct vcc_port *port = dev_get_drvdata(&vdev->dev); 685 686 + timer_delete_sync(&port->rx_timer); 687 + timer_delete_sync(&port->tx_timer); 688 689 /* If there's a process with the device open, do a synchronous 690 * hangup of the TTY. This *may* cause the process to call close ··· 700 701 tty_unregister_device(vcc_tty_driver, port->index); 702 703 + timer_delete_sync(&port->vio.timer); 704 vio_ldc_free(&port->vio); 705 sysfs_remove_group(&vdev->dev.kobj, &vcc_attribute_group); 706 dev_set_drvdata(&vdev->dev, NULL);
+1 -1
drivers/tty/vt/keyboard.c
··· 275 276 void kd_mksound(unsigned int hz, unsigned int ticks) 277 { 278 - del_timer_sync(&kd_mksound_timer); 279 280 input_handler_for_each_handle(&kbd_handler, &hz, kd_sound_helper); 281
··· 275 276 void kd_mksound(unsigned int hz, unsigned int ticks) 277 { 278 + timer_delete_sync(&kd_mksound_timer); 279 280 input_handler_for_each_handle(&kbd_handler, &hz, kd_sound_helper); 281
+2 -2
drivers/tty/vt/vt.c
··· 4501 } 4502 4503 hide_cursor(vc); 4504 - del_timer_sync(&console_timer); 4505 blank_timer_expired = 0; 4506 4507 save_screen(vc); ··· 4606 /* This isn't perfectly race free, but a race here would be mostly harmless, 4607 * at worst, we'll do a spurious blank and it's unlikely 4608 */ 4609 - del_timer(&console_timer); 4610 blank_timer_expired = 0; 4611 4612 if (ignore_poke || !vc_cons[fg_console].d || vc_cons[fg_console].d->vc_mode == KD_GRAPHICS)
··· 4501 } 4502 4503 hide_cursor(vc); 4504 + timer_delete_sync(&console_timer); 4505 blank_timer_expired = 0; 4506 4507 save_screen(vc); ··· 4606 /* This isn't perfectly race free, but a race here would be mostly harmless, 4607 * at worst, we'll do a spurious blank and it's unlikely 4608 */ 4609 + timer_delete(&console_timer); 4610 blank_timer_expired = 0; 4611 4612 if (ignore_poke || !vc_cons[fg_console].d || vc_cons[fg_console].d->vc_mode == KD_GRAPHICS)
+1 -1
drivers/usb/atm/cxacru.c
··· 597 timer_setup_on_stack(&timer.timer, cxacru_timeout_kill, 0); 598 mod_timer(&timer.timer, jiffies + msecs_to_jiffies(CMD_TIMEOUT)); 599 wait_for_completion(done); 600 - del_timer_sync(&timer.timer); 601 destroy_timer_on_stack(&timer.timer); 602 603 if (actual_length)
··· 597 timer_setup_on_stack(&timer.timer, cxacru_timeout_kill, 0); 598 mod_timer(&timer.timer, jiffies + msecs_to_jiffies(CMD_TIMEOUT)); 599 wait_for_completion(done); 600 + timer_delete_sync(&timer.timer); 601 destroy_timer_on_stack(&timer.timer); 602 603 if (actual_length)
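Reviewer note, not part of the patch: the hunk above also shows the on-stack timeout idiom that the rename leaves otherwise untouched — arm a stack timer, block, cancel synchronously, destroy the stack object. A self-contained sketch of the same idiom, with a hypothetical callback and timeout value (illustrative only, not taken from the driver):

	#include <linux/completion.h>
	#include <linux/jiffies.h>
	#include <linux/timer.h>

	#define MY_CMD_TIMEOUT_MS	1000	/* hypothetical timeout */

	static void my_timeout_kill(struct timer_list *t)
	{
		/* hypothetical: abort the operation being waited on */
	}

	static void my_wait_with_timeout(struct completion *done)
	{
		struct timer_list timer;

		timer_setup_on_stack(&timer, my_timeout_kill, 0);
		mod_timer(&timer, jiffies + msecs_to_jiffies(MY_CMD_TIMEOUT_MS));
		wait_for_completion(done);
		timer_delete_sync(&timer);	/* nonzero here means the timeout never fired */
		destroy_timer_on_stack(&timer);
	}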
+4 -4
drivers/usb/atm/speedtch.c
··· 612 } 613 614 if ((count == 6) && !memcmp(up_int, instance->int_data, 6)) { 615 - del_timer(&instance->status_check_timer); 616 atm_info(usbatm, "DSL line goes up\n"); 617 } else if ((count == 6) && !memcmp(down_int, instance->int_data, 6)) { 618 atm_info(usbatm, "DSL line goes down\n"); ··· 688 689 atm_dbg(usbatm, "%s entered\n", __func__); 690 691 - del_timer_sync(&instance->status_check_timer); 692 693 /* 694 * Since resubmit_timer and int_urb can schedule themselves and ··· 697 instance->int_urb = NULL; /* signal shutdown */ 698 mb(); 699 usb_kill_urb(int_urb); 700 - del_timer_sync(&instance->resubmit_timer); 701 /* 702 * At this point, speedtch_handle_int and speedtch_resubmit_int 703 * can run or be running, but instance->int_urb == NULL means that 704 * they will not reschedule 705 */ 706 usb_kill_urb(int_urb); 707 - del_timer_sync(&instance->resubmit_timer); 708 usb_free_urb(int_urb); 709 710 flush_work(&instance->status_check_work);
··· 612 } 613 614 if ((count == 6) && !memcmp(up_int, instance->int_data, 6)) { 615 + timer_delete(&instance->status_check_timer); 616 atm_info(usbatm, "DSL line goes up\n"); 617 } else if ((count == 6) && !memcmp(down_int, instance->int_data, 6)) { 618 atm_info(usbatm, "DSL line goes down\n"); ··· 688 689 atm_dbg(usbatm, "%s entered\n", __func__); 690 691 + timer_delete_sync(&instance->status_check_timer); 692 693 /* 694 * Since resubmit_timer and int_urb can schedule themselves and ··· 697 instance->int_urb = NULL; /* signal shutdown */ 698 mb(); 699 usb_kill_urb(int_urb); 700 + timer_delete_sync(&instance->resubmit_timer); 701 /* 702 * At this point, speedtch_handle_int and speedtch_resubmit_int 703 * can run or be running, but instance->int_urb == NULL means that 704 * they will not reschedule 705 */ 706 usb_kill_urb(int_urb); 707 + timer_delete_sync(&instance->resubmit_timer); 708 usb_free_urb(int_urb); 709 710 flush_work(&instance->status_check_work);
+2 -2
drivers/usb/atm/usbatm.c
··· 1237 for (i = 0; i < num_rcv_urbs + num_snd_urbs; i++) 1238 usb_kill_urb(instance->urbs[i]); 1239 1240 - del_timer_sync(&instance->rx_channel.delay); 1241 - del_timer_sync(&instance->tx_channel.delay); 1242 1243 /* turn usbatm_[rt]x_process into something close to a no-op */ 1244 /* no need to take the spinlock */
··· 1237 for (i = 0; i < num_rcv_urbs + num_snd_urbs; i++) 1238 usb_kill_urb(instance->urbs[i]); 1239 1240 + timer_delete_sync(&instance->rx_channel.delay); 1241 + timer_delete_sync(&instance->tx_channel.delay); 1242 1243 /* turn usbatm_[rt]x_process into something close to a no-op */ 1244 /* no need to take the spinlock */
+3 -3
drivers/usb/core/hcd.c
··· 842 843 } else { /* Status URB */ 844 if (!hcd->uses_new_polling) 845 - del_timer (&hcd->rh_timer); 846 if (urb == hcd->status_urb) { 847 hcd->status_urb = NULL; 848 usb_hcd_unlink_urb_from_ep(hcd, urb); ··· 2768 { 2769 hcd->rh_pollable = 0; 2770 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 2771 - del_timer_sync(&hcd->rh_timer); 2772 2773 hcd->driver->stop(hcd); 2774 hcd->state = HC_STATE_HALT; 2775 2776 /* In case the HCD restarted the timer, stop it again. */ 2777 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 2778 - del_timer_sync(&hcd->rh_timer); 2779 } 2780 2781 /**
··· 842 843 } else { /* Status URB */ 844 if (!hcd->uses_new_polling) 845 + timer_delete(&hcd->rh_timer); 846 if (urb == hcd->status_urb) { 847 hcd->status_urb = NULL; 848 usb_hcd_unlink_urb_from_ep(hcd, urb); ··· 2768 { 2769 hcd->rh_pollable = 0; 2770 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 2771 + timer_delete_sync(&hcd->rh_timer); 2772 2773 hcd->driver->stop(hcd); 2774 hcd->state = HC_STATE_HALT; 2775 2776 /* In case the HCD restarted the timer, stop it again. */ 2777 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 2778 + timer_delete_sync(&hcd->rh_timer); 2779 } 2780 2781 /**
+1 -1
drivers/usb/core/hub.c
··· 1385 } 1386 1387 /* Stop hub_wq and related activity */ 1388 - del_timer_sync(&hub->irq_urb_retry); 1389 usb_kill_urb(hub->urb); 1390 if (hub->has_indicators) 1391 cancel_delayed_work_sync(&hub->leds);
··· 1385 } 1386 1387 /* Stop hub_wq and related activity */ 1388 + timer_delete_sync(&hub->irq_urb_retry); 1389 usb_kill_urb(hub->urb); 1390 if (hub->has_indicators) 1391 cancel_delayed_work_sync(&hub->leds);
+1 -1
drivers/usb/dwc2/hcd.c
··· 5081 5082 cancel_work_sync(&hsotg->phy_reset_work); 5083 5084 - del_timer(&hsotg->wkp_timer); 5085 } 5086 5087 static void dwc2_hcd_release(struct dwc2_hsotg *hsotg)
··· 5081 5082 cancel_work_sync(&hsotg->phy_reset_work); 5083 5084 + timer_delete(&hsotg->wkp_timer); 5085 } 5086 5087 static void dwc2_hcd_release(struct dwc2_hsotg *hsotg)
+2 -2
drivers/usb/dwc2/hcd_queue.c
··· 1302 } 1303 1304 /* Cancel pending unreserve; if canceled OK, unreserve was pending */ 1305 - if (del_timer(&qh->unreserve_timer)) 1306 WARN_ON(!qh->unreserve_pending); 1307 1308 /* ··· 1614 void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) 1615 { 1616 /* Make sure any unreserve work is finished. */ 1617 - if (del_timer_sync(&qh->unreserve_timer)) { 1618 unsigned long flags; 1619 1620 spin_lock_irqsave(&hsotg->lock, flags);
··· 1302 } 1303 1304 /* Cancel pending unreserve; if canceled OK, unreserve was pending */ 1305 + if (timer_delete(&qh->unreserve_timer)) 1306 WARN_ON(!qh->unreserve_pending); 1307 1308 /* ··· 1614 void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) 1615 { 1616 /* Make sure any unreserve work is finished. */ 1617 + if (timer_delete_sync(&qh->unreserve_timer)) { 1618 unsigned long flags; 1619 1620 spin_lock_irqsave(&hsotg->lock, flags);
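Reviewer note, not part of the patch: both call sites above depend on the return value, and the rename is a drop-in there because timer_delete() and timer_delete_sync() keep the old convention — nonzero when a pending timer was deactivated, zero when nothing was queued. A tiny illustrative sketch with a hypothetical helper:

	#include <linux/printk.h>
	#include <linux/timer.h>

	static void my_cancel_and_report(struct timer_list *t)
	{
		if (timer_delete_sync(t))
			pr_debug("timer was still pending; its callback never ran\n");
		else
			pr_debug("timer was idle (or its callback already completed)\n");
	}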
+2 -2
drivers/usb/gadget/legacy/zero.c
··· 194 static void zero_resume(struct usb_composite_dev *cdev) 195 { 196 DBG(cdev, "%s\n", __func__); 197 - del_timer(&autoresume_timer); 198 } 199 200 /*-------------------------------------------------------------------------*/ ··· 398 399 static int zero_unbind(struct usb_composite_dev *cdev) 400 { 401 - del_timer_sync(&autoresume_timer); 402 if (!IS_ERR_OR_NULL(func_ss)) 403 usb_put_function(func_ss); 404 usb_put_function_instance(func_inst_ss);
··· 194 static void zero_resume(struct usb_composite_dev *cdev) 195 { 196 DBG(cdev, "%s\n", __func__); 197 + timer_delete(&autoresume_timer); 198 } 199 200 /*-------------------------------------------------------------------------*/ ··· 398 399 static int zero_unbind(struct usb_composite_dev *cdev) 400 { 401 + timer_delete_sync(&autoresume_timer); 402 if (!IS_ERR_OR_NULL(func_ss)) 403 usb_put_function(func_ss); 404 usb_put_function_instance(func_inst_ss);
+1 -1
drivers/usb/gadget/udc/omap_udc.c
··· 252 ep->has_dma = 0; 253 omap_writew(UDC_SET_HALT, UDC_CTRL); 254 list_del_init(&ep->iso); 255 - del_timer(&ep->timer); 256 257 spin_unlock_irqrestore(&ep->udc->lock, flags); 258
··· 252 ep->has_dma = 0; 253 omap_writew(UDC_SET_HALT, UDC_CTRL); 254 list_del_init(&ep->iso); 255 + timer_delete(&ep->timer); 256 257 spin_unlock_irqrestore(&ep->udc->lock, flags); 258
+4 -4
drivers/usb/gadget/udc/pxa25x_udc.c
··· 1503 ep->stopped = 1; 1504 nuke(ep, -ESHUTDOWN); 1505 } 1506 - del_timer_sync(&dev->timer); 1507 1508 /* report reset; the driver is already quiesced */ 1509 if (driver) ··· 1530 ep->stopped = 1; 1531 nuke(ep, -ESHUTDOWN); 1532 } 1533 - del_timer_sync(&dev->timer); 1534 1535 /* report disconnect; the driver is already quiesced */ 1536 if (driver) ··· 1607 if (udccs0 & UDCCS0_SST) { 1608 nuke(ep, -EPIPE); 1609 udc_ep0_set_UDCCS(dev, UDCCS0_SST); 1610 - del_timer(&dev->timer); 1611 ep0_idle(dev); 1612 } 1613 1614 /* previous request unfinished? non-error iff back-to-back ... */ 1615 if ((udccs0 & UDCCS0_SA) != 0 && dev->ep0state != EP0_IDLE) { 1616 nuke(ep, 0); 1617 - del_timer(&dev->timer); 1618 ep0_idle(dev); 1619 } 1620
··· 1503 ep->stopped = 1; 1504 nuke(ep, -ESHUTDOWN); 1505 } 1506 + timer_delete_sync(&dev->timer); 1507 1508 /* report reset; the driver is already quiesced */ 1509 if (driver) ··· 1530 ep->stopped = 1; 1531 nuke(ep, -ESHUTDOWN); 1532 } 1533 + timer_delete_sync(&dev->timer); 1534 1535 /* report disconnect; the driver is already quiesced */ 1536 if (driver) ··· 1607 if (udccs0 & UDCCS0_SST) { 1608 nuke(ep, -EPIPE); 1609 udc_ep0_set_UDCCS(dev, UDCCS0_SST); 1610 + timer_delete(&dev->timer); 1611 ep0_idle(dev); 1612 } 1613 1614 /* previous request unfinished? non-error iff back-to-back ... */ 1615 if ((udccs0 & UDCCS0_SA) != 0 && dev->ep0state != EP0_IDLE) { 1616 nuke(ep, 0); 1617 + timer_delete(&dev->timer); 1618 ep0_idle(dev); 1619 } 1620
+1 -1
drivers/usb/gadget/udc/r8a66597-udc.c
··· 1810 struct r8a66597 *r8a66597 = platform_get_drvdata(pdev); 1811 1812 usb_del_gadget_udc(&r8a66597->gadget); 1813 - del_timer_sync(&r8a66597->timer); 1814 r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req); 1815 1816 if (r8a66597->pdata->on_chip) {
··· 1810 struct r8a66597 *r8a66597 = platform_get_drvdata(pdev); 1811 1812 usb_del_gadget_udc(&r8a66597->gadget); 1813 + timer_delete_sync(&r8a66597->timer); 1814 r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req); 1815 1816 if (r8a66597->pdata->on_chip) {
+2 -2
drivers/usb/gadget/udc/snps_udc_core.c
··· 3035 stop_timer++; 3036 if (timer_pending(&udc_timer)) 3037 wait_for_completion(&on_exit); 3038 - del_timer_sync(&udc_timer); 3039 /* remove pollstall timer */ 3040 stop_pollstall_timer++; 3041 if (timer_pending(&udc_pollstall_timer)) 3042 wait_for_completion(&on_pollstall_exit); 3043 - del_timer_sync(&udc_pollstall_timer); 3044 udc = NULL; 3045 } 3046 EXPORT_SYMBOL_GPL(udc_remove);
··· 3035 stop_timer++; 3036 if (timer_pending(&udc_timer)) 3037 wait_for_completion(&on_exit); 3038 + timer_delete_sync(&udc_timer); 3039 /* remove pollstall timer */ 3040 stop_pollstall_timer++; 3041 if (timer_pending(&udc_pollstall_timer)) 3042 wait_for_completion(&on_pollstall_exit); 3043 + timer_delete_sync(&udc_pollstall_timer); 3044 udc = NULL; 3045 } 3046 EXPORT_SYMBOL_GPL(udc_remove);
+1 -1
drivers/usb/host/ehci-platform.c
··· 224 225 static void quirk_poll_end(struct ehci_platform_priv *priv) 226 { 227 - del_timer_sync(&priv->poll_timer); 228 cancel_delayed_work(&priv->poll_work); 229 } 230
··· 224 225 static void quirk_poll_end(struct ehci_platform_priv *priv) 226 { 227 + timer_delete_sync(&priv->poll_timer); 228 cancel_delayed_work(&priv->poll_work); 229 } 230
+1 -1
drivers/usb/host/isp1362-hcd.c
··· 2357 2358 pr_debug("%s:\n", __func__); 2359 2360 - del_timer_sync(&hcd->rh_timer); 2361 2362 spin_lock_irqsave(&isp1362_hcd->lock, flags); 2363
··· 2357 2358 pr_debug("%s:\n", __func__); 2359 2360 + timer_delete_sync(&hcd->rh_timer); 2361 2362 spin_lock_irqsave(&isp1362_hcd->lock, flags); 2363
+1 -1
drivers/usb/host/ohci-hcd.c
··· 1003 1004 if (quirk_nec(ohci)) 1005 flush_work(&ohci->nec_work); 1006 - del_timer_sync(&ohci->io_watchdog); 1007 ohci->prev_frame_no = IO_WATCHDOG_OFF; 1008 1009 ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
··· 1003 1004 if (quirk_nec(ohci)) 1005 flush_work(&ohci->nec_work); 1006 + timer_delete_sync(&ohci->io_watchdog); 1007 ohci->prev_frame_no = IO_WATCHDOG_OFF; 1008 1009 ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
+1 -1
drivers/usb/host/ohci-hub.c
··· 315 spin_unlock_irq (&ohci->lock); 316 317 if (rc == 0) { 318 - del_timer_sync(&ohci->io_watchdog); 319 ohci->prev_frame_no = IO_WATCHDOG_OFF; 320 } 321 return rc;
··· 315 spin_unlock_irq (&ohci->lock); 316 317 if (rc == 0) { 318 + timer_delete_sync(&ohci->io_watchdog); 319 ohci->prev_frame_no = IO_WATCHDOG_OFF; 320 } 321 return rc;
+3 -3
drivers/usb/host/oxu210hp-hcd.c
··· 1127 qh_put(oxu->async); 1128 oxu->async = NULL; 1129 1130 - del_timer(&oxu->urb_timer); 1131 1132 oxu->periodic = NULL; 1133 ··· 3154 ehci_port_power(oxu, 0); 3155 3156 /* no more interrupts ... */ 3157 - del_timer_sync(&oxu->watchdog); 3158 3159 spin_lock_irq(&oxu->lock); 3160 if (HC_IS_RUNNING(hcd->state)) ··· 3887 3888 spin_unlock_irq(&oxu->lock); 3889 /* turn off now-idle HC */ 3890 - del_timer_sync(&oxu->watchdog); 3891 spin_lock_irq(&oxu->lock); 3892 ehci_halt(oxu); 3893 hcd->state = HC_STATE_SUSPENDED;
··· 1127 qh_put(oxu->async); 1128 oxu->async = NULL; 1129 1130 + timer_delete(&oxu->urb_timer); 1131 1132 oxu->periodic = NULL; 1133 ··· 3154 ehci_port_power(oxu, 0); 3155 3156 /* no more interrupts ... */ 3157 + timer_delete_sync(&oxu->watchdog); 3158 3159 spin_lock_irq(&oxu->lock); 3160 if (HC_IS_RUNNING(hcd->state)) ··· 3887 3888 spin_unlock_irq(&oxu->lock); 3889 /* turn off now-idle HC */ 3890 + timer_delete_sync(&oxu->watchdog); 3891 spin_lock_irq(&oxu->lock); 3892 ehci_halt(oxu); 3893 hcd->state = HC_STATE_SUSPENDED;
+1 -1
drivers/usb/host/r8a66597-hcd.c
··· 2384 struct r8a66597 *r8a66597 = platform_get_drvdata(pdev); 2385 struct usb_hcd *hcd = r8a66597_to_hcd(r8a66597); 2386 2387 - del_timer_sync(&r8a66597->rh_timer); 2388 usb_remove_hcd(hcd); 2389 iounmap(r8a66597->reg); 2390 if (r8a66597->pdata->on_chip)
··· 2384 struct r8a66597 *r8a66597 = platform_get_drvdata(pdev); 2385 struct usb_hcd *hcd = r8a66597_to_hcd(r8a66597); 2386 2387 + timer_delete_sync(&r8a66597->rh_timer); 2388 usb_remove_hcd(hcd); 2389 iounmap(r8a66597->reg); 2390 if (r8a66597->pdata->on_chip)
+1 -1
drivers/usb/host/sl811-hcd.c
··· 1515 struct sl811 *sl811 = hcd_to_sl811(hcd); 1516 unsigned long flags; 1517 1518 - del_timer_sync(&hcd->rh_timer); 1519 1520 spin_lock_irqsave(&sl811->lock, flags); 1521 port_power(sl811, 0);
··· 1515 struct sl811 *sl811 = hcd_to_sl811(hcd); 1516 unsigned long flags; 1517 1518 + timer_delete_sync(&hcd->rh_timer); 1519 1520 spin_lock_irqsave(&sl811->lock, flags); 1521 port_power(sl811, 0);
+1 -1
drivers/usb/host/uhci-hcd.c
··· 716 spin_unlock_irq(&uhci->lock); 717 synchronize_irq(hcd->irq); 718 719 - del_timer_sync(&uhci->fsbr_timer); 720 release_uhci(uhci); 721 } 722
··· 716 spin_unlock_irq(&uhci->lock); 717 synchronize_irq(hcd->irq); 718 719 + timer_delete_sync(&uhci->fsbr_timer); 720 release_uhci(uhci); 721 } 722
+1 -1
drivers/usb/host/uhci-q.c
··· 84 uhci_fsbr_on(uhci); 85 else if (uhci->fsbr_expiring) { 86 uhci->fsbr_expiring = 0; 87 - del_timer(&uhci->fsbr_timer); 88 } 89 } 90 }
··· 84 uhci_fsbr_on(uhci); 85 else if (uhci->fsbr_expiring) { 86 uhci->fsbr_expiring = 0; 87 + timer_delete(&uhci->fsbr_timer); 88 } 89 } 90 }
+2 -2
drivers/usb/host/xen-hcd.c
··· 327 } 328 spin_unlock_irq(&info->lock); 329 330 - del_timer_sync(&info->watchdog); 331 332 return ret; 333 } ··· 1307 { 1308 struct xenhcd_info *info = xenhcd_hcd_to_info(hcd); 1309 1310 - del_timer_sync(&info->watchdog); 1311 spin_lock_irq(&info->lock); 1312 /* cancel all urbs */ 1313 hcd->state = HC_STATE_HALT;
··· 327 } 328 spin_unlock_irq(&info->lock); 329 330 + timer_delete_sync(&info->watchdog); 331 332 return ret; 333 } ··· 1307 { 1308 struct xenhcd_info *info = xenhcd_hcd_to_info(hcd); 1309 1310 + timer_delete_sync(&info->watchdog); 1311 spin_lock_irq(&info->lock); 1312 /* cancel all urbs */ 1313 hcd->state = HC_STATE_HALT;
+1 -1
drivers/usb/host/xhci-hub.c
··· 926 if ((xhci->port_status_u0 != all_ports_seen_u0) && port_in_u0) { 927 xhci->port_status_u0 |= 1 << wIndex; 928 if (xhci->port_status_u0 == all_ports_seen_u0) { 929 - del_timer_sync(&xhci->comp_mode_recovery_timer); 930 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 931 "All USB3 ports have entered U0 already!"); 932 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
··· 926 if ((xhci->port_status_u0 != all_ports_seen_u0) && port_in_u0) { 927 xhci->port_status_u0 |= 1 << wIndex; 928 if (xhci->port_status_u0 == all_ports_seen_u0) { 929 + timer_delete_sync(&xhci->comp_mode_recovery_timer); 930 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 931 "All USB3 ports have entered U0 already!"); 932 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+2 -2
drivers/usb/host/xhci-mtk.c
··· 746 747 xhci_dbg(xhci, "%s: stop port polling\n", __func__); 748 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 749 - del_timer_sync(&hcd->rh_timer); 750 if (shared_hcd) { 751 clear_bit(HCD_FLAG_POLL_RH, &shared_hcd->flags); 752 - del_timer_sync(&shared_hcd->rh_timer); 753 } 754 755 ret = xhci_mtk_host_disable(mtk);
··· 746 747 xhci_dbg(xhci, "%s: stop port polling\n", __func__); 748 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 749 + timer_delete_sync(&hcd->rh_timer); 750 if (shared_hcd) { 751 clear_bit(HCD_FLAG_POLL_RH, &shared_hcd->flags); 752 + timer_delete_sync(&shared_hcd->rh_timer); 753 } 754 755 ret = xhci_mtk_host_disable(mtk);
+7 -7
drivers/usb/host/xhci.c
··· 627 /* Deleting Compliance Mode Recovery Timer */ 628 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && 629 (!(xhci_all_ports_seen_u0(xhci)))) { 630 - del_timer_sync(&xhci->comp_mode_recovery_timer); 631 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 632 "%s: compliance mode recovery timer deleted", 633 __func__); ··· 672 xhci_dbg(xhci, "%s: stopping usb%d port polling.\n", 673 __func__, hcd->self.busnum); 674 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 675 - del_timer_sync(&hcd->rh_timer); 676 677 if (xhci->shared_hcd) { 678 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); 679 - del_timer_sync(&xhci->shared_hcd->rh_timer); 680 } 681 682 spin_lock_irq(&xhci->lock); ··· 908 xhci_dbg(xhci, "%s: stopping usb%d port polling.\n", 909 __func__, hcd->self.busnum); 910 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 911 - del_timer_sync(&hcd->rh_timer); 912 if (xhci->shared_hcd) { 913 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); 914 - del_timer_sync(&xhci->shared_hcd->rh_timer); 915 } 916 917 if (xhci->quirks & XHCI_SUSPEND_DELAY) ··· 978 */ 979 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && 980 (!(xhci_all_ports_seen_u0(xhci)))) { 981 - del_timer_sync(&xhci->comp_mode_recovery_timer); 982 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 983 "%s: compliance mode recovery timer deleted", 984 __func__); ··· 1071 if (power_lost) { 1072 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && 1073 !(xhci_all_ports_seen_u0(xhci))) { 1074 - del_timer_sync(&xhci->comp_mode_recovery_timer); 1075 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 1076 "Compliance Mode Recovery Timer deleted!"); 1077 }
··· 627 /* Deleting Compliance Mode Recovery Timer */ 628 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && 629 (!(xhci_all_ports_seen_u0(xhci)))) { 630 + timer_delete_sync(&xhci->comp_mode_recovery_timer); 631 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 632 "%s: compliance mode recovery timer deleted", 633 __func__); ··· 672 xhci_dbg(xhci, "%s: stopping usb%d port polling.\n", 673 __func__, hcd->self.busnum); 674 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 675 + timer_delete_sync(&hcd->rh_timer); 676 677 if (xhci->shared_hcd) { 678 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); 679 + timer_delete_sync(&xhci->shared_hcd->rh_timer); 680 } 681 682 spin_lock_irq(&xhci->lock); ··· 908 xhci_dbg(xhci, "%s: stopping usb%d port polling.\n", 909 __func__, hcd->self.busnum); 910 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 911 + timer_delete_sync(&hcd->rh_timer); 912 if (xhci->shared_hcd) { 913 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); 914 + timer_delete_sync(&xhci->shared_hcd->rh_timer); 915 } 916 917 if (xhci->quirks & XHCI_SUSPEND_DELAY) ··· 978 */ 979 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && 980 (!(xhci_all_ports_seen_u0(xhci)))) { 981 + timer_delete_sync(&xhci->comp_mode_recovery_timer); 982 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 983 "%s: compliance mode recovery timer deleted", 984 __func__); ··· 1071 if (power_lost) { 1072 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && 1073 !(xhci_all_ports_seen_u0(xhci))) { 1074 + timer_delete_sync(&xhci->comp_mode_recovery_timer); 1075 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 1076 "Compliance Mode Recovery Timer deleted!"); 1077 }
+1 -1
drivers/usb/isp1760/isp1760-hcd.c
··· 2458 { 2459 struct isp1760_hcd *priv = hcd_to_priv(hcd); 2460 2461 - del_timer(&errata2_timer); 2462 2463 isp1760_hub_control(hcd, ClearPortFeature, USB_PORT_FEAT_POWER, 1, 2464 NULL, 0);
··· 2458 { 2459 struct isp1760_hcd *priv = hcd_to_priv(hcd); 2460 2461 + timer_delete(&errata2_timer); 2462 2463 isp1760_hub_control(hcd, ClearPortFeature, USB_PORT_FEAT_POWER, 1, 2464 NULL, 0);
+2 -2
drivers/usb/isp1760/isp1760-udc.c
··· 1145 if (udc->driver->disconnect) 1146 udc->driver->disconnect(&udc->gadget); 1147 1148 - del_timer(&udc->vbus_timer); 1149 1150 /* TODO Reset all endpoints ? */ 1151 } ··· 1314 1315 dev_dbg(udc->isp->dev, "%s\n", __func__); 1316 1317 - del_timer_sync(&udc->vbus_timer); 1318 1319 isp1760_reg_write(udc->regs, mode_reg, 0); 1320
··· 1145 if (udc->driver->disconnect) 1146 udc->driver->disconnect(&udc->gadget); 1147 1148 + timer_delete(&udc->vbus_timer); 1149 1150 /* TODO Reset all endpoints ? */ 1151 } ··· 1314 1315 dev_dbg(udc->isp->dev, "%s\n", __func__); 1316 1317 + timer_delete_sync(&udc->vbus_timer); 1318 1319 isp1760_reg_write(udc->regs, mode_reg, 0); 1320
+1 -1
drivers/usb/misc/usbtest.c
··· 626 mod_timer(&timeout.timer, jiffies + 627 msecs_to_jiffies(SIMPLE_IO_TIMEOUT)); 628 usb_sg_wait(req); 629 - if (!del_timer_sync(&timeout.timer)) 630 retval = -ETIMEDOUT; 631 else 632 retval = req->status;
··· 626 mod_timer(&timeout.timer, jiffies + 627 msecs_to_jiffies(SIMPLE_IO_TIMEOUT)); 628 usb_sg_wait(req); 629 + if (!timer_delete_sync(&timeout.timer)) 630 retval = -ETIMEDOUT; 631 else 632 retval = req->status;
+3 -3
drivers/usb/musb/da8xx.c
··· 204 musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)) { 205 dev_dbg(musb->controller, "%s active, deleting timer\n", 206 usb_otg_state_string(musb->xceiv->otg->state)); 207 - del_timer(&musb->dev_timer); 208 last_timer = jiffies; 209 return; 210 } ··· 290 MUSB_HST_MODE(musb); 291 musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; 292 portstate(musb->port1_status |= USB_PORT_STAT_POWER); 293 - del_timer(&musb->dev_timer); 294 } else if (!(musb->int_usb & MUSB_INTR_BABBLE)) { 295 /* 296 * When babble condition happens, drvvbus interrupt ··· 419 { 420 struct da8xx_glue *glue = dev_get_drvdata(musb->controller->parent); 421 422 - del_timer_sync(&musb->dev_timer); 423 424 phy_power_off(glue->phy); 425 phy_exit(glue->phy);
··· 204 musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)) { 205 dev_dbg(musb->controller, "%s active, deleting timer\n", 206 usb_otg_state_string(musb->xceiv->otg->state)); 207 + timer_delete(&musb->dev_timer); 208 last_timer = jiffies; 209 return; 210 } ··· 290 MUSB_HST_MODE(musb); 291 musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; 292 portstate(musb->port1_status |= USB_PORT_STAT_POWER); 293 + timer_delete(&musb->dev_timer); 294 } else if (!(musb->int_usb & MUSB_INTR_BABBLE)) { 295 /* 296 * When babble condition happens, drvvbus interrupt ··· 419 { 420 struct da8xx_glue *glue = dev_get_drvdata(musb->controller->parent); 421 422 + timer_delete_sync(&musb->dev_timer); 423 424 phy_power_off(glue->phy); 425 phy_exit(glue->phy);
+2 -2
drivers/usb/musb/mpfs.c
··· 165 musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)) { 166 dev_dbg(musb->controller, "%s active, deleting timer\n", 167 usb_otg_state_string(musb->xceiv->otg->state)); 168 - del_timer(&musb->dev_timer); 169 last_timer = jiffies; 170 return; 171 } ··· 232 233 static int mpfs_musb_exit(struct musb *musb) 234 { 235 - del_timer_sync(&musb->dev_timer); 236 237 return 0; 238 }
··· 165 musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)) { 166 dev_dbg(musb->controller, "%s active, deleting timer\n", 167 usb_otg_state_string(musb->xceiv->otg->state)); 168 + timer_delete(&musb->dev_timer); 169 last_timer = jiffies; 170 return; 171 } ··· 232 233 static int mpfs_musb_exit(struct musb *musb) 234 { 235 + timer_delete_sync(&musb->dev_timer); 236 237 return 0; 238 }
+2 -2
drivers/usb/musb/musb_core.c
··· 921 musb_set_state(musb, OTG_STATE_B_HOST); 922 if (musb->hcd) 923 musb->hcd->self.is_b_host = 1; 924 - del_timer(&musb->otg_timer); 925 break; 926 default: 927 if ((devctl & MUSB_DEVCTL_VBUS) ··· 1015 + msecs_to_jiffies(TA_WAIT_BCON(musb))); 1016 break; 1017 case OTG_STATE_A_PERIPHERAL: 1018 - del_timer(&musb->otg_timer); 1019 musb_g_reset(musb); 1020 break; 1021 case OTG_STATE_B_WAIT_ACON:
··· 921 musb_set_state(musb, OTG_STATE_B_HOST); 922 if (musb->hcd) 923 musb->hcd->self.is_b_host = 1; 924 + timer_delete(&musb->otg_timer); 925 break; 926 default: 927 if ((devctl & MUSB_DEVCTL_VBUS) ··· 1015 + msecs_to_jiffies(TA_WAIT_BCON(musb))); 1016 break; 1017 case OTG_STATE_A_PERIPHERAL: 1018 + timer_delete(&musb->otg_timer); 1019 musb_g_reset(musb); 1020 break; 1021 case OTG_STATE_B_WAIT_ACON:
+4 -4
drivers/usb/musb/musb_dsps.c
··· 201 musb_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap); 202 musb_writel(reg_base, wrp->epintr_clear, 203 wrp->txep_bitmap | wrp->rxep_bitmap); 204 - del_timer_sync(&musb->dev_timer); 205 } 206 207 /* Caller must take musb->lock */ ··· 215 int skip_session = 0; 216 217 if (glue->vbus_irq) 218 - del_timer(&musb->dev_timer); 219 220 /* 221 * We poll because DSPS IP's won't expose several OTG-critical ··· 499 struct device *dev = musb->controller; 500 struct dsps_glue *glue = dev_get_drvdata(dev->parent); 501 502 - del_timer_sync(&musb->dev_timer); 503 phy_power_off(musb->phy); 504 phy_exit(musb->phy); 505 debugfs_remove_recursive(glue->dbgfs_root); ··· 983 return ret; 984 } 985 986 - del_timer_sync(&musb->dev_timer); 987 988 mbase = musb->ctrl_base; 989 glue->context.control = musb_readl(mbase, wrp->control);
··· 201 musb_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap); 202 musb_writel(reg_base, wrp->epintr_clear, 203 wrp->txep_bitmap | wrp->rxep_bitmap); 204 + timer_delete_sync(&musb->dev_timer); 205 } 206 207 /* Caller must take musb->lock */ ··· 215 int skip_session = 0; 216 217 if (glue->vbus_irq) 218 + timer_delete(&musb->dev_timer); 219 220 /* 221 * We poll because DSPS IP's won't expose several OTG-critical ··· 499 struct device *dev = musb->controller; 500 struct dsps_glue *glue = dev_get_drvdata(dev->parent); 501 502 + timer_delete_sync(&musb->dev_timer); 503 phy_power_off(musb->phy); 504 phy_exit(musb->phy); 505 debugfs_remove_recursive(glue->dbgfs_root); ··· 983 return ret; 984 } 985 986 + timer_delete_sync(&musb->dev_timer); 987 988 mbase = musb->ctrl_base; 989 glue->context.control = musb_readl(mbase, wrp->control);
+4 -4
drivers/usb/musb/tusb6010.c
··· 525 && (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON))) { 526 dev_dbg(musb->controller, "%s active, deleting timer\n", 527 usb_otg_state_string(musb->xceiv->otg->state)); 528 - del_timer(&musb->dev_timer); 529 last_timer = jiffies; 530 return; 531 } ··· 875 } 876 877 if (int_src & TUSB_INT_SRC_USB_IP_CONN) 878 - del_timer(&musb->dev_timer); 879 880 /* OTG state change reports (annoyingly) not issued by Mentor core */ 881 if (int_src & (TUSB_INT_SRC_VBUS_SENSE_CHNG ··· 984 musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff); 985 musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff); 986 987 - del_timer(&musb->dev_timer); 988 989 if (is_dma_capable() && !dma_off) { 990 printk(KERN_WARNING "%s %s: dma still active\n", ··· 1174 { 1175 struct tusb6010_glue *glue = dev_get_drvdata(musb->controller->parent); 1176 1177 - del_timer_sync(&musb->dev_timer); 1178 the_musb = NULL; 1179 1180 gpiod_set_value(glue->enable, 0);
··· 525 && (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON))) { 526 dev_dbg(musb->controller, "%s active, deleting timer\n", 527 usb_otg_state_string(musb->xceiv->otg->state)); 528 + timer_delete(&musb->dev_timer); 529 last_timer = jiffies; 530 return; 531 } ··· 875 } 876 877 if (int_src & TUSB_INT_SRC_USB_IP_CONN) 878 + timer_delete(&musb->dev_timer); 879 880 /* OTG state change reports (annoyingly) not issued by Mentor core */ 881 if (int_src & (TUSB_INT_SRC_VBUS_SENSE_CHNG ··· 984 musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff); 985 musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff); 986 987 + timer_delete(&musb->dev_timer); 988 989 if (is_dma_capable() && !dma_off) { 990 printk(KERN_WARNING "%s %s: dma still active\n", ··· 1174 { 1175 struct tusb6010_glue *glue = dev_get_drvdata(musb->controller->parent); 1176 1177 + timer_delete_sync(&musb->dev_timer); 1178 the_musb = NULL; 1179 1180 gpiod_set_value(glue->enable, 0);
+1 -1
drivers/usb/phy/phy-mv-usb.c
··· 110 timer = &mvotg->otg_ctrl.timer[id]; 111 112 if (timer_pending(timer)) 113 - del_timer(timer); 114 115 return 0; 116 }
··· 110 timer = &mvotg->otg_ctrl.timer[id]; 111 112 if (timer_pending(timer)) 113 + timer_delete(timer); 114 115 return 0; 116 }
+1 -1
drivers/usb/storage/realtek_cr.c
··· 934 935 #ifdef CONFIG_REALTEK_AUTOPM 936 if (ss_en) { 937 - del_timer(&chip->rts51x_suspend_timer); 938 chip->timer_expires = 0; 939 } 940 #endif
··· 934 935 #ifdef CONFIG_REALTEK_AUTOPM 936 if (ss_en) { 937 + timer_delete(&chip->rts51x_suspend_timer); 938 chip->timer_expires = 0; 939 } 940 #endif
+1 -1
drivers/video/fbdev/aty/radeon_backlight.c
··· 59 */ 60 level = backlight_get_brightness(bd); 61 62 - del_timer_sync(&rinfo->lvds_timer); 63 radeon_engine_idle(); 64 65 lvds_gen_cntl = INREG(LVDS_GEN_CNTL);
··· 59 */ 60 level = backlight_get_brightness(bd); 61 62 + timer_delete_sync(&rinfo->lvds_timer); 63 radeon_engine_idle(); 64 65 lvds_gen_cntl = INREG(LVDS_GEN_CNTL);
+2 -2
drivers/video/fbdev/aty/radeon_base.c
··· 1082 } 1083 break; 1084 case MT_LCD: 1085 - del_timer_sync(&rinfo->lvds_timer); 1086 val = INREG(LVDS_GEN_CNTL); 1087 if (unblank) { 1088 u32 target_val = (val & ~LVDS_DISPLAY_DIS) | LVDS_BLON | LVDS_ON ··· 2516 if (rinfo->mon2_EDID) 2517 sysfs_remove_bin_file(&rinfo->pdev->dev.kobj, &edid2_attr); 2518 2519 - del_timer_sync(&rinfo->lvds_timer); 2520 arch_phys_wc_del(rinfo->wc_cookie); 2521 radeonfb_bl_exit(rinfo); 2522 unregister_framebuffer(info);
··· 1082 } 1083 break; 1084 case MT_LCD: 1085 + timer_delete_sync(&rinfo->lvds_timer); 1086 val = INREG(LVDS_GEN_CNTL); 1087 if (unblank) { 1088 u32 target_val = (val & ~LVDS_DISPLAY_DIS) | LVDS_BLON | LVDS_ON ··· 2516 if (rinfo->mon2_EDID) 2517 sysfs_remove_bin_file(&rinfo->pdev->dev.kobj, &edid2_attr); 2518 2519 + timer_delete_sync(&rinfo->lvds_timer); 2520 arch_phys_wc_del(rinfo->wc_cookie); 2521 radeonfb_bl_exit(rinfo); 2522 unregister_framebuffer(info);
+1 -1
drivers/video/fbdev/aty/radeon_pm.c
··· 2650 /* Sleep */ 2651 rinfo->asleep = 1; 2652 rinfo->lock_blank = 1; 2653 - del_timer_sync(&rinfo->lvds_timer); 2654 2655 #ifdef CONFIG_PPC_PMAC 2656 /* On powermac, we have hooks to properly suspend/resume AGP now,
··· 2650 /* Sleep */ 2651 rinfo->asleep = 1; 2652 rinfo->lock_blank = 1; 2653 + timer_delete_sync(&rinfo->lvds_timer); 2654 2655 #ifdef CONFIG_PPC_PMAC 2656 /* On powermac, we have hooks to properly suspend/resume AGP now,
+1 -1
drivers/video/fbdev/omap/hwa742.c
··· 597 break; 598 case OMAPFB_AUTO_UPDATE: 599 hwa742.stop_auto_update = 1; 600 - del_timer_sync(&hwa742.auto_update_timer); 601 break; 602 case OMAPFB_UPDATE_DISABLED: 603 break;
··· 597 break; 598 case OMAPFB_AUTO_UPDATE: 599 hwa742.stop_auto_update = 1; 600 + timer_delete_sync(&hwa742.auto_update_timer); 601 break; 602 case OMAPFB_UPDATE_DISABLED: 603 break;
+1 -1
drivers/video/fbdev/omap2/omapfb/dss/dsi.c
··· 835 836 #ifdef DSI_CATCH_MISSING_TE 837 if (irqstatus & DSI_IRQ_TE_TRIGGER) 838 - del_timer(&dsi->te_timer); 839 #endif 840 841 /* make a copy and unlock, so that isrs can unregister
··· 835 836 #ifdef DSI_CATCH_MISSING_TE 837 if (irqstatus & DSI_IRQ_TE_TRIGGER) 838 + timer_delete(&dsi->te_timer); 839 #endif 840 841 /* make a copy and unlock, so that isrs can unregister
+1 -1
drivers/virt/vboxguest/vboxguest_core.c
··· 495 */ 496 static void vbg_heartbeat_exit(struct vbg_dev *gdev) 497 { 498 - del_timer_sync(&gdev->heartbeat_timer); 499 vbg_heartbeat_host_config(gdev, false); 500 vbg_req_free(gdev->guest_heartbeat_req, 501 sizeof(*gdev->guest_heartbeat_req));
··· 495 */ 496 static void vbg_heartbeat_exit(struct vbg_dev *gdev) 497 { 498 + timer_delete_sync(&gdev->heartbeat_timer); 499 vbg_heartbeat_host_config(gdev, false); 500 vbg_req_free(gdev->guest_heartbeat_req, 501 sizeof(*gdev->guest_heartbeat_req));
+2 -2
drivers/watchdog/alim7101_wdt.c
··· 166 static void wdt_turnoff(void) 167 { 168 /* Stop the timer */ 169 - del_timer_sync(&timer); 170 wdt_change(WDT_DISABLE); 171 pr_info("Watchdog timer is now disabled...\n"); 172 } ··· 223 if (wdt_expect_close == 42) 224 wdt_turnoff(); 225 else { 226 - /* wim: shouldn't there be a: del_timer(&timer); */ 227 pr_crit("device file closed unexpectedly. Will not stop the WDT!\n"); 228 } 229 clear_bit(0, &wdt_is_open);
··· 166 static void wdt_turnoff(void) 167 { 168 /* Stop the timer */ 169 + timer_delete_sync(&timer); 170 wdt_change(WDT_DISABLE); 171 pr_info("Watchdog timer is now disabled...\n"); 172 } ··· 223 if (wdt_expect_close == 42) 224 wdt_turnoff(); 225 else { 226 + /* wim: shouldn't there be a: timer_delete(&timer); */ 227 pr_crit("device file closed unexpectedly. Will not stop the WDT!\n"); 228 } 229 clear_bit(0, &wdt_is_open);
+2 -2
drivers/watchdog/at91sam9_wdt.c
··· 242 return 0; 243 244 out_stop_timer: 245 - del_timer(&wdt->timer); 246 return err; 247 } 248 ··· 378 watchdog_unregister_device(&wdt->wdd); 379 380 pr_warn("I quit now, hardware will probably reboot!\n"); 381 - del_timer(&wdt->timer); 382 } 383 384 #if defined(CONFIG_OF)
··· 242 return 0; 243 244 out_stop_timer: 245 + timer_delete(&wdt->timer); 246 return err; 247 } 248 ··· 378 watchdog_unregister_device(&wdt->wdd); 379 380 pr_warn("I quit now, hardware will probably reboot!\n"); 381 + timer_delete(&wdt->timer); 382 } 383 384 #if defined(CONFIG_OF)
+2 -2
drivers/watchdog/bcm47xx_wdt.c
··· 139 { 140 struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd); 141 142 - del_timer_sync(&wdt->soft_timer); 143 wdt->timer_set(wdt, 0); 144 145 return 0; ··· 213 214 err_timer: 215 if (soft) 216 - del_timer_sync(&wdt->soft_timer); 217 218 return ret; 219 }
··· 139 { 140 struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd); 141 142 + timer_delete_sync(&wdt->soft_timer); 143 wdt->timer_set(wdt, 0); 144 145 return 0; ··· 213 214 err_timer: 215 if (soft) 216 + timer_delete_sync(&wdt->soft_timer); 217 218 return ret; 219 }
+2 -2
drivers/watchdog/cpwd.c
··· 240 * were called directly instead of by kernel timer 241 */ 242 if (timer_pending(&cpwd_timer)) 243 - del_timer(&cpwd_timer); 244 245 for (id = 0; id < WD_NUMDEVS; id++) { 246 if (p->devs[id].runstatus & WD_STAT_BSTOP) { ··· 629 } 630 631 if (p->broken) 632 - del_timer_sync(&cpwd_timer); 633 634 if (p->initialized) 635 free_irq(p->irq, p);
··· 240 * were called directly instead of by kernel timer 241 */ 242 if (timer_pending(&cpwd_timer)) 243 + timer_delete(&cpwd_timer); 244 245 for (id = 0; id < WD_NUMDEVS; id++) { 246 if (p->devs[id].runstatus & WD_STAT_BSTOP) { ··· 629 } 630 631 if (p->broken) 632 + timer_delete_sync(&cpwd_timer); 633 634 if (p->initialized) 635 free_irq(p->irq, p);
+2 -2
drivers/watchdog/lpc18xx_wdt.c
··· 135 unsigned int val; 136 137 if (timer_pending(&lpc18xx_wdt->timer)) 138 - del_timer(&lpc18xx_wdt->timer); 139 140 val = readl(lpc18xx_wdt->base + LPC18XX_WDT_MOD); 141 val |= LPC18XX_WDT_MOD_WDEN; ··· 266 struct lpc18xx_wdt_dev *lpc18xx_wdt = platform_get_drvdata(pdev); 267 268 dev_warn(&pdev->dev, "I quit now, hardware will probably reboot!\n"); 269 - del_timer_sync(&lpc18xx_wdt->timer); 270 } 271 272 static const struct of_device_id lpc18xx_wdt_match[] = {
··· 135 unsigned int val; 136 137 if (timer_pending(&lpc18xx_wdt->timer)) 138 + timer_delete(&lpc18xx_wdt->timer); 139 140 val = readl(lpc18xx_wdt->base + LPC18XX_WDT_MOD); 141 val |= LPC18XX_WDT_MOD_WDEN; ··· 266 struct lpc18xx_wdt_dev *lpc18xx_wdt = platform_get_drvdata(pdev); 267 268 dev_warn(&pdev->dev, "I quit now, hardware will probably reboot!\n"); 269 + timer_delete_sync(&lpc18xx_wdt->timer); 270 } 271 272 static const struct of_device_id lpc18xx_wdt_match[] = {
+2 -2
drivers/watchdog/machzwd.c
··· 189 unsigned long flags; 190 191 /* stop internal ping */ 192 - del_timer_sync(&zf_timer); 193 194 spin_lock_irqsave(&zf_port_lock, flags); 195 /* stop watchdog timer */ ··· 337 if (zf_expect_close == 42) 338 zf_timer_off(); 339 else { 340 - del_timer(&zf_timer); 341 pr_err("device file closed unexpectedly. Will not stop the WDT!\n"); 342 } 343 clear_bit(0, &zf_is_open);
··· 189 unsigned long flags; 190 191 /* stop internal ping */ 192 + timer_delete_sync(&zf_timer); 193 194 spin_lock_irqsave(&zf_port_lock, flags); 195 /* stop watchdog timer */ ··· 337 if (zf_expect_close == 42) 338 zf_timer_off(); 339 else { 340 + timer_delete(&zf_timer); 341 pr_err("device file closed unexpectedly. Will not stop the WDT!\n"); 342 } 343 clear_bit(0, &zf_is_open);
+2 -2
drivers/watchdog/mixcomwd.c
··· 141 __module_get(THIS_MODULE); 142 else { 143 if (mixcomwd_timer_alive) { 144 - del_timer(&mixcomwd_timer); 145 mixcomwd_timer_alive = 0; 146 } 147 } ··· 295 if (!nowayout) { 296 if (mixcomwd_timer_alive) { 297 pr_warn("I quit now, hardware will probably reboot!\n"); 298 - del_timer_sync(&mixcomwd_timer); 299 mixcomwd_timer_alive = 0; 300 } 301 }
··· 141 __module_get(THIS_MODULE); 142 else { 143 if (mixcomwd_timer_alive) { 144 + timer_delete(&mixcomwd_timer); 145 mixcomwd_timer_alive = 0; 146 } 147 } ··· 295 if (!nowayout) { 296 if (mixcomwd_timer_alive) { 297 pr_warn("I quit now, hardware will probably reboot!\n"); 298 + timer_delete_sync(&mixcomwd_timer); 299 mixcomwd_timer_alive = 0; 300 } 301 }
+1 -1
drivers/watchdog/pcwd.c
··· 432 int stat_reg; 433 434 /* Stop the timer */ 435 - del_timer(&pcwd_private.timer); 436 437 /* Disable the board */ 438 if (pcwd_private.revision == PCWD_REVISION_C) {
··· 432 int stat_reg; 433 434 /* Stop the timer */ 435 + timer_delete(&pcwd_private.timer); 436 437 /* Disable the board */ 438 if (pcwd_private.revision == PCWD_REVISION_C) {
+1 -1
drivers/watchdog/pika_wdt.c
··· 129 { 130 /* stop internal ping */ 131 if (!pikawdt_private.expect_close) 132 - del_timer(&pikawdt_private.timer); 133 134 clear_bit(0, &pikawdt_private.open); 135 pikawdt_private.expect_close = 0;
··· 129 { 130 /* stop internal ping */ 131 if (!pikawdt_private.expect_close) 132 + timer_delete(&pikawdt_private.timer); 133 134 clear_bit(0, &pikawdt_private.open); 135 pikawdt_private.expect_close = 0;
+2 -2
drivers/watchdog/sbc60xxwdt.c
··· 146 static void wdt_turnoff(void) 147 { 148 /* Stop the timer */ 149 - del_timer_sync(&timer); 150 inb_p(wdt_stop); 151 pr_info("Watchdog timer is now disabled...\n"); 152 } ··· 210 if (wdt_expect_close == 42) 211 wdt_turnoff(); 212 else { 213 - del_timer(&timer); 214 pr_crit("device file closed unexpectedly. Will not stop the WDT!\n"); 215 } 216 clear_bit(0, &wdt_is_open);
··· 146 static void wdt_turnoff(void) 147 { 148 /* Stop the timer */ 149 + timer_delete_sync(&timer); 150 inb_p(wdt_stop); 151 pr_info("Watchdog timer is now disabled...\n"); 152 } ··· 210 if (wdt_expect_close == 42) 211 wdt_turnoff(); 212 else { 213 + timer_delete(&timer); 214 pr_crit("device file closed unexpectedly. Will not stop the WDT!\n"); 215 } 216 clear_bit(0, &wdt_is_open);
+1 -1
drivers/watchdog/sc520_wdt.c
··· 186 static int wdt_turnoff(void) 187 { 188 /* Stop the timer */ 189 - del_timer_sync(&timer); 190 191 /* Stop the watchdog */ 192 wdt_config(0);
··· 186 static int wdt_turnoff(void) 187 { 188 /* Stop the timer */ 189 + timer_delete_sync(&timer); 190 191 /* Stop the watchdog */ 192 wdt_config(0);
+1 -1
drivers/watchdog/shwdt.c
··· 129 130 spin_lock_irqsave(&wdt->lock, flags); 131 132 - del_timer(&wdt->timer); 133 134 csr = sh_wdt_read_csr(); 135 csr &= ~WTCSR_TME;
··· 129 130 spin_lock_irqsave(&wdt->lock, flags); 131 132 + timer_delete(&wdt->timer); 133 134 csr = sh_wdt_read_csr(); 135 csr &= ~WTCSR_TME;
+1 -1
drivers/watchdog/via_wdt.c
··· 233 static void wdt_remove(struct pci_dev *pdev) 234 { 235 watchdog_unregister_device(&wdt_dev); 236 - del_timer_sync(&timer); 237 iounmap(wdt_mem); 238 release_mem_region(mmio, VIA_WDT_MMIO_LEN); 239 release_resource(&wdt_res);
··· 233 static void wdt_remove(struct pci_dev *pdev) 234 { 235 watchdog_unregister_device(&wdt_dev); 236 + timer_delete_sync(&timer); 237 iounmap(wdt_mem); 238 release_mem_region(mmio, VIA_WDT_MMIO_LEN); 239 release_resource(&wdt_res);
+2 -2
drivers/watchdog/w83877f_wdt.c
··· 166 static void wdt_turnoff(void) 167 { 168 /* Stop the timer */ 169 - del_timer_sync(&timer); 170 171 wdt_change(WDT_DISABLE); 172 ··· 228 if (wdt_expect_close == 42) 229 wdt_turnoff(); 230 else { 231 - del_timer(&timer); 232 pr_crit("device file closed unexpectedly. Will not stop the WDT!\n"); 233 } 234 clear_bit(0, &wdt_is_open);
··· 166 static void wdt_turnoff(void) 167 { 168 /* Stop the timer */ 169 + timer_delete_sync(&timer); 170 171 wdt_change(WDT_DISABLE); 172 ··· 228 if (wdt_expect_close == 42) 229 wdt_turnoff(); 230 else { 231 + timer_delete(&timer); 232 pr_crit("device file closed unexpectedly. Will not stop the WDT!\n"); 233 } 234 clear_bit(0, &wdt_is_open);
+1 -1
fs/afs/fs_probe.c
··· 534 */ 535 void afs_fs_probe_cleanup(struct afs_net *net) 536 { 537 - if (del_timer_sync(&net->fs_probe_timer)) 538 afs_dec_servers_outstanding(net); 539 }
··· 534 */ 535 void afs_fs_probe_cleanup(struct afs_net *net) 536 { 537 + if (timer_delete_sync(&net->fs_probe_timer)) 538 afs_dec_servers_outstanding(net); 539 }
+1 -1
fs/afs/server.c
··· 318 a = atomic_inc_return(&server->active); 319 if (a == 1 && activate && 320 !test_bit(AFS_SERVER_FL_EXPIRED, &server->flags)) 321 - del_timer(&server->timer); 322 323 trace_afs_server(server->debug_id, r + 1, a, reason); 324 return server;
··· 318 a = atomic_inc_return(&server->active); 319 if (a == 1 && activate && 320 !test_bit(AFS_SERVER_FL_EXPIRED, &server->flags)) 321 + timer_delete(&server->timer); 322 323 trace_afs_server(server->debug_id, r + 1, a, reason); 324 return server;
+1 -1
fs/bcachefs/clock.c
··· 121 } while (0); 122 123 __set_current_state(TASK_RUNNING); 124 - del_timer_sync(&wait.cpu_timer); 125 destroy_timer_on_stack(&wait.cpu_timer); 126 bch2_io_timer_del(clock, &wait.io_timer); 127 }
··· 121 } while (0); 122 123 __set_current_state(TASK_RUNNING); 124 + timer_delete_sync(&wait.cpu_timer); 125 destroy_timer_on_stack(&wait.cpu_timer); 126 bch2_io_timer_del(clock, &wait.io_timer); 127 }
+1 -1
fs/btrfs/zstd.c
··· 225 } 226 spin_unlock_bh(&wsm.lock); 227 228 - del_timer_sync(&wsm.timer); 229 } 230 231 /*
··· 225 } 226 spin_unlock_bh(&wsm.lock); 227 228 + timer_delete_sync(&wsm.timer); 229 } 230 231 /*
+1 -1
fs/ext4/super.c
··· 5680 /* flush s_sb_upd_work before sbi destroy */ 5681 flush_work(&sbi->s_sb_upd_work); 5682 ext4_stop_mmpd(sbi); 5683 - del_timer_sync(&sbi->s_err_report); 5684 ext4_group_desc_free(sbi); 5685 failed_mount: 5686 #if IS_ENABLED(CONFIG_UNICODE)
··· 5680 /* flush s_sb_upd_work before sbi destroy */ 5681 flush_work(&sbi->s_sb_upd_work); 5682 ext4_stop_mmpd(sbi); 5683 + timer_delete_sync(&sbi->s_err_report); 5684 ext4_group_desc_free(sbi); 5685 failed_mount: 5686 #if IS_ENABLED(CONFIG_UNICODE)
+2 -2
fs/jbd2/journal.c
··· 197 if (journal->j_commit_sequence != journal->j_commit_request) { 198 jbd2_debug(1, "OK, requests differ\n"); 199 write_unlock(&journal->j_state_lock); 200 - del_timer_sync(&journal->j_commit_timer); 201 jbd2_journal_commit_transaction(journal); 202 write_lock(&journal->j_state_lock); 203 goto loop; ··· 246 goto loop; 247 248 end_loop: 249 - del_timer_sync(&journal->j_commit_timer); 250 journal->j_task = NULL; 251 wake_up(&journal->j_wait_done_commit); 252 jbd2_debug(1, "Journal thread exiting.\n");
··· 197 if (journal->j_commit_sequence != journal->j_commit_request) { 198 jbd2_debug(1, "OK, requests differ\n"); 199 write_unlock(&journal->j_state_lock); 200 + timer_delete_sync(&journal->j_commit_timer); 201 jbd2_journal_commit_transaction(journal); 202 write_lock(&journal->j_state_lock); 203 goto loop; ··· 246 goto loop; 247 248 end_loop: 249 + timer_delete_sync(&journal->j_commit_timer); 250 journal->j_task = NULL; 251 wake_up(&journal->j_wait_done_commit); 252 jbd2_debug(1, "Journal thread exiting.\n");
+1 -1
fs/jffs2/wbuf.c
··· 584 size_t retlen; 585 586 /* Nothing to do if not write-buffering the flash. In particular, we shouldn't 587 - del_timer() the timer we never initialised. */ 588 if (!jffs2_is_writebuffered(c)) 589 return 0; 590
··· 584 size_t retlen; 585 586 /* Nothing to do if not write-buffering the flash. In particular, we shouldn't 587 + call timer_delete() on the timer we never initialised. */ 588 if (!jffs2_is_writebuffered(c)) 589 return 0; 590
+1 -1
fs/nilfs2/segment.c
··· 2424 * the area protected by sc_state_lock. 2425 */ 2426 if (thread_is_alive) 2427 - del_timer_sync(&sci->sc_timer); 2428 } 2429 2430 /**
··· 2424 * the area protected by sc_state_lock. 2425 */ 2426 if (thread_is_alive) 2427 + timer_delete_sync(&sci->sc_timer); 2428 } 2429 2430 /**
+1 -1
fs/ocfs2/cluster/tcp.c
··· 724 if (o2net_unregister_callbacks(sc->sc_sock->sk, sc)) { 725 /* we shouldn't flush as we're in the thread, the 726 * races with pending sc work structs are harmless */ 727 - del_timer_sync(&sc->sc_idle_timeout); 728 o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work); 729 sc_put(sc); 730 kernel_sock_shutdown(sc->sc_sock, SHUT_RDWR);
··· 724 if (o2net_unregister_callbacks(sc->sc_sock->sk, sc)) { 725 /* we shouldn't flush as we're in the thread, the 726 * races with pending sc work structs are harmless */ 727 + timer_delete_sync(&sc->sc_idle_timeout); 728 o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work); 729 sc_put(sc); 730 kernel_sock_shutdown(sc->sc_sock, SHUT_RDWR);
+1 -1
fs/pstore/platform.c
··· 563 pstore_unregister_kmsg(); 564 565 /* Stop timer and make sure all work has finished. */ 566 - del_timer_sync(&pstore_timer); 567 flush_work(&pstore_work); 568 569 /* Remove all backend records from filesystem tree. */
··· 563 pstore_unregister_kmsg(); 564 565 /* Stop timer and make sure all work has finished. */ 566 + timer_delete_sync(&pstore_timer); 567 flush_work(&pstore_work); 568 569 /* Remove all backend records from filesystem tree. */
-2
include/linux/hrtimer.h
··· 231 /* Exported timer functions: */ 232 233 /* Initialize timers: */ 234 - extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock, 235 - enum hrtimer_mode mode); 236 extern void hrtimer_setup(struct hrtimer *timer, enum hrtimer_restart (*function)(struct hrtimer *), 237 clockid_t clock_id, enum hrtimer_mode mode); 238 extern void hrtimer_setup_on_stack(struct hrtimer *timer,
··· 231 /* Exported timer functions: */ 232 233 /* Initialize timers: */ 234 extern void hrtimer_setup(struct hrtimer *timer, enum hrtimer_restart (*function)(struct hrtimer *), 235 clockid_t clock_id, enum hrtimer_mode mode); 236 extern void hrtimer_setup_on_stack(struct hrtimer *timer,
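With hrtimer_init() dropped from the header above, a timer's callback is handed over at setup time instead of being assigned to the (now private) function field afterwards. A minimal conversion sketch for a hypothetical foo_dev driver (foo_dev, foo_timer_fn and the expiry value are illustrative, not part of this series):

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	struct foo_dev {
		struct hrtimer timer;
	};

	static enum hrtimer_restart foo_timer_fn(struct hrtimer *t)
	{
		/* hypothetical periodic work would run here */
		return HRTIMER_NORESTART;
	}

	static void foo_start(struct foo_dev *foo)
	{
		/*
		 * Previously:
		 *	hrtimer_init(&foo->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		 *	foo->timer.function = foo_timer_fn;
		 */
		hrtimer_setup(&foo->timer, foo_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		hrtimer_start(&foo->timer, ms_to_ktime(10), HRTIMER_MODE_REL);
	}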
+2 -2
include/linux/hrtimer_types.h
··· 34 * @is_hard: Set if hrtimer will be expired in hard interrupt context 35 * even on RT. 36 * 37 - * The hrtimer structure must be initialized by hrtimer_init() 38 */ 39 struct hrtimer { 40 struct timerqueue_node node; 41 ktime_t _softexpires; 42 - enum hrtimer_restart (*function)(struct hrtimer *); 43 struct hrtimer_clock_base *base; 44 u8 state; 45 u8 is_rel;
··· 34 * @is_hard: Set if hrtimer will be expired in hard interrupt context 35 * even on RT. 36 * 37 + * The hrtimer structure must be initialized by hrtimer_setup() 38 */ 39 struct hrtimer { 40 struct timerqueue_node node; 41 ktime_t _softexpires; 42 + enum hrtimer_restart (*__private function)(struct hrtimer *); 43 struct hrtimer_clock_base *base; 44 u8 state; 45 u8 is_rel;
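Because the callback pointer is now annotated __private, a direct read such as timer->function is meant to trip sparse; every remaining reader, including the hrtimer core itself, goes through ACCESS_PRIVATE(), as the trace-event, timer_list.c and hrtimer.c hunks below show. Illustrative fragment only, assuming the usual kernel headers:

	static void foo_trace(struct hrtimer *timer)
	{
		enum hrtimer_restart (*fn)(struct hrtimer *);

		/* fn = timer->function;  -- direct read now warns under sparse */
		fn = ACCESS_PRIVATE(timer, function);	/* accepted form, used treewide below */
		(void)fn;
	}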
+1 -35
include/linux/timer.h
··· 30 * 31 * @TIMER_IRQSAFE: An irqsafe timer is executed with IRQ disabled and 32 * it's safe to wait for the completion of the running instance from 33 - * IRQ handlers, for example, by calling del_timer_sync(). 34 * 35 * Note: The irq disabled callback execution is a special case for 36 * workqueue locking issues. It's not meant for executing random crap ··· 167 extern int timer_delete(struct timer_list *timer); 168 extern int timer_shutdown_sync(struct timer_list *timer); 169 extern int timer_shutdown(struct timer_list *timer); 170 - 171 - /** 172 - * del_timer_sync - Delete a pending timer and wait for a running callback 173 - * @timer: The timer to be deleted 174 - * 175 - * See timer_delete_sync() for detailed explanation. 176 - * 177 - * Do not use in new code. Use timer_delete_sync() instead. 178 - * 179 - * Returns: 180 - * * %0 - The timer was not pending 181 - * * %1 - The timer was pending and deactivated 182 - */ 183 - static inline int del_timer_sync(struct timer_list *timer) 184 - { 185 - return timer_delete_sync(timer); 186 - } 187 - 188 - /** 189 - * del_timer - Delete a pending timer 190 - * @timer: The timer to be deleted 191 - * 192 - * See timer_delete() for detailed explanation. 193 - * 194 - * Do not use in new code. Use timer_delete() instead. 195 - * 196 - * Returns: 197 - * * %0 - The timer was not pending 198 - * * %1 - The timer was pending and deactivated 199 - */ 200 - static inline int del_timer(struct timer_list *timer) 201 - { 202 - return timer_delete(timer); 203 - } 204 205 extern void init_timers(void); 206 struct hrtimer;
··· 30 * 31 * @TIMER_IRQSAFE: An irqsafe timer is executed with IRQ disabled and 32 * it's safe to wait for the completion of the running instance from 33 + * IRQ handlers, for example, by calling timer_delete_sync(). 34 * 35 * Note: The irq disabled callback execution is a special case for 36 * workqueue locking issues. It's not meant for executing random crap ··· 167 extern int timer_delete(struct timer_list *timer); 168 extern int timer_shutdown_sync(struct timer_list *timer); 169 extern int timer_shutdown(struct timer_list *timer); 170 171 extern void init_timers(void); 172 struct hrtimer;
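With the compatibility wrappers removed above, any remaining (e.g. out-of-tree) callers switch to the timer_delete*() names directly; behaviour is unchanged, since the wrappers were plain pass-throughs. A hypothetical teardown sketch (foo_dev and its members are illustrative):

	#include <linux/timer.h>

	struct foo_dev {
		struct timer_list watchdog_timer;
		struct timer_list poll_timer;
	};

	static void foo_shutdown(struct foo_dev *foo)
	{
		/* was del_timer_sync(): deactivate and wait for a running callback */
		timer_delete_sync(&foo->watchdog_timer);

		/* was del_timer(): deactivate only, no waiting */
		timer_delete(&foo->poll_timer);
	}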
+1 -1
include/net/sctp/sctp.h
··· 636 } 637 } else { 638 if (t->pl.state != SCTP_PL_DISABLED) { 639 - if (del_timer(&t->probe_timer)) 640 sctp_transport_put(t); 641 t->pl.state = SCTP_PL_DISABLED; 642 }
··· 636 } 637 } else { 638 if (t->pl.state != SCTP_PL_DISABLED) { 639 + if (timer_delete(&t->probe_timer)) 640 sctp_transport_put(t); 641 t->pl.state = SCTP_PL_DISABLED; 642 }
+4 -4
include/trace/events/timer.h
··· 185 { HRTIMER_MODE_REL_PINNED_HARD, "REL|PINNED|HARD" }) 186 187 /** 188 - * hrtimer_init - called when the hrtimer is initialized 189 * @hrtimer: pointer to struct hrtimer 190 * @clockid: the hrtimers clock 191 * @mode: the hrtimers mode 192 */ 193 - TRACE_EVENT(hrtimer_init, 194 195 TP_PROTO(struct hrtimer *hrtimer, clockid_t clockid, 196 enum hrtimer_mode mode), ··· 235 236 TP_fast_assign( 237 __entry->hrtimer = hrtimer; 238 - __entry->function = hrtimer->function; 239 __entry->expires = hrtimer_get_expires(hrtimer); 240 __entry->softexpires = hrtimer_get_softexpires(hrtimer); 241 __entry->mode = mode; ··· 271 TP_fast_assign( 272 __entry->hrtimer = hrtimer; 273 __entry->now = *now; 274 - __entry->function = hrtimer->function; 275 ), 276 277 TP_printk("hrtimer=%p function=%ps now=%llu",
··· 185 { HRTIMER_MODE_REL_PINNED_HARD, "REL|PINNED|HARD" }) 186 187 /** 188 + * hrtimer_setup - called when the hrtimer is initialized 189 * @hrtimer: pointer to struct hrtimer 190 * @clockid: the hrtimers clock 191 * @mode: the hrtimers mode 192 */ 193 + TRACE_EVENT(hrtimer_setup, 194 195 TP_PROTO(struct hrtimer *hrtimer, clockid_t clockid, 196 enum hrtimer_mode mode), ··· 235 236 TP_fast_assign( 237 __entry->hrtimer = hrtimer; 238 + __entry->function = ACCESS_PRIVATE(hrtimer, function); 239 __entry->expires = hrtimer_get_expires(hrtimer); 240 __entry->softexpires = hrtimer_get_softexpires(hrtimer); 241 __entry->mode = mode; ··· 271 TP_fast_assign( 272 __entry->hrtimer = hrtimer; 273 __entry->now = *now; 274 + __entry->function = ACCESS_PRIVATE(hrtimer, function); 275 ), 276 277 TP_printk("hrtimer=%p function=%ps now=%llu",
+1 -1
kernel/cgroup/cgroup.c
··· 1695 cfile->kn = NULL; 1696 spin_unlock_irq(&cgroup_file_kn_lock); 1697 1698 - del_timer_sync(&cfile->notify_timer); 1699 } 1700 1701 kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
··· 1695 cfile->kn = NULL; 1696 spin_unlock_irq(&cgroup_file_kn_lock); 1697 1698 + timer_delete_sync(&cfile->notify_timer); 1699 } 1700 1701 kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
+1 -1
kernel/kcsan/kcsan_test.c
··· 1500 func(); 1501 } 1502 } while (!torture_must_stop()); 1503 - del_timer_sync(&timer); 1504 destroy_timer_on_stack(&timer); 1505 1506 torture_kthread_stopping("access_thread");
··· 1500 func(); 1501 } 1502 } while (!torture_must_stop()); 1503 + timer_delete_sync(&timer); 1504 destroy_timer_on_stack(&timer); 1505 1506 torture_kthread_stopping("access_thread");
+2 -2
kernel/kthread.c
··· 1362 struct kthread_worker *worker = work->worker; 1363 1364 /* 1365 - * del_timer_sync() must be called to make sure that the timer 1366 * callback is not running. The lock must be temporary released 1367 * to avoid a deadlock with the callback. In the meantime, 1368 * any queuing is blocked by setting the canceling counter. 1369 */ 1370 work->canceling++; 1371 raw_spin_unlock_irqrestore(&worker->lock, *flags); 1372 - del_timer_sync(&dwork->timer); 1373 raw_spin_lock_irqsave(&worker->lock, *flags); 1374 work->canceling--; 1375 }
··· 1362 struct kthread_worker *worker = work->worker; 1363 1364 /* 1365 + * timer_delete_sync() must be called to make sure that the timer 1366 * callback is not running. The lock must be temporary released 1367 * to avoid a deadlock with the callback. In the meantime, 1368 * any queuing is blocked by setting the canceling counter. 1369 */ 1370 work->canceling++; 1371 raw_spin_unlock_irqrestore(&worker->lock, *flags); 1372 + timer_delete_sync(&dwork->timer); 1373 raw_spin_lock_irqsave(&worker->lock, *flags); 1374 work->canceling--; 1375 }
+1 -1
kernel/rcu/rcutorture.c
··· 2324 stutter_wait("rcu_torture_reader"); 2325 } while (!torture_must_stop()); 2326 if (irqreader && cur_ops->irq_capable) { 2327 - del_timer_sync(&t); 2328 destroy_timer_on_stack(&t); 2329 } 2330 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
··· 2324 stutter_wait("rcu_torture_reader"); 2325 } while (!torture_must_stop()); 2326 if (irqreader && cur_ops->irq_capable) { 2327 + timer_delete_sync(&t); 2328 destroy_timer_on_stack(&t); 2329 } 2330 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
+1 -1
kernel/rcu/srcutree.c
··· 690 for_each_possible_cpu(cpu) { 691 struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu); 692 693 - del_timer_sync(&sdp->delay_work); 694 flush_work(&sdp->work); 695 if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist))) 696 return; /* Forgot srcu_barrier(), so just leak it! */
··· 690 for_each_possible_cpu(cpu) { 691 struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu); 692 693 + timer_delete_sync(&sdp->delay_work); 694 flush_work(&sdp->work); 695 if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist))) 696 return; /* Forgot srcu_barrier(), so just leak it! */
+1 -1
kernel/rcu/tasks.h
··· 1086 } 1087 1088 if (!IS_ENABLED(CONFIG_TINY_RCU)) 1089 - del_timer_sync(&tasks_rcu_exit_srcu_stall_timer); 1090 } 1091 1092 /* See if tasks are still holding out, complain if so. */
··· 1086 } 1087 1088 if (!IS_ENABLED(CONFIG_TINY_RCU)) 1089 + timer_delete_sync(&tasks_rcu_exit_srcu_stall_timer); 1090 } 1091 1092 /* See if tasks are still holding out, complain if so. */
+2 -2
kernel/rcu/tree_nocb.h
··· 206 207 if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) { 208 WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); 209 - del_timer(&rdp_gp->nocb_timer); 210 } 211 212 if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) { ··· 822 823 if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) { 824 WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); 825 - del_timer(&my_rdp->nocb_timer); 826 } 827 WRITE_ONCE(my_rdp->nocb_gp_sleep, true); 828 raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
··· 206 207 if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) { 208 WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); 209 + timer_delete(&rdp_gp->nocb_timer); 210 } 211 212 if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) { ··· 822 823 if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) { 824 WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); 825 + timer_delete(&my_rdp->nocb_timer); 826 } 827 WRITE_ONCE(my_rdp->nocb_gp_sleep, true); 828 raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
+1 -1
kernel/sched/psi.c
··· 1440 group->rtpoll_task, 1441 lockdep_is_held(&group->rtpoll_trigger_lock)); 1442 rcu_assign_pointer(group->rtpoll_task, NULL); 1443 - del_timer(&group->rtpoll_timer); 1444 } 1445 } 1446 mutex_unlock(&group->rtpoll_trigger_lock);
··· 1440 group->rtpoll_task, 1441 lockdep_is_held(&group->rtpoll_trigger_lock)); 1442 rcu_assign_pointer(group->rtpoll_task, NULL); 1443 + timer_delete(&group->rtpoll_timer); 1444 } 1445 } 1446 mutex_unlock(&group->rtpoll_trigger_lock);
+1 -1
kernel/time/clocksource.c
··· 619 { 620 if (!watchdog_running || (watchdog && !list_empty(&watchdog_list))) 621 return; 622 - del_timer(&watchdog_timer); 623 watchdog_running = 0; 624 } 625
··· 619 { 620 if (!watchdog_running || (watchdog && !list_empty(&watchdog_list))) 621 return; 622 + timer_delete(&watchdog_timer); 623 watchdog_running = 0; 624 } 625
+20 -51
kernel/time/hrtimer.c
··· 465 static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { } 466 #endif 467 468 - static inline void 469 - debug_init(struct hrtimer *timer, clockid_t clockid, 470 - enum hrtimer_mode mode) 471 { 472 debug_hrtimer_init(timer); 473 - trace_hrtimer_init(timer, clockid, mode); 474 } 475 476 - static inline void debug_init_on_stack(struct hrtimer *timer, clockid_t clockid, 477 - enum hrtimer_mode mode) 478 { 479 debug_hrtimer_init_on_stack(timer); 480 - trace_hrtimer_init(timer, clockid, mode); 481 } 482 483 static inline void debug_activate(struct hrtimer *timer, ··· 1314 struct hrtimer_clock_base *base; 1315 unsigned long flags; 1316 1317 - if (WARN_ON_ONCE(!timer->function)) 1318 - return; 1319 /* 1320 * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft 1321 * match on CONFIG_PREEMPT_RT = n. With PREEMPT_RT check the hard ··· 1425 * running. 1426 * 1427 * This prevents priority inversion: if the soft irq thread is preempted 1428 - * in the middle of a timer callback, then calling del_timer_sync() can 1429 * lead to two issues: 1430 * 1431 * - If the caller is on a remote CPU then it has to spin wait for the timer ··· 1588 } 1589 } 1590 1591 - static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, 1592 - enum hrtimer_mode mode) 1593 { 1594 bool softtimer = !!(mode & HRTIMER_MODE_SOFT); 1595 struct hrtimer_cpu_base *cpu_base; ··· 1623 timer->is_hard = !!(mode & HRTIMER_MODE_HARD); 1624 timer->base = &cpu_base->clock_base[base]; 1625 timerqueue_init(&timer->node); 1626 - } 1627 - 1628 - static void __hrtimer_setup(struct hrtimer *timer, 1629 - enum hrtimer_restart (*function)(struct hrtimer *), 1630 - clockid_t clock_id, enum hrtimer_mode mode) 1631 - { 1632 - __hrtimer_init(timer, clock_id, mode); 1633 1634 if (WARN_ON_ONCE(!function)) 1635 - timer->function = hrtimer_dummy_timeout; 1636 else 1637 - timer->function = function; 1638 } 1639 - 1640 - /** 1641 - * hrtimer_init - initialize a timer to the given clock 1642 - * @timer: the timer to be initialized 1643 - * @clock_id: the clock to be used 1644 - * @mode: The modes which are relevant for initialization: 1645 - * HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT, 1646 - * HRTIMER_MODE_REL_SOFT 1647 - * 1648 - * The PINNED variants of the above can be handed in, 1649 - * but the PINNED bit is ignored as pinning happens 1650 - * when the hrtimer is started 1651 - */ 1652 - void hrtimer_init(struct hrtimer *timer, clockid_t clock_id, 1653 - enum hrtimer_mode mode) 1654 - { 1655 - debug_init(timer, clock_id, mode); 1656 - __hrtimer_init(timer, clock_id, mode); 1657 - } 1658 - EXPORT_SYMBOL_GPL(hrtimer_init); 1659 1660 /** 1661 * hrtimer_setup - initialize a timer to the given clock ··· 1646 void hrtimer_setup(struct hrtimer *timer, enum hrtimer_restart (*function)(struct hrtimer *), 1647 clockid_t clock_id, enum hrtimer_mode mode) 1648 { 1649 - debug_init(timer, clock_id, mode); 1650 __hrtimer_setup(timer, function, clock_id, mode); 1651 } 1652 EXPORT_SYMBOL_GPL(hrtimer_setup); ··· 1665 enum hrtimer_restart (*function)(struct hrtimer *), 1666 clockid_t clock_id, enum hrtimer_mode mode) 1667 { 1668 - debug_init_on_stack(timer, clock_id, mode); 1669 __hrtimer_setup(timer, function, clock_id, mode); 1670 } 1671 EXPORT_SYMBOL_GPL(hrtimer_setup_on_stack); ··· 1739 raw_write_seqcount_barrier(&base->seq); 1740 1741 __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0); 1742 - fn = timer->function; 1743 1744 /* 1745 * Clear the 'is relative' flag for the TIME_LOW_RES case. 
If the ··· 2014 * Make the enqueue delivery mode check work on RT. If the sleeper 2015 * was initialized for hard interrupt delivery, force the mode bit. 2016 * This is a special case for hrtimer_sleepers because 2017 - * __hrtimer_init_sleeper() determines the delivery mode on RT so the 2018 * fiddling with this decision is avoided at the call sites. 2019 */ 2020 if (IS_ENABLED(CONFIG_PREEMPT_RT) && sl->timer.is_hard) ··· 2024 } 2025 EXPORT_SYMBOL_GPL(hrtimer_sleeper_start_expires); 2026 2027 - static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl, 2028 - clockid_t clock_id, enum hrtimer_mode mode) 2029 { 2030 /* 2031 * On PREEMPT_RT enabled kernels hrtimers which are not explicitly ··· 2051 mode |= HRTIMER_MODE_HARD; 2052 } 2053 2054 - __hrtimer_init(&sl->timer, clock_id, mode); 2055 - sl->timer.function = hrtimer_wakeup; 2056 sl->task = current; 2057 } 2058 ··· 2064 void hrtimer_setup_sleeper_on_stack(struct hrtimer_sleeper *sl, 2065 clockid_t clock_id, enum hrtimer_mode mode) 2066 { 2067 - debug_init_on_stack(&sl->timer, clock_id, mode); 2068 - __hrtimer_init_sleeper(sl, clock_id, mode); 2069 } 2070 EXPORT_SYMBOL_GPL(hrtimer_setup_sleeper_on_stack); 2071
··· 465 static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { } 466 #endif 467 468 + static inline void debug_setup(struct hrtimer *timer, clockid_t clockid, enum hrtimer_mode mode) 469 { 470 debug_hrtimer_init(timer); 471 + trace_hrtimer_setup(timer, clockid, mode); 472 } 473 474 + static inline void debug_setup_on_stack(struct hrtimer *timer, clockid_t clockid, 475 + enum hrtimer_mode mode) 476 { 477 debug_hrtimer_init_on_stack(timer); 478 + trace_hrtimer_setup(timer, clockid, mode); 479 } 480 481 static inline void debug_activate(struct hrtimer *timer, ··· 1316 struct hrtimer_clock_base *base; 1317 unsigned long flags; 1318 1319 /* 1320 * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft 1321 * match on CONFIG_PREEMPT_RT = n. With PREEMPT_RT check the hard ··· 1429 * running. 1430 * 1431 * This prevents priority inversion: if the soft irq thread is preempted 1432 + * in the middle of a timer callback, then calling hrtimer_cancel() can 1433 * lead to two issues: 1434 * 1435 * - If the caller is on a remote CPU then it has to spin wait for the timer ··· 1592 } 1593 } 1594 1595 + static void __hrtimer_setup(struct hrtimer *timer, 1596 + enum hrtimer_restart (*function)(struct hrtimer *), 1597 + clockid_t clock_id, enum hrtimer_mode mode) 1598 { 1599 bool softtimer = !!(mode & HRTIMER_MODE_SOFT); 1600 struct hrtimer_cpu_base *cpu_base; ··· 1626 timer->is_hard = !!(mode & HRTIMER_MODE_HARD); 1627 timer->base = &cpu_base->clock_base[base]; 1628 timerqueue_init(&timer->node); 1629 1630 if (WARN_ON_ONCE(!function)) 1631 + ACCESS_PRIVATE(timer, function) = hrtimer_dummy_timeout; 1632 else 1633 + ACCESS_PRIVATE(timer, function) = function; 1634 } 1635 1636 /** 1637 * hrtimer_setup - initialize a timer to the given clock ··· 1676 void hrtimer_setup(struct hrtimer *timer, enum hrtimer_restart (*function)(struct hrtimer *), 1677 clockid_t clock_id, enum hrtimer_mode mode) 1678 { 1679 + debug_setup(timer, clock_id, mode); 1680 __hrtimer_setup(timer, function, clock_id, mode); 1681 } 1682 EXPORT_SYMBOL_GPL(hrtimer_setup); ··· 1695 enum hrtimer_restart (*function)(struct hrtimer *), 1696 clockid_t clock_id, enum hrtimer_mode mode) 1697 { 1698 + debug_setup_on_stack(timer, clock_id, mode); 1699 __hrtimer_setup(timer, function, clock_id, mode); 1700 } 1701 EXPORT_SYMBOL_GPL(hrtimer_setup_on_stack); ··· 1769 raw_write_seqcount_barrier(&base->seq); 1770 1771 __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0); 1772 + fn = ACCESS_PRIVATE(timer, function); 1773 1774 /* 1775 * Clear the 'is relative' flag for the TIME_LOW_RES case. If the ··· 2044 * Make the enqueue delivery mode check work on RT. If the sleeper 2045 * was initialized for hard interrupt delivery, force the mode bit. 2046 * This is a special case for hrtimer_sleepers because 2047 + * __hrtimer_setup_sleeper() determines the delivery mode on RT so the 2048 * fiddling with this decision is avoided at the call sites. 
2049 */ 2050 if (IS_ENABLED(CONFIG_PREEMPT_RT) && sl->timer.is_hard) ··· 2054 } 2055 EXPORT_SYMBOL_GPL(hrtimer_sleeper_start_expires); 2056 2057 + static void __hrtimer_setup_sleeper(struct hrtimer_sleeper *sl, 2058 + clockid_t clock_id, enum hrtimer_mode mode) 2059 { 2060 /* 2061 * On PREEMPT_RT enabled kernels hrtimers which are not explicitly ··· 2081 mode |= HRTIMER_MODE_HARD; 2082 } 2083 2084 + __hrtimer_setup(&sl->timer, hrtimer_wakeup, clock_id, mode); 2085 sl->task = current; 2086 } 2087 ··· 2095 void hrtimer_setup_sleeper_on_stack(struct hrtimer_sleeper *sl, 2096 clockid_t clock_id, enum hrtimer_mode mode) 2097 { 2098 + debug_setup_on_stack(&sl->timer, clock_id, mode); 2099 + __hrtimer_setup_sleeper(sl, clock_id, mode); 2100 } 2101 EXPORT_SYMBOL_GPL(hrtimer_setup_sleeper_on_stack); 2102
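The hunk above also touches the on-stack setup path (debug_setup_on_stack(), hrtimer_setup_on_stack()), which follows the same calling convention as hrtimer_setup(). For reference, a hypothetical on-stack user might look like this (a sketch only; foo_timer_fn and the timing are illustrative):

	static void foo_wait(void)
	{
		struct hrtimer t;

		hrtimer_setup_on_stack(&t, foo_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		hrtimer_start(&t, ms_to_ktime(5), HRTIMER_MODE_REL);
		/* ... do work, then tear down ... */
		hrtimer_cancel(&t);
		destroy_hrtimer_on_stack(&t);
	}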
+1 -1
kernel/time/sleep_timeout.c
··· 97 timer.timer.expires = expire; 98 add_timer(&timer.timer); 99 schedule(); 100 - del_timer_sync(&timer.timer); 101 102 /* Remove the timer from the object tracker */ 103 destroy_timer_on_stack(&timer.timer);
··· 97 timer.timer.expires = expire; 98 add_timer(&timer.timer); 99 schedule(); 100 + timer_delete_sync(&timer.timer); 101 102 /* Remove the timer from the object tracker */ 103 destroy_timer_on_stack(&timer.timer);
+4 -4
kernel/time/timer.c
··· 744 745 switch (state) { 746 case ODEBUG_STATE_ACTIVE: 747 - del_timer_sync(timer); 748 debug_object_init(timer, &timer_debug_descr); 749 return true; 750 default: ··· 790 791 switch (state) { 792 case ODEBUG_STATE_ACTIVE: 793 - del_timer_sync(timer); 794 debug_object_free(timer, &timer_debug_descr); 795 return true; 796 default: ··· 1212 * 1213 * mod_timer(timer, expires) is equivalent to: 1214 * 1215 - * del_timer(timer); timer->expires = expires; add_timer(timer); 1216 * 1217 * mod_timer() is more efficient than the above open coded sequence. In 1218 - * case that the timer is inactive, the del_timer() part is a NOP. The 1219 * timer is in any case activated with the new expiry time @expires. 1220 * 1221 * Note that if there are multiple unserialized concurrent users of the
··· 744 745 switch (state) { 746 case ODEBUG_STATE_ACTIVE: 747 + timer_delete_sync(timer); 748 debug_object_init(timer, &timer_debug_descr); 749 return true; 750 default: ··· 790 791 switch (state) { 792 case ODEBUG_STATE_ACTIVE: 793 + timer_delete_sync(timer); 794 debug_object_free(timer, &timer_debug_descr); 795 return true; 796 default: ··· 1212 * 1213 * mod_timer(timer, expires) is equivalent to: 1214 * 1215 + * timer_delete(timer); timer->expires = expires; add_timer(timer); 1216 * 1217 * mod_timer() is more efficient than the above open coded sequence. In 1218 + * case that the timer is inactive, the timer_delete() part is a NOP. The 1219 * timer is in any case activated with the new expiry time @expires. 1220 * 1221 * Note that if there are multiple unserialized concurrent users of the
+1 -1
kernel/time/timer_list.c
··· 46 print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer, 47 int idx, u64 now) 48 { 49 - SEQ_printf(m, " #%d: <%p>, %ps", idx, taddr, timer->function); 50 SEQ_printf(m, ", S:%02x", timer->state); 51 SEQ_printf(m, "\n"); 52 SEQ_printf(m, " # expires at %Lu-%Lu nsecs [in %Ld to %Ld nsecs]\n",
··· 46 print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer, 47 int idx, u64 now) 48 { 49 + SEQ_printf(m, " #%d: <%p>, %ps", idx, taddr, ACCESS_PRIVATE(timer, function)); 50 SEQ_printf(m, ", S:%02x", timer->state); 51 SEQ_printf(m, "\n"); 52 SEQ_printf(m, " # expires at %Lu-%Lu nsecs [in %Ld to %Ld nsecs]\n",
+7 -7
kernel/workqueue.c
··· 2057 struct delayed_work *dwork = to_delayed_work(work); 2058 2059 /* 2060 - * dwork->timer is irqsafe. If del_timer() fails, it's 2061 * guaranteed that the timer is not queued anywhere and not 2062 * running on the local CPU. 2063 */ 2064 - if (likely(del_timer(&dwork->timer))) 2065 return 1; 2066 } 2067 ··· 3069 break; 3070 } 3071 3072 - del_timer_sync(&pool->mayday_timer); 3073 raw_spin_lock_irq(&pool->lock); 3074 /* 3075 * This is necessary even after a new worker was just successfully ··· 4281 bool flush_delayed_work(struct delayed_work *dwork) 4282 { 4283 local_irq_disable(); 4284 - if (del_timer_sync(&dwork->timer)) 4285 __queue_work(dwork->cpu, dwork->wq, &dwork->work); 4286 local_irq_enable(); 4287 return flush_work(&dwork->work); ··· 4984 reap_dying_workers(&cull_list); 4985 4986 /* shut down the timers */ 4987 - del_timer_sync(&pool->idle_timer); 4988 cancel_work_sync(&pool->idle_cull_work); 4989 - del_timer_sync(&pool->mayday_timer); 4990 4991 /* RCU protected to allow dereferences from get_work_pool() */ 4992 call_rcu(&pool->rcu, rcu_free_pool); ··· 7637 static void wq_watchdog_set_thresh(unsigned long thresh) 7638 { 7639 wq_watchdog_thresh = 0; 7640 - del_timer_sync(&wq_watchdog_timer); 7641 7642 if (thresh) { 7643 wq_watchdog_thresh = thresh;
··· 2057 struct delayed_work *dwork = to_delayed_work(work); 2058 2059 /* 2060 + * dwork->timer is irqsafe. If timer_delete() fails, it's 2061 * guaranteed that the timer is not queued anywhere and not 2062 * running on the local CPU. 2063 */ 2064 + if (likely(timer_delete(&dwork->timer))) 2065 return 1; 2066 } 2067 ··· 3069 break; 3070 } 3071 3072 + timer_delete_sync(&pool->mayday_timer); 3073 raw_spin_lock_irq(&pool->lock); 3074 /* 3075 * This is necessary even after a new worker was just successfully ··· 4281 bool flush_delayed_work(struct delayed_work *dwork) 4282 { 4283 local_irq_disable(); 4284 + if (timer_delete_sync(&dwork->timer)) 4285 __queue_work(dwork->cpu, dwork->wq, &dwork->work); 4286 local_irq_enable(); 4287 return flush_work(&dwork->work); ··· 4984 reap_dying_workers(&cull_list); 4985 4986 /* shut down the timers */ 4987 + timer_delete_sync(&pool->idle_timer); 4988 cancel_work_sync(&pool->idle_cull_work); 4989 + timer_delete_sync(&pool->mayday_timer); 4990 4991 /* RCU protected to allow dereferences from get_work_pool() */ 4992 call_rcu(&pool->rcu, rcu_free_pool); ··· 7637 static void wq_watchdog_set_thresh(unsigned long thresh) 7638 { 7639 wq_watchdog_thresh = 0; 7640 + timer_delete_sync(&wq_watchdog_timer); 7641 7642 if (thresh) { 7643 wq_watchdog_thresh = thresh;
+1 -1
mm/backing-dev.c
··· 1151 1152 void bdi_unregister(struct backing_dev_info *bdi) 1153 { 1154 - del_timer_sync(&bdi->laptop_mode_wb_timer); 1155 1156 /* make sure nobody finds us on the bdi_list anymore */ 1157 bdi_remove_from_list(bdi);
··· 1151 1152 void bdi_unregister(struct backing_dev_info *bdi) 1153 { 1154 + timer_delete_sync(&bdi->laptop_mode_wb_timer); 1155 1156 /* make sure nobody finds us on the bdi_list anymore */ 1157 bdi_remove_from_list(bdi);
+2 -2
mm/page-writeback.c
··· 640 #ifdef CONFIG_CGROUP_WRITEBACK 641 void wb_domain_exit(struct wb_domain *dom) 642 { 643 - del_timer_sync(&dom->period_timer); 644 fprop_global_destroy(&dom->completions); 645 } 646 #endif ··· 2229 rcu_read_lock(); 2230 2231 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) 2232 - del_timer(&bdi->laptop_mode_wb_timer); 2233 2234 rcu_read_unlock(); 2235 }
··· 640 #ifdef CONFIG_CGROUP_WRITEBACK 641 void wb_domain_exit(struct wb_domain *dom) 642 { 643 + timer_delete_sync(&dom->period_timer); 644 fprop_global_destroy(&dom->completions); 645 } 646 #endif ··· 2229 rcu_read_lock(); 2230 2231 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) 2232 + timer_delete(&bdi->laptop_mode_wb_timer); 2233 2234 rcu_read_unlock(); 2235 }
+2 -2
net/appletalk/aarp.c
··· 856 add_timer(&aarp_timer); 857 rc = register_netdevice_notifier(&aarp_notifier); 858 if (rc) { 859 - del_timer_sync(&aarp_timer); 860 unregister_snap_client(aarp_dl); 861 } 862 return rc; ··· 1011 /* General module cleanup. Called from cleanup_module() in ddp.c. */ 1012 void aarp_cleanup_module(void) 1013 { 1014 - del_timer_sync(&aarp_timer); 1015 unregister_netdevice_notifier(&aarp_notifier); 1016 unregister_snap_client(aarp_dl); 1017 aarp_purge();
··· 856 add_timer(&aarp_timer); 857 rc = register_netdevice_notifier(&aarp_notifier); 858 if (rc) { 859 + timer_delete_sync(&aarp_timer); 860 unregister_snap_client(aarp_dl); 861 } 862 return rc; ··· 1011 /* General module cleanup. Called from cleanup_module() in ddp.c. */ 1012 void aarp_cleanup_module(void) 1013 { 1014 + timer_delete_sync(&aarp_timer); 1015 unregister_netdevice_notifier(&aarp_notifier); 1016 unregister_snap_client(aarp_dl); 1017 aarp_purge();
+1 -1
net/atm/clip.c
··· 904 /* First, stop the idle timer, so it stops banging 905 * on the table. 906 */ 907 - del_timer_sync(&idle_timer); 908 909 dev = clip_devs; 910 while (dev) {
··· 904 /* First, stop the idle timer, so it stops banging 905 * on the table. 906 */ 907 + timer_delete_sync(&idle_timer); 908 909 dev = clip_devs; 910 while (dev) {
+13 -13
net/atm/lec.c
··· 1302 return -1; 1303 1304 hlist_del(&to_remove->next); 1305 - del_timer(&to_remove->timer); 1306 1307 /* 1308 * If this is the only MAC connected to this VCC, ··· 1482 1483 hlist_for_each_entry_safe(entry, next, 1484 &priv->lec_arp_empty_ones, next) { 1485 - del_timer_sync(&entry->timer); 1486 lec_arp_clear_vccs(entry); 1487 hlist_del(&entry->next); 1488 lec_arp_put(entry); ··· 1491 1492 hlist_for_each_entry_safe(entry, next, 1493 &priv->lec_no_forward, next) { 1494 - del_timer_sync(&entry->timer); 1495 lec_arp_clear_vccs(entry); 1496 hlist_del(&entry->next); 1497 lec_arp_put(entry); ··· 1575 struct lec_arp_table *to_remove = from_timer(to_remove, t, timer); 1576 struct lec_priv *priv = to_remove->priv; 1577 1578 - del_timer(&to_remove->timer); 1579 1580 pr_debug("%p %p: vpi:%d vci:%d\n", 1581 to_remove, priv, ··· 1843 &priv->lec_arp_empty_ones, next) { 1844 if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) { 1845 hlist_del(&entry->next); 1846 - del_timer(&entry->timer); 1847 tmp = lec_arp_find(priv, mac_addr); 1848 if (tmp) { 1849 - del_timer(&tmp->timer); 1850 tmp->status = ESI_FORWARD_DIRECT; 1851 memcpy(tmp->atm_addr, atm_addr, ATM_ESA_LEN); 1852 tmp->vcc = entry->vcc; 1853 tmp->old_push = entry->old_push; 1854 tmp->last_used = jiffies; 1855 - del_timer(&entry->timer); 1856 lec_arp_put(entry); 1857 entry = tmp; 1858 } else { ··· 1883 /* Temporary, changes before end of function */ 1884 } 1885 memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN); 1886 - del_timer(&entry->timer); 1887 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1888 hlist_for_each_entry(tmp, 1889 &priv->lec_arp_tables[i], next) { ··· 1946 entry = make_entry(priv, bus_mac); 1947 if (entry == NULL) 1948 goto out; 1949 - del_timer(&entry->timer); 1950 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); 1951 entry->recv_vcc = vcc; 1952 entry->old_recv_push = old_push; ··· 1988 entry->recv_vcc ? entry->recv_vcc-> 1989 vci : 0); 1990 found_entry = 1; 1991 - del_timer(&entry->timer); 1992 entry->vcc = vcc; 1993 entry->old_push = old_push; 1994 if (entry->status == ESI_VC_PENDING) { ··· 2172 &priv->lec_arp_empty_ones, next) { 2173 if (entry->vcc == vcc) { 2174 lec_arp_clear_vccs(entry); 2175 - del_timer(&entry->timer); 2176 hlist_del(&entry->next); 2177 lec_arp_put(entry); 2178 } ··· 2182 &priv->lec_no_forward, next) { 2183 if (entry->recv_vcc == vcc) { 2184 lec_arp_clear_vccs(entry); 2185 - del_timer(&entry->timer); 2186 hlist_del(&entry->next); 2187 lec_arp_put(entry); 2188 } ··· 2215 hlist_for_each_entry_safe(entry, next, 2216 &priv->lec_arp_empty_ones, next) { 2217 if (vcc == entry->vcc) { 2218 - del_timer(&entry->timer); 2219 ether_addr_copy(entry->mac_addr, src); 2220 entry->status = ESI_FORWARD_DIRECT; 2221 entry->last_used = jiffies;
··· 1302 return -1; 1303 1304 hlist_del(&to_remove->next); 1305 + timer_delete(&to_remove->timer); 1306 1307 /* 1308 * If this is the only MAC connected to this VCC, ··· 1482 1483 hlist_for_each_entry_safe(entry, next, 1484 &priv->lec_arp_empty_ones, next) { 1485 + timer_delete_sync(&entry->timer); 1486 lec_arp_clear_vccs(entry); 1487 hlist_del(&entry->next); 1488 lec_arp_put(entry); ··· 1491 1492 hlist_for_each_entry_safe(entry, next, 1493 &priv->lec_no_forward, next) { 1494 + timer_delete_sync(&entry->timer); 1495 lec_arp_clear_vccs(entry); 1496 hlist_del(&entry->next); 1497 lec_arp_put(entry); ··· 1575 struct lec_arp_table *to_remove = from_timer(to_remove, t, timer); 1576 struct lec_priv *priv = to_remove->priv; 1577 1578 + timer_delete(&to_remove->timer); 1579 1580 pr_debug("%p %p: vpi:%d vci:%d\n", 1581 to_remove, priv, ··· 1843 &priv->lec_arp_empty_ones, next) { 1844 if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) { 1845 hlist_del(&entry->next); 1846 + timer_delete(&entry->timer); 1847 tmp = lec_arp_find(priv, mac_addr); 1848 if (tmp) { 1849 + timer_delete(&tmp->timer); 1850 tmp->status = ESI_FORWARD_DIRECT; 1851 memcpy(tmp->atm_addr, atm_addr, ATM_ESA_LEN); 1852 tmp->vcc = entry->vcc; 1853 tmp->old_push = entry->old_push; 1854 tmp->last_used = jiffies; 1855 + timer_delete(&entry->timer); 1856 lec_arp_put(entry); 1857 entry = tmp; 1858 } else { ··· 1883 /* Temporary, changes before end of function */ 1884 } 1885 memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN); 1886 + timer_delete(&entry->timer); 1887 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1888 hlist_for_each_entry(tmp, 1889 &priv->lec_arp_tables[i], next) { ··· 1946 entry = make_entry(priv, bus_mac); 1947 if (entry == NULL) 1948 goto out; 1949 + timer_delete(&entry->timer); 1950 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); 1951 entry->recv_vcc = vcc; 1952 entry->old_recv_push = old_push; ··· 1988 entry->recv_vcc ? entry->recv_vcc-> 1989 vci : 0); 1990 found_entry = 1; 1991 + timer_delete(&entry->timer); 1992 entry->vcc = vcc; 1993 entry->old_push = old_push; 1994 if (entry->status == ESI_VC_PENDING) { ··· 2172 &priv->lec_arp_empty_ones, next) { 2173 if (entry->vcc == vcc) { 2174 lec_arp_clear_vccs(entry); 2175 + timer_delete(&entry->timer); 2176 hlist_del(&entry->next); 2177 lec_arp_put(entry); 2178 } ··· 2182 &priv->lec_no_forward, next) { 2183 if (entry->recv_vcc == vcc) { 2184 lec_arp_clear_vccs(entry); 2185 + timer_delete(&entry->timer); 2186 hlist_del(&entry->next); 2187 lec_arp_put(entry); 2188 } ··· 2215 hlist_for_each_entry_safe(entry, next, 2216 &priv->lec_arp_empty_ones, next) { 2217 if (vcc == entry->vcc) { 2218 + timer_delete(&entry->timer); 2219 ether_addr_copy(entry->mac_addr, src); 2220 entry->status = ESI_FORWARD_DIRECT; 2221 entry->last_used = jiffies;
+2 -2
net/atm/mpc.c
··· 804 /* This lets us now how our LECs are doing */ 805 err = register_netdevice_notifier(&mpoa_notifier); 806 if (err < 0) { 807 - del_timer(&mpc_timer); 808 return err; 809 } 810 } ··· 1495 1496 mpc_proc_clean(); 1497 1498 - del_timer_sync(&mpc_timer); 1499 unregister_netdevice_notifier(&mpoa_notifier); 1500 deregister_atm_ioctl(&atm_ioctl_ops); 1501
··· 804 /* This lets us now how our LECs are doing */ 805 err = register_netdevice_notifier(&mpoa_notifier); 806 if (err < 0) { 807 + timer_delete(&mpc_timer); 808 return err; 809 } 810 } ··· 1495 1496 mpc_proc_clean(); 1497 1498 + timer_delete_sync(&mpc_timer); 1499 unregister_netdevice_notifier(&mpoa_notifier); 1500 deregister_atm_ioctl(&atm_ioctl_ops); 1501
+5 -5
net/ax25/af_ax25.c
··· 1071 } 1072 if (ax25_dev) { 1073 if (!ax25_dev->device_up) { 1074 - del_timer_sync(&ax25->timer); 1075 - del_timer_sync(&ax25->t1timer); 1076 - del_timer_sync(&ax25->t2timer); 1077 - del_timer_sync(&ax25->t3timer); 1078 - del_timer_sync(&ax25->idletimer); 1079 } 1080 netdev_put(ax25_dev->dev, &ax25->dev_tracker); 1081 ax25_dev_put(ax25_dev);
··· 1071 } 1072 if (ax25_dev) { 1073 if (!ax25_dev->device_up) { 1074 + timer_delete_sync(&ax25->timer); 1075 + timer_delete_sync(&ax25->t1timer); 1076 + timer_delete_sync(&ax25->t2timer); 1077 + timer_delete_sync(&ax25->t3timer); 1078 + timer_delete_sync(&ax25->idletimer); 1079 } 1080 netdev_put(ax25_dev->dev, &ax25->dev_tracker); 1081 ax25_dev_put(ax25_dev);
+1 -1
net/ax25/ax25_ds_timer.c
··· 44 void ax25_ds_del_timer(ax25_dev *ax25_dev) 45 { 46 if (ax25_dev) 47 - del_timer(&ax25_dev->dama.slave_timer); 48 } 49 50 void ax25_ds_set_timer(ax25_dev *ax25_dev)
··· 44 void ax25_ds_del_timer(ax25_dev *ax25_dev) 45 { 46 if (ax25_dev) 47 + timer_delete(&ax25_dev->dama.slave_timer); 48 } 49 50 void ax25_ds_set_timer(ax25_dev *ax25_dev)
+5 -5
net/ax25/ax25_subr.c
··· 262 ax25_clear_queues(ax25); 263 264 if (reason == ENETUNREACH) { 265 - del_timer_sync(&ax25->timer); 266 - del_timer_sync(&ax25->t1timer); 267 - del_timer_sync(&ax25->t2timer); 268 - del_timer_sync(&ax25->t3timer); 269 - del_timer_sync(&ax25->idletimer); 270 } else { 271 if (ax25->sk && !sock_flag(ax25->sk, SOCK_DESTROY)) 272 ax25_stop_heartbeat(ax25);
··· 262 ax25_clear_queues(ax25); 263 264 if (reason == ENETUNREACH) { 265 + timer_delete_sync(&ax25->timer); 266 + timer_delete_sync(&ax25->t1timer); 267 + timer_delete_sync(&ax25->t2timer); 268 + timer_delete_sync(&ax25->t3timer); 269 + timer_delete_sync(&ax25->idletimer); 270 } else { 271 if (ax25->sk && !sock_flag(ax25->sk, SOCK_DESTROY)) 272 ax25_stop_heartbeat(ax25);
+7 -7
net/ax25/ax25_timer.c
··· 65 if (ax25->t3 > 0) 66 mod_timer(&ax25->t3timer, jiffies + ax25->t3); 67 else 68 - del_timer(&ax25->t3timer); 69 } 70 71 void ax25_start_idletimer(ax25_cb *ax25) ··· 73 if (ax25->idle > 0) 74 mod_timer(&ax25->idletimer, jiffies + ax25->idle); 75 else 76 - del_timer(&ax25->idletimer); 77 } 78 79 void ax25_stop_heartbeat(ax25_cb *ax25) 80 { 81 - del_timer(&ax25->timer); 82 } 83 84 void ax25_stop_t1timer(ax25_cb *ax25) 85 { 86 - del_timer(&ax25->t1timer); 87 } 88 89 void ax25_stop_t2timer(ax25_cb *ax25) 90 { 91 - del_timer(&ax25->t2timer); 92 } 93 94 void ax25_stop_t3timer(ax25_cb *ax25) 95 { 96 - del_timer(&ax25->t3timer); 97 } 98 99 void ax25_stop_idletimer(ax25_cb *ax25) 100 { 101 - del_timer(&ax25->idletimer); 102 } 103 104 int ax25_t1timer_running(ax25_cb *ax25)
··· 65 if (ax25->t3 > 0) 66 mod_timer(&ax25->t3timer, jiffies + ax25->t3); 67 else 68 + timer_delete(&ax25->t3timer); 69 } 70 71 void ax25_start_idletimer(ax25_cb *ax25) ··· 73 if (ax25->idle > 0) 74 mod_timer(&ax25->idletimer, jiffies + ax25->idle); 75 else 76 + timer_delete(&ax25->idletimer); 77 } 78 79 void ax25_stop_heartbeat(ax25_cb *ax25) 80 { 81 + timer_delete(&ax25->timer); 82 } 83 84 void ax25_stop_t1timer(ax25_cb *ax25) 85 { 86 + timer_delete(&ax25->t1timer); 87 } 88 89 void ax25_stop_t2timer(ax25_cb *ax25) 90 { 91 + timer_delete(&ax25->t2timer); 92 } 93 94 void ax25_stop_t3timer(ax25_cb *ax25) 95 { 96 + timer_delete(&ax25->t3timer); 97 } 98 99 void ax25_stop_idletimer(ax25_cb *ax25) 100 { 101 + timer_delete(&ax25->idletimer); 102 } 103 104 int ax25_t1timer_running(ax25_cb *ax25)
+3 -3
net/batman-adv/tp_meter.c
··· 384 atomic_dec(&tp_vars->bat_priv->tp_num); 385 386 /* kill the timer and remove its reference */ 387 - del_timer_sync(&tp_vars->timer); 388 /* the worker might have rearmed itself therefore we kill it again. Note 389 * that if the worker should run again before invoking the following 390 - * del_timer(), it would not re-arm itself once again because the status 391 * is OFF now 392 */ 393 - del_timer(&tp_vars->timer); 394 batadv_tp_vars_put(tp_vars); 395 } 396
··· 384 atomic_dec(&tp_vars->bat_priv->tp_num); 385 386 /* kill the timer and remove its reference */ 387 + timer_delete_sync(&tp_vars->timer); 388 /* the worker might have rearmed itself therefore we kill it again. Note 389 * that if the worker should run again before invoking the following 390 + * timer_delete(), it would not re-arm itself once again because the status 391 * is OFF now 392 */ 393 + timer_delete(&tp_vars->timer); 394 batadv_tp_vars_put(tp_vars); 395 } 396
+1 -1
net/bluetooth/hidp/core.c
··· 433 static void hidp_del_timer(struct hidp_session *session) 434 { 435 if (session->idle_to > 0) 436 - del_timer_sync(&session->timer); 437 } 438 439 static void hidp_process_report(struct hidp_session *session, int type,
··· 433 static void hidp_del_timer(struct hidp_session *session) 434 { 435 if (session->idle_to > 0) 436 + timer_delete_sync(&session->timer); 437 } 438 439 static void hidp_process_report(struct hidp_session *session, int type,
+2 -2
net/bluetooth/rfcomm/core.c
··· 254 { 255 BT_DBG("session %p state %ld", s, s->state); 256 257 - del_timer_sync(&s->timer); 258 } 259 260 /* ---- RFCOMM DLCs ---- */ ··· 281 { 282 BT_DBG("dlc %p state %ld", d, d->state); 283 284 - if (del_timer(&d->timer)) 285 rfcomm_dlc_put(d); 286 } 287
··· 254 { 255 BT_DBG("session %p state %ld", s, s->state); 256 257 + timer_delete_sync(&s->timer); 258 } 259 260 /* ---- RFCOMM DLCs ---- */ ··· 281 { 282 BT_DBG("dlc %p state %ld", d, d->state); 283 284 + if (timer_delete(&d->timer)) 285 rfcomm_dlc_put(d); 286 } 287
+3 -3
net/bridge/br_mdb.c
··· 732 mod_timer(&pg->timer, 733 now + brmctx->multicast_membership_interval); 734 else 735 - del_timer(&pg->timer); 736 737 br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB); 738 ··· 853 cfg->entry->state == MDB_TEMPORARY) 854 mod_timer(&ent->timer, now + br_multicast_gmi(brmctx)); 855 else 856 - del_timer(&ent->timer); 857 858 /* Install a (S, G) forwarding entry for the source. */ 859 err = br_mdb_add_group_src_fwd(cfg, &src->addr, brmctx, extack); ··· 953 mod_timer(&pg->timer, 954 now + brmctx->multicast_membership_interval); 955 else 956 - del_timer(&pg->timer); 957 958 br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB); 959
··· 732 mod_timer(&pg->timer, 733 now + brmctx->multicast_membership_interval); 734 else 735 + timer_delete(&pg->timer); 736 737 br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB); 738 ··· 853 cfg->entry->state == MDB_TEMPORARY) 854 mod_timer(&ent->timer, now + br_multicast_gmi(brmctx)); 855 else 856 + timer_delete(&ent->timer); 857 858 /* Install a (S, G) forwarding entry for the source. */ 859 err = br_mdb_add_group_src_fwd(cfg, &src->addr, brmctx, extack); ··· 953 mod_timer(&pg->timer, 954 now + brmctx->multicast_membership_interval); 955 else 956 + timer_delete(&pg->timer); 957 958 br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB); 959
+22 -22
net/bridge/br_multicast.c
··· 546 return; 547 548 /* the kernel is now responsible for removing this S,G */ 549 - del_timer(&sg->timer); 550 star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr); 551 if (!star_mp) 552 return; ··· 2015 void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx) 2016 { 2017 #if IS_ENABLED(CONFIG_IPV6) 2018 - del_timer_sync(&pmctx->ip6_mc_router_timer); 2019 #endif 2020 - del_timer_sync(&pmctx->ip4_mc_router_timer); 2021 } 2022 2023 int br_multicast_add_port(struct net_bridge_port *port) ··· 2062 query->startup_sent = 0; 2063 2064 if (try_to_del_timer_sync(&query->timer) >= 0 || 2065 - del_timer(&query->timer)) 2066 mod_timer(&query->timer, jiffies); 2067 } 2068 ··· 2127 br_multicast_find_del_pg(pmctx->port->br, pg); 2128 2129 del |= br_ip4_multicast_rport_del(pmctx); 2130 - del_timer(&pmctx->ip4_mc_router_timer); 2131 - del_timer(&pmctx->ip4_own_query.timer); 2132 del |= br_ip6_multicast_rport_del(pmctx); 2133 #if IS_ENABLED(CONFIG_IPV6) 2134 - del_timer(&pmctx->ip6_mc_router_timer); 2135 - del_timer(&pmctx->ip6_own_query.timer); 2136 #endif 2137 br_multicast_rport_del_notify(pmctx, del); 2138 } ··· 4199 4200 static void __br_multicast_stop(struct net_bridge_mcast *brmctx) 4201 { 4202 - del_timer_sync(&brmctx->ip4_mc_router_timer); 4203 - del_timer_sync(&brmctx->ip4_other_query.timer); 4204 - del_timer_sync(&brmctx->ip4_other_query.delay_timer); 4205 - del_timer_sync(&brmctx->ip4_own_query.timer); 4206 #if IS_ENABLED(CONFIG_IPV6) 4207 - del_timer_sync(&brmctx->ip6_mc_router_timer); 4208 - del_timer_sync(&brmctx->ip6_other_query.timer); 4209 - del_timer_sync(&brmctx->ip6_other_query.delay_timer); 4210 - del_timer_sync(&brmctx->ip6_own_query.timer); 4211 #endif 4212 } 4213 ··· 4384 case MDB_RTR_TYPE_DISABLED: 4385 case MDB_RTR_TYPE_PERM: 4386 br_mc_router_state_change(brmctx->br, val == MDB_RTR_TYPE_PERM); 4387 - del_timer(&brmctx->ip4_mc_router_timer); 4388 #if IS_ENABLED(CONFIG_IPV6) 4389 - del_timer(&brmctx->ip6_mc_router_timer); 4390 #endif 4391 brmctx->multicast_router = val; 4392 err = 0; ··· 4455 case MDB_RTR_TYPE_DISABLED: 4456 pmctx->multicast_router = MDB_RTR_TYPE_DISABLED; 4457 del |= br_ip4_multicast_rport_del(pmctx); 4458 - del_timer(&pmctx->ip4_mc_router_timer); 4459 del |= br_ip6_multicast_rport_del(pmctx); 4460 #if IS_ENABLED(CONFIG_IPV6) 4461 - del_timer(&pmctx->ip6_mc_router_timer); 4462 #endif 4463 br_multicast_rport_del_notify(pmctx, del); 4464 break; ··· 4470 break; 4471 case MDB_RTR_TYPE_PERM: 4472 pmctx->multicast_router = MDB_RTR_TYPE_PERM; 4473 - del_timer(&pmctx->ip4_mc_router_timer); 4474 br_ip4_multicast_add_router(brmctx, pmctx); 4475 #if IS_ENABLED(CONFIG_IPV6) 4476 - del_timer(&pmctx->ip6_mc_router_timer); 4477 #endif 4478 br_ip6_multicast_add_router(brmctx, pmctx); 4479 break;
··· 546 return; 547 548 /* the kernel is now responsible for removing this S,G */ 549 + timer_delete(&sg->timer); 550 star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr); 551 if (!star_mp) 552 return; ··· 2015 void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx) 2016 { 2017 #if IS_ENABLED(CONFIG_IPV6) 2018 + timer_delete_sync(&pmctx->ip6_mc_router_timer); 2019 #endif 2020 + timer_delete_sync(&pmctx->ip4_mc_router_timer); 2021 } 2022 2023 int br_multicast_add_port(struct net_bridge_port *port) ··· 2062 query->startup_sent = 0; 2063 2064 if (try_to_del_timer_sync(&query->timer) >= 0 || 2065 + timer_delete(&query->timer)) 2066 mod_timer(&query->timer, jiffies); 2067 } 2068 ··· 2127 br_multicast_find_del_pg(pmctx->port->br, pg); 2128 2129 del |= br_ip4_multicast_rport_del(pmctx); 2130 + timer_delete(&pmctx->ip4_mc_router_timer); 2131 + timer_delete(&pmctx->ip4_own_query.timer); 2132 del |= br_ip6_multicast_rport_del(pmctx); 2133 #if IS_ENABLED(CONFIG_IPV6) 2134 + timer_delete(&pmctx->ip6_mc_router_timer); 2135 + timer_delete(&pmctx->ip6_own_query.timer); 2136 #endif 2137 br_multicast_rport_del_notify(pmctx, del); 2138 } ··· 4199 4200 static void __br_multicast_stop(struct net_bridge_mcast *brmctx) 4201 { 4202 + timer_delete_sync(&brmctx->ip4_mc_router_timer); 4203 + timer_delete_sync(&brmctx->ip4_other_query.timer); 4204 + timer_delete_sync(&brmctx->ip4_other_query.delay_timer); 4205 + timer_delete_sync(&brmctx->ip4_own_query.timer); 4206 #if IS_ENABLED(CONFIG_IPV6) 4207 + timer_delete_sync(&brmctx->ip6_mc_router_timer); 4208 + timer_delete_sync(&brmctx->ip6_other_query.timer); 4209 + timer_delete_sync(&brmctx->ip6_other_query.delay_timer); 4210 + timer_delete_sync(&brmctx->ip6_own_query.timer); 4211 #endif 4212 } 4213 ··· 4384 case MDB_RTR_TYPE_DISABLED: 4385 case MDB_RTR_TYPE_PERM: 4386 br_mc_router_state_change(brmctx->br, val == MDB_RTR_TYPE_PERM); 4387 + timer_delete(&brmctx->ip4_mc_router_timer); 4388 #if IS_ENABLED(CONFIG_IPV6) 4389 + timer_delete(&brmctx->ip6_mc_router_timer); 4390 #endif 4391 brmctx->multicast_router = val; 4392 err = 0; ··· 4455 case MDB_RTR_TYPE_DISABLED: 4456 pmctx->multicast_router = MDB_RTR_TYPE_DISABLED; 4457 del |= br_ip4_multicast_rport_del(pmctx); 4458 + timer_delete(&pmctx->ip4_mc_router_timer); 4459 del |= br_ip6_multicast_rport_del(pmctx); 4460 #if IS_ENABLED(CONFIG_IPV6) 4461 + timer_delete(&pmctx->ip6_mc_router_timer); 4462 #endif 4463 br_multicast_rport_del_notify(pmctx, del); 4464 break; ··· 4470 break; 4471 case MDB_RTR_TYPE_PERM: 4472 pmctx->multicast_router = MDB_RTR_TYPE_PERM; 4473 + timer_delete(&pmctx->ip4_mc_router_timer); 4474 br_ip4_multicast_add_router(brmctx, pmctx); 4475 #if IS_ENABLED(CONFIG_IPV6) 4476 + timer_delete(&pmctx->ip6_mc_router_timer); 4477 #endif 4478 br_ip6_multicast_add_router(brmctx, pmctx); 4479 break;
+7 -7
net/bridge/br_stp.c
··· 198 br->hello_time = br->bridge_hello_time; 199 br->forward_delay = br->bridge_forward_delay; 200 br_topology_change_detection(br); 201 - del_timer(&br->tcn_timer); 202 203 if (br->dev->flags & IFF_UP) { 204 br_config_bpdu_generation(br); ··· 363 static void br_topology_change_acknowledged(struct net_bridge *br) 364 { 365 br->topology_change_detected = 0; 366 - del_timer(&br->tcn_timer); 367 } 368 369 /* called under bridge lock */ ··· 439 br_set_state(p, BR_STATE_BLOCKING); 440 br_ifinfo_notify(RTM_NEWLINK, NULL, p); 441 442 - del_timer(&p->forward_delay_timer); 443 } 444 } 445 ··· 454 if (br->stp_enabled == BR_NO_STP || br->forward_delay == 0) { 455 br_set_state(p, BR_STATE_FORWARDING); 456 br_topology_change_detection(br); 457 - del_timer(&p->forward_delay_timer); 458 } else if (br->stp_enabled == BR_KERNEL_STP) 459 br_set_state(p, BR_STATE_LISTENING); 460 else ··· 483 p->topology_change_ack = 0; 484 br_make_forwarding(p); 485 } else if (br_is_designated_port(p)) { 486 - del_timer(&p->message_age_timer); 487 br_make_forwarding(p); 488 } else { 489 p->config_pending = 0; ··· 533 br_port_state_selection(br); 534 535 if (!br_is_root_bridge(br) && was_root) { 536 - del_timer(&br->hello_timer); 537 if (br->topology_change_detected) { 538 - del_timer(&br->topology_change_timer); 539 br_transmit_tcn(br); 540 541 mod_timer(&br->tcn_timer,
··· 198 br->hello_time = br->bridge_hello_time; 199 br->forward_delay = br->bridge_forward_delay; 200 br_topology_change_detection(br); 201 + timer_delete(&br->tcn_timer); 202 203 if (br->dev->flags & IFF_UP) { 204 br_config_bpdu_generation(br); ··· 363 static void br_topology_change_acknowledged(struct net_bridge *br) 364 { 365 br->topology_change_detected = 0; 366 + timer_delete(&br->tcn_timer); 367 } 368 369 /* called under bridge lock */ ··· 439 br_set_state(p, BR_STATE_BLOCKING); 440 br_ifinfo_notify(RTM_NEWLINK, NULL, p); 441 442 + timer_delete(&p->forward_delay_timer); 443 } 444 } 445 ··· 454 if (br->stp_enabled == BR_NO_STP || br->forward_delay == 0) { 455 br_set_state(p, BR_STATE_FORWARDING); 456 br_topology_change_detection(br); 457 + timer_delete(&p->forward_delay_timer); 458 } else if (br->stp_enabled == BR_KERNEL_STP) 459 br_set_state(p, BR_STATE_LISTENING); 460 else ··· 483 p->topology_change_ack = 0; 484 br_make_forwarding(p); 485 } else if (br_is_designated_port(p)) { 486 + timer_delete(&p->message_age_timer); 487 br_make_forwarding(p); 488 } else { 489 p->config_pending = 0; ··· 533 br_port_state_selection(br); 534 535 if (!br_is_root_bridge(br) && was_root) { 536 + timer_delete(&br->hello_timer); 537 if (br->topology_change_detected) { 538 + timer_delete(&br->topology_change_timer); 539 br_transmit_tcn(br); 540 541 mod_timer(&br->tcn_timer,
+6 -6
net/bridge/br_stp_if.c
··· 81 br->topology_change_detected = 0; 82 spin_unlock_bh(&br->lock); 83 84 - del_timer_sync(&br->hello_timer); 85 - del_timer_sync(&br->topology_change_timer); 86 - del_timer_sync(&br->tcn_timer); 87 cancel_delayed_work_sync(&br->gc_work); 88 } 89 ··· 109 110 br_ifinfo_notify(RTM_NEWLINK, NULL, p); 111 112 - del_timer(&p->message_age_timer); 113 - del_timer(&p->forward_delay_timer); 114 - del_timer(&p->hold_timer); 115 116 if (!rcu_access_pointer(p->backup_port)) 117 br_fdb_delete_by_port(br, p, 0, 0);
··· 81 br->topology_change_detected = 0; 82 spin_unlock_bh(&br->lock); 83 84 + timer_delete_sync(&br->hello_timer); 85 + timer_delete_sync(&br->topology_change_timer); 86 + timer_delete_sync(&br->tcn_timer); 87 cancel_delayed_work_sync(&br->gc_work); 88 } 89 ··· 109 110 br_ifinfo_notify(RTM_NEWLINK, NULL, p); 111 112 + timer_delete(&p->message_age_timer); 113 + timer_delete(&p->forward_delay_timer); 114 + timer_delete(&p->hold_timer); 115 116 if (!rcu_access_pointer(p->backup_port)) 117 br_fdb_delete_by_port(br, p, 0, 0);
+1 -1
net/can/af_can.c
··· 825 if (IS_ENABLED(CONFIG_PROC_FS)) { 826 can_remove_proc(net); 827 if (stats_timer) 828 - del_timer_sync(&net->can.stattimer); 829 } 830 831 kfree(net->can.rx_alldev_list);
··· 825 if (IS_ENABLED(CONFIG_PROC_FS)) { 826 can_remove_proc(net); 827 if (stats_timer) 828 + timer_delete_sync(&net->can.stattimer); 829 } 830 831 kfree(net->can.rx_alldev_list);
+4 -4
net/core/drop_monitor.c
··· 1088 struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu); 1089 struct sk_buff *skb; 1090 1091 - del_timer_sync(&hw_data->send_timer); 1092 cancel_work_sync(&hw_data->dm_alert_work); 1093 while ((skb = __skb_dequeue(&hw_data->drop_queue))) { 1094 struct devlink_trap_metadata *hw_metadata; ··· 1122 struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu); 1123 struct sk_buff *skb; 1124 1125 - del_timer_sync(&hw_data->send_timer); 1126 cancel_work_sync(&hw_data->dm_alert_work); 1127 while ((skb = __skb_dequeue(&hw_data->drop_queue))) { 1128 struct devlink_trap_metadata *hw_metadata; ··· 1183 struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu); 1184 struct sk_buff *skb; 1185 1186 - del_timer_sync(&data->send_timer); 1187 cancel_work_sync(&data->dm_alert_work); 1188 while ((skb = __skb_dequeue(&data->drop_queue))) 1189 consume_skb(skb); ··· 1211 struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu); 1212 struct sk_buff *skb; 1213 1214 - del_timer_sync(&data->send_timer); 1215 cancel_work_sync(&data->dm_alert_work); 1216 while ((skb = __skb_dequeue(&data->drop_queue))) 1217 consume_skb(skb);
··· 1088 struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu); 1089 struct sk_buff *skb; 1090 1091 + timer_delete_sync(&hw_data->send_timer); 1092 cancel_work_sync(&hw_data->dm_alert_work); 1093 while ((skb = __skb_dequeue(&hw_data->drop_queue))) { 1094 struct devlink_trap_metadata *hw_metadata; ··· 1122 struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu); 1123 struct sk_buff *skb; 1124 1125 + timer_delete_sync(&hw_data->send_timer); 1126 cancel_work_sync(&hw_data->dm_alert_work); 1127 while ((skb = __skb_dequeue(&hw_data->drop_queue))) { 1128 struct devlink_trap_metadata *hw_metadata; ··· 1183 struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu); 1184 struct sk_buff *skb; 1185 1186 + timer_delete_sync(&data->send_timer); 1187 cancel_work_sync(&data->dm_alert_work); 1188 while ((skb = __skb_dequeue(&data->drop_queue))) 1189 consume_skb(skb); ··· 1211 struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu); 1212 struct sk_buff *skb; 1213 1214 + timer_delete_sync(&data->send_timer); 1215 cancel_work_sync(&data->dm_alert_work); 1216 while ((skb = __skb_dequeue(&data->drop_queue))) 1217 consume_skb(skb);
+1 -1
net/core/gen_estimator.c
··· 177 spin_lock_bh(lock); 178 old = rcu_dereference_protected(*rate_est, 1); 179 if (old) { 180 - del_timer_sync(&old->timer); 181 est->avbps = old->avbps; 182 est->avpps = old->avpps; 183 }
··· 177 spin_lock_bh(lock); 178 old = rcu_dereference_protected(*rate_est, 1); 179 if (old) { 180 + timer_delete_sync(&old->timer); 181 est->avbps = old->avbps; 182 est->avpps = old->avpps; 183 }
+5 -5
net/core/neighbour.c
··· 309 static int neigh_del_timer(struct neighbour *n) 310 { 311 if ((n->nud_state & NUD_IN_TIMER) && 312 - del_timer(&n->timer)) { 313 neigh_release(n); 314 return 1; 315 } ··· 427 pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL, 428 tbl->family); 429 if (skb_queue_empty_lockless(&tbl->proxy_queue)) 430 - del_timer_sync(&tbl->proxy_timer); 431 return 0; 432 } 433 ··· 1597 } else if (!sched_next || tdif < sched_next) 1598 sched_next = tdif; 1599 } 1600 - del_timer(&tbl->proxy_timer); 1601 if (sched_next) 1602 mod_timer(&tbl->proxy_timer, jiffies + sched_next); 1603 spin_unlock(&tbl->proxy_queue.lock); ··· 1628 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED; 1629 1630 spin_lock(&tbl->proxy_queue.lock); 1631 - if (del_timer(&tbl->proxy_timer)) { 1632 if (time_before(tbl->proxy_timer.expires, sched_next)) 1633 sched_next = tbl->proxy_timer.expires; 1634 } ··· 1786 /* It is not clean... Fix it to unload IPv6 module safely */ 1787 cancel_delayed_work_sync(&tbl->managed_work); 1788 cancel_delayed_work_sync(&tbl->gc_work); 1789 - del_timer_sync(&tbl->proxy_timer); 1790 pneigh_queue_purge(&tbl->proxy_queue, NULL, tbl->family); 1791 neigh_ifdown(tbl, NULL); 1792 if (atomic_read(&tbl->entries))
··· 309 static int neigh_del_timer(struct neighbour *n) 310 { 311 if ((n->nud_state & NUD_IN_TIMER) && 312 + timer_delete(&n->timer)) { 313 neigh_release(n); 314 return 1; 315 } ··· 427 pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL, 428 tbl->family); 429 if (skb_queue_empty_lockless(&tbl->proxy_queue)) 430 + timer_delete_sync(&tbl->proxy_timer); 431 return 0; 432 } 433 ··· 1597 } else if (!sched_next || tdif < sched_next) 1598 sched_next = tdif; 1599 } 1600 + timer_delete(&tbl->proxy_timer); 1601 if (sched_next) 1602 mod_timer(&tbl->proxy_timer, jiffies + sched_next); 1603 spin_unlock(&tbl->proxy_queue.lock); ··· 1628 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED; 1629 1630 spin_lock(&tbl->proxy_queue.lock); 1631 + if (timer_delete(&tbl->proxy_timer)) { 1632 if (time_before(tbl->proxy_timer.expires, sched_next)) 1633 sched_next = tbl->proxy_timer.expires; 1634 } ··· 1786 /* It is not clean... Fix it to unload IPv6 module safely */ 1787 cancel_delayed_work_sync(&tbl->managed_work); 1788 cancel_delayed_work_sync(&tbl->gc_work); 1789 + timer_delete_sync(&tbl->proxy_timer); 1790 pneigh_queue_purge(&tbl->proxy_queue, NULL, tbl->family); 1791 neigh_ifdown(tbl, NULL); 1792 if (atomic_read(&tbl->entries))
+2 -2
net/core/sock.c
··· 3598 3599 void sk_stop_timer(struct sock *sk, struct timer_list* timer) 3600 { 3601 - if (del_timer(timer)) 3602 __sock_put(sk); 3603 } 3604 EXPORT_SYMBOL(sk_stop_timer); 3605 3606 void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer) 3607 { 3608 - if (del_timer_sync(timer)) 3609 __sock_put(sk); 3610 } 3611 EXPORT_SYMBOL(sk_stop_timer_sync);
··· 3598 3599 void sk_stop_timer(struct sock *sk, struct timer_list* timer) 3600 { 3601 + if (timer_delete(timer)) 3602 __sock_put(sk); 3603 } 3604 EXPORT_SYMBOL(sk_stop_timer); 3605 3606 void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer) 3607 { 3608 + if (timer_delete_sync(timer)) 3609 __sock_put(sk); 3610 } 3611 EXPORT_SYMBOL(sk_stop_timer_sync);
+5 -5
net/ipv4/igmp.c
··· 205 static void igmp_stop_timer(struct ip_mc_list *im) 206 { 207 spin_lock_bh(&im->lock); 208 - if (del_timer(&im->timer)) 209 refcount_dec(&im->refcnt); 210 im->tm_running = 0; 211 im->reporter = 0; ··· 251 { 252 spin_lock_bh(&im->lock); 253 im->unsolicit_count = 0; 254 - if (del_timer(&im->timer)) { 255 if ((long)(im->timer.expires-jiffies) < max_delay) { 256 add_timer(&im->timer); 257 im->tm_running = 1; ··· 974 } 975 /* cancel the interface change timer */ 976 WRITE_ONCE(in_dev->mr_ifc_count, 0); 977 - if (del_timer(&in_dev->mr_ifc_timer)) 978 __in_dev_put(in_dev); 979 /* clear deleted report items */ 980 igmpv3_clear_delrec(in_dev); ··· 1830 1831 #ifdef CONFIG_IP_MULTICAST 1832 WRITE_ONCE(in_dev->mr_ifc_count, 0); 1833 - if (del_timer(&in_dev->mr_ifc_timer)) 1834 __in_dev_put(in_dev); 1835 in_dev->mr_gq_running = 0; 1836 - if (del_timer(&in_dev->mr_gq_timer)) 1837 __in_dev_put(in_dev); 1838 #endif 1839
··· 205 static void igmp_stop_timer(struct ip_mc_list *im) 206 { 207 spin_lock_bh(&im->lock); 208 + if (timer_delete(&im->timer)) 209 refcount_dec(&im->refcnt); 210 im->tm_running = 0; 211 im->reporter = 0; ··· 251 { 252 spin_lock_bh(&im->lock); 253 im->unsolicit_count = 0; 254 + if (timer_delete(&im->timer)) { 255 if ((long)(im->timer.expires-jiffies) < max_delay) { 256 add_timer(&im->timer); 257 im->tm_running = 1; ··· 974 } 975 /* cancel the interface change timer */ 976 WRITE_ONCE(in_dev->mr_ifc_count, 0); 977 + if (timer_delete(&in_dev->mr_ifc_timer)) 978 __in_dev_put(in_dev); 979 /* clear deleted report items */ 980 igmpv3_clear_delrec(in_dev); ··· 1830 1831 #ifdef CONFIG_IP_MULTICAST 1832 WRITE_ONCE(in_dev->mr_ifc_count, 0); 1833 + if (timer_delete(&in_dev->mr_ifc_timer)) 1834 __in_dev_put(in_dev); 1835 in_dev->mr_gq_running = 0; 1836 + if (timer_delete(&in_dev->mr_gq_timer)) 1837 __in_dev_put(in_dev); 1838 #endif 1839
+3 -3
net/ipv4/inet_fragment.c
··· 133 struct inet_frag_queue *fq = ptr; 134 int count; 135 136 - count = del_timer_sync(&fq->timer) ? 1 : 0; 137 138 spin_lock_bh(&fq->lock); 139 fq->flags |= INET_FRAG_DROP; ··· 227 228 void inet_frag_kill(struct inet_frag_queue *fq, int *refs) 229 { 230 - if (del_timer(&fq->timer)) 231 (*refs)++; 232 233 if (!(fq->flags & INET_FRAG_COMPLETE)) { ··· 297 reason = (q->flags & INET_FRAG_DROP) ? 298 SKB_DROP_REASON_FRAG_REASM_TIMEOUT : 299 SKB_CONSUMED; 300 - WARN_ON(del_timer(&q->timer) != 0); 301 302 /* Release all fragment data. */ 303 fqdir = q->fqdir;
··· 133 struct inet_frag_queue *fq = ptr; 134 int count; 135 136 + count = timer_delete_sync(&fq->timer) ? 1 : 0; 137 138 spin_lock_bh(&fq->lock); 139 fq->flags |= INET_FRAG_DROP; ··· 227 228 void inet_frag_kill(struct inet_frag_queue *fq, int *refs) 229 { 230 + if (timer_delete(&fq->timer)) 231 (*refs)++; 232 233 if (!(fq->flags & INET_FRAG_COMPLETE)) { ··· 297 reason = (q->flags & INET_FRAG_DROP) ? 298 SKB_DROP_REASON_FRAG_REASM_TIMEOUT : 299 SKB_CONSUMED; 300 + WARN_ON(timer_delete(&q->timer) != 0); 301 302 /* Release all fragment data. */ 303 fqdir = q->fqdir;
+1 -1
net/ipv4/ipmr.c
··· 1289 } 1290 } 1291 if (list_empty(&mrt->mfc_unres_queue)) 1292 - del_timer(&mrt->ipmr_expire_timer); 1293 spin_unlock_bh(&mfc_unres_lock); 1294 1295 if (found) {
··· 1289 } 1290 } 1291 if (list_empty(&mrt->mfc_unres_queue)) 1292 + timer_delete(&mrt->ipmr_expire_timer); 1293 spin_unlock_bh(&mfc_unres_lock); 1294 1295 if (found) {
+1 -1
net/ipv6/addrconf.c
··· 313 314 static void addrconf_del_rs_timer(struct inet6_dev *idev) 315 { 316 - if (del_timer(&idev->rs_timer)) 317 __in6_dev_put(idev); 318 } 319
··· 313 314 static void addrconf_del_rs_timer(struct inet6_dev *idev) 315 { 316 + if (timer_delete(&idev->rs_timer)) 317 __in6_dev_put(idev); 318 } 319
+2 -2
net/ipv6/ip6_fib.c
··· 2383 round_jiffies(now 2384 + net->ipv6.sysctl.ip6_rt_gc_interval)); 2385 else 2386 - del_timer(&net->ipv6.ip6_fib_timer); 2387 spin_unlock_bh(&net->ipv6.fib6_gc_lock); 2388 } 2389 ··· 2470 { 2471 unsigned int i; 2472 2473 - del_timer_sync(&net->ipv6.ip6_fib_timer); 2474 2475 for (i = 0; i < FIB6_TABLE_HASHSZ; i++) { 2476 struct hlist_head *head = &net->ipv6.fib_table_hash[i];
··· 2383 round_jiffies(now 2384 + net->ipv6.sysctl.ip6_rt_gc_interval)); 2385 else 2386 + timer_delete(&net->ipv6.ip6_fib_timer); 2387 spin_unlock_bh(&net->ipv6.fib6_gc_lock); 2388 } 2389 ··· 2470 { 2471 unsigned int i; 2472 2473 + timer_delete_sync(&net->ipv6.ip6_fib_timer); 2474 2475 for (i = 0; i < FIB6_TABLE_HASHSZ; i++) { 2476 struct hlist_head *head = &net->ipv6.fib_table_hash[i];
+1 -1
net/ipv6/ip6_flowlabel.c
··· 907 void ip6_flowlabel_cleanup(void) 908 { 909 static_key_deferred_flush(&ipv6_flowlabel_exclusive); 910 - del_timer(&ip6_fl_gc_timer); 911 unregister_pernet_subsys(&ip6_flowlabel_net_ops); 912 }
··· 907 void ip6_flowlabel_cleanup(void) 908 { 909 static_key_deferred_flush(&ipv6_flowlabel_exclusive); 910 + timer_delete(&ip6_fl_gc_timer); 911 unregister_pernet_subsys(&ip6_flowlabel_net_ops); 912 }
+1 -1
net/ipv6/ip6mr.c
··· 1526 } 1527 } 1528 if (list_empty(&mrt->mfc_unres_queue)) 1529 - del_timer(&mrt->ipmr_expire_timer); 1530 spin_unlock_bh(&mfc_unres_lock); 1531 1532 if (found) {
··· 1526 } 1527 } 1528 if (list_empty(&mrt->mfc_unres_queue)) 1529 + timer_delete(&mrt->ipmr_expire_timer); 1530 spin_unlock_bh(&mfc_unres_lock); 1531 1532 if (found) {
+2 -2
net/lapb/lapb_iface.c
··· 194 spin_unlock_bh(&lapb->lock); 195 196 /* Wait for running timers to stop */ 197 - del_timer_sync(&lapb->t1timer); 198 - del_timer_sync(&lapb->t2timer); 199 200 __lapb_remove_cb(lapb); 201
··· 194 spin_unlock_bh(&lapb->lock); 195 196 /* Wait for running timers to stop */ 197 + timer_delete_sync(&lapb->t1timer); 198 + timer_delete_sync(&lapb->t2timer); 199 200 __lapb_remove_cb(lapb); 201
+4 -4
net/lapb/lapb_timer.c
··· 35 36 void lapb_start_t1timer(struct lapb_cb *lapb) 37 { 38 - del_timer(&lapb->t1timer); 39 40 lapb->t1timer.function = lapb_t1timer_expiry; 41 lapb->t1timer.expires = jiffies + lapb->t1; ··· 46 47 void lapb_start_t2timer(struct lapb_cb *lapb) 48 { 49 - del_timer(&lapb->t2timer); 50 51 lapb->t2timer.function = lapb_t2timer_expiry; 52 lapb->t2timer.expires = jiffies + lapb->t2; ··· 58 void lapb_stop_t1timer(struct lapb_cb *lapb) 59 { 60 lapb->t1timer_running = false; 61 - del_timer(&lapb->t1timer); 62 } 63 64 void lapb_stop_t2timer(struct lapb_cb *lapb) 65 { 66 lapb->t2timer_running = false; 67 - del_timer(&lapb->t2timer); 68 } 69 70 int lapb_t1timer_running(struct lapb_cb *lapb)
··· 35 36 void lapb_start_t1timer(struct lapb_cb *lapb) 37 { 38 + timer_delete(&lapb->t1timer); 39 40 lapb->t1timer.function = lapb_t1timer_expiry; 41 lapb->t1timer.expires = jiffies + lapb->t1; ··· 46 47 void lapb_start_t2timer(struct lapb_cb *lapb) 48 { 49 + timer_delete(&lapb->t2timer); 50 51 lapb->t2timer.function = lapb_t2timer_expiry; 52 lapb->t2timer.expires = jiffies + lapb->t2; ··· 58 void lapb_stop_t1timer(struct lapb_cb *lapb) 59 { 60 lapb->t1timer_running = false; 61 + timer_delete(&lapb->t1timer); 62 } 63 64 void lapb_stop_t2timer(struct lapb_cb *lapb) 65 { 66 lapb->t2timer_running = false; 67 + timer_delete(&lapb->t2timer); 68 } 69 70 int lapb_t1timer_running(struct lapb_cb *lapb)
+9 -9
net/llc/llc_c_ac.c
··· 51 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 52 53 llc->remote_busy_flag = 0; 54 - del_timer(&llc->busy_state_timer.timer); 55 nr = LLC_I_GET_NR(pdu); 56 llc_conn_resend_i_pdu_as_cmd(sk, nr, 0); 57 } ··· 191 struct llc_sock *llc = llc_sk(sk); 192 193 if (llc->data_flag == 2) 194 - del_timer(&llc->rej_sent_timer.timer); 195 return 0; 196 } 197 ··· 1111 { 1112 struct llc_sock *llc = llc_sk(sk); 1113 1114 - del_timer(&llc->rej_sent_timer.timer); 1115 - del_timer(&llc->pf_cycle_timer.timer); 1116 - del_timer(&llc->busy_state_timer.timer); 1117 llc->ack_must_be_send = 0; 1118 llc->ack_pf = 0; 1119 return 0; ··· 1149 1150 int llc_conn_ac_stop_ack_timer(struct sock *sk, struct sk_buff *skb) 1151 { 1152 - del_timer(&llc_sk(sk)->ack_timer.timer); 1153 return 0; 1154 } 1155 ··· 1157 { 1158 struct llc_sock *llc = llc_sk(sk); 1159 1160 - del_timer(&llc->pf_cycle_timer.timer); 1161 llc_conn_set_p_flag(sk, 0); 1162 return 0; 1163 } 1164 1165 int llc_conn_ac_stop_rej_timer(struct sock *sk, struct sk_buff *skb) 1166 { 1167 - del_timer(&llc_sk(sk)->rej_sent_timer.timer); 1168 return 0; 1169 } 1170 ··· 1180 /* On loopback we don't queue I frames in unack_pdu_q queue. */ 1181 if (acked > 0 || (llc->dev->flags & IFF_LOOPBACK)) { 1182 llc->retry_count = 0; 1183 - del_timer(&llc->ack_timer.timer); 1184 if (llc->failed_data_req) { 1185 /* already, we did not accept data from upper layer 1186 * (tx_window full or unacceptable state). Now, we
··· 51 struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); 52 53 llc->remote_busy_flag = 0; 54 + timer_delete(&llc->busy_state_timer.timer); 55 nr = LLC_I_GET_NR(pdu); 56 llc_conn_resend_i_pdu_as_cmd(sk, nr, 0); 57 } ··· 191 struct llc_sock *llc = llc_sk(sk); 192 193 if (llc->data_flag == 2) 194 + timer_delete(&llc->rej_sent_timer.timer); 195 return 0; 196 } 197 ··· 1111 { 1112 struct llc_sock *llc = llc_sk(sk); 1113 1114 + timer_delete(&llc->rej_sent_timer.timer); 1115 + timer_delete(&llc->pf_cycle_timer.timer); 1116 + timer_delete(&llc->busy_state_timer.timer); 1117 llc->ack_must_be_send = 0; 1118 llc->ack_pf = 0; 1119 return 0; ··· 1149 1150 int llc_conn_ac_stop_ack_timer(struct sock *sk, struct sk_buff *skb) 1151 { 1152 + timer_delete(&llc_sk(sk)->ack_timer.timer); 1153 return 0; 1154 } 1155 ··· 1157 { 1158 struct llc_sock *llc = llc_sk(sk); 1159 1160 + timer_delete(&llc->pf_cycle_timer.timer); 1161 llc_conn_set_p_flag(sk, 0); 1162 return 0; 1163 } 1164 1165 int llc_conn_ac_stop_rej_timer(struct sock *sk, struct sk_buff *skb) 1166 { 1167 + timer_delete(&llc_sk(sk)->rej_sent_timer.timer); 1168 return 0; 1169 } 1170 ··· 1180 /* On loopback we don't queue I frames in unack_pdu_q queue. */ 1181 if (acked > 0 || (llc->dev->flags & IFF_LOOPBACK)) { 1182 llc->retry_count = 0; 1183 + timer_delete(&llc->ack_timer.timer); 1184 if (llc->failed_data_req) { 1185 /* already, we did not accept data from upper layer 1186 * (tx_window full or unacceptable state). Now, we
+8 -8
net/llc/llc_conn.c
··· 949 struct llc_sock *llc = llc_sk(sk); 950 951 if (sync) { 952 - del_timer_sync(&llc->pf_cycle_timer.timer); 953 - del_timer_sync(&llc->ack_timer.timer); 954 - del_timer_sync(&llc->rej_sent_timer.timer); 955 - del_timer_sync(&llc->busy_state_timer.timer); 956 } else { 957 - del_timer(&llc->pf_cycle_timer.timer); 958 - del_timer(&llc->ack_timer.timer); 959 - del_timer(&llc->rej_sent_timer.timer); 960 - del_timer(&llc->busy_state_timer.timer); 961 } 962 963 llc->ack_must_be_send = 0;
··· 949 struct llc_sock *llc = llc_sk(sk); 950 951 if (sync) { 952 + timer_delete_sync(&llc->pf_cycle_timer.timer); 953 + timer_delete_sync(&llc->ack_timer.timer); 954 + timer_delete_sync(&llc->rej_sent_timer.timer); 955 + timer_delete_sync(&llc->busy_state_timer.timer); 956 } else { 957 + timer_delete(&llc->pf_cycle_timer.timer); 958 + timer_delete(&llc->ack_timer.timer); 959 + timer_delete(&llc->rej_sent_timer.timer); 960 + timer_delete(&llc->busy_state_timer.timer); 961 } 962 963 llc->ack_must_be_send = 0;
+2 -2
net/mac80211/agg-rx.c
··· 103 if (!tid_rx) 104 return; 105 106 - del_timer_sync(&tid_rx->session_timer); 107 108 /* make sure ieee80211_sta_reorder_release() doesn't re-arm the timer */ 109 spin_lock_bh(&tid_rx->reorder_lock); 110 tid_rx->removed = true; 111 spin_unlock_bh(&tid_rx->reorder_lock); 112 - del_timer_sync(&tid_rx->reorder_timer); 113 114 call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx); 115 }
··· 103 if (!tid_rx) 104 return; 105 106 + timer_delete_sync(&tid_rx->session_timer); 107 108 /* make sure ieee80211_sta_reorder_release() doesn't re-arm the timer */ 109 spin_lock_bh(&tid_rx->reorder_lock); 110 tid_rx->removed = true; 111 spin_unlock_bh(&tid_rx->reorder_lock); 112 + timer_delete_sync(&tid_rx->reorder_timer); 113 114 call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx); 115 }
+3 -3
net/mac80211/agg-tx.c
··· 362 ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n", 363 sta->sta.addr, tid); 364 365 - del_timer_sync(&tid_tx->addba_resp_timer); 366 - del_timer_sync(&tid_tx->session_timer); 367 368 /* 369 * After this packets are no longer handed right through ··· 1002 return; 1003 } 1004 1005 - del_timer_sync(&tid_tx->addba_resp_timer); 1006 1007 ht_dbg(sta->sdata, "switched off addBA timer for %pM tid %d\n", 1008 sta->sta.addr, tid);
··· 362 ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n", 363 sta->sta.addr, tid); 364 365 + timer_delete_sync(&tid_tx->addba_resp_timer); 366 + timer_delete_sync(&tid_tx->session_timer); 367 368 /* 369 * After this packets are no longer handed right through ··· 1002 return; 1003 } 1004 1005 + timer_delete_sync(&tid_tx->addba_resp_timer); 1006 1007 ht_dbg(sta->sdata, "switched off addBA timer for %pM tid %d\n", 1008 sta->sta.addr, tid);
+1 -1
net/mac80211/ibss.c
··· 1844 1845 skb_queue_purge(&sdata->skb_queue); 1846 1847 - del_timer_sync(&sdata->u.ibss.timer); 1848 1849 return 0; 1850 }
··· 1844 1845 skb_queue_purge(&sdata->skb_queue); 1846 1847 + timer_delete_sync(&sdata->u.ibss.timer); 1848 1849 return 0; 1850 }
+1 -1
net/mac80211/iface.c
··· 526 netif_addr_unlock_bh(sdata->dev); 527 } 528 529 - del_timer_sync(&local->dynamic_ps_timer); 530 wiphy_work_cancel(local->hw.wiphy, &local->dynamic_ps_enable_work); 531 532 WARN(ieee80211_vif_is_mld(&sdata->vif),
··· 526 netif_addr_unlock_bh(sdata->dev); 527 } 528 529 + timer_delete_sync(&local->dynamic_ps_timer); 530 wiphy_work_cancel(local->hw.wiphy, &local->dynamic_ps_enable_work); 531 532 WARN(ieee80211_vif_is_mld(&sdata->vif),
+1 -1
net/mac80211/led.c
··· 342 return; 343 344 tpt_trig->running = false; 345 - del_timer_sync(&tpt_trig->timer); 346 347 led_trigger_event(&local->tpt_led, LED_OFF); 348 }
··· 342 return; 343 344 tpt_trig->running = false; 345 + timer_delete_sync(&tpt_trig->timer); 346 347 led_trigger_event(&local->tpt_led, LED_OFF); 348 }
+4 -4
net/mac80211/mesh.c
··· 706 else { 707 clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); 708 /* stop running timer */ 709 - del_timer_sync(&ifmsh->mesh_path_root_timer); 710 } 711 } 712 ··· 1241 local->total_ps_buffered -= skb_queue_len(&ifmsh->ps.bc_buf); 1242 skb_queue_purge(&ifmsh->ps.bc_buf); 1243 1244 - del_timer_sync(&sdata->u.mesh.housekeeping_timer); 1245 - del_timer_sync(&sdata->u.mesh.mesh_path_root_timer); 1246 - del_timer_sync(&sdata->u.mesh.mesh_path_timer); 1247 1248 /* clear any mesh work (for next join) we may have accrued */ 1249 ifmsh->wrkq_flags = 0;
··· 706 else { 707 clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); 708 /* stop running timer */ 709 + timer_delete_sync(&ifmsh->mesh_path_root_timer); 710 } 711 } 712 ··· 1241 local->total_ps_buffered -= skb_queue_len(&ifmsh->ps.bc_buf); 1242 skb_queue_purge(&ifmsh->ps.bc_buf); 1243 1244 + timer_delete_sync(&sdata->u.mesh.housekeeping_timer); 1245 + timer_delete_sync(&sdata->u.mesh.mesh_path_root_timer); 1246 + timer_delete_sync(&sdata->u.mesh.mesh_path_timer); 1247 1248 /* clear any mesh work (for next join) we may have accrued */ 1249 ifmsh->wrkq_flags = 0;
+6 -6
net/mac80211/mesh_plink.c
··· 417 } 418 spin_unlock_bh(&sta->mesh->plink_lock); 419 if (!sdata->u.mesh.user_mpm) 420 - del_timer_sync(&sta->mesh->plink_timer); 421 mesh_path_flush_by_nexthop(sta); 422 423 /* make sure no readers can access nexthop sta from here on */ ··· 666 667 /* 668 * This STA is valid because sta_info_destroy() will 669 - * del_timer_sync() this timer after having made sure 670 * it cannot be re-added (by deleting the plink.) 671 */ 672 sta = mesh->plink_sta; ··· 689 return; 690 } 691 692 - /* del_timer() and handler may race when entering these states */ 693 if (sta->mesh->plink_state == NL80211_PLINK_LISTEN || 694 sta->mesh->plink_state == NL80211_PLINK_ESTAB) { 695 mpl_dbg(sta->sdata, ··· 735 break; 736 case NL80211_PLINK_HOLDING: 737 /* holding timer */ 738 - del_timer(&sta->mesh->plink_timer); 739 mesh_plink_fsm_restart(sta); 740 break; 741 default: ··· 848 struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg; 849 u64 changed = 0; 850 851 - del_timer(&sta->mesh->plink_timer); 852 sta->mesh->plink_state = NL80211_PLINK_ESTAB; 853 changed |= mesh_plink_inc_estab_count(sdata); 854 changed |= mesh_set_ht_prot_mode(sdata); ··· 975 case NL80211_PLINK_HOLDING: 976 switch (event) { 977 case CLS_ACPT: 978 - del_timer(&sta->mesh->plink_timer); 979 mesh_plink_fsm_restart(sta); 980 break; 981 case OPN_ACPT:
··· 417 } 418 spin_unlock_bh(&sta->mesh->plink_lock); 419 if (!sdata->u.mesh.user_mpm) 420 + timer_delete_sync(&sta->mesh->plink_timer); 421 mesh_path_flush_by_nexthop(sta); 422 423 /* make sure no readers can access nexthop sta from here on */ ··· 666 667 /* 668 * This STA is valid because sta_info_destroy() will 669 + * timer_delete_sync() this timer after having made sure 670 * it cannot be re-added (by deleting the plink.) 671 */ 672 sta = mesh->plink_sta; ··· 689 return; 690 } 691 692 + /* timer_delete() and handler may race when entering these states */ 693 if (sta->mesh->plink_state == NL80211_PLINK_LISTEN || 694 sta->mesh->plink_state == NL80211_PLINK_ESTAB) { 695 mpl_dbg(sta->sdata, ··· 735 break; 736 case NL80211_PLINK_HOLDING: 737 /* holding timer */ 738 + timer_delete(&sta->mesh->plink_timer); 739 mesh_plink_fsm_restart(sta); 740 break; 741 default: ··· 848 struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg; 849 u64 changed = 0; 850 851 + timer_delete(&sta->mesh->plink_timer); 852 sta->mesh->plink_state = NL80211_PLINK_ESTAB; 853 changed |= mesh_plink_inc_estab_count(sdata); 854 changed |= mesh_set_ht_prot_mode(sdata); ··· 975 case NL80211_PLINK_HOLDING: 976 switch (event) { 977 case CLS_ACPT: 978 + timer_delete(&sta->mesh->plink_timer); 979 mesh_plink_fsm_restart(sta); 980 break; 981 case OPN_ACPT:
+8 -8
net/mac80211/mlme.c
··· 3194 } else if (conf->flags & IEEE80211_CONF_PS) { 3195 conf->flags &= ~IEEE80211_CONF_PS; 3196 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); 3197 - del_timer_sync(&local->dynamic_ps_timer); 3198 wiphy_work_cancel(local->hw.wiphy, 3199 &local->dynamic_ps_enable_work); 3200 } ··· 4069 4070 sdata->deflink.ap_power_level = IEEE80211_UNSET_POWER_LEVEL; 4071 4072 - del_timer_sync(&local->dynamic_ps_timer); 4073 wiphy_work_cancel(local->hw.wiphy, &local->dynamic_ps_enable_work); 4074 4075 /* Disable ARP filtering */ ··· 4097 /* disassociated - set to defaults now */ 4098 ieee80211_set_wmm_default(&sdata->deflink, false, false); 4099 4100 - del_timer_sync(&sdata->u.mgd.conn_mon_timer); 4101 - del_timer_sync(&sdata->u.mgd.bcn_mon_timer); 4102 - del_timer_sync(&sdata->u.mgd.timer); 4103 4104 sdata->vif.bss_conf.dtim_period = 0; 4105 sdata->vif.bss_conf.beacon_rate = NULL; ··· 4589 * running is the timeout for the authentication response which 4590 * which is not relevant anymore. 4591 */ 4592 - del_timer_sync(&sdata->u.mgd.timer); 4593 sta_info_destroy_addr(sdata, auth_data->ap_addr); 4594 4595 /* other links are destroyed */ ··· 4628 * running is the timeout for the association response which 4629 * which is not relevant anymore. 4630 */ 4631 - del_timer_sync(&sdata->u.mgd.timer); 4632 sta_info_destroy_addr(sdata, assoc_data->ap_addr); 4633 4634 eth_zero_addr(sdata->deflink.u.mgd.bssid); ··· 9852 ifmgd->assoc_req_ies = NULL; 9853 ifmgd->assoc_req_ies_len = 0; 9854 spin_unlock_bh(&ifmgd->teardown_lock); 9855 - del_timer_sync(&ifmgd->timer); 9856 } 9857 9858 void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
··· 3194 } else if (conf->flags & IEEE80211_CONF_PS) { 3195 conf->flags &= ~IEEE80211_CONF_PS; 3196 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); 3197 + timer_delete_sync(&local->dynamic_ps_timer); 3198 wiphy_work_cancel(local->hw.wiphy, 3199 &local->dynamic_ps_enable_work); 3200 } ··· 4069 4070 sdata->deflink.ap_power_level = IEEE80211_UNSET_POWER_LEVEL; 4071 4072 + timer_delete_sync(&local->dynamic_ps_timer); 4073 wiphy_work_cancel(local->hw.wiphy, &local->dynamic_ps_enable_work); 4074 4075 /* Disable ARP filtering */ ··· 4097 /* disassociated - set to defaults now */ 4098 ieee80211_set_wmm_default(&sdata->deflink, false, false); 4099 4100 + timer_delete_sync(&sdata->u.mgd.conn_mon_timer); 4101 + timer_delete_sync(&sdata->u.mgd.bcn_mon_timer); 4102 + timer_delete_sync(&sdata->u.mgd.timer); 4103 4104 sdata->vif.bss_conf.dtim_period = 0; 4105 sdata->vif.bss_conf.beacon_rate = NULL; ··· 4589 * running is the timeout for the authentication response which 4590 * which is not relevant anymore. 4591 */ 4592 + timer_delete_sync(&sdata->u.mgd.timer); 4593 sta_info_destroy_addr(sdata, auth_data->ap_addr); 4594 4595 /* other links are destroyed */ ··· 4628 * running is the timeout for the association response which 4629 * which is not relevant anymore. 4630 */ 4631 + timer_delete_sync(&sdata->u.mgd.timer); 4632 sta_info_destroy_addr(sdata, assoc_data->ap_addr); 4633 4634 eth_zero_addr(sdata->deflink.u.mgd.bssid); ··· 9852 ifmgd->assoc_req_ies = NULL; 9853 ifmgd->assoc_req_ies_len = 0; 9854 spin_unlock_bh(&ifmgd->teardown_lock); 9855 + timer_delete_sync(&ifmgd->timer); 9856 } 9857 9858 void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
+1 -1
net/mac80211/ocb.c
··· 230 231 skb_queue_purge(&sdata->skb_queue); 232 233 - del_timer_sync(&sdata->u.ocb.housekeeping_timer); 234 /* If the timer fired while we waited for it, it will have 235 * requeued the work. Now the work will be running again 236 * but will not rearm the timer again because it checks
··· 230 231 skb_queue_purge(&sdata->skb_queue); 232 233 + timer_delete_sync(&sdata->u.ocb.housekeeping_timer); 234 /* If the timer fired while we waited for it, it will have 235 * requeued the work. Now the work will be running again 236 * but will not rearm the timer again because it checks
+3 -3
net/mac80211/offchannel.c
··· 30 31 /* FIXME: what to do when local->pspolling is true? */ 32 33 - del_timer_sync(&local->dynamic_ps_timer); 34 - del_timer_sync(&ifmgd->bcn_mon_timer); 35 - del_timer_sync(&ifmgd->conn_mon_timer); 36 37 wiphy_work_cancel(local->hw.wiphy, &local->dynamic_ps_enable_work); 38
··· 30 31 /* FIXME: what to do when local->pspolling is true? */ 32 33 + timer_delete_sync(&local->dynamic_ps_timer); 34 + timer_delete_sync(&ifmgd->bcn_mon_timer); 35 + timer_delete_sync(&ifmgd->conn_mon_timer); 36 37 wiphy_work_cancel(local->hw.wiphy, &local->dynamic_ps_enable_work); 38
+2 -2
net/mac80211/pm.c
··· 69 flush_workqueue(local->workqueue); 70 71 /* Don't try to run timers while suspended. */ 72 - del_timer_sync(&local->sta_cleanup); 73 74 /* 75 * Note that this particular timer doesn't need to be 76 * restarted at resume. 77 */ 78 wiphy_work_cancel(local->hw.wiphy, &local->dynamic_ps_enable_work); 79 - del_timer_sync(&local->dynamic_ps_timer); 80 81 local->wowlan = wowlan; 82 if (local->wowlan) {
··· 69 flush_workqueue(local->workqueue); 70 71 /* Don't try to run timers while suspended. */ 72 + timer_delete_sync(&local->sta_cleanup); 73 74 /* 75 * Note that this particular timer doesn't need to be 76 * restarted at resume. 77 */ 78 wiphy_work_cancel(local->hw.wiphy, &local->dynamic_ps_enable_work); 79 + timer_delete_sync(&local->dynamic_ps_timer); 80 81 local->wowlan = wowlan; 82 if (local->wowlan) {
+1 -1
net/mac80211/rx.c
··· 1242 tid_agg_rx->reorder_time[j] + 1 + 1243 HT_RX_REORDER_BUF_TIMEOUT); 1244 } else { 1245 - del_timer(&tid_agg_rx->reorder_timer); 1246 } 1247 } 1248
··· 1242 tid_agg_rx->reorder_time[j] + 1 + 1243 HT_RX_REORDER_BUF_TIMEOUT); 1244 } else { 1245 + timer_delete(&tid_agg_rx->reorder_timer); 1246 } 1247 } 1248
+1 -1
net/mac80211/sta_info.c
··· 1592 1593 void sta_info_stop(struct ieee80211_local *local) 1594 { 1595 - del_timer_sync(&local->sta_cleanup); 1596 rhltable_destroy(&local->sta_hash); 1597 rhltable_destroy(&local->link_sta_hash); 1598 }
··· 1592 1593 void sta_info_stop(struct ieee80211_local *local) 1594 { 1595 + timer_delete_sync(&local->sta_cleanup); 1596 rhltable_destroy(&local->sta_hash); 1597 rhltable_destroy(&local->link_sta_hash); 1598 }
+1 -1
net/mctp/af_mctp.c
··· 663 * keys), stop any pending expiry events. the timer cannot be re-queued 664 * as the sk is no longer observable 665 */ 666 - del_timer_sync(&msk->key_expiry); 667 } 668 669 static void mctp_sk_destruct(struct sock *sk)
··· 663 * keys), stop any pending expiry events. the timer cannot be re-queued 664 * as the sk is no longer observable 665 */ 666 + timer_delete_sync(&msk->key_expiry); 667 } 668 669 static void mctp_sk_destruct(struct sock *sk)
+1 -1
net/mptcp/pm.c
··· 327 list_del(&entry->list); 328 spin_unlock_bh(&msk->pm.lock); 329 330 - /* no lock, because sk_stop_timer_sync() is calling del_timer_sync() */ 331 if (add_timer) 332 sk_stop_timer_sync(sk, add_timer); 333
··· 327 list_del(&entry->list); 328 spin_unlock_bh(&msk->pm.lock); 329 330 + /* no lock, because sk_stop_timer_sync() is calling timer_delete_sync() */ 331 if (add_timer) 332 sk_stop_timer_sync(sk, add_timer); 333
+2 -2
net/ncsi/ncsi-manage.c
··· 189 nc->monitor.enabled = false; 190 spin_unlock_irqrestore(&nc->lock, flags); 191 192 - del_timer_sync(&nc->monitor.timer); 193 } 194 195 struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np, ··· 396 397 if (nr->enabled) { 398 nr->enabled = false; 399 - del_timer_sync(&nr->timer); 400 } 401 402 spin_lock_irqsave(&ndp->lock, flags);
··· 189 nc->monitor.enabled = false; 190 spin_unlock_irqrestore(&nc->lock, flags); 191 192 + timer_delete_sync(&nc->monitor.timer); 193 } 194 195 struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np, ··· 396 397 if (nr->enabled) { 398 nr->enabled = false; 399 + timer_delete_sync(&nr->timer); 400 } 401 402 spin_lock_irqsave(&ndp->lock, flags);
+1 -1
net/netfilter/ipset/ip_set_bitmap_gen.h
··· 294 struct mtype *map = set->data; 295 296 if (SET_WITH_TIMEOUT(set)) 297 - del_timer_sync(&map->gc); 298 } 299 300 static const struct ip_set_type_variant mtype = {
··· 294 struct mtype *map = set->data; 295 296 if (SET_WITH_TIMEOUT(set)) 297 + timer_delete_sync(&map->gc); 298 } 299 300 static const struct ip_set_type_variant mtype = {
+3 -3
net/netfilter/ipvs/ip_vs_conn.c
··· 822 /* Try to delete connection while not holding reference */ 823 static void ip_vs_conn_del(struct ip_vs_conn *cp) 824 { 825 - if (del_timer(&cp->timer)) { 826 /* Drop cp->control chain too */ 827 if (cp->control) 828 cp->timeout = 0; ··· 833 /* Try to delete connection while holding reference */ 834 static void ip_vs_conn_del_put(struct ip_vs_conn *cp) 835 { 836 - if (del_timer(&cp->timer)) { 837 /* Drop cp->control chain too */ 838 if (cp->control) 839 cp->timeout = 0; ··· 860 struct ip_vs_conn *ct = cp->control; 861 862 /* delete the timer if it is activated by other users */ 863 - del_timer(&cp->timer); 864 865 /* does anybody control me? */ 866 if (ct) {
··· 822 /* Try to delete connection while not holding reference */ 823 static void ip_vs_conn_del(struct ip_vs_conn *cp) 824 { 825 + if (timer_delete(&cp->timer)) { 826 /* Drop cp->control chain too */ 827 if (cp->control) 828 cp->timeout = 0; ··· 833 /* Try to delete connection while holding reference */ 834 static void ip_vs_conn_del_put(struct ip_vs_conn *cp) 835 { 836 + if (timer_delete(&cp->timer)) { 837 /* Drop cp->control chain too */ 838 if (cp->control) 839 cp->timeout = 0; ··· 860 struct ip_vs_conn *ct = cp->control; 861 862 /* delete the timer if it is activated by other users */ 863 + timer_delete(&cp->timer); 864 865 /* does anybody control me? */ 866 if (ct) {
+1 -1
net/netfilter/ipvs/ip_vs_ctl.c
··· 848 { 849 struct ip_vs_dest *dest, *nxt; 850 851 - del_timer_sync(&ipvs->dest_trash_timer); 852 /* No need to use dest_trash_lock */ 853 list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, t_list) { 854 list_del(&dest->t_list);
··· 848 { 849 struct ip_vs_dest *dest, *nxt; 850 851 + timer_delete_sync(&ipvs->dest_trash_timer); 852 /* No need to use dest_trash_lock */ 853 list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, t_list) { 854 list_del(&dest->t_list);
+5 -5
net/netfilter/nf_conntrack_expect.c
··· 118 119 bool nf_ct_remove_expect(struct nf_conntrack_expect *exp) 120 { 121 - if (del_timer(&exp->timeout)) { 122 nf_ct_unlink_expect(exp); 123 nf_ct_expect_put(exp); 124 return true; ··· 214 if (exp->flags & NF_CT_EXPECT_PERMANENT || !unlink) { 215 refcount_inc(&exp->use); 216 return exp; 217 - } else if (del_timer(&exp->timeout)) { 218 nf_ct_unlink_expect(exp); 219 return exp; 220 } 221 - /* Undo exp->master refcnt increase, if del_timer() failed */ 222 nf_ct_put(exp->master); 223 224 return NULL; ··· 520 hlist_for_each_entry_safe(exp, next, 521 &nf_ct_expect_hash[i], 522 hnode) { 523 - if (iter(exp, data) && del_timer(&exp->timeout)) { 524 nf_ct_unlink_expect(exp); 525 nf_ct_expect_put(exp); 526 } ··· 550 if (!net_eq(nf_ct_exp_net(exp), net)) 551 continue; 552 553 - if (iter(exp, data) && del_timer(&exp->timeout)) { 554 nf_ct_unlink_expect_report(exp, portid, report); 555 nf_ct_expect_put(exp); 556 }
··· 118 119 bool nf_ct_remove_expect(struct nf_conntrack_expect *exp) 120 { 121 + if (timer_delete(&exp->timeout)) { 122 nf_ct_unlink_expect(exp); 123 nf_ct_expect_put(exp); 124 return true; ··· 214 if (exp->flags & NF_CT_EXPECT_PERMANENT || !unlink) { 215 refcount_inc(&exp->use); 216 return exp; 217 + } else if (timer_delete(&exp->timeout)) { 218 nf_ct_unlink_expect(exp); 219 return exp; 220 } 221 + /* Undo exp->master refcnt increase, if timer_delete() failed */ 222 nf_ct_put(exp->master); 223 224 return NULL; ··· 520 hlist_for_each_entry_safe(exp, next, 521 &nf_ct_expect_hash[i], 522 hnode) { 523 + if (iter(exp, data) && timer_delete(&exp->timeout)) { 524 nf_ct_unlink_expect(exp); 525 nf_ct_expect_put(exp); 526 } ··· 550 if (!net_eq(nf_ct_exp_net(exp), net)) 551 continue; 552 553 + if (iter(exp, data) && timer_delete(&exp->timeout)) { 554 nf_ct_unlink_expect_report(exp, portid, report); 555 nf_ct_expect_put(exp); 556 }
+2 -2
net/netfilter/nf_conntrack_netlink.c
··· 3448 3449 /* after list removal, usage count == 1 */ 3450 spin_lock_bh(&nf_conntrack_expect_lock); 3451 - if (del_timer(&exp->timeout)) { 3452 nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid, 3453 nlmsg_report(info->nlh)); 3454 nf_ct_expect_put(exp); ··· 3477 const struct nlattr * const cda[]) 3478 { 3479 if (cda[CTA_EXPECT_TIMEOUT]) { 3480 - if (!del_timer(&x->timeout)) 3481 return -ETIME; 3482 3483 x->timeout.expires = jiffies +
··· 3448 3449 /* after list removal, usage count == 1 */ 3450 spin_lock_bh(&nf_conntrack_expect_lock); 3451 + if (timer_delete(&exp->timeout)) { 3452 nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid, 3453 nlmsg_report(info->nlh)); 3454 nf_ct_expect_put(exp); ··· 3477 const struct nlattr * const cda[]) 3478 { 3479 if (cda[CTA_EXPECT_TIMEOUT]) { 3480 + if (!timer_delete(&x->timeout)) 3481 return -ETIME; 3482 3483 x->timeout.expires = jiffies +
+1 -1
net/netrom/nr_loopback.c
··· 68 69 void nr_loopback_clear(void) 70 { 71 - del_timer_sync(&loopback_timer); 72 skb_queue_purge(&loopback_queue); 73 }
··· 68 69 void nr_loopback_clear(void) 70 { 71 + timer_delete_sync(&loopback_timer); 72 skb_queue_purge(&loopback_queue); 73 }
+3 -3
net/nfc/core.c
··· 464 } 465 466 if (dev->ops->check_presence) 467 - del_timer_sync(&dev->check_pres_timer); 468 469 dev->ops->deactivate_target(dev, dev->active_target, mode); 470 dev->active_target = NULL; ··· 509 } 510 511 if (dev->ops->check_presence) 512 - del_timer_sync(&dev->check_pres_timer); 513 514 rc = dev->ops->im_transceive(dev, dev->active_target, skb, cb, 515 cb_context); ··· 1172 device_unlock(&dev->dev); 1173 1174 if (dev->ops->check_presence) { 1175 - del_timer_sync(&dev->check_pres_timer); 1176 cancel_work_sync(&dev->check_pres_work); 1177 } 1178
··· 464 } 465 466 if (dev->ops->check_presence) 467 + timer_delete_sync(&dev->check_pres_timer); 468 469 dev->ops->deactivate_target(dev, dev->active_target, mode); 470 dev->active_target = NULL; ··· 509 } 510 511 if (dev->ops->check_presence) 512 + timer_delete_sync(&dev->check_pres_timer); 513 514 rc = dev->ops->im_transceive(dev, dev->active_target, skb, cb, 515 cb_context); ··· 1172 device_unlock(&dev->dev); 1173 1174 if (dev->ops->check_presence) { 1175 + timer_delete_sync(&dev->check_pres_timer); 1176 cancel_work_sync(&dev->check_pres_work); 1177 } 1178
+2 -2
net/nfc/hci/core.c
··· 148 static void __nfc_hci_cmd_completion(struct nfc_hci_dev *hdev, int err, 149 struct sk_buff *skb) 150 { 151 - del_timer_sync(&hdev->cmd_timer); 152 153 if (hdev->cmd_pending_msg->cb) 154 hdev->cmd_pending_msg->cb(hdev->cmd_pending_msg->cb_context, ··· 1046 1047 mutex_unlock(&hdev->msg_tx_mutex); 1048 1049 - del_timer_sync(&hdev->cmd_timer); 1050 cancel_work_sync(&hdev->msg_tx_work); 1051 1052 cancel_work_sync(&hdev->msg_rx_work);
··· 148 static void __nfc_hci_cmd_completion(struct nfc_hci_dev *hdev, int err, 149 struct sk_buff *skb) 150 { 151 + timer_delete_sync(&hdev->cmd_timer); 152 153 if (hdev->cmd_pending_msg->cb) 154 hdev->cmd_pending_msg->cb(hdev->cmd_pending_msg->cb_context, ··· 1046 1047 mutex_unlock(&hdev->msg_tx_mutex); 1048 1049 + timer_delete_sync(&hdev->cmd_timer); 1050 cancel_work_sync(&hdev->msg_tx_work); 1051 1052 cancel_work_sync(&hdev->msg_rx_work);
+4 -4
net/nfc/hci/llc_shdlc.c
··· 198 199 if (skb_queue_empty(&shdlc->ack_pending_q)) { 200 if (shdlc->t2_active) { 201 - del_timer_sync(&shdlc->t2_timer); 202 shdlc->t2_active = false; 203 204 pr_debug("All sent frames acked. Stopped T2(retransmit)\n"); ··· 289 290 if (llc_shdlc_x_lteq_y_lt_z(shdlc->dnr, y_nr, shdlc->ns)) { 291 if (shdlc->t2_active) { 292 - del_timer_sync(&shdlc->t2_timer); 293 shdlc->t2_active = false; 294 pr_debug("Stopped T2(retransmit)\n"); 295 } ··· 342 { 343 pr_debug("result=%d\n", r); 344 345 - del_timer_sync(&shdlc->connect_timer); 346 347 if (r == 0) { 348 shdlc->ns = 0; ··· 526 (shdlc->rnr == false)) { 527 528 if (shdlc->t1_active) { 529 - del_timer_sync(&shdlc->t1_timer); 530 shdlc->t1_active = false; 531 pr_debug("Stopped T1(send ack)\n"); 532 }
··· 198 199 if (skb_queue_empty(&shdlc->ack_pending_q)) { 200 if (shdlc->t2_active) { 201 + timer_delete_sync(&shdlc->t2_timer); 202 shdlc->t2_active = false; 203 204 pr_debug("All sent frames acked. Stopped T2(retransmit)\n"); ··· 289 290 if (llc_shdlc_x_lteq_y_lt_z(shdlc->dnr, y_nr, shdlc->ns)) { 291 if (shdlc->t2_active) { 292 + timer_delete_sync(&shdlc->t2_timer); 293 shdlc->t2_active = false; 294 pr_debug("Stopped T2(retransmit)\n"); 295 } ··· 342 { 343 pr_debug("result=%d\n", r); 344 345 + timer_delete_sync(&shdlc->connect_timer); 346 347 if (r == 0) { 348 shdlc->ns = 0; ··· 526 (shdlc->rnr == false)) { 527 528 if (shdlc->t1_active) { 529 + timer_delete_sync(&shdlc->t1_timer); 530 shdlc->t1_active = false; 531 pr_debug("Stopped T1(send ack)\n"); 532 }
+3 -3
net/nfc/llcp_core.c
··· 160 static void local_cleanup(struct nfc_llcp_local *local) 161 { 162 nfc_llcp_socket_release(local, false, ENXIO); 163 - del_timer_sync(&local->link_timer); 164 skb_queue_purge(&local->tx_queue); 165 cancel_work_sync(&local->tx_work); 166 cancel_work_sync(&local->rx_work); 167 cancel_work_sync(&local->timeout_work); 168 kfree_skb(local->rx_pending); 169 local->rx_pending = NULL; 170 - del_timer_sync(&local->sdreq_timer); 171 cancel_work_sync(&local->sdreq_timeout_work); 172 nfc_llcp_free_sdp_tlv_list(&local->pending_sdreqs); 173 } ··· 1536 static void __nfc_llcp_recv(struct nfc_llcp_local *local, struct sk_buff *skb) 1537 { 1538 local->rx_pending = skb; 1539 - del_timer(&local->link_timer); 1540 schedule_work(&local->rx_work); 1541 } 1542
··· 160 static void local_cleanup(struct nfc_llcp_local *local) 161 { 162 nfc_llcp_socket_release(local, false, ENXIO); 163 + timer_delete_sync(&local->link_timer); 164 skb_queue_purge(&local->tx_queue); 165 cancel_work_sync(&local->tx_work); 166 cancel_work_sync(&local->rx_work); 167 cancel_work_sync(&local->timeout_work); 168 kfree_skb(local->rx_pending); 169 local->rx_pending = NULL; 170 + timer_delete_sync(&local->sdreq_timer); 171 cancel_work_sync(&local->sdreq_timeout_work); 172 nfc_llcp_free_sdp_tlv_list(&local->pending_sdreqs); 173 } ··· 1536 static void __nfc_llcp_recv(struct nfc_llcp_local *local, struct sk_buff *skb) 1537 { 1538 local->rx_pending = skb; 1539 + timer_delete(&local->link_timer); 1540 schedule_work(&local->rx_work); 1541 } 1542
+3 -3
net/nfc/nci/core.c
··· 565 * there is a queued/running cmd_work 566 */ 567 flush_workqueue(ndev->cmd_wq); 568 - del_timer_sync(&ndev->cmd_timer); 569 - del_timer_sync(&ndev->data_timer); 570 mutex_unlock(&ndev->req_lock); 571 return 0; 572 } ··· 597 /* Flush cmd wq */ 598 flush_workqueue(ndev->cmd_wq); 599 600 - del_timer_sync(&ndev->cmd_timer); 601 602 /* Clear flags except NCI_UNREG */ 603 ndev->flags &= BIT(NCI_UNREG);
··· 565 * there is a queued/running cmd_work 566 */ 567 flush_workqueue(ndev->cmd_wq); 568 + timer_delete_sync(&ndev->cmd_timer); 569 + timer_delete_sync(&ndev->data_timer); 570 mutex_unlock(&ndev->req_lock); 571 return 0; 572 } ··· 597 /* Flush cmd wq */ 598 flush_workqueue(ndev->cmd_wq); 599 600 + timer_delete_sync(&ndev->cmd_timer); 601 602 /* Clear flags except NCI_UNREG */ 603 ndev->flags &= BIT(NCI_UNREG);
+1 -1
net/nfc/nci/data.c
··· 42 pr_debug("len %d, err %d\n", skb ? skb->len : 0, err); 43 44 /* data exchange is complete, stop the data timer */ 45 - del_timer_sync(&ndev->data_timer); 46 clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags); 47 48 if (cb) {
··· 42 pr_debug("len %d, err %d\n", skb ? skb->len : 0, err); 43 44 /* data exchange is complete, stop the data timer */ 45 + timer_delete_sync(&ndev->data_timer); 46 clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags); 47 48 if (cb) {
+1 -1
net/nfc/nci/rsp.c
··· 347 __u16 rsp_opcode = nci_opcode(skb->data); 348 349 /* we got a rsp, stop the cmd timer */ 350 - del_timer(&ndev->cmd_timer); 351 352 pr_debug("NCI RX: MT=rsp, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n", 353 nci_pbf(skb->data),
··· 347 __u16 rsp_opcode = nci_opcode(skb->data); 348 349 /* we got a rsp, stop the cmd timer */ 350 + timer_delete(&ndev->cmd_timer); 351 352 pr_debug("NCI RX: MT=rsp, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n", 353 nci_pbf(skb->data),
+1 -1
net/packet/af_packet.c
··· 581 582 static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc) 583 { 584 - del_timer_sync(&pkc->retire_blk_timer); 585 } 586 587 static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
··· 581 582 static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc) 583 { 584 + timer_delete_sync(&pkc->retire_blk_timer); 585 } 586 587 static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
+4 -4
net/rose/rose_link.c
··· 32 33 void rose_start_ftimer(struct rose_neigh *neigh) 34 { 35 - del_timer(&neigh->ftimer); 36 37 neigh->ftimer.function = rose_ftimer_expiry; 38 neigh->ftimer.expires = ··· 43 44 static void rose_start_t0timer(struct rose_neigh *neigh) 45 { 46 - del_timer(&neigh->t0timer); 47 48 neigh->t0timer.function = rose_t0timer_expiry; 49 neigh->t0timer.expires = ··· 54 55 void rose_stop_ftimer(struct rose_neigh *neigh) 56 { 57 - del_timer(&neigh->ftimer); 58 } 59 60 void rose_stop_t0timer(struct rose_neigh *neigh) 61 { 62 - del_timer(&neigh->t0timer); 63 } 64 65 int rose_ftimer_running(struct rose_neigh *neigh)
··· 32 33 void rose_start_ftimer(struct rose_neigh *neigh) 34 { 35 + timer_delete(&neigh->ftimer); 36 37 neigh->ftimer.function = rose_ftimer_expiry; 38 neigh->ftimer.expires = ··· 43 44 static void rose_start_t0timer(struct rose_neigh *neigh) 45 { 46 + timer_delete(&neigh->t0timer); 47 48 neigh->t0timer.function = rose_t0timer_expiry; 49 neigh->t0timer.expires = ··· 54 55 void rose_stop_ftimer(struct rose_neigh *neigh) 56 { 57 + timer_delete(&neigh->ftimer); 58 } 59 60 void rose_stop_t0timer(struct rose_neigh *neigh) 61 { 62 + timer_delete(&neigh->t0timer); 63 } 64 65 int rose_ftimer_running(struct rose_neigh *neigh)
+1 -1
net/rose/rose_loopback.c
··· 124 { 125 struct sk_buff *skb; 126 127 - del_timer(&loopback_timer); 128 129 while ((skb = skb_dequeue(&loopback_queue)) != NULL) { 130 skb->sk = NULL;
··· 124 { 125 struct sk_buff *skb; 126 127 + timer_delete(&loopback_timer); 128 129 while ((skb = skb_dequeue(&loopback_queue)) != NULL) { 130 skb->sk = NULL;
+2 -2
net/rose/rose_route.c
··· 227 { 228 struct rose_neigh *s; 229 230 - del_timer_sync(&rose_neigh->ftimer); 231 - del_timer_sync(&rose_neigh->t0timer); 232 233 skb_queue_purge(&rose_neigh->queue); 234
··· 227 { 228 struct rose_neigh *s; 229 230 + timer_delete_sync(&rose_neigh->ftimer); 231 + timer_delete_sync(&rose_neigh->t0timer); 232 233 skb_queue_purge(&rose_neigh->queue); 234
+1 -1
net/rxrpc/call_event.c
··· 469 470 out: 471 if (__rxrpc_call_is_complete(call)) { 472 - del_timer_sync(&call->timer); 473 if (!test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) 474 rxrpc_disconnect_call(call); 475 if (call->security)
··· 469 470 out: 471 if (__rxrpc_call_is_complete(call)) { 472 + timer_delete_sync(&call->timer); 473 if (!test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) 474 rxrpc_disconnect_call(call); 475 if (call->security)
+2 -2
net/rxrpc/call_object.c
··· 688 { 689 struct rxrpc_call *call = container_of(work, struct rxrpc_call, destroyer); 690 691 - del_timer_sync(&call->timer); 692 693 rxrpc_cleanup_tx_buffers(call); 694 rxrpc_cleanup_rx_buffers(call); ··· 711 ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE); 712 ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags)); 713 714 - del_timer(&call->timer); 715 716 if (rcu_read_lock_held()) 717 /* Can't use the rxrpc workqueue as we need to cancel/flush
··· 688 { 689 struct rxrpc_call *call = container_of(work, struct rxrpc_call, destroyer); 690 691 + timer_delete_sync(&call->timer); 692 693 rxrpc_cleanup_tx_buffers(call); 694 rxrpc_cleanup_rx_buffers(call); ··· 711 ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE); 712 ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags)); 713 714 + timer_delete(&call->timer); 715 716 if (rcu_read_lock_held()) 717 /* Can't use the rxrpc workqueue as we need to cancel/flush
+1 -1
net/rxrpc/conn_client.c
··· 818 819 local->kill_all_client_conns = true; 820 821 - del_timer_sync(&local->client_conn_reap_timer); 822 823 while ((conn = list_first_entry_or_null(&local->idle_client_conns, 824 struct rxrpc_connection, cache_link))) {
··· 818 819 local->kill_all_client_conns = true; 820 821 + timer_delete_sync(&local->client_conn_reap_timer); 822 823 while ((conn = list_first_entry_or_null(&local->idle_client_conns, 824 struct rxrpc_connection, cache_link))) {
+4 -4
net/rxrpc/conn_object.c
··· 314 !conn->channels[3].call); 315 ASSERT(list_empty(&conn->cache_link)); 316 317 - del_timer_sync(&conn->timer); 318 cancel_work_sync(&conn->processor); /* Processing may restart the timer */ 319 - del_timer_sync(&conn->timer); 320 321 write_lock(&rxnet->conn_lock); 322 list_del_init(&conn->proc_link); ··· 365 dead = __refcount_dec_and_test(&conn->ref, &r); 366 trace_rxrpc_conn(debug_id, r - 1, why); 367 if (dead) { 368 - del_timer(&conn->timer); 369 cancel_work(&conn->processor); 370 371 if (in_softirq() || work_busy(&conn->processor) || ··· 470 471 atomic_dec(&rxnet->nr_conns); 472 473 - del_timer_sync(&rxnet->service_conn_reap_timer); 474 rxrpc_queue_work(&rxnet->service_conn_reaper); 475 flush_workqueue(rxrpc_workqueue); 476
··· 314 !conn->channels[3].call); 315 ASSERT(list_empty(&conn->cache_link)); 316 317 + timer_delete_sync(&conn->timer); 318 cancel_work_sync(&conn->processor); /* Processing may restart the timer */ 319 + timer_delete_sync(&conn->timer); 320 321 write_lock(&rxnet->conn_lock); 322 list_del_init(&conn->proc_link); ··· 365 dead = __refcount_dec_and_test(&conn->ref, &r); 366 trace_rxrpc_conn(debug_id, r - 1, why); 367 if (dead) { 368 + timer_delete(&conn->timer); 369 cancel_work(&conn->processor); 370 371 if (in_softirq() || work_busy(&conn->processor) || ··· 470 471 atomic_dec(&rxnet->nr_conns); 472 473 + timer_delete_sync(&rxnet->service_conn_reap_timer); 474 rxrpc_queue_work(&rxnet->service_conn_reaper); 475 flush_workqueue(rxrpc_workqueue); 476
+2 -2
net/rxrpc/net_ns.c
··· 105 struct rxrpc_net *rxnet = rxrpc_net(net); 106 107 rxnet->live = false; 108 - del_timer_sync(&rxnet->peer_keepalive_timer); 109 cancel_work_sync(&rxnet->peer_keepalive_work); 110 /* Remove the timer again as the worker may have restarted it. */ 111 - del_timer_sync(&rxnet->peer_keepalive_timer); 112 rxrpc_destroy_all_calls(rxnet); 113 rxrpc_destroy_all_connections(rxnet); 114 rxrpc_destroy_all_peers(rxnet);
··· 105 struct rxrpc_net *rxnet = rxrpc_net(net); 106 107 rxnet->live = false; 108 + timer_delete_sync(&rxnet->peer_keepalive_timer); 109 cancel_work_sync(&rxnet->peer_keepalive_work); 110 /* Remove the timer again as the worker may have restarted it. */ 111 + timer_delete_sync(&rxnet->peer_keepalive_timer); 112 rxrpc_destroy_all_calls(rxnet); 113 rxrpc_destroy_all_connections(rxnet); 114 rxrpc_destroy_all_peers(rxnet);
+1 -1
net/sched/sch_fq_pie.c
··· 555 556 tcf_block_put(q->block); 557 q->p_params.tupdate = 0; 558 - del_timer_sync(&q->adapt_timer); 559 kvfree(q->flows); 560 } 561
··· 555 556 tcf_block_put(q->block); 557 q->p_params.tupdate = 0; 558 + timer_delete_sync(&q->adapt_timer); 559 kvfree(q->flows); 560 } 561
+1 -1
net/sched/sch_generic.c
··· 567 static void netdev_watchdog_down(struct net_device *dev) 568 { 569 netif_tx_lock_bh(dev); 570 - if (del_timer(&dev->watchdog_timer)) 571 netdev_put(dev, &dev->watchdog_dev_tracker); 572 netif_tx_unlock_bh(dev); 573 }
··· 567 static void netdev_watchdog_down(struct net_device *dev) 568 { 569 netif_tx_lock_bh(dev); 570 + if (timer_delete(&dev->watchdog_timer)) 571 netdev_put(dev, &dev->watchdog_dev_tracker); 572 netif_tx_unlock_bh(dev); 573 }
+1 -1
net/sched/sch_pie.c
··· 545 struct pie_sched_data *q = qdisc_priv(sch); 546 547 q->params.tupdate = 0; 548 - del_timer_sync(&q->adapt_timer); 549 } 550 551 static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
··· 545 struct pie_sched_data *q = qdisc_priv(sch); 546 547 q->params.tupdate = 0; 548 + timer_delete_sync(&q->adapt_timer); 549 } 550 551 static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
+2 -2
net/sched/sch_red.c
··· 218 219 tcf_qevent_destroy(&q->qe_mark, sch); 220 tcf_qevent_destroy(&q->qe_early_drop, sch); 221 - del_timer_sync(&q->adapt_timer); 222 red_offload(sch, false); 223 qdisc_put(q->qdisc); 224 } ··· 297 max_P); 298 red_set_vars(&q->vars); 299 300 - del_timer(&q->adapt_timer); 301 if (ctl->flags & TC_RED_ADAPTATIVE) 302 mod_timer(&q->adapt_timer, jiffies + HZ/2); 303
··· 218 219 tcf_qevent_destroy(&q->qe_mark, sch); 220 tcf_qevent_destroy(&q->qe_early_drop, sch); 221 + timer_delete_sync(&q->adapt_timer); 222 red_offload(sch, false); 223 qdisc_put(q->qdisc); 224 } ··· 297 max_P); 298 red_set_vars(&q->vars); 299 300 + timer_delete(&q->adapt_timer); 301 if (ctl->flags & TC_RED_ADAPTATIVE) 302 mod_timer(&q->adapt_timer, jiffies + HZ/2); 303
+2 -2
net/sched/sch_sfq.c
··· 696 rtnl_kfree_skbs(to_free, tail); 697 qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped); 698 699 - del_timer(&q->perturb_timer); 700 if (q->perturb_period) { 701 mod_timer(&q->perturb_timer, jiffies + q->perturb_period); 702 get_random_bytes(&q->perturbation, sizeof(q->perturbation)); ··· 722 723 tcf_block_put(q->block); 724 WRITE_ONCE(q->perturb_period, 0); 725 - del_timer_sync(&q->perturb_timer); 726 sfq_free(q->ht); 727 sfq_free(q->slots); 728 kfree(q->red_parms);
··· 696 rtnl_kfree_skbs(to_free, tail); 697 qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped); 698 699 + timer_delete(&q->perturb_timer); 700 if (q->perturb_period) { 701 mod_timer(&q->perturb_timer, jiffies + q->perturb_period); 702 get_random_bytes(&q->perturbation, sizeof(q->perturbation)); ··· 722 723 tcf_block_put(q->block); 724 WRITE_ONCE(q->perturb_period, 0); 725 + timer_delete_sync(&q->perturb_timer); 726 sfq_free(q->ht); 727 sfq_free(q->slots); 728 kfree(q->red_parms);
+2 -2
net/sctp/associola.c
··· 362 * on our state. 363 */ 364 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) { 365 - if (del_timer(&asoc->timers[i])) 366 sctp_association_put(asoc); 367 } 368 ··· 1521 1522 /* Stop the SACK timer. */ 1523 timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK]; 1524 - if (del_timer(timer)) 1525 sctp_association_put(asoc); 1526 } 1527 }
··· 362 * on our state. 363 */ 364 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) { 365 + if (timer_delete(&asoc->timers[i])) 366 sctp_association_put(asoc); 367 } 368 ··· 1521 1522 /* Stop the SACK timer. */ 1523 timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK]; 1524 + if (timer_delete(timer)) 1525 sctp_association_put(asoc); 1526 } 1527 }
+1 -1
net/sctp/input.c
··· 446 pr_debug("%s: unrecognized next header type " 447 "encountered!\n", __func__); 448 449 - if (del_timer(&t->proto_unreach_timer)) 450 sctp_transport_put(t); 451 452 sctp_do_sm(net, SCTP_EVENT_T_OTHER,
··· 446 pr_debug("%s: unrecognized next header type " 447 "encountered!\n", __func__); 448 449 + if (timer_delete(&t->proto_unreach_timer)) 450 sctp_transport_put(t); 451 452 sctp_do_sm(net, SCTP_EVENT_T_OTHER,
+1 -1
net/sctp/output.c
··· 312 SCTP_MIB_OUTCTRLCHUNKS); 313 asoc->stats.octrlchunks++; 314 asoc->peer.sack_needed = 0; 315 - if (del_timer(timer)) 316 sctp_association_put(asoc); 317 } 318 }
··· 312 SCTP_MIB_OUTCTRLCHUNKS); 313 asoc->stats.octrlchunks++; 314 asoc->peer.sack_needed = 0; 315 + if (timer_delete(timer)) 316 sctp_association_put(asoc); 317 } 318 }
+2 -3
net/sctp/outqueue.c
··· 1630 * as the receiver acknowledged any data. 1631 */ 1632 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING && 1633 - del_timer(&asoc->timers 1634 - [SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD])) 1635 sctp_association_put(asoc); 1636 1637 /* Mark the destination transport address as ··· 1687 * address. 1688 */ 1689 if (!transport->flight_size) { 1690 - if (del_timer(&transport->T3_rtx_timer)) 1691 sctp_transport_put(transport); 1692 } else if (restart_timer) { 1693 if (!mod_timer(&transport->T3_rtx_timer,
··· 1630 * as the receiver acknowledged any data. 1631 */ 1632 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING && 1633 + timer_delete(&asoc->timers[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD])) 1634 sctp_association_put(asoc); 1635 1636 /* Mark the destination transport address as ··· 1688 * address. 1689 */ 1690 if (!transport->flight_size) { 1691 + if (timer_delete(&transport->T3_rtx_timer)) 1692 sctp_transport_put(transport); 1693 } else if (restart_timer) { 1694 if (!mod_timer(&transport->T3_rtx_timer,
+1 -1
net/sctp/protocol.c
··· 695 struct sctp_sockaddr_entry *temp; 696 697 spin_lock_bh(&net->sctp.addr_wq_lock); 698 - del_timer(&net->sctp.addr_wq_timer); 699 list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) { 700 list_del(&addrw->list); 701 kfree(addrw);
··· 695 struct sctp_sockaddr_entry *temp; 696 697 spin_lock_bh(&net->sctp.addr_wq_lock); 698 + timer_delete(&net->sctp.addr_wq_timer); 699 list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) { 700 list_del(&addrw->list); 701 kfree(addrw);
+3 -3
net/sctp/sm_sideeffect.c
··· 734 735 list_for_each_entry(t, &asoc->peer.transport_addr_list, 736 transports) { 737 - if (del_timer(&t->hb_timer)) 738 sctp_transport_put(t); 739 } 740 } ··· 747 748 list_for_each_entry(t, &asoc->peer.transport_addr_list, 749 transports) { 750 - if (del_timer(&t->T3_rtx_timer)) 751 sctp_transport_put(t); 752 } 753 } ··· 1557 1558 case SCTP_CMD_TIMER_STOP: 1559 timer = &asoc->timers[cmd->obj.to]; 1560 - if (del_timer(timer)) 1561 sctp_association_put(asoc); 1562 break; 1563
··· 734 735 list_for_each_entry(t, &asoc->peer.transport_addr_list, 736 transports) { 737 + if (timer_delete(&t->hb_timer)) 738 sctp_transport_put(t); 739 } 740 } ··· 747 748 list_for_each_entry(t, &asoc->peer.transport_addr_list, 749 transports) { 750 + if (timer_delete(&t->T3_rtx_timer)) 751 sctp_transport_put(t); 752 } 753 } ··· 1557 1558 case SCTP_CMD_TIMER_STOP: 1559 timer = &asoc->timers[cmd->obj.to]; 1560 + if (timer_delete(timer)) 1561 sctp_association_put(asoc); 1562 break; 1563
+3 -3
net/sctp/stream.c
··· 576 struct sctp_transport *t; 577 578 t = asoc->strreset_chunk->transport; 579 - if (del_timer(&t->reconf_timer)) 580 sctp_transport_put(t); 581 582 sctp_chunk_put(asoc->strreset_chunk); ··· 825 struct sctp_transport *t; 826 827 t = asoc->strreset_chunk->transport; 828 - if (del_timer(&t->reconf_timer)) 829 sctp_transport_put(t); 830 831 sctp_chunk_put(asoc->strreset_chunk); ··· 1076 /* remove everything for this reconf request */ 1077 if (!asoc->strreset_outstanding) { 1078 t = asoc->strreset_chunk->transport; 1079 - if (del_timer(&t->reconf_timer)) 1080 sctp_transport_put(t); 1081 1082 sctp_chunk_put(asoc->strreset_chunk);
··· 576 struct sctp_transport *t; 577 578 t = asoc->strreset_chunk->transport; 579 + if (timer_delete(&t->reconf_timer)) 580 sctp_transport_put(t); 581 582 sctp_chunk_put(asoc->strreset_chunk); ··· 825 struct sctp_transport *t; 826 827 t = asoc->strreset_chunk->transport; 828 + if (timer_delete(&t->reconf_timer)) 829 sctp_transport_put(t); 830 831 sctp_chunk_put(asoc->strreset_chunk); ··· 1076 /* remove everything for this reconf request */ 1077 if (!asoc->strreset_outstanding) { 1078 t = asoc->strreset_chunk->transport; 1079 + if (timer_delete(&t->reconf_timer)) 1080 sctp_transport_put(t); 1081 1082 sctp_chunk_put(asoc->strreset_chunk);
+6 -6
net/sctp/transport.c
··· 118 void sctp_transport_free(struct sctp_transport *transport) 119 { 120 /* Try to delete the heartbeat timer. */ 121 - if (del_timer(&transport->hb_timer)) 122 sctp_transport_put(transport); 123 124 /* Delete the T3_rtx timer if it's active. ··· 126 * structure hang around in memory since we know 127 * the transport is going away. 128 */ 129 - if (del_timer(&transport->T3_rtx_timer)) 130 sctp_transport_put(transport); 131 132 - if (del_timer(&transport->reconf_timer)) 133 sctp_transport_put(transport); 134 135 - if (del_timer(&transport->probe_timer)) 136 sctp_transport_put(transport); 137 138 /* Delete the ICMP proto unreachable timer if it's active. */ 139 - if (del_timer(&transport->proto_unreach_timer)) 140 sctp_transport_put(transport); 141 142 sctp_transport_put(transport); ··· 829 void sctp_transport_immediate_rtx(struct sctp_transport *t) 830 { 831 /* Stop pending T3_rtx_timer */ 832 - if (del_timer(&t->T3_rtx_timer)) 833 sctp_transport_put(t); 834 835 sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
··· 118 void sctp_transport_free(struct sctp_transport *transport) 119 { 120 /* Try to delete the heartbeat timer. */ 121 + if (timer_delete(&transport->hb_timer)) 122 sctp_transport_put(transport); 123 124 /* Delete the T3_rtx timer if it's active. ··· 126 * structure hang around in memory since we know 127 * the transport is going away. 128 */ 129 + if (timer_delete(&transport->T3_rtx_timer)) 130 sctp_transport_put(transport); 131 132 + if (timer_delete(&transport->reconf_timer)) 133 sctp_transport_put(transport); 134 135 + if (timer_delete(&transport->probe_timer)) 136 sctp_transport_put(transport); 137 138 /* Delete the ICMP proto unreachable timer if it's active. */ 139 + if (timer_delete(&transport->proto_unreach_timer)) 140 sctp_transport_put(transport); 141 142 sctp_transport_put(transport); ··· 829 void sctp_transport_immediate_rtx(struct sctp_transport *t) 830 { 831 /* Stop pending T3_rtx_timer */ 832 + if (timer_delete(&t->T3_rtx_timer)) 833 sctp_transport_put(t); 834 835 sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
+2 -2
net/sunrpc/xprt.c
··· 1167 spin_unlock(&xprt->queue_lock); 1168 1169 /* Turn off autodisconnect */ 1170 - del_timer_sync(&xprt->timer); 1171 return 0; 1172 } 1173 ··· 2138 * can only run *before* del_time_sync(), never after. 2139 */ 2140 spin_lock(&xprt->transport_lock); 2141 - del_timer_sync(&xprt->timer); 2142 spin_unlock(&xprt->transport_lock); 2143 2144 /*
··· 1167 spin_unlock(&xprt->queue_lock); 1168 1169 /* Turn off autodisconnect */ 1170 + timer_delete_sync(&xprt->timer); 1171 return 0; 1172 } 1173 ··· 2138 * can only run *before* del_time_sync(), never after. 2139 */ 2140 spin_lock(&xprt->transport_lock); 2141 + timer_delete_sync(&xprt->timer); 2142 spin_unlock(&xprt->transport_lock); 2143 2144 /*
+1 -1
net/tipc/node.c
··· 638 trace_tipc_node_delete(node, true, " "); 639 tipc_node_delete_from_list(node); 640 641 - del_timer_sync(&node->timer); 642 tipc_node_put(node); 643 } 644
··· 638 trace_tipc_node_delete(node, true, " "); 639 tipc_node_delete_from_list(node); 640 641 + timer_delete_sync(&node->timer); 642 tipc_node_put(node); 643 } 644
+1 -1
net/tipc/subscr.c
··· 177 { 178 tipc_nametbl_unsubscribe(sub); 179 if (sub->evt.s.timeout != TIPC_WAIT_FOREVER) 180 - del_timer_sync(&sub->timer); 181 list_del(&sub->sub_list); 182 tipc_sub_put(sub); 183 }
··· 177 { 178 tipc_nametbl_unsubscribe(sub); 179 if (sub->evt.s.timeout != TIPC_WAIT_FOREVER) 180 + timer_delete_sync(&sub->timer); 181 list_del(&sub->sub_list); 182 tipc_sub_put(sub); 183 }
+3 -3
net/wireless/core.c
··· 1722 trace_wiphy_delayed_work_queue(wiphy, &dwork->work, delay); 1723 1724 if (!delay) { 1725 - del_timer(&dwork->timer); 1726 wiphy_work_queue(wiphy, &dwork->work); 1727 return; 1728 } ··· 1737 { 1738 lockdep_assert_held(&wiphy->mtx); 1739 1740 - del_timer_sync(&dwork->timer); 1741 wiphy_work_cancel(wiphy, &dwork->work); 1742 } 1743 EXPORT_SYMBOL_GPL(wiphy_delayed_work_cancel); ··· 1747 { 1748 lockdep_assert_held(&wiphy->mtx); 1749 1750 - del_timer_sync(&dwork->timer); 1751 wiphy_work_flush(wiphy, &dwork->work); 1752 } 1753 EXPORT_SYMBOL_GPL(wiphy_delayed_work_flush);
··· 1722 trace_wiphy_delayed_work_queue(wiphy, &dwork->work, delay); 1723 1724 if (!delay) { 1725 + timer_delete(&dwork->timer); 1726 wiphy_work_queue(wiphy, &dwork->work); 1727 return; 1728 } ··· 1737 { 1738 lockdep_assert_held(&wiphy->mtx); 1739 1740 + timer_delete_sync(&dwork->timer); 1741 wiphy_work_cancel(wiphy, &dwork->work); 1742 } 1743 EXPORT_SYMBOL_GPL(wiphy_delayed_work_cancel); ··· 1747 { 1748 lockdep_assert_held(&wiphy->mtx); 1749 1750 + timer_delete_sync(&dwork->timer); 1751 wiphy_work_flush(wiphy, &dwork->work); 1752 } 1753 EXPORT_SYMBOL_GPL(wiphy_delayed_work_flush);
+1 -1
net/x25/x25_link.c
··· 55 56 static inline void x25_stop_t20timer(struct x25_neigh *nb) 57 { 58 - del_timer(&nb->t20timer); 59 } 60 61 /*
··· 55 56 static inline void x25_stop_t20timer(struct x25_neigh *nb) 57 { 58 + timer_delete(&nb->t20timer); 59 } 60 61 /*
+2 -2
net/x25/x25_timer.c
··· 41 42 void x25_stop_heartbeat(struct sock *sk) 43 { 44 - del_timer(&sk->sk_timer); 45 } 46 47 void x25_start_t2timer(struct sock *sk) ··· 74 75 void x25_stop_timer(struct sock *sk) 76 { 77 - del_timer(&x25_sk(sk)->timer); 78 } 79 80 unsigned long x25_display_timer(struct sock *sk)
··· 41 42 void x25_stop_heartbeat(struct sock *sk) 43 { 44 + timer_delete(&sk->sk_timer); 45 } 46 47 void x25_start_t2timer(struct sock *sk) ··· 74 75 void x25_stop_timer(struct sock *sk) 76 { 77 + timer_delete(&x25_sk(sk)->timer); 78 } 79 80 unsigned long x25_display_timer(struct sock *sk)
+5 -5
net/xfrm/xfrm_policy.c
··· 462 { 463 BUG_ON(!policy->walk.dead); 464 465 - if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer)) 466 BUG(); 467 468 xfrm_dev_policy_free(policy); ··· 487 488 atomic_inc(&policy->genid); 489 490 - if (del_timer(&policy->polq.hold_timer)) 491 xfrm_pol_put(policy); 492 skb_queue_purge(&policy->polq.hold_queue); 493 494 - if (del_timer(&policy->timer)) 495 xfrm_pol_put(policy); 496 497 /* XXX: Flush state cache */ ··· 1469 1470 spin_lock_bh(&pq->hold_queue.lock); 1471 skb_queue_splice_init(&pq->hold_queue, &list); 1472 - if (del_timer(&pq->hold_timer)) 1473 xfrm_pol_put(old); 1474 spin_unlock_bh(&pq->hold_queue.lock); 1475 ··· 3004 3005 sched_next = jiffies + pq->timeout; 3006 3007 - if (del_timer(&pq->hold_timer)) { 3008 if (time_before(pq->hold_timer.expires, sched_next)) 3009 sched_next = pq->hold_timer.expires; 3010 xfrm_pol_put(pol);
··· 462 { 463 BUG_ON(!policy->walk.dead); 464 465 + if (timer_delete(&policy->timer) || timer_delete(&policy->polq.hold_timer)) 466 BUG(); 467 468 xfrm_dev_policy_free(policy); ··· 487 488 atomic_inc(&policy->genid); 489 490 + if (timer_delete(&policy->polq.hold_timer)) 491 xfrm_pol_put(policy); 492 skb_queue_purge(&policy->polq.hold_queue); 493 494 + if (timer_delete(&policy->timer)) 495 xfrm_pol_put(policy); 496 497 /* XXX: Flush state cache */ ··· 1469 1470 spin_lock_bh(&pq->hold_queue.lock); 1471 skb_queue_splice_init(&pq->hold_queue, &list); 1472 + if (timer_delete(&pq->hold_timer)) 1473 xfrm_pol_put(old); 1474 spin_unlock_bh(&pq->hold_queue.lock); 1475 ··· 3004 3005 sched_next = jiffies + pq->timeout; 3006 3007 + if (timer_delete(&pq->hold_timer)) { 3008 if (time_before(pq->hold_timer.expires, sched_next)) 3009 sched_next = pq->hold_timer.expires; 3010 xfrm_pol_put(pol);
+1 -1
net/xfrm/xfrm_state.c
··· 598 if (x->mode_cbs && x->mode_cbs->destroy_state) 599 x->mode_cbs->destroy_state(x); 600 hrtimer_cancel(&x->mtimer); 601 - del_timer_sync(&x->rtimer); 602 kfree(x->aead); 603 kfree(x->aalg); 604 kfree(x->ealg);
··· 598 if (x->mode_cbs && x->mode_cbs->destroy_state) 599 x->mode_cbs->destroy_state(x); 600 hrtimer_cancel(&x->mtimer); 601 + timer_delete_sync(&x->rtimer); 602 kfree(x->aead); 603 kfree(x->aalg); 604 kfree(x->ealg);
+1 -1
samples/connector/cn_test.c
··· 172 173 static void cn_test_fini(void) 174 { 175 - del_timer_sync(&cn_test_timer); 176 cn_del_callback(&cn_test_id); 177 cn_test_id.val--; 178 cn_del_callback(&cn_test_id);
··· 172 173 static void cn_test_fini(void) 174 { 175 + timer_delete_sync(&cn_test_timer); 176 cn_del_callback(&cn_test_id); 177 cn_test_id.val--; 178 cn_del_callback(&cn_test_id);
+1 -1
samples/ftrace/sample-trace-array.c
··· 82 while (!kthread_should_stop()) 83 simple_thread_func(count++); 84 85 - del_timer(&mytimer); 86 cancel_work_sync(&trace_work); 87 88 /*
··· 82 while (!kthread_should_stop()) 83 simple_thread_func(count++); 84 85 + timer_delete(&mytimer); 86 cancel_work_sync(&trace_work); 87 88 /*
+2 -2
sound/core/timer.c
··· 1152 unsigned long jiff; 1153 1154 priv = (struct snd_timer_system_private *) timer->private_data; 1155 - del_timer(&priv->tlist); 1156 jiff = jiffies; 1157 if (time_before(jiff, priv->last_expires)) 1158 timer->sticks = priv->last_expires - jiff; ··· 1167 struct snd_timer_system_private *priv; 1168 1169 priv = (struct snd_timer_system_private *)timer->private_data; 1170 - del_timer_sync(&priv->tlist); 1171 return 0; 1172 } 1173
··· 1152 unsigned long jiff; 1153 1154 priv = (struct snd_timer_system_private *) timer->private_data; 1155 + timer_delete(&priv->tlist); 1156 jiff = jiffies; 1157 if (time_before(jiff, priv->last_expires)) 1158 timer->sticks = priv->last_expires - jiff; ··· 1167 struct snd_timer_system_private *priv; 1168 1169 priv = (struct snd_timer_system_private *)timer->private_data; 1170 + timer_delete_sync(&priv->tlist); 1171 return 0; 1172 } 1173
+2 -2
sound/drivers/aloop.c
··· 261 /* call in cable->lock */ 262 static inline int loopback_jiffies_timer_stop(struct loopback_pcm *dpcm) 263 { 264 - del_timer(&dpcm->timer); 265 dpcm->timer.expires = 0; 266 267 return 0; ··· 292 293 static inline int loopback_jiffies_timer_stop_sync(struct loopback_pcm *dpcm) 294 { 295 - del_timer_sync(&dpcm->timer); 296 297 return 0; 298 }
··· 261 /* call in cable->lock */ 262 static inline int loopback_jiffies_timer_stop(struct loopback_pcm *dpcm) 263 { 264 + timer_delete(&dpcm->timer); 265 dpcm->timer.expires = 0; 266 267 return 0; ··· 292 293 static inline int loopback_jiffies_timer_stop_sync(struct loopback_pcm *dpcm) 294 { 295 + timer_delete_sync(&dpcm->timer); 296 297 return 0; 298 }
+1 -1
sound/drivers/dummy.c
··· 279 { 280 struct dummy_systimer_pcm *dpcm = substream->runtime->private_data; 281 spin_lock(&dpcm->lock); 282 - del_timer(&dpcm->timer); 283 spin_unlock(&dpcm->lock); 284 return 0; 285 }
··· 279 { 280 struct dummy_systimer_pcm *dpcm = substream->runtime->private_data; 281 spin_lock(&dpcm->lock); 282 + timer_delete(&dpcm->timer); 283 spin_unlock(&dpcm->lock); 284 return 0; 285 }
+1 -1
sound/drivers/mpu401/mpu401_uart.c
··· 197 mpu->timer_invoked &= input ? ~MPU401_MODE_INPUT_TIMER : 198 ~MPU401_MODE_OUTPUT_TIMER; 199 if (! mpu->timer_invoked) 200 - del_timer(&mpu->timer); 201 } 202 spin_unlock_irqrestore (&mpu->timer_lock, flags); 203 }
··· 197 mpu->timer_invoked &= input ? ~MPU401_MODE_INPUT_TIMER : 198 ~MPU401_MODE_OUTPUT_TIMER; 199 if (! mpu->timer_invoked) 200 + timer_delete(&mpu->timer); 201 } 202 spin_unlock_irqrestore (&mpu->timer_lock, flags); 203 }
+1 -1
sound/drivers/mtpav.c
··· 412 /* spinlock held! */ 413 static void snd_mtpav_remove_output_timer(struct mtpav *chip) 414 { 415 - del_timer(&chip->timer); 416 } 417 418 /*
··· 412 /* spinlock held! */ 413 static void snd_mtpav_remove_output_timer(struct mtpav *chip) 414 { 415 + timer_delete(&chip->timer); 416 } 417 418 /*
+1 -1
sound/drivers/opl3/opl3_seq.c
··· 74 /* Stop system timer */ 75 spin_lock_irqsave(&opl3->sys_timer_lock, flags); 76 if (opl3->sys_timer_status) { 77 - del_timer(&opl3->tlist); 78 opl3->sys_timer_status = 0; 79 } 80 spin_unlock_irqrestore(&opl3->sys_timer_lock, flags);
··· 74 /* Stop system timer */ 75 spin_lock_irqsave(&opl3->sys_timer_lock, flags); 76 if (opl3->sys_timer_status) { 77 + timer_delete(&opl3->tlist); 78 opl3->sys_timer_status = 0; 79 } 80 spin_unlock_irqrestore(&opl3->sys_timer_lock, flags);
+1 -1
sound/drivers/serial-u16550.c
··· 166 static inline void snd_uart16550_del_timer(struct snd_uart16550 *uart) 167 { 168 if (uart->timer_running) { 169 - del_timer(&uart->buffer_timer); 170 uart->timer_running = 0; 171 } 172 }
··· 166 static inline void snd_uart16550_del_timer(struct snd_uart16550 *uart) 167 { 168 if (uart->timer_running) { 169 + timer_delete(&uart->buffer_timer); 170 uart->timer_running = 0; 171 } 172 }
+1 -1
sound/i2c/other/ak4117.c
··· 99 { 100 unsigned char old = chip->regmap[AK4117_REG_PWRDN], reg; 101 102 - del_timer(&chip->timer); 103 chip->init = 1; 104 /* bring the chip to reset state and powerdown state */ 105 reg_write(chip, AK4117_REG_PWRDN, 0);
··· 99 { 100 unsigned char old = chip->regmap[AK4117_REG_PWRDN], reg; 101 102 + timer_delete(&chip->timer); 103 chip->init = 1; 104 /* bring the chip to reset state and powerdown state */ 105 reg_write(chip, AK4117_REG_PWRDN, 0);
+1 -1
sound/isa/sb/emu8000_pcm.c
··· 364 /* stop timer */ 365 spin_lock_irqsave(&rec->timer_lock, flags); 366 if (rec->timer_running) { 367 - del_timer(&rec->timer); 368 rec->timer_running = 0; 369 } 370 spin_unlock_irqrestore(&rec->timer_lock, flags);
··· 364 /* stop timer */ 365 spin_lock_irqsave(&rec->timer_lock, flags); 366 if (rec->timer_running) { 367 + timer_delete(&rec->timer); 368 rec->timer_running = 0; 369 } 370 spin_unlock_irqrestore(&rec->timer_lock, flags);
+2 -2
sound/isa/sb/sb8_midi.c
··· 125 struct snd_sb *chip; 126 127 chip = substream->rmidi->private_data; 128 - del_timer_sync(&chip->midi_timer); 129 spin_lock_irqsave(&chip->open_lock, flags); 130 chip->open &= ~(SB_OPEN_MIDI_OUTPUT | SB_OPEN_MIDI_OUTPUT_TRIGGER); 131 chip->midi_substream_output = NULL; ··· 174 spin_lock_irqsave(&chip->open_lock, flags); 175 if (snd_rawmidi_transmit_peek(substream, &byte, 1) != 1) { 176 chip->open &= ~SB_OPEN_MIDI_OUTPUT_TRIGGER; 177 - del_timer(&chip->midi_timer); 178 spin_unlock_irqrestore(&chip->open_lock, flags); 179 break; 180 }
··· 125 struct snd_sb *chip; 126 127 chip = substream->rmidi->private_data; 128 + timer_delete_sync(&chip->midi_timer); 129 spin_lock_irqsave(&chip->open_lock, flags); 130 chip->open &= ~(SB_OPEN_MIDI_OUTPUT | SB_OPEN_MIDI_OUTPUT_TRIGGER); 131 chip->midi_substream_output = NULL; ··· 174 spin_lock_irqsave(&chip->open_lock, flags); 175 if (snd_rawmidi_transmit_peek(substream, &byte, 1) != 1) { 176 chip->open &= ~SB_OPEN_MIDI_OUTPUT_TRIGGER; 177 + timer_delete(&chip->midi_timer); 178 spin_unlock_irqrestore(&chip->open_lock, flags); 179 break; 180 }
+2 -2
sound/isa/wavefront/wavefront_midi.c
··· 157 } else { 158 if (midi->istimer) { 159 if (--midi->istimer <= 0) 160 - del_timer(&midi->timer); 161 } 162 midi->mode[midi->output_mpu] &= ~MPU401_MODE_OUTPUT_TRIGGER; 163 spin_unlock_irqrestore (&midi->virtual, flags); ··· 212 __timer: 213 if (midi->istimer) { 214 if (--midi->istimer <= 0) 215 - del_timer(&midi->timer); 216 } 217 midi->mode[mpu] &= ~MPU401_MODE_OUTPUT_TRIGGER; 218 spin_unlock_irqrestore (&midi->virtual, flags);
··· 157 } else { 158 if (midi->istimer) { 159 if (--midi->istimer <= 0) 160 + timer_delete(&midi->timer); 161 } 162 midi->mode[midi->output_mpu] &= ~MPU401_MODE_OUTPUT_TRIGGER; 163 spin_unlock_irqrestore (&midi->virtual, flags); ··· 212 __timer: 213 if (midi->istimer) { 214 if (--midi->istimer <= 0) 215 + timer_delete(&midi->timer); 216 } 217 midi->mode[mpu] &= ~MPU401_MODE_OUTPUT_TRIGGER; 218 spin_unlock_irqrestore (&midi->virtual, flags);
+1 -1
sound/pci/asihpi/asihpi.c
··· 518 struct snd_card_asihpi_pcm *dpcm = runtime->private_data; 519 520 dpcm->respawn_timer = 0; 521 - del_timer(&dpcm->timer); 522 } 523 524 static void snd_card_asihpi_pcm_int_start(struct snd_pcm_substream *substream)
··· 518 struct snd_card_asihpi_pcm *dpcm = runtime->private_data; 519 520 dpcm->respawn_timer = 0; 521 + timer_delete(&dpcm->timer); 522 } 523 524 static void snd_card_asihpi_pcm_int_start(struct snd_pcm_substream *substream)
+1 -1
sound/pci/ctxfi/cttimer.c
··· 112 113 spin_lock_irqsave(&ti->lock, flags); 114 ti->running = 0; 115 - del_timer(&ti->timer); 116 spin_unlock_irqrestore(&ti->lock, flags); 117 } 118
··· 112 113 spin_lock_irqsave(&ti->lock, flags); 114 ti->running = 0; 115 + timer_delete(&ti->timer); 116 spin_unlock_irqrestore(&ti->lock, flags); 117 } 118
+1 -1
sound/pci/echoaudio/midi.c
··· 264 if (chip->tinuse) { 265 chip->tinuse = 0; 266 spin_unlock_irq(&chip->lock); 267 - del_timer_sync(&chip->timer); 268 dev_dbg(chip->card->dev, "Timer removed\n"); 269 return; 270 }
··· 264 if (chip->tinuse) { 265 chip->tinuse = 0; 266 spin_unlock_irq(&chip->lock); 267 + timer_delete_sync(&chip->timer); 268 dev_dbg(chip->card->dev, "Timer removed\n"); 269 return; 270 }
+1 -1
sound/pci/rme9652/hdsp.c
··· 1427 } 1428 } else { 1429 if (hmidi->istimer && --hmidi->istimer <= 0) 1430 - del_timer (&hmidi->timer); 1431 } 1432 spin_unlock_irqrestore (&hmidi->lock, flags); 1433 if (up)
··· 1427 } 1428 } else { 1429 if (hmidi->istimer && --hmidi->istimer <= 0) 1430 + timer_delete(&hmidi->timer); 1431 } 1432 spin_unlock_irqrestore (&hmidi->lock, flags); 1433 if (up)
+1 -1
sound/pci/rme9652/hdspm.c
··· 1978 } 1979 } else { 1980 if (hmidi->istimer && --hmidi->istimer <= 0) 1981 - del_timer (&hmidi->timer); 1982 } 1983 spin_unlock_irqrestore (&hmidi->lock, flags); 1984 if (up)
··· 1978 } 1979 } else { 1980 if (hmidi->istimer && --hmidi->istimer <= 0) 1981 + timer_delete(&hmidi->timer); 1982 } 1983 spin_unlock_irqrestore (&hmidi->lock, flags); 1984 if (up)
+1 -1
sound/sh/aica.c
··· 354 { 355 struct snd_card_aica *dreamcastcard = substream->pcm->private_data; 356 357 - del_timer_sync(&dreamcastcard->timer); 358 cancel_work_sync(&dreamcastcard->spu_dma_work); 359 return 0; 360 }
··· 354 { 355 struct snd_card_aica *dreamcastcard = substream->pcm->private_data; 356 357 + timer_delete_sync(&dreamcastcard->timer); 358 cancel_work_sync(&dreamcastcard->spu_dma_work); 359 return 0; 360 }
+2 -2
sound/soc/codecs/rt5645.c
··· 4286 * Since the rt5645_btn_check_callback() can queue jack_detect_work, 4287 * the timer need to be delted first 4288 */ 4289 - del_timer_sync(&rt5645->btn_check_timer); 4290 4291 cancel_delayed_work_sync(&rt5645->jack_detect_work); 4292 cancel_delayed_work_sync(&rt5645->rcclock_work); ··· 4318 { 4319 struct rt5645_priv *rt5645 = dev_get_drvdata(dev); 4320 4321 - del_timer_sync(&rt5645->btn_check_timer); 4322 cancel_delayed_work_sync(&rt5645->jack_detect_work); 4323 cancel_delayed_work_sync(&rt5645->rcclock_work); 4324
··· 4286 * Since the rt5645_btn_check_callback() can queue jack_detect_work, 4287 * the timer need to be delted first 4288 */ 4289 + timer_delete_sync(&rt5645->btn_check_timer); 4290 4291 cancel_delayed_work_sync(&rt5645->jack_detect_work); 4292 cancel_delayed_work_sync(&rt5645->rcclock_work); ··· 4318 { 4319 struct rt5645_priv *rt5645 = dev_get_drvdata(dev); 4320 4321 + timer_delete_sync(&rt5645->btn_check_timer); 4322 cancel_delayed_work_sync(&rt5645->jack_detect_work); 4323 cancel_delayed_work_sync(&rt5645->rcclock_work); 4324
+2 -2
sound/soc/fsl/imx-pcm-rpmsg.c
··· 301 302 info->send_message(msg, info); 303 304 - del_timer(&info->stream_timer[substream->stream].timer); 305 306 rtd->dai_link->ignore_suspend = 0; 307 ··· 452 info->msg[RX_POINTER].r_msg.param.buffer_offset = 0; 453 } 454 455 - del_timer(&info->stream_timer[substream->stream].timer); 456 457 return imx_rpmsg_insert_workqueue(substream, msg, info); 458 }
··· 301 302 info->send_message(msg, info); 303 304 + timer_delete(&info->stream_timer[substream->stream].timer); 305 306 rtd->dai_link->ignore_suspend = 0; 307 ··· 452 info->msg[RX_POINTER].r_msg.param.buffer_offset = 0; 453 } 454 455 + timer_delete(&info->stream_timer[substream->stream].timer); 456 457 return imx_rpmsg_insert_workqueue(substream, msg, info); 458 }
+1 -1
sound/soc/ti/ams-delta.c
··· 303 struct snd_soc_component *component = tty->disc_data; 304 struct snd_soc_dapm_context *dapm; 305 306 - del_timer_sync(&cx81801_timer); 307 308 /* Prevent the hook switch from further changing the DAPM pins */ 309 INIT_LIST_HEAD(&ams_delta_hook_switch.pins);
··· 303 struct snd_soc_component *component = tty->disc_data; 304 struct snd_soc_dapm_context *dapm; 305 306 + timer_delete_sync(&cx81801_timer); 307 308 /* Prevent the hook switch from further changing the DAPM pins */ 309 INIT_LIST_HEAD(&ams_delta_hook_switch.pins);
+1 -1
sound/usb/midi.c
··· 1553 spin_unlock_irq(&umidi->disc_lock); 1554 up_write(&umidi->disc_rwsem); 1555 1556 - del_timer_sync(&umidi->error_timer); 1557 1558 for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) { 1559 struct snd_usb_midi_endpoint *ep = &umidi->endpoints[i];
··· 1553 spin_unlock_irq(&umidi->disc_lock); 1554 up_write(&umidi->disc_rwsem); 1555 1556 + timer_delete_sync(&umidi->error_timer); 1557 1558 for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) { 1559 struct snd_usb_midi_endpoint *ep = &umidi->endpoints[i];
+1 -1
tools/perf/tests/shell/trace_btf_enum.sh
··· 6 set -e 7 8 syscall="landlock_add_rule" 9 - non_syscall="timer:hrtimer_init,timer:hrtimer_start" 10 11 TESTPROG="perf test -w landlock" 12
··· 6 set -e 7 8 syscall="landlock_add_rule" 9 + non_syscall="timer:hrtimer_setup,timer:hrtimer_start" 10 11 TESTPROG="perf test -w landlock" 12
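For reference, a minimal sketch of the renamed APIs the conversion above targets — timer_delete[_sync]() in place of del_timer[_sync](), and hrtimer_setup() in place of hrtimer_init() plus a ->function assignment. The struct, callback and function names below are illustrative only and are not taken from this merge:

/*
 * Hedged sketch of the post-conversion timer APIs; "demo_dev" and the
 * demo_* helpers are hypothetical, not code from any of the files above.
 */
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>

struct demo_dev {
	struct timer_list poll_timer;
	struct hrtimer tick;
};

static void demo_poll_expired(struct timer_list *t)
{
	struct demo_dev *dev = from_timer(dev, t, poll_timer);

	/* Re-arm the jiffies-based timer one second out. */
	mod_timer(&dev->poll_timer, jiffies + HZ);
}

static enum hrtimer_restart demo_tick(struct hrtimer *hrt)
{
	return HRTIMER_NORESTART;
}

static void demo_setup(struct demo_dev *dev)
{
	/* timer_setup() is unchanged; only the deletion side was renamed. */
	timer_setup(&dev->poll_timer, demo_poll_expired, 0);
	mod_timer(&dev->poll_timer, jiffies + HZ);

	/*
	 * hrtimer_setup() takes the callback directly, replacing the old
	 * hrtimer_init() call followed by a separate ->function assignment.
	 */
	hrtimer_setup(&dev->tick, demo_tick, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_start(&dev->tick, ms_to_ktime(10), HRTIMER_MODE_REL);
}

static void demo_teardown(struct demo_dev *dev)
{
	/*
	 * timer_delete_sync() replaces del_timer_sync(): it waits for a
	 * running callback to finish, so it must not be called while holding
	 * a lock that the callback itself takes. timer_delete() replaces
	 * del_timer() and, like it, returns whether the timer was pending.
	 */
	timer_delete_sync(&dev->poll_timer);
	hrtimer_cancel(&dev->tick);
}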