Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'timers-cleanups-2025-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer cleanups from Thomas Gleixner:
"Another set of timer API cleanups:

- Convert init_timer*(), try_to_del_timer_sync() and
destroy_timer_on_stack() over to the canonical timer_*()
namespace convention.

There is another large conversion pending, which has not been included
because it would have caused a gazillion of merge conflicts in next.
The conversion scripts will be run towards the end of the merge window
and a pull request sent once all conflict dependencies have been
merged"

* tag 'timers-cleanups-2025-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
treewide, timers: Rename destroy_timer_on_stack() as timer_destroy_on_stack()
treewide, timers: Rename try_to_del_timer_sync() as timer_delete_sync_try()
timers: Rename init_timers() as timers_init()
timers: Rename NEXT_TIMER_MAX_DELTA as TIMER_NEXT_MAX_DELTA
timers: Rename __init_timer_on_stack() as __timer_init_on_stack()
timers: Rename __init_timer() as __timer_init()
timers: Rename init_timer_on_stack_key() as timer_init_key_on_stack()
timers: Rename init_timer_key() as timer_init_key()

+78 -78
+4 -4
arch/powerpc/kvm/booke.c
··· 572 572 573 573 /* 574 574 * Return the number of jiffies until the next timeout. If the timeout is 575 - * longer than the NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA 575 + * longer than the TIMER_NEXT_MAX_DELTA, then return TIMER_NEXT_MAX_DELTA 576 576 * because the larger value can break the timer APIs. 577 577 */ 578 578 static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu) ··· 598 598 if (do_div(nr_jiffies, tb_ticks_per_jiffy)) 599 599 nr_jiffies++; 600 600 601 - return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA); 601 + return min_t(unsigned long long, nr_jiffies, TIMER_NEXT_MAX_DELTA); 602 602 } 603 603 604 604 static void arm_next_watchdog(struct kvm_vcpu *vcpu) ··· 616 616 spin_lock_irqsave(&vcpu->arch.wdt_lock, flags); 617 617 nr_jiffies = watchdog_next_timeout(vcpu); 618 618 /* 619 - * If the number of jiffies of watchdog timer >= NEXT_TIMER_MAX_DELTA 619 + * If the number of jiffies of watchdog timer >= TIMER_NEXT_MAX_DELTA 620 620 * then do not run the watchdog timer as this can break timer APIs. 621 621 */ 622 - if (nr_jiffies < NEXT_TIMER_MAX_DELTA) 622 + if (nr_jiffies < TIMER_NEXT_MAX_DELTA) 623 623 mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies); 624 624 else 625 625 timer_delete(&vcpu->arch.wdt_timer);
+1 -1
arch/x86/kernel/apic/vector.c
··· 864 864 __vector_cleanup(cl, false); 865 865 866 866 irq_matrix_offline(vector_matrix); 867 - WARN_ON_ONCE(try_to_del_timer_sync(&cl->timer) < 0); 867 + WARN_ON_ONCE(timer_delete_sync_try(&cl->timer) < 0); 868 868 WARN_ON_ONCE(!hlist_empty(&cl->head)); 869 869 870 870 unlock_vector_lock();
+1 -1
drivers/base/power/main.c
··· 560 560 struct timer_list *timer = &wd->timer; 561 561 562 562 timer_delete_sync(timer); 563 - destroy_timer_on_stack(timer); 563 + timer_destroy_on_stack(timer); 564 564 } 565 565 #else 566 566 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
+3 -3
drivers/char/random.c
··· 1312 1312 while (!crng_ready() && !signal_pending(current)) { 1313 1313 /* 1314 1314 * Check !timer_pending() and then ensure that any previous callback has finished 1315 - * executing by checking try_to_del_timer_sync(), before queueing the next one. 1315 + * executing by checking timer_delete_sync_try(), before queueing the next one. 1316 1316 */ 1317 - if (!timer_pending(&stack->timer) && try_to_del_timer_sync(&stack->timer) >= 0) { 1317 + if (!timer_pending(&stack->timer) && timer_delete_sync_try(&stack->timer) >= 0) { 1318 1318 struct cpumask timer_cpus; 1319 1319 unsigned int num_cpus; 1320 1320 ··· 1354 1354 mix_pool_bytes(&stack->entropy, sizeof(stack->entropy)); 1355 1355 1356 1356 timer_delete_sync(&stack->timer); 1357 - destroy_timer_on_stack(&stack->timer); 1357 + timer_destroy_on_stack(&stack->timer); 1358 1358 } 1359 1359 1360 1360
+1 -1
drivers/dma-buf/st-dma-fence.c
··· 413 413 err = 0; 414 414 err_free: 415 415 timer_delete_sync(&wt.timer); 416 - destroy_timer_on_stack(&wt.timer); 416 + timer_destroy_on_stack(&wt.timer); 417 417 dma_fence_signal(wt.f); 418 418 dma_fence_put(wt.f); 419 419 return err;
+1 -1
drivers/firewire/core-transaction.c
··· 431 431 fw_send_request(card, &t, tcode, destination_id, generation, speed, 432 432 offset, payload, length, transaction_callback, &d); 433 433 wait_for_completion(&d.done); 434 - destroy_timer_on_stack(&t.split_timeout_timer); 434 + timer_destroy_on_stack(&t.split_timeout_timer); 435 435 436 436 return d.rcode; 437 437 }
+1 -1
drivers/firmware/psci/psci_checker.c
··· 343 343 * later. 344 344 */ 345 345 timer_delete(&wakeup_timer); 346 - destroy_timer_on_stack(&wakeup_timer); 346 + timer_destroy_on_stack(&wakeup_timer); 347 347 348 348 if (atomic_dec_return_relaxed(&nb_active_threads) == 0) 349 349 complete(&suspend_threads_done);
+1 -1
drivers/gpu/drm/gud/gud_pipe.c
··· 261 261 else if (ctx.sgr.bytes != len) 262 262 ret = -EIO; 263 263 264 - destroy_timer_on_stack(&ctx.timer); 264 + timer_destroy_on_stack(&ctx.timer); 265 265 266 266 return ret; 267 267 }
+1 -1
drivers/gpu/drm/i915/gt/selftest_migrate.c
··· 661 661 out_rq: 662 662 i915_request_add(rq); /* GEM_BUG_ON(rq->reserved_space > ring->space)? */ 663 663 timer_delete_sync(&st.timer); 664 - destroy_timer_on_stack(&st.timer); 664 + timer_destroy_on_stack(&st.timer); 665 665 out_unpin: 666 666 intel_context_unpin(ce); 667 667 out_put:
+1 -1
drivers/gpu/drm/i915/selftests/lib_sw_fence.c
··· 77 77 if (timer_delete_sync(&tf->timer)) 78 78 i915_sw_fence_commit(&tf->fence); 79 79 80 - destroy_timer_on_stack(&tf->timer); 80 + timer_destroy_on_stack(&tf->timer); 81 81 i915_sw_fence_fini(&tf->fence); 82 82 } 83 83
+1 -1
drivers/gpu/drm/ttm/tests/ttm_bo_test.c
··· 201 201 err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx); 202 202 203 203 timer_delete_sync(&s_timer.timer); 204 - destroy_timer_on_stack(&s_timer.timer); 204 + timer_destroy_on_stack(&s_timer.timer); 205 205 206 206 ww_acquire_fini(&ctx); 207 207
+1 -1
drivers/irqchip/irq-riscv-imsic-state.c
··· 564 564 struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv); 565 565 566 566 raw_spin_lock_irqsave(&lpriv->lock, flags); 567 - WARN_ON_ONCE(try_to_del_timer_sync(&lpriv->timer) < 0); 567 + WARN_ON_ONCE(timer_delete_sync_try(&lpriv->timer) < 0); 568 568 raw_spin_unlock_irqrestore(&lpriv->lock, flags); 569 569 #endif 570 570 }
+1 -1
drivers/media/usb/pvrusb2/pvrusb2-hdw.c
··· 3806 3806 if ((status < 0) && (!probe_fl)) { 3807 3807 pvr2_hdw_render_useless(hdw); 3808 3808 } 3809 - destroy_timer_on_stack(&timer.timer); 3809 + timer_destroy_on_stack(&timer.timer); 3810 3810 3811 3811 return status; 3812 3812 }
+1 -1
drivers/scsi/megaraid/megaraid_mbox.c
··· 3952 3952 3953 3953 3954 3954 timer_delete_sync(&timeout.timer); 3955 - destroy_timer_on_stack(&timeout.timer); 3955 + timer_destroy_on_stack(&timeout.timer); 3956 3956 3957 3957 mutex_unlock(&raid_dev->sysfs_mtx); 3958 3958
+1 -1
drivers/scsi/megaraid/megaraid_mm.c
··· 704 704 wait_event(wait_q, (kioc->status != -ENODATA)); 705 705 if (timeout.timer.function) { 706 706 timer_delete_sync(&timeout.timer); 707 - destroy_timer_on_stack(&timeout.timer); 707 + timer_destroy_on_stack(&timeout.timer); 708 708 } 709 709 710 710 /*
+1 -1
drivers/staging/gpib/common/iblib.c
··· 611 611 static void remove_wait_timer(struct wait_info *winfo) 612 612 { 613 613 timer_delete_sync(&winfo->timer); 614 - destroy_timer_on_stack(&winfo->timer); 614 + timer_destroy_on_stack(&winfo->timer); 615 615 } 616 616 617 617 /*
+1 -1
drivers/usb/atm/cxacru.c
··· 598 598 mod_timer(&timer.timer, jiffies + msecs_to_jiffies(CMD_TIMEOUT)); 599 599 wait_for_completion(done); 600 600 timer_delete_sync(&timer.timer); 601 - destroy_timer_on_stack(&timer.timer); 601 + timer_destroy_on_stack(&timer.timer); 602 602 603 603 if (actual_length) 604 604 *actual_length = urb->actual_length;
+1 -1
drivers/usb/misc/usbtest.c
··· 630 630 retval = -ETIMEDOUT; 631 631 else 632 632 retval = req->status; 633 - destroy_timer_on_stack(&timeout.timer); 633 + timer_destroy_on_stack(&timeout.timer); 634 634 635 635 /* FIXME check resulting data pattern */ 636 636
+1 -1
fs/bcachefs/clock.c
··· 122 122 123 123 __set_current_state(TASK_RUNNING); 124 124 timer_delete_sync(&wait.cpu_timer); 125 - destroy_timer_on_stack(&wait.cpu_timer); 125 + timer_destroy_on_stack(&wait.cpu_timer); 126 126 bch2_io_timer_del(clock, &wait.io_timer); 127 127 } 128 128
+20 -20
include/linux/timer.h
··· 67 67 /* 68 68 * LOCKDEP and DEBUG timer interfaces. 69 69 */ 70 - void init_timer_key(struct timer_list *timer, 70 + void timer_init_key(struct timer_list *timer, 71 71 void (*func)(struct timer_list *), unsigned int flags, 72 72 const char *name, struct lock_class_key *key); 73 73 74 74 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS 75 - extern void init_timer_on_stack_key(struct timer_list *timer, 75 + extern void timer_init_key_on_stack(struct timer_list *timer, 76 76 void (*func)(struct timer_list *), 77 77 unsigned int flags, const char *name, 78 78 struct lock_class_key *key); 79 79 #else 80 - static inline void init_timer_on_stack_key(struct timer_list *timer, 80 + static inline void timer_init_key_on_stack(struct timer_list *timer, 81 81 void (*func)(struct timer_list *), 82 82 unsigned int flags, 83 83 const char *name, 84 84 struct lock_class_key *key) 85 85 { 86 - init_timer_key(timer, func, flags, name, key); 86 + timer_init_key(timer, func, flags, name, key); 87 87 } 88 88 #endif 89 89 90 90 #ifdef CONFIG_LOCKDEP 91 - #define __init_timer(_timer, _fn, _flags) \ 91 + #define __timer_init(_timer, _fn, _flags) \ 92 92 do { \ 93 93 static struct lock_class_key __key; \ 94 - init_timer_key((_timer), (_fn), (_flags), #_timer, &__key);\ 94 + timer_init_key((_timer), (_fn), (_flags), #_timer, &__key);\ 95 95 } while (0) 96 96 97 - #define __init_timer_on_stack(_timer, _fn, _flags) \ 97 + #define __timer_init_on_stack(_timer, _fn, _flags) \ 98 98 do { \ 99 99 static struct lock_class_key __key; \ 100 - init_timer_on_stack_key((_timer), (_fn), (_flags), \ 100 + timer_init_key_on_stack((_timer), (_fn), (_flags), \ 101 101 #_timer, &__key); \ 102 102 } while (0) 103 103 #else 104 - #define __init_timer(_timer, _fn, _flags) \ 105 - init_timer_key((_timer), (_fn), (_flags), NULL, NULL) 106 - #define __init_timer_on_stack(_timer, _fn, _flags) \ 107 - init_timer_on_stack_key((_timer), (_fn), (_flags), NULL, NULL) 104 + #define __timer_init(_timer, _fn, _flags) \ 105 + timer_init_key((_timer), (_fn), (_flags), NULL, NULL) 106 + #define __timer_init_on_stack(_timer, _fn, _flags) \ 107 + timer_init_key_on_stack((_timer), (_fn), (_flags), NULL, NULL) 108 108 #endif 109 109 110 110 /** ··· 115 115 * 116 116 * Regular timer initialization should use either DEFINE_TIMER() above, 117 117 * or timer_setup(). For timers on the stack, timer_setup_on_stack() must 118 - * be used and must be balanced with a call to destroy_timer_on_stack(). 118 + * be used and must be balanced with a call to timer_destroy_on_stack(). 119 119 */ 120 120 #define timer_setup(timer, callback, flags) \ 121 - __init_timer((timer), (callback), (flags)) 121 + __timer_init((timer), (callback), (flags)) 122 122 123 123 #define timer_setup_on_stack(timer, callback, flags) \ 124 - __init_timer_on_stack((timer), (callback), (flags)) 124 + __timer_init_on_stack((timer), (callback), (flags)) 125 125 126 126 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS 127 - extern void destroy_timer_on_stack(struct timer_list *timer); 127 + extern void timer_destroy_on_stack(struct timer_list *timer); 128 128 #else 129 - static inline void destroy_timer_on_stack(struct timer_list *timer) { } 129 + static inline void timer_destroy_on_stack(struct timer_list *timer) { } 130 130 #endif 131 131 132 132 #define from_timer(var, callback_timer, timer_fieldname) \ ··· 156 156 * The jiffies value which is added to now, when there is no timer 157 157 * in the timer wheel: 158 158 */ 159 - #define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1) 159 + #define TIMER_NEXT_MAX_DELTA ((1UL << 30) - 1) 160 160 161 161 extern void add_timer(struct timer_list *timer); 162 162 extern void add_timer_local(struct timer_list *timer); 163 163 extern void add_timer_global(struct timer_list *timer); 164 164 165 - extern int try_to_del_timer_sync(struct timer_list *timer); 165 + extern int timer_delete_sync_try(struct timer_list *timer); 166 166 extern int timer_delete_sync(struct timer_list *timer); 167 167 extern int timer_delete(struct timer_list *timer); 168 168 extern int timer_shutdown_sync(struct timer_list *timer); 169 169 extern int timer_shutdown(struct timer_list *timer); 170 170 171 - extern void init_timers(void); 171 + extern void timers_init(void); 172 172 struct hrtimer; 173 173 extern enum hrtimer_restart it_real_fn(struct hrtimer *); 174 174
+2 -2
include/linux/workqueue.h
··· 316 316 #define __INIT_DELAYED_WORK(_work, _func, _tflags) \ 317 317 do { \ 318 318 INIT_WORK(&(_work)->work, (_func)); \ 319 - __init_timer(&(_work)->timer, \ 319 + __timer_init(&(_work)->timer, \ 320 320 delayed_work_timer_fn, \ 321 321 (_tflags) | TIMER_IRQSAFE); \ 322 322 } while (0) ··· 324 324 #define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags) \ 325 325 do { \ 326 326 INIT_WORK_ONSTACK(&(_work)->work, (_func)); \ 327 - __init_timer_on_stack(&(_work)->timer, \ 327 + __timer_init_on_stack(&(_work)->timer, \ 328 328 delayed_work_timer_fn, \ 329 329 (_tflags) | TIMER_IRQSAFE); \ 330 330 } while (0)
+1 -1
init/main.c
··· 1002 1002 init_IRQ(); 1003 1003 tick_init(); 1004 1004 rcu_init_nohz(); 1005 - init_timers(); 1005 + timers_init(); 1006 1006 srcu_init(); 1007 1007 hrtimers_init(); 1008 1008 softirq_init();
+1 -1
kernel/kcsan/kcsan_test.c
··· 1501 1501 } 1502 1502 } while (!torture_must_stop()); 1503 1503 timer_delete_sync(&timer); 1504 - destroy_timer_on_stack(&timer); 1504 + timer_destroy_on_stack(&timer); 1505 1505 1506 1506 torture_kthread_stopping("access_thread"); 1507 1507 return 0;
+1 -1
kernel/rcu/rcutorture.c
··· 2371 2371 } while (!torture_must_stop()); 2372 2372 if (irqreader && cur_ops->irq_capable) { 2373 2373 timer_delete_sync(&t); 2374 - destroy_timer_on_stack(&t); 2374 + timer_destroy_on_stack(&t); 2375 2375 } 2376 2376 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2377 2377 torture_kthread_stopping("rcu_torture_reader");
+1 -1
kernel/time/sleep_timeout.c
··· 100 100 timer_delete_sync(&timer.timer); 101 101 102 102 /* Remove the timer from the object tracker */ 103 - destroy_timer_on_stack(&timer.timer); 103 + timer_destroy_on_stack(&timer.timer); 104 104 105 105 timeout = expire - jiffies; 106 106
+18 -18
kernel/time/timer.c
··· 850 850 unsigned int flags, 851 851 const char *name, struct lock_class_key *key); 852 852 853 - void init_timer_on_stack_key(struct timer_list *timer, 853 + void timer_init_key_on_stack(struct timer_list *timer, 854 854 void (*func)(struct timer_list *), 855 855 unsigned int flags, 856 856 const char *name, struct lock_class_key *key) ··· 858 858 debug_object_init_on_stack(timer, &timer_debug_descr); 859 859 do_init_timer(timer, func, flags, name, key); 860 860 } 861 - EXPORT_SYMBOL_GPL(init_timer_on_stack_key); 861 + EXPORT_SYMBOL_GPL(timer_init_key_on_stack); 862 862 863 - void destroy_timer_on_stack(struct timer_list *timer) 863 + void timer_destroy_on_stack(struct timer_list *timer) 864 864 { 865 865 debug_object_free(timer, &timer_debug_descr); 866 866 } 867 - EXPORT_SYMBOL_GPL(destroy_timer_on_stack); 867 + EXPORT_SYMBOL_GPL(timer_destroy_on_stack); 868 868 869 869 #else 870 870 static inline void debug_timer_init(struct timer_list *timer) { } ··· 904 904 } 905 905 906 906 /** 907 - * init_timer_key - initialize a timer 907 + * timer_init_key - initialize a timer 908 908 * @timer: the timer to be initialized 909 909 * @func: timer callback function 910 910 * @flags: timer flags ··· 912 912 * @key: lockdep class key of the fake lock used for tracking timer 913 913 * sync lock dependencies 914 914 * 915 - * init_timer_key() must be done to a timer prior to calling *any* of the 915 + * timer_init_key() must be done to a timer prior to calling *any* of the 916 916 * other timer functions. 917 917 */ 918 - void init_timer_key(struct timer_list *timer, 918 + void timer_init_key(struct timer_list *timer, 919 919 void (*func)(struct timer_list *), unsigned int flags, 920 920 const char *name, struct lock_class_key *key) 921 921 { 922 922 debug_init(timer); 923 923 do_init_timer(timer, func, flags, name, key); 924 924 } 925 - EXPORT_SYMBOL(init_timer_key); 925 + EXPORT_SYMBOL(timer_init_key); 926 926 927 927 static inline void detach_timer(struct timer_list *timer, bool clear_pending) 928 928 { ··· 1511 1511 } 1512 1512 1513 1513 /** 1514 - * try_to_del_timer_sync - Try to deactivate a timer 1514 + * timer_delete_sync_try - Try to deactivate a timer 1515 1515 * @timer: Timer to deactivate 1516 1516 * 1517 1517 * This function tries to deactivate a timer. On success the timer is not ··· 1526 1526 * * %1 - The timer was pending and deactivated 1527 1527 * * %-1 - The timer callback function is running on a different CPU 1528 1528 */ 1529 - int try_to_del_timer_sync(struct timer_list *timer) 1529 + int timer_delete_sync_try(struct timer_list *timer) 1530 1530 { 1531 1531 return __try_to_del_timer_sync(timer, false); 1532 1532 } 1533 - EXPORT_SYMBOL(try_to_del_timer_sync); 1533 + EXPORT_SYMBOL(timer_delete_sync_try); 1534 1534 1535 1535 #ifdef CONFIG_PREEMPT_RT 1536 1536 static __init void timer_base_init_expiry_lock(struct timer_base *base) ··· 1900 1900 unsigned long clk, next, adj; 1901 1901 unsigned lvl, offset = 0; 1902 1902 1903 - next = base->clk + NEXT_TIMER_MAX_DELTA; 1903 + next = base->clk + TIMER_NEXT_MAX_DELTA; 1904 1904 clk = base->clk; 1905 1905 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) { 1906 1906 int pos = next_pending_bucket(base, offset, clk & LVL_MASK); ··· 1963 1963 1964 1964 WRITE_ONCE(base->next_expiry, next); 1965 1965 base->next_expiry_recalc = false; 1966 - base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA); 1966 + base->timers_pending = !(next == base->clk + TIMER_NEXT_MAX_DELTA); 1967 1967 } 1968 1968 1969 1969 #ifdef CONFIG_NO_HZ_COMMON ··· 2015 2015 * easy comparable to find out which base holds the first pending timer. 2016 2016 */ 2017 2017 if (!base->timers_pending) 2018 - WRITE_ONCE(base->next_expiry, basej + NEXT_TIMER_MAX_DELTA); 2018 + WRITE_ONCE(base->next_expiry, basej + TIMER_NEXT_MAX_DELTA); 2019 2019 2020 2020 return base->next_expiry; 2021 2021 } ··· 2399 2399 * timer at this clk are that all matching timers have been 2400 2400 * dequeued or no timer has been queued since 2401 2401 * base::next_expiry was set to base::clk + 2402 - * NEXT_TIMER_MAX_DELTA. 2402 + * TIMER_NEXT_MAX_DELTA. 2403 2403 */ 2404 2404 WARN_ON_ONCE(!levels && !base->next_expiry_recalc 2405 2405 && base->timers_pending); ··· 2544 2544 for (b = 0; b < NR_BASES; b++) { 2545 2545 base = per_cpu_ptr(&timer_bases[b], cpu); 2546 2546 base->clk = jiffies; 2547 - base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; 2547 + base->next_expiry = base->clk + TIMER_NEXT_MAX_DELTA; 2548 2548 base->next_expiry_recalc = false; 2549 2549 base->timers_pending = false; 2550 2550 base->is_idle = false; ··· 2599 2599 base->cpu = cpu; 2600 2600 raw_spin_lock_init(&base->lock); 2601 2601 base->clk = jiffies; 2602 - base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; 2602 + base->next_expiry = base->clk + TIMER_NEXT_MAX_DELTA; 2603 2603 timer_base_init_expiry_lock(base); 2604 2604 } 2605 2605 } ··· 2612 2612 init_timer_cpu(cpu); 2613 2613 } 2614 2614 2615 - void __init init_timers(void) 2615 + void __init timers_init(void) 2616 2616 { 2617 2617 init_timer_cpus(); 2618 2618 posix_cputimers_init_work();
+1 -1
kernel/workqueue.c
··· 686 686 687 687 void destroy_delayed_work_on_stack(struct delayed_work *work) 688 688 { 689 - destroy_timer_on_stack(&work->timer); 689 + timer_destroy_on_stack(&work->timer); 690 690 debug_object_free(&work->work, &work_debug_descr); 691 691 } 692 692 EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
+8 -8
net/bridge/br_multicast.c
··· 2061 2061 { 2062 2062 query->startup_sent = 0; 2063 2063 2064 - if (try_to_del_timer_sync(&query->timer) >= 0 || 2064 + if (timer_delete_sync_try(&query->timer) >= 0 || 2065 2065 timer_delete(&query->timer)) 2066 2066 mod_timer(&query->timer, jiffies); 2067 2067 } ··· 3480 3480 if (mp->host_joined && 3481 3481 (timer_pending(&mp->timer) ? 3482 3482 time_after(mp->timer.expires, now + max_delay) : 3483 - try_to_del_timer_sync(&mp->timer) >= 0)) 3483 + timer_delete_sync_try(&mp->timer) >= 0)) 3484 3484 mod_timer(&mp->timer, now + max_delay); 3485 3485 3486 3486 for (pp = &mp->ports; ··· 3488 3488 pp = &p->next) { 3489 3489 if (timer_pending(&p->timer) ? 3490 3490 time_after(p->timer.expires, now + max_delay) : 3491 - try_to_del_timer_sync(&p->timer) >= 0 && 3491 + timer_delete_sync_try(&p->timer) >= 0 && 3492 3492 (brmctx->multicast_igmp_version == 2 || 3493 3493 p->filter_mode == MCAST_EXCLUDE)) 3494 3494 mod_timer(&p->timer, now + max_delay); ··· 3569 3569 if (mp->host_joined && 3570 3570 (timer_pending(&mp->timer) ? 3571 3571 time_after(mp->timer.expires, now + max_delay) : 3572 - try_to_del_timer_sync(&mp->timer) >= 0)) 3572 + timer_delete_sync_try(&mp->timer) >= 0)) 3573 3573 mod_timer(&mp->timer, now + max_delay); 3574 3574 3575 3575 for (pp = &mp->ports; ··· 3577 3577 pp = &p->next) { 3578 3578 if (timer_pending(&p->timer) ? 3579 3579 time_after(p->timer.expires, now + max_delay) : 3580 - try_to_del_timer_sync(&p->timer) >= 0 && 3580 + timer_delete_sync_try(&p->timer) >= 0 && 3581 3581 (brmctx->multicast_mld_version == 1 || 3582 3582 p->filter_mode == MCAST_EXCLUDE)) 3583 3583 mod_timer(&p->timer, now + max_delay); ··· 3649 3649 if (!hlist_unhashed(&p->mglist) && 3650 3650 (timer_pending(&p->timer) ? 3651 3651 time_after(p->timer.expires, time) : 3652 - try_to_del_timer_sync(&p->timer) >= 0)) { 3652 + timer_delete_sync_try(&p->timer) >= 0)) { 3653 3653 mod_timer(&p->timer, time); 3654 3654 } 3655 3655 ··· 3665 3665 if (mp->host_joined && 3666 3666 (timer_pending(&mp->timer) ? 3667 3667 time_after(mp->timer.expires, time) : 3668 - try_to_del_timer_sync(&mp->timer) >= 0)) { 3668 + timer_delete_sync_try(&mp->timer) >= 0)) { 3669 3669 mod_timer(&mp->timer, time); 3670 3670 } 3671 3671 ··· 3681 3681 if (!hlist_unhashed(&p->mglist) && 3682 3682 (timer_pending(&p->timer) ? 3683 3683 time_after(p->timer.expires, time) : 3684 - try_to_del_timer_sync(&p->timer) >= 0)) { 3684 + timer_delete_sync_try(&p->timer) >= 0)) { 3685 3685 mod_timer(&p->timer, time); 3686 3686 } 3687 3687
+1 -1
sound/pci/ctxfi/cttimer.c
··· 119 119 static void ct_systimer_prepare(struct ct_timer_instance *ti) 120 120 { 121 121 ct_systimer_stop(ti); 122 - try_to_del_timer_sync(&ti->timer); 122 + timer_delete_sync_try(&ti->timer); 123 123 } 124 124 125 125 #define ct_systimer_free ct_systimer_prepare