Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer updates from Thomas Gleixner:
"Yet another big pile of changes:

- More year 2038 work from Arnd slowly reaching the point where we
need to think about the syscalls themselves.

- A new timer function which allows conditionally (re)arming a timer
only when it's either not running or the new expiry time is sooner
than the armed expiry time. This allows a single timer to be used for
multiple timeout requirements w/o caring about the first expiry
time at the call site.

- A new NMI safe accessor to clock real time for the printk timestamp
work. Can be used by tracing, perf as well if required.

- A large number of timer setup conversions from Kees which got
collected here because either maintainers requested so or they
simply got ignored. As Kees pointed out already there are a few
trivial merge conflicts and some redundant commits which were
unavoidable due to the size of this conversion effort.

- Avoid a redundant iteration in the timer wheel softirq processing.

- Provide a mechanism to treat RTC implementations depending on their
hardware properties, i.e. don't inflict the write at the 0.5
seconds boundary which originates from the PC CMOS RTC to all RTCs.
No functional change as drivers need to be updated separately.

- The usual small updates to core code clocksource drivers. Nothing
really exciting"

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (111 commits)
timers: Add a function to start/reduce a timer
pstore: Use ktime_get_real_fast_ns() instead of __getnstimeofday()
timer: Prepare to change all DEFINE_TIMER() callbacks
netfilter: ipvs: Convert timers to use timer_setup()
scsi: qla2xxx: Convert timers to use timer_setup()
block/aoe: discover_timer: Convert timers to use timer_setup()
ide: Convert timers to use timer_setup()
drbd: Convert timers to use timer_setup()
mailbox: Convert timers to use timer_setup()
crypto: Convert timers to use timer_setup()
drivers/pcmcia: omap1: Fix error in automated timer conversion
ARM: footbridge: Fix typo in timer conversion
drivers/sgi-xp: Convert timers to use timer_setup()
drivers/pcmcia: Convert timers to use timer_setup()
drivers/memstick: Convert timers to use timer_setup()
drivers/macintosh: Convert timers to use timer_setup()
hwrng/xgene-rng: Convert timers to use timer_setup()
auxdisplay: Convert timers to use timer_setup()
sparc/led: Convert timers to use timer_setup()
mips: ip22/32: Convert timers to use timer_setup()
...

+1814 -1868
+12 -12
Documentation/devicetree/bindings/timer/renesas,cmt.txt
··· 20 20 (CMT1 on sh73a0 and r8a7740) 21 21 This is a fallback for the above renesas,cmt-48-* entries. 22 22 23 - - "renesas,cmt0-r8a73a4" for the 32-bit CMT0 device included in r8a73a4. 24 - - "renesas,cmt1-r8a73a4" for the 48-bit CMT1 device included in r8a73a4. 25 - - "renesas,cmt0-r8a7790" for the 32-bit CMT0 device included in r8a7790. 26 - - "renesas,cmt1-r8a7790" for the 48-bit CMT1 device included in r8a7790. 27 - - "renesas,cmt0-r8a7791" for the 32-bit CMT0 device included in r8a7791. 28 - - "renesas,cmt1-r8a7791" for the 48-bit CMT1 device included in r8a7791. 29 - - "renesas,cmt0-r8a7793" for the 32-bit CMT0 device included in r8a7793. 30 - - "renesas,cmt1-r8a7793" for the 48-bit CMT1 device included in r8a7793. 31 - - "renesas,cmt0-r8a7794" for the 32-bit CMT0 device included in r8a7794. 32 - - "renesas,cmt1-r8a7794" for the 48-bit CMT1 device included in r8a7794. 23 + - "renesas,r8a73a4-cmt0" for the 32-bit CMT0 device included in r8a73a4. 24 + - "renesas,r8a73a4-cmt1" for the 48-bit CMT1 device included in r8a73a4. 25 + - "renesas,r8a7790-cmt0" for the 32-bit CMT0 device included in r8a7790. 26 + - "renesas,r8a7790-cmt1" for the 48-bit CMT1 device included in r8a7790. 27 + - "renesas,r8a7791-cmt0" for the 32-bit CMT0 device included in r8a7791. 28 + - "renesas,r8a7791-cmt1" for the 48-bit CMT1 device included in r8a7791. 29 + - "renesas,r8a7793-cmt0" for the 32-bit CMT0 device included in r8a7793. 30 + - "renesas,r8a7793-cmt1" for the 48-bit CMT1 device included in r8a7793. 31 + - "renesas,r8a7794-cmt0" for the 32-bit CMT0 device included in r8a7794. 32 + - "renesas,r8a7794-cmt1" for the 48-bit CMT1 device included in r8a7794. 33 33 34 34 - "renesas,rcar-gen2-cmt0" for 32-bit CMT0 devices included in R-Car Gen2. 35 35 - "renesas,rcar-gen2-cmt1" for 48-bit CMT1 devices included in R-Car Gen2. 
··· 46 46 Example: R8A7790 (R-Car H2) CMT0 and CMT1 nodes 47 47 48 48 cmt0: timer@ffca0000 { 49 - compatible = "renesas,cmt0-r8a7790", "renesas,rcar-gen2-cmt0"; 49 + compatible = "renesas,r8a7790-cmt0", "renesas,rcar-gen2-cmt0"; 50 50 reg = <0 0xffca0000 0 0x1004>; 51 51 interrupts = <0 142 IRQ_TYPE_LEVEL_HIGH>, 52 52 <0 142 IRQ_TYPE_LEVEL_HIGH>; ··· 55 55 }; 56 56 57 57 cmt1: timer@e6130000 { 58 - compatible = "renesas,cmt1-r8a7790", "renesas,rcar-gen2-cmt1"; 58 + compatible = "renesas,r8a7790-cmt1", "renesas,rcar-gen2-cmt1"; 59 59 reg = <0 0xe6130000 0 0x1004>; 60 60 interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>, 61 61 <0 121 IRQ_TYPE_LEVEL_HIGH>,
+2 -1
MAINTAINERS
··· 3444 3444 L: linux-kernel@vger.kernel.org 3445 3445 T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core 3446 3446 S: Supported 3447 - F: drivers/clocksource 3447 + F: drivers/clocksource/ 3448 + F: Documentation/devicetree/bindings/timer/ 3448 3449 3449 3450 CMPC ACPI DRIVER 3450 3451 M: Thadeu Lima de Souza Cascardo <cascardo@holoscopio.com>
+8 -18
arch/arm/mach-footbridge/dc21285.c
··· 136 136 static struct timer_list serr_timer; 137 137 static struct timer_list perr_timer; 138 138 139 - static void dc21285_enable_error(unsigned long __data) 139 + static void dc21285_enable_error(struct timer_list *timer) 140 140 { 141 - switch (__data) { 142 - case IRQ_PCI_SERR: 143 - del_timer(&serr_timer); 144 - break; 141 + del_timer(timer); 145 142 146 - case IRQ_PCI_PERR: 147 - del_timer(&perr_timer); 148 - break; 149 - } 150 - 151 - enable_irq(__data); 143 + if (timer == &serr_timer) 144 + enable_irq(IRQ_PCI_SERR); 145 + else if (timer == &perr_timer) 146 + enable_irq(IRQ_PCI_PERR); 152 147 } 153 148 154 149 /* ··· 318 323 *CSR_PCICMD = (*CSR_PCICMD & 0xffff) | PCICMD_ERROR_BITS; 319 324 } 320 325 321 - init_timer(&serr_timer); 322 - init_timer(&perr_timer); 323 - 324 - serr_timer.data = IRQ_PCI_SERR; 325 - serr_timer.function = dc21285_enable_error; 326 - perr_timer.data = IRQ_PCI_PERR; 327 - perr_timer.function = dc21285_enable_error; 326 + timer_setup(&serr_timer, dc21285_enable_error, 0); 327 + timer_setup(&perr_timer, dc21285_enable_error, 0); 328 328 329 329 /* 330 330 * We don't care if these fail.
+1 -1
arch/arm/mach-ixp4xx/dsmg600-setup.c
··· 175 175 #define PBUTTON_HOLDDOWN_COUNT 4 /* 2 secs */ 176 176 177 177 static void dsmg600_power_handler(unsigned long data); 178 - static DEFINE_TIMER(dsmg600_power_timer, dsmg600_power_handler, 0, 0); 178 + static DEFINE_TIMER(dsmg600_power_timer, dsmg600_power_handler); 179 179 180 180 static void dsmg600_power_handler(unsigned long data) 181 181 {
+1 -1
arch/arm/mach-ixp4xx/nas100d-setup.c
··· 198 198 #define PBUTTON_HOLDDOWN_COUNT 4 /* 2 secs */ 199 199 200 200 static void nas100d_power_handler(unsigned long data); 201 - static DEFINE_TIMER(nas100d_power_timer, nas100d_power_handler, 0, 0); 201 + static DEFINE_TIMER(nas100d_power_timer, nas100d_power_handler); 202 202 203 203 static void nas100d_power_handler(unsigned long data) 204 204 {
+6 -9
arch/arm/mach-pxa/lubbock.c
··· 381 381 382 382 #define MMC_POLL_RATE msecs_to_jiffies(1000) 383 383 384 - static void lubbock_mmc_poll(unsigned long); 385 384 static irq_handler_t mmc_detect_int; 385 + static void *mmc_detect_int_data; 386 + static struct timer_list mmc_timer; 386 387 387 - static struct timer_list mmc_timer = { 388 - .function = lubbock_mmc_poll, 389 - }; 390 - 391 - static void lubbock_mmc_poll(unsigned long data) 388 + static void lubbock_mmc_poll(struct timer_list *unused) 392 389 { 393 390 unsigned long flags; 394 391 ··· 398 401 if (LUB_IRQ_SET_CLR & (1 << 0)) 399 402 mod_timer(&mmc_timer, jiffies + MMC_POLL_RATE); 400 403 else { 401 - (void) mmc_detect_int(LUBBOCK_SD_IRQ, (void *)data); 404 + (void) mmc_detect_int(LUBBOCK_SD_IRQ, mmc_detect_int_data); 402 405 enable_irq(LUBBOCK_SD_IRQ); 403 406 } 404 407 } ··· 418 421 { 419 422 /* detect card insert/eject */ 420 423 mmc_detect_int = detect_int; 421 - init_timer(&mmc_timer); 422 - mmc_timer.data = (unsigned long) data; 424 + mmc_detect_int_data = data; 425 + timer_setup(&mmc_timer, lubbock_mmc_poll, 0); 423 426 return request_irq(LUBBOCK_SD_IRQ, lubbock_detect_int, 424 427 0, "lubbock-sd-detect", data); 425 428 }
+4 -4
arch/arm/mach-pxa/sharpsl_pm.c
··· 341 341 sharpsl_pm.charge_start_time = jiffies; 342 342 } 343 343 344 - static void sharpsl_ac_timer(unsigned long data) 344 + static void sharpsl_ac_timer(struct timer_list *unused) 345 345 { 346 346 int acin = sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN); 347 347 ··· 366 366 return IRQ_HANDLED; 367 367 } 368 368 369 - static void sharpsl_chrg_full_timer(unsigned long data) 369 + static void sharpsl_chrg_full_timer(struct timer_list *unused) 370 370 { 371 371 dev_dbg(sharpsl_pm.dev, "Charge Full at time: %lx\n", jiffies); 372 372 ··· 841 841 sharpsl_pm.charge_mode = CHRG_OFF; 842 842 sharpsl_pm.flags = 0; 843 843 844 - setup_timer(&sharpsl_pm.ac_timer, sharpsl_ac_timer, 0UL); 844 + timer_setup(&sharpsl_pm.ac_timer, sharpsl_ac_timer, 0); 845 845 846 - setup_timer(&sharpsl_pm.chrg_full_timer, sharpsl_chrg_full_timer, 0UL); 846 + timer_setup(&sharpsl_pm.chrg_full_timer, sharpsl_chrg_full_timer, 0); 847 847 848 848 led_trigger_register_simple("sharpsl-charge", &sharpsl_charge_led_trigger); 849 849
+3 -1
arch/ia64/include/asm/sn/bte.h
··· 17 17 #include <asm/sn/types.h> 18 18 #include <asm/sn/shub_mmr.h> 19 19 20 + struct nodepda_s; 21 + 20 22 #define IBCT_NOTIFY (0x1UL << 4) 21 23 #define IBCT_ZFIL_MODE (0x1UL << 0) 22 24 ··· 212 210 */ 213 211 extern bte_result_t bte_copy(u64, u64, u64, u64, void *); 214 212 extern bte_result_t bte_unaligned_copy(u64, u64, u64, u64); 215 - extern void bte_error_handler(unsigned long); 213 + extern void bte_error_handler(struct nodepda_s *); 216 214 217 215 #define bte_zero(dest, len, mode, notification) \ 218 216 bte_copy(0, dest, len, ((mode) | BTE_ZERO_FILL), notification)
+4 -4
arch/ia64/kernel/mca.c
··· 1513 1513 * 1514 1514 */ 1515 1515 static void 1516 - ia64_mca_cmc_poll (unsigned long dummy) 1516 + ia64_mca_cmc_poll (struct timer_list *unused) 1517 1517 { 1518 1518 /* Trigger a CMC interrupt cascade */ 1519 1519 platform_send_ipi(cpumask_first(cpu_online_mask), IA64_CMCP_VECTOR, ··· 1590 1590 * 1591 1591 */ 1592 1592 static void 1593 - ia64_mca_cpe_poll (unsigned long dummy) 1593 + ia64_mca_cpe_poll (struct timer_list *unused) 1594 1594 { 1595 1595 /* Trigger a CPE interrupt cascade */ 1596 1596 platform_send_ipi(cpumask_first(cpu_online_mask), IA64_CPEP_VECTOR, ··· 2098 2098 return 0; 2099 2099 2100 2100 /* Setup the CMCI/P vector and handler */ 2101 - setup_timer(&cmc_poll_timer, ia64_mca_cmc_poll, 0UL); 2101 + timer_setup(&cmc_poll_timer, ia64_mca_cmc_poll, 0); 2102 2102 2103 2103 /* Unmask/enable the vector */ 2104 2104 cmc_polling_enabled = 0; ··· 2109 2109 #ifdef CONFIG_ACPI 2110 2110 /* Setup the CPEI/P vector and handler */ 2111 2111 cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI); 2112 - setup_timer(&cpe_poll_timer, ia64_mca_cpe_poll, 0UL); 2112 + timer_setup(&cpe_poll_timer, ia64_mca_cpe_poll, 0); 2113 2113 2114 2114 { 2115 2115 unsigned int irq;
+2 -3
arch/ia64/kernel/salinfo.c
··· 263 263 } 264 264 265 265 static void 266 - salinfo_timeout (unsigned long arg) 266 + salinfo_timeout(struct timer_list *unused) 267 267 { 268 268 ia64_mlogbuf_dump(); 269 269 salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_MCA); ··· 623 623 624 624 *sdir++ = salinfo_dir; 625 625 626 - init_timer(&salinfo_timer); 626 + timer_setup(&salinfo_timer, salinfo_timeout, 0); 627 627 salinfo_timer.expires = jiffies + SALINFO_TIMER_DELAY; 628 - salinfo_timer.function = &salinfo_timeout; 629 628 add_timer(&salinfo_timer); 630 629 631 630 i = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/salinfo:online",
+8 -4
arch/ia64/sn/kernel/bte.c
··· 219 219 BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na) ); 220 220 bte->bte_error_count++; 221 221 bte->bh_error = IBLS_ERROR; 222 - bte_error_handler((unsigned long)NODEPDA(bte->bte_cnode)); 222 + bte_error_handler(NODEPDA(bte->bte_cnode)); 223 223 *bte->most_rcnt_na = BTE_WORD_AVAILABLE; 224 224 goto retry_bteop; 225 225 } ··· 414 414 * Block Transfer Engine initialization functions. 415 415 * 416 416 ***********************************************************************/ 417 + static void bte_recovery_timeout(struct timer_list *t) 418 + { 419 + struct nodepda_s *nodepda = from_timer(nodepda, t, bte_recovery_timer); 420 + 421 + bte_error_handler(nodepda); 422 + } 417 423 418 424 /* 419 425 * bte_init_node(nodepda, cnode) ··· 442 436 * will point at this one bte_recover structure to get the lock. 443 437 */ 444 438 spin_lock_init(&mynodepda->bte_recovery_lock); 445 - init_timer(&mynodepda->bte_recovery_timer); 446 - mynodepda->bte_recovery_timer.function = bte_error_handler; 447 - mynodepda->bte_recovery_timer.data = (unsigned long)mynodepda; 439 + timer_setup(&mynodepda->bte_recovery_timer, bte_recovery_timeout, 0); 448 440 449 441 for (i = 0; i < BTES_PER_NODE; i++) { 450 442 u64 *base_addr;
+6 -11
arch/ia64/sn/kernel/bte_error.c
··· 27 27 * transfers to be queued. 28 28 */ 29 29 30 - void bte_error_handler(unsigned long); 31 - 32 30 /* 33 31 * Wait until all BTE related CRBs are completed 34 32 * and then reset the interfaces. 35 33 */ 36 - int shub1_bte_error_handler(unsigned long _nodepda) 34 + static int shub1_bte_error_handler(struct nodepda_s *err_nodepda) 37 35 { 38 - struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda; 39 36 struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer; 40 37 nasid_t nasid; 41 38 int i; ··· 128 131 * Wait until all BTE related CRBs are completed 129 132 * and then reset the interfaces. 130 133 */ 131 - int shub2_bte_error_handler(unsigned long _nodepda) 134 + static int shub2_bte_error_handler(struct nodepda_s *err_nodepda) 132 135 { 133 - struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda; 134 136 struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer; 135 137 struct bteinfo_s *bte; 136 138 nasid_t nasid; ··· 166 170 * Wait until all BTE related CRBs are completed 167 171 * and then reset the interfaces. 
168 172 */ 169 - void bte_error_handler(unsigned long _nodepda) 173 + void bte_error_handler(struct nodepda_s *err_nodepda) 170 174 { 171 - struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda; 172 175 spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock; 173 176 int i; 174 177 unsigned long irq_flags; ··· 194 199 } 195 200 196 201 if (is_shub1()) { 197 - if (shub1_bte_error_handler(_nodepda)) { 202 + if (shub1_bte_error_handler(err_nodepda)) { 198 203 spin_unlock_irqrestore(recovery_lock, irq_flags); 199 204 return; 200 205 } 201 206 } else { 202 - if (shub2_bte_error_handler(_nodepda)) { 207 + if (shub2_bte_error_handler(err_nodepda)) { 203 208 spin_unlock_irqrestore(recovery_lock, irq_flags); 204 209 return; 205 210 } ··· 250 255 251 256 BTE_PRINTK(("Got an error on cnode %d bte %d: HW error type 0x%x\n", 252 257 bte->bte_cnode, bte->bte_num, ioe->ie_errortype)); 253 - bte_error_handler((unsigned long) NODEPDA(cnode)); 258 + bte_error_handler(NODEPDA(cnode)); 254 259 } 255 260
+1 -1
arch/ia64/sn/kernel/huberror.c
··· 50 50 if ((int)ret_stuff.v0) 51 51 panic("%s: Fatal TIO Error", __func__); 52 52 } else 53 - bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid))); 53 + bte_error_handler(NODEPDA(nasid_to_cnodeid(nasid))); 54 54 55 55 return IRQ_HANDLED; 56 56 }
+2 -3
arch/ia64/sn/kernel/mca.c
··· 72 72 ia64_sn_plat_cpei_handler(); 73 73 } 74 74 75 - static void sn_cpei_timer_handler(unsigned long dummy) 75 + static void sn_cpei_timer_handler(struct timer_list *unused) 76 76 { 77 77 sn_cpei_handler(-1, NULL, NULL); 78 78 mod_timer(&sn_cpei_timer, jiffies + CPEI_INTERVAL); ··· 80 80 81 81 void sn_init_cpei_timer(void) 82 82 { 83 - init_timer(&sn_cpei_timer); 83 + timer_setup(&sn_cpei_timer, sn_cpei_timer_handler, 0); 84 84 sn_cpei_timer.expires = jiffies + CPEI_INTERVAL; 85 - sn_cpei_timer.function = sn_cpei_timer_handler; 86 85 add_timer(&sn_cpei_timer); 87 86 } 88 87
+1 -1
arch/m68k/amiga/amisound.c
··· 66 66 } 67 67 68 68 static void nosound( unsigned long ignored ); 69 - static DEFINE_TIMER(sound_timer, nosound, 0, 0); 69 + static DEFINE_TIMER(sound_timer, nosound); 70 70 71 71 void amiga_mksound( unsigned int hz, unsigned int ticks ) 72 72 {
+1 -1
arch/m68k/mac/macboing.c
··· 57 57 /* 58 58 * our timer to start/continue/stop the bell 59 59 */ 60 - static DEFINE_TIMER(mac_sound_timer, mac_nosound, 0, 0); 60 + static DEFINE_TIMER(mac_sound_timer, mac_nosound); 61 61 62 62 /* 63 63 * Sort of initialize the sound chip (called from mac_mksound on the first
+3 -3
arch/mips/mti-malta/malta-display.c
··· 36 36 } 37 37 } 38 38 39 - static void scroll_display_message(unsigned long data); 40 - static DEFINE_TIMER(mips_scroll_timer, scroll_display_message, HZ, 0); 39 + static void scroll_display_message(unsigned long unused); 40 + static DEFINE_TIMER(mips_scroll_timer, scroll_display_message); 41 41 42 - static void scroll_display_message(unsigned long data) 42 + static void scroll_display_message(unsigned long unused) 43 43 { 44 44 mips_display_message(&display_string[display_count++]); 45 45 if (display_count == max_display_count)
+12 -14
arch/mips/sgi-ip22/ip22-reset.c
··· 38 38 #define PANIC_FREQ (HZ / 8) 39 39 40 40 static struct timer_list power_timer, blink_timer, debounce_timer; 41 + static unsigned long blink_timer_timeout; 41 42 42 43 #define MACHINE_PANICED 1 43 44 #define MACHINE_SHUTTING_DOWN 2 ··· 82 81 ArcEnterInteractiveMode(); 83 82 } 84 83 85 - static void power_timeout(unsigned long data) 84 + static void power_timeout(struct timer_list *unused) 86 85 { 87 86 sgi_machine_power_off(); 88 87 } 89 88 90 - static void blink_timeout(unsigned long data) 89 + static void blink_timeout(struct timer_list *unused) 91 90 { 92 91 /* XXX fix this for fullhouse */ 93 92 sgi_ioc_reset ^= (SGIOC_RESET_LC0OFF|SGIOC_RESET_LC1OFF); 94 93 sgioc->reset = sgi_ioc_reset; 95 94 96 - mod_timer(&blink_timer, jiffies + data); 95 + mod_timer(&blink_timer, jiffies + blink_timer_timeout); 97 96 } 98 97 99 - static void debounce(unsigned long data) 98 + static void debounce(struct timer_list *unused) 100 99 { 101 100 del_timer(&debounce_timer); 102 101 if (sgint->istat1 & SGINT_ISTAT1_PWR) { ··· 129 128 } 130 129 131 130 machine_state |= MACHINE_SHUTTING_DOWN; 132 - blink_timer.data = POWERDOWN_FREQ; 133 - blink_timeout(POWERDOWN_FREQ); 131 + blink_timer_timeout = POWERDOWN_FREQ; 132 + blink_timeout(&blink_timer); 134 133 135 - init_timer(&power_timer); 136 - power_timer.function = power_timeout; 134 + timer_setup(&power_timer, power_timeout, 0); 137 135 power_timer.expires = jiffies + POWERDOWN_TIMEOUT * HZ; 138 136 add_timer(&power_timer); 139 137 } ··· 147 147 if (sgint->istat1 & SGINT_ISTAT1_PWR) { 148 148 /* Wait until interrupt goes away */ 149 149 disable_irq_nosync(SGI_PANEL_IRQ); 150 - init_timer(&debounce_timer); 151 - debounce_timer.function = debounce; 150 + timer_setup(&debounce_timer, debounce, 0); 152 151 debounce_timer.expires = jiffies + 5; 153 152 add_timer(&debounce_timer); 154 153 } ··· 170 171 return NOTIFY_DONE; 171 172 machine_state |= MACHINE_PANICED; 172 173 173 - blink_timer.data = PANIC_FREQ; 174 - 
blink_timeout(PANIC_FREQ); 174 + blink_timer_timeout = PANIC_FREQ; 175 + blink_timeout(&blink_timer); 175 176 176 177 return NOTIFY_DONE; 177 178 } ··· 194 195 return res; 195 196 } 196 197 197 - init_timer(&blink_timer); 198 - blink_timer.function = blink_timeout; 198 + timer_setup(&blink_timer, blink_timeout, 0); 199 199 atomic_notifier_chain_register(&panic_notifier_list, &panic_block); 200 200 201 201 return 0;
+10 -11
arch/mips/sgi-ip32/ip32-reset.c
··· 38 38 extern struct platform_device ip32_rtc_device; 39 39 40 40 static struct timer_list power_timer, blink_timer; 41 + static unsigned long blink_timer_timeout; 41 42 static int has_panicked, shutting_down; 42 43 43 44 static __noreturn void ip32_poweroff(void *data) ··· 72 71 unreachable(); 73 72 } 74 73 75 - static void blink_timeout(unsigned long data) 74 + static void blink_timeout(struct timer_list *unused) 76 75 { 77 76 unsigned long led = mace->perif.ctrl.misc ^ MACEISA_LED_RED; 78 77 mace->perif.ctrl.misc = led; 79 - mod_timer(&blink_timer, jiffies + data); 78 + mod_timer(&blink_timer, jiffies + blink_timer_timeout); 80 79 } 81 80 82 81 static void ip32_machine_halt(void) ··· 84 83 ip32_poweroff(&ip32_rtc_device); 85 84 } 86 85 87 - static void power_timeout(unsigned long data) 86 + static void power_timeout(struct timer_list *unused) 88 87 { 89 88 ip32_poweroff(&ip32_rtc_device); 90 89 } ··· 100 99 } 101 100 102 101 shutting_down = 1; 103 - blink_timer.data = POWERDOWN_FREQ; 104 - blink_timeout(POWERDOWN_FREQ); 102 + blink_timer_timeout = POWERDOWN_FREQ; 103 + blink_timeout(&blink_timer); 105 104 106 - init_timer(&power_timer); 107 - power_timer.function = power_timeout; 105 + timer_setup(&power_timer, power_timeout, 0); 108 106 power_timer.expires = jiffies + POWERDOWN_TIMEOUT * HZ; 109 107 add_timer(&power_timer); 110 108 } ··· 121 121 led = mace->perif.ctrl.misc | MACEISA_LED_GREEN; 122 122 mace->perif.ctrl.misc = led; 123 123 124 - blink_timer.data = PANIC_FREQ; 125 - blink_timeout(PANIC_FREQ); 124 + blink_timer_timeout = PANIC_FREQ; 125 + blink_timeout(&blink_timer); 126 126 127 127 return NOTIFY_DONE; 128 128 } ··· 143 143 _machine_halt = ip32_machine_halt; 144 144 pm_power_off = ip32_machine_halt; 145 145 146 - init_timer(&blink_timer); 147 - blink_timer.function = blink_timeout; 146 + timer_setup(&blink_timer, blink_timeout, 0); 148 147 atomic_notifier_chain_register(&panic_notifier_list, &panic_block); 149 148 150 149 return 0;
+1 -1
arch/parisc/kernel/pdc_cons.c
··· 92 92 #define PDC_CONS_POLL_DELAY (30 * HZ / 1000) 93 93 94 94 static void pdc_console_poll(unsigned long unused); 95 - static DEFINE_TIMER(pdc_console_timer, pdc_console_poll, 0, 0); 95 + static DEFINE_TIMER(pdc_console_timer, pdc_console_poll); 96 96 static struct tty_port tty_port; 97 97 98 98 static int pdc_console_tty_open(struct tty_struct *tty, struct file *filp)
+2 -3
arch/powerpc/kernel/watchdog.c
··· 262 262 add_timer_on(t, cpu); 263 263 } 264 264 265 - static void wd_timer_fn(unsigned long data) 265 + static void wd_timer_fn(struct timer_list *t) 266 266 { 267 - struct timer_list *t = this_cpu_ptr(&wd_timer); 268 267 int cpu = smp_processor_id(); 269 268 270 269 watchdog_timer_interrupt(cpu); ··· 287 288 288 289 per_cpu(wd_timer_tb, cpu) = get_tb(); 289 290 290 - setup_pinned_timer(t, wd_timer_fn, 0); 291 + timer_setup(t, wd_timer_fn, TIMER_PINNED); 291 292 wd_timer_reset(cpu, t); 292 293 } 293 294
+5 -7
arch/powerpc/mm/numa.c
··· 1452 1452 schedule_work(&topology_work); 1453 1453 } 1454 1454 1455 - static void topology_timer_fn(unsigned long ignored) 1455 + static void topology_timer_fn(struct timer_list *unused) 1456 1456 { 1457 1457 if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask)) 1458 1458 topology_schedule_update(); ··· 1462 1462 reset_topology_timer(); 1463 1463 } 1464 1464 } 1465 - static struct timer_list topology_timer = 1466 - TIMER_INITIALIZER(topology_timer_fn, 0, 0); 1465 + static struct timer_list topology_timer; 1467 1466 1468 1467 static void reset_topology_timer(void) 1469 1468 { 1470 - topology_timer.data = 0; 1471 - topology_timer.expires = jiffies + 60 * HZ; 1472 - mod_timer(&topology_timer, topology_timer.expires); 1469 + mod_timer(&topology_timer, jiffies + 60 * HZ); 1473 1470 } 1474 1471 1475 1472 #ifdef CONFIG_SMP ··· 1526 1529 prrn_enabled = 0; 1527 1530 vphn_enabled = 1; 1528 1531 setup_cpu_associativity_change_counters(); 1529 - init_timer_deferrable(&topology_timer); 1532 + timer_setup(&topology_timer, topology_timer_fn, 1533 + TIMER_DEFERRABLE); 1530 1534 reset_topology_timer(); 1531 1535 } 1532 1536 }
+3 -3
arch/s390/kernel/lgr.c
··· 153 153 /* 154 154 * LGR timer callback 155 155 */ 156 - static void lgr_timer_fn(unsigned long ignored) 156 + static void lgr_timer_fn(struct timer_list *unused) 157 157 { 158 158 lgr_info_log(); 159 159 lgr_timer_set(); 160 160 } 161 161 162 - static struct timer_list lgr_timer = 163 - TIMER_DEFERRED_INITIALIZER(lgr_timer_fn, 0, 0); 162 + static struct timer_list lgr_timer; 164 163 165 164 /* 166 165 * Setup next LGR timer ··· 180 181 debug_register_view(lgr_dbf, &debug_hex_ascii_view); 181 182 lgr_info_get(&lgr_info_last); 182 183 debug_event(lgr_dbf, 1, &lgr_info_last, sizeof(lgr_info_last)); 184 + timer_setup(&lgr_timer, lgr_timer_fn, TIMER_DEFERRABLE); 183 185 lgr_timer_set(); 184 186 return 0; 185 187 }
+3 -3
arch/s390/kernel/topology.c
··· 330 330 flush_work(&topology_work); 331 331 } 332 332 333 - static void topology_timer_fn(unsigned long ignored) 333 + static void topology_timer_fn(struct timer_list *unused) 334 334 { 335 335 if (ptf(PTF_CHECK)) 336 336 topology_schedule_update(); 337 337 set_topology_timer(); 338 338 } 339 339 340 - static struct timer_list topology_timer = 341 - TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0); 340 + static struct timer_list topology_timer; 342 341 343 342 static atomic_t topology_poll = ATOMIC_INIT(0); 344 343 ··· 637 638 638 639 static int __init topology_init(void) 639 640 { 641 + timer_setup(&topology_timer, topology_timer_fn, TIMER_DEFERRABLE); 640 642 if (MACHINE_HAS_TOPOLOGY) 641 643 set_topology_timer(); 642 644 else
+1 -1
arch/s390/mm/cmm.c
··· 56 56 57 57 static struct task_struct *cmm_thread_ptr; 58 58 static DECLARE_WAIT_QUEUE_HEAD(cmm_thread_wait); 59 - static DEFINE_TIMER(cmm_timer, NULL, 0, 0); 59 + static DEFINE_TIMER(cmm_timer, NULL); 60 60 61 61 static void cmm_timer_fn(unsigned long); 62 62 static void cmm_set_timer(void);
+9 -7
arch/sparc/kernel/led.c
··· 31 31 } 32 32 33 33 static struct timer_list led_blink_timer; 34 + static unsigned long led_blink_timer_timeout; 34 35 35 - static void led_blink(unsigned long timeout) 36 + static void led_blink(struct timer_list *unused) 36 37 { 38 + unsigned long timeout = led_blink_timer_timeout; 39 + 37 40 led_toggle(); 38 41 39 42 /* reschedule */ 40 43 if (!timeout) { /* blink according to load */ 41 44 led_blink_timer.expires = jiffies + 42 45 ((1 + (avenrun[0] >> FSHIFT)) * HZ); 43 - led_blink_timer.data = 0; 44 46 } else { /* blink at user specified interval */ 45 47 led_blink_timer.expires = jiffies + (timeout * HZ); 46 - led_blink_timer.data = timeout; 47 48 } 48 49 add_timer(&led_blink_timer); 49 50 } ··· 89 88 } else if (!strcmp(buf, "toggle")) { 90 89 led_toggle(); 91 90 } else if ((*buf > '0') && (*buf <= '9')) { 92 - led_blink(simple_strtoul(buf, NULL, 10)); 91 + led_blink_timer_timeout = simple_strtoul(buf, NULL, 10); 92 + led_blink(&led_blink_timer); 93 93 } else if (!strcmp(buf, "load")) { 94 - led_blink(0); 94 + led_blink_timer_timeout = 0; 95 + led_blink(&led_blink_timer); 95 96 } else { 96 97 auxio_set_led(AUXIO_LED_OFF); 97 98 } ··· 118 115 119 116 static int __init led_init(void) 120 117 { 121 - init_timer(&led_blink_timer); 122 - led_blink_timer.function = led_blink; 118 + timer_setup(&led_blink_timer, led_blink, 0); 123 119 124 120 led = proc_create("led", 0, NULL, &led_proc_fops); 125 121 if (!led)
+3 -5
arch/x86/kernel/pci-calgary_64.c
··· 898 898 PHB_ROOT_COMPLEX_STATUS); 899 899 } 900 900 901 - static void calgary_watchdog(unsigned long data) 901 + static void calgary_watchdog(struct timer_list *t) 902 902 { 903 - struct pci_dev *dev = (struct pci_dev *)data; 904 - struct iommu_table *tbl = pci_iommu(dev->bus); 903 + struct iommu_table *tbl = from_timer(tbl, t, watchdog_timer); 905 904 void __iomem *bbar = tbl->bbar; 906 905 u32 val32; 907 906 void __iomem *target; ··· 1015 1016 writel(cpu_to_be32(val32), target); 1016 1017 readl(target); /* flush */ 1017 1018 1018 - setup_timer(&tbl->watchdog_timer, &calgary_watchdog, 1019 - (unsigned long)dev); 1019 + timer_setup(&tbl->watchdog_timer, calgary_watchdog, 0); 1020 1020 mod_timer(&tbl->watchdog_timer, jiffies); 1021 1021 } 1022 1022
+4 -5
arch/xtensa/platforms/iss/console.c
··· 47 47 * initialization for the tty structure. 48 48 */ 49 49 50 - static void rs_poll(unsigned long); 50 + static void rs_poll(struct timer_list *); 51 51 52 52 static int rs_open(struct tty_struct *tty, struct file * filp) 53 53 { 54 54 tty->port = &serial_port; 55 55 spin_lock_bh(&timer_lock); 56 56 if (tty->count == 1) { 57 - setup_timer(&serial_timer, rs_poll, 58 - (unsigned long)&serial_port); 57 + timer_setup(&serial_timer, rs_poll, 0); 59 58 mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE); 60 59 } 61 60 spin_unlock_bh(&timer_lock); ··· 91 92 return count; 92 93 } 93 94 94 - static void rs_poll(unsigned long priv) 95 + static void rs_poll(struct timer_list *unused) 95 96 { 96 - struct tty_port *port = (struct tty_port *)priv; 97 + struct tty_port *port = &serial_port; 97 98 int i = 0; 98 99 int rd = 1; 99 100 unsigned char c;
+5 -8
arch/xtensa/platforms/iss/network.c
··· 349 349 } 350 350 351 351 352 - static void iss_net_timer(unsigned long priv) 352 + static void iss_net_timer(struct timer_list *t) 353 353 { 354 - struct iss_net_private *lp = (struct iss_net_private *)priv; 354 + struct iss_net_private *lp = from_timer(lp, t, timer); 355 355 356 356 iss_net_poll(); 357 357 spin_lock(&lp->lock); ··· 386 386 spin_unlock_bh(&opened_lock); 387 387 spin_lock_bh(&lp->lock); 388 388 389 - init_timer(&lp->timer); 389 + timer_setup(&lp->timer, iss_net_timer, 0); 390 390 lp->timer_val = ISS_NET_TIMER_VALUE; 391 - lp->timer.data = (unsigned long) lp; 392 - lp->timer.function = iss_net_timer; 393 391 mod_timer(&lp->timer, jiffies + lp->timer_val); 394 392 395 393 out: ··· 480 482 return -EINVAL; 481 483 } 482 484 483 - void iss_net_user_timer_expire(unsigned long _conn) 485 + void iss_net_user_timer_expire(struct timer_list *unused) 484 486 { 485 487 } 486 488 ··· 580 582 return 1; 581 583 } 582 584 583 - init_timer(&lp->tl); 584 - lp->tl.function = iss_net_user_timer_expire; 585 + timer_setup(&lp->tl, iss_net_user_timer_expire, 0); 585 586 586 587 return 0; 587 588
+3 -4
drivers/acpi/apei/ghes.c
··· 774 774 add_timer(&ghes->timer); 775 775 } 776 776 777 - static void ghes_poll_func(unsigned long data) 777 + static void ghes_poll_func(struct timer_list *t) 778 778 { 779 - struct ghes *ghes = (void *)data; 779 + struct ghes *ghes = from_timer(ghes, t, timer); 780 780 781 781 ghes_proc(ghes); 782 782 if (!(ghes->flags & GHES_EXITING)) ··· 1147 1147 1148 1148 switch (generic->notify.type) { 1149 1149 case ACPI_HEST_NOTIFY_POLLED: 1150 - setup_deferrable_timer(&ghes->timer, ghes_poll_func, 1151 - (unsigned long)ghes); 1150 + timer_setup(&ghes->timer, ghes_poll_func, TIMER_DEFERRABLE); 1152 1151 ghes_add_timer(ghes); 1153 1152 break; 1154 1153 case ACPI_HEST_NOTIFY_EXTERNAL:
+1
drivers/ata/ahci.h
··· 303 303 unsigned long saved_activity; 304 304 unsigned long activity; 305 305 unsigned long led_state; 306 + struct ata_link *link; 306 307 }; 307 308 308 309 struct ahci_port_priv {
+6 -5
drivers/ata/libahci.c
··· 968 968 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10)); 969 969 } 970 970 971 - static void ahci_sw_activity_blink(unsigned long arg) 971 + static void ahci_sw_activity_blink(struct timer_list *t) 972 972 { 973 - struct ata_link *link = (struct ata_link *)arg; 973 + struct ahci_em_priv *emp = from_timer(emp, t, timer); 974 + struct ata_link *link = emp->link; 974 975 struct ata_port *ap = link->ap; 975 - struct ahci_port_priv *pp = ap->private_data; 976 - struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; 976 + 977 977 unsigned long led_message = emp->led_state; 978 978 u32 activity_led_state; 979 979 unsigned long flags; ··· 1020 1020 1021 1021 /* init activity stats, setup timer */ 1022 1022 emp->saved_activity = emp->activity = 0; 1023 - setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link); 1023 + emp->link = link; 1024 + timer_setup(&emp->timer, ahci_sw_activity_blink, 0); 1024 1025 1025 1026 /* check our blink policy and set flag for link if it's enabled */ 1026 1027 if (emp->blink_policy)
+2 -3
drivers/ata/libata-core.c
··· 5979 5979 INIT_LIST_HEAD(&ap->eh_done_q); 5980 5980 init_waitqueue_head(&ap->eh_wait_q); 5981 5981 init_completion(&ap->park_req_pending); 5982 - setup_deferrable_timer(&ap->fastdrain_timer, 5983 - ata_eh_fastdrain_timerfn, 5984 - (unsigned long)ap); 5982 + timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn, 5983 + TIMER_DEFERRABLE); 5985 5984 5986 5985 ap->cbl = ATA_CBL_NONE; 5987 5986
+2 -2
drivers/ata/libata-eh.c
··· 879 879 return nr; 880 880 } 881 881 882 - void ata_eh_fastdrain_timerfn(unsigned long arg) 882 + void ata_eh_fastdrain_timerfn(struct timer_list *t) 883 883 { 884 - struct ata_port *ap = (void *)arg; 884 + struct ata_port *ap = from_timer(ap, t, fastdrain_timer); 885 885 unsigned long flags; 886 886 int cnt; 887 887
+1 -1
drivers/ata/libata.h
··· 154 154 extern void ata_eh_acquire(struct ata_port *ap); 155 155 extern void ata_eh_release(struct ata_port *ap); 156 156 extern void ata_scsi_error(struct Scsi_Host *host); 157 - extern void ata_eh_fastdrain_timerfn(unsigned long arg); 157 + extern void ata_eh_fastdrain_timerfn(struct timer_list *t); 158 158 extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc); 159 159 extern void ata_dev_disable(struct ata_device *dev); 160 160 extern void ata_eh_detach_dev(struct ata_device *dev);
+2 -2
drivers/atm/idt77105.c
··· 49 49 static void idt77105_restart_timer_func(unsigned long); 50 50 51 51 52 - static DEFINE_TIMER(stats_timer, idt77105_stats_timer_func, 0, 0); 53 - static DEFINE_TIMER(restart_timer, idt77105_restart_timer_func, 0, 0); 52 + static DEFINE_TIMER(stats_timer, idt77105_stats_timer_func); 53 + static DEFINE_TIMER(restart_timer, idt77105_restart_timer_func); 54 54 static int start_timer = 1; 55 55 static struct idt77105_priv *idt77105_all = NULL; 56 56
+1 -1
drivers/atm/iphase.c
··· 76 76 static struct atm_dev *_ia_dev[8]; 77 77 static int iadev_count; 78 78 static void ia_led_timer(unsigned long arg); 79 - static DEFINE_TIMER(ia_timer, ia_led_timer, 0, 0); 79 + static DEFINE_TIMER(ia_timer, ia_led_timer); 80 80 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ; 81 81 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ; 82 82 static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
+4 -6
drivers/auxdisplay/img-ascii-lcd.c
··· 229 229 * Scroll the current message along the LCD by one character, rearming the 230 230 * timer if required. 231 231 */ 232 - static void img_ascii_lcd_scroll(unsigned long arg) 232 + static void img_ascii_lcd_scroll(struct timer_list *t) 233 233 { 234 - struct img_ascii_lcd_ctx *ctx = (struct img_ascii_lcd_ctx *)arg; 234 + struct img_ascii_lcd_ctx *ctx = from_timer(ctx, t, timer); 235 235 unsigned int i, ch = ctx->scroll_pos; 236 236 unsigned int num_chars = ctx->cfg->num_chars; 237 237 ··· 299 299 ctx->scroll_pos = 0; 300 300 301 301 /* update the LCD */ 302 - img_ascii_lcd_scroll((unsigned long)ctx); 302 + img_ascii_lcd_scroll(&ctx->timer); 303 303 304 304 return 0; 305 305 } ··· 395 395 ctx->scroll_rate = HZ / 2; 396 396 397 397 /* initialise a timer for scrolling the message */ 398 - init_timer(&ctx->timer); 399 - ctx->timer.function = img_ascii_lcd_scroll; 400 - ctx->timer.data = (unsigned long)ctx; 398 + timer_setup(&ctx->timer, img_ascii_lcd_scroll, 0); 401 399 402 400 platform_set_drvdata(pdev, ctx); 403 401
+2 -2
drivers/auxdisplay/panel.c
··· 1396 1396 } 1397 1397 } 1398 1398 1399 - static void panel_scan_timer(void) 1399 + static void panel_scan_timer(struct timer_list *unused) 1400 1400 { 1401 1401 if (keypad.enabled && keypad_initialized) { 1402 1402 if (spin_trylock_irq(&pprt_lock)) { ··· 1421 1421 if (scan_timer.function) 1422 1422 return; /* already started */ 1423 1423 1424 - setup_timer(&scan_timer, (void *)&panel_scan_timer, 0); 1424 + timer_setup(&scan_timer, panel_scan_timer, 0); 1425 1425 scan_timer.expires = jiffies + INPUT_POLL_TIME; 1426 1426 add_timer(&scan_timer); 1427 1427 }
+3 -5
drivers/base/power/main.c
··· 478 478 * There's not much we can do here to recover so panic() to 479 479 * capture a crash-dump in pstore. 480 480 */ 481 - static void dpm_watchdog_handler(unsigned long data) 481 + static void dpm_watchdog_handler(struct timer_list *t) 482 482 { 483 - struct dpm_watchdog *wd = (void *)data; 483 + struct dpm_watchdog *wd = from_timer(wd, t, timer); 484 484 485 485 dev_emerg(wd->dev, "**** DPM device timeout ****\n"); 486 486 show_stack(wd->tsk, NULL); ··· 500 500 wd->dev = dev; 501 501 wd->tsk = current; 502 502 503 - init_timer_on_stack(timer); 503 + timer_setup_on_stack(timer, dpm_watchdog_handler, 0); 504 504 /* use same timeout value for both suspend and resume */ 505 505 timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT; 506 - timer->function = dpm_watchdog_handler; 507 - timer->data = (unsigned long)wd; 508 506 add_timer(timer); 509 507 } 510 508
+1 -2
drivers/block/amiflop.c
··· 323 323 324 324 } 325 325 326 - static void motor_on_callback(unsigned long nr) 326 + static void motor_on_callback(unsigned long ignored) 327 327 { 328 328 if (!(ciaa.pra & DSKRDY) || --on_attempts == 0) { 329 329 complete_all(&motor_on_completion); ··· 344 344 fd_select(nr); 345 345 346 346 reinit_completion(&motor_on_completion); 347 - motor_on_timer.data = nr; 348 347 mod_timer(&motor_on_timer, jiffies + HZ/2); 349 348 350 349 on_attempts = 10;
+8 -36
drivers/block/aoe/aoemain.c
··· 15 15 MODULE_DESCRIPTION("AoE block/char driver for 2.6.2 and newer 2.6 kernels"); 16 16 MODULE_VERSION(VERSION); 17 17 18 - enum { TINIT, TRUN, TKILL }; 18 + static struct timer_list timer; 19 19 20 - static void 21 - discover_timer(ulong vp) 20 + static void discover_timer(struct timer_list *t) 22 21 { 23 - static struct timer_list t; 24 - static volatile ulong die; 25 - static spinlock_t lock; 26 - ulong flags; 27 - enum { DTIMERTICK = HZ * 60 }; /* one minute */ 22 + mod_timer(t, jiffies + HZ * 60); /* one minute */ 28 23 29 - switch (vp) { 30 - case TINIT: 31 - init_timer(&t); 32 - spin_lock_init(&lock); 33 - t.data = TRUN; 34 - t.function = discover_timer; 35 - die = 0; 36 - case TRUN: 37 - spin_lock_irqsave(&lock, flags); 38 - if (!die) { 39 - t.expires = jiffies + DTIMERTICK; 40 - add_timer(&t); 41 - } 42 - spin_unlock_irqrestore(&lock, flags); 43 - 44 - aoecmd_cfg(0xffff, 0xff); 45 - return; 46 - case TKILL: 47 - spin_lock_irqsave(&lock, flags); 48 - die = 1; 49 - spin_unlock_irqrestore(&lock, flags); 50 - 51 - del_timer_sync(&t); 52 - default: 53 - return; 54 - } 24 + aoecmd_cfg(0xffff, 0xff); 55 25 } 56 26 57 27 static void 58 28 aoe_exit(void) 59 29 { 60 - discover_timer(TKILL); 30 + del_timer_sync(&timer); 61 31 62 32 aoenet_exit(); 63 33 unregister_blkdev(AOE_MAJOR, DEVICE_NAME); ··· 63 93 goto blkreg_fail; 64 94 } 65 95 printk(KERN_INFO "aoe: AoE v%s initialised.\n", VERSION); 66 - discover_timer(TINIT); 96 + 97 + timer_setup(&timer, discover_timer, 0); 98 + discover_timer(&timer); 67 99 return 0; 68 100 blkreg_fail: 69 101 aoecmd_exit();
+4 -4
drivers/block/ataflop.c
··· 373 373 374 374 /************************* End of Prototypes **************************/ 375 375 376 - static DEFINE_TIMER(motor_off_timer, fd_motor_off_timer, 0, 0); 377 - static DEFINE_TIMER(readtrack_timer, fd_readtrack_check, 0, 0); 378 - static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0); 379 - static DEFINE_TIMER(fd_timer, check_change, 0, 0); 376 + static DEFINE_TIMER(motor_off_timer, fd_motor_off_timer); 377 + static DEFINE_TIMER(readtrack_timer, fd_readtrack_check); 378 + static DEFINE_TIMER(timeout_timer, fd_times_out); 379 + static DEFINE_TIMER(fd_timer, check_change); 380 380 381 381 static void fd_end_request_cur(blk_status_t err) 382 382 {
+2 -2
drivers/block/drbd/drbd_int.h
··· 1551 1551 extern int w_send_out_of_sync(struct drbd_work *, int); 1552 1552 extern int w_start_resync(struct drbd_work *, int); 1553 1553 1554 - extern void resync_timer_fn(unsigned long data); 1555 - extern void start_resync_timer_fn(unsigned long data); 1554 + extern void resync_timer_fn(struct timer_list *t); 1555 + extern void start_resync_timer_fn(struct timer_list *t); 1556 1556 1557 1557 extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req); 1558 1558
+7 -11
drivers/block/drbd/drbd_main.c
··· 64 64 static DEFINE_MUTEX(drbd_main_mutex); 65 65 static int drbd_open(struct block_device *bdev, fmode_t mode); 66 66 static void drbd_release(struct gendisk *gd, fmode_t mode); 67 - static void md_sync_timer_fn(unsigned long data); 67 + static void md_sync_timer_fn(struct timer_list *t); 68 68 static int w_bitmap_io(struct drbd_work *w, int unused); 69 69 70 70 MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, " ··· 2023 2023 device->unplug_work.cb = w_send_write_hint; 2024 2024 device->bm_io_work.w.cb = w_bitmap_io; 2025 2025 2026 - setup_timer(&device->resync_timer, resync_timer_fn, 2027 - (unsigned long)device); 2028 - setup_timer(&device->md_sync_timer, md_sync_timer_fn, 2029 - (unsigned long)device); 2030 - setup_timer(&device->start_resync_timer, start_resync_timer_fn, 2031 - (unsigned long)device); 2032 - setup_timer(&device->request_timer, request_timer_fn, 2033 - (unsigned long)device); 2026 + timer_setup(&device->resync_timer, resync_timer_fn, 0); 2027 + timer_setup(&device->md_sync_timer, md_sync_timer_fn, 0); 2028 + timer_setup(&device->start_resync_timer, start_resync_timer_fn, 0); 2029 + timer_setup(&device->request_timer, request_timer_fn, 0); 2034 2030 2035 2031 init_waitqueue_head(&device->misc_wait); 2036 2032 init_waitqueue_head(&device->state_wait); ··· 3717 3721 return (bdev->md.flags & flag) != 0; 3718 3722 } 3719 3723 3720 - static void md_sync_timer_fn(unsigned long data) 3724 + static void md_sync_timer_fn(struct timer_list *t) 3721 3725 { 3722 - struct drbd_device *device = (struct drbd_device *) data; 3726 + struct drbd_device *device = from_timer(device, t, md_sync_timer); 3723 3727 drbd_device_post_work(device, MD_SYNC); 3724 3728 } 3725 3729
+1 -1
drivers/block/drbd/drbd_receiver.c
··· 5056 5056 wake_up(&device->misc_wait); 5057 5057 5058 5058 del_timer_sync(&device->resync_timer); 5059 - resync_timer_fn((unsigned long)device); 5059 + resync_timer_fn(&device->resync_timer); 5060 5060 5061 5061 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier, 5062 5062 * w_make_resync_request etc. which may still be on the worker queue
+2 -2
drivers/block/drbd/drbd_req.c
··· 1714 1714 * to expire twice (worst case) to become effective. Good enough. 1715 1715 */ 1716 1716 1717 - void request_timer_fn(unsigned long data) 1717 + void request_timer_fn(struct timer_list *t) 1718 1718 { 1719 - struct drbd_device *device = (struct drbd_device *) data; 1719 + struct drbd_device *device = from_timer(device, t, request_timer); 1720 1720 struct drbd_connection *connection = first_peer_device(device)->connection; 1721 1721 struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */ 1722 1722 struct net_conf *nc;
+1 -1
drivers/block/drbd/drbd_req.h
··· 294 294 struct bio_and_error *m); 295 295 extern void complete_master_bio(struct drbd_device *device, 296 296 struct bio_and_error *m); 297 - extern void request_timer_fn(unsigned long data); 297 + extern void request_timer_fn(struct timer_list *t); 298 298 extern void tl_restart(struct drbd_connection *connection, enum drbd_req_event what); 299 299 extern void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what); 300 300 extern void tl_abort_disk_io(struct drbd_device *device);
+4 -4
drivers/block/drbd/drbd_worker.c
··· 457 457 return 0; 458 458 } 459 459 460 - void resync_timer_fn(unsigned long data) 460 + void resync_timer_fn(struct timer_list *t) 461 461 { 462 - struct drbd_device *device = (struct drbd_device *) data; 462 + struct drbd_device *device = from_timer(device, t, resync_timer); 463 463 464 464 drbd_queue_work_if_unqueued( 465 465 &first_peer_device(device)->connection->sender_work, ··· 1705 1705 rcu_read_unlock(); 1706 1706 } 1707 1707 1708 - void start_resync_timer_fn(unsigned long data) 1708 + void start_resync_timer_fn(struct timer_list *t) 1709 1709 { 1710 - struct drbd_device *device = (struct drbd_device *) data; 1710 + struct drbd_device *device = from_timer(device, t, start_resync_timer); 1711 1711 drbd_device_post_work(device, RS_START); 1712 1712 } 1713 1713
+1 -1
drivers/char/dtlk.c
··· 84 84 static unsigned int dtlk_portlist[] = 85 85 {0x25e, 0x29e, 0x2de, 0x31e, 0x35e, 0x39e, 0}; 86 86 static wait_queue_head_t dtlk_process_list; 87 - static DEFINE_TIMER(dtlk_timer, dtlk_timer_tick, 0, 0); 87 + static DEFINE_TIMER(dtlk_timer, dtlk_timer_tick); 88 88 89 89 /* prototypes for file_operations struct */ 90 90 static ssize_t dtlk_read(struct file *, char __user *,
+1 -1
drivers/char/hangcheck-timer.c
··· 124 124 125 125 static void hangcheck_fire(unsigned long); 126 126 127 - static DEFINE_TIMER(hangcheck_ticktock, hangcheck_fire, 0, 0); 127 + static DEFINE_TIMER(hangcheck_ticktock, hangcheck_fire); 128 128 129 129 static void hangcheck_fire(unsigned long data) 130 130 {
+3 -5
drivers/char/hw_random/xgene-rng.c
··· 100 100 struct clk *clk; 101 101 }; 102 102 103 - static void xgene_rng_expired_timer(unsigned long arg) 103 + static void xgene_rng_expired_timer(struct timer_list *t) 104 104 { 105 - struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) arg; 105 + struct xgene_rng_dev *ctx = from_timer(ctx, t, failure_timer); 106 106 107 107 /* Clear failure counter as timer expired */ 108 108 disable_irq(ctx->irq); ··· 113 113 114 114 static void xgene_rng_start_timer(struct xgene_rng_dev *ctx) 115 115 { 116 - ctx->failure_timer.data = (unsigned long) ctx; 117 - ctx->failure_timer.function = xgene_rng_expired_timer; 118 116 ctx->failure_timer.expires = jiffies + 120 * HZ; 119 117 add_timer(&ctx->failure_timer); 120 118 } ··· 290 292 struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv; 291 293 292 294 ctx->failure_cnt = 0; 293 - init_timer(&ctx->failure_timer); 295 + timer_setup(&ctx->failure_timer, xgene_rng_expired_timer, 0); 294 296 295 297 ctx->revision = readl(ctx->csr_base + RNG_EIP_REV); 296 298
+1 -1
drivers/char/nwbutton.c
··· 27 27 28 28 static int button_press_count; /* The count of button presses */ 29 29 /* Times for the end of a sequence */ 30 - static DEFINE_TIMER(button_timer, button_sequence_finished, 0, 0); 30 + static DEFINE_TIMER(button_timer, button_sequence_finished); 31 31 static DECLARE_WAIT_QUEUE_HEAD(button_wait_queue); /* Used for blocking read */ 32 32 static char button_output_buffer[32]; /* Stores data to write out of device */ 33 33 static int bcount; /* The number of bytes in the buffer */
+1 -1
drivers/char/rtc.c
··· 137 137 #ifdef RTC_IRQ 138 138 static void rtc_dropped_irq(unsigned long data); 139 139 140 - static DEFINE_TIMER(rtc_irq_timer, rtc_dropped_irq, 0, 0); 140 + static DEFINE_TIMER(rtc_irq_timer, rtc_dropped_irq); 141 141 #endif 142 142 143 143 static ssize_t rtc_read(struct file *file, char __user *buf,
+5 -7
drivers/char/tlclk.c
··· 184 184 static int int_events; /* Event that generate a interrupt */ 185 185 static int got_event; /* if events processing have been done */ 186 186 187 - static void switchover_timeout(unsigned long data); 188 - static struct timer_list switchover_timer = 189 - TIMER_INITIALIZER(switchover_timeout , 0, 0); 187 + static void switchover_timeout(struct timer_list *t); 188 + static struct timer_list switchover_timer; 190 189 static unsigned long tlclk_timer_data; 191 190 192 191 static struct tlclk_alarms *alarm_events; ··· 804 805 goto out3; 805 806 } 806 807 807 - init_timer(&switchover_timer); 808 + timer_setup(&switchover_timer, switchover_timeout, 0); 808 809 809 810 ret = misc_register(&tlclk_miscdev); 810 811 if (ret < 0) { ··· 854 855 855 856 } 856 857 857 - static void switchover_timeout(unsigned long data) 858 + static void switchover_timeout(struct timer_list *unused) 858 859 { 859 - unsigned long flags = *(unsigned long *) data; 860 + unsigned long flags = tlclk_timer_data; 860 861 861 862 if ((flags & 1)) { 862 863 if ((inb(TLCLK_REG1) & 0x08) != (flags & 0x08)) ··· 921 922 /* TIMEOUT in ~10ms */ 922 923 switchover_timer.expires = jiffies + msecs_to_jiffies(10); 923 924 tlclk_timer_data = inb(TLCLK_REG1); 924 - switchover_timer.data = (unsigned long) &tlclk_timer_data; 925 925 mod_timer(&switchover_timer, switchover_timer.expires); 926 926 } else { 927 927 got_event = 1;
+9 -41
drivers/clocksource/Kconfig
··· 1 1 menu "Clock Source drivers" 2 - depends on !ARCH_USES_GETTIMEOFFSET 2 + depends on GENERIC_CLOCKEVENTS 3 3 4 4 config TIMER_OF 5 5 bool 6 - depends on GENERIC_CLOCKEVENTS 7 6 select TIMER_PROBE 8 7 9 8 config TIMER_ACPI ··· 29 30 30 31 config BCM2835_TIMER 31 32 bool "BCM2835 timer driver" if COMPILE_TEST 32 - depends on GENERIC_CLOCKEVENTS 33 33 select CLKSRC_MMIO 34 34 help 35 35 Enables the support for the BCM2835 timer driver. 36 36 37 37 config BCM_KONA_TIMER 38 38 bool "BCM mobile timer driver" if COMPILE_TEST 39 - depends on GENERIC_CLOCKEVENTS 40 39 select CLKSRC_MMIO 41 40 help 42 41 Enables the support for the BCM Kona mobile timer driver. 43 42 44 43 config DIGICOLOR_TIMER 45 44 bool "Digicolor timer driver" if COMPILE_TEST 46 - depends on GENERIC_CLOCKEVENTS 47 45 select CLKSRC_MMIO 48 46 depends on HAS_IOMEM 49 47 help ··· 48 52 49 53 config DW_APB_TIMER 50 54 bool "DW APB timer driver" if COMPILE_TEST 51 - depends on GENERIC_CLOCKEVENTS 52 55 help 53 56 Enables the support for the dw_apb timer. 54 57 ··· 58 63 59 64 config FTTMR010_TIMER 60 65 bool "Faraday Technology timer driver" if COMPILE_TEST 61 - depends on GENERIC_CLOCKEVENTS 62 66 depends on HAS_IOMEM 63 67 select CLKSRC_MMIO 64 68 select TIMER_OF ··· 84 90 85 91 config MESON6_TIMER 86 92 bool "Meson6 timer driver" if COMPILE_TEST 87 - depends on GENERIC_CLOCKEVENTS 88 93 select CLKSRC_MMIO 89 94 help 90 95 Enables the support for the Meson6 timer driver. ··· 98 105 99 106 config OWL_TIMER 100 107 bool "Owl timer driver" if COMPILE_TEST 101 - depends on GENERIC_CLOCKEVENTS 102 108 select CLKSRC_MMIO 103 109 help 104 110 Enables the support for the Actions Semi Owl timer driver. 
105 111 106 112 config SUN4I_TIMER 107 113 bool "Sun4i timer driver" if COMPILE_TEST 108 - depends on GENERIC_CLOCKEVENTS 109 114 depends on HAS_IOMEM 110 115 select CLKSRC_MMIO 111 116 select TIMER_OF ··· 126 135 127 136 config VT8500_TIMER 128 137 bool "VT8500 timer driver" if COMPILE_TEST 129 - depends on GENERIC_CLOCKEVENTS 130 138 depends on HAS_IOMEM 131 139 help 132 140 Enables support for the VT8500 driver. ··· 138 148 139 149 config ASM9260_TIMER 140 150 bool "ASM9260 timer driver" if COMPILE_TEST 141 - depends on GENERIC_CLOCKEVENTS 142 151 select CLKSRC_MMIO 143 152 select TIMER_OF 144 153 help ··· 160 171 161 172 config CLKSRC_DBX500_PRCMU 162 173 bool "Clocksource PRCMU Timer" if COMPILE_TEST 163 - depends on GENERIC_CLOCKEVENTS 164 174 depends on HAS_IOMEM 165 175 help 166 176 Use the always on PRCMU Timer as clocksource 167 177 168 178 config CLPS711X_TIMER 169 179 bool "Cirrus logic timer driver" if COMPILE_TEST 170 - depends on GENERIC_CLOCKEVENTS 171 180 select CLKSRC_MMIO 172 181 help 173 182 Enables support for the Cirrus Logic PS711 timer. 174 183 175 184 config ATLAS7_TIMER 176 185 bool "Atlas7 timer driver" if COMPILE_TEST 177 - depends on GENERIC_CLOCKEVENTS 178 186 select CLKSRC_MMIO 179 187 help 180 188 Enables support for the Atlas7 timer. 181 189 182 190 config MXS_TIMER 183 191 bool "Mxs timer driver" if COMPILE_TEST 184 - depends on GENERIC_CLOCKEVENTS 185 192 select CLKSRC_MMIO 186 193 select STMP_DEVICE 187 194 help ··· 185 200 186 201 config PRIMA2_TIMER 187 202 bool "Prima2 timer driver" if COMPILE_TEST 188 - depends on GENERIC_CLOCKEVENTS 189 203 select CLKSRC_MMIO 190 204 help 191 205 Enables support for the Prima2 timer. 
192 206 193 207 config U300_TIMER 194 208 bool "U300 timer driver" if COMPILE_TEST 195 - depends on GENERIC_CLOCKEVENTS 196 209 depends on ARM 197 210 select CLKSRC_MMIO 198 211 help ··· 198 215 199 216 config NSPIRE_TIMER 200 217 bool "NSpire timer driver" if COMPILE_TEST 201 - depends on GENERIC_CLOCKEVENTS 202 218 select CLKSRC_MMIO 203 219 help 204 220 Enables support for the Nspire timer. 205 221 206 222 config KEYSTONE_TIMER 207 223 bool "Keystone timer driver" if COMPILE_TEST 208 - depends on GENERIC_CLOCKEVENTS 209 224 depends on ARM || ARM64 210 225 select CLKSRC_MMIO 211 226 help ··· 211 230 212 231 config INTEGRATOR_AP_TIMER 213 232 bool "Integrator-ap timer driver" if COMPILE_TEST 214 - depends on GENERIC_CLOCKEVENTS 215 233 select CLKSRC_MMIO 216 234 help 217 235 Enables support for the Integrator-ap timer. ··· 233 253 234 254 config CLKSRC_LPC32XX 235 255 bool "Clocksource for LPC32XX" if COMPILE_TEST 236 - depends on GENERIC_CLOCKEVENTS && HAS_IOMEM 256 + depends on HAS_IOMEM 237 257 depends on ARM 238 258 select CLKSRC_MMIO 239 259 select TIMER_OF ··· 242 262 243 263 config CLKSRC_PISTACHIO 244 264 bool "Clocksource for Pistachio SoC" if COMPILE_TEST 245 - depends on GENERIC_CLOCKEVENTS && HAS_IOMEM 265 + depends on HAS_IOMEM 246 266 select TIMER_OF 247 267 help 248 268 Enables the clocksource for the Pistachio SoC. 
··· 278 298 279 299 config ARC_TIMERS 280 300 bool "Support for 32-bit TIMERn counters in ARC Cores" if COMPILE_TEST 281 - depends on GENERIC_CLOCKEVENTS 282 301 select TIMER_OF 283 302 help 284 303 These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores ··· 286 307 287 308 config ARC_TIMERS_64BIT 288 309 bool "Support for 64-bit counters in ARC HS38 cores" if COMPILE_TEST 289 - depends on GENERIC_CLOCKEVENTS 290 310 depends on ARC_TIMERS 291 311 select TIMER_OF 292 312 help ··· 385 407 386 408 config ATMEL_ST 387 409 bool "Atmel ST timer support" if COMPILE_TEST 388 - depends on GENERIC_CLOCKEVENTS 389 410 select TIMER_OF 390 411 select MFD_SYSCON 391 412 help ··· 403 426 404 427 config CLKSRC_SAMSUNG_PWM 405 428 bool "PWM timer driver for Samsung S3C, S5P" if COMPILE_TEST 406 - depends on GENERIC_CLOCKEVENTS 407 429 depends on HAS_IOMEM 408 430 help 409 431 This is a new clocksource driver for the PWM timer found in ··· 412 436 413 437 config FSL_FTM_TIMER 414 438 bool "Freescale FlexTimer Module driver" if COMPILE_TEST 415 - depends on GENERIC_CLOCKEVENTS 416 439 depends on HAS_IOMEM 417 440 select CLKSRC_MMIO 418 441 help ··· 425 450 426 451 config OXNAS_RPS_TIMER 427 452 bool "Oxford Semiconductor OXNAS RPS Timers driver" if COMPILE_TEST 428 - depends on GENERIC_CLOCKEVENTS 429 453 select TIMER_OF 430 454 select CLKSRC_MMIO 431 455 help ··· 435 461 436 462 config MTK_TIMER 437 463 bool "Mediatek timer driver" if COMPILE_TEST 438 - depends on GENERIC_CLOCKEVENTS && HAS_IOMEM 464 + depends on HAS_IOMEM 439 465 select TIMER_OF 440 466 select CLKSRC_MMIO 441 467 help ··· 453 479 config CLKSRC_JCORE_PIT 454 480 bool "J-Core PIT timer driver" if COMPILE_TEST 455 481 depends on OF 456 - depends on GENERIC_CLOCKEVENTS 457 482 depends on HAS_IOMEM 458 483 select CLKSRC_MMIO 459 484 help ··· 461 488 462 489 config SH_TIMER_CMT 463 490 bool "Renesas CMT timer driver" if COMPILE_TEST 464 - depends on GENERIC_CLOCKEVENTS 465 491 depends on HAS_IOMEM 466 
492 default SYS_SUPPORTS_SH_CMT 467 493 help ··· 470 498 471 499 config SH_TIMER_MTU2 472 500 bool "Renesas MTU2 timer driver" if COMPILE_TEST 473 - depends on GENERIC_CLOCKEVENTS 474 501 depends on HAS_IOMEM 475 502 default SYS_SUPPORTS_SH_MTU2 476 503 help ··· 479 508 480 509 config RENESAS_OSTM 481 510 bool "Renesas OSTM timer driver" if COMPILE_TEST 482 - depends on GENERIC_CLOCKEVENTS 483 511 select CLKSRC_MMIO 484 512 help 485 513 Enables the support for the Renesas OSTM. 486 514 487 515 config SH_TIMER_TMU 488 516 bool "Renesas TMU timer driver" if COMPILE_TEST 489 - depends on GENERIC_CLOCKEVENTS 490 517 depends on HAS_IOMEM 491 518 default SYS_SUPPORTS_SH_TMU 492 519 help ··· 494 525 495 526 config EM_TIMER_STI 496 527 bool "Renesas STI timer driver" if COMPILE_TEST 497 - depends on GENERIC_CLOCKEVENTS && HAS_IOMEM 528 + depends on HAS_IOMEM 498 529 default SYS_SUPPORTS_EM_STI 499 530 help 500 531 This enables build of a clocksource and clockevent driver for ··· 535 566 536 567 config CLKSRC_PXA 537 568 bool "Clocksource for PXA or SA-11x0 platform" if COMPILE_TEST 538 - depends on GENERIC_CLOCKEVENTS 539 569 depends on HAS_IOMEM 540 570 select CLKSRC_MMIO 541 571 help ··· 543 575 544 576 config H8300_TMR8 545 577 bool "Clockevent timer for the H8300 platform" if COMPILE_TEST 546 - depends on GENERIC_CLOCKEVENTS && HAS_IOMEM 578 + depends on HAS_IOMEM 547 579 help 548 580 This enables the 8 bits timer for the H8300 platform. 549 581 550 582 config H8300_TMR16 551 583 bool "Clockevent timer for the H83069 platform" if COMPILE_TEST 552 - depends on GENERIC_CLOCKEVENTS && HAS_IOMEM 584 + depends on HAS_IOMEM 553 585 help 554 586 This enables the 16 bits timer for the H8300 platform with the 555 587 H83069 cpu. 
556 588 557 589 config H8300_TPU 558 590 bool "Clocksource for the H8300 platform" if COMPILE_TEST 559 - depends on GENERIC_CLOCKEVENTS && HAS_IOMEM 591 + depends on HAS_IOMEM 560 592 help 561 593 This enables the clocksource for the H8300 platform with the 562 594 H8S2678 cpu. ··· 568 600 569 601 config CLKSRC_IMX_TPM 570 602 bool "Clocksource using i.MX TPM" if COMPILE_TEST 571 - depends on ARM && CLKDEV_LOOKUP && GENERIC_CLOCKEVENTS 603 + depends on ARM && CLKDEV_LOOKUP 572 604 select CLKSRC_MMIO 573 605 help 574 606 Enable this option to use IMX Timer/PWM Module (TPM) timer as
+22 -19
drivers/clocksource/arm_arch_timer.c
··· 299 299 #endif 300 300 301 301 #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND 302 - DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, 303 - timer_unstable_counter_workaround); 302 + DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround); 304 303 EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround); 305 304 306 305 DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled); ··· 1267 1268 1268 1269 iounmap(cntctlbase); 1269 1270 1270 - if (!best_frame) 1271 - pr_err("Unable to find a suitable frame in timer @ %pa\n", 1272 - &timer_mem->cntctlbase); 1273 - 1274 1271 return best_frame; 1275 1272 } 1276 1273 ··· 1367 1372 1368 1373 frame = arch_timer_mem_find_best_frame(timer_mem); 1369 1374 if (!frame) { 1375 + pr_err("Unable to find a suitable frame in timer @ %pa\n", 1376 + &timer_mem->cntctlbase); 1370 1377 ret = -EINVAL; 1371 1378 goto out; 1372 1379 } ··· 1417 1420 static int __init arch_timer_mem_acpi_init(int platform_timer_count) 1418 1421 { 1419 1422 struct arch_timer_mem *timers, *timer; 1420 - struct arch_timer_mem_frame *frame; 1423 + struct arch_timer_mem_frame *frame, *best_frame = NULL; 1421 1424 int timer_count, i, ret = 0; 1422 1425 1423 1426 timers = kcalloc(platform_timer_count, sizeof(*timers), ··· 1429 1432 if (ret || !timer_count) 1430 1433 goto out; 1431 1434 1432 - for (i = 0; i < timer_count; i++) { 1433 - ret = arch_timer_mem_verify_cntfrq(&timers[i]); 1434 - if (ret) { 1435 - pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n"); 1436 - goto out; 1437 - } 1438 - } 1439 - 1440 1435 /* 1441 1436 * While unlikely, it's theoretically possible that none of the frames 1442 1437 * in a timer expose the combination of feature we want. 
··· 1437 1448 timer = &timers[i]; 1438 1449 1439 1450 frame = arch_timer_mem_find_best_frame(timer); 1440 - if (frame) 1441 - break; 1451 + if (!best_frame) 1452 + best_frame = frame; 1453 + 1454 + ret = arch_timer_mem_verify_cntfrq(timer); 1455 + if (ret) { 1456 + pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n"); 1457 + goto out; 1458 + } 1459 + 1460 + if (!best_frame) /* implies !frame */ 1461 + /* 1462 + * Only complain about missing suitable frames if we 1463 + * haven't already found one in a previous iteration. 1464 + */ 1465 + pr_err("Unable to find a suitable frame in timer @ %pa\n", 1466 + &timer->cntctlbase); 1442 1467 } 1443 1468 1444 - if (frame) 1445 - ret = arch_timer_mem_frame_register(frame); 1469 + if (best_frame) 1470 + ret = arch_timer_mem_frame_register(best_frame); 1446 1471 out: 1447 1472 kfree(timers); 1448 1473 return ret;
+7 -5
drivers/clocksource/mips-gic-timer.c
··· 39 39 40 40 static int gic_next_event(unsigned long delta, struct clock_event_device *evt) 41 41 { 42 - unsigned long flags; 42 + int cpu = cpumask_first(evt->cpumask); 43 43 u64 cnt; 44 44 int res; 45 45 46 46 cnt = gic_read_count(); 47 47 cnt += (u64)delta; 48 - local_irq_save(flags); 49 - write_gic_vl_other(mips_cm_vp_id(cpumask_first(evt->cpumask))); 50 - write_gic_vo_compare(cnt); 51 - local_irq_restore(flags); 48 + if (cpu == raw_smp_processor_id()) { 49 + write_gic_vl_compare(cnt); 50 + } else { 51 + write_gic_vl_other(mips_cm_vp_id(cpu)); 52 + write_gic_vo_compare(cnt); 53 + } 52 54 res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0; 53 55 return res; 54 56 }
+2 -2
drivers/clocksource/owl-timer.c
··· 125 125 126 126 owl_timer_base = of_io_request_and_map(node, 0, "owl-timer"); 127 127 if (IS_ERR(owl_timer_base)) { 128 - pr_err("Can't map timer registers"); 128 + pr_err("Can't map timer registers\n"); 129 129 return PTR_ERR(owl_timer_base); 130 130 } 131 131 ··· 134 134 135 135 timer1_irq = of_irq_get_byname(node, "timer1"); 136 136 if (timer1_irq <= 0) { 137 - pr_err("Can't parse timer1 IRQ"); 137 + pr_err("Can't parse timer1 IRQ\n"); 138 138 return -EINVAL; 139 139 } 140 140
+1 -1
drivers/clocksource/rockchip_timer.c
··· 274 274 TIMER_NAME, rk_clksrc->freq, 250, 32, 275 275 clocksource_mmio_readl_down); 276 276 if (ret) { 277 - pr_err("Failed to register clocksource"); 277 + pr_err("Failed to register clocksource\n"); 278 278 goto out_clocksource; 279 279 } 280 280
+37 -47
drivers/clocksource/sh_cmt.c
··· 25 25 #include <linux/irq.h> 26 26 #include <linux/module.h> 27 27 #include <linux/of.h> 28 + #include <linux/of_device.h> 28 29 #include <linux/platform_device.h> 29 30 #include <linux/pm_domain.h> 30 31 #include <linux/pm_runtime.h> ··· 40 39 * SoC but also on the particular instance. The following table lists the main 41 40 * characteristics of those flavours. 42 41 * 43 - * 16B 32B 32B-F 48B 48B-2 42 + * 16B 32B 32B-F 48B R-Car Gen2 44 43 * ----------------------------------------------------------------------------- 45 44 * Channels 2 1/4 1 6 2/8 46 45 * Control Width 16 16 16 16 32 47 46 * Counter Width 16 32 32 32/48 32/48 48 47 * Shared Start/Stop Y Y Y Y N 49 48 * 50 - * The 48-bit gen2 version has a per-channel start/stop register located in the 51 - * channel registers block. All other versions have a shared start/stop register 52 - * located in the global space. 49 + * The r8a73a4 / R-Car Gen2 version has a per-channel start/stop register 50 + * located in the channel registers block. All other versions have a shared 51 + * start/stop register located in the global space. 53 52 * 54 53 * Channels are indexed from 0 to N-1 in the documentation. 
The channel index 55 54 * infers the start/stop bit position in the control register and the channel ··· 67 66 enum sh_cmt_model { 68 67 SH_CMT_16BIT, 69 68 SH_CMT_32BIT, 70 - SH_CMT_32BIT_FAST, 71 69 SH_CMT_48BIT, 72 - SH_CMT_48BIT_GEN2, 70 + SH_CMT0_RCAR_GEN2, 71 + SH_CMT1_RCAR_GEN2, 73 72 }; 74 73 75 74 struct sh_cmt_info { 76 75 enum sh_cmt_model model; 76 + 77 + unsigned int channels_mask; 77 78 78 79 unsigned long width; /* 16 or 32 bit version of hardware block */ 79 80 unsigned long overflow_bit; ··· 203 200 .read_count = sh_cmt_read32, 204 201 .write_count = sh_cmt_write32, 205 202 }, 206 - [SH_CMT_32BIT_FAST] = { 207 - .model = SH_CMT_32BIT_FAST, 208 - .width = 32, 209 - .overflow_bit = SH_CMT32_CMCSR_CMF, 210 - .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF), 211 - .read_control = sh_cmt_read16, 212 - .write_control = sh_cmt_write16, 213 - .read_count = sh_cmt_read32, 214 - .write_count = sh_cmt_write32, 215 - }, 216 203 [SH_CMT_48BIT] = { 217 204 .model = SH_CMT_48BIT, 205 + .channels_mask = 0x3f, 218 206 .width = 32, 219 207 .overflow_bit = SH_CMT32_CMCSR_CMF, 220 208 .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF), ··· 214 220 .read_count = sh_cmt_read32, 215 221 .write_count = sh_cmt_write32, 216 222 }, 217 - [SH_CMT_48BIT_GEN2] = { 218 - .model = SH_CMT_48BIT_GEN2, 223 + [SH_CMT0_RCAR_GEN2] = { 224 + .model = SH_CMT0_RCAR_GEN2, 225 + .channels_mask = 0x60, 226 + .width = 32, 227 + .overflow_bit = SH_CMT32_CMCSR_CMF, 228 + .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF), 229 + .read_control = sh_cmt_read32, 230 + .write_control = sh_cmt_write32, 231 + .read_count = sh_cmt_read32, 232 + .write_count = sh_cmt_write32, 233 + }, 234 + [SH_CMT1_RCAR_GEN2] = { 235 + .model = SH_CMT1_RCAR_GEN2, 236 + .channels_mask = 0xff, 219 237 .width = 32, 220 238 .overflow_bit = SH_CMT32_CMCSR_CMF, 221 239 .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF), ··· 865 859 ch->cmt = cmt; 866 860 ch->index = index; 867 861 ch->hwidx = hwidx; 
862 + ch->timer_bit = hwidx; 868 863 869 864 /* 870 865 * Compute the address of the channel control register block. For the ··· 880 873 case SH_CMT_48BIT: 881 874 ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10; 882 875 break; 883 - case SH_CMT_32BIT_FAST: 884 - /* 885 - * The 32-bit "fast" timer has a single channel at hwidx 5 but 886 - * is located at offset 0x40 instead of 0x60 for some reason. 887 - */ 888 - ch->ioctrl = cmt->mapbase + 0x40; 889 - break; 890 - case SH_CMT_48BIT_GEN2: 876 + case SH_CMT0_RCAR_GEN2: 877 + case SH_CMT1_RCAR_GEN2: 891 878 ch->iostart = cmt->mapbase + ch->hwidx * 0x100; 892 879 ch->ioctrl = ch->iostart + 0x10; 880 + ch->timer_bit = 0; 893 881 break; 894 882 } 895 883 ··· 895 893 896 894 ch->match_value = ch->max_match_value; 897 895 raw_spin_lock_init(&ch->lock); 898 - 899 - ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2 ? 0 : ch->hwidx; 900 896 901 897 ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev), 902 898 clockevent, clocksource); ··· 935 935 MODULE_DEVICE_TABLE(platform, sh_cmt_id_table); 936 936 937 937 static const struct of_device_id sh_cmt_of_table[] __maybe_unused = { 938 - { .compatible = "renesas,cmt-32", .data = &sh_cmt_info[SH_CMT_32BIT] }, 939 - { .compatible = "renesas,cmt-32-fast", .data = &sh_cmt_info[SH_CMT_32BIT_FAST] }, 940 938 { .compatible = "renesas,cmt-48", .data = &sh_cmt_info[SH_CMT_48BIT] }, 941 - { .compatible = "renesas,cmt-48-gen2", .data = &sh_cmt_info[SH_CMT_48BIT_GEN2] }, 939 + { 940 + /* deprecated, preserved for backward compatibility */ 941 + .compatible = "renesas,cmt-48-gen2", 942 + .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2] 943 + }, 944 + { .compatible = "renesas,rcar-gen2-cmt0", .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2] }, 945 + { .compatible = "renesas,rcar-gen2-cmt1", .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2] }, 942 946 { } 943 947 }; 944 948 MODULE_DEVICE_TABLE(of, sh_cmt_of_table); 945 - 946 - static int sh_cmt_parse_dt(struct sh_cmt_device *cmt) 947 - { 948 - struct device_node 
*np = cmt->pdev->dev.of_node; 949 - 950 - return of_property_read_u32(np, "renesas,channels-mask", 951 - &cmt->hw_channels); 952 - } 953 949 954 950 static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) 955 951 { ··· 957 961 raw_spin_lock_init(&cmt->lock); 958 962 959 963 if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) { 960 - const struct of_device_id *id; 961 - 962 - id = of_match_node(sh_cmt_of_table, pdev->dev.of_node); 963 - cmt->info = id->data; 964 - 965 - ret = sh_cmt_parse_dt(cmt); 966 - if (ret < 0) 967 - return ret; 964 + cmt->info = of_device_get_match_data(&pdev->dev); 965 + cmt->hw_channels = cmt->info->channels_mask; 968 966 } else if (pdev->dev.platform_data) { 969 967 struct sh_timer_config *cfg = pdev->dev.platform_data; 970 968 const struct platform_device_id *id = pdev->id_entry;
+2 -2
drivers/clocksource/timer-fttmr010.c
··· 264 264 265 265 fttmr010->base = of_iomap(np, 0); 266 266 if (!fttmr010->base) { 267 - pr_err("Can't remap registers"); 267 + pr_err("Can't remap registers\n"); 268 268 ret = -ENXIO; 269 269 goto out_free; 270 270 } 271 271 /* IRQ for timer 1 */ 272 272 irq = irq_of_parse_and_map(np, 0); 273 273 if (irq <= 0) { 274 - pr_err("Can't parse IRQ"); 274 + pr_err("Can't parse IRQ\n"); 275 275 ret = -EINVAL; 276 276 goto out_unmap; 277 277 }
+12
drivers/clocksource/timer-of.c
··· 176 176 timer_base_exit(&to->of_base); 177 177 return ret; 178 178 } 179 + 180 + void timer_of_exit(struct timer_of *to) 181 + { 182 + if (to->flags & TIMER_OF_IRQ) 183 + timer_irq_exit(&to->of_irq); 184 + 185 + if (to->flags & TIMER_OF_CLOCK) 186 + timer_clk_exit(&to->of_clk); 187 + 188 + if (to->flags & TIMER_OF_BASE) 189 + timer_base_exit(&to->of_base); 190 + }
+3
drivers/clocksource/timer-of.h
··· 67 67 68 68 extern int __init timer_of_init(struct device_node *np, 69 69 struct timer_of *to); 70 + 71 + extern void timer_of_exit(struct timer_of *to); 72 + 70 73 #endif
+7 -6
drivers/cpufreq/powernv-cpufreq.c
··· 90 90 int last_gpstate_idx; 91 91 spinlock_t gpstate_lock; 92 92 struct timer_list timer; 93 + struct cpufreq_policy *policy; 93 94 }; 94 95 95 96 static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1]; ··· 626 625 * according quadratic equation. Queues a new timer if it is still not equal 627 626 * to local pstate 628 627 */ 629 - void gpstate_timer_handler(unsigned long data) 628 + void gpstate_timer_handler(struct timer_list *t) 630 629 { 631 - struct cpufreq_policy *policy = (struct cpufreq_policy *)data; 632 - struct global_pstate_info *gpstates = policy->driver_data; 630 + struct global_pstate_info *gpstates = from_timer(gpstates, t, timer); 631 + struct cpufreq_policy *policy = gpstates->policy; 633 632 int gpstate_idx, lpstate_idx; 634 633 unsigned long val; 635 634 unsigned int time_diff = jiffies_to_msecs(jiffies) ··· 801 800 policy->driver_data = gpstates; 802 801 803 802 /* initialize timer */ 804 - init_timer_pinned_deferrable(&gpstates->timer); 805 - gpstates->timer.data = (unsigned long)policy; 806 - gpstates->timer.function = gpstate_timer_handler; 803 + gpstates->policy = policy; 804 + timer_setup(&gpstates->timer, gpstate_timer_handler, 805 + TIMER_PINNED | TIMER_DEFERRABLE); 807 806 gpstates->timer.expires = jiffies + 808 807 msecs_to_jiffies(GPSTATE_TIMER_INTERVAL); 809 808 spin_lock_init(&gpstates->gpstate_lock);
+3 -3
drivers/crypto/axis/artpec6_crypto.c
··· 2072 2072 del_timer(&ac->timer); 2073 2073 } 2074 2074 2075 - static void artpec6_crypto_timeout(unsigned long data) 2075 + static void artpec6_crypto_timeout(struct timer_list *t) 2076 2076 { 2077 - struct artpec6_crypto *ac = (struct artpec6_crypto *) data; 2077 + struct artpec6_crypto *ac = from_timer(ac, t, timer); 2078 2078 2079 2079 dev_info_ratelimited(artpec6_crypto_dev, "timeout\n"); 2080 2080 ··· 3063 3063 spin_lock_init(&ac->queue_lock); 3064 3064 INIT_LIST_HEAD(&ac->queue); 3065 3065 INIT_LIST_HEAD(&ac->pending); 3066 - setup_timer(&ac->timer, artpec6_crypto_timeout, (unsigned long) ac); 3066 + timer_setup(&ac->timer, artpec6_crypto_timeout, 0); 3067 3067 3068 3068 ac->base = base; 3069 3069
+2 -2
drivers/crypto/mv_cesa.c
··· 149 149 int count_add; 150 150 }; 151 151 152 - static void mv_completion_timer_callback(unsigned long unused) 152 + static void mv_completion_timer_callback(struct timer_list *unused) 153 153 { 154 154 int active = readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_EN_SEC_ACCL0; 155 155 ··· 167 167 168 168 static void mv_setup_timer(void) 169 169 { 170 - setup_timer(&cpg->completion_timer, &mv_completion_timer_callback, 0); 170 + timer_setup(&cpg->completion_timer, mv_completion_timer_callback, 0); 171 171 mod_timer(&cpg->completion_timer, 172 172 jiffies + msecs_to_jiffies(MV_CESA_EXPIRE)); 173 173 }
+3 -4
drivers/crypto/picoxcell_crypto.c
··· 1125 1125 return IRQ_HANDLED; 1126 1126 } 1127 1127 1128 - static void spacc_packet_timeout(unsigned long data) 1128 + static void spacc_packet_timeout(struct timer_list *t) 1129 1129 { 1130 - struct spacc_engine *engine = (struct spacc_engine *)data; 1130 + struct spacc_engine *engine = from_timer(engine, t, packet_timeout); 1131 1131 1132 1132 spacc_process_done(engine); 1133 1133 } ··· 1714 1714 writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN, 1715 1715 engine->regs + SPA_IRQ_EN_REG_OFFSET); 1716 1716 1717 - setup_timer(&engine->packet_timeout, spacc_packet_timeout, 1718 - (unsigned long)engine); 1717 + timer_setup(&engine->packet_timeout, spacc_packet_timeout, 0); 1719 1718 1720 1719 INIT_LIST_HEAD(&engine->pending); 1721 1720 INIT_LIST_HEAD(&engine->completed);
+5 -5
drivers/firewire/core-transaction.c
··· 137 137 } 138 138 EXPORT_SYMBOL(fw_cancel_transaction); 139 139 140 - static void split_transaction_timeout_callback(unsigned long data) 140 + static void split_transaction_timeout_callback(struct timer_list *timer) 141 141 { 142 - struct fw_transaction *t = (struct fw_transaction *)data; 142 + struct fw_transaction *t = from_timer(t, timer, split_timeout_timer); 143 143 struct fw_card *card = t->card; 144 144 unsigned long flags; 145 145 ··· 373 373 t->tlabel = tlabel; 374 374 t->card = card; 375 375 t->is_split_transaction = false; 376 - setup_timer(&t->split_timeout_timer, 377 - split_transaction_timeout_callback, (unsigned long)t); 376 + timer_setup(&t->split_timeout_timer, 377 + split_transaction_timeout_callback, 0); 378 378 t->callback = callback; 379 379 t->callback_data = callback_data; 380 380 ··· 423 423 struct transaction_callback_data d; 424 424 struct fw_transaction t; 425 425 426 - init_timer_on_stack(&t.split_timeout_timer); 426 + timer_setup_on_stack(&t.split_timeout_timer, NULL, 0); 427 427 init_completion(&d.done); 428 428 d.payload = payload; 429 429 fw_send_request(card, &t, tcode, destination_id, generation, speed,
+3 -4
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
··· 975 975 round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES)); 976 976 } 977 977 978 - static void hangcheck_handler(unsigned long data) 978 + static void hangcheck_handler(struct timer_list *t) 979 979 { 980 - struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data; 980 + struct etnaviv_gpu *gpu = from_timer(gpu, t, hangcheck_timer); 981 981 u32 fence = gpu->completed_fence; 982 982 bool progress = false; 983 983 ··· 1648 1648 INIT_WORK(&gpu->recover_work, recover_worker); 1649 1649 init_waitqueue_head(&gpu->fence_event); 1650 1650 1651 - setup_deferrable_timer(&gpu->hangcheck_timer, hangcheck_handler, 1652 - (unsigned long)gpu); 1651 + timer_setup(&gpu->hangcheck_timer, hangcheck_handler, TIMER_DEFERRABLE); 1653 1652 1654 1653 priv->gpu[priv->num_gpus++] = gpu; 1655 1654
+3 -5
drivers/gpu/drm/gma500/psb_lid.c
··· 23 23 #include "psb_intel_reg.h" 24 24 #include <linux/spinlock.h> 25 25 26 - static void psb_lid_timer_func(unsigned long data) 26 + static void psb_lid_timer_func(struct timer_list *t) 27 27 { 28 - struct drm_psb_private * dev_priv = (struct drm_psb_private *)data; 28 + struct drm_psb_private *dev_priv = from_timer(dev_priv, t, lid_timer); 29 29 struct drm_device *dev = (struct drm_device *)dev_priv->dev; 30 30 struct timer_list *lid_timer = &dev_priv->lid_timer; 31 31 unsigned long irq_flags; ··· 77 77 spin_lock_init(&dev_priv->lid_lock); 78 78 spin_lock_irqsave(&dev_priv->lid_lock, irq_flags); 79 79 80 - init_timer(lid_timer); 80 + timer_setup(lid_timer, psb_lid_timer_func, 0); 81 81 82 - lid_timer->data = (unsigned long)dev_priv; 83 - lid_timer->function = psb_lid_timer_func; 84 82 lid_timer->expires = jiffies + PSB_LID_DELAY; 85 83 86 84 add_timer(lid_timer);
+18 -14
drivers/hsi/clients/ssi_protocol.c
··· 464 464 hsi_async_read(cl, msg); 465 465 } 466 466 467 - static void ssip_keep_alive(unsigned long data) 467 + static void ssip_keep_alive(struct timer_list *t) 468 468 { 469 - struct hsi_client *cl = (struct hsi_client *)data; 470 - struct ssi_protocol *ssi = hsi_client_drvdata(cl); 469 + struct ssi_protocol *ssi = from_timer(ssi, t, keep_alive); 470 + struct hsi_client *cl = ssi->cl; 471 471 472 472 dev_dbg(&cl->device, "Keep alive kick in: m(%d) r(%d) s(%d)\n", 473 473 ssi->main_state, ssi->recv_state, ssi->send_state); ··· 490 490 spin_unlock(&ssi->lock); 491 491 } 492 492 493 - static void ssip_wd(unsigned long data) 493 + static void ssip_rx_wd(struct timer_list *t) 494 494 { 495 - struct hsi_client *cl = (struct hsi_client *)data; 495 + struct ssi_protocol *ssi = from_timer(ssi, t, rx_wd); 496 + struct hsi_client *cl = ssi->cl; 497 + 498 + dev_err(&cl->device, "Watchdog trigerred\n"); 499 + ssip_error(cl); 500 + } 501 + 502 + static void ssip_tx_wd(struct timer_list *t) 503 + { 504 + struct ssi_protocol *ssi = from_timer(ssi, t, tx_wd); 505 + struct hsi_client *cl = ssi->cl; 496 506 497 507 dev_err(&cl->device, "Watchdog trigerred\n"); 498 508 ssip_error(cl); ··· 1094 1084 } 1095 1085 1096 1086 spin_lock_init(&ssi->lock); 1097 - init_timer_deferrable(&ssi->rx_wd); 1098 - init_timer_deferrable(&ssi->tx_wd); 1099 - init_timer(&ssi->keep_alive); 1100 - ssi->rx_wd.data = (unsigned long)cl; 1101 - ssi->rx_wd.function = ssip_wd; 1102 - ssi->tx_wd.data = (unsigned long)cl; 1103 - ssi->tx_wd.function = ssip_wd; 1104 - ssi->keep_alive.data = (unsigned long)cl; 1105 - ssi->keep_alive.function = ssip_keep_alive; 1087 + timer_setup(&ssi->rx_wd, ssip_rx_wd, TIMER_DEFERRABLE); 1088 + timer_setup(&ssi->tx_wd, ssip_tx_wd, TIMER_DEFERRABLE); 1089 + timer_setup(&ssi->keep_alive, ssip_keep_alive, 0); 1106 1090 INIT_LIST_HEAD(&ssi->txqueue); 1107 1091 INIT_LIST_HEAD(&ssi->cmdqueue); 1108 1092 atomic_set(&ssi->tx_usecnt, 0);
+2 -2
drivers/ide/ide-io.c
··· 611 611 * logic that wants cleaning up. 612 612 */ 613 613 614 - void ide_timer_expiry (unsigned long data) 614 + void ide_timer_expiry (struct timer_list *t) 615 615 { 616 - ide_hwif_t *hwif = (ide_hwif_t *)data; 616 + ide_hwif_t *hwif = from_timer(hwif, t, timer); 617 617 ide_drive_t *uninitialized_var(drive); 618 618 ide_handler_t *handler; 619 619 unsigned long flags;
+1 -1
drivers/ide/ide-probe.c
··· 1184 1184 1185 1185 spin_lock_init(&hwif->lock); 1186 1186 1187 - setup_timer(&hwif->timer, &ide_timer_expiry, (unsigned long)hwif); 1187 + timer_setup(&hwif->timer, ide_timer_expiry, 0); 1188 1188 1189 1189 init_completion(&hwif->gendev_rel_comp); 1190 1190
+1 -1
drivers/input/touchscreen/s3c2410_ts.c
··· 145 145 } 146 146 } 147 147 148 - static DEFINE_TIMER(touch_timer, touch_timer_fire, 0, 0); 148 + static DEFINE_TIMER(touch_timer, touch_timer_fire); 149 149 150 150 /** 151 151 * stylus_irq - touchscreen stylus event interrupt
+4 -6
drivers/macintosh/smu.c
··· 103 103 static int smu_irq_inited; 104 104 static unsigned long smu_cmdbuf_abs; 105 105 106 - static void smu_i2c_retry(unsigned long data); 106 + static void smu_i2c_retry(struct timer_list *t); 107 107 108 108 /* 109 109 * SMU driver low level stuff ··· 582 582 if (!smu) 583 583 return 0; 584 584 585 - init_timer(&smu->i2c_timer); 586 - smu->i2c_timer.function = smu_i2c_retry; 587 - smu->i2c_timer.data = (unsigned long)smu; 585 + timer_setup(&smu->i2c_timer, smu_i2c_retry, 0); 588 586 589 587 if (smu->db_node) { 590 588 smu->db_irq = irq_of_parse_and_map(smu->db_node, 0); ··· 753 755 } 754 756 755 757 756 - static void smu_i2c_retry(unsigned long data) 758 + static void smu_i2c_retry(struct timer_list *unused) 757 759 { 758 760 struct smu_i2c_cmd *cmd = smu->cmd_i2c_cur; 759 761 ··· 793 795 BUG_ON(cmd != smu->cmd_i2c_cur); 794 796 if (!smu_irq_inited) { 795 797 mdelay(5); 796 - smu_i2c_retry(0); 798 + smu_i2c_retry(NULL); 797 799 return; 798 800 } 799 801 mod_timer(&smu->i2c_timer, jiffies + msecs_to_jiffies(5));
+6 -6
drivers/mailbox/mailbox-altera.c
··· 57 57 58 58 /* If the controller supports only RX polling mode */ 59 59 struct timer_list rxpoll_timer; 60 + struct mbox_chan *chan; 60 61 }; 61 62 62 63 static struct altera_mbox *mbox_chan_to_altera_mbox(struct mbox_chan *chan) ··· 139 138 } 140 139 } 141 140 142 - static void altera_mbox_poll_rx(unsigned long data) 141 + static void altera_mbox_poll_rx(struct timer_list *t) 143 142 { 144 - struct mbox_chan *chan = (struct mbox_chan *)data; 145 - struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan); 143 + struct altera_mbox *mbox = from_timer(mbox, t, rxpoll_timer); 146 144 147 - altera_mbox_rx_data(chan); 145 + altera_mbox_rx_data(mbox->chan); 148 146 149 147 mod_timer(&mbox->rxpoll_timer, 150 148 jiffies + msecs_to_jiffies(MBOX_POLLING_MS)); ··· 206 206 207 207 polling: 208 208 /* Setup polling timer */ 209 - setup_timer(&mbox->rxpoll_timer, altera_mbox_poll_rx, 210 - (unsigned long)chan); 209 + mbox->chan = chan; 210 + timer_setup(&mbox->rxpoll_timer, altera_mbox_poll_rx, 0); 211 211 mod_timer(&mbox->rxpoll_timer, 212 212 jiffies + msecs_to_jiffies(MBOX_POLLING_MS)); 213 213
+36 -28
drivers/media/usb/pvrusb2/pvrusb2-hdw.c
··· 330 330 static int pvr2_hdw_cmd_usbstream(struct pvr2_hdw *hdw,int runFl); 331 331 static int pvr2_hdw_commit_setup(struct pvr2_hdw *hdw); 332 332 static int pvr2_hdw_get_eeprom_addr(struct pvr2_hdw *hdw); 333 - static void pvr2_hdw_quiescent_timeout(unsigned long); 334 - static void pvr2_hdw_decoder_stabilization_timeout(unsigned long); 335 - static void pvr2_hdw_encoder_wait_timeout(unsigned long); 336 - static void pvr2_hdw_encoder_run_timeout(unsigned long); 333 + static void pvr2_hdw_quiescent_timeout(struct timer_list *); 334 + static void pvr2_hdw_decoder_stabilization_timeout(struct timer_list *); 335 + static void pvr2_hdw_encoder_wait_timeout(struct timer_list *); 336 + static void pvr2_hdw_encoder_run_timeout(struct timer_list *); 337 337 static int pvr2_issue_simple_cmd(struct pvr2_hdw *,u32); 338 338 static int pvr2_send_request_ex(struct pvr2_hdw *hdw, 339 339 unsigned int timeout,int probe_fl, ··· 2373 2373 } 2374 2374 if (!hdw) goto fail; 2375 2375 2376 - setup_timer(&hdw->quiescent_timer, pvr2_hdw_quiescent_timeout, 2377 - (unsigned long)hdw); 2376 + timer_setup(&hdw->quiescent_timer, pvr2_hdw_quiescent_timeout, 0); 2378 2377 2379 - setup_timer(&hdw->decoder_stabilization_timer, 2380 - pvr2_hdw_decoder_stabilization_timeout, 2381 - (unsigned long)hdw); 2378 + timer_setup(&hdw->decoder_stabilization_timer, 2379 + pvr2_hdw_decoder_stabilization_timeout, 0); 2382 2380 2383 - setup_timer(&hdw->encoder_wait_timer, pvr2_hdw_encoder_wait_timeout, 2384 - (unsigned long)hdw); 2381 + timer_setup(&hdw->encoder_wait_timer, pvr2_hdw_encoder_wait_timeout, 2382 + 0); 2385 2383 2386 - setup_timer(&hdw->encoder_run_timer, pvr2_hdw_encoder_run_timeout, 2387 - (unsigned long)hdw); 2384 + timer_setup(&hdw->encoder_run_timer, pvr2_hdw_encoder_run_timeout, 0); 2388 2385 2389 2386 hdw->master_state = PVR2_STATE_DEAD; 2390 2387 ··· 3536 3539 complete(&hdw->ctl_done); 3537 3540 } 3538 3541 3542 + struct hdw_timer { 3543 + struct timer_list timer; 3544 + struct pvr2_hdw 
*hdw; 3545 + }; 3539 3546 3540 - static void pvr2_ctl_timeout(unsigned long data) 3547 + static void pvr2_ctl_timeout(struct timer_list *t) 3541 3548 { 3542 - struct pvr2_hdw *hdw = (struct pvr2_hdw *)data; 3549 + struct hdw_timer *timer = from_timer(timer, t, timer); 3550 + struct pvr2_hdw *hdw = timer->hdw; 3551 + 3543 3552 if (hdw->ctl_write_pend_flag || hdw->ctl_read_pend_flag) { 3544 3553 hdw->ctl_timeout_flag = !0; 3545 3554 if (hdw->ctl_write_pend_flag) ··· 3567 3564 { 3568 3565 unsigned int idx; 3569 3566 int status = 0; 3570 - struct timer_list timer; 3567 + struct hdw_timer timer = { 3568 + .hdw = hdw, 3569 + }; 3570 + 3571 3571 if (!hdw->ctl_lock_held) { 3572 3572 pvr2_trace(PVR2_TRACE_ERROR_LEGS, 3573 3573 "Attempted to execute control transfer without lock!!"); ··· 3627 3621 hdw->ctl_timeout_flag = 0; 3628 3622 hdw->ctl_write_pend_flag = 0; 3629 3623 hdw->ctl_read_pend_flag = 0; 3630 - setup_timer(&timer, pvr2_ctl_timeout, (unsigned long)hdw); 3631 - timer.expires = jiffies + timeout; 3624 + timer_setup_on_stack(&timer.timer, pvr2_ctl_timeout, 0); 3625 + timer.timer.expires = jiffies + timeout; 3632 3626 3633 3627 if (write_len && write_data) { 3634 3628 hdw->cmd_debug_state = 2; ··· 3683 3677 } 3684 3678 3685 3679 /* Start timer */ 3686 - add_timer(&timer); 3680 + add_timer(&timer.timer); 3687 3681 3688 3682 /* Now wait for all I/O to complete */ 3689 3683 hdw->cmd_debug_state = 4; ··· 3693 3687 hdw->cmd_debug_state = 5; 3694 3688 3695 3689 /* Stop timer */ 3696 - del_timer_sync(&timer); 3690 + del_timer_sync(&timer.timer); 3697 3691 3698 3692 hdw->cmd_debug_state = 6; 3699 3693 status = 0; ··· 3775 3769 if ((status < 0) && (!probe_fl)) { 3776 3770 pvr2_hdw_render_useless(hdw); 3777 3771 } 3772 + destroy_timer_on_stack(&timer.timer); 3773 + 3778 3774 return status; 3779 3775 } 3780 3776 ··· 4374 4366 4375 4367 4376 4368 /* Timeout function for quiescent timer. 
*/ 4377 - static void pvr2_hdw_quiescent_timeout(unsigned long data) 4369 + static void pvr2_hdw_quiescent_timeout(struct timer_list *t) 4378 4370 { 4379 - struct pvr2_hdw *hdw = (struct pvr2_hdw *)data; 4371 + struct pvr2_hdw *hdw = from_timer(hdw, t, quiescent_timer); 4380 4372 hdw->state_decoder_quiescent = !0; 4381 4373 trace_stbit("state_decoder_quiescent",hdw->state_decoder_quiescent); 4382 4374 hdw->state_stale = !0; ··· 4385 4377 4386 4378 4387 4379 /* Timeout function for decoder stabilization timer. */ 4388 - static void pvr2_hdw_decoder_stabilization_timeout(unsigned long data) 4380 + static void pvr2_hdw_decoder_stabilization_timeout(struct timer_list *t) 4389 4381 { 4390 - struct pvr2_hdw *hdw = (struct pvr2_hdw *)data; 4382 + struct pvr2_hdw *hdw = from_timer(hdw, t, decoder_stabilization_timer); 4391 4383 hdw->state_decoder_ready = !0; 4392 4384 trace_stbit("state_decoder_ready", hdw->state_decoder_ready); 4393 4385 hdw->state_stale = !0; ··· 4396 4388 4397 4389 4398 4390 /* Timeout function for encoder wait timer. */ 4399 - static void pvr2_hdw_encoder_wait_timeout(unsigned long data) 4391 + static void pvr2_hdw_encoder_wait_timeout(struct timer_list *t) 4400 4392 { 4401 - struct pvr2_hdw *hdw = (struct pvr2_hdw *)data; 4393 + struct pvr2_hdw *hdw = from_timer(hdw, t, encoder_wait_timer); 4402 4394 hdw->state_encoder_waitok = !0; 4403 4395 trace_stbit("state_encoder_waitok",hdw->state_encoder_waitok); 4404 4396 hdw->state_stale = !0; ··· 4407 4399 4408 4400 4409 4401 /* Timeout function for encoder run timer. */ 4410 - static void pvr2_hdw_encoder_run_timeout(unsigned long data) 4402 + static void pvr2_hdw_encoder_run_timeout(struct timer_list *t) 4411 4403 { 4412 - struct pvr2_hdw *hdw = (struct pvr2_hdw *)data; 4404 + struct pvr2_hdw *hdw = from_timer(hdw, t, encoder_run_timer); 4413 4405 if (!hdw->state_encoder_runok) { 4414 4406 hdw->state_encoder_runok = !0; 4415 4407 trace_stbit("state_encoder_runok",hdw->state_encoder_runok);
+6 -4
drivers/memstick/host/jmb38x_ms.c
··· 59 59 unsigned int block_pos; 60 60 unsigned long timeout_jiffies; 61 61 struct timer_list timer; 62 + struct memstick_host *msh; 62 63 struct memstick_request *req; 63 64 unsigned char cmd_flags; 64 65 unsigned char io_pos; ··· 593 592 return IRQ_HANDLED; 594 593 } 595 594 596 - static void jmb38x_ms_abort(unsigned long data) 595 + static void jmb38x_ms_abort(struct timer_list *t) 597 596 { 598 - struct memstick_host *msh = (struct memstick_host *)data; 599 - struct jmb38x_ms_host *host = memstick_priv(msh); 597 + struct jmb38x_ms_host *host = from_timer(host, t, timer); 598 + struct memstick_host *msh = host->msh; 600 599 unsigned long flags; 601 600 602 601 dev_dbg(&host->chip->pdev->dev, "abort\n"); ··· 879 878 return NULL; 880 879 881 880 host = memstick_priv(msh); 881 + host->msh = msh; 882 882 host->chip = jm; 883 883 host->addr = ioremap(pci_resource_start(jm->pdev, cnt), 884 884 pci_resource_len(jm->pdev, cnt)); ··· 899 897 900 898 msh->caps = MEMSTICK_CAP_PAR4 | MEMSTICK_CAP_PAR8; 901 899 902 - setup_timer(&host->timer, jmb38x_ms_abort, (unsigned long)msh); 900 + timer_setup(&host->timer, jmb38x_ms_abort, 0); 903 901 904 902 if (!request_irq(host->irq, jmb38x_ms_isr, IRQF_SHARED, host->host_id, 905 903 msh))
+3 -4
drivers/memstick/host/r592.c
··· 616 616 } 617 617 618 618 /* Timer routine that fires 1 second after last card detection event, */ 619 - static void r592_detect_timer(long unsigned int data) 619 + static void r592_detect_timer(struct timer_list *t) 620 620 { 621 - struct r592_device *dev = (struct r592_device *)data; 621 + struct r592_device *dev = from_timer(dev, t, detect_timer); 622 622 r592_update_card_detect(dev); 623 623 memstick_detect_change(dev->host); 624 624 } ··· 770 770 spin_lock_init(&dev->io_thread_lock); 771 771 init_completion(&dev->dma_done); 772 772 INIT_KFIFO(dev->pio_fifo); 773 - setup_timer(&dev->detect_timer, 774 - r592_detect_timer, (long unsigned int)dev); 773 + timer_setup(&dev->detect_timer, r592_detect_timer, 0); 775 774 776 775 /* Host initialization */ 777 776 host->caps = MEMSTICK_CAP_PAR4;
+3 -3
drivers/memstick/host/tifm_ms.c
··· 538 538 return 0; 539 539 } 540 540 541 - static void tifm_ms_abort(unsigned long data) 541 + static void tifm_ms_abort(struct timer_list *t) 542 542 { 543 - struct tifm_ms *host = (struct tifm_ms *)data; 543 + struct tifm_ms *host = from_timer(host, t, timer); 544 544 545 545 dev_dbg(&host->dev->dev, "status %x\n", 546 546 readl(host->dev->addr + SOCK_MS_STATUS)); ··· 575 575 host->dev = sock; 576 576 host->timeout_jiffies = msecs_to_jiffies(1000); 577 577 578 - setup_timer(&host->timer, tifm_ms_abort, (unsigned long)host); 578 + timer_setup(&host->timer, tifm_ms_abort, 0); 579 579 tasklet_init(&host->notify, tifm_ms_req_tasklet, (unsigned long)msh); 580 580 581 581 msh->request = tifm_ms_submit_req;
+6 -9
drivers/misc/sgi-xp/xpc_main.c
··· 172 172 * Timer function to enforce the timelimit on the partition disengage. 173 173 */ 174 174 static void 175 - xpc_timeout_partition_disengage(unsigned long data) 175 + xpc_timeout_partition_disengage(struct timer_list *t) 176 176 { 177 - struct xpc_partition *part = (struct xpc_partition *)data; 177 + struct xpc_partition *part = from_timer(part, t, disengage_timer); 178 178 179 179 DBUG_ON(time_is_after_jiffies(part->disengage_timeout)); 180 180 ··· 190 190 * specify when the next timeout should occur. 191 191 */ 192 192 static void 193 - xpc_hb_beater(unsigned long dummy) 193 + xpc_hb_beater(struct timer_list *unused) 194 194 { 195 195 xpc_arch_ops.increment_heartbeat(); 196 196 ··· 205 205 xpc_start_hb_beater(void) 206 206 { 207 207 xpc_arch_ops.heartbeat_init(); 208 - init_timer(&xpc_hb_timer); 209 - xpc_hb_timer.function = xpc_hb_beater; 208 + timer_setup(&xpc_hb_timer, xpc_hb_beater, 0); 210 209 xpc_hb_beater(0); 211 210 } 212 211 ··· 930 931 part->act_state = XPC_P_AS_INACTIVE; 931 932 XPC_SET_REASON(part, 0, 0); 932 933 933 - init_timer(&part->disengage_timer); 934 - part->disengage_timer.function = 935 - xpc_timeout_partition_disengage; 936 - part->disengage_timer.data = (unsigned long)part; 934 + timer_setup(&part->disengage_timer, 935 + xpc_timeout_partition_disengage, 0); 937 936 938 937 part->setup_state = XPC_P_SS_UNSET; 939 938 init_waitqueue_head(&part->teardown_wq);
+6 -9
drivers/misc/sgi-xp/xpc_sn2.c
··· 323 323 * was received. 324 324 */ 325 325 static void 326 - xpc_check_for_dropped_notify_IRQ_sn2(struct xpc_partition *part) 326 + xpc_check_for_dropped_notify_IRQ_sn2(struct timer_list *t) 327 327 { 328 - struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; 328 + struct xpc_partition *part = 329 + from_timer(part, t, sn.sn2.dropped_notify_IRQ_timer); 329 330 330 331 if (xpc_part_ref(part)) { 331 332 xpc_check_for_sent_chctl_flags_sn2(part); 332 333 333 - part_sn2->dropped_notify_IRQ_timer.expires = jiffies + 334 - XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL; 335 - add_timer(&part_sn2->dropped_notify_IRQ_timer); 334 + t->expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL; 335 + add_timer(t); 336 336 xpc_part_deref(part); 337 337 } 338 338 } ··· 1232 1232 1233 1233 /* Setup a timer to check for dropped notify IRQs */ 1234 1234 timer = &part_sn2->dropped_notify_IRQ_timer; 1235 - init_timer(timer); 1236 - timer->function = 1237 - (void (*)(unsigned long))xpc_check_for_dropped_notify_IRQ_sn2; 1238 - timer->data = (unsigned long)part; 1235 + timer_setup(timer, xpc_check_for_dropped_notify_IRQ_sn2, 0); 1239 1236 timer->expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL; 1240 1237 add_timer(timer); 1241 1238
+3 -3
drivers/net/cris/eth_v10.c
··· 166 166 static unsigned int network_tr_ctrl_shadow = 0; 167 167 168 168 /* Network speed indication. */ 169 - static DEFINE_TIMER(speed_timer, NULL, 0, 0); 170 - static DEFINE_TIMER(clear_led_timer, NULL, 0, 0); 169 + static DEFINE_TIMER(speed_timer, NULL); 170 + static DEFINE_TIMER(clear_led_timer, NULL); 171 171 static int current_speed; /* Speed read from transceiver */ 172 172 static int current_speed_selection; /* Speed selected by user */ 173 173 static unsigned long led_next_time; ··· 175 175 static int rx_queue_len; 176 176 177 177 /* Duplex */ 178 - static DEFINE_TIMER(duplex_timer, NULL, 0, 0); 178 + static DEFINE_TIMER(duplex_timer, NULL); 179 179 static int full_duplex; 180 180 static enum duplex current_duplex; 181 181
+4 -7
drivers/net/ethernet/qlogic/qlge/qlge_main.c
··· 4725 4725 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, 4726 4726 }; 4727 4727 4728 - static void ql_timer(unsigned long data) 4728 + static void ql_timer(struct timer_list *t) 4729 4729 { 4730 - struct ql_adapter *qdev = (struct ql_adapter *)data; 4730 + struct ql_adapter *qdev = from_timer(qdev, t, timer); 4731 4731 u32 var = 0; 4732 4732 4733 4733 var = ql_read32(qdev, STS); ··· 4806 4806 /* Start up the timer to trigger EEH if 4807 4807 * the bus goes dead 4808 4808 */ 4809 - init_timer_deferrable(&qdev->timer); 4810 - qdev->timer.data = (unsigned long)qdev; 4811 - qdev->timer.function = ql_timer; 4812 - qdev->timer.expires = jiffies + (5*HZ); 4813 - add_timer(&qdev->timer); 4809 + timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE); 4810 + mod_timer(&qdev->timer, jiffies + (5*HZ)); 4814 4811 ql_link_off(qdev); 4815 4812 ql_display_dev_info(ndev); 4816 4813 atomic_set(&qdev->lb_count, 0);
+4 -5
drivers/net/ethernet/tile/tilepro.c
··· 608 608 * ISSUE: Maybe instead track number of expected completions, and free 609 609 * only that many, resetting to zero if "pending" is ever false. 610 610 */ 611 - static void tile_net_handle_egress_timer(unsigned long arg) 611 + static void tile_net_handle_egress_timer(struct timer_list *t) 612 612 { 613 - struct tile_net_cpu *info = (struct tile_net_cpu *)arg; 613 + struct tile_net_cpu *info = from_timer(info, t, egress_timer); 614 614 struct net_device *dev = info->napi.dev; 615 615 616 616 /* The timer is no longer scheduled. */ ··· 1004 1004 BUG(); 1005 1005 1006 1006 /* Initialize the egress timer. */ 1007 - init_timer_pinned(&info->egress_timer); 1008 - info->egress_timer.data = (long)info; 1009 - info->egress_timer.function = tile_net_handle_egress_timer; 1007 + timer_setup(&info->egress_timer, tile_net_handle_egress_timer, 1008 + TIMER_PINNED); 1010 1009 1011 1010 u64_stats_init(&info->stats.syncp); 1012 1011
+1 -1
drivers/net/hamradio/yam.c
··· 157 157 158 158 static struct yam_mcs *yam_data; 159 159 160 - static DEFINE_TIMER(yam_timer, NULL, 0, 0); 160 + static DEFINE_TIMER(yam_timer, NULL); 161 161 162 162 /* --------------------------------------------------------------------- */ 163 163
+3 -5
drivers/net/vxlan.c
··· 2325 2325 } 2326 2326 2327 2327 /* Walk the forwarding table and purge stale entries */ 2328 - static void vxlan_cleanup(unsigned long arg) 2328 + static void vxlan_cleanup(struct timer_list *t) 2329 2329 { 2330 - struct vxlan_dev *vxlan = (struct vxlan_dev *) arg; 2330 + struct vxlan_dev *vxlan = from_timer(vxlan, t, age_timer); 2331 2331 unsigned long next_timer = jiffies + FDB_AGE_INTERVAL; 2332 2332 unsigned int h; 2333 2333 ··· 2647 2647 INIT_LIST_HEAD(&vxlan->next); 2648 2648 spin_lock_init(&vxlan->hash_lock); 2649 2649 2650 - init_timer_deferrable(&vxlan->age_timer); 2651 - vxlan->age_timer.function = vxlan_cleanup; 2652 - vxlan->age_timer.data = (unsigned long) vxlan; 2650 + timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE); 2653 2651 2654 2652 vxlan->dev = dev; 2655 2653
-2
drivers/net/wan/hdlc_cisco.c
··· 276 276 spin_unlock(&st->lock); 277 277 278 278 st->timer.expires = jiffies + st->settings.interval * HZ; 279 - st->timer.function = cisco_timer; 280 - st->timer.data = arg; 281 279 add_timer(&st->timer); 282 280 } 283 281
-2
drivers/net/wan/hdlc_fr.c
··· 644 644 state(hdlc)->settings.t391 * HZ; 645 645 } 646 646 647 - state(hdlc)->timer.function = fr_timer; 648 - state(hdlc)->timer.data = arg; 649 647 add_timer(&state(hdlc)->timer); 650 648 } 651 649
+4 -5
drivers/net/wireless/ath/ath6kl/recovery.c
··· 60 60 ar->fw_recovery.hb_pending = false; 61 61 } 62 62 63 - static void ath6kl_recovery_hb_timer(unsigned long data) 63 + static void ath6kl_recovery_hb_timer(struct timer_list *t) 64 64 { 65 - struct ath6kl *ar = (struct ath6kl *) data; 65 + struct ath6kl *ar = from_timer(ar, t, fw_recovery.hb_timer); 66 66 int err; 67 67 68 68 if (test_bit(RECOVERY_CLEANUP, &ar->flag) || ··· 104 104 recovery->seq_num = 0; 105 105 recovery->hb_misscnt = 0; 106 106 ar->fw_recovery.hb_pending = false; 107 - ar->fw_recovery.hb_timer.function = ath6kl_recovery_hb_timer; 108 - ar->fw_recovery.hb_timer.data = (unsigned long) ar; 109 - init_timer_deferrable(&ar->fw_recovery.hb_timer); 107 + timer_setup(&ar->fw_recovery.hb_timer, ath6kl_recovery_hb_timer, 108 + TIMER_DEFERRABLE); 110 109 111 110 if (ar->fw_recovery.hb_poll) 112 111 mod_timer(&ar->fw_recovery.hb_timer, jiffies +
+1 -1
drivers/net/wireless/atmel/at76c50x-usb.c
··· 519 519 /* LED trigger */ 520 520 static int tx_activity; 521 521 static void at76_ledtrig_tx_timerfunc(unsigned long data); 522 - static DEFINE_TIMER(ledtrig_tx_timer, at76_ledtrig_tx_timerfunc, 0, 0); 522 + static DEFINE_TIMER(ledtrig_tx_timer, at76_ledtrig_tx_timerfunc); 523 523 DEFINE_LED_TRIGGER(ledtrig_tx); 524 524 525 525 static void at76_ledtrig_tx_timerfunc(unsigned long data)
+7 -14
drivers/parport/ieee1284.c
··· 44 44 up (&port->physport->ieee1284.irq); 45 45 } 46 46 47 - static struct parport *port_from_cookie[PARPORT_MAX]; 48 - static void timeout_waiting_on_port (unsigned long cookie) 47 + static void timeout_waiting_on_port (struct timer_list *t) 49 48 { 50 - parport_ieee1284_wakeup (port_from_cookie[cookie % PARPORT_MAX]); 49 + struct parport *port = from_timer(port, t, timer); 50 + 51 + parport_ieee1284_wakeup (port); 51 52 } 52 53 53 54 /** ··· 70 69 int parport_wait_event (struct parport *port, signed long timeout) 71 70 { 72 71 int ret; 73 - struct timer_list timer; 74 72 75 73 if (!port->physport->cad->timeout) 76 74 /* Zero timeout is special, and we can't down() the 77 75 semaphore. */ 78 76 return 1; 79 77 80 - init_timer_on_stack(&timer); 81 - timer.expires = jiffies + timeout; 82 - timer.function = timeout_waiting_on_port; 83 - port_from_cookie[port->number % PARPORT_MAX] = port; 84 - timer.data = port->number; 85 - 86 - add_timer (&timer); 78 + timer_setup(&port->timer, timeout_waiting_on_port, 0); 79 + mod_timer(&port->timer, jiffies + timeout); 87 80 ret = down_interruptible (&port->physport->ieee1284.irq); 88 - if (!del_timer_sync(&timer) && !ret) 81 + if (!del_timer_sync(&port->timer) && !ret) 89 82 /* Timed out. */ 90 83 ret = 1; 91 - 92 - destroy_timer_on_stack(&timer); 93 84 94 85 return ret; 95 86 }
+3 -3
drivers/pcmcia/bcm63xx_pcmcia.c
··· 263 263 /* 264 264 * socket polling timer callback 265 265 */ 266 - static void bcm63xx_pcmcia_poll(unsigned long data) 266 + static void bcm63xx_pcmcia_poll(struct timer_list *t) 267 267 { 268 268 struct bcm63xx_pcmcia_socket *skt; 269 269 unsigned int stat, events; 270 270 271 - skt = (struct bcm63xx_pcmcia_socket *)data; 271 + skt = from_timer(skt, t, timer); 272 272 273 273 spin_lock_bh(&skt->lock); 274 274 ··· 392 392 sock->map_size = resource_size(skt->common_res); 393 393 394 394 /* initialize polling timer */ 395 - setup_timer(&skt->timer, bcm63xx_pcmcia_poll, (unsigned long)skt); 395 + timer_setup(&skt->timer, bcm63xx_pcmcia_poll, 0); 396 396 397 397 /* initialize pcmcia control register, drive VS[12] to 0, 398 398 * leave CB IDSEL to the old value since it is set by the PCI
+3 -3
drivers/pcmcia/bfin_cf_pcmcia.c
··· 86 86 } 87 87 88 88 /* the timer is primarily to kick this socket's pccardd */ 89 - static void bfin_cf_timer(unsigned long _cf) 89 + static void bfin_cf_timer(struct timer_list *t) 90 90 { 91 - struct bfin_cf_socket *cf = (void *)_cf; 91 + struct bfin_cf_socket *cf = from_timer(cf, t, timer); 92 92 unsigned short present = bfin_cf_present(cf->cd_pfx); 93 93 94 94 if (present != cf->present) { ··· 227 227 228 228 cf->cd_pfx = cd_pfx; 229 229 230 - setup_timer(&cf->timer, bfin_cf_timer, (unsigned long)cf); 230 + timer_setup(&cf->timer, bfin_cf_timer, 0); 231 231 232 232 cf->pdev = pdev; 233 233 platform_set_drvdata(pdev, cf);
+2 -4
drivers/pcmcia/i82365.c
··· 875 875 return IRQ_RETVAL(handled); 876 876 } /* pcic_interrupt */ 877 877 878 - static void pcic_interrupt_wrapper(u_long data) 878 + static void pcic_interrupt_wrapper(struct timer_list *unused) 879 879 { 880 880 pcic_interrupt(0, NULL); 881 881 poll_timer.expires = jiffies + poll_interval; ··· 1289 1289 1290 1290 /* Finally, schedule a polling interrupt */ 1291 1291 if (poll_interval != 0) { 1292 - poll_timer.function = pcic_interrupt_wrapper; 1293 - poll_timer.data = 0; 1294 - init_timer(&poll_timer); 1292 + timer_setup(&poll_timer, pcic_interrupt_wrapper, 0); 1295 1293 poll_timer.expires = jiffies + poll_interval; 1296 1294 add_timer(&poll_timer); 1297 1295 }
+6 -4
drivers/pcmcia/omap_cf.c
··· 80 80 } 81 81 82 82 /* the timer is primarily to kick this socket's pccardd */ 83 - static void omap_cf_timer(unsigned long _cf) 83 + static void omap_cf_timer(struct timer_list *t) 84 84 { 85 - struct omap_cf_socket *cf = (void *) _cf; 85 + struct omap_cf_socket *cf = from_timer(cf, t, timer); 86 86 unsigned present = omap_cf_present(); 87 87 88 88 if (present != cf->present) { ··· 102 102 */ 103 103 static irqreturn_t omap_cf_irq(int irq, void *_cf) 104 104 { 105 - omap_cf_timer((unsigned long)_cf); 105 + struct omap_cf_socket *cf = (struct omap_cf_socket *)_cf; 106 + 107 + omap_cf_timer(&cf->timer); 106 108 return IRQ_HANDLED; 107 109 } 108 110 ··· 222 220 cf = kzalloc(sizeof *cf, GFP_KERNEL); 223 221 if (!cf) 224 222 return -ENOMEM; 225 - setup_timer(&cf->timer, omap_cf_timer, (unsigned long)cf); 223 + timer_setup(&cf->timer, omap_cf_timer, 0); 226 224 227 225 cf->pdev = pdev; 228 226 platform_set_drvdata(pdev, cf);
+3 -4
drivers/pcmcia/pd6729.c
··· 234 234 235 235 /* socket functions */ 236 236 237 - static void pd6729_interrupt_wrapper(unsigned long data) 237 + static void pd6729_interrupt_wrapper(struct timer_list *t) 238 238 { 239 - struct pd6729_socket *socket = (struct pd6729_socket *) data; 239 + struct pd6729_socket *socket = from_timer(socket, t, poll_timer); 240 240 241 241 pd6729_interrupt(0, (void *)socket); 242 242 mod_timer(&socket->poll_timer, jiffies + HZ); ··· 707 707 } 708 708 } else { 709 709 /* poll Card status change */ 710 - setup_timer(&socket->poll_timer, pd6729_interrupt_wrapper, 711 - (unsigned long)socket); 710 + timer_setup(&socket->poll_timer, pd6729_interrupt_wrapper, 0); 712 711 mod_timer(&socket->poll_timer, jiffies + HZ); 713 712 } 714 713
+3 -4
drivers/pcmcia/soc_common.c
··· 456 456 } 457 457 458 458 /* Let's poll for events in addition to IRQs since IRQ only is unreliable... */ 459 - static void soc_common_pcmcia_poll_event(unsigned long dummy) 459 + static void soc_common_pcmcia_poll_event(struct timer_list *t) 460 460 { 461 - struct soc_pcmcia_socket *skt = (struct soc_pcmcia_socket *)dummy; 461 + struct soc_pcmcia_socket *skt = from_timer(skt, t, poll_timer); 462 462 debug(skt, 4, "polling for events\n"); 463 463 464 464 mod_timer(&skt->poll_timer, jiffies + SOC_PCMCIA_POLL_PERIOD); ··· 794 794 795 795 skt->cs_state = dead_socket; 796 796 797 - setup_timer(&skt->poll_timer, soc_common_pcmcia_poll_event, 798 - (unsigned long)skt); 797 + timer_setup(&skt->poll_timer, soc_common_pcmcia_poll_event, 0); 799 798 skt->poll_timer.expires = jiffies + SOC_PCMCIA_POLL_PERIOD; 800 799 801 800 ret = request_resource(&iomem_resource, &skt->res_skt);
+3 -5
drivers/pcmcia/tcic.c
··· 98 98 /*====================================================================*/ 99 99 100 100 static irqreturn_t tcic_interrupt(int irq, void *dev); 101 - static void tcic_timer(u_long data); 101 + static void tcic_timer(struct timer_list *unused); 102 102 static struct pccard_operations tcic_operations; 103 103 104 104 struct tcic_socket { ··· 435 435 } 436 436 437 437 /* Set up polling */ 438 - poll_timer.function = &tcic_timer; 439 - poll_timer.data = 0; 440 - init_timer(&poll_timer); 438 + timer_setup(&poll_timer, &tcic_timer, 0); 441 439 442 440 /* Build interrupt mask */ 443 441 printk(KERN_CONT ", %d sockets\n", sockets); ··· 581 583 return IRQ_HANDLED; 582 584 } /* tcic_interrupt */ 583 585 584 - static void tcic_timer(u_long data) 586 + static void tcic_timer(struct timer_list *unused) 585 587 { 586 588 pr_debug("tcic_timer()\n"); 587 589 tcic_timer_pending = 0;
+3 -4
drivers/pcmcia/yenta_socket.c
··· 534 534 return IRQ_HANDLED; 535 535 } 536 536 537 - static void yenta_interrupt_wrapper(unsigned long data) 537 + static void yenta_interrupt_wrapper(struct timer_list *t) 538 538 { 539 - struct yenta_socket *socket = (struct yenta_socket *) data; 539 + struct yenta_socket *socket = from_timer(socket, t, poll_timer); 540 540 541 541 yenta_interrupt(0, (void *)socket); 542 542 socket->poll_timer.expires = jiffies + HZ; ··· 1233 1233 if (!socket->cb_irq || request_irq(socket->cb_irq, yenta_interrupt, IRQF_SHARED, "yenta", socket)) { 1234 1234 /* No IRQ or request_irq failed. Poll */ 1235 1235 socket->cb_irq = 0; /* But zero is a valid IRQ number. */ 1236 - setup_timer(&socket->poll_timer, yenta_interrupt_wrapper, 1237 - (unsigned long)socket); 1236 + timer_setup(&socket->poll_timer, yenta_interrupt_wrapper, 0); 1238 1237 mod_timer(&socket->poll_timer, jiffies + HZ); 1239 1238 dev_info(&dev->dev, 1240 1239 "no PCI IRQ, CardBus support disabled for this socket.\n");
+3 -5
drivers/ras/cec.c
··· 169 169 mod_timer(t, round_jiffies(iv)); 170 170 } 171 171 172 - static void cec_timer_fn(unsigned long data) 172 + static void cec_timer_fn(struct timer_list *unused) 173 173 { 174 - struct ce_array *ca = (struct ce_array *)data; 175 - 176 - do_spring_cleaning(ca); 174 + do_spring_cleaning(&ce_arr); 177 175 178 176 cec_mod_timer(&cec_timer, timer_interval); 179 177 } ··· 508 510 if (create_debugfs_nodes()) 509 511 return; 510 512 511 - setup_timer(&cec_timer, cec_timer_fn, (unsigned long)&ce_arr); 513 + timer_setup(&cec_timer, cec_timer_fn, 0); 512 514 cec_mod_timer(&cec_timer, CEC_TIMER_DEFAULT_INTERVAL); 513 515 514 516 pr_info("Correctable Errors collector initialized.\n");
+3
drivers/rtc/class.c
··· 161 161 162 162 device_initialize(&rtc->dev); 163 163 164 + /* Drivers can revise this default after allocating the device. */ 165 + rtc->set_offset_nsec = NSEC_PER_SEC / 2; 166 + 164 167 rtc->irq_freq = 1; 165 168 rtc->max_user_freq = 64; 166 169 rtc->dev.class = rtc_class;
+38 -15
drivers/rtc/systohc.c
··· 10 10 /** 11 11 * rtc_set_ntp_time - Save NTP synchronized time to the RTC 12 12 * @now: Current time of day 13 + * @target_nsec: pointer for desired now->tv_nsec value 13 14 * 14 15 * Replacement for the NTP platform function update_persistent_clock64 15 16 * that stores time for later retrieval by rtc_hctosys. ··· 19 18 * possible at all, and various other -errno for specific temporary failure 20 19 * cases. 21 20 * 21 + * -EPROTO is returned if now.tv_nsec is not close enough to *target_nsec. 22 + ( 22 23 * If temporary failure is indicated the caller should try again 'soon' 23 24 */ 24 - int rtc_set_ntp_time(struct timespec64 now) 25 + int rtc_set_ntp_time(struct timespec64 now, unsigned long *target_nsec) 25 26 { 26 27 struct rtc_device *rtc; 27 28 struct rtc_time tm; 29 + struct timespec64 to_set; 28 30 int err = -ENODEV; 29 - 30 - if (now.tv_nsec < (NSEC_PER_SEC >> 1)) 31 - rtc_time64_to_tm(now.tv_sec, &tm); 32 - else 33 - rtc_time64_to_tm(now.tv_sec + 1, &tm); 31 + bool ok; 34 32 35 33 rtc = rtc_class_open(CONFIG_RTC_SYSTOHC_DEVICE); 36 - if (rtc) { 37 - /* rtc_hctosys exclusively uses UTC, so we call set_time here, 38 - * not set_mmss. */ 39 - if (rtc->ops && 40 - (rtc->ops->set_time || 41 - rtc->ops->set_mmss64 || 42 - rtc->ops->set_mmss)) 43 - err = rtc_set_time(rtc, &tm); 44 - rtc_class_close(rtc); 34 + if (!rtc) 35 + goto out_err; 36 + 37 + if (!rtc->ops || (!rtc->ops->set_time && !rtc->ops->set_mmss64 && 38 + !rtc->ops->set_mmss)) 39 + goto out_close; 40 + 41 + /* Compute the value of tv_nsec we require the caller to supply in 42 + * now.tv_nsec. This is the value such that (now + 43 + * set_offset_nsec).tv_nsec == 0. 44 + */ 45 + set_normalized_timespec64(&to_set, 0, -rtc->set_offset_nsec); 46 + *target_nsec = to_set.tv_nsec; 47 + 48 + /* The ntp code must call this with the correct value in tv_nsec, if 49 + * it does not we update target_nsec and return EPROTO to make the ntp 50 + * code try again later. 
51 + */ 52 + ok = rtc_tv_nsec_ok(rtc->set_offset_nsec, &to_set, &now); 53 + if (!ok) { 54 + err = -EPROTO; 55 + goto out_close; 45 56 } 46 57 58 + rtc_time64_to_tm(to_set.tv_sec, &tm); 59 + 60 + /* rtc_hctosys exclusively uses UTC, so we call set_time here, not 61 + * set_mmss. 62 + */ 63 + err = rtc_set_time(rtc, &tm); 64 + 65 + out_close: 66 + rtc_class_close(rtc); 67 + out_err: 47 68 return err; 48 69 }
+1
drivers/s390/char/tape.h
··· 129 129 int options; /* options for execution. */ 130 130 int retries; /* retry counter for error recovery. */ 131 131 int rescnt; /* residual count from devstat. */ 132 + struct timer_list timer; /* timer for std_assign_timeout(). */ 132 133 133 134 /* Callback for delivering final status. */ 134 135 void (*callback)(struct tape_request *, void *);
+6 -12
drivers/s390/char/tape_std.c
··· 33 33 * tape_std_assign 34 34 */ 35 35 static void 36 - tape_std_assign_timeout(unsigned long data) 36 + tape_std_assign_timeout(struct timer_list *t) 37 37 { 38 - struct tape_request * request; 39 - struct tape_device * device; 38 + struct tape_request * request = from_timer(request, t, timer); 39 + struct tape_device * device = request->device; 40 40 int rc; 41 41 42 - request = (struct tape_request *) data; 43 - device = request->device; 44 42 BUG_ON(!device); 45 43 46 44 DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n", ··· 69 71 * to another host (actually this shouldn't happen but it does). 70 72 * So we set up a timeout for this call. 71 73 */ 72 - init_timer_on_stack(&timeout); 73 - timeout.function = tape_std_assign_timeout; 74 - timeout.data = (unsigned long) request; 75 - timeout.expires = jiffies + 2 * HZ; 76 - add_timer(&timeout); 74 + timer_setup(&request->timer, tape_std_assign_timeout, 0); 75 + mod_timer(&timeout, jiffies + 2 * HZ); 77 76 78 77 rc = tape_do_io_interruptible(device, request); 79 78 80 - del_timer_sync(&timeout); 81 - destroy_timer_on_stack(&timeout); 79 + del_timer_sync(&request->timer); 82 80 83 81 if (rc != 0) { 84 82 DBF_EVENT(3, "%08x: assign failed - device might be busy\n",
+6 -11
drivers/s390/net/lcs.c
··· 834 834 * Emit buffer of a lan command. 835 835 */ 836 836 static void 837 - lcs_lancmd_timeout(unsigned long data) 837 + lcs_lancmd_timeout(struct timer_list *t) 838 838 { 839 - struct lcs_reply *reply, *list_reply, *r; 839 + struct lcs_reply *reply = from_timer(reply, t, timer); 840 + struct lcs_reply *list_reply, *r; 840 841 unsigned long flags; 841 842 842 843 LCS_DBF_TEXT(4, trace, "timeout"); 843 - reply = (struct lcs_reply *) data; 844 844 spin_lock_irqsave(&reply->card->lock, flags); 845 845 list_for_each_entry_safe(list_reply, r, 846 846 &reply->card->lancmd_waiters,list) { ··· 864 864 { 865 865 struct lcs_reply *reply; 866 866 struct lcs_cmd *cmd; 867 - struct timer_list timer; 868 867 unsigned long flags; 869 868 int rc; 870 869 ··· 884 885 rc = lcs_ready_buffer(&card->write, buffer); 885 886 if (rc) 886 887 return rc; 887 - init_timer_on_stack(&timer); 888 - timer.function = lcs_lancmd_timeout; 889 - timer.data = (unsigned long) reply; 890 - timer.expires = jiffies + HZ*card->lancmd_timeout; 891 - add_timer(&timer); 888 + timer_setup(&reply->timer, lcs_lancmd_timeout, 0); 889 + mod_timer(&reply->timer, jiffies + HZ * card->lancmd_timeout); 892 890 wait_event(reply->wait_q, reply->received); 893 - del_timer_sync(&timer); 894 - destroy_timer_on_stack(&timer); 891 + del_timer_sync(&reply->timer); 895 892 LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc); 896 893 rc = reply->rc; 897 894 lcs_put_reply(reply);
+1
drivers/s390/net/lcs.h
··· 276 276 void (*callback)(struct lcs_card *, struct lcs_cmd *); 277 277 wait_queue_head_t wait_q; 278 278 struct lcs_card *card; 279 + struct timer_list timer; 279 280 int received; 280 281 int rc; 281 282 };
+1 -4
drivers/scsi/aic7xxx/aic79xx.h
··· 1046 1046 1047 1047 typedef uint8_t ahd_mode_state; 1048 1048 1049 - typedef void ahd_callback_t (void *); 1050 - 1051 1049 struct ahd_completion 1052 1050 { 1053 1051 uint16_t tag; ··· 1120 1122 /* 1121 1123 * Timer handles for timer driven callbacks. 1122 1124 */ 1123 - ahd_timer_t reset_timer; 1124 - ahd_timer_t stat_timer; 1125 + struct timer_list stat_timer; 1125 1126 1126 1127 /* 1127 1128 * Statistics.
+8 -21
drivers/scsi/aic7xxx/aic79xx_core.c
··· 207 207 static u_int ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid, 208 208 u_int prev, u_int next, u_int tid); 209 209 static void ahd_reset_current_bus(struct ahd_softc *ahd); 210 - static ahd_callback_t ahd_stat_timer; 210 + static void ahd_stat_timer(struct timer_list *t); 211 211 #ifdef AHD_DUMP_SEQ 212 212 static void ahd_dumpseq(struct ahd_softc *ahd); 213 213 #endif ··· 6104 6104 ahd->bugs = AHD_BUGNONE; 6105 6105 ahd->flags = AHD_SPCHK_ENB_A|AHD_RESET_BUS_A|AHD_TERM_ENB_A 6106 6106 | AHD_EXTENDED_TRANS_A|AHD_STPWLEVEL_A; 6107 - ahd_timer_init(&ahd->reset_timer); 6108 - ahd_timer_init(&ahd->stat_timer); 6107 + timer_setup(&ahd->stat_timer, ahd_stat_timer, 0); 6109 6108 ahd->int_coalescing_timer = AHD_INT_COALESCING_TIMER_DEFAULT; 6110 6109 ahd->int_coalescing_maxcmds = AHD_INT_COALESCING_MAXCMDS_DEFAULT; 6111 6110 ahd->int_coalescing_mincmds = AHD_INT_COALESCING_MINCMDS_DEFAULT; ··· 6234 6235 /* 6235 6236 * Stop periodic timer callbacks. 6236 6237 */ 6237 - ahd_timer_stop(&ahd->reset_timer); 6238 - ahd_timer_stop(&ahd->stat_timer); 6238 + del_timer_sync(&ahd->stat_timer); 6239 6239 6240 6240 /* This will reset most registers to 0, but not all */ 6241 6241 ahd_reset(ahd, /*reinit*/FALSE); ··· 7037 7039 }; 7038 7040 7039 7041 /***************************** Timer Facilities *******************************/ 7040 - #define ahd_timer_init init_timer 7041 - #define ahd_timer_stop del_timer_sync 7042 - typedef void ahd_linux_callback_t (u_long); 7043 - 7044 7042 static void 7045 - ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg) 7043 + ahd_timer_reset(struct timer_list *timer, int usec) 7046 7044 { 7047 - struct ahd_softc *ahd; 7048 - 7049 - ahd = (struct ahd_softc *)arg; 7050 7045 del_timer(timer); 7051 - timer->data = (u_long)arg; 7052 7046 timer->expires = jiffies + (usec * HZ)/1000000; 7053 - timer->function = (ahd_linux_callback_t*)func; 7054 7047 add_timer(timer); 7055 7048 } 7056 7049 ··· 7268 7279 } 7269 7280 init_done: 
7270 7281 ahd_restart(ahd); 7271 - ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US, 7272 - ahd_stat_timer, ahd); 7282 + ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US); 7273 7283 return (0); 7274 7284 } 7275 7285 ··· 8866 8878 8867 8879 /**************************** Statistics Processing ***************************/ 8868 8880 static void 8869 - ahd_stat_timer(void *arg) 8881 + ahd_stat_timer(struct timer_list *t) 8870 8882 { 8871 - struct ahd_softc *ahd = arg; 8883 + struct ahd_softc *ahd = from_timer(ahd, t, stat_timer); 8872 8884 u_long s; 8873 8885 int enint_coal; 8874 8886 ··· 8895 8907 ahd->cmdcmplt_bucket = (ahd->cmdcmplt_bucket+1) & (AHD_STAT_BUCKETS-1); 8896 8908 ahd->cmdcmplt_total -= ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]; 8897 8909 ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket] = 0; 8898 - ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US, 8899 - ahd_stat_timer, ahd); 8910 + ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US); 8900 8911 ahd_unlock(ahd, &s); 8901 8912 } 8902 8913
-7
drivers/scsi/aic7xxx/aic79xx_osm.h
··· 203 203 */ 204 204 #define ahd_dmamap_sync(ahd, dma_tag, dmamap, offset, len, op) 205 205 206 - /************************** Timer DataStructures ******************************/ 207 - typedef struct timer_list ahd_timer_t; 208 - 209 206 /********************************** Includes **********************************/ 210 207 #ifdef CONFIG_AIC79XX_REG_PRETTY_PRINT 211 208 #define AIC_DEBUG_REGISTERS 1 ··· 210 213 #define AIC_DEBUG_REGISTERS 0 211 214 #endif 212 215 #include "aic79xx.h" 213 - 214 - /***************************** Timer Facilities *******************************/ 215 - #define ahd_timer_init init_timer 216 - #define ahd_timer_stop del_timer_sync 217 216 218 217 /***************************** SMP support ************************************/ 219 218 #include <linux/spinlock.h>
+1 -2
drivers/scsi/aic94xx/aic94xx_hwi.c
··· 1178 1178 struct asd_ascb *ascb; 1179 1179 list_for_each_entry(ascb, list, list) { 1180 1180 if (!ascb->uldd_timer) { 1181 - ascb->timer.data = (unsigned long) ascb; 1182 - ascb->timer.function = asd_ascb_timedout; 1181 + ascb->timer.function = (TIMER_FUNC_TYPE)asd_ascb_timedout; 1183 1182 ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT; 1184 1183 add_timer(&ascb->timer); 1185 1184 }
+2 -3
drivers/scsi/aic94xx/aic94xx_hwi.h
··· 291 291 INIT_LIST_HEAD(&ascb->list); 292 292 ascb->scb = ascb->dma_scb.vaddr; 293 293 ascb->ha = asd_ha; 294 - ascb->timer.function = NULL; 295 - init_timer(&ascb->timer); 294 + timer_setup(&ascb->timer, NULL, 0); 296 295 ascb->tc_index = -1; 297 296 } 298 297 ··· 391 392 void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op); 392 393 int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask); 393 394 394 - void asd_ascb_timedout(unsigned long data); 395 + void asd_ascb_timedout(struct timer_list *t); 395 396 int asd_chip_hardrst(struct asd_ha_struct *asd_ha); 396 397 397 398 #endif
+3 -3
drivers/scsi/aic94xx/aic94xx_scb.c
··· 866 866 * Upper layers can implement their own timeout function, say to free 867 867 * resources they have with this SCB, and then call this one at the 868 868 * end of their timeout function. To do this, one should initialize 869 - * the ascb->timer.{function, data, expires} prior to calling the post 869 + * the ascb->timer.{function, expires} prior to calling the post 870 870 * function. The timer is started by the post function. 871 871 */ 872 - void asd_ascb_timedout(unsigned long data) 872 + void asd_ascb_timedout(struct timer_list *t) 873 873 { 874 - struct asd_ascb *ascb = (void *) data; 874 + struct asd_ascb *ascb = from_timer(ascb, t, timer); 875 875 struct asd_seq_data *seq = &ascb->ha->seq; 876 876 unsigned long flags; 877 877
+6 -7
drivers/scsi/aic94xx/aic94xx_tmf.c
··· 35 35 static int asd_enqueue_internal(struct asd_ascb *ascb, 36 36 void (*tasklet_complete)(struct asd_ascb *, 37 37 struct done_list_struct *), 38 - void (*timed_out)(unsigned long)) 38 + void (*timed_out)(struct timer_list *t)) 39 39 { 40 40 int res; 41 41 42 42 ascb->tasklet_complete = tasklet_complete; 43 43 ascb->uldd_timer = 1; 44 44 45 - ascb->timer.data = (unsigned long) ascb; 46 - ascb->timer.function = timed_out; 45 + ascb->timer.function = (TIMER_FUNC_TYPE)timed_out; 47 46 ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT; 48 47 49 48 add_timer(&ascb->timer); ··· 86 87 asd_ascb_free(ascb); 87 88 } 88 89 89 - static void asd_clear_nexus_timedout(unsigned long data) 90 + static void asd_clear_nexus_timedout(struct timer_list *t) 90 91 { 91 - struct asd_ascb *ascb = (void *)data; 92 + struct asd_ascb *ascb = from_timer(ascb, t, timer); 92 93 struct tasklet_completion_status *tcs = ascb->uldd_task; 93 94 94 95 ASD_DPRINTK("%s: here\n", __func__); ··· 260 261 261 262 /* ---------- TMFs ---------- */ 262 263 263 - static void asd_tmf_timedout(unsigned long data) 264 + static void asd_tmf_timedout(struct timer_list *t) 264 265 { 265 - struct asd_ascb *ascb = (void *) data; 266 + struct asd_ascb *ascb = from_timer(ascb, t, timer); 266 267 struct tasklet_completion_status *tcs = ascb->uldd_task; 267 268 268 269 ASD_DPRINTK("tmf timed out\n");
+7 -11
drivers/scsi/be2iscsi/be_main.c
··· 5230 5230 msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); 5231 5231 } 5232 5232 5233 - static void beiscsi_hw_tpe_check(unsigned long ptr) 5233 + static void beiscsi_hw_tpe_check(struct timer_list *t) 5234 5234 { 5235 - struct beiscsi_hba *phba; 5235 + struct beiscsi_hba *phba = from_timer(phba, t, hw_check); 5236 5236 u32 wait; 5237 5237 5238 - phba = (struct beiscsi_hba *)ptr; 5239 5238 /* if not TPE, do nothing */ 5240 5239 if (!beiscsi_detect_tpe(phba)) 5241 5240 return; ··· 5247 5248 msecs_to_jiffies(wait)); 5248 5249 } 5249 5250 5250 - static void beiscsi_hw_health_check(unsigned long ptr) 5251 + static void beiscsi_hw_health_check(struct timer_list *t) 5251 5252 { 5252 - struct beiscsi_hba *phba; 5253 + struct beiscsi_hba *phba = from_timer(phba, t, hw_check); 5253 5254 5254 - phba = (struct beiscsi_hba *)ptr; 5255 5255 beiscsi_detect_ue(phba); 5256 5256 if (beiscsi_detect_ue(phba)) { 5257 5257 __beiscsi_log(phba, KERN_ERR, ··· 5262 5264 if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state)) 5263 5265 return; 5264 5266 /* modify this timer to check TPE */ 5265 - phba->hw_check.function = beiscsi_hw_tpe_check; 5267 + phba->hw_check.function = (TIMER_FUNC_TYPE)beiscsi_hw_tpe_check; 5266 5268 } 5267 5269 5268 5270 mod_timer(&phba->hw_check, ··· 5349 5351 * Timer function gets modified for TPE detection. 5350 5352 * Always reinit to do health check first. 5351 5353 */ 5352 - phba->hw_check.function = beiscsi_hw_health_check; 5354 + phba->hw_check.function = (TIMER_FUNC_TYPE)beiscsi_hw_health_check; 5353 5355 mod_timer(&phba->hw_check, 5354 5356 jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL)); 5355 5357 return 0; ··· 5706 5708 * Start UE detection here. UE before this will cause stall in probe 5707 5709 * and eventually fail the probe. 
5708 5710 */ 5709 - init_timer(&phba->hw_check); 5710 - phba->hw_check.function = beiscsi_hw_health_check; 5711 - phba->hw_check.data = (unsigned long)phba; 5711 + timer_setup(&phba->hw_check, beiscsi_hw_health_check, 0); 5712 5712 mod_timer(&phba->hw_check, 5713 5713 jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL)); 5714 5714 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+5 -6
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
··· 823 823 824 824 skb_queue_head_init(&port->fcoe_pending_queue); 825 825 port->fcoe_pending_queue_active = 0; 826 - setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport); 826 + timer_setup(&port->timer, fcoe_queue_timer, 0); 827 827 828 828 fcoe_link_speed_update(lport); 829 829 ··· 845 845 return 0; 846 846 } 847 847 848 - static void bnx2fc_destroy_timer(unsigned long data) 848 + static void bnx2fc_destroy_timer(struct timer_list *t) 849 849 { 850 - struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data; 850 + struct bnx2fc_hba *hba = from_timer(hba, t, destroy_timer); 851 851 852 852 printk(KERN_ERR PFX "ERROR:bnx2fc_destroy_timer - " 853 853 "Destroy compl not received!!\n"); ··· 1946 1946 { 1947 1947 if (test_and_clear_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) { 1948 1948 if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) { 1949 - init_timer(&hba->destroy_timer); 1949 + timer_setup(&hba->destroy_timer, bnx2fc_destroy_timer, 1950 + 0); 1950 1951 hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT + 1951 1952 jiffies; 1952 - hba->destroy_timer.function = bnx2fc_destroy_timer; 1953 - hba->destroy_timer.data = (unsigned long)hba; 1954 1953 add_timer(&hba->destroy_timer); 1955 1954 wait_event_interruptible(hba->destroy_wait, 1956 1955 test_bit(BNX2FC_FLAG_DESTROY_CMPL,
+1 -1
drivers/scsi/bnx2i/bnx2i.h
··· 858 858 struct bnx2i_endpoint *ep); 859 859 extern void bnx2i_free_qp_resc(struct bnx2i_hba *hba, 860 860 struct bnx2i_endpoint *ep); 861 - extern void bnx2i_ep_ofld_timer(unsigned long data); 861 + extern void bnx2i_ep_ofld_timer(struct timer_list *t); 862 862 extern struct bnx2i_endpoint *bnx2i_find_ep_in_ofld_list( 863 863 struct bnx2i_hba *hba, u32 iscsi_cid); 864 864 extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list(
+2 -2
drivers/scsi/bnx2i/bnx2i_hwi.c
··· 698 698 * 699 699 * routine to handle connection offload/destroy request timeout 700 700 */ 701 - void bnx2i_ep_ofld_timer(unsigned long data) 701 + void bnx2i_ep_ofld_timer(struct timer_list *t) 702 702 { 703 - struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data; 703 + struct bnx2i_endpoint *ep = from_timer(ep, t, ofld_timer); 704 704 705 705 if (ep->state == EP_STATE_OFLD_START) { 706 706 printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n");
+4 -11
drivers/scsi/bnx2i/bnx2i_iscsi.c
··· 1611 1611 * this should normally not sleep for a long time so it should 1612 1612 * not disrupt the caller. 1613 1613 */ 1614 + timer_setup(&bnx2i_conn->ep->ofld_timer, bnx2i_ep_ofld_timer, 0); 1614 1615 bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies; 1615 - bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer; 1616 - bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep; 1617 1616 add_timer(&bnx2i_conn->ep->ofld_timer); 1618 1617 /* update iSCSI context for this conn, wait for CNIC to complete */ 1619 1618 wait_event_interruptible(bnx2i_conn->ep->ofld_wait, ··· 1728 1729 } 1729 1730 1730 1731 ep->state = EP_STATE_CLEANUP_START; 1731 - init_timer(&ep->ofld_timer); 1732 + timer_setup(&ep->ofld_timer, bnx2i_ep_ofld_timer, 0); 1732 1733 ep->ofld_timer.expires = hba->conn_ctx_destroy_tmo + jiffies; 1733 - ep->ofld_timer.function = bnx2i_ep_ofld_timer; 1734 - ep->ofld_timer.data = (unsigned long) ep; 1735 1734 add_timer(&ep->ofld_timer); 1736 1735 1737 1736 bnx2i_ep_destroy_list_add(hba, ep); ··· 1832 1835 bnx2i_ep->state = EP_STATE_OFLD_START; 1833 1836 bnx2i_ep_ofld_list_add(hba, bnx2i_ep); 1834 1837 1835 - init_timer(&bnx2i_ep->ofld_timer); 1838 + timer_setup(&bnx2i_ep->ofld_timer, bnx2i_ep_ofld_timer, 0); 1836 1839 bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies; 1837 - bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer; 1838 - bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep; 1839 1840 add_timer(&bnx2i_ep->ofld_timer); 1840 1841 1841 1842 if (bnx2i_send_conn_ofld_req(hba, bnx2i_ep)) { ··· 2049 2054 session = conn->session; 2050 2055 } 2051 2056 2052 - init_timer(&bnx2i_ep->ofld_timer); 2057 + timer_setup(&bnx2i_ep->ofld_timer, bnx2i_ep_ofld_timer, 0); 2053 2058 bnx2i_ep->ofld_timer.expires = hba->conn_teardown_tmo + jiffies; 2054 - bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer; 2055 - bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep; 2056 2059 add_timer(&bnx2i_ep->ofld_timer); 2057 2060 2058 2061 if 
(!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
+6 -9
drivers/scsi/csiostor/csio_hw.c
··· 3347 3347 * 3348 3348 **/ 3349 3349 static void 3350 - csio_hw_mb_timer(uintptr_t data) 3350 + csio_hw_mb_timer(struct timer_list *t) 3351 3351 { 3352 - struct csio_hw *hw = (struct csio_hw *)data; 3352 + struct csio_mbm *mbm = from_timer(mbm, t, timer); 3353 + struct csio_hw *hw = mbm->hw; 3353 3354 struct csio_mb *mbp = NULL; 3354 3355 3355 3356 spin_lock_irq(&hw->lock); ··· 3716 3715 * Return - none. 3717 3716 */ 3718 3717 static void 3719 - csio_mgmt_tmo_handler(uintptr_t data) 3718 + csio_mgmt_tmo_handler(struct timer_list *t) 3720 3719 { 3721 - struct csio_mgmtm *mgmtm = (struct csio_mgmtm *) data; 3720 + struct csio_mgmtm *mgmtm = from_timer(mgmtm, t, mgmt_timer); 3722 3721 struct list_head *tmp; 3723 3722 struct csio_ioreq *io_req; 3724 3723 ··· 3798 3797 static int 3799 3798 csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw) 3800 3799 { 3801 - struct timer_list *timer = &mgmtm->mgmt_timer; 3802 - 3803 - init_timer(timer); 3804 - timer->function = csio_mgmt_tmo_handler; 3805 - timer->data = (unsigned long)mgmtm; 3800 + timer_setup(&mgmtm->mgmt_timer, csio_mgmt_tmo_handler, 0); 3806 3801 3807 3802 INIT_LIST_HEAD(&mgmtm->active_q); 3808 3803 INIT_LIST_HEAD(&mgmtm->cbfn_q);
+3 -6
drivers/scsi/csiostor/csio_mb.c
··· 1644 1644 */ 1645 1645 int 1646 1646 csio_mbm_init(struct csio_mbm *mbm, struct csio_hw *hw, 1647 - void (*timer_fn)(uintptr_t)) 1647 + void (*timer_fn)(struct timer_list *)) 1648 1648 { 1649 - struct timer_list *timer = &mbm->timer; 1650 - 1651 - init_timer(timer); 1652 - timer->function = timer_fn; 1653 - timer->data = (unsigned long)hw; 1649 + mbm->hw = hw; 1650 + timer_setup(&mbm->timer, timer_fn, 0); 1654 1651 1655 1652 INIT_LIST_HEAD(&mbm->req_q); 1656 1653 INIT_LIST_HEAD(&mbm->cbfn_q);
+2 -1
drivers/scsi/csiostor/csio_mb.h
··· 137 137 uint32_t a_mbox; /* Async mbox num */ 138 138 uint32_t intr_idx; /* Interrupt index */ 139 139 struct timer_list timer; /* Mbox timer */ 140 + struct csio_hw *hw; /* Hardware pointer */ 140 141 struct list_head req_q; /* Mbox request queue */ 141 142 struct list_head cbfn_q; /* Mbox completion q */ 142 143 struct csio_mb *mcurrent; /* Current mailbox */ ··· 253 252 254 253 /* MB module functions */ 255 254 int csio_mbm_init(struct csio_mbm *, struct csio_hw *, 256 - void (*)(uintptr_t)); 255 + void (*)(struct timer_list *)); 257 256 void csio_mbm_exit(struct csio_mbm *); 258 257 void csio_mb_intr_enable(struct csio_hw *); 259 258 void csio_mb_intr_disable(struct csio_hw *);
+4 -4
drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
··· 545 545 } 546 546 } 547 547 548 - static void act_open_retry_timer(unsigned long data) 548 + static void act_open_retry_timer(struct timer_list *t) 549 549 { 550 + struct cxgbi_sock *csk = from_timer(csk, t, retry_timer); 550 551 struct sk_buff *skb; 551 - struct cxgbi_sock *csk = (struct cxgbi_sock *)data; 552 552 553 553 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 554 554 "csk 0x%p,%u,0x%lx,%u.\n", ··· 586 586 cxgbi_sock_get(csk); 587 587 spin_lock_bh(&csk->lock); 588 588 if (rpl->status == CPL_ERR_CONN_EXIST && 589 - csk->retry_timer.function != act_open_retry_timer) { 590 - csk->retry_timer.function = act_open_retry_timer; 589 + csk->retry_timer.function != (TIMER_FUNC_TYPE)act_open_retry_timer) { 590 + csk->retry_timer.function = (TIMER_FUNC_TYPE)act_open_retry_timer; 591 591 mod_timer(&csk->retry_timer, jiffies + HZ / 2); 592 592 } else 593 593 cxgbi_sock_fail_act_open(csk,
+4 -4
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
··· 872 872 } 873 873 } 874 874 875 - static void csk_act_open_retry_timer(unsigned long data) 875 + static void csk_act_open_retry_timer(struct timer_list *t) 876 876 { 877 877 struct sk_buff *skb = NULL; 878 - struct cxgbi_sock *csk = (struct cxgbi_sock *)data; 878 + struct cxgbi_sock *csk = from_timer(csk, t, retry_timer); 879 879 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); 880 880 void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *, 881 881 struct l2t_entry *); ··· 963 963 spin_lock_bh(&csk->lock); 964 964 965 965 if (status == CPL_ERR_CONN_EXIST && 966 - csk->retry_timer.function != csk_act_open_retry_timer) { 967 - csk->retry_timer.function = csk_act_open_retry_timer; 966 + csk->retry_timer.function != (TIMER_FUNC_TYPE)csk_act_open_retry_timer) { 967 + csk->retry_timer.function = (TIMER_FUNC_TYPE)csk_act_open_retry_timer; 968 968 mod_timer(&csk->retry_timer, jiffies + HZ / 2); 969 969 } else 970 970 cxgbi_sock_fail_act_open(csk,
+1 -1
drivers/scsi/cxgbi/libcxgbi.c
··· 572 572 kref_init(&csk->refcnt); 573 573 skb_queue_head_init(&csk->receive_queue); 574 574 skb_queue_head_init(&csk->write_queue); 575 - setup_timer(&csk->retry_timer, NULL, (unsigned long)csk); 575 + timer_setup(&csk->retry_timer, NULL, 0); 576 576 rwlock_init(&csk->callback_lock); 577 577 csk->cdev = cdev; 578 578 csk->flags = 0;
+5 -8
drivers/scsi/dc395x.c
··· 395 395 struct ScsiReqBlk *srb); 396 396 static void set_xfer_rate(struct AdapterCtlBlk *acb, 397 397 struct DeviceCtlBlk *dcb); 398 - static void waiting_timeout(unsigned long ptr); 398 + static void waiting_timeout(struct timer_list *t); 399 399 400 400 401 401 /*--------------------------------------------------------------------------- ··· 857 857 { 858 858 if (timer_pending(&acb->waiting_timer)) 859 859 return; 860 - init_timer(&acb->waiting_timer); 861 - acb->waiting_timer.function = waiting_timeout; 862 - acb->waiting_timer.data = (unsigned long) acb; 863 860 if (time_before(jiffies + to, acb->last_reset - HZ / 2)) 864 861 acb->waiting_timer.expires = 865 862 acb->last_reset - HZ / 2 + 1; ··· 933 936 934 937 935 938 /* Wake up waiting queue */ 936 - static void waiting_timeout(unsigned long ptr) 939 + static void waiting_timeout(struct timer_list *t) 937 940 { 938 941 unsigned long flags; 939 - struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr; 942 + struct AdapterCtlBlk *acb = from_timer(acb, t, waiting_timer); 940 943 dprintkdbg(DBG_1, 941 944 "waiting_timeout: Queue woken up by timer. acb=%p\n", acb); 942 945 DC395x_LOCK_IO(acb->scsi_host, flags); ··· 4363 4366 INIT_LIST_HEAD(&acb->srb_free_list); 4364 4367 /* temp SRB for Q tag used or abort command used */ 4365 4368 acb->tmp_srb = &acb->srb; 4366 - init_timer(&acb->waiting_timer); 4367 - init_timer(&acb->selto_timer); 4369 + timer_setup(&acb->waiting_timer, waiting_timeout, 0); 4370 + timer_setup(&acb->selto_timer, NULL, 0); 4368 4371 4369 4372 acb->srb_count = DC395x_MAX_SRB_CNT; 4370 4373
+1 -1
drivers/scsi/fcoe/fcoe.c
··· 754 754 755 755 skb_queue_head_init(&port->fcoe_pending_queue); 756 756 port->fcoe_pending_queue_active = 0; 757 - setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport); 757 + timer_setup(&port->timer, fcoe_queue_timer, 0); 758 758 759 759 fcoe_link_speed_update(lport); 760 760
+4 -2
drivers/scsi/fcoe/fcoe_transport.c
··· 455 455 * 456 456 * Calls fcoe_check_wait_queue on timeout 457 457 */ 458 - void fcoe_queue_timer(ulong lport) 458 + void fcoe_queue_timer(struct timer_list *t) 459 459 { 460 - fcoe_check_wait_queue((struct fc_lport *)lport, NULL); 460 + struct fcoe_port *port = from_timer(port, t, timer); 461 + 462 + fcoe_check_wait_queue(port->lport, NULL); 461 463 } 462 464 EXPORT_SYMBOL_GPL(fcoe_queue_timer); 463 465
+2 -4
drivers/scsi/gdth.c
··· 3705 3705 #ifdef GDTH_STATISTICS 3706 3706 static u8 gdth_timer_running; 3707 3707 3708 - static void gdth_timeout(unsigned long data) 3708 + static void gdth_timeout(struct timer_list *unused) 3709 3709 { 3710 3710 u32 i; 3711 3711 Scsi_Cmnd *nscp; ··· 3743 3743 gdth_timer_running = 1; 3744 3744 TRACE2(("gdth_detect(): Initializing timer !\n")); 3745 3745 gdth_timer.expires = jiffies + HZ; 3746 - gdth_timer.data = 0L; 3747 - gdth_timer.function = gdth_timeout; 3748 3746 add_timer(&gdth_timer); 3749 3747 } 3750 3748 #else ··· 5163 5165 /* initializations */ 5164 5166 gdth_polling = TRUE; 5165 5167 gdth_clear_events(); 5166 - init_timer(&gdth_timer); 5168 + timer_setup(&gdth_timer, gdth_timeout, 0); 5167 5169 5168 5170 /* As default we do not probe for EISA or ISA controllers */ 5169 5171 if (probe_eisa_isa) {
-1
drivers/scsi/hisi_sas/hisi_sas.h
··· 103 103 struct hisi_sas_port *port; 104 104 struct asd_sas_phy sas_phy; 105 105 struct sas_identify identify; 106 - struct timer_list timer; 107 106 struct work_struct phyup_ws; 108 107 u64 port_id; /* from hw */ 109 108 u64 dev_sas_addr;
+6 -8
drivers/scsi/hisi_sas/hisi_sas_main.c
··· 627 627 628 628 phy->hisi_hba = hisi_hba; 629 629 phy->port = NULL; 630 - init_timer(&phy->timer); 631 630 sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0; 632 631 sas_phy->class = SAS; 633 632 sas_phy->iproto = SAS_PROTOCOL_ALL; ··· 791 792 complete(&task->slow_task->completion); 792 793 } 793 794 794 - static void hisi_sas_tmf_timedout(unsigned long data) 795 + static void hisi_sas_tmf_timedout(struct timer_list *t) 795 796 { 796 - struct sas_task *task = (struct sas_task *)data; 797 + struct sas_task_slow *slow = from_timer(slow, t, timer); 798 + struct sas_task *task = slow->task; 797 799 unsigned long flags; 798 800 799 801 spin_lock_irqsave(&task->task_state_lock, flags); ··· 833 833 } 834 834 task->task_done = hisi_sas_task_done; 835 835 836 - task->slow_task->timer.data = (unsigned long) task; 837 - task->slow_task->timer.function = hisi_sas_tmf_timedout; 836 + task->slow_task->timer.function = (TIMER_FUNC_TYPE)hisi_sas_tmf_timedout; 838 837 task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ; 839 838 add_timer(&task->slow_task->timer); 840 839 ··· 1446 1447 task->dev = device; 1447 1448 task->task_proto = device->tproto; 1448 1449 task->task_done = hisi_sas_task_done; 1449 - task->slow_task->timer.data = (unsigned long)task; 1450 - task->slow_task->timer.function = hisi_sas_tmf_timedout; 1450 + task->slow_task->timer.function = (TIMER_FUNC_TYPE)hisi_sas_tmf_timedout; 1451 1451 task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110); 1452 1452 add_timer(&task->slow_task->timer); 1453 1453 ··· 1875 1877 hisi_hba->shost = shost; 1876 1878 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; 1877 1879 1878 - init_timer(&hisi_hba->timer); 1880 + timer_setup(&hisi_hba->timer, NULL, 0); 1879 1881 1880 1882 if (hisi_sas_get_fw_info(hisi_hba) < 0) 1881 1883 goto err_out;
+3 -3
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
··· 807 807 start_phy_v1_hw(hisi_hba, phy_no); 808 808 } 809 809 810 - static void start_phys_v1_hw(unsigned long data) 810 + static void start_phys_v1_hw(struct timer_list *t) 811 811 { 812 - struct hisi_hba *hisi_hba = (struct hisi_hba *)data; 812 + struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer); 813 813 int i; 814 814 815 815 for (i = 0; i < hisi_hba->n_phy; i++) { ··· 828 828 hisi_sas_phy_read32(hisi_hba, i, CHL_INT2_MSK); 829 829 } 830 830 831 - setup_timer(timer, start_phys_v1_hw, (unsigned long)hisi_hba); 831 + timer_setup(timer, start_phys_v1_hw, 0); 832 832 mod_timer(timer, jiffies + HZ); 833 833 } 834 834
+11 -13
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
··· 728 728 #define ERR_ON_RX_PHASE(err_phase) (err_phase == 0x10 || \ 729 729 err_phase == 0x20 || err_phase == 0x40) 730 730 731 - static void link_timeout_disable_link(unsigned long data); 731 + static void link_timeout_disable_link(struct timer_list *t); 732 732 733 733 static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) 734 734 { ··· 1270 1270 upper_32_bits(hisi_hba->initial_fis_dma)); 1271 1271 } 1272 1272 1273 - static void link_timeout_enable_link(unsigned long data) 1273 + static void link_timeout_enable_link(struct timer_list *t) 1274 1274 { 1275 - struct hisi_hba *hisi_hba = (struct hisi_hba *)data; 1275 + struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer); 1276 1276 int i, reg_val; 1277 1277 1278 1278 for (i = 0; i < hisi_hba->n_phy; i++) { ··· 1287 1287 } 1288 1288 } 1289 1289 1290 - hisi_hba->timer.function = link_timeout_disable_link; 1290 + hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_disable_link; 1291 1291 mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(900)); 1292 1292 } 1293 1293 1294 - static void link_timeout_disable_link(unsigned long data) 1294 + static void link_timeout_disable_link(struct timer_list *t) 1295 1295 { 1296 - struct hisi_hba *hisi_hba = (struct hisi_hba *)data; 1296 + struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer); 1297 1297 int i, reg_val; 1298 1298 1299 1299 reg_val = hisi_sas_read32(hisi_hba, PHY_STATE); ··· 1308 1308 } 1309 1309 } 1310 1310 1311 - hisi_hba->timer.function = link_timeout_enable_link; 1311 + hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_enable_link; 1312 1312 mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(100)); 1313 1313 } 1314 1314 1315 1315 static void set_link_timer_quirk(struct hisi_hba *hisi_hba) 1316 1316 { 1317 - hisi_hba->timer.data = (unsigned long)hisi_hba; 1318 - hisi_hba->timer.function = link_timeout_disable_link; 1317 + hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_disable_link; 1319 1318 hisi_hba->timer.expires = 
jiffies + msecs_to_jiffies(1000); 1320 1319 add_timer(&hisi_hba->timer); 1321 1320 } ··· 2573 2574 return 0; 2574 2575 } 2575 2576 2576 - static void hisi_sas_internal_abort_quirk_timeout(unsigned long data) 2577 + static void hisi_sas_internal_abort_quirk_timeout(struct timer_list *t) 2577 2578 { 2578 - struct hisi_sas_slot *slot = (struct hisi_sas_slot *)data; 2579 + struct hisi_sas_slot *slot = from_timer(slot, t, internal_abort_timer); 2579 2580 struct hisi_sas_port *port = slot->port; 2580 2581 struct asd_sas_port *asd_sas_port; 2581 2582 struct asd_sas_phy *sas_phy; ··· 2618 2619 struct timer_list *timer = &slot->internal_abort_timer; 2619 2620 2620 2621 /* setup the quirk timer */ 2621 - setup_timer(timer, hisi_sas_internal_abort_quirk_timeout, 2622 - (unsigned long)slot); 2622 + timer_setup(timer, hisi_sas_internal_abort_quirk_timeout, 0); 2623 2623 /* Set the timeout to 10ms less than internal abort timeout */ 2624 2624 mod_timer(timer, jiffies + msecs_to_jiffies(100)); 2625 2625
+1 -1
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
··· 1823 1823 hisi_hba->shost = shost; 1824 1824 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; 1825 1825 1826 - init_timer(&hisi_hba->timer); 1826 + timer_setup(&hisi_hba->timer, NULL, 0); 1827 1827 1828 1828 if (hisi_sas_get_fw_info(hisi_hba) < 0) 1829 1829 goto err_out;
+6 -8
drivers/scsi/ibmvscsi/ibmvfc.c
··· 1393 1393 * 1394 1394 * Called when an internally generated command times out 1395 1395 **/ 1396 - static void ibmvfc_timeout(struct ibmvfc_event *evt) 1396 + static void ibmvfc_timeout(struct timer_list *t) 1397 1397 { 1398 + struct ibmvfc_event *evt = from_timer(evt, t, timer); 1398 1399 struct ibmvfc_host *vhost = evt->vhost; 1399 1400 dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt); 1400 1401 ibmvfc_reset_host(vhost); ··· 1425 1424 BUG(); 1426 1425 1427 1426 list_add_tail(&evt->queue, &vhost->sent); 1428 - init_timer(&evt->timer); 1427 + timer_setup(&evt->timer, ibmvfc_timeout, 0); 1429 1428 1430 1429 if (timeout) { 1431 - evt->timer.data = (unsigned long) evt; 1432 1430 evt->timer.expires = jiffies + (timeout * HZ); 1433 - evt->timer.function = (void (*)(unsigned long))ibmvfc_timeout; 1434 1431 add_timer(&evt->timer); 1435 1432 } 1436 1433 ··· 3691 3692 * out, reset the CRQ. When the ADISC comes back as cancelled, 3692 3693 * log back into the target. 
3693 3694 **/ 3694 - static void ibmvfc_adisc_timeout(struct ibmvfc_target *tgt) 3695 + static void ibmvfc_adisc_timeout(struct timer_list *t) 3695 3696 { 3697 + struct ibmvfc_target *tgt = from_timer(tgt, t, timer); 3696 3698 struct ibmvfc_host *vhost = tgt->vhost; 3697 3699 struct ibmvfc_event *evt; 3698 3700 struct ibmvfc_tmf *tmf; ··· 3778 3778 if (timer_pending(&tgt->timer)) 3779 3779 mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ)); 3780 3780 else { 3781 - tgt->timer.data = (unsigned long) tgt; 3782 3781 tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ); 3783 - tgt->timer.function = (void (*)(unsigned long))ibmvfc_adisc_timeout; 3784 3782 add_timer(&tgt->timer); 3785 3783 } 3786 3784 ··· 3910 3912 tgt->vhost = vhost; 3911 3913 tgt->need_login = 1; 3912 3914 tgt->cancel_key = vhost->task_set++; 3913 - init_timer(&tgt->timer); 3915 + timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0); 3914 3916 kref_init(&tgt->kref); 3915 3917 ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout); 3916 3918 spin_lock_irqsave(vhost->host->host_lock, flags);
+3 -4
drivers/scsi/ibmvscsi/ibmvscsi.c
··· 837 837 * 838 838 * Called when an internally generated command times out 839 839 */ 840 - static void ibmvscsi_timeout(struct srp_event_struct *evt_struct) 840 + static void ibmvscsi_timeout(struct timer_list *t) 841 841 { 842 + struct srp_event_struct *evt_struct = from_timer(evt_struct, t, timer); 842 843 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; 843 844 844 845 dev_err(hostdata->dev, "Command timed out (%x). Resetting connection\n", ··· 928 927 */ 929 928 list_add_tail(&evt_struct->list, &hostdata->sent); 930 929 931 - init_timer(&evt_struct->timer); 930 + timer_setup(&evt_struct->timer, ibmvscsi_timeout, 0); 932 931 if (timeout) { 933 - evt_struct->timer.data = (unsigned long) evt_struct; 934 932 evt_struct->timer.expires = jiffies + (timeout * HZ); 935 - evt_struct->timer.function = (void (*)(unsigned long))ibmvscsi_timeout; 936 933 add_timer(&evt_struct->timer); 937 934 } 938 935
+15 -15
drivers/scsi/ipr.c
··· 694 694 ipr_cmd->sibling = NULL; 695 695 ipr_cmd->eh_comp = NULL; 696 696 ipr_cmd->fast_done = fast_done; 697 - init_timer(&ipr_cmd->timer); 697 + timer_setup(&ipr_cmd->timer, NULL, 0); 698 698 } 699 699 700 700 /** ··· 990 990 **/ 991 991 static void ipr_do_req(struct ipr_cmnd *ipr_cmd, 992 992 void (*done) (struct ipr_cmnd *), 993 - void (*timeout_func) (struct ipr_cmnd *), u32 timeout) 993 + void (*timeout_func) (struct timer_list *), u32 timeout) 994 994 { 995 995 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 996 996 997 997 ipr_cmd->done = done; 998 998 999 - ipr_cmd->timer.data = (unsigned long) ipr_cmd; 1000 999 ipr_cmd->timer.expires = jiffies + timeout; 1001 - ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func; 1000 + ipr_cmd->timer.function = (TIMER_FUNC_TYPE)timeout_func; 1002 1001 1003 1002 add_timer(&ipr_cmd->timer); 1004 1003 ··· 1079 1080 * none 1080 1081 **/ 1081 1082 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd, 1082 - void (*timeout_func) (struct ipr_cmnd *ipr_cmd), 1083 + void (*timeout_func) (struct timer_list *), 1083 1084 u32 timeout) 1084 1085 { 1085 1086 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; ··· 2663 2664 * Return value: 2664 2665 * none 2665 2666 **/ 2666 - static void ipr_timeout(struct ipr_cmnd *ipr_cmd) 2667 + static void ipr_timeout(struct timer_list *t) 2667 2668 { 2669 + struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); 2668 2670 unsigned long lock_flags = 0; 2669 2671 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 2670 2672 ··· 2696 2696 * Return value: 2697 2697 * none 2698 2698 **/ 2699 - static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd) 2699 + static void ipr_oper_timeout(struct timer_list *t) 2700 2700 { 2701 + struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); 2701 2702 unsigned long lock_flags = 0; 2702 2703 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 2703 2704 ··· 5450 5449 * Return value: 5451 5450 * none 5452 5451 **/ 5453 - static void 
ipr_abort_timeout(struct ipr_cmnd *ipr_cmd) 5452 + static void ipr_abort_timeout(struct timer_list *t) 5454 5453 { 5454 + struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); 5455 5455 struct ipr_cmnd *reset_cmd; 5456 5456 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 5457 5457 struct ipr_cmd_pkt *cmd_pkt; ··· 8273 8271 * Return value: 8274 8272 * none 8275 8273 **/ 8276 - static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd) 8274 + static void ipr_reset_timer_done(struct timer_list *t) 8277 8275 { 8276 + struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); 8278 8277 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 8279 8278 unsigned long lock_flags = 0; 8280 8279 ··· 8311 8308 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); 8312 8309 ipr_cmd->done = ipr_reset_ioa_job; 8313 8310 8314 - ipr_cmd->timer.data = (unsigned long) ipr_cmd; 8315 8311 ipr_cmd->timer.expires = jiffies + timeout; 8316 - ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done; 8312 + ipr_cmd->timer.function = (TIMER_FUNC_TYPE)ipr_reset_timer_done; 8317 8313 add_timer(&ipr_cmd->timer); 8318 8314 } 8319 8315 ··· 8396 8394 } 8397 8395 } 8398 8396 8399 - ipr_cmd->timer.data = (unsigned long) ipr_cmd; 8400 8397 ipr_cmd->timer.expires = jiffies + stage_time * HZ; 8401 - ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout; 8398 + ipr_cmd->timer.function = (TIMER_FUNC_TYPE)ipr_oper_timeout; 8402 8399 ipr_cmd->done = ipr_reset_ioa_job; 8403 8400 add_timer(&ipr_cmd->timer); 8404 8401 ··· 8467 8466 return IPR_RC_JOB_CONTINUE; 8468 8467 } 8469 8468 8470 - ipr_cmd->timer.data = (unsigned long) ipr_cmd; 8471 8469 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ); 8472 - ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout; 8470 + ipr_cmd->timer.function = (TIMER_FUNC_TYPE)ipr_oper_timeout; 8473 8471 ipr_cmd->done = ipr_reset_ioa_job; 8474 8472 add_timer(&ipr_cmd->timer); 8475 8473 list_add_tail(&ipr_cmd->queue, 
&ipr_cmd->hrrq->hrrq_pending_q);
+6 -6
drivers/scsi/isci/host.c
··· 958 958 return status; 959 959 } 960 960 961 - static void phy_startup_timeout(unsigned long data) 961 + static void phy_startup_timeout(struct timer_list *t) 962 962 { 963 - struct sci_timer *tmr = (struct sci_timer *)data; 963 + struct sci_timer *tmr = from_timer(tmr, t, timer); 964 964 struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer); 965 965 unsigned long flags; 966 966 enum sci_status status; ··· 1592 1592 [SCIC_FAILED] = {} 1593 1593 }; 1594 1594 1595 - static void controller_timeout(unsigned long data) 1595 + static void controller_timeout(struct timer_list *t) 1596 1596 { 1597 - struct sci_timer *tmr = (struct sci_timer *)data; 1597 + struct sci_timer *tmr = from_timer(tmr, t, timer); 1598 1598 struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer); 1599 1599 struct sci_base_state_machine *sm = &ihost->sm; 1600 1600 unsigned long flags; ··· 1737 1737 MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT); 1738 1738 } 1739 1739 1740 - static void power_control_timeout(unsigned long data) 1740 + static void power_control_timeout(struct timer_list *t) 1741 1741 { 1742 - struct sci_timer *tmr = (struct sci_timer *)data; 1742 + struct sci_timer *tmr = from_timer(tmr, t, timer); 1743 1743 struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer); 1744 1744 struct isci_phy *iphy; 1745 1745 unsigned long flags;
+2 -4
drivers/scsi/isci/isci.h
··· 498 498 }; 499 499 500 500 static inline 501 - void sci_init_timer(struct sci_timer *tmr, void (*fn)(unsigned long)) 501 + void sci_init_timer(struct sci_timer *tmr, void (*fn)(struct timer_list *t)) 502 502 { 503 - tmr->timer.function = fn; 504 - tmr->timer.data = (unsigned long) tmr; 505 503 tmr->cancel = 0; 506 - init_timer(&tmr->timer); 504 + timer_setup(&tmr->timer, fn, 0); 507 505 } 508 506 509 507 static inline void sci_mod_timer(struct sci_timer *tmr, unsigned long msec)
+2 -2
drivers/scsi/isci/phy.c
··· 315 315 return SCI_SUCCESS; 316 316 } 317 317 318 - static void phy_sata_timeout(unsigned long data) 318 + static void phy_sata_timeout(struct timer_list *t) 319 319 { 320 - struct sci_timer *tmr = (struct sci_timer *)data; 320 + struct sci_timer *tmr = from_timer(tmr, t, timer); 321 321 struct isci_phy *iphy = container_of(tmr, typeof(*iphy), sata_timer); 322 322 struct isci_host *ihost = iphy->owning_port->owning_controller; 323 323 unsigned long flags;
+2 -2
drivers/scsi/isci/port.c
··· 769 769 return true; 770 770 } 771 771 772 - static void port_timeout(unsigned long data) 772 + static void port_timeout(struct timer_list *t) 773 773 { 774 - struct sci_timer *tmr = (struct sci_timer *)data; 774 + struct sci_timer *tmr = from_timer(tmr, t, timer); 775 775 struct isci_port *iport = container_of(tmr, typeof(*iport), timer); 776 776 struct isci_host *ihost = iport->owning_controller; 777 777 unsigned long flags;
+4 -4
drivers/scsi/isci/port_config.c
··· 319 319 return sci_port_configuration_agent_validate_ports(ihost, port_agent); 320 320 } 321 321 322 - static void mpc_agent_timeout(unsigned long data) 322 + static void mpc_agent_timeout(struct timer_list *t) 323 323 { 324 324 u8 index; 325 - struct sci_timer *tmr = (struct sci_timer *)data; 325 + struct sci_timer *tmr = from_timer(tmr, t, timer); 326 326 struct sci_port_configuration_agent *port_agent; 327 327 struct isci_host *ihost; 328 328 unsigned long flags; ··· 654 654 } 655 655 656 656 /* configure the phys into ports when the timer fires */ 657 - static void apc_agent_timeout(unsigned long data) 657 + static void apc_agent_timeout(struct timer_list *t) 658 658 { 659 659 u32 index; 660 - struct sci_timer *tmr = (struct sci_timer *)data; 660 + struct sci_timer *tmr = from_timer(tmr, t, timer); 661 661 struct sci_port_configuration_agent *port_agent; 662 662 struct isci_host *ihost; 663 663 unsigned long flags;
+10 -11
drivers/scsi/libfc/fc_fcp.c
··· 97 97 static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *); 98 98 static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *); 99 99 static void fc_fcp_recovery(struct fc_fcp_pkt *, u8 code); 100 - static void fc_fcp_timeout(unsigned long); 100 + static void fc_fcp_timeout(struct timer_list *); 101 101 static void fc_fcp_rec(struct fc_fcp_pkt *); 102 102 static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *); 103 103 static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *); ··· 155 155 fsp->lp = lport; 156 156 fsp->xfer_ddp = FC_XID_UNKNOWN; 157 157 refcount_set(&fsp->ref_cnt, 1); 158 - init_timer(&fsp->timer); 159 - fsp->timer.data = (unsigned long)fsp; 158 + timer_setup(&fsp->timer, NULL, 0); 160 159 INIT_LIST_HEAD(&fsp->list); 161 160 spin_lock_init(&fsp->scsi_pkt_lock); 162 161 } else { ··· 1214 1215 fsp->seq_ptr = seq; 1215 1216 fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */ 1216 1217 1217 - setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp); 1218 + fsp->timer.function = (TIMER_FUNC_TYPE)fc_fcp_timeout; 1218 1219 if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED) 1219 1220 fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp)); 1220 1221 ··· 1297 1298 * fc_lun_reset_send() - Send LUN reset command 1298 1299 * @data: The FCP packet that identifies the LUN to be reset 1299 1300 */ 1300 - static void fc_lun_reset_send(unsigned long data) 1301 + static void fc_lun_reset_send(struct timer_list *t) 1301 1302 { 1302 - struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; 1303 + struct fc_fcp_pkt *fsp = from_timer(fsp, t, timer); 1303 1304 struct fc_lport *lport = fsp->lp; 1304 1305 1305 1306 if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) { ··· 1307 1308 return; 1308 1309 if (fc_fcp_lock_pkt(fsp)) 1309 1310 return; 1310 - setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp); 1311 + fsp->timer.function = (TIMER_FUNC_TYPE)fc_lun_reset_send; 1311 1312 fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp)); 1312 
1313 fc_fcp_unlock_pkt(fsp); 1313 1314 } ··· 1333 1334 fsp->wait_for_comp = 1; 1334 1335 init_completion(&fsp->tm_done); 1335 1336 1336 - fc_lun_reset_send((unsigned long)fsp); 1337 + fc_lun_reset_send(&fsp->timer); 1337 1338 1338 1339 /* 1339 1340 * wait for completion of reset ··· 1430 1431 * received we see if data was received recently. If it has been then we 1431 1432 * continue waiting, otherwise, we abort the command. 1432 1433 */ 1433 - static void fc_fcp_timeout(unsigned long data) 1434 + static void fc_fcp_timeout(struct timer_list *t) 1434 1435 { 1435 - struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; 1436 + struct fc_fcp_pkt *fsp = from_timer(fsp, t, timer); 1436 1437 struct fc_rport *rport = fsp->rport; 1437 1438 struct fc_rport_libfc_priv *rpriv = rport->dd_data; 1438 1439 ··· 1445 1446 if (fsp->lp->qfull) { 1446 1447 FC_FCP_DBG(fsp, "fcp timeout, resetting timer delay %d\n", 1447 1448 fsp->timer_delay); 1448 - setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp); 1449 + fsp->timer.function = (TIMER_FUNC_TYPE)fc_fcp_timeout; 1449 1450 fc_fcp_timer_set(fsp, fsp->timer_delay); 1450 1451 goto unlock; 1451 1452 }
+6 -10
drivers/scsi/libiscsi.c
··· 1805 1805 } 1806 1806 EXPORT_SYMBOL_GPL(iscsi_target_alloc); 1807 1807 1808 - static void iscsi_tmf_timedout(unsigned long data) 1808 + static void iscsi_tmf_timedout(struct timer_list *t) 1809 1809 { 1810 - struct iscsi_conn *conn = (struct iscsi_conn *)data; 1810 + struct iscsi_conn *conn = from_timer(conn, t, tmf_timer); 1811 1811 struct iscsi_session *session = conn->session; 1812 1812 1813 1813 spin_lock(&session->frwd_lock); ··· 1838 1838 } 1839 1839 conn->tmfcmd_pdus_cnt++; 1840 1840 conn->tmf_timer.expires = timeout * HZ + jiffies; 1841 - conn->tmf_timer.function = iscsi_tmf_timedout; 1842 - conn->tmf_timer.data = (unsigned long)conn; 1843 1841 add_timer(&conn->tmf_timer); 1844 1842 ISCSI_DBG_EH(session, "tmf set timeout\n"); 1845 1843 ··· 2087 2089 } 2088 2090 EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out); 2089 2091 2090 - static void iscsi_check_transport_timeouts(unsigned long data) 2092 + static void iscsi_check_transport_timeouts(struct timer_list *t) 2091 2093 { 2092 - struct iscsi_conn *conn = (struct iscsi_conn *)data; 2094 + struct iscsi_conn *conn = from_timer(conn, t, transport_timer); 2093 2095 struct iscsi_session *session = conn->session; 2094 2096 unsigned long recv_timeout, next_timeout = 0, last_recv; 2095 2097 ··· 2911 2913 conn->exp_statsn = 0; 2912 2914 conn->tmf_state = TMF_INITIAL; 2913 2915 2914 - init_timer(&conn->transport_timer); 2915 - conn->transport_timer.data = (unsigned long)conn; 2916 - conn->transport_timer.function = iscsi_check_transport_timeouts; 2916 + timer_setup(&conn->transport_timer, iscsi_check_transport_timeouts, 0); 2917 2917 2918 2918 INIT_LIST_HEAD(&conn->mgmtqueue); 2919 2919 INIT_LIST_HEAD(&conn->cmdqueue); ··· 2935 2939 goto login_task_data_alloc_fail; 2936 2940 conn->login_task->data = conn->data = data; 2937 2941 2938 - init_timer(&conn->tmf_timer); 2942 + timer_setup(&conn->tmf_timer, iscsi_tmf_timedout, 0); 2939 2943 init_waitqueue_head(&conn->ehwait); 2940 2944 2941 2945 return cls_conn;
+4 -4
drivers/scsi/libsas/sas_expander.c
··· 41 41 42 42 /* ---------- SMP task management ---------- */ 43 43 44 - static void smp_task_timedout(unsigned long _task) 44 + static void smp_task_timedout(struct timer_list *t) 45 45 { 46 - struct sas_task *task = (void *) _task; 46 + struct sas_task_slow *slow = from_timer(slow, t, timer); 47 + struct sas_task *task = slow->task; 47 48 unsigned long flags; 48 49 49 50 spin_lock_irqsave(&task->task_state_lock, flags); ··· 92 91 93 92 task->task_done = smp_task_done; 94 93 95 - task->slow_task->timer.data = (unsigned long) task; 96 - task->slow_task->timer.function = smp_task_timedout; 94 + task->slow_task->timer.function = (TIMER_FUNC_TYPE)smp_task_timedout; 97 95 task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ; 98 96 add_timer(&task->slow_task->timer); 99 97
+2 -1
drivers/scsi/libsas/sas_init.c
··· 66 66 } 67 67 68 68 task->slow_task = slow; 69 - init_timer(&slow->timer); 69 + slow->task = task; 70 + timer_setup(&slow->timer, NULL, 0); 70 71 init_completion(&slow->completion); 71 72 72 73 return task;
+1 -1
drivers/scsi/libsas/sas_scsi_host.c
··· 919 919 return; 920 920 if (!del_timer(&slow->timer)) 921 921 return; 922 - slow->timer.function(slow->timer.data); 922 + slow->timer.function((TIMER_DATA_TYPE)&slow->timer); 923 923 return; 924 924 } 925 925
+8 -8
drivers/scsi/lpfc/lpfc_crtn.h
··· 113 113 void lpfc_disc_start(struct lpfc_vport *); 114 114 void lpfc_cleanup_discovery_resources(struct lpfc_vport *); 115 115 void lpfc_cleanup(struct lpfc_vport *); 116 - void lpfc_disc_timeout(unsigned long); 116 + void lpfc_disc_timeout(struct timer_list *); 117 117 118 118 int lpfc_unregister_fcf_prep(struct lpfc_hba *); 119 119 struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t); ··· 154 154 int lpfc_els_rsp_prli_acc(struct lpfc_vport *, struct lpfc_iocbq *, 155 155 struct lpfc_nodelist *); 156 156 void lpfc_cancel_retry_delay_tmo(struct lpfc_vport *, struct lpfc_nodelist *); 157 - void lpfc_els_retry_delay(unsigned long); 157 + void lpfc_els_retry_delay(struct timer_list *); 158 158 void lpfc_els_retry_delay_handler(struct lpfc_nodelist *); 159 159 void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 160 160 struct lpfc_iocbq *); ··· 165 165 void lpfc_els_flush_cmd(struct lpfc_vport *); 166 166 int lpfc_els_disc_adisc(struct lpfc_vport *); 167 167 int lpfc_els_disc_plogi(struct lpfc_vport *); 168 - void lpfc_els_timeout(unsigned long); 168 + void lpfc_els_timeout(struct timer_list *); 169 169 void lpfc_els_timeout_handler(struct lpfc_vport *); 170 170 struct lpfc_iocbq *lpfc_prep_els_iocb(struct lpfc_vport *, uint8_t, uint16_t, 171 171 uint8_t, struct lpfc_nodelist *, ··· 180 180 int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); 181 181 int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int, uint32_t); 182 182 void lpfc_fdmi_num_disc_check(struct lpfc_vport *); 183 - void lpfc_delayed_disc_tmo(unsigned long); 183 + void lpfc_delayed_disc_tmo(struct timer_list *); 184 184 void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *); 185 185 186 186 int lpfc_config_port_prep(struct lpfc_hba *); ··· 279 279 void lpfc_mem_free_all(struct lpfc_hba *); 280 280 void lpfc_stop_vport_timers(struct lpfc_vport *); 281 281 282 - void lpfc_poll_timeout(unsigned long ptr); 282 + void lpfc_poll_timeout(struct 
timer_list *t); 283 283 void lpfc_poll_start_timer(struct lpfc_hba *); 284 - void lpfc_poll_eratt(unsigned long); 284 + void lpfc_poll_eratt(struct timer_list *); 285 285 int 286 286 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *, 287 287 struct lpfc_sli_ring *, uint32_t); ··· 351 351 lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *, 352 352 uint16_t, uint64_t, lpfc_ctx_cmd); 353 353 354 - void lpfc_mbox_timeout(unsigned long); 354 + void lpfc_mbox_timeout(struct timer_list *t); 355 355 void lpfc_mbox_timeout_handler(struct lpfc_hba *); 356 356 357 357 struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t); ··· 445 445 /* Interface exported by fabric iocb scheduler */ 446 446 void lpfc_fabric_abort_nport(struct lpfc_nodelist *); 447 447 void lpfc_fabric_abort_hba(struct lpfc_hba *); 448 - void lpfc_fabric_block_timeout(unsigned long); 448 + void lpfc_fabric_block_timeout(struct timer_list *); 449 449 void lpfc_unblock_fabric_iocbs(struct lpfc_hba *); 450 450 void lpfc_rampdown_queue_depth(struct lpfc_hba *); 451 451 void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
+2 -2
drivers/scsi/lpfc/lpfc_ct.c
··· 2884 2884 * the worker thread. 2885 2885 **/ 2886 2886 void 2887 - lpfc_delayed_disc_tmo(unsigned long ptr) 2887 + lpfc_delayed_disc_tmo(struct timer_list *t) 2888 2888 { 2889 - struct lpfc_vport *vport = (struct lpfc_vport *)ptr; 2889 + struct lpfc_vport *vport = from_timer(vport, t, delayed_disc_tmo); 2890 2890 struct lpfc_hba *phba = vport->phba; 2891 2891 uint32_t tmo_posted; 2892 2892 unsigned long iflag;
+6 -6
drivers/scsi/lpfc/lpfc_els.c
··· 3131 3131 * to the event associated with the ndlp. 3132 3132 **/ 3133 3133 void 3134 - lpfc_els_retry_delay(unsigned long ptr) 3134 + lpfc_els_retry_delay(struct timer_list *t) 3135 3135 { 3136 - struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr; 3136 + struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); 3137 3137 struct lpfc_vport *vport = ndlp->vport; 3138 3138 struct lpfc_hba *phba = vport->phba; 3139 3139 unsigned long flags; ··· 7385 7385 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO. 7386 7386 **/ 7387 7387 void 7388 - lpfc_els_timeout(unsigned long ptr) 7388 + lpfc_els_timeout(struct timer_list *t) 7389 7389 { 7390 - struct lpfc_vport *vport = (struct lpfc_vport *) ptr; 7390 + struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc); 7391 7391 struct lpfc_hba *phba = vport->phba; 7392 7392 uint32_t tmo_posted; 7393 7393 unsigned long iflag; ··· 9017 9017 * posted event WORKER_FABRIC_BLOCK_TMO. 9018 9018 **/ 9019 9019 void 9020 - lpfc_fabric_block_timeout(unsigned long ptr) 9020 + lpfc_fabric_block_timeout(struct timer_list *t) 9021 9021 { 9022 - struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 9022 + struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); 9023 9023 unsigned long iflags; 9024 9024 uint32_t tmo_posted; 9025 9025
+3 -4
drivers/scsi/lpfc/lpfc_hbadisc.c
··· 4370 4370 { 4371 4371 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); 4372 4372 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); 4373 - setup_timer(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 4374 - (unsigned long)ndlp); 4373 + timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0); 4375 4374 ndlp->nlp_DID = did; 4376 4375 ndlp->vport = vport; 4377 4376 ndlp->phba = vport->phba; ··· 5507 5508 */ 5508 5509 /*****************************************************************************/ 5509 5510 void 5510 - lpfc_disc_timeout(unsigned long ptr) 5511 + lpfc_disc_timeout(struct timer_list *t) 5511 5512 { 5512 - struct lpfc_vport *vport = (struct lpfc_vport *) ptr; 5513 + struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo); 5513 5514 struct lpfc_hba *phba = vport->phba; 5514 5515 uint32_t tmo_posted; 5515 5516 unsigned long flags = 0;
+16 -23
drivers/scsi/lpfc/lpfc_init.c
··· 1138 1138 * be cleared by the worker thread after it has taken the event bitmap out. 1139 1139 **/ 1140 1140 static void 1141 - lpfc_hb_timeout(unsigned long ptr) 1141 + lpfc_hb_timeout(struct timer_list *t) 1142 1142 { 1143 1143 struct lpfc_hba *phba; 1144 1144 uint32_t tmo_posted; 1145 1145 unsigned long iflag; 1146 1146 1147 - phba = (struct lpfc_hba *)ptr; 1147 + phba = from_timer(phba, t, hb_tmofunc); 1148 1148 1149 1149 /* Check for heart beat timeout conditions */ 1150 1150 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); ··· 1172 1172 * be cleared by the worker thread after it has taken the event bitmap out. 1173 1173 **/ 1174 1174 static void 1175 - lpfc_rrq_timeout(unsigned long ptr) 1175 + lpfc_rrq_timeout(struct timer_list *t) 1176 1176 { 1177 1177 struct lpfc_hba *phba; 1178 1178 unsigned long iflag; 1179 1179 1180 - phba = (struct lpfc_hba *)ptr; 1180 + phba = from_timer(phba, t, rrq_tmr); 1181 1181 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1182 1182 if (!(phba->pport->load_flag & FC_UNLOADING)) 1183 1183 phba->hba_flag |= HBA_RRQ_ACTIVE; ··· 3937 3937 INIT_LIST_HEAD(&vport->rcv_buffer_list); 3938 3938 spin_lock_init(&vport->work_port_lock); 3939 3939 3940 - setup_timer(&vport->fc_disctmo, lpfc_disc_timeout, 3941 - (unsigned long)vport); 3940 + timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); 3942 3941 3943 - setup_timer(&vport->els_tmofunc, lpfc_els_timeout, 3944 - (unsigned long)vport); 3942 + timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); 3945 3943 3946 - setup_timer(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 3947 - (unsigned long)vport); 3944 + timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); 3948 3945 3949 3946 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 3950 3947 if (error) ··· 4207 4210 * worker thread context. 
4208 4211 **/ 4209 4212 static void 4210 - lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) 4213 + lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t) 4211 4214 { 4212 - struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 4215 + struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait); 4213 4216 4214 4217 /* Don't send FCF rediscovery event if timer cancelled */ 4215 4218 spin_lock_irq(&phba->hbalock); ··· 5621 5624 INIT_LIST_HEAD(&phba->luns); 5622 5625 5623 5626 /* MBOX heartbeat timer */ 5624 - setup_timer(&psli->mbox_tmo, lpfc_mbox_timeout, (unsigned long)phba); 5627 + timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); 5625 5628 /* Fabric block timer */ 5626 - setup_timer(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 5627 - (unsigned long)phba); 5629 + timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); 5628 5630 /* EA polling mode timer */ 5629 - setup_timer(&phba->eratt_poll, lpfc_poll_eratt, 5630 - (unsigned long)phba); 5631 + timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); 5631 5632 /* Heartbeat timer */ 5632 - setup_timer(&phba->hb_tmofunc, lpfc_hb_timeout, (unsigned long)phba); 5633 + timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); 5633 5634 5634 5635 return 0; 5635 5636 } ··· 5653 5658 */ 5654 5659 5655 5660 /* FCP polling mode timer */ 5656 - setup_timer(&phba->fcp_poll_timer, lpfc_poll_timeout, 5657 - (unsigned long)phba); 5661 + timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); 5658 5662 5659 5663 /* Host attention work mask setup */ 5660 5664 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); ··· 5823 5829 * Initialize timers used by driver 5824 5830 */ 5825 5831 5826 - setup_timer(&phba->rrq_tmr, lpfc_rrq_timeout, (unsigned long)phba); 5832 + timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); 5827 5833 5828 5834 /* FCF rediscover timer */ 5829 - setup_timer(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 5830 - (unsigned long)phba); 5835 + timer_setup(&phba->fcf.redisc_wait, 
lpfc_sli4_fcf_redisc_wait_tmo, 0); 5831 5836 5832 5837 /* 5833 5838 * Control structure for handling external multi-buffer mailbox
+2 -2
drivers/scsi/lpfc/lpfc_scsi.c
··· 4501 4501 * and FCP Ring interrupt is disable. 4502 4502 **/ 4503 4503 4504 - void lpfc_poll_timeout(unsigned long ptr) 4504 + void lpfc_poll_timeout(struct timer_list *t) 4505 4505 { 4506 - struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 4506 + struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer); 4507 4507 4508 4508 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 4509 4509 lpfc_sli_handle_fast_ring_event(phba,
+4 -4
drivers/scsi/lpfc/lpfc_sli.c
··· 3004 3004 * and wake up worker thread to process it. Otherwise, it will set up the 3005 3005 * Error Attention polling timer for the next poll. 3006 3006 **/ 3007 - void lpfc_poll_eratt(unsigned long ptr) 3007 + void lpfc_poll_eratt(struct timer_list *t) 3008 3008 { 3009 3009 struct lpfc_hba *phba; 3010 3010 uint32_t eratt = 0; 3011 3011 uint64_t sli_intr, cnt; 3012 3012 3013 - phba = (struct lpfc_hba *)ptr; 3013 + phba = from_timer(phba, t, eratt_poll); 3014 3014 3015 3015 /* Here we will also keep track of interrupts per sec of the hba */ 3016 3016 sli_intr = phba->sli.slistat.sli_intr; ··· 7167 7167 * done by the worker thread function lpfc_mbox_timeout_handler. 7168 7168 **/ 7169 7169 void 7170 - lpfc_mbox_timeout(unsigned long ptr) 7170 + lpfc_mbox_timeout(struct timer_list *t) 7171 7171 { 7172 - struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 7172 + struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo); 7173 7173 unsigned long iflag; 7174 7174 uint32_t tmo_posted; 7175 7175
+6
drivers/scsi/megaraid/megaraid_ioctl.h
··· 19 19 20 20 #include <linux/types.h> 21 21 #include <linux/semaphore.h> 22 + #include <linux/timer.h> 22 23 23 24 #include "mbox_defs.h" 24 25 ··· 154 153 155 154 } __attribute__ ((aligned(1024),packed)) uioc_t; 156 155 156 + /* For on-stack uioc timers. */ 157 + struct uioc_timeout { 158 + struct timer_list timer; 159 + uioc_t *uioc; 160 + }; 157 161 158 162 /** 159 163 * struct mraid_hba_info - information about the controller
+12 -14
drivers/scsi/megaraid/megaraid_mbox.c
··· 3904 3904 wake_up(&raid_dev->sysfs_wait_q); 3905 3905 } 3906 3906 3907 - 3908 3907 /** 3909 3908 * megaraid_sysfs_get_ldmap_timeout - timeout handling for get ldmap 3910 - * @data : timed out packet 3909 + * @t : timed out timer 3911 3910 * 3912 3911 * Timeout routine to recover and return to application, in case the adapter 3913 3912 * has stopped responding. A timeout of 60 seconds for this command seems like 3914 3913 * a good value. 3915 3914 */ 3916 3915 static void 3917 - megaraid_sysfs_get_ldmap_timeout(unsigned long data) 3916 + megaraid_sysfs_get_ldmap_timeout(struct timer_list *t) 3918 3917 { 3919 - uioc_t *uioc = (uioc_t *)data; 3918 + struct uioc_timeout *timeout = from_timer(timeout, t, timer); 3919 + uioc_t *uioc = timeout->uioc; 3920 3920 adapter_t *adapter = (adapter_t *)uioc->buf_vaddr; 3921 3921 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); 3922 3922 ··· 3951 3951 mbox64_t *mbox64; 3952 3952 mbox_t *mbox; 3953 3953 char *raw_mbox; 3954 - struct timer_list sysfs_timer; 3955 - struct timer_list *timerp; 3954 + struct uioc_timeout timeout; 3956 3955 caddr_t ldmap; 3957 3956 int rval = 0; 3958 3957 ··· 3987 3988 /* 3988 3989 * Setup a timer to recover from a non-responding controller 3989 3990 */ 3990 - timerp = &sysfs_timer; 3991 - init_timer(timerp); 3991 + timeout.uioc = uioc; 3992 + timer_setup_on_stack(&timeout.timer, 3993 + megaraid_sysfs_get_ldmap_timeout, 0); 3992 3994 3993 - timerp->function = megaraid_sysfs_get_ldmap_timeout; 3994 - timerp->data = (unsigned long)uioc; 3995 - timerp->expires = jiffies + 60 * HZ; 3996 - 3997 - add_timer(timerp); 3995 + timeout.timer.expires = jiffies + 60 * HZ; 3996 + add_timer(&timeout.timer); 3998 3997 3999 3998 /* 4000 3999 * Send the command to the firmware ··· 4030 4033 } 4031 4034 4032 4035 4033 - del_timer_sync(timerp); 4036 + del_timer_sync(&timeout.timer); 4037 + destroy_timer_on_stack(&timeout.timer); 4034 4038 4035 4039 mutex_unlock(&raid_dev->sysfs_mtx); 4036 4040
+13 -14
drivers/scsi/megaraid/megaraid_mm.c
··· 35 35 static int handle_drvrcmd(void __user *, uint8_t, int *); 36 36 static int lld_ioctl(mraid_mmadp_t *, uioc_t *); 37 37 static void ioctl_done(uioc_t *); 38 - static void lld_timedout(unsigned long); 38 + static void lld_timedout(struct timer_list *); 39 39 static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *); 40 40 static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *); 41 41 static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *); ··· 686 686 lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc) 687 687 { 688 688 int rval; 689 - struct timer_list timer; 690 - struct timer_list *tp = NULL; 689 + struct uioc_timeout timeout = { }; 691 690 692 691 kioc->status = -ENODATA; 693 692 rval = adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE); ··· 697 698 * Start the timer 698 699 */ 699 700 if (adp->timeout > 0) { 700 - tp = &timer; 701 - init_timer(tp); 701 + timeout.uioc = kioc; 702 + timer_setup_on_stack(&timeout.timer, lld_timedout, 0); 702 703 703 - tp->function = lld_timedout; 704 - tp->data = (unsigned long)kioc; 705 - tp->expires = jiffies + adp->timeout * HZ; 704 + timeout.timer.expires = jiffies + adp->timeout * HZ; 706 705 707 - add_timer(tp); 706 + add_timer(&timeout.timer); 708 707 } 709 708 710 709 /* ··· 710 713 * call, the ioctl either completed successfully or timedout. 
711 714 */ 712 715 wait_event(wait_q, (kioc->status != -ENODATA)); 713 - if (tp) { 714 - del_timer_sync(tp); 716 + if (timeout.timer.function) { 717 + del_timer_sync(&timeout.timer); 718 + destroy_timer_on_stack(&timeout.timer); 715 719 } 716 720 717 721 /* ··· 781 783 782 784 /** 783 785 * lld_timedout - callback from the expired timer 784 - * @ptr : ioctl packet that timed out 786 + * @t : timer that timed out 785 787 */ 786 788 static void 787 - lld_timedout(unsigned long ptr) 789 + lld_timedout(struct timer_list *t) 788 790 { 789 - uioc_t *kioc = (uioc_t *)ptr; 791 + struct uioc_timeout *timeout = from_timer(timeout, t, timer); 792 + uioc_t *kioc = timeout->uioc; 790 793 791 794 kioc->status = -ETIME; 792 795 kioc->timedout = 1;
+12 -21
drivers/scsi/megaraid/megaraid_sas_base.c
··· 2114 2114 megasas_check_and_restore_queue_depth(instance); 2115 2115 } 2116 2116 2117 + static void megasas_sriov_heartbeat_handler(struct timer_list *t); 2118 + 2117 2119 /** 2118 - * megasas_start_timer - Initializes a timer object 2120 + * megasas_start_timer - Initializes sriov heartbeat timer object 2119 2121 * @instance: Adapter soft state 2120 - * @timer: timer object to be initialized 2121 - * @fn: timer function 2122 - * @interval: time interval between timer function call 2123 2122 * 2124 2123 */ 2125 - void megasas_start_timer(struct megasas_instance *instance, 2126 - struct timer_list *timer, 2127 - void *fn, unsigned long interval) 2124 + void megasas_start_timer(struct megasas_instance *instance) 2128 2125 { 2129 - init_timer(timer); 2130 - timer->expires = jiffies + interval; 2131 - timer->data = (unsigned long)instance; 2132 - timer->function = fn; 2126 + struct timer_list *timer = &instance->sriov_heartbeat_timer; 2127 + 2128 + timer_setup(timer, megasas_sriov_heartbeat_handler, 0); 2129 + timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF; 2133 2130 add_timer(timer); 2134 2131 } 2135 2132 ··· 2512 2515 } 2513 2516 2514 2517 /* Handler for SR-IOV heartbeat */ 2515 - void megasas_sriov_heartbeat_handler(unsigned long instance_addr) 2518 + static void megasas_sriov_heartbeat_handler(struct timer_list *t) 2516 2519 { 2517 2520 struct megasas_instance *instance = 2518 - (struct megasas_instance *)instance_addr; 2521 + from_timer(instance, t, sriov_heartbeat_timer); 2519 2522 2520 2523 if (instance->hb_host_mem->HB.fwCounter != 2521 2524 instance->hb_host_mem->HB.driverCounter) { ··· 5490 5493 /* Launch SR-IOV heartbeat timer */ 5491 5494 if (instance->requestorId) { 5492 5495 if (!megasas_sriov_start_heartbeat(instance, 1)) 5493 - megasas_start_timer(instance, 5494 - &instance->sriov_heartbeat_timer, 5495 - megasas_sriov_heartbeat_handler, 5496 - MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 5496 + megasas_start_timer(instance); 5497 5497 else 
5498 5498 instance->skip_heartbeat_timer_del = 1; 5499 5499 } ··· 6501 6507 /* Re-launch SR-IOV heartbeat timer */ 6502 6508 if (instance->requestorId) { 6503 6509 if (!megasas_sriov_start_heartbeat(instance, 0)) 6504 - megasas_start_timer(instance, 6505 - &instance->sriov_heartbeat_timer, 6506 - megasas_sriov_heartbeat_handler, 6507 - MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 6510 + megasas_start_timer(instance); 6508 6511 else { 6509 6512 instance->skip_heartbeat_timer_del = 1; 6510 6513 goto fail_init_mfi;
+3 -12
drivers/scsi/megaraid/megaraid_sas_fusion.c
··· 85 85 void megaraid_sas_kill_hba(struct megasas_instance *instance); 86 86 87 87 extern u32 megasas_dbg_lvl; 88 - void megasas_sriov_heartbeat_handler(unsigned long instance_addr); 89 88 int megasas_sriov_start_heartbeat(struct megasas_instance *instance, 90 89 int initial); 91 - void megasas_start_timer(struct megasas_instance *instance, 92 - struct timer_list *timer, 93 - void *fn, unsigned long interval); 90 + void megasas_start_timer(struct megasas_instance *instance); 94 91 extern struct megasas_mgmt_info megasas_mgmt_info; 95 92 extern unsigned int resetwaittime; 96 93 extern unsigned int dual_qdepth_disable; ··· 4366 4369 /* Restart SR-IOV heartbeat */ 4367 4370 if (instance->requestorId) { 4368 4371 if (!megasas_sriov_start_heartbeat(instance, 0)) 4369 - megasas_start_timer(instance, 4370 - &instance->sriov_heartbeat_timer, 4371 - megasas_sriov_heartbeat_handler, 4372 - MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 4372 + megasas_start_timer(instance); 4373 4373 else 4374 4374 instance->skip_heartbeat_timer_del = 1; 4375 4375 } ··· 4398 4404 } else { 4399 4405 /* For VF: Restart HB timer if we didn't OCR */ 4400 4406 if (instance->requestorId) { 4401 - megasas_start_timer(instance, 4402 - &instance->sriov_heartbeat_timer, 4403 - megasas_sriov_heartbeat_handler, 4404 - MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); 4407 + megasas_start_timer(instance); 4405 4408 } 4406 4409 clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); 4407 4410 instance->instancet->enable_intr(instance);
+1 -2
drivers/scsi/mvsas/mv_init.c
··· 95 95 96 96 phy->mvi = mvi; 97 97 phy->port = NULL; 98 - init_timer(&phy->timer); 98 + timer_setup(&phy->timer, NULL, 0); 99 99 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0; 100 100 sas_phy->class = SAS; 101 101 sas_phy->iproto = SAS_PROTOCOL_ALL; ··· 248 248 mvi->devices[i].dev_type = SAS_PHY_UNUSED; 249 249 mvi->devices[i].device_id = i; 250 250 mvi->devices[i].dev_status = MVS_DEV_NORMAL; 251 - init_timer(&mvi->devices[i].timer); 252 251 } 253 252 254 253 /*
+7 -8
drivers/scsi/mvsas/mv_sas.c
··· 1283 1283 complete(&task->slow_task->completion); 1284 1284 } 1285 1285 1286 - static void mvs_tmf_timedout(unsigned long data) 1286 + static void mvs_tmf_timedout(struct timer_list *t) 1287 1287 { 1288 - struct sas_task *task = (struct sas_task *)data; 1288 + struct sas_task_slow *slow = from_timer(slow, t, timer); 1289 + struct sas_task *task = slow->task; 1289 1290 1290 1291 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 1291 1292 complete(&task->slow_task->completion); ··· 1310 1309 memcpy(&task->ssp_task, parameter, para_len); 1311 1310 task->task_done = mvs_task_done; 1312 1311 1313 - task->slow_task->timer.data = (unsigned long) task; 1314 - task->slow_task->timer.function = mvs_tmf_timedout; 1312 + task->slow_task->timer.function = (TIMER_FUNC_TYPE)mvs_tmf_timedout; 1315 1313 task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ; 1316 1314 add_timer(&task->slow_task->timer); 1317 1315 ··· 1954 1954 return ret; 1955 1955 } 1956 1956 1957 - static void mvs_sig_time_out(unsigned long tphy) 1957 + static void mvs_sig_time_out(struct timer_list *t) 1958 1958 { 1959 - struct mvs_phy *phy = (struct mvs_phy *)tphy; 1959 + struct mvs_phy *phy = from_timer(phy, t, timer); 1960 1960 struct mvs_info *mvi = phy->mvi; 1961 1961 u8 phy_no; 1962 1962 ··· 2020 2020 MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no, 2021 2021 tmp | PHYEV_SIG_FIS); 2022 2022 if (phy->timer.function == NULL) { 2023 - phy->timer.data = (unsigned long)phy; 2024 - phy->timer.function = mvs_sig_time_out; 2023 + phy->timer.function = (TIMER_FUNC_TYPE)mvs_sig_time_out; 2025 2024 phy->timer.expires = jiffies + 5*HZ; 2026 2025 add_timer(&phy->timer); 2027 2026 }
-1
drivers/scsi/mvsas/mv_sas.h
··· 247 247 enum sas_device_type dev_type; 248 248 struct mvs_info *mvi_info; 249 249 struct domain_device *sas_device; 250 - struct timer_list timer; 251 250 u32 attached_phy; 252 251 u32 device_id; 253 252 u32 running_req;
+5 -6
drivers/scsi/pm8001/pm8001_sas.c
··· 656 656 complete(&task->slow_task->completion); 657 657 } 658 658 659 - static void pm8001_tmf_timedout(unsigned long data) 659 + static void pm8001_tmf_timedout(struct timer_list *t) 660 660 { 661 - struct sas_task *task = (struct sas_task *)data; 661 + struct sas_task_slow *slow = from_timer(slow, t, timer); 662 + struct sas_task *task = slow->task; 662 663 663 664 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 664 665 complete(&task->slow_task->completion); ··· 695 694 task->task_proto = dev->tproto; 696 695 memcpy(&task->ssp_task, parameter, para_len); 697 696 task->task_done = pm8001_task_done; 698 - task->slow_task->timer.data = (unsigned long)task; 699 - task->slow_task->timer.function = pm8001_tmf_timedout; 697 + task->slow_task->timer.function = (TIMER_FUNC_TYPE)pm8001_tmf_timedout; 700 698 task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ; 701 699 add_timer(&task->slow_task->timer); 702 700 ··· 781 781 task->dev = dev; 782 782 task->task_proto = dev->tproto; 783 783 task->task_done = pm8001_task_done; 784 - task->slow_task->timer.data = (unsigned long)task; 785 - task->slow_task->timer.function = pm8001_tmf_timedout; 784 + task->slow_task->timer.function = (TIMER_FUNC_TYPE)pm8001_tmf_timedout; 786 785 task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ; 787 786 add_timer(&task->slow_task->timer); 788 787
+13 -20
drivers/scsi/pmcraid.c
··· 348 348 cmd->sense_buffer = NULL; 349 349 cmd->sense_buffer_dma = 0; 350 350 cmd->dma_handle = 0; 351 - init_timer(&cmd->timer); 351 + timer_setup(&cmd->timer, NULL, 0); 352 352 } 353 353 354 354 /** ··· 557 557 558 558 static void pmcraid_ioa_reset(struct pmcraid_cmd *); 559 559 560 - static void pmcraid_bist_done(struct pmcraid_cmd *cmd) 560 + static void pmcraid_bist_done(struct timer_list *t) 561 561 { 562 + struct pmcraid_cmd *cmd = from_timer(cmd, t, timer); 562 563 struct pmcraid_instance *pinstance = cmd->drv_inst; 563 564 unsigned long lock_flags; 564 565 int rc; ··· 573 572 pmcraid_info("BIST not complete, waiting another 2 secs\n"); 574 573 cmd->timer.expires = jiffies + cmd->time_left; 575 574 cmd->time_left = 0; 576 - cmd->timer.data = (unsigned long)cmd; 577 - cmd->timer.function = 578 - (void (*)(unsigned long))pmcraid_bist_done; 579 575 add_timer(&cmd->timer); 580 576 } else { 581 577 cmd->time_left = 0; ··· 603 605 doorbells, intrs); 604 606 605 607 cmd->time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT); 606 - cmd->timer.data = (unsigned long)cmd; 607 608 cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT); 608 - cmd->timer.function = (void (*)(unsigned long))pmcraid_bist_done; 609 + cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_bist_done; 609 610 add_timer(&cmd->timer); 610 611 } 611 612 ··· 614 617 * Return value 615 618 * None 616 619 */ 617 - static void pmcraid_reset_alert_done(struct pmcraid_cmd *cmd) 620 + static void pmcraid_reset_alert_done(struct timer_list *t) 618 621 { 622 + struct pmcraid_cmd *cmd = from_timer(cmd, t, timer); 619 623 struct pmcraid_instance *pinstance = cmd->drv_inst; 620 624 u32 status = ioread32(pinstance->ioa_status); 621 625 unsigned long lock_flags; ··· 635 637 pmcraid_info("critical op is not yet reset waiting again\n"); 636 638 /* restart timer if some more time is available to wait */ 637 639 cmd->time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT; 638 - cmd->timer.data = (unsigned long)cmd; 
639 640 cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT; 640 - cmd->timer.function = 641 - (void (*)(unsigned long))pmcraid_reset_alert_done; 641 + cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_reset_alert_done; 642 642 add_timer(&cmd->timer); 643 643 } 644 644 } ··· 672 676 * bit to be reset. 673 677 */ 674 678 cmd->time_left = PMCRAID_RESET_TIMEOUT; 675 - cmd->timer.data = (unsigned long)cmd; 676 679 cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT; 677 - cmd->timer.function = 678 - (void (*)(unsigned long))pmcraid_reset_alert_done; 680 + cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_reset_alert_done; 679 681 add_timer(&cmd->timer); 680 682 681 683 iowrite32(DOORBELL_IOA_RESET_ALERT, ··· 698 704 * Return value: 699 705 * None 700 706 */ 701 - static void pmcraid_timeout_handler(struct pmcraid_cmd *cmd) 707 + static void pmcraid_timeout_handler(struct timer_list *t) 702 708 { 709 + struct pmcraid_cmd *cmd = from_timer(cmd, t, timer); 703 710 struct pmcraid_instance *pinstance = cmd->drv_inst; 704 711 unsigned long lock_flags; 705 712 ··· 914 919 struct pmcraid_cmd *cmd, 915 920 void (*cmd_done) (struct pmcraid_cmd *), 916 921 unsigned long timeout, 917 - void (*timeout_func) (struct pmcraid_cmd *) 922 + void (*timeout_func) (struct timer_list *) 918 923 ) 919 924 { 920 925 /* initialize done function */ ··· 922 927 923 928 if (timeout_func) { 924 929 /* setup timeout handler */ 925 - cmd->timer.data = (unsigned long)cmd; 926 930 cmd->timer.expires = jiffies + timeout; 927 - cmd->timer.function = (void (*)(unsigned long))timeout_func; 931 + cmd->timer.function = (TIMER_FUNC_TYPE)timeout_func; 928 932 add_timer(&cmd->timer); 929 933 } 930 934 ··· 1949 1955 * would re-initiate a reset 1950 1956 */ 1951 1957 cmd->cmd_done = pmcraid_ioa_reset; 1952 - cmd->timer.data = (unsigned long)cmd; 1953 1958 cmd->timer.expires = jiffies + 1954 1959 msecs_to_jiffies(PMCRAID_TRANSOP_TIMEOUT); 1955 - cmd->timer.function = (void (*)(unsigned 
long))pmcraid_timeout_handler; 1960 + cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_timeout_handler; 1956 1961 1957 1962 if (!timer_pending(&cmd->timer)) 1958 1963 add_timer(&cmd->timer);
+5 -9
drivers/scsi/qla1280.c
··· 758 758 }; 759 759 760 760 761 - static void qla1280_mailbox_timeout(unsigned long __data) 761 + static void qla1280_mailbox_timeout(struct timer_list *t) 762 762 { 763 - struct scsi_qla_host *ha = (struct scsi_qla_host *)__data; 763 + struct scsi_qla_host *ha = from_timer(ha, t, mailbox_timer); 764 764 struct device_reg __iomem *reg; 765 765 reg = ha->iobase; 766 766 ··· 2465 2465 uint16_t __iomem *mptr; 2466 2466 uint16_t data; 2467 2467 DECLARE_COMPLETION_ONSTACK(wait); 2468 - struct timer_list timer; 2469 2468 2470 2469 ENTER("qla1280_mailbox_command"); 2471 2470 ··· 2493 2494 /* Issue set host interrupt command. */ 2494 2495 2495 2496 /* set up a timer just in case we're really jammed */ 2496 - init_timer_on_stack(&timer); 2497 - timer.expires = jiffies + 20*HZ; 2498 - timer.data = (unsigned long)ha; 2499 - timer.function = qla1280_mailbox_timeout; 2500 - add_timer(&timer); 2497 + timer_setup(&ha->mailbox_timer, qla1280_mailbox_timeout, 0); 2498 + mod_timer(&ha->mailbox_timer, jiffies + 20 * HZ); 2501 2499 2502 2500 spin_unlock_irq(ha->host->host_lock); 2503 2501 WRT_REG_WORD(&reg->host_cmd, HC_SET_HOST_INT); 2504 2502 data = qla1280_debounce_register(&reg->istatus); 2505 2503 2506 2504 wait_for_completion(&wait); 2507 - del_timer_sync(&timer); 2505 + del_timer_sync(&ha->mailbox_timer); 2508 2506 2509 2507 spin_lock_irq(ha->host->host_lock); 2510 2508
+1
drivers/scsi/qla1280.h
··· 1055 1055 struct list_head done_q; /* Done queue */ 1056 1056 1057 1057 struct completion *mailbox_wait; 1058 + struct timer_list mailbox_timer; 1058 1059 1059 1060 volatile struct { 1060 1061 uint32_t online:1; /* 0 */
+3 -3
drivers/scsi/qla2xxx/qla_gbl.h
··· 206 206 */ 207 207 extern struct scsi_host_template qla2xxx_driver_template; 208 208 extern struct scsi_transport_template *qla2xxx_transport_vport_template; 209 - extern void qla2x00_timer(scsi_qla_host_t *); 210 - extern void qla2x00_start_timer(scsi_qla_host_t *, void *, unsigned long); 209 + extern void qla2x00_timer(struct timer_list *); 210 + extern void qla2x00_start_timer(scsi_qla_host_t *, unsigned long); 211 211 extern void qla24xx_deallocate_vp_id(scsi_qla_host_t *); 212 212 extern int qla24xx_disable_vp (scsi_qla_host_t *); 213 213 extern int qla24xx_enable_vp (scsi_qla_host_t *); ··· 753 753 /* IOCB related functions */ 754 754 extern int qla82xx_start_scsi(srb_t *); 755 755 extern void qla2x00_sp_free(void *); 756 - extern void qla2x00_sp_timeout(unsigned long); 756 + extern void qla2x00_sp_timeout(struct timer_list *); 757 757 extern void qla2x00_bsg_job_done(void *, int); 758 758 extern void qla2x00_bsg_sp_free(void *); 759 759 extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
+2 -2
drivers/scsi/qla2xxx/qla_init.c
··· 45 45 /* SRB Extensions ---------------------------------------------------------- */ 46 46 47 47 void 48 - qla2x00_sp_timeout(unsigned long __data) 48 + qla2x00_sp_timeout(struct timer_list *t) 49 49 { 50 - srb_t *sp = (srb_t *)__data; 50 + srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer); 51 51 struct srb_iocb *iocb; 52 52 scsi_qla_host_t *vha = sp->vha; 53 53 struct req_que *req;
+1 -3
drivers/scsi/qla2xxx/qla_inline.h
··· 269 269 static inline void 270 270 qla2x00_init_timer(srb_t *sp, unsigned long tmo) 271 271 { 272 - init_timer(&sp->u.iocb_cmd.timer); 272 + timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0); 273 273 sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ; 274 - sp->u.iocb_cmd.timer.data = (unsigned long)sp; 275 - sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout; 276 274 add_timer(&sp->u.iocb_cmd.timer); 277 275 sp->free = qla2x00_sp_free; 278 276 if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD))
+1 -1
drivers/scsi/qla2xxx/qla_mid.c
··· 487 487 atomic_set(&vha->loop_state, LOOP_DOWN); 488 488 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 489 489 490 - qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); 490 + qla2x00_start_timer(vha, WATCH_INTERVAL); 491 491 492 492 vha->req = base_vha->req; 493 493 host->can_queue = base_vha->req->length + 128;
+5 -6
drivers/scsi/qla2xxx/qla_os.c
··· 330 330 */ 331 331 332 332 __inline__ void 333 - qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval) 333 + qla2x00_start_timer(scsi_qla_host_t *vha, unsigned long interval) 334 334 { 335 - init_timer(&vha->timer); 335 + timer_setup(&vha->timer, qla2x00_timer, 0); 336 336 vha->timer.expires = jiffies + interval * HZ; 337 - vha->timer.data = (unsigned long)vha; 338 - vha->timer.function = (void (*)(unsigned long))func; 339 337 add_timer(&vha->timer); 340 338 vha->timer_active = 1; 341 339 } ··· 3245 3247 base_vha->host->irq = ha->pdev->irq; 3246 3248 3247 3249 /* Initialized the timer */ 3248 - qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL); 3250 + qla2x00_start_timer(base_vha, WATCH_INTERVAL); 3249 3251 ql_dbg(ql_dbg_init, base_vha, 0x00ef, 3250 3252 "Started qla2x00_timer with " 3251 3253 "interval=%d.\n", WATCH_INTERVAL); ··· 5994 5996 * Context: Interrupt 5995 5997 ***************************************************************************/ 5996 5998 void 5997 - qla2x00_timer(scsi_qla_host_t *vha) 5999 + qla2x00_timer(struct timer_list *t) 5998 6000 { 6001 + scsi_qla_host_t *vha = from_timer(vha, t, timer); 5999 6002 unsigned long cpu_flags = 0; 6000 6003 int start_dpc = 0; 6001 6004 int index;
+6 -6
drivers/scsi/qla4xxx/ql4_os.c
··· 3955 3955 /* 3956 3956 * Timer routines 3957 3957 */ 3958 + static void qla4xxx_timer(struct timer_list *t); 3958 3959 3959 - static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func, 3960 + static void qla4xxx_start_timer(struct scsi_qla_host *ha, 3960 3961 unsigned long interval) 3961 3962 { 3962 3963 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n", 3963 3964 __func__, ha->host->host_no)); 3964 - init_timer(&ha->timer); 3965 + timer_setup(&ha->timer, qla4xxx_timer, 0); 3965 3966 ha->timer.expires = jiffies + interval * HZ; 3966 - ha->timer.data = (unsigned long)ha; 3967 - ha->timer.function = (void (*)(unsigned long))func; 3968 3967 add_timer(&ha->timer); 3969 3968 ha->timer_active = 1; 3970 3969 } ··· 4507 4508 * qla4xxx_timer - checks every second for work to do. 4508 4509 * @ha: Pointer to host adapter structure. 4509 4510 **/ 4510 - static void qla4xxx_timer(struct scsi_qla_host *ha) 4511 + static void qla4xxx_timer(struct timer_list *t) 4511 4512 { 4513 + struct scsi_qla_host *ha = from_timer(ha, t, timer); 4512 4514 int start_dpc = 0; 4513 4515 uint16_t w; 4514 4516 ··· 8805 8805 ha->isp_ops->enable_intrs(ha); 8806 8806 8807 8807 /* Start timer thread. */ 8808 - qla4xxx_start_timer(ha, qla4xxx_timer, 1); 8808 + qla4xxx_start_timer(ha, 1); 8809 8809 8810 8810 set_bit(AF_INIT_DONE, &ha->flags); 8811 8811
+4 -5
drivers/scsi/smartpqi/smartpqi_init.c
··· 2860 2860 2861 2861 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ) 2862 2862 2863 - static void pqi_heartbeat_timer_handler(unsigned long data) 2863 + static void pqi_heartbeat_timer_handler(struct timer_list *t) 2864 2864 { 2865 2865 int num_interrupts; 2866 2866 u32 heartbeat_count; 2867 - struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data; 2867 + struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, 2868 + heartbeat_timer); 2868 2869 2869 2870 pqi_check_ctrl_health(ctrl_info); 2870 2871 if (pqi_ctrl_offline(ctrl_info)) ··· 2903 2902 2904 2903 ctrl_info->heartbeat_timer.expires = 2905 2904 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL; 2906 - ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info; 2907 - ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler; 2908 2905 add_timer(&ctrl_info->heartbeat_timer); 2909 2906 } 2910 2907 ··· 6464 6465 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); 6465 6466 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); 6466 6467 6467 - init_timer(&ctrl_info->heartbeat_timer); 6468 + timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); 6468 6469 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); 6469 6470 6470 6471 sema_init(&ctrl_info->sync_request_sem,
+1 -1
drivers/staging/speakup/main.c
··· 1165 1165 1166 1166 static void read_all_doc(struct vc_data *vc); 1167 1167 static void cursor_done(u_long data); 1168 - static DEFINE_TIMER(cursor_timer, cursor_done, 0, 0); 1168 + static DEFINE_TIMER(cursor_timer, cursor_done); 1169 1169 1170 1170 static void do_handle_shift(struct vc_data *vc, u_char value, char up_flag) 1171 1171 {
+1 -1
drivers/staging/speakup/synth.c
··· 158 158 wake_up_interruptible_all(&speakup_event); 159 159 } 160 160 161 - static DEFINE_TIMER(thread_timer, thread_wake_up, 0, 0); 161 + static DEFINE_TIMER(thread_timer, thread_wake_up); 162 162 163 163 void synth_start(void) 164 164 {
+1 -3
drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
··· 267 267 last_scanned_shadow[i].time_scan = jiffies; 268 268 } 269 269 270 - static void remove_network_from_shadow(unsigned long arg) 270 + static void remove_network_from_shadow(unsigned long unused) 271 271 { 272 272 unsigned long now = jiffies; 273 273 int i, j; ··· 288 288 } 289 289 290 290 if (last_scanned_cnt != 0) { 291 - hAgingTimer.data = arg; 292 291 mod_timer(&hAgingTimer, jiffies + msecs_to_jiffies(AGING_TIME)); 293 292 } 294 293 } ··· 304 305 int i; 305 306 306 307 if (last_scanned_cnt == 0) { 307 - hAgingTimer.data = (unsigned long)user_void; 308 308 mod_timer(&hAgingTimer, jiffies + msecs_to_jiffies(AGING_TIME)); 309 309 state = -1; 310 310 } else {
+2
drivers/target/iscsi/iscsi_target.c
··· 372 372 init_completion(&np->np_restart_comp); 373 373 INIT_LIST_HEAD(&np->np_list); 374 374 375 + timer_setup(&np->np_login_timer, iscsi_handle_login_thread_timeout, 0); 376 + 375 377 ret = iscsi_target_setup_login_socket(np, sockaddr); 376 378 if (ret != 0) { 377 379 kfree(np);
+4 -8
drivers/target/iscsi/iscsi_target_erl0.c
··· 749 749 } 750 750 } 751 751 752 - static void iscsit_handle_time2retain_timeout(unsigned long data) 752 + void iscsit_handle_time2retain_timeout(struct timer_list *t) 753 753 { 754 - struct iscsi_session *sess = (struct iscsi_session *) data; 754 + struct iscsi_session *sess = from_timer(sess, t, time2retain_timer); 755 755 struct iscsi_portal_group *tpg = sess->tpg; 756 756 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 757 757 ··· 809 809 pr_debug("Starting Time2Retain timer for %u seconds on" 810 810 " SID: %u\n", sess->sess_ops->DefaultTime2Retain, sess->sid); 811 811 812 - init_timer(&sess->time2retain_timer); 813 - sess->time2retain_timer.expires = 814 - (get_jiffies_64() + sess->sess_ops->DefaultTime2Retain * HZ); 815 - sess->time2retain_timer.data = (unsigned long)sess; 816 - sess->time2retain_timer.function = iscsit_handle_time2retain_timeout; 817 812 sess->time2retain_timer_flags &= ~ISCSI_TF_STOP; 818 813 sess->time2retain_timer_flags |= ISCSI_TF_RUNNING; 819 - add_timer(&sess->time2retain_timer); 814 + mod_timer(&sess->time2retain_timer, 815 + jiffies + sess->sess_ops->DefaultTime2Retain * HZ); 820 816 } 821 817 822 818 /*
+1
drivers/target/iscsi/iscsi_target_erl0.h
··· 12 12 extern int iscsit_check_pre_dataout(struct iscsi_cmd *, unsigned char *); 13 13 extern int iscsit_check_post_dataout(struct iscsi_cmd *, unsigned char *, u8); 14 14 extern void iscsit_start_time2retain_handler(struct iscsi_session *); 15 + extern void iscsit_handle_time2retain_timeout(struct timer_list *t); 15 16 extern int iscsit_stop_time2retain_timer(struct iscsi_session *); 16 17 extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *); 17 18 extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
+3 -7
drivers/target/iscsi/iscsi_target_erl1.c
··· 1148 1148 /* 1149 1149 * NOTE: Called from interrupt (timer) context. 1150 1150 */ 1151 - static void iscsit_handle_dataout_timeout(unsigned long data) 1151 + void iscsit_handle_dataout_timeout(struct timer_list *t) 1152 1152 { 1153 1153 u32 pdu_length = 0, pdu_offset = 0; 1154 1154 u32 r2t_length = 0, r2t_offset = 0; 1155 - struct iscsi_cmd *cmd = (struct iscsi_cmd *) data; 1155 + struct iscsi_cmd *cmd = from_timer(cmd, t, dataout_timer); 1156 1156 struct iscsi_conn *conn = cmd->conn; 1157 1157 struct iscsi_session *sess = NULL; 1158 1158 struct iscsi_node_attrib *na; ··· 1264 1264 pr_debug("Starting DataOUT timer for ITT: 0x%08x on" 1265 1265 " CID: %hu.\n", cmd->init_task_tag, conn->cid); 1266 1266 1267 - init_timer(&cmd->dataout_timer); 1268 - cmd->dataout_timer.expires = (get_jiffies_64() + na->dataout_timeout * HZ); 1269 - cmd->dataout_timer.data = (unsigned long)cmd; 1270 - cmd->dataout_timer.function = iscsit_handle_dataout_timeout; 1271 1267 cmd->dataout_timer_flags &= ~ISCSI_TF_STOP; 1272 1268 cmd->dataout_timer_flags |= ISCSI_TF_RUNNING; 1273 - add_timer(&cmd->dataout_timer); 1269 + mod_timer(&cmd->dataout_timer, jiffies + na->dataout_timeout * HZ); 1274 1270 } 1275 1271 1276 1272 void iscsit_stop_dataout_timer(struct iscsi_cmd *cmd)
+1
drivers/target/iscsi/iscsi_target_erl1.h
··· 30 30 extern int iscsit_execute_cmd(struct iscsi_cmd *, int); 31 31 extern int iscsit_handle_ooo_cmdsn(struct iscsi_session *, struct iscsi_cmd *, u32); 32 32 extern void iscsit_remove_ooo_cmdsn(struct iscsi_session *, struct iscsi_ooo_cmdsn *); 33 + extern void iscsit_handle_dataout_timeout(struct timer_list *t); 33 34 extern void iscsit_mod_dataout_timer(struct iscsi_cmd *); 34 35 extern void iscsit_start_dataout_timer(struct iscsi_cmd *, struct iscsi_conn *); 35 36 extern void iscsit_stop_dataout_timer(struct iscsi_cmd *);
+10 -7
drivers/target/iscsi/iscsi_target_login.c
··· 333 333 spin_lock_init(&sess->session_usage_lock); 334 334 spin_lock_init(&sess->ttt_lock); 335 335 336 + timer_setup(&sess->time2retain_timer, 337 + iscsit_handle_time2retain_timeout, 0); 338 + 336 339 idr_preload(GFP_KERNEL); 337 340 spin_lock_bh(&sess_idr_lock); 338 341 ret = idr_alloc(&sess_idr, NULL, 0, 0, GFP_NOWAIT); ··· 842 839 iscsit_dec_conn_usage_count(conn); 843 840 } 844 841 845 - static void iscsi_handle_login_thread_timeout(unsigned long data) 842 + void iscsi_handle_login_thread_timeout(struct timer_list *t) 846 843 { 847 - struct iscsi_np *np = (struct iscsi_np *) data; 844 + struct iscsi_np *np = from_timer(np, t, np_login_timer); 848 845 849 846 spin_lock_bh(&np->np_thread_lock); 850 847 pr_err("iSCSI Login timeout on Network Portal %pISpc\n", ··· 869 866 * point we do not have access to ISCSI_TPG_ATTRIB(tpg)->login_timeout 870 867 */ 871 868 spin_lock_bh(&np->np_thread_lock); 872 - init_timer(&np->np_login_timer); 873 - np->np_login_timer.expires = (get_jiffies_64() + TA_LOGIN_TIMEOUT * HZ); 874 - np->np_login_timer.data = (unsigned long)np; 875 - np->np_login_timer.function = iscsi_handle_login_thread_timeout; 876 869 np->np_login_timer_flags &= ~ISCSI_TF_STOP; 877 870 np->np_login_timer_flags |= ISCSI_TF_RUNNING; 878 - add_timer(&np->np_login_timer); 871 + mod_timer(&np->np_login_timer, jiffies + TA_LOGIN_TIMEOUT * HZ); 879 872 880 873 pr_debug("Added timeout timer to iSCSI login request for" 881 874 " %u seconds.\n", TA_LOGIN_TIMEOUT); ··· 1264 1265 } 1265 1266 pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); 1266 1267 conn->conn_state = TARG_CONN_STATE_FREE; 1268 + 1269 + timer_setup(&conn->nopin_response_timer, 1270 + iscsit_handle_nopin_response_timeout, 0); 1271 + timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0); 1267 1272 1268 1273 if (iscsit_conn_set_transport(conn, np->np_transport) < 0) { 1269 1274 kfree(conn);
+1
drivers/target/iscsi/iscsi_target_login.h
··· 25 25 extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, 26 26 bool, bool); 27 27 extern int iscsi_target_login_thread(void *); 28 + extern void iscsi_handle_login_thread_timeout(struct timer_list *t); 28 29 29 30 #endif /*** ISCSI_TARGET_LOGIN_H ***/
+15 -10
drivers/target/iscsi/iscsi_target_nego.c
··· 559 559 iscsi_target_login_sess_out(conn, np, zero_tsih, true); 560 560 } 561 561 562 - static void iscsi_target_login_timeout(unsigned long data) 562 + struct conn_timeout { 563 + struct timer_list timer; 564 + struct iscsi_conn *conn; 565 + }; 566 + 567 + static void iscsi_target_login_timeout(struct timer_list *t) 563 568 { 564 - struct iscsi_conn *conn = (struct iscsi_conn *)data; 569 + struct conn_timeout *timeout = from_timer(timeout, t, timer); 570 + struct iscsi_conn *conn = timeout->conn; 565 571 566 572 pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n"); 567 573 ··· 586 580 struct iscsi_np *np = login->np; 587 581 struct iscsi_portal_group *tpg = conn->tpg; 588 582 struct iscsi_tpg_np *tpg_np = conn->tpg_np; 589 - struct timer_list login_timer; 583 + struct conn_timeout timeout; 590 584 int rc, zero_tsih = login->zero_tsih; 591 585 bool state; 592 586 ··· 624 618 conn->login_kworker = current; 625 619 allow_signal(SIGINT); 626 620 627 - init_timer(&login_timer); 628 - login_timer.expires = (get_jiffies_64() + TA_LOGIN_TIMEOUT * HZ); 629 - login_timer.data = (unsigned long)conn; 630 - login_timer.function = iscsi_target_login_timeout; 631 - add_timer(&login_timer); 632 - pr_debug("Starting login_timer for %s/%d\n", current->comm, current->pid); 621 + timeout.conn = conn; 622 + timer_setup_on_stack(&timeout.timer, iscsi_target_login_timeout, 0); 623 + mod_timer(&timeout.timer, jiffies + TA_LOGIN_TIMEOUT * HZ); 624 + pr_debug("Starting login timer for %s/%d\n", current->comm, current->pid); 633 625 634 626 rc = conn->conn_transport->iscsit_get_login_rx(conn, login); 635 - del_timer_sync(&login_timer); 627 + del_timer_sync(&timeout.timer); 628 + destroy_timer_on_stack(&timeout.timer); 636 629 flush_signals(current); 637 630 conn->login_kworker = NULL; 638 631
+9 -20
drivers/target/iscsi/iscsi_target_util.c
··· 176 176 spin_lock_init(&cmd->istate_lock); 177 177 spin_lock_init(&cmd->error_lock); 178 178 spin_lock_init(&cmd->r2t_lock); 179 + timer_setup(&cmd->dataout_timer, iscsit_handle_dataout_timeout, 0); 179 180 180 181 return cmd; 181 182 } ··· 881 880 return 0; 882 881 } 883 882 884 - static void iscsit_handle_nopin_response_timeout(unsigned long data) 883 + void iscsit_handle_nopin_response_timeout(struct timer_list *t) 885 884 { 886 - struct iscsi_conn *conn = (struct iscsi_conn *) data; 885 + struct iscsi_conn *conn = from_timer(conn, t, nopin_response_timer); 887 886 888 887 iscsit_inc_conn_usage_count(conn); 889 888 ··· 950 949 return; 951 950 } 952 951 953 - init_timer(&conn->nopin_response_timer); 954 - conn->nopin_response_timer.expires = 955 - (get_jiffies_64() + na->nopin_response_timeout * HZ); 956 - conn->nopin_response_timer.data = (unsigned long)conn; 957 - conn->nopin_response_timer.function = iscsit_handle_nopin_response_timeout; 958 952 conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP; 959 953 conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING; 960 - add_timer(&conn->nopin_response_timer); 954 + mod_timer(&conn->nopin_response_timer, 955 + jiffies + na->nopin_response_timeout * HZ); 961 956 962 957 pr_debug("Started NOPIN Response Timer on CID: %d to %u" 963 958 " seconds\n", conn->cid, na->nopin_response_timeout); ··· 977 980 spin_unlock_bh(&conn->nopin_timer_lock); 978 981 } 979 982 980 - static void iscsit_handle_nopin_timeout(unsigned long data) 983 + void iscsit_handle_nopin_timeout(struct timer_list *t) 981 984 { 982 - struct iscsi_conn *conn = (struct iscsi_conn *) data; 985 + struct iscsi_conn *conn = from_timer(conn, t, nopin_timer); 983 986 984 987 iscsit_inc_conn_usage_count(conn); 985 988 ··· 1012 1015 if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) 1013 1016 return; 1014 1017 1015 - init_timer(&conn->nopin_timer); 1016 - conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ); 1017 - conn->nopin_timer.data = 
(unsigned long)conn; 1018 - conn->nopin_timer.function = iscsit_handle_nopin_timeout; 1019 1018 conn->nopin_timer_flags &= ~ISCSI_TF_STOP; 1020 1019 conn->nopin_timer_flags |= ISCSI_TF_RUNNING; 1021 - add_timer(&conn->nopin_timer); 1020 + mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ); 1022 1021 1023 1022 pr_debug("Started NOPIN Timer on CID: %d at %u second" 1024 1023 " interval\n", conn->cid, na->nopin_timeout); ··· 1036 1043 return; 1037 1044 } 1038 1045 1039 - init_timer(&conn->nopin_timer); 1040 - conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ); 1041 - conn->nopin_timer.data = (unsigned long)conn; 1042 - conn->nopin_timer.function = iscsit_handle_nopin_timeout; 1043 1046 conn->nopin_timer_flags &= ~ISCSI_TF_STOP; 1044 1047 conn->nopin_timer_flags |= ISCSI_TF_RUNNING; 1045 - add_timer(&conn->nopin_timer); 1048 + mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ); 1046 1049 1047 1050 pr_debug("Started NOPIN Timer on CID: %d at %u second" 1048 1051 " interval\n", conn->cid, na->nopin_timeout);
+2
drivers/target/iscsi/iscsi_target_util.h
··· 48 48 extern void iscsit_check_conn_usage_count(struct iscsi_conn *); 49 49 extern void iscsit_dec_conn_usage_count(struct iscsi_conn *); 50 50 extern void iscsit_inc_conn_usage_count(struct iscsi_conn *); 51 + extern void iscsit_handle_nopin_response_timeout(struct timer_list *t); 51 52 extern void iscsit_mod_nopin_response_timer(struct iscsi_conn *); 52 53 extern void iscsit_start_nopin_response_timer(struct iscsi_conn *); 53 54 extern void iscsit_stop_nopin_response_timer(struct iscsi_conn *); 55 + extern void iscsit_handle_nopin_timeout(struct timer_list *t); 54 56 extern void __iscsit_start_nopin_timer(struct iscsi_conn *); 55 57 extern void iscsit_start_nopin_timer(struct iscsi_conn *); 56 58 extern void iscsit_stop_nopin_timer(struct iscsi_conn *);
+1 -1
drivers/tty/cyclades.c
··· 283 283 /* The Cyclades-Z polling cycle is defined by this variable */ 284 284 static long cyz_polling_cycle = CZ_DEF_POLL; 285 285 286 - static DEFINE_TIMER(cyz_timerlist, cyz_poll, 0, 0); 286 + static DEFINE_TIMER(cyz_timerlist, cyz_poll); 287 287 288 288 #else /* CONFIG_CYZ_INTR */ 289 289 static void cyz_rx_restart(unsigned long);
+1 -1
drivers/tty/isicom.c
··· 177 177 static void isicom_tx(unsigned long _data); 178 178 static void isicom_start(struct tty_struct *tty); 179 179 180 - static DEFINE_TIMER(tx, isicom_tx, 0, 0); 180 + static DEFINE_TIMER(tx, isicom_tx); 181 181 182 182 /* baud index mappings from linux defns to isi */ 183 183
+1 -1
drivers/tty/moxa.c
··· 428 428 }; 429 429 430 430 static struct tty_driver *moxaDriver; 431 - static DEFINE_TIMER(moxaTimer, moxa_poll, 0, 0); 431 + static DEFINE_TIMER(moxaTimer, moxa_poll); 432 432 433 433 /* 434 434 * HW init
+1 -1
drivers/tty/rocket.c
··· 111 111 static unsigned int xmit_flags[NUM_BOARDS]; /* Bit significant, indicates port had data to transmit. */ 112 112 /* eg. Bit 0 indicates port 0 has xmit data, ... */ 113 113 static atomic_t rp_num_ports_open; /* Number of serial ports open */ 114 - static DEFINE_TIMER(rocket_timer, rp_do_poll, 0, 0); 114 + static DEFINE_TIMER(rocket_timer, rp_do_poll); 115 115 116 116 static unsigned long board1; /* ISA addresses, retrieved from rocketport.conf */ 117 117 static unsigned long board2;
+1 -1
drivers/tty/vt/keyboard.c
··· 250 250 input_handler_for_each_handle(&kbd_handler, &zero, kd_sound_helper); 251 251 } 252 252 253 - static DEFINE_TIMER(kd_mksound_timer, kd_nosound, 0, 0); 253 + static DEFINE_TIMER(kd_mksound_timer, kd_nosound); 254 254 255 255 void kd_mksound(unsigned int hz, unsigned int ticks) 256 256 {
+1 -1
drivers/tty/vt/vt.c
··· 228 228 */ 229 229 int (*console_blank_hook)(int); 230 230 231 - static DEFINE_TIMER(console_timer, blank_screen_t, 0, 0); 231 + static DEFINE_TIMER(console_timer, blank_screen_t); 232 232 static int blank_state; 233 233 static int blank_timer_expired; 234 234 enum {
+16 -8
drivers/usb/misc/usbtest.c
··· 576 576 return sg; 577 577 } 578 578 579 - static void sg_timeout(unsigned long _req) 580 - { 581 - struct usb_sg_request *req = (struct usb_sg_request *) _req; 579 + struct sg_timeout { 580 + struct timer_list timer; 581 + struct usb_sg_request *req; 582 + }; 582 583 583 - usb_sg_cancel(req); 584 + static void sg_timeout(struct timer_list *t) 585 + { 586 + struct sg_timeout *timeout = from_timer(timeout, t, timer); 587 + 588 + usb_sg_cancel(timeout->req); 584 589 } 585 590 586 591 static int perform_sglist( ··· 599 594 { 600 595 struct usb_device *udev = testdev_to_usbdev(tdev); 601 596 int retval = 0; 602 - struct timer_list sg_timer; 597 + struct sg_timeout timeout = { 598 + .req = req, 599 + }; 603 600 604 - setup_timer_on_stack(&sg_timer, sg_timeout, (unsigned long) req); 601 + timer_setup_on_stack(&timeout.timer, sg_timeout, 0); 605 602 606 603 while (retval == 0 && iterations-- > 0) { 607 604 retval = usb_sg_init(req, udev, pipe, ··· 614 607 615 608 if (retval) 616 609 break; 617 - mod_timer(&sg_timer, jiffies + 610 + mod_timer(&timeout.timer, jiffies + 618 611 msecs_to_jiffies(SIMPLE_IO_TIMEOUT)); 619 612 usb_sg_wait(req); 620 - if (!del_timer_sync(&sg_timer)) 613 + if (!del_timer_sync(&timeout.timer)) 621 614 retval = -ETIMEDOUT; 622 615 else 623 616 retval = req->status; 617 + destroy_timer_on_stack(&timeout.timer); 624 618 625 619 /* FIXME check resulting data pattern */ 626 620
+2 -2
drivers/watchdog/alim7101_wdt.c
··· 71 71 "Use the gpio watchdog (required by old cobalt boards)."); 72 72 73 73 static void wdt_timer_ping(unsigned long); 74 - static DEFINE_TIMER(timer, wdt_timer_ping, 0, 1); 74 + static DEFINE_TIMER(timer, wdt_timer_ping); 75 75 static unsigned long next_heartbeat; 76 76 static unsigned long wdt_is_open; 77 77 static char wdt_expect_close; ··· 87 87 * Whack the dog 88 88 */ 89 89 90 - static void wdt_timer_ping(unsigned long data) 90 + static void wdt_timer_ping(unsigned long unused) 91 91 { 92 92 /* If we got a heartbeat pulse within the WDT_US_INTERVAL 93 93 * we agree to ping the WDT
+4 -4
drivers/watchdog/cpwd.c
··· 230 230 * interrupts within the PLD so me must continually 231 231 * reset the timers ad infinitum. 232 232 */ 233 - static void cpwd_brokentimer(unsigned long data) 233 + static void cpwd_brokentimer(struct timer_list *unused) 234 234 { 235 - struct cpwd *p = (struct cpwd *) data; 235 + struct cpwd *p = cpwd_device; 236 236 int id, tripped = 0; 237 237 238 238 /* kill a running timer instance, in case we ··· 275 275 276 276 if (p->broken) { 277 277 p->devs[index].runstatus |= WD_STAT_BSTOP; 278 - cpwd_brokentimer((unsigned long) p); 278 + cpwd_brokentimer(NULL); 279 279 } 280 280 } 281 281 } ··· 608 608 } 609 609 610 610 if (p->broken) { 611 - setup_timer(&cpwd_timer, cpwd_brokentimer, (unsigned long)p); 611 + timer_setup(&cpwd_timer, cpwd_brokentimer, 0); 612 612 cpwd_timer.expires = WD_BTIMEOUT; 613 613 614 614 pr_info("PLD defect workaround enabled for model %s\n",
+7 -6
drivers/watchdog/lpc18xx_wdt.c
··· 78 78 return 0; 79 79 } 80 80 81 - static void lpc18xx_wdt_timer_feed(unsigned long data) 81 + static void lpc18xx_wdt_timer_feed(struct timer_list *t) 82 82 { 83 - struct watchdog_device *wdt_dev = (struct watchdog_device *)data; 84 - struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev); 83 + struct lpc18xx_wdt_dev *lpc18xx_wdt = from_timer(lpc18xx_wdt, t, timer); 84 + struct watchdog_device *wdt_dev = &lpc18xx_wdt->wdt_dev; 85 85 86 86 lpc18xx_wdt_feed(wdt_dev); 87 87 ··· 96 96 */ 97 97 static int lpc18xx_wdt_stop(struct watchdog_device *wdt_dev) 98 98 { 99 - lpc18xx_wdt_timer_feed((unsigned long)wdt_dev); 99 + struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev); 100 + 101 + lpc18xx_wdt_timer_feed(&lpc18xx_wdt->timer); 100 102 101 103 return 0; 102 104 } ··· 269 267 270 268 __lpc18xx_wdt_set_timeout(lpc18xx_wdt); 271 269 272 - setup_timer(&lpc18xx_wdt->timer, lpc18xx_wdt_timer_feed, 273 - (unsigned long)&lpc18xx_wdt->wdt_dev); 270 + timer_setup(&lpc18xx_wdt->timer, lpc18xx_wdt_timer_feed, 0); 274 271 275 272 watchdog_set_nowayout(&lpc18xx_wdt->wdt_dev, nowayout); 276 273 watchdog_set_restart_priority(&lpc18xx_wdt->wdt_dev, 128);
+1 -1
drivers/watchdog/machzwd.c
··· 127 127 static unsigned long zf_is_open; 128 128 static char zf_expect_close; 129 129 static DEFINE_SPINLOCK(zf_port_lock); 130 - static DEFINE_TIMER(zf_timer, zf_ping, 0, 0); 130 + static DEFINE_TIMER(zf_timer, zf_ping); 131 131 static unsigned long next_heartbeat; 132 132 133 133
+1 -1
drivers/watchdog/mixcomwd.c
··· 105 105 106 106 static int watchdog_port; 107 107 static int mixcomwd_timer_alive; 108 - static DEFINE_TIMER(mixcomwd_timer, mixcomwd_timerfun, 0, 0); 108 + static DEFINE_TIMER(mixcomwd_timer, mixcomwd_timerfun); 109 109 static char expect_close; 110 110 111 111 static bool nowayout = WATCHDOG_NOWAYOUT;
+1 -1
drivers/watchdog/sbc60xxwdt.c
··· 113 113 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 114 114 115 115 static void wdt_timer_ping(unsigned long); 116 - static DEFINE_TIMER(timer, wdt_timer_ping, 0, 0); 116 + static DEFINE_TIMER(timer, wdt_timer_ping); 117 117 static unsigned long next_heartbeat; 118 118 static unsigned long wdt_is_open; 119 119 static char wdt_expect_close;
+1 -1
drivers/watchdog/sc520_wdt.c
··· 124 124 static __u16 __iomem *wdtmrctl; 125 125 126 126 static void wdt_timer_ping(unsigned long); 127 - static DEFINE_TIMER(timer, wdt_timer_ping, 0, 0); 127 + static DEFINE_TIMER(timer, wdt_timer_ping); 128 128 static unsigned long next_heartbeat; 129 129 static unsigned long wdt_is_open; 130 130 static char wdt_expect_close;
+1 -1
drivers/watchdog/via_wdt.c
··· 68 68 static void __iomem *wdt_mem; 69 69 static unsigned int mmio; 70 70 static void wdt_timer_tick(unsigned long data); 71 - static DEFINE_TIMER(timer, wdt_timer_tick, 0, 0); 71 + static DEFINE_TIMER(timer, wdt_timer_tick); 72 72 /* The timer that pings the watchdog */ 73 73 static unsigned long next_heartbeat; /* the next_heartbeat for the timer */ 74 74
+1 -1
drivers/watchdog/w83877f_wdt.c
··· 98 98 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 99 99 100 100 static void wdt_timer_ping(unsigned long); 101 - static DEFINE_TIMER(timer, wdt_timer_ping, 0, 0); 101 + static DEFINE_TIMER(timer, wdt_timer_ping); 102 102 static unsigned long next_heartbeat; 103 103 static unsigned long wdt_is_open; 104 104 static char wdt_expect_close;
+1 -1
drivers/xen/grant-table.c
··· 305 305 }; 306 306 static LIST_HEAD(deferred_list); 307 307 static void gnttab_handle_deferred(unsigned long); 308 - static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0); 308 + static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred); 309 309 310 310 static void gnttab_handle_deferred(unsigned long unused) 311 311 {
+1 -3
fs/ncpfs/inode.c
··· 618 618 server->tx.creq = NULL; 619 619 server->rcv.creq = NULL; 620 620 621 - init_timer(&server->timeout_tm); 621 + timer_setup(&server->timeout_tm, ncpdgram_timeout_call, 0); 622 622 #undef NCP_PACKET_SIZE 623 623 #define NCP_PACKET_SIZE 131072 624 624 error = -ENOMEM; ··· 650 650 } else { 651 651 INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc); 652 652 INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc); 653 - server->timeout_tm.data = (unsigned long)server; 654 - server->timeout_tm.function = ncpdgram_timeout_call; 655 653 } 656 654 release_sock(sock->sk); 657 655
+1 -1
fs/ncpfs/ncp_fs_sb.h
··· 150 150 extern void ncp_tcp_tx_proc(struct work_struct *work); 151 151 extern void ncpdgram_rcv_proc(struct work_struct *work); 152 152 extern void ncpdgram_timeout_proc(struct work_struct *work); 153 - extern void ncpdgram_timeout_call(unsigned long server); 153 + extern void ncpdgram_timeout_call(struct timer_list *t); 154 154 extern void ncp_tcp_data_ready(struct sock* sk); 155 155 extern void ncp_tcp_write_space(struct sock* sk); 156 156 extern void ncp_tcp_error_report(struct sock* sk);
+3 -3
fs/ncpfs/sock.c
··· 117 117 schedule_work(&server->tx.tq); 118 118 } 119 119 120 - void ncpdgram_timeout_call(unsigned long v) 120 + void ncpdgram_timeout_call(struct timer_list *t) 121 121 { 122 - struct ncp_server *server = (void*)v; 123 - 122 + struct ncp_server *server = from_timer(server, t, timeout_tm); 123 + 124 124 schedule_work(&server->timeout_tq); 125 125 } 126 126
+2 -5
fs/pstore/platform.c
··· 62 62 static int pstore_new_entry; 63 63 64 64 static void pstore_timefunc(unsigned long); 65 - static DEFINE_TIMER(pstore_timer, pstore_timefunc, 0, 0); 65 + static DEFINE_TIMER(pstore_timer, pstore_timefunc); 66 66 67 67 static void pstore_dowork(struct work_struct *); 68 68 static DECLARE_WORK(pstore_work, pstore_dowork); ··· 482 482 record->psi = psinfo; 483 483 484 484 /* Report zeroed timestamp if called before timekeeping has resumed. */ 485 - if (__getnstimeofday(&record->time)) { 486 - record->time.tv_sec = 0; 487 - record->time.tv_nsec = 0; 488 - } 485 + record->time = ns_to_timespec(ktime_get_real_fast_ns()); 489 486 } 490 487 491 488 /*
+1 -1
include/linux/ide.h
··· 1212 1212 1213 1213 extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout); 1214 1214 1215 - extern void ide_timer_expiry(unsigned long); 1215 + extern void ide_timer_expiry(struct timer_list *t); 1216 1216 extern irqreturn_t ide_intr(int irq, void *dev_id); 1217 1217 extern void do_ide_request(struct request_queue *); 1218 1218 extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
+5 -5
include/linux/kthread.h
··· 76 76 */ 77 77 struct kthread_work; 78 78 typedef void (*kthread_work_func_t)(struct kthread_work *work); 79 - void kthread_delayed_work_timer_fn(unsigned long __data); 79 + void kthread_delayed_work_timer_fn(struct timer_list *t); 80 80 81 81 enum { 82 82 KTW_FREEZABLE = 1 << 0, /* freeze during suspend */ ··· 117 117 118 118 #define KTHREAD_DELAYED_WORK_INIT(dwork, fn) { \ 119 119 .work = KTHREAD_WORK_INIT((dwork).work, (fn)), \ 120 - .timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn, \ 121 - 0, (unsigned long)&(dwork), \ 120 + .timer = __TIMER_INITIALIZER((TIMER_FUNC_TYPE)kthread_delayed_work_timer_fn,\ 121 + (TIMER_DATA_TYPE)&(dwork.timer), \ 122 122 TIMER_IRQSAFE), \ 123 123 } 124 124 ··· 165 165 do { \ 166 166 kthread_init_work(&(dwork)->work, (fn)); \ 167 167 __setup_timer(&(dwork)->timer, \ 168 - kthread_delayed_work_timer_fn, \ 169 - (unsigned long)(dwork), \ 168 + (TIMER_FUNC_TYPE)kthread_delayed_work_timer_fn,\ 169 + (TIMER_DATA_TYPE)&(dwork)->timer, \ 170 170 TIMER_IRQSAFE); \ 171 171 } while (0) 172 172
+1
include/linux/ktime.h
··· 270 270 } 271 271 272 272 # include <linux/timekeeping.h> 273 + # include <linux/timekeeping32.h> 273 274 274 275 #endif
+1
include/linux/parport.h
··· 225 225 struct pardevice *waittail; 226 226 227 227 struct list_head list; 228 + struct timer_list timer; 228 229 unsigned int flags; 229 230 230 231 void *sysctl_table;
+42 -1
include/linux/rtc.h
··· 136 136 /* Some hardware can't support UIE mode */ 137 137 int uie_unsupported; 138 138 139 + /* Number of nsec it takes to set the RTC clock. This influences when 140 + * the set ops are called. An offset: 141 + * - of 0.5 s will call RTC set for wall clock time 10.0 s at 9.5 s 142 + * - of 1.5 s will call RTC set for wall clock time 10.0 s at 8.5 s 143 + * - of -0.5 s will call RTC set for wall clock time 10.0 s at 10.5 s 144 + */ 145 + long set_offset_nsec; 146 + 139 147 bool registered; 140 148 141 149 struct nvmem_config *nvmem_config; ··· 181 173 182 174 extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm); 183 175 extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm); 184 - extern int rtc_set_ntp_time(struct timespec64 now); 176 + extern int rtc_set_ntp_time(struct timespec64 now, unsigned long *target_nsec); 185 177 int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm); 186 178 extern int rtc_read_alarm(struct rtc_device *rtc, 187 179 struct rtc_wkalrm *alrm); ··· 228 220 static inline bool is_leap_year(unsigned int year) 229 221 { 230 222 return (!(year % 4) && (year % 100)) || !(year % 400); 223 + } 224 + 225 + /* Determine if we can call to driver to set the time. Drivers can only be 226 + * called to set a second aligned time value, and the field set_offset_nsec 227 + * specifies how far away from the second aligned time to call the driver. 228 + * 229 + * This also computes 'to_set' which is the time we are trying to set, and has 230 + * a zero in tv_nsecs, such that: 231 + * to_set - set_delay_nsec == now +/- FUZZ 232 + * 233 + */ 234 + static inline bool rtc_tv_nsec_ok(s64 set_offset_nsec, 235 + struct timespec64 *to_set, 236 + const struct timespec64 *now) 237 + { 238 + /* Allowed error in tv_nsec, arbitarily set to 5 jiffies in ns. 
*/ 239 + const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5; 240 + struct timespec64 delay = {.tv_sec = 0, 241 + .tv_nsec = set_offset_nsec}; 242 + 243 + *to_set = timespec64_add(*now, delay); 244 + 245 + if (to_set->tv_nsec < TIME_SET_NSEC_FUZZ) { 246 + to_set->tv_nsec = 0; 247 + return true; 248 + } 249 + 250 + if (to_set->tv_nsec > NSEC_PER_SEC - TIME_SET_NSEC_FUZZ) { 251 + to_set->tv_sec++; 252 + to_set->tv_nsec = 0; 253 + return true; 254 + } 255 + return false; 231 256 } 232 257 233 258 #define rtc_register_device(device) \
+1 -206
include/linux/time.h
··· 18 18 int put_itimerspec64(const struct itimerspec64 *it, 19 19 struct itimerspec __user *uit); 20 20 21 - #define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) 22 - 23 - static inline int timespec_equal(const struct timespec *a, 24 - const struct timespec *b) 25 - { 26 - return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec); 27 - } 28 - 29 - /* 30 - * lhs < rhs: return <0 31 - * lhs == rhs: return 0 32 - * lhs > rhs: return >0 33 - */ 34 - static inline int timespec_compare(const struct timespec *lhs, const struct timespec *rhs) 35 - { 36 - if (lhs->tv_sec < rhs->tv_sec) 37 - return -1; 38 - if (lhs->tv_sec > rhs->tv_sec) 39 - return 1; 40 - return lhs->tv_nsec - rhs->tv_nsec; 41 - } 42 - 43 - static inline int timeval_compare(const struct timeval *lhs, const struct timeval *rhs) 44 - { 45 - if (lhs->tv_sec < rhs->tv_sec) 46 - return -1; 47 - if (lhs->tv_sec > rhs->tv_sec) 48 - return 1; 49 - return lhs->tv_usec - rhs->tv_usec; 50 - } 51 - 52 21 extern time64_t mktime64(const unsigned int year, const unsigned int mon, 53 22 const unsigned int day, const unsigned int hour, 54 23 const unsigned int min, const unsigned int sec); 55 - 56 - /** 57 - * Deprecated. Use mktime64(). 58 - */ 59 - static inline unsigned long mktime(const unsigned int year, 60 - const unsigned int mon, const unsigned int day, 61 - const unsigned int hour, const unsigned int min, 62 - const unsigned int sec) 63 - { 64 - return mktime64(year, mon, day, hour, min, sec); 65 - } 66 - 67 - extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); 68 - 69 - /* 70 - * timespec_add_safe assumes both values are positive and checks 71 - * for overflow. It will return TIME_T_MAX if the reutrn would be 72 - * smaller then either of the arguments. 
73 - */ 74 - extern struct timespec timespec_add_safe(const struct timespec lhs, 75 - const struct timespec rhs); 76 - 77 - 78 - static inline struct timespec timespec_add(struct timespec lhs, 79 - struct timespec rhs) 80 - { 81 - struct timespec ts_delta; 82 - set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec, 83 - lhs.tv_nsec + rhs.tv_nsec); 84 - return ts_delta; 85 - } 86 - 87 - /* 88 - * sub = lhs - rhs, in normalized form 89 - */ 90 - static inline struct timespec timespec_sub(struct timespec lhs, 91 - struct timespec rhs) 92 - { 93 - struct timespec ts_delta; 94 - set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec, 95 - lhs.tv_nsec - rhs.tv_nsec); 96 - return ts_delta; 97 - } 98 - 99 - /* 100 - * Returns true if the timespec is norm, false if denorm: 101 - */ 102 - static inline bool timespec_valid(const struct timespec *ts) 103 - { 104 - /* Dates before 1970 are bogus */ 105 - if (ts->tv_sec < 0) 106 - return false; 107 - /* Can't have more nanoseconds then a second */ 108 - if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) 109 - return false; 110 - return true; 111 - } 112 - 113 - static inline bool timespec_valid_strict(const struct timespec *ts) 114 - { 115 - if (!timespec_valid(ts)) 116 - return false; 117 - /* Disallow values that could overflow ktime_t */ 118 - if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) 119 - return false; 120 - return true; 121 - } 122 - 123 - static inline bool timeval_valid(const struct timeval *tv) 124 - { 125 - /* Dates before 1970 are bogus */ 126 - if (tv->tv_sec < 0) 127 - return false; 128 - 129 - /* Can't have more microseconds then a second */ 130 - if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC) 131 - return false; 132 - 133 - return true; 134 - } 135 - 136 - extern struct timespec timespec_trunc(struct timespec t, unsigned gran); 137 - 138 - /* 139 - * Validates if a timespec/timeval used to inject a time offset is valid. 140 - * Offsets can be postive or negative. 
The value of the timeval/timespec 141 - * is the sum of its fields, but *NOTE*: the field tv_usec/tv_nsec must 142 - * always be non-negative. 143 - */ 144 - static inline bool timeval_inject_offset_valid(const struct timeval *tv) 145 - { 146 - /* We don't check the tv_sec as it can be positive or negative */ 147 - 148 - /* Can't have more microseconds then a second */ 149 - if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC) 150 - return false; 151 - return true; 152 - } 153 - 154 - static inline bool timespec_inject_offset_valid(const struct timespec *ts) 155 - { 156 - /* We don't check the tv_sec as it can be positive or negative */ 157 - 158 - /* Can't have more nanoseconds then a second */ 159 - if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC) 160 - return false; 161 - return true; 162 - } 163 24 164 25 /* Some architectures do not supply their own clocksource. 165 26 * This is mainly the case in architectures that get their ··· 70 209 71 210 void time64_to_tm(time64_t totalsecs, int offset, struct tm *result); 72 211 73 - /** 74 - * time_to_tm - converts the calendar time to local broken-down time 75 - * 76 - * @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970, 77 - * Coordinated Universal Time (UTC). 78 - * @offset offset seconds adding to totalsecs. 79 - * @result pointer to struct tm variable to receive broken-down time 80 - */ 81 - static inline void time_to_tm(time_t totalsecs, int offset, struct tm *result) 82 - { 83 - time64_to_tm(totalsecs, offset, result); 84 - } 85 - 86 - /** 87 - * timespec_to_ns - Convert timespec to nanoseconds 88 - * @ts: pointer to the timespec variable to be converted 89 - * 90 - * Returns the scalar nanosecond representation of the timespec 91 - * parameter. 
92 - */ 93 - static inline s64 timespec_to_ns(const struct timespec *ts) 94 - { 95 - return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; 96 - } 97 - 98 - /** 99 - * timeval_to_ns - Convert timeval to nanoseconds 100 - * @ts: pointer to the timeval variable to be converted 101 - * 102 - * Returns the scalar nanosecond representation of the timeval 103 - * parameter. 104 - */ 105 - static inline s64 timeval_to_ns(const struct timeval *tv) 106 - { 107 - return ((s64) tv->tv_sec * NSEC_PER_SEC) + 108 - tv->tv_usec * NSEC_PER_USEC; 109 - } 110 - 111 - /** 112 - * ns_to_timespec - Convert nanoseconds to timespec 113 - * @nsec: the nanoseconds value to be converted 114 - * 115 - * Returns the timespec representation of the nsec parameter. 116 - */ 117 - extern struct timespec ns_to_timespec(const s64 nsec); 118 - 119 - /** 120 - * ns_to_timeval - Convert nanoseconds to timeval 121 - * @nsec: the nanoseconds value to be converted 122 - * 123 - * Returns the timeval representation of the nsec parameter. 124 - */ 125 - extern struct timeval ns_to_timeval(const s64 nsec); 126 - 127 - /** 128 - * timespec_add_ns - Adds nanoseconds to a timespec 129 - * @a: pointer to timespec to be incremented 130 - * @ns: unsigned nanoseconds value to be added 131 - * 132 - * This must always be inlined because its used from the x86-64 vdso, 133 - * which cannot call other kernel functions. 134 - */ 135 - static __always_inline void timespec_add_ns(struct timespec *a, u64 ns) 136 - { 137 - a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns); 138 - a->tv_nsec = ns; 139 - } 212 + # include <linux/time32.h> 140 213 141 214 static inline bool itimerspec64_valid(const struct itimerspec64 *its) 142 215 {
+221
include/linux/time32.h
··· 1 + #ifndef _LINUX_TIME32_H 2 + #define _LINUX_TIME32_H 3 + /* 4 + * These are all interfaces based on the old time_t definition 5 + * that overflows in 2038 on 32-bit architectures. New code 6 + * should use the replacements based on time64_t and timespec64. 7 + * 8 + * Any interfaces in here that become unused as we migrate 9 + * code to time64_t should get removed. 10 + */ 11 + 12 + #include <linux/time64.h> 13 + 14 + #define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) 15 + 16 + #if __BITS_PER_LONG == 64 17 + 18 + /* timespec64 is defined as timespec here */ 19 + static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) 20 + { 21 + return ts64; 22 + } 23 + 24 + static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) 25 + { 26 + return ts; 27 + } 28 + 29 + # define timespec_equal timespec64_equal 30 + # define timespec_compare timespec64_compare 31 + # define set_normalized_timespec set_normalized_timespec64 32 + # define timespec_add timespec64_add 33 + # define timespec_sub timespec64_sub 34 + # define timespec_valid timespec64_valid 35 + # define timespec_valid_strict timespec64_valid_strict 36 + # define timespec_to_ns timespec64_to_ns 37 + # define ns_to_timespec ns_to_timespec64 38 + # define timespec_add_ns timespec64_add_ns 39 + 40 + #else 41 + static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) 42 + { 43 + struct timespec ret; 44 + 45 + ret.tv_sec = (time_t)ts64.tv_sec; 46 + ret.tv_nsec = ts64.tv_nsec; 47 + return ret; 48 + } 49 + 50 + static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) 51 + { 52 + struct timespec64 ret; 53 + 54 + ret.tv_sec = ts.tv_sec; 55 + ret.tv_nsec = ts.tv_nsec; 56 + return ret; 57 + } 58 + 59 + static inline int timespec_equal(const struct timespec *a, 60 + const struct timespec *b) 61 + { 62 + return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec); 63 + } 64 + 65 + /* 66 + * lhs < rhs: return 
<0 67 + * lhs == rhs: return 0 68 + * lhs > rhs: return >0 69 + */ 70 + static inline int timespec_compare(const struct timespec *lhs, const struct timespec *rhs) 71 + { 72 + if (lhs->tv_sec < rhs->tv_sec) 73 + return -1; 74 + if (lhs->tv_sec > rhs->tv_sec) 75 + return 1; 76 + return lhs->tv_nsec - rhs->tv_nsec; 77 + } 78 + 79 + extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); 80 + 81 + static inline struct timespec timespec_add(struct timespec lhs, 82 + struct timespec rhs) 83 + { 84 + struct timespec ts_delta; 85 + 86 + set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec, 87 + lhs.tv_nsec + rhs.tv_nsec); 88 + return ts_delta; 89 + } 90 + 91 + /* 92 + * sub = lhs - rhs, in normalized form 93 + */ 94 + static inline struct timespec timespec_sub(struct timespec lhs, 95 + struct timespec rhs) 96 + { 97 + struct timespec ts_delta; 98 + 99 + set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec, 100 + lhs.tv_nsec - rhs.tv_nsec); 101 + return ts_delta; 102 + } 103 + 104 + /* 105 + * Returns true if the timespec is norm, false if denorm: 106 + */ 107 + static inline bool timespec_valid(const struct timespec *ts) 108 + { 109 + /* Dates before 1970 are bogus */ 110 + if (ts->tv_sec < 0) 111 + return false; 112 + /* Can't have more nanoseconds then a second */ 113 + if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) 114 + return false; 115 + return true; 116 + } 117 + 118 + static inline bool timespec_valid_strict(const struct timespec *ts) 119 + { 120 + if (!timespec_valid(ts)) 121 + return false; 122 + /* Disallow values that could overflow ktime_t */ 123 + if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) 124 + return false; 125 + return true; 126 + } 127 + 128 + /** 129 + * timespec_to_ns - Convert timespec to nanoseconds 130 + * @ts: pointer to the timespec variable to be converted 131 + * 132 + * Returns the scalar nanosecond representation of the timespec 133 + * parameter. 
134 + */ 135 + static inline s64 timespec_to_ns(const struct timespec *ts) 136 + { 137 + return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; 138 + } 139 + 140 + /** 141 + * ns_to_timespec - Convert nanoseconds to timespec 142 + * @nsec: the nanoseconds value to be converted 143 + * 144 + * Returns the timespec representation of the nsec parameter. 145 + */ 146 + extern struct timespec ns_to_timespec(const s64 nsec); 147 + 148 + /** 149 + * timespec_add_ns - Adds nanoseconds to a timespec 150 + * @a: pointer to timespec to be incremented 151 + * @ns: unsigned nanoseconds value to be added 152 + * 153 + * This must always be inlined because its used from the x86-64 vdso, 154 + * which cannot call other kernel functions. 155 + */ 156 + static __always_inline void timespec_add_ns(struct timespec *a, u64 ns) 157 + { 158 + a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns); 159 + a->tv_nsec = ns; 160 + } 161 + 162 + #endif 163 + 164 + /** 165 + * time_to_tm - converts the calendar time to local broken-down time 166 + * 167 + * @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970, 168 + * Coordinated Universal Time (UTC). 169 + * @offset offset seconds adding to totalsecs. 
170 + * @result pointer to struct tm variable to receive broken-down time 171 + */ 172 + static inline void time_to_tm(time_t totalsecs, int offset, struct tm *result) 173 + { 174 + time64_to_tm(totalsecs, offset, result); 175 + } 176 + 177 + static inline unsigned long mktime(const unsigned int year, 178 + const unsigned int mon, const unsigned int day, 179 + const unsigned int hour, const unsigned int min, 180 + const unsigned int sec) 181 + { 182 + return mktime64(year, mon, day, hour, min, sec); 183 + } 184 + 185 + static inline bool timeval_valid(const struct timeval *tv) 186 + { 187 + /* Dates before 1970 are bogus */ 188 + if (tv->tv_sec < 0) 189 + return false; 190 + 191 + /* Can't have more microseconds then a second */ 192 + if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC) 193 + return false; 194 + 195 + return true; 196 + } 197 + 198 + extern struct timespec timespec_trunc(struct timespec t, unsigned int gran); 199 + 200 + /** 201 + * timeval_to_ns - Convert timeval to nanoseconds 202 + * @ts: pointer to the timeval variable to be converted 203 + * 204 + * Returns the scalar nanosecond representation of the timeval 205 + * parameter. 206 + */ 207 + static inline s64 timeval_to_ns(const struct timeval *tv) 208 + { 209 + return ((s64) tv->tv_sec * NSEC_PER_SEC) + 210 + tv->tv_usec * NSEC_PER_USEC; 211 + } 212 + 213 + /** 214 + * ns_to_timeval - Convert nanoseconds to timeval 215 + * @nsec: the nanoseconds value to be converted 216 + * 217 + * Returns the timeval representation of the nsec parameter. 218 + */ 219 + extern struct timeval ns_to_timeval(const s64 nsec); 220 + 221 + #endif
+1 -77
include/linux/time64.h
··· 8 8 typedef __s64 time64_t; 9 9 typedef __u64 timeu64_t; 10 10 11 - /* 12 - * This wants to go into uapi/linux/time.h once we agreed about the 13 - * userspace interfaces. 14 - */ 15 11 #if __BITS_PER_LONG == 64 12 + /* this trick allows us to optimize out timespec64_to_timespec */ 16 13 # define timespec64 timespec 17 14 #define itimerspec64 itimerspec 18 15 #else ··· 38 41 #define TIME64_MAX ((s64)~((u64)1 << 63)) 39 42 #define KTIME_MAX ((s64)~((u64)1 << 63)) 40 43 #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) 41 - 42 - #if __BITS_PER_LONG == 64 43 - 44 - static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) 45 - { 46 - return ts64; 47 - } 48 - 49 - static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) 50 - { 51 - return ts; 52 - } 53 - 54 - static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64) 55 - { 56 - return *its64; 57 - } 58 - 59 - static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its) 60 - { 61 - return *its; 62 - } 63 - 64 - # define timespec64_equal timespec_equal 65 - # define timespec64_compare timespec_compare 66 - # define set_normalized_timespec64 set_normalized_timespec 67 - # define timespec64_add timespec_add 68 - # define timespec64_sub timespec_sub 69 - # define timespec64_valid timespec_valid 70 - # define timespec64_valid_strict timespec_valid_strict 71 - # define timespec64_to_ns timespec_to_ns 72 - # define ns_to_timespec64 ns_to_timespec 73 - # define timespec64_add_ns timespec_add_ns 74 - 75 - #else 76 - 77 - static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) 78 - { 79 - struct timespec ret; 80 - 81 - ret.tv_sec = (time_t)ts64.tv_sec; 82 - ret.tv_nsec = ts64.tv_nsec; 83 - return ret; 84 - } 85 - 86 - static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) 87 - { 88 - struct timespec64 ret; 89 - 90 - ret.tv_sec = ts.tv_sec; 91 - ret.tv_nsec = ts.tv_nsec; 92 - 
return ret; 93 - } 94 - 95 - static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64) 96 - { 97 - struct itimerspec ret; 98 - 99 - ret.it_interval = timespec64_to_timespec(its64->it_interval); 100 - ret.it_value = timespec64_to_timespec(its64->it_value); 101 - return ret; 102 - } 103 - 104 - static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its) 105 - { 106 - struct itimerspec64 ret; 107 - 108 - ret.it_interval = timespec_to_timespec64(its->it_interval); 109 - ret.it_value = timespec_to_timespec64(its->it_value); 110 - return ret; 111 - } 112 44 113 45 static inline int timespec64_equal(const struct timespec64 *a, 114 46 const struct timespec64 *b) ··· 139 213 a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns); 140 214 a->tv_nsec = ns; 141 215 } 142 - 143 - #endif 144 216 145 217 /* 146 218 * timespec64_add_safe assumes both values are positive and checks for
+5 -1
include/linux/timekeeper_internal.h
··· 14 14 /** 15 15 * struct tk_read_base - base structure for timekeeping readout 16 16 * @clock: Current clocksource used for timekeeping. 17 - * @read: Read function of @clock 18 17 * @mask: Bitmask for two's complement subtraction of non 64bit clocks 19 18 * @cycle_last: @clock cycle value at last update 20 19 * @mult: (NTP adjusted) multiplier for scaled math conversion 21 20 * @shift: Shift value for scaled math conversion 22 21 * @xtime_nsec: Shifted (fractional) nano seconds offset for readout 23 22 * @base: ktime_t (nanoseconds) base time for readout 23 + * @base_real: Nanoseconds base value for clock REALTIME readout 24 24 * 25 25 * This struct has size 56 byte on 64 bit. Together with a seqcount it 26 26 * occupies a single 64byte cache line. 27 27 * 28 28 * The struct is separate from struct timekeeper as it is also used 29 29 * for a fast NMI safe accessors. 30 + * 31 + * @base_real is for the fast NMI safe accessor to allow reading clock 32 + * realtime from any context. 30 33 */ 31 34 struct tk_read_base { 32 35 struct clocksource *clock; ··· 39 36 u32 shift; 40 37 u64 xtime_nsec; 41 38 ktime_t base; 39 + u64 base_real; 42 40 }; 43 41 44 42 /**
+3 -135
include/linux/timekeeping.h
··· 16 16 /* 17 17 * Get and set timeofday 18 18 */ 19 - extern void do_gettimeofday(struct timeval *tv); 20 19 extern int do_settimeofday64(const struct timespec64 *ts); 21 20 extern int do_sys_settimeofday64(const struct timespec64 *tv, 22 21 const struct timezone *tz); 23 22 /* 24 23 * Kernel time accessors 25 24 */ 26 - unsigned long get_seconds(void); 27 25 struct timespec64 current_kernel_time64(void); 28 - /* does not take xtime_lock */ 29 - struct timespec __current_kernel_time(void); 30 - 31 - static inline struct timespec current_kernel_time(void) 32 - { 33 - struct timespec64 now = current_kernel_time64(); 34 - 35 - return timespec64_to_timespec(now); 36 - } 37 26 38 27 /* 39 - * timespec based interfaces 28 + * timespec64 based interfaces 40 29 */ 41 30 struct timespec64 get_monotonic_coarse64(void); 42 31 extern void getrawmonotonic64(struct timespec64 *ts); ··· 36 47 extern int __getnstimeofday64(struct timespec64 *tv); 37 48 extern void getnstimeofday64(struct timespec64 *tv); 38 49 extern void getboottime64(struct timespec64 *ts); 39 - 40 - #if BITS_PER_LONG == 64 41 - /** 42 - * Deprecated. Use do_settimeofday64(). 
43 - */ 44 - static inline int do_settimeofday(const struct timespec *ts) 45 - { 46 - return do_settimeofday64(ts); 47 - } 48 - 49 - static inline int __getnstimeofday(struct timespec *ts) 50 - { 51 - return __getnstimeofday64(ts); 52 - } 53 - 54 - static inline void getnstimeofday(struct timespec *ts) 55 - { 56 - getnstimeofday64(ts); 57 - } 58 - 59 - static inline void ktime_get_ts(struct timespec *ts) 60 - { 61 - ktime_get_ts64(ts); 62 - } 63 - 64 - static inline void ktime_get_real_ts(struct timespec *ts) 65 - { 66 - getnstimeofday64(ts); 67 - } 68 - 69 - static inline void getrawmonotonic(struct timespec *ts) 70 - { 71 - getrawmonotonic64(ts); 72 - } 73 - 74 - static inline struct timespec get_monotonic_coarse(void) 75 - { 76 - return get_monotonic_coarse64(); 77 - } 78 - 79 - static inline void getboottime(struct timespec *ts) 80 - { 81 - return getboottime64(ts); 82 - } 83 - #else 84 - /** 85 - * Deprecated. Use do_settimeofday64(). 86 - */ 87 - static inline int do_settimeofday(const struct timespec *ts) 88 - { 89 - struct timespec64 ts64; 90 - 91 - ts64 = timespec_to_timespec64(*ts); 92 - return do_settimeofday64(&ts64); 93 - } 94 - 95 - static inline int __getnstimeofday(struct timespec *ts) 96 - { 97 - struct timespec64 ts64; 98 - int ret = __getnstimeofday64(&ts64); 99 - 100 - *ts = timespec64_to_timespec(ts64); 101 - return ret; 102 - } 103 - 104 - static inline void getnstimeofday(struct timespec *ts) 105 - { 106 - struct timespec64 ts64; 107 - 108 - getnstimeofday64(&ts64); 109 - *ts = timespec64_to_timespec(ts64); 110 - } 111 - 112 - static inline void ktime_get_ts(struct timespec *ts) 113 - { 114 - struct timespec64 ts64; 115 - 116 - ktime_get_ts64(&ts64); 117 - *ts = timespec64_to_timespec(ts64); 118 - } 119 - 120 - static inline void ktime_get_real_ts(struct timespec *ts) 121 - { 122 - struct timespec64 ts64; 123 - 124 - getnstimeofday64(&ts64); 125 - *ts = timespec64_to_timespec(ts64); 126 - } 127 - 128 - static inline void 
getrawmonotonic(struct timespec *ts) 129 - { 130 - struct timespec64 ts64; 131 - 132 - getrawmonotonic64(&ts64); 133 - *ts = timespec64_to_timespec(ts64); 134 - } 135 - 136 - static inline struct timespec get_monotonic_coarse(void) 137 - { 138 - return timespec64_to_timespec(get_monotonic_coarse64()); 139 - } 140 - 141 - static inline void getboottime(struct timespec *ts) 142 - { 143 - struct timespec64 ts64; 144 - 145 - getboottime64(&ts64); 146 - *ts = timespec64_to_timespec(ts64); 147 - } 148 - #endif 149 50 150 51 #define ktime_get_real_ts64(ts) getnstimeofday64(ts) 151 52 ··· 119 240 extern u64 ktime_get_mono_fast_ns(void); 120 241 extern u64 ktime_get_raw_fast_ns(void); 121 242 extern u64 ktime_get_boot_fast_ns(void); 243 + extern u64 ktime_get_real_fast_ns(void); 122 244 123 245 /* 124 - * Timespec interfaces utilizing the ktime based ones 246 + * timespec64 interfaces utilizing the ktime based ones 125 247 */ 126 - static inline void get_monotonic_boottime(struct timespec *ts) 127 - { 128 - *ts = ktime_to_timespec(ktime_get_boottime()); 129 - } 130 - 131 248 static inline void get_monotonic_boottime64(struct timespec64 *ts) 132 249 { 133 250 *ts = ktime_to_timespec64(ktime_get_boottime()); 134 - } 135 - 136 - static inline void timekeeping_clocktai(struct timespec *ts) 137 - { 138 - *ts = ktime_to_timespec(ktime_get_clocktai()); 139 251 } 140 252 141 253 static inline void timekeeping_clocktai64(struct timespec64 *ts) ··· 211 341 */ 212 342 extern int persistent_clock_is_local; 213 343 214 - extern void read_persistent_clock(struct timespec *ts); 215 344 extern void read_persistent_clock64(struct timespec64 *ts); 216 345 extern void read_boot_clock64(struct timespec64 *ts); 217 - extern int update_persistent_clock(struct timespec now); 218 346 extern int update_persistent_clock64(struct timespec64 now); 219 347 220 348
+151
include/linux/timekeeping32.h
··· 1 + #ifndef _LINUX_TIMEKEEPING32_H 2 + #define _LINUX_TIMEKEEPING32_H 3 + /* 4 + * These interfaces are all based on the old timespec type 5 + * and should get replaced with the timespec64 based versions 6 + * over time so we can remove the file here. 7 + */ 8 + 9 + extern void do_gettimeofday(struct timeval *tv); 10 + unsigned long get_seconds(void); 11 + 12 + /* does not take xtime_lock */ 13 + struct timespec __current_kernel_time(void); 14 + 15 + static inline struct timespec current_kernel_time(void) 16 + { 17 + struct timespec64 now = current_kernel_time64(); 18 + 19 + return timespec64_to_timespec(now); 20 + } 21 + 22 + #if BITS_PER_LONG == 64 23 + /** 24 + * Deprecated. Use do_settimeofday64(). 25 + */ 26 + static inline int do_settimeofday(const struct timespec *ts) 27 + { 28 + return do_settimeofday64(ts); 29 + } 30 + 31 + static inline int __getnstimeofday(struct timespec *ts) 32 + { 33 + return __getnstimeofday64(ts); 34 + } 35 + 36 + static inline void getnstimeofday(struct timespec *ts) 37 + { 38 + getnstimeofday64(ts); 39 + } 40 + 41 + static inline void ktime_get_ts(struct timespec *ts) 42 + { 43 + ktime_get_ts64(ts); 44 + } 45 + 46 + static inline void ktime_get_real_ts(struct timespec *ts) 47 + { 48 + getnstimeofday64(ts); 49 + } 50 + 51 + static inline void getrawmonotonic(struct timespec *ts) 52 + { 53 + getrawmonotonic64(ts); 54 + } 55 + 56 + static inline struct timespec get_monotonic_coarse(void) 57 + { 58 + return get_monotonic_coarse64(); 59 + } 60 + 61 + static inline void getboottime(struct timespec *ts) 62 + { 63 + return getboottime64(ts); 64 + } 65 + #else 66 + /** 67 + * Deprecated. Use do_settimeofday64(). 
68 + */ 69 + static inline int do_settimeofday(const struct timespec *ts) 70 + { 71 + struct timespec64 ts64; 72 + 73 + ts64 = timespec_to_timespec64(*ts); 74 + return do_settimeofday64(&ts64); 75 + } 76 + 77 + static inline int __getnstimeofday(struct timespec *ts) 78 + { 79 + struct timespec64 ts64; 80 + int ret = __getnstimeofday64(&ts64); 81 + 82 + *ts = timespec64_to_timespec(ts64); 83 + return ret; 84 + } 85 + 86 + static inline void getnstimeofday(struct timespec *ts) 87 + { 88 + struct timespec64 ts64; 89 + 90 + getnstimeofday64(&ts64); 91 + *ts = timespec64_to_timespec(ts64); 92 + } 93 + 94 + static inline void ktime_get_ts(struct timespec *ts) 95 + { 96 + struct timespec64 ts64; 97 + 98 + ktime_get_ts64(&ts64); 99 + *ts = timespec64_to_timespec(ts64); 100 + } 101 + 102 + static inline void ktime_get_real_ts(struct timespec *ts) 103 + { 104 + struct timespec64 ts64; 105 + 106 + getnstimeofday64(&ts64); 107 + *ts = timespec64_to_timespec(ts64); 108 + } 109 + 110 + static inline void getrawmonotonic(struct timespec *ts) 111 + { 112 + struct timespec64 ts64; 113 + 114 + getrawmonotonic64(&ts64); 115 + *ts = timespec64_to_timespec(ts64); 116 + } 117 + 118 + static inline struct timespec get_monotonic_coarse(void) 119 + { 120 + return timespec64_to_timespec(get_monotonic_coarse64()); 121 + } 122 + 123 + static inline void getboottime(struct timespec *ts) 124 + { 125 + struct timespec64 ts64; 126 + 127 + getboottime64(&ts64); 128 + *ts = timespec64_to_timespec(ts64); 129 + } 130 + #endif 131 + 132 + /* 133 + * Timespec interfaces utilizing the ktime based ones 134 + */ 135 + static inline void get_monotonic_boottime(struct timespec *ts) 136 + { 137 + *ts = ktime_to_timespec(ktime_get_boottime()); 138 + } 139 + 140 + static inline void timekeeping_clocktai(struct timespec *ts) 141 + { 142 + *ts = ktime_to_timespec(ktime_get_clocktai()); 143 + } 144 + 145 + /* 146 + * Persistent clock related interfaces 147 + */ 148 + extern void read_persistent_clock(struct 
timespec *ts); 149 + extern int update_persistent_clock(struct timespec now); 150 + 151 + #endif
+30 -27
include/linux/timer.h
··· 64 64 65 65 #define TIMER_TRACE_FLAGMASK (TIMER_MIGRATING | TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE) 66 66 67 - #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \ 67 + #define TIMER_DATA_TYPE unsigned long 68 + #define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) 69 + 70 + #define __TIMER_INITIALIZER(_function, _data, _flags) { \ 68 71 .entry = { .next = TIMER_ENTRY_STATIC }, \ 69 72 .function = (_function), \ 70 - .expires = (_expires), \ 71 73 .data = (_data), \ 72 74 .flags = (_flags), \ 73 75 __TIMER_LOCKDEP_MAP_INITIALIZER( \ 74 76 __FILE__ ":" __stringify(__LINE__)) \ 75 77 } 76 78 77 - #define TIMER_INITIALIZER(_function, _expires, _data) \ 78 - __TIMER_INITIALIZER((_function), (_expires), (_data), 0) 79 - 80 - #define TIMER_PINNED_INITIALIZER(_function, _expires, _data) \ 81 - __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_PINNED) 82 - 83 - #define TIMER_DEFERRED_INITIALIZER(_function, _expires, _data) \ 84 - __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE) 85 - 86 - #define TIMER_PINNED_DEFERRED_INITIALIZER(_function, _expires, _data) \ 87 - __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE | TIMER_PINNED) 88 - 89 - #define DEFINE_TIMER(_name, _function, _expires, _data) \ 79 + #define DEFINE_TIMER(_name, _function) \ 90 80 struct timer_list _name = \ 91 - TIMER_INITIALIZER(_function, _expires, _data) 81 + __TIMER_INITIALIZER((TIMER_FUNC_TYPE)_function, 0, 0) 92 82 93 83 void init_timer_key(struct timer_list *timer, unsigned int flags, 94 84 const char *name, struct lock_class_key *key); ··· 119 129 120 130 #define init_timer(timer) \ 121 131 __init_timer((timer), 0) 122 - #define init_timer_pinned(timer) \ 123 - __init_timer((timer), TIMER_PINNED) 124 - #define init_timer_deferrable(timer) \ 125 - __init_timer((timer), TIMER_DEFERRABLE) 126 - #define init_timer_pinned_deferrable(timer) \ 127 - __init_timer((timer), TIMER_DEFERRABLE | TIMER_PINNED) 128 - #define 
init_timer_on_stack(timer) \ 129 - __init_timer_on_stack((timer), 0) 130 132 131 133 #define __setup_timer(_timer, _fn, _data, _flags) \ 132 134 do { \ ··· 151 169 #define setup_pinned_deferrable_timer_on_stack(timer, fn, data) \ 152 170 __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED) 153 171 154 - #define TIMER_DATA_TYPE unsigned long 155 - #define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) 156 - 172 + #ifndef CONFIG_LOCKDEP 157 173 static inline void timer_setup(struct timer_list *timer, 158 174 void (*callback)(struct timer_list *), 159 175 unsigned int flags) ··· 159 179 __setup_timer(timer, (TIMER_FUNC_TYPE)callback, 160 180 (TIMER_DATA_TYPE)timer, flags); 161 181 } 182 + 183 + static inline void timer_setup_on_stack(struct timer_list *timer, 184 + void (*callback)(struct timer_list *), 185 + unsigned int flags) 186 + { 187 + __setup_timer_on_stack(timer, (TIMER_FUNC_TYPE)callback, 188 + (TIMER_DATA_TYPE)timer, flags); 189 + } 190 + #else 191 + /* 192 + * Under LOCKDEP, the timer lock_class_key (set up in __init_timer) needs 193 + * to be tied to the caller's context, so an inline (above) won't work. We 194 + * do want to keep the inline for argument type checking, though. 
195 + */ 196 + # define timer_setup(timer, callback, flags) \ 197 + __setup_timer((timer), (TIMER_FUNC_TYPE)(callback), \ 198 + (TIMER_DATA_TYPE)(timer), (flags)) 199 + # define timer_setup_on_stack(timer, callback, flags) \ 200 + __setup_timer_on_stack((timer), \ 201 + (TIMER_FUNC_TYPE)(callback), \ 202 + (TIMER_DATA_TYPE)(timer), (flags)) 203 + #endif 162 204 163 205 #define from_timer(var, callback_timer, timer_fieldname) \ 164 206 container_of(callback_timer, typeof(*var), timer_fieldname) ··· 204 202 extern int del_timer(struct timer_list * timer); 205 203 extern int mod_timer(struct timer_list *timer, unsigned long expires); 206 204 extern int mod_timer_pending(struct timer_list *timer, unsigned long expires); 205 + extern int timer_reduce(struct timer_list *timer, unsigned long expires); 207 206 208 207 /* 209 208 * The jiffies value which is added to now, when there is no timer
+8 -7
include/linux/workqueue.h
··· 18 18 19 19 struct work_struct; 20 20 typedef void (*work_func_t)(struct work_struct *work); 21 - void delayed_work_timer_fn(unsigned long __data); 21 + void delayed_work_timer_fn(struct timer_list *t); 22 22 23 23 /* 24 24 * The first word is the work queue pointer and the flags rolled into ··· 176 176 177 177 #define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \ 178 178 .work = __WORK_INITIALIZER((n).work, (f)), \ 179 - .timer = __TIMER_INITIALIZER(delayed_work_timer_fn, \ 180 - 0, (unsigned long)&(n), \ 179 + .timer = __TIMER_INITIALIZER((TIMER_FUNC_TYPE)delayed_work_timer_fn,\ 180 + (TIMER_DATA_TYPE)&(n.timer), \ 181 181 (tflags) | TIMER_IRQSAFE), \ 182 182 } 183 183 ··· 242 242 #define __INIT_DELAYED_WORK(_work, _func, _tflags) \ 243 243 do { \ 244 244 INIT_WORK(&(_work)->work, (_func)); \ 245 - __setup_timer(&(_work)->timer, delayed_work_timer_fn, \ 246 - (unsigned long)(_work), \ 245 + __setup_timer(&(_work)->timer, \ 246 + (TIMER_FUNC_TYPE)delayed_work_timer_fn, \ 247 + (TIMER_DATA_TYPE)&(_work)->timer, \ 247 248 (_tflags) | TIMER_IRQSAFE); \ 248 249 } while (0) 249 250 ··· 252 251 do { \ 253 252 INIT_WORK_ONSTACK(&(_work)->work, (_func)); \ 254 253 __setup_timer_on_stack(&(_work)->timer, \ 255 - delayed_work_timer_fn, \ 256 - (unsigned long)(_work), \ 254 + (TIMER_FUNC_TYPE)delayed_work_timer_fn,\ 255 + (TIMER_DATA_TYPE)&(_work)->timer,\ 257 256 (_tflags) | TIMER_IRQSAFE); \ 258 257 } while (0) 259 258
+1 -1
include/scsi/libfcoe.h
··· 382 382 383 383 void fcoe_clean_pending_queue(struct fc_lport *); 384 384 void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb); 385 - void fcoe_queue_timer(ulong lport); 385 + void fcoe_queue_timer(struct timer_list *t); 386 386 int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen, 387 387 struct fcoe_percpu_s *fps); 388 388
+1
include/scsi/libsas.h
··· 629 629 */ 630 630 struct timer_list timer; 631 631 struct completion completion; 632 + struct sas_task *task; 632 633 }; 633 634 634 635 #define SAS_TASK_STATE_PENDING 1
+1 -1
kernel/irq/spurious.c
··· 21 21 22 22 #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) 23 23 static void poll_spurious_irqs(unsigned long dummy); 24 - static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0); 24 + static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs); 25 25 static int irq_poll_cpu; 26 26 static atomic_t irq_poll_active; 27 27
+4 -6
kernel/kthread.c
··· 798 798 /** 799 799 * kthread_delayed_work_timer_fn - callback that queues the associated kthread 800 800 * delayed work when the timer expires. 801 - * @__data: pointer to the data associated with the timer 801 + * @t: pointer to the expired timer 802 802 * 803 803 * The format of the function is defined by struct timer_list. 804 804 * It should have been called from irqsafe timer with irq already off. 805 805 */ 806 - void kthread_delayed_work_timer_fn(unsigned long __data) 806 + void kthread_delayed_work_timer_fn(struct timer_list *t) 807 807 { 808 - struct kthread_delayed_work *dwork = 809 - (struct kthread_delayed_work *)__data; 808 + struct kthread_delayed_work *dwork = from_timer(dwork, t, timer); 810 809 struct kthread_work *work = &dwork->work; 811 810 struct kthread_worker *worker = work->worker; 812 811 ··· 836 837 struct timer_list *timer = &dwork->timer; 837 838 struct kthread_work *work = &dwork->work; 838 839 839 - WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn || 840 - timer->data != (unsigned long)dwork); 840 + WARN_ON_ONCE(timer->function != (TIMER_FUNC_TYPE)kthread_delayed_work_timer_fn); 841 841 842 842 /* 843 843 * If @delay is 0, queue @dwork->work immediately. This is for
+2 -2
kernel/rcu/rcutorture.c
··· 1078 1078 * counter in the element should never be greater than 1, otherwise, the 1079 1079 * RCU implementation is broken. 1080 1080 */ 1081 - static void rcu_torture_timer(unsigned long unused) 1081 + static void rcu_torture_timer(struct timer_list *unused) 1082 1082 { 1083 1083 int idx; 1084 1084 unsigned long started; ··· 1165 1165 VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); 1166 1166 set_user_nice(current, MAX_NICE); 1167 1167 if (irqreader && cur_ops->irq_capable) 1168 - setup_timer_on_stack(&t, rcu_torture_timer, 0); 1168 + timer_setup_on_stack(&t, rcu_torture_timer, 0); 1169 1169 1170 1170 do { 1171 1171 if (irqreader && cur_ops->irq_capable) {
+5 -4
kernel/rcu/tree_plugin.h
··· 2266 2266 } 2267 2267 2268 2268 /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */ 2269 - static void do_nocb_deferred_wakeup_timer(unsigned long x) 2269 + static void do_nocb_deferred_wakeup_timer(struct timer_list *t) 2270 2270 { 2271 - do_nocb_deferred_wakeup_common((struct rcu_data *)x); 2271 + struct rcu_data *rdp = from_timer(rdp, t, nocb_timer); 2272 + 2273 + do_nocb_deferred_wakeup_common(rdp); 2272 2274 } 2273 2275 2274 2276 /* ··· 2334 2332 init_swait_queue_head(&rdp->nocb_wq); 2335 2333 rdp->nocb_follower_tail = &rdp->nocb_follower_head; 2336 2334 raw_spin_lock_init(&rdp->nocb_lock); 2337 - setup_timer(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 2338 - (unsigned long)rdp); 2335 + timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0); 2339 2336 } 2340 2337 2341 2338 /*
+1 -1
kernel/time/Kconfig
··· 56 56 57 57 # Core internal switch. Selected by NO_HZ_COMMON / HIGH_RES_TIMERS. This is 58 58 # only related to the tick functionality. Oneshot clockevent devices 59 - # are supported independ of this. 59 + # are supported independent of this. 60 60 config TICK_ONESHOT 61 61 bool 62 62
+13 -8
kernel/time/clockevents.c
··· 280 280 static int clockevents_program_min_delta(struct clock_event_device *dev) 281 281 { 282 282 unsigned long long clc; 283 - int64_t delta; 283 + int64_t delta = 0; 284 + int i; 284 285 285 - delta = dev->min_delta_ns; 286 - dev->next_event = ktime_add_ns(ktime_get(), delta); 286 + for (i = 0; i < 10; i++) { 287 + delta += dev->min_delta_ns; 288 + dev->next_event = ktime_add_ns(ktime_get(), delta); 287 289 288 - if (clockevent_state_shutdown(dev)) 289 - return 0; 290 + if (clockevent_state_shutdown(dev)) 291 + return 0; 290 292 291 - dev->retries++; 292 - clc = ((unsigned long long) delta * dev->mult) >> dev->shift; 293 - return dev->set_next_event((unsigned long) clc, dev); 293 + dev->retries++; 294 + clc = ((unsigned long long) delta * dev->mult) >> dev->shift; 295 + if (dev->set_next_event((unsigned long) clc, dev) == 0) 296 + return 0; 297 + } 298 + return -ETIME; 294 299 } 295 300 296 301 #endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */
+113 -114
kernel/time/ntp.c
··· 493 493 return leap; 494 494 } 495 495 496 + static void sync_hw_clock(struct work_struct *work); 497 + static DECLARE_DELAYED_WORK(sync_work, sync_hw_clock); 498 + 499 + static void sched_sync_hw_clock(struct timespec64 now, 500 + unsigned long target_nsec, bool fail) 501 + 502 + { 503 + struct timespec64 next; 504 + 505 + getnstimeofday64(&next); 506 + if (!fail) 507 + next.tv_sec = 659; 508 + else { 509 + /* 510 + * Try again as soon as possible. Delaying long periods 511 + * decreases the accuracy of the work queue timer. Due to this 512 + * the algorithm is very likely to require a short-sleep retry 513 + * after the above long sleep to synchronize ts_nsec. 514 + */ 515 + next.tv_sec = 0; 516 + } 517 + 518 + /* Compute the needed delay that will get to tv_nsec == target_nsec */ 519 + next.tv_nsec = target_nsec - next.tv_nsec; 520 + if (next.tv_nsec <= 0) 521 + next.tv_nsec += NSEC_PER_SEC; 522 + if (next.tv_nsec >= NSEC_PER_SEC) { 523 + next.tv_sec++; 524 + next.tv_nsec -= NSEC_PER_SEC; 525 + } 526 + 527 + queue_delayed_work(system_power_efficient_wq, &sync_work, 528 + timespec64_to_jiffies(&next)); 529 + } 530 + 531 + static void sync_rtc_clock(void) 532 + { 533 + unsigned long target_nsec; 534 + struct timespec64 adjust, now; 535 + int rc; 536 + 537 + if (!IS_ENABLED(CONFIG_RTC_SYSTOHC)) 538 + return; 539 + 540 + getnstimeofday64(&now); 541 + 542 + adjust = now; 543 + if (persistent_clock_is_local) 544 + adjust.tv_sec -= (sys_tz.tz_minuteswest * 60); 545 + 546 + /* 547 + * The current RTC in use will provide the target_nsec it wants to be 548 + * called at, and does rtc_tv_nsec_ok internally. 
549 + */ 550 + rc = rtc_set_ntp_time(adjust, &target_nsec); 551 + if (rc == -ENODEV) 552 + return; 553 + 554 + sched_sync_hw_clock(now, target_nsec, rc); 555 + } 556 + 496 557 #ifdef CONFIG_GENERIC_CMOS_UPDATE 497 558 int __weak update_persistent_clock(struct timespec now) 498 559 { ··· 569 508 } 570 509 #endif 571 510 572 - #if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC) 573 - static void sync_cmos_clock(struct work_struct *work); 574 - 575 - static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock); 576 - 577 - static void sync_cmos_clock(struct work_struct *work) 511 + static bool sync_cmos_clock(void) 578 512 { 513 + static bool no_cmos; 579 514 struct timespec64 now; 580 - struct timespec64 next; 581 - int fail = 1; 515 + struct timespec64 adjust; 516 + int rc = -EPROTO; 517 + long target_nsec = NSEC_PER_SEC / 2; 518 + 519 + if (!IS_ENABLED(CONFIG_GENERIC_CMOS_UPDATE)) 520 + return false; 521 + 522 + if (no_cmos) 523 + return false; 582 524 583 525 /* 584 - * If we have an externally synchronized Linux clock, then update 585 - * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be 586 - * called as close as possible to 500 ms before the new second starts. 587 - * This code is run on a timer. If the clock is set, that timer 588 - * may not expire at the correct time. Thus, we adjust... 589 - * We want the clock to be within a couple of ticks from the target. 526 + * Historically update_persistent_clock64() has followed x86 527 + * semantics, which match the MC146818A/etc RTC. This RTC will store 528 + * 'adjust' and then in .5s it will advance once second. 529 + * 530 + * Architectures are strongly encouraged to use rtclib and not 531 + * implement this legacy API. 590 532 */ 591 - if (!ntp_synced()) { 592 - /* 593 - * Not synced, exit, do not restart a timer (if one is 594 - * running, let it run out). 
595 - */ 596 - return; 597 - } 598 - 599 533 getnstimeofday64(&now); 600 - if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) { 601 - struct timespec64 adjust = now; 602 - 603 - fail = -ENODEV; 534 + if (rtc_tv_nsec_ok(-1 * target_nsec, &adjust, &now)) { 604 535 if (persistent_clock_is_local) 605 536 adjust.tv_sec -= (sys_tz.tz_minuteswest * 60); 606 - #ifdef CONFIG_GENERIC_CMOS_UPDATE 607 - fail = update_persistent_clock64(adjust); 608 - #endif 609 - 610 - #ifdef CONFIG_RTC_SYSTOHC 611 - if (fail == -ENODEV) 612 - fail = rtc_set_ntp_time(adjust); 613 - #endif 537 + rc = update_persistent_clock64(adjust); 538 + /* 539 + * The machine does not support update_persistent_clock64 even 540 + * though it defines CONFIG_GENERIC_CMOS_UPDATE. 541 + */ 542 + if (rc == -ENODEV) { 543 + no_cmos = true; 544 + return false; 545 + } 614 546 } 615 547 616 - next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec - (TICK_NSEC / 2); 617 - if (next.tv_nsec <= 0) 618 - next.tv_nsec += NSEC_PER_SEC; 548 + sched_sync_hw_clock(now, target_nsec, rc); 549 + return true; 550 + } 619 551 620 - if (!fail || fail == -ENODEV) 621 - next.tv_sec = 659; 622 - else 623 - next.tv_sec = 0; 552 + /* 553 + * If we have an externally synchronized Linux clock, then update RTC clock 554 + * accordingly every ~11 minutes. Generally RTCs can only store second 555 + * precision, but many RTCs will adjust the phase of their second tick to 556 + * match the moment of update. This infrastructure arranges to call to the RTC 557 + * set at the correct moment to phase synchronize the RTC second tick over 558 + * with the kernel clock. 
559 + */ 560 + static void sync_hw_clock(struct work_struct *work) 561 + { 562 + if (!ntp_synced()) 563 + return; 624 564 625 - if (next.tv_nsec >= NSEC_PER_SEC) { 626 - next.tv_sec++; 627 - next.tv_nsec -= NSEC_PER_SEC; 628 - } 629 - queue_delayed_work(system_power_efficient_wq, 630 - &sync_cmos_work, timespec64_to_jiffies(&next)); 565 + if (sync_cmos_clock()) 566 + return; 567 + 568 + sync_rtc_clock(); 631 569 } 632 570 633 571 void ntp_notify_cmos_timer(void) 634 572 { 635 - queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0); 573 + if (!ntp_synced()) 574 + return; 575 + 576 + if (IS_ENABLED(CONFIG_GENERIC_CMOS_UPDATE) || 577 + IS_ENABLED(CONFIG_RTC_SYSTOHC)) 578 + queue_delayed_work(system_power_efficient_wq, &sync_work, 0); 636 579 } 637 - 638 - #else 639 - void ntp_notify_cmos_timer(void) { } 640 - #endif 641 - 642 580 643 581 /* 644 582 * Propagate a new txc->status value into the NTP state: ··· 711 651 712 652 if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET)) 713 653 ntp_update_frequency(); 714 - } 715 - 716 - 717 - 718 - /** 719 - * ntp_validate_timex - Ensures the timex is ok for use in do_adjtimex 720 - */ 721 - int ntp_validate_timex(struct timex *txc) 722 - { 723 - if (txc->modes & ADJ_ADJTIME) { 724 - /* singleshot must not be used with any other mode bits */ 725 - if (!(txc->modes & ADJ_OFFSET_SINGLESHOT)) 726 - return -EINVAL; 727 - if (!(txc->modes & ADJ_OFFSET_READONLY) && 728 - !capable(CAP_SYS_TIME)) 729 - return -EPERM; 730 - } else { 731 - /* In order to modify anything, you gotta be super-user! */ 732 - if (txc->modes && !capable(CAP_SYS_TIME)) 733 - return -EPERM; 734 - /* 735 - * if the quartz is off by more than 10% then 736 - * something is VERY wrong! 737 - */ 738 - if (txc->modes & ADJ_TICK && 739 - (txc->tick < 900000/USER_HZ || 740 - txc->tick > 1100000/USER_HZ)) 741 - return -EINVAL; 742 - } 743 - 744 - if (txc->modes & ADJ_SETOFFSET) { 745 - /* In order to inject time, you gotta be super-user! 
*/ 746 - if (!capable(CAP_SYS_TIME)) 747 - return -EPERM; 748 - 749 - if (txc->modes & ADJ_NANO) { 750 - struct timespec ts; 751 - 752 - ts.tv_sec = txc->time.tv_sec; 753 - ts.tv_nsec = txc->time.tv_usec; 754 - if (!timespec_inject_offset_valid(&ts)) 755 - return -EINVAL; 756 - 757 - } else { 758 - if (!timeval_inject_offset_valid(&txc->time)) 759 - return -EINVAL; 760 - } 761 - } 762 - 763 - /* 764 - * Check for potential multiplication overflows that can 765 - * only happen on 64-bit systems: 766 - */ 767 - if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) { 768 - if (LLONG_MIN / PPM_SCALE > txc->freq) 769 - return -EINVAL; 770 - if (LLONG_MAX / PPM_SCALE < txc->freq) 771 - return -EINVAL; 772 - } 773 - 774 - return 0; 775 654 } 776 655 777 656
-1
kernel/time/ntp_internal.h
··· 8 8 extern u64 ntp_tick_length(void); 9 9 extern ktime_t ntp_get_next_leap(void); 10 10 extern int second_overflow(time64_t secs); 11 - extern int ntp_validate_timex(struct timex *); 12 11 extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *); 13 12 extern void __hardpps(const struct timespec64 *, const struct timespec64 *); 14 13 #endif /* _LINUX_NTP_INTERNAL_H */
+8 -12
kernel/time/posix-stubs.c
··· 117 117 const struct timespec __user *, rqtp, 118 118 struct timespec __user *, rmtp) 119 119 { 120 - struct timespec64 t64; 121 - struct timespec t; 120 + struct timespec64 t; 122 121 123 122 switch (which_clock) { 124 123 case CLOCK_REALTIME: ··· 128 129 return -EINVAL; 129 130 } 130 131 131 - if (copy_from_user(&t, rqtp, sizeof (struct timespec))) 132 + if (get_timespec64(&t, rqtp)) 132 133 return -EFAULT; 133 - t64 = timespec_to_timespec64(t); 134 - if (!timespec64_valid(&t64)) 134 + if (!timespec64_valid(&t)) 135 135 return -EINVAL; 136 136 if (flags & TIMER_ABSTIME) 137 137 rmtp = NULL; 138 138 current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE; 139 139 current->restart_block.nanosleep.rmtp = rmtp; 140 - return hrtimer_nanosleep(&t64, flags & TIMER_ABSTIME ? 140 + return hrtimer_nanosleep(&t, flags & TIMER_ABSTIME ? 141 141 HRTIMER_MODE_ABS : HRTIMER_MODE_REL, 142 142 which_clock); 143 143 } ··· 201 203 struct compat_timespec __user *, rqtp, 202 204 struct compat_timespec __user *, rmtp) 203 205 { 204 - struct timespec64 t64; 205 - struct timespec t; 206 + struct timespec64 t; 206 207 207 208 switch (which_clock) { 208 209 case CLOCK_REALTIME: ··· 212 215 return -EINVAL; 213 216 } 214 217 215 - if (compat_get_timespec(&t, rqtp)) 218 + if (compat_get_timespec64(&t, rqtp)) 216 219 return -EFAULT; 217 - t64 = timespec_to_timespec64(t); 218 - if (!timespec64_valid(&t64)) 220 + if (!timespec64_valid(&t)) 219 221 return -EINVAL; 220 222 if (flags & TIMER_ABSTIME) 221 223 rmtp = NULL; 222 224 current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE; 223 225 current->restart_block.nanosleep.compat_rmtp = rmtp; 224 - return hrtimer_nanosleep(&t64, flags & TIMER_ABSTIME ? 226 + return hrtimer_nanosleep(&t, flags & TIMER_ABSTIME ? 225 227 HRTIMER_MODE_ABS : HRTIMER_MODE_REL, 226 228 which_clock); 227 229 }
+1
kernel/time/tick-oneshot.c
··· 33 33 * We don't need the clock event device any more, stop it. 34 34 */ 35 35 clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT_STOPPED); 36 + dev->next_event = KTIME_MAX; 36 37 return 0; 37 38 } 38 39
+10 -61
kernel/time/time.c
··· 82 82 83 83 SYSCALL_DEFINE1(stime, time_t __user *, tptr) 84 84 { 85 - struct timespec tv; 85 + struct timespec64 tv; 86 86 int err; 87 87 88 88 if (get_user(tv.tv_sec, tptr)) ··· 90 90 91 91 tv.tv_nsec = 0; 92 92 93 - err = security_settime(&tv, NULL); 93 + err = security_settime64(&tv, NULL); 94 94 if (err) 95 95 return err; 96 96 97 - do_settimeofday(&tv); 97 + do_settimeofday64(&tv); 98 98 return 0; 99 99 } 100 100 ··· 122 122 123 123 COMPAT_SYSCALL_DEFINE1(stime, compat_time_t __user *, tptr) 124 124 { 125 - struct timespec tv; 125 + struct timespec64 tv; 126 126 int err; 127 127 128 128 if (get_user(tv.tv_sec, tptr)) ··· 130 130 131 131 tv.tv_nsec = 0; 132 132 133 - err = security_settime(&tv, NULL); 133 + err = security_settime64(&tv, NULL); 134 134 if (err) 135 135 return err; 136 136 137 - do_settimeofday(&tv); 137 + do_settimeofday64(&tv); 138 138 return 0; 139 139 } 140 140 ··· 155 155 return -EFAULT; 156 156 } 157 157 return 0; 158 - } 159 - 160 - /* 161 - * Indicates if there is an offset between the system clock and the hardware 162 - * clock/persistent clock/rtc. 163 - */ 164 - int persistent_clock_is_local; 165 - 166 - /* 167 - * Adjust the time obtained from the CMOS to be UTC time instead of 168 - * local time. 169 - * 170 - * This is ugly, but preferable to the alternatives. Otherwise we 171 - * would either need to write a program to do it in /etc/rc (and risk 172 - * confusion if the program gets run more than once; it would also be 173 - * hard to make the program warp the clock precisely n hours) or 174 - * compile in the timezone information into the kernel. Bad, bad.... 175 - * 176 - * - TYT, 1992-01-01 177 - * 178 - * The best thing to do is to keep the CMOS clock in universal time (UTC) 179 - * as real UNIX machines always do it. This avoids all headaches about 180 - * daylight saving times and warping kernel clocks. 
181 - */ 182 - static inline void warp_clock(void) 183 - { 184 - if (sys_tz.tz_minuteswest != 0) { 185 - struct timespec adjust; 186 - 187 - persistent_clock_is_local = 1; 188 - adjust.tv_sec = sys_tz.tz_minuteswest * 60; 189 - adjust.tv_nsec = 0; 190 - timekeeping_inject_offset(&adjust); 191 - } 192 158 } 193 159 194 160 /* ··· 190 224 if (firsttime) { 191 225 firsttime = 0; 192 226 if (!tv) 193 - warp_clock(); 227 + timekeeping_warp_clock(); 194 228 } 195 229 } 196 230 if (tv) ··· 407 441 } 408 442 EXPORT_SYMBOL(mktime64); 409 443 444 + #if __BITS_PER_LONG == 32 410 445 /** 411 446 * set_normalized_timespec - set timespec sec and nsec parts and normalize 412 447 * ··· 468 501 return ts; 469 502 } 470 503 EXPORT_SYMBOL(ns_to_timespec); 504 + #endif 471 505 472 506 /** 473 507 * ns_to_timeval - Convert nanoseconds to timeval ··· 488 520 } 489 521 EXPORT_SYMBOL(ns_to_timeval); 490 522 491 - #if BITS_PER_LONG == 32 492 523 /** 493 524 * set_normalized_timespec - set timespec sec and nsec parts and normalize 494 525 * ··· 548 581 return ts; 549 582 } 550 583 EXPORT_SYMBOL(ns_to_timespec64); 551 - #endif 584 + 552 585 /** 553 586 * msecs_to_jiffies: - convert milliseconds to jiffies 554 587 * @m: time in milliseconds ··· 818 851 return (unsigned long)nsecs_to_jiffies64(n); 819 852 } 820 853 EXPORT_SYMBOL_GPL(nsecs_to_jiffies); 821 - 822 - /* 823 - * Add two timespec values and do a safety check for overflow. 824 - * It's assumed that both values are valid (>= 0) 825 - */ 826 - struct timespec timespec_add_safe(const struct timespec lhs, 827 - const struct timespec rhs) 828 - { 829 - struct timespec res; 830 - 831 - set_normalized_timespec(&res, lhs.tv_sec + rhs.tv_sec, 832 - lhs.tv_nsec + rhs.tv_nsec); 833 - 834 - if (res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec) 835 - res.tv_sec = TIME_T_MAX; 836 - 837 - return res; 838 - } 839 854 840 855 /* 841 856 * Add two timespec64 values and do a safety check for overflow.
+161 -21
kernel/time/timekeeping.c
··· 60 60 struct tk_read_base base[2]; 61 61 }; 62 62 63 - static struct tk_fast tk_fast_mono ____cacheline_aligned; 64 - static struct tk_fast tk_fast_raw ____cacheline_aligned; 63 + /* Suspend-time cycles value for halted fast timekeeper. */ 64 + static u64 cycles_at_suspend; 65 + 66 + static u64 dummy_clock_read(struct clocksource *cs) 67 + { 68 + return cycles_at_suspend; 69 + } 70 + 71 + static struct clocksource dummy_clock = { 72 + .read = dummy_clock_read, 73 + }; 74 + 75 + static struct tk_fast tk_fast_mono ____cacheline_aligned = { 76 + .base[0] = { .clock = &dummy_clock, }, 77 + .base[1] = { .clock = &dummy_clock, }, 78 + }; 79 + 80 + static struct tk_fast tk_fast_raw ____cacheline_aligned = { 81 + .base[0] = { .clock = &dummy_clock, }, 82 + .base[1] = { .clock = &dummy_clock, }, 83 + }; 65 84 66 85 /* flag for if timekeeping is suspended */ 67 86 int __read_mostly timekeeping_suspended; ··· 496 477 } 497 478 EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns); 498 479 499 - /* Suspend-time cycles value for halted fast timekeeper. */ 500 - static u64 cycles_at_suspend; 501 480 502 - static u64 dummy_clock_read(struct clocksource *cs) 481 + /* 482 + * See comment for __ktime_get_fast_ns() vs. timestamp ordering 483 + */ 484 + static __always_inline u64 __ktime_get_real_fast_ns(struct tk_fast *tkf) 503 485 { 504 - return cycles_at_suspend; 486 + struct tk_read_base *tkr; 487 + unsigned int seq; 488 + u64 now; 489 + 490 + do { 491 + seq = raw_read_seqcount_latch(&tkf->seq); 492 + tkr = tkf->base + (seq & 0x01); 493 + now = ktime_to_ns(tkr->base_real); 494 + 495 + now += timekeeping_delta_to_ns(tkr, 496 + clocksource_delta( 497 + tk_clock_read(tkr), 498 + tkr->cycle_last, 499 + tkr->mask)); 500 + } while (read_seqcount_retry(&tkf->seq, seq)); 501 + 502 + return now; 505 503 } 506 504 507 - static struct clocksource dummy_clock = { 508 - .read = dummy_clock_read, 509 - }; 505 + /** 506 + * ktime_get_real_fast_ns: - NMI safe and fast access to clock realtime. 
507 + */ 508 + u64 ktime_get_real_fast_ns(void) 509 + { 510 + return __ktime_get_real_fast_ns(&tk_fast_mono); 511 + } 512 + EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns); 510 513 511 514 /** 512 515 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource. ··· 548 507 memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); 549 508 cycles_at_suspend = tk_clock_read(tkr); 550 509 tkr_dummy.clock = &dummy_clock; 510 + tkr_dummy.base_real = tkr->base + tk->offs_real; 551 511 update_fast_timekeeper(&tkr_dummy, &tk_fast_mono); 552 512 553 513 tkr = &tk->tkr_raw; ··· 696 654 update_vsyscall(tk); 697 655 update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET); 698 656 657 + tk->tkr_mono.base_real = tk->tkr_mono.base + tk->offs_real; 699 658 update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono); 700 659 update_fast_timekeeper(&tk->tkr_raw, &tk_fast_raw); 701 660 ··· 1307 1264 * 1308 1265 * Adds or subtracts an offset value from the current time. 1309 1266 */ 1310 - int timekeeping_inject_offset(struct timespec *ts) 1267 + static int timekeeping_inject_offset(struct timespec64 *ts) 1311 1268 { 1312 1269 struct timekeeper *tk = &tk_core.timekeeper; 1313 1270 unsigned long flags; 1314 - struct timespec64 ts64, tmp; 1271 + struct timespec64 tmp; 1315 1272 int ret = 0; 1316 1273 1317 - if (!timespec_inject_offset_valid(ts)) 1274 + if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC) 1318 1275 return -EINVAL; 1319 - 1320 - ts64 = timespec_to_timespec64(*ts); 1321 1276 1322 1277 raw_spin_lock_irqsave(&timekeeper_lock, flags); 1323 1278 write_seqcount_begin(&tk_core.seq); ··· 1323 1282 timekeeping_forward_now(tk); 1324 1283 1325 1284 /* Make sure the proposed value is valid */ 1326 - tmp = timespec64_add(tk_xtime(tk), ts64); 1327 - if (timespec64_compare(&tk->wall_to_monotonic, &ts64) > 0 || 1285 + tmp = timespec64_add(tk_xtime(tk), *ts); 1286 + if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 || 1328 1287 !timespec64_valid_strict(&tmp)) { 1329 1288 ret = -EINVAL; 1330 1289 goto 
error; 1331 1290 } 1332 1291 1333 - tk_xtime_add(tk, &ts64); 1334 - tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64)); 1292 + tk_xtime_add(tk, ts); 1293 + tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *ts)); 1335 1294 1336 1295 error: /* even if we error out, we forwarded the time, so call update */ 1337 1296 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); ··· 1344 1303 1345 1304 return ret; 1346 1305 } 1347 - EXPORT_SYMBOL(timekeeping_inject_offset); 1306 + 1307 + /* 1308 + * Indicates if there is an offset between the system clock and the hardware 1309 + * clock/persistent clock/rtc. 1310 + */ 1311 + int persistent_clock_is_local; 1312 + 1313 + /* 1314 + * Adjust the time obtained from the CMOS to be UTC time instead of 1315 + * local time. 1316 + * 1317 + * This is ugly, but preferable to the alternatives. Otherwise we 1318 + * would either need to write a program to do it in /etc/rc (and risk 1319 + * confusion if the program gets run more than once; it would also be 1320 + * hard to make the program warp the clock precisely n hours) or 1321 + * compile in the timezone information into the kernel. Bad, bad.... 1322 + * 1323 + * - TYT, 1992-01-01 1324 + * 1325 + * The best thing to do is to keep the CMOS clock in universal time (UTC) 1326 + * as real UNIX machines always do it. This avoids all headaches about 1327 + * daylight saving times and warping kernel clocks. 
1328 + */ 1329 + void timekeeping_warp_clock(void) 1330 + { 1331 + if (sys_tz.tz_minuteswest != 0) { 1332 + struct timespec64 adjust; 1333 + 1334 + persistent_clock_is_local = 1; 1335 + adjust.tv_sec = sys_tz.tz_minuteswest * 60; 1336 + adjust.tv_nsec = 0; 1337 + timekeeping_inject_offset(&adjust); 1338 + } 1339 + } 1348 1340 1349 1341 /** 1350 1342 * __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic ··· 2322 2248 } 2323 2249 2324 2250 /** 2251 + * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex 2252 + */ 2253 + static int timekeeping_validate_timex(struct timex *txc) 2254 + { 2255 + if (txc->modes & ADJ_ADJTIME) { 2256 + /* singleshot must not be used with any other mode bits */ 2257 + if (!(txc->modes & ADJ_OFFSET_SINGLESHOT)) 2258 + return -EINVAL; 2259 + if (!(txc->modes & ADJ_OFFSET_READONLY) && 2260 + !capable(CAP_SYS_TIME)) 2261 + return -EPERM; 2262 + } else { 2263 + /* In order to modify anything, you gotta be super-user! */ 2264 + if (txc->modes && !capable(CAP_SYS_TIME)) 2265 + return -EPERM; 2266 + /* 2267 + * if the quartz is off by more than 10% then 2268 + * something is VERY wrong! 2269 + */ 2270 + if (txc->modes & ADJ_TICK && 2271 + (txc->tick < 900000/USER_HZ || 2272 + txc->tick > 1100000/USER_HZ)) 2273 + return -EINVAL; 2274 + } 2275 + 2276 + if (txc->modes & ADJ_SETOFFSET) { 2277 + /* In order to inject time, you gotta be super-user! */ 2278 + if (!capable(CAP_SYS_TIME)) 2279 + return -EPERM; 2280 + 2281 + /* 2282 + * Validate if a timespec/timeval used to inject a time 2283 + * offset is valid. Offsets can be postive or negative, so 2284 + * we don't check tv_sec. The value of the timeval/timespec 2285 + * is the sum of its fields,but *NOTE*: 2286 + * The field tv_usec/tv_nsec must always be non-negative and 2287 + * we can't have more nanoseconds/microseconds than a second. 
2288 + */ 2289 + if (txc->time.tv_usec < 0) 2290 + return -EINVAL; 2291 + 2292 + if (txc->modes & ADJ_NANO) { 2293 + if (txc->time.tv_usec >= NSEC_PER_SEC) 2294 + return -EINVAL; 2295 + } else { 2296 + if (txc->time.tv_usec >= USEC_PER_SEC) 2297 + return -EINVAL; 2298 + } 2299 + } 2300 + 2301 + /* 2302 + * Check for potential multiplication overflows that can 2303 + * only happen on 64-bit systems: 2304 + */ 2305 + if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) { 2306 + if (LLONG_MIN / PPM_SCALE > txc->freq) 2307 + return -EINVAL; 2308 + if (LLONG_MAX / PPM_SCALE < txc->freq) 2309 + return -EINVAL; 2310 + } 2311 + 2312 + return 0; 2313 + } 2314 + 2315 + 2316 + /** 2325 2317 * do_adjtimex() - Accessor function to NTP __do_adjtimex function 2326 2318 */ 2327 2319 int do_adjtimex(struct timex *txc) ··· 2399 2259 int ret; 2400 2260 2401 2261 /* Validate the data before disabling interrupts */ 2402 - ret = ntp_validate_timex(txc); 2262 + ret = timekeeping_validate_timex(txc); 2403 2263 if (ret) 2404 2264 return ret; 2405 2265 2406 2266 if (txc->modes & ADJ_SETOFFSET) { 2407 - struct timespec delta; 2267 + struct timespec64 delta; 2408 2268 delta.tv_sec = txc->time.tv_sec; 2409 2269 delta.tv_nsec = txc->time.tv_usec; 2410 2270 if (!(txc->modes & ADJ_NANO))
+1 -1
kernel/time/timekeeping.h
··· 11 11 12 12 extern int timekeeping_valid_for_hres(void); 13 13 extern u64 timekeeping_max_deferment(void); 14 - extern int timekeeping_inject_offset(struct timespec *ts); 14 + extern void timekeeping_warp_clock(void); 15 15 extern int timekeeping_suspend(void); 16 16 extern void timekeeping_resume(void); 17 17
+64 -18
kernel/time/timer.c
··· 610 610 } 611 611 612 612 /* Stub timer callback for improperly used timers. */ 613 - static void stub_timer(unsigned long data) 613 + static void stub_timer(struct timer_list *unused) 614 614 { 615 615 WARN_ON(1); 616 616 } ··· 626 626 627 627 switch (state) { 628 628 case ODEBUG_STATE_NOTAVAILABLE: 629 - setup_timer(timer, stub_timer, 0); 629 + timer_setup(timer, stub_timer, 0); 630 630 return true; 631 631 632 632 case ODEBUG_STATE_ACTIVE: ··· 665 665 666 666 switch (state) { 667 667 case ODEBUG_STATE_NOTAVAILABLE: 668 - setup_timer(timer, stub_timer, 0); 668 + timer_setup(timer, stub_timer, 0); 669 669 return true; 670 670 default: 671 671 return false; ··· 929 929 } 930 930 } 931 931 932 + #define MOD_TIMER_PENDING_ONLY 0x01 933 + #define MOD_TIMER_REDUCE 0x02 934 + 932 935 static inline int 933 - __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only) 936 + __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int options) 934 937 { 935 938 struct timer_base *base, *new_base; 936 939 unsigned int idx = UINT_MAX; ··· 953 950 * larger granularity than you would get from adding a new 954 951 * timer with this expiry. 955 952 */ 956 - if (timer->expires == expires) 953 + long diff = timer->expires - expires; 954 + 955 + if (!diff) 956 + return 1; 957 + if (options & MOD_TIMER_REDUCE && diff <= 0) 957 958 return 1; 958 959 959 960 /* ··· 969 962 base = lock_timer_base(timer, &flags); 970 963 forward_timer_base(base); 971 964 965 + if (timer_pending(timer) && (options & MOD_TIMER_REDUCE) && 966 + time_before_eq(timer->expires, expires)) { 967 + ret = 1; 968 + goto out_unlock; 969 + } 970 + 972 971 clk = base->clk; 973 972 idx = calc_wheel_index(expires, clk); 974 973 ··· 984 971 * subsequent call will exit in the expires check above. 
985 972 */ 986 973 if (idx == timer_get_idx(timer)) { 987 - timer->expires = expires; 974 + if (!(options & MOD_TIMER_REDUCE)) 975 + timer->expires = expires; 976 + else if (time_after(timer->expires, expires)) 977 + timer->expires = expires; 988 978 ret = 1; 989 979 goto out_unlock; 990 980 } ··· 997 981 } 998 982 999 983 ret = detach_if_pending(timer, base, false); 1000 - if (!ret && pending_only) 984 + if (!ret && (options & MOD_TIMER_PENDING_ONLY)) 1001 985 goto out_unlock; 1002 986 1003 987 debug_activate(timer, expires); ··· 1058 1042 */ 1059 1043 int mod_timer_pending(struct timer_list *timer, unsigned long expires) 1060 1044 { 1061 - return __mod_timer(timer, expires, true); 1045 + return __mod_timer(timer, expires, MOD_TIMER_PENDING_ONLY); 1062 1046 } 1063 1047 EXPORT_SYMBOL(mod_timer_pending); 1064 1048 ··· 1084 1068 */ 1085 1069 int mod_timer(struct timer_list *timer, unsigned long expires) 1086 1070 { 1087 - return __mod_timer(timer, expires, false); 1071 + return __mod_timer(timer, expires, 0); 1088 1072 } 1089 1073 EXPORT_SYMBOL(mod_timer); 1074 + 1075 + /** 1076 + * timer_reduce - Modify a timer's timeout if it would reduce the timeout 1077 + * @timer: The timer to be modified 1078 + * @expires: New timeout in jiffies 1079 + * 1080 + * timer_reduce() is very similar to mod_timer(), except that it will only 1081 + * modify a running timer if that would reduce the expiration time (it will 1082 + * start a timer that isn't running). 1083 + */ 1084 + int timer_reduce(struct timer_list *timer, unsigned long expires) 1085 + { 1086 + return __mod_timer(timer, expires, MOD_TIMER_REDUCE); 1087 + } 1088 + EXPORT_SYMBOL(timer_reduce); 1090 1089 1091 1090 /** 1092 1091 * add_timer - start a timer ··· 1591 1560 * jiffies, otherwise forward to the next expiry time: 1592 1561 */ 1593 1562 if (time_after(next, jiffies)) { 1594 - /* The call site will increment clock! 
*/ 1595 - base->clk = jiffies - 1; 1563 + /* 1564 + * The call site will increment base->clk and then 1565 + * terminate the expiry loop immediately. 1566 + */ 1567 + base->clk = jiffies; 1596 1568 return 0; 1597 1569 } 1598 1570 base->clk = next; ··· 1702 1668 raise_softirq(TIMER_SOFTIRQ); 1703 1669 } 1704 1670 1705 - static void process_timeout(unsigned long __data) 1671 + /* 1672 + * Since schedule_timeout()'s timer is defined on the stack, it must store 1673 + * the target task on the stack as well. 1674 + */ 1675 + struct process_timer { 1676 + struct timer_list timer; 1677 + struct task_struct *task; 1678 + }; 1679 + 1680 + static void process_timeout(struct timer_list *t) 1706 1681 { 1707 - wake_up_process((struct task_struct *)__data); 1682 + struct process_timer *timeout = from_timer(timeout, t, timer); 1683 + 1684 + wake_up_process(timeout->task); 1708 1685 } 1709 1686 1710 1687 /** ··· 1749 1704 */ 1750 1705 signed long __sched schedule_timeout(signed long timeout) 1751 1706 { 1752 - struct timer_list timer; 1707 + struct process_timer timer; 1753 1708 unsigned long expire; 1754 1709 1755 1710 switch (timeout) ··· 1783 1738 1784 1739 expire = timeout + jiffies; 1785 1740 1786 - setup_timer_on_stack(&timer, process_timeout, (unsigned long)current); 1787 - __mod_timer(&timer, expire, false); 1741 + timer.task = current; 1742 + timer_setup_on_stack(&timer.timer, process_timeout, 0); 1743 + __mod_timer(&timer.timer, expire, 0); 1788 1744 schedule(); 1789 - del_singleshot_timer_sync(&timer); 1745 + del_singleshot_timer_sync(&timer.timer); 1790 1746 1791 1747 /* Remove the timer from the object tracker */ 1792 - destroy_timer_on_stack(&timer); 1748 + destroy_timer_on_stack(&timer.timer); 1793 1749 1794 1750 timeout = expire - jiffies; 1795 1751
+12 -17
kernel/workqueue.c
··· 1493 1493 } 1494 1494 EXPORT_SYMBOL(queue_work_on); 1495 1495 1496 - void delayed_work_timer_fn(unsigned long __data) 1496 + void delayed_work_timer_fn(struct timer_list *t) 1497 1497 { 1498 - struct delayed_work *dwork = (struct delayed_work *)__data; 1498 + struct delayed_work *dwork = from_timer(dwork, t, timer); 1499 1499 1500 1500 /* should have been called from irqsafe timer with irq already off */ 1501 1501 __queue_work(dwork->cpu, dwork->wq, &dwork->work); ··· 1509 1509 struct work_struct *work = &dwork->work; 1510 1510 1511 1511 WARN_ON_ONCE(!wq); 1512 - WARN_ON_ONCE(timer->function != delayed_work_timer_fn || 1513 - timer->data != (unsigned long)dwork); 1512 + WARN_ON_ONCE(timer->function != (TIMER_FUNC_TYPE)delayed_work_timer_fn); 1514 1513 WARN_ON_ONCE(timer_pending(timer)); 1515 1514 WARN_ON_ONCE(!list_empty(&work->entry)); 1516 1515 ··· 1832 1833 wake_up_process(worker->task); 1833 1834 } 1834 1835 1835 - static void idle_worker_timeout(unsigned long __pool) 1836 + static void idle_worker_timeout(struct timer_list *t) 1836 1837 { 1837 - struct worker_pool *pool = (void *)__pool; 1838 + struct worker_pool *pool = from_timer(pool, t, idle_timer); 1838 1839 1839 1840 spin_lock_irq(&pool->lock); 1840 1841 ··· 1880 1881 } 1881 1882 } 1882 1883 1883 - static void pool_mayday_timeout(unsigned long __pool) 1884 + static void pool_mayday_timeout(struct timer_list *t) 1884 1885 { 1885 - struct worker_pool *pool = (void *)__pool; 1886 + struct worker_pool *pool = from_timer(pool, t, mayday_timer); 1886 1887 struct work_struct *work; 1887 1888 1888 1889 spin_lock_irq(&pool->lock); ··· 3222 3223 INIT_LIST_HEAD(&pool->idle_list); 3223 3224 hash_init(pool->busy_hash); 3224 3225 3225 - setup_deferrable_timer(&pool->idle_timer, idle_worker_timeout, 3226 - (unsigned long)pool); 3226 + timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE); 3227 3227 3228 - setup_timer(&pool->mayday_timer, pool_mayday_timeout, 3229 - (unsigned long)pool); 3228 + 
timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0); 3230 3229 3231 3230 mutex_init(&pool->attach_mutex); 3232 3231 INIT_LIST_HEAD(&pool->workers); ··· 5367 5370 */ 5368 5371 #ifdef CONFIG_WQ_WATCHDOG 5369 5372 5370 - static void wq_watchdog_timer_fn(unsigned long data); 5371 - 5372 5373 static unsigned long wq_watchdog_thresh = 30; 5373 - static struct timer_list wq_watchdog_timer = 5374 - TIMER_DEFERRED_INITIALIZER(wq_watchdog_timer_fn, 0, 0); 5374 + static struct timer_list wq_watchdog_timer; 5375 5375 5376 5376 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES; 5377 5377 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES; ··· 5382 5388 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; 5383 5389 } 5384 5390 5385 - static void wq_watchdog_timer_fn(unsigned long data) 5391 + static void wq_watchdog_timer_fn(struct timer_list *unused) 5386 5392 { 5387 5393 unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ; 5388 5394 bool lockup_detected = false; ··· 5484 5490 5485 5491 static void wq_watchdog_init(void) 5486 5492 { 5493 + timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE); 5487 5494 wq_watchdog_set_thresh(wq_watchdog_thresh); 5488 5495 } 5489 5496
+1 -1
lib/random32.c
··· 215 215 216 216 static void __prandom_timer(unsigned long dontcare); 217 217 218 - static DEFINE_TIMER(seed_timer, __prandom_timer, 0, 0); 218 + static DEFINE_TIMER(seed_timer, __prandom_timer); 219 219 220 220 static void __prandom_timer(unsigned long dontcare) 221 221 {
+1 -1
net/atm/mpc.c
··· 121 121 122 122 struct mpoa_client *mpcs = NULL; /* FIXME */ 123 123 static struct atm_mpoa_qos *qos_head = NULL; 124 - static DEFINE_TIMER(mpc_timer, NULL, 0, 0); 124 + static DEFINE_TIMER(mpc_timer, NULL); 125 125 126 126 127 127 static struct mpoa_client *find_mpc_by_itfnum(int itf)
+1 -1
net/decnet/dn_route.c
··· 131 131 static unsigned int dn_rt_hash_mask; 132 132 133 133 static struct timer_list dn_route_timer; 134 - static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush, 0, 0); 134 + static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush); 135 135 int decnet_dst_gc_interval = 2; 136 136 137 137 static struct dst_ops dn_dst_ops = {
+1 -1
net/ipv6/ip6_flowlabel.c
··· 47 47 static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1]; 48 48 49 49 static void ip6_fl_gc(unsigned long dummy); 50 - static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc, 0, 0); 50 + static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc); 51 51 52 52 /* FL hash table lock: it protects only of GC */ 53 53
+5 -5
net/netfilter/ipvs/ip_vs_conn.c
··· 104 104 spin_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); 105 105 } 106 106 107 - static void ip_vs_conn_expire(unsigned long data); 107 + static void ip_vs_conn_expire(struct timer_list *t); 108 108 109 109 /* 110 110 * Returns hash value for IPVS connection entry ··· 457 457 static void __ip_vs_conn_put_notimer(struct ip_vs_conn *cp) 458 458 { 459 459 __ip_vs_conn_put(cp); 460 - ip_vs_conn_expire((unsigned long)cp); 460 + ip_vs_conn_expire(&cp->timer); 461 461 } 462 462 463 463 /* ··· 817 817 kmem_cache_free(ip_vs_conn_cachep, cp); 818 818 } 819 819 820 - static void ip_vs_conn_expire(unsigned long data) 820 + static void ip_vs_conn_expire(struct timer_list *t) 821 821 { 822 - struct ip_vs_conn *cp = (struct ip_vs_conn *)data; 822 + struct ip_vs_conn *cp = from_timer(cp, t, timer); 823 823 struct netns_ipvs *ipvs = cp->ipvs; 824 824 825 825 /* ··· 909 909 } 910 910 911 911 INIT_HLIST_NODE(&cp->c_list); 912 - setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp); 912 + timer_setup(&cp->timer, ip_vs_conn_expire, 0); 913 913 cp->ipvs = ipvs; 914 914 cp->af = p->af; 915 915 cp->daf = dest_af;
+3 -4
net/netfilter/ipvs/ip_vs_ctl.c
··· 1146 1146 return 0; 1147 1147 } 1148 1148 1149 - static void ip_vs_dest_trash_expire(unsigned long data) 1149 + static void ip_vs_dest_trash_expire(struct timer_list *t) 1150 1150 { 1151 - struct netns_ipvs *ipvs = (struct netns_ipvs *)data; 1151 + struct netns_ipvs *ipvs = from_timer(ipvs, t, dest_trash_timer); 1152 1152 struct ip_vs_dest *dest, *next; 1153 1153 unsigned long now = jiffies; 1154 1154 ··· 4019 4019 4020 4020 INIT_LIST_HEAD(&ipvs->dest_trash); 4021 4021 spin_lock_init(&ipvs->dest_trash_lock); 4022 - setup_timer(&ipvs->dest_trash_timer, ip_vs_dest_trash_expire, 4023 - (unsigned long) ipvs); 4022 + timer_setup(&ipvs->dest_trash_timer, ip_vs_dest_trash_expire, 0); 4024 4023 atomic_set(&ipvs->ftpsvc_counter, 0); 4025 4024 atomic_set(&ipvs->nullsvc_counter, 0); 4026 4025 atomic_set(&ipvs->conn_out_counter, 0);
+3 -3
net/netfilter/ipvs/ip_vs_est.c
··· 97 97 } 98 98 99 99 100 - static void estimation_timer(unsigned long arg) 100 + static void estimation_timer(struct timer_list *t) 101 101 { 102 102 struct ip_vs_estimator *e; 103 103 struct ip_vs_stats *s; 104 104 u64 rate; 105 - struct netns_ipvs *ipvs = (struct netns_ipvs *)arg; 105 + struct netns_ipvs *ipvs = from_timer(ipvs, t, est_timer); 106 106 107 107 spin_lock(&ipvs->est_lock); 108 108 list_for_each_entry(e, &ipvs->est_list, list) { ··· 192 192 { 193 193 INIT_LIST_HEAD(&ipvs->est_list); 194 194 spin_lock_init(&ipvs->est_lock); 195 - setup_timer(&ipvs->est_timer, estimation_timer, (unsigned long)ipvs); 195 + timer_setup(&ipvs->est_timer, estimation_timer, 0); 196 196 mod_timer(&ipvs->est_timer, jiffies + 2 * HZ); 197 197 return 0; 198 198 }
+6 -5
net/netfilter/ipvs/ip_vs_lblc.c
··· 106 106 struct rcu_head rcu_head; 107 107 struct hlist_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */ 108 108 struct timer_list periodic_timer; /* collect stale entries */ 109 + struct ip_vs_service *svc; /* pointer back to service */ 109 110 atomic_t entries; /* number of entries */ 110 111 int max_size; /* maximum size of entries */ 111 112 int rover; /* rover for expire check */ ··· 295 294 * of the table. 296 295 * The full expiration check is for this purpose now. 297 296 */ 298 - static void ip_vs_lblc_check_expire(unsigned long data) 297 + static void ip_vs_lblc_check_expire(struct timer_list *t) 299 298 { 300 - struct ip_vs_service *svc = (struct ip_vs_service *) data; 301 - struct ip_vs_lblc_table *tbl = svc->sched_data; 299 + struct ip_vs_lblc_table *tbl = from_timer(tbl, t, periodic_timer); 300 + struct ip_vs_service *svc = tbl->svc; 302 301 unsigned long now = jiffies; 303 302 int goal; 304 303 int i, j; ··· 370 369 tbl->rover = 0; 371 370 tbl->counter = 1; 372 371 tbl->dead = 0; 372 + tbl->svc = svc; 373 373 374 374 /* 375 375 * Hook periodic timer for garbage collection 376 376 */ 377 - setup_timer(&tbl->periodic_timer, ip_vs_lblc_check_expire, 378 - (unsigned long)svc); 377 + timer_setup(&tbl->periodic_timer, ip_vs_lblc_check_expire, 0); 379 378 mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL); 380 379 381 380 return 0;
+6 -5
net/netfilter/ipvs/ip_vs_lblcr.c
··· 278 278 atomic_t entries; /* number of entries */ 279 279 int max_size; /* maximum size of entries */ 280 280 struct timer_list periodic_timer; /* collect stale entries */ 281 + struct ip_vs_service *svc; /* pointer back to service */ 281 282 int rover; /* rover for expire check */ 282 283 int counter; /* counter for no expire */ 283 284 bool dead; ··· 459 458 * of the table. 460 459 * The full expiration check is for this purpose now. 461 460 */ 462 - static void ip_vs_lblcr_check_expire(unsigned long data) 461 + static void ip_vs_lblcr_check_expire(struct timer_list *t) 463 462 { 464 - struct ip_vs_service *svc = (struct ip_vs_service *) data; 465 - struct ip_vs_lblcr_table *tbl = svc->sched_data; 463 + struct ip_vs_lblcr_table *tbl = from_timer(tbl, t, periodic_timer); 464 + struct ip_vs_service *svc = tbl->svc; 466 465 unsigned long now = jiffies; 467 466 int goal; 468 467 int i, j; ··· 533 532 tbl->rover = 0; 534 533 tbl->counter = 1; 535 534 tbl->dead = 0; 535 + tbl->svc = svc; 536 536 537 537 /* 538 538 * Hook periodic timer for garbage collection 539 539 */ 540 - setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire, 541 - (unsigned long)svc); 540 + timer_setup(&tbl->periodic_timer, ip_vs_lblcr_check_expire, 0); 542 541 mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL); 543 542 544 543 return 0;
+1 -1
net/netrom/nr_loopback.c
··· 18 18 static void nr_loopback_timer(unsigned long); 19 19 20 20 static struct sk_buff_head loopback_queue; 21 - static DEFINE_TIMER(loopback_timer, nr_loopback_timer, 0, 0); 21 + static DEFINE_TIMER(loopback_timer, nr_loopback_timer); 22 22 23 23 void __init nr_loopback_init(void) 24 24 {
+1 -1
security/keys/gc.c
··· 30 30 * Reaper for links from keyrings to dead keys. 31 31 */ 32 32 static void key_gc_timer_func(unsigned long); 33 - static DEFINE_TIMER(key_gc_timer, key_gc_timer_func, 0, 0); 33 + static DEFINE_TIMER(key_gc_timer, key_gc_timer_func); 34 34 35 35 static time_t key_gc_next_run = LONG_MAX; 36 36 static struct key_type *key_gc_dead_keytype;
+1 -1
sound/oss/midibuf.c
··· 52 52 static void midi_poll(unsigned long dummy); 53 53 54 54 55 - static DEFINE_TIMER(poll_timer, midi_poll, 0, 0); 55 + static DEFINE_TIMER(poll_timer, midi_poll); 56 56 57 57 static volatile int open_devs; 58 58 static DEFINE_SPINLOCK(lock);
+1 -1
sound/oss/soundcard.c
··· 662 662 } 663 663 664 664 665 - static DEFINE_TIMER(seq_timer, do_sequencer_timer, 0, 0); 665 + static DEFINE_TIMER(seq_timer, do_sequencer_timer); 666 666 667 667 void request_sound_timer(int count) 668 668 {
+1 -1
sound/oss/sys_timer.c
··· 28 28 29 29 static void poll_def_tmr(unsigned long dummy); 30 30 static DEFINE_SPINLOCK(lock); 31 - static DEFINE_TIMER(def_tmr, poll_def_tmr, 0, 0); 31 + static DEFINE_TIMER(def_tmr, poll_def_tmr); 32 32 33 33 static unsigned long 34 34 tmr2ticks(int tmr_value)
+1 -1
sound/oss/uart6850.c
··· 78 78 static void poll_uart6850(unsigned long dummy); 79 79 80 80 81 - static DEFINE_TIMER(uart6850_timer, poll_uart6850, 0, 0); 81 + static DEFINE_TIMER(uart6850_timer, poll_uart6850); 82 82 83 83 static void uart6850_input_loop(void) 84 84 {