clockevents: make device shutdown robust

The device shutdown does not clean up the next_event variable of the
clock event device. So when the device is reactivated, the possibly
stale next_event value can prevent the device from being reprogrammed,
as it claims to be waiting on an event already.

This is the root cause of the resurfacing suspend/resume problem,
where systems need a key press to come back to life.

Fix this by setting next_event to KTIME_MAX when the device is shut
down. Use a separate function for shutdown which takes care of that,
and only keep the direct set-mode call in the broadcast code, where we
cannot touch the next_event value.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

+19 -8
+11 -1
kernel/time/clockevents.c
··· 72 72 } 73 73 74 74 /** 75 + * clockevents_shutdown - shutdown the device and clear next_event 76 + * @dev: device to shutdown 77 + */ 78 + void clockevents_shutdown(struct clock_event_device *dev) 79 + { 80 + clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); 81 + dev->next_event.tv64 = KTIME_MAX; 82 + } 83 + 84 + /** 75 85 * clockevents_program_event - Reprogram the clock event device. 76 86 * @expires: absolute expiry time (monotonic clock) 77 87 * ··· 216 206 217 207 if (new) { 218 208 BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED); 219 - clockevents_set_mode(new, CLOCK_EVT_MODE_SHUTDOWN); 209 + clockevents_shutdown(new); 220 210 } 221 211 local_irq_restore(flags); 222 212 }
+4 -5
kernel/time/tick-broadcast.c
··· 236 236 if (!cpu_isset(cpu, tick_broadcast_mask)) { 237 237 cpu_set(cpu, tick_broadcast_mask); 238 238 if (td->mode == TICKDEV_MODE_PERIODIC) 239 - clockevents_set_mode(dev, 240 - CLOCK_EVT_MODE_SHUTDOWN); 239 + clockevents_shutdown(dev); 241 240 } 242 241 if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE) 243 242 tick_broadcast_force = 1; ··· 253 254 254 255 if (cpus_empty(tick_broadcast_mask)) { 255 256 if (!bc_stopped) 256 - clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN); 257 + clockevents_shutdown(bc); 257 258 } else if (bc_stopped) { 258 259 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) 259 260 tick_broadcast_start_periodic(bc); ··· 305 306 306 307 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) { 307 308 if (bc && cpus_empty(tick_broadcast_mask)) 308 - clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN); 309 + clockevents_shutdown(bc); 309 310 } 310 311 311 312 spin_unlock_irqrestore(&tick_broadcast_lock, flags); ··· 320 321 321 322 bc = tick_broadcast_device.evtdev; 322 323 if (bc) 323 - clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN); 324 + clockevents_shutdown(bc); 324 325 325 326 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 326 327 }
+2 -2
kernel/time/tick-common.c
··· 249 249 * not give it back to the clockevents layer ! 250 250 */ 251 251 if (tick_is_broadcast_device(curdev)) { 252 - clockevents_set_mode(curdev, CLOCK_EVT_MODE_SHUTDOWN); 252 + clockevents_shutdown(curdev); 253 253 curdev = NULL; 254 254 } 255 255 clockevents_exchange_device(curdev, newdev); ··· 311 311 unsigned long flags; 312 312 313 313 spin_lock_irqsave(&tick_device_lock, flags); 314 - clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_SHUTDOWN); 314 + clockevents_shutdown(td->evtdev); 315 315 spin_unlock_irqrestore(&tick_device_lock, flags); 316 316 } 317 317
+2
kernel/time/tick-internal.h
··· 10 10 extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast); 11 11 extern void tick_handle_periodic(struct clock_event_device *dev); 12 12 13 + extern void clockevents_shutdown(struct clock_event_device *dev); 14 + 13 15 /* 14 16 * NO_HZ / high resolution timer shared code 15 17 */