Merge master.kernel.org:/pub/scm/linux/kernel/git/tglx/hrtimer-2.6

+25 -25
+4 -8
include/linux/hrtimer.h
···
 49   49     * struct hrtimer - the basic hrtimer structure
 50   50     *
 51   51     * @node:      red black tree node for time ordered insertion
 52         -  * @list:      list head for easier access to the time ordered list,
 53         -  *             without walking the red black tree.
 54   52     * @expires:   the absolute expiry time in the hrtimers internal
 55   53     *             representation. The time is related to the clock on
 56   54     *             which the timer is based.
···
 61   63     */
 62   64    struct hrtimer {
 63   65            struct rb_node          node;
 64         -        struct list_head        list;
 65   66            ktime_t                 expires;
 66   67            enum hrtimer_state      state;
 67   68            int                     (*function)(void *);
···
 75   78     * to a base on another cpu.
 76   79     * @lock:       lock protecting the base and associated timers
 77   80     * @active:     red black tree root node for the active timers
 78         -  * @pending:    list of pending timers for simple time ordered access
       81   +  * @first:      pointer to the timer node which expires first
 79   82     * @resolution: the resolution of the clock, in nanoseconds
 80   83     * @get_time:   function to retrieve the current time of the clock
 81   84     * @curr_timer: the timer which is executing a callback right now
···
 84   87            clockid_t               index;
 85   88            spinlock_t              lock;
 86   89            struct rb_root          active;
 87         -        struct list_head        pending;
 88         -        unsigned long           resolution;
       90   +        struct rb_node          *first;
       91   +        ktime_t                 resolution;
 89   92            ktime_t                 (*get_time)(void);
 90   93            struct hrtimer          *curr_timer;
 91   94    };
···
122  125    }
123  126
124  127    /* Forward a hrtimer so it expires after now: */
125         - extern unsigned long hrtimer_forward(struct hrtimer *timer,
126         -                                      const ktime_t interval);
     128    + extern unsigned long hrtimer_forward(struct hrtimer *timer, ktime_t interval);
127  129
128  130    /* Precise sleep: */
129  131    extern long hrtimer_nanosleep(struct timespec *rqtp,
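For readers following the data-structure change above: struct hrtimer_base drops the sorted pending list and instead caches a pointer to the rb_node that expires first. Below is a stand-alone user-space sketch (not kernel code) of that "ordered tree plus cached first pointer" idea; an unbalanced binary search tree stands in for the kernel's rbtree, and all names (toy_timer, toy_base, toy_enqueue, toy_pop_first) are made up for illustration.

#include <stdio.h>
#include <stdint.h>

struct toy_timer {
        int64_t expires;                        /* mimics ktime_t.tv64 */
        struct toy_timer *left, *right, *parent;
};

struct toy_base {
        struct toy_timer *root;
        struct toy_timer *first;                /* earliest expiring timer */
};

/* leftmost node == smallest expiry in the subtree */
static struct toy_timer *leftmost(struct toy_timer *t)
{
        while (t && t->left)
                t = t->left;
        return t;
}

static void toy_enqueue(struct toy_base *base, struct toy_timer *timer)
{
        struct toy_timer **link = &base->root, *parent = NULL;

        while (*link) {
                parent = *link;
                /* equal expiry times go to the right, as in enqueue_hrtimer() */
                link = timer->expires < parent->expires ?
                        &parent->left : &parent->right;
        }
        timer->parent = parent;
        *link = timer;

        /* keep the cached pointer valid instead of searching on every expiry */
        if (!base->first || timer->expires < base->first->expires)
                base->first = timer;
}

/* remove and return the earliest timer, advancing the cached pointer */
static struct toy_timer *toy_pop_first(struct toy_base *base)
{
        struct toy_timer *timer = base->first;

        if (!timer)
                return NULL;

        /* successor of the leftmost node becomes the new 'first' */
        base->first = timer->right ? leftmost(timer->right) : timer->parent;

        /* the leftmost node has no left child: splice its right subtree up */
        if (timer->parent)
                timer->parent->left = timer->right;
        else
                base->root = timer->right;
        if (timer->right)
                timer->right->parent = timer->parent;

        return timer;
}

int main(void)
{
        struct toy_base base = { NULL, NULL };
        struct toy_timer t[4] = {
                { .expires = 30 }, { .expires = 10 },
                { .expires = 40 }, { .expires = 20 },
        };
        struct toy_timer *p;
        int i;

        for (i = 0; i < 4; i++)
                toy_enqueue(&base, &t[i]);

        /* run_hrtimer_queue()-style expiry loop: always take base.first */
        while ((p = toy_pop_first(&base)))
                printf("expire %lld\n", (long long)p->expires);
        return 0;
}

The same invariant maintenance shows up in the hrtimer.c hunks below: enqueue_hrtimer() updates base->first when the new timer expires earlier than the cached one, and __remove_hrtimer() advances it with rb_next() before rb_erase().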
+2 -2
include/linux/ktime.h
···
272  272     * idea of the (in)accuracy of timers. Timer values are rounded up to
273  273     * this resolution values.
274  274     */
275         - #define KTIME_REALTIME_RES      (NSEC_PER_SEC/HZ)
276         - #define KTIME_MONOTONIC_RES     (NSEC_PER_SEC/HZ)
     275    + #define KTIME_REALTIME_RES      (ktime_t){ .tv64 = TICK_NSEC }
     276    + #define KTIME_MONOTONIC_RES     (ktime_t){ .tv64 = TICK_NSEC }
277  277
278  278    /* Get the monotonic time in timespec format: */
279  279    extern void ktime_get_ts(struct timespec *ts);
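The ktime.h hunk turns the resolution constants from plain nanosecond counts into ktime_t values, which is what lets hrtimer.c compare them against intervals via .tv64 (see the new clamp in hrtimer_forward() below). A minimal user-space sketch of that pattern, assuming the scalar 64-bit ktime_t representation; my_ktime_t, MY_HZ, MY_TICK_NSEC and MY_MONOTONIC_RES are illustrative stand-ins, not the kernel definitions (the real TICK_NSEC also accounts for HZ rounding):

#include <stdio.h>
#include <stdint.h>

/* scalar 64-bit flavour of ktime_t */
typedef union {
        int64_t tv64;
} my_ktime_t;

#define MY_HZ           250
#define MY_TICK_NSEC    (1000000000L / MY_HZ)

/* analogue of the new KTIME_MONOTONIC_RES: a ktime_t compound literal */
#define MY_MONOTONIC_RES        (my_ktime_t){ .tv64 = MY_TICK_NSEC }

int main(void)
{
        my_ktime_t interval = { .tv64 = 1000 }; /* 1 us, finer than one tick */

        /* same shape as the clamp added to hrtimer_forward() */
        if (interval.tv64 < MY_MONOTONIC_RES.tv64)
                interval.tv64 = MY_MONOTONIC_RES.tv64;

        printf("interval clamped to %lld ns\n", (long long)interval.tv64);
        return 0;
}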
+19 -15
kernel/hrtimer.c
···
275  275     * The number of overruns is added to the overrun field.
276  276     */
277  277    unsigned long
278         - hrtimer_forward(struct hrtimer *timer, const ktime_t interval)
     278    + hrtimer_forward(struct hrtimer *timer, ktime_t interval)
279  279    {
280  280            unsigned long orun = 1;
281  281            ktime_t delta, now;
···
286  286
287  287            if (delta.tv64 < 0)
288  288                    return 0;
     289    +
     290    +       if (interval.tv64 < timer->base->resolution.tv64)
     291    +               interval.tv64 = timer->base->resolution.tv64;
289  292
290  293            if (unlikely(delta.tv64 >= interval.tv64)) {
291  294                    nsec_t incr = ktime_to_ns(interval);
···
317  314    static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
318  315    {
319  316            struct rb_node **link = &base->active.rb_node;
320         -       struct list_head *prev = &base->pending;
321  317            struct rb_node *parent = NULL;
322  318            struct hrtimer *entry;
323  319
···
332  330                     */
333  331                    if (timer->expires.tv64 < entry->expires.tv64)
334  332                            link = &(*link)->rb_left;
335         -               else {
     333    +               else
336  334                            link = &(*link)->rb_right;
337         -                       prev = &entry->list;
338         -               }
339  335            }
340  336
341  337            /*
342         -        * Insert the timer to the rbtree and to the sorted list:
     338    +        * Insert the timer to the rbtree and check whether it
     339    +        * replaces the first pending timer
343  340             */
344  341            rb_link_node(&timer->node, parent, link);
345  342            rb_insert_color(&timer->node, &base->active);
346         -       list_add(&timer->list, prev);
347  343
348  344            timer->state = HRTIMER_PENDING;
349         - }
350  345
     346    +       if (!base->first || timer->expires.tv64 <
     347    +           rb_entry(base->first, struct hrtimer, node)->expires.tv64)
     348    +               base->first = &timer->node;
     349    + }
351  350
352  351    /*
353  352     * __remove_hrtimer - internal function to remove a timer
···
358  355    static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
359  356    {
360  357            /*
361         -        * Remove the timer from the sorted list and from the rbtree:
     358    +        * Remove the timer from the rbtree and replace the
     359    +        * first entry pointer if necessary.
362  360             */
363         -       list_del(&timer->list);
     361    +       if (base->first == &timer->node)
     362    +               base->first = rb_next(&timer->node);
364  363            rb_erase(&timer->node, &base->active);
365  364    }
···
521  516    {
522  517            struct hrtimer_base *bases;
523  518
524         -       tp->tv_sec = 0;
525  519            bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
526         -       tp->tv_nsec = bases[which_clock].resolution;
     520    +       *tp = ktime_to_timespec(bases[which_clock].resolution);
527  521
528  522            return 0;
529  523    }
···
533  529    static inline void run_hrtimer_queue(struct hrtimer_base *base)
534  530    {
535  531            ktime_t now = base->get_time();
     532    +       struct rb_node *node;
536  533
537  534            spin_lock_irq(&base->lock);
538  535
539         -       while (!list_empty(&base->pending)) {
     536    +       while ((node = base->first)) {
540  537                    struct hrtimer *timer;
541  538                    int (*fn)(void *);
542  539                    int restart;
543  540                    void *data;
544  541
545         -               timer = list_entry(base->pending.next, struct hrtimer, list);
     542    +               timer = rb_entry(node, struct hrtimer, node);
546  543                    if (now.tv64 <= timer->expires.tv64)
547  544                            break;
···
737  732
738  733            for (i = 0; i < MAX_HRTIMER_BASES; i++) {
739  734                    spin_lock_init(&base->lock);
740         -               INIT_LIST_HEAD(&base->pending);
741  735                    base++;
742  736            }
743  737    }
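To make the arithmetic in the hrtimer_forward() hunk concrete, here is a rough user-space sketch of what forwarding computes: push a periodic timer's expiry past "now" in whole intervals, never in steps finer than the clock resolution, and report how many periods were skipped. Plain int64_t nanoseconds stand in for ktime_t, and forward_timer() plus the numbers in main() are made up for illustration; the kernel version works on ktime_t values and its own 64-bit division helpers.

#include <stdio.h>
#include <stdint.h>

static uint64_t forward_timer(int64_t *expires, int64_t interval,
                              int64_t now, int64_t resolution)
{
        uint64_t orun;
        int64_t delta = now - *expires;

        if (delta < 0)                  /* not expired yet: nothing to do */
                return 0;

        if (interval < resolution)      /* the clamp added by this change */
                interval = resolution;

        orun = delta / interval + 1;    /* whole periods needed to pass 'now' */
        *expires += (int64_t)orun * interval;
        return orun;
}

int main(void)
{
        int64_t expires = 1000000;      /* first expiry at 1 ms */
        uint64_t orun;

        /* period of 100 us, but clock resolution is 1 ms; woken at 2.25 ms */
        orun = forward_timer(&expires, 100000, 2250000, 1000000);

        printf("overruns: %llu, new expiry: %lld ns\n",
               (unsigned long long)orun, (long long)expires);
        return 0;
}

With those numbers the clamp is what matters: without it the timer would be forwarded in thirteen 100 us steps even though the clock cannot resolve anything finer than 1 ms; with it the expiry jumps by two whole milliseconds to 3 ms.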