Merge master.kernel.org:/pub/scm/linux/kernel/git/tglx/hrtimer-2.6

3 files changed, 25 insertions(+), 25 deletions(-)

include/linux/hrtimer.h (+4 -8)
···
  * struct hrtimer - the basic hrtimer structure
  *
  * @node:	red black tree node for time ordered insertion
- * @list:	list head for easier access to the time ordered list,
- *		without walking the red black tree.
  * @expires:	the absolute expiry time in the hrtimers internal
  *		representation. The time is related to the clock on
  *		which the timer is based.
···
  */
 struct hrtimer {
 	struct rb_node		node;
-	struct list_head	list;
 	ktime_t			expires;
 	enum hrtimer_state	state;
 	int			(*function)(void *);
···
  *			to a base on another cpu.
  * @lock:		lock protecting the base and associated timers
  * @active:		red black tree root node for the active timers
- * @pending:		list of pending timers for simple time ordered access
+ * @first:		pointer to the timer node which expires first
  * @resolution:		the resolution of the clock, in nanoseconds
  * @get_time:		function to retrieve the current time of the clock
  * @curr_timer:		the timer which is executing a callback right now
···
 	clockid_t		index;
 	spinlock_t		lock;
 	struct rb_root		active;
-	struct list_head	pending;
-	unsigned long		resolution;
+	struct rb_node		*first;
+	ktime_t			resolution;
 	ktime_t			(*get_time)(void);
 	struct hrtimer		*curr_timer;
 };
···
 }
 
 /* Forward a hrtimer so it expires after now: */
-extern unsigned long hrtimer_forward(struct hrtimer *timer,
-				     const ktime_t interval);
+extern unsigned long hrtimer_forward(struct hrtimer *timer, ktime_t interval);
 
 /* Precise sleep: */
 extern long hrtimer_nanosleep(struct timespec *rqtp,
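A note on the prototype change above: dropping const from the by-value ktime_t
argument is what lets hrtimer_forward() clamp its own copy of interval, as the
kernel/hrtimer.c hunk further down does. A minimal userspace sketch of that
idiom, with a hypothetical stand-in for ktime_t and an arbitrary resolution
value:

	#include <stdio.h>

	typedef union { long long tv64; } ktime_t; /* stand-in for the kernel type */

	/* interval is passed by value; without const the callee may clamp
	 * its own copy, as hrtimer_forward() now does with the base
	 * resolution. The 1000000 ns value is an arbitrary example. */
	static long long forward(ktime_t interval)
	{
		ktime_t res = { .tv64 = 1000000 };

		if (interval.tv64 < res.tv64)
			interval.tv64 = res.tv64;
		return interval.tv64;
	}

	int main(void)
	{
		ktime_t tiny = { .tv64 = 10 };

		printf("interval clamped to %lld ns\n", forward(tiny));
		return 0;
	}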

include/linux/ktime.h (+2 -2)
···
  * idea of the (in)accuracy of timers. Timer values are rounded up to
  * this resolution values.
  */
-#define KTIME_REALTIME_RES	(NSEC_PER_SEC/HZ)
-#define KTIME_MONOTONIC_RES	(NSEC_PER_SEC/HZ)
+#define KTIME_REALTIME_RES	(ktime_t){ .tv64 = TICK_NSEC }
+#define KTIME_MONOTONIC_RES	(ktime_t){ .tv64 = TICK_NSEC }
 
 /* Get the monotonic time in timespec format: */
 extern void ktime_get_ts(struct timespec *ts);
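The resolution macros now expand to ktime_t compound literals rather than
plain nanosecond integers, so they can be assigned straight into the new
ktime_t resolution field. A small userspace sketch of the compound-literal
idiom, with stand-in definitions mirroring the change:

	#include <stdio.h>

	/* Hypothetical stand-ins for the kernel definitions. */
	typedef union { long long tv64; } ktime_t;
	#define TICK_NSEC		1000000
	#define KTIME_MONOTONIC_RES	(ktime_t){ .tv64 = TICK_NSEC }

	int main(void)
	{
		/* The compound literal assigns directly into a ktime_t,
		 * so base->resolution = KTIME_MONOTONIC_RES needs no
		 * conversion helper. */
		ktime_t res = KTIME_MONOTONIC_RES;

		printf("resolution: %lld ns\n", res.tv64);
		return 0;
	}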

kernel/hrtimer.c (+19 -15)
···
  * The number of overruns is added to the overrun field.
  */
 unsigned long
-hrtimer_forward(struct hrtimer *timer, const ktime_t interval)
+hrtimer_forward(struct hrtimer *timer, ktime_t interval)
 {
 	unsigned long orun = 1;
 	ktime_t delta, now;
···
 	if (delta.tv64 < 0)
 		return 0;
 
+	if (interval.tv64 < timer->base->resolution.tv64)
+		interval.tv64 = timer->base->resolution.tv64;
+
 	if (unlikely(delta.tv64 >= interval.tv64)) {
 		nsec_t incr = ktime_to_ns(interval);
···
 static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
 {
 	struct rb_node **link = &base->active.rb_node;
-	struct list_head *prev = &base->pending;
 	struct rb_node *parent = NULL;
 	struct hrtimer *entry;
···
 	 */
 		if (timer->expires.tv64 < entry->expires.tv64)
 			link = &(*link)->rb_left;
-		else {
+		else
 			link = &(*link)->rb_right;
-			prev = &entry->list;
-		}
 	}
 
 	/*
-	 * Insert the timer to the rbtree and to the sorted list:
+	 * Insert the timer to the rbtree and check whether it
+	 * replaces the first pending timer
 	 */
 	rb_link_node(&timer->node, parent, link);
 	rb_insert_color(&timer->node, &base->active);
-	list_add(&timer->list, prev);
 
 	timer->state = HRTIMER_PENDING;
+
+	if (!base->first || timer->expires.tv64 <
+	    rb_entry(base->first, struct hrtimer, node)->expires.tv64)
+		base->first = &timer->node;
 }
···
 static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
 {
 	/*
-	 * Remove the timer from the sorted list and from the rbtree:
+	 * Remove the timer from the rbtree and replace the
+	 * first entry pointer if necessary.
 	 */
-	list_del(&timer->list);
+	if (base->first == &timer->node)
+		base->first = rb_next(&timer->node);
 	rb_erase(&timer->node, &base->active);
 }
···
 {
 	struct hrtimer_base *bases;
 
-	tp->tv_sec = 0;
 	bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
-	tp->tv_nsec = bases[which_clock].resolution;
+	*tp = ktime_to_timespec(bases[which_clock].resolution);
 
 	return 0;
 }
···
 static inline void run_hrtimer_queue(struct hrtimer_base *base)
 {
 	ktime_t now = base->get_time();
+	struct rb_node *node;
 
 	spin_lock_irq(&base->lock);
 
-	while (!list_empty(&base->pending)) {
+	while ((node = base->first)) {
 		struct hrtimer *timer;
 		int (*fn)(void *);
 		int restart;
 		void *data;
 
-		timer = list_entry(base->pending.next, struct hrtimer, list);
+		timer = rb_entry(node, struct hrtimer, node);
 		if (now.tv64 <= timer->expires.tv64)
 			break;
···
 	for (i = 0; i < MAX_HRTIMER_BASES; i++) {
 		spin_lock_init(&base->lock);
-		INIT_LIST_HEAD(&base->pending);
 		base++;
 	}
 }
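Taken together, the hunks replace the sorted pending list with a single cached
pointer to the rbtree node that expires first: enqueue updates the pointer
when a new timer sorts in front of it, remove advances it via rb_next(), and
the expiry loop just consumes base->first. A self-contained userspace sketch
of the same pattern, using a plain unbalanced BST in place of the kernel
rbtree; dequeue_first() recomputes the leftmost node instead of calling
rb_next(), which the kernel can use because rbtree nodes carry parent
pointers:

	#include <stdio.h>
	#include <stdlib.h>

	/* Each node stands in for a struct hrtimer; expires orders the tree. */
	struct node {
		long long expires;
		struct node *left, *right;
	};

	static struct node *root;
	static struct node *first;	/* cached earliest node, like base->first */

	/* Insert like enqueue_hrtimer(): walk down the tree, then update the
	 * cached pointer if the new timer expires before the current first. */
	static void enqueue(struct node *n)
	{
		struct node **link = &root;

		while (*link)
			link = (n->expires < (*link)->expires) ?
				&(*link)->left : &(*link)->right;
		*link = n;

		if (!first || n->expires < first->expires)
			first = n;
	}

	/* Unlink and return the earliest timer, as the run_hrtimer_queue()
	 * loop consumes them. The first node is always leftmost, so it has
	 * no left child; the new first is the leftmost node that remains. */
	static struct node *dequeue_first(void)
	{
		struct node *n = first, **link = &root;

		if (!n)
			return NULL;
		while (*link != n)
			link = &(*link)->left;
		*link = n->right;

		first = root;
		if (first)
			while (first->left)
				first = first->left;
		return n;
	}

	int main(void)
	{
		long long v[] = { 30, 10, 20 };
		struct node *n;
		int i;

		for (i = 0; i < 3; i++) {
			n = calloc(1, sizeof(*n));	/* error handling omitted */
			n->expires = v[i];
			enqueue(n);
		}
		while ((n = dequeue_first())) {	/* prints 10, 20, 30 */
			printf("expires %lld\n", n->expires);
			free(n);
		}
		return 0;
	}

The win is the same in both versions: the hot path never walks the whole
tree, and the extra bookkeeping is one compare on insert plus a successor
step on removal.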