@@ -49,8 +49,6 @@
  * struct hrtimer - the basic hrtimer structure
  *
  * @node:      red black tree node for time ordered insertion
- * @list:      list head for easier access to the time ordered list,
- *             without walking the red black tree.
  * @expires:   the absolute expiry time in the hrtimers internal
  *             representation. The time is related to the clock on
  *             which the timer is based.
@@ -61,7 +63,6 @@
  */
 struct hrtimer {
         struct rb_node          node;
-        struct list_head        list;
         ktime_t                 expires;
         enum hrtimer_state      state;
         int                     (*function)(void *);
@@ -75,7 +78,7 @@
  *                     to a base on another cpu.
  * @lock:              lock protecting the base and associated timers
  * @active:            red black tree root node for the active timers
- * @pending:           list of pending timers for simple time ordered access
+ * @first:             pointer to the timer node which expires first
  * @resolution:        the resolution of the clock, in nanoseconds
  * @get_time:          function to retrieve the current time of the clock
  * @curr_timer:        the timer which is executing a callback right now
@@ -84,8 +87,8 @@
         clockid_t               index;
         spinlock_t              lock;
         struct rb_root          active;
-        struct list_head        pending;
-        unsigned long           resolution;
+        struct rb_node          *first;
+        ktime_t                 resolution;
         ktime_t                 (*get_time)(void);
         struct hrtimer          *curr_timer;
 };
@@ -122,8 +125,7 @@
 }
 
 /* Forward a hrtimer so it expires after now: */
-extern unsigned long hrtimer_forward(struct hrtimer *timer,
-                                     const ktime_t interval);
+extern unsigned long hrtimer_forward(struct hrtimer *timer, ktime_t interval);
 
 /* Precise sleep: */
 extern long hrtimer_nanosleep(struct timespec *rqtp,
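The new first field caches the leftmost node of the active rbtree, i.e. the timer that expires next, which is exactly what the removed pending list used to provide in O(1). A minimal sketch of how such a cached pointer is consulted; the helper name hrtimer_first_expiring is hypothetical, the patch itself open-codes this lookup in run_hrtimer_queue():

/* Sketch only: return the next-expiring timer on a base, or NULL.
 * The helper name is made up for illustration; the patch open-codes
 * the same lookup via rb_entry(base->first, struct hrtimer, node).
 */
static inline struct hrtimer *hrtimer_first_expiring(struct hrtimer_base *base)
{
        return base->first ?
                rb_entry(base->first, struct hrtimer, node) : NULL;
}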
include/linux/ktime.h (+2, -2)
@@ -272,8 +272,8 @@
  * idea of the (in)accuracy of timers. Timer values are rounded up to
  * this resolution values.
  */
-#define KTIME_REALTIME_RES      (NSEC_PER_SEC/HZ)
-#define KTIME_MONOTONIC_RES     (NSEC_PER_SEC/HZ)
+#define KTIME_REALTIME_RES      (ktime_t){ .tv64 = TICK_NSEC }
+#define KTIME_MONOTONIC_RES     (ktime_t){ .tv64 = TICK_NSEC }
 
 /* Get the monotonic time in timespec format: */
 extern void ktime_get_ts(struct timespec *ts);
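Expressing the clock resolution as a ktime_t (TICK_NSEC wrapped in a compound literal) lets it be compared and converted with the existing ktime helpers instead of being carried around as a raw nanosecond count. A standalone sketch of the rounding that hrtimer_forward() now performs against the base resolution; the function name clamp_to_resolution is made up for illustration:

/* Sketch: round an interval up to the clock resolution, mirroring the
 * check added to hrtimer_forward() below. Hypothetical helper, not part
 * of the patch.
 */
static ktime_t clamp_to_resolution(ktime_t interval, ktime_t res)
{
        if (interval.tv64 < res.tv64)
                interval.tv64 = res.tv64;
        return interval;
}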
kernel/hrtimer.c (+19, -15)
@@ -275,7 +275,7 @@
  * The number of overruns is added to the overrun field.
  */
 unsigned long
-hrtimer_forward(struct hrtimer *timer, const ktime_t interval)
+hrtimer_forward(struct hrtimer *timer, ktime_t interval)
 {
         unsigned long orun = 1;
         ktime_t delta, now;
@@ -286,6 +286,9 @@
 
         if (delta.tv64 < 0)
                 return 0;
+
+        if (interval.tv64 < timer->base->resolution.tv64)
+                interval.tv64 = timer->base->resolution.tv64;
 
         if (unlikely(delta.tv64 >= interval.tv64)) {
                 nsec_t incr = ktime_to_ns(interval);
@@ -317,7 +314,6 @@
 static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
 {
         struct rb_node **link = &base->active.rb_node;
-        struct list_head *prev = &base->pending;
         struct rb_node *parent = NULL;
         struct hrtimer *entry;
 
@@ -332,22 +330,23 @@
                  */
                 if (timer->expires.tv64 < entry->expires.tv64)
                         link = &(*link)->rb_left;
-                else {
+                else
                         link = &(*link)->rb_right;
-                        prev = &entry->list;
-                }
         }
 
         /*
-         * Insert the timer to the rbtree and to the sorted list:
+         * Insert the timer to the rbtree and check whether it
+         * replaces the first pending timer
          */
         rb_link_node(&timer->node, parent, link);
         rb_insert_color(&timer->node, &base->active);
-        list_add(&timer->list, prev);
 
         timer->state = HRTIMER_PENDING;
-}
 
+        if (!base->first || timer->expires.tv64 <
+            rb_entry(base->first, struct hrtimer, node)->expires.tv64)
+                base->first = &timer->node;
+}
 
 /*
  * __remove_hrtimer - internal function to remove a timer
@@ -358,9 +355,11 @@
 static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
 {
         /*
-         * Remove the timer from the sorted list and from the rbtree:
+         * Remove the timer from the rbtree and replace the
+         * first entry pointer if necessary.
          */
-        list_del(&timer->list);
+        if (base->first == &timer->node)
+                base->first = rb_next(&timer->node);
         rb_erase(&timer->node, &base->active);
 }
 
@@ -521,9 +516,8 @@
 {
         struct hrtimer_base *bases;
 
-        tp->tv_sec = 0;
         bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
-        tp->tv_nsec = bases[which_clock].resolution;
+        *tp = ktime_to_timespec(bases[which_clock].resolution);
 
         return 0;
 }
@@ -533,16 +529,17 @@
 static inline void run_hrtimer_queue(struct hrtimer_base *base)
 {
         ktime_t now = base->get_time();
+        struct rb_node *node;
 
         spin_lock_irq(&base->lock);
 
-        while (!list_empty(&base->pending)) {
+        while ((node = base->first)) {
                 struct hrtimer *timer;
                 int (*fn)(void *);
                 int restart;
                 void *data;
 
-                timer = list_entry(base->pending.next, struct hrtimer, list);
+                timer = rb_entry(node, struct hrtimer, node);
                 if (now.tv64 <= timer->expires.tv64)
                         break;
 
@@ -737,7 +732,6 @@
 
         for (i = 0; i < MAX_HRTIMER_BASES; i++) {
                 spin_lock_init(&base->lock);
-                INIT_LIST_HEAD(&base->pending);
                 base++;
         }
 }
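For callers the visible change is that hrtimer_forward() now takes the interval by value and rounds it up to the clock resolution internally; the first-pointer bookkeeping stays private to enqueue_hrtimer() and __remove_hrtimer(). A rough sketch of a periodic callback re-arming itself through the new signature; my_timer_fn and my_interval are hypothetical, the int (*)(void *) callback form matches the struct hrtimer field shown above, and HRTIMER_RESTART is assumed from the hrtimer_restart enum of this era:

static ktime_t my_interval;     /* hypothetical period, set up elsewhere */

/* Sketch: periodic callback pushing its own expiry past 'now' so that
 * run_hrtimer_queue() re-enqueues it. Overrun and error handling omitted.
 */
static int my_timer_fn(void *data)
{
        struct hrtimer *timer = data;   /* assumes data was set to the timer */

        hrtimer_forward(timer, my_interval);

        return HRTIMER_RESTART;
}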