/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *      based on kernel/timer.c
 *
 *  For licensing details see kernel-base/COPYING
 */
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>

#include <asm/uaccess.h>
/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
static ktime_t ktime_get(void)
{
        struct timespec now;

        ktime_get_ts(&now);

        return timespec_to_ktime(now);
}
/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
static ktime_t ktime_get_real(void)
{
        struct timespec now;

        getnstimeofday(&now);

        return timespec_to_ktime(now);
}

EXPORT_SYMBOL_GPL(ktime_get_real);
#define MAX_HRTIMER_BASES 2

static DEFINE_PER_CPU(struct hrtimer_base, hrtimer_bases[MAX_HRTIMER_BASES]) =
{
        {
                .index = CLOCK_REALTIME,
                .get_time = &ktime_get_real,
                .resolution = KTIME_REALTIME_RES,
        },
        {
                .index = CLOCK_MONOTONIC,
                .get_time = &ktime_get,
                .resolution = KTIME_MONOTONIC_RES,
        },
};
/**
 * ktime_get_ts - get the monotonic clock in timespec format
 *
 * @ts:         pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by ts.
 */
void ktime_get_ts(struct timespec *ts)
{
        struct timespec tomono;
        unsigned long seq;

        do {
                seq = read_seqbegin(&xtime_lock);
                getnstimeofday(ts);
                tomono = wall_to_monotonic;

        } while (read_seqretry(&xtime_lock, seq));

        set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
                                ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);
/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

#define set_curr_timer(b, t)    do { (b)->curr_timer = (t); } while (0)

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct hrtimer_base *lock_hrtimer_base(const struct hrtimer *timer,
                                              unsigned long *flags)
{
        struct hrtimer_base *base;

        for (;;) {
                base = timer->base;
                if (likely(base != NULL)) {
                        spin_lock_irqsave(&base->lock, *flags);
                        if (likely(base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU: */
                        spin_unlock_irqrestore(&base->lock, *flags);
                }
                cpu_relax();
        }
}
/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_base *base)
{
        struct hrtimer_base *new_base;

        new_base = &__get_cpu_var(hrtimer_bases[base->index]);

        if (base != new_base) {
                /*
                 * We are trying to schedule the timer on the local CPU.
                 * However we can't change timer's base while it is running,
                 * so we keep it on the same CPU. No hassle vs. reprogramming
                 * the event source in the high resolution case. The softirq
                 * code will take care of this when the timer function has
                 * completed. There is no conflict as we hold the lock until
                 * the timer is enqueued.
                 */
                if (unlikely(base->curr_timer == timer))
                        return base;

                /* See the comment in lock_timer_base() */
                timer->base = NULL;
                spin_unlock(&base->lock);
                spin_lock(&new_base->lock);
                timer->base = new_base;
        }
        return new_base;
}
#else /* CONFIG_SMP */

#define set_curr_timer(b, t)    do { } while (0)

static inline struct hrtimer_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
        struct hrtimer_base *base = timer->base;

        spin_lock_irqsave(&base->lock, *flags);

        return base;
}

#define switch_hrtimer_base(t, b)       (b)

#endif /* !CONFIG_SMP */
/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 *
 * @kt:         addend
 * @nsec:       the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
        ktime_t tmp;

        if (likely(nsec < NSEC_PER_SEC)) {
                tmp.tv64 = nsec;
        } else {
                unsigned long rem = do_div(nsec, NSEC_PER_SEC);

                tmp = ktime_set((long)nsec, rem);
        }

        return ktime_add(kt, tmp);
}

#else /* CONFIG_KTIME_SCALAR */
# endif /* !CONFIG_KTIME_SCALAR */
/*
 * Divide a ktime value by a nanosecond value
 */
static unsigned long ktime_divns(const ktime_t kt, nsec_t div)
{
        u64 dclc, dns;
        int sft = 0;

        dclc = dns = ktime_to_ns(kt);
        /* Make sure the divisor is less than 2^32: */
        while (div >> 32) {
                sft++;
                div >>= 1;
        }
        dclc >>= sft;
        do_div(dclc, (unsigned long) div);

        return (unsigned long) dclc;
}

#else /* BITS_PER_LONG < 64 */
# define ktime_divns(kt, div)   (unsigned long)((kt).tv64 / (div))
#endif /* BITS_PER_LONG >= 64 */
/*
 * Counterpart to lock_timer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
        spin_unlock_irqrestore(&timer->base->lock, *flags);
}
/**
 * hrtimer_forward - forward the timer expiry
 *
 * @timer:      hrtimer to forward
 * @interval:   the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * The number of overruns is added to the overrun field.
 */
unsigned long
hrtimer_forward(struct hrtimer *timer, const ktime_t interval)
{
        unsigned long orun = 1;
        ktime_t delta, now;

        now = timer->base->get_time();

        delta = ktime_sub(now, timer->expires);

        if (delta.tv64 < 0)
                return 0;

        if (unlikely(delta.tv64 >= interval.tv64)) {
                nsec_t incr = ktime_to_ns(interval);

                orun = ktime_divns(delta, incr);
                timer->expires = ktime_add_ns(timer->expires, incr * orun);
                if (timer->expires.tv64 > now.tv64)
                        return orun;
                /*
                 * This (and the ktime_add() below) is the
                 * correction for exact:
                 */
                orun++;
        }
        timer->expires = ktime_add(timer->expires, interval);

        return orun;
}
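/*
 * Usage sketch (hypothetical names, for illustration only): a periodic
 * callback can push its own expiry forward by one interval and return
 * HRTIMER_RESTART so that run_hrtimer_queue() re-enqueues it:
 *
 *      static int sample_tick(void *data)
 *      {
 *              struct hrtimer *timer = data;
 *
 *              hrtimer_forward(timer, ktime_set(1, 0));
 *              return HRTIMER_RESTART;
 *      }
 */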
/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 */
static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
{
        struct rb_node **link = &base->active.rb_node;
        struct list_head *prev = &base->pending;
        struct rb_node *parent = NULL;
        struct hrtimer *entry;

        /*
         * Find the right place in the rbtree:
         */
        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct hrtimer, node);
                /*
                 * We don't care about collisions. Nodes with
                 * the same expiry time stay together.
                 */
                if (timer->expires.tv64 < entry->expires.tv64)
                        link = &(*link)->rb_left;
                else {
                        link = &(*link)->rb_right;
                        prev = &entry->list;
                }
        }

        /*
         * Insert the timer to the rbtree and to the sorted list:
         */
        rb_link_node(&timer->node, parent, link);
        rb_insert_color(&timer->node, &base->active);
        list_add(&timer->list, prev);

        timer->state = HRTIMER_PENDING;
}
/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 */
static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
{
        /*
         * Remove the timer from the sorted list and from the rbtree:
         */
        list_del(&timer->list);
        rb_erase(&timer->node, &base->active);
}
/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
{
        if (hrtimer_active(timer)) {
                __remove_hrtimer(timer, base);
                timer->state = HRTIMER_INACTIVE;
                return 1;
        }
        return 0;
}
/**
 * hrtimer_start - (re)start a relative timer on the current CPU
 *
 * @timer:      the timer to be added
 * @tim:        expiry time
 * @mode:       expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
        struct hrtimer_base *base, *new_base;
        unsigned long flags;
        int ret;

        base = lock_hrtimer_base(timer, &flags);

        /* Remove an active timer from the queue: */
        ret = remove_hrtimer(timer, base);

        /* Switch the timer base, if necessary: */
        new_base = switch_hrtimer_base(timer, base);

        if (mode == HRTIMER_REL)
                tim = ktime_add(tim, new_base->get_time());

        timer->expires = tim;

        enqueue_hrtimer(timer, new_base);

        unlock_hrtimer_base(timer, &flags);

        return ret;
}
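/*
 * Usage sketch (hypothetical names, for illustration only): arming a
 * one-shot timer roughly 10ms from now on the monotonic clock:
 *
 *      static struct hrtimer my_timer;
 *
 *      hrtimer_init(&my_timer, CLOCK_MONOTONIC);
 *      my_timer.function = sample_tick;
 *      my_timer.data = &my_timer;
 *      hrtimer_start(&my_timer, ktime_set(0, 10 * 1000 * 1000), HRTIMER_REL);
 */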
/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 *
 * @timer:      hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
        struct hrtimer_base *base;
        unsigned long flags;
        int ret = -1;

        base = lock_hrtimer_base(timer, &flags);

        if (base->curr_timer != timer)
                ret = remove_hrtimer(timer, base);

        unlock_hrtimer_base(timer, &flags);

        return ret;
}
/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 *
 * @timer:      the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
        for (;;) {
                int ret = hrtimer_try_to_cancel(timer);

                if (ret >= 0)
                        return ret;
        }
}
/**
 * hrtimer_get_remaining - get remaining time for the timer
 *
 * @timer:      the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
        struct hrtimer_base *base;
        unsigned long flags;
        ktime_t rem;

        base = lock_hrtimer_base(timer, &flags);
        rem = ktime_sub(timer->expires, timer->base->get_time());
        unlock_hrtimer_base(timer, &flags);

        return rem;
}
/**
 * hrtimer_rebase - rebase an initialized hrtimer to a different base
 *
 * @timer:      the timer to be rebased
 * @clock_id:   the clock to be used
 */
void hrtimer_rebase(struct hrtimer *timer, const clockid_t clock_id)
{
        struct hrtimer_base *bases;

        bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
        timer->base = &bases[clock_id];
}
/**
 * hrtimer_init - initialize a timer to the given clock
 *
 * @timer:      the timer to be initialized
 * @clock_id:   the clock to be used
 */
void hrtimer_init(struct hrtimer *timer, const clockid_t clock_id)
{
        memset(timer, 0, sizeof(struct hrtimer));
        hrtimer_rebase(timer, clock_id);
}
/**
 * hrtimer_get_res - get the timer resolution for a clock
 *
 * @which_clock: which clock to query
 * @tp:          pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by which_clock in the
 * variable pointed to by tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
        struct hrtimer_base *bases;

        tp->tv_sec = 0;
        bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
        tp->tv_nsec = bases[which_clock].resolution;

        return 0;
}
/*
 * Expire the per base hrtimer-queue:
 */
static inline void run_hrtimer_queue(struct hrtimer_base *base)
{
        ktime_t now = base->get_time();

        spin_lock_irq(&base->lock);

        while (!list_empty(&base->pending)) {
                struct hrtimer *timer;
                int (*fn)(void *);
                int restart;
                void *data;

                timer = list_entry(base->pending.next, struct hrtimer, list);
                if (now.tv64 <= timer->expires.tv64)
                        break;

                fn = timer->function;
                data = timer->data;
                set_curr_timer(base, timer);
                __remove_hrtimer(timer, base);
                spin_unlock_irq(&base->lock);

                /*
                 * fn == NULL is special case for the simplest timer
                 * variant - wake up process and do not restart:
                 */
                if (!fn) {
                        wake_up_process(data);
                        restart = HRTIMER_NORESTART;
                } else
                        restart = fn(data);

                spin_lock_irq(&base->lock);

                if (restart == HRTIMER_RESTART)
                        enqueue_hrtimer(timer, base);
                else
                        timer->state = HRTIMER_EXPIRED;
        }
        set_curr_timer(base, NULL);
        spin_unlock_irq(&base->lock);
}
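/*
 * Usage sketch (hypothetical, for illustration only): the "simplest timer
 * variant" above is a timer whose function is left NULL and whose data
 * points to the task to wake, giving a nanosleep-style wait:
 *
 *      hrtimer_init(&timer, CLOCK_MONOTONIC);
 *      timer.data = current;
 *      set_current_state(TASK_INTERRUPTIBLE);
 *      hrtimer_start(&timer, expiry, HRTIMER_ABS);
 *      schedule();
 *      hrtimer_cancel(&timer);
 */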
/*
 * Called from timer softirq every jiffy, expire hrtimers:
 */
void hrtimer_run_queues(void)
{
        struct hrtimer_base *base = __get_cpu_var(hrtimer_bases);
        int i;

        for (i = 0; i < MAX_HRTIMER_BASES; i++)
                run_hrtimer_queue(&base[i]);
}
/*
 * Functions related to boot-time initialization:
 */
static void __devinit init_hrtimers_cpu(int cpu)
{
        struct hrtimer_base *base = per_cpu(hrtimer_bases, cpu);
        int i;

        for (i = 0; i < MAX_HRTIMER_BASES; i++) {
                spin_lock_init(&base->lock);
                INIT_LIST_HEAD(&base->pending);
                base++;
        }
}
#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_base *old_base,
                                 struct hrtimer_base *new_base)
{
        struct hrtimer *timer;
        struct rb_node *node;

        while ((node = rb_first(&old_base->active))) {
                timer = rb_entry(node, struct hrtimer, node);
                __remove_hrtimer(timer, old_base);
                timer->base = new_base;
                enqueue_hrtimer(timer, new_base);
        }
}
static void migrate_hrtimers(int cpu)
{
        struct hrtimer_base *old_base, *new_base;
        int i;

        BUG_ON(cpu_online(cpu));
        old_base = per_cpu(hrtimer_bases, cpu);
        new_base = get_cpu_var(hrtimer_bases);

        local_irq_disable();

        for (i = 0; i < MAX_HRTIMER_BASES; i++) {

                spin_lock(&new_base->lock);
                spin_lock(&old_base->lock);

                BUG_ON(old_base->curr_timer);

                migrate_hrtimer_list(old_base, new_base);

                spin_unlock(&old_base->lock);
                spin_unlock(&new_base->lock);
                old_base++;
                new_base++;
        }

        local_irq_enable();
        put_cpu_var(hrtimer_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __devinit hrtimer_cpu_notify(struct notifier_block *self,
                                        unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {

        case CPU_UP_PREPARE:
                init_hrtimers_cpu(cpu);
                break;

#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
                migrate_hrtimers(cpu);
                break;
#endif

        default:
                break;
        }

        return NOTIFY_OK;
}
static struct notifier_block __devinitdata hrtimers_nb = {
        .notifier_call = hrtimer_cpu_notify,
};
void __init hrtimers_init(void)
{
        hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
                           (void *)(long)smp_processor_id());
        register_cpu_notifier(&hrtimers_nb);
}