hrtimers: allow the hot-unplugging of all cpus
1/*
2 * linux/kernel/hrtimer.c
3 *
4 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
5 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
6 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
7 *
8 * High-resolution kernel timers
9 *
10 * In contrast to the low-resolution timeout API implemented in
11 * kernel/timer.c, hrtimers provide finer resolution and accuracy
12 * depending on system configuration and capabilities.
13 *
14 * These timers are currently used for:
15 * - itimers
16 * - POSIX timers
17 * - nanosleep
18 * - precise in-kernel timing
19 *
20 * Started by: Thomas Gleixner and Ingo Molnar
21 *
22 * Credits:
23 * based on kernel/timer.c
24 *
25 * Help, testing, suggestions, bugfixes, improvements were
26 * provided by:
27 *
28 * George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
29 * et al.
30 *
31 * For licensing details see kernel-base/COPYING
32 */
33
34#include <linux/cpu.h>
35#include <linux/module.h>
36#include <linux/percpu.h>
37#include <linux/hrtimer.h>
38#include <linux/notifier.h>
39#include <linux/syscalls.h>
40#include <linux/kallsyms.h>
41#include <linux/interrupt.h>
42#include <linux/tick.h>
43#include <linux/seq_file.h>
44#include <linux/err.h>
45#include <linux/debugobjects.h>
46
47#include <asm/uaccess.h>
48
49/**
50 * ktime_get - get the monotonic time in ktime_t format
51 *
52 * returns the time in ktime_t format
53 */
54ktime_t ktime_get(void)
55{
56 struct timespec now;
57
58 ktime_get_ts(&now);
59
60 return timespec_to_ktime(now);
61}
62EXPORT_SYMBOL_GPL(ktime_get);
63
64/**
65 * ktime_get_real - get the real (wall-) time in ktime_t format
66 *
67 * returns the time in ktime_t format
68 */
69ktime_t ktime_get_real(void)
70{
71 struct timespec now;
72
73 getnstimeofday(&now);
74
75 return timespec_to_ktime(now);
76}
77
78EXPORT_SYMBOL_GPL(ktime_get_real);
79
80/*
81 * The timer bases:
82 *
83 * Note: If we want to add new timer bases, we have to skip the two
84 * clock ids captured by the cpu-timers. We do this by holding empty
85 * entries rather than doing math adjustment of the clock ids.
86 * This ensures that we capture erroneous accesses to these clock ids
87 * rather than moving them into the range of valid clock id's.
88 */
89DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
90{
91
92 .clock_base =
93 {
94 {
95 .index = CLOCK_REALTIME,
96 .get_time = &ktime_get_real,
97 .resolution = KTIME_LOW_RES,
98 },
99 {
100 .index = CLOCK_MONOTONIC,
101 .get_time = &ktime_get,
102 .resolution = KTIME_LOW_RES,
103 },
104 }
105};
106
107/**
108 * ktime_get_ts - get the monotonic clock in timespec format
109 * @ts: pointer to timespec variable
110 *
111 * The function calculates the monotonic clock from the realtime
112 * clock and the wall_to_monotonic offset and stores the result
113 * in normalized timespec format in the variable pointed to by @ts.
114 */
115void ktime_get_ts(struct timespec *ts)
116{
117 struct timespec tomono;
118 unsigned long seq;
119
120 do {
121 seq = read_seqbegin(&xtime_lock);
122 getnstimeofday(ts);
123 tomono = wall_to_monotonic;
124
125 } while (read_seqretry(&xtime_lock, seq));
126
127 set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
128 ts->tv_nsec + tomono.tv_nsec);
129}
130EXPORT_SYMBOL_GPL(ktime_get_ts);
131
132/*
133 * Get the coarse grained time at the softirq based on xtime and
134 * wall_to_monotonic.
135 */
136static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
137{
138 ktime_t xtim, tomono;
139 struct timespec xts, tom;
140 unsigned long seq;
141
142 do {
143 seq = read_seqbegin(&xtime_lock);
144 xts = current_kernel_time();
145 tom = wall_to_monotonic;
146 } while (read_seqretry(&xtime_lock, seq));
147
148 xtim = timespec_to_ktime(xts);
149 tomono = timespec_to_ktime(tom);
150 base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
151 base->clock_base[CLOCK_MONOTONIC].softirq_time =
152 ktime_add(xtim, tomono);
153}
154
155/*
156 * Functions and macros which are different for UP/SMP systems are kept in a
157 * single place
158 */
159#ifdef CONFIG_SMP
160
161/*
162 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
163 * means that all timers which are tied to this base via timer->base are
164 * locked, and the base itself is locked too.
165 *
166 * So __run_timers/migrate_timers can safely modify all timers which could
167 * be found on the lists/queues.
168 *
169 * When the timer's base is locked, and the timer removed from list, it is
170 * possible to set timer->base = NULL and drop the lock: the timer remains
171 * locked.
172 */
173static
174struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
175 unsigned long *flags)
176{
177 struct hrtimer_clock_base *base;
178
179 for (;;) {
180 base = timer->base;
181 if (likely(base != NULL)) {
182 spin_lock_irqsave(&base->cpu_base->lock, *flags);
183 if (likely(base == timer->base))
184 return base;
185 /* The timer has migrated to another CPU: */
186 spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
187 }
188 cpu_relax();
189 }
190}
191
192/*
193 * Switch the timer base to the current CPU when possible.
194 */
195static inline struct hrtimer_clock_base *
196switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base)
197{
198 struct hrtimer_clock_base *new_base;
199 struct hrtimer_cpu_base *new_cpu_base;
200
201 new_cpu_base = &__get_cpu_var(hrtimer_bases);
202 new_base = &new_cpu_base->clock_base[base->index];
203
204 if (base != new_base) {
205 /*
206 * We are trying to schedule the timer on the local CPU.
207 * However we can't change timer's base while it is running,
208 * so we keep it on the same CPU. No hassle vs. reprogramming
209 * the event source in the high resolution case. The softirq
210 * code will take care of this when the timer function has
211 * completed. There is no conflict as we hold the lock until
212 * the timer is enqueued.
213 */
214 if (unlikely(hrtimer_callback_running(timer)))
215 return base;
216
217 /* See the comment in lock_hrtimer_base() */
218 timer->base = NULL;
219 spin_unlock(&base->cpu_base->lock);
220 spin_lock(&new_base->cpu_base->lock);
221 timer->base = new_base;
222 }
223 return new_base;
224}
225
226#else /* CONFIG_SMP */
227
228static inline struct hrtimer_clock_base *
229lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
230{
231 struct hrtimer_clock_base *base = timer->base;
232
233 spin_lock_irqsave(&base->cpu_base->lock, *flags);
234
235 return base;
236}
237
238# define switch_hrtimer_base(t, b) (b)
239
240#endif /* !CONFIG_SMP */
241
242/*
243 * Functions for the union type storage format of ktime_t which are
244 * too large for inlining:
245 */
246#if BITS_PER_LONG < 64
247# ifndef CONFIG_KTIME_SCALAR
248/**
249 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
250 * @kt: addend
251 * @nsec: the scalar nsec value to add
252 *
253 * Returns the sum of kt and nsec in ktime_t format
254 */
255ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
256{
257 ktime_t tmp;
258
259 if (likely(nsec < NSEC_PER_SEC)) {
260 tmp.tv64 = nsec;
261 } else {
262 unsigned long rem = do_div(nsec, NSEC_PER_SEC);
263
264 tmp = ktime_set((long)nsec, rem);
265 }
266
267 return ktime_add(kt, tmp);
268}
269
270EXPORT_SYMBOL_GPL(ktime_add_ns);
271
272/**
273 * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
274 * @kt: minuend
275 * @nsec: the scalar nsec value to subtract
276 *
277 * Returns the subtraction of @nsec from @kt in ktime_t format
278 */
279ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
280{
281 ktime_t tmp;
282
283 if (likely(nsec < NSEC_PER_SEC)) {
284 tmp.tv64 = nsec;
285 } else {
286 unsigned long rem = do_div(nsec, NSEC_PER_SEC);
287
288 tmp = ktime_set((long)nsec, rem);
289 }
290
291 return ktime_sub(kt, tmp);
292}
293
294EXPORT_SYMBOL_GPL(ktime_sub_ns);
295# endif /* !CONFIG_KTIME_SCALAR */
296
297/*
298 * Divide a ktime value by a nanosecond value
299 */
300u64 ktime_divns(const ktime_t kt, s64 div)
301{
302 u64 dclc;
303 int sft = 0;
304
305 dclc = ktime_to_ns(kt);
306 /* Make sure the divisor is less than 2^32: */
307 while (div >> 32) {
308 sft++;
309 div >>= 1;
310 }
311 dclc >>= sft;
312 do_div(dclc, (unsigned long) div);
313
314 return dclc;
315}
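/*
 * Example (hypothetical numbers): dividing by a 6 second interval
 * (6,000,000,000 ns, which exceeds 2^32) shifts both operands right once,
 * then divides 32-bit-safely with do_div(). The quotient may be rounded
 * down slightly by the lost low bits, which is harmless for the overrun
 * accounting in hrtimer_forward().
 */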
316#endif /* BITS_PER_LONG < 64 */
317
318/*
319 * Add two ktime values and do a safety check for overflow:
320 */
321ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
322{
323 ktime_t res = ktime_add(lhs, rhs);
324
325 /*
326 * We use KTIME_SEC_MAX here, the maximum timeout which we can
327 * return to user space in a timespec:
328 */
329 if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
330 res = ktime_set(KTIME_SEC_MAX, 0);
331
332 return res;
333}
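/*
 * Example: without the clamp, a huge relative timeout from user space
 * could wrap to a negative expiry once the current time is added in
 * hrtimer_start_range_ns(), making the timer fire immediately. With
 * ktime_add_safe() such a timer simply expires at KTIME_SEC_MAX,
 * i.e. effectively never.
 */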
334
335#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
336
337static struct debug_obj_descr hrtimer_debug_descr;
338
339/*
340 * fixup_init is called when:
341 * - an active object is initialized
342 */
343static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
344{
345 struct hrtimer *timer = addr;
346
347 switch (state) {
348 case ODEBUG_STATE_ACTIVE:
349 hrtimer_cancel(timer);
350 debug_object_init(timer, &hrtimer_debug_descr);
351 return 1;
352 default:
353 return 0;
354 }
355}
356
357/*
358 * fixup_activate is called when:
359 * - an active object is activated
360 * - an unknown object is activated (might be a statically initialized object)
361 */
362static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
363{
364 switch (state) {
365
366 case ODEBUG_STATE_NOTAVAILABLE:
367 WARN_ON_ONCE(1);
368 return 0;
369
370 case ODEBUG_STATE_ACTIVE:
371 WARN_ON(1);
372
373 default:
374 return 0;
375 }
376}
377
378/*
379 * fixup_free is called when:
380 * - an active object is freed
381 */
382static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
383{
384 struct hrtimer *timer = addr;
385
386 switch (state) {
387 case ODEBUG_STATE_ACTIVE:
388 hrtimer_cancel(timer);
389 debug_object_free(timer, &hrtimer_debug_descr);
390 return 1;
391 default:
392 return 0;
393 }
394}
395
396static struct debug_obj_descr hrtimer_debug_descr = {
397 .name = "hrtimer",
398 .fixup_init = hrtimer_fixup_init,
399 .fixup_activate = hrtimer_fixup_activate,
400 .fixup_free = hrtimer_fixup_free,
401};
402
403static inline void debug_hrtimer_init(struct hrtimer *timer)
404{
405 debug_object_init(timer, &hrtimer_debug_descr);
406}
407
408static inline void debug_hrtimer_activate(struct hrtimer *timer)
409{
410 debug_object_activate(timer, &hrtimer_debug_descr);
411}
412
413static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
414{
415 debug_object_deactivate(timer, &hrtimer_debug_descr);
416}
417
418static inline void debug_hrtimer_free(struct hrtimer *timer)
419{
420 debug_object_free(timer, &hrtimer_debug_descr);
421}
422
423static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
424 enum hrtimer_mode mode);
425
426void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
427 enum hrtimer_mode mode)
428{
429 debug_object_init_on_stack(timer, &hrtimer_debug_descr);
430 __hrtimer_init(timer, clock_id, mode);
431}
432
433void destroy_hrtimer_on_stack(struct hrtimer *timer)
434{
435 debug_object_free(timer, &hrtimer_debug_descr);
436}
437
438#else
439static inline void debug_hrtimer_init(struct hrtimer *timer) { }
440static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
441static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
442#endif
443
444/* High resolution timer related functions */
445#ifdef CONFIG_HIGH_RES_TIMERS
446
447/*
448 * High resolution timer enabled?
449 */
450static int hrtimer_hres_enabled __read_mostly = 1;
451
452/*
453 * Enable / Disable high resolution mode
454 */
455static int __init setup_hrtimer_hres(char *str)
456{
457 if (!strcmp(str, "off"))
458 hrtimer_hres_enabled = 0;
459 else if (!strcmp(str, "on"))
460 hrtimer_hres_enabled = 1;
461 else
462 return 0;
463 return 1;
464}
465
466__setup("highres=", setup_hrtimer_hres);
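/*
 * Example: booting with "highres=off" on the kernel command line keeps
 * hrtimers in low resolution mode, e.g. to rule out clock event device
 * problems on a given machine.
 */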
467
468/*
469 * hrtimer_is_hres_enabled - query whether high resolution mode is enabled
470 */
471static inline int hrtimer_is_hres_enabled(void)
472{
473 return hrtimer_hres_enabled;
474}
475
476/*
477 * Is the high resolution mode active?
478 */
479static inline int hrtimer_hres_active(void)
480{
481 return __get_cpu_var(hrtimer_bases).hres_active;
482}
483
484/*
485 * Reprogram the event source with checking both queues for the
486 * next event
487 * Called with interrupts disabled and base->lock held
488 */
489static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
490{
491 int i;
492 struct hrtimer_clock_base *base = cpu_base->clock_base;
493 ktime_t expires;
494
495 cpu_base->expires_next.tv64 = KTIME_MAX;
496
497 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
498 struct hrtimer *timer;
499
500 if (!base->first)
501 continue;
502 timer = rb_entry(base->first, struct hrtimer, node);
503 expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
504 if (expires.tv64 < cpu_base->expires_next.tv64)
505 cpu_base->expires_next = expires;
506 }
507
508 if (cpu_base->expires_next.tv64 != KTIME_MAX)
509 tick_program_event(cpu_base->expires_next, 1);
510}
511
512/*
513 * Shared reprogramming for clock_realtime and clock_monotonic
514 *
515 * When a timer is enqueued and expires earlier than the already enqueued
516 * timers, we have to check, whether it expires earlier than the timer for
517 * which the clock event device was armed.
518 *
519 * Called with interrupts disabled and base->cpu_base.lock held
520 */
521static int hrtimer_reprogram(struct hrtimer *timer,
522 struct hrtimer_clock_base *base)
523{
524 ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
525 ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
526 int res;
527
528 WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
529
530 /*
531 * When the callback is running, we do not reprogram the clock event
532 * device. The timer callback is either running on a different CPU or
533 * the callback is executed in the hrtimer_interrupt context. The
534 * reprogramming is handled either by the softirq, which called the
535 * callback or at the end of the hrtimer_interrupt.
536 */
537 if (hrtimer_callback_running(timer))
538 return 0;
539
540 /*
541 * CLOCK_REALTIME timer might be requested with an absolute
542 * expiry time which is less than base->offset. Nothing wrong
543 * about that, just avoid calling into the tick code, which
544 * now has objections against negative expiry values.
545 */
546 if (expires.tv64 < 0)
547 return -ETIME;
548
549 if (expires.tv64 >= expires_next->tv64)
550 return 0;
551
552 /*
553 * Clockevents returns -ETIME when the event is in the past.
554 */
555 res = tick_program_event(expires, 0);
556 if (!IS_ERR_VALUE(res))
557 *expires_next = expires;
558 return res;
559}
560
561
562/*
563 * Retrigger next event is called after clock was set
564 *
565 * Called with interrupts disabled via on_each_cpu()
566 */
567static void retrigger_next_event(void *arg)
568{
569 struct hrtimer_cpu_base *base;
570 struct timespec realtime_offset;
571 unsigned long seq;
572
573 if (!hrtimer_hres_active())
574 return;
575
576 do {
577 seq = read_seqbegin(&xtime_lock);
578 set_normalized_timespec(&realtime_offset,
579 -wall_to_monotonic.tv_sec,
580 -wall_to_monotonic.tv_nsec);
581 } while (read_seqretry(&xtime_lock, seq));
582
583 base = &__get_cpu_var(hrtimer_bases);
584
585 /* Adjust CLOCK_REALTIME offset */
586 spin_lock(&base->lock);
587 base->clock_base[CLOCK_REALTIME].offset =
588 timespec_to_ktime(realtime_offset);
589
590 hrtimer_force_reprogram(base);
591 spin_unlock(&base->lock);
592}
593
594/*
595 * Clock realtime was set
596 *
597 * Change the offset of the realtime clock vs. the monotonic
598 * clock.
599 *
600 * We might have to reprogram the high resolution timer interrupt. On
601 * SMP we call the architecture specific code to retrigger _all_ high
602 * resolution timer interrupts. On UP we just disable interrupts and
603 * call the high resolution interrupt code.
604 */
605void clock_was_set(void)
606{
607 /* Retrigger the CPU local events everywhere */
608 on_each_cpu(retrigger_next_event, NULL, 1);
609}
610
611/*
612 * During resume we might have to reprogram the high resolution timer
613 * interrupt (on the local CPU):
614 */
615void hres_timers_resume(void)
616{
617 WARN_ONCE(!irqs_disabled(),
618 KERN_INFO "hres_timers_resume() called with IRQs enabled!");
619
620 retrigger_next_event(NULL);
621}
622
623/*
624 * Initialize the high resolution related parts of cpu_base
625 */
626static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
627{
628 base->expires_next.tv64 = KTIME_MAX;
629 base->hres_active = 0;
630}
631
632/*
633 * Initialize the high resolution related parts of a hrtimer
634 */
635static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
636{
637}
638
639
640/*
641 * When high resolution timers are active, try to reprogram. Note that if
642 * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no
643 * expiry check happen; the timer simply gets enqueued into the rbtree.
644 * The reprogramming and expiry check are then done in hrtimer_interrupt() or in the softirq.
645 */
646static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
647 struct hrtimer_clock_base *base)
648{
649 if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
650 spin_unlock(&base->cpu_base->lock);
651 raise_softirq_irqoff(HRTIMER_SOFTIRQ);
652 spin_lock(&base->cpu_base->lock);
653 return 1;
654 }
655 return 0;
656}
657
658/*
659 * Switch to high resolution mode
660 */
661static int hrtimer_switch_to_hres(void)
662{
663 int cpu = smp_processor_id();
664 struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
665 unsigned long flags;
666
667 if (base->hres_active)
668 return 1;
669
670 local_irq_save(flags);
671
672 if (tick_init_highres()) {
673 local_irq_restore(flags);
674 printk(KERN_WARNING "Could not switch to high resolution "
675 "mode on CPU %d\n", cpu);
676 return 0;
677 }
678 base->hres_active = 1;
679 base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
680 base->clock_base[CLOCK_MONOTONIC].resolution = KTIME_HIGH_RES;
681
682 tick_setup_sched_timer();
683
684 /* "Retrigger" the interrupt to get things going */
685 retrigger_next_event(NULL);
686 local_irq_restore(flags);
687 printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
688 smp_processor_id());
689 return 1;
690}
691
692#else
693
694static inline int hrtimer_hres_active(void) { return 0; }
695static inline int hrtimer_is_hres_enabled(void) { return 0; }
696static inline int hrtimer_switch_to_hres(void) { return 0; }
697static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
698static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
699 struct hrtimer_clock_base *base)
700{
701 return 0;
702}
703static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
704static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }
705
706#endif /* CONFIG_HIGH_RES_TIMERS */
707
708#ifdef CONFIG_TIMER_STATS
709void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
710{
711 if (timer->start_site)
712 return;
713
714 timer->start_site = addr;
715 memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
716 timer->start_pid = current->pid;
717}
718#endif
719
720/*
721 * Counterpart to lock_hrtimer_base above:
722 */
723static inline
724void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
725{
726 spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
727}
728
729/**
730 * hrtimer_forward - forward the timer expiry
731 * @timer: hrtimer to forward
732 * @now: forward past this time
733 * @interval: the interval to forward
734 *
735 * Forward the timer expiry so it will expire in the future.
736 * Returns the number of overruns.
737 */
738u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
739{
740 u64 orun = 1;
741 ktime_t delta;
742
743 delta = ktime_sub(now, hrtimer_get_expires(timer));
744
745 if (delta.tv64 < 0)
746 return 0;
747
748 if (interval.tv64 < timer->base->resolution.tv64)
749 interval.tv64 = timer->base->resolution.tv64;
750
751 if (unlikely(delta.tv64 >= interval.tv64)) {
752 s64 incr = ktime_to_ns(interval);
753
754 orun = ktime_divns(delta, incr);
755 hrtimer_add_expires_ns(timer, incr * orun);
756 if (hrtimer_get_expires_tv64(timer) > now.tv64)
757 return orun;
758 /*
759 * This (and the ktime_add() below) is the
760 * correction for exact:
761 */
762 orun++;
763 }
764 hrtimer_add_expires(timer, interval);
765
766 return orun;
767}
768EXPORT_SYMBOL_GPL(hrtimer_forward);
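/*
 * Example (sketch; my_timer_cb, my_period and do_my_periodic_work are
 * hypothetical names): the typical use of hrtimer_forward() is a periodic
 * timer whose callback pushes its own expiry forward past 'now' and asks
 * to be restarted. The return value of hrtimer_forward() is the number of
 * intervals that were missed:
 *
 *	static ktime_t my_period;
 *
 *	static enum hrtimer_restart my_timer_cb(struct hrtimer *timer)
 *	{
 *		do_my_periodic_work();
 *		hrtimer_forward(timer, timer->base->get_time(), my_period);
 *		return HRTIMER_RESTART;
 *	}
 */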
769
770/*
771 * enqueue_hrtimer - internal function to (re)start a timer
772 *
773 * The timer is inserted in expiry order. Insertion into the
774 * red black tree is O(log(n)). Must hold the base lock.
775 *
776 * Returns 1 when the new timer is the leftmost timer in the tree.
777 */
778static int enqueue_hrtimer(struct hrtimer *timer,
779 struct hrtimer_clock_base *base)
780{
781 struct rb_node **link = &base->active.rb_node;
782 struct rb_node *parent = NULL;
783 struct hrtimer *entry;
784 int leftmost = 1;
785
786 debug_hrtimer_activate(timer);
787
788 /*
789 * Find the right place in the rbtree:
790 */
791 while (*link) {
792 parent = *link;
793 entry = rb_entry(parent, struct hrtimer, node);
794 /*
795 * We don't care about collisions. Nodes with
796 * the same expiry time stay together.
797 */
798 if (hrtimer_get_expires_tv64(timer) <
799 hrtimer_get_expires_tv64(entry)) {
800 link = &(*link)->rb_left;
801 } else {
802 link = &(*link)->rb_right;
803 leftmost = 0;
804 }
805 }
806
807 /*
808 * Insert the timer to the rbtree and check whether it
809 * replaces the first pending timer
810 */
811 if (leftmost)
812 base->first = &timer->node;
813
814 rb_link_node(&timer->node, parent, link);
815 rb_insert_color(&timer->node, &base->active);
816 /*
817 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
818 * state of a possibly running callback.
819 */
820 timer->state |= HRTIMER_STATE_ENQUEUED;
821
822 return leftmost;
823}
824
825/*
826 * __remove_hrtimer - internal function to remove a timer
827 *
828 * Caller must hold the base lock.
829 *
830 * High resolution timer mode reprograms the clock event device when the
831 * timer is the one which expires next. The caller can disable this by setting
832 * reprogram to zero. This is useful when the context does a reprogramming
833 * anyway (e.g. timer interrupt)
834 */
835static void __remove_hrtimer(struct hrtimer *timer,
836 struct hrtimer_clock_base *base,
837 unsigned long newstate, int reprogram)
838{
839 if (timer->state & HRTIMER_STATE_ENQUEUED) {
840 /*
841 * Remove the timer from the rbtree and replace the
842 * first entry pointer if necessary.
843 */
844 if (base->first == &timer->node) {
845 base->first = rb_next(&timer->node);
846 /* Reprogram the clock event device, if enabled */
847 if (reprogram && hrtimer_hres_active())
848 hrtimer_force_reprogram(base->cpu_base);
849 }
850 rb_erase(&timer->node, &base->active);
851 }
852 timer->state = newstate;
853}
854
855/*
856 * remove hrtimer, called with base lock held
857 */
858static inline int
859remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
860{
861 if (hrtimer_is_queued(timer)) {
862 int reprogram;
863
864 /*
865 * Remove the timer and force reprogramming when high
866 * resolution mode is active and the timer is on the current
867 * CPU. If we remove a timer on another CPU, reprogramming is
868 * skipped. The interrupt event on this CPU is fired and
869 * reprogramming happens in the interrupt handler. This is a
870 * rare case and less expensive than a smp call.
871 */
872 debug_hrtimer_deactivate(timer);
873 timer_stats_hrtimer_clear_start_info(timer);
874 reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
875 __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
876 reprogram);
877 return 1;
878 }
879 return 0;
880}
881
882/**
883 * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
884 * @timer: the timer to be added
885 * @tim: expiry time
886 * @delta_ns: "slack" range for the timer
887 * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL)
888 *
889 * Returns:
890 * 0 on success
891 * 1 when the timer was active
892 */
893int
894hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_ns,
895 const enum hrtimer_mode mode)
896{
897 struct hrtimer_clock_base *base, *new_base;
898 unsigned long flags;
899 int ret, leftmost;
900
901 base = lock_hrtimer_base(timer, &flags);
902
903 /* Remove an active timer from the queue: */
904 ret = remove_hrtimer(timer, base);
905
906 /* Switch the timer base, if necessary: */
907 new_base = switch_hrtimer_base(timer, base);
908
909 if (mode == HRTIMER_MODE_REL) {
910 tim = ktime_add_safe(tim, new_base->get_time());
911 /*
912 * CONFIG_TIME_LOW_RES is a temporary way for architectures
913 * to signal that they simply return xtime in
914 * do_gettimeoffset(). In this case we want to round up by
915 * resolution when starting a relative timer, to avoid short
916 * timeouts. This will go away with the GTOD framework.
917 */
918#ifdef CONFIG_TIME_LOW_RES
919 tim = ktime_add_safe(tim, base->resolution);
920#endif
921 }
922
923 hrtimer_set_expires_range_ns(timer, tim, delta_ns);
924
925 timer_stats_hrtimer_set_start_info(timer);
926
927 leftmost = enqueue_hrtimer(timer, new_base);
928
929 /*
930 * Only allow reprogramming if the new base is on this CPU.
931 * (it might still be on another CPU if the timer was pending)
932 *
933 * XXX send_remote_softirq() ?
934 */
935 if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
936 hrtimer_enqueue_reprogram(timer, new_base);
937
938 unlock_hrtimer_base(timer, &flags);
939
940 return ret;
941}
942EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
943
944/**
945 * hrtimer_start - (re)start an hrtimer on the current CPU
946 * @timer: the timer to be added
947 * @tim: expiry time
948 * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL)
949 *
950 * Returns:
951 * 0 on success
952 * 1 when the timer was active
953 */
954int
955hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
956{
957 return hrtimer_start_range_ns(timer, tim, 0, mode);
958}
959EXPORT_SYMBOL_GPL(hrtimer_start);
960
961
962/**
963 * hrtimer_try_to_cancel - try to deactivate a timer
964 * @timer: hrtimer to stop
965 *
966 * Returns:
967 * 0 when the timer was not active
968 * 1 when the timer was active
969 * -1 when the timer is currently executing the callback function and
970 * cannot be stopped
971 */
972int hrtimer_try_to_cancel(struct hrtimer *timer)
973{
974 struct hrtimer_clock_base *base;
975 unsigned long flags;
976 int ret = -1;
977
978 base = lock_hrtimer_base(timer, &flags);
979
980 if (!hrtimer_callback_running(timer))
981 ret = remove_hrtimer(timer, base);
982
983 unlock_hrtimer_base(timer, &flags);
984
985 return ret;
986
987}
988EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
989
990/**
991 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
992 * @timer: the timer to be cancelled
993 *
994 * Returns:
995 * 0 when the timer was not active
996 * 1 when the timer was active
997 */
998int hrtimer_cancel(struct hrtimer *timer)
999{
1000 for (;;) {
1001 int ret = hrtimer_try_to_cancel(timer);
1002
1003 if (ret >= 0)
1004 return ret;
1005 cpu_relax();
1006 }
1007}
1008EXPORT_SYMBOL_GPL(hrtimer_cancel);
1009
1010/**
1011 * hrtimer_get_remaining - get remaining time for the timer
1012 * @timer: the timer to read
1013 */
1014ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
1015{
1016 struct hrtimer_clock_base *base;
1017 unsigned long flags;
1018 ktime_t rem;
1019
1020 base = lock_hrtimer_base(timer, &flags);
1021 rem = hrtimer_expires_remaining(timer);
1022 unlock_hrtimer_base(timer, &flags);
1023
1024 return rem;
1025}
1026EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
1027
1028#ifdef CONFIG_NO_HZ
1029/**
1030 * hrtimer_get_next_event - get the time until next expiry event
1031 *
1032 * Returns the delta to the next expiry event or KTIME_MAX if no timer
1033 * is pending.
1034 */
1035ktime_t hrtimer_get_next_event(void)
1036{
1037 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
1038 struct hrtimer_clock_base *base = cpu_base->clock_base;
1039 ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
1040 unsigned long flags;
1041 int i;
1042
1043 spin_lock_irqsave(&cpu_base->lock, flags);
1044
1045 if (!hrtimer_hres_active()) {
1046 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
1047 struct hrtimer *timer;
1048
1049 if (!base->first)
1050 continue;
1051
1052 timer = rb_entry(base->first, struct hrtimer, node);
1053 delta.tv64 = hrtimer_get_expires_tv64(timer);
1054 delta = ktime_sub(delta, base->get_time());
1055 if (delta.tv64 < mindelta.tv64)
1056 mindelta.tv64 = delta.tv64;
1057 }
1058 }
1059
1060 spin_unlock_irqrestore(&cpu_base->lock, flags);
1061
1062 if (mindelta.tv64 < 0)
1063 mindelta.tv64 = 0;
1064 return mindelta;
1065}
1066#endif
1067
1068static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1069 enum hrtimer_mode mode)
1070{
1071 struct hrtimer_cpu_base *cpu_base;
1072
1073 memset(timer, 0, sizeof(struct hrtimer));
1074
1075 cpu_base = &__raw_get_cpu_var(hrtimer_bases);
1076
1077 if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
1078 clock_id = CLOCK_MONOTONIC;
1079
1080 timer->base = &cpu_base->clock_base[clock_id];
1081 INIT_LIST_HEAD(&timer->cb_entry);
1082 hrtimer_init_timer_hres(timer);
1083
1084#ifdef CONFIG_TIMER_STATS
1085 timer->start_site = NULL;
1086 timer->start_pid = -1;
1087 memset(timer->start_comm, 0, TASK_COMM_LEN);
1088#endif
1089}
1090
1091/**
1092 * hrtimer_init - initialize a timer to the given clock
1093 * @timer: the timer to be initialized
1094 * @clock_id: the clock to be used
1095 * @mode: timer mode abs/rel
1096 */
1097void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1098 enum hrtimer_mode mode)
1099{
1100 debug_hrtimer_init(timer);
1101 __hrtimer_init(timer, clock_id, mode);
1102}
1103EXPORT_SYMBOL_GPL(hrtimer_init);
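/*
 * Example (sketch; my_timer, my_callback and my_setup are hypothetical
 * names): arming a one-shot timer 100ms from now. The callback returns
 * HRTIMER_NORESTART, so the timer is not re-armed:
 *
 *	static struct hrtimer my_timer;
 *
 *	static enum hrtimer_restart my_callback(struct hrtimer *timer)
 *	{
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	static void my_setup(void)
 *	{
 *		hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *		my_timer.function = my_callback;
 *		hrtimer_start(&my_timer, ktime_set(0, 100 * NSEC_PER_MSEC),
 *			      HRTIMER_MODE_REL);
 *	}
 *
 * Teardown paths should call hrtimer_cancel(&my_timer) to wait for a
 * running callback before freeing the enclosing structure.
 */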
1104
1105/**
1106 * hrtimer_get_res - get the timer resolution for a clock
1107 * @which_clock: which clock to query
1108 * @tp: pointer to timespec variable to store the resolution
1109 *
1110 * Store the resolution of the clock selected by @which_clock in the
1111 * variable pointed to by @tp.
1112 */
1113int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
1114{
1115 struct hrtimer_cpu_base *cpu_base;
1116
1117 cpu_base = &__raw_get_cpu_var(hrtimer_bases);
1118 *tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);
1119
1120 return 0;
1121}
1122EXPORT_SYMBOL_GPL(hrtimer_get_res);
1123
1124static void __run_hrtimer(struct hrtimer *timer)
1125{
1126 struct hrtimer_clock_base *base = timer->base;
1127 struct hrtimer_cpu_base *cpu_base = base->cpu_base;
1128 enum hrtimer_restart (*fn)(struct hrtimer *);
1129 int restart;
1130
1131 WARN_ON(!irqs_disabled());
1132
1133 debug_hrtimer_deactivate(timer);
1134 __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
1135 timer_stats_account_hrtimer(timer);
1136 fn = timer->function;
1137
1138 /*
1139 * Because we run timers from hardirq context, there is no chance
1140 * they get migrated to another CPU, therefore it's safe to unlock
1141 * the timer base.
1142 */
1143 spin_unlock(&cpu_base->lock);
1144 restart = fn(timer);
1145 spin_lock(&cpu_base->lock);
1146
1147 /*
1148 * Note: We clear the CALLBACK bit after enqueue_hrtimer and
1149 * we do not reprogram the event hardware. That happens either in
1150 * hrtimer_start_range_ns() or in hrtimer_interrupt().
1151 */
1152 if (restart != HRTIMER_NORESTART) {
1153 BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
1154 enqueue_hrtimer(timer, base);
1155 }
1156 timer->state &= ~HRTIMER_STATE_CALLBACK;
1157}
1158
1159#ifdef CONFIG_HIGH_RES_TIMERS
1160
1161static int force_clock_reprogram;
1162
1163/*
1164 * After 5 attempts we consider that hrtimer_interrupt() is hanging,
1165 * which can happen when something slows down the interrupt, such as
1166 * tracing. We then force the clock reprogramming for each future
1167 * hrtimer interrupt to avoid infinite loops, overriding the device's
1168 * min_delta_ns threshold in the process.
1169 * The next tick event will be scheduled at 3 times the time we
1170 * currently spend in hrtimer_interrupt(). This is a good compromise:
1171 * the cpus will spend at most 1/4 of their time processing hrtimer
1172 * interrupts, which is enough to keep running without serious starvation.
1173 */
1174
1175static inline void
1176hrtimer_interrupt_hanging(struct clock_event_device *dev,
1177 ktime_t try_time)
1178{
1179 force_clock_reprogram = 1;
1180 dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
1181 printk(KERN_WARNING "hrtimer: interrupt too slow, "
1182 "forcing clock min delta to %lu ns\n", dev->min_delta_ns);
1183}
1184/*
1185 * High resolution timer interrupt
1186 * Called with interrupts disabled
1187 */
1188void hrtimer_interrupt(struct clock_event_device *dev)
1189{
1190 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
1191 struct hrtimer_clock_base *base;
1192 ktime_t expires_next, now;
1193 int nr_retries = 0;
1194 int i;
1195
1196 BUG_ON(!cpu_base->hres_active);
1197 cpu_base->nr_events++;
1198 dev->next_event.tv64 = KTIME_MAX;
1199
1200 retry:
1201 /* 5 retries is enough to notice a hang */
1202 if (!(++nr_retries % 5))
1203 hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));
1204
1205 now = ktime_get();
1206
1207 expires_next.tv64 = KTIME_MAX;
1208
1209 base = cpu_base->clock_base;
1210
1211 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
1212 ktime_t basenow;
1213 struct rb_node *node;
1214
1215 spin_lock(&cpu_base->lock);
1216
1217 basenow = ktime_add(now, base->offset);
1218
1219 while ((node = base->first)) {
1220 struct hrtimer *timer;
1221
1222 timer = rb_entry(node, struct hrtimer, node);
1223
1224 /*
1225 * The immediate goal for using the softexpires is
1226 * minimizing wakeups, not running timers at the
1227 * earliest interrupt after their soft expiration.
1228 * This allows us to avoid using a Priority Search
1229 * Tree, which can answer a stabbing query for
1230 * overlapping intervals and instead use the simple
1231 * BST we already have.
1232 * We don't add extra wakeups by delaying timers that
1233 * are to the right of a not-yet-expired timer, because that
1234 * timer will have to trigger a wakeup anyway.
1235 */
1236
1237 if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
1238 ktime_t expires;
1239
1240 expires = ktime_sub(hrtimer_get_expires(timer),
1241 base->offset);
1242 if (expires.tv64 < expires_next.tv64)
1243 expires_next = expires;
1244 break;
1245 }
1246
1247 __run_hrtimer(timer);
1248 }
1249 spin_unlock(&cpu_base->lock);
1250 base++;
1251 }
1252
1253 cpu_base->expires_next = expires_next;
1254
1255 /* Reprogramming necessary? */
1256 if (expires_next.tv64 != KTIME_MAX) {
1257 if (tick_program_event(expires_next, force_clock_reprogram))
1258 goto retry;
1259 }
1260}
1261
1262/*
1263 * local version of hrtimer_peek_ahead_timers() called with interrupts
1264 * disabled.
1265 */
1266static void __hrtimer_peek_ahead_timers(void)
1267{
1268 struct tick_device *td;
1269
1270 if (!hrtimer_hres_active())
1271 return;
1272
1273 td = &__get_cpu_var(tick_cpu_device);
1274 if (td && td->evtdev)
1275 hrtimer_interrupt(td->evtdev);
1276}
1277
1278/**
1279 * hrtimer_peek_ahead_timers - run soft-expired timers now
1280 *
1281 * hrtimer_peek_ahead_timers will peek at the timer queue of
1282 * the current cpu and check if there are any timers for which
1283 * the soft expiry time has passed. If any such timers exist,
1284 * they are run immediately and then removed from the timer queue.
1285 *
1286 */
1287void hrtimer_peek_ahead_timers(void)
1288{
1289 unsigned long flags;
1290
1291 local_irq_save(flags);
1292 __hrtimer_peek_ahead_timers();
1293 local_irq_restore(flags);
1294}
1295
1296static void run_hrtimer_softirq(struct softirq_action *h)
1297{
1298 hrtimer_peek_ahead_timers();
1299}
1300
1301#else /* CONFIG_HIGH_RES_TIMERS */
1302
1303static inline void __hrtimer_peek_ahead_timers(void) { }
1304
1305#endif /* !CONFIG_HIGH_RES_TIMERS */
1306
1307/*
1308 * Called from timer softirq every jiffy, expire hrtimers:
1309 *
1310 * For HRT it's the fallback code to run the softirq in the timer
1311 * softirq context in case the hrtimer initialization failed or has
1312 * not been done yet.
1313 */
1314void hrtimer_run_pending(void)
1315{
1316 if (hrtimer_hres_active())
1317 return;
1318
1319 /*
1320 * This _is_ ugly: We have to check in the softirq context,
1321 * whether we can switch to highres and / or nohz mode. The
1322 * clocksource switch happens in the timer interrupt with
1323 * xtime_lock held. Notification from there only sets the
1324 * check bit in the tick_oneshot code, otherwise we might
1325 * deadlock vs. xtime_lock.
1326 */
1327 if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
1328 hrtimer_switch_to_hres();
1329}
1330
1331/*
1332 * Called from hardirq context every jiffy
1333 */
1334void hrtimer_run_queues(void)
1335{
1336 struct rb_node *node;
1337 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
1338 struct hrtimer_clock_base *base;
1339 int index, gettime = 1;
1340
1341 if (hrtimer_hres_active())
1342 return;
1343
1344 for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
1345 base = &cpu_base->clock_base[index];
1346
1347 if (!base->first)
1348 continue;
1349
1350 if (gettime) {
1351 hrtimer_get_softirq_time(cpu_base);
1352 gettime = 0;
1353 }
1354
1355 spin_lock(&cpu_base->lock);
1356
1357 while ((node = base->first)) {
1358 struct hrtimer *timer;
1359
1360 timer = rb_entry(node, struct hrtimer, node);
1361 if (base->softirq_time.tv64 <=
1362 hrtimer_get_expires_tv64(timer))
1363 break;
1364
1365 __run_hrtimer(timer);
1366 }
1367 spin_unlock(&cpu_base->lock);
1368 }
1369}
1370
1371/*
1372 * Sleep related functions:
1373 */
1374static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
1375{
1376 struct hrtimer_sleeper *t =
1377 container_of(timer, struct hrtimer_sleeper, timer);
1378 struct task_struct *task = t->task;
1379
1380 t->task = NULL;
1381 if (task)
1382 wake_up_process(task);
1383
1384 return HRTIMER_NORESTART;
1385}
1386
1387void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
1388{
1389 sl->timer.function = hrtimer_wakeup;
1390 sl->task = task;
1391}
1392
1393static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
1394{
1395 hrtimer_init_sleeper(t, current);
1396
1397 do {
1398 set_current_state(TASK_INTERRUPTIBLE);
1399 hrtimer_start_expires(&t->timer, mode);
1400 if (!hrtimer_active(&t->timer))
1401 t->task = NULL;
1402
1403 if (likely(t->task))
1404 schedule();
1405
1406 hrtimer_cancel(&t->timer);
1407 mode = HRTIMER_MODE_ABS;
1408
1409 } while (t->task && !signal_pending(current));
1410
1411 __set_current_state(TASK_RUNNING);
1412
1413 return t->task == NULL;
1414}
1415
1416static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
1417{
1418 struct timespec rmt;
1419 ktime_t rem;
1420
1421 rem = hrtimer_expires_remaining(timer);
1422 if (rem.tv64 <= 0)
1423 return 0;
1424 rmt = ktime_to_timespec(rem);
1425
1426 if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
1427 return -EFAULT;
1428
1429 return 1;
1430}
1431
1432long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
1433{
1434 struct hrtimer_sleeper t;
1435 struct timespec __user *rmtp;
1436 int ret = 0;
1437
1438 hrtimer_init_on_stack(&t.timer, restart->nanosleep.index,
1439 HRTIMER_MODE_ABS);
1440 hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
1441
1442 if (do_nanosleep(&t, HRTIMER_MODE_ABS))
1443 goto out;
1444
1445 rmtp = restart->nanosleep.rmtp;
1446 if (rmtp) {
1447 ret = update_rmtp(&t.timer, rmtp);
1448 if (ret <= 0)
1449 goto out;
1450 }
1451
1452 /* The other values in restart are already filled in */
1453 ret = -ERESTART_RESTARTBLOCK;
1454out:
1455 destroy_hrtimer_on_stack(&t.timer);
1456 return ret;
1457}
1458
1459long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
1460 const enum hrtimer_mode mode, const clockid_t clockid)
1461{
1462 struct restart_block *restart;
1463 struct hrtimer_sleeper t;
1464 int ret = 0;
1465 unsigned long slack;
1466
1467 slack = current->timer_slack_ns;
1468 if (rt_task(current))
1469 slack = 0;
1470
1471 hrtimer_init_on_stack(&t.timer, clockid, mode);
1472 hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
1473 if (do_nanosleep(&t, mode))
1474 goto out;
1475
1476 /* Absolute timers do not update the rmtp value and restart: */
1477 if (mode == HRTIMER_MODE_ABS) {
1478 ret = -ERESTARTNOHAND;
1479 goto out;
1480 }
1481
1482 if (rmtp) {
1483 ret = update_rmtp(&t.timer, rmtp);
1484 if (ret <= 0)
1485 goto out;
1486 }
1487
1488 restart = &current_thread_info()->restart_block;
1489 restart->fn = hrtimer_nanosleep_restart;
1490 restart->nanosleep.index = t.timer.base->index;
1491 restart->nanosleep.rmtp = rmtp;
1492 restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
1493
1494 ret = -ERESTART_RESTARTBLOCK;
1495out:
1496 destroy_hrtimer_on_stack(&t.timer);
1497 return ret;
1498}
1499
1500SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
1501 struct timespec __user *, rmtp)
1502{
1503 struct timespec tu;
1504
1505 if (copy_from_user(&tu, rqtp, sizeof(tu)))
1506 return -EFAULT;
1507
1508 if (!timespec_valid(&tu))
1509 return -EINVAL;
1510
1511 return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
1512}
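/*
 * This implements the nanosleep(2) system call. From user space, e.g.:
 *
 *	struct timespec req = { .tv_sec = 0, .tv_nsec = 500000 };
 *	nanosleep(&req, NULL);
 *
 * ends up in hrtimer_nanosleep() above with CLOCK_MONOTONIC and
 * HRTIMER_MODE_REL.
 */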
1513
1514/*
1515 * Functions related to boot-time initialization:
1516 */
1517static void __cpuinit init_hrtimers_cpu(int cpu)
1518{
1519 struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
1520 int i;
1521
1522 spin_lock_init(&cpu_base->lock);
1523
1524 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
1525 cpu_base->clock_base[i].cpu_base = cpu_base;
1526
1527 hrtimer_init_hres(cpu_base);
1528}
1529
1530#ifdef CONFIG_HOTPLUG_CPU
1531
1532static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
1533 struct hrtimer_clock_base *new_base)
1534{
1535 struct hrtimer *timer;
1536 struct rb_node *node;
1537
1538 while ((node = rb_first(&old_base->active))) {
1539 timer = rb_entry(node, struct hrtimer, node);
1540 BUG_ON(hrtimer_callback_running(timer));
1541 debug_hrtimer_deactivate(timer);
1542
1543 /*
1544 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
1545 * timer could be seen as !active and just vanish away
1546 * under us on another CPU
1547 */
1548 __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
1549 timer->base = new_base;
1550 /*
1551 * Enqueue the timers on the new cpu. This does not
1552 * reprogram the event device in case the timer
1553 * expires before the earliest on this CPU, but we run
1554 * hrtimer_interrupt after we migrated everything to
1555 * sort out already expired timers and reprogram the
1556 * event device.
1557 */
1558 enqueue_hrtimer(timer, new_base);
1559
1560 /* Clear the migration state bit */
1561 timer->state &= ~HRTIMER_STATE_MIGRATE;
1562 }
1563}
1564
1565static void migrate_hrtimers(int scpu)
1566{
1567 struct hrtimer_cpu_base *old_base, *new_base;
1568 int i;
1569
1570 BUG_ON(cpu_online(scpu));
1571 tick_cancel_sched_timer(scpu);
1572
1573 local_irq_disable();
1574 old_base = &per_cpu(hrtimer_bases, scpu);
1575 new_base = &__get_cpu_var(hrtimer_bases);
1576 /*
1577 * The caller is globally serialized and nobody else
1578 * takes two locks at once, deadlock is not possible.
1579 */
1580 spin_lock(&new_base->lock);
1581 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1582
1583 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
1584 migrate_hrtimer_list(&old_base->clock_base[i],
1585 &new_base->clock_base[i]);
1586 }
1587
1588 spin_unlock(&old_base->lock);
1589 spin_unlock(&new_base->lock);
1590
1591 /* Check, if we got expired work to do */
1592 __hrtimer_peek_ahead_timers();
1593 local_irq_enable();
1594}
1595
1596#endif /* CONFIG_HOTPLUG_CPU */
1597
1598static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
1599 unsigned long action, void *hcpu)
1600{
1601 int scpu = (long)hcpu;
1602
1603 switch (action) {
1604
1605 case CPU_UP_PREPARE:
1606 case CPU_UP_PREPARE_FROZEN:
1607 init_hrtimers_cpu(scpu);
1608 break;
1609
1610#ifdef CONFIG_HOTPLUG_CPU
1611 case CPU_DYING:
1612 case CPU_DYING_FROZEN:
1613 clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
1614 break;
1615 case CPU_DEAD:
1616 case CPU_DEAD_FROZEN:
1617 {
1618 clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
1619 migrate_hrtimers(scpu);
1620 break;
1621 }
1622#endif
1623
1624 default:
1625 break;
1626 }
1627
1628 return NOTIFY_OK;
1629}
1630
1631static struct notifier_block __cpuinitdata hrtimers_nb = {
1632 .notifier_call = hrtimer_cpu_notify,
1633};
1634
1635void __init hrtimers_init(void)
1636{
1637 hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
1638 (void *)(long)smp_processor_id());
1639 register_cpu_notifier(&hrtimers_nb);
1640#ifdef CONFIG_HIGH_RES_TIMERS
1641 open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
1642#endif
1643}
1644
1645/**
1646 * schedule_hrtimeout_range - sleep until timeout
1647 * @expires: timeout value (ktime_t)
1648 * @delta: slack in expires timeout (ktime_t)
1649 * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
1650 *
1651 * Make the current task sleep until the given expiry time has
1652 * elapsed. The routine will return immediately unless
1653 * the current task state has been set (see set_current_state()).
1654 *
1655 * The @delta argument gives the kernel the freedom to schedule the
1656 * actual wakeup to a time that is both power and performance friendly.
1657 * The kernel gives the normal best effort behavior for "@expires+@delta",
1658 * and may decide to fire the timer earlier, but no earlier than @expires.
1659 *
1660 * You can set the task state as follows -
1661 *
1662 * %TASK_UNINTERRUPTIBLE - at least the @expires time is guaranteed to
1663 * pass before the routine returns.
1664 *
1665 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1666 * delivered to the current task.
1667 *
1668 * The current task state is guaranteed to be TASK_RUNNING when this
1669 * routine returns.
1670 *
1671 * Returns 0 when the timer has expired otherwise -EINTR
1672 */
1673int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
1674 const enum hrtimer_mode mode)
1675{
1676 struct hrtimer_sleeper t;
1677
1678 /*
1679 * Optimize when a zero timeout value is given. It does not
1680 * matter whether this is an absolute or a relative time.
1681 */
1682 if (expires && !expires->tv64) {
1683 __set_current_state(TASK_RUNNING);
1684 return 0;
1685 }
1686
1687 /*
1688 * A NULL parameter means "infinite"
1689 */
1690 if (!expires) {
1691 schedule();
1692 __set_current_state(TASK_RUNNING);
1693 return -EINTR;
1694 }
1695
1696 hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode);
1697 hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
1698
1699 hrtimer_init_sleeper(&t, current);
1700
1701 hrtimer_start_expires(&t.timer, mode);
1702 if (!hrtimer_active(&t.timer))
1703 t.task = NULL;
1704
1705 if (likely(t.task))
1706 schedule();
1707
1708 hrtimer_cancel(&t.timer);
1709 destroy_hrtimer_on_stack(&t.timer);
1710
1711 __set_current_state(TASK_RUNNING);
1712
1713 return !t.task ? 0 : -EINTR;
1714}
1715EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
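/*
 * Example (sketch): a slack-aware sleep of roughly 10ms, allowing the
 * wakeup to be coalesced with other timers within a 1ms window:
 *
 *	ktime_t t = ktime_set(0, 10 * NSEC_PER_MSEC);
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	schedule_hrtimeout_range(&t, NSEC_PER_MSEC, HRTIMER_MODE_REL);
 *
 * As documented above, the task state must be set before the call.
 */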
1716
1717/**
1718 * schedule_hrtimeout - sleep until timeout
1719 * @expires: timeout value (ktime_t)
1720 * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
1721 *
1722 * Make the current task sleep until the given expiry time has
1723 * elapsed. The routine will return immediately unless
1724 * the current task state has been set (see set_current_state()).
1725 *
1726 * You can set the task state as follows -
1727 *
1728 * %TASK_UNINTERRUPTIBLE - at least the @expires time is guaranteed to
1729 * pass before the routine returns.
1730 *
1731 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1732 * delivered to the current task.
1733 *
1734 * The current task state is guaranteed to be TASK_RUNNING when this
1735 * routine returns.
1736 *
1737 * Returns 0 when the timer has expired otherwise -EINTR
1738 */
1739int __sched schedule_hrtimeout(ktime_t *expires,
1740 const enum hrtimer_mode mode)
1741{
1742 return schedule_hrtimeout_range(expires, 0, mode);
1743}
1744EXPORT_SYMBOL_GPL(schedule_hrtimeout);