/*
 *  Kernel internal timers
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31	Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */
#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/slab.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>
__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);
/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
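
/*
 * Worked example of the wheel geometry (a sketch, assuming the
 * CONFIG_BASE_SMALL=0 values TVR_BITS=8, TVN_BITS=6): tv1 resolves the
 * next 256 jiffies exactly; tv2..tv5 each cover 64x the span of the
 * level below at 64x coarser granularity, up to MAX_TVAL:
 *
 *	idx = expires - base->timer_jiffies;	/* say idx == 1000 */
 *	/* 256 <= 1000 < 1 << 14, so the timer goes to tv2, in slot
 *	 * (expires >> TVR_BITS) & TVN_MASK, and is re-hashed into a
 *	 * precise tv1 slot when its tv2 bucket cascades. */
 */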
struct tvec {
	struct list_head vec[TVN_SIZE];
};

struct tvec_root {
	struct list_head vec[TVR_SIZE];
};
struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	unsigned long next_timer;
	unsigned long active_timers;
	unsigned long all_timers;
	int cpu;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;
struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);

static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;
/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TIMER_DEFERRABLE);
}
static inline unsigned int tbase_get_irqsafe(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TIMER_IRQSAFE);
}
static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
	return ((struct tvec_base *)((unsigned long)base & ~TIMER_FLAG_MASK));
}
static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
	unsigned long flags = (unsigned long)timer->base & TIMER_FLAG_MASK;

	timer->base = (struct tvec_base *)((unsigned long)(new_base) | flags);
}
static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then round, then subtract this
	 * extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffie is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up)	/* round down */
		j = j - rem;
	else	/* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	/*
	 * Make sure j is still in the future. Otherwise return the
	 * unmodified value.
	 */
	return time_is_after_jiffies(j) ? j : original;
}
/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);
/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);
/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);
/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);
/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);
/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of time, in jiffies, that a certain timer has
 * in terms of slack. By setting this value, the timer subsystem
 * will schedule the actual timer somewhere between
 * the time mod_timer() asks for, and that time plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
	timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);
/*
 * If the list is empty, catch up ->timer_jiffies to the current time.
 * The caller must hold the tvec_base lock. Returns true if the list
 * was empty and therefore ->timer_jiffies was updated.
 */
static bool catchup_timer_jiffies(struct tvec_base *base)
{
	if (!base->all_timers) {
		base->timer_jiffies = jiffies;
		return true;
	}
	return false;
}
static void
__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than MAX_TVAL (on 64-bit
		 * architectures or with CONFIG_BASE_SMALL=1) then we
		 * use the maximum timeout.
		 */
		if (idx > MAX_TVAL) {
			idx = MAX_TVAL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}
static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	(void)catchup_timer_jiffies(base);
	__internal_add_timer(base, timer);
	/*
	 * Update base->active_timers and base->next_timer
	 */
	if (!tbase_get_deferrable(timer->base)) {
		if (!base->active_timers++ ||
		    time_before(timer->expires, base->next_timer))
			base->next_timer = timer->expires;
	}
	base->all_timers++;

	/*
	 * Check whether the other CPU is in dynticks mode and needs
	 * to be triggered to reevaluate the timer wheel.
	 * We are protected against the other CPU fiddling
	 * with the timer by holding the timer base lock. This also
	 * makes sure that a CPU on the way to stop its tick can not
	 * evaluate the timer wheel.
	 *
	 * Spare the IPI for deferrable timers on idle targets though.
	 * The next busy ticks will take care of it. Except full dynticks
	 * require special care against races with idle_cpu(), lets deal
	 * with that later.
	 */
	if (!tbase_get_deferrable(timer->base) || tick_nohz_full_cpu(base->cpu))
		wake_up_nohz_cpu(base->cpu);
}
#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}
static void timer_stats_account_timer(struct timer_list *timer)
{
	unsigned int flag = 0;

	if (likely(!timer->start_site))
		return;
	if (unlikely(tbase_get_deferrable(timer->base)))
		flag |= TIMER_STATS_FLAG_DEFERRABLE;

	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
	return ((struct timer_list *) addr)->function;
}
/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}
/* Stub timer callback for improperly used timers. */
static void stub_timer(unsigned long data)
{
	WARN_ON(1);
}
/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The timer was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (timer->entry.next == NULL &&
		    timer->entry.prev == TIMER_ENTRY_STATIC) {
			debug_object_init(timer, &timer_debug_descr);
			debug_object_activate(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}
/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}
/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (timer->entry.prev == TIMER_ENTRY_STATIC) {
			/*
			 * This is not really a fixup. The timer was
			 * statically initialized. We just make sure that it
			 * is tracked in the object tracker.
			 */
			debug_object_init(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}
	default:
		return 0;
	}
}
static struct debug_obj_descr timer_debug_descr = {
	.name			= "timer_list",
	.debug_hint		= timer_debug_hint,
	.fixup_init		= timer_fixup_init,
	.fixup_activate		= timer_fixup_activate,
	.fixup_free		= timer_fixup_free,
	.fixup_assert_init	= timer_fixup_assert_init,
};
static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
	debug_object_assert_init(timer, &timer_debug_descr);
}
static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
			     const char *name, struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif
static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
	debug_timer_activate(timer);
	trace_timer_start(timer, expires);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
	debug_timer_assert_init(timer);
}
static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key)
{
	struct tvec_base *base = raw_cpu_read(tvec_bases);

	timer->entry.next = NULL;
	timer->base = (void *)((unsigned long)base | flags);
	timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}
/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer, unsigned int flags,
		    const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);
static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
	struct list_head *entry = &timer->entry;

	debug_deactivate(timer);

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}
static inline void
detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
{
	detach_timer(timer, true);
	if (!tbase_get_deferrable(timer->base))
		base->active_timers--;
	base->all_timers--;
	(void)catchup_timer_jiffies(base);
}
static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
			     bool clear_pending)
{
	if (!timer_pending(timer))
		return 0;

	detach_timer(timer, clear_pending);
	if (!tbase_get_deferrable(timer->base)) {
		base->active_timers--;
		if (timer->expires == base->next_timer)
			base->next_timer = base->timer_jiffies;
	}
	base->all_timers--;
	(void)catchup_timer_jiffies(base);
	return 1;
}
/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
					 unsigned long *flags)
	__acquires(timer->base->lock)
{
	struct tvec_base *base;

	for (;;) {
		struct tvec_base *prelock_base = timer->base;
		base = tbase_get_base(prelock_base);
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(prelock_base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}
static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
	    bool pending_only, int pinned)
{
	struct tvec_base *base, *new_base;
	unsigned long flags;
	int ret = 0, cpu;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	ret = detach_if_pending(timer, base, false);
	if (!ret && pending_only)
		goto out_unlock;

	debug_activate(timer, expires);

	cpu = get_nohz_timer_target(pinned);
	new_base = per_cpu(tvec_bases, cpu);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not finished yet. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer_set_base(timer, NULL);
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer_set_base(timer, base);
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);

out_unlock:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);
/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 *   1) calculate the maximum (absolute) time
 *   2) calculate the highest bit where the expires and new max are different
 *   3) use this bit to make a mask
 *   4) use the bitmask to round down the maximum time, so that all last
 *      bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
	unsigned long expires_limit, mask;
	int bit;

	if (timer->slack >= 0) {
		expires_limit = expires + timer->slack;
	} else {
		long delta = expires - jiffies;

		if (delta < 256)
			return expires;

		expires_limit = expires + delta / 256;
	}
	mask = expires ^ expires_limit;
	if (mask == 0)
		return expires;

	bit = find_last_bit(&mask, BITS_PER_LONG);

	mask = (1UL << bit) - 1;

	expires_limit = expires_limit & ~(mask);

	return expires_limit;
}
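
/*
 * Worked example (illustrative values): timer->slack == 10 and
 * expires == 0x1234 give expires_limit == 0x123e. Then
 * mask == 0x1234 ^ 0x123e == 0xa, the highest differing bit is bit 3,
 * so mask becomes 0x7 and the result is 0x123e & ~0x7 == 0x1238:
 * still within [expires, expires + slack], but with the low bits
 * cleared so that nearby timers tend to land on the same jiffy.
 */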
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	expires = apply_slack(timer, expires);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer_pending(timer) && timer->expires == expires)
		return 1;

	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);
/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 * and to ensure that the timer is scheduled on the current CPU.
 *
 * Note that this does not prevent the timer from being migrated
 * when the current CPU goes offline. If this is a problem for
 * you, use CPU-hotplug notifiers to handle it correctly, for
 * example, cancelling the timer when the corresponding CPU goes
 * offline.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);
/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);
/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct tvec_base *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer_set_base(timer, base);
	debug_activate(timer, timer->expires);
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to delete
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer) {
		timer_stats_timer_clear_start_info(timer);
		ret = detach_if_pending(timer, base, true);
	}
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);
#ifdef CONFIG_SMP
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's
 * handler. The timer's handler must not call add_timer_on(). Upon exit the
 * timer is not queued and the handler is not running on any CPU.
 *
 * Note: For !irqsafe timers, you must not hold locks that are held in
 * interrupt context while calling this function. Even if the lock has
 * nothing to do with the timer in question. Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                       call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                       spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *    while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	/*
	 * If lockdep gives a backtrace here, please reference
	 * the synchronization rules above.
	 */
	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif
	/*
	 * don't use it in hardirq context, because it
	 * could lead to deadlock.
	 */
	WARN_ON(in_irq() && !tbase_get_irqsafe(timer->base));
	for (;;) {
		int ret = try_to_del_timer_sync(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL(del_timer_sync);
#endif
static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(tbase_get_base(timer->base) != base);
		/* No accounting, while moving them */
		__internal_add_timer(base, timer);
	}

	return index;
}
static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
			  unsigned long data)
{
	int count = preempt_count();

#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the timer from inside the
	 * function that is called from it, this we need to take into
	 * account for lockdep too. To avoid bogus "held lock freed"
	 * warnings as well as problems when looking into
	 * timer->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
	/*
	 * Couple the lock chain with the lock chain at
	 * del_timer_sync() by acquiring the lock_map around the fn()
	 * call here and in del_timer_sync().
	 */
	lock_map_acquire(&lockdep_map);

	trace_timer_expire_entry(timer);
	fn(data);
	trace_timer_expire_exit(timer);

	lock_map_release(&lockdep_map);

	if (count != preempt_count()) {
		WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
			  fn, count, preempt_count());
		/*
		 * Restore the preempt count. That gives us a decent
		 * chance to survive and extract information. If the
		 * callback kept a lock held, bad luck, but not worse
		 * than the BUG() we had.
		 */
		preempt_count_set(count);
	}
}
#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
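
/*
 * Worked example (assuming TVR_BITS=8, TVN_BITS=6): when
 * base->timer_jiffies reaches 0x100, the tv1 index (0x100 & TVR_MASK)
 * wraps to 0 and INDEX(0) == (0x100 >> 8) & 0x3f == 1, so __run_timers()
 * below empties tv2 slot 1; each timer in it is re-added and now hashes
 * into an exact tv1 slot for its final 0..255 jiffies.
 */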
/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	if (catchup_timer_jiffies(base)) {
		spin_unlock_irq(&base->lock);
		return;
	}
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, head);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;
			bool irqsafe;

			timer = list_first_entry(head, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;
			irqsafe = tbase_get_irqsafe(timer->base);

			timer_stats_account_timer(timer);

			base->running_timer = timer;
			detach_expired_timer(timer, base);

			if (irqsafe) {
				spin_unlock(&base->lock);
				call_timer_fn(timer, fn, data);
				spin_lock(&base->lock);
			} else {
				spin_unlock_irq(&base->lock);
				call_timer_fn(timer, fn, data);
				spin_lock_irq(&base->lock);
			}
		}
	}
	base->running_timer = NULL;
	spin_unlock_irq(&base->lock);
}
#ifdef CONFIG_NO_HZ_COMMON
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	struct tvec *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
			if (tbase_get_deferrable(nte->base))
				continue;

			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		struct tvec *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			list_for_each_entry(nte, varp->vec + slot, entry) {
				if (tbase_get_deferrable(nte->base))
					continue;

				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Do we still search for the first timer or are
			 * we looking up the cascade buckets ?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}
/*
 * Check if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
					    unsigned long expires)
{
	ktime_t hr_delta = hrtimer_get_next_event();
	struct timespec tsdelta;
	unsigned long delta;

	if (hr_delta.tv64 == KTIME_MAX)
		return expires;

	/*
	 * Expired timer available, let it expire in the next tick
	 */
	if (hr_delta.tv64 <= 0)
		return now + 1;

	tsdelta = ktime_to_timespec(hr_delta);
	delta = timespec_to_jiffies(&tsdelta);

	/*
	 * Limit the delta to the max value, which is checked in
	 * tick_nohz_stop_sched_tick():
	 */
	if (delta > NEXT_TIMER_MAX_DELTA)
		delta = NEXT_TIMER_MAX_DELTA;

	/*
	 * Take rounding errors into account and make sure that it
	 * expires in the next tick. Otherwise we go into an endless
	 * ping pong due to tick_nohz_stop_sched_tick() retriggering
	 * the timer softirq.
	 */
	now += delta;
	if (time_before(now, expires))
		return now;

	return expires;
}
/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
	struct tvec_base *base = __this_cpu_read(tvec_bases);
	unsigned long expires = now + NEXT_TIMER_MAX_DELTA;

	/*
	 * Pretend that there is no timer pending if the cpu is offline.
	 * Possible pending timers will be migrated later to an active cpu.
	 */
	if (cpu_is_offline(smp_processor_id()))
		return expires;

	spin_lock(&base->lock);
	if (base->active_timers) {
		if (time_before_eq(base->next_timer, base->timer_jiffies))
			base->next_timer = __next_timer_interrupt(base);
		expires = base->next_timer;
	}
	spin_unlock(&base->lock);

	if (time_before_eq(expires, now))
		return now;

	return cmp_next_hrtimer_event(now, expires);
}
#endif
/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	rcu_check_callbacks(user_tick);
#ifdef CONFIG_IRQ_WORK
	if (in_irq())
		irq_work_tick();
#endif
	scheduler_tick();
	run_posix_cpu_timers(p);
}
/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	struct tvec_base *base = __this_cpu_read(tvec_bases);

	hrtimer_run_pending();

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}
/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	hrtimer_run_queues();
	raise_softirq(TIMER_SOFTIRQ);
}
#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility? This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
	return alarm_setitimer(seconds);
}

#endif
static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}
/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
	schedule();
	del_singleshot_timer_sync(&timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);
static int init_timers_cpu(int cpu)
{
	int j;
	struct tvec_base *base;
	static char tvec_base_done[NR_CPUS];

	if (!tvec_base_done[cpu]) {
		static char boot_done;

		if (boot_done) {
			/*
			 * The APs use this path later in boot
			 */
			base = kzalloc_node(sizeof(*base), GFP_KERNEL,
					    cpu_to_node(cpu));
			if (!base)
				return -ENOMEM;

			/* Make sure tvec_base has TIMER_FLAG_MASK bits free */
			if (WARN_ON(base != tbase_get_base(base))) {
				kfree(base);
				return -ENOMEM;
			}
			per_cpu(tvec_bases, cpu) = base;
		} else {
			/*
			 * This is for the boot CPU - we use compile-time
			 * static initialisation because per-cpu memory isn't
			 * ready yet and because the memory allocators are not
			 * initialised either.
			 */
			boot_done = 1;
			base = &boot_tvec_bases;
		}
		spin_lock_init(&base->lock);
		tvec_base_done[cpu] = 1;
		base->cpu = cpu;
	} else {
		base = per_cpu(tvec_bases, cpu);
	}

	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
	base->next_timer = base->timer_jiffies;
	base->active_timers = 0;
	base->all_timers = 0;
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_first_entry(head, struct timer_list, entry);
		/* We ignore the accounting on the dying cpu */
		detach_timer(timer, false);
		timer_set_base(timer, new_base);
		internal_add_timer(new_base, timer);
	}
}
static void migrate_timers(int cpu)
{
	struct tvec_base *old_base;
	struct tvec_base *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
	new_base = get_cpu_var(tvec_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	spin_lock_irq(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	spin_unlock(&old_base->lock);
	spin_unlock_irq(&new_base->lock);
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
static int timer_cpu_notify(struct notifier_block *self,
			    unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	int err;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		err = init_timers_cpu(cpu);
		if (err < 0)
			return notifier_from_errno(err);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}

	return NOTIFY_OK;
}
static struct notifier_block timers_nb = {
	.notifier_call	= timer_cpu_notify,
};
void __init init_timers(void)
{
	int err;

	/* ensure there are enough low bits for flags in timer->base pointer */
	BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK);

	err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
			       (void *)(long)smp_processor_id());
	BUG_ON(err != NOTIFY_OK);

	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}
/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}
EXPORT_SYMBOL(msleep);
/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}
EXPORT_SYMBOL(msleep_interruptible);
static int __sched do_usleep_range(unsigned long min, unsigned long max)
{
	ktime_t kmin;
	unsigned long delta;

	kmin = ktime_set(0, min * NSEC_PER_USEC);
	delta = (max - min) * NSEC_PER_USEC;
	return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
}
/**
 * usleep_range - Drop in replacement for udelay where wakeup is flexible
 * @min: Minimum time in usecs to sleep
 * @max: Maximum time in usecs to sleep
 */
void __sched usleep_range(unsigned long min, unsigned long max)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	do_usleep_range(min, max);
}
EXPORT_SYMBOL(usleep_range);
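
/*
 * Rule-of-thumb usage of the sleep helpers (a sketch; the thresholds
 * follow the usual guidance rather than anything enforced here):
 *
 *	udelay(5);			/* a few usecs: busy-wait */
 *	usleep_range(100, 200);		/* usecs to msecs: hrtimer sleep */
 *	msleep(50);			/* tens of msecs and up: jiffy sleep */
 *
 * The min/max window of usleep_range() lets the hrtimer subsystem
 * coalesce the wakeup with other pending events instead of programming
 * an exact interrupt.
 */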