1 /*
2 * linux/kernel/timer.c
3 *
4 * Kernel internal timers
5 *
6 * Copyright (C) 1991, 1992 Linus Torvalds
7 *
8 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
9 *
10 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
11 * "A Kernel Model for Precision Timekeeping" by Dave Mills
12 * 1998-12-24 Fixed an xtime SMP race (we need the xtime_lock rw spinlock to
13 * serialize accesses to xtime/lost_ticks).
14 * Copyright (C) 1998 Andrea Arcangeli
15 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
16 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
17 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
18 * Copyright (C) 2000, 2001, 2002 Ingo Molnar
19 * Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
20 */
21
22 #include <linux/kernel_stat.h>
23 #include <linux/export.h>
24 #include <linux/interrupt.h>
25 #include <linux/percpu.h>
26 #include <linux/init.h>
27 #include <linux/mm.h>
28 #include <linux/swap.h>
29 #include <linux/pid_namespace.h>
30 #include <linux/notifier.h>
31 #include <linux/thread_info.h>
32 #include <linux/time.h>
33 #include <linux/jiffies.h>
34 #include <linux/posix-timers.h>
35 #include <linux/cpu.h>
36 #include <linux/syscalls.h>
37 #include <linux/delay.h>
38 #include <linux/tick.h>
39 #include <linux/kallsyms.h>
40 #include <linux/irq_work.h>
41 #include <linux/sched.h>
42 #include <linux/sched/sysctl.h>
43 #include <linux/slab.h>
44 #include <linux/compat.h>
45
46 #include <asm/uaccess.h>
47 #include <asm/unistd.h>
48 #include <asm/div64.h>
49 #include <asm/timex.h>
50 #include <asm/io.h>
51
52 #include "tick-internal.h"
53
54 #define CREATE_TRACE_POINTS
55 #include <trace/events/timer.h>
56
57 __visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
58
59 EXPORT_SYMBOL(jiffies_64);
60
61 /*
62 * per-CPU timer vector definitions:
63 */
64 #define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
65 #define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
66 #define TVN_SIZE (1 << TVN_BITS)
67 #define TVR_SIZE (1 << TVR_BITS)
68 #define TVN_MASK (TVN_SIZE - 1)
69 #define TVR_MASK (TVR_SIZE - 1)
70 #define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
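/*
 * Illustrative wheel geometry, assuming the default !CONFIG_BASE_SMALL
 * values (TVR_BITS = 8, TVN_BITS = 6):
 *
 *   tv1: 256 buckets, 1 jiffy apart        -> expiries in [0, 2^8)
 *   tv2:  64 buckets, 2^8 jiffies apart    -> expiries in [2^8, 2^14)
 *   tv3:  64 buckets, 2^14 jiffies apart   -> expiries in [2^14, 2^20)
 *   tv4:  64 buckets, 2^20 jiffies apart   -> expiries in [2^20, 2^26)
 *   tv5:  64 buckets, 2^26 jiffies apart   -> expiries in [2^26, 2^32)
 *
 * MAX_TVAL is thus 2^32 - 1 jiffies, roughly 49.7 days at HZ=1000.
 */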
71
72 struct tvec {
73 struct hlist_head vec[TVN_SIZE];
74 };
75
76 struct tvec_root {
77 struct hlist_head vec[TVR_SIZE];
78 };
79
80 struct tvec_base {
81 spinlock_t lock;
82 struct timer_list *running_timer;
83 unsigned long timer_jiffies;
84 unsigned long next_timer;
85 unsigned long active_timers;
86 unsigned long all_timers;
87 int cpu;
88 bool migration_enabled;
89 bool nohz_active;
90 struct tvec_root tv1;
91 struct tvec tv2;
92 struct tvec tv3;
93 struct tvec tv4;
94 struct tvec tv5;
95 } ____cacheline_aligned;
96
97
98 static DEFINE_PER_CPU(struct tvec_base, tvec_bases);
99
100 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
101 unsigned int sysctl_timer_migration = 1;
102
103 void timers_update_migration(bool update_nohz)
104 {
105 bool on = sysctl_timer_migration && tick_nohz_active;
106 unsigned int cpu;
107
108 /* Avoid the loop, if nothing to update */
109 if (this_cpu_read(tvec_bases.migration_enabled) == on)
110 return;
111
112 for_each_possible_cpu(cpu) {
113 per_cpu(tvec_bases.migration_enabled, cpu) = on;
114 per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
115 if (!update_nohz)
116 continue;
117 per_cpu(tvec_bases.nohz_active, cpu) = true;
118 per_cpu(hrtimer_bases.nohz_active, cpu) = true;
119 }
120 }
121
122 int timer_migration_handler(struct ctl_table *table, int write,
123 void __user *buffer, size_t *lenp,
124 loff_t *ppos)
125 {
126 static DEFINE_MUTEX(mutex);
127 int ret;
128
129 mutex_lock(&mutex);
130 ret = proc_dointvec(table, write, buffer, lenp, ppos);
131 if (!ret && write)
132 timers_update_migration(false);
133 mutex_unlock(&mutex);
134 return ret;
135 }
136
137 static inline struct tvec_base *get_target_base(struct tvec_base *base,
138 int pinned)
139 {
140 if (pinned || !base->migration_enabled)
141 return this_cpu_ptr(&tvec_bases);
142 return per_cpu_ptr(&tvec_bases, get_nohz_timer_target());
143 }
144 #else
145 static inline struct tvec_base *get_target_base(struct tvec_base *base,
146 int pinned)
147 {
148 return this_cpu_ptr(&tvec_bases);
149 }
150 #endif
151
152 static unsigned long round_jiffies_common(unsigned long j, int cpu,
153 bool force_up)
154 {
155 int rem;
156 unsigned long original = j;
157
158 /*
159 * We don't want all cpus firing their timers at once hitting the
160 * same lock or cachelines, so we skew each extra cpu with an extra
161 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
162 * already did this.
163 * The skew is done by adding 3*cpunr, rounding, and then subtracting this
164 * extra offset again.
165 */
166 j += cpu * 3;
167
168 rem = j % HZ;
169
170 /*
171 * If the target jiffy is just after a whole second (which can happen
172 * due to delays of the timer irq, long irq-off times, etc.) then
173 * we should round down to the whole second, not up. Use 1/4 second
174 * as the cutoff for this rounding, as an extreme upper bound.
175 * But never round down if @force_up is set.
176 */
177 if (rem < HZ/4 && !force_up) /* round down */
178 j = j - rem;
179 else /* round up */
180 j = j - rem + HZ;
181
182 /* now that we have rounded, subtract the extra skew again */
183 j -= cpu * 3;
184
185 /*
186 * Make sure j is still in the future. Otherwise return the
187 * unmodified value.
188 */
189 return time_is_after_jiffies(j) ? j : original;
190 }
191
192 /**
193 * __round_jiffies - function to round jiffies to a full second
194 * @j: the time in (absolute) jiffies that should be rounded
195 * @cpu: the processor number on which the timeout will happen
196 *
197 * __round_jiffies() rounds an absolute time in the future (in jiffies)
198 * up or down to (approximately) full seconds. This is useful for timers
199 * for which the exact time they fire does not matter too much, as long as
200 * they fire approximately every X seconds.
201 *
202 * By rounding these timers to whole seconds, all such timers will fire
203 * at the same time, rather than at various times spread out. The goal
204 * of this is to have the CPU wake up less, which saves power.
205 *
206 * The exact rounding is skewed for each processor to avoid all
207 * processors firing at the exact same time, which could lead
208 * to lock contention or spurious cache line bouncing.
209 *
210 * The return value is the rounded version of the @j parameter.
211 */
212 unsigned long __round_jiffies(unsigned long j, int cpu)
213 {
214 return round_jiffies_common(j, cpu, false);
215 }
216 EXPORT_SYMBOL_GPL(__round_jiffies);
217
218 /**
219 * __round_jiffies_relative - function to round jiffies to a full second
220 * @j: the time in (relative) jiffies that should be rounded
221 * @cpu: the processor number on which the timeout will happen
222 *
223 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
224 * up or down to (approximately) full seconds. This is useful for timers
225 * for which the exact time they fire does not matter too much, as long as
226 * they fire approximately every X seconds.
227 *
228 * By rounding these timers to whole seconds, all such timers will fire
229 * at the same time, rather than at various times spread out. The goal
230 * of this is to have the CPU wake up less, which saves power.
231 *
232 * The exact rounding is skewed for each processor to avoid all
233 * processors firing at the exact same time, which could lead
234 * to lock contention or spurious cache line bouncing.
235 *
236 * The return value is the rounded version of the @j parameter.
237 */
238 unsigned long __round_jiffies_relative(unsigned long j, int cpu)
239 {
240 unsigned long j0 = jiffies;
241
242 /* Use j0 because jiffies might change while we run */
243 return round_jiffies_common(j + j0, cpu, false) - j0;
244 }
245 EXPORT_SYMBOL_GPL(__round_jiffies_relative);
246
247 /**
248 * round_jiffies - function to round jiffies to a full second
249 * @j: the time in (absolute) jiffies that should be rounded
250 *
251 * round_jiffies() rounds an absolute time in the future (in jiffies)
252 * up or down to (approximately) full seconds. This is useful for timers
253 * for which the exact time they fire does not matter too much, as long as
254 * they fire approximately every X seconds.
255 *
256 * By rounding these timers to whole seconds, all such timers will fire
257 * at the same time, rather than at various times spread out. The goal
258 * of this is to have the CPU wake up less, which saves power.
259 *
260 * The return value is the rounded version of the @j parameter.
261 */
262 unsigned long round_jiffies(unsigned long j)
263 {
264 return round_jiffies_common(j, raw_smp_processor_id(), false);
265 }
266 EXPORT_SYMBOL_GPL(round_jiffies);
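/*
 * Illustrative use, assuming a hypothetical periodic housekeeping timer
 * "my_timer" that only needs to fire about once a second:
 *
 *	mod_timer(&my_timer, round_jiffies(jiffies + HZ));
 *
 * All such timers on a CPU then expire on (approximately) the same tick,
 * which lets the CPU stay idle longer between wakeups.
 */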
267
268 /**
269 * round_jiffies_relative - function to round jiffies to a full second
270 * @j: the time in (relative) jiffies that should be rounded
271 *
272 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
273 * up or down to (approximately) full seconds. This is useful for timers
274 * for which the exact time they fire does not matter too much, as long as
275 * they fire approximately every X seconds.
276 *
277 * By rounding these timers to whole seconds, all such timers will fire
278 * at the same time, rather than at various times spread out. The goal
279 * of this is to have the CPU wake up less, which saves power.
280 *
281 * The return value is the rounded version of the @j parameter.
282 */
283 unsigned long round_jiffies_relative(unsigned long j)
284 {
285 return __round_jiffies_relative(j, raw_smp_processor_id());
286 }
287 EXPORT_SYMBOL_GPL(round_jiffies_relative);
288
289 /**
290 * __round_jiffies_up - function to round jiffies up to a full second
291 * @j: the time in (absolute) jiffies that should be rounded
292 * @cpu: the processor number on which the timeout will happen
293 *
294 * This is the same as __round_jiffies() except that it will never
295 * round down. This is useful for timeouts for which the exact time
296 * of firing does not matter too much, as long as they don't fire too
297 * early.
298 */
299 unsigned long __round_jiffies_up(unsigned long j, int cpu)
300 {
301 return round_jiffies_common(j, cpu, true);
302 }
303 EXPORT_SYMBOL_GPL(__round_jiffies_up);
304
305 /**
306 * __round_jiffies_up_relative - function to round jiffies up to a full second
307 * @j: the time in (relative) jiffies that should be rounded
308 * @cpu: the processor number on which the timeout will happen
309 *
310 * This is the same as __round_jiffies_relative() except that it will never
311 * round down. This is useful for timeouts for which the exact time
312 * of firing does not matter too much, as long as they don't fire too
313 * early.
314 */
315 unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
316 {
317 unsigned long j0 = jiffies;
318
319 /* Use j0 because jiffies might change while we run */
320 return round_jiffies_common(j + j0, cpu, true) - j0;
321 }
322 EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
323
324 /**
325 * round_jiffies_up - function to round jiffies up to a full second
326 * @j: the time in (absolute) jiffies that should be rounded
327 *
328 * This is the same as round_jiffies() except that it will never
329 * round down. This is useful for timeouts for which the exact time
330 * of firing does not matter too much, as long as they don't fire too
331 * early.
332 */
333 unsigned long round_jiffies_up(unsigned long j)
334 {
335 return round_jiffies_common(j, raw_smp_processor_id(), true);
336 }
337 EXPORT_SYMBOL_GPL(round_jiffies_up);
338
339 /**
340 * round_jiffies_up_relative - function to round jiffies up to a full second
341 * @j: the time in (relative) jiffies that should be rounded
342 *
343 * This is the same as round_jiffies_relative() except that it will never
344 * round down. This is useful for timeouts for which the exact time
345 * of firing does not matter too much, as long as they don't fire too
346 * early.
347 */
348 unsigned long round_jiffies_up_relative(unsigned long j)
349 {
350 return __round_jiffies_up_relative(j, raw_smp_processor_id());
351 }
352 EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
353
354 /**
355 * set_timer_slack - set the allowed slack for a timer
356 * @timer: the timer to be modified
357 * @slack_hz: the amount of time (in jiffies) allowed for rounding
358 *
359 * Set the amount of time, in jiffies, that a certain timer has
360 * in terms of slack. By setting this value, the timer subsystem
361 * will schedule the actual timer somewhere between
362 * the time mod_timer() asks for, and that time plus the slack.
363 *
364 * By setting the slack to -1, a percentage of the delay is used
365 * instead.
366 */
367 void set_timer_slack(struct timer_list *timer, int slack_hz)
368 {
369 timer->slack = slack_hz;
370 }
371 EXPORT_SYMBOL_GPL(set_timer_slack);
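/*
 * Illustrative use, with a hypothetical timer "my_timer" whose expiry may
 * be deferred by up to one second so it can be coalesced with others:
 *
 *	set_timer_slack(&my_timer, HZ);
 *	mod_timer(&my_timer, jiffies + 10 * HZ);
 */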
372
373 static void
374 __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
375 {
376 unsigned long expires = timer->expires;
377 unsigned long idx = expires - base->timer_jiffies;
378 struct hlist_head *vec;
379
380 if (idx < TVR_SIZE) {
381 int i = expires & TVR_MASK;
382 vec = base->tv1.vec + i;
383 } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
384 int i = (expires >> TVR_BITS) & TVN_MASK;
385 vec = base->tv2.vec + i;
386 } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
387 int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
388 vec = base->tv3.vec + i;
389 } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
390 int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
391 vec = base->tv4.vec + i;
392 } else if ((signed long) idx < 0) {
393 /*
394 * Can happen if you add a timer with expires == jiffies,
395 * or you set a timer to go off in the past
396 */
397 vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
398 } else {
399 int i;
400 /* If the timeout is larger than MAX_TVAL (on 64-bit
401 * architectures or with CONFIG_BASE_SMALL=1) then we
402 * use the maximum timeout.
403 */
404 if (idx > MAX_TVAL) {
405 idx = MAX_TVAL;
406 expires = idx + base->timer_jiffies;
407 }
408 i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
409 vec = base->tv5.vec + i;
410 }
411
412 hlist_add_head(&timer->entry, vec);
413 }
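/*
 * Worked example (illustrative): with base->timer_jiffies == 0 and
 * expires == 300, idx is 300, which falls in the tv2 range
 * [TVR_SIZE, 1 << (TVR_BITS + TVN_BITS)), so the slot is
 * (300 >> TVR_BITS) & TVN_MASK == 1 and the timer is queued on
 * base->tv2.vec[1].
 */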
414
415 static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
416 {
417 /* If the base was empty, sync base->timer_jiffies to jiffies */
418 if (!base->all_timers++)
419 base->timer_jiffies = jiffies;
420
421 __internal_add_timer(base, timer);
422 /*
423 * Update base->active_timers and base->next_timer
424 */
425 if (!(timer->flags & TIMER_DEFERRABLE)) {
426 if (!base->active_timers++ ||
427 time_before(timer->expires, base->next_timer))
428 base->next_timer = timer->expires;
429 }
430
431 /*
432 * Check whether the other CPU is in dynticks mode and needs
433 * to be triggered to reevaluate the timer wheel.
434 * We are protected against the other CPU fiddling
435 * with the timer by holding the timer base lock. This also
436 * makes sure that a CPU on the way to stop its tick can not
437 * evaluate the timer wheel.
438 *
439 * Spare the IPI for deferrable timers on idle targets though.
440 * The next busy ticks will take care of it. Except full dynticks
441 * require special care against races with idle_cpu(); let's deal
442 * with that later.
443 */
444 if (base->nohz_active) {
445 if (!(timer->flags & TIMER_DEFERRABLE) ||
446 tick_nohz_full_cpu(base->cpu))
447 wake_up_nohz_cpu(base->cpu);
448 }
449 }
450
451 #ifdef CONFIG_TIMER_STATS
452 void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
453 {
454 if (timer->start_site)
455 return;
456
457 timer->start_site = addr;
458 memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
459 timer->start_pid = current->pid;
460 }
461
462 static void timer_stats_account_timer(struct timer_list *timer)
463 {
464 void *site;
465
466 /*
467 * start_site can be concurrently reset by
468 * timer_stats_timer_clear_start_info()
469 */
470 site = READ_ONCE(timer->start_site);
471 if (likely(!site))
472 return;
473
474 timer_stats_update_stats(timer, timer->start_pid, site,
475 timer->function, timer->start_comm,
476 timer->flags);
477 }
478
479 #else
480 static void timer_stats_account_timer(struct timer_list *timer) {}
481 #endif
482
483 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
484
485 static struct debug_obj_descr timer_debug_descr;
486
487 static void *timer_debug_hint(void *addr)
488 {
489 return ((struct timer_list *) addr)->function;
490 }
491
492 static bool timer_is_static_object(void *addr)
493 {
494 struct timer_list *timer = addr;
495
496 return (timer->entry.pprev == NULL &&
497 timer->entry.next == TIMER_ENTRY_STATIC);
498 }
499
500 /*
501 * fixup_init is called when:
502 * - an active object is initialized
503 */
504 static bool timer_fixup_init(void *addr, enum debug_obj_state state)
505 {
506 struct timer_list *timer = addr;
507
508 switch (state) {
509 case ODEBUG_STATE_ACTIVE:
510 del_timer_sync(timer);
511 debug_object_init(timer, &timer_debug_descr);
512 return true;
513 default:
514 return false;
515 }
516 }
517
518 /* Stub timer callback for improperly used timers. */
519 static void stub_timer(unsigned long data)
520 {
521 WARN_ON(1);
522 }
523
524 /*
525 * fixup_activate is called when:
526 * - an active object is activated
527 * - an unknown non-static object is activated
528 */
529 static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
530 {
531 struct timer_list *timer = addr;
532
533 switch (state) {
534 case ODEBUG_STATE_NOTAVAILABLE:
535 setup_timer(timer, stub_timer, 0);
536 return true;
537
538 case ODEBUG_STATE_ACTIVE:
539 WARN_ON(1);
540
541 default:
542 return false;
543 }
544 }
545
546 /*
547 * fixup_free is called when:
548 * - an active object is freed
549 */
550 static bool timer_fixup_free(void *addr, enum debug_obj_state state)
551 {
552 struct timer_list *timer = addr;
553
554 switch (state) {
555 case ODEBUG_STATE_ACTIVE:
556 del_timer_sync(timer);
557 debug_object_free(timer, &timer_debug_descr);
558 return true;
559 default:
560 return false;
561 }
562 }
563
564 /*
565 * fixup_assert_init is called when:
566 * - an untracked/uninit-ed object is found
567 */
568 static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
569 {
570 struct timer_list *timer = addr;
571
572 switch (state) {
573 case ODEBUG_STATE_NOTAVAILABLE:
574 setup_timer(timer, stub_timer, 0);
575 return true;
576 default:
577 return false;
578 }
579 }
580
581 static struct debug_obj_descr timer_debug_descr = {
582 .name = "timer_list",
583 .debug_hint = timer_debug_hint,
584 .is_static_object = timer_is_static_object,
585 .fixup_init = timer_fixup_init,
586 .fixup_activate = timer_fixup_activate,
587 .fixup_free = timer_fixup_free,
588 .fixup_assert_init = timer_fixup_assert_init,
589 };
590
591 static inline void debug_timer_init(struct timer_list *timer)
592 {
593 debug_object_init(timer, &timer_debug_descr);
594 }
595
596 static inline void debug_timer_activate(struct timer_list *timer)
597 {
598 debug_object_activate(timer, &timer_debug_descr);
599 }
600
601 static inline void debug_timer_deactivate(struct timer_list *timer)
602 {
603 debug_object_deactivate(timer, &timer_debug_descr);
604 }
605
606 static inline void debug_timer_free(struct timer_list *timer)
607 {
608 debug_object_free(timer, &timer_debug_descr);
609 }
610
611 static inline void debug_timer_assert_init(struct timer_list *timer)
612 {
613 debug_object_assert_init(timer, &timer_debug_descr);
614 }
615
616 static void do_init_timer(struct timer_list *timer, unsigned int flags,
617 const char *name, struct lock_class_key *key);
618
619 void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
620 const char *name, struct lock_class_key *key)
621 {
622 debug_object_init_on_stack(timer, &timer_debug_descr);
623 do_init_timer(timer, flags, name, key);
624 }
625 EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
626
627 void destroy_timer_on_stack(struct timer_list *timer)
628 {
629 debug_object_free(timer, &timer_debug_descr);
630 }
631 EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
632
633 #else
634 static inline void debug_timer_init(struct timer_list *timer) { }
635 static inline void debug_timer_activate(struct timer_list *timer) { }
636 static inline void debug_timer_deactivate(struct timer_list *timer) { }
637 static inline void debug_timer_assert_init(struct timer_list *timer) { }
638 #endif
639
640 static inline void debug_init(struct timer_list *timer)
641 {
642 debug_timer_init(timer);
643 trace_timer_init(timer);
644 }
645
646 static inline void
647 debug_activate(struct timer_list *timer, unsigned long expires)
648 {
649 debug_timer_activate(timer);
650 trace_timer_start(timer, expires, timer->flags);
651 }
652
653 static inline void debug_deactivate(struct timer_list *timer)
654 {
655 debug_timer_deactivate(timer);
656 trace_timer_cancel(timer);
657 }
658
659 static inline void debug_assert_init(struct timer_list *timer)
660 {
661 debug_timer_assert_init(timer);
662 }
663
664 static void do_init_timer(struct timer_list *timer, unsigned int flags,
665 const char *name, struct lock_class_key *key)
666 {
667 timer->entry.pprev = NULL;
668 timer->flags = flags | raw_smp_processor_id();
669 timer->slack = -1;
670 #ifdef CONFIG_TIMER_STATS
671 timer->start_site = NULL;
672 timer->start_pid = -1;
673 memset(timer->start_comm, 0, TASK_COMM_LEN);
674 #endif
675 lockdep_init_map(&timer->lockdep_map, name, key, 0);
676 }
677
678 /**
679 * init_timer_key - initialize a timer
680 * @timer: the timer to be initialized
681 * @flags: timer flags
682 * @name: name of the timer
683 * @key: lockdep class key of the fake lock used for tracking timer
684 * sync lock dependencies
685 *
686 * init_timer_key() must be done to a timer prior to calling *any* of the
687 * other timer functions.
688 */
689 void init_timer_key(struct timer_list *timer, unsigned int flags,
690 const char *name, struct lock_class_key *key)
691 {
692 debug_init(timer);
693 do_init_timer(timer, flags, name, key);
694 }
695 EXPORT_SYMBOL(init_timer_key);
696
697 static inline void detach_timer(struct timer_list *timer, bool clear_pending)
698 {
699 struct hlist_node *entry = &timer->entry;
700
701 debug_deactivate(timer);
702
703 __hlist_del(entry);
704 if (clear_pending)
705 entry->pprev = NULL;
706 entry->next = LIST_POISON2;
707 }
708
709 static inline void
710 detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
711 {
712 detach_timer(timer, true);
713 if (!(timer->flags & TIMER_DEFERRABLE))
714 base->active_timers--;
715 base->all_timers--;
716 }
717
718 static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
719 bool clear_pending)
720 {
721 if (!timer_pending(timer))
722 return 0;
723
724 detach_timer(timer, clear_pending);
725 if (!(timer->flags & TIMER_DEFERRABLE)) {
726 base->active_timers--;
727 if (timer->expires == base->next_timer)
728 base->next_timer = base->timer_jiffies;
729 }
730 /* If this was the last timer, advance base->timer_jiffies */
731 if (!--base->all_timers)
732 base->timer_jiffies = jiffies;
733 return 1;
734 }
735
736 /*
737 * We are using hashed locking: holding per_cpu(tvec_bases).lock
738 * means that all timers which are tied to this base via timer->base are
739 * locked, and the base itself is locked too.
740 *
741 * So __run_timers/migrate_timers can safely modify all timers which could
742 * be found on ->tvX lists.
743 *
744 * When a timer is being migrated to another base, the TIMER_MIGRATING flag
745 * is set while it is detached, so lock_timer_base() spins until it settles.
746 */
747 static struct tvec_base *lock_timer_base(struct timer_list *timer,
748 unsigned long *flags)
749 __acquires(timer->base->lock)
750 {
751 for (;;) {
752 u32 tf = timer->flags;
753 struct tvec_base *base;
754
755 if (!(tf & TIMER_MIGRATING)) {
756 base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
757 spin_lock_irqsave(&base->lock, *flags);
758 if (timer->flags == tf)
759 return base;
760 spin_unlock_irqrestore(&base->lock, *flags);
761 }
762 cpu_relax();
763 }
764 }
765
766 static inline int
767 __mod_timer(struct timer_list *timer, unsigned long expires,
768 bool pending_only, int pinned)
769 {
770 struct tvec_base *base, *new_base;
771 unsigned long flags;
772 int ret = 0;
773
774 timer_stats_timer_set_start_info(timer);
775 BUG_ON(!timer->function);
776
777 base = lock_timer_base(timer, &flags);
778
779 ret = detach_if_pending(timer, base, false);
780 if (!ret && pending_only)
781 goto out_unlock;
782
783 debug_activate(timer, expires);
784
785 new_base = get_target_base(base, pinned);
786
787 if (base != new_base) {
788 /*
789 * We are trying to schedule the timer on the local CPU.
790 * However we can't change timer's base while it is running,
791 * otherwise del_timer_sync() can't detect that the timer's
792 * handler has not yet finished. This also guarantees that
793 * the timer is serialized wrt itself.
794 */
795 if (likely(base->running_timer != timer)) {
796 /* See the comment in lock_timer_base() */
797 timer->flags |= TIMER_MIGRATING;
798
799 spin_unlock(&base->lock);
800 base = new_base;
801 spin_lock(&base->lock);
802 WRITE_ONCE(timer->flags,
803 (timer->flags & ~TIMER_BASEMASK) | base->cpu);
804 }
805 }
806
807 timer->expires = expires;
808 internal_add_timer(base, timer);
809
810 out_unlock:
811 spin_unlock_irqrestore(&base->lock, flags);
812
813 return ret;
814 }
815
816 /**
817 * mod_timer_pending - modify a pending timer's timeout
818 * @timer: the pending timer to be modified
819 * @expires: new timeout in jiffies
820 *
821 * mod_timer_pending() is the same for pending timers as mod_timer(),
822 * but will not re-activate and modify already deleted timers.
823 *
824 * It is useful for unserialized use of timers.
825 */
826 int mod_timer_pending(struct timer_list *timer, unsigned long expires)
827 {
828 return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
829 }
830 EXPORT_SYMBOL(mod_timer_pending);
831
832 /*
833 * Decide where to put the timer while taking the slack into account
834 *
835 * Algorithm:
836 * 1) calculate the maximum (absolute) time
837 * 2) calculate the highest bit where the expires and new max are different
838 * 3) use this bit to make a mask
839 * 4) use the bitmask to round down the maximum time, so that all last
840 * bits are zeros
841 */
842 static inline
843 unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
844 {
845 unsigned long expires_limit, mask;
846 int bit;
847
848 if (timer->slack >= 0) {
849 expires_limit = expires + timer->slack;
850 } else {
851 long delta = expires - jiffies;
852
853 if (delta < 256)
854 return expires;
855
856 expires_limit = expires + delta / 256;
857 }
858 mask = expires ^ expires_limit;
859 if (mask == 0)
860 return expires;
861
862 bit = __fls(mask);
863
864 mask = (1UL << bit) - 1;
865
866 expires_limit = expires_limit & ~(mask);
867
868 return expires_limit;
869 }
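/*
 * Worked example (illustrative): with timer->slack == 16 and
 * expires == 1000, expires_limit is 1016, mask is 1000 ^ 1016 == 16,
 * bit is 4, so the low four bits are cleared and the timer is placed
 * at jiffy 1008, somewhere inside the allowed [1000, 1016] window.
 */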
870
871 /**
872 * mod_timer - modify a timer's timeout
873 * @timer: the timer to be modified
874 * @expires: new timeout in jiffies
875 *
876 * mod_timer() is a more efficient way to update the expire field of an
877 * active timer (if the timer is inactive it will be activated)
878 *
879 * mod_timer(timer, expires) is equivalent to:
880 *
881 * del_timer(timer); timer->expires = expires; add_timer(timer);
882 *
883 * Note that if there are multiple unserialized concurrent users of the
884 * same timer, then mod_timer() is the only safe way to modify the timeout,
885 * since add_timer() cannot modify an already running timer.
886 *
887 * The function returns whether it has modified a pending timer or not.
888 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
889 * active timer returns 1.)
890 */
891 int mod_timer(struct timer_list *timer, unsigned long expires)
892 {
893 expires = apply_slack(timer, expires);
894
895 /*
896 * This is a common optimization triggered by the
897 * networking code - if the timer is re-modified
898 * to be the same thing then just return:
899 */
900 if (timer_pending(timer) && timer->expires == expires)
901 return 1;
902
903 return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
904 }
905 EXPORT_SYMBOL(mod_timer);
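/*
 * Illustrative use, with hypothetical "my_timer" and "my_timeout_fn":
 *
 *	setup_timer(&my_timer, my_timeout_fn, 0);
 *	mod_timer(&my_timer, jiffies + msecs_to_jiffies(200));
 *
 * If my_timer was already pending, only its expiry is updated; otherwise
 * it is (re)activated.
 */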
906
907 /**
908 * mod_timer_pinned - modify a timer's timeout
909 * @timer: the timer to be modified
910 * @expires: new timeout in jiffies
911 *
912 * mod_timer_pinned() is a way to update the expire field of an
913 * active timer (if the timer is inactive it will be activated)
914 * and to ensure that the timer is scheduled on the current CPU.
915 *
916 * Note that this does not prevent the timer from being migrated
917 * when the current CPU goes offline. If this is a problem for
918 * you, use CPU-hotplug notifiers to handle it correctly, for
919 * example, cancelling the timer when the corresponding CPU goes
920 * offline.
921 *
922 * mod_timer_pinned(timer, expires) is equivalent to:
923 *
924 * del_timer(timer); timer->expires = expires; add_timer(timer);
925 */
926 int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
927 {
928 if (timer->expires == expires && timer_pending(timer))
929 return 1;
930
931 return __mod_timer(timer, expires, false, TIMER_PINNED);
932 }
933 EXPORT_SYMBOL(mod_timer_pinned);
934
935 /**
936 * add_timer - start a timer
937 * @timer: the timer to be added
938 *
939 * The kernel will do a ->function(->data) callback from the
940 * timer interrupt at the ->expires point in the future. The
941 * current time is 'jiffies'.
942 *
943 * The timer's ->expires, ->function (and if the handler uses it, ->data)
944 * fields must be set prior to calling this function.
945 *
946 * Timers with an ->expires field in the past will be executed in the next
947 * timer tick.
948 */
949 void add_timer(struct timer_list *timer)
950 {
951 BUG_ON(timer_pending(timer));
952 mod_timer(timer, timer->expires);
953 }
954 EXPORT_SYMBOL(add_timer);
955
956 /**
957 * add_timer_on - start a timer on a particular CPU
958 * @timer: the timer to be added
959 * @cpu: the CPU to start it on
960 *
961 * This is not very scalable on SMP. Double adds are not possible.
962 */
963 void add_timer_on(struct timer_list *timer, int cpu)
964 {
965 struct tvec_base *new_base = per_cpu_ptr(&tvec_bases, cpu);
966 struct tvec_base *base;
967 unsigned long flags;
968
969 timer_stats_timer_set_start_info(timer);
970 BUG_ON(timer_pending(timer) || !timer->function);
971
972 /*
973 * If @timer was on a different CPU, it should be migrated with the
974 * old base locked to prevent other operations proceeding with the
975 * wrong base locked. See lock_timer_base().
976 */
977 base = lock_timer_base(timer, &flags);
978 if (base != new_base) {
979 timer->flags |= TIMER_MIGRATING;
980
981 spin_unlock(&base->lock);
982 base = new_base;
983 spin_lock(&base->lock);
984 WRITE_ONCE(timer->flags,
985 (timer->flags & ~TIMER_BASEMASK) | cpu);
986 }
987
988 debug_activate(timer, timer->expires);
989 internal_add_timer(base, timer);
990 spin_unlock_irqrestore(&base->lock, flags);
991 }
992 EXPORT_SYMBOL_GPL(add_timer_on);
993
994 /**
995 * del_timer - deactivate a timer.
996 * @timer: the timer to be deactivated
997 *
998 * del_timer() deactivates a timer - this works on both active and inactive
999 * timers.
1000 *
1001 * The function returns whether it has deactivated a pending timer or not.
1002 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
1003 * active timer returns 1.)
1004 */
1005 int del_timer(struct timer_list *timer)
1006 {
1007 struct tvec_base *base;
1008 unsigned long flags;
1009 int ret = 0;
1010
1011 debug_assert_init(timer);
1012
1013 timer_stats_timer_clear_start_info(timer);
1014 if (timer_pending(timer)) {
1015 base = lock_timer_base(timer, &flags);
1016 ret = detach_if_pending(timer, base, true);
1017 spin_unlock_irqrestore(&base->lock, flags);
1018 }
1019
1020 return ret;
1021 }
1022 EXPORT_SYMBOL(del_timer);
1023
1024 /**
1025 * try_to_del_timer_sync - Try to deactivate a timer
1026 * @timer: timer to deactivate
1027 *
1028 * This function tries to deactivate a timer. Upon successful (ret >= 0)
1029 * exit the timer is not queued and the handler is not running on any CPU.
1030 */
1031 int try_to_del_timer_sync(struct timer_list *timer)
1032 {
1033 struct tvec_base *base;
1034 unsigned long flags;
1035 int ret = -1;
1036
1037 debug_assert_init(timer);
1038
1039 base = lock_timer_base(timer, &flags);
1040
1041 if (base->running_timer != timer) {
1042 timer_stats_timer_clear_start_info(timer);
1043 ret = detach_if_pending(timer, base, true);
1044 }
1045 spin_unlock_irqrestore(&base->lock, flags);
1046
1047 return ret;
1048 }
1049 EXPORT_SYMBOL(try_to_del_timer_sync);
1050
1051 #ifdef CONFIG_SMP
1052 /**
1053 * del_timer_sync - deactivate a timer and wait for the handler to finish.
1054 * @timer: the timer to be deactivated
1055 *
1056 * This function only differs from del_timer() on SMP: besides deactivating
1057 * the timer it also makes sure the handler has finished executing on other
1058 * CPUs.
1059 *
1060 * Synchronization rules: Callers must prevent restarting of the timer,
1061 * otherwise this function is meaningless. It must not be called from
1062 * interrupt contexts unless the timer is an irqsafe one. The caller must
1063 * not hold locks which would prevent completion of the timer's
1064 * handler. The timer's handler must not call add_timer_on(). Upon exit the
1065 * timer is not queued and the handler is not running on any CPU.
1066 *
1067 * Note: For !irqsafe timers, you must not hold locks that are held in
1068 * interrupt context while calling this function. Even if the lock has
1069 * nothing to do with the timer in question. Here's why:
1070 *
1071 * CPU0 CPU1
1072 * ---- ----
1073 * <SOFTIRQ>
1074 * call_timer_fn();
1075 * base->running_timer = mytimer;
1076 * spin_lock_irq(somelock);
1077 * <IRQ>
1078 * spin_lock(somelock);
1079 * del_timer_sync(mytimer);
1080 * while (base->running_timer == mytimer);
1081 *
1082 * Now del_timer_sync() will never return and never release somelock.
1083 * The interrupt on the other CPU is waiting to grab somelock but
1084 * it has interrupted the softirq that CPU0 is waiting to finish.
1085 *
1086 * The function returns whether it has deactivated a pending timer or not.
1087 */
1088 int del_timer_sync(struct timer_list *timer)
1089 {
1090 #ifdef CONFIG_LOCKDEP
1091 unsigned long flags;
1092
1093 /*
1094 * If lockdep gives a backtrace here, please reference
1095 * the synchronization rules above.
1096 */
1097 local_irq_save(flags);
1098 lock_map_acquire(&timer->lockdep_map);
1099 lock_map_release(&timer->lockdep_map);
1100 local_irq_restore(flags);
1101 #endif
1102 /*
1103 * don't use it in hardirq context, because it
1104 * could lead to deadlock.
1105 */
1106 WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
1107 for (;;) {
1108 int ret = try_to_del_timer_sync(timer);
1109 if (ret >= 0)
1110 return ret;
1111 cpu_relax();
1112 }
1113 }
1114 EXPORT_SYMBOL(del_timer_sync);
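/*
 * Typical teardown sketch (illustrative; "dev", "shutting_down" and
 * "my_timer" are hypothetical): first make sure the handler will not
 * re-arm the timer, then wait for it to finish:
 *
 *	dev->shutting_down = true;
 *	del_timer_sync(&dev->my_timer);
 */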
1115 #endif
1116
1117 static int cascade(struct tvec_base *base, struct tvec *tv, int index)
1118 {
1119 /* cascade all the timers from tv up one level */
1120 struct timer_list *timer;
1121 struct hlist_node *tmp;
1122 struct hlist_head tv_list;
1123
1124 hlist_move_list(tv->vec + index, &tv_list);
1125
1126 /*
1127 * We are removing _all_ timers from the list, so we
1128 * don't have to detach them individually.
1129 */
1130 hlist_for_each_entry_safe(timer, tmp, &tv_list, entry) {
1131 /* No accounting, while moving them */
1132 __internal_add_timer(base, timer);
1133 }
1134
1135 return index;
1136 }
1137
1138 static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
1139 unsigned long data)
1140 {
1141 int count = preempt_count();
1142
1143 #ifdef CONFIG_LOCKDEP
1144 /*
1145 * It is permissible to free the timer from inside the
1146 * function that is called from it; we need to take this into
1147 * account for lockdep too. To avoid bogus "held lock freed"
1148 * warnings as well as problems when looking into
1149 * timer->lockdep_map, make a copy and use that here.
1150 */
1151 struct lockdep_map lockdep_map;
1152
1153 lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
1154 #endif
1155 /*
1156 * Couple the lock chain with the lock chain at
1157 * del_timer_sync() by acquiring the lock_map around the fn()
1158 * call here and in del_timer_sync().
1159 */
1160 lock_map_acquire(&lockdep_map);
1161
1162 trace_timer_expire_entry(timer);
1163 fn(data);
1164 trace_timer_expire_exit(timer);
1165
1166 lock_map_release(&lockdep_map);
1167
1168 if (count != preempt_count()) {
1169 WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
1170 fn, count, preempt_count());
1171 /*
1172 * Restore the preempt count. That gives us a decent
1173 * chance to survive and extract information. If the
1174 * callback kept a lock held, bad luck, but not worse
1175 * than the BUG() we had.
1176 */
1177 preempt_count_set(count);
1178 }
1179 }
1180
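/*
 * INDEX(N) is the slot in wheel level tv(N+2) that base->timer_jiffies
 * currently points at; it is used below to pick the bucket to cascade
 * down whenever the lower levels wrap.
 */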
1181 #define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
1182
1183 /**
1184 * __run_timers - run all expired timers (if any) on this CPU.
1185 * @base: the timer vector to be processed.
1186 *
1187 * This function cascades all vectors and executes all expired timer
1188 * vectors.
1189 */
1190 static inline void __run_timers(struct tvec_base *base)
1191 {
1192 struct timer_list *timer;
1193
1194 spin_lock_irq(&base->lock);
1195
1196 while (time_after_eq(jiffies, base->timer_jiffies)) {
1197 struct hlist_head work_list;
1198 struct hlist_head *head = &work_list;
1199 int index;
1200
1201 if (!base->all_timers) {
1202 base->timer_jiffies = jiffies;
1203 break;
1204 }
1205
1206 index = base->timer_jiffies & TVR_MASK;
1207
1208 /*
1209 * Cascade timers:
1210 */
1211 if (!index &&
1212 (!cascade(base, &base->tv2, INDEX(0))) &&
1213 (!cascade(base, &base->tv3, INDEX(1))) &&
1214 !cascade(base, &base->tv4, INDEX(2)))
1215 cascade(base, &base->tv5, INDEX(3));
1216 ++base->timer_jiffies;
1217 hlist_move_list(base->tv1.vec + index, head);
1218 while (!hlist_empty(head)) {
1219 void (*fn)(unsigned long);
1220 unsigned long data;
1221 bool irqsafe;
1222
1223 timer = hlist_entry(head->first, struct timer_list, entry);
1224 fn = timer->function;
1225 data = timer->data;
1226 irqsafe = timer->flags & TIMER_IRQSAFE;
1227
1228 timer_stats_account_timer(timer);
1229
1230 base->running_timer = timer;
1231 detach_expired_timer(timer, base);
1232
1233 if (irqsafe) {
1234 spin_unlock(&base->lock);
1235 call_timer_fn(timer, fn, data);
1236 spin_lock(&base->lock);
1237 } else {
1238 spin_unlock_irq(&base->lock);
1239 call_timer_fn(timer, fn, data);
1240 spin_lock_irq(&base->lock);
1241 }
1242 }
1243 }
1244 base->running_timer = NULL;
1245 spin_unlock_irq(&base->lock);
1246 }
1247
1248 #ifdef CONFIG_NO_HZ_COMMON
1249 /*
1250 * Find out when the next timer event is due to happen. This
1251 * is used on S/390 to stop all activity when a CPU is idle.
1252 * This function needs to be called with interrupts disabled.
1253 */
1254 static unsigned long __next_timer_interrupt(struct tvec_base *base)
1255 {
1256 unsigned long timer_jiffies = base->timer_jiffies;
1257 unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
1258 int index, slot, array, found = 0;
1259 struct timer_list *nte;
1260 struct tvec *varray[4];
1261
1262 /* Look for timer events in tv1. */
1263 index = slot = timer_jiffies & TVR_MASK;
1264 do {
1265 hlist_for_each_entry(nte, base->tv1.vec + slot, entry) {
1266 if (nte->flags & TIMER_DEFERRABLE)
1267 continue;
1268
1269 found = 1;
1270 expires = nte->expires;
1271 /* Look at the cascade bucket(s)? */
1272 if (!index || slot < index)
1273 goto cascade;
1274 return expires;
1275 }
1276 slot = (slot + 1) & TVR_MASK;
1277 } while (slot != index);
1278
1279 cascade:
1280 /* Calculate the next cascade event */
1281 if (index)
1282 timer_jiffies += TVR_SIZE - index;
1283 timer_jiffies >>= TVR_BITS;
1284
1285 /* Check tv2-tv5. */
1286 varray[0] = &base->tv2;
1287 varray[1] = &base->tv3;
1288 varray[2] = &base->tv4;
1289 varray[3] = &base->tv5;
1290
1291 for (array = 0; array < 4; array++) {
1292 struct tvec *varp = varray[array];
1293
1294 index = slot = timer_jiffies & TVN_MASK;
1295 do {
1296 hlist_for_each_entry(nte, varp->vec + slot, entry) {
1297 if (nte->flags & TIMER_DEFERRABLE)
1298 continue;
1299
1300 found = 1;
1301 if (time_before(nte->expires, expires))
1302 expires = nte->expires;
1303 }
1304 /*
1305 * Are we still searching for the first timer, or are
1306 * we looking up the cascade buckets?
1307 */
1308 if (found) {
1309 /* Look at the cascade bucket(s)? */
1310 if (!index || slot < index)
1311 break;
1312 return expires;
1313 }
1314 slot = (slot + 1) & TVN_MASK;
1315 } while (slot != index);
1316
1317 if (index)
1318 timer_jiffies += TVN_SIZE - index;
1319 timer_jiffies >>= TVN_BITS;
1320 }
1321 return expires;
1322 }
1323
1324 /*
1325 * Check, if the next hrtimer event is before the next timer wheel
1326 * event:
1327 */
1328 static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
1329 {
1330 u64 nextevt = hrtimer_get_next_event();
1331
1332 /*
1333 * If high resolution timers are enabled
1334 * hrtimer_get_next_event() returns KTIME_MAX.
1335 */
1336 if (expires <= nextevt)
1337 return expires;
1338
1339 /*
1340 * If the next timer is already expired, return the tick base
1341 * time so the tick is fired immediately.
1342 */
1343 if (nextevt <= basem)
1344 return basem;
1345
1346 /*
1347 * Round up to the next jiffie. High resolution timers are
1348 * off, so the hrtimers are expired in the tick and we need to
1349 * make sure that this tick really expires the timer to avoid
1350 * a ping pong of the nohz stop code.
1351 *
1352 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
1353 */
1354 return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
1355 }
1356
1357 /**
1358 * get_next_timer_interrupt - return the time (clock mono) of the next timer
1359 * @basej: base time jiffies
1360 * @basem: base time clock monotonic
1361 *
1362 * Returns the tick aligned clock monotonic time of the next pending
1363 * timer or KTIME_MAX if no timer is pending.
1364 */
1365 u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
1366 {
1367 struct tvec_base *base = this_cpu_ptr(&tvec_bases);
1368 u64 expires = KTIME_MAX;
1369 unsigned long nextevt;
1370
1371 /*
1372 * Pretend that there is no timer pending if the cpu is offline.
1373 * Possible pending timers will be migrated later to an active cpu.
1374 */
1375 if (cpu_is_offline(smp_processor_id()))
1376 return expires;
1377
1378 spin_lock(&base->lock);
1379 if (base->active_timers) {
1380 if (time_before_eq(base->next_timer, base->timer_jiffies))
1381 base->next_timer = __next_timer_interrupt(base);
1382 nextevt = base->next_timer;
1383 if (time_before_eq(nextevt, basej))
1384 expires = basem;
1385 else
1386 expires = basem + (nextevt - basej) * TICK_NSEC;
1387 }
1388 spin_unlock(&base->lock);
1389
1390 return cmp_next_hrtimer_event(basem, expires);
1391 }
1392 #endif
1393
1394 /*
1395 * Called from the timer interrupt handler to charge one tick to the current
1396 * process. user_tick is 1 if the tick is user time, 0 for system.
1397 */
1398 void update_process_times(int user_tick)
1399 {
1400 struct task_struct *p = current;
1401
1402 /* Note: this timer irq context must be accounted for as well. */
1403 account_process_tick(p, user_tick);
1404 run_local_timers();
1405 rcu_check_callbacks(user_tick);
1406 #ifdef CONFIG_IRQ_WORK
1407 if (in_irq())
1408 irq_work_tick();
1409 #endif
1410 scheduler_tick();
1411 run_posix_cpu_timers(p);
1412 }
1413
1414 /*
1415 * This function runs expired timers in bottom half (softirq) context.
1416 */
1417 static void run_timer_softirq(struct softirq_action *h)
1418 {
1419 struct tvec_base *base = this_cpu_ptr(&tvec_bases);
1420
1421 if (time_after_eq(jiffies, base->timer_jiffies))
1422 __run_timers(base);
1423 }
1424
1425 /*
1426 * Called by the local, per-CPU timer interrupt on SMP.
1427 */
1428 void run_local_timers(void)
1429 {
1430 hrtimer_run_queues();
1431 raise_softirq(TIMER_SOFTIRQ);
1432 }
1433
1434 #ifdef __ARCH_WANT_SYS_ALARM
1435
1436 /*
1437 * For backwards compatibility? This can be done in libc so Alpha
1438 * and all newer ports shouldn't need it.
1439 */
1440 SYSCALL_DEFINE1(alarm, unsigned int, seconds)
1441 {
1442 return alarm_setitimer(seconds);
1443 }
1444
1445 #endif
1446
1447 static void process_timeout(unsigned long __data)
1448 {
1449 wake_up_process((struct task_struct *)__data);
1450 }
1451
1452 /**
1453 * schedule_timeout - sleep until timeout
1454 * @timeout: timeout value in jiffies
1455 *
1456 * Make the current task sleep until @timeout jiffies have
1457 * elapsed. The routine will return immediately unless
1458 * the current task state has been set (see set_current_state()).
1459 *
1460 * You can set the task state as follows -
1461 *
1462 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
1463 * pass before the routine returns. The routine will return 0
1464 *
1465 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1466 * delivered to the current task. In this case the remaining time
1467 * in jiffies will be returned, or 0 if the timer expired in time
1468 *
1469 * The current task state is guaranteed to be TASK_RUNNING when this
1470 * routine returns.
1471 *
1472 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
1473 * the CPU away without a bound on the timeout. In this case the return
1474 * value will be %MAX_SCHEDULE_TIMEOUT.
1475 *
1476 * In all cases the return value is guaranteed to be non-negative.
1477 */
1478 signed long __sched schedule_timeout(signed long timeout)
1479 {
1480 struct timer_list timer;
1481 unsigned long expire;
1482
1483 switch (timeout)
1484 {
1485 case MAX_SCHEDULE_TIMEOUT:
1486 /*
1487 * These two special cases are useful to keep the caller
1488 * simple. Nothing more. We could take
1489 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
1490 * but I'd like to return a valid offset (>=0) to allow
1491 * the caller to do whatever it wants with the retval.
1492 */
1493 schedule();
1494 goto out;
1495 default:
1496 /*
1497 * Another bit of PARANOID. Note that the retval will be
1498 * 0 since no piece of kernel is supposed to do a check
1499 * for a negative retval of schedule_timeout() (since it
1500 * should never happens anyway). You just have the printk()
1501 * that will tell you if something is gone wrong and where.
1502 */
1503 if (timeout < 0) {
1504 printk(KERN_ERR "schedule_timeout: wrong timeout "
1505 "value %lx\n", timeout);
1506 dump_stack();
1507 current->state = TASK_RUNNING;
1508 goto out;
1509 }
1510 }
1511
1512 expire = timeout + jiffies;
1513
1514 setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
1515 __mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
1516 schedule();
1517 del_singleshot_timer_sync(&timer);
1518
1519 /* Remove the timer from the object tracker */
1520 destroy_timer_on_stack(&timer);
1521
1522 timeout = expire - jiffies;
1523
1524 out:
1525 return timeout < 0 ? 0 : timeout;
1526 }
1527 EXPORT_SYMBOL(schedule_timeout);
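/*
 * Illustrative use: sleep for up to 100ms, waking early if a signal is
 * delivered ("remaining" is a hypothetical local variable):
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 */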
1528
1529 /*
1530 * We can use __set_current_state() here because schedule_timeout() calls
1531 * schedule() unconditionally.
1532 */
1533 signed long __sched schedule_timeout_interruptible(signed long timeout)
1534 {
1535 __set_current_state(TASK_INTERRUPTIBLE);
1536 return schedule_timeout(timeout);
1537 }
1538 EXPORT_SYMBOL(schedule_timeout_interruptible);
1539
1540 signed long __sched schedule_timeout_killable(signed long timeout)
1541 {
1542 __set_current_state(TASK_KILLABLE);
1543 return schedule_timeout(timeout);
1544 }
1545 EXPORT_SYMBOL(schedule_timeout_killable);
1546
1547 signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1548 {
1549 __set_current_state(TASK_UNINTERRUPTIBLE);
1550 return schedule_timeout(timeout);
1551 }
1552 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
1553
1554 /*
1555 * Like schedule_timeout_uninterruptible(), except this task will not contribute
1556 * to load average.
1557 */
1558 signed long __sched schedule_timeout_idle(signed long timeout)
1559 {
1560 __set_current_state(TASK_IDLE);
1561 return schedule_timeout(timeout);
1562 }
1563 EXPORT_SYMBOL(schedule_timeout_idle);
1564
1565 #ifdef CONFIG_HOTPLUG_CPU
1566 static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head)
1567 {
1568 struct timer_list *timer;
1569 int cpu = new_base->cpu;
1570
1571 while (!hlist_empty(head)) {
1572 timer = hlist_entry(head->first, struct timer_list, entry);
1573 /* We ignore the accounting on the dying cpu */
1574 detach_timer(timer, false);
1575 timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
1576 internal_add_timer(new_base, timer);
1577 }
1578 }
1579
1580 static void migrate_timers(int cpu)
1581 {
1582 struct tvec_base *old_base;
1583 struct tvec_base *new_base;
1584 int i;
1585
1586 BUG_ON(cpu_online(cpu));
1587 old_base = per_cpu_ptr(&tvec_bases, cpu);
1588 new_base = get_cpu_ptr(&tvec_bases);
1589 /*
1590 * The caller is globally serialized and nobody else
1591 * takes two locks at once, so deadlock is not possible.
1592 */
1593 spin_lock_irq(&new_base->lock);
1594 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1595
1596 BUG_ON(old_base->running_timer);
1597
1598 for (i = 0; i < TVR_SIZE; i++)
1599 migrate_timer_list(new_base, old_base->tv1.vec + i);
1600 for (i = 0; i < TVN_SIZE; i++) {
1601 migrate_timer_list(new_base, old_base->tv2.vec + i);
1602 migrate_timer_list(new_base, old_base->tv3.vec + i);
1603 migrate_timer_list(new_base, old_base->tv4.vec + i);
1604 migrate_timer_list(new_base, old_base->tv5.vec + i);
1605 }
1606
1607 old_base->active_timers = 0;
1608 old_base->all_timers = 0;
1609
1610 spin_unlock(&old_base->lock);
1611 spin_unlock_irq(&new_base->lock);
1612 put_cpu_ptr(&tvec_bases);
1613 }
1614
1615 static int timer_cpu_notify(struct notifier_block *self,
1616 unsigned long action, void *hcpu)
1617 {
1618 switch (action) {
1619 case CPU_DEAD:
1620 case CPU_DEAD_FROZEN:
1621 migrate_timers((long)hcpu);
1622 break;
1623 default:
1624 break;
1625 }
1626
1627 return NOTIFY_OK;
1628 }
1629
1630 static inline void timer_register_cpu_notifier(void)
1631 {
1632 cpu_notifier(timer_cpu_notify, 0);
1633 }
1634 #else
1635 static inline void timer_register_cpu_notifier(void) { }
1636 #endif /* CONFIG_HOTPLUG_CPU */
1637
1638 static void __init init_timer_cpu(int cpu)
1639 {
1640 struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu);
1641
1642 base->cpu = cpu;
1643 spin_lock_init(&base->lock);
1644
1645 base->timer_jiffies = jiffies;
1646 base->next_timer = base->timer_jiffies;
1647 }
1648
1649 static void __init init_timer_cpus(void)
1650 {
1651 int cpu;
1652
1653 for_each_possible_cpu(cpu)
1654 init_timer_cpu(cpu);
1655 }
1656
1657 void __init init_timers(void)
1658 {
1659 init_timer_cpus();
1660 init_timer_stats();
1661 timer_register_cpu_notifier();
1662 open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
1663 }
1664
1665 /**
1666 * msleep - sleep safely even with waitqueue interruptions
1667 * @msecs: Time in milliseconds to sleep for
1668 */
1669 void msleep(unsigned int msecs)
1670 {
1671 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1672
1673 while (timeout)
1674 timeout = schedule_timeout_uninterruptible(timeout);
1675 }
1676
1677 EXPORT_SYMBOL(msleep);
1678
1679 /**
1680 * msleep_interruptible - sleep waiting for signals
1681 * @msecs: Time in milliseconds to sleep for
1682 */
1683 unsigned long msleep_interruptible(unsigned int msecs)
1684 {
1685 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1686
1687 while (timeout && !signal_pending(current))
1688 timeout = schedule_timeout_interruptible(timeout);
1689 return jiffies_to_msecs(timeout);
1690 }
1691
1692 EXPORT_SYMBOL(msleep_interruptible);
1693
1694 static void __sched do_usleep_range(unsigned long min, unsigned long max)
1695 {
1696 ktime_t kmin;
1697 u64 delta;
1698
1699 kmin = ktime_set(0, min * NSEC_PER_USEC);
1700 delta = (u64)(max - min) * NSEC_PER_USEC;
1701 schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
1702 }
1703
1704 /**
1705 * usleep_range - drop-in replacement for udelay where wakeup is flexible
1706 * @min: Minimum time in usecs to sleep
1707 * @max: Maximum time in usecs to sleep
1708 */
1709 void __sched usleep_range(unsigned long min, unsigned long max)
1710 {
1711 __set_current_state(TASK_UNINTERRUPTIBLE);
1712 do_usleep_range(min, max);
1713 }
1714 EXPORT_SYMBOL(usleep_range);
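/*
 * Illustrative use: in sleepable context, wait roughly 100us while giving
 * the hrtimer subsystem up to 200us of coalescing room:
 *
 *	usleep_range(100, 200);
 */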