/*
 * linux/kernel/timer.c
 *
 * Kernel internal timers, basic process system calls
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *             serialize accesses to xtime/lost_ticks).
 *                             Copyright (C) 1998 Andrea Arcangeli
 * 1999-03-10  Improved NTP compatibility by Ulrich Windl
 * 2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 * 2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                             Copyright (C) 2000, 2001, 2002 Ingo Molnar
 *             Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

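/*
 * Worked example (illustrative, not from the original source), assuming
 * CONFIG_BASE_SMALL=0 so TVR_BITS=8 and TVN_BITS=6:
 *
 *      tv1: 256 buckets of 1 jiffy      -> deltas <  2^8
 *      tv2:  64 buckets of 2^8 jiffies  -> deltas <  2^14
 *      tv3:  64 buckets of 2^14 jiffies -> deltas <  2^20
 *      tv4:  64 buckets of 2^20 jiffies -> deltas <  2^26
 *      tv5:  64 buckets of 2^26 jiffies -> deltas <= 2^32 - 1
 *
 * At HZ=250 the five levels reach about 1 second, 65 seconds, 70 minutes,
 * 3 days and 199 days respectively, which is why five levels cover any
 * 32-bit timeout delta.
 */
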
struct tvec {
        struct list_head vec[TVN_SIZE];
};

struct tvec_root {
        struct list_head vec[TVR_SIZE];
};

struct tvec_base {
        spinlock_t lock;
        struct timer_list *running_timer;
        unsigned long timer_jiffies;
        unsigned long next_timer;
        unsigned long active_timers;
        struct tvec_root tv1;
        struct tvec tv2;
        struct tvec tv3;
        struct tvec tv4;
        struct tvec tv5;
} ____cacheline_aligned;

struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
        return ((unsigned int)(unsigned long)base & TIMER_DEFERRABLE);
}

static inline unsigned int tbase_get_irqsafe(struct tvec_base *base)
{
        return ((unsigned int)(unsigned long)base & TIMER_IRQSAFE);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
        return ((struct tvec_base *)((unsigned long)base & ~TIMER_FLAG_MASK));
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
        unsigned long flags = (unsigned long)timer->base & TIMER_FLAG_MASK;

        timer->base = (struct tvec_base *)((unsigned long)(new_base) | flags);
}

static unsigned long round_jiffies_common(unsigned long j, int cpu,
                bool force_up)
{
        int rem;
        unsigned long original = j;

        /*
         * We don't want all cpus firing their timers at once hitting the
         * same lock or cachelines, so we skew each extra cpu with an extra
         * 3 jiffies. This 3 jiffies came originally from the mm/ code which
         * already did this.
         * The skew is done by adding 3*cpunr, then round, then subtract this
         * extra offset again.
         */
        j += cpu * 3;

        rem = j % HZ;

        /*
         * If the target jiffy is just after a whole second (which can happen
         * due to delays of the timer irq, long irq off times etc etc) then
         * we should round down to the whole second, not up. Use 1/4th second
         * as cutoff for this rounding as an extreme upper bound for this.
         * But never round down if @force_up is set.
         */
        if (rem < HZ/4 && !force_up) /* round down */
                j = j - rem;
        else /* round up */
                j = j - rem + HZ;

        /* now that we have rounded, subtract the extra skew again */
        j -= cpu * 3;

        if (j <= jiffies) /* rounding ate our timeout entirely; */
                return original;
        return j;
}

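/*
 * Worked example (illustrative), assuming HZ=100 and jiffies well below
 * the values shown. HZ/4 == 25, so a remainder under 25 rounds down:
 *
 *      round_jiffies_common(1020, 0, false) == 1000   (rem 20, down)
 *      round_jiffies_common(1030, 0, false) == 1100   (rem 30, up)
 *      round_jiffies_common(1020, 1, false) ==  997   (cpu 1 skewed by -3)
 *
 * Each CPU thus gets its own slot 3*cpu jiffies before the second
 * boundary, so the rounded timers of different CPUs never collide.
 */
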
/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
        return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
        unsigned long j0 = jiffies;

        /* Use j0 because jiffies might change while we run */
        return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
        return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
        return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

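/*
 * Usage sketch (illustrative; "my_timer" is a hypothetical timer that
 * fires roughly every five seconds and does not need sub-second
 * accuracy):
 *
 *      mod_timer(&my_timer, round_jiffies(jiffies + 5 * HZ));
 *
 * Every timer armed this way on the same CPU expires in the same tick,
 * so an otherwise idle CPU wakes once instead of once per timer.
 */
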
/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
        return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
        unsigned long j0 = jiffies;

        /* Use j0 because jiffies might change while we run */
        return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
        return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
        return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);

/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of time, in jiffies, that a certain timer has
 * in terms of slack. By setting this value, the timer subsystem
 * will schedule the actual timer somewhere between
 * the time mod_timer() asks for, and that time plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
        timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);

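/*
 * Usage sketch (illustrative; "poll_timer" is hypothetical): a driver
 * that polls every 2 seconds but tolerates 100ms of extra latency can
 * let the subsystem batch its expiry with neighbouring timers:
 *
 *      set_timer_slack(&poll_timer, HZ / 10);
 *      mod_timer(&poll_timer, jiffies + 2 * HZ);
 */
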
static void
__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
        unsigned long expires = timer->expires;
        unsigned long idx = expires - base->timer_jiffies;
        struct list_head *vec;

        if (idx < TVR_SIZE) {
                int i = expires & TVR_MASK;
                vec = base->tv1.vec + i;
        } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
                int i = (expires >> TVR_BITS) & TVN_MASK;
                vec = base->tv2.vec + i;
        } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
                vec = base->tv3.vec + i;
        } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
                vec = base->tv4.vec + i;
        } else if ((signed long) idx < 0) {
                /*
                 * Can happen if you add a timer with expires == jiffies,
                 * or you set a timer to go off in the past
                 */
                vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
        } else {
                int i;
                /* If the timeout is larger than 0xffffffff on 64-bit
                 * architectures then we use the maximum timeout:
                 */
                if (idx > 0xffffffffUL) {
                        idx = 0xffffffffUL;
                        expires = idx + base->timer_jiffies;
                }
                i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
                vec = base->tv5.vec + i;
        }
        /*
         * Timers are FIFO:
         */
        list_add_tail(&timer->entry, vec);
}

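/*
 * Worked example (illustrative, TVR_BITS=8/TVN_BITS=6), with
 * base->timer_jiffies == 1000:
 *
 *      expires == 1003: idx ==   3 < 2^8  -> tv1, slot 1003 & 255 == 235
 *      expires == 1300: idx == 300 < 2^14 -> tv2, slot (1300 >> 8) & 63 == 5
 *      expires ==  900: idx < 0 (already expired) -> tv1 slot for the
 *                       very next tick, 1000 & 255 == 232
 */
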
static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
        __internal_add_timer(base, timer);
        /*
         * Update base->active_timers and base->next_timer
         */
        if (!tbase_get_deferrable(timer->base)) {
                if (time_before(timer->expires, base->next_timer))
                        base->next_timer = timer->expires;
                base->active_timers++;
        }
}

#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
        if (timer->start_site)
                return;

        timer->start_site = addr;
        memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
        timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
        unsigned int flag = 0;

        if (likely(!timer->start_site))
                return;
        if (unlikely(tbase_get_deferrable(timer->base)))
                flag |= TIMER_STATS_FLAG_DEFERRABLE;

        timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
                                 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
        return ((struct timer_list *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                del_timer_sync(timer);
                debug_object_init(timer, &timer_debug_descr);
                return 1;
        default:
                return 0;
        }
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(unsigned long data)
{
        WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {

        case ODEBUG_STATE_NOTAVAILABLE:
                /*
                 * This is not really a fixup. The timer was
                 * statically initialized. We just make sure that it
                 * is tracked in the object tracker.
                 */
                if (timer->entry.next == NULL &&
                    timer->entry.prev == TIMER_ENTRY_STATIC) {
                        debug_object_init(timer, &timer_debug_descr);
                        debug_object_activate(timer, &timer_debug_descr);
                        return 0;
                } else {
                        setup_timer(timer, stub_timer, 0);
                        return 1;
                }
                return 0;

        case ODEBUG_STATE_ACTIVE:
                WARN_ON(1);

        default:
                return 0;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                del_timer_sync(timer);
                debug_object_free(timer, &timer_debug_descr);
                return 1;
        default:
                return 0;
        }
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
                if (timer->entry.prev == TIMER_ENTRY_STATIC) {
                        /*
                         * This is not really a fixup. The timer was
                         * statically initialized. We just make sure that it
                         * is tracked in the object tracker.
                         */
                        debug_object_init(timer, &timer_debug_descr);
                        return 0;
                } else {
                        setup_timer(timer, stub_timer, 0);
                        return 1;
                }
        default:
                return 0;
        }
}

static struct debug_obj_descr timer_debug_descr = {
        .name                   = "timer_list",
        .debug_hint             = timer_debug_hint,
        .fixup_init             = timer_fixup_init,
        .fixup_activate         = timer_fixup_activate,
        .fixup_free             = timer_fixup_free,
        .fixup_assert_init      = timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
        debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
        debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
        debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
        debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
        debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
                          const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
                             const char *name, struct lock_class_key *key)
{
        debug_object_init_on_stack(timer, &timer_debug_descr);
        do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
        debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
        debug_timer_init(timer);
        trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
        debug_timer_activate(timer);
        trace_timer_start(timer, expires);
}

static inline void debug_deactivate(struct timer_list *timer)
{
        debug_timer_deactivate(timer);
        trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
        debug_timer_assert_init(timer);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
                          const char *name, struct lock_class_key *key)
{
        struct tvec_base *base = __raw_get_cpu_var(tvec_bases);

        timer->entry.next = NULL;
        timer->base = (void *)((unsigned long)base | flags);
        timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
        timer->start_site = NULL;
        timer->start_pid = -1;
        memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
        lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer, unsigned int flags,
                    const char *name, struct lock_class_key *key)
{
        debug_init(timer);
        do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);

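/*
 * Usage sketch (illustrative; my_dev, my_timeout and dev are
 * hypothetical). Most callers reach this through the init_timer() or
 * setup_timer() wrappers in <linux/timer.h>, which supply the flags,
 * name and lockdep key:
 *
 *      static void my_timeout(unsigned long data)
 *      {
 *              struct my_dev *dev = (struct my_dev *)data;
 *              ...
 *      }
 *
 *      setup_timer(&dev->timer, my_timeout, (unsigned long)dev);
 *      mod_timer(&dev->timer, jiffies + HZ);
 */
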
static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
        struct list_head *entry = &timer->entry;

        debug_deactivate(timer);

        __list_del(entry->prev, entry->next);
        if (clear_pending)
                entry->next = NULL;
        entry->prev = LIST_POISON2;
}

static inline void
detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
{
        detach_timer(timer, true);
        if (!tbase_get_deferrable(timer->base))
                base->active_timers--;
}

static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
                             bool clear_pending)
{
        if (!timer_pending(timer))
                return 0;

        detach_timer(timer, clear_pending);
        if (!tbase_get_deferrable(timer->base)) {
                base->active_timers--;
                if (timer->expires == base->next_timer)
                        base->next_timer = base->timer_jiffies;
        }
        return 1;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
                                         unsigned long *flags)
        __acquires(timer->base->lock)
{
        struct tvec_base *base;

        for (;;) {
                struct tvec_base *prelock_base = timer->base;
                base = tbase_get_base(prelock_base);
                if (likely(base != NULL)) {
                        spin_lock_irqsave(&base->lock, *flags);
                        if (likely(prelock_base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU */
                        spin_unlock_irqrestore(&base->lock, *flags);
                }
                cpu_relax();
        }
}

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
            bool pending_only, int pinned)
{
        struct tvec_base *base, *new_base;
        unsigned long flags;
        int ret = 0, cpu;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(!timer->function);

        base = lock_timer_base(timer, &flags);

        ret = detach_if_pending(timer, base, false);
        if (!ret && pending_only)
                goto out_unlock;

        debug_activate(timer, expires);

        cpu = smp_processor_id();

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
        if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
                cpu = get_nohz_timer_target();
#endif
        new_base = per_cpu(tvec_bases, cpu);

        if (base != new_base) {
                /*
                 * We are trying to schedule the timer on the local CPU.
                 * However we can't change timer's base while it is running,
                 * otherwise del_timer_sync() can't detect that the timer's
                 * handler has not yet finished. This also guarantees that
                 * the timer is serialized wrt itself.
                 */
                if (likely(base->running_timer != timer)) {
                        /* See the comment in lock_timer_base() */
                        timer_set_base(timer, NULL);
                        spin_unlock(&base->lock);
                        base = new_base;
                        spin_lock(&base->lock);
                        timer_set_base(timer, base);
                }
        }

        timer->expires = expires;
        internal_add_timer(base, timer);

out_unlock:
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
        return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);

/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 * 1) calculate the maximum (absolute) time
 * 2) calculate the highest bit where the expires and new max are different
 * 3) use this bit to make a mask
 * 4) use the bitmask to round down the maximum time, so that all last
 *    bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
        unsigned long expires_limit, mask;
        int bit;

        if (timer->slack >= 0) {
                expires_limit = expires + timer->slack;
        } else {
                long delta = expires - jiffies;

                if (delta < 256)
                        return expires;

                expires_limit = expires + delta / 256;
        }
        mask = expires ^ expires_limit;
        if (mask == 0)
                return expires;

        bit = find_last_bit(&mask, BITS_PER_LONG);

        mask = (1 << bit) - 1;

        expires_limit = expires_limit & ~(mask);

        return expires_limit;
}

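/*
 * Worked example (illustrative): timer->slack == -1, jiffies == 0 and
 * expires == 10000 gives delta == 10000 and expires_limit == 10000 +
 * 10000/256 == 10039. Then 10000 ^ 10039 == 0x27, whose last set bit
 * is bit 5, so mask == 31 and the result is 10039 & ~31 == 10016: the
 * timer may fire up to 16 jiffies late, but lands on a 32-jiffy
 * boundary that other timers can share.
 */
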
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
        expires = apply_slack(timer, expires);

        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
         * to be the same thing then just return:
         */
        if (timer_pending(timer) && timer->expires == expires)
                return 1;

        return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);

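/*
 * Usage sketch (illustrative; my_timer, my_func and do_periodic_work
 * are hypothetical): a periodic timer re-arms itself from its own
 * callback, which is safe because the timer is detached before the
 * handler runs:
 *
 *      static void my_func(unsigned long data)
 *      {
 *              do_periodic_work();
 *              mod_timer(&my_timer, jiffies + HZ);
 *      }
 */
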
/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 * and to ensure that the timer is scheduled on the current CPU.
 *
 * Note that this does not prevent the timer from being migrated
 * when the current CPU goes offline. If this is a problem for
 * you, use CPU-hotplug notifiers to handle it correctly, for
 * example, cancelling the timer when the corresponding CPU goes
 * offline.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
        if (timer->expires == expires && timer_pending(timer))
                return 1;

        return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
        BUG_ON(timer_pending(timer));
        mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
        struct tvec_base *base = per_cpu(tvec_bases, cpu);
        unsigned long flags;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(timer_pending(timer) || !timer->function);
        spin_lock_irqsave(&base->lock, flags);
        timer_set_base(timer, base);
        debug_activate(timer, timer->expires);
        internal_add_timer(base, timer);
        /*
         * Check whether the other CPU is idle and needs to be
         * triggered to reevaluate the timer wheel when nohz is
         * active. We are protected against the other CPU fiddling
         * with the timer by holding the timer base lock. This also
         * makes sure that a CPU on the way to idle can not evaluate
         * the timer wheel.
         */
        wake_up_idle_cpu(cpu);
        spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
        struct tvec_base *base;
        unsigned long flags;
        int ret = 0;

        debug_assert_init(timer);

        timer_stats_timer_clear_start_info(timer);
        if (timer_pending(timer)) {
                base = lock_timer_base(timer, &flags);
                ret = detach_if_pending(timer, base, true);
                spin_unlock_irqrestore(&base->lock, flags);
        }

        return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: timer to del
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
        struct tvec_base *base;
        unsigned long flags;
        int ret = -1;

        debug_assert_init(timer);

        base = lock_timer_base(timer, &flags);

        if (base->running_timer != timer) {
                timer_stats_timer_clear_start_info(timer);
                ret = detach_if_pending(timer, base, true);
        }
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

#ifdef CONFIG_SMP
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's
 * handler. The timer's handler must not call add_timer_on(). Upon exit the
 * timer is not queued and the handler is not running on any CPU.
 *
 * Note: For !irqsafe timers, you must not hold locks that are held in
 * interrupt context while calling this function. Even if the lock has
 * nothing to do with the timer in question. Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                     call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                       spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *      while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
        unsigned long flags;

        /*
         * If lockdep gives a backtrace here, please reference
         * the synchronization rules above.
         */
        local_irq_save(flags);
        lock_map_acquire(&timer->lockdep_map);
        lock_map_release(&timer->lockdep_map);
        local_irq_restore(flags);
#endif
        /*
         * don't use it in hardirq context, because it
         * could lead to deadlock.
         */
        WARN_ON(in_irq() && !tbase_get_irqsafe(timer->base));
        for (;;) {
                int ret = try_to_del_timer_sync(timer);
                if (ret >= 0)
                        return ret;
                cpu_relax();
        }
}
EXPORT_SYMBOL(del_timer_sync);
#endif

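/*
 * Teardown sketch (illustrative; "dev" and its fields are hypothetical).
 * Per the rules above, the caller must keep the handler from re-arming
 * the timer before cancelling it, e.g. in a driver's remove path:
 *
 *      dev->shutting_down = 1;         (checked by the timer handler)
 *      smp_mb();
 *      del_timer_sync(&dev->timer);    (waits for a running handler)
 *      kfree(dev);                     (now nothing references the timer)
 */
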
static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
        /* cascade all the timers from tv up one level */
        struct timer_list *timer, *tmp;
        struct list_head tv_list;

        list_replace_init(tv->vec + index, &tv_list);

        /*
         * We are removing _all_ timers from the list, so we
         * don't have to detach them individually.
         */
        list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
                BUG_ON(tbase_get_base(timer->base) != base);
                /* No accounting, while moving them */
                __internal_add_timer(base, timer);
        }

        return index;
}

static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
                          unsigned long data)
{
        int preempt_count = preempt_count();

#ifdef CONFIG_LOCKDEP
        /*
         * It is permissible to free the timer from inside the
         * function that is called from it, this we need to take into
         * account for lockdep too. To avoid bogus "held lock freed"
         * warnings as well as problems when looking into
         * timer->lockdep_map, make a copy and use that here.
         */
        struct lockdep_map lockdep_map;

        lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
        /*
         * Couple the lock chain with the lock chain at
         * del_timer_sync() by acquiring the lock_map around the fn()
         * call here and in del_timer_sync().
         */
        lock_map_acquire(&lockdep_map);

        trace_timer_expire_entry(timer);
        fn(data);
        trace_timer_expire_exit(timer);

        lock_map_release(&lockdep_map);

        if (preempt_count != preempt_count()) {
                WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
                          fn, preempt_count, preempt_count());
                /*
                 * Restore the preempt count. That gives us a decent
                 * chance to survive and extract information. If the
                 * callback kept a lock held, bad luck, but not worse
                 * than the BUG() we had.
                 */
                preempt_count() = preempt_count;
        }
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
        struct timer_list *timer;

        spin_lock_irq(&base->lock);
        while (time_after_eq(jiffies, base->timer_jiffies)) {
                struct list_head work_list;
                struct list_head *head = &work_list;
                int index = base->timer_jiffies & TVR_MASK;

                /*
                 * Cascade timers:
                 */
                if (!index &&
                        (!cascade(base, &base->tv2, INDEX(0))) &&
                                (!cascade(base, &base->tv3, INDEX(1))) &&
                                        !cascade(base, &base->tv4, INDEX(2)))
                        cascade(base, &base->tv5, INDEX(3));
                ++base->timer_jiffies;
                list_replace_init(base->tv1.vec + index, &work_list);
                while (!list_empty(head)) {
                        void (*fn)(unsigned long);
                        unsigned long data;
                        bool irqsafe;

                        timer = list_first_entry(head, struct timer_list, entry);
                        fn = timer->function;
                        data = timer->data;
                        irqsafe = tbase_get_irqsafe(timer->base);

                        timer_stats_account_timer(timer);

                        base->running_timer = timer;
                        detach_expired_timer(timer, base);

                        if (irqsafe) {
                                spin_unlock(&base->lock);
                                call_timer_fn(timer, fn, data);
                                spin_lock(&base->lock);
                        } else {
                                spin_unlock_irq(&base->lock);
                                call_timer_fn(timer, fn, data);
                                spin_lock_irq(&base->lock);
                        }
                }
        }
        base->running_timer = NULL;
        spin_unlock_irq(&base->lock);
}

#ifdef CONFIG_NO_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
        unsigned long timer_jiffies = base->timer_jiffies;
        unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
        int index, slot, array, found = 0;
        struct timer_list *nte;
        struct tvec *varray[4];

        /* Look for timer events in tv1. */
        index = slot = timer_jiffies & TVR_MASK;
        do {
                list_for_each_entry(nte, base->tv1.vec + slot, entry) {
                        if (tbase_get_deferrable(nte->base))
                                continue;

                        found = 1;
                        expires = nte->expires;
                        /* Look at the cascade bucket(s)? */
                        if (!index || slot < index)
                                goto cascade;
                        return expires;
                }
                slot = (slot + 1) & TVR_MASK;
        } while (slot != index);

cascade:
        /* Calculate the next cascade event */
        if (index)
                timer_jiffies += TVR_SIZE - index;
        timer_jiffies >>= TVR_BITS;

        /* Check tv2-tv5. */
        varray[0] = &base->tv2;
        varray[1] = &base->tv3;
        varray[2] = &base->tv4;
        varray[3] = &base->tv5;

        for (array = 0; array < 4; array++) {
                struct tvec *varp = varray[array];

                index = slot = timer_jiffies & TVN_MASK;
                do {
                        list_for_each_entry(nte, varp->vec + slot, entry) {
                                if (tbase_get_deferrable(nte->base))
                                        continue;

                                found = 1;
                                if (time_before(nte->expires, expires))
                                        expires = nte->expires;
                        }
                        /*
                         * Do we still search for the first timer or are
                         * we looking up the cascade buckets ?
                         */
                        if (found) {
                                /* Look at the cascade bucket(s)? */
                                if (!index || slot < index)
                                        break;
                                return expires;
                        }
                        slot = (slot + 1) & TVN_MASK;
                } while (slot != index);

                if (index)
                        timer_jiffies += TVN_SIZE - index;
                timer_jiffies >>= TVN_BITS;
        }
        return expires;
}

/*
 * Check if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
                                            unsigned long expires)
{
        ktime_t hr_delta = hrtimer_get_next_event();
        struct timespec tsdelta;
        unsigned long delta;

        if (hr_delta.tv64 == KTIME_MAX)
                return expires;

        /*
         * Expired timer available, let it expire in the next tick
         */
        if (hr_delta.tv64 <= 0)
                return now + 1;

        tsdelta = ktime_to_timespec(hr_delta);
        delta = timespec_to_jiffies(&tsdelta);

        /*
         * Limit the delta to the max value, which is checked in
         * tick_nohz_stop_sched_tick():
         */
        if (delta > NEXT_TIMER_MAX_DELTA)
                delta = NEXT_TIMER_MAX_DELTA;

        /*
         * Take rounding errors into account and make sure that it
         * expires in the next tick. Otherwise we go into an endless
         * ping pong due to tick_nohz_stop_sched_tick() retriggering
         * the timer softirq
         */
        if (delta < 1)
                delta = 1;
        now += delta;
        if (time_before(now, expires))
                return now;
        return expires;
}

/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
        struct tvec_base *base = __this_cpu_read(tvec_bases);
        unsigned long expires = now + NEXT_TIMER_MAX_DELTA;

        /*
         * Pretend that there is no timer pending if the cpu is offline.
         * Possible pending timers will be migrated later to an active cpu.
         */
        if (cpu_is_offline(smp_processor_id()))
                return expires;

        spin_lock(&base->lock);
        if (base->active_timers) {
                if (time_before_eq(base->next_timer, base->timer_jiffies))
                        base->next_timer = __next_timer_interrupt(base);
                expires = base->next_timer;
        }
        spin_unlock(&base->lock);

        if (time_before_eq(expires, now))
                return now;

        return cmp_next_hrtimer_event(now, expires);
}
#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
        struct task_struct *p = current;
        int cpu = smp_processor_id();

        /* Note: this timer irq context must be accounted for as well. */
        account_process_tick(p, user_tick);
        run_local_timers();
        rcu_check_callbacks(cpu, user_tick);
        printk_tick();
#ifdef CONFIG_IRQ_WORK
        if (in_irq())
                irq_work_run();
#endif
        scheduler_tick();
        run_posix_cpu_timers(p);
}

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
        struct tvec_base *base = __this_cpu_read(tvec_bases);

        hrtimer_run_pending();

        if (time_after_eq(jiffies, base->timer_jiffies))
                __run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
        hrtimer_run_queues();
        raise_softirq(TIMER_SOFTIRQ);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility? This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
        return alarm_setitimer(seconds);
}

#endif

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid. The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
        return task_tgid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
        int pid;

        rcu_read_lock();
        pid = task_tgid_vnr(rcu_dereference(current->real_parent));
        rcu_read_unlock();

        return pid;
}

SYSCALL_DEFINE0(getuid)
{
        /* Only we change this so SMP safe */
        return from_kuid_munged(current_user_ns(), current_uid());
}

SYSCALL_DEFINE0(geteuid)
{
        /* Only we change this so SMP safe */
        return from_kuid_munged(current_user_ns(), current_euid());
}

SYSCALL_DEFINE0(getgid)
{
        /* Only we change this so SMP safe */
        return from_kgid_munged(current_user_ns(), current_gid());
}

SYSCALL_DEFINE0(getegid)
{
        /* Only we change this so SMP safe */
        return from_kgid_munged(current_user_ns(), current_egid());
}

static void process_timeout(unsigned long __data)
{
        wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
        struct timer_list timer;
        unsigned long expire;

        switch (timeout)
        {
        case MAX_SCHEDULE_TIMEOUT:
                /*
                 * These two special cases are useful to be comfortable
                 * in the caller. Nothing more. We could take
                 * MAX_SCHEDULE_TIMEOUT from one of the negative value
                 * but I'd like to return a valid offset (>=0) to allow
                 * the caller to do everything it wants with the retval.
                 */
                schedule();
                goto out;
        default:
                /*
                 * Another bit of PARANOID. Note that the retval will be
                 * 0 since no piece of kernel is supposed to do a check
                 * for a negative retval of schedule_timeout() (since it
                 * should never happen anyway). You just have the printk()
                 * that will tell you if something has gone wrong and where.
                 */
                if (timeout < 0) {
                        printk(KERN_ERR "schedule_timeout: wrong timeout "
                                "value %lx\n", timeout);
                        dump_stack();
                        current->state = TASK_RUNNING;
                        goto out;
                }
        }

        expire = timeout + jiffies;

        setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
        __mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
        schedule();
        del_singleshot_timer_sync(&timer);

        /* Remove the timer from the object tracker */
        destroy_timer_on_stack(&timer);

        timeout = expire - jiffies;

out:
        return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);

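/*
 * Usage sketch (illustrative): wait for up to two seconds, allowing
 * signals to end the wait early. A return of 0 means the full timeout
 * elapsed; a positive return is the time (in jiffies) remaining:
 *
 *      set_current_state(TASK_INTERRUPTIBLE);
 *      remaining = schedule_timeout(2 * HZ);
 *
 * Most callers reach this through wait_event_timeout() or the
 * schedule_timeout_*() wrappers below rather than calling it directly.
 */
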
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
        __set_current_state(TASK_INTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
        __set_current_state(TASK_KILLABLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
        __set_current_state(TASK_UNINTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
        return task_pid_vnr(current);
}

/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
int do_sysinfo(struct sysinfo *info)
{
        unsigned long mem_total, sav_total;
        unsigned int mem_unit, bitcount;
        struct timespec tp;

        memset(info, 0, sizeof(struct sysinfo));

        ktime_get_ts(&tp);
        monotonic_to_bootbased(&tp);
        info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

        get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

        info->procs = nr_threads;

        si_meminfo(info);
        si_swapinfo(info);

        /*
         * If the sum of all the available memory (i.e. ram + swap)
         * is less than can be stored in a 32 bit unsigned long then
         * we can be binary compatible with 2.2.x kernels. If not,
         * well, in that case 2.2.x was broken anyways...
         *
         *  -Erik Andersen <andersee@debian.org>
         */

        mem_total = info->totalram + info->totalswap;
        if (mem_total < info->totalram || mem_total < info->totalswap)
                goto out;
        bitcount = 0;
        mem_unit = info->mem_unit;
        while (mem_unit > 1) {
                bitcount++;
                mem_unit >>= 1;
                sav_total = mem_total;
                mem_total <<= 1;
                if (mem_total < sav_total)
                        goto out;
        }

        /*
         * If mem_total did not overflow, multiply all memory values by
         * info->mem_unit and set it to 1. This leaves things compatible
         * with 2.2.x, and also retains compatibility with earlier 2.4.x
         * kernels...
         */

        info->mem_unit = 1;
        info->totalram <<= bitcount;
        info->freeram <<= bitcount;
        info->sharedram <<= bitcount;
        info->bufferram <<= bitcount;
        info->totalswap <<= bitcount;
        info->freeswap <<= bitcount;
        info->totalhigh <<= bitcount;
        info->freehigh <<= bitcount;

out:
        return 0;
}

SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
        struct sysinfo val;

        do_sysinfo(&val);

        if (copy_to_user(info, &val, sizeof(struct sysinfo)))
                return -EFAULT;

        return 0;
}

static int __cpuinit init_timers_cpu(int cpu)
{
        int j;
        struct tvec_base *base;
        static char __cpuinitdata tvec_base_done[NR_CPUS];

        if (!tvec_base_done[cpu]) {
                static char boot_done;

                if (boot_done) {
                        /*
                         * The APs use this path later in boot
                         */
                        base = kmalloc_node(sizeof(*base),
                                            GFP_KERNEL | __GFP_ZERO,
                                            cpu_to_node(cpu));
                        if (!base)
                                return -ENOMEM;

                        /* Make sure that tvec_base is 2 byte aligned */
                        if (tbase_get_deferrable(base)) {
                                WARN_ON(1);
                                kfree(base);
                                return -ENOMEM;
                        }
                        per_cpu(tvec_bases, cpu) = base;
                } else {
                        /*
                         * This is for the boot CPU - we use compile-time
                         * static initialisation because per-cpu memory isn't
                         * ready yet and because the memory allocators are not
                         * initialised either.
                         */
                        boot_done = 1;
                        base = &boot_tvec_bases;
                }
                tvec_base_done[cpu] = 1;
        } else {
                base = per_cpu(tvec_bases, cpu);
        }

        spin_lock_init(&base->lock);

        for (j = 0; j < TVN_SIZE; j++) {
                INIT_LIST_HEAD(base->tv5.vec + j);
                INIT_LIST_HEAD(base->tv4.vec + j);
                INIT_LIST_HEAD(base->tv3.vec + j);
                INIT_LIST_HEAD(base->tv2.vec + j);
        }
        for (j = 0; j < TVR_SIZE; j++)
                INIT_LIST_HEAD(base->tv1.vec + j);

        base->timer_jiffies = jiffies;
        base->next_timer = base->timer_jiffies;
        base->active_timers = 0;
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
{
        struct timer_list *timer;

        while (!list_empty(head)) {
                timer = list_first_entry(head, struct timer_list, entry);
                /* We ignore the accounting on the dying cpu */
                detach_timer(timer, false);
                timer_set_base(timer, new_base);
                internal_add_timer(new_base, timer);
        }
}

static void __cpuinit migrate_timers(int cpu)
{
        struct tvec_base *old_base;
        struct tvec_base *new_base;
        int i;

        BUG_ON(cpu_online(cpu));
        old_base = per_cpu(tvec_bases, cpu);
        new_base = get_cpu_var(tvec_bases);
        /*
         * The caller is globally serialized and nobody else
         * takes two locks at once, deadlock is not possible.
         */
        spin_lock_irq(&new_base->lock);
        spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

        BUG_ON(old_base->running_timer);

        for (i = 0; i < TVR_SIZE; i++)
                migrate_timer_list(new_base, old_base->tv1.vec + i);
        for (i = 0; i < TVN_SIZE; i++) {
                migrate_timer_list(new_base, old_base->tv2.vec + i);
                migrate_timer_list(new_base, old_base->tv3.vec + i);
                migrate_timer_list(new_base, old_base->tv4.vec + i);
                migrate_timer_list(new_base, old_base->tv5.vec + i);
        }

        spin_unlock(&old_base->lock);
        spin_unlock_irq(&new_base->lock);
        put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit timer_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        int err;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                err = init_timers_cpu(cpu);
                if (err < 0)
                        return notifier_from_errno(err);
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                migrate_timers(cpu);
                break;
#endif
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata timers_nb = {
        .notifier_call  = timer_cpu_notify,
};


void __init init_timers(void)
{
        int err;

        /* ensure there are enough low bits for flags in timer->base pointer */
        BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK);

        err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
                               (void *)(long)smp_processor_id());
        init_timer_stats();

        BUG_ON(err != NOTIFY_OK);
        register_cpu_notifier(&timers_nb);
        open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout)
                timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout && !signal_pending(current))
                timeout = schedule_timeout_interruptible(timeout);
        return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);

static int __sched do_usleep_range(unsigned long min, unsigned long max)
{
        ktime_t kmin;
        unsigned long delta;

        kmin = ktime_set(0, min * NSEC_PER_USEC);
        delta = (max - min) * NSEC_PER_USEC;
        return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
}

/**
 * usleep_range - Drop in replacement for udelay where wakeup is flexible
 * @min: Minimum time in usecs to sleep
 * @max: Maximum time in usecs to sleep
 */
void usleep_range(unsigned long min, unsigned long max)
{
        __set_current_state(TASK_UNINTERRUPTIBLE);
        do_usleep_range(min, max);
}
EXPORT_SYMBOL(usleep_range);
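
/*
 * Choosing a delay primitive (illustrative): udelay() busy-waits and is
 * for atomic context; msleep() is jiffy-based, so msleep(1) sleeps for
 * at least one full tick (up to ~20ms at HZ=100) and badly oversleeps
 * short delays; usleep_range() sleeps on an hrtimer and the min/max
 * window lets wakeups coalesce. A 100us pause in process context is
 * therefore better written as:
 *
 *      usleep_range(100, 200);
 *
 * than as udelay(100) (burns CPU) or msleep(1) (oversleeps).
 */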