/*
 * linux/kernel/timer.c
 *
 * Kernel internal timers, basic process system calls
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
 *
 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
 *            "A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *            serialize accesses to xtime/lost_ticks).
 *                          Copyright (C) 1998 Andrea Arcangeli
 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
 *                          Copyright (C) 2000, 2001, 2002 Ingo Molnar
 *            Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/perf_counter.h>
#include <linux/sched.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

struct tvec {
	struct list_head vec[TVN_SIZE];
};

struct tvec_root {
	struct list_head vec[TVR_SIZE];
};

struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;

struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;
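
/*
 * Illustrative note (added for this edited copy, not part of the original
 * file): with CONFIG_BASE_SMALL == 0 the root vector tv1 has
 * TVR_SIZE = 256 slots, one per jiffy, so it covers expiries up to 255
 * ticks ahead.  Each outer level tv2..tv5 has TVN_SIZE = 64 slots and
 * covers 64 times the range of the level below it:
 *
 *	tv1: idx < 2^8          (   256 jiffies)
 *	tv2: idx < 2^(8+6)      (  ~16k jiffies)
 *	tv3: idx < 2^(8+2*6)    (   ~1M jiffies)
 *	tv4: idx < 2^(8+3*6)    (  ~67M jiffies)
 *	tv5: everything else, capped at 0xffffffff
 *
 * At HZ=1000, tv1 therefore spans about a quarter of a second and tv5
 * reaches out to roughly 49 days.
 */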

/*
 * Note that all tvec_bases are 2 byte aligned and lower bit of
 * base in timer_list is guaranteed to be zero. Use the LSB for
 * the new flag to indicate whether the timer is deferrable
 */
#define TBASE_DEFERRABLE_FLAG		(0x1)

/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
	return ((struct tvec_base *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
}

static inline void timer_set_deferrable(struct timer_list *timer)
{
	timer->base = ((struct tvec_base *)((unsigned long)(timer->base) |
				       TBASE_DEFERRABLE_FLAG));
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
	timer->base = (struct tvec_base *)((unsigned long)(new_base) |
				      tbase_get_deferrable(timer->base));
}
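
/*
 * A minimal user-space sketch (illustrative only, not kernel code) of the
 * pointer-tagging trick used above: because a tvec_base is at least
 * 2-byte aligned, bit 0 of any pointer to it is always zero and can be
 * borrowed to carry a boolean flag.  All names below are hypothetical.
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	#define FLAG 0x1UL
 *
 *	static void *tag(void *p)    { return (void *)((uintptr_t)p | FLAG); }
 *	static void *untag(void *p)  { return (void *)((uintptr_t)p & ~FLAG); }
 *	static int tagged(void *p)   { return (int)((uintptr_t)p & FLAG); }
 *
 *	int main(void)
 *	{
 *		static long base;		// at least 2-byte aligned
 *		void *p = tag(&base);
 *
 *		assert(tagged(p));		// flag survives in bit 0
 *		assert(untag(p) == &base);	// real pointer recoverable
 *		return 0;
 *	}
 */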

static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then round, then subtract this
	 * extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffie is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	if (j <= jiffies) /* rounding ate our timeout entirely; */
		return original;
	return j;
}

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
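
/*
 * Usage sketch (illustrative, not from this file): a driver that polls
 * hardware every two seconds, but does not care about the exact firing
 * time, can batch its wakeups with other such timers.  poll_timer and
 * poll_func are hypothetical names:
 *
 *	static struct timer_list poll_timer;
 *
 *	static void poll_func(unsigned long data)
 *	{
 *		// ... poll the hardware ...
 *		mod_timer(&poll_timer, round_jiffies(jiffies + 2 * HZ));
 *	}
 *
 * For timeouts that must not fire early (e.g. watchdogs), the _up
 * variants above are the safe choice.
 */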

static inline void set_running_timer(struct tvec_base *base,
					struct timer_list *timer)
{
#ifdef CONFIG_SMP
	base->running_timer = timer;
#endif
}

static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than 0xffffffff on 64-bit
		 * architectures then we use the maximum timeout:
		 */
		if (idx > 0xffffffffUL) {
			idx = 0xffffffffUL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}
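
/*
 * Worked example (illustrative, added in this edited copy): assume
 * CONFIG_BASE_SMALL == 0, so TVR_BITS = 8 and TVN_BITS = 6, and assume
 * base->timer_jiffies = 1000.  A timer with expires = 1100 gives
 * idx = 100 < 256, so it goes into tv1 at slot (1100 & 255) = 76.  A
 * timer with expires = 11000 gives idx = 10000, which is >= 256 but
 * < 2^14, so it goes into tv2 at slot ((11000 >> 8) & 63) = 42.  Only
 * tv1 slots are ever executed directly; timers in the outer levels are
 * re-hashed downwards by cascade() as time advances.
 */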

#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	unsigned int flag = 0;

	if (unlikely(tbase_get_deferrable(timer->base)))
		flag |= TIMER_STATS_FLAG_DEFERRABLE;

	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The timer was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (timer->entry.next == NULL &&
		    timer->entry.prev == TIMER_ENTRY_STATIC) {
			debug_object_init(timer, &timer_debug_descr);
			debug_object_activate(timer, &timer_debug_descr);
			return 0;
		} else {
			WARN_ON_ONCE(1);
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name		= "timer_list",
	.fixup_init	= timer_fixup_init,
	.fixup_activate	= timer_fixup_activate,
	.fixup_free	= timer_fixup_free,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static void __init_timer(struct timer_list *timer,
			 const char *name,
			 struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer,
			     const char *name,
			     struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	__init_timer(timer, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
#endif

static void __init_timer(struct timer_list *timer,
			 const char *name,
			 struct lock_class_key *key)
{
	timer->entry.next = NULL;
	timer->base = __raw_get_cpu_var(tvec_bases);
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer,
		    const char *name,
		    struct lock_class_key *key)
{
	debug_timer_init(timer);
	__init_timer(timer, name, key);
}
EXPORT_SYMBOL(init_timer_key);

void init_timer_deferrable_key(struct timer_list *timer,
			       const char *name,
			       struct lock_class_key *key)
{
	init_timer_key(timer, name, key);
	timer_set_deferrable(timer);
}
EXPORT_SYMBOL(init_timer_deferrable_key);
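
/*
 * Usage sketch (illustrative, not from this file): the usual
 * initialisation of a timer before any other timer API is called on it.
 * init_timer() and setup_timer() are macros that feed a lockdep class
 * key into the *_key() functions above.  my_dev and my_timeout are
 * hypothetical names:
 *
 *	static struct my_dev {
 *		struct timer_list timer;
 *	} dev;
 *
 *	static void my_timeout(unsigned long data)
 *	{
 *		struct my_dev *d = (struct my_dev *)data;
 *		// ... handle the timeout for d ...
 *	}
 *
 *	setup_timer(&dev.timer, my_timeout, (unsigned long)&dev);
 *	mod_timer(&dev.timer, jiffies + HZ);	// fire in one second
 */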

static inline void detach_timer(struct timer_list *timer,
				int clear_pending)
{
	struct list_head *entry = &timer->entry;

	debug_timer_deactivate(timer);

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
					unsigned long *flags)
	__acquires(timer->base->lock)
{
	struct tvec_base *base;

	for (;;) {
		struct tvec_base *prelock_base = timer->base;
		base = tbase_get_base(prelock_base);
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(prelock_base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
	    bool pending_only, int pinned)
{
	struct tvec_base *base, *new_base;
	unsigned long flags;
	int ret = 0, cpu;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	if (timer_pending(timer)) {
		detach_timer(timer, 0);
		ret = 1;
	} else {
		if (pending_only)
			goto out_unlock;
	}

	debug_timer_activate(timer);

	new_base = __get_cpu_var(tvec_bases);

	cpu = smp_processor_id();

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) {
		int preferred_cpu = get_nohz_load_balancer();

		if (preferred_cpu >= 0)
			cpu = preferred_cpu;
	}
#endif
	new_base = per_cpu(tvec_bases, cpu);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not finished yet. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer_set_base(timer, NULL);
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer_set_base(timer, base);
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);

out_unlock:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);

/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);
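
/*
 * Usage sketch (illustrative, not from this file): rearming a timeout
 * from its own handler, the common pattern for periodic work.
 * mod_timer() is safe here even if other CPUs touch the same timer
 * concurrently.  my_timer and my_handler are hypothetical names:
 *
 *	static void my_handler(unsigned long data)
 *	{
 *		// ... do the periodic work ...
 *		mod_timer(&my_timer, jiffies + msecs_to_jiffies(100));
 *	}
 */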

/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 * and not allow the timer to be migrated to a different CPU.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct tvec_base *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer_set_base(timer, base);
	debug_timer_activate(timer);
	internal_add_timer(base, timer);
	/*
	 * Check whether the other CPU is idle and needs to be
	 * triggered to reevaluate the timer wheel when nohz is
	 * active. We are protected against the other CPU fiddling
	 * with the timer by holding the timer base lock. This also
	 * makes sure that a CPU on the way to idle can not evaluate
	 * the timer wheel.
	 */
	wake_up_idle_cpu(cpu);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);
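
/*
 * Usage sketch (illustrative, not from this file): pinning per-CPU
 * housekeeping work to a specific processor.  Unlike add_timer(), the
 * timer is queued directly on @cpu's wheel, and the nohz wakeup above
 * makes sure an idle @cpu notices it.  'timer' here is a hypothetical,
 * already initialised timer_list:
 *
 *	timer.expires = jiffies + HZ;
 *	add_timer_on(&timer, 0);	// run on CPU 0 in one second
 */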

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = 0;

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		if (timer_pending(timer)) {
			detach_timer(timer, 1);
			ret = 1;
		}
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);

#ifdef CONFIG_SMP
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to be deactivated
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_timer_base(timer, &flags);

	if (base->running_timer == timer)
		goto out;

	ret = 0;
	if (timer_pending(timer)) {
		detach_timer(timer, 1);
		ret = 1;
	}
out:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif

	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL(del_timer_sync);
#endif
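
/*
 * Usage sketch (illustrative, not from this file): the canonical
 * teardown order in a driver exit path.  Stop the source of re-arming
 * first, then synchronously kill the timer so the handler cannot be
 * running afterwards.  my_dev, shutting_down and my_dev_shutdown are
 * hypothetical names:
 *
 *	static void my_dev_shutdown(struct my_dev *d)
 *	{
 *		d->shutting_down = 1;	// handler checks this, won't re-arm
 *		del_timer_sync(&d->timer);
 *		// now it is safe to free d
 *	}
 */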

static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(tbase_get_base(timer->base) != base);
		internal_add_timer(base, timer);
	}

	return index;
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_first_entry(head, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;

			timer_stats_account_timer(timer);

			set_running_timer(base, timer);
			detach_timer(timer, 1);

			spin_unlock_irq(&base->lock);
			{
				int preempt_count = preempt_count();

#ifdef CONFIG_LOCKDEP
				/*
				 * It is permissible to free the timer from
				 * inside the function that is called from
				 * it, this we need to take into account for
				 * lockdep too. To avoid bogus "held lock
				 * freed" warnings as well as problems when
				 * looking into timer->lockdep_map, make a
				 * copy and use that here.
				 */
				struct lockdep_map lockdep_map =
					timer->lockdep_map;
#endif
				/*
				 * Couple the lock chain with the lock chain at
				 * del_timer_sync() by acquiring the lock_map
				 * around the fn() call here and in
				 * del_timer_sync().
				 */
				lock_map_acquire(&lockdep_map);

				fn(data);

				lock_map_release(&lockdep_map);

				if (preempt_count != preempt_count()) {
					printk(KERN_ERR "huh, entered %p "
					       "with preempt_count %08x, exited"
					       " with %08x?\n",
					       fn, preempt_count,
					       preempt_count());
					BUG();
				}
			}
			spin_lock_irq(&base->lock);
		}
	}
	set_running_timer(base, NULL);
	spin_unlock_irq(&base->lock);
}
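
/*
 * Worked example (illustrative, added in this edited copy) of the
 * cascade test above: with TVR_BITS = 8, index is zero once every 256
 * ticks; at that point INDEX(0) selects the tv2 slot whose timers are
 * due within the next 256 ticks, and cascade() re-hashes them into tv1.
 * If that tv2 slot index was itself 0 (the level wrapped), the same
 * happens one level up, and so on - so a tv5 cascade only occurs
 * roughly once per 2^(8+3*6) = 2^26 ticks.
 */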

#ifdef CONFIG_NO_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a cpu is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	struct tvec *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
			if (tbase_get_deferrable(nte->base))
				continue;

			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		struct tvec *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			list_for_each_entry(nte, varp->vec + slot, entry) {
				if (tbase_get_deferrable(nte->base))
					continue;

				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Do we still search for the first timer or are
			 * we looking up the cascade buckets ?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}

/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
					    unsigned long expires)
{
	ktime_t hr_delta = hrtimer_get_next_event();
	struct timespec tsdelta;
	unsigned long delta;

	if (hr_delta.tv64 == KTIME_MAX)
		return expires;

	/*
	 * Expired timer available, let it expire in the next tick
	 */
	if (hr_delta.tv64 <= 0)
		return now + 1;

	tsdelta = ktime_to_timespec(hr_delta);
	delta = timespec_to_jiffies(&tsdelta);

	/*
	 * Limit the delta to the max value, which is checked in
	 * tick_nohz_stop_sched_tick():
	 */
	if (delta > NEXT_TIMER_MAX_DELTA)
		delta = NEXT_TIMER_MAX_DELTA;

	/*
	 * Take rounding errors into account and make sure that it
	 * expires in the next tick. Otherwise we go into an endless
	 * ping pong due to tick_nohz_stop_sched_tick() retriggering
	 * the timer softirq
	 */
	if (delta < 1)
		delta = 1;
	now += delta;
	if (time_before(now, expires))
		return now;
	return expires;
}

/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
	struct tvec_base *base = __get_cpu_var(tvec_bases);
	unsigned long expires;

	spin_lock(&base->lock);
	expires = __next_timer_interrupt(base);
	spin_unlock(&base->lock);

	if (time_before_eq(expires, now))
		return now;

	return cmp_next_hrtimer_event(now, expires);
}
#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_tick);
	printk_tick();
	scheduler_tick();
	run_posix_cpu_timers(p);
}

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	struct tvec_base *base = __get_cpu_var(tvec_bases);

	perf_counter_do_pending();

	hrtimer_run_pending();

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	hrtimer_run_queues();
	raise_softirq(TIMER_SOFTIRQ);
	softlockup_tick();
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */

void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	update_wall_time();
	calc_global_load();
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility? This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
	return alarm_setitimer(seconds);
}

#endif

#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid. The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(current->real_parent);
	rcu_read_unlock();

	return pid;
}

SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return current_uid();
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return current_euid();
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return current_gid();
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return current_egid();
}

#endif

static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
	schedule();
	del_singleshot_timer_sync(&timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer);

	timeout = expire - jiffies;

out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
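
/*
 * Usage sketch (illustrative, not from this file): waiting for a
 * condition with a bounded sleep.  The task state must be set *before*
 * each condition re-check to avoid missing a wakeup.  'condition' is a
 * hypothetical expression:
 *
 *	long timeout = msecs_to_jiffies(500);
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	while (!condition && timeout) {
 *		timeout = schedule_timeout(timeout);
 *		set_current_state(TASK_INTERRUPTIBLE);
 *	}
 *	__set_current_state(TASK_RUNNING);
 */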

/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}

/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec tp;

	memset(info, 0, sizeof(struct sysinfo));

	ktime_get_ts(&tp);
	monotonic_to_bootbased(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels. If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1. This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}

SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}

static int __cpuinit init_timers_cpu(int cpu)
{
	int j;
	struct tvec_base *base;
	static char __cpuinitdata tvec_base_done[NR_CPUS];

	if (!tvec_base_done[cpu]) {
		static char boot_done;

		if (boot_done) {
			/*
			 * The APs use this path later in boot
			 */
			base = kmalloc_node(sizeof(*base),
					    GFP_KERNEL | __GFP_ZERO,
					    cpu_to_node(cpu));
			if (!base)
				return -ENOMEM;

			/* Make sure that tvec_base is 2 byte aligned */
			if (tbase_get_deferrable(base)) {
				WARN_ON(1);
				kfree(base);
				return -ENOMEM;
			}
			per_cpu(tvec_bases, cpu) = base;
		} else {
			/*
			 * This is for the boot CPU - we use compile-time
			 * static initialisation because per-cpu memory isn't
			 * ready yet and because the memory allocators are not
			 * initialised either.
			 */
			boot_done = 1;
			base = &boot_tvec_bases;
		}
		tvec_base_done[cpu] = 1;
	} else {
		base = per_cpu(tvec_bases, cpu);
	}

	spin_lock_init(&base->lock);

	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_first_entry(head, struct timer_list, entry);
		detach_timer(timer, 0);
		timer_set_base(timer, new_base);
		internal_add_timer(new_base, timer);
	}
}

static void __cpuinit migrate_timers(int cpu)
{
	struct tvec_base *old_base;
	struct tvec_base *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
	new_base = get_cpu_var(tvec_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	spin_lock_irq(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	spin_unlock(&old_base->lock);
	spin_unlock_irq(&new_base->lock);
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (init_timers_cpu(cpu) < 0)
			return NOTIFY_BAD;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata timers_nb = {
	.notifier_call	= timer_cpu_notify,
};


void __init init_timers(void)
{
	int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
				(void *)(long)smp_processor_id());

	init_timer_stats();

	BUG_ON(err == NOTIFY_BAD);
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);
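
/*
 * Usage note (illustrative, added in this edited copy): msleep() is
 * meant for sleeps of several milliseconds and up; for very short
 * delays the +1 jiffy rounding above makes it overshoot badly at low
 * HZ.  A caller that wants to give up early on a signal checks the
 * remainder:
 *
 *	unsigned long left = msleep_interruptible(1000);
 *
 *	if (left)
 *		return -EINTR;	// interrupted with 'left' ms not slept
 */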