/*
 * linux/kernel/timer.c
 *
 * Kernel internal timers, basic process system calls
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
 *
 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *             serialize accesses to xtime/lost_ticks).
 *                             Copyright (C) 1998 Andrea Arcangeli
 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
 *             Copyright (C) 2000, 2001, 2002 Ingo Molnar
 *             Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/perf_event.h>
#include <linux/sched.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

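/*
 * For reference (derived from the definitions above, with
 * CONFIG_BASE_SMALL == 0, i.e. TVR_BITS == 8 and TVN_BITS == 6),
 * each level of the wheel covers the following ranges of
 * (expires - timer_jiffies):
 *
 *	tv1:         idx < 2^8
 *	tv2:  2^8  <= idx < 2^14
 *	tv3:  2^14 <= idx < 2^20
 *	tv4:  2^20 <= idx < 2^26
 *	tv5:  2^26 <= idx < 2^32  (capped, see internal_add_timer())
 */
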
struct tvec {
	struct list_head vec[TVN_SIZE];
};

struct tvec_root {
	struct list_head vec[TVR_SIZE];
};

struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	unsigned long next_timer;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;

struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

/*
 * Note that all tvec_bases are 2 byte aligned and the lower bit of
 * base in timer_list is guaranteed to be zero. Use the LSB to flag
 * whether the timer is deferrable.
 */
#define TBASE_DEFERRABLE_FLAG		(0x1)

/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
	return ((struct tvec_base *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
}

static inline void timer_set_deferrable(struct timer_list *timer)
{
	timer->base = ((struct tvec_base *)((unsigned long)(timer->base) |
				       TBASE_DEFERRABLE_FLAG));
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
	timer->base = (struct tvec_base *)((unsigned long)(new_base) |
				      tbase_get_deferrable(timer->base));
}
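
/*
 * Illustration of the tagging scheme above (addresses are made up):
 *
 *	timer->base == 0x...1000	a real, 2-byte-aligned tvec_base
 *	timer_set_deferrable(timer)	timer->base becomes 0x...1001
 *	tbase_get_deferrable(...)	now returns 1
 *	tbase_get_base(...)		masks the flag off: 0x...1000
 *
 * Code that dereferences timer->base must therefore go through
 * tbase_get_base() first.
 */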

static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then rounding, then subtracting
	 * this extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffy is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	if (j <= jiffies) /* rounding ate our timeout entirely; */
		return original;
	return j;
}

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
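
/*
 * Illustrative use, assuming a hypothetical driver that polls roughly
 * once per second (my_timer is a made-up name):
 *
 *	mod_timer(&my_timer, round_jiffies(jiffies + HZ));
 *
 * All timers armed this way gravitate to the same (per-cpu skewed)
 * second boundary, so an otherwise idle CPU takes one wakeup instead
 * of several.
 */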

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);


static inline void set_running_timer(struct tvec_base *base,
					struct timer_list *timer)
{
#ifdef CONFIG_SMP
	base->running_timer = timer;
#endif
}

static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than 0xffffffff on 64-bit
		 * architectures then we use the maximum timeout:
		 */
		if (idx > 0xffffffffUL) {
			idx = 0xffffffffUL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}
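
/*
 * Worked example for the index math above (CONFIG_BASE_SMALL == 0):
 * a timer with idx = expires - timer_jiffies = 1000 satisfies
 * 2^8 <= 1000 < 2^14, so it is hashed into tv2.vec[(expires >> 8) & 63]
 * and is cascaded down into tv1 once timer_jiffies reaches its
 * 256-jiffy window.
 */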

#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	unsigned int flag = 0;

	if (likely(!timer->start_site))
		return;
	if (unlikely(tbase_get_deferrable(timer->base)))
		flag |= TIMER_STATS_FLAG_DEFERRABLE;

	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The timer was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (timer->entry.next == NULL &&
		    timer->entry.prev == TIMER_ENTRY_STATIC) {
			debug_object_init(timer, &timer_debug_descr);
			debug_object_activate(timer, &timer_debug_descr);
			return 0;
		} else {
			WARN_ON_ONCE(1);
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name		= "timer_list",
	.fixup_init	= timer_fixup_init,
	.fixup_activate	= timer_fixup_activate,
	.fixup_free	= timer_fixup_free,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static void __init_timer(struct timer_list *timer,
			 const char *name,
			 struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer,
			     const char *name,
			     struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	__init_timer(timer, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
	debug_timer_activate(timer);
	trace_timer_start(timer, expires);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static void __init_timer(struct timer_list *timer,
			 const char *name,
			 struct lock_class_key *key)
{
	timer->entry.next = NULL;
	timer->base = __raw_get_cpu_var(tvec_bases);
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be called on a timer prior to calling *any* of
 * the other timer functions.
 */
void init_timer_key(struct timer_list *timer,
		    const char *name,
		    struct lock_class_key *key)
{
	debug_init(timer);
	__init_timer(timer, name, key);
}
EXPORT_SYMBOL(init_timer_key);

void init_timer_deferrable_key(struct timer_list *timer,
			       const char *name,
			       struct lock_class_key *key)
{
	init_timer_key(timer, name, key);
	timer_set_deferrable(timer);
}
EXPORT_SYMBOL(init_timer_deferrable_key);
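
/*
 * Typical initialization, as a sketch (my_timer, my_func and my_data
 * are made-up names):
 *
 *	struct timer_list my_timer;
 *
 *	init_timer(&my_timer);
 *	my_timer.function = my_func;
 *	my_timer.data = my_data;
 *
 * or, for timers whose expiry may be delayed until the CPU wakes up
 * for some other reason:
 *
 *	init_timer_deferrable(&my_timer);
 */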

static inline void detach_timer(struct timer_list *timer,
				int clear_pending)
{
	struct list_head *entry = &timer->entry;

	debug_deactivate(timer);

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
					 unsigned long *flags)
	__acquires(timer->base->lock)
{
	struct tvec_base *base;

	for (;;) {
		struct tvec_base *prelock_base = timer->base;
		base = tbase_get_base(prelock_base);
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(prelock_base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
	    bool pending_only, int pinned)
{
	struct tvec_base *base, *new_base;
	unsigned long flags;
	int ret = 0, cpu;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	if (timer_pending(timer)) {
		detach_timer(timer, 0);
		if (timer->expires == base->next_timer &&
		    !tbase_get_deferrable(timer->base))
			base->next_timer = base->timer_jiffies;
		ret = 1;
	} else {
		if (pending_only)
			goto out_unlock;
	}

	debug_activate(timer, expires);

	cpu = smp_processor_id();

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) {
		int preferred_cpu = get_nohz_load_balancer();

		if (preferred_cpu >= 0)
			cpu = preferred_cpu;
	}
#endif
	new_base = per_cpu(tvec_bases, cpu);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not yet finished. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer_set_base(timer, NULL);
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer_set_base(timer, base);
		}
	}

	timer->expires = expires;
	if (time_before(timer->expires, base->next_timer) &&
	    !tbase_get_deferrable(timer->base))
		base->next_timer = timer->expires;
	internal_add_timer(base, timer);

out_unlock:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);

/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated).
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer_pending(timer) && timer->expires == expires)
		return 1;

	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);
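
/*
 * Sketch of the common arm/re-arm pattern (names are made up):
 *
 *	setup_timer(&my_timer, my_func, my_data);
 *	mod_timer(&my_timer, jiffies + msecs_to_jiffies(200));
 *
 * mod_timer() is also safe on an inactive timer, in which case it
 * simply behaves like add_timer().
 */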

/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 * and to ensure that the timer is not migrated to a different CPU.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct tvec_base *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer_set_base(timer, base);
	debug_activate(timer, timer->expires);
	if (time_before(timer->expires, base->next_timer) &&
	    !tbase_get_deferrable(timer->base))
		base->next_timer = timer->expires;
	internal_add_timer(base, timer);
	/*
	 * Check whether the other CPU is idle and needs to be
	 * triggered to reevaluate the timer wheel when nohz is
	 * active. We are protected against the other CPU fiddling
	 * with the timer by holding the timer base lock. This also
	 * makes sure that a CPU on the way to idle can not evaluate
	 * the timer wheel.
	 */
	wake_up_idle_cpu(cpu);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);
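
/*
 * Illustrative use, assuming a hypothetical per-cpu watchdog whose
 * my_timers was allocated with alloc_percpu():
 *
 *	for_each_online_cpu(cpu)
 *		add_timer_on(per_cpu_ptr(my_timers, cpu), cpu);
 *
 * Unlike add_timer(), the timer is queued on @cpu's base directly,
 * so its callback will run on that CPU.
 */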

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = 0;

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		if (timer_pending(timer)) {
			detach_timer(timer, 1);
			if (timer->expires == base->next_timer &&
			    !tbase_get_deferrable(timer->base))
				base->next_timer = base->timer_jiffies;
			ret = 1;
		}
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);

#ifdef CONFIG_SMP
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to deactivate
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_timer_base(timer, &flags);

	if (base->running_timer == timer)
		goto out;

	timer_stats_timer_clear_start_info(timer);
	ret = 0;
	if (timer_pending(timer)) {
		detach_timer(timer, 1);
		if (timer->expires == base->next_timer &&
		    !tbase_get_deferrable(timer->base))
			base->next_timer = base->timer_jiffies;
		ret = 1;
	}
out:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif

	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL(del_timer_sync);
#endif
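
/*
 * Canonical teardown order for an object embedding a timer, as a
 * sketch (my_obj is a made-up name):
 *
 *	del_timer_sync(&my_obj->timer);	handler finished, not queued
 *	kfree(my_obj);			only now safe to free
 *
 * Freeing first would allow a still-running handler to touch freed
 * memory.
 */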

static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(tbase_get_base(timer->base) != base);
		internal_add_timer(base, timer);
	}

	return index;
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_first_entry(head, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;

			timer_stats_account_timer(timer);

			set_running_timer(base, timer);
			detach_timer(timer, 1);

			spin_unlock_irq(&base->lock);
			{
				int preempt_count = preempt_count();

#ifdef CONFIG_LOCKDEP
				/*
				 * It is permissible to free the timer from
				 * inside the function that is called from
				 * it, this we need to take into account for
				 * lockdep too. To avoid bogus "held lock
				 * freed" warnings as well as problems when
				 * looking into timer->lockdep_map, make a
				 * copy and use that here.
				 */
				struct lockdep_map lockdep_map =
					timer->lockdep_map;
#endif
				/*
				 * Couple the lock chain with the lock chain at
				 * del_timer_sync() by acquiring the lock_map
				 * around the fn() call here and in
				 * del_timer_sync().
				 */
				lock_map_acquire(&lockdep_map);

				trace_timer_expire_entry(timer);
				fn(data);
				trace_timer_expire_exit(timer);

				lock_map_release(&lockdep_map);

				if (preempt_count != preempt_count()) {
					printk(KERN_ERR "huh, entered %p "
					       "with preempt_count %08x, exited"
					       " with %08x?\n",
					       fn, preempt_count,
					       preempt_count());
					BUG();
				}
			}
			spin_lock_irq(&base->lock);
		}
	}
	set_running_timer(base, NULL);
	spin_unlock_irq(&base->lock);
}

#ifdef CONFIG_NO_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	struct tvec *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
			if (tbase_get_deferrable(nte->base))
				continue;

			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		struct tvec *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			list_for_each_entry(nte, varp->vec + slot, entry) {
				if (tbase_get_deferrable(nte->base))
					continue;

				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Are we still searching for the first timer, or are
			 * we looking up the cascade buckets?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}

/*
 * Check if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
					    unsigned long expires)
{
	ktime_t hr_delta = hrtimer_get_next_event();
	struct timespec tsdelta;
	unsigned long delta;

	if (hr_delta.tv64 == KTIME_MAX)
		return expires;

	/*
	 * Expired timer available, let it expire in the next tick
	 */
	if (hr_delta.tv64 <= 0)
		return now + 1;

	tsdelta = ktime_to_timespec(hr_delta);
	delta = timespec_to_jiffies(&tsdelta);

	/*
	 * Limit the delta to the max value, which is checked in
	 * tick_nohz_stop_sched_tick():
	 */
	if (delta > NEXT_TIMER_MAX_DELTA)
		delta = NEXT_TIMER_MAX_DELTA;

	/*
	 * Take rounding errors into account and make sure that it
	 * expires in the next tick. Otherwise we go into an endless
	 * ping-pong due to tick_nohz_stop_sched_tick() retriggering
	 * the timer softirq.
	 */
	if (delta < 1)
		delta = 1;
	now += delta;
	if (time_before(now, expires))
		return now;
	return expires;
}

/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
	struct tvec_base *base = __get_cpu_var(tvec_bases);
	unsigned long expires;

	spin_lock(&base->lock);
	if (time_before_eq(base->next_timer, base->timer_jiffies))
		base->next_timer = __next_timer_interrupt(base);
	expires = base->next_timer;
	spin_unlock(&base->lock);

	if (time_before_eq(expires, now))
		return now;

	return cmp_next_hrtimer_event(now, expires);
}
#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	rcu_check_callbacks(cpu, user_tick);
	printk_tick();
	perf_event_do_pending();
	scheduler_tick();
	run_posix_cpu_timers(p);
}

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	struct tvec_base *base = __get_cpu_var(tvec_bases);

	hrtimer_run_pending();

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	hrtimer_run_queues();
	raise_softirq(TIMER_SOFTIRQ);
	softlockup_tick();
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */

void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	update_wall_time();
	calc_global_load();
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility? This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
	return alarm_setitimer(seconds);
}

#endif

#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid. The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(current->real_parent);
	rcu_read_unlock();

	return pid;
}

SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return current_uid();
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return current_euid();
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return current_gid();
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return current_egid();
}

#endif

static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
	schedule();
	del_singleshot_timer_sync(&timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer);

	timeout = expire - jiffies;

out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
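
/*
 * Typical use, as a sketch: sleep for about two seconds, waking early
 * on a signal:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(2 * HZ);
 *
 * A return of 0 means the full timeout elapsed; a nonzero return is
 * the number of jiffies that were still left when we were woken.
 */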

/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}

/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec tp;

	memset(info, 0, sizeof(struct sysinfo));

	ktime_get_ts(&tp);
	monotonic_to_bootbased(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels. If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1. This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}
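
/*
 * Scaling example for the loop above: with mem_unit == 4096 (one 4k
 * page), bitcount ends up as 12, so every memory field is shifted
 * left by 12 and mem_unit is reported as a single byte - provided
 * none of the doublings of mem_total overflowed along the way.
 */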

SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}

static int __cpuinit init_timers_cpu(int cpu)
{
	int j;
	struct tvec_base *base;
	static char __cpuinitdata tvec_base_done[NR_CPUS];

	if (!tvec_base_done[cpu]) {
		static char boot_done;

		if (boot_done) {
			/*
			 * The APs use this path later in boot
			 */
			base = kmalloc_node(sizeof(*base),
					    GFP_KERNEL | __GFP_ZERO,
					    cpu_to_node(cpu));
			if (!base)
				return -ENOMEM;

			/* Make sure that tvec_base is 2 byte aligned */
			if (tbase_get_deferrable(base)) {
				WARN_ON(1);
				kfree(base);
				return -ENOMEM;
			}
			per_cpu(tvec_bases, cpu) = base;
		} else {
			/*
			 * This is for the boot CPU - we use compile-time
			 * static initialisation because per-cpu memory isn't
			 * ready yet and because the memory allocators are not
			 * initialised either.
			 */
			boot_done = 1;
			base = &boot_tvec_bases;
		}
		tvec_base_done[cpu] = 1;
	} else {
		base = per_cpu(tvec_bases, cpu);
	}

	spin_lock_init(&base->lock);

	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
	base->next_timer = base->timer_jiffies;
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_first_entry(head, struct timer_list, entry);
		detach_timer(timer, 0);
		timer_set_base(timer, new_base);
		if (time_before(timer->expires, new_base->next_timer) &&
		    !tbase_get_deferrable(timer->base))
			new_base->next_timer = timer->expires;
		internal_add_timer(new_base, timer);
	}
}

static void __cpuinit migrate_timers(int cpu)
{
	struct tvec_base *old_base;
	struct tvec_base *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
	new_base = get_cpu_var(tvec_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	spin_lock_irq(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	spin_unlock(&old_base->lock);
	spin_unlock_irq(&new_base->lock);
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit timer_cpu_notify(struct notifier_block *self,
				      unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (init_timers_cpu(cpu) < 0)
			return NOTIFY_BAD;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata timers_nb = {
	.notifier_call	= timer_cpu_notify,
};


void __init init_timers(void)
{
	int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
				   (void *)(long)smp_processor_id());

	init_timer_stats();

	BUG_ON(err == NOTIFY_BAD);
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);