/*
 * linux/kernel/timer.c
 *
 * Kernel internal timers, basic process system calls
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
 *
 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
 *            "A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1998-12-24 Fixed an xtime SMP race (we need the xtime_lock rw spinlock to
 *            serialize accesses to xtime/lost_ticks).
 *                          Copyright (C) 1998 Andrea Arcangeli
 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
 *                          Copyright (C) 2000, 2001, 2002 Ingo Molnar
 *            Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

struct tvec {
        struct list_head vec[TVN_SIZE];
};

struct tvec_root {
        struct list_head vec[TVR_SIZE];
};

struct tvec_base {
        spinlock_t lock;
        struct timer_list *running_timer;
        unsigned long timer_jiffies;
        struct tvec_root tv1;
        struct tvec tv2;
        struct tvec tv3;
        struct tvec tv4;
        struct tvec tv5;
} ____cacheline_aligned;

struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

/*
 * Note that all tvec_bases are at least 2 byte aligned, so the lower bit
 * of the base pointer stored in timer_list is guaranteed to be zero. Use
 * that LSB as a flag to indicate whether the timer is deferrable.
 */
#define TBASE_DEFERRABLE_FLAG           (0x1)

/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
        return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
        return ((struct tvec_base *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
}

static inline void timer_set_deferrable(struct timer_list *timer)
{
        timer->base = ((struct tvec_base *)((unsigned long)(timer->base) |
                                       TBASE_DEFERRABLE_FLAG));
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
        timer->base = (struct tvec_base *)((unsigned long)(new_base) |
                                      tbase_get_deferrable(timer->base));
}

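/*
 * Illustrative sketch (editor's note, not part of the original source):
 * how the deferrable flag rides in the low bit of the base pointer.
 * Assuming a tvec_base allocated at an even address, say 0xf000:
 *
 *      timer_set_deferrable(timer);
 *      // timer->base is now 0xf001 (pointer | TBASE_DEFERRABLE_FLAG)
 *      tbase_get_deferrable(timer->base);      // -> 1
 *      tbase_get_base(timer->base);            // -> 0xf000, usable pointer
 *
 * timer_set_base() preserves the flag bit while swapping the pointer,
 * so migrating a deferrable timer to another base keeps it deferrable.
 */
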
static unsigned long round_jiffies_common(unsigned long j, int cpu,
                bool force_up)
{
        int rem;
        unsigned long original = j;

        /*
         * We don't want all cpus firing their timers at once hitting the
         * same lock or cachelines, so we skew each extra cpu with an extra
         * 3 jiffies. This 3 jiffies came originally from the mm/ code which
         * already did this.
         * The skew is done by adding 3*cpunr, then rounding, then
         * subtracting this extra offset again.
         */
        j += cpu * 3;

        rem = j % HZ;

        /*
         * If the target jiffy is just after a whole second (which can happen
         * due to delays of the timer irq, long irq off times etc etc) then
         * we should round down to the whole second, not up. Use 1/4th second
         * as cutoff for this rounding as an extreme upper bound for this.
         * But never round down if @force_up is set.
         */
        if (rem < HZ/4 && !force_up) /* round down */
                j = j - rem;
        else /* round up */
                j = j - rem + HZ;

        /* now that we have rounded, subtract the extra skew again */
        j -= cpu * 3;

        if (j <= jiffies) /* rounding ate our timeout entirely; */
                return original;
        return j;
}

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
        return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
        unsigned long j0 = jiffies;

        /* Use j0 because jiffies might change while we run */
        return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
        return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
        return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

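/*
 * Worked example (editor's note, not part of the original source):
 * HZ=1000, cpu=2, jiffies=10000 and j=11100.
 *
 *      j += 2 * 3;             // 11106: apply the per-cpu skew
 *      rem = 11106 % 1000;     // 106, which is below HZ/4
 *      j -= rem;               // 11000: round down to the whole second
 *      j -= 2 * 3;             // 10994: remove the skew again
 *
 * CPU 2 fires 6 jiffies before the second boundary while CPU 0 fires
 * exactly on it: wakeups are batched per CPU without colliding across
 * CPUs. A driver with a hypothetical my_timer might simply use:
 *
 *      mod_timer(&my_timer, round_jiffies(jiffies + 5 * HZ));
 */
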
/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
        return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
        unsigned long j0 = jiffies;

        /* Use j0 because jiffies might change while we run */
        return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
        return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
        return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);


static inline void set_running_timer(struct tvec_base *base,
                                     struct timer_list *timer)
{
#ifdef CONFIG_SMP
        base->running_timer = timer;
#endif
}

static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
        unsigned long expires = timer->expires;
        unsigned long idx = expires - base->timer_jiffies;
        struct list_head *vec;

        if (idx < TVR_SIZE) {
                int i = expires & TVR_MASK;
                vec = base->tv1.vec + i;
        } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
                int i = (expires >> TVR_BITS) & TVN_MASK;
                vec = base->tv2.vec + i;
        } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
                vec = base->tv3.vec + i;
        } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
                vec = base->tv4.vec + i;
        } else if ((signed long) idx < 0) {
                /*
                 * Can happen if you add a timer with expires == jiffies,
                 * or you set a timer to go off in the past
                 */
                vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
        } else {
                int i;
                /* If the timeout is larger than 0xffffffff on 64-bit
                 * architectures then we use the maximum timeout:
                 */
                if (idx > 0xffffffffUL) {
                        idx = 0xffffffffUL;
                        expires = idx + base->timer_jiffies;
                }
                i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
                vec = base->tv5.vec + i;
        }
        /*
         * Timers are FIFO:
         */
        list_add_tail(&timer->entry, vec);
}

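/*
 * Worked example (editor's note, not part of the original source), using
 * the default !CONFIG_BASE_SMALL geometry (TVR_BITS=8, TVN_BITS=6):
 *
 *      idx < 256               -> tv1, slot = expires & 255
 *      idx < 256*64            -> tv2, slot = (expires >> 8) & 63
 *      idx < 256*64*64         -> tv3, and so on up to tv5
 *
 * E.g. with base->timer_jiffies = 1000 and expires = 1300, idx = 300
 * selects tv2 and slot = (1300 >> 8) & 63 = 5. The timer sits there
 * until the cascade in __run_timers() below moves it into tv1, from
 * which it finally expires.
 */
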
#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
        if (timer->start_site)
                return;

        timer->start_site = addr;
        memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
        timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
        unsigned int flag = 0;

        if (unlikely(tbase_get_deferrable(timer->base)))
                flag |= TIMER_STATS_FLAG_DEFERRABLE;

        timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
                                 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                del_timer_sync(timer);
                debug_object_init(timer, &timer_debug_descr);
                return 1;
        default:
                return 0;
        }
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {

        case ODEBUG_STATE_NOTAVAILABLE:
                /*
                 * This is not really a fixup. The timer was
                 * statically initialized. We just make sure that it
                 * is tracked in the object tracker.
                 */
                if (timer->entry.next == NULL &&
                    timer->entry.prev == TIMER_ENTRY_STATIC) {
                        debug_object_init(timer, &timer_debug_descr);
                        debug_object_activate(timer, &timer_debug_descr);
                        return 0;
                } else {
                        WARN_ON_ONCE(1);
                }
                return 0;

        case ODEBUG_STATE_ACTIVE:
                WARN_ON(1);

        default:
                return 0;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                del_timer_sync(timer);
                debug_object_free(timer, &timer_debug_descr);
                return 1;
        default:
                return 0;
        }
}

static struct debug_obj_descr timer_debug_descr = {
        .name           = "timer_list",
        .fixup_init     = timer_fixup_init,
        .fixup_activate = timer_fixup_activate,
        .fixup_free     = timer_fixup_free,
};

static inline void debug_timer_init(struct timer_list *timer)
{
        debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
        debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
        debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
        debug_object_free(timer, &timer_debug_descr);
}

static void __init_timer(struct timer_list *timer,
                         const char *name,
                         struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer,
                             const char *name,
                             struct lock_class_key *key)
{
        debug_object_init_on_stack(timer, &timer_debug_descr);
        __init_timer(timer, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
        debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
#endif

static void __init_timer(struct timer_list *timer,
                         const char *name,
                         struct lock_class_key *key)
{
        timer->entry.next = NULL;
        timer->base = __raw_get_cpu_var(tvec_bases);
#ifdef CONFIG_TIMER_STATS
        timer->start_site = NULL;
        timer->start_pid = -1;
        memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
        lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer,
                    const char *name,
                    struct lock_class_key *key)
{
        debug_timer_init(timer);
        __init_timer(timer, name, key);
}
EXPORT_SYMBOL(init_timer_key);
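
/*
 * Typical setup sequence (editor's sketch, not part of the original
 * source; my_func, my_data and my_timer are hypothetical). Most callers
 * use the init_timer()/setup_timer() wrappers from <linux/timer.h>,
 * which expand to init_timer_key() with an automatically generated
 * lockdep key:
 *
 *      static struct timer_list my_timer;
 *
 *      setup_timer(&my_timer, my_func, (unsigned long)my_data);
 *      mod_timer(&my_timer, jiffies + HZ);
 *
 * A timer living on the stack must instead pair init_timer_on_stack()
 * with destroy_timer_on_stack() so the debug-objects tracker stays
 * consistent.
 */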

void init_timer_deferrable_key(struct timer_list *timer,
                               const char *name,
                               struct lock_class_key *key)
{
        init_timer_key(timer, name, key);
        timer_set_deferrable(timer);
}
EXPORT_SYMBOL(init_timer_deferrable_key);

static inline void detach_timer(struct timer_list *timer,
                                int clear_pending)
{
        struct list_head *entry = &timer->entry;

        debug_timer_deactivate(timer);

        __list_del(entry->prev, entry->next);
        if (clear_pending)
                entry->next = NULL;
        entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
                                         unsigned long *flags)
        __acquires(timer->base->lock)
{
        struct tvec_base *base;

        for (;;) {
                struct tvec_base *prelock_base = timer->base;
                base = tbase_get_base(prelock_base);
                if (likely(base != NULL)) {
                        spin_lock_irqsave(&base->lock, *flags);
                        if (likely(prelock_base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU */
                        spin_unlock_irqrestore(&base->lock, *flags);
                }
                cpu_relax();
        }
}

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
{
        struct tvec_base *base, *new_base;
        unsigned long flags;
        int ret;

        ret = 0;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(!timer->function);

        base = lock_timer_base(timer, &flags);

        if (timer_pending(timer)) {
                detach_timer(timer, 0);
                ret = 1;
        } else {
                if (pending_only)
                        goto out_unlock;
        }

        debug_timer_activate(timer);

        new_base = __get_cpu_var(tvec_bases);

        if (base != new_base) {
                /*
                 * We are trying to schedule the timer on the local CPU.
                 * However we can't change timer's base while it is running,
                 * otherwise del_timer_sync() can't detect that the timer's
                 * handler has not yet finished. This also guarantees that
                 * the timer is serialized wrt itself.
                 */
                if (likely(base->running_timer != timer)) {
                        /* See the comment in lock_timer_base() */
                        timer_set_base(timer, NULL);
                        spin_unlock(&base->lock);
                        base = new_base;
                        spin_lock(&base->lock);
                        timer_set_base(timer, base);
                }
        }

        timer->expires = expires;
        internal_add_timer(base, timer);

out_unlock:
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
        return __mod_timer(timer, expires, true);
}
EXPORT_SYMBOL(mod_timer_pending);

/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
         * to be the same thing then just return:
         */
        if (timer->expires == expires && timer_pending(timer))
                return 1;

        return __mod_timer(timer, expires, false);
}
EXPORT_SYMBOL(mod_timer);

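/*
 * Common self-rearming pattern (editor's sketch, not part of the
 * original source; my_poll, my_timer and do_work are hypothetical).
 * A handler that wants to run roughly once a second re-arms itself:
 *
 *      static void my_poll(unsigned long data)
 *      {
 *              do_work((void *)data);
 *              mod_timer(&my_timer, round_jiffies(jiffies + HZ));
 *      }
 *
 * This is safe even though the timer is "running": __run_timers()
 * detaches the timer before calling the handler, so mod_timer() here
 * simply queues it again.
 */
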
/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
        BUG_ON(timer_pending(timer));
        mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
        struct tvec_base *base = per_cpu(tvec_bases, cpu);
        unsigned long flags;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(timer_pending(timer) || !timer->function);
        spin_lock_irqsave(&base->lock, flags);
        timer_set_base(timer, base);
        debug_timer_activate(timer);
        internal_add_timer(base, timer);
        /*
         * Check whether the other CPU is idle and needs to be
         * triggered to reevaluate the timer wheel when nohz is
         * active. We are protected against the other CPU fiddling
         * with the timer by holding the timer base lock. This also
         * makes sure that a CPU on the way to idle can not evaluate
         * the timer wheel.
         */
        wake_up_idle_cpu(cpu);
        spin_unlock_irqrestore(&base->lock, flags);
}

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
        struct tvec_base *base;
        unsigned long flags;
        int ret = 0;

        timer_stats_timer_clear_start_info(timer);
        if (timer_pending(timer)) {
                base = lock_timer_base(timer, &flags);
                if (timer_pending(timer)) {
                        detach_timer(timer, 1);
                        ret = 1;
                }
                spin_unlock_irqrestore(&base->lock, flags);
        }

        return ret;
}
EXPORT_SYMBOL(del_timer);

#ifdef CONFIG_SMP
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: timer to del
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
        struct tvec_base *base;
        unsigned long flags;
        int ret = -1;

        base = lock_timer_base(timer, &flags);

        if (base->running_timer == timer)
                goto out;

        ret = 0;
        if (timer_pending(timer)) {
                detach_timer(timer, 1);
                ret = 1;
        }
out:
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
        unsigned long flags;

        local_irq_save(flags);
        lock_map_acquire(&timer->lockdep_map);
        lock_map_release(&timer->lockdep_map);
        local_irq_restore(flags);
#endif

        for (;;) {
                int ret = try_to_del_timer_sync(timer);
                if (ret >= 0)
                        return ret;
                cpu_relax();
        }
}
EXPORT_SYMBOL(del_timer_sync);
#endif

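/*
 * Teardown pattern (editor's sketch, not part of the original source;
 * 'dev' and its my_timer member are hypothetical). On module unload or
 * device removal, make sure neither the queued timer nor a concurrently
 * running handler can touch freed memory:
 *
 *      del_timer_sync(&dev->my_timer);  // waits for a running handler
 *      kfree(dev);
 *
 * Mind the rule from the kerneldoc above: do not call del_timer_sync()
 * while holding a lock the handler itself takes, or it will deadlock.
 * try_to_del_timer_sync() is the non-blocking variant for contexts that
 * cannot wait; it returns -1 if the handler is currently running.
 */
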
static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
        /* cascade all the timers from tv up one level */
        struct timer_list *timer, *tmp;
        struct list_head tv_list;

        list_replace_init(tv->vec + index, &tv_list);

        /*
         * We are removing _all_ timers from the list, so we
         * don't have to detach them individually.
         */
        list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
                BUG_ON(tbase_get_base(timer->base) != base);
                internal_add_timer(base, timer);
        }

        return index;
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
        struct timer_list *timer;

        spin_lock_irq(&base->lock);
        while (time_after_eq(jiffies, base->timer_jiffies)) {
                struct list_head work_list;
                struct list_head *head = &work_list;
                int index = base->timer_jiffies & TVR_MASK;

                /*
                 * Cascade timers:
                 */
                if (!index &&
                        (!cascade(base, &base->tv2, INDEX(0))) &&
                                (!cascade(base, &base->tv3, INDEX(1))) &&
                                        !cascade(base, &base->tv4, INDEX(2)))
                        cascade(base, &base->tv5, INDEX(3));
                ++base->timer_jiffies;
                list_replace_init(base->tv1.vec + index, &work_list);
                while (!list_empty(head)) {
                        void (*fn)(unsigned long);
                        unsigned long data;

                        timer = list_first_entry(head, struct timer_list, entry);
                        fn = timer->function;
                        data = timer->data;

                        timer_stats_account_timer(timer);

                        set_running_timer(base, timer);
                        detach_timer(timer, 1);

                        spin_unlock_irq(&base->lock);
                        {
                                int preempt_count = preempt_count();

#ifdef CONFIG_LOCKDEP
                                /*
                                 * It is permissible to free the timer from
                                 * inside the function that is called from
                                 * it, this we need to take into account for
                                 * lockdep too. To avoid bogus "held lock
                                 * freed" warnings as well as problems when
                                 * looking into timer->lockdep_map, make a
                                 * copy and use that here.
                                 */
                                struct lockdep_map lockdep_map =
                                        timer->lockdep_map;
#endif
                                /*
                                 * Couple the lock chain with the lock chain at
                                 * del_timer_sync() by acquiring the lock_map
                                 * around the fn() call here and in
                                 * del_timer_sync().
                                 */
                                lock_map_acquire(&lockdep_map);

                                fn(data);

                                lock_map_release(&lockdep_map);

                                if (preempt_count != preempt_count()) {
                                        printk(KERN_ERR "huh, entered %p "
                                               "with preempt_count %08x, exited"
                                               " with %08x?\n",
                                               fn, preempt_count,
                                               preempt_count());
                                        BUG();
                                }
                        }
                        spin_lock_irq(&base->lock);
                }
        }
        set_running_timer(base, NULL);
        spin_unlock_irq(&base->lock);
}

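/*
 * Cascade cadence (editor's note, not part of the original source).
 * With TVR_BITS=8 the low byte of timer_jiffies wraps every 256 ticks,
 * so 'index' hits 0 and tv2 cascades once per 256 jiffies; tv3 cascades
 * only when tv2's index wraps as well (every 256*64 jiffies), and so
 * on. Each cascade re-inserts timers via internal_add_timer(), which
 * now places them in a lower wheel level because their remaining delta
 * has shrunk.
 */
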
#ifdef CONFIG_NO_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a cpu is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
        unsigned long timer_jiffies = base->timer_jiffies;
        unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
        int index, slot, array, found = 0;
        struct timer_list *nte;
        struct tvec *varray[4];

        /* Look for timer events in tv1. */
        index = slot = timer_jiffies & TVR_MASK;
        do {
                list_for_each_entry(nte, base->tv1.vec + slot, entry) {
                        if (tbase_get_deferrable(nte->base))
                                continue;

                        found = 1;
                        expires = nte->expires;
                        /* Look at the cascade bucket(s)? */
                        if (!index || slot < index)
                                goto cascade;
                        return expires;
                }
                slot = (slot + 1) & TVR_MASK;
        } while (slot != index);

cascade:
        /* Calculate the next cascade event */
        if (index)
                timer_jiffies += TVR_SIZE - index;
        timer_jiffies >>= TVR_BITS;

        /* Check tv2-tv5. */
        varray[0] = &base->tv2;
        varray[1] = &base->tv3;
        varray[2] = &base->tv4;
        varray[3] = &base->tv5;

        for (array = 0; array < 4; array++) {
                struct tvec *varp = varray[array];

                index = slot = timer_jiffies & TVN_MASK;
                do {
                        list_for_each_entry(nte, varp->vec + slot, entry) {
                                found = 1;
                                if (time_before(nte->expires, expires))
                                        expires = nte->expires;
                        }
                        /*
                         * Are we still searching for the first timer, or are
                         * we looking up the cascade buckets?
                         */
                        if (found) {
                                /* Look at the cascade bucket(s)? */
                                if (!index || slot < index)
                                        break;
                                return expires;
                        }
                        slot = (slot + 1) & TVN_MASK;
                } while (slot != index);

                if (index)
                        timer_jiffies += TVN_SIZE - index;
                timer_jiffies >>= TVN_BITS;
        }
        return expires;
}

/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
                                            unsigned long expires)
{
        ktime_t hr_delta = hrtimer_get_next_event();
        struct timespec tsdelta;
        unsigned long delta;

        if (hr_delta.tv64 == KTIME_MAX)
                return expires;

        /*
         * Expired timer available, let it expire in the next tick
         */
        if (hr_delta.tv64 <= 0)
                return now + 1;

        tsdelta = ktime_to_timespec(hr_delta);
        delta = timespec_to_jiffies(&tsdelta);

        /*
         * Limit the delta to the max value, which is checked in
         * tick_nohz_stop_sched_tick():
         */
        if (delta > NEXT_TIMER_MAX_DELTA)
                delta = NEXT_TIMER_MAX_DELTA;

        /*
         * Take rounding errors into account and make sure that it
         * expires in the next tick. Otherwise we go into an endless
         * ping pong due to tick_nohz_stop_sched_tick() retriggering
         * the timer softirq
         */
        if (delta < 1)
                delta = 1;
        now += delta;
        if (time_before(now, expires))
                return now;
        return expires;
}

/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
        struct tvec_base *base = __get_cpu_var(tvec_bases);
        unsigned long expires;

        spin_lock(&base->lock);
        expires = __next_timer_interrupt(base);
        spin_unlock(&base->lock);

        if (time_before_eq(expires, now))
                return now;

        return cmp_next_hrtimer_event(now, expires);
}
#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
        struct task_struct *p = current;
        int cpu = smp_processor_id();

        /* Note: this timer irq context must be accounted for as well. */
        account_process_tick(p, user_tick);
        run_local_timers();
        if (rcu_pending(cpu))
                rcu_check_callbacks(cpu, user_tick);
        printk_tick();
        scheduler_tick();
        run_posix_cpu_timers(p);
}

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
        return nr_active() * FIXED_1;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);

/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
        unsigned long active_tasks; /* fixed-point */
        static int count = LOAD_FREQ;

        count -= ticks;
        if (unlikely(count < 0)) {
                active_tasks = count_active_tasks();
                do {
                        CALC_LOAD(avenrun[0], EXP_1, active_tasks);
                        CALC_LOAD(avenrun[1], EXP_5, active_tasks);
                        CALC_LOAD(avenrun[2], EXP_15, active_tasks);
                        count += LOAD_FREQ;
                } while (count < 0);
        }
}

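/*
 * The decay behind CALC_LOAD (editor's note, not part of the original
 * source; constants from <linux/sched.h>). Every LOAD_FREQ (5 seconds
 * worth of ticks) each average is updated in FSHIFT=11 fixed point as
 *
 *      load = (load * exp + active * (FIXED_1 - exp)) >> FSHIFT
 *
 * where exp approximates e^(-5s/60s), e^(-5s/300s) and e^(-5s/900s)
 * (EXP_1/EXP_5/EXP_15) for the 1-, 5- and 15-minute averages. E.g. with
 * one running task and an idle history, the 1-minute average after one
 * update is 1 * (2048 - 1884)/2048 ~= 0.08, the slow climb visible in
 * /proc/loadavg.
 */
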
/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
        struct tvec_base *base = __get_cpu_var(tvec_bases);

        hrtimer_run_pending();

        if (time_after_eq(jiffies, base->timer_jiffies))
                __run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
        hrtimer_run_queues();
        raise_softirq(TIMER_SOFTIRQ);
        softlockup_tick();
}

/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(unsigned long ticks)
{
        update_wall_time();
        calc_load(ticks);
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */

void do_timer(unsigned long ticks)
{
        jiffies_64 += ticks;
        update_times(ticks);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility? This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
        return alarm_setitimer(seconds);
}

#endif

#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid. The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
        return task_tgid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
        int pid;

        rcu_read_lock();
        pid = task_tgid_vnr(current->real_parent);
        rcu_read_unlock();

        return pid;
}

SYSCALL_DEFINE0(getuid)
{
        /* Only we change this so SMP safe */
        return current_uid();
}

SYSCALL_DEFINE0(geteuid)
{
        /* Only we change this so SMP safe */
        return current_euid();
}

SYSCALL_DEFINE0(getgid)
{
        /* Only we change this so SMP safe */
        return current_gid();
}

SYSCALL_DEFINE0(getegid)
{
        /* Only we change this so SMP safe */
        return current_egid();
}

#endif

static void process_timeout(unsigned long __data)
{
        wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
        struct timer_list timer;
        unsigned long expire;

        switch (timeout)
        {
        case MAX_SCHEDULE_TIMEOUT:
                /*
                 * These two special cases are useful to be comfortable
                 * in the caller. Nothing more. We could take
                 * MAX_SCHEDULE_TIMEOUT from one of the negative values
                 * but I'd like to return a valid offset (>=0) to allow
                 * the caller to do everything it wants with the retval.
                 */
                schedule();
                goto out;
        default:
                /*
                 * Another bit of PARANOID. Note that the retval will be
                 * 0 since no piece of kernel is supposed to do a check
                 * for a negative retval of schedule_timeout() (since it
                 * should never happen anyway). You just have the printk()
                 * that will tell you if something has gone wrong and where.
                 */
                if (timeout < 0) {
                        printk(KERN_ERR "schedule_timeout: wrong timeout "
                                "value %lx\n", timeout);
                        dump_stack();
                        current->state = TASK_RUNNING;
                        goto out;
                }
        }

        expire = timeout + jiffies;

        setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
        __mod_timer(&timer, expire, false);
        schedule();
        del_singleshot_timer_sync(&timer);

        /* Remove the timer from the object tracker */
        destroy_timer_on_stack(&timer);

        timeout = expire - jiffies;

out:
        return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);

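/*
 * Usage sketch (editor's example, not part of the original source).
 * The task state must be set first, otherwise schedule_timeout()
 * returns immediately. To sleep interruptibly for about 100 ms:
 *
 *      signed long remaining;
 *
 *      set_current_state(TASK_INTERRUPTIBLE);
 *      remaining = schedule_timeout(msecs_to_jiffies(100));
 *      if (remaining)
 *              ;  // woken early, e.g. by a signal
 *
 * The schedule_timeout_interruptible()/_killable()/_uninterruptible()
 * wrappers below fold the state setting into the call.
 */
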
8a1c1757 AM |
1370 | /* |
1371 | * We can use __set_current_state() here because schedule_timeout() calls | |
1372 | * schedule() unconditionally. | |
1373 | */ | |
64ed93a2 NA |
1374 | signed long __sched schedule_timeout_interruptible(signed long timeout) |
1375 | { | |
a5a0d52c AM |
1376 | __set_current_state(TASK_INTERRUPTIBLE); |
1377 | return schedule_timeout(timeout); | |
64ed93a2 NA |
1378 | } |
1379 | EXPORT_SYMBOL(schedule_timeout_interruptible); | |
1380 | ||
294d5cc2 MW |
1381 | signed long __sched schedule_timeout_killable(signed long timeout) |
1382 | { | |
1383 | __set_current_state(TASK_KILLABLE); | |
1384 | return schedule_timeout(timeout); | |
1385 | } | |
1386 | EXPORT_SYMBOL(schedule_timeout_killable); | |
1387 | ||
64ed93a2 NA |
1388 | signed long __sched schedule_timeout_uninterruptible(signed long timeout) |
1389 | { | |
a5a0d52c AM |
1390 | __set_current_state(TASK_UNINTERRUPTIBLE); |
1391 | return schedule_timeout(timeout); | |
64ed93a2 NA |
1392 | } |
1393 | EXPORT_SYMBOL(schedule_timeout_uninterruptible); | |
1394 | ||
1da177e4 | 1395 | /* Thread ID - the internal kernel "pid" */ |
58fd3aa2 | 1396 | SYSCALL_DEFINE0(gettid) |
1da177e4 | 1397 | { |
b488893a | 1398 | return task_pid_vnr(current); |
1da177e4 LT |
1399 | } |
1400 | ||
2aae4a10 | 1401 | /** |
d4d23add | 1402 | * do_sysinfo - fill in sysinfo struct |
2aae4a10 | 1403 | * @info: pointer to buffer to fill |
6819457d | 1404 | */ |
d4d23add | 1405 | int do_sysinfo(struct sysinfo *info) |
1da177e4 | 1406 | { |
1da177e4 LT |
1407 | unsigned long mem_total, sav_total; |
1408 | unsigned int mem_unit, bitcount; | |
1409 | unsigned long seq; | |
1410 | ||
d4d23add | 1411 | memset(info, 0, sizeof(struct sysinfo)); |
1da177e4 LT |
1412 | |
1413 | do { | |
1414 | struct timespec tp; | |
1415 | seq = read_seqbegin(&xtime_lock); | |
1416 | ||
1417 | /* | |
1418 | * This is annoying. The below is the same thing | |
1419 | * posix_get_clock_monotonic() does, but it wants to | |
1420 | * take the lock which we want to cover the loads stuff | |
1421 | * too. | |
1422 | */ | |
1423 | ||
1424 | getnstimeofday(&tp); | |
1425 | tp.tv_sec += wall_to_monotonic.tv_sec; | |
1426 | tp.tv_nsec += wall_to_monotonic.tv_nsec; | |
d6214141 | 1427 | monotonic_to_bootbased(&tp); |
1da177e4 LT |
1428 | if (tp.tv_nsec - NSEC_PER_SEC >= 0) { |
1429 | tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC; | |
1430 | tp.tv_sec++; | |
1431 | } | |
d4d23add | 1432 | info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0); |
1da177e4 | 1433 | |
d4d23add KM |
1434 | info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT); |
1435 | info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT); | |
1436 | info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT); | |
1da177e4 | 1437 | |
d4d23add | 1438 | info->procs = nr_threads; |
1da177e4 LT |
1439 | } while (read_seqretry(&xtime_lock, seq)); |
1440 | ||
d4d23add KM |
1441 | si_meminfo(info); |
1442 | si_swapinfo(info); | |
1da177e4 LT |
1443 | |
1444 | /* | |
1445 | * If the sum of all the available memory (i.e. ram + swap) | |
1446 | * is less than can be stored in a 32 bit unsigned long then | |
1447 | * we can be binary compatible with 2.2.x kernels. If not, | |
1448 | * well, in that case 2.2.x was broken anyways... | |
1449 | * | |
1450 | * -Erik Andersen <andersee@debian.org> | |
1451 | */ | |
1452 | ||
d4d23add KM |
1453 | mem_total = info->totalram + info->totalswap; |
1454 | if (mem_total < info->totalram || mem_total < info->totalswap) | |
1da177e4 LT |
1455 | goto out; |
1456 | bitcount = 0; | |
d4d23add | 1457 | mem_unit = info->mem_unit; |
1da177e4 LT |
1458 | while (mem_unit > 1) { |
1459 | bitcount++; | |
1460 | mem_unit >>= 1; | |
1461 | sav_total = mem_total; | |
1462 | mem_total <<= 1; | |
1463 | if (mem_total < sav_total) | |
1464 | goto out; | |
1465 | } | |
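	/*
	 * Worked example (editorial): with info->mem_unit == 4096 (one
	 * page), the loop above runs 12 times, so bitcount == 12, and
	 * mem_total is re-checked for overflow after each doubling. The
	 * shifts below then convert page counts into byte counts with
	 * mem_unit == 1.
	 */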
1466 | ||
1467 | /* | |
1468 | * If mem_total did not overflow, multiply all memory values by | |
d4d23add | 1469 | * info->mem_unit and set it to 1. This leaves things compatible |
1da177e4 LT |
1470 | * with 2.2.x, and also retains compatibility with earlier 2.4.x |
1471 | * kernels... | |
1472 | */ | |
1473 | ||
d4d23add KM |
1474 | info->mem_unit = 1; |
1475 | info->totalram <<= bitcount; | |
1476 | info->freeram <<= bitcount; | |
1477 | info->sharedram <<= bitcount; | |
1478 | info->bufferram <<= bitcount; | |
1479 | info->totalswap <<= bitcount; | |
1480 | info->freeswap <<= bitcount; | |
1481 | info->totalhigh <<= bitcount; | |
1482 | info->freehigh <<= bitcount; | |
1483 | ||
1484 | out: | |
1485 | return 0; | |
1486 | } | |
1487 | ||
1e7bfb21 | 1488 | SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info) |
d4d23add KM |
1489 | { |
1490 | struct sysinfo val; | |
1491 | ||
1492 | do_sysinfo(&val); | |
1da177e4 | 1493 | |
1da177e4 LT |
1494 | if (copy_to_user(info, &val, sizeof(struct sysinfo))) |
1495 | return -EFAULT; | |
1496 | ||
1497 | return 0; | |
1498 | } | |
1499 | ||
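/*
 * Userspace view (editorial sketch): how the struct filled in above is
 * typically consumed. Multiplying by mem_unit keeps the code correct
 * whether the kernel normalised it to 1 or left it at the page size.
 */
#if 0
#include <stdio.h>
#include <sys/sysinfo.h>

int main(void)
{
	struct sysinfo si;

	if (sysinfo(&si) == 0)
		printf("up %lds, ram %llu bytes\n", si.uptime,
		       (unsigned long long)si.totalram * si.mem_unit);
	return 0;
}
#endif
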
b4be6258 | 1500 | static int __cpuinit init_timers_cpu(int cpu) |
1da177e4 LT |
1501 | { |
1502 | int j; | |
a6fa8e5a | 1503 | struct tvec_base *base; |
b4be6258 | 1504 | static char __cpuinitdata tvec_base_done[NR_CPUS]; |
55c888d6 | 1505 | |
ba6edfcd | 1506 | if (!tvec_base_done[cpu]) { |
a4a6198b JB |
1507 | static char boot_done; |
1508 | ||
a4a6198b | 1509 | if (boot_done) { |
ba6edfcd AM |
1510 | /* |
1511 | * The secondary CPUs (APs) use this path later in boot. |
1512 | */ | |
94f6030c CL |
1513 | base = kmalloc_node(sizeof(*base), |
1514 | GFP_KERNEL | __GFP_ZERO, | |
a4a6198b JB |
1515 | cpu_to_node(cpu)); |
1516 | if (!base) | |
1517 | return -ENOMEM; | |
6e453a67 VP |
1518 | |
1519 | /* Make sure tvec_base is 2 byte aligned: the deferrable bit (LSB) must be clear */ |
1520 | if (tbase_get_deferrable(base)) { | |
1521 | WARN_ON(1); | |
1522 | kfree(base); | |
1523 | return -ENOMEM; | |
1524 | } | |
ba6edfcd | 1525 | per_cpu(tvec_bases, cpu) = base; |
a4a6198b | 1526 | } else { |
ba6edfcd AM |
1527 | /* |
1528 | * This is for the boot CPU - we use compile-time | |
1529 | * static initialisation because per-cpu memory isn't | |
1530 | * ready yet and because the memory allocators are not | |
1531 | * initialised either. | |
1532 | */ | |
a4a6198b | 1533 | boot_done = 1; |
ba6edfcd | 1534 | base = &boot_tvec_bases; |
a4a6198b | 1535 | } |
ba6edfcd AM |
1536 | tvec_base_done[cpu] = 1; |
1537 | } else { | |
1538 | base = per_cpu(tvec_bases, cpu); | |
a4a6198b | 1539 | } |
ba6edfcd | 1540 | |
3691c519 | 1541 | spin_lock_init(&base->lock); |
d730e882 | 1542 | |
1da177e4 LT |
1543 | for (j = 0; j < TVN_SIZE; j++) { |
1544 | INIT_LIST_HEAD(base->tv5.vec + j); | |
1545 | INIT_LIST_HEAD(base->tv4.vec + j); | |
1546 | INIT_LIST_HEAD(base->tv3.vec + j); | |
1547 | INIT_LIST_HEAD(base->tv2.vec + j); | |
1548 | } | |
1549 | for (j = 0; j < TVR_SIZE; j++) | |
1550 | INIT_LIST_HEAD(base->tv1.vec + j); | |
1551 | ||
1552 | base->timer_jiffies = jiffies; | |
a4a6198b | 1553 | return 0; |
1da177e4 LT |
1554 | } |
1555 | ||
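/*
 * Editorial note on the vectors initialised above: tv1 is the root wheel of
 * TVR_SIZE buckets indexed by the low bits of an expiry time; tv2..tv5 each
 * add TVN_BITS of range at coarser granularity, and expiring buckets are
 * cascaded down one level as base->timer_jiffies advances.
 */
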
1556 | #ifdef CONFIG_HOTPLUG_CPU | |
a6fa8e5a | 1557 | static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head) |
1da177e4 LT |
1558 | { |
1559 | struct timer_list *timer; | |
1560 | ||
1561 | while (!list_empty(head)) { | |
b5e61818 | 1562 | timer = list_first_entry(head, struct timer_list, entry); |
55c888d6 | 1563 | detach_timer(timer, 0); |
6e453a67 | 1564 | timer_set_base(timer, new_base); |
1da177e4 | 1565 | internal_add_timer(new_base, timer); |
1da177e4 | 1566 | } |
1da177e4 LT |
1567 | } |
1568 | ||
48ccf3da | 1569 | static void __cpuinit migrate_timers(int cpu) |
1da177e4 | 1570 | { |
a6fa8e5a PM |
1571 | struct tvec_base *old_base; |
1572 | struct tvec_base *new_base; | |
1da177e4 LT |
1573 | int i; |
1574 | ||
1575 | BUG_ON(cpu_online(cpu)); | |
a4a6198b JB |
1576 | old_base = per_cpu(tvec_bases, cpu); |
1577 | new_base = get_cpu_var(tvec_bases); | |
d82f0b0f ON |
1578 | /* |
1579 | * The caller is globally serialized and nobody else | |
1580 | * takes two locks at once, so deadlock is not possible. |
1581 | */ | |
1582 | spin_lock_irq(&new_base->lock); | |
0d180406 | 1583 | spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); |
3691c519 ON |
1584 | |
1585 | BUG_ON(old_base->running_timer); | |
1da177e4 | 1586 | |
1da177e4 | 1587 | for (i = 0; i < TVR_SIZE; i++) |
55c888d6 ON |
1588 | migrate_timer_list(new_base, old_base->tv1.vec + i); |
1589 | for (i = 0; i < TVN_SIZE; i++) { | |
1590 | migrate_timer_list(new_base, old_base->tv2.vec + i); | |
1591 | migrate_timer_list(new_base, old_base->tv3.vec + i); | |
1592 | migrate_timer_list(new_base, old_base->tv4.vec + i); | |
1593 | migrate_timer_list(new_base, old_base->tv5.vec + i); | |
1594 | } | |
1595 | ||
0d180406 | 1596 | spin_unlock(&old_base->lock); |
d82f0b0f | 1597 | spin_unlock_irq(&new_base->lock); |
1da177e4 | 1598 | put_cpu_var(tvec_bases); |
1da177e4 LT |
1599 | } |
1600 | #endif /* CONFIG_HOTPLUG_CPU */ | |
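
/*
 * Editorial note: old_base->lock and new_base->lock share one lock class,
 * so a plain spin_lock() on the second would trigger lockdep's recursive
 * locking report. spin_lock_nested(..., SINGLE_DEPTH_NESTING) marks the
 * second acquisition as intentional, bounded nesting.
 */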
1601 | ||
8c78f307 | 1602 | static int __cpuinit timer_cpu_notify(struct notifier_block *self, |
1da177e4 LT |
1603 | unsigned long action, void *hcpu) |
1604 | { | |
1605 | long cpu = (long)hcpu; | |
1606 | switch (action) { |
1607 | case CPU_UP_PREPARE: | |
8bb78442 | 1608 | case CPU_UP_PREPARE_FROZEN: |
a4a6198b JB |
1609 | if (init_timers_cpu(cpu) < 0) |
1610 | return NOTIFY_BAD; | |
1da177e4 LT |
1611 | break; |
1612 | #ifdef CONFIG_HOTPLUG_CPU | |
1613 | case CPU_DEAD: | |
8bb78442 | 1614 | case CPU_DEAD_FROZEN: |
1da177e4 LT |
1615 | migrate_timers(cpu); |
1616 | break; | |
1617 | #endif | |
1618 | default: | |
1619 | break; | |
1620 | } | |
1621 | return NOTIFY_OK; | |
1622 | } | |
1623 | ||
8c78f307 | 1624 | static struct notifier_block __cpuinitdata timers_nb = { |
1da177e4 LT |
1625 | .notifier_call = timer_cpu_notify, |
1626 | }; | |
1627 | ||
1628 | ||
1629 | void __init init_timers(void) | |
1630 | { | |
07dccf33 | 1631 | int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, |
1da177e4 | 1632 | (void *)(long)smp_processor_id()); |
07dccf33 | 1633 | |
82f67cd9 IM |
1634 | init_timer_stats(); |
1635 | ||
07dccf33 | 1636 | BUG_ON(err == NOTIFY_BAD); |
1da177e4 | 1637 | register_cpu_notifier(&timers_nb); |
962cf36c | 1638 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq); |
1da177e4 LT |
1639 | } |
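
/*
 * Editorial note: after open_softirq() above, the timer softirq is raised
 * from the periodic tick (update_process_times() -> run_local_timers()),
 * which is what eventually drives run_timer_softirq() on each CPU.
 */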
1640 | ||
1da177e4 LT |
1641 | /** |
1642 | * msleep - sleep safely even with waitqueue interruptions | |
1643 | * @msecs: Time in milliseconds to sleep for | |
1644 | */ | |
1645 | void msleep(unsigned int msecs) | |
1646 | { | |
1647 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | |
1648 | ||
75bcc8c5 NA |
1649 | while (timeout) |
1650 | timeout = schedule_timeout_uninterruptible(timeout); | |
1da177e4 LT |
1651 | } |
1652 | ||
1653 | EXPORT_SYMBOL(msleep); | |
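/*
 * Editorial sketch: msleep() guarantees *at least* msecs of sleep; the +1
 * jiffy above covers starting mid-tick, so short sleeps can overshoot
 * (e.g. msleep(1) at HZ=100 sleeps roughly 20ms).
 */
#if 0
	msleep(20);	/* sleep at least 20ms, uninterruptible */
#endif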
1654 | ||
1655 | /** | |
96ec3efd | 1656 | * msleep_interruptible - sleep waiting for signals |
1da177e4 LT |
1657 | * @msecs: Time in milliseconds to sleep for |
1658 | */ | |
1659 | unsigned long msleep_interruptible(unsigned int msecs) | |
1660 | { | |
1661 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | |
1662 | ||
75bcc8c5 NA |
1663 | while (timeout && !signal_pending(current)) |
1664 | timeout = schedule_timeout_interruptible(timeout); | |
1da177e4 LT |
1665 | return jiffies_to_msecs(timeout); |
1666 | } | |
1667 | ||
1668 | EXPORT_SYMBOL(msleep_interruptible); |
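
/*
 * Editorial sketch: unlike msleep(), the return value reports how much of
 * the requested delay was cut short by a signal.
 */
#if 0
	unsigned long left = msleep_interruptible(500);
	if (left)
		;	/* a signal arrived with ~'left' ms of the 500 remaining */
#endif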