Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/kernel/timer.c | |
3 | * | |
8524070b | 4 | * Kernel internal timers, basic process system calls |
1da177e4 LT |
5 | * |
6 | * Copyright (C) 1991, 1992 Linus Torvalds | |
7 | * | |
8 | * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better. | |
9 | * | |
10 | * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 | |
11 | * "A Kernel Model for Precision Timekeeping" by Dave Mills | |
12 | * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to | |
13 | * serialize accesses to xtime/lost_ticks). | |
14 | * Copyright (C) 1998 Andrea Arcangeli | |
15 | * 1999-03-10 Improved NTP compatibility by Ulrich Windl | |
16 | * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love | |
17 | * 2000-10-05 Implemented scalable SMP per-CPU timer handling. | |
18 | * Copyright (C) 2000, 2001, 2002 Ingo Molnar | |
19 | * Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar | |
20 | */ | |
21 | ||
22 | #include <linux/kernel_stat.h> | |
23 | #include <linux/module.h> | |
24 | #include <linux/interrupt.h> | |
25 | #include <linux/percpu.h> | |
26 | #include <linux/init.h> | |
27 | #include <linux/mm.h> | |
28 | #include <linux/swap.h> | |
b488893a | 29 | #include <linux/pid_namespace.h> |
1da177e4 LT |
30 | #include <linux/notifier.h> |
31 | #include <linux/thread_info.h> | |
32 | #include <linux/time.h> | |
33 | #include <linux/jiffies.h> | |
34 | #include <linux/posix-timers.h> | |
35 | #include <linux/cpu.h> | |
36 | #include <linux/syscalls.h> | |
97a41e26 | 37 | #include <linux/delay.h> |
79bf2bb3 | 38 | #include <linux/tick.h> |
82f67cd9 | 39 | #include <linux/kallsyms.h> |
1da177e4 LT |
40 | |
41 | #include <asm/uaccess.h> | |
42 | #include <asm/unistd.h> | |
43 | #include <asm/div64.h> | |
44 | #include <asm/timex.h> | |
45 | #include <asm/io.h> | |
46 | ||
ecea8d19 TG |
47 | u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; |
48 | ||
49 | EXPORT_SYMBOL(jiffies_64); | |
50 | ||
1da177e4 LT |
51 | /* |
52 | * per-CPU timer vector definitions: | |
53 | */ | |
1da177e4 LT |
54 | #define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6) |
55 | #define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8) | |
56 | #define TVN_SIZE (1 << TVN_BITS) | |
57 | #define TVR_SIZE (1 << TVR_BITS) | |
58 | #define TVN_MASK (TVN_SIZE - 1) | |
59 | #define TVR_MASK (TVR_SIZE - 1) | |
60 | ||
61 | typedef struct tvec_s { | |
62 | struct list_head vec[TVN_SIZE]; | |
63 | } tvec_t; | |
64 | ||
65 | typedef struct tvec_root_s { | |
66 | struct list_head vec[TVR_SIZE]; | |
67 | } tvec_root_t; | |
68 | ||
69 | struct tvec_t_base_s { | |
3691c519 ON |
70 | spinlock_t lock; |
71 | struct timer_list *running_timer; | |
1da177e4 | 72 | unsigned long timer_jiffies; |
1da177e4 LT |
73 | tvec_root_t tv1; |
74 | tvec_t tv2; | |
75 | tvec_t tv3; | |
76 | tvec_t tv4; | |
77 | tvec_t tv5; | |
6e453a67 | 78 | } ____cacheline_aligned; |
1da177e4 LT |
79 | |
80 | typedef struct tvec_t_base_s tvec_base_t; | |
ba6edfcd | 81 | |
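Taken together, tv1..tv5 form a hierarchical timer wheel: tv1 resolves single jiffies across the next TVR_SIZE ticks, and each higher vector covers TVN_BITS more bits of the expiry value at correspondingly coarser granularity. A minimal userspace sketch of where a given delta lands, assuming the !CONFIG_BASE_SMALL sizes (TVR_BITS = 8, TVN_BITS = 6); `wheel_level()` is an illustrative helper, not kernel code:

```c
#include <stdio.h>

/* Which wheel level a timeout delta ("idx" in internal_add_timer())
 * lands in, for TVR_BITS = 8 and TVN_BITS = 6. Illustrative only. */
static int wheel_level(unsigned long idx)
{
	if (idx < (1UL << 8))          return 1;   /* tv1: < 256 jiffies */
	if (idx < (1UL << (8 + 6)))    return 2;   /* tv2: < 16384       */
	if (idx < (1UL << (8 + 2*6)))  return 3;   /* tv3: < ~1.05M      */
	if (idx < (1UL << (8 + 3*6)))  return 4;   /* tv4: < ~67.1M      */
	return 5;                                  /* tv5: the rest      */
}

int main(void)
{
	/* With HZ = 250: one second, one minute, one day. */
	printf("1s -> tv%d, 1min -> tv%d, 1day -> tv%d\n",
	       wheel_level(250), wheel_level(250UL * 60),
	       wheel_level(250UL * 60 * 60 * 24));
	return 0;
}
```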
3691c519 ON |
82 | tvec_base_t boot_tvec_bases; |
83 | EXPORT_SYMBOL(boot_tvec_bases); | |
51d8c5ed | 84 | static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases; |
1da177e4 | 85 | |
6e453a67 VP |
86 | /* |
87 | * Note that all tvec_bases are 2 byte aligned and the lower bit of | |
88 | * base in timer_list is guaranteed to be zero. Use the LSB to | |
89 | * indicate whether the timer is deferrable. | |
90 | */ | |
91 | #define TBASE_DEFERRABLE_FLAG (0x1) | |
92 | ||
93 | /* Functions below help us manage 'deferrable' flag */ | |
94 | static inline unsigned int tbase_get_deferrable(tvec_base_t *base) | |
95 | { | |
e9910846 | 96 | return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG); |
6e453a67 VP |
97 | } |
98 | ||
99 | static inline tvec_base_t *tbase_get_base(tvec_base_t *base) | |
100 | { | |
e9910846 | 101 | return ((tvec_base_t *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG)); |
6e453a67 VP |
102 | } |
103 | ||
104 | static inline void timer_set_deferrable(struct timer_list *timer) | |
105 | { | |
e9910846 | 106 | timer->base = ((tvec_base_t *)((unsigned long)(timer->base) | |
6819457d | 107 | TBASE_DEFERRABLE_FLAG)); |
6e453a67 VP |
108 | } |
109 | ||
110 | static inline void | |
111 | timer_set_base(struct timer_list *timer, tvec_base_t *new_base) | |
112 | { | |
e9910846 | 113 | timer->base = (tvec_base_t *)((unsigned long)(new_base) | |
6819457d | 114 | tbase_get_deferrable(timer->base)); |
6e453a67 VP |
115 | } |
116 | ||
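The helpers above rely on the classic tagged-pointer trick the comment describes: a 2-byte-aligned pointer always has a zero low bit, which is therefore free to carry a flag. A self-contained userspace sketch of the same idea (`tag_ptr()` and friends are illustrative names, not the kernel helpers):

```c
#include <assert.h>
#include <stdint.h>

#define FLAG_DEFERRABLE 0x1UL

static inline void *tag_ptr(void *p)   { return (void *)((uintptr_t)p | FLAG_DEFERRABLE); }
static inline void *untag_ptr(void *p) { return (void *)((uintptr_t)p & ~FLAG_DEFERRABLE); }
static inline int   is_tagged(void *p) { return (int)((uintptr_t)p & FLAG_DEFERRABLE); }

int main(void)
{
	static long dummy_base;            /* aligned, so bit 0 is zero */
	void *p = tag_ptr(&dummy_base);

	assert(is_tagged(p));
	assert(untag_ptr(p) == &dummy_base);
	return 0;
}
```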
4c36a5de AV |
117 | /** |
118 | * __round_jiffies - function to round jiffies to a full second | |
119 | * @j: the time in (absolute) jiffies that should be rounded | |
120 | * @cpu: the processor number on which the timeout will happen | |
121 | * | |
72fd4a35 | 122 | * __round_jiffies() rounds an absolute time in the future (in jiffies) |
4c36a5de AV |
123 | * up or down to (approximately) full seconds. This is useful for timers |
124 | * for which the exact time they fire does not matter too much, as long as | |
125 | * they fire approximately every X seconds. | |
126 | * | |
127 | * By rounding these timers to whole seconds, all such timers will fire | |
128 | * at the same time, rather than at various times spread out. The goal | |
129 | * of this is to have the CPU wake up less, which saves power. | |
130 | * | |
131 | * The exact rounding is skewed for each processor to avoid all | |
132 | * processors firing at the exact same time, which could lead | |
133 | * to lock contention or spurious cache line bouncing. | |
134 | * | |
72fd4a35 | 135 | * The return value is the rounded version of the @j parameter. |
4c36a5de AV |
136 | */ |
137 | unsigned long __round_jiffies(unsigned long j, int cpu) | |
138 | { | |
139 | int rem; | |
140 | unsigned long original = j; | |
141 | ||
142 | /* | |
143 | * We don't want all cpus firing their timers at once hitting the | |
144 | * same lock or cachelines, so we skew each extra cpu with an extra | |
145 | * 3 jiffies. This 3 jiffies came originally from the mm/ code which | |
146 | * already did this. | |
147 | * The skew is done by adding 3*cpunr, then rounding, then subtracting | |
148 | * this extra offset again. | |
149 | */ | |
150 | j += cpu * 3; | |
151 | ||
152 | rem = j % HZ; | |
153 | ||
154 | /* | |
155 | * If the target jiffy is just after a whole second (which can happen | |
156 | * due to delays of the timer irq, long irq off times etc etc) then | |
157 | * we should round down to the whole second, not up. Use 1/4th second | |
158 | * as cutoff for this rounding as an extreme upper bound for this. | |
159 | */ | |
160 | if (rem < HZ/4) /* round down */ | |
161 | j = j - rem; | |
162 | else /* round up */ | |
163 | j = j - rem + HZ; | |
164 | ||
165 | /* now that we have rounded, subtract the extra skew again */ | |
166 | j -= cpu * 3; | |
167 | ||
168 | if (j <= jiffies) /* rounding ate our timeout entirely; */ | |
169 | return original; | |
170 | return j; | |
171 | } | |
172 | EXPORT_SYMBOL_GPL(__round_jiffies); | |
173 | ||
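To make the arithmetic concrete: assume HZ = 100 and cpu = 2, with a caller passing j = 1047. The skew raises it to 1053; rem = 53 is at least HZ/4, so it rounds up to 1100; removing the skew yields 1094. CPU 2 thus gets its own boundary 6 jiffies shy of the wall-clock second, while cpu = 0 asking for j = 1003 (rem = 3 < HZ/4) would round down to 1000.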
174 | /** | |
175 | * __round_jiffies_relative - function to round jiffies to a full second | |
176 | * @j: the time in (relative) jiffies that should be rounded | |
177 | * @cpu: the processor number on which the timeout will happen | |
178 | * | |
72fd4a35 | 179 | * __round_jiffies_relative() rounds a time delta in the future (in jiffies) |
4c36a5de AV |
180 | * up or down to (approximately) full seconds. This is useful for timers |
181 | * for which the exact time they fire does not matter too much, as long as | |
182 | * they fire approximately every X seconds. | |
183 | * | |
184 | * By rounding these timers to whole seconds, all such timers will fire | |
185 | * at the same time, rather than at various times spread out. The goal | |
186 | * of this is to have the CPU wake up less, which saves power. | |
187 | * | |
188 | * The exact rounding is skewed for each processor to avoid all | |
189 | * processors firing at the exact same time, which could lead | |
190 | * to lock contention or spurious cache line bouncing. | |
191 | * | |
72fd4a35 | 192 | * The return value is the rounded version of the @j parameter. |
4c36a5de AV |
193 | */ |
194 | unsigned long __round_jiffies_relative(unsigned long j, int cpu) | |
195 | { | |
196 | /* | |
197 | * In theory the following code can skip a jiffy in case jiffies | |
198 | * increments right between the addition and the later subtraction. | |
199 | * However since the entire point of this function is to use approximate | |
200 | * timeouts, it's entirely ok to not handle that. | |
201 | */ | |
202 | return __round_jiffies(j + jiffies, cpu) - jiffies; | |
203 | } | |
204 | EXPORT_SYMBOL_GPL(__round_jiffies_relative); | |
205 | ||
206 | /** | |
207 | * round_jiffies - function to round jiffies to a full second | |
208 | * @j: the time in (absolute) jiffies that should be rounded | |
209 | * | |
72fd4a35 | 210 | * round_jiffies() rounds an absolute time in the future (in jiffies) |
4c36a5de AV |
211 | * up or down to (approximately) full seconds. This is useful for timers |
212 | * for which the exact time they fire does not matter too much, as long as | |
213 | * they fire approximately every X seconds. | |
214 | * | |
215 | * By rounding these timers to whole seconds, all such timers will fire | |
216 | * at the same time, rather than at various times spread out. The goal | |
217 | * of this is to have the CPU wake up less, which saves power. | |
218 | * | |
72fd4a35 | 219 | * The return value is the rounded version of the @j parameter. |
4c36a5de AV |
220 | */ |
221 | unsigned long round_jiffies(unsigned long j) | |
222 | { | |
223 | return __round_jiffies(j, raw_smp_processor_id()); | |
224 | } | |
225 | EXPORT_SYMBOL_GPL(round_jiffies); | |
226 | ||
227 | /** | |
228 | * round_jiffies_relative - function to round jiffies to a full second | |
229 | * @j: the time in (relative) jiffies that should be rounded | |
230 | * | |
72fd4a35 | 231 | * round_jiffies_relative() rounds a time delta in the future (in jiffies) |
4c36a5de AV |
232 | * up or down to (approximately) full seconds. This is useful for timers |
233 | * for which the exact time they fire does not matter too much, as long as | |
234 | * they fire approximately every X seconds. | |
235 | * | |
236 | * By rounding these timers to whole seconds, all such timers will fire | |
237 | * at the same time, rather than at various times spread out. The goal | |
238 | * of this is to have the CPU wake up less, which saves power. | |
239 | * | |
72fd4a35 | 240 | * The return value is the rounded version of the @j parameter. |
4c36a5de AV |
241 | */ |
242 | unsigned long round_jiffies_relative(unsigned long j) | |
243 | { | |
244 | return __round_jiffies_relative(j, raw_smp_processor_id()); | |
245 | } | |
246 | EXPORT_SYMBOL_GPL(round_jiffies_relative); | |
247 | ||
248 | ||
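In practice the rounding helpers are paired with add_timer()/mod_timer() so that coarse periodic work gangs up on (per-CPU-skewed) second boundaries. A hedged sketch of such a caller; the names and the 5-second period are made up:

```c
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list my_timer;        /* illustrative */

static void my_poll(unsigned long data)
{
	/* ... periodic housekeeping that tolerates ~1 s of slack ... */
	mod_timer(&my_timer, round_jiffies(jiffies + 5 * HZ));
}

static void my_poll_start(void)
{
	init_timer(&my_timer);
	my_timer.function = my_poll;
	my_timer.data = 0;
	my_timer.expires = round_jiffies(jiffies + 5 * HZ);
	add_timer(&my_timer);
}
```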
1da177e4 LT |
249 | static inline void set_running_timer(tvec_base_t *base, |
250 | struct timer_list *timer) | |
251 | { | |
252 | #ifdef CONFIG_SMP | |
3691c519 | 253 | base->running_timer = timer; |
1da177e4 LT |
254 | #endif |
255 | } | |
256 | ||
1da177e4 LT |
257 | static void internal_add_timer(tvec_base_t *base, struct timer_list *timer) |
258 | { | |
259 | unsigned long expires = timer->expires; | |
260 | unsigned long idx = expires - base->timer_jiffies; | |
261 | struct list_head *vec; | |
262 | ||
263 | if (idx < TVR_SIZE) { | |
264 | int i = expires & TVR_MASK; | |
265 | vec = base->tv1.vec + i; | |
266 | } else if (idx < 1 << (TVR_BITS + TVN_BITS)) { | |
267 | int i = (expires >> TVR_BITS) & TVN_MASK; | |
268 | vec = base->tv2.vec + i; | |
269 | } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) { | |
270 | int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK; | |
271 | vec = base->tv3.vec + i; | |
272 | } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) { | |
273 | int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK; | |
274 | vec = base->tv4.vec + i; | |
275 | } else if ((signed long) idx < 0) { | |
276 | /* | |
277 | * Can happen if you add a timer with expires == jiffies, | |
278 | * or you set a timer to go off in the past | |
279 | */ | |
280 | vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK); | |
281 | } else { | |
282 | int i; | |
283 | /* If the timeout is larger than 0xffffffff on 64-bit | |
284 | * architectures then we use the maximum timeout: | |
285 | */ | |
286 | if (idx > 0xffffffffUL) { | |
287 | idx = 0xffffffffUL; | |
288 | expires = idx + base->timer_jiffies; | |
289 | } | |
290 | i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK; | |
291 | vec = base->tv5.vec + i; | |
292 | } | |
293 | /* | |
294 | * Timers are FIFO: | |
295 | */ | |
296 | list_add_tail(&timer->entry, vec); | |
297 | } | |
298 | ||
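A worked case: with base->timer_jiffies = 1000 and expires = 1300, idx = 300 overflows tv1's 256-jiffy window, so the timer files into tv2 at bucket i = (1300 >> 8) & TVN_MASK = 5. Nothing inside a bucket is sorted; when timer_jiffies later enters that 256-jiffy window, cascade() empties the bucket and internal_add_timer() re-files each timer one level down, which is what keeps enqueueing O(1).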
82f67cd9 IM |
299 | #ifdef CONFIG_TIMER_STATS |
300 | void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr) | |
301 | { | |
302 | if (timer->start_site) | |
303 | return; | |
304 | ||
305 | timer->start_site = addr; | |
306 | memcpy(timer->start_comm, current->comm, TASK_COMM_LEN); | |
307 | timer->start_pid = current->pid; | |
308 | } | |
c5c061b8 VP |
309 | |
310 | static void timer_stats_account_timer(struct timer_list *timer) | |
311 | { | |
312 | unsigned int flag = 0; | |
313 | ||
314 | if (unlikely(tbase_get_deferrable(timer->base))) | |
315 | flag |= TIMER_STATS_FLAG_DEFERRABLE; | |
316 | ||
317 | timer_stats_update_stats(timer, timer->start_pid, timer->start_site, | |
318 | timer->function, timer->start_comm, flag); | |
319 | } | |
320 | ||
321 | #else | |
322 | static void timer_stats_account_timer(struct timer_list *timer) {} | |
82f67cd9 IM |
323 | #endif |
324 | ||
2aae4a10 | 325 | /** |
55c888d6 ON |
326 | * init_timer - initialize a timer. |
327 | * @timer: the timer to be initialized | |
328 | * | |
329 | * init_timer() must be done to a timer prior to calling *any* of the | |
330 | * other timer functions. | |
331 | */ | |
332 | void fastcall init_timer(struct timer_list *timer) | |
333 | { | |
334 | timer->entry.next = NULL; | |
bfe5d834 | 335 | timer->base = __raw_get_cpu_var(tvec_bases); |
82f67cd9 IM |
336 | #ifdef CONFIG_TIMER_STATS |
337 | timer->start_site = NULL; | |
338 | timer->start_pid = -1; | |
339 | memset(timer->start_comm, 0, TASK_COMM_LEN); | |
340 | #endif | |
55c888d6 ON |
341 | } |
342 | EXPORT_SYMBOL(init_timer); | |
343 | ||
6e453a67 VP |
344 | void fastcall init_timer_deferrable(struct timer_list *timer) |
345 | { | |
346 | init_timer(timer); | |
347 | timer_set_deferrable(timer); | |
348 | } | |
349 | EXPORT_SYMBOL(init_timer_deferrable); | |
350 | ||
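A deferrable timer is one the NO_HZ idle code is allowed to let slide past its expiry instead of waking the CPU (note how __next_timer_interrupt() below skips entries with tbase_get_deferrable() set). A sketch of a plausible user; the names and the 10-second period are illustrative:

```c
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list stats_timer;     /* illustrative */

/* Folds statistics that can happily wait out an idle period. */
static void stats_flush(unsigned long unused)
{
	/* ... fold per-CPU counters ... */
	mod_timer(&stats_timer, jiffies + 10 * HZ);
}

static void stats_timer_start(void)
{
	init_timer_deferrable(&stats_timer);
	stats_timer.function = stats_flush;
	stats_timer.expires = jiffies + 10 * HZ;
	add_timer(&stats_timer);
}
```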
55c888d6 | 351 | static inline void detach_timer(struct timer_list *timer, |
82f67cd9 | 352 | int clear_pending) |
55c888d6 ON |
353 | { |
354 | struct list_head *entry = &timer->entry; | |
355 | ||
356 | __list_del(entry->prev, entry->next); | |
357 | if (clear_pending) | |
358 | entry->next = NULL; | |
359 | entry->prev = LIST_POISON2; | |
360 | } | |
361 | ||
362 | /* | |
3691c519 | 363 | * We are using hashed locking: holding per_cpu(tvec_bases).lock |
55c888d6 ON |
364 | * means that all timers which are tied to this base via timer->base are |
365 | * locked, and the base itself is locked too. | |
366 | * | |
367 | * So __run_timers/migrate_timers can safely modify all timers which could | |
368 | * be found on ->tvX lists. | |
369 | * | |
370 | * When the timer's base is locked, and the timer removed from list, it is | |
371 | * possible to set timer->base = NULL and drop the lock: the timer remains | |
372 | * locked. | |
373 | */ | |
3691c519 | 374 | static tvec_base_t *lock_timer_base(struct timer_list *timer, |
55c888d6 | 375 | unsigned long *flags) |
89e7e374 | 376 | __acquires(timer->base->lock) |
55c888d6 | 377 | { |
3691c519 | 378 | tvec_base_t *base; |
55c888d6 ON |
379 | |
380 | for (;;) { | |
6e453a67 VP |
381 | tvec_base_t *prelock_base = timer->base; |
382 | base = tbase_get_base(prelock_base); | |
55c888d6 ON |
383 | if (likely(base != NULL)) { |
384 | spin_lock_irqsave(&base->lock, *flags); | |
6e453a67 | 385 | if (likely(prelock_base == timer->base)) |
55c888d6 ON |
386 | return base; |
387 | /* The timer has migrated to another CPU */ | |
388 | spin_unlock_irqrestore(&base->lock, *flags); | |
389 | } | |
390 | cpu_relax(); | |
391 | } | |
392 | } | |
393 | ||
1da177e4 LT |
394 | int __mod_timer(struct timer_list *timer, unsigned long expires) |
395 | { | |
3691c519 | 396 | tvec_base_t *base, *new_base; |
1da177e4 LT |
397 | unsigned long flags; |
398 | int ret = 0; | |
399 | ||
82f67cd9 | 400 | timer_stats_timer_set_start_info(timer); |
1da177e4 | 401 | BUG_ON(!timer->function); |
1da177e4 | 402 | |
55c888d6 ON |
403 | base = lock_timer_base(timer, &flags); |
404 | ||
405 | if (timer_pending(timer)) { | |
406 | detach_timer(timer, 0); | |
407 | ret = 1; | |
408 | } | |
409 | ||
a4a6198b | 410 | new_base = __get_cpu_var(tvec_bases); |
1da177e4 | 411 | |
3691c519 | 412 | if (base != new_base) { |
1da177e4 | 413 | /* |
55c888d6 ON |
414 | * We are trying to schedule the timer on the local CPU. |
415 | * However we can't change timer's base while it is running, | |
416 | * otherwise del_timer_sync() can't detect that the timer's | |
417 | * handler has not yet finished. This also guarantees that | |
418 | * the timer is serialized wrt itself. | |
1da177e4 | 419 | */ |
a2c348fe | 420 | if (likely(base->running_timer != timer)) { |
55c888d6 | 421 | /* See the comment in lock_timer_base() */ |
6e453a67 | 422 | timer_set_base(timer, NULL); |
55c888d6 | 423 | spin_unlock(&base->lock); |
a2c348fe ON |
424 | base = new_base; |
425 | spin_lock(&base->lock); | |
6e453a67 | 426 | timer_set_base(timer, base); |
1da177e4 LT |
427 | } |
428 | } | |
429 | ||
1da177e4 | 430 | timer->expires = expires; |
a2c348fe ON |
431 | internal_add_timer(base, timer); |
432 | spin_unlock_irqrestore(&base->lock, flags); | |
1da177e4 LT |
433 | |
434 | return ret; | |
435 | } | |
436 | ||
437 | EXPORT_SYMBOL(__mod_timer); | |
438 | ||
2aae4a10 | 439 | /** |
1da177e4 LT |
440 | * add_timer_on - start a timer on a particular CPU |
441 | * @timer: the timer to be added | |
442 | * @cpu: the CPU to start it on | |
443 | * | |
444 | * This is not very scalable on SMP. Double adds are not possible. | |
445 | */ | |
446 | void add_timer_on(struct timer_list *timer, int cpu) | |
447 | { | |
a4a6198b | 448 | tvec_base_t *base = per_cpu(tvec_bases, cpu); |
6819457d | 449 | unsigned long flags; |
55c888d6 | 450 | |
82f67cd9 | 451 | timer_stats_timer_set_start_info(timer); |
6819457d | 452 | BUG_ON(timer_pending(timer) || !timer->function); |
3691c519 | 453 | spin_lock_irqsave(&base->lock, flags); |
6e453a67 | 454 | timer_set_base(timer, base); |
1da177e4 | 455 | internal_add_timer(base, timer); |
3691c519 | 456 | spin_unlock_irqrestore(&base->lock, flags); |
1da177e4 LT |
457 | } |
458 | ||
459 | ||
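A sketch of the typical add_timer_on() caller: arming one timer per online CPU so each runs against its own tvec base (names and the 1-second period are illustrative):

```c
#include <linux/timer.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/jiffies.h>

static DEFINE_PER_CPU(struct timer_list, percpu_timer);   /* illustrative */

static void percpu_tick(unsigned long cpu)
{
	/* ... per-CPU work; rearm with mod_timer() if periodic ... */
}

static void start_percpu_timers(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		struct timer_list *t = &per_cpu(percpu_timer, cpu);

		init_timer(t);
		t->function = percpu_tick;
		t->data = cpu;
		t->expires = jiffies + HZ;
		add_timer_on(t, cpu);   /* queue on that CPU's base */
	}
}
```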
2aae4a10 | 460 | /** |
1da177e4 LT |
461 | * mod_timer - modify a timer's timeout |
462 | * @timer: the timer to be modified | |
2aae4a10 | 463 | * @expires: new timeout in jiffies |
1da177e4 | 464 | * |
72fd4a35 | 465 | * mod_timer() is a more efficient way to update the expire field of an |
1da177e4 LT |
466 | * active timer (if the timer is inactive it will be activated) |
467 | * | |
468 | * mod_timer(timer, expires) is equivalent to: | |
469 | * | |
470 | * del_timer(timer); timer->expires = expires; add_timer(timer); | |
471 | * | |
472 | * Note that if there are multiple unserialized concurrent users of the | |
473 | * same timer, then mod_timer() is the only safe way to modify the timeout, | |
474 | * since add_timer() cannot modify an already running timer. | |
475 | * | |
476 | * The function returns whether it has modified a pending timer or not. | |
477 | * (i.e. mod_timer() of an inactive timer returns 0, mod_timer() of an | |
478 | * active timer returns 1.) | |
479 | */ | |
480 | int mod_timer(struct timer_list *timer, unsigned long expires) | |
481 | { | |
482 | BUG_ON(!timer->function); | |
483 | ||
82f67cd9 | 484 | timer_stats_timer_set_start_info(timer); |
1da177e4 LT |
485 | /* |
486 | * This is a common optimization triggered by the | |
487 | * networking code - if the timer is re-modified | |
488 | * to be the same thing then just return: | |
489 | */ | |
490 | if (timer->expires == expires && timer_pending(timer)) | |
491 | return 1; | |
492 | ||
493 | return __mod_timer(timer, expires); | |
494 | } | |
495 | ||
496 | EXPORT_SYMBOL(mod_timer); | |
497 | ||
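The canonical consumer is a watchdog that gets pushed out on every sign of life; because mod_timer() handles both the pending and the inactive case, the caller needs no bookkeeping. A sketch (the name and the 3-second period are made up):

```c
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list watchdog;        /* illustrative */

/* Called on every unit of activity: (re)arms the watchdog 3 s out,
 * whether or not it is currently pending. */
static void touch_my_watchdog(void)
{
	mod_timer(&watchdog, jiffies + 3 * HZ);
}
```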
2aae4a10 | 498 | /** |
1da177e4 LT |
499 | * del_timer - deactivate a timer. |
500 | * @timer: the timer to be deactivated | |
501 | * | |
502 | * del_timer() deactivates a timer - this works on both active and inactive | |
503 | * timers. | |
504 | * | |
505 | * The function returns whether it has deactivated a pending timer or not. | |
506 | * (i.e. del_timer() of an inactive timer returns 0, del_timer() of an | |
507 | * active timer returns 1.) | |
508 | */ | |
509 | int del_timer(struct timer_list *timer) | |
510 | { | |
3691c519 | 511 | tvec_base_t *base; |
1da177e4 | 512 | unsigned long flags; |
55c888d6 | 513 | int ret = 0; |
1da177e4 | 514 | |
82f67cd9 | 515 | timer_stats_timer_clear_start_info(timer); |
55c888d6 ON |
516 | if (timer_pending(timer)) { |
517 | base = lock_timer_base(timer, &flags); | |
518 | if (timer_pending(timer)) { | |
519 | detach_timer(timer, 1); | |
520 | ret = 1; | |
521 | } | |
1da177e4 | 522 | spin_unlock_irqrestore(&base->lock, flags); |
1da177e4 | 523 | } |
1da177e4 | 524 | |
55c888d6 | 525 | return ret; |
1da177e4 LT |
526 | } |
527 | ||
528 | EXPORT_SYMBOL(del_timer); | |
529 | ||
530 | #ifdef CONFIG_SMP | |
2aae4a10 REB |
531 | /** |
532 | * try_to_del_timer_sync - Try to deactivate a timer | |
533 | * @timer: the timer to deactivate |
534 | * | |
fd450b73 ON |
535 | * This function tries to deactivate a timer. Upon successful (ret >= 0) |
536 | * exit the timer is not queued and the handler is not running on any CPU. | |
537 | * | |
538 | * It must not be called from interrupt contexts. | |
539 | */ | |
540 | int try_to_del_timer_sync(struct timer_list *timer) | |
541 | { | |
3691c519 | 542 | tvec_base_t *base; |
fd450b73 ON |
543 | unsigned long flags; |
544 | int ret = -1; | |
545 | ||
546 | base = lock_timer_base(timer, &flags); | |
547 | ||
548 | if (base->running_timer == timer) | |
549 | goto out; | |
550 | ||
551 | ret = 0; | |
552 | if (timer_pending(timer)) { | |
553 | detach_timer(timer, 1); | |
554 | ret = 1; | |
555 | } | |
556 | out: | |
557 | spin_unlock_irqrestore(&base->lock, flags); | |
558 | ||
559 | return ret; | |
560 | } | |
561 | ||
e19dff1f DH |
562 | EXPORT_SYMBOL(try_to_del_timer_sync); |
563 | ||
2aae4a10 | 564 | /** |
1da177e4 LT |
565 | * del_timer_sync - deactivate a timer and wait for the handler to finish. |
566 | * @timer: the timer to be deactivated | |
567 | * | |
568 | * This function only differs from del_timer() on SMP: besides deactivating | |
569 | * the timer it also makes sure the handler has finished executing on other | |
570 | * CPUs. | |
571 | * | |
72fd4a35 | 572 | * Synchronization rules: Callers must prevent restarting of the timer, |
1da177e4 LT |
573 | * otherwise this function is meaningless. It must not be called from |
574 | * interrupt contexts. The caller must not hold locks which would prevent | |
55c888d6 ON |
575 | * completion of the timer's handler. The timer's handler must not call |
576 | * add_timer_on(). Upon exit the timer is not queued and the handler is | |
577 | * not running on any CPU. | |
1da177e4 LT |
578 | * |
579 | * The function returns whether it has deactivated a pending timer or not. | |
1da177e4 LT |
580 | */ |
581 | int del_timer_sync(struct timer_list *timer) | |
582 | { | |
fd450b73 ON |
583 | for (;;) { |
584 | int ret = try_to_del_timer_sync(timer); | |
585 | if (ret >= 0) | |
586 | return ret; | |
a0009652 | 587 | cpu_relax(); |
fd450b73 | 588 | } |
1da177e4 | 589 | } |
1da177e4 | 590 | |
55c888d6 | 591 | EXPORT_SYMBOL(del_timer_sync); |
1da177e4 LT |
592 | #endif |
593 | ||
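The synchronization rules above boil down to a standard teardown pattern: first guarantee the handler can no longer rearm itself, then call del_timer_sync(). A sketch, reusing the illustrative watchdog from the earlier example:

```c
#include <linux/timer.h>

static int shutting_down;                 /* illustrative guard */
static struct timer_list watchdog;        /* illustrative */

static void my_exit(void)
{
	shutting_down = 1;          /* handler checks this before rearming */
	del_timer_sync(&watchdog);  /* on return: not queued, not running */
}
```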
594 | static int cascade(tvec_base_t *base, tvec_t *tv, int index) | |
595 | { | |
596 | /* cascade all the timers from tv up one level */ | |
3439dd86 P |
597 | struct timer_list *timer, *tmp; |
598 | struct list_head tv_list; | |
599 | ||
600 | list_replace_init(tv->vec + index, &tv_list); | |
1da177e4 | 601 | |
1da177e4 | 602 | /* |
3439dd86 P |
603 | * We are removing _all_ timers from the list, so we |
604 | * don't have to detach them individually. | |
1da177e4 | 605 | */ |
3439dd86 | 606 | list_for_each_entry_safe(timer, tmp, &tv_list, entry) { |
6e453a67 | 607 | BUG_ON(tbase_get_base(timer->base) != base); |
3439dd86 | 608 | internal_add_timer(base, timer); |
1da177e4 | 609 | } |
1da177e4 LT |
610 | |
611 | return index; | |
612 | } | |
613 | ||
2aae4a10 REB |
614 | #define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK) |
615 | ||
616 | /** | |
1da177e4 LT |
617 | * __run_timers - run all expired timers (if any) on this CPU. |
618 | * @base: the timer vector to be processed. | |
619 | * | |
620 | * This function cascades all vectors and executes all expired timer | |
621 | * vectors. | |
622 | */ | |
1da177e4 LT |
623 | static inline void __run_timers(tvec_base_t *base) |
624 | { | |
625 | struct timer_list *timer; | |
626 | ||
3691c519 | 627 | spin_lock_irq(&base->lock); |
1da177e4 | 628 | while (time_after_eq(jiffies, base->timer_jiffies)) { |
626ab0e6 | 629 | struct list_head work_list; |
1da177e4 | 630 | struct list_head *head = &work_list; |
6819457d | 631 | int index = base->timer_jiffies & TVR_MASK; |
626ab0e6 | 632 | |
1da177e4 LT |
633 | /* |
634 | * Cascade timers: | |
635 | */ | |
636 | if (!index && | |
637 | (!cascade(base, &base->tv2, INDEX(0))) && | |
638 | (!cascade(base, &base->tv3, INDEX(1))) && | |
639 | !cascade(base, &base->tv4, INDEX(2))) | |
640 | cascade(base, &base->tv5, INDEX(3)); | |
626ab0e6 ON |
641 | ++base->timer_jiffies; |
642 | list_replace_init(base->tv1.vec + index, &work_list); | |
55c888d6 | 643 | while (!list_empty(head)) { |
1da177e4 LT |
644 | void (*fn)(unsigned long); |
645 | unsigned long data; | |
646 | ||
b5e61818 | 647 | timer = list_first_entry(head, struct timer_list, entry); |
6819457d TG |
648 | fn = timer->function; |
649 | data = timer->data; | |
1da177e4 | 650 | |
82f67cd9 IM |
651 | timer_stats_account_timer(timer); |
652 | ||
1da177e4 | 653 | set_running_timer(base, timer); |
55c888d6 | 654 | detach_timer(timer, 1); |
3691c519 | 655 | spin_unlock_irq(&base->lock); |
1da177e4 | 656 | { |
be5b4fbd | 657 | int preempt_count = preempt_count(); |
1da177e4 LT |
658 | fn(data); |
659 | if (preempt_count != preempt_count()) { | |
be5b4fbd JJ |
660 | printk(KERN_WARNING "huh, entered %p " |
661 | "with preempt_count %08x, exited" | |
662 | " with %08x?\n", | |
663 | fn, preempt_count, | |
664 | preempt_count()); | |
1da177e4 LT |
665 | BUG(); |
666 | } | |
667 | } | |
3691c519 | 668 | spin_lock_irq(&base->lock); |
1da177e4 LT |
669 | } |
670 | } | |
671 | set_running_timer(base, NULL); | |
3691c519 | 672 | spin_unlock_irq(&base->lock); |
1da177e4 LT |
673 | } |
674 | ||
fd064b9b | 675 | #if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ) |
1da177e4 LT |
676 | /* |
677 | * Find out when the next timer event is due to happen. This | |
678 | * is used on S/390 to stop all activity when a cpu is idle. | |
679 | * This function needs to be called with interrupts disabled. | |
680 | */ | |
1cfd6849 | 681 | static unsigned long __next_timer_interrupt(tvec_base_t *base) |
1da177e4 | 682 | { |
1cfd6849 | 683 | unsigned long timer_jiffies = base->timer_jiffies; |
eaad084b | 684 | unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA; |
1cfd6849 | 685 | int index, slot, array, found = 0; |
1da177e4 | 686 | struct timer_list *nte; |
1da177e4 | 687 | tvec_t *varray[4]; |
1da177e4 LT |
688 | |
689 | /* Look for timer events in tv1. */ | |
1cfd6849 | 690 | index = slot = timer_jiffies & TVR_MASK; |
1da177e4 | 691 | do { |
1cfd6849 | 692 | list_for_each_entry(nte, base->tv1.vec + slot, entry) { |
6819457d TG |
693 | if (tbase_get_deferrable(nte->base)) |
694 | continue; | |
6e453a67 | 695 | |
1cfd6849 | 696 | found = 1; |
1da177e4 | 697 | expires = nte->expires; |
1cfd6849 TG |
698 | /* Look at the cascade bucket(s)? */ |
699 | if (!index || slot < index) | |
700 | goto cascade; | |
701 | return expires; | |
1da177e4 | 702 | } |
1cfd6849 TG |
703 | slot = (slot + 1) & TVR_MASK; |
704 | } while (slot != index); | |
705 | ||
706 | cascade: | |
707 | /* Calculate the next cascade event */ | |
708 | if (index) | |
709 | timer_jiffies += TVR_SIZE - index; | |
710 | timer_jiffies >>= TVR_BITS; | |
1da177e4 LT |
711 | |
712 | /* Check tv2-tv5. */ | |
713 | varray[0] = &base->tv2; | |
714 | varray[1] = &base->tv3; | |
715 | varray[2] = &base->tv4; | |
716 | varray[3] = &base->tv5; | |
1cfd6849 TG |
717 | |
718 | for (array = 0; array < 4; array++) { | |
719 | tvec_t *varp = varray[array]; | |
720 | ||
721 | index = slot = timer_jiffies & TVN_MASK; | |
1da177e4 | 722 | do { |
1cfd6849 TG |
723 | list_for_each_entry(nte, varp->vec + slot, entry) { |
724 | found = 1; | |
1da177e4 LT |
725 | if (time_before(nte->expires, expires)) |
726 | expires = nte->expires; | |
1cfd6849 TG |
727 | } |
728 | /* | |
729 | * Are we still searching for the first timer, or are | |
730 | * we looking up the cascade buckets? | |
731 | */ | |
732 | if (found) { | |
733 | /* Look at the cascade bucket(s)? */ | |
734 | if (!index || slot < index) | |
735 | break; | |
736 | return expires; | |
737 | } | |
738 | slot = (slot + 1) & TVN_MASK; | |
739 | } while (slot != index); | |
740 | ||
741 | if (index) | |
742 | timer_jiffies += TVN_SIZE - index; | |
743 | timer_jiffies >>= TVN_BITS; | |
1da177e4 | 744 | } |
1cfd6849 TG |
745 | return expires; |
746 | } | |
69239749 | 747 | |
1cfd6849 TG |
748 | /* |
749 | * Check, if the next hrtimer event is before the next timer wheel | |
750 | * event: | |
751 | */ | |
752 | static unsigned long cmp_next_hrtimer_event(unsigned long now, | |
753 | unsigned long expires) | |
754 | { | |
755 | ktime_t hr_delta = hrtimer_get_next_event(); | |
756 | struct timespec tsdelta; | |
9501b6cf | 757 | unsigned long delta; |
1cfd6849 TG |
758 | |
759 | if (hr_delta.tv64 == KTIME_MAX) | |
760 | return expires; | |
0662b713 | 761 | |
9501b6cf TG |
762 | /* |
763 | * Expired timer available, let it expire in the next tick | |
764 | */ | |
765 | if (hr_delta.tv64 <= 0) | |
766 | return now + 1; | |
69239749 | 767 | |
1cfd6849 | 768 | tsdelta = ktime_to_timespec(hr_delta); |
9501b6cf | 769 | delta = timespec_to_jiffies(&tsdelta); |
eaad084b TG |
770 | |
771 | /* | |
772 | * Limit the delta to the max value, which is checked in | |
773 | * tick_nohz_stop_sched_tick(): | |
774 | */ | |
775 | if (delta > NEXT_TIMER_MAX_DELTA) | |
776 | delta = NEXT_TIMER_MAX_DELTA; | |
777 | ||
9501b6cf TG |
778 | /* |
779 | * Take rounding errors into account and make sure that it | |
780 | * expires in the next tick. Otherwise we go into an endless | |
781 | * ping-pong due to tick_nohz_stop_sched_tick() retriggering | |
782 | * the timer softirq. | |
783 | */ | |
784 | if (delta < 1) | |
785 | delta = 1; | |
786 | now += delta; | |
1cfd6849 TG |
787 | if (time_before(now, expires)) |
788 | return now; | |
1da177e4 LT |
789 | return expires; |
790 | } | |
1cfd6849 TG |
791 | |
792 | /** | |
8dce39c2 | 793 | * get_next_timer_interrupt - return the jiffy of the next pending timer |
05fb6bf0 | 794 | * @now: current time (in jiffies) |
1cfd6849 | 795 | */ |
fd064b9b | 796 | unsigned long get_next_timer_interrupt(unsigned long now) |
1cfd6849 TG |
797 | { |
798 | tvec_base_t *base = __get_cpu_var(tvec_bases); | |
fd064b9b | 799 | unsigned long expires; |
1cfd6849 TG |
800 | |
801 | spin_lock(&base->lock); | |
802 | expires = __next_timer_interrupt(base); | |
803 | spin_unlock(&base->lock); | |
804 | ||
805 | if (time_before_eq(expires, now)) | |
806 | return now; | |
807 | ||
808 | return cmp_next_hrtimer_event(now, expires); | |
809 | } | |
fd064b9b TG |
810 | |
811 | #ifdef CONFIG_NO_IDLE_HZ | |
812 | unsigned long next_timer_interrupt(void) | |
813 | { | |
814 | return get_next_timer_interrupt(jiffies); | |
815 | } | |
816 | #endif | |
817 | ||
1da177e4 LT |
818 | #endif |
819 | ||
1da177e4 | 820 | /* |
5b4db0c2 | 821 | * Called from the timer interrupt handler to charge one tick to the current |
1da177e4 LT |
822 | * process. user_tick is 1 if the tick is user time, 0 for system. |
823 | */ | |
824 | void update_process_times(int user_tick) | |
825 | { | |
826 | struct task_struct *p = current; | |
827 | int cpu = smp_processor_id(); | |
828 | ||
829 | /* Note: this timer irq context must be accounted for as well. */ | |
c66f08be | 830 | if (user_tick) { |
1da177e4 | 831 | account_user_time(p, jiffies_to_cputime(1)); |
c66f08be MN |
832 | account_user_time_scaled(p, jiffies_to_cputime(1)); |
833 | } else { | |
1da177e4 | 834 | account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1)); |
c66f08be MN |
835 | account_system_time_scaled(p, jiffies_to_cputime(1)); |
836 | } | |
1da177e4 LT |
837 | run_local_timers(); |
838 | if (rcu_pending(cpu)) | |
839 | rcu_check_callbacks(cpu, user_tick); | |
840 | scheduler_tick(); | |
6819457d | 841 | run_posix_cpu_timers(p); |
1da177e4 LT |
842 | } |
843 | ||
844 | /* | |
845 | * Nr of active tasks - counted in fixed-point numbers | |
846 | */ | |
847 | static unsigned long count_active_tasks(void) | |
848 | { | |
db1b1fef | 849 | return nr_active() * FIXED_1; |
1da177e4 LT |
850 | } |
851 | ||
852 | /* | |
853 | * Hmm.. Changed this, as the GNU make sources (load.c) seem to | |
854 | * imply that avenrun[] is the standard name for this kind of thing. | |
855 | * Nothing else seems to be standardized: the fractional size etc | |
856 | * all seem to differ on different machines. | |
857 | * | |
858 | * Requires xtime_lock to access. | |
859 | */ | |
860 | unsigned long avenrun[3]; | |
861 | ||
862 | EXPORT_SYMBOL(avenrun); | |
863 | ||
864 | /* | |
865 | * calc_load - given tick count, update the avenrun load estimates. | |
866 | * This is called while holding a write_lock on xtime_lock. | |
867 | */ | |
868 | static inline void calc_load(unsigned long ticks) | |
869 | { | |
870 | unsigned long active_tasks; /* fixed-point */ | |
871 | static int count = LOAD_FREQ; | |
872 | ||
cd7175ed ED |
873 | count -= ticks; |
874 | if (unlikely(count < 0)) { | |
875 | active_tasks = count_active_tasks(); | |
876 | do { | |
877 | CALC_LOAD(avenrun[0], EXP_1, active_tasks); | |
878 | CALC_LOAD(avenrun[1], EXP_5, active_tasks); | |
879 | CALC_LOAD(avenrun[2], EXP_15, active_tasks); | |
880 | count += LOAD_FREQ; | |
881 | } while (count < 0); | |
1da177e4 LT |
882 | } |
883 | } | |
884 | ||
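CALC_LOAD is an exponentially weighted moving average in FSHIFT-bit fixed point: load' = (load*EXP + active*(FIXED_1 - EXP)) >> FSHIFT, with EXP = FIXED_1/e^(5s/T) for an averaging window T. As a worked check, assuming the usual FSHIFT = 11 (FIXED_1 = 2048) and EXP_1 = 1884: starting from avenrun[0] = 0 with one runnable task (active = 2048), a single LOAD_FREQ interval yields (0*1884 + 2048*164) >> 11 = 164, i.e. a reported 1-minute load of 164/2048 ≈ 0.08.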
1da177e4 LT |
885 | /* |
886 | * This function runs timers and the timer-tq in bottom half context. | |
887 | */ | |
888 | static void run_timer_softirq(struct softirq_action *h) | |
889 | { | |
a4a6198b | 890 | tvec_base_t *base = __get_cpu_var(tvec_bases); |
1da177e4 | 891 | |
82f67cd9 IM |
892 | hrtimer_run_queues(); |
893 | ||
1da177e4 LT |
894 | if (time_after_eq(jiffies, base->timer_jiffies)) |
895 | __run_timers(base); | |
896 | } | |
897 | ||
898 | /* | |
899 | * Called by the local, per-CPU timer interrupt on SMP. | |
900 | */ | |
901 | void run_local_timers(void) | |
902 | { | |
903 | raise_softirq(TIMER_SOFTIRQ); | |
6687a97d | 904 | softlockup_tick(); |
1da177e4 LT |
905 | } |
906 | ||
907 | /* | |
908 | * Called by the timer interrupt. xtime_lock must already be taken | |
909 | * by the timer IRQ! | |
910 | */ | |
3171a030 | 911 | static inline void update_times(unsigned long ticks) |
1da177e4 | 912 | { |
ad596171 | 913 | update_wall_time(); |
1da177e4 LT |
914 | calc_load(ticks); |
915 | } | |
6819457d | 916 | |
1da177e4 LT |
917 | /* |
918 | * The 64-bit jiffies value is not atomic - you MUST NOT read it | |
919 | * without sampling the sequence number in xtime_lock. | |
920 | * jiffies is defined in the linker script... | |
921 | */ | |
922 | ||
3171a030 | 923 | void do_timer(unsigned long ticks) |
1da177e4 | 924 | { |
3171a030 AN |
925 | jiffies_64 += ticks; |
926 | update_times(ticks); | |
1da177e4 LT |
927 | } |
928 | ||
929 | #ifdef __ARCH_WANT_SYS_ALARM | |
930 | ||
931 | /* | |
932 | * For backwards compatibility? This can be done in libc so Alpha | |
933 | * and all newer ports shouldn't need it. | |
934 | */ | |
935 | asmlinkage unsigned long sys_alarm(unsigned int seconds) | |
936 | { | |
c08b8a49 | 937 | return alarm_setitimer(seconds); |
1da177e4 LT |
938 | } |
939 | ||
940 | #endif | |
941 | ||
942 | #ifndef __alpha__ | |
943 | ||
944 | /* | |
945 | * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this | |
946 | * should be moved into arch/i386 instead? | |
947 | */ | |
948 | ||
949 | /** | |
950 | * sys_getpid - return the thread group id of the current process | |
951 | * | |
952 | * Note, despite the name, this returns the tgid not the pid. The tgid and | |
953 | * the pid are identical unless CLONE_THREAD was specified on clone() in | |
954 | * which case the tgid is the same in all threads of the same group. | |
955 | * | |
956 | * This is SMP safe as current->tgid does not change. | |
957 | */ | |
958 | asmlinkage long sys_getpid(void) | |
959 | { | |
b488893a | 960 | return task_tgid_vnr(current); |
1da177e4 LT |
961 | } |
962 | ||
963 | /* | |
6997a6fa KK |
964 | * Accessing ->real_parent is not SMP-safe, it could |
965 | * change from under us. However, we can use a stale | |
966 | * value of ->real_parent under rcu_read_lock(), see | |
967 | * release_task()->call_rcu(delayed_put_task_struct). | |
1da177e4 LT |
968 | */ |
969 | asmlinkage long sys_getppid(void) | |
970 | { | |
971 | int pid; | |
1da177e4 | 972 | |
6997a6fa | 973 | rcu_read_lock(); |
b488893a | 974 | pid = task_ppid_nr_ns(current, current->nsproxy->pid_ns); |
6997a6fa | 975 | rcu_read_unlock(); |
1da177e4 | 976 | |
1da177e4 LT |
977 | return pid; |
978 | } | |
979 | ||
980 | asmlinkage long sys_getuid(void) | |
981 | { | |
982 | /* Only we change this so SMP safe */ | |
983 | return current->uid; | |
984 | } | |
985 | ||
986 | asmlinkage long sys_geteuid(void) | |
987 | { | |
988 | /* Only we change this so SMP safe */ | |
989 | return current->euid; | |
990 | } | |
991 | ||
992 | asmlinkage long sys_getgid(void) | |
993 | { | |
994 | /* Only we change this so SMP safe */ | |
995 | return current->gid; | |
996 | } | |
997 | ||
998 | asmlinkage long sys_getegid(void) | |
999 | { | |
1000 | /* Only we change this so SMP safe */ | |
1001 | return current->egid; | |
1002 | } | |
1003 | ||
1004 | #endif | |
1005 | ||
1006 | static void process_timeout(unsigned long __data) | |
1007 | { | |
36c8b586 | 1008 | wake_up_process((struct task_struct *)__data); |
1da177e4 LT |
1009 | } |
1010 | ||
1011 | /** | |
1012 | * schedule_timeout - sleep until timeout | |
1013 | * @timeout: timeout value in jiffies | |
1014 | * | |
1015 | * Make the current task sleep until @timeout jiffies have | |
1016 | * elapsed. The routine will return immediately unless | |
1017 | * the current task state has been set (see set_current_state()). | |
1018 | * | |
1019 | * You can set the task state as follows - | |
1020 | * | |
1021 | * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to | |
1022 | * pass before the routine returns. The routine will return 0 | |
1023 | * | |
1024 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is | |
1025 | * delivered to the current task. In this case the remaining time | |
1026 | * in jiffies will be returned, or 0 if the timer expired in time | |
1027 | * | |
1028 | * The current task state is guaranteed to be TASK_RUNNING when this | |
1029 | * routine returns. | |
1030 | * | |
1031 | * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule | |
1032 | * the CPU away without a bound on the timeout. In this case the return | |
1033 | * value will be %MAX_SCHEDULE_TIMEOUT. | |
1034 | * | |
1035 | * In all cases the return value is guaranteed to be non-negative. | |
1036 | */ | |
1037 | fastcall signed long __sched schedule_timeout(signed long timeout) | |
1038 | { | |
1039 | struct timer_list timer; | |
1040 | unsigned long expire; | |
1041 | ||
1042 | switch (timeout) | |
1043 | { | |
1044 | case MAX_SCHEDULE_TIMEOUT: | |
1045 | /* | |
1046 | * These two special cases are useful to be comfortable | |
1047 | * in the caller. Nothing more. We could take | |
1048 | * MAX_SCHEDULE_TIMEOUT from one of the negative values | |
1049 | * but I'd like to return a valid offset (>=0) to allow | |
1050 | * the caller to do everything it wants with the retval. | |
1051 | */ | |
1052 | schedule(); | |
1053 | goto out; | |
1054 | default: | |
1055 | /* | |
1056 | * Another bit of paranoia. Note that the retval will be | |
1057 | * 0 since no piece of kernel is supposed to do a check | |
1058 | * for a negative retval of schedule_timeout() (since it | |
1059 | * should never happen anyway). You just have the printk() | |
1060 | * that will tell you if something has gone wrong and where. | |
1061 | */ | |
5b149bcc | 1062 | if (timeout < 0) { |
1da177e4 | 1063 | printk(KERN_ERR "schedule_timeout: wrong timeout " |
5b149bcc AM |
1064 | "value %lx\n", timeout); |
1065 | dump_stack(); | |
1da177e4 LT |
1066 | current->state = TASK_RUNNING; |
1067 | goto out; | |
1068 | } | |
1069 | } | |
1070 | ||
1071 | expire = timeout + jiffies; | |
1072 | ||
a8db2db1 ON |
1073 | setup_timer(&timer, process_timeout, (unsigned long)current); |
1074 | __mod_timer(&timer, expire); | |
1da177e4 LT |
1075 | schedule(); |
1076 | del_singleshot_timer_sync(&timer); | |
1077 | ||
1078 | timeout = expire - jiffies; | |
1079 | ||
1080 | out: | |
1081 | return timeout < 0 ? 0 : timeout; | |
1082 | } | |
1da177e4 LT |
1083 | EXPORT_SYMBOL(schedule_timeout); |
1084 | ||
8a1c1757 AM |
1085 | /* |
1086 | * We can use __set_current_state() here because schedule_timeout() calls | |
1087 | * schedule() unconditionally. | |
1088 | */ | |
64ed93a2 NA |
1089 | signed long __sched schedule_timeout_interruptible(signed long timeout) |
1090 | { | |
a5a0d52c AM |
1091 | __set_current_state(TASK_INTERRUPTIBLE); |
1092 | return schedule_timeout(timeout); | |
64ed93a2 NA |
1093 | } |
1094 | EXPORT_SYMBOL(schedule_timeout_interruptible); | |
1095 | ||
1096 | signed long __sched schedule_timeout_uninterruptible(signed long timeout) | |
1097 | { | |
a5a0d52c AM |
1098 | __set_current_state(TASK_UNINTERRUPTIBLE); |
1099 | return schedule_timeout(timeout); | |
64ed93a2 NA |
1100 | } |
1101 | EXPORT_SYMBOL(schedule_timeout_uninterruptible); | |
1102 | ||
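A sketch of the raw pattern these helpers wrap, per the comment above (the task state must be set before every call, since schedule_timeout() itself calls schedule() unconditionally); the function name is illustrative:

```c
#include <linux/sched.h>
#include <linux/jiffies.h>

/* Returns 0 on timeout, or the remaining jiffies if woken early. */
static signed long nap_half_second(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(HZ / 2);
}
```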
1da177e4 LT |
1103 | /* Thread ID - the internal kernel "pid" */ |
1104 | asmlinkage long sys_gettid(void) | |
1105 | { | |
b488893a | 1106 | return task_pid_vnr(current); |
1da177e4 LT |
1107 | } |
1108 | ||
2aae4a10 | 1109 | /** |
d4d23add | 1110 | * do_sysinfo - fill in sysinfo struct |
2aae4a10 | 1111 | * @info: pointer to buffer to fill |
6819457d | 1112 | */ |
d4d23add | 1113 | int do_sysinfo(struct sysinfo *info) |
1da177e4 | 1114 | { |
1da177e4 LT |
1115 | unsigned long mem_total, sav_total; |
1116 | unsigned int mem_unit, bitcount; | |
1117 | unsigned long seq; | |
1118 | ||
d4d23add | 1119 | memset(info, 0, sizeof(struct sysinfo)); |
1da177e4 LT |
1120 | |
1121 | do { | |
1122 | struct timespec tp; | |
1123 | seq = read_seqbegin(&xtime_lock); | |
1124 | ||
1125 | /* | |
1126 | * This is annoying. The below is the same thing | |
1127 | * posix_get_clock_monotonic() does, but it wants to | |
1128 | * take the lock, and we want the lock to cover the load | |
1129 | * averages too. | |
1130 | */ | |
1131 | ||
1132 | getnstimeofday(&tp); | |
1133 | tp.tv_sec += wall_to_monotonic.tv_sec; | |
1134 | tp.tv_nsec += wall_to_monotonic.tv_nsec; | |
d6214141 | 1135 | monotonic_to_bootbased(&tp); |
1da177e4 LT |
1136 | if (tp.tv_nsec - NSEC_PER_SEC >= 0) { |
1137 | tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC; | |
1138 | tp.tv_sec++; | |
1139 | } | |
d4d23add | 1140 | info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0); |
1da177e4 | 1141 | |
d4d23add KM |
1142 | info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT); |
1143 | info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT); | |
1144 | info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT); | |
1da177e4 | 1145 | |
d4d23add | 1146 | info->procs = nr_threads; |
1da177e4 LT |
1147 | } while (read_seqretry(&xtime_lock, seq)); |
1148 | ||
d4d23add KM |
1149 | si_meminfo(info); |
1150 | si_swapinfo(info); | |
1da177e4 LT |
1151 | |
1152 | /* | |
1153 | * If the sum of all the available memory (i.e. ram + swap) | |
1154 | * is less than can be stored in a 32 bit unsigned long then | |
1155 | * we can be binary compatible with 2.2.x kernels. If not, | |
1156 | * well, in that case 2.2.x was broken anyways... | |
1157 | * | |
1158 | * -Erik Andersen <andersee@debian.org> | |
1159 | */ | |
1160 | ||
d4d23add KM |
1161 | mem_total = info->totalram + info->totalswap; |
1162 | if (mem_total < info->totalram || mem_total < info->totalswap) | |
1da177e4 LT |
1163 | goto out; |
1164 | bitcount = 0; | |
d4d23add | 1165 | mem_unit = info->mem_unit; |
1da177e4 LT |
1166 | while (mem_unit > 1) { |
1167 | bitcount++; | |
1168 | mem_unit >>= 1; | |
1169 | sav_total = mem_total; | |
1170 | mem_total <<= 1; | |
1171 | if (mem_total < sav_total) | |
1172 | goto out; | |
1173 | } | |
1174 | ||
1175 | /* | |
1176 | * If mem_total did not overflow, multiply all memory values by | |
d4d23add | 1177 | * info->mem_unit and set it to 1. This leaves things compatible |
1da177e4 LT |
1178 | * with 2.2.x, and also retains compatibility with earlier 2.4.x |
1179 | * kernels... | |
1180 | */ | |
1181 | ||
d4d23add KM |
1182 | info->mem_unit = 1; |
1183 | info->totalram <<= bitcount; | |
1184 | info->freeram <<= bitcount; | |
1185 | info->sharedram <<= bitcount; | |
1186 | info->bufferram <<= bitcount; | |
1187 | info->totalswap <<= bitcount; | |
1188 | info->freeswap <<= bitcount; | |
1189 | info->totalhigh <<= bitcount; | |
1190 | info->freehigh <<= bitcount; | |
1191 | ||
1192 | out: | |
1193 | return 0; | |
1194 | } | |
1195 | ||
1196 | asmlinkage long sys_sysinfo(struct sysinfo __user *info) | |
1197 | { | |
1198 | struct sysinfo val; | |
1199 | ||
1200 | do_sysinfo(&val); | |
1da177e4 | 1201 | |
1da177e4 LT |
1202 | if (copy_to_user(info, &val, sizeof(struct sysinfo))) |
1203 | return -EFAULT; | |
1204 | ||
1205 | return 0; | |
1206 | } | |
1207 | ||
d730e882 IM |
1208 | /* |
1209 | * lockdep: we want to track each per-CPU base as a separate lock-class, | |
1210 | * but timer-bases are kmalloc()-ed, so we need to attach separate | |
1211 | * keys to them: | |
1212 | */ | |
1213 | static struct lock_class_key base_lock_keys[NR_CPUS]; | |
1214 | ||
a4a6198b | 1215 | static int __devinit init_timers_cpu(int cpu) |
1da177e4 LT |
1216 | { |
1217 | int j; | |
1218 | tvec_base_t *base; | |
ba6edfcd | 1219 | static char __devinitdata tvec_base_done[NR_CPUS]; |
55c888d6 | 1220 | |
ba6edfcd | 1221 | if (!tvec_base_done[cpu]) { |
a4a6198b JB |
1222 | static char boot_done; |
1223 | ||
a4a6198b | 1224 | if (boot_done) { |
ba6edfcd AM |
1225 | /* |
1226 | * The APs use this path later in boot | |
1227 | */ | |
94f6030c CL |
1228 | base = kmalloc_node(sizeof(*base), |
1229 | GFP_KERNEL | __GFP_ZERO, | |
a4a6198b JB |
1230 | cpu_to_node(cpu)); |
1231 | if (!base) | |
1232 | return -ENOMEM; | |
6e453a67 VP |
1233 | |
1234 | /* Make sure that tvec_base is 2 byte aligned */ | |
1235 | if (tbase_get_deferrable(base)) { | |
1236 | WARN_ON(1); | |
1237 | kfree(base); | |
1238 | return -ENOMEM; | |
1239 | } | |
ba6edfcd | 1240 | per_cpu(tvec_bases, cpu) = base; |
a4a6198b | 1241 | } else { |
ba6edfcd AM |
1242 | /* |
1243 | * This is for the boot CPU - we use compile-time | |
1244 | * static initialisation because per-cpu memory isn't | |
1245 | * ready yet and because the memory allocators are not | |
1246 | * initialised either. | |
1247 | */ | |
a4a6198b | 1248 | boot_done = 1; |
ba6edfcd | 1249 | base = &boot_tvec_bases; |
a4a6198b | 1250 | } |
ba6edfcd AM |
1251 | tvec_base_done[cpu] = 1; |
1252 | } else { | |
1253 | base = per_cpu(tvec_bases, cpu); | |
a4a6198b | 1254 | } |
ba6edfcd | 1255 | |
3691c519 | 1256 | spin_lock_init(&base->lock); |
d730e882 IM |
1257 | lockdep_set_class(&base->lock, base_lock_keys + cpu); |
1258 | ||
1da177e4 LT |
1259 | for (j = 0; j < TVN_SIZE; j++) { |
1260 | INIT_LIST_HEAD(base->tv5.vec + j); | |
1261 | INIT_LIST_HEAD(base->tv4.vec + j); | |
1262 | INIT_LIST_HEAD(base->tv3.vec + j); | |
1263 | INIT_LIST_HEAD(base->tv2.vec + j); | |
1264 | } | |
1265 | for (j = 0; j < TVR_SIZE; j++) | |
1266 | INIT_LIST_HEAD(base->tv1.vec + j); | |
1267 | ||
1268 | base->timer_jiffies = jiffies; | |
a4a6198b | 1269 | return 0; |
1da177e4 LT |
1270 | } |
1271 | ||
1272 | #ifdef CONFIG_HOTPLUG_CPU | |
55c888d6 | 1273 | static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head) |
1da177e4 LT |
1274 | { |
1275 | struct timer_list *timer; | |
1276 | ||
1277 | while (!list_empty(head)) { | |
b5e61818 | 1278 | timer = list_first_entry(head, struct timer_list, entry); |
55c888d6 | 1279 | detach_timer(timer, 0); |
6e453a67 | 1280 | timer_set_base(timer, new_base); |
1da177e4 | 1281 | internal_add_timer(new_base, timer); |
1da177e4 | 1282 | } |
1da177e4 LT |
1283 | } |
1284 | ||
1285 | static void __devinit migrate_timers(int cpu) | |
1286 | { | |
1287 | tvec_base_t *old_base; | |
1288 | tvec_base_t *new_base; | |
1289 | int i; | |
1290 | ||
1291 | BUG_ON(cpu_online(cpu)); | |
a4a6198b JB |
1292 | old_base = per_cpu(tvec_bases, cpu); |
1293 | new_base = get_cpu_var(tvec_bases); | |
1da177e4 LT |
1294 | |
1295 | local_irq_disable(); | |
e81ce1f7 HC |
1296 | double_spin_lock(&new_base->lock, &old_base->lock, |
1297 | smp_processor_id() < cpu); | |
3691c519 ON |
1298 | |
1299 | BUG_ON(old_base->running_timer); | |
1da177e4 | 1300 | |
1da177e4 | 1301 | for (i = 0; i < TVR_SIZE; i++) |
55c888d6 ON |
1302 | migrate_timer_list(new_base, old_base->tv1.vec + i); |
1303 | for (i = 0; i < TVN_SIZE; i++) { | |
1304 | migrate_timer_list(new_base, old_base->tv2.vec + i); | |
1305 | migrate_timer_list(new_base, old_base->tv3.vec + i); | |
1306 | migrate_timer_list(new_base, old_base->tv4.vec + i); | |
1307 | migrate_timer_list(new_base, old_base->tv5.vec + i); | |
1308 | } | |
1309 | ||
e81ce1f7 HC |
1310 | double_spin_unlock(&new_base->lock, &old_base->lock, |
1311 | smp_processor_id() < cpu); | |
1da177e4 LT |
1312 | local_irq_enable(); |
1313 | put_cpu_var(tvec_bases); | |
1da177e4 LT |
1314 | } |
1315 | #endif /* CONFIG_HOTPLUG_CPU */ | |
1316 | ||
8c78f307 | 1317 | static int __cpuinit timer_cpu_notify(struct notifier_block *self, |
1da177e4 LT |
1318 | unsigned long action, void *hcpu) |
1319 | { | |
1320 | long cpu = (long)hcpu; | |
1321 | switch(action) { | |
1322 | case CPU_UP_PREPARE: | |
8bb78442 | 1323 | case CPU_UP_PREPARE_FROZEN: |
a4a6198b JB |
1324 | if (init_timers_cpu(cpu) < 0) |
1325 | return NOTIFY_BAD; | |
1da177e4 LT |
1326 | break; |
1327 | #ifdef CONFIG_HOTPLUG_CPU | |
1328 | case CPU_DEAD: | |
8bb78442 | 1329 | case CPU_DEAD_FROZEN: |
1da177e4 LT |
1330 | migrate_timers(cpu); |
1331 | break; | |
1332 | #endif | |
1333 | default: | |
1334 | break; | |
1335 | } | |
1336 | return NOTIFY_OK; | |
1337 | } | |
1338 | ||
8c78f307 | 1339 | static struct notifier_block __cpuinitdata timers_nb = { |
1da177e4 LT |
1340 | .notifier_call = timer_cpu_notify, |
1341 | }; | |
1342 | ||
1343 | ||
1344 | void __init init_timers(void) | |
1345 | { | |
07dccf33 | 1346 | int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, |
1da177e4 | 1347 | (void *)(long)smp_processor_id()); |
07dccf33 | 1348 | |
82f67cd9 IM |
1349 | init_timer_stats(); |
1350 | ||
07dccf33 | 1351 | BUG_ON(err == NOTIFY_BAD); |
1da177e4 LT |
1352 | register_cpu_notifier(&timers_nb); |
1353 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL); | |
1354 | } | |
1355 | ||
1da177e4 LT |
1356 | /** |
1357 | * msleep - sleep safely even with waitqueue interruptions | |
1358 | * @msecs: Time in milliseconds to sleep for | |
1359 | */ | |
1360 | void msleep(unsigned int msecs) | |
1361 | { | |
1362 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | |
1363 | ||
75bcc8c5 NA |
1364 | while (timeout) |
1365 | timeout = schedule_timeout_uninterruptible(timeout); | |
1da177e4 LT |
1366 | } |
1367 | ||
1368 | EXPORT_SYMBOL(msleep); | |
1369 | ||
1370 | /** | |
96ec3efd | 1371 | * msleep_interruptible - sleep waiting for signals |
1da177e4 LT |
1372 | * @msecs: Time in milliseconds to sleep for |
1373 | */ | |
1374 | unsigned long msleep_interruptible(unsigned int msecs) | |
1375 | { | |
1376 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | |
1377 | ||
75bcc8c5 NA |
1378 | while (timeout && !signal_pending(current)) |
1379 | timeout = schedule_timeout_interruptible(timeout); | |
1da177e4 LT |
1380 | return jiffies_to_msecs(timeout); |
1381 | } | |
1382 | ||
1383 | EXPORT_SYMBOL(msleep_interruptible); |
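msleep() always sleeps out the full period, while msleep_interruptible() returns the milliseconds remaining when a signal cut the sleep short. A hedged sketch of a retry loop built on that return value; `try_op` and the periods are made up:

```c
#include <linux/delay.h>
#include <linux/errno.h>

/* Retry an operation once a second for up to 10 s, bailing out
 * early if the caller is signalled. */
static int poll_for_op(int (*try_op)(void))
{
	int i;

	for (i = 0; i < 10; i++) {
		if (try_op() == 0)
			return 0;
		if (msleep_interruptible(1000))
			return -EINTR;      /* signal arrived mid-sleep */
	}
	return -ETIMEDOUT;
}
```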