/*
 * linux/kernel/timer.c
 *
 * Kernel internal timers
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
 *
 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
 *            "A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *            serialize accesses to xtime/lost_ticks).
 *            Copyright (C) 1998 Andrea Arcangeli
 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
 *            Copyright (C) 2000, 2001, 2002 Ingo Molnar
 *            Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/slab.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))

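/*
 * Worked example (editor's illustration, not part of the original file):
 * with the !CONFIG_BASE_SMALL values TVR_BITS = 8 and TVN_BITS = 6, tv1
 * resolves the next TVR_SIZE = 256 jiffies exactly, one list per jiffy.
 * Each further level widens the reach by a factor of TVN_SIZE = 64:
 * tv2 covers up to 1 << 14 jiffies, tv3 up to 1 << 20, tv4 up to 1 << 26,
 * and tv5 up to MAX_TVAL = (1ULL << 32) - 1.  A timer due in 1000 jiffies
 * has 256 <= idx < 1 << 14, so __internal_add_timer() below files it in
 * tv2 at slot (expires >> TVR_BITS) & TVN_MASK.
 */
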
struct tvec {
	struct list_head vec[TVN_SIZE];
};

struct tvec_root {
	struct list_head vec[TVR_SIZE];
};

struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	unsigned long next_timer;
	unsigned long active_timers;
	unsigned long all_timers;
	int cpu;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;

struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TIMER_DEFERRABLE);
}

static inline unsigned int tbase_get_irqsafe(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TIMER_IRQSAFE);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
	return ((struct tvec_base *)((unsigned long)base & ~TIMER_FLAG_MASK));
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
	unsigned long flags = (unsigned long)timer->base & TIMER_FLAG_MASK;

	timer->base = (struct tvec_base *)((unsigned long)(new_base) | flags);
}

static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then round, then subtract this
	 * extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffie is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	/*
	 * Make sure j is still in the future. Otherwise return the
	 * unmodified value.
	 */
	return time_is_after_jiffies(j) ? j : original;
}

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);

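/*
 * Example (editor's sketch, not part of the original file): a driver whose
 * housekeeping only needs second-level accuracy can batch its wakeup with
 * other rounded timers.  example_gc_timer and example_gc() are hypothetical.
 */
#if 0	/* usage sketch */
static struct timer_list example_gc_timer;

static void example_gc(unsigned long data)
{
	/* ... periodic cleanup ... */

	/* re-arm ~10s out, aligned to a full second to batch CPU wakeups */
	mod_timer(&example_gc_timer, round_jiffies(jiffies + 10 * HZ));
}
#endif
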
/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of time, in jiffies, that a certain timer has
 * in terms of slack. By setting this value, the timer subsystem
 * will schedule the actual timer somewhere between
 * the time mod_timer() asks for, and that time plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
	timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);

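/*
 * Example (editor's sketch, not part of the original file): a timer that
 * tolerates firing up to a second late can say so; apply_slack() further
 * down then rounds its expiry so it can share a wheel slot with neighbours.
 * example_timer is hypothetical and assumed to be already set up.
 */
#if 0	/* usage sketch */
	set_timer_slack(&example_timer, HZ);		/* up to 1s late is fine */
	mod_timer(&example_timer, jiffies + 30 * HZ);	/* ~30s from now */
#endif
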
/*
 * If the list is empty, catch up ->timer_jiffies to the current time.
 * The caller must hold the tvec_base lock.  Returns true if the list
 * was empty and therefore ->timer_jiffies was updated.
 */
static bool catchup_timer_jiffies(struct tvec_base *base)
{
	if (!base->all_timers) {
		base->timer_jiffies = jiffies;
		return true;
	}
	return false;
}

static void
__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than MAX_TVAL (on 64-bit
		 * architectures or with CONFIG_BASE_SMALL=1) then we
		 * use the maximum timeout.
		 */
		if (idx > MAX_TVAL) {
			idx = MAX_TVAL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}

static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	(void)catchup_timer_jiffies(base);
	__internal_add_timer(base, timer);
	/*
	 * Update base->active_timers and base->next_timer
	 */
	if (!tbase_get_deferrable(timer->base)) {
		if (!base->active_timers++ ||
		    time_before(timer->expires, base->next_timer))
			base->next_timer = timer->expires;
	}
	base->all_timers++;

	/*
	 * Check whether the other CPU is in dynticks mode and needs
	 * to be triggered to reevaluate the timer wheel.
	 * We are protected against the other CPU fiddling
	 * with the timer by holding the timer base lock. This also
	 * makes sure that a CPU on the way to stop its tick can not
	 * evaluate the timer wheel.
	 *
	 * Spare the IPI for deferrable timers on idle targets though.
	 * The next busy ticks will take care of it. Except full dynticks
	 * require special care against races with idle_cpu(), let's deal
	 * with that later.
	 */
	if (!tbase_get_deferrable(base) || tick_nohz_full_cpu(base->cpu))
		wake_up_nohz_cpu(base->cpu);
}

#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	unsigned int flag = 0;

	if (likely(!timer->start_site))
		return;
	if (unlikely(tbase_get_deferrable(timer->base)))
		flag |= TIMER_STATS_FLAG_DEFERRABLE;

	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
	return ((struct timer_list *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(unsigned long data)
{
	WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The timer was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (timer->entry.next == NULL &&
		    timer->entry.prev == TIMER_ENTRY_STATIC) {
			debug_object_init(timer, &timer_debug_descr);
			debug_object_activate(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (timer->entry.prev == TIMER_ENTRY_STATIC) {
			/*
			 * This is not really a fixup. The timer was
			 * statically initialized. We just make sure that it
			 * is tracked in the object tracker.
			 */
			debug_object_init(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}
	default:
		return 0;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name			= "timer_list",
	.debug_hint		= timer_debug_hint,
	.fixup_init		= timer_fixup_init,
	.fixup_activate		= timer_fixup_activate,
	.fixup_free		= timer_fixup_free,
	.fixup_assert_init	= timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
	debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
			     const char *name, struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
	debug_timer_activate(timer);
	trace_timer_start(timer, expires);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
	debug_timer_assert_init(timer);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key)
{
	struct tvec_base *base = raw_cpu_read(tvec_bases);

	timer->entry.next = NULL;
	timer->base = (void *)((unsigned long)base | flags);
	timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer, unsigned int flags,
		    const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);

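/*
 * Example (editor's sketch, not part of the original file): typical
 * initialization through the setup_timer() convenience macro, which ends
 * up in init_timer_key().  struct my_dev, its watchdog field and
 * my_timeout() are hypothetical.
 */
#if 0	/* usage sketch */
static void my_timeout(unsigned long data)
{
	struct my_dev *dev = (struct my_dev *)data;

	/* runs in softirq context; keep it short and don't sleep */
}

	/* in probe/setup code: */
	setup_timer(&dev->watchdog, my_timeout, (unsigned long)dev);
	mod_timer(&dev->watchdog, jiffies + 5 * HZ);
#endif
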
static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
	struct list_head *entry = &timer->entry;

	debug_deactivate(timer);

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}

static inline void
detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
{
	detach_timer(timer, true);
	if (!tbase_get_deferrable(timer->base))
		base->active_timers--;
	base->all_timers--;
	(void)catchup_timer_jiffies(base);
}

static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
			     bool clear_pending)
{
	if (!timer_pending(timer))
		return 0;

	detach_timer(timer, clear_pending);
	if (!tbase_get_deferrable(timer->base)) {
		base->active_timers--;
		if (timer->expires == base->next_timer)
			base->next_timer = base->timer_jiffies;
	}
	base->all_timers--;
	(void)catchup_timer_jiffies(base);
	return 1;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
					 unsigned long *flags)
	__acquires(timer->base->lock)
{
	struct tvec_base *base;

	for (;;) {
		struct tvec_base *prelock_base = timer->base;
		base = tbase_get_base(prelock_base);
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(prelock_base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
	    bool pending_only, int pinned)
{
	struct tvec_base *base, *new_base;
	unsigned long flags;
	int ret = 0, cpu;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	ret = detach_if_pending(timer, base, false);
	if (!ret && pending_only)
		goto out_unlock;

	debug_activate(timer, expires);

	cpu = get_nohz_timer_target(pinned);
	new_base = per_cpu(tvec_bases, cpu);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not yet finished. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer_set_base(timer, NULL);
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer_set_base(timer, base);
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);

out_unlock:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);

/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 *   1) calculate the maximum (absolute) time
 *   2) calculate the highest bit where the expires and new max are different
 *   3) use this bit to make a mask
 *   4) use the bitmask to round down the maximum time, so that all last
 *      bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
	unsigned long expires_limit, mask;
	int bit;

	if (timer->slack >= 0) {
		expires_limit = expires + timer->slack;
	} else {
		long delta = expires - jiffies;

		if (delta < 256)
			return expires;

		expires_limit = expires + delta / 256;
	}
	mask = expires ^ expires_limit;
	if (mask == 0)
		return expires;

	bit = find_last_bit(&mask, BITS_PER_LONG);

	mask = (1UL << bit) - 1;

	expires_limit = expires_limit & ~(mask);

	return expires_limit;
}

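/*
 * Worked example (editor's illustration, not part of the original file):
 * with the default slack of -1 and an expiry 1024 jiffies out, delta/256
 * gives a 4-jiffy window, so expires_limit = expires + 4.  The mask built
 * from the highest bit in which expires and expires_limit differ rounds
 * expires_limit down to a multiple of a power of two, so timers aimed
 * within roughly 1/256th of their delay of each other collapse onto the
 * same expiry and get batched.
 */
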
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	expires = apply_slack(timer, expires);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer_pending(timer) && timer->expires == expires)
		return 1;

	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);

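/*
 * Example (editor's sketch, not part of the original file): mod_timer()
 * activates an inactive timer as well, so a periodic handler can re-arm
 * itself.  example_poll_timer and example_poll() are hypothetical.
 */
#if 0	/* usage sketch */
static struct timer_list example_poll_timer;

static void example_poll(unsigned long data)
{
	/* ... sample some state ... */

	/* re-arm; returns 0 here since the running timer is not pending */
	mod_timer(&example_poll_timer, jiffies + HZ / 10);
}
#endif
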
/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 * and to ensure that the timer is scheduled on the current CPU.
 *
 * Note that this does not prevent the timer from being migrated
 * when the current CPU goes offline. If this is a problem for
 * you, use CPU-hotplug notifiers to handle it correctly, for
 * example, cancelling the timer when the corresponding CPU goes
 * offline.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct tvec_base *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer_set_base(timer, base);
	debug_activate(timer, timer->expires);
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);

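/*
 * Example (editor's sketch, not part of the original file): pinning a
 * one-shot timer to a particular CPU, e.g. to touch per-CPU data from the
 * right place.  example_timer is hypothetical and must not be pending.
 */
#if 0	/* usage sketch */
	example_timer.expires = jiffies + HZ;
	add_timer_on(&example_timer, cpu);	/* queued on that CPU's wheel */
#endif
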
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: timer to delete
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer) {
		timer_stats_timer_clear_start_info(timer);
		ret = detach_if_pending(timer, base, true);
	}
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

#ifdef CONFIG_SMP
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's
 * handler. The timer's handler must not call add_timer_on(). Upon exit the
 * timer is not queued and the handler is not running on any CPU.
 *
 * Note: For !irqsafe timers, you must not hold locks that are held in
 *   interrupt context while calling this function. Even if the lock has
 *   nothing to do with the timer in question.  Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                     call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                        spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *      while (base->running_timer == mytimer);
 *
 *   Now del_timer_sync() will never return and never release somelock.
 *   The interrupt on the other CPU is waiting to grab somelock but
 *   it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	/*
	 * If lockdep gives a backtrace here, please reference
	 * the synchronization rules above.
	 */
	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif
	/*
	 * don't use it in hardirq context, because it
	 * could lead to deadlock.
	 */
	WARN_ON(in_irq() && !tbase_get_irqsafe(timer->base));
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL(del_timer_sync);
#endif

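/*
 * Example (editor's sketch, not part of the original file): tear-down order
 * matters.  First make sure nothing can re-arm the timer, then wait for a
 * possibly running handler before freeing what it touches.  dev and its
 * members are hypothetical.
 */
#if 0	/* usage sketch */
	dev->shutting_down = true;	/* handler checks this and won't re-arm */
	del_timer_sync(&dev->watchdog);	/* waits for a running handler */
	kfree(dev->stats);		/* now safe: handler can't be running */
#endif
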
static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(tbase_get_base(timer->base) != base);
		/* No accounting, while moving them */
		__internal_add_timer(base, timer);
	}

	return index;
}

static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
			  unsigned long data)
{
	int count = preempt_count();

#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the timer from inside the
	 * function that is called from it, this we need to take into
	 * account for lockdep too. To avoid bogus "held lock freed"
	 * warnings as well as problems when looking into
	 * timer->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
	/*
	 * Couple the lock chain with the lock chain at
	 * del_timer_sync() by acquiring the lock_map around the fn()
	 * call here and in del_timer_sync().
	 */
	lock_map_acquire(&lockdep_map);

	trace_timer_expire_entry(timer);
	fn(data);
	trace_timer_expire_exit(timer);

	lock_map_release(&lockdep_map);

	if (count != preempt_count()) {
		WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
			  fn, count, preempt_count());
		/*
		 * Restore the preempt count. That gives us a decent
		 * chance to survive and extract information. If the
		 * callback kept a lock held, bad luck, but not worse
		 * than the BUG() we had.
		 */
		preempt_count_set(count);
	}
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	if (catchup_timer_jiffies(base)) {
		spin_unlock_irq(&base->lock);
		return;
	}
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, head);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;
			bool irqsafe;

			timer = list_first_entry(head, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;
			irqsafe = tbase_get_irqsafe(timer->base);

			timer_stats_account_timer(timer);

			base->running_timer = timer;
			detach_expired_timer(timer, base);

			if (irqsafe) {
				spin_unlock(&base->lock);
				call_timer_fn(timer, fn, data);
				spin_lock(&base->lock);
			} else {
				spin_unlock_irq(&base->lock);
				call_timer_fn(timer, fn, data);
				spin_lock_irq(&base->lock);
			}
		}
	}
	base->running_timer = NULL;
	spin_unlock_irq(&base->lock);
}

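/*
 * Worked note (editor's illustration, not part of the original file): the
 * cascade in __run_timers() fires whenever the tv1 index wraps to 0, i.e.
 * every TVR_SIZE = 256 jiffies by default.  INDEX(0) names the tv2 slot
 * whose timers now fall due within the next 256 jiffies; re-adding them
 * with __internal_add_timer() sorts them into exact tv1 slots.  If that
 * tv2 slot index also wrapped to 0, tv3 cascades into tv2, and so on up
 * to tv5 - this is what makes far-out timers O(1) to add but occasionally
 * costly to cascade.
 */
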
3451d024 | 1222 | #ifdef CONFIG_NO_HZ_COMMON |
1da177e4 LT |
1223 | /* |
1224 | * Find out when the next timer event is due to happen. This | |
90cba64a RD |
1225 | * is used on S/390 to stop all activity when a CPU is idle. |
1226 | * This function needs to be called with interrupts disabled. | |
1da177e4 | 1227 | */ |
a6fa8e5a | 1228 | static unsigned long __next_timer_interrupt(struct tvec_base *base) |
1da177e4 | 1229 | { |
1cfd6849 | 1230 | unsigned long timer_jiffies = base->timer_jiffies; |
eaad084b | 1231 | unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA; |
1cfd6849 | 1232 | int index, slot, array, found = 0; |
1da177e4 | 1233 | struct timer_list *nte; |
a6fa8e5a | 1234 | struct tvec *varray[4]; |
1da177e4 LT |
1235 | |
1236 | /* Look for timer events in tv1. */ | |
1cfd6849 | 1237 | index = slot = timer_jiffies & TVR_MASK; |
1da177e4 | 1238 | do { |
1cfd6849 | 1239 | list_for_each_entry(nte, base->tv1.vec + slot, entry) { |
6819457d TG |
1240 | if (tbase_get_deferrable(nte->base)) |
1241 | continue; | |
6e453a67 | 1242 | |
1cfd6849 | 1243 | found = 1; |
1da177e4 | 1244 | expires = nte->expires; |
1cfd6849 TG |
1245 | /* Look at the cascade bucket(s)? */ |
1246 | if (!index || slot < index) | |
1247 | goto cascade; | |
1248 | return expires; | |
1da177e4 | 1249 | } |
1cfd6849 TG |
1250 | slot = (slot + 1) & TVR_MASK; |
1251 | } while (slot != index); | |
1252 | ||
1253 | cascade: | |
1254 | /* Calculate the next cascade event */ | |
1255 | if (index) | |
1256 | timer_jiffies += TVR_SIZE - index; | |
1257 | timer_jiffies >>= TVR_BITS; | |
1da177e4 LT |
1258 | |
1259 | /* Check tv2-tv5. */ | |
1260 | varray[0] = &base->tv2; | |
1261 | varray[1] = &base->tv3; | |
1262 | varray[2] = &base->tv4; | |
1263 | varray[3] = &base->tv5; | |
1cfd6849 TG |
1264 | |
1265 | for (array = 0; array < 4; array++) { | |
a6fa8e5a | 1266 | struct tvec *varp = varray[array]; |
1cfd6849 TG |
1267 | |
1268 | index = slot = timer_jiffies & TVN_MASK; | |
1da177e4 | 1269 | do { |
1cfd6849 | 1270 | list_for_each_entry(nte, varp->vec + slot, entry) { |
a0419888 JH |
1271 | if (tbase_get_deferrable(nte->base)) |
1272 | continue; | |
1273 | ||
1cfd6849 | 1274 | found = 1; |
1da177e4 LT |
1275 | if (time_before(nte->expires, expires)) |
1276 | expires = nte->expires; | |
1cfd6849 TG |
1277 | } |
1278 | /* | |
1279 | * Do we still search for the first timer or are | |
1280 | * we looking up the cascade buckets ? | |
1281 | */ | |
1282 | if (found) { | |
1283 | /* Look at the cascade bucket(s)? */ | |
1284 | if (!index || slot < index) | |
1285 | break; | |
1286 | return expires; | |
1287 | } | |
1288 | slot = (slot + 1) & TVN_MASK; | |
1289 | } while (slot != index); | |
1290 | ||
1291 | if (index) | |
1292 | timer_jiffies += TVN_SIZE - index; | |
1293 | timer_jiffies >>= TVN_BITS; | |
1da177e4 | 1294 | } |
1cfd6849 TG |
1295 | return expires; |
1296 | } | |
69239749 | 1297 | |
1cfd6849 TG |
1298 | /* |
1299 | * Check, if the next hrtimer event is before the next timer wheel | |
1300 | * event: | |
1301 | */ | |
1302 | static unsigned long cmp_next_hrtimer_event(unsigned long now, | |
1303 | unsigned long expires) | |
1304 | { | |
1305 | ktime_t hr_delta = hrtimer_get_next_event(); | |
1306 | struct timespec tsdelta; | |
9501b6cf | 1307 | unsigned long delta; |
1cfd6849 TG |
1308 | |
1309 | if (hr_delta.tv64 == KTIME_MAX) | |
1310 | return expires; | |
0662b713 | 1311 | |
9501b6cf TG |
1312 | /* |
1313 | * Expired timer available, let it expire in the next tick | |
1314 | */ | |
1315 | if (hr_delta.tv64 <= 0) | |
1316 | return now + 1; | |
69239749 | 1317 | |
1cfd6849 | 1318 | tsdelta = ktime_to_timespec(hr_delta); |
9501b6cf | 1319 | delta = timespec_to_jiffies(&tsdelta); |
eaad084b TG |
1320 | |
1321 | /* | |
1322 | * Limit the delta to the max value, which is checked in | |
1323 | * tick_nohz_stop_sched_tick(): | |
1324 | */ | |
1325 | if (delta > NEXT_TIMER_MAX_DELTA) | |
1326 | delta = NEXT_TIMER_MAX_DELTA; | |
1327 | ||
9501b6cf TG |
1328 | /* |
1329 | * Take rounding errors in to account and make sure, that it | |
1330 | * expires in the next tick. Otherwise we go into an endless | |
1331 | * ping pong due to tick_nohz_stop_sched_tick() retriggering | |
1332 | * the timer softirq | |
1333 | */ | |
1334 | if (delta < 1) | |
1335 | delta = 1; | |
1336 | now += delta; | |
1cfd6849 TG |
1337 | if (time_before(now, expires)) |
1338 | return now; | |
1da177e4 LT |
1339 | return expires; |
1340 | } | |
1cfd6849 TG |
1341 | |
1342 | /** | |
8dce39c2 | 1343 | * get_next_timer_interrupt - return the jiffy of the next pending timer |
05fb6bf0 | 1344 | * @now: current time (in jiffies) |
1cfd6849 | 1345 | */ |
fd064b9b | 1346 | unsigned long get_next_timer_interrupt(unsigned long now) |
1cfd6849 | 1347 | { |
7496351a | 1348 | struct tvec_base *base = __this_cpu_read(tvec_bases); |
e40468a5 | 1349 | unsigned long expires = now + NEXT_TIMER_MAX_DELTA; |
1cfd6849 | 1350 | |
dbd87b5a HC |
1351 | /* |
1352 | * Pretend that there is no timer pending if the cpu is offline. | |
1353 | * Possible pending timers will be migrated later to an active cpu. | |
1354 | */ | |
1355 | if (cpu_is_offline(smp_processor_id())) | |
e40468a5 TG |
1356 | return expires; |
1357 | ||
1cfd6849 | 1358 | spin_lock(&base->lock); |
e40468a5 TG |
1359 | if (base->active_timers) { |
1360 | if (time_before_eq(base->next_timer, base->timer_jiffies)) | |
1361 | base->next_timer = __next_timer_interrupt(base); | |
1362 | expires = base->next_timer; | |
1363 | } | |
1cfd6849 TG |
1364 | spin_unlock(&base->lock); |
1365 | ||
1366 | if (time_before_eq(expires, now)) | |
1367 | return now; | |
1368 | ||
1369 | return cmp_next_hrtimer_event(now, expires); | |
1370 | } | |
1da177e4 LT |
1371 | #endif |
1372 | ||
1da177e4 | 1373 | /* |
5b4db0c2 | 1374 | * Called from the timer interrupt handler to charge one tick to the current |
1da177e4 LT |
1375 | * process. user_tick is 1 if the tick is user time, 0 for system. |
1376 | */ | |
1377 | void update_process_times(int user_tick) | |
1378 | { | |
1379 | struct task_struct *p = current; | |
1da177e4 LT |
1380 | |
1381 | /* Note: this timer irq context must be accounted for as well. */ | |
fa13a5a1 | 1382 | account_process_tick(p, user_tick); |
1da177e4 | 1383 | run_local_timers(); |
c3377c2d | 1384 | rcu_check_callbacks(user_tick); |
e360adbe PZ |
1385 | #ifdef CONFIG_IRQ_WORK |
1386 | if (in_irq()) | |
76a33061 | 1387 | irq_work_tick(); |
e360adbe | 1388 | #endif |
1da177e4 | 1389 | scheduler_tick(); |
6819457d | 1390 | run_posix_cpu_timers(p); |
1da177e4 LT |
1391 | } |
1392 | ||
1da177e4 LT |
1393 | /* |
1394 | * This function runs timers and the timer-tq in bottom half context. | |
1395 | */ | |
1396 | static void run_timer_softirq(struct softirq_action *h) | |
1397 | { | |
7496351a | 1398 | struct tvec_base *base = __this_cpu_read(tvec_bases); |
1da177e4 | 1399 | |
d3d74453 | 1400 | hrtimer_run_pending(); |
82f67cd9 | 1401 | |
1da177e4 LT |
1402 | if (time_after_eq(jiffies, base->timer_jiffies)) |
1403 | __run_timers(base); | |
1404 | } | |
1405 | ||
1406 | /* | |
1407 | * Called by the local, per-CPU timer interrupt on SMP. | |
1408 | */ | |
1409 | void run_local_timers(void) | |
1410 | { | |
d3d74453 | 1411 | hrtimer_run_queues(); |
1da177e4 LT |
1412 | raise_softirq(TIMER_SOFTIRQ); |
1413 | } | |
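/*
 * Flow sketch (illustrative): per-CPU tick interrupt
 *   -> update_process_times()
 *   -> run_local_timers()	raises TIMER_SOFTIRQ
 *   -> run_timer_softirq()	expires timers in softirq context
 */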
1414 | ||
1da177e4 LT |
1415 | #ifdef __ARCH_WANT_SYS_ALARM |
1416 | ||
1417 | /* | |
1418 | * For backwards compatibility? This can be done in libc, so Alpha
1419 | * and all newer ports shouldn't need it. | |
1420 | */ | |
58fd3aa2 | 1421 | SYSCALL_DEFINE1(alarm, unsigned int, seconds) |
1da177e4 | 1422 | { |
c08b8a49 | 1423 | return alarm_setitimer(seconds); |
1da177e4 LT |
1424 | } |
1425 | ||
1426 | #endif | |
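/*
 * Userspace semantics sketch (illustrative): alarm(5) arms SIGALRM
 * roughly 5 seconds out and returns the seconds remaining on any
 * previously armed alarm; alarm(0) cancels without arming a new one.
 */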
1427 | ||
1da177e4 LT |
1428 | static void process_timeout(unsigned long __data) |
1429 | { | |
36c8b586 | 1430 | wake_up_process((struct task_struct *)__data); |
1da177e4 LT |
1431 | } |
1432 | ||
1433 | /** | |
1434 | * schedule_timeout - sleep until timeout | |
1435 | * @timeout: timeout value in jiffies | |
1436 | * | |
1437 | * Make the current task sleep until @timeout jiffies have | |
1438 | * elapsed. The routine will return immediately unless | |
1439 | * the current task state has been set (see set_current_state()). | |
1440 | * | |
1441 | * You can set the task state as follows - | |
1442 | * | |
1443 | * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to | |
1444 | * pass before the routine returns. The routine will return 0.
1445 | * | |
1446 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is | |
1447 | * delivered to the current task. In this case the remaining time | |
1448 | * in jiffies will be returned, or 0 if the timer expired in time.
1449 | * | |
1450 | * The current task state is guaranteed to be TASK_RUNNING when this | |
1451 | * routine returns. | |
1452 | * | |
1453 | * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule | |
1454 | * the CPU away without a bound on the timeout. In this case the return | |
1455 | * value will be %MAX_SCHEDULE_TIMEOUT. | |
1456 | * | |
1457 | * In all cases the return value is guaranteed to be non-negative. | |
1458 | */ | |
7ad5b3a5 | 1459 | signed long __sched schedule_timeout(signed long timeout) |
1da177e4 LT |
1460 | { |
1461 | struct timer_list timer; | |
1462 | unsigned long expire; | |
1463 | ||
1464 | switch (timeout) | |
1465 | { | |
1466 | case MAX_SCHEDULE_TIMEOUT: | |
1467 | /* | |
1468 | * These two special cases are useful for the caller's
1469 | * convenience. Nothing more. We could take
1470 | * MAX_SCHEDULE_TIMEOUT from one of the negative values,
1471 | * but I'd like to return a valid offset (>=0) to allow
1472 | * the caller to do everything it wants with the retval.
1473 | */ | |
1474 | schedule(); | |
1475 | goto out; | |
1476 | default: | |
1477 | /* | |
1478 | * Another bit of paranoia. Note that the retval will be
1479 | * 0, since no piece of the kernel is supposed to check
1480 | * for a negative retval of schedule_timeout() (it
1481 | * should never happen anyway). You just have the printk()
1482 | * that will tell you if something has gone wrong and where.
1483 | */ | |
5b149bcc | 1484 | if (timeout < 0) { |
1da177e4 | 1485 | printk(KERN_ERR "schedule_timeout: wrong timeout " |
5b149bcc AM |
1486 | "value %lx\n", timeout); |
1487 | dump_stack(); | |
1da177e4 LT |
1488 | current->state = TASK_RUNNING; |
1489 | goto out; | |
1490 | } | |
1491 | } | |
1492 | ||
1493 | expire = timeout + jiffies; | |
1494 | ||
c6f3a97f | 1495 | setup_timer_on_stack(&timer, process_timeout, (unsigned long)current); |
597d0275 | 1496 | __mod_timer(&timer, expire, false, TIMER_NOT_PINNED); |
1da177e4 LT |
1497 | schedule(); |
1498 | del_singleshot_timer_sync(&timer); | |
1499 | ||
c6f3a97f TG |
1500 | /* Remove the timer from the object tracker */ |
1501 | destroy_timer_on_stack(&timer); | |
1502 | ||
1da177e4 LT |
1503 | timeout = expire - jiffies; |
1504 | ||
1505 | out: | |
1506 | return timeout < 0 ? 0 : timeout; | |
1507 | } | |
1da177e4 LT |
1508 | EXPORT_SYMBOL(schedule_timeout); |
1509 | ||
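/*
 * Canonical caller sketch (hypothetical): the task state must be set
 * right before the call; a task left in TASK_RUNNING makes
 * schedule_timeout() return almost immediately.
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 *	if (remaining)
 *		;	// a signal woke us with 'remaining' jiffies left
 */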
8a1c1757 AM |
1510 | /* |
1511 | * We can use __set_current_state() here because schedule_timeout() calls | |
1512 | * schedule() unconditionally. | |
1513 | */ | |
64ed93a2 NA |
1514 | signed long __sched schedule_timeout_interruptible(signed long timeout) |
1515 | { | |
a5a0d52c AM |
1516 | __set_current_state(TASK_INTERRUPTIBLE); |
1517 | return schedule_timeout(timeout); | |
64ed93a2 NA |
1518 | } |
1519 | EXPORT_SYMBOL(schedule_timeout_interruptible); | |
1520 | ||
294d5cc2 MW |
1521 | signed long __sched schedule_timeout_killable(signed long timeout) |
1522 | { | |
1523 | __set_current_state(TASK_KILLABLE); | |
1524 | return schedule_timeout(timeout); | |
1525 | } | |
1526 | EXPORT_SYMBOL(schedule_timeout_killable); | |
1527 | ||
64ed93a2 NA |
1528 | signed long __sched schedule_timeout_uninterruptible(signed long timeout) |
1529 | { | |
a5a0d52c AM |
1530 | __set_current_state(TASK_UNINTERRUPTIBLE); |
1531 | return schedule_timeout(timeout); | |
64ed93a2 NA |
1532 | } |
1533 | EXPORT_SYMBOL(schedule_timeout_uninterruptible); | |
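/*
 * Illustrative comparison of the wrappers above (hypothetical use):
 *
 *	schedule_timeout_uninterruptible(HZ);	// sleep a full second
 *	schedule_timeout_killable(HZ);		// as above, but fatal
 *						// signals break the wait
 *	left = schedule_timeout_interruptible(HZ); // any signal wakes us;
 *						// 'left' is what remained
 */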
1534 | ||
0db0628d | 1535 | static int init_timers_cpu(int cpu) |
1da177e4 LT |
1536 | { |
1537 | int j; | |
a6fa8e5a | 1538 | struct tvec_base *base; |
0db0628d | 1539 | static char tvec_base_done[NR_CPUS]; |
55c888d6 | 1540 | |
ba6edfcd | 1541 | if (!tvec_base_done[cpu]) { |
a4a6198b JB |
1542 | static char boot_done; |
1543 | ||
a4a6198b | 1544 | if (boot_done) { |
ba6edfcd AM |
1545 | /* |
1546 | * The APs (secondary CPUs) use this path later in boot.
1547 | */ | |
da554eba JP |
1548 | base = kzalloc_node(sizeof(*base), GFP_KERNEL, |
1549 | cpu_to_node(cpu)); | |
a4a6198b JB |
1550 | if (!base) |
1551 | return -ENOMEM; | |
6e453a67 | 1552 | |
38edbb0b VK |
1553 | /* Make sure tvec_base has TIMER_FLAG_MASK bits free */ |
1554 | if (WARN_ON(base != tbase_get_base(base))) { | |
6e453a67 VP |
1555 | kfree(base); |
1556 | return -ENOMEM; | |
1557 | } | |
ba6edfcd | 1558 | per_cpu(tvec_bases, cpu) = base; |
a4a6198b | 1559 | } else { |
ba6edfcd AM |
1560 | /* |
1561 | * This is for the boot CPU - we use compile-time | |
1562 | * static initialisation because per-cpu memory isn't | |
1563 | * ready yet and because the memory allocators are not | |
1564 | * initialised either. | |
1565 | */ | |
a4a6198b | 1566 | boot_done = 1; |
ba6edfcd | 1567 | base = &boot_tvec_bases; |
a4a6198b | 1568 | } |
42a5cf46 | 1569 | spin_lock_init(&base->lock); |
ba6edfcd | 1570 | tvec_base_done[cpu] = 1; |
d6f93829 | 1571 | base->cpu = cpu; |
ba6edfcd AM |
1572 | } else { |
1573 | base = per_cpu(tvec_bases, cpu); | |
a4a6198b | 1574 | } |
ba6edfcd | 1575 | |
d730e882 | 1576 | |
1da177e4 LT |
1577 | for (j = 0; j < TVN_SIZE; j++) { |
1578 | INIT_LIST_HEAD(base->tv5.vec + j); | |
1579 | INIT_LIST_HEAD(base->tv4.vec + j); | |
1580 | INIT_LIST_HEAD(base->tv3.vec + j); | |
1581 | INIT_LIST_HEAD(base->tv2.vec + j); | |
1582 | } | |
1583 | for (j = 0; j < TVR_SIZE; j++) | |
1584 | INIT_LIST_HEAD(base->tv1.vec + j); | |
1585 | ||
1586 | base->timer_jiffies = jiffies; | |
97fd9ed4 | 1587 | base->next_timer = base->timer_jiffies; |
99d5f3aa | 1588 | base->active_timers = 0; |
fff42158 | 1589 | base->all_timers = 0; |
a4a6198b | 1590 | return 0; |
1da177e4 LT |
1591 | } |
1592 | ||
1593 | #ifdef CONFIG_HOTPLUG_CPU | |
a6fa8e5a | 1594 | static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head) |
1da177e4 LT |
1595 | { |
1596 | struct timer_list *timer; | |
1597 | ||
1598 | while (!list_empty(head)) { | |
b5e61818 | 1599 | timer = list_first_entry(head, struct timer_list, entry); |
99d5f3aa | 1600 | /* We ignore the accounting on the dying cpu */ |
ec44bc7a | 1601 | detach_timer(timer, false); |
6e453a67 | 1602 | timer_set_base(timer, new_base); |
1da177e4 | 1603 | internal_add_timer(new_base, timer); |
1da177e4 | 1604 | } |
1da177e4 LT |
1605 | } |
1606 | ||
0db0628d | 1607 | static void migrate_timers(int cpu) |
1da177e4 | 1608 | { |
a6fa8e5a PM |
1609 | struct tvec_base *old_base; |
1610 | struct tvec_base *new_base; | |
1da177e4 LT |
1611 | int i; |
1612 | ||
1613 | BUG_ON(cpu_online(cpu)); | |
a4a6198b JB |
1614 | old_base = per_cpu(tvec_bases, cpu); |
1615 | new_base = get_cpu_var(tvec_bases); | |
d82f0b0f ON |
1616 | /* |
1617 | * The caller is globally serialized and nobody else | |
1618 | * takes two locks at once, so deadlock is not possible.
1619 | */ | |
1620 | spin_lock_irq(&new_base->lock); | |
0d180406 | 1621 | spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); |
3691c519 ON |
1622 | |
1623 | BUG_ON(old_base->running_timer); | |
1da177e4 | 1624 | |
1da177e4 | 1625 | for (i = 0; i < TVR_SIZE; i++) |
55c888d6 ON |
1626 | migrate_timer_list(new_base, old_base->tv1.vec + i); |
1627 | for (i = 0; i < TVN_SIZE; i++) { | |
1628 | migrate_timer_list(new_base, old_base->tv2.vec + i); | |
1629 | migrate_timer_list(new_base, old_base->tv3.vec + i); | |
1630 | migrate_timer_list(new_base, old_base->tv4.vec + i); | |
1631 | migrate_timer_list(new_base, old_base->tv5.vec + i); | |
1632 | } | |
1633 | ||
0d180406 | 1634 | spin_unlock(&old_base->lock); |
d82f0b0f | 1635 | spin_unlock_irq(&new_base->lock); |
1da177e4 | 1636 | put_cpu_var(tvec_bases); |
1da177e4 LT |
1637 | } |
1638 | #endif /* CONFIG_HOTPLUG_CPU */ | |
1639 | ||
0db0628d | 1640 | static int timer_cpu_notify(struct notifier_block *self, |
1da177e4 LT |
1641 | unsigned long action, void *hcpu) |
1642 | { | |
1643 | long cpu = (long)hcpu; | |
80b5184c AM |
1644 | int err; |
1645 | ||
1da177e4 LT |
1646 | switch(action) { |
1647 | case CPU_UP_PREPARE: | |
8bb78442 | 1648 | case CPU_UP_PREPARE_FROZEN: |
80b5184c AM |
1649 | err = init_timers_cpu(cpu); |
1650 | if (err < 0) | |
1651 | return notifier_from_errno(err); | |
1da177e4 LT |
1652 | break; |
1653 | #ifdef CONFIG_HOTPLUG_CPU | |
1654 | case CPU_DEAD: | |
8bb78442 | 1655 | case CPU_DEAD_FROZEN: |
1da177e4 LT |
1656 | migrate_timers(cpu); |
1657 | break; | |
1658 | #endif | |
1659 | default: | |
1660 | break; | |
1661 | } | |
1662 | return NOTIFY_OK; | |
1663 | } | |
1664 | ||
0db0628d | 1665 | static struct notifier_block timers_nb = { |
1da177e4 LT |
1666 | .notifier_call = timer_cpu_notify, |
1667 | }; | |
1668 | ||
1669 | ||
1670 | void __init init_timers(void) | |
1671 | { | |
e52b1db3 TH |
1672 | int err; |
1673 | ||
1674 | /* ensure there are enough low bits for flags in timer->base pointer */ | |
1675 | BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK); | |
07dccf33 | 1676 | |
e52b1db3 TH |
1677 | err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, |
1678 | (void *)(long)smp_processor_id()); | |
9e506f7a | 1679 | BUG_ON(err != NOTIFY_OK); |
c24a4a36 VK |
1680 | |
1681 | init_timer_stats(); | |
1da177e4 | 1682 | register_cpu_notifier(&timers_nb); |
962cf36c | 1683 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq); |
1da177e4 LT |
1684 | } |
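/*
 * Boot-order note (illustrative): start_kernel() calls init_timers()
 * early, before interrupts are enabled, so the boot CPU's tvec_base
 * and the TIMER_SOFTIRQ handler are ready before the first tick.
 */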
1685 | ||
1da177e4 LT |
1686 | /** |
1687 | * msleep - sleep safely even with waitqueue interruptions | |
1688 | * @msecs: Time in milliseconds to sleep for | |
1689 | */ | |
1690 | void msleep(unsigned int msecs) | |
1691 | { | |
1692 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | |
1693 | ||
75bcc8c5 NA |
1694 | while (timeout) |
1695 | timeout = schedule_timeout_uninterruptible(timeout); | |
1da177e4 LT |
1696 | } |
1697 | ||
1698 | EXPORT_SYMBOL(msleep); | |
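/*
 * Usage note (illustrative): msleep() guarantees *at least* the
 * requested delay; the "+ 1" jiffy above covers partial-tick
 * rounding, so msleep(1) can sleep up to two ticks (20ms at HZ=100).
 * For short, precision-sensitive delays prefer usleep_range() below.
 */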
1699 | ||
1700 | /** | |
96ec3efd | 1701 | * msleep_interruptible - sleep waiting for signals |
1da177e4 LT |
1702 | * @msecs: Time in milliseconds to sleep for |
1703 | */ | |
1704 | unsigned long msleep_interruptible(unsigned int msecs) | |
1705 | { | |
1706 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | |
1707 | ||
75bcc8c5 NA |
1708 | while (timeout && !signal_pending(current)) |
1709 | timeout = schedule_timeout_interruptible(timeout); | |
1da177e4 LT |
1710 | return jiffies_to_msecs(timeout); |
1711 | } | |
1712 | ||
1713 | EXPORT_SYMBOL(msleep_interruptible); | |
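/*
 * Illustrative caller: check the return value to distinguish a full
 * sleep from an early, signal-driven wakeup.
 *
 *	if (msleep_interruptible(500))
 *		return -EINTR;	// woken early by a signal
 */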
5e7f5a17 PP |
1714 | |
1715 | static int __sched do_usleep_range(unsigned long min, unsigned long max) | |
1716 | { | |
1717 | ktime_t kmin; | |
1718 | unsigned long delta; | |
1719 | ||
1720 | kmin = ktime_set(0, min * NSEC_PER_USEC); | |
1721 | delta = (max - min) * NSEC_PER_USEC; | |
1722 | return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL); | |
1723 | } | |
1724 | ||
1725 | /** | |
1726 | * usleep_range - Drop-in replacement for udelay where wakeup is flexible
1727 | * @min: Minimum time in usecs to sleep | |
1728 | * @max: Maximum time in usecs to sleep | |
1729 | */ | |
1730 | void usleep_range(unsigned long min, unsigned long max) | |
1731 | { | |
1732 | __set_current_state(TASK_UNINTERRUPTIBLE); | |
1733 | do_usleep_range(min, max); | |
1734 | } | |
1735 | EXPORT_SYMBOL(usleep_range); |
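/*
 * Illustrative use: the min/max window lets the hrtimer subsystem
 * coalesce this wakeup with others, e.g. a device that needs at
 * least 100us but tolerates up to 200us:
 *
 *	usleep_range(100, 200);
 */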