timers: Only wake softirq if necessary
1 /*
2 * linux/kernel/timer.c
3 *
4 * Kernel internal timers
5 *
6 * Copyright (C) 1991, 1992 Linus Torvalds
7 *
8 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
9 *
10 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
11 * "A Kernel Model for Precision Timekeeping" by Dave Mills
12 * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
13 * serialize accesses to xtime/lost_ticks).
14 * Copyright (C) 1998 Andrea Arcangeli
15 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
16 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
17 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
18 * Copyright (C) 2000, 2001, 2002 Ingo Molnar
19 * Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
20 */
21
22 #include <linux/kernel_stat.h>
23 #include <linux/export.h>
24 #include <linux/interrupt.h>
25 #include <linux/percpu.h>
26 #include <linux/init.h>
27 #include <linux/mm.h>
28 #include <linux/swap.h>
29 #include <linux/pid_namespace.h>
30 #include <linux/notifier.h>
31 #include <linux/thread_info.h>
32 #include <linux/time.h>
33 #include <linux/jiffies.h>
34 #include <linux/posix-timers.h>
35 #include <linux/cpu.h>
36 #include <linux/syscalls.h>
37 #include <linux/delay.h>
38 #include <linux/tick.h>
39 #include <linux/kallsyms.h>
40 #include <linux/irq_work.h>
41 #include <linux/sched.h>
42 #include <linux/sched/sysctl.h>
43 #include <linux/slab.h>
44 #include <linux/compat.h>
45
46 #include <asm/uaccess.h>
47 #include <asm/unistd.h>
48 #include <asm/div64.h>
49 #include <asm/timex.h>
50 #include <asm/io.h>
51
52 #include "tick-internal.h"
53
54 #define CREATE_TRACE_POINTS
55 #include <trace/events/timer.h>
56
57 __visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
58
59 EXPORT_SYMBOL(jiffies_64);
60
61 /*
62 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
63 * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
64 * level has a different granularity.
65 *
66 * The level granularity is: LVL_CLK_DIV ^ lvl
67 * The level clock frequency is: HZ / (LVL_CLK_DIV ^ level)
68 *
69 * The array level of a newly armed timer depends on the relative expiry
70 * time. The farther away the expiry time is, the higher the array level and
71 * therefore the coarser the granularity becomes.
72 *
73 * Contrary to the original timer wheel implementation, which aims for 'exact'
74 * expiry of the timers, this implementation removes the need for recascading
75 * the timers into the lower array levels. The previous 'classic' timer wheel
76 * implementation of the kernel already violated the 'exact' expiry by adding
77 * slack to the expiry time to provide batched expiration. The granularity
78 * levels provide implicit batching.
79 *
80 * This is an optimization of the original timer wheel implementation for the
81 * majority of the timer wheel use cases: timeouts. The vast majority of
82 * timeout timers (networking, disk I/O ...) are canceled before expiry. If
83 * the timeout expires it indicates that normal operation is disturbed, so it
84 * does not matter much whether the timeout comes with a slight delay.
85 *
86 * The only exceptions to this are networking timers with a small expiry
87 * time. They rely on the granularity. Those fit into the first wheel level,
88 * which has HZ granularity.
89 *
90 * We don't have cascading anymore. Timers with an expiry time above the
91 * capacity of the last wheel level are force expired at the maximum timeout
92 * value of the last wheel level. From data sampling we know that the maximum
93 * value observed is 5 days (network connection tracking), so this should not
94 * be an issue.
95 *
96 * The currently chosen array constants are a good compromise between
97 * array size and granularity.
98 *
99 * This results in the following granularity and range levels:
100 *
101 * HZ 1000 steps
102 * Level Offset Granularity Range
103 * 0 0 1 ms 0 ms - 63 ms
104 * 1 64 8 ms 64 ms - 511 ms
105 * 2 128 64 ms 512 ms - 4095 ms (512ms - ~4s)
106 * 3 192 512 ms 4096 ms - 32767 ms (~4s - ~32s)
107 * 4 256 4096 ms (~4s) 32768 ms - 262143 ms (~32s - ~4m)
108 * 5 320 32768 ms (~32s) 262144 ms - 2097151 ms (~4m - ~34m)
109 * 6 384 262144 ms (~4m) 2097152 ms - 16777215 ms (~34m - ~4h)
110 * 7 448 2097152 ms (~34m) 16777216 ms - 134217727 ms (~4h - ~1d)
111 * 8 512 16777216 ms (~4h) 134217728 ms - 1073741822 ms (~1d - ~12d)
112 *
113 * HZ 300
114 * Level Offset Granularity Range
115 * 0 0 3 ms 0 ms - 210 ms
116 * 1 64 26 ms 213 ms - 1703 ms (213ms - ~1s)
117 * 2 128 213 ms 1706 ms - 13650 ms (~1s - ~13s)
118 * 3 192 1706 ms (~1s) 13653 ms - 109223 ms (~13s - ~1m)
119 * 4 256 13653 ms (~13s) 109226 ms - 873810 ms (~1m - ~14m)
120 * 5 320 109226 ms (~1m) 873813 ms - 6990503 ms (~14m - ~1h)
121 * 6 384 873813 ms (~14m) 6990506 ms - 55924050 ms (~1h - ~15h)
122 * 7 448 6990506 ms (~1h) 55924053 ms - 447392423 ms (~15h - ~5d)
123 * 8 512 55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
124 *
125 * HZ 250
126 * Level Offset Granularity Range
127 * 0 0 4 ms 0 ms - 255 ms
128 * 1 64 32 ms 256 ms - 2047 ms (256ms - ~2s)
129 * 2 128 256 ms 2048 ms - 16383 ms (~2s - ~16s)
130 * 3 192 2048 ms (~2s) 16384 ms - 131071 ms (~16s - ~2m)
131 * 4 256 16384 ms (~16s) 131072 ms - 1048575 ms (~2m - ~17m)
132 * 5 320 131072 ms (~2m) 1048576 ms - 8388607 ms (~17m - ~2h)
133 * 6 384 1048576 ms (~17m) 8388608 ms - 67108863 ms (~2h - ~18h)
134 * 7 448 8388608 ms (~2h) 67108864 ms - 536870911 ms (~18h - ~6d)
135 * 8 512 67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
136 *
137 * HZ 100
138 * Level Offset Granularity Range
139 * 0 0 10 ms 0 ms - 630 ms
140 * 1 64 80 ms 640 ms - 5110 ms (640ms - ~5s)
141 * 2 128 640 ms 5120 ms - 40950 ms (~5s - ~40s)
142 * 3 192 5120 ms (~5s) 40960 ms - 327670 ms (~40s - ~5m)
143 * 4 256 40960 ms (~40s) 327680 ms - 2621430 ms (~5m - ~43m)
144 * 5 320 327680 ms (~5m) 2621440 ms - 20971510 ms (~43m - ~5h)
145 * 6 384 2621440 ms (~43m) 20971520 ms - 167772150 ms (~5h - ~1d)
146 * 7 448 20971520 ms (~5h) 167772160 ms - 1342177270 ms (~1d - ~15d)
147 */
148
149 /* Clock divisor for the next level */
150 #define LVL_CLK_SHIFT 3
151 #define LVL_CLK_DIV (1UL << LVL_CLK_SHIFT)
152 #define LVL_CLK_MASK (LVL_CLK_DIV - 1)
153 #define LVL_SHIFT(n) ((n) * LVL_CLK_SHIFT)
154 #define LVL_GRAN(n) (1UL << LVL_SHIFT(n))
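
/*
 * For example, with HZ=1000 (one jiffy = 1 ms) the macros above give
 * LVL_GRAN(0) = 1, LVL_GRAN(1) = 8, LVL_GRAN(2) = 64 and LVL_GRAN(3) = 512
 * jiffies, which matches the granularity column of the HZ 1000 table in the
 * comment above.
 */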
155
156 /*
157 * The time start value for each level to select the bucket at enqueue
158 * time.
159 */
160 #define LVL_START(n) ((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))
161
162 /* Size of each clock level */
163 #define LVL_BITS 6
164 #define LVL_SIZE (1UL << LVL_BITS)
165 #define LVL_MASK (LVL_SIZE - 1)
166 #define LVL_OFFS(n) ((n) * LVL_SIZE)
167
168 /* Level depth */
169 #if HZ > 100
170 # define LVL_DEPTH 9
171 # else
172 # define LVL_DEPTH 8
173 #endif
174
175 /* The cutoff (max. capacity of the wheel) */
176 #define WHEEL_TIMEOUT_CUTOFF (LVL_START(LVL_DEPTH))
177 #define WHEEL_TIMEOUT_MAX (WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))
178
179 /*
180 * The resulting wheel size. If NOHZ is configured we allocate two
181 * wheels so we have a separate storage for the deferrable timers.
182 */
183 #define WHEEL_SIZE (LVL_SIZE * LVL_DEPTH)
184
185 #ifdef CONFIG_NO_HZ_COMMON
186 # define NR_BASES 2
187 # define BASE_STD 0
188 # define BASE_DEF 1
189 #else
190 # define NR_BASES 1
191 # define BASE_STD 0
192 # define BASE_DEF 0
193 #endif
194
195 struct timer_base {
196 spinlock_t lock;
197 struct timer_list *running_timer;
198 unsigned long clk;
199 unsigned long next_expiry;
200 unsigned int cpu;
201 bool migration_enabled;
202 bool nohz_active;
203 bool is_idle;
204 DECLARE_BITMAP(pending_map, WHEEL_SIZE);
205 struct hlist_head vectors[WHEEL_SIZE];
206 } ____cacheline_aligned;
207
208 static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
209
210 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
211 unsigned int sysctl_timer_migration = 1;
212
213 void timers_update_migration(bool update_nohz)
214 {
215 bool on = sysctl_timer_migration && tick_nohz_active;
216 unsigned int cpu;
217
218 /* Avoid the loop, if nothing to update */
219 if (this_cpu_read(timer_bases[BASE_STD].migration_enabled) == on)
220 return;
221
222 for_each_possible_cpu(cpu) {
223 per_cpu(timer_bases[BASE_STD].migration_enabled, cpu) = on;
224 per_cpu(timer_bases[BASE_DEF].migration_enabled, cpu) = on;
225 per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
226 if (!update_nohz)
227 continue;
228 per_cpu(timer_bases[BASE_STD].nohz_active, cpu) = true;
229 per_cpu(timer_bases[BASE_DEF].nohz_active, cpu) = true;
230 per_cpu(hrtimer_bases.nohz_active, cpu) = true;
231 }
232 }
233
234 int timer_migration_handler(struct ctl_table *table, int write,
235 void __user *buffer, size_t *lenp,
236 loff_t *ppos)
237 {
238 static DEFINE_MUTEX(mutex);
239 int ret;
240
241 mutex_lock(&mutex);
242 ret = proc_dointvec(table, write, buffer, lenp, ppos);
243 if (!ret && write)
244 timers_update_migration(false);
245 mutex_unlock(&mutex);
246 return ret;
247 }
248 #endif
249
250 static unsigned long round_jiffies_common(unsigned long j, int cpu,
251 bool force_up)
252 {
253 int rem;
254 unsigned long original = j;
255
256 /*
257 * We don't want all cpus firing their timers at once hitting the
258 * same lock or cachelines, so we skew each extra cpu with an extra
259 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
260 * already did this.
261 * The skew is done by adding 3*cpunr, then rounding, then subtracting this
262 * extra offset again.
263 */
264 j += cpu * 3;
265
266 rem = j % HZ;
267
268 /*
269 * If the target jiffie is just after a whole second (which can happen
270 * due to delays of the timer irq, long irq off times etc etc) then
271 * we should round down to the whole second, not up. Use 1/4th of a second
272 * as the cutoff for this rounding, as an extreme upper bound for such delays.
273 * But never round down if @force_up is set.
274 */
275 if (rem < HZ/4 && !force_up) /* round down */
276 j = j - rem;
277 else /* round up */
278 j = j - rem + HZ;
279
280 /* now that we have rounded, subtract the extra skew again */
281 j -= cpu * 3;
282
283 /*
284 * Make sure j is still in the future. Otherwise return the
285 * unmodified value.
286 */
287 return time_is_after_jiffies(j) ? j : original;
288 }
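
/*
 * Worked example (illustrative only): with HZ=1000 and cpu=2 the code above
 * adds a skew of 6 jiffies before rounding and subtracts it again afterwards,
 * so - assuming the result is still in the future - the returned value lands
 * 6 jiffies before a whole-second boundary. Each CPU therefore gets its own
 * slightly earlier "full second", spreading the wakeups instead of having all
 * CPUs fire on exactly the same jiffy.
 */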
289
290 /**
291 * __round_jiffies - function to round jiffies to a full second
292 * @j: the time in (absolute) jiffies that should be rounded
293 * @cpu: the processor number on which the timeout will happen
294 *
295 * __round_jiffies() rounds an absolute time in the future (in jiffies)
296 * up or down to (approximately) full seconds. This is useful for timers
297 * for which the exact time they fire does not matter too much, as long as
298 * they fire approximately every X seconds.
299 *
300 * By rounding these timers to whole seconds, all such timers will fire
301 * at the same time, rather than at various times spread out. The goal
302 * of this is to have the CPU wake up less, which saves power.
303 *
304 * The exact rounding is skewed for each processor to avoid all
305 * processors firing at the exact same time, which could lead
306 * to lock contention or spurious cache line bouncing.
307 *
308 * The return value is the rounded version of the @j parameter.
309 */
310 unsigned long __round_jiffies(unsigned long j, int cpu)
311 {
312 return round_jiffies_common(j, cpu, false);
313 }
314 EXPORT_SYMBOL_GPL(__round_jiffies);
315
316 /**
317 * __round_jiffies_relative - function to round jiffies to a full second
318 * @j: the time in (relative) jiffies that should be rounded
319 * @cpu: the processor number on which the timeout will happen
320 *
321 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
322 * up or down to (approximately) full seconds. This is useful for timers
323 * for which the exact time they fire does not matter too much, as long as
324 * they fire approximately every X seconds.
325 *
326 * By rounding these timers to whole seconds, all such timers will fire
327 * at the same time, rather than at various times spread out. The goal
328 * of this is to have the CPU wake up less, which saves power.
329 *
330 * The exact rounding is skewed for each processor to avoid all
331 * processors firing at the exact same time, which could lead
332 * to lock contention or spurious cache line bouncing.
333 *
334 * The return value is the rounded version of the @j parameter.
335 */
336 unsigned long __round_jiffies_relative(unsigned long j, int cpu)
337 {
338 unsigned long j0 = jiffies;
339
340 /* Use j0 because jiffies might change while we run */
341 return round_jiffies_common(j + j0, cpu, false) - j0;
342 }
343 EXPORT_SYMBOL_GPL(__round_jiffies_relative);
344
345 /**
346 * round_jiffies - function to round jiffies to a full second
347 * @j: the time in (absolute) jiffies that should be rounded
348 *
349 * round_jiffies() rounds an absolute time in the future (in jiffies)
350 * up or down to (approximately) full seconds. This is useful for timers
351 * for which the exact time they fire does not matter too much, as long as
352 * they fire approximately every X seconds.
353 *
354 * By rounding these timers to whole seconds, all such timers will fire
355 * at the same time, rather than at various times spread out. The goal
356 * of this is to have the CPU wake up less, which saves power.
357 *
358 * The return value is the rounded version of the @j parameter.
359 */
360 unsigned long round_jiffies(unsigned long j)
361 {
362 return round_jiffies_common(j, raw_smp_processor_id(), false);
363 }
364 EXPORT_SYMBOL_GPL(round_jiffies);
365
366 /**
367 * round_jiffies_relative - function to round jiffies to a full second
368 * @j: the time in (relative) jiffies that should be rounded
369 *
370 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
371 * up or down to (approximately) full seconds. This is useful for timers
372 * for which the exact time they fire does not matter too much, as long as
373 * they fire approximately every X seconds.
374 *
375 * By rounding these timers to whole seconds, all such timers will fire
376 * at the same time, rather than at various times spread out. The goal
377 * of this is to have the CPU wake up less, which saves power.
378 *
379 * The return value is the rounded version of the @j parameter.
380 */
381 unsigned long round_jiffies_relative(unsigned long j)
382 {
383 return __round_jiffies_relative(j, raw_smp_processor_id());
384 }
385 EXPORT_SYMBOL_GPL(round_jiffies_relative);
386
387 /**
388 * __round_jiffies_up - function to round jiffies up to a full second
389 * @j: the time in (absolute) jiffies that should be rounded
390 * @cpu: the processor number on which the timeout will happen
391 *
392 * This is the same as __round_jiffies() except that it will never
393 * round down. This is useful for timeouts for which the exact time
394 * of firing does not matter too much, as long as they don't fire too
395 * early.
396 */
397 unsigned long __round_jiffies_up(unsigned long j, int cpu)
398 {
399 return round_jiffies_common(j, cpu, true);
400 }
401 EXPORT_SYMBOL_GPL(__round_jiffies_up);
402
403 /**
404 * __round_jiffies_up_relative - function to round jiffies up to a full second
405 * @j: the time in (relative) jiffies that should be rounded
406 * @cpu: the processor number on which the timeout will happen
407 *
408 * This is the same as __round_jiffies_relative() except that it will never
409 * round down. This is useful for timeouts for which the exact time
410 * of firing does not matter too much, as long as they don't fire too
411 * early.
412 */
413 unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
414 {
415 unsigned long j0 = jiffies;
416
417 /* Use j0 because jiffies might change while we run */
418 return round_jiffies_common(j + j0, cpu, true) - j0;
419 }
420 EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
421
422 /**
423 * round_jiffies_up - function to round jiffies up to a full second
424 * @j: the time in (absolute) jiffies that should be rounded
425 *
426 * This is the same as round_jiffies() except that it will never
427 * round down. This is useful for timeouts for which the exact time
428 * of firing does not matter too much, as long as they don't fire too
429 * early.
430 */
431 unsigned long round_jiffies_up(unsigned long j)
432 {
433 return round_jiffies_common(j, raw_smp_processor_id(), true);
434 }
435 EXPORT_SYMBOL_GPL(round_jiffies_up);
436
437 /**
438 * round_jiffies_up_relative - function to round jiffies up to a full second
439 * @j: the time in (relative) jiffies that should be rounded
440 *
441 * This is the same as round_jiffies_relative() except that it will never
442 * round down. This is useful for timeouts for which the exact time
443 * of firing does not matter too much, as long as they don't fire too
444 * early.
445 */
446 unsigned long round_jiffies_up_relative(unsigned long j)
447 {
448 return __round_jiffies_up_relative(j, raw_smp_processor_id());
449 }
450 EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
451
452
453 static inline unsigned int timer_get_idx(struct timer_list *timer)
454 {
455 return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
456 }
457
458 static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
459 {
460 timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
461 idx << TIMER_ARRAYSHIFT;
462 }
463
464 /*
465 * Helper function to calculate the array index for a given expiry
466 * time.
467 */
468 static inline unsigned calc_index(unsigned expires, unsigned lvl)
469 {
470 expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
471 return LVL_OFFS(lvl) + (expires & LVL_MASK);
472 }
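
/*
 * Worked example (illustrative): a timer armed with base->clk = 1000 and
 * expires = 1070 has delta = 70, which is >= LVL_START(1) (63) and
 * < LVL_START(2) (504), so it is placed in level 1. calc_index(1070, 1)
 * computes (1070 + 8) >> 3 = 134, masks it to 134 & LVL_MASK = 6 and returns
 * LVL_OFFS(1) + 6 = 70. That bucket is collected when base->clk reaches
 * 134 << 3 = 1072, i.e. the timer fires up to one level-1 granule (8 jiffies)
 * after its requested expiry - the implicit batching slack described in the
 * comment at the top of this file.
 */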
473
474 static void
475 __internal_add_timer(struct timer_base *base, struct timer_list *timer)
476 {
477 unsigned long expires = timer->expires;
478 unsigned long delta = expires - base->clk;
479 struct hlist_head *vec;
480 unsigned int idx;
481
482 if (delta < LVL_START(1)) {
483 idx = calc_index(expires, 0);
484 } else if (delta < LVL_START(2)) {
485 idx = calc_index(expires, 1);
486 } else if (delta < LVL_START(3)) {
487 idx = calc_index(expires, 2);
488 } else if (delta < LVL_START(4)) {
489 idx = calc_index(expires, 3);
490 } else if (delta < LVL_START(5)) {
491 idx = calc_index(expires, 4);
492 } else if (delta < LVL_START(6)) {
493 idx = calc_index(expires, 5);
494 } else if (delta < LVL_START(7)) {
495 idx = calc_index(expires, 6);
496 } else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
497 idx = calc_index(expires, 7);
498 } else if ((long) delta < 0) {
499 idx = base->clk & LVL_MASK;
500 } else {
501 /*
502 * Force expire obscene large timeouts to expire at the
503 * capacity limit of the wheel.
504 */
505 if (expires >= WHEEL_TIMEOUT_CUTOFF)
506 expires = WHEEL_TIMEOUT_MAX;
507
508 idx = calc_index(expires, LVL_DEPTH - 1);
509 }
510 /*
511 * Enqueue the timer into the array bucket, mark it pending in
512 * the bitmap and store the index in the timer flags.
513 */
514 vec = base->vectors + idx;
515 hlist_add_head(&timer->entry, vec);
516 __set_bit(idx, base->pending_map);
517 timer_set_idx(timer, idx);
518 }
519
520 static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
521 {
522 __internal_add_timer(base, timer);
523
524 if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
525 return;
526
527 /*
528 * TODO: This wants some optimizing similar to the code below, but we
529 * will do that when we switch from push to pull for deferrable timers.
530 */
531 if (timer->flags & TIMER_DEFERRABLE) {
532 if (tick_nohz_full_cpu(base->cpu))
533 wake_up_nohz_cpu(base->cpu);
534 return;
535 }
536
537 /*
538 * We might have to IPI the remote CPU if the base is idle and the
539 * timer is not deferrable. If the other CPU is on the way to idle
540 * then it can't set base->is_idle as we hold the base lock:
541 */
542 if (!base->is_idle)
543 return;
544
545 /* Check whether this is the new first expiring timer: */
546 if (time_after_eq(timer->expires, base->next_expiry))
547 return;
548
549 /*
550 * Set the next expiry time and kick the CPU so it can reevaluate the
551 * wheel:
552 */
553 base->next_expiry = timer->expires;
554 wake_up_nohz_cpu(base->cpu);
555 }
556
557 #ifdef CONFIG_TIMER_STATS
558 void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
559 {
560 if (timer->start_site)
561 return;
562
563 timer->start_site = addr;
564 memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
565 timer->start_pid = current->pid;
566 }
567
568 static void timer_stats_account_timer(struct timer_list *timer)
569 {
570 void *site;
571
572 /*
573 * start_site can be concurrently reset by
574 * timer_stats_timer_clear_start_info()
575 */
576 site = READ_ONCE(timer->start_site);
577 if (likely(!site))
578 return;
579
580 timer_stats_update_stats(timer, timer->start_pid, site,
581 timer->function, timer->start_comm,
582 timer->flags);
583 }
584
585 #else
586 static void timer_stats_account_timer(struct timer_list *timer) {}
587 #endif
588
589 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
590
591 static struct debug_obj_descr timer_debug_descr;
592
593 static void *timer_debug_hint(void *addr)
594 {
595 return ((struct timer_list *) addr)->function;
596 }
597
598 static bool timer_is_static_object(void *addr)
599 {
600 struct timer_list *timer = addr;
601
602 return (timer->entry.pprev == NULL &&
603 timer->entry.next == TIMER_ENTRY_STATIC);
604 }
605
606 /*
607 * fixup_init is called when:
608 * - an active object is initialized
609 */
610 static bool timer_fixup_init(void *addr, enum debug_obj_state state)
611 {
612 struct timer_list *timer = addr;
613
614 switch (state) {
615 case ODEBUG_STATE_ACTIVE:
616 del_timer_sync(timer);
617 debug_object_init(timer, &timer_debug_descr);
618 return true;
619 default:
620 return false;
621 }
622 }
623
624 /* Stub timer callback for improperly used timers. */
625 static void stub_timer(unsigned long data)
626 {
627 WARN_ON(1);
628 }
629
630 /*
631 * fixup_activate is called when:
632 * - an active object is activated
633 * - an unknown non-static object is activated
634 */
635 static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
636 {
637 struct timer_list *timer = addr;
638
639 switch (state) {
640 case ODEBUG_STATE_NOTAVAILABLE:
641 setup_timer(timer, stub_timer, 0);
642 return true;
643
644 case ODEBUG_STATE_ACTIVE:
645 WARN_ON(1);
646
647 default:
648 return false;
649 }
650 }
651
652 /*
653 * fixup_free is called when:
654 * - an active object is freed
655 */
656 static bool timer_fixup_free(void *addr, enum debug_obj_state state)
657 {
658 struct timer_list *timer = addr;
659
660 switch (state) {
661 case ODEBUG_STATE_ACTIVE:
662 del_timer_sync(timer);
663 debug_object_free(timer, &timer_debug_descr);
664 return true;
665 default:
666 return false;
667 }
668 }
669
670 /*
671 * fixup_assert_init is called when:
672 * - an untracked/uninit-ed object is found
673 */
674 static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
675 {
676 struct timer_list *timer = addr;
677
678 switch (state) {
679 case ODEBUG_STATE_NOTAVAILABLE:
680 setup_timer(timer, stub_timer, 0);
681 return true;
682 default:
683 return false;
684 }
685 }
686
687 static struct debug_obj_descr timer_debug_descr = {
688 .name = "timer_list",
689 .debug_hint = timer_debug_hint,
690 .is_static_object = timer_is_static_object,
691 .fixup_init = timer_fixup_init,
692 .fixup_activate = timer_fixup_activate,
693 .fixup_free = timer_fixup_free,
694 .fixup_assert_init = timer_fixup_assert_init,
695 };
696
697 static inline void debug_timer_init(struct timer_list *timer)
698 {
699 debug_object_init(timer, &timer_debug_descr);
700 }
701
702 static inline void debug_timer_activate(struct timer_list *timer)
703 {
704 debug_object_activate(timer, &timer_debug_descr);
705 }
706
707 static inline void debug_timer_deactivate(struct timer_list *timer)
708 {
709 debug_object_deactivate(timer, &timer_debug_descr);
710 }
711
712 static inline void debug_timer_free(struct timer_list *timer)
713 {
714 debug_object_free(timer, &timer_debug_descr);
715 }
716
717 static inline void debug_timer_assert_init(struct timer_list *timer)
718 {
719 debug_object_assert_init(timer, &timer_debug_descr);
720 }
721
722 static void do_init_timer(struct timer_list *timer, unsigned int flags,
723 const char *name, struct lock_class_key *key);
724
725 void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
726 const char *name, struct lock_class_key *key)
727 {
728 debug_object_init_on_stack(timer, &timer_debug_descr);
729 do_init_timer(timer, flags, name, key);
730 }
731 EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
732
733 void destroy_timer_on_stack(struct timer_list *timer)
734 {
735 debug_object_free(timer, &timer_debug_descr);
736 }
737 EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
738
739 #else
740 static inline void debug_timer_init(struct timer_list *timer) { }
741 static inline void debug_timer_activate(struct timer_list *timer) { }
742 static inline void debug_timer_deactivate(struct timer_list *timer) { }
743 static inline void debug_timer_assert_init(struct timer_list *timer) { }
744 #endif
745
746 static inline void debug_init(struct timer_list *timer)
747 {
748 debug_timer_init(timer);
749 trace_timer_init(timer);
750 }
751
752 static inline void
753 debug_activate(struct timer_list *timer, unsigned long expires)
754 {
755 debug_timer_activate(timer);
756 trace_timer_start(timer, expires, timer->flags);
757 }
758
759 static inline void debug_deactivate(struct timer_list *timer)
760 {
761 debug_timer_deactivate(timer);
762 trace_timer_cancel(timer);
763 }
764
765 static inline void debug_assert_init(struct timer_list *timer)
766 {
767 debug_timer_assert_init(timer);
768 }
769
770 static void do_init_timer(struct timer_list *timer, unsigned int flags,
771 const char *name, struct lock_class_key *key)
772 {
773 timer->entry.pprev = NULL;
774 timer->flags = flags | raw_smp_processor_id();
775 #ifdef CONFIG_TIMER_STATS
776 timer->start_site = NULL;
777 timer->start_pid = -1;
778 memset(timer->start_comm, 0, TASK_COMM_LEN);
779 #endif
780 lockdep_init_map(&timer->lockdep_map, name, key, 0);
781 }
782
783 /**
784 * init_timer_key - initialize a timer
785 * @timer: the timer to be initialized
786 * @flags: timer flags
787 * @name: name of the timer
788 * @key: lockdep class key of the fake lock used for tracking timer
789 * sync lock dependencies
790 *
791 * init_timer_key() must be done to a timer prior to calling *any* of the
792 * other timer functions.
793 */
794 void init_timer_key(struct timer_list *timer, unsigned int flags,
795 const char *name, struct lock_class_key *key)
796 {
797 debug_init(timer);
798 do_init_timer(timer, flags, name, key);
799 }
800 EXPORT_SYMBOL(init_timer_key);
801
802 static inline void detach_timer(struct timer_list *timer, bool clear_pending)
803 {
804 struct hlist_node *entry = &timer->entry;
805
806 debug_deactivate(timer);
807
808 __hlist_del(entry);
809 if (clear_pending)
810 entry->pprev = NULL;
811 entry->next = LIST_POISON2;
812 }
813
814 static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
815 bool clear_pending)
816 {
817 unsigned idx = timer_get_idx(timer);
818
819 if (!timer_pending(timer))
820 return 0;
821
822 if (hlist_is_singular_node(&timer->entry, base->vectors + idx))
823 __clear_bit(idx, base->pending_map);
824
825 detach_timer(timer, clear_pending);
826 return 1;
827 }
828
829 static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
830 {
831 struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);
832
833 /*
834 * If the timer is deferrable and nohz is active then we need to use
835 * the deferrable base.
836 */
837 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
838 (tflags & TIMER_DEFERRABLE))
839 base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
840 return base;
841 }
842
843 static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
844 {
845 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
846
847 /*
848 * If the timer is deferrable and nohz is active then we need to use
849 * the deferrable base.
850 */
851 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
852 (tflags & TIMER_DEFERRABLE))
853 base = this_cpu_ptr(&timer_bases[BASE_DEF]);
854 return base;
855 }
856
857 static inline struct timer_base *get_timer_base(u32 tflags)
858 {
859 return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
860 }
861
862 #ifdef CONFIG_NO_HZ_COMMON
863 static inline struct timer_base *
864 __get_target_base(struct timer_base *base, unsigned tflags)
865 {
866 #ifdef CONFIG_SMP
867 if ((tflags & TIMER_PINNED) || !base->migration_enabled)
868 return get_timer_this_cpu_base(tflags);
869 return get_timer_cpu_base(tflags, get_nohz_timer_target());
870 #else
871 return get_timer_this_cpu_base(tflags);
872 #endif
873 }
874
875 static inline void forward_timer_base(struct timer_base *base)
876 {
877 /*
878 * We only forward the base when it's idle and we have a delta between
879 * base clock and jiffies.
880 */
881 if (!base->is_idle || (long) (jiffies - base->clk) < 2)
882 return;
883
884 /*
885 * If the next expiry value is > jiffies, then we fast forward to
886 * jiffies otherwise we forward to the next expiry value.
887 */
888 if (time_after(base->next_expiry, jiffies))
889 base->clk = jiffies;
890 else
891 base->clk = base->next_expiry;
892 }
893 #else
894 static inline struct timer_base *
895 __get_target_base(struct timer_base *base, unsigned tflags)
896 {
897 return get_timer_this_cpu_base(tflags);
898 }
899
900 static inline void forward_timer_base(struct timer_base *base) { }
901 #endif
902
903 static inline struct timer_base *
904 get_target_base(struct timer_base *base, unsigned tflags)
905 {
906 struct timer_base *target = __get_target_base(base, tflags);
907
908 forward_timer_base(target);
909 return target;
910 }
911
912 /*
913 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
914 * that all timers which are tied to this base are locked, and the base itself
915 * is locked too.
916 *
917 * So __run_timers/migrate_timers can safely modify all timers which could
918 * be found in the base->vectors array.
919 *
920 * When a timer is migrating then the TIMER_MIGRATING flag is set and we need
921 * to wait until the migration is done.
922 */
923 static struct timer_base *lock_timer_base(struct timer_list *timer,
924 unsigned long *flags)
925 __acquires(timer->base->lock)
926 {
927 for (;;) {
928 struct timer_base *base;
929 u32 tf = timer->flags;
930
931 if (!(tf & TIMER_MIGRATING)) {
932 base = get_timer_base(tf);
933 spin_lock_irqsave(&base->lock, *flags);
934 if (timer->flags == tf)
935 return base;
936 spin_unlock_irqrestore(&base->lock, *flags);
937 }
938 cpu_relax();
939 }
940 }
941
942 static inline int
943 __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
944 {
945 struct timer_base *base, *new_base;
946 unsigned long flags;
947 int ret = 0;
948
949 /*
950 * TODO: Calculate the array bucket of the timer right here w/o
951 * holding the base lock. This allows us to check not only
952 * timer->expires == expires below, but also whether the timer
953 * ends up in the same bucket. If we really need to requeue
954 * the timer then we check whether base->clk has
955 * advanced between here and locking the timer base. If
956 * jiffies advanced we have to recalc the array bucket with the
957 * lock held.
958 */
959
960 /*
961 * This is a common optimization triggered by the
962 * networking code - if the timer is re-modified
963 * to be the same thing then just return:
964 */
965 if (timer_pending(timer)) {
966 if (timer->expires == expires)
967 return 1;
968 }
969
970 timer_stats_timer_set_start_info(timer);
971 BUG_ON(!timer->function);
972
973 base = lock_timer_base(timer, &flags);
974
975 ret = detach_if_pending(timer, base, false);
976 if (!ret && pending_only)
977 goto out_unlock;
978
979 debug_activate(timer, expires);
980
981 new_base = get_target_base(base, timer->flags);
982
983 if (base != new_base) {
984 /*
985 * We are trying to schedule the timer on the new base.
986 * However we can't change timer's base while it is running,
987 * otherwise del_timer_sync() can't detect that the timer's
988 * handler has not yet finished. This also guarantees that the
989 * timer is serialized wrt itself.
990 */
991 if (likely(base->running_timer != timer)) {
992 /* See the comment in lock_timer_base() */
993 timer->flags |= TIMER_MIGRATING;
994
995 spin_unlock(&base->lock);
996 base = new_base;
997 spin_lock(&base->lock);
998 WRITE_ONCE(timer->flags,
999 (timer->flags & ~TIMER_BASEMASK) | base->cpu);
1000 }
1001 }
1002
1003 timer->expires = expires;
1004 internal_add_timer(base, timer);
1005
1006 out_unlock:
1007 spin_unlock_irqrestore(&base->lock, flags);
1008
1009 return ret;
1010 }
1011
1012 /**
1013 * mod_timer_pending - modify a pending timer's timeout
1014 * @timer: the pending timer to be modified
1015 * @expires: new timeout in jiffies
1016 *
1017 * mod_timer_pending() is the same for pending timers as mod_timer(),
1018 * but will not re-activate and modify already deleted timers.
1019 *
1020 * It is useful for unserialized use of timers.
1021 */
1022 int mod_timer_pending(struct timer_list *timer, unsigned long expires)
1023 {
1024 return __mod_timer(timer, expires, true);
1025 }
1026 EXPORT_SYMBOL(mod_timer_pending);
1027
1028 /**
1029 * mod_timer - modify a timer's timeout
1030 * @timer: the timer to be modified
1031 * @expires: new timeout in jiffies
1032 *
1033 * mod_timer() is a more efficient way to update the expire field of an
1034 * active timer (if the timer is inactive it will be activated)
1035 *
1036 * mod_timer(timer, expires) is equivalent to:
1037 *
1038 * del_timer(timer); timer->expires = expires; add_timer(timer);
1039 *
1040 * Note that if there are multiple unserialized concurrent users of the
1041 * same timer, then mod_timer() is the only safe way to modify the timeout,
1042 * since add_timer() cannot modify an already running timer.
1043 *
1044 * The function returns whether it has modified a pending timer or not.
1045 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
1046 * active timer returns 1.)
1047 */
1048 int mod_timer(struct timer_list *timer, unsigned long expires)
1049 {
1050 return __mod_timer(timer, expires, false);
1051 }
1052 EXPORT_SYMBOL(mod_timer);
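
/*
 * Typical usage sketch (hypothetical driver code, for illustration only):
 * embed a timer_list in the driver state, initialize it once and re-arm it
 * with mod_timer() whenever activity is seen, e.g.:
 *
 *	setup_timer(&dev->watchdog, my_watchdog_fn, (unsigned long)dev);
 *	mod_timer(&dev->watchdog, jiffies + 2 * HZ);
 *
 * where "dev" and "my_watchdog_fn" are placeholders for the driver's own data
 * and callback.
 */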
1053
1054 /**
1055 * add_timer - start a timer
1056 * @timer: the timer to be added
1057 *
1058 * The kernel will do a ->function(->data) callback from the
1059 * timer interrupt at the ->expires point in the future. The
1060 * current time is 'jiffies'.
1061 *
1062 * The timer's ->expires, ->function (and if the handler uses it, ->data)
1063 * fields must be set prior to calling this function.
1064 *
1065 * Timers with an ->expires field in the past will be executed in the next
1066 * timer tick.
1067 */
1068 void add_timer(struct timer_list *timer)
1069 {
1070 BUG_ON(timer_pending(timer));
1071 mod_timer(timer, timer->expires);
1072 }
1073 EXPORT_SYMBOL(add_timer);
1074
1075 /**
1076 * add_timer_on - start a timer on a particular CPU
1077 * @timer: the timer to be added
1078 * @cpu: the CPU to start it on
1079 *
1080 * This is not very scalable on SMP. Double adds are not possible.
1081 */
1082 void add_timer_on(struct timer_list *timer, int cpu)
1083 {
1084 struct timer_base *new_base, *base;
1085 unsigned long flags;
1086
1087 timer_stats_timer_set_start_info(timer);
1088 BUG_ON(timer_pending(timer) || !timer->function);
1089
1090 new_base = get_timer_cpu_base(timer->flags, cpu);
1091
1092 /*
1093 * If @timer was on a different CPU, it should be migrated with the
1094 * old base locked to prevent other operations proceeding with the
1095 * wrong base locked. See lock_timer_base().
1096 */
1097 base = lock_timer_base(timer, &flags);
1098 if (base != new_base) {
1099 timer->flags |= TIMER_MIGRATING;
1100
1101 spin_unlock(&base->lock);
1102 base = new_base;
1103 spin_lock(&base->lock);
1104 WRITE_ONCE(timer->flags,
1105 (timer->flags & ~TIMER_BASEMASK) | cpu);
1106 }
1107
1108 debug_activate(timer, timer->expires);
1109 internal_add_timer(base, timer);
1110 spin_unlock_irqrestore(&base->lock, flags);
1111 }
1112 EXPORT_SYMBOL_GPL(add_timer_on);
1113
1114 /**
1115 * del_timer - deactivate a timer.
1116 * @timer: the timer to be deactivated
1117 *
1118 * del_timer() deactivates a timer - this works on both active and inactive
1119 * timers.
1120 *
1121 * The function returns whether it has deactivated a pending timer or not.
1122 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
1123 * active timer returns 1.)
1124 */
1125 int del_timer(struct timer_list *timer)
1126 {
1127 struct timer_base *base;
1128 unsigned long flags;
1129 int ret = 0;
1130
1131 debug_assert_init(timer);
1132
1133 timer_stats_timer_clear_start_info(timer);
1134 if (timer_pending(timer)) {
1135 base = lock_timer_base(timer, &flags);
1136 ret = detach_if_pending(timer, base, true);
1137 spin_unlock_irqrestore(&base->lock, flags);
1138 }
1139
1140 return ret;
1141 }
1142 EXPORT_SYMBOL(del_timer);
1143
1144 /**
1145 * try_to_del_timer_sync - Try to deactivate a timer
1146 * @timer: timer to deactivate
1147 *
1148 * This function tries to deactivate a timer. Upon successful (ret >= 0)
1149 * exit the timer is not queued and the handler is not running on any CPU.
1150 */
1151 int try_to_del_timer_sync(struct timer_list *timer)
1152 {
1153 struct timer_base *base;
1154 unsigned long flags;
1155 int ret = -1;
1156
1157 debug_assert_init(timer);
1158
1159 base = lock_timer_base(timer, &flags);
1160
1161 if (base->running_timer != timer) {
1162 timer_stats_timer_clear_start_info(timer);
1163 ret = detach_if_pending(timer, base, true);
1164 }
1165 spin_unlock_irqrestore(&base->lock, flags);
1166
1167 return ret;
1168 }
1169 EXPORT_SYMBOL(try_to_del_timer_sync);
1170
1171 #ifdef CONFIG_SMP
1172 /**
1173 * del_timer_sync - deactivate a timer and wait for the handler to finish.
1174 * @timer: the timer to be deactivated
1175 *
1176 * This function only differs from del_timer() on SMP: besides deactivating
1177 * the timer it also makes sure the handler has finished executing on other
1178 * CPUs.
1179 *
1180 * Synchronization rules: Callers must prevent restarting of the timer,
1181 * otherwise this function is meaningless. It must not be called from
1182 * interrupt contexts unless the timer is an irqsafe one. The caller must
1183 * not hold locks which would prevent completion of the timer's
1184 * handler. The timer's handler must not call add_timer_on(). Upon exit the
1185 * timer is not queued and the handler is not running on any CPU.
1186 *
1187 * Note: For !irqsafe timers, you must not hold locks that are held in
1188 * interrupt context while calling this function. Even if the lock has
1189 * nothing to do with the timer in question. Here's why:
1190 *
1191 * CPU0 CPU1
1192 * ---- ----
1193 * <SOFTIRQ>
1194 * call_timer_fn();
1195 * base->running_timer = mytimer;
1196 * spin_lock_irq(somelock);
1197 * <IRQ>
1198 * spin_lock(somelock);
1199 * del_timer_sync(mytimer);
1200 * while (base->running_timer == mytimer);
1201 *
1202 * Now del_timer_sync() will never return and never release somelock.
1203 * The interrupt on the other CPU is waiting to grab somelock but
1204 * it has interrupted the softirq that CPU0 is waiting to finish.
1205 *
1206 * The function returns whether it has deactivated a pending timer or not.
1207 */
1208 int del_timer_sync(struct timer_list *timer)
1209 {
1210 #ifdef CONFIG_LOCKDEP
1211 unsigned long flags;
1212
1213 /*
1214 * If lockdep gives a backtrace here, please reference
1215 * the synchronization rules above.
1216 */
1217 local_irq_save(flags);
1218 lock_map_acquire(&timer->lockdep_map);
1219 lock_map_release(&timer->lockdep_map);
1220 local_irq_restore(flags);
1221 #endif
1222 /*
1223 * don't use it in hardirq context, because it
1224 * could lead to deadlock.
1225 */
1226 WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
1227 for (;;) {
1228 int ret = try_to_del_timer_sync(timer);
1229 if (ret >= 0)
1230 return ret;
1231 cpu_relax();
1232 }
1233 }
1234 EXPORT_SYMBOL(del_timer_sync);
1235 #endif
1236
1237 static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
1238 unsigned long data)
1239 {
1240 int count = preempt_count();
1241
1242 #ifdef CONFIG_LOCKDEP
1243 /*
1244 * It is permissible to free the timer from inside the
1245 * function that is called from it, this we need to take into
1246 * account for lockdep too. To avoid bogus "held lock freed"
1247 * warnings as well as problems when looking into
1248 * timer->lockdep_map, make a copy and use that here.
1249 */
1250 struct lockdep_map lockdep_map;
1251
1252 lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
1253 #endif
1254 /*
1255 * Couple the lock chain with the lock chain at
1256 * del_timer_sync() by acquiring the lock_map around the fn()
1257 * call here and in del_timer_sync().
1258 */
1259 lock_map_acquire(&lockdep_map);
1260
1261 trace_timer_expire_entry(timer);
1262 fn(data);
1263 trace_timer_expire_exit(timer);
1264
1265 lock_map_release(&lockdep_map);
1266
1267 if (count != preempt_count()) {
1268 WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
1269 fn, count, preempt_count());
1270 /*
1271 * Restore the preempt count. That gives us a decent
1272 * chance to survive and extract information. If the
1273 * callback kept a lock held, bad luck, but not worse
1274 * than the BUG() we had.
1275 */
1276 preempt_count_set(count);
1277 }
1278 }
1279
1280 static void expire_timers(struct timer_base *base, struct hlist_head *head)
1281 {
1282 while (!hlist_empty(head)) {
1283 struct timer_list *timer;
1284 void (*fn)(unsigned long);
1285 unsigned long data;
1286
1287 timer = hlist_entry(head->first, struct timer_list, entry);
1288 timer_stats_account_timer(timer);
1289
1290 base->running_timer = timer;
1291 detach_timer(timer, true);
1292
1293 fn = timer->function;
1294 data = timer->data;
1295
1296 if (timer->flags & TIMER_IRQSAFE) {
1297 spin_unlock(&base->lock);
1298 call_timer_fn(timer, fn, data);
1299 spin_lock(&base->lock);
1300 } else {
1301 spin_unlock_irq(&base->lock);
1302 call_timer_fn(timer, fn, data);
1303 spin_lock_irq(&base->lock);
1304 }
1305 }
1306 }
1307
1308 static int __collect_expired_timers(struct timer_base *base,
1309 struct hlist_head *heads)
1310 {
1311 unsigned long clk = base->clk;
1312 struct hlist_head *vec;
1313 int i, levels = 0;
1314 unsigned int idx;
1315
1316 for (i = 0; i < LVL_DEPTH; i++) {
1317 idx = (clk & LVL_MASK) + i * LVL_SIZE;
1318
1319 if (__test_and_clear_bit(idx, base->pending_map)) {
1320 vec = base->vectors + idx;
1321 hlist_move_list(vec, heads++);
1322 levels++;
1323 }
1324 /* Is it time to look at the next level? */
1325 if (clk & LVL_CLK_MASK)
1326 break;
1327 /* Shift clock for the next level granularity */
1328 clk >>= LVL_CLK_SHIFT;
1329 }
1330 return levels;
1331 }
1332
1333 #ifdef CONFIG_NO_HZ_COMMON
1334 /*
1335 * Find the next pending bucket of a level. Search from level start (@offset)
1336 * + @clk upwards and if nothing there, search from start of the level
1337 * (@offset) up to @offset + clk.
1338 */
1339 static int next_pending_bucket(struct timer_base *base, unsigned offset,
1340 unsigned clk)
1341 {
1342 unsigned pos, start = offset + clk;
1343 unsigned end = offset + LVL_SIZE;
1344
1345 pos = find_next_bit(base->pending_map, end, start);
1346 if (pos < end)
1347 return pos - start;
1348
1349 pos = find_next_bit(base->pending_map, start, offset);
1350 return pos < start ? pos + LVL_SIZE - start : -1;
1351 }
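
/*
 * Worked example (illustrative): for level 1 (offset = 64) with clk = 60 and
 * the only pending bit at array index 66, the first search over [124, 128)
 * finds nothing; the wrapped search over [64, 124) finds bit 66 and the
 * function returns 66 + LVL_SIZE - 124 = 6, i.e. the next pending bucket is
 * six level-1 slots ahead of the current clock, wrapping around the end of
 * the level.
 */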
1352
1353 /*
1354 * Search the first expiring timer in the various clock levels. Caller must
1355 * hold base->lock.
1356 */
1357 static unsigned long __next_timer_interrupt(struct timer_base *base)
1358 {
1359 unsigned long clk, next, adj;
1360 unsigned lvl, offset = 0;
1361
1362 next = base->clk + NEXT_TIMER_MAX_DELTA;
1363 clk = base->clk;
1364 for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
1365 int pos = next_pending_bucket(base, offset, clk & LVL_MASK);
1366
1367 if (pos >= 0) {
1368 unsigned long tmp = clk + (unsigned long) pos;
1369
1370 tmp <<= LVL_SHIFT(lvl);
1371 if (time_before(tmp, next))
1372 next = tmp;
1373 }
1374 /*
1375 * Clock for the next level. If the current level clock lower
1376 * bits are zero, we look at the next level as is. If not we
1377 * need to advance it by one because that's going to be the
1378 * next expiring bucket in that level. base->clk is the next
1379 * expiring jiffie. So in case of:
1380 *
1381 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1382 * 0 0 0 0 0 0
1383 *
1384 * we have to look at all levels @index 0. With
1385 *
1386 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1387 * 0 0 0 0 0 2
1388 *
1389 * LVL0 has the next expiring bucket @index 2. The upper
1390 * levels have the next expiring bucket @index 1.
1391 *
1392 * In case that the propagation wraps the next level the same
1393 * rules apply:
1394 *
1395 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
1396 * 0 0 0 0 F 2
1397 *
1398 * So after looking at LVL0 we get:
1399 *
1400 * LVL5 LVL4 LVL3 LVL2 LVL1
1401 * 0 0 0 1 0
1402 *
1403 * So no propagation from LVL1 to LVL2 because that happened
1404 * with the add already, but then we need to propagate further
1405 * from LVL2 to LVL3.
1406 *
1407 * So the simple check whether the lower bits of the current
1408 * level are 0 or not is sufficient for all cases.
1409 */
1410 adj = clk & LVL_CLK_MASK ? 1 : 0;
1411 clk >>= LVL_CLK_SHIFT;
1412 clk += adj;
1413 }
1414 return next;
1415 }
1416
1417 /*
1418 * Check, if the next hrtimer event is before the next timer wheel
1419 * event:
1420 */
1421 static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
1422 {
1423 u64 nextevt = hrtimer_get_next_event();
1424
1425 /*
1426 * If high resolution timers are enabled
1427 * hrtimer_get_next_event() returns KTIME_MAX.
1428 */
1429 if (expires <= nextevt)
1430 return expires;
1431
1432 /*
1433 * If the next timer is already expired, return the tick base
1434 * time so the tick is fired immediately.
1435 */
1436 if (nextevt <= basem)
1437 return basem;
1438
1439 /*
1440 * Round up to the next jiffie. High resolution timers are
1441 * off, so the hrtimers are expired in the tick and we need to
1442 * make sure that this tick really expires the timer to avoid
1443 * a ping pong of the nohz stop code.
1444 *
1445 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
1446 */
1447 return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
1448 }
1449
1450 /**
1451 * get_next_timer_interrupt - return the time (clock mono) of the next timer
1452 * @basej: base time jiffies
1453 * @basem: base time clock monotonic
1454 *
1455 * Returns the tick aligned clock monotonic time of the next pending
1456 * timer or KTIME_MAX if no timer is pending.
1457 */
1458 u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
1459 {
1460 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1461 u64 expires = KTIME_MAX;
1462 unsigned long nextevt;
1463
1464 /*
1465 * Pretend that there is no timer pending if the cpu is offline.
1466 * Possible pending timers will be migrated later to an active cpu.
1467 */
1468 if (cpu_is_offline(smp_processor_id()))
1469 return expires;
1470
1471 spin_lock(&base->lock);
1472 nextevt = __next_timer_interrupt(base);
1473 base->next_expiry = nextevt;
1474 /*
1475 * We have a fresh next event. Check whether we can forward the base:
1476 */
1477 if (time_after(nextevt, jiffies))
1478 base->clk = jiffies;
1479 else if (time_after(nextevt, base->clk))
1480 base->clk = nextevt;
1481
1482 if (time_before_eq(nextevt, basej)) {
1483 expires = basem;
1484 base->is_idle = false;
1485 } else {
1486 expires = basem + (nextevt - basej) * TICK_NSEC;
1487 /*
1488 * If we expect to sleep more than a tick, mark the base idle:
1489 */
1490 if ((expires - basem) > TICK_NSEC)
1491 base->is_idle = true;
1492 }
1493 spin_unlock(&base->lock);
1494
1495 return cmp_next_hrtimer_event(basem, expires);
1496 }
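
/*
 * For illustration: with HZ=1000 (TICK_NSEC = 1000000 ns), basej = 1000 and a
 * next event found at nextevt = 1005, the conversion above yields
 * expires = basem + 5 * TICK_NSEC, i.e. 5 ms of clock monotonic time past
 * @basem, which cmp_next_hrtimer_event() then compares against the next
 * hrtimer event.
 */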
1497
1498 /**
1499 * timer_clear_idle - Clear the idle state of the timer base
1500 *
1501 * Called with interrupts disabled
1502 */
1503 void timer_clear_idle(void)
1504 {
1505 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1506
1507 /*
1508 * We do this unlocked. The worst outcome is a remote enqueue sending
1509 * a pointless IPI, but taking the lock would just make the window for
1510 * sending the IPI a few instructions smaller for the cost of taking
1511 * the lock in the exit from idle path.
1512 */
1513 base->is_idle = false;
1514 }
1515
1516 static int collect_expired_timers(struct timer_base *base,
1517 struct hlist_head *heads)
1518 {
1519 /*
1520 * NOHZ optimization. After a long idle sleep we need to forward the
1521 * base to current jiffies. Avoid a loop by searching the bitfield for
1522 * the next expiring timer.
1523 */
1524 if ((long)(jiffies - base->clk) > 2) {
1525 unsigned long next = __next_timer_interrupt(base);
1526
1527 /*
1528 * If the next expiring timer is still in the future, forward the base
1529 * clock to current jiffies; otherwise forward it to the next expiry time:
1530 */
1531 if (time_after(next, jiffies)) {
1532 /* The call site will increment clock! */
1533 base->clk = jiffies - 1;
1534 return 0;
1535 }
1536 base->clk = next;
1537 }
1538 return __collect_expired_timers(base, heads);
1539 }
1540 #else
1541 static inline int collect_expired_timers(struct timer_base *base,
1542 struct hlist_head *heads)
1543 {
1544 return __collect_expired_timers(base, heads);
1545 }
1546 #endif
1547
1548 /*
1549 * Called from the timer interrupt handler to charge one tick to the current
1550 * process. user_tick is 1 if the tick is user time, 0 for system.
1551 */
1552 void update_process_times(int user_tick)
1553 {
1554 struct task_struct *p = current;
1555
1556 /* Note: this timer irq context must be accounted for as well. */
1557 account_process_tick(p, user_tick);
1558 run_local_timers();
1559 rcu_check_callbacks(user_tick);
1560 #ifdef CONFIG_IRQ_WORK
1561 if (in_irq())
1562 irq_work_tick();
1563 #endif
1564 scheduler_tick();
1565 run_posix_cpu_timers(p);
1566 }
1567
1568 /**
1569 * __run_timers - run all expired timers (if any) on this CPU.
1570 * @base: the timer vector to be processed.
1571 */
1572 static inline void __run_timers(struct timer_base *base)
1573 {
1574 struct hlist_head heads[LVL_DEPTH];
1575 int levels;
1576
1577 if (!time_after_eq(jiffies, base->clk))
1578 return;
1579
1580 spin_lock_irq(&base->lock);
1581
1582 while (time_after_eq(jiffies, base->clk)) {
1583
1584 levels = collect_expired_timers(base, heads);
1585 base->clk++;
1586
1587 while (levels--)
1588 expire_timers(base, heads + levels);
1589 }
1590 base->running_timer = NULL;
1591 spin_unlock_irq(&base->lock);
1592 }
1593
1594 /*
1595 * This function runs timers and the timer-tq in bottom half context.
1596 */
1597 static void run_timer_softirq(struct softirq_action *h)
1598 {
1599 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1600
1601 __run_timers(base);
1602 if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
1603 __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
1604 }
1605
1606 /*
1607 * Called by the local, per-CPU timer interrupt on SMP.
1608 */
1609 void run_local_timers(void)
1610 {
1611 struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1612
1613 hrtimer_run_queues();
1614 /* Raise the softirq only if required. */
1615 if (time_before(jiffies, base->clk)) {
1616 if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
1617 return;
1618 /* CPU is awake, so check the deferrable base. */
1619 base++;
1620 if (time_before(jiffies, base->clk))
1621 return;
1622 }
1623 raise_softirq(TIMER_SOFTIRQ);
1624 }
1625
1626 #ifdef __ARCH_WANT_SYS_ALARM
1627
1628 /*
1629 * For backwards compatibility? This can be done in libc so Alpha
1630 * and all newer ports shouldn't need it.
1631 */
1632 SYSCALL_DEFINE1(alarm, unsigned int, seconds)
1633 {
1634 return alarm_setitimer(seconds);
1635 }
1636
1637 #endif
1638
1639 static void process_timeout(unsigned long __data)
1640 {
1641 wake_up_process((struct task_struct *)__data);
1642 }
1643
1644 /**
1645 * schedule_timeout - sleep until timeout
1646 * @timeout: timeout value in jiffies
1647 *
1648 * Make the current task sleep until @timeout jiffies have
1649 * elapsed. The routine will return immediately unless
1650 * the current task state has been set (see set_current_state()).
1651 *
1652 * You can set the task state as follows -
1653 *
1654 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
1655 * pass before the routine returns. The routine will return 0
1656 *
1657 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1658 * delivered to the current task. In this case the remaining time
1659 * in jiffies will be returned, or 0 if the timer expired in time
1660 *
1661 * The current task state is guaranteed to be TASK_RUNNING when this
1662 * routine returns.
1663 *
1664 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
1665 * the CPU away without a bound on the timeout. In this case the return
1666 * value will be %MAX_SCHEDULE_TIMEOUT.
1667 *
1668 * In all cases the return value is guaranteed to be non-negative.
1669 */
1670 signed long __sched schedule_timeout(signed long timeout)
1671 {
1672 struct timer_list timer;
1673 unsigned long expire;
1674
1675 switch (timeout)
1676 {
1677 case MAX_SCHEDULE_TIMEOUT:
1678 /*
1679 * These two special cases are useful to be comfortable
1680 * in the caller. Nothing more. We could take
1681 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
1682 * but I'd like to return a valid offset (>=0) to allow
1683 * the caller to do everything it wants with the retval.
1684 */
1685 schedule();
1686 goto out;
1687 default:
1688 /*
1689 * Another bit of PARANOID. Note that the retval will be
1690 * 0 since no piece of kernel is supposed to do a check
1691 * for a negative retval of schedule_timeout() (since it
1692 * should never happens anyway). You just have the printk()
1693 * that will tell you if something is gone wrong and where.
1694 */
1695 if (timeout < 0) {
1696 printk(KERN_ERR "schedule_timeout: wrong timeout "
1697 "value %lx\n", timeout);
1698 dump_stack();
1699 current->state = TASK_RUNNING;
1700 goto out;
1701 }
1702 }
1703
1704 expire = timeout + jiffies;
1705
1706 setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
1707 __mod_timer(&timer, expire, false);
1708 schedule();
1709 del_singleshot_timer_sync(&timer);
1710
1711 /* Remove the timer from the object tracker */
1712 destroy_timer_on_stack(&timer);
1713
1714 timeout = expire - jiffies;
1715
1716 out:
1717 return timeout < 0 ? 0 : timeout;
1718 }
1719 EXPORT_SYMBOL(schedule_timeout);
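
/*
 * Minimal usage sketch (illustrative only): callers set the task state first
 * and then call schedule_timeout(), e.g. to wait up to one second while still
 * reacting to signals:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *
 * A return value of 0 means the full timeout elapsed; a positive value means
 * the task was woken early (e.g. by a signal) with that many jiffies left.
 */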
1720
1721 /*
1722 * We can use __set_current_state() here because schedule_timeout() calls
1723 * schedule() unconditionally.
1724 */
1725 signed long __sched schedule_timeout_interruptible(signed long timeout)
1726 {
1727 __set_current_state(TASK_INTERRUPTIBLE);
1728 return schedule_timeout(timeout);
1729 }
1730 EXPORT_SYMBOL(schedule_timeout_interruptible);
1731
1732 signed long __sched schedule_timeout_killable(signed long timeout)
1733 {
1734 __set_current_state(TASK_KILLABLE);
1735 return schedule_timeout(timeout);
1736 }
1737 EXPORT_SYMBOL(schedule_timeout_killable);
1738
1739 signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1740 {
1741 __set_current_state(TASK_UNINTERRUPTIBLE);
1742 return schedule_timeout(timeout);
1743 }
1744 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
1745
1746 /*
1747 * Like schedule_timeout_uninterruptible(), except this task will not contribute
1748 * to load average.
1749 */
1750 signed long __sched schedule_timeout_idle(signed long timeout)
1751 {
1752 __set_current_state(TASK_IDLE);
1753 return schedule_timeout(timeout);
1754 }
1755 EXPORT_SYMBOL(schedule_timeout_idle);
1756
1757 #ifdef CONFIG_HOTPLUG_CPU
1758 static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
1759 {
1760 struct timer_list *timer;
1761 int cpu = new_base->cpu;
1762
1763 while (!hlist_empty(head)) {
1764 timer = hlist_entry(head->first, struct timer_list, entry);
1765 detach_timer(timer, false);
1766 timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
1767 internal_add_timer(new_base, timer);
1768 }
1769 }
1770
1771 static void migrate_timers(int cpu)
1772 {
1773 struct timer_base *old_base;
1774 struct timer_base *new_base;
1775 int b, i;
1776
1777 BUG_ON(cpu_online(cpu));
1778
1779 for (b = 0; b < NR_BASES; b++) {
1780 old_base = per_cpu_ptr(&timer_bases[b], cpu);
1781 new_base = get_cpu_ptr(&timer_bases[b]);
1782 /*
1783 * The caller is globally serialized and nobody else
1784 * takes two locks at once, deadlock is not possible.
1785 */
1786 spin_lock_irq(&new_base->lock);
1787 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1788
1789 BUG_ON(old_base->running_timer);
1790
1791 for (i = 0; i < WHEEL_SIZE; i++)
1792 migrate_timer_list(new_base, old_base->vectors + i);
1793
1794 spin_unlock(&old_base->lock);
1795 spin_unlock_irq(&new_base->lock);
1796 put_cpu_ptr(&timer_bases);
1797 }
1798 }
1799
1800 static int timer_cpu_notify(struct notifier_block *self,
1801 unsigned long action, void *hcpu)
1802 {
1803 switch (action) {
1804 case CPU_DEAD:
1805 case CPU_DEAD_FROZEN:
1806 migrate_timers((long)hcpu);
1807 break;
1808 default:
1809 break;
1810 }
1811
1812 return NOTIFY_OK;
1813 }
1814
1815 static inline void timer_register_cpu_notifier(void)
1816 {
1817 cpu_notifier(timer_cpu_notify, 0);
1818 }
1819 #else
1820 static inline void timer_register_cpu_notifier(void) { }
1821 #endif /* CONFIG_HOTPLUG_CPU */
1822
1823 static void __init init_timer_cpu(int cpu)
1824 {
1825 struct timer_base *base;
1826 int i;
1827
1828 for (i = 0; i < NR_BASES; i++) {
1829 base = per_cpu_ptr(&timer_bases[i], cpu);
1830 base->cpu = cpu;
1831 spin_lock_init(&base->lock);
1832 base->clk = jiffies;
1833 }
1834 }
1835
1836 static void __init init_timer_cpus(void)
1837 {
1838 int cpu;
1839
1840 for_each_possible_cpu(cpu)
1841 init_timer_cpu(cpu);
1842 }
1843
1844 void __init init_timers(void)
1845 {
1846 init_timer_cpus();
1847 init_timer_stats();
1848 timer_register_cpu_notifier();
1849 open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
1850 }
1851
1852 /**
1853 * msleep - sleep safely even with waitqueue interruptions
1854 * @msecs: Time in milliseconds to sleep for
1855 */
1856 void msleep(unsigned int msecs)
1857 {
1858 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1859
1860 while (timeout)
1861 timeout = schedule_timeout_uninterruptible(timeout);
1862 }
1863
1864 EXPORT_SYMBOL(msleep);
1865
1866 /**
1867 * msleep_interruptible - sleep waiting for signals
1868 * @msecs: Time in milliseconds to sleep for
1869 */
1870 unsigned long msleep_interruptible(unsigned int msecs)
1871 {
1872 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1873
1874 while (timeout && !signal_pending(current))
1875 timeout = schedule_timeout_interruptible(timeout);
1876 return jiffies_to_msecs(timeout);
1877 }
1878
1879 EXPORT_SYMBOL(msleep_interruptible);
1880
1881 static void __sched do_usleep_range(unsigned long min, unsigned long max)
1882 {
1883 ktime_t kmin;
1884 u64 delta;
1885
1886 kmin = ktime_set(0, min * NSEC_PER_USEC);
1887 delta = (u64)(max - min) * NSEC_PER_USEC;
1888 schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
1889 }
1890
1891 /**
1892 * usleep_range - Drop in replacement for udelay where wakeup is flexible
1893 * @min: Minimum time in usecs to sleep
1894 * @max: Maximum time in usecs to sleep
1895 */
1896 void __sched usleep_range(unsigned long min, unsigned long max)
1897 {
1898 __set_current_state(TASK_UNINTERRUPTIBLE);
1899 do_usleep_range(min, max);
1900 }
1901 EXPORT_SYMBOL(usleep_range);