/*
 * Detect Soft Lockups
 *
 * started by Ingo Molnar, Copyright (C) 2005, 2006 Red Hat, Inc.
 *
 * this code detects soft lockups: incidents in which the kernel fails to
 * reschedule on a CPU for softlockup_thresh seconds or more (60 seconds
 * by default).
 */
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/notifier.h>
#include <linux/module.h>

#include <asm/irq_regs.h>

static DEFINE_SPINLOCK(print_lock);

static DEFINE_PER_CPU(unsigned long, touch_timestamp);
static DEFINE_PER_CPU(unsigned long, print_timestamp);
static DEFINE_PER_CPU(struct task_struct *, watchdog_task);

static int __read_mostly did_panic;
int __read_mostly softlockup_thresh = 60;

/*
 * Should we panic (and reboot, if panic_timeout= is set) when a
 * soft-lockup occurs:
 */
unsigned int __read_mostly softlockup_panic =
				CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);
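/*
 * Usage sketch (editor's illustration, not part of the original source):
 * the __setup() hook above parses the kernel command line, so booting
 * with e.g.
 *
 *	softlockup_panic=1
 *
 * turns a detected soft lockup into a panic(), overriding the
 * CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE compile-time default.
 */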
static int
softlock_panic(struct notifier_block *this, unsigned long event, void *ptr)
{
	did_panic = 1;

	return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
	.notifier_call = softlock_panic,
};
/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(int this_cpu)
{
	return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
}
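/*
 * Worked example (editor's illustration): cpu_clock() counts nanoseconds
 * and one unit of this timestamp is 2^30 ns = 1.073741824 s, so e.g.
 * 5,000,000,000 ns >> 30 == 4.  That is roughly one count per second,
 * which is all the resolution the watchdog needs.
 */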
void touch_softlockup_watchdog(void)
{
	int this_cpu = raw_smp_processor_id();

	__raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/* Cause each CPU to re-update its timestamp rather than complain */
	for_each_online_cpu(cpu)
		per_cpu(touch_timestamp, cpu) = 0;
}
EXPORT_SYMBOL(touch_all_softlockup_watchdogs);
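/*
 * Usage sketch (editor's illustration, with a hypothetical helper): code
 * that legitimately keeps a CPU busy for a long time can reset the
 * per-CPU timestamp so the detector does not fire, e.g.
 *
 *	while (device_still_flushing(dev)) {
 *		cpu_relax();
 *		touch_softlockup_watchdog();
 *	}
 */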
/*
 * This callback runs from the timer interrupt, and checks
 * whether the watchdog thread has hung or not:
 */
void softlockup_tick(void)
{
	int this_cpu = smp_processor_id();
	unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
	unsigned long print_timestamp;
	struct pt_regs *regs = get_irq_regs();
	unsigned long now;

	/* Is detection switched off? */
	if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) {
		/* Be sure we don't false trigger if switched back on */
		if (touch_timestamp)
			per_cpu(touch_timestamp, this_cpu) = 0;
		return;
	}

	if (touch_timestamp == 0) {
		touch_softlockup_watchdog();
		return;
	}

	print_timestamp = per_cpu(print_timestamp, this_cpu);

	/* report at most once a second */
	if ((print_timestamp >= touch_timestamp &&
			print_timestamp < (touch_timestamp + 1)) ||
			did_panic) {
		return;
	}

	/* do not print during early bootup: */
	if (unlikely(system_state != SYSTEM_RUNNING)) {
		touch_softlockup_watchdog();
		return;
	}

	now = get_timestamp(this_cpu);

	/* Wake up the high-prio watchdog task every second: */
	if (now > (touch_timestamp + 1))
		wake_up_process(per_cpu(watchdog_task, this_cpu));

	/* Warn about unreasonable delays: */
	if (now <= (touch_timestamp + softlockup_thresh))
		return;

	per_cpu(print_timestamp, this_cpu) = touch_timestamp;

	spin_lock(&print_lock);
	printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
			this_cpu, now - touch_timestamp,
			current->comm, task_pid_nr(current));
	if (regs)
		show_regs(regs);
	else
		dump_stack();
	spin_unlock(&print_lock);

	if (softlockup_panic)
		panic("softlockup: hung tasks");
}
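/*
 * Worked example (editor's illustration): with softlockup_thresh == 60,
 * suppose the watchdog thread last updated touch_timestamp at 100.  The
 * timer interrupt keeps calling softlockup_tick(); once now reaches 161,
 * the "now <= touch_timestamp + softlockup_thresh" test above fails and
 * "BUG: soft lockup - CPU#N stuck for 61s!" is printed.
 */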
/*
 * Have a reasonable limit on the number of tasks checked:
 */
unsigned long __read_mostly sysctl_hung_task_check_count = 1024;

/*
 * Zero means infinite timeout - no checking done:
 */
unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120;

unsigned long __read_mostly sysctl_hung_task_warnings = 10;

/*
 * Only do the hung-tasks check on one CPU:
 */
static int check_cpu __read_mostly = -1;
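/*
 * Tuning note (editor's illustration): these sysctl_* variables are the
 * runtime knobs for the hung-task check.  As the warning below spells
 * out,
 *
 *	echo 0 > /proc/sys/kernel/hung_task_timeout_secs
 *
 * disables the check, since zero means "infinite timeout - no checking
 * done".
 */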
static void check_hung_task(struct task_struct *t, unsigned long now)
{
	unsigned long switch_count = t->nvcsw + t->nivcsw;

	if (t->flags & PF_FROZEN)
		return;

	if (switch_count != t->last_switch_count || !t->last_switch_timestamp) {
		t->last_switch_count = switch_count;
		t->last_switch_timestamp = now;
		return;
	}
	if ((long)(now - t->last_switch_timestamp) <
						sysctl_hung_task_timeout_secs)
		return;
	if (sysctl_hung_task_warnings < 0)
		return;
	sysctl_hung_task_warnings--;

	/*
	 * Ok, the task did not get scheduled for more than 2 minutes,
	 * complain:
	 */
	printk(KERN_ERR "INFO: task %s:%d blocked for more than "
			"%ld seconds.\n", t->comm, t->pid,
			sysctl_hung_task_timeout_secs);
	printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
			" disables this message.\n");
	sched_show_task(t);
	__debug_show_held_locks(t);

	t->last_switch_timestamp = now;
	touch_nmi_watchdog();

	if (softlockup_panic)
		panic("softlockup: blocked tasks");
}
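/*
 * Worked example (editor's illustration): check_hung_task() treats
 * t->nvcsw + t->nivcsw as a progress counter.  A task sitting in
 * TASK_UNINTERRUPTIBLE keeps the same switch_count between checks, so
 * once now - last_switch_timestamp reaches sysctl_hung_task_timeout_secs
 * (120 s by default) the INFO message above is printed; the timestamp is
 * then reset, so the same task is reported again only after another
 * timeout period.
 */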
/*
 * Check whether a TASK_UNINTERRUPTIBLE does not get woken up for
 * a really long time (120 seconds). If that happens, print out
 * a warning.
 */
static void check_hung_uninterruptible_tasks(int this_cpu)
{
	int max_count = sysctl_hung_task_check_count;
	unsigned long now = get_timestamp(this_cpu);
	struct task_struct *g, *t;

	/*
	 * If the system crashed already then all bets are off,
	 * do not report extra hung tasks:
	 */
	if ((tainted & TAINT_DIE) || did_panic)
		return;

	read_lock(&tasklist_lock);
	do_each_thread(g, t) {
		if (!--max_count)
			goto unlock;
		if (t->state & TASK_UNINTERRUPTIBLE)
			check_hung_task(t, now);
	} while_each_thread(g, t);
 unlock:
	read_unlock(&tasklist_lock);
}
/*
 * The watchdog thread - runs every second and touches the timestamp.
 */
static int watchdog(void *__bind_cpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	int this_cpu = (long)__bind_cpu;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* initialize timestamp */
	touch_softlockup_watchdog();

	set_current_state(TASK_INTERRUPTIBLE);
	/*
	 * Run briefly once per second to reset the softlockup timestamp.
	 * If this gets delayed for more than 60 seconds then the
	 * debug-printout triggers in softlockup_tick().
	 */
	while (!kthread_should_stop()) {
		touch_softlockup_watchdog();
		schedule();

		if (kthread_should_stop())
			break;

		if (this_cpu == check_cpu) {
			if (sysctl_hung_task_timeout_secs)
				check_hung_uninterruptible_tasks(this_cpu);
		}

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}
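/*
 * Note (editor's addition): the watchdog thread runs SCHED_FIFO at
 * MAX_RT_PRIO-1, the highest realtime priority, so ordinary load cannot
 * starve it.  Only a CPU that genuinely stops scheduling - e.g. code
 * spinning with preemption disabled - keeps it from refreshing
 * touch_timestamp and so trips the report in softlockup_tick().
 */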
/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		BUG_ON(per_cpu(watchdog_task, hotcpu));
		p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
		if (IS_ERR(p)) {
			printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		per_cpu(touch_timestamp, hotcpu) = 0;
		per_cpu(watchdog_task, hotcpu) = p;
		kthread_bind(p, hotcpu);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		check_cpu = any_online_cpu(cpu_online_map);
		wake_up_process(per_cpu(watchdog_task, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		if (hotcpu == check_cpu) {
			cpumask_t temp_cpu_online_map = cpu_online_map;

			cpu_clear(hotcpu, temp_cpu_online_map);
			check_cpu = any_online_cpu(temp_cpu_online_map);
		}
		break;

	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(watchdog_task, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(watchdog_task, hotcpu),
			     any_online_cpu(cpu_online_map));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		p = per_cpu(watchdog_task, hotcpu);
		per_cpu(watchdog_task, hotcpu) = NULL;
		kthread_stop(p);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};
__init void spawn_softlockup_task(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
}