/*
 * Detect Soft Lockups
 *
 * started by Ingo Molnar, Copyright (C) 2005, 2006 Red Hat, Inc.
 *
 * this code detects soft lockups: incidents where the kernel does not
 * reschedule on a CPU for 10 seconds or more.
 */
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>

#include <asm/irq_regs.h>

static DEFINE_SPINLOCK(print_lock);

static DEFINE_PER_CPU(unsigned long, touch_timestamp);
static DEFINE_PER_CPU(unsigned long, print_timestamp);
static DEFINE_PER_CPU(struct task_struct *, watchdog_task);

static int __read_mostly did_panic;
int __read_mostly softlockup_thresh = 60;

/*
 * Should we panic (and reboot, if panic_timeout= is set) when a
 * soft-lockup occurs:
 */
unsigned int __read_mostly softlockup_panic =
				CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);
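
/*
 * Example: booting with "softlockup_panic=1" sets softlockup_panic via the
 * __setup() handler above, so a detected soft lockup ends in panic() (and a
 * reboot when panic_timeout is also configured).
 */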

static int
softlock_panic(struct notifier_block *this, unsigned long event, void *ptr)
{
	did_panic = 1;

	return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
	.notifier_call = softlock_panic,
};

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(int this_cpu)
{
	return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
}

static void __touch_softlockup_watchdog(void)
{
	int this_cpu = raw_smp_processor_id();

	__raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
}

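/*
 * Writing 0 here is a sentinel: the next softlockup_tick() on this CPU sees
 * the zero, re-arms the timestamp via __touch_softlockup_watchdog() and
 * returns without warning.
 */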
void touch_softlockup_watchdog(void)
{
	__raw_get_cpu_var(touch_timestamp) = 0;
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/* Cause each CPU to re-update its timestamp rather than complain */
	for_each_online_cpu(cpu)
		per_cpu(touch_timestamp, cpu) = 0;
}
EXPORT_SYMBOL(touch_all_softlockup_watchdogs);

/*
 * This callback runs from the timer interrupt, and checks
 * whether the watchdog thread has hung or not:
 */
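/*
 * Timeline, with the default softlockup_thresh of 60: once 'now' passes
 * touch_timestamp + 30 the watchdog thread is woken so it can touch the
 * timestamp again; once it passes touch_timestamp + 60 the "BUG: soft
 * lockup" report below is printed.
 */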
void softlockup_tick(void)
{
	int this_cpu = smp_processor_id();
	unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
	unsigned long print_timestamp;
	struct pt_regs *regs = get_irq_regs();
	unsigned long now;

	/* Is detection switched off? */
	if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) {
		/* Be sure we don't false trigger if switched back on */
		if (touch_timestamp)
			per_cpu(touch_timestamp, this_cpu) = 0;
		return;
	}

	if (touch_timestamp == 0) {
		__touch_softlockup_watchdog();
		return;
	}

	print_timestamp = per_cpu(print_timestamp, this_cpu);

	/* report at most once a second */
	if (print_timestamp == touch_timestamp || did_panic)
		return;

	/* do not print during early bootup: */
	if (unlikely(system_state != SYSTEM_RUNNING)) {
		__touch_softlockup_watchdog();
		return;
	}

	now = get_timestamp(this_cpu);

	/*
	 * Wake up the high-prio watchdog task twice per
	 * threshold timespan.
	 */
	if (now > touch_timestamp + softlockup_thresh/2)
		wake_up_process(per_cpu(watchdog_task, this_cpu));

	/* Warn about unreasonable delays: */
	if (now <= (touch_timestamp + softlockup_thresh))
		return;

	per_cpu(print_timestamp, this_cpu) = touch_timestamp;

	spin_lock(&print_lock);
	printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
			this_cpu, now - touch_timestamp,
			current->comm, task_pid_nr(current));
	print_modules();
	print_irqtrace_events(current);
	if (regs)
		show_regs(regs);
	else
		dump_stack();
	spin_unlock(&print_lock);

	if (softlockup_panic)
		panic("softlockup: hung tasks");
}

/*
 * Have a reasonable limit on the number of tasks checked:
 */
unsigned long __read_mostly sysctl_hung_task_check_count = 1024;

/*
 * Zero means infinite timeout - no checking done:
 */
unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120;

unsigned long __read_mostly sysctl_hung_task_warnings = 10;
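
/*
 * These limits are runtime tunables; the report printed by check_hung_task()
 * below points at /proc/sys/kernel/hung_task_timeout_secs as the knob that
 * silences it.
 */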

/*
 * Only do the hung-tasks check on one CPU:
 */
static int check_cpu __read_mostly = -1;

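/*
 * A task is considered hung when its context-switch count (voluntary +
 * involuntary, t->nvcsw + t->nivcsw) has not changed for
 * sysctl_hung_task_timeout_secs seconds while it sits in
 * TASK_UNINTERRUPTIBLE, unless it is frozen or in a TASK_KILLABLE wait.
 */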
static void check_hung_task(struct task_struct *t, unsigned long now)
{
	unsigned long switch_count = t->nvcsw + t->nivcsw;

	if (t->flags & PF_FROZEN)
		return;

	/* Don't check for tasks waiting on network file systems like NFS */
	if (t->state & TASK_KILLABLE)
		return;

	if (switch_count != t->last_switch_count || !t->last_switch_timestamp) {
		t->last_switch_count = switch_count;
		t->last_switch_timestamp = now;
		return;
	}
	if ((long)(now - t->last_switch_timestamp) <
						sysctl_hung_task_timeout_secs)
		return;
	if (sysctl_hung_task_warnings < 0)
		return;
	sysctl_hung_task_warnings--;

	/*
	 * Ok, the task did not get scheduled for more than 2 minutes,
	 * complain:
	 */
	printk(KERN_ERR "INFO: task %s:%d blocked for more than "
			"%ld seconds.\n", t->comm, t->pid,
			sysctl_hung_task_timeout_secs);
	printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
			" disables this message.\n");
	sched_show_task(t);
	__debug_show_held_locks(t);

	t->last_switch_timestamp = now;
	touch_nmi_watchdog();

	if (softlockup_panic)
		panic("softlockup: blocked tasks");
}

/*
 * Check whether a TASK_UNINTERRUPTIBLE does not get woken up for
 * a really long time (120 seconds). If that happens, print out
 * a warning.
 */
static void check_hung_uninterruptible_tasks(int this_cpu)
{
	int max_count = sysctl_hung_task_check_count;
	unsigned long now = get_timestamp(this_cpu);
	struct task_struct *g, *t;

	/*
	 * If the system crashed already then all bets are off,
	 * do not report extra hung tasks:
	 */
	if ((tainted & TAINT_DIE) || did_panic)
		return;

	read_lock(&tasklist_lock);
	do_each_thread(g, t) {
		if (!--max_count)
			goto unlock;
		if (t->state & TASK_UNINTERRUPTIBLE)
			check_hung_task(t, now);
	} while_each_thread(g, t);
 unlock:
	read_unlock(&tasklist_lock);
}

/*
 * The watchdog thread - runs every second and touches the timestamp.
 */
static int watchdog(void *__bind_cpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	int this_cpu = (long)__bind_cpu;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* initialize timestamp */
	__touch_softlockup_watchdog();

	set_current_state(TASK_INTERRUPTIBLE);
	/*
	 * Run briefly once per second to reset the softlockup timestamp.
	 * If this gets delayed for more than 60 seconds then the
	 * debug-printout triggers in softlockup_tick().
	 */
	while (!kthread_should_stop()) {
		__touch_softlockup_watchdog();
		schedule();

		if (kthread_should_stop())
			break;

		if (this_cpu == check_cpu) {
			if (sysctl_hung_task_timeout_secs)
				check_hung_uninterruptible_tasks(this_cpu);
		}

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}

/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		BUG_ON(per_cpu(watchdog_task, hotcpu));
		p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
		if (IS_ERR(p)) {
			printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		per_cpu(touch_timestamp, hotcpu) = 0;
		per_cpu(watchdog_task, hotcpu) = p;
		kthread_bind(p, hotcpu);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		check_cpu = any_online_cpu(cpu_online_map);
		wake_up_process(per_cpu(watchdog_task, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
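	/*
	 * If the CPU running the hung-task scan is about to go away, hand
	 * check_cpu over to some other online CPU before it disappears.
	 */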
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		if (hotcpu == check_cpu) {
			cpumask_t temp_cpu_online_map = cpu_online_map;

			cpu_clear(hotcpu, temp_cpu_online_map);
			check_cpu = any_online_cpu(temp_cpu_online_map);
		}
		break;

	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(watchdog_task, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(watchdog_task, hotcpu),
			     any_online_cpu(cpu_online_map));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		p = per_cpu(watchdog_task, hotcpu);
		per_cpu(watchdog_task, hotcpu) = NULL;
		kthread_stop(p);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static int __initdata nosoftlockup;

static int __init nosoftlockup_setup(char *str)
{
	nosoftlockup = 1;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

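/*
 * Boot-time setup: create and start the watchdog thread for the boot CPU by
 * hand, then register the hotplug notifier so every other CPU gets one as it
 * comes online.
 */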
static int __init spawn_softlockup_task(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err;

	if (nosoftlockup)
		return 0;

	err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	if (err == NOTIFY_BAD) {
		BUG();
		return 1;
	}
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

	return 0;
}
early_initcall(spawn_softlockup_task);