/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>

/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
 * are variables that are only used as an 'interface' between the parameters
 * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
 * 'watchdog_thresh' variable is handled differently because its value is not
 * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
 * is equal to zero.
 */
#define NMI_WATCHDOG_ENABLED_BIT   0
#define SOFT_WATCHDOG_ENABLED_BIT  1
#define NMI_WATCHDOG_ENABLED      (1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED     (1 << SOFT_WATCHDOG_ENABLED_BIT)
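
/*
 * For illustration: with both detectors active, 'watchdog_enabled' is
 * (NMI_WATCHDOG_ENABLED | SOFT_WATCHDOG_ENABLED) == 0x3.  Writing 0 to
 * /proc/sys/kernel/nmi_watchdog clears only bit 0, so the soft lockup
 * detector keeps running.
 */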

#ifdef CONFIG_HARDLOCKUP_DETECTOR
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
#else
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif
int __read_mostly nmi_watchdog_enabled;
int __read_mostly soft_watchdog_enabled;
int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#endif

static int __read_mostly watchdog_running;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif
static unsigned long soft_lockup_nmi_warn;

/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void hardlockup_detector_disable(void)
{
	watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
}

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
	else if (!strncmp(str, "1", 1))
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif
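
/*
 * Example kernel command lines handled by the parser above:
 *   nmi_watchdog=panic    - panic on a hard lockup
 *   nmi_watchdog=nopanic  - warn only
 *   nmi_watchdog=0        - disable the hard lockup detector
 *   nmi_watchdog=1        - enable the hard lockup detector
 */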

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#endif

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions. So we generally
 * want a higher threshold for soft lockups than for hard lockups, and we
 * couple the thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}
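
/*
 * With the default watchdog_thresh of 10, a hard lockup is reported after
 * roughly 10 seconds of blocked timer interrupts and a soft lockup after
 * 10 * 2 = 20 seconds without the watchdog task being scheduled.
 */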

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30 ns ~= 1.074 s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns.
	 * The divide by 5 gives the hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
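
/*
 * Worked example: with watchdog_thresh = 10 the softlockup threshold is
 * 20 s, so sample_period = 20 * NSEC_PER_SEC / 5 = 4e9 ns and the
 * hrtimer fires every 4 seconds.
 */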

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

void touch_softlockup_watchdog(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * This is done lockless.  Do we care if a 0 races with a timestamp?
	 * All it means is that the softlockup check starts one cycle later.
	 */
	for_each_online_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	raw_cpu_write(watchdog_nmi_touch, true);
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
#endif
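
/*
 * Code that legitimately keeps a CPU busy with interrupts disabled for a
 * long time (long console writes, some boot-time calibration loops and
 * similar paths) is expected to call touch_nmi_watchdog() periodically;
 * as seen above, that also resets the softlockup timestamp.
 */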

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return 1;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return 0;
}
#endif
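
/*
 * 'hrtimer_interrupts' is incremented by the watchdog hrtimer callback
 * every sample_period, while this check runs from the (less frequent)
 * NMI perf event.  If the count has not moved between two NMI samples,
 * timer interrupts were blocked for an entire sample window, which is
 * the hard lockup condition used here.
 */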

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if (watchdog_enabled & SOFT_WATCHDOG_ENABLED) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}
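
/*
 * Since get_timestamp() counts in ~seconds (ns >> 30), the non-zero
 * return value is the number of seconds the timestamp is stale; it is
 * printed as the "stuck for %us" duration in the soft lockup report.
 */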

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};
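
/*
 * The hard lockup detector rides on a raw CPU-cycles counter; the
 * event's sample_period is filled in later by watchdog_nmi_enable()
 * from hw_nmi_get_sample_period(watchdog_thresh), which is provided by
 * architecture code.
 */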

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/*
	 * Check for a hardlockup by making sure our timer interrupt is
	 * incrementing.  The timer interrupt should have fired multiple
	 * times before we overflowed.  If it hasn't, then this is a good
	 * indication the cpu is stuck.
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d",
			      this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d",
			     this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/*
	 * Check for a softlockup by making sure a high priority task is
	 * being scheduled.  The task touches the watchdog to indicate it
	 * is getting cpu time.  If it hasn't, then this is a good
	 * indication some task is hogging the cpu.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup.  Check to see if the host
		 * stopped the vm before we issue the warning.
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm.  The second process prevents the quiet cycle
			 * and never gets reported.  Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/*
			 * Prevent multiple soft-lockup reports if one cpu is
			 * already engaged in dumping cpu back traces.
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/*
			 * Avoid generating two back traces for current
			 * given that one is already made above.
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}
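
/*
 * This is the smpboot 'thread_should_run' hook: the per-cpu watchdog
 * thread is woken by the hrtimer callback and runs only when the timer
 * has ticked since the thread last ran, i.e. at most once per
 * sample_period.
 */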

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();

	/*
	 * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the
	 * failure path. Check for failures that can occur asynchronously -
	 * for example, when CPUs are on-lined - and shut down the hardware
	 * perf event on each CPU accordingly.
	 *
	 * The only non-obvious place this bit can be cleared is through
	 * watchdog_nmi_enable(), so a pr_info() is placed there.  Placing a
	 * pr_info here would be too noisy as it would result in a message
	 * every few seconds if the hardlockup was disabled but the softlockup
	 * enabled.
	 */
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		watchdog_nmi_disable(cpu);
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(unsigned int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* nothing to do if the hard lockup detector is disabled */
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		goto out;

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

	/* save cpu0 error for future comparison */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for cpu0 or different than cpu0 */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/*
	 * Disable the hard lockup detector if _any_ CPU fails to set up
	 * the hardware perf event. The watchdog() function checks the
	 * NMI_WATCHDOG_ENABLED bit periodically.
	 *
	 * The barriers are for syncing up watchdog_enabled across all the
	 * cpus, as clear_bit() does not use barriers.
	 */
	smp_mb__before_atomic();
	clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
	smp_mb__after_atomic();

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warn("disabled (cpu%i): hardware events not enabled\n",
			 cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
			cpu, PTR_ERR(event));

	pr_info("Shutting down hard lockup detector on all cpus\n");

	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}

static void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
	if (cpu == 0) {
		/* watchdog_nmi_enable() expects this to be zero initially. */
		cpu0_err = 0;
	}
}

void watchdog_nmi_enable_all(void)
{
	int cpu;

	if (!watchdog_user_enabled)
		return;

	get_online_cpus();
	for_each_online_cpu(cpu)
		watchdog_nmi_enable(cpu);
	put_online_cpus();
}

void watchdog_nmi_disable_all(void)
{
	int cpu;

	if (!watchdog_running)
		return;

	get_online_cpus();
	for_each_online_cpu(cpu)
		watchdog_nmi_disable(cpu);
	put_online_cpus();
}

#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
void watchdog_nmi_enable_all(void) {}
void watchdog_nmi_disable_all(void) {}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};

static void restart_watchdog_hrtimer(void *info)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
	int ret;

	/*
	 * No need to cancel and restart hrtimer if it is currently executing
	 * because it will reprogram itself with the new period now.
	 * We should never see it unqueued here because we are running per-cpu
	 * with interrupts disabled.
	 */
	ret = hrtimer_try_to_cancel(hrtimer);
	if (ret == 1)
		hrtimer_start(hrtimer, ns_to_ktime(sample_period),
				HRTIMER_MODE_REL_PINNED);
}

static void update_watchdog(int cpu)
{
	/*
	 * Make sure that the perf event counter will adopt the new
	 * sampling period.  Updating the sampling period directly would
	 * be much nicer, but we do not have an API for that now, so
	 * let's use the big hammer.
	 * The hrtimer will adopt the new period on the next tick, but
	 * this might already be too late, so we have to restart the
	 * timer as well.
	 */
	watchdog_nmi_disable(cpu);
	smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1);
	watchdog_nmi_enable(cpu);
}

static void update_watchdog_all_cpus(void)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		update_watchdog(cpu);
	put_online_cpus();
}

static int watchdog_enable_all_cpus(void)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread(&watchdog_threads);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else {
		/*
		 * Enable/disable the lockup detectors or
		 * change the sample period 'on the fly'.
		 */
		update_watchdog_all_cpus();
	}

	return err;
}

/* prepare/enable/disable routines */
/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}

/*
 * Update the run state of the lockup detectors.
 */
static int proc_watchdog_update(void)
{
	int err = 0;

	/*
	 * Watchdog threads won't be started if they are already active.
	 * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
	 * care of this. If those threads are already active, the sample
	 * period will be updated and the lockup detectors will be enabled
	 * or disabled 'on the fly'.
	 */
	if (watchdog_enabled && watchdog_thresh)
		err = watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

	return err;
}

static DEFINE_MUTEX(watchdog_proc_mutex);

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to | 'which' contains the flag(s)
 * -------------------|-----------------------|-----------------------------
 * proc_watchdog      | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
 *                    |                       | with SOFT_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_nmi_watchdog  | nmi_watchdog_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;
	int *watchdog_param = (int *)table->data;

	mutex_lock(&watchdog_proc_mutex);

	/*
	 * If the parameter is being read return the state of the corresponding
	 * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
	 * run state of the lockup detectors.
	 */
	if (!write) {
		*watchdog_param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (err)
			goto out;

		/*
		 * There is a race window between fetching the current value
		 * from 'watchdog_enabled' and storing the new value. During
		 * this race window, watchdog_nmi_enable() can sneak in and
		 * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
		 * The 'cmpxchg' detects this race and the loop retries.
		 */
		do {
			old = watchdog_enabled;
			/*
			 * If the parameter value is not zero set the
			 * corresponding bit(s), else clear it(them).
			 */
			if (*watchdog_param)
				new = old | which;
			else
				new = old & ~which;
		} while (cmpxchg(&watchdog_enabled, old, new) != old);

		/*
		 * Update the run state of the lockup detectors.
		 * Restore 'watchdog_enabled' on failure.
		 */
		err = proc_watchdog_update();
		if (err)
			watchdog_enabled = old;
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	return err;
}
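
/*
 * Example usage from userspace (the values behave as booleans):
 *   echo 0 > /proc/sys/kernel/watchdog        # disable both detectors
 *   echo 1 > /proc/sys/kernel/nmi_watchdog    # enable the hard detector
 *   echo 0 > /proc/sys/kernel/soft_watchdog   # disable the soft detector
 */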

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	mutex_lock(&watchdog_proc_mutex);

	old = ACCESS_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (err || !write)
		goto out;

	/*
	 * Update the sample period.
	 * Restore 'watchdog_thresh' on failure.
	 */
	set_sample_period();
	err = proc_watchdog_update();
	if (err)
		watchdog_thresh = old;
out:
	mutex_unlock(&watchdog_proc_mutex);
	return err;
}
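
/*
 * Example: 'echo 5 > /proc/sys/kernel/watchdog_thresh' re-arms the
 * detectors with a 5 s hard and 10 s soft threshold and a 2 s sample
 * period, while writing 0 suspends the lockup detectors entirely.
 */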

#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
	set_sample_period();

	if (watchdog_enabled)
		watchdog_enable_all_cpus();
}