/*
 * arch/x86/kernel/nmi.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 * Copyright (C) 2011 Don Zickus Red Hat, Inc.
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/nmi.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <linux/export.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <linux/atomic.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>

struct nmi_desc {
	spinlock_t lock;
	struct list_head head;
};

static struct nmi_desc nmi_desc[NMI_MAX] =
{
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
		.head = LIST_HEAD_INIT(nmi_desc[0].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
		.head = LIST_HEAD_INIT(nmi_desc[1].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock),
		.head = LIST_HEAD_INIT(nmi_desc[2].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock),
		.head = LIST_HEAD_INIT(nmi_desc[3].head),
	},

};
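
/*
 * One descriptor per NMI type used in this file: NMI_LOCAL,
 * NMI_UNKNOWN, NMI_SERR and NMI_IO_CHECK, with NMI_MAX as their
 * count (all from <asm/nmi.h>), hence the array size above.
 */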

struct nmi_stats {
	unsigned int normal;
	unsigned int unknown;
	unsigned int external;
	unsigned int swallow;
};

static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);

static int ignore_nmis;

int unknown_nmi_panic;
/*
 * Prevent the NMI reason port (0x61) from being accessed
 * simultaneously; this lock can only be taken in an NMI handler.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);

static int __init setup_unknown_nmi_panic(char *str)
{
	unknown_nmi_panic = 1;
	return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);

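/*
 * Example: booting with "unknown_nmi_panic" on the kernel command
 * line sets the flag above, so an unexplained NMI panics the machine
 * instead of printing the "Dazed and confused" warning below.
 */
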
#define nmi_to_desc(type) (&nmi_desc[type])

static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC;

static int __init nmi_warning_debugfs(void)
{
	debugfs_create_u64("nmi_longest_ns", 0644,
			arch_debugfs_dir, &nmi_longest_ns);
	return 0;
}
fs_initcall(nmi_warning_debugfs);

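/*
 * The threshold is tunable at run time. A sketch, assuming debugfs is
 * mounted at /sys/kernel/debug (arch_debugfs_dir is its "x86"
 * subdirectory), of raising the warning threshold to 2ms:
 *
 *	echo 2000000 > /sys/kernel/debug/x86/nmi_longest_ns
 */
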
static void nmi_max_handler(struct irq_work *w)
{
	struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
	int remainder_ns, decimal_msecs;
	u64 whole_msecs = ACCESS_ONCE(a->max_duration);

	remainder_ns = do_div(whole_msecs, (1000 * 1000));
	decimal_msecs = remainder_ns / 1000;

	printk_ratelimited(KERN_INFO
		"INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
		a->handler, whole_msecs, decimal_msecs);
}

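/*
 * Worked example of the division above (hypothetical numbers): for
 * max_duration == 2345678 ns, do_div() leaves whole_msecs == 2 and
 * returns remainder_ns == 345678, so decimal_msecs == 345 and the
 * message reads "... took too long to run: 2.345 msecs".
 */
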
static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *a;
	int handled = 0;

	rcu_read_lock();

	/*
	 * NMIs are edge-triggered, which means if you have enough
	 * of them concurrently, you can lose some because only one
	 * can be latched at any given time. Walk the whole list
	 * to handle those situations.
	 */
	list_for_each_entry_rcu(a, &desc->head, list) {
		int thishandled;
		u64 delta;

		delta = sched_clock();
		thishandled = a->handler(type, regs);
		handled += thishandled;
		delta = sched_clock() - delta;
		trace_nmi_handler(a->handler, (int)delta, thishandled);

		if (delta < nmi_longest_ns || delta < a->max_duration)
			continue;

		a->max_duration = delta;
		irq_work_queue(&a->irq_work);
	}

	rcu_read_unlock();

	/* return total number of NMI events handled */
	return handled;
}
NOKPROBE_SYMBOL(nmi_handle);

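/*
 * Note: everything on the NMI path is marked NOKPROBE_SYMBOL. A
 * kprobe would plant a breakpoint, and a breakpoint's iret inside an
 * NMI re-opens the NMI nesting window described in the nesting
 * comments later in this file.
 */
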
int __register_nmi_handler(unsigned int type, struct nmiaction *action)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	unsigned long flags;

	if (!action->handler)
		return -EINVAL;

	init_irq_work(&action->irq_work, nmi_max_handler);

	spin_lock_irqsave(&desc->lock, flags);

	/*
	 * Most handlers of type NMI_UNKNOWN never return 0 because
	 * they just assume the NMI is theirs, so a second handler on
	 * that list would never run. The same single-handler
	 * expectation holds for NMI_SERR and NMI_IO_CHECK. Just a
	 * sanity check to manage expectations.
	 */
	WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
	WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
	WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));

	/*
	 * Some handlers need to be executed first, otherwise a fake
	 * event confuses the handlers that run after them (kdump uses
	 * this flag).
	 */
	if (action->flags & NMI_FLAG_FIRST)
		list_add_rcu(&action->list, &desc->head);
	else
		list_add_tail_rcu(&action->list, &desc->head);

	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
EXPORT_SYMBOL(__register_nmi_handler);

void unregister_nmi_handler(unsigned int type, const char *name)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *n;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);

	list_for_each_entry_rcu(n, &desc->head, list) {
		/*
		 * the name passed in to describe the nmi handler
		 * is used as the lookup key
		 */
		if (!strcmp(n->name, name)) {
			WARN(in_nmi(),
				"Trying to free NMI (%s) from NMI context!\n", n->name);
			list_del_rcu(&n->list);
			break;
		}
	}

	spin_unlock_irqrestore(&desc->lock, flags);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);

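/*
 * Usage sketch (illustrative only, not part of this file): callers
 * normally go through the register_nmi_handler() wrapper from
 * <asm/nmi.h> rather than filling in a struct nmiaction by hand.
 * my_nmi_handler() and the "mydev" name below are hypothetical:
 *
 *	static int my_nmi_handler(unsigned int type, struct pt_regs *regs)
 *	{
 *		if (!my_device_caused_nmi())	// hypothetical device check
 *			return NMI_DONE;	// not ours; let others run
 *		return NMI_HANDLED;		// claim one handled event
 *	}
 *
 *	register_nmi_handler(NMI_LOCAL, my_nmi_handler, 0, "mydev");
 *	...
 *	unregister_nmi_handler(NMI_LOCAL, "mydev");
 */
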
static void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
	/* check to see if anyone registered against these types of errors */
	if (nmi_handle(NMI_SERR, regs, false))
		return;

	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	/*
	 * On some machines, PCI SERR line is used to report memory
	 * errors. EDAC makes use of it.
	 */
#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");

	/* Clear and disable the PCI SERR error line. */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(pci_serr_error);

static void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	/* check to see if anyone registered against these types of errors */
	if (nmi_handle(NMI_IO_CHECK, regs, false))
		return;

	pr_emerg(
	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());
	show_regs(regs);

	if (panic_on_io_nmi)
		panic("NMI IOCK error: Not continuing");

	/* Re-enable the IOCK line and wait: 20000 * 100us = 2 seconds */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);

	i = 20000;
	while (--i) {
		touch_nmi_watchdog();
		udelay(100);
	}

	reason &= ~NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);
}
NOKPROBE_SYMBOL(io_check_error);

static void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	int handled;

	/*
	 * Use 'false' here as back-to-back NMIs are dealt with one
	 * level up. Of course this makes having multiple 'unknown'
	 * handlers useless, as only the first one is ever run (unless
	 * it can actually determine if it caused the NMI).
	 */
	handled = nmi_handle(NMI_UNKNOWN, regs, false);
	if (handled) {
		__this_cpu_add(nmi_stats.unknown, handled);
		return;
	}

	__this_cpu_add(nmi_stats.unknown, 1);

	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	pr_emerg("Do you have a strange power saving mode enabled?\n");
	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");
}
NOKPROBE_SYMBOL(unknown_nmi_error);

static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);

static void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int handled;
	bool b2b = false;

	/*
	 * CPU-specific NMI must be processed before non-CPU-specific
	 * NMI, otherwise we may lose it, because the CPU-specific
	 * NMI can not be detected/processed on other CPUs.
	 */

	/*
	 * Back-to-back NMIs are interesting because they can either
	 * be two NMIs or more than two NMIs (anything over two is
	 * dropped due to NMI being edge-triggered). If this is the
	 * second half of a back-to-back NMI, assume we dropped things
	 * and process more handlers. Otherwise, reset the 'swallow'
	 * NMI behaviour.
	 */
	if (regs->ip == __this_cpu_read(last_nmi_rip))
		b2b = true;
	else
		__this_cpu_write(swallow_nmi, false);

	__this_cpu_write(last_nmi_rip, regs->ip);

	handled = nmi_handle(NMI_LOCAL, regs, b2b);
	__this_cpu_add(nmi_stats.normal, handled);
	if (handled) {
		/*
		 * There are cases when an NMI handler handles multiple
		 * events in the current NMI. One of these events may
		 * be queued to fire in the next NMI. Because the event
		 * is already handled, the next NMI will result in an
		 * unknown NMI. Instead, let's flag it for a potential
		 * NMI to swallow.
		 */
		if (handled > 1)
			__this_cpu_write(swallow_nmi, true);
		return;
	}

	/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
	raw_spin_lock(&nmi_reason_lock);
	reason = x86_platform.get_nmi_reason();

	if (reason & NMI_REASON_MASK) {
		if (reason & NMI_REASON_SERR)
			pci_serr_error(reason, regs);
		else if (reason & NMI_REASON_IOCHK)
			io_check_error(reason, regs);
#ifdef CONFIG_X86_32
		/*
		 * Reassert NMI in case it became active
		 * meanwhile as it's edge-triggered:
		 */
		reassert_nmi();
#endif
		__this_cpu_add(nmi_stats.external, 1);
		raw_spin_unlock(&nmi_reason_lock);
		return;
	}
	raw_spin_unlock(&nmi_reason_lock);

	/*
	 * Only one NMI can be latched at a time. To handle
	 * this we may process multiple nmi handlers at once to
	 * cover the case where an NMI is dropped. The downside
	 * to this approach is we may process an NMI prematurely,
	 * while its real NMI is sitting latched. This will cause
	 * an unknown NMI on the next run of the NMI processing.
	 *
	 * We tried to flag that condition above, by setting the
	 * swallow_nmi flag when we process more than one event.
	 * This condition is also only present on the second half
	 * of a back-to-back NMI, so we flag that condition too.
	 *
	 * If both are true, we assume we already processed this
	 * NMI previously and we swallow it. Otherwise we reset
	 * the logic.
	 *
	 * There are scenarios where we may accidentally swallow
	 * a 'real' unknown NMI. For example, while processing
	 * a perf NMI another perf NMI comes in along with a
	 * 'real' unknown NMI. These two NMIs get combined into
	 * one (as described above). When the next NMI gets
	 * processed, it will be flagged by perf as handled, but
	 * no one will know that there was a 'real' unknown NMI sent
	 * as well. As a result it gets swallowed. Or if the first
	 * perf NMI returns two events handled then the second
	 * NMI will get eaten by the logic below, again losing a
	 * 'real' unknown NMI. But this is the best we can do
	 * for now.
	 */
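	/*
	 * A concrete (hypothetical) timeline of the swallow case: NMI #1
	 * arrives and perf handles two events, so swallow_nmi is set and
	 * the handled > 0 path above returns early. The latched NMI #2
	 * then fires at the same %rip, so b2b is true. Both conditions
	 * below hold, and #2 is counted in nmi_stats.swallow rather than
	 * reported as unknown.
	 */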
	if (b2b && __this_cpu_read(swallow_nmi))
		__this_cpu_add(nmi_stats.swallow, 1);
	else
		unknown_nmi_error(reason, regs);
}
NOKPROBE_SYMBOL(default_do_nmi);

/*
 * NMIs can hit breakpoints which will cause the CPU to lose its
 * NMI context when the breakpoint handler does an iret.
 */
#ifdef CONFIG_X86_32
/*
 * For i386, NMIs use the same stack as the kernel, and we can
 * add a workaround to the iret problem in C (preventing nested
 * NMIs if an NMI takes a trap). Simply have 3 states the NMI
 * can be in:
 *
 *  1) not running
 *  2) executing
 *  3) latched
 *
 * When no NMI is in progress, it is in the "not running" state.
 * When an NMI comes in, it goes into the "executing" state.
 * Normally, if another NMI is triggered, it does not interrupt
 * the running NMI and the HW will simply latch it so that when
 * the first NMI finishes, it will restart the second NMI.
 * (Note, the latch is binary, thus multiple NMIs triggering,
 *  when one is running, are ignored. Only one NMI is restarted.)
 *
 * If an NMI hits a breakpoint that executes an iret, another
 * NMI can preempt it. We do not want to allow this new NMI
 * to run, but we want to execute it when the first one finishes.
 * We set the state to "latched", and the exit of the first NMI will
 * perform a dec_return; if the result is zero (NOT_RUNNING), then
 * it will simply exit the NMI handler. If not, the dec_return
 * would have set the state to NMI_EXECUTING (what we want it to
 * be when we are running). In this case, we simply jump back
 * to rerun the NMI handler again, and restart the 'latched' NMI.
 *
 * No trap (breakpoint or page fault) should be hit before nmi_restart,
 * thus there is no race between the first check of state for NOT_RUNNING
 * and setting it to NMI_EXECUTING. The HW will prevent nested NMIs
 * at this point.
 *
 * In case the NMI takes a page fault, we need to save off the CR2
 * because the NMI could have preempted another page fault and corrupt
 * the CR2 that is about to be read. As nested NMIs must be restarted
 * and they cannot take breakpoints or page faults, the update of the
 * CR2 must be done before converting the nmi state back to NOT_RUNNING.
 * Otherwise, there would be a race of another nested NMI coming in
 * after setting state to NOT_RUNNING but before updating the nmi_cr2.
 */
enum nmi_states {
	NMI_NOT_RUNNING = 0,
	NMI_EXECUTING,
	NMI_LATCHED,
};
static DEFINE_PER_CPU(enum nmi_states, nmi_state);
static DEFINE_PER_CPU(unsigned long, nmi_cr2);

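/*
 * Worked transitions for the this_cpu_dec_return() in
 * nmi_nesting_postprocess() below: NMI_LATCHED (2) decrements to
 * NMI_EXECUTING (1), which is non-zero, so we jump back to
 * nmi_restart and rerun the handlers; NMI_EXECUTING (1) decrements
 * to NMI_NOT_RUNNING (0) and the NMI simply exits.
 */
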
#define nmi_nesting_preprocess(regs)					\
	do {								\
		if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {	\
			this_cpu_write(nmi_state, NMI_LATCHED);		\
			return;						\
		}							\
		this_cpu_write(nmi_state, NMI_EXECUTING);		\
		this_cpu_write(nmi_cr2, read_cr2());			\
	} while (0);							\
	nmi_restart:

#define nmi_nesting_postprocess()					\
	do {								\
		if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))	\
			write_cr2(this_cpu_read(nmi_cr2));		\
		if (this_cpu_dec_return(nmi_state))			\
			goto nmi_restart;				\
	} while (0)
#else /* x86_64 */
/*
 * On x86_64 things are a bit more difficult. This has the same problem
 * where an NMI hitting a breakpoint that calls iret will remove the
 * NMI context, allowing a nested NMI to enter. What makes this more
 * difficult is that both NMIs and breakpoints have their own stack.
 * When a new NMI or breakpoint is executed, the stack is set to a fixed
 * point. If an NMI is nested, it will have its stack set at that same
 * fixed address that the first NMI had, and will start corrupting the
 * stack. This is handled in entry_64.S, but the same problem exists with
 * the breakpoint stack.
 *
 * If a breakpoint is being processed on the debug stack and an NMI
 * comes in that also hits a breakpoint, the stack pointer will be set
 * to the same fixed address as that of the interrupted breakpoint,
 * corrupting its stack. To handle this case, check if the stack that
 * was interrupted is the debug stack, and if so, change the IDT so
 * that new breakpoints will use the current stack and not switch to
 * the fixed address. On return of the NMI, switch back to the
 * original IDT.
 */
static DEFINE_PER_CPU(int, update_debug_stack);

static inline void nmi_nesting_preprocess(struct pt_regs *regs)
{
	/*
	 * If we interrupted a breakpoint, it is possible that
	 * the nmi handler will have breakpoints too. We need to
	 * change the IDT such that breakpoints that happen here
	 * continue to use the NMI stack.
	 */
	if (unlikely(is_debug_stack(regs->sp))) {
		debug_stack_set_zero();
		this_cpu_write(update_debug_stack, 1);
	}
}

static inline void nmi_nesting_postprocess(void)
{
	if (unlikely(this_cpu_read(update_debug_stack))) {
		debug_stack_reset();
		this_cpu_write(update_debug_stack, 0);
	}
}
#endif

dotraplinkage notrace void
do_nmi(struct pt_regs *regs, long error_code)
{
	nmi_nesting_preprocess(regs);

	nmi_enter();

	inc_irq_stat(__nmi_count);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();

	/* On i386, may loop back to preprocess */
	nmi_nesting_postprocess();
}
NOKPROBE_SYMBOL(do_nmi);

void stop_nmi(void)
{
	ignore_nmis++;
}

void restart_nmi(void)
{
	ignore_nmis--;
}

/* reset the back-to-back NMI logic */
void local_touch_nmi(void)
{
	__this_cpu_write(last_nmi_rip, 0);
}
EXPORT_SYMBOL_GPL(local_touch_nmi);