/* Extracted from commit 1e019421 (author: MT) */
1 | /* |
2 | * SGI NMI support routines | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License as published by | |
6 | * the Free Software Foundation; either version 2 of the License, or | |
7 | * (at your option) any later version. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License | |
15 | * along with this program; if not, write to the Free Software | |
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
17 | * | |
18 | * Copyright (c) 2009-2013 Silicon Graphics, Inc. All Rights Reserved. | |
19 | * Copyright (c) Mike Travis | |
20 | */ | |
21 | ||
22 | #include <linux/cpu.h> | |
23 | #include <linux/nmi.h> | |
24 | ||
25 | #include <asm/apic.h> | |
26 | #include <asm/nmi.h> | |
27 | #include <asm/uv/uv.h> | |
28 | #include <asm/uv/uv_hub.h> | |
29 | #include <asm/uv/uv_mmrs.h> | |
30 | ||
/* The BMC sets a bit in this MMR non-zero before sending an NMI */
#define UVH_NMI_MMR		UVH_SCRATCH5
/* Writing here clears the pending bit (MMR's "clear" alias at +8) */
#define UVH_NMI_MMR_CLEAR	(UVH_NMI_MMR + 8)
/* Bit 63 of the scratch MMR indicates a pending BMC NMI */
#define UV_NMI_PENDING_MASK	(1UL << 63)
/* Per-cpu snapshot of the blade NMI count last seen by this cpu */
DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count);
/* Serializes stack-dump output so cpus do not interleave lines */
static DEFINE_SPINLOCK(uv_nmi_lock);
37 | ||
38 | /* | |
39 | * When NMI is received, print a stack trace. | |
40 | */ | |
/*
 * When NMI is received, print a stack trace.
 *
 * Returns NMI_HANDLED if this NMI was a BMC-raised UV NMI for this
 * blade, NMI_DONE otherwise (letting other handlers claim it).
 */
int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
{
	unsigned long real_uv_nmi;
	int bid;

	/*
	 * Each blade has an MMR that indicates when an NMI has been sent
	 * to cpus on the blade. If an NMI is detected, atomically
	 * clear the MMR and update a per-blade NMI count used to
	 * cause each cpu on the blade to notice a new NMI.
	 */
	bid = uv_numa_blade_id();
	real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);

	if (unlikely(real_uv_nmi)) {
		spin_lock(&uv_blade_info[bid].nmi_lock);
		/* Re-read under the lock: another cpu on this blade may
		 * already have counted and cleared this NMI. */
		real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) &
				UV_NMI_PENDING_MASK);
		if (real_uv_nmi) {
			uv_blade_info[bid].nmi_count++;
			uv_write_local_mmr(UVH_NMI_MMR_CLEAR,
						UV_NMI_PENDING_MASK);
		}
		spin_unlock(&uv_blade_info[bid].nmi_lock);
	}

	/* No new NMI on this blade since this cpu last looked: not ours */
	if (likely(__get_cpu_var(cpu_last_nmi_count) ==
			uv_blade_info[bid].nmi_count))
		return NMI_DONE;

	/* Record that this cpu has now seen the latest blade NMI */
	__get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;

	/*
	 * Use a lock so only one cpu prints at a time.
	 * This prevents intermixed output.
	 */
	spin_lock(&uv_nmi_lock);
	pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id());
	dump_stack();
	spin_unlock(&uv_nmi_lock);

	return NMI_HANDLED;
}
84 | ||
85 | void uv_register_nmi_notifier(void) | |
86 | { | |
87 | if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv")) | |
88 | pr_warn("UV NMI handler failed to register\n"); | |
89 | } | |
90 | ||
91 | void uv_nmi_init(void) | |
92 | { | |
93 | unsigned int value; | |
94 | ||
95 | /* | |
96 | * Unmask NMI on all cpus | |
97 | */ | |
98 | value = apic_read(APIC_LVT1) | APIC_DM_NMI; | |
99 | value &= ~APIC_LVT_MASKED; | |
100 | apic_write(APIC_LVT1, value); | |
101 | } | |
102 |