[PATCH] kdump: export per cpu crash notes pointer through sysfs (fix)
arch/i386/kernel/crash.c
/*
 * Architecture specific (i386) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 *
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <mach_ipi.h>

/* This keeps track of which cpu is the crashing cpu. */
static int crashing_cpu;
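
/*
 * For reference (derived from the helpers below): each note that
 * append_elf_note() writes into a crash_notes buffer is laid out as
 *
 *      struct elf_note hdr;             hdr.n_namesz, n_descsz, n_type
 *      char name[hdr.n_namesz];         NUL-terminated owner name
 *      char desc[hdr.n_descsz];         payload, e.g. an elf_prstatus
 *
 * with every piece padded to a 4-byte boundary.  That padding is why the
 * u32 cursor below advances by (len + 3) / 4 words after each memcpy(),
 * and why final_note() terminates the list with an all-zero header.
 */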
static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
                            size_t data_len)
{
        struct elf_note note;

        note.n_namesz = strlen(name) + 1;
        note.n_descsz = data_len;
        note.n_type = type;
        memcpy(buf, &note, sizeof(note));
        buf += (sizeof(note) + 3) / 4;
        memcpy(buf, name, note.n_namesz);
        buf += (note.n_namesz + 3) / 4;
        memcpy(buf, data, note.n_descsz);
        buf += (note.n_descsz + 3) / 4;

        return buf;
}

static void final_note(u32 *buf)
{
        struct elf_note note;

        note.n_namesz = 0;
        note.n_descsz = 0;
        note.n_type = 0;
        memcpy(buf, &note, sizeof(note));
}
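
/*
 * crash_save_this_cpu() fills the given cpu's crash_notes buffer with a
 * single NT_PRSTATUS note (pid plus register set) and terminates the list.
 * The capture kernel later consumes these notes when it assembles its dump
 * image; for that, user space has to learn the physical address of each
 * per cpu buffer, which is what the "export per cpu crash notes pointer
 * through sysfs" change named in the subject line provides (the sysfs side
 * itself is not part of this file).
 */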
static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
{
        struct elf_prstatus prstatus;
        u32 *buf;

        if ((cpu < 0) || (cpu >= NR_CPUS))
                return;

        /* Using ELF notes here is opportunistic.
         * I need a well defined structure format
         * for the data I pass, and I need tags
         * on the data to indicate what information I have
         * squirrelled away.  ELF notes happen to provide
         * all of that, so there is no need to invent something new.
         */
        buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
        if (!buf)
                return;
        memset(&prstatus, 0, sizeof(prstatus));
        prstatus.pr_pid = current->pid;
        elf_core_copy_regs(&prstatus.pr_reg, regs);
        buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
                              sizeof(prstatus));
        final_note(buf);
}
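
/*
 * crash_get_current_regs() snapshots the calling cpu's general purpose and
 * segment registers with inline asm.  There is no trap frame to copy from
 * on this path, so eip can only be approximated with current_text_addr(),
 * i.e. an address inside this function rather than the exact crash site.
 */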
static void crash_get_current_regs(struct pt_regs *regs)
{
        __asm__ __volatile__("movl %%ebx,%0" : "=m"(regs->ebx));
        __asm__ __volatile__("movl %%ecx,%0" : "=m"(regs->ecx));
        __asm__ __volatile__("movl %%edx,%0" : "=m"(regs->edx));
        __asm__ __volatile__("movl %%esi,%0" : "=m"(regs->esi));
        __asm__ __volatile__("movl %%edi,%0" : "=m"(regs->edi));
        __asm__ __volatile__("movl %%ebp,%0" : "=m"(regs->ebp));
        __asm__ __volatile__("movl %%eax,%0" : "=m"(regs->eax));
        __asm__ __volatile__("movl %%esp,%0" : "=m"(regs->esp));
        __asm__ __volatile__("movw %%ss, %%ax;" : "=a"(regs->xss));
        __asm__ __volatile__("movw %%cs, %%ax;" : "=a"(regs->xcs));
        __asm__ __volatile__("movw %%ds, %%ax;" : "=a"(regs->xds));
        __asm__ __volatile__("movw %%es, %%ax;" : "=a"(regs->xes));
        __asm__ __volatile__("pushfl; popl %0" : "=m"(regs->eflags));

        regs->eip = (unsigned long)current_text_addr();
}

/* The CPU does not save ss and esp on the stack if execution is already
 * running in kernel mode at the time of the NMI. This code fixes that up.
 */
static void crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs)
{
        memcpy(newregs, oldregs, sizeof(*newregs));
        newregs->esp = (unsigned long)&(oldregs->esp);
        __asm__ __volatile__(
                "xorl %%eax, %%eax\n\t"
                "movw %%ss, %%ax\n\t"
                : "=a"(newregs->xss));
}

/* We may have saved_regs from wherever the error came from,
 * or NULL if we got here via a direct panic().
 */
static void crash_save_self(struct pt_regs *saved_regs)
{
        struct pt_regs regs;
        int cpu;

        cpu = smp_processor_id();
        if (saved_regs)
                crash_setup_regs(&regs, saved_regs);
        else
                crash_get_current_regs(&regs);
        crash_save_this_cpu(&regs, cpu);
}

#ifdef CONFIG_SMP
static atomic_t waiting_for_crash_ipi;

static int crash_nmi_callback(struct pt_regs *regs, int cpu)
{
        struct pt_regs fixed_regs;

        /* Don't do anything if this handler is invoked on the crashing cpu.
         * Otherwise the system will completely hang. The crashing cpu can
         * get an NMI if the system was booted with the nmi_watchdog
         * parameter.
         */
        if (cpu == crashing_cpu)
                return 1;
        local_irq_disable();

        if (!user_mode(regs)) {
                crash_setup_regs(&fixed_regs, regs);
                regs = &fixed_regs;
        }
        crash_save_this_cpu(regs, cpu);
        disable_local_APIC();
        atomic_dec(&waiting_for_crash_ipi);
        /* Assume hlt works */
        halt();
        for (;;);

        return 1;
}

/*
 * By using the NMI code instead of a vector we just sneak through the
 * word generator coming out with just what we want.  AND it does
 * not matter if clustered_apic_mode is set or not.
 */
static void smp_send_nmi_allbutself(void)
{
        send_IPI_allbutself(APIC_DM_NMI);
}
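
/*
 * Overview of the shootdown sequence in nmi_shootdown_cpus() below: count
 * the other online cpus in waiting_for_crash_ipi, install
 * crash_nmi_callback() as the NMI handler, send an NMI to every cpu but
 * this one, then poll for up to roughly a second (1000 x mdelay(1)) while
 * each cpu saves its registers, disables its local APIC and halts.  A cpu
 * that never answers is simply abandoned once the timeout expires.
 */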
static void nmi_shootdown_cpus(void)
{
        unsigned long msecs;

        atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
        /* Would it be better to replace the trap vector here? */
        set_nmi_callback(crash_nmi_callback);
        /* Ensure the new callback function is set before sending
         * out the NMI
         */
        wmb();

        smp_send_nmi_allbutself();

        msecs = 1000; /* Wait at most a second for the other cpus to stop */
        while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
                mdelay(1);
                msecs--;
        }

        /* Leave the nmi callback set */
        disable_local_APIC();
}
#else
static void nmi_shootdown_cpus(void)
{
        /* There are no cpus to shoot down */
}
#endif
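
/*
 * machine_crash_shutdown() is the architecture hook the crash path calls
 * before handing control to the kexec'd capture kernel (that hand-off
 * itself is outside this file).  The order below matters: interrupts off,
 * record the crashing cpu so crash_nmi_callback() can recognise it, shoot
 * down the other cpus, quiesce the local APIC and, when configured, the
 * IO-APIC, and finally save this cpu's own registers.
 */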
void machine_crash_shutdown(struct pt_regs *regs)
{
        /* This function is only called after the system
         * has panicked or is otherwise in a critical state.
         * The minimum amount of code to allow a kexec'd kernel
         * to run successfully needs to happen here.
         *
         * In practice this means shooting down the other cpus in
         * an SMP system.
         */
        /* The kernel is broken so disable interrupts */
        local_irq_disable();

        /* Make a note of the crashing cpu. Will be used in the NMI callback. */
        crashing_cpu = smp_processor_id();
        nmi_shootdown_cpus();
        lapic_shutdown();
#if defined(CONFIG_X86_IO_APIC)
        disable_IO_APIC();
#endif
        crash_save_self(regs);
}