/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *	Vivek Goyal <vgoyal@redhat.com>
 *
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>

/* Alignment required for elf header segment */
#define ELF_CORE_HEADER_ALIGN	4096

/* This primarily represents number of split ranges due to exclusion */
#define CRASH_MAX_RANGES	16

struct crash_mem_range {
	u64 start, end;
};

struct crash_mem {
	unsigned int nr_ranges;
	struct crash_mem_range ranges[CRASH_MAX_RANGES];
};

/* Misc data about ram ranges needed to prepare elf headers */
struct crash_elf_data {
	struct kimage *image;
	/*
	 * Total number of ram ranges we have after various adjustments for
	 * GART, crash reserved region etc.
	 */
	unsigned int max_nr_ranges;
	unsigned long gart_start, gart_end;

	/* Pointer to elf header */
	void *ehdr;
	/* Pointer to next phdr */
	void *bufp;
	struct crash_mem mem;
};

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};

int in_crash_kexec;

/*
 * Used to VMCLEAR all VMCSs loaded on the processor. The callback
 * function pointer is assigned when the kvm_intel module is loaded.
 *
 * Protected by RCU.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
unsigned long crash_zero_bytes;

static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

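/*
 * Runs on each secondary CPU in NMI context during a crash: it saves that
 * CPU's register state for the dump, clears any loaded VMCSs, disables
 * VMX/SVM and shuts off the local APIC so the kdump kernel starts on
 * quiesced hardware.
 */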
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;

	if (!user_mode_vm(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	disable_local_APIC();
}

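/*
 * nmi_shootdown_cpus() sends an NMI to all other CPUs and waits (with a
 * timeout) for each to run the supplied callback and halt, so that only
 * this CPU keeps executing afterwards.
 */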
static void kdump_nmi_shootdown_cpus(void)
{
	in_crash_kexec = 1;
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}

#else
static void kdump_nmi_shootdown_cpus(void)
{
	/* There are no cpus to shootdown */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	kdump_nmi_shootdown_cpus();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	disable_IO_APIC();
#endif
	lapic_shutdown();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}

#ifdef CONFIG_X86_64

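/*
 * Called once per System RAM range by walk_system_ram_range(); simply
 * counts the ranges so we know how many phdrs to budget for.
 */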
static int get_nr_ram_ranges_callback(unsigned long start_pfn,
				unsigned long nr_pfn, void *arg)
{
	int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

static int get_gart_ranges_callback(u64 start, u64 end, void *arg)
{
	struct crash_elf_data *ced = arg;

	ced->gart_start = start;
	ced->gart_end = end;

	/* Not expecting more than 1 gart aperture */
	return 1;
}

/* Gather all the required information to prepare elf headers for ram regions */
static void fill_up_crash_elf_data(struct crash_elf_data *ced,
				   struct kimage *image)
{
	unsigned int nr_ranges = 0;

	ced->image = image;

	walk_system_ram_range(0, -1, &nr_ranges,
				get_nr_ram_ranges_callback);

	ced->max_nr_ranges = nr_ranges;

	/*
	 * We don't create ELF headers for the GART aperture, as an attempt
	 * to dump this memory in the second kernel leads to a hang/crash.
	 * If a GART aperture is present, that region needs to be excluded,
	 * which may require an extra phdr.
	 */
	walk_iomem_res("GART", IORESOURCE_MEM, 0, -1,
				ced, get_gart_ranges_callback);

	/*
	 * If we have a gart region, excluding it could potentially split
	 * a memory range, resulting in an extra header. Account for that.
	 */
	if (ced->gart_end)
		ced->max_nr_ranges++;

	/* Exclusion of crash region could split memory ranges */
	ced->max_nr_ranges++;

	/* If crashk_low_res is not 0, another range split possible */
	if (crashk_low_res.end != 0)
		ced->max_nr_ranges++;
}

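/*
 * Carves [mstart, mend] out of the ranges in @mem. Depending on overlap
 * this means dropping a range entirely, trimming its head or tail, or
 * splitting it in two; a split consumes one extra slot in the array.
 */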
static int exclude_mem_range(struct crash_mem *mem,
		unsigned long long mstart, unsigned long long mend)
{
	int i, j;
	unsigned long long start, end;
	struct crash_mem_range temp_range = {0, 0};

	for (i = 0; i < mem->nr_ranges; i++) {
		start = mem->ranges[i].start;
		end = mem->ranges[i].end;

		if (mstart > end || mend < start)
			continue;

		/* Truncate any area outside of range */
		if (mstart < start)
			mstart = start;
		if (mend > end)
			mend = end;

		/* Found completely overlapping range */
		if (mstart == start && mend == end) {
			mem->ranges[i].start = 0;
			mem->ranges[i].end = 0;
			if (i < mem->nr_ranges - 1) {
				/* Shift rest of the ranges to left */
				for (j = i; j < mem->nr_ranges - 1; j++) {
					mem->ranges[j].start =
						mem->ranges[j+1].start;
					mem->ranges[j].end =
						mem->ranges[j+1].end;
				}
			}
			mem->nr_ranges--;
			return 0;
		}

		if (mstart > start && mend < end) {
			/* Split original range */
			mem->ranges[i].end = mstart - 1;
			temp_range.start = mend + 1;
			temp_range.end = end;
		} else if (mstart != start)
			mem->ranges[i].end = mstart - 1;
		else
			mem->ranges[i].start = mend + 1;
		break;
	}

	/* If no split happened, we are done */
	if (!temp_range.end)
		return 0;

	/* A split happened; the second half needs its own slot */
	if (i == CRASH_MAX_RANGES - 1) {
		pr_err("Too many crash ranges after split\n");
		return -ENOMEM;
	}

	/* Location where new range should go */
	j = i + 1;
	if (j < mem->nr_ranges) {
		/* Move over all ranges one slot towards the end */
		for (i = mem->nr_ranges - 1; i >= j; i--)
			mem->ranges[i + 1] = mem->ranges[i];
	}

	mem->ranges[j].start = temp_range.start;
	mem->ranges[j].end = temp_range.end;
	mem->nr_ranges++;
	return 0;
}

/*
 * Look for any unwanted ranges between mstart and mend and remove them.
 * This might lead to splits; split ranges end up in the ced->mem.ranges[]
 * array.
 */
static int elf_header_exclude_ranges(struct crash_elf_data *ced,
		unsigned long long mstart, unsigned long long mend)
{
	struct crash_mem *cmem = &ced->mem;
	int ret = 0;

	memset(cmem->ranges, 0, sizeof(cmem->ranges));

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude crashkernel region */
	ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	ret = exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
	if (ret)
		return ret;

	/* Exclude GART region */
	if (ced->gart_end) {
		ret = exclude_mem_range(cmem, ced->gart_start, ced->gart_end);
		if (ret)
			return ret;
	}

	return ret;
}

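/*
 * Invoked by walk_system_ram_res() for each System RAM resource: excludes
 * the crashkernel/GART regions from it and emits one PT_LOAD phdr per
 * surviving sub-range.
 */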
static int prepare_elf64_ram_headers_callback(u64 start, u64 end, void *arg)
{
	struct crash_elf_data *ced = arg;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long mstart, mend;
	struct kimage *image = ced->image;
	struct crash_mem *cmem;
	int ret, i;

	ehdr = ced->ehdr;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(ced, start, end);
	if (ret)
		return ret;

	/* Go through all the ranges in ced->mem.ranges[] and prepare phdr */
	cmem = &ced->mem;

	for (i = 0; i < cmem->nr_ranges; i++) {
		mstart = cmem->ranges[i].start;
		mend = cmem->ranges[i].end;

		phdr = ced->bufp;
		ced->bufp += sizeof(Elf64_Phdr);

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = mstart;

		/*
		 * If a range matches backup region, adjust offset to backup
		 * segment.
		 */
		if (mstart == image->arch.backup_src_start &&
		    (mend - mstart + 1) == image->arch.backup_src_sz)
			phdr->p_offset = image->arch.backup_load_addr;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		phdr->p_align = 0;
		ehdr->e_phnum++;
		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			ehdr->e_phnum, phdr->p_offset);
	}

	return ret;
}

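/*
 * Builds the ELF core header in one vzalloc'd buffer. The phdr budget is:
 * one PT_NOTE per CPU (sized for every possible CPU), one PT_NOTE for
 * vmcoreinfo, one PT_LOAD for the kernel text mapping, plus up to
 * max_nr_ranges PT_LOADs for system RAM.
 */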
static int prepare_elf64_headers(struct crash_elf_data *ced,
		void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf, *bufp;
	unsigned int cpu;
	unsigned long long notes_addr;
	int ret;

	/* extra phdr for vmcoreinfo elf note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += ced->max_nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
	 * area on x86_64 (ffffffff80000000 - ffffffffa0000000).
	 * I think this is required by tools like gdb. So same physical
	 * memory will be mapped in two elf headers. One will contain kernel
	 * text virtual addresses and other will have __va(physical) addresses.
	 */

	nr_phdr++;
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	bufp = buf;
	ehdr = (Elf64_Ehdr *)bufp;
	bufp += sizeof(Elf64_Ehdr);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);

	/* Prepare one phdr of type PT_NOTE for each present cpu */
	for_each_present_cpu(cpu) {
		phdr = (Elf64_Phdr *)bufp;
		bufp += sizeof(Elf64_Phdr);
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		(ehdr->e_phnum)++;
	}

	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = sizeof(vmcoreinfo_note);
	(ehdr->e_phnum)++;

#ifdef CONFIG_X86_64
	/* Prepare PT_LOAD type program header for kernel text region */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_LOAD;
	phdr->p_flags = PF_R|PF_W|PF_X;
	phdr->p_vaddr = (Elf64_Addr)_text;
	phdr->p_filesz = phdr->p_memsz = _end - _text;
	phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
	(ehdr->e_phnum)++;
#endif

	/* Prepare PT_LOAD headers for system ram chunks. */
	ced->ehdr = ehdr;
	ced->bufp = bufp;
	ret = walk_system_ram_res(0, -1, ced,
			prepare_elf64_ram_headers_callback);
	if (ret < 0)
		return ret;

	*addr = buf;
	*sz = elf_sz;
	return 0;
}

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
					unsigned long *sz)
{
	struct crash_elf_data *ced;
	int ret;

	ced = kzalloc(sizeof(*ced), GFP_KERNEL);
	if (!ced)
		return -ENOMEM;

	fill_up_crash_elf_data(ced, image);

	/* By default prepare 64bit headers */
	ret = prepare_elf64_headers(ced, addr, sz);
	kfree(ced);
	return ret;
}

static int add_e820_entry(struct boot_params *params, struct e820entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820MAX)
		return 1;

	memcpy(&params->e820_map[nr_e820_entries], entry,
			sizeof(struct e820entry));
	params->e820_entries++;
	return 0;
}

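/*
 * walk_iomem_res() hands each matching resource to this callback, which
 * converts it into an e820 entry of cmd->type in the boot params passed
 * to the crash kernel.
 */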
static int memmap_entry_callback(u64 start, u64 end, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820entry ei;

	ei.addr = start;
	ei.size = end - start + 1;
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret = 0;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude Backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
	ret = exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return exclude_mem_range(cmem, start, end);
}

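/*
 * Prepare the memory map for the crash dump kernel: the backup (first
 * 640K) region, the ACPI table and NVS regions, crashk_low_res if
 * present, and crashk_res minus the backup and elf header segments are
 * all added as e820 entries to the crash kernel's boot_params.
 */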
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(sizeof(struct crash_mem));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add first 640K segment */
	ei.addr = image->arch.backup_src_start;
	ei.size = image->arch.backup_src_sz;
	ei.type = E820_RAM;
	add_e820_entry(params, &ei);

	/* Add ACPI tables */
	cmd.type = E820_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res("ACPI Tables", flags, 0, -1, &cmd,
		       memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_NVS;
	walk_iomem_res("ACPI Non-volatile Storage", flags, 0, -1, &cmd,
		       memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = crashk_low_res.end - crashk_low_res.start + 1;
		ei.type = E820_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add rest to memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
				    crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}

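/*
 * Returning 1 stops walk_system_ram_res() after the first match, so only
 * the first RAM range below 640K is recorded as the backup source.
 */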
static int determine_backup_region(u64 start, u64 end, void *arg)
{
	struct kimage *image = arg;

	image->arch.backup_src_start = start;
	image->arch.backup_src_sz = end - start + 1;

	/* Expecting only one range for backup region */
	return 1;
}

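/*
 * Used when a crash kernel is loaded via the kexec_file_load() path:
 * records the backup source region, adds a zero-filled placeholder
 * segment for it (purgatory copies the real contents at crash time),
 * then builds the ELF core headers and loads them as a segment.
 */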
int crash_load_segments(struct kimage *image)
{
	unsigned long src_start, src_sz, elf_sz;
	void *elf_addr;
	int ret;

	/*
	 * Determine and load a segment for backup area. First 640K RAM
	 * region is backup source
	 */

	ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
				image, determine_backup_region);

	/* Zero or positive return values are ok */
	if (ret < 0)
		return ret;

	src_start = image->arch.backup_src_start;
	src_sz = image->arch.backup_src_sz;

	/* Add backup segment. */
	if (src_sz) {
		/*
		 * Ideally there is no source for backup segment. This is
		 * copied in purgatory after crash. Just add a zero filled
		 * segment for now to make sure checksum logic works fine.
		 */
		ret = kexec_add_buffer(image, (char *)&crash_zero_bytes,
				       sizeof(crash_zero_bytes), src_sz,
				       PAGE_SIZE, 0, -1, 0,
				       &image->arch.backup_load_addr);
		if (ret)
			return ret;
		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
			 image->arch.backup_load_addr, src_start, src_sz);
	}

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(image, &elf_addr, &elf_sz);
	if (ret)
		return ret;

	image->arch.elf_headers = elf_addr;
	image->arch.elf_headers_sz = elf_sz;

	ret = kexec_add_buffer(image, (char *)elf_addr, elf_sz, elf_sz,
			       ELF_CORE_HEADER_ALIGN, 0, -1, 0,
			       &image->arch.elf_load_addr);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, elf_sz, elf_sz);

	return ret;
}

#endif /* CONFIG_X86_64 */