kexec: support kexec/kdump on EFI systems
/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>

#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include <crypto/sha.h>

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* vmcoreinfo stuff */
static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;

/*
 * Declare these symbols weak so that if the architecture provides a
 * purgatory, these will be overridden.
 */
char __weak kexec_purgatory[0];
size_t __weak kexec_purgatory_size = 0;

static int kexec_calculate_store_digests(struct kimage *image);

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
struct resource crashk_low_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

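/*
 * A task that dies in interrupt context, as the idle task (pid 0), as
 * global init, or while panic_on_oops is set takes the whole system
 * down with it, so its death should trigger the crash kernel.
 */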
int kexec_should_crash(struct task_struct *p)
{
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */

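/*
 * The descriptor list mentioned above is built out of kimage_entry_t
 * words, each a page-aligned physical address tagged in its low bits:
 *
 *	dest | IND_DESTINATION   set the current destination address
 *	addr | IND_SOURCE        copy one source page to the destination,
 *	                         then advance the destination by PAGE_SIZE
 *	page | IND_INDIRECTION   continue reading entries from this page
 *	IND_DONE                 end of the list
 *
 * kimage_add_entry() and friends below maintain this structure.
 */
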
/*
 * KIMAGE_NO_DEST is an impossible destination address, used for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)

static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long dest);

static int copy_user_segment_list(struct kimage *image,
				  unsigned long nr_segments,
				  struct kexec_segment __user *segments)
{
	int ret;
	size_t segment_bytes;

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	ret = copy_from_user(image->segment, segments, segment_bytes);
	if (ret)
		ret = -EFAULT;

	return ret;
}

static int sanity_check_segment_list(struct kimage *image)
{
	int result, i;
	unsigned long nr_segments = image->nr_segments;

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			return result;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			return result;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things can happen with no
	 * easy explanation as one segment stops on another.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;
			pstart = image->segment[j].mem;
			pend   = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				return result;
		}
	}

	/* Ensure our buffer sizes are no larger than
	 * our memory sizes.  This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			return result;
	}

	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of ram.  We must ensure the addresses
	 * are in the reserved area otherwise preloading the
	 * kernel could corrupt things.
	 */

	if (image->type == KEXEC_TYPE_CRASH) {
		result = -EADDRNOTAVAIL;
		for (i = 0; i < nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			/* Ensure we are within the crash kernel limits */
			if ((mstart < crashk_res.start) ||
			    (mend > crashk_res.end))
				return result;
		}
	}

	return 0;
}

static struct kimage *do_kimage_alloc_init(void)
{
	struct kimage *image;

	/* Allocate a controlling structure */
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		return NULL;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unusable_pages);

	return image;
}

static void kimage_free_page_list(struct list_head *list);

static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
			     unsigned long nr_segments,
			     struct kexec_segment __user *segments,
			     unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_ON_CRASH;

	if (kexec_on_panic) {
		/* Verify we have a valid entry point */
		if ((entry < crashk_res.start) || (entry > crashk_res.end))
			return -EADDRNOTAVAIL;
	}

	/* Allocate and initialize a controlling structure */
	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->start = entry;

	ret = copy_user_segment_list(image, nr_segments, segments);
	if (ret)
		goto out_free_image;

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_image;

	 /* Enable the special crash kernel control page allocation policy. */
	if (kexec_on_panic) {
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	/*
	 * Find a location for the control code buffer, and add it to
	 * the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_image;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_image:
	kfree(image);
	return ret;
}

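/*
 * Slurp the whole file behind fd into a vmalloc'd buffer, returning
 * the buffer and its length to the caller (who is responsible for
 * vfree()ing it).
 */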
static int copy_file_from_fd(int fd, void **buf, unsigned long *buf_len)
{
	struct fd f = fdget(fd);
	int ret;
	struct kstat stat;
	loff_t pos;
	ssize_t bytes = 0;

	if (!f.file)
		return -EBADF;

	ret = vfs_getattr(&f.file->f_path, &stat);
	if (ret)
		goto out;

	if (stat.size > INT_MAX) {
		ret = -EFBIG;
		goto out;
	}

	/* Don't hand 0 to vmalloc, it whines. */
	if (stat.size == 0) {
		ret = -EINVAL;
		goto out;
	}

	*buf = vmalloc(stat.size);
	if (!*buf) {
		ret = -ENOMEM;
		goto out;
	}

	pos = 0;
	while (pos < stat.size) {
		bytes = kernel_read(f.file, pos, (char *)(*buf) + pos,
				    stat.size - pos);
		if (bytes < 0) {
			vfree(*buf);
			ret = bytes;
			goto out;
		}

		if (bytes == 0)
			break;
		pos += bytes;
	}

	if (pos != stat.size) {
		ret = -EBADF;
		vfree(*buf);
		goto out;
	}

	*buf_len = pos;
out:
	fdput(f);
	return ret;
}

/* Architectures can provide this probe function */
int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
					 unsigned long buf_len)
{
	return -ENOEXEC;
}

void * __weak arch_kexec_kernel_image_load(struct kimage *image)
{
	return ERR_PTR(-ENOEXEC);
}

void __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
{
}

/* Apply relocations of type RELA */
int __weak
arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
				 unsigned int relsec)
{
	pr_err("RELA relocation unsupported.\n");
	return -ENOEXEC;
}

/* Apply relocations of type REL */
int __weak
arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			     unsigned int relsec)
{
	pr_err("REL relocation unsupported.\n");
	return -ENOEXEC;
}

/*
 * Free up memory used by kernel, initrd, and command line. This is temporary
 * memory allocation which is not needed any more after these buffers have
 * been loaded into separate segments and have been copied elsewhere.
 */
static void kimage_file_post_load_cleanup(struct kimage *image)
{
	struct purgatory_info *pi = &image->purgatory_info;

	vfree(image->kernel_buf);
	image->kernel_buf = NULL;

	vfree(image->initrd_buf);
	image->initrd_buf = NULL;

	kfree(image->cmdline_buf);
	image->cmdline_buf = NULL;

	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;

	vfree(pi->sechdrs);
	pi->sechdrs = NULL;

	/* See if architecture has anything to cleanup post load */
	arch_kimage_file_post_load_cleanup(image);

	/*
	 * Above call should have called into bootloader to free up
	 * any data stored in kimage->image_loader_data. It should
	 * be ok now to free it up.
	 */
	kfree(image->image_loader_data);
	image->image_loader_data = NULL;
}

/*
 * In file mode list of segments is prepared by kernel. Copy relevant
 * data from user space, do error checking, prepare segment list
 */
static int
kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
			     const char __user *cmdline_ptr,
			     unsigned long cmdline_len, unsigned flags)
{
	int ret = 0;
	void *ldata;

	ret = copy_file_from_fd(kernel_fd, &image->kernel_buf,
				&image->kernel_buf_len);
	if (ret)
		return ret;

	/* Call arch image probe handlers */
	ret = arch_kexec_kernel_image_probe(image, image->kernel_buf,
					    image->kernel_buf_len);

	if (ret)
		goto out;

	/* It is possible that no initramfs is being loaded */
	if (!(flags & KEXEC_FILE_NO_INITRAMFS)) {
		ret = copy_file_from_fd(initrd_fd, &image->initrd_buf,
					&image->initrd_buf_len);
		if (ret)
			goto out;
	}

	if (cmdline_len) {
		image->cmdline_buf = kzalloc(cmdline_len, GFP_KERNEL);
		if (!image->cmdline_buf) {
			ret = -ENOMEM;
			goto out;
		}

		ret = copy_from_user(image->cmdline_buf, cmdline_ptr,
				     cmdline_len);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		image->cmdline_buf_len = cmdline_len;

		/* command line should be a string with last byte null */
		if (image->cmdline_buf[cmdline_len - 1] != '\0') {
			ret = -EINVAL;
			goto out;
		}
	}

	/* Call arch image load handlers */
	ldata = arch_kexec_kernel_image_load(image);

	if (IS_ERR(ldata)) {
		ret = PTR_ERR(ldata);
		goto out;
	}

	image->image_loader_data = ldata;
out:
	/* In case of error, free up all allocated memory in this function */
	if (ret)
		kimage_file_post_load_cleanup(image);
	return ret;
}

static int
kimage_file_alloc_init(struct kimage **rimage, int kernel_fd,
		       int initrd_fd, const char __user *cmdline_ptr,
		       unsigned long cmdline_len, unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_FILE_ON_CRASH;

	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->file_mode = 1;

	if (kexec_on_panic) {
		/* Enable special crash kernel control page alloc policy. */
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	ret = kimage_file_prepare_segments(image, kernel_fd, initrd_fd,
					   cmdline_ptr, cmdline_len, flags);
	if (ret)
		goto out_free_image;

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_post_load_bufs;

	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_post_load_bufs;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_post_load_bufs:
	kimage_file_post_load_cleanup(image);
out_free_image:
	kfree(image);
	return ret;
}

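/*
 * Return 1 if any part of [start, end) overlaps the destination range
 * of one of the image's segments.
 */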
static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start,
				       unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}

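/*
 * Page allocation helpers: pages handed out here are marked Reserved,
 * and the allocation order is stashed in page_private() so that
 * kimage_free_pages() can later undo both and return the block.
 */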
static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	pages = alloc_pages(gfp_mask, order);
	if (pages) {
		unsigned int count, i;
		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);
	}

	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;
	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

static void kimage_free_page_list(struct list_head *list)
{
	struct list_head *pos, *next;

	list_for_each_safe(pos, next, list) {
		struct page *page;

		page = list_entry(pos, struct page, lru);
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(GFP_KERNEL, order);
		if (!pages)
			break;
		pfn   = page_to_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						     unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			break;
		}
	}
	if (pages)
		image->control_page = hole_end;

	return pages;
}


struct page *kimage_alloc_control_pages(struct kimage *image,
					unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}

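/*
 * Append one entry to the list consumed by the reboot code.  When the
 * current indirection page fills up, a fresh page is allocated and
 * chained in with IND_INDIRECTION, and writing continues there.  The
 * list is always left terminated by a zero entry.
 */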
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

static int kimage_set_destination(struct kimage *image,
				  unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);
	if (result == 0)
		image->destination = destination;

	return result;
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);
	if (result == 0)
		image->destination += PAGE_SIZE;

	return result;
}


static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unusable_pages);
}

static void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}

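/*
 * Walk every entry in the image's list, transparently following
 * IND_INDIRECTION links and stopping at IND_DONE (or a zero entry).
 */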
#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			phys_to_virt((entry & PAGE_MASK)) : ptr + 1)

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

static void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);

	/*
	 * Free up any temporary buffers allocated. This might hit if
	 * error occurred much later after buffer allocation.
	 */
	if (image->file_mode)
		kimage_file_post_load_cleanup(image);

	kfree(image);
}

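/*
 * Find the IND_SOURCE entry, if any, whose destination address is
 * @page; returns a pointer to that entry, or NULL if the page is not
 * used as a destination by this image.
 */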
static kimage_entry_t *kimage_dst_used(struct kimage *image,
				       unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
					gfp_t gfp_mask,
					unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.   If the runtime is a problem the data structures can
	 * be fixed.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unusable_pages);
			continue;
		}
		addr = page_to_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want, use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						  addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			addr = old_addr;
			page = old_page;
			break;
		} else {
			/* Place the page on the destination list; I
			 * will use it later.
			 */
			list_add(&page->lru, &image->dest_pages);
		}
	}

	return page;
}

static int kimage_load_normal_segment(struct kimage *image,
					 struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

static int kimage_load_crash_segment(struct kimage *image,
					struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

static int kimage_load_segment(struct kimage *image,
				struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}

/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down, preventing on-going DMAs and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination.  And
 *   jumps into the image at entry.
 *
 * kexec does not sync, or unmount filesystems so if you need
 * that to happen you need to do that yourself.
 */
struct kimage *kexec_image;
struct kimage *kexec_crash_image;
int kexec_load_disabled;

static DEFINE_MUTEX(kexec_mutex);

SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
		struct kexec_segment __user *, segments, unsigned long, flags)
{
	struct kimage **dest_image, *image;
	int result;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/*
	 * Verify we have a legal set of flags
	 * This leaves us room for future extensions.
	 */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;

	/* Verify we are on the appropriate architecture */
	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
		return -EINVAL;

	/* Put an artificial cap on the number
	 * of segments passed to kexec_load.
	 */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	image = NULL;
	result = 0;

	/* Because we write directly to the reserved memory
	 * region when loading crash kernels we need a mutex here to
	 * prevent multiple crash kernels from attempting to load
	 * simultaneously, and to prevent a crash kernel from loading
	 * over the top of an in-use crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_ON_CRASH)
		dest_image = &kexec_crash_image;
	if (nr_segments > 0) {
		unsigned long i;

		/* Loading another kernel to reboot into */
		if ((flags & KEXEC_ON_CRASH) == 0)
			result = kimage_alloc_init(&image, entry, nr_segments,
						   segments, flags);
		/* Loading another kernel to switch to if this one crashes */
		else if (flags & KEXEC_ON_CRASH) {
			/* Free any current crash dump kernel before
			 * we corrupt it.
			 */
			kimage_free(xchg(&kexec_crash_image, NULL));
			result = kimage_alloc_init(&image, entry, nr_segments,
						   segments, flags);
			crash_map_reserved_pages();
		}
		if (result)
			goto out;

		if (flags & KEXEC_PRESERVE_CONTEXT)
			image->preserve_context = 1;
		result = machine_kexec_prepare(image);
		if (result)
			goto out;

		for (i = 0; i < nr_segments; i++) {
			result = kimage_load_segment(image, &image->segment[i]);
			if (result)
				goto out;
		}
		kimage_terminate(image);
		if (flags & KEXEC_ON_CRASH)
			crash_unmap_reserved_pages();
	}
	/* Install the new kernel, and  Uninstall the old */
	image = xchg(dest_image, image);

out:
	mutex_unlock(&kexec_mutex);
	kimage_free(image);

	return result;
}

/*
 * Add and remove page tables for crashkernel memory
 *
 * Provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak crash_map_reserved_pages(void)
{}

void __weak crash_unmap_reserved_pages(void)
{}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
		       compat_ulong_t, nr_segments,
		       struct compat_kexec_segment __user *, segments,
		       compat_ulong_t, flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i, result;

	/* Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			return -EFAULT;

		out.buf   = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem   = in.mem;
		out.memsz = in.memsz;

		result = copy_to_user(&ksegments[i], &out, sizeof(out));
		if (result)
			return -EFAULT;
	}

	return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif

SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
		unsigned long, cmdline_len, const char __user *, cmdline_ptr,
		unsigned long, flags)
{
	int ret = 0, i;
	struct kimage **dest_image, *image;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/* Make sure we have a legal set of flags */
	if (flags != (flags & KEXEC_FILE_FLAGS))
		return -EINVAL;

	image = NULL;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_FILE_ON_CRASH)
		dest_image = &kexec_crash_image;

	if (flags & KEXEC_FILE_UNLOAD)
		goto exchange;

	/*
	 * In case of crash, new kernel gets loaded in reserved region. It is
	 * same memory where old crash kernel might be loaded. Free any
	 * current crash dump kernel before we corrupt it.
	 */
	if (flags & KEXEC_FILE_ON_CRASH)
		kimage_free(xchg(&kexec_crash_image, NULL));

	ret = kimage_file_alloc_init(&image, kernel_fd, initrd_fd, cmdline_ptr,
				     cmdline_len, flags);
	if (ret)
		goto out;

	ret = machine_kexec_prepare(image);
	if (ret)
		goto out;

	ret = kexec_calculate_store_digests(image);
	if (ret)
		goto out;

	for (i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n",
			 i, ksegment->buf, ksegment->bufsz, ksegment->mem,
			 ksegment->memsz);

		ret = kimage_load_segment(image, &image->segment[i]);
		if (ret)
			goto out;
	}

	kimage_terminate(image);

	/*
	 * Free up any temporary buffers allocated which are not needed
	 * after image has been loaded
	 */
	kimage_file_post_load_cleanup(image);
exchange:
	image = xchg(dest_image, image);
out:
	mutex_unlock(&kexec_mutex);
	kimage_free(image);
	return ret;
}

void crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_mutex here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient.  But since I reuse the memory...
	 */
	if (mutex_trylock(&kexec_mutex)) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		mutex_unlock(&kexec_mutex);
	}
}

size_t crash_get_memory_size(void)
{
	size_t size = 0;
	mutex_lock(&kexec_mutex);
	if (crashk_res.end != crashk_res.start)
		size = resource_size(&crashk_res);
	mutex_unlock(&kexec_mutex);
	return size;
}

void __weak crash_free_reserved_phys_range(unsigned long begin,
					   unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
}

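/*
 * Shrink the crashkernel reservation to new_size bytes: the freed tail
 * is handed back to the page allocator and re-registered as "System
 * RAM" in the iomem resource tree.
 */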
int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
	unsigned long start, end;
	unsigned long old_size;
	struct resource *ram_res;

	mutex_lock(&kexec_mutex);

	if (kexec_crash_image) {
		ret = -ENOENT;
		goto unlock;
	}
	start = crashk_res.start;
	end = crashk_res.end;
	old_size = (end == 0) ? 0 : end - start + 1;
	if (new_size >= old_size) {
		ret = (new_size == old_size) ? 0 : -EINVAL;
		goto unlock;
	}

	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
	if (!ram_res) {
		ret = -ENOMEM;
		goto unlock;
	}

	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

	crash_map_reserved_pages();
	crash_free_reserved_phys_range(end, crashk_res.end);

	if ((start == end) && (crashk_res.parent != NULL))
		release_resource(&crashk_res);

	ram_res->start = end;
	ram_res->end = crashk_res.end;
	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	ram_res->name = "System RAM";

	crashk_res.end = end - 1;

	insert_resource(&iomem_resource, ram_res);
	crash_unmap_reserved_pages();

unlock:
	mutex_unlock(&kexec_mutex);
	return ret;
}

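/*
 * Lay down one ELF note at buf: the elf_note header, then the
 * NUL-terminated name, then the descriptor data, each padded out to a
 * 4-byte boundary.  Returns the position just past the note.
 */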
static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
			    size_t data_len)
{
	struct elf_note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = data_len;
	note.n_type   = type;
	memcpy(buf, &note, sizeof(note));
	buf += (sizeof(note) + 3)/4;
	memcpy(buf, name, note.n_namesz);
	buf += (note.n_namesz + 3)/4;
	memcpy(buf, data, note.n_descsz);
	buf += (note.n_descsz + 3)/4;

	return buf;
}

static void final_note(u32 *buf)
{
	struct elf_note note;

	note.n_namesz = 0;
	note.n_descsz = 0;
	note.n_type   = 0;
	memcpy(buf, &note, sizeof(note));
}

void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away.  ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	crash_notes = alloc_percpu(note_buf_t);
	if (!crash_notes) {
		pr_warn("Kexec: Memory allocation for saving cpu register states failed\n");
		return -ENOMEM;
	}
	return 0;
}
subsys_initcall(crash_notes_memory_init);


/*
 * parsing the "crashkernel" commandline
 *
 * this code is intended to be called from architecture specific code
 */


/*
 * This function parses command lines in the format
 *
 *	crashkernel=ramsize-range:size[,...][@offset]
 *
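 * e.g. crashkernel=512M-2G:64M,2G-:128M reserves 64M if total RAM is
 * between 512M and 2G, and 128M if there is 2G of RAM or more.
 *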
 * The function returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_mem(char *cmdline,
					unsigned long long system_ram,
					unsigned long long *crash_size,
					unsigned long long *crash_base)
{
	char *cur = cmdline, *tmp;

	/* for each entry of the comma-separated list */
	do {
		unsigned long long start, end = ULLONG_MAX, size;

		/* get the start of the range */
		start = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warn("crashkernel: Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (*cur != '-') {
			pr_warn("crashkernel: '-' expected\n");
			return -EINVAL;
		}
		cur++;

		/* if no ':' is here, then we read the end */
		if (*cur != ':') {
			end = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warn("crashkernel: Memory value expected\n");
				return -EINVAL;
			}
			cur = tmp;
			if (end <= start) {
				pr_warn("crashkernel: end <= start\n");
				return -EINVAL;
			}
		}

		if (*cur != ':') {
			pr_warn("crashkernel: ':' expected\n");
			return -EINVAL;
		}
		cur++;

		size = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warn("Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (size >= system_ram) {
			pr_warn("crashkernel: invalid size\n");
			return -EINVAL;
		}

		/* match ? */
		if (system_ram >= start && system_ram < end) {
			*crash_size = size;
			break;
		}
	} while (*cur++ == ',');

	if (*crash_size > 0) {
		while (*cur && *cur != ' ' && *cur != '@')
			cur++;
		if (*cur == '@') {
			cur++;
			*crash_base = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warn("Memory value expected after '@'\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}

/*
 * This function parses "simple" (old) crashkernel command lines like
 *
 *	crashkernel=size[@offset]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_simple(char *cmdline,
					   unsigned long long *crash_size,
					   unsigned long long *crash_base)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warn("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	if (*cur == '@')
		*crash_base = memparse(cur+1, &cur);
	else if (*cur != ' ' && *cur != '\0') {
		pr_warn("crashkernel: unrecognized char\n");
		return -EINVAL;
	}

	return 0;
}

#define SUFFIX_HIGH 0
#define SUFFIX_LOW  1
#define SUFFIX_NULL 2
static __initdata char *suffix_tbl[] = {
	[SUFFIX_HIGH] = ",high",
	[SUFFIX_LOW]  = ",low",
	[SUFFIX_NULL] = NULL,
};

/*
 * This function parses "suffix" crashkernel command lines like
 *
 *	crashkernel=size,[high|low]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_suffix(char *cmdline,
					   unsigned long long *crash_size,
					   unsigned long long *crash_base,
					   const char *suffix)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warn("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	/* check with suffix */
	if (strncmp(cur, suffix, strlen(suffix))) {
		pr_warn("crashkernel: unrecognized char\n");
		return -EINVAL;
	}
	cur += strlen(suffix);
	if (*cur != ' ' && *cur != '\0') {
		pr_warn("crashkernel: unrecognized char\n");
		return -EINVAL;
	}

	return 0;
}

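/*
 * Return the last occurrence of @name on the command line that
 * matches: with a NULL suffix, the last one carrying no known suffix;
 * otherwise the last one ending in the given suffix.
 */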
static __init char *get_last_crashkernel(char *cmdline,
					 const char *name,
					 const char *suffix)
{
	char *p = cmdline, *ck_cmdline = NULL;

	/* find crashkernel and use the last one if there are more */
	p = strstr(p, name);
	while (p) {
		char *end_p = strchr(p, ' ');
		char *q;

		if (!end_p)
			end_p = p + strlen(p);

		if (!suffix) {
			int i;

			/* skip the one with any known suffix */
			for (i = 0; suffix_tbl[i]; i++) {
				q = end_p - strlen(suffix_tbl[i]);
				if (!strncmp(q, suffix_tbl[i],
					     strlen(suffix_tbl[i])))
					goto next;
			}
			ck_cmdline = p;
		} else {
			q = end_p - strlen(suffix);
			if (!strncmp(q, suffix, strlen(suffix)))
				ck_cmdline = p;
		}
next:
		p = strstr(p+1, name);
	}

	if (!ck_cmdline)
		return NULL;

	return ck_cmdline;
}

static int __init __parse_crashkernel(char *cmdline,
			     unsigned long long system_ram,
			     unsigned long long *crash_size,
			     unsigned long long *crash_base,
			     const char *name,
			     const char *suffix)
{
	char	*first_colon, *first_space;
	char	*ck_cmdline;

	BUG_ON(!crash_size || !crash_base);
	*crash_size = 0;
	*crash_base = 0;

	ck_cmdline = get_last_crashkernel(cmdline, name, suffix);

	if (!ck_cmdline)
		return -EINVAL;

	ck_cmdline += strlen(name);

	if (suffix)
		return parse_crashkernel_suffix(ck_cmdline, crash_size,
				crash_base, suffix);
	/*
	 * if the commandline contains a ':', then that's the extended
	 * syntax -- if not, it must be the classic syntax
	 */
	first_colon = strchr(ck_cmdline, ':');
	first_space = strchr(ck_cmdline, ' ');
	if (first_colon && (!first_space || first_colon < first_space))
		return parse_crashkernel_mem(ck_cmdline, system_ram,
				crash_size, crash_base);

	return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
}

adbc742b
YL
1841/*
1842 * That function is the entry point for command line parsing and should be
1843 * called from the arch-specific code.
1844 */
0212f915
YL
1845int __init parse_crashkernel(char *cmdline,
1846 unsigned long long system_ram,
1847 unsigned long long *crash_size,
1848 unsigned long long *crash_base)
1849{
1850 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
adbc742b 1851 "crashkernel=", NULL);
0212f915 1852}

int __init parse_crashkernel_high(char *cmdline,
				  unsigned long long system_ram,
				  unsigned long long *crash_size,
				  unsigned long long *crash_base)
{
	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
				   "crashkernel=", suffix_tbl[SUFFIX_HIGH]);
}

int __init parse_crashkernel_low(char *cmdline,
				 unsigned long long system_ram,
				 unsigned long long *crash_size,
				 unsigned long long *crash_base)
{
	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
				   "crashkernel=", suffix_tbl[SUFFIX_LOW]);
}
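/*
 * A minimal sketch of how arch setup code typically consumes these parsers
 * (total_mem is an illustrative placeholder, not a symbol from this file):
 *
 *	unsigned long long crash_size, crash_base;
 *	int ret;
 *
 *	ret = parse_crashkernel(boot_command_line, total_mem,
 *				&crash_size, &crash_base);
 *	if (ret == 0 && crash_size > 0) {
 *		crashk_res.start = crash_base;
 *		crashk_res.end   = crash_base + crash_size - 1;
 *	}
 */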

static void update_vmcoreinfo_note(void)
{
	u32 *buf = vmcoreinfo_note;

	if (!vmcoreinfo_size)
		return;
	buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
			      vmcoreinfo_size);
	final_note(buf);
}

void crash_save_vmcoreinfo(void)
{
	vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
	update_vmcoreinfo_note();
}

void vmcoreinfo_append_str(const char *fmt, ...)
{
	va_list args;
	char buf[0x50];
	size_t r;

	va_start(args, fmt);
	r = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);

	memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);

	vmcoreinfo_size += r;
}
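/*
 * Usage sketch: callers append "KEY=value\n" pairs that crash dump tools
 * later read back out of the vmcoreinfo ELF note, e.g.
 *
 *	vmcoreinfo_append_str("NUMBER(%s)=%ld\n", "MAX_ORDER", (long)MAX_ORDER);
 *
 * Note the destination is a fixed buffer: anything beyond
 * vmcoreinfo_max_size is silently truncated by the min() above.
 */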

/*
 * Provide an empty default implementation here -- architecture
 * code may override this.
 */
void __weak arch_crash_save_vmcoreinfo(void)
{}

unsigned long __weak paddr_vmcoreinfo_note(void)
{
	return __pa((unsigned long)(char *)&vmcoreinfo_note);
}

static int __init crash_save_vmcoreinfo_init(void)
{
	VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
	VMCOREINFO_PAGESIZE(PAGE_SIZE);

	VMCOREINFO_SYMBOL(init_uts_ns);
	VMCOREINFO_SYMBOL(node_online_map);
#ifdef CONFIG_MMU
	VMCOREINFO_SYMBOL(swapper_pg_dir);
#endif
	VMCOREINFO_SYMBOL(_stext);
	VMCOREINFO_SYMBOL(vmap_area_list);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	VMCOREINFO_SYMBOL(mem_map);
	VMCOREINFO_SYMBOL(contig_page_data);
#endif
#ifdef CONFIG_SPARSEMEM
	VMCOREINFO_SYMBOL(mem_section);
	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
	VMCOREINFO_STRUCT_SIZE(mem_section);
	VMCOREINFO_OFFSET(mem_section, section_mem_map);
#endif
	VMCOREINFO_STRUCT_SIZE(page);
	VMCOREINFO_STRUCT_SIZE(pglist_data);
	VMCOREINFO_STRUCT_SIZE(zone);
	VMCOREINFO_STRUCT_SIZE(free_area);
	VMCOREINFO_STRUCT_SIZE(list_head);
	VMCOREINFO_SIZE(nodemask_t);
	VMCOREINFO_OFFSET(page, flags);
	VMCOREINFO_OFFSET(page, _count);
	VMCOREINFO_OFFSET(page, mapping);
	VMCOREINFO_OFFSET(page, lru);
	VMCOREINFO_OFFSET(page, _mapcount);
	VMCOREINFO_OFFSET(page, private);
	VMCOREINFO_OFFSET(pglist_data, node_zones);
	VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
#endif
	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
	VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
	VMCOREINFO_OFFSET(pglist_data, node_id);
	VMCOREINFO_OFFSET(zone, free_area);
	VMCOREINFO_OFFSET(zone, vm_stat);
	VMCOREINFO_OFFSET(zone, spanned_pages);
	VMCOREINFO_OFFSET(free_area, free_list);
	VMCOREINFO_OFFSET(list_head, next);
	VMCOREINFO_OFFSET(list_head, prev);
	VMCOREINFO_OFFSET(vmap_area, va_start);
	VMCOREINFO_OFFSET(vmap_area, list);
	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
	log_buf_kexec_setup();
	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
	VMCOREINFO_NUMBER(NR_FREE_PAGES);
	VMCOREINFO_NUMBER(PG_lru);
	VMCOREINFO_NUMBER(PG_private);
	VMCOREINFO_NUMBER(PG_swapcache);
	VMCOREINFO_NUMBER(PG_slab);
#ifdef CONFIG_MEMORY_FAILURE
	VMCOREINFO_NUMBER(PG_hwpoison);
#endif
	VMCOREINFO_NUMBER(PG_head_mask);
	VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
#ifdef CONFIG_HUGETLBFS
	VMCOREINFO_SYMBOL(free_huge_page);
#endif

	arch_crash_save_vmcoreinfo();
	update_vmcoreinfo_note();

	return 0;
}

subsys_initcall(crash_save_vmcoreinfo_init);
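/*
 * For context (an observation, not code from this file): these macros
 * publish kernel layout details -- struct sizes, member offsets, symbol
 * addresses -- into the vmcoreinfo ELF note, which is what lets userspace
 * kdump tooling such as makedumpfile interpret the crashed kernel's memory
 * without needing matching debuginfo.
 */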

static int __kexec_add_segment(struct kimage *image, char *buf,
			       unsigned long bufsz, unsigned long mem,
			       unsigned long memsz)
{
	struct kexec_segment *ksegment;

	ksegment = &image->segment[image->nr_segments];
	ksegment->kbuf = buf;
	ksegment->bufsz = bufsz;
	ksegment->mem = mem;
	ksegment->memsz = memsz;
	image->nr_segments++;

	return 0;
}

static int locate_mem_hole_top_down(unsigned long start, unsigned long end,
				    struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_end = min(end, kbuf->buf_max);
	temp_start = temp_end - kbuf->memsz;

	do {
		/* align down start */
		temp_start = temp_start & (~(kbuf->buf_align - 1));

		if (temp_start < start || temp_start < kbuf->buf_min)
			return 0;

		temp_end = temp_start + kbuf->memsz - 1;

		/*
		 * Make sure this does not conflict with any existing
		 * segments.
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start - PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	__kexec_add_segment(image, kbuf->buffer, kbuf->bufsz, temp_start,
			    kbuf->memsz);

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}

static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,
				     struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_start = max(start, kbuf->buf_min);

	do {
		temp_start = ALIGN(temp_start, kbuf->buf_align);
		temp_end = temp_start + kbuf->memsz - 1;

		if (temp_end > end || temp_end > kbuf->buf_max)
			return 0;
		/*
		 * Make sure this does not conflict with any existing
		 * segments.
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start + PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	__kexec_add_segment(image, kbuf->buffer, kbuf->bufsz, temp_start,
			    kbuf->memsz);

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}

static int locate_mem_hole_callback(u64 start, u64 end, void *arg)
{
	struct kexec_buf *kbuf = (struct kexec_buf *)arg;
	unsigned long sz = end - start + 1;

	/* Returning 0 will take us to the next memory range */
	if (sz < kbuf->memsz)
		return 0;

	if (end < kbuf->buf_min || start > kbuf->buf_max)
		return 0;

	/*
	 * Allocate memory top down within the RAM range; otherwise
	 * allocate bottom up.
	 */
	if (kbuf->top_down)
		return locate_mem_hole_top_down(start, end, kbuf);
	return locate_mem_hole_bottom_up(start, end, kbuf);
}
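/*
 * Worked example of the top-down case, with hypothetical numbers: for an
 * inclusive RAM range [0x1000000, 0x1ffffff], memsz = 0x3000,
 * buf_align = 0x1000 and buf_max = -1UL, temp_end starts at 0x1ffffff and
 * temp_start at 0x1ffcfff, which aligns down to 0x1ffc000. If the window
 * [0x1ffc000, 0x1ffefff] collides with an existing segment, it slides down
 * one page and the search repeats until it fits or drops below
 * start/buf_min.
 */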

/*
 * Helper function for placing a buffer in a kexec segment. This assumes
 * that kexec_mutex is held.
 */
int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz,
		     unsigned long memsz, unsigned long buf_align,
		     unsigned long buf_min, unsigned long buf_max,
		     bool top_down, unsigned long *load_addr)
{
	struct kexec_segment *ksegment;
	struct kexec_buf buf, *kbuf;
	int ret;

	/* Currently adding a segment this way is allowed only in file mode */
	if (!image->file_mode)
		return -EINVAL;

	if (image->nr_segments >= KEXEC_SEGMENT_MAX)
		return -EINVAL;

	/*
	 * Make sure we are not trying to add a buffer after control pages
	 * have been allocated. All segments need to be placed before any
	 * control pages are allocated, as the control page allocation
	 * logic goes through the list of segments to make sure there are
	 * no destination overlaps.
	 */
	if (!list_empty(&image->control_pages)) {
		WARN_ON(1);
		return -EINVAL;
	}

	memset(&buf, 0, sizeof(struct kexec_buf));
	kbuf = &buf;
	kbuf->image = image;
	kbuf->buffer = buffer;
	kbuf->bufsz = bufsz;

	kbuf->memsz = ALIGN(memsz, PAGE_SIZE);
	kbuf->buf_align = max(buf_align, PAGE_SIZE);
	kbuf->buf_min = buf_min;
	kbuf->buf_max = buf_max;
	kbuf->top_down = top_down;

	/* Walk the RAM ranges and allocate a suitable range for the buffer */
	if (image->type == KEXEC_TYPE_CRASH)
		ret = walk_iomem_res("Crash kernel",
				     IORESOURCE_MEM | IORESOURCE_BUSY,
				     crashk_res.start, crashk_res.end, kbuf,
				     locate_mem_hole_callback);
	else
		ret = walk_system_ram_res(0, -1, kbuf,
					  locate_mem_hole_callback);
	if (ret != 1) {
		/* A suitable memory range could not be found for buffer */
		return -EADDRNOTAVAIL;
	}

	/* Found a suitable memory range */
	ksegment = &image->segment[image->nr_segments - 1];
	*load_addr = ksegment->mem;
	return 0;
}
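/*
 * A minimal caller sketch (kernel_buf, kernel_bufsz and kbuf_addr are
 * illustrative placeholders; the real users are the arch image loaders):
 *
 *	unsigned long kbuf_addr;
 *	int ret;
 *
 *	ret = kexec_add_buffer(image, kernel_buf, kernel_bufsz,
 *			       kernel_bufsz, PAGE_SIZE, 0x100000,
 *			       ULONG_MAX, 1, &kbuf_addr);
 *	if (!ret)
 *		pr_debug("kernel segment placed at 0x%lx\n", kbuf_addr);
 */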

/* Calculate and store the digest of segments */
static int kexec_calculate_store_digests(struct kimage *image)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret = 0, i, j, zero_buf_sz, sha_region_sz;
	size_t desc_size, nullsz;
	char *digest;
	void *zero_buf;
	struct kexec_sha_region *sha_regions;
	struct purgatory_info *pi = &image->purgatory_info;

	zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT);
	zero_buf_sz = PAGE_SIZE;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm)) {
		ret = PTR_ERR(tfm);
		goto out;
	}

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	desc = kzalloc(desc_size, GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
	sha_regions = vzalloc(sha_region_sz);
	if (!sha_regions) {
		ret = -ENOMEM;
		goto out_free_desc;
	}

	desc->tfm = tfm;
	desc->flags = 0;

	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto out_free_sha_regions;

	digest = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
	if (!digest) {
		ret = -ENOMEM;
		goto out_free_sha_regions;
	}

	for (j = i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		/*
		 * Skip purgatory, as it will be modified once we put the
		 * digest info in it.
		 */
		if (ksegment->kbuf == pi->purgatory_buf)
			continue;

		ret = crypto_shash_update(desc, ksegment->kbuf,
					  ksegment->bufsz);
		if (ret)
			break;

		/*
		 * Assume the rest of the buffer is filled with zeroes and
		 * update the digest accordingly.
		 */
		nullsz = ksegment->memsz - ksegment->bufsz;
		while (nullsz) {
			unsigned long bytes = nullsz;

			if (bytes > zero_buf_sz)
				bytes = zero_buf_sz;
			ret = crypto_shash_update(desc, zero_buf, bytes);
			if (ret)
				break;
			nullsz -= bytes;
		}

		if (ret)
			break;

		sha_regions[j].start = ksegment->mem;
		sha_regions[j].len = ksegment->memsz;
		j++;
	}

	if (!ret) {
		ret = crypto_shash_final(desc, digest);
		if (ret)
			goto out_free_digest;
		ret = kexec_purgatory_get_set_symbol(image, "sha_regions",
						sha_regions, sha_region_sz, 0);
		if (ret)
			goto out_free_digest;

		ret = kexec_purgatory_get_set_symbol(image, "sha256_digest",
						digest, SHA256_DIGEST_SIZE, 0);
		if (ret)
			goto out_free_digest;
	}

out_free_digest:
	kfree(digest);
out_free_sha_regions:
	vfree(sha_regions);
out_free_desc:
	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
out:
	return ret;
}
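/*
 * For orientation (a summary, not code): "sha_regions" and "sha256_digest"
 * are global symbols inside the purgatory object patched above. At reboot
 * time purgatory is expected to re-hash the listed regions and compare the
 * result against the stored digest before jumping to the new kernel, so a
 * corrupted crash kernel is caught rather than executed.
 */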

/* Actually load purgatory. A lot of code is taken from kexec-tools. */
static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
				  unsigned long max, int top_down)
{
	struct purgatory_info *pi = &image->purgatory_info;
	unsigned long align, buf_align, bss_align, buf_sz, bss_sz, bss_pad;
	unsigned long memsz, entry, load_addr, curr_load_addr, bss_addr, offset;
	unsigned char *buf_addr, *src;
	int i, ret = 0, entry_sidx = -1;
	const Elf_Shdr *sechdrs_c;
	Elf_Shdr *sechdrs = NULL;
	void *purgatory_buf = NULL;

	/*
	 * sechdrs_c points to the section headers embedded in purgatory,
	 * which are read-only. No modifications are allowed.
	 */
	sechdrs_c = (void *)pi->ehdr + pi->ehdr->e_shoff;

	/*
	 * We cannot modify sechdrs_c[] or its fields; they are read-only.
	 * Copy them over to a local copy where we can store some temporary
	 * data and free it at the end. We need to modify the ->sh_addr and
	 * ->sh_offset fields to keep track of the permanent and temporary
	 * locations of sections.
	 */
	sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr));
	if (!sechdrs)
		return -ENOMEM;

	memcpy(sechdrs, sechdrs_c, pi->ehdr->e_shnum * sizeof(Elf_Shdr));

	/*
	 * There end up being multiple copies of the sections. The first
	 * copy is the one embedded in the kernel's read-only data. Some of
	 * the sections are copied to a temporary buffer and relocated, and
	 * those sections are finally copied to their final destination at
	 * segment load time.
	 *
	 * Use ->sh_offset to reflect the section address in memory. It will
	 * point to the original read-only copy if the section is not
	 * allocatable; otherwise it will point to the temporary copy which
	 * will be relocated.
	 *
	 * Use ->sh_addr to contain the final address of the section, where
	 * it will go during execution time.
	 */
	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (sechdrs[i].sh_type == SHT_NOBITS)
			continue;

		sechdrs[i].sh_offset = (unsigned long)pi->ehdr +
						sechdrs[i].sh_offset;
	}

	/*
	 * Identify the entry point section and make the entry relative to
	 * the section start.
	 */
	entry = pi->ehdr->e_entry;
	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		if (!(sechdrs[i].sh_flags & SHF_EXECINSTR))
			continue;

		/* Make entry section relative */
		if (sechdrs[i].sh_addr <= pi->ehdr->e_entry &&
		    ((sechdrs[i].sh_addr + sechdrs[i].sh_size) >
		     pi->ehdr->e_entry)) {
			entry_sidx = i;
			entry -= sechdrs[i].sh_addr;
			break;
		}
	}

	/* Determine how much memory is needed to load the relocatable object */
	buf_align = 1;
	bss_align = 1;
	buf_sz = 0;
	bss_sz = 0;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type != SHT_NOBITS) {
			if (buf_align < align)
				buf_align = align;
			buf_sz = ALIGN(buf_sz, align);
			buf_sz += sechdrs[i].sh_size;
		} else {
			/* bss section */
			if (bss_align < align)
				bss_align = align;
			bss_sz = ALIGN(bss_sz, align);
			bss_sz += sechdrs[i].sh_size;
		}
	}

	/* Determine the bss padding required to align bss properly */
	bss_pad = 0;
	if (buf_sz & (bss_align - 1))
		bss_pad = bss_align - (buf_sz & (bss_align - 1));

	memsz = buf_sz + bss_pad + bss_sz;

	/* Allocate buffer for purgatory */
	purgatory_buf = vzalloc(buf_sz);
	if (!purgatory_buf) {
		ret = -ENOMEM;
		goto out;
	}

	if (buf_align < bss_align)
		buf_align = bss_align;

	/* Add buffer to segment list */
	ret = kexec_add_buffer(image, purgatory_buf, buf_sz, memsz,
			       buf_align, min, max, top_down,
			       &pi->purgatory_load_addr);
	if (ret)
		goto out;

	/* Load SHF_ALLOC sections */
	buf_addr = purgatory_buf;
	load_addr = curr_load_addr = pi->purgatory_load_addr;
	bss_addr = load_addr + buf_sz + bss_pad;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type != SHT_NOBITS) {
			curr_load_addr = ALIGN(curr_load_addr, align);
			offset = curr_load_addr - load_addr;
			/* We already modified ->sh_offset to keep the src addr */
			src = (unsigned char *)sechdrs[i].sh_offset;
			memcpy(buf_addr + offset, src, sechdrs[i].sh_size);

			/* Store load address and source address of section */
			sechdrs[i].sh_addr = curr_load_addr;

			/*
			 * This section got copied to the temporary buffer.
			 * Update ->sh_offset accordingly.
			 */
			sechdrs[i].sh_offset = (unsigned long)(buf_addr + offset);

			/* Advance to the next address */
			curr_load_addr += sechdrs[i].sh_size;
		} else {
			bss_addr = ALIGN(bss_addr, align);
			sechdrs[i].sh_addr = bss_addr;
			bss_addr += sechdrs[i].sh_size;
		}
	}

	/* Update entry point based on load address of text section */
	if (entry_sidx >= 0)
		entry += sechdrs[entry_sidx].sh_addr;

	/* Make the kernel jump to purgatory after shutdown */
	image->start = entry;

	/* Used later to get/set symbol values */
	pi->sechdrs = sechdrs;

	/*
	 * Used later to identify which section is purgatory and skip it
	 * from checksumming.
	 */
	pi->purgatory_buf = purgatory_buf;
	return ret;
out:
	vfree(sechdrs);
	vfree(purgatory_buf);
	return ret;
}

static int kexec_apply_relocations(struct kimage *image)
{
	int i, ret;
	struct purgatory_info *pi = &image->purgatory_info;
	Elf_Shdr *sechdrs = pi->sechdrs;

	/* Apply relocations */
	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		Elf_Shdr *section, *symtab;

		if (sechdrs[i].sh_type != SHT_RELA &&
		    sechdrs[i].sh_type != SHT_REL)
			continue;

		/*
		 * For a section of type SHT_RELA/SHT_REL, ->sh_link
		 * contains the section header index of the associated
		 * symbol table, and ->sh_info contains the section header
		 * index of the section to which the relocations apply.
		 */
		if (sechdrs[i].sh_info >= pi->ehdr->e_shnum ||
		    sechdrs[i].sh_link >= pi->ehdr->e_shnum)
			return -ENOEXEC;

		section = &sechdrs[sechdrs[i].sh_info];
		symtab = &sechdrs[sechdrs[i].sh_link];

		if (!(section->sh_flags & SHF_ALLOC))
			continue;

		/*
		 * symtab->sh_link contains the section header index of the
		 * associated string table.
		 */
		if (symtab->sh_link >= pi->ehdr->e_shnum)
			/* Invalid section number? */
			continue;

		/*
		 * The respective architecture needs to provide support for
		 * applying relocations of type SHT_RELA/SHT_REL.
		 */
		if (sechdrs[i].sh_type == SHT_RELA)
			ret = arch_kexec_apply_relocations_add(pi->ehdr,
							       sechdrs, i);
		else if (sechdrs[i].sh_type == SHT_REL)
			ret = arch_kexec_apply_relocations(pi->ehdr,
							   sechdrs, i);
		if (ret)
			return ret;
	}

	return 0;
}

/* Load relocatable purgatory object and relocate it appropriately */
int kexec_load_purgatory(struct kimage *image, unsigned long min,
			 unsigned long max, int top_down,
			 unsigned long *load_addr)
{
	struct purgatory_info *pi = &image->purgatory_info;
	int ret;

	if (kexec_purgatory_size <= 0)
		return -EINVAL;

	if (kexec_purgatory_size < sizeof(Elf_Ehdr))
		return -ENOEXEC;

	pi->ehdr = (Elf_Ehdr *)kexec_purgatory;

	if (memcmp(pi->ehdr->e_ident, ELFMAG, SELFMAG) != 0
	    || pi->ehdr->e_type != ET_REL
	    || !elf_check_arch(pi->ehdr)
	    || pi->ehdr->e_shentsize != sizeof(Elf_Shdr))
		return -ENOEXEC;

	if (pi->ehdr->e_shoff >= kexec_purgatory_size
	    || (pi->ehdr->e_shnum * sizeof(Elf_Shdr) >
		kexec_purgatory_size - pi->ehdr->e_shoff))
		return -ENOEXEC;

	ret = __kexec_load_purgatory(image, min, max, top_down);
	if (ret)
		return ret;

	ret = kexec_apply_relocations(image);
	if (ret)
		goto out;

	*load_addr = pi->purgatory_load_addr;
	return 0;
out:
	vfree(pi->sechdrs);
	vfree(pi->purgatory_buf);
	return ret;
}
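/*
 * A sketch of how an arch loader might invoke this, under the assumption
 * that it wants purgatory placed top-down below 4G (MIN_PURGATORY_ADDR and
 * purgatory_load_addr are illustrative names, not symbols from this file):
 *
 *	unsigned long purgatory_load_addr;
 *	int ret;
 *
 *	ret = kexec_load_purgatory(image, MIN_PURGATORY_ADDR,
 *				   0xffffffffUL, 1, &purgatory_load_addr);
 *	if (ret)
 *		return ret;
 */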

static Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
					    const char *name)
{
	Elf_Sym *syms;
	Elf_Shdr *sechdrs;
	Elf_Ehdr *ehdr;
	int i, k;
	const char *strtab;

	if (!pi->sechdrs || !pi->ehdr)
		return NULL;

	sechdrs = pi->sechdrs;
	ehdr = pi->ehdr;

	for (i = 0; i < ehdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_SYMTAB)
			continue;

		if (sechdrs[i].sh_link >= ehdr->e_shnum)
			/* Invalid strtab section number */
			continue;
		strtab = (char *)sechdrs[sechdrs[i].sh_link].sh_offset;
		syms = (Elf_Sym *)sechdrs[i].sh_offset;

		/* Go through symbols for a match */
		for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) {
			if (ELF_ST_BIND(syms[k].st_info) != STB_GLOBAL)
				continue;

			if (strcmp(strtab + syms[k].st_name, name) != 0)
				continue;

			if (syms[k].st_shndx == SHN_UNDEF ||
			    syms[k].st_shndx >= ehdr->e_shnum) {
				pr_debug("Symbol: %s has bad section index %d.\n",
					 name, syms[k].st_shndx);
				return NULL;
			}

			/* Found the symbol we are looking for */
			return &syms[k];
		}
	}

	return NULL;
}

void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name)
{
	struct purgatory_info *pi = &image->purgatory_info;
	Elf_Sym *sym;
	Elf_Shdr *sechdr;

	sym = kexec_purgatory_find_symbol(pi, name);
	if (!sym)
		return ERR_PTR(-EINVAL);

	sechdr = &pi->sechdrs[sym->st_shndx];

	/*
	 * Returns the address where the symbol will finally be loaded after
	 * kexec_load_segment().
	 */
	return (void *)(sechdr->sh_addr + sym->st_value);
}

/*
 * Get or set the value of a symbol. If "get_value" is true, the symbol
 * value is returned in buf; otherwise the symbol value is set based on
 * the value in buf.
 */
int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
				   void *buf, unsigned int size, bool get_value)
{
	Elf_Sym *sym;
	Elf_Shdr *sechdrs;
	struct purgatory_info *pi = &image->purgatory_info;
	char *sym_buf;

	sym = kexec_purgatory_find_symbol(pi, name);
	if (!sym)
		return -EINVAL;

	if (sym->st_size != size) {
		pr_err("symbol %s size mismatch: expected %lu actual %u\n",
		       name, (unsigned long)sym->st_size, size);
		return -EINVAL;
	}

	sechdrs = pi->sechdrs;

	if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
		pr_err("symbol %s is in a bss section. Cannot %s\n", name,
		       get_value ? "get" : "set");
		return -EINVAL;
	}

	sym_buf = (char *)sechdrs[sym->st_shndx].sh_offset + sym->st_value;

	if (get_value)
		memcpy((void *)buf, sym_buf, size);
	else
		memcpy((void *)sym_buf, buf, size);

	return 0;
}
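/*
 * Usage sketch, mirroring the digest code above ("stack" is an
 * illustrative symbol name): a set goes into the temporary copy of the
 * section, so the value lands in the purgatory image that is loaded,
 * while a get reads back whatever is currently staged there.
 *
 *	unsigned long stack;
 *
 *	if (!kexec_purgatory_get_set_symbol(image, "stack", &stack,
 *					    sizeof(stack), 1))
 *		pr_debug("purgatory stack symbol: 0x%lx\n", stack);
 */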

/*
 * Move into place and start executing a preloaded standalone
 * executable. If nothing was preloaded, return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		lock_system_sleep();
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		suspend_console();
		error = dpm_suspend_start(PMSG_FREEZE);
		if (error)
			goto Resume_console;
		/*
		 * At this point, dpm_suspend_start() has been called,
		 * but *not* dpm_suspend_end(). We *must* call
		 * dpm_suspend_end() now. Otherwise, drivers for
		 * some devices (e.g. interrupt controllers) become
		 * desynchronized with the actual state of the
		 * hardware at resume time, and evil weirdness ensues.
		 */
		error = dpm_suspend_end(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = disable_nonboot_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		error = syscore_suspend();
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kexec_in_progress = true;
		kernel_restart_prepare(NULL);
		migrate_to_reboot_cpu();

		/*
		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
		 * no further code needs to use CPU hotplug (which is true in
		 * the reboot case). However, the kexec path depends on using
		 * CPU hotplug again; so re-enable it here.
		 */
		cpu_hotplug_enable();
		pr_emerg("Starting new kernel\n");
		machine_shutdown();
	}

	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		syscore_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		enable_nonboot_cpus();
		dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
		dpm_resume_end(PMSG_RESTORE);
 Resume_console:
		resume_console();
		thaw_processes();
 Restore_console:
		pm_restore_console();
		unlock_system_sleep();
	}
#endif

 Unlock:
	mutex_unlock(&kexec_mutex);
	return error;
}