/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>

#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/sections.h>
/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* vmcoreinfo stuff */
static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;
/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
struct resource crashk_low_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
int kexec_should_crash(struct task_struct *p)
{
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}
/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */
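/*
 * A rough sketch of the resulting entry list (not from the original
 * comments; the layout is inferred from the IND_* handling below):
 *
 *	head -> [ dest0 | IND_DESTINATION ]
 *		[ src0  | IND_SOURCE      ]  copied to dest0
 *		[ src1  | IND_SOURCE      ]  copied to dest0 + PAGE_SIZE
 *		[ next  | IND_INDIRECTION ]  continue in page 'next'
 *		...
 *		[ 0     | IND_DONE        ]
 *
 * Each entry is a page-aligned physical address with flag bits in the
 * low bits, which is why kimage_add_page() masks with PAGE_MASK.
 */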
/*
 * KIMAGE_NO_DEST is an impossible destination address, used for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long dest);
static int copy_user_segment_list(struct kimage *image,
				  unsigned long nr_segments,
				  struct kexec_segment __user *segments)
{
	int ret;
	size_t segment_bytes;

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	ret = copy_from_user(image->segment, segments, segment_bytes);
	if (ret)
		ret = -EFAULT;

	return ret;
}
static int sanity_check_segment_list(struct kimage *image)
{
	int result, i;
	unsigned long nr_segments = image->nr_segments;

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			return result;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			return result;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through very weird things can happen with no
	 * easy explanation as one segment stops on another.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;
			pstart = image->segment[j].mem;
			pend   = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				return result;
		}
	}

	/* Ensure our buffer sizes are strictly less than
	 * our memory sizes.  This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			return result;
	}

	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of ram.  We must ensure the addresses
	 * are in the reserved area otherwise preloading the
	 * kernel could corrupt things.
	 */
	if (image->type == KEXEC_TYPE_CRASH) {
		result = -EADDRNOTAVAIL;
		for (i = 0; i < nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			/* Ensure we are within the crash kernel limits */
			if ((mstart < crashk_res.start) ||
			    (mend > crashk_res.end))
				return result;
		}
	}

	return 0;
}
static struct kimage *do_kimage_alloc_init(void)
{
	struct kimage *image;

	/* Allocate a controlling structure */
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		return NULL;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unusable_pages);

	return image;
}
static void kimage_free_page_list(struct list_head *list);
static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
			       unsigned long nr_segments,
			       struct kexec_segment __user *segments)
{
	int result;
	struct kimage *image;

	/* Allocate and initialize a controlling structure */
	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->start = entry;

	result = copy_user_segment_list(image, nr_segments, segments);
	if (result)
		goto out_free_image;

	result = sanity_check_segment_list(image);
	if (result)
		goto out_free_image;

	/*
	 * Find a location for the control code buffer, and add it
	 * to the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_image;
	}

	image->swap_page = kimage_alloc_control_pages(image, 0);
	if (!image->swap_page) {
		pr_err("Could not allocate swap buffer\n");
		goto out_free_control_pages;
	}

	*rimage = image;
	return 0;

out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_image:
	kfree(image);
	return result;
}
static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
			      unsigned long nr_segments,
			      struct kexec_segment __user *segments)
{
	int result;
	struct kimage *image;

	/* Verify we have a valid entry point */
	if ((entry < crashk_res.start) || (entry > crashk_res.end))
		return -EADDRNOTAVAIL;

	/* Allocate and initialize a controlling structure */
	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->start = entry;

	/* Enable the special crash kernel control page
	 * allocation policy.
	 */
	image->control_page = crashk_res.start;
	image->type = KEXEC_TYPE_CRASH;

	result = copy_user_segment_list(image, nr_segments, segments);
	if (result)
		goto out_free_image;

	result = sanity_check_segment_list(image);
	if (result)
		goto out_free_image;

	/*
	 * Find a location for the control code buffer, and add it
	 * to the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_image;
	}

	*rimage = image;
	return 0;

out_free_image:
	kfree(image);
	return result;
}
static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start,
				       unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}
static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	pages = alloc_pages(gfp_mask, order);
	if (pages) {
		unsigned int count, i;
		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);
	}

	return pages;
}
static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;
	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}
static void kimage_free_page_list(struct list_head *list)
{
	struct list_head *pos, *next;

	list_for_each_safe(pos, next, list) {
		struct page *page;

		page = list_entry(pos, struct page, lru);
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}
static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(GFP_KERNEL, order);
		if (!pages)
			break;
		pfn   = page_to_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}
static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						     unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			break;
		}
	}
	if (pages)
		image->control_page = hole_end;

	return pages;
}
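/*
 * A quick check of the hole arithmetic above (illustrative numbers):
 * with 4K pages and order = 1, size = 0x2000, so a segment ending at
 * mend = 0x4800 advances hole_start to (0x4800 + 0x1fff) & ~0x1fff =
 * 0x6000, the next size-aligned boundary past the segment.
 */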
struct page *kimage_alloc_control_pages(struct kimage *image,
					unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				    ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}
static int kimage_set_destination(struct kimage *image,
				  unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);
	if (result == 0)
		image->destination = destination;

	return result;
}
static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);
	if (result == 0)
		image->destination += PAGE_SIZE;

	return result;
}
static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unusable_pages);
}
static void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}
#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
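/*
 * Illustrative only (kimage_free() and kimage_dst_used() below are the
 * real users): the iterator visits each entry until IND_DONE, stepping
 * into the next indirection page whenever it sees IND_INDIRECTION.
 *
 *	kimage_entry_t *ptr, entry;
 *
 *	for_each_kimage_entry(image, ptr, entry)
 *		if (entry & IND_SOURCE)
 *			nr_source_pages++;
 */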
static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}
static void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);

	kfree(image);
}
static kimage_entry_t *kimage_dst_used(struct kimage *image,
				       unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}
static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.  If the runtime is a problem the data structures can
	 * be fixed up.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			break;
		/* If the page cannot be used file it away */
		if (page_to_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unusable_pages);
			continue;
		}
		addr = page_to_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						 addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			addr = old_addr;
			page = old_page;
			break;
		} else {
			/* Place the page on the destination list I
			 * will use it later.
			 */
			list_add(&page->lru, &image->dest_pages);
		}
	}

	return page;
}
static int kimage_load_normal_segment(struct kimage *image,
				      struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);

		result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}
static int kimage_load_crash_segment(struct kimage *image,
				     struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}
		result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}
static int kimage_load_segment(struct kimage *image,
			       struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}
/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down.  Preventing on-going dmas, and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination.  And
 *   jumps into the image at entry.
 *
 * kexec does not sync, or unmount filesystems so if you need
 * that to happen you need to do that yourself.
 */
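/*
 * A caller's-eye sketch of the syscall below (illustrative, not part
 * of this file; the userspace ABI lives in the uapi <linux/kexec.h>
 * and is normally exercised through kexec-tools):
 *
 *	struct kexec_segment seg = {
 *		.buf   = image_buf,		user buffer with the data
 *		.bufsz = image_len,
 *		.mem   = (void *)0x100000,	page-aligned destination
 *		.memsz = (image_len + 0xfff) & ~0xfff,
 *	};
 *	syscall(__NR_kexec_load, entry, 1, &seg, KEXEC_ARCH_DEFAULT);
 */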
struct kimage *kexec_image;
struct kimage *kexec_crash_image;
int kexec_load_disabled;

static DEFINE_MUTEX(kexec_mutex);
SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
		struct kexec_segment __user *, segments, unsigned long, flags)
{
	struct kimage **dest_image, *image;
	int result;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/*
	 * Verify we have a legal set of flags
	 * This leaves us room for future extensions.
	 */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;

	/* Verify we are on the appropriate architecture */
	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
		return -EINVAL;

	/* Put an artificial cap on the number
	 * of segments passed to kexec_load.
	 */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	image = NULL;
	result = 0;

	/* Because we write directly to the reserved memory
	 * region when loading crash kernels we need a mutex here to
	 * prevent multiple crash kernels from attempting to load
	 * simultaneously, and to prevent a crash kernel from loading
	 * over the top of an in-use crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_ON_CRASH)
		dest_image = &kexec_crash_image;
	if (nr_segments > 0) {
		unsigned long i;

		/* Loading another kernel to reboot into */
		if ((flags & KEXEC_ON_CRASH) == 0)
			result = kimage_normal_alloc(&image, entry,
						     nr_segments, segments);
		/* Loading another kernel to switch to if this one crashes */
		else if (flags & KEXEC_ON_CRASH) {
			/* Free any current crash dump kernel before
			 * we corrupt it.
			 */
			kimage_free(xchg(&kexec_crash_image, NULL));
			result = kimage_crash_alloc(&image, entry,
						    nr_segments, segments);
			crash_map_reserved_pages();
		}
		if (result)
			goto out;

		if (flags & KEXEC_PRESERVE_CONTEXT)
			image->preserve_context = 1;
		result = machine_kexec_prepare(image);
		if (result)
			goto out;

		for (i = 0; i < nr_segments; i++) {
			result = kimage_load_segment(image, &image->segment[i]);
			if (result)
				goto out;
		}
		kimage_terminate(image);
		if (flags & KEXEC_ON_CRASH)
			crash_unmap_reserved_pages();
	}
	/* Install the new kernel, and uninstall the old */
	image = xchg(dest_image, image);

out:
	mutex_unlock(&kexec_mutex);
	kimage_free(image);

	return result;
}
/*
 * Add and remove page tables for crashkernel memory
 *
 * Provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak crash_map_reserved_pages(void)
{}

void __weak crash_unmap_reserved_pages(void)
{}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
		       compat_ulong_t, nr_segments,
		       struct compat_kexec_segment __user *, segments,
		       compat_ulong_t, flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i, result;

	/* Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			return -EFAULT;

		out.buf   = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem   = in.mem;
		out.memsz = in.memsz;

		result = copy_to_user(&ksegments[i], &out, sizeof(out));
		if (result)
			return -EFAULT;
	}

	return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif
void crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_mutex here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient.  But since I reuse the memory...
	 */
	if (mutex_trylock(&kexec_mutex)) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		mutex_unlock(&kexec_mutex);
	}
}
size_t crash_get_memory_size(void)
{
	size_t size = 0;

	mutex_lock(&kexec_mutex);
	if (crashk_res.end != crashk_res.start)
		size = resource_size(&crashk_res);
	mutex_unlock(&kexec_mutex);

	return size;
}
void __weak crash_free_reserved_phys_range(unsigned long begin,
					   unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
}
int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
	unsigned long start, end;
	unsigned long old_size;
	struct resource *ram_res;

	mutex_lock(&kexec_mutex);

	if (kexec_crash_image) {
		ret = -ENOENT;
		goto unlock;
	}
	start = crashk_res.start;
	end = crashk_res.end;
	old_size = (end == 0) ? 0 : end - start + 1;
	if (new_size >= old_size) {
		ret = (new_size == old_size) ? 0 : -EINVAL;
		goto unlock;
	}

	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
	if (!ram_res) {
		ret = -ENOMEM;
		goto unlock;
	}

	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

	crash_map_reserved_pages();
	crash_free_reserved_phys_range(end, crashk_res.end);

	if ((start == end) && (crashk_res.parent != NULL))
		release_resource(&crashk_res);

	ram_res->start = end;
	ram_res->end = crashk_res.end;
	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	ram_res->name = "System RAM";

	crashk_res.end = end - 1;

	insert_resource(&iomem_resource, ram_res);
	crash_unmap_reserved_pages();

unlock:
	mutex_unlock(&kexec_mutex);
	return ret;
}
static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
			    size_t data_len)
{
	struct elf_note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = data_len;
	note.n_type   = type;
	memcpy(buf, &note, sizeof(note));
	buf += (sizeof(note) + 3)/4;
	memcpy(buf, name, note.n_namesz);
	buf += (note.n_namesz + 3)/4;
	memcpy(buf, data, note.n_descsz);
	buf += (note.n_descsz + 3)/4;

	return buf;
}
static void final_note(u32 *buf)
{
	struct elf_note note;

	note.n_namesz = 0;
	note.n_descsz = 0;
	note.n_type   = 0;
	memcpy(buf, &note, sizeof(note));
}
void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away.  ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}
static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	crash_notes = alloc_percpu(note_buf_t);
	if (!crash_notes) {
		pr_warn("Kexec: Memory allocation for saving cpu register states failed\n");
		return -ENOMEM;
	}
	return 0;
}
subsys_initcall(crash_notes_memory_init);
/*
 * parsing the "crashkernel" commandline
 *
 * this code is intended to be called from architecture specific code
 */


/*
 * This function parses command lines in the format
 *
 *	crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
 */
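/*
 * For example (an illustrative reading, consistent with the parser
 * below), "crashkernel=512M-2G:64M,2G-:128M" reserves 64M when total
 * RAM lies in [512M, 2G) and 128M when it is 2G or more; an optional
 * trailing "@offset" pins the physical base of the reservation.
 */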
static int __init parse_crashkernel_mem(char *cmdline,
					unsigned long long system_ram,
					unsigned long long *crash_size,
					unsigned long long *crash_base)
{
	char *cur = cmdline, *tmp;

	/* for each entry of the comma-separated list */
	do {
		unsigned long long start, end = ULLONG_MAX, size;

		/* get the start of the range */
		start = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warn("crashkernel: Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (*cur != '-') {
			pr_warn("crashkernel: '-' expected\n");
			return -EINVAL;
		}
		cur++;

		/* if no ':' is here, then we read the end */
		if (*cur != ':') {
			end = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warn("crashkernel: Memory value expected\n");
				return -EINVAL;
			}
			cur = tmp;
			if (end <= start) {
				pr_warn("crashkernel: end <= start\n");
				return -EINVAL;
			}
		}

		if (*cur != ':') {
			pr_warn("crashkernel: ':' expected\n");
			return -EINVAL;
		}
		cur++;

		size = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warn("Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (size >= system_ram) {
			pr_warn("crashkernel: invalid size\n");
			return -EINVAL;
		}

		/* match ? */
		if (system_ram >= start && system_ram < end) {
			*crash_size = size;
			break;
		}
	} while (*cur++ == ',');

	if (*crash_size > 0) {
		while (*cur && *cur != ' ' && *cur != '@')
			cur++;

		if (*cur == '@') {
			cur++;
			*crash_base = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warn("Memory value expected after '@'\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}
/*
 * This function parses "simple" (old) crashkernel command lines like
 *
 *	crashkernel=size[@offset]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_simple(char *cmdline,
					   unsigned long long *crash_size,
					   unsigned long long *crash_base)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warn("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	if (*cur == '@')
		*crash_base = memparse(cur+1, &cur);
	else if (*cur != ' ' && *cur != '\0') {
		pr_warn("crashkernel: unrecognized char\n");
		return -EINVAL;
	}

	return 0;
}
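/*
 * For example, "crashkernel=128M@16M" asks for a 128M reservation at
 * physical address 16M, while plain "crashkernel=128M" leaves the base
 * to the architecture (crash_base stays 0).
 */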
#define SUFFIX_HIGH 0
#define SUFFIX_LOW  1
#define SUFFIX_NULL 2
static __initdata char *suffix_tbl[] = {
	[SUFFIX_HIGH] = ",high",
	[SUFFIX_LOW]  = ",low",
	[SUFFIX_NULL] = NULL,
};
/*
 * This function parses suffixed crashkernel command lines like
 *
 *	crashkernel=size,[high|low]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_suffix(char *cmdline,
					   unsigned long long *crash_size,
					   unsigned long long *crash_base,
					   const char *suffix)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warn("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	/* check with suffix */
	if (strncmp(cur, suffix, strlen(suffix))) {
		pr_warn("crashkernel: unrecognized char\n");
		return -EINVAL;
	}
	cur += strlen(suffix);
	if (*cur != ' ' && *cur != '\0') {
		pr_warn("crashkernel: unrecognized char\n");
		return -EINVAL;
	}

	return 0;
}
static __init char *get_last_crashkernel(char *cmdline,
					 const char *name,
					 const char *suffix)
{
	char *p = cmdline, *ck_cmdline = NULL;

	/* find crashkernel and use the last one if there are more */
	p = strstr(p, name);
	while (p) {
		char *end_p = strchr(p, ' ');
		char *q;

		if (!end_p)
			end_p = p + strlen(p);

		if (!suffix) {
			int i;

			/* skip the one with any known suffix */
			for (i = 0; suffix_tbl[i]; i++) {
				q = end_p - strlen(suffix_tbl[i]);
				if (!strncmp(q, suffix_tbl[i],
					     strlen(suffix_tbl[i])))
					goto next;
			}
			ck_cmdline = p;
		} else {
			q = end_p - strlen(suffix);
			if (!strncmp(q, suffix, strlen(suffix)))
				ck_cmdline = p;
		}
next:
		p = strstr(p+1, name);
	}

	if (!ck_cmdline)
		return NULL;

	return ck_cmdline;
}
static int __init __parse_crashkernel(char *cmdline,
				      unsigned long long system_ram,
				      unsigned long long *crash_size,
				      unsigned long long *crash_base,
				      const char *name,
				      const char *suffix)
{
	char *ck_cmdline;
	char *first_colon, *first_space;

	BUG_ON(!crash_size || !crash_base);
	*crash_size = 0;
	*crash_base = 0;

	ck_cmdline = get_last_crashkernel(cmdline, name, suffix);
	if (!ck_cmdline)
		return -EINVAL;

	ck_cmdline += strlen(name);

	if (suffix)
		return parse_crashkernel_suffix(ck_cmdline, crash_size,
						crash_base, suffix);
	/*
	 * if the commandline contains a ':', then that's the extended
	 * syntax -- if not, it must be the classic syntax
	 */
	first_colon = strchr(ck_cmdline, ':');
	first_space = strchr(ck_cmdline, ' ');
	if (first_colon && (!first_space || first_colon < first_space))
		return parse_crashkernel_mem(ck_cmdline, system_ram,
					     crash_size, crash_base);

	return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
}
/*
 * This function is the entry point for command line parsing and should be
 * called from the arch-specific code.
 */
int __init parse_crashkernel(char *cmdline,
			     unsigned long long system_ram,
			     unsigned long long *crash_size,
			     unsigned long long *crash_base)
{
	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
				   "crashkernel=", NULL);
}

int __init parse_crashkernel_high(char *cmdline,
				  unsigned long long system_ram,
				  unsigned long long *crash_size,
				  unsigned long long *crash_base)
{
	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
				   "crashkernel=", suffix_tbl[SUFFIX_HIGH]);
}

int __init parse_crashkernel_low(char *cmdline,
				 unsigned long long system_ram,
				 unsigned long long *crash_size,
				 unsigned long long *crash_base)
{
	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
				   "crashkernel=", suffix_tbl[SUFFIX_LOW]);
}
static void update_vmcoreinfo_note(void)
{
	u32 *buf = vmcoreinfo_note;

	if (!vmcoreinfo_size)
		return;
	buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
			      vmcoreinfo_size);
	final_note(buf);
}
void crash_save_vmcoreinfo(void)
{
	vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
	update_vmcoreinfo_note();
}
void vmcoreinfo_append_str(const char *fmt, ...)
{
	va_list args;
	char buf[0x50];
	size_t r;

	va_start(args, fmt);
	r = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);

	memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);

	vmcoreinfo_size += r;
}
/*
 * provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak arch_crash_save_vmcoreinfo(void)
{}

unsigned long __weak paddr_vmcoreinfo_note(void)
{
	return __pa((unsigned long)(char *)&vmcoreinfo_note);
}
static int __init crash_save_vmcoreinfo_init(void)
{
	VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
	VMCOREINFO_PAGESIZE(PAGE_SIZE);

	VMCOREINFO_SYMBOL(init_uts_ns);
	VMCOREINFO_SYMBOL(node_online_map);
#ifdef CONFIG_MMU
	VMCOREINFO_SYMBOL(swapper_pg_dir);
#endif
	VMCOREINFO_SYMBOL(_stext);
	VMCOREINFO_SYMBOL(vmap_area_list);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	VMCOREINFO_SYMBOL(mem_map);
	VMCOREINFO_SYMBOL(contig_page_data);
#endif
#ifdef CONFIG_SPARSEMEM
	VMCOREINFO_SYMBOL(mem_section);
	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
	VMCOREINFO_STRUCT_SIZE(mem_section);
	VMCOREINFO_OFFSET(mem_section, section_mem_map);
#endif
	VMCOREINFO_STRUCT_SIZE(page);
	VMCOREINFO_STRUCT_SIZE(pglist_data);
	VMCOREINFO_STRUCT_SIZE(zone);
	VMCOREINFO_STRUCT_SIZE(free_area);
	VMCOREINFO_STRUCT_SIZE(list_head);
	VMCOREINFO_SIZE(nodemask_t);
	VMCOREINFO_OFFSET(page, flags);
	VMCOREINFO_OFFSET(page, _count);
	VMCOREINFO_OFFSET(page, mapping);
	VMCOREINFO_OFFSET(page, lru);
	VMCOREINFO_OFFSET(page, _mapcount);
	VMCOREINFO_OFFSET(page, private);
	VMCOREINFO_OFFSET(pglist_data, node_zones);
	VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
#endif
	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
	VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
	VMCOREINFO_OFFSET(pglist_data, node_id);
	VMCOREINFO_OFFSET(zone, free_area);
	VMCOREINFO_OFFSET(zone, vm_stat);
	VMCOREINFO_OFFSET(zone, spanned_pages);
	VMCOREINFO_OFFSET(free_area, free_list);
	VMCOREINFO_OFFSET(list_head, next);
	VMCOREINFO_OFFSET(list_head, prev);
	VMCOREINFO_OFFSET(vmap_area, va_start);
	VMCOREINFO_OFFSET(vmap_area, list);
	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
	log_buf_kexec_setup();
	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
	VMCOREINFO_NUMBER(NR_FREE_PAGES);
	VMCOREINFO_NUMBER(PG_lru);
	VMCOREINFO_NUMBER(PG_private);
	VMCOREINFO_NUMBER(PG_swapcache);
	VMCOREINFO_NUMBER(PG_slab);
#ifdef CONFIG_MEMORY_FAILURE
	VMCOREINFO_NUMBER(PG_hwpoison);
#endif
	VMCOREINFO_NUMBER(PG_head_mask);
	VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
#ifdef CONFIG_HUGETLBFS
	VMCOREINFO_SYMBOL(free_huge_page);
#endif

	arch_crash_save_vmcoreinfo();
	update_vmcoreinfo_note();

	return 0;
}

subsys_initcall(crash_save_vmcoreinfo_init);
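/*
 * For reference, the note assembled above ends up as "key=value" text
 * (a sketch of typical contents, not an exhaustive or guaranteed list):
 *
 *	OSRELEASE=3.18.0
 *	PAGESIZE=4096
 *	SYMBOL(init_uts_ns)=ffffffff81a00000
 *	OFFSET(page.flags)=0
 *	LENGTH(zone.free_area)=11
 *	NUMBER(NR_FREE_PAGES)=0
 *	CRASHTIME=1419668980
 *
 * which dump tools such as makedumpfile locate through the note's
 * physical address exported by paddr_vmcoreinfo_note() above.
 */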
/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		lock_system_sleep();
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		suspend_console();
		error = dpm_suspend_start(PMSG_FREEZE);
		if (error)
			goto Resume_console;
		/* At this point, dpm_suspend_start() has been called,
		 * but *not* dpm_suspend_end(). We *must* call
		 * dpm_suspend_end() now.  Otherwise, drivers for
		 * some devices (e.g. interrupt controllers) become
		 * desynchronized with the actual state of the
		 * hardware at resume time, and evil weirdness ensues.
		 */
		error = dpm_suspend_end(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = disable_nonboot_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		error = syscore_suspend();
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kexec_in_progress = true;
		kernel_restart_prepare(NULL);
		migrate_to_reboot_cpu();

		/*
		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
		 * no further code needs to use CPU hotplug (which is true in
		 * the reboot case). However, the kexec path depends on using
		 * CPU hotplug again; so re-enable it here.
		 */
		cpu_hotplug_enable();
		pr_emerg("Starting new kernel\n");
		machine_shutdown();
	}

	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		syscore_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		enable_nonboot_cpus();
		dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
		dpm_resume_end(PMSG_RESTORE);
 Resume_console:
		resume_console();
		thaw_processes();
 Restore_console:
		pm_restore_console();
		unlock_system_sleep();
	}
#endif

 Unlock:
	mutex_unlock(&kexec_mutex);
	return error;
}