/*
 * handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/numa.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/apic.h>
#include <asm/cpufeature.h>
#include <asm/desc.h>
#include <asm/system.h>

#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
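
/*
 * Statically allocated, page-aligned page tables.  They are handed to
 * relocate_kernel() via page_list below; the relocation code uses them
 * to identity-map the control page so it can keep executing after the
 * switch away from the kernel's own page tables.
 */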
static u32 kexec_pgd[1024] PAGE_ALIGNED;
#ifdef CONFIG_X86_PAE
static u32 kexec_pmd0[1024] PAGE_ALIGNED;
static u32 kexec_pmd1[1024] PAGE_ALIGNED;
#endif
static u32 kexec_pte0[1024] PAGE_ALIGNED;
static u32 kexec_pte1[1024] PAGE_ALIGNED;

static void set_idt(void *newidt, __u16 limit)
{
	struct desc_ptr curidt;

	/* ia32 supports unaligned loads & stores */
	curidt.size    = limit;
	curidt.address = (unsigned long)newidt;

	load_idt(&curidt);
}

static void set_gdt(void *newgdt, __u16 limit)
{
	struct desc_ptr curgdt;

	/* ia32 supports unaligned loads & stores */
	curgdt.size    = limit;
	curgdt.address = (unsigned long)newgdt;

	load_gdt(&curgdt);
}

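/*
 * Force-load all segment registers with known good selectors from the
 * current GDT.  The hidden descriptor caches then remain valid even
 * after machine_kexec() below points the GDT at an empty table.
 */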
static void load_segments(void)
{
#define __STR(X) #X
#define STR(X) __STR(X)

	__asm__ __volatile__ (
		"\tljmp $"STR(__KERNEL_CS)",$1f\n"
		"\t1:\n"
		"\tmovl $"STR(__KERNEL_DS)",%%eax\n"
		"\tmovl %%eax,%%ds\n"
		"\tmovl %%eax,%%es\n"
		"\tmovl %%eax,%%fs\n"
		"\tmovl %%eax,%%gs\n"
		"\tmovl %%eax,%%ss\n"
		::: "eax", "memory");
#undef STR
#undef __STR
}

/*
 * An architecture hook called to validate the
 * proposed image and prepare the control pages
 * as needed.  The pages for KEXEC_CONTROL_CODE_SIZE
 * have been allocated, but the segments have not yet
 * been copied into the kernel.
 *
 * Do whatever setup is needed on the image and the
 * reboot code buffer to allow us to avoid allocations
 * later.
 *
 * Currently nothing.
 */
int machine_kexec_prepare(struct kimage *image)
{
	return 0;
}

/*
 * Undo anything leftover by machine_kexec_prepare
 * when an image is freed.
 */
void machine_kexec_cleanup(struct kimage *image)
{
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
NORET_TYPE void machine_kexec(struct kimage *image)
{
	unsigned long page_list[PAGES_NR];
	void *control_page;

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	control_page = page_address(image->control_code_page);
	memcpy(control_page, relocate_kernel, PAGE_SIZE);

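	/*
	 * page_list hands relocate_kernel() both the physical and the
	 * virtual address of the control page and of each page table
	 * above, so the assembly stub can reach them both before and
	 * after it switches to its own identity mapping.
	 */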
	page_list[PA_CONTROL_PAGE] = __pa(control_page);
	page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
	page_list[PA_PGD] = __pa(kexec_pgd);
	page_list[VA_PGD] = (unsigned long)kexec_pgd;
#ifdef CONFIG_X86_PAE
	page_list[PA_PMD_0] = __pa(kexec_pmd0);
	page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
	page_list[PA_PMD_1] = __pa(kexec_pmd1);
	page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
#endif
	page_list[PA_PTE_0] = __pa(kexec_pte0);
	page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
	page_list[PA_PTE_1] = __pa(kexec_pte1);
	page_list[VA_PTE_1] = (unsigned long)kexec_pte1;

	/* The segment registers are funny things, they have both a
	 * visible and an invisible part.  Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * from a table in memory.  At no other time is the
	 * descriptor table in memory accessed.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/* The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);

	/* now call it */
	relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
			image->start, cpu_has_pae);
}

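/*
 * Export architecture-specific symbols and configuration to the
 * vmcoreinfo note so that crash-dump analysis tools can interpret the
 * memory image of the crashed kernel.
 */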
void arch_crash_save_vmcoreinfo(void)
{
#ifdef CONFIG_ARCH_DISCONTIGMEM_ENABLE
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
#ifdef CONFIG_X86_PAE
	VMCOREINFO_CONFIG(X86_PAE);
#endif
}