[PATCH] x86-64: Add EFER to the register set saved by save_processor_state
arch/x86_64/kernel/suspend.c
/*
 * Suspend support specific for x86-64.
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2002 Pavel Machek <pavel@suse.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/smp.h>
#include <linux/suspend.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>

struct saved_context saved_context;

unsigned long saved_context_eax, saved_context_ebx, saved_context_ecx, saved_context_edx;
unsigned long saved_context_esp, saved_context_ebp, saved_context_esi, saved_context_edi;
unsigned long saved_context_r08, saved_context_r09, saved_context_r10, saved_context_r11;
unsigned long saved_context_r12, saved_context_r13, saved_context_r14, saved_context_r15;
unsigned long saved_context_eflags;
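
/*
 * Note (an observation, not text from the patch): the saved_context_*
 * globals above are never referenced by the C code in this file; the
 * assembly entry points in arch/x86_64/kernel/suspend_asm.S spill the
 * general-purpose registers into them on suspend and reload them on
 * resume.
 */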

void __save_processor_state(struct saved_context *ctxt)
{
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	asm volatile ("sgdt %0" : "=m" (ctxt->gdt_limit));
	asm volatile ("sidt %0" : "=m" (ctxt->idt_limit));
	asm volatile ("str %0" : "=m" (ctxt->tr));

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */

	/*
	 * segment registers
	 */
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

	/*
	 * In long mode the 64-bit FS/GS bases live in MSRs and are not
	 * captured by the selector saves above, so save them explicitly.
	 */
	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);

	/*
	 * control registers
	 */
	rdmsrl(MSR_EFER, ctxt->efer);
	asm volatile ("movq %%cr0, %0" : "=r" (ctxt->cr0));
	asm volatile ("movq %%cr2, %0" : "=r" (ctxt->cr2));
	asm volatile ("movq %%cr3, %0" : "=r" (ctxt->cr3));
	asm volatile ("movq %%cr4, %0" : "=r" (ctxt->cr4));
	asm volatile ("movq %%cr8, %0" : "=r" (ctxt->cr8));
}

void save_processor_state(void)
{
	__save_processor_state(&saved_context);
}
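
For context, the pair above brackets the platform sleep transition: everything saved by __save_processor_state() is undone, in roughly reverse order, by __restore_processor_state() below. A minimal sketch of the intended pairing (the wrapper function is hypothetical, not something in this tree):

static int sleep_transition_sketch(void)
{
	save_processor_state();		/* MSRs, segments, descriptor tables, CRs */
	/* ... firmware suspends the machine; execution resumes here ... */
	restore_processor_state();	/* undo the above, mostly in reverse */
	return 0;
}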

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary
	 */
	kernel_fpu_end();
}

void __restore_processor_state(struct saved_context *ctxt)
{
	/*
	 * control registers
	 *
	 * EFER is written back first so that, in particular, the NX bit
	 * is in effect again before the saved CR3 -- whose page tables
	 * may carry _PAGE_NX -- is reloaded.
	 */
	wrmsrl(MSR_EFER, ctxt->efer);
	asm volatile ("movq %0, %%cr8" :: "r" (ctxt->cr8));
	asm volatile ("movq %0, %%cr4" :: "r" (ctxt->cr4));
	asm volatile ("movq %0, %%cr3" :: "r" (ctxt->cr3));
	asm volatile ("movq %0, %%cr2" :: "r" (ctxt->cr2));
	asm volatile ("movq %0, %%cr0" :: "r" (ctxt->cr0));

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
	asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit));
	asm volatile ("lidt %0" :: "m" (ctxt->idt_limit));

	/*
	 * segment registers
	 */
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);

	fix_processor_context();

	do_fpu_end();
	mtrr_ap_init();
}

void restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}

void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);

	/*
	 * This just modifies memory and should not be necessary -- but
	 * it is, because the hardware concept of a busy TSS prevents the
	 * descriptor from simply being reloaded as-is.
	 */
	set_tss_desc(cpu, t);

	cpu_gdt(cpu)[GDT_ENTRY_TSS].type = 9;

	syscall_init();				/* This sets MSR_*STAR and related */
	load_TR_desc();				/* This does ltr */
	load_LDT(&current->active_mm->context);	/* This does lldt */

	/*
	 * Now maybe reload the debug registers
	 */
	if (current->thread.debugreg7) {
		loaddebug(&current->thread, 0);
		loaddebug(&current->thread, 1);
		loaddebug(&current->thread, 2);
		loaddebug(&current->thread, 3);
		/* no 4 and 5 */
		loaddebug(&current->thread, 6);
		loaddebug(&current->thread, 7);
	}
}
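
The bare 9 written into the TSS descriptor's type field above is the AMD64 encoding for an available 64-bit TSS; ltr marks the descriptor busy as a side effect and faults if it is already busy. A minimal sketch of the encoding (the macro names are invented for illustration, not taken from this tree):

/*
 * AMD64 system-segment descriptor types:
 *   0x9 = 64-bit TSS, available
 *   0xb = 64-bit TSS, busy (set by ltr as a side effect)
 * ltr raises #GP on an already-busy TSS, which is why the type is
 * forced back to 9 before load_TR_desc() executes ltr again.
 */
#define TSS_TYPE_AVAIL	0x9
#define TSS_TYPE_BUSY	0xb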

#ifdef CONFIG_SOFTWARE_SUSPEND
/* Defined in arch/x86_64/kernel/suspend_asm.S */
extern int restore_image(void);

pgd_t *temp_level4_pgt;

static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i, j;

	i = pud_index(address);
	pud = pud + i;
	for (; i < PTRS_PER_PUD; pud++, i++) {
		unsigned long paddr;
		pmd_t *pmd;

		paddr = address + i*PUD_SIZE;
		if (paddr >= end)
			break;

		pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
		if (!pmd)
			return -ENOMEM;
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
			unsigned long pe;

			if (paddr >= end)
				break;
			pe = _PAGE_NX | _PAGE_PSE | _KERNPG_TABLE | paddr;
			pe &= __supported_pte_mask;
			set_pmd(pmd, __pmd(pe));
		}
	}
	return 0;
}
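
Each pass of the inner loop above installs one PMD_SIZE large-page mapping, so a single pud entry ends up covering PUD_SIZE of physical memory without allocating any pte pages. The arithmetic, spelled out (constants per the stock x86-64 4-level layout, stated here for reference):

/*
 * x86-64 4-level paging:
 *   PMD_SIZE   = 1UL << 21   -- 2 MiB per _PAGE_PSE pmd entry
 *   PUD_SIZE   = 1UL << 30   -- 1 GiB per pud entry (512 pmds)
 *   PGDIR_SIZE = 1UL << 39   -- 512 GiB per pgd entry (512 puds)
 * So res_phys_pud_init() fills at most 512 pud slots, each backed by
 * one pmd page freshly allocated with get_safe_page().
 */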

static int set_up_temporary_mappings(void)
{
	unsigned long start, end, next;
	int error;

	temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!temp_level4_pgt)
		return -ENOMEM;

	/* It is safe to reuse the original kernel mapping */
	set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
		init_level4_pgt[pgd_index(__START_KERNEL_map)]);

	/* Set up the direct mapping from scratch */
	start = (unsigned long)pfn_to_kaddr(0);
	end = (unsigned long)pfn_to_kaddr(end_pfn);

	for (; start < end; start = next) {
		pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
		if (!pud)
			return -ENOMEM;
		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
			return error;
		set_pgd(temp_level4_pgt + pgd_index(start),
			mk_kernel_pgd(__pa(pud)));
	}
	return 0;
}
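
To make the allocation pattern concrete, a worked example (the 4 GiB machine is an assumption, purely for illustration):

/*
 * Hypothetical machine with 4 GiB of RAM:
 *   1 page for temp_level4_pgt           (the pgd itself)
 *   1 page for the single pud            (4 GiB < PGDIR_SIZE)
 *   4 pmd pages, one per PUD_SIZE chunk  (2 MiB PSE mappings, hence
 *                                         no pte pages at all)
 * Total: 6 pages, all from get_safe_page(), i.e. guaranteed not to
 * collide with any page of the image about to be restored.
 */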

int swsusp_arch_resume(void)
{
	int error;

	/* We have got enough memory and from now on we cannot recover */
	if ((error = set_up_temporary_mappings()))
		return error;
	restore_image();
	return 0;
}
#endif /* CONFIG_SOFTWARE_SUSPEND */
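
For orientation, restore_image() lives in arch/x86_64/kernel/suspend_asm.S and is not shown on this page; very roughly (a paraphrase of the assembly, not code from the tree), it does the following:

/*
 * Rough paraphrase of restore_image() (illustrative only):
 *  1. load __pa(temp_level4_pgt) into CR3, so the copy loop runs on
 *     page tables built from "safe" pages the image cannot overwrite;
 *  2. walk the list of saved page records and copy each page back to
 *     the physical address it occupied before suspend;
 *  3. hand control back to the restored kernel, which finishes resume
 *     and eventually calls restore_processor_state().
 */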