/*
 * Suspend support specific for i386/x86-64.
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/perf_event.h>

#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/suspend.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>

#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
__visible unsigned long saved_context_esp, saved_context_ebp;
__visible unsigned long saved_context_esi, saved_context_edi;
__visible unsigned long saved_context_eflags;
#endif
struct saved_context saved_context;

/**
 * __save_processor_state - save CPU registers before creating a
 * hibernation image and before restoring the memory state from it
 * @ctxt: structure to store the register contents in
 *
 * NOTE: If there is a CPU register the modification of which by the
 * boot kernel (i.e. the kernel used for loading the hibernation image)
 * might affect the operations of the restored target kernel (i.e. the one
 * saved in the hibernation image), then its contents must be saved by this
 * function.  In other words, if kernel A is hibernated and a different
 * kernel B is used for loading the hibernation image into memory, then
 * kernel A's __save_processor_state() function must save all registers
 * needed by kernel A, so that it can operate correctly after the resume
 * regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
        mtrr_save_fixed_ranges(NULL);
#endif
        kernel_fpu_begin();

        /*
         * descriptor tables
         */
#ifdef CONFIG_X86_32
        store_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
        store_idt((struct desc_ptr *)&ctxt->idt_limit);
#endif
        /*
         * We save it here, but restore it only in the hibernate case.
         * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
         * mode in "secondary_startup_64". In 32-bit mode it is done via
         * 'pmode_gdt' in wakeup_start.
         */
        ctxt->gdt_desc.size = GDT_SIZE - 1;
        ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_table(smp_processor_id());

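        /*
         * Only the TR selector is saved here; the TSS descriptor itself
         * is re-installed by fix_processor_context() at resume time.
         */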
        store_tr(ctxt->tr);

        /* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
        /*
         * segment registers
         */
#ifdef CONFIG_X86_32
        savesegment(es, ctxt->es);
        savesegment(fs, ctxt->fs);
        savesegment(gs, ctxt->gs);
        savesegment(ss, ctxt->ss);
#else
/* CONFIG_X86_64 */
        asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
        asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
        asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
        asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
        asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

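        /*
         * Only the 16-bit segment selectors are stored above; on x86-64
         * the actual FS/GS base addresses live in MSRs and are saved
         * separately here.
         */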
        rdmsrl(MSR_FS_BASE, ctxt->fs_base);
        rdmsrl(MSR_GS_BASE, ctxt->gs_base);
        rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
        mtrr_save_fixed_ranges(NULL);

        rdmsrl(MSR_EFER, ctxt->efer);
#endif

        /*
         * control registers
         */
        ctxt->cr0 = read_cr0();
        ctxt->cr2 = read_cr2();
        ctxt->cr3 = read_cr3();
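        /*
         * __read_cr4_safe() returns 0 on CPUs that predate CR4; the
         * 32-bit restore path checks for a zero value before writing
         * CR4 back.
         */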
        ctxt->cr4 = __read_cr4_safe();
#ifdef CONFIG_X86_64
        ctxt->cr8 = read_cr8();
#endif
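        /*
         * rdmsrl_safe() fails gracefully (returns nonzero) instead of
         * faulting on CPUs that lack MSR_IA32_MISC_ENABLE, so the flag
         * records whether the saved value is valid to restore.
         */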
        ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
                                               &ctxt->misc_enable);
}

/* Needed by apm.c */
void save_processor_state(void)
{
        __save_processor_state(&saved_context);
        x86_platform.save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif

static void do_fpu_end(void)
{
        /*
         * Restore FPU regs if necessary.
         */
        kernel_fpu_end();
}

static void fix_processor_context(void)
{
        int cpu = smp_processor_id();
        struct tss_struct *t = &per_cpu(cpu_tss, cpu);
#ifdef CONFIG_X86_64
        struct desc_struct *desc = get_cpu_gdt_table(cpu);
        tss_desc tss;
#endif
        set_tss_desc(cpu, t);   /*
                                 * This just modifies memory; it should not
                                 * be necessary.  But... this is necessary,
                                 * because 386 hardware has the concept of
                                 * a busy TSS or some similar stupidity.
                                 */

#ifdef CONFIG_X86_64
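        /*
         * ltr faults if the TSS descriptor in the GDT is marked busy,
         * and it was marked busy by the ltr done before suspend, so the
         * type must be reset to "available" before load_TR_desc() below.
         */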
        memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
        tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91) */
        write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);

        syscall_init(); /* This sets MSR_*STAR and related */
#endif
        load_TR_desc();                         /* This does ltr */
        load_mm_ldt(current->active_mm);        /* This does lldt */

        fpu__resume_cpu();
}

/**
 * __restore_processor_state - restore the contents of CPU registers saved
 * by __save_processor_state()
 * @ctxt: structure to load the register contents from
 */
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
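        /*
         * The function is notrace because tracing is presumably not safe
         * to run until the CPU state below (MSRs, control registers,
         * descriptor tables) has been restored.
         */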
        if (ctxt->misc_enable_saved)
                wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
        /*
         * control registers
         */
        /* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
        if (ctxt->cr4)
                __write_cr4(ctxt->cr4);
#else
/* CONFIG_X86_64 */
        wrmsrl(MSR_EFER, ctxt->efer);
        write_cr8(ctxt->cr8);
        __write_cr4(ctxt->cr4);
#endif
        write_cr3(ctxt->cr3);
        write_cr2(ctxt->cr2);
        write_cr0(ctxt->cr0);

        /*
         * now restore the descriptor tables to their proper values
         * ltr is done in fix_processor_context().
         */
#ifdef CONFIG_X86_32
        load_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
        load_idt((const struct desc_ptr *)&ctxt->idt_limit);
#endif

        /*
         * segment registers
         */
#ifdef CONFIG_X86_32
        loadsegment(es, ctxt->es);
        loadsegment(fs, ctxt->fs);
        loadsegment(gs, ctxt->gs);
        loadsegment(ss, ctxt->ss);

        /*
         * sysenter MSRs
         */
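        /*
         * enable_sep_cpu() rewrites MSR_IA32_SYSENTER_CS/ESP/EIP, which
         * are not preserved across suspend.
         */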
        if (boot_cpu_has(X86_FEATURE_SEP))
                enable_sep_cpu();
#else
/* CONFIG_X86_64 */
        asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
        asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
        asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
        load_gs_index(ctxt->gs);
        asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

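        /*
         * Restore the FS/GS bases only after the selectors above:
         * writing a selector (e.g. in load_gs_index()) may reset the
         * corresponding base, so the MSR writes have to come last.
         */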
        wrmsrl(MSR_FS_BASE, ctxt->fs_base);
        wrmsrl(MSR_GS_BASE, ctxt->gs_base);
        wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
#endif

        fix_processor_context();

        do_fpu_end();
        x86_platform.restore_sched_clock_state();
        mtrr_bp_restore();
        perf_restore_debug_store();
}

/* Needed by apm.c */
void notrace restore_processor_state(void)
{
        __restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif

/*
 * When bsp_check() is called during hibernation or suspend, CPU hotplug
 * is already disabled, so there is no need to handle races between the
 * cpumask query and CPU hotplug.
 */
static int bsp_check(void)
{
        if (cpumask_first(cpu_online_mask) != 0) {
                pr_warn("CPU0 is offline.\n");
                return -ENODEV;
        }

        return 0;
}

static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
                           void *ptr)
{
        int ret = 0;

        switch (action) {
        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                ret = bsp_check();
                break;
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
        case PM_RESTORE_PREPARE:
                /*
                 * When the system resumes from hibernation, CPU0 must be
                 * onlined because:
                 * 1. it is required for the resume, and
                 * 2. it was online before hibernation.
                 */
                if (!cpu_online(0))
                        _debug_hotplug_cpu(0, 1);
                break;
        case PM_POST_RESTORE:
                /*
                 * When a resume really happens, this code won't be called.
                 *
                 * This code is called only when user space hibernation
                 * software prepares the snapshot device during boot time.
                 * So we just call _debug_hotplug_cpu() to restore CPU0's
                 * state prior to preparing the snapshot device.
                 *
                 * This works for the normal boot case in our CPU0 hotplug
                 * debug mode, i.e. CPU0 is offline and user mode
                 * hibernation software initializes during boot time.
                 *
                 * If CPU0 is online and a user application accesses the
                 * snapshot device after boot time, this will offline CPU0
                 * and the user may see a different CPU0 state before and
                 * after accessing the snapshot device.  But hopefully that
                 * is not the case when the user is debugging CPU0 hotplug,
                 * and even if it is hit, CPU0 can easily be brought back
                 * online.
                 *
                 * To keep this debug code simple, we only consider the
                 * normal boot case.  Otherwise we would need to remember
                 * CPU0's state, restore to that state, resolve races, etc.
                 */
                _debug_hotplug_cpu(0, 0);
                break;
#endif
        default:
                break;
        }
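        /*
         * notifier_from_errno() maps 0 to NOTIFY_OK and a negative errno
         * to an encoded "stop" value that aborts the PM transition.
         */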
        return notifier_from_errno(ret);
}

static int __init bsp_pm_check_init(void)
{
        /*
         * Register bsp_pm_callback with a lower priority than
         * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback is
         * called first and disables CPU hotplug before the BSP online
         * check.
         */
        pm_notifier(bsp_pm_callback, -INT_MAX);
        return 0;
}

core_initcall(bsp_pm_check_init);