/*
 * VMI specific paravirt-ops implementation
 *
 * Copyright (C) 2005, VMware, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to zach@vmware.com
 *
 */

#include <linux/module.h>
#include <linux/license.h>
#include <linux/cpu.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <asm/vmi.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/vmi_time.h>

/* Convenient for calling VMI functions indirectly in the ROM */
typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);

#define call_vrom_func(rom,func) \
	(((VROMFUNC *)(rom->func))())

#define call_vrom_long_func(rom,func,arg) \
	(((VROMLONGFUNC *)(rom->func)) (arg))
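
/*
 * In these macros "func" names one of the fixed entry stubs embedded
 * in the vrom_header (vmi_init, get_reloc); the member reference
 * evaluates to the stub's address, which is then invoked with the
 * regparm calling conventions declared above.
 */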
48 | ||
49 | static struct vrom_header *vmi_rom; | |
50 | static int license_gplok; | |
51 | static int disable_nodelay; | |
52 | static int disable_pge; | |
53 | static int disable_pse; | |
54 | static int disable_sep; | |
55 | static int disable_tsc; | |
56 | static int disable_mtrr; | |
7507ba34 | 57 | static int disable_noidle; |
7ce0bcfd ZA |
58 | |
/* Cached VMI operations */
struct {
	void (*cpuid)(void /* non-c */);
	void (*_set_ldt)(u32 selector);
	void (*set_tr)(u32 selector);
	void (*set_kernel_stack)(u32 selector, u32 esp0);
	void (*allocate_page)(u32, u32, u32, u32, u32);
	void (*release_page)(u32, u32);
	void (*set_pte)(pte_t, pte_t *, unsigned);
	void (*update_pte)(pte_t *, unsigned);
	void (*set_linear_mapping)(int, u32, u32, u32);
	void (*flush_tlb)(int);
	void (*set_initial_ap_state)(int, int);
	void (*halt)(void);
} vmi_ops;
74 | ||
75 | /* XXX move this to alternative.h */ | |
76 | extern struct paravirt_patch __start_parainstructions[], | |
77 | __stop_parainstructions[]; | |
78 | ||
79 | /* | |
80 | * VMI patching routines. | |
81 | */ | |
82 | #define MNEM_CALL 0xe8 | |
83 | #define MNEM_JMP 0xe9 | |
84 | #define MNEM_RET 0xc3 | |
85 | ||
86 | static char irq_save_disable_callout[] = { | |
87 | MNEM_CALL, 0, 0, 0, 0, | |
88 | MNEM_CALL, 0, 0, 0, 0, | |
89 | MNEM_RET | |
90 | }; | |
91 | #define IRQ_PATCH_INT_MASK 0 | |
92 | #define IRQ_PATCH_DISABLE 5 | |
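
/*
 * The callout above is an 11-byte trampoline: a 5-byte "call save_fl",
 * a 5-byte "call irq_disable", then "ret".  The two zeroed rel32 fields
 * (at offsets IRQ_PATCH_INT_MASK+1 and IRQ_PATCH_DISABLE+1) are filled
 * in by patch_offset() during activate_vmi().
 */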
93 | ||
94 | static inline void patch_offset(unsigned char *eip, unsigned char *dest) | |
95 | { | |
96 | *(unsigned long *)(eip+1) = dest-eip-5; | |
97 | } | |
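
/*
 * A 5-byte near call/jmp encodes a rel32 displacement relative to the
 * end of the instruction, hence dest - eip - 5.  For example, a call
 * at 0x1000 targeting 0x1100 encodes a displacement of 0xfb.
 */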
98 | ||
99 | static unsigned patch_internal(int call, unsigned len, void *insns) | |
100 | { | |
101 | u64 reloc; | |
102 | struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc; | |
103 | reloc = call_vrom_long_func(vmi_rom, get_reloc, call); | |
104 | switch(rel->type) { | |
105 | case VMI_RELOCATION_CALL_REL: | |
106 | BUG_ON(len < 5); | |
107 | *(char *)insns = MNEM_CALL; | |
108 | patch_offset(insns, rel->eip); | |
109 | return 5; | |
110 | ||
111 | case VMI_RELOCATION_JUMP_REL: | |
112 | BUG_ON(len < 5); | |
113 | *(char *)insns = MNEM_JMP; | |
114 | patch_offset(insns, rel->eip); | |
115 | return 5; | |
116 | ||
117 | case VMI_RELOCATION_NOP: | |
118 | /* obliterate the whole thing */ | |
119 | return 0; | |
120 | ||
121 | case VMI_RELOCATION_NONE: | |
122 | /* leave native code in place */ | |
123 | break; | |
124 | ||
125 | default: | |
126 | BUG(); | |
127 | } | |
128 | return len; | |
129 | } | |
130 | ||
131 | /* | |
132 | * Apply patch if appropriate, return length of new instruction | |
133 | * sequence. The callee does nop padding for us. | |
134 | */ | |
135 | static unsigned vmi_patch(u8 type, u16 clobbers, void *insns, unsigned len) | |
136 | { | |
137 | switch (type) { | |
138 | case PARAVIRT_IRQ_DISABLE: | |
139 | return patch_internal(VMI_CALL_DisableInterrupts, len, insns); | |
140 | case PARAVIRT_IRQ_ENABLE: | |
141 | return patch_internal(VMI_CALL_EnableInterrupts, len, insns); | |
142 | case PARAVIRT_RESTORE_FLAGS: | |
143 | return patch_internal(VMI_CALL_SetInterruptMask, len, insns); | |
144 | case PARAVIRT_SAVE_FLAGS: | |
145 | return patch_internal(VMI_CALL_GetInterruptMask, len, insns); | |
146 | case PARAVIRT_SAVE_FLAGS_IRQ_DISABLE: | |
147 | if (len >= 10) { | |
148 | patch_internal(VMI_CALL_GetInterruptMask, len, insns); | |
149 | patch_internal(VMI_CALL_DisableInterrupts, len-5, insns+5); | |
150 | return 10; | |
151 | } else { | |
152 | /* | |
153 | * You bastards didn't leave enough room to | |
154 | * patch save_flags_irq_disable inline. Patch | |
155 | * to a helper | |
156 | */ | |
157 | BUG_ON(len < 5); | |
158 | *(char *)insns = MNEM_CALL; | |
159 | patch_offset(insns, irq_save_disable_callout); | |
160 | return 5; | |
161 | } | |
162 | case PARAVIRT_INTERRUPT_RETURN: | |
163 | return patch_internal(VMI_CALL_IRET, len, insns); | |
164 | case PARAVIRT_STI_SYSEXIT: | |
165 | return patch_internal(VMI_CALL_SYSEXIT, len, insns); | |
166 | default: | |
167 | break; | |
168 | } | |
169 | return len; | |
170 | } | |
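
/*
 * Returning the original len for unhandled types leaves the native
 * instruction sequence in place; a return of 0 (VMI_RELOCATION_NOP)
 * tells the patching machinery to nop out the entire site.
 */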
171 | ||
172 | /* CPUID has non-C semantics, and paravirt-ops API doesn't match hardware ISA */ | |
173 | static void vmi_cpuid(unsigned int *eax, unsigned int *ebx, | |
174 | unsigned int *ecx, unsigned int *edx) | |
175 | { | |
176 | int override = 0; | |
177 | if (*eax == 1) | |
178 | override = 1; | |
179 | asm volatile ("call *%6" | |
180 | : "=a" (*eax), | |
181 | "=b" (*ebx), | |
182 | "=c" (*ecx), | |
183 | "=d" (*edx) | |
184 | : "0" (*eax), "2" (*ecx), "r" (vmi_ops.cpuid)); | |
185 | if (override) { | |
186 | if (disable_pse) | |
187 | *edx &= ~X86_FEATURE_PSE; | |
188 | if (disable_pge) | |
189 | *edx &= ~X86_FEATURE_PGE; | |
190 | if (disable_sep) | |
191 | *edx &= ~X86_FEATURE_SEP; | |
192 | if (disable_tsc) | |
193 | *edx &= ~X86_FEATURE_TSC; | |
194 | if (disable_mtrr) | |
195 | *edx &= ~X86_FEATURE_MTRR; | |
196 | } | |
197 | } | |
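
/*
 * In the asm above, %0-%3 are the four register outputs, %4/%5 tie the
 * inputs to eax/ecx, so %6 is the indirect call target vmi_ops.cpuid.
 */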
198 | ||
199 | static inline void vmi_maybe_load_tls(struct desc_struct *gdt, int nr, struct desc_struct *new) | |
200 | { | |
201 | if (gdt[nr].a != new->a || gdt[nr].b != new->b) | |
202 | write_gdt_entry(gdt, nr, new->a, new->b); | |
203 | } | |
204 | ||
205 | static void vmi_load_tls(struct thread_struct *t, unsigned int cpu) | |
206 | { | |
207 | struct desc_struct *gdt = get_cpu_gdt_table(cpu); | |
208 | vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0]); | |
209 | vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1]); | |
210 | vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2]); | |
211 | } | |
212 | ||
213 | static void vmi_set_ldt(const void *addr, unsigned entries) | |
214 | { | |
215 | unsigned cpu = smp_processor_id(); | |
216 | u32 low, high; | |
217 | ||
218 | pack_descriptor(&low, &high, (unsigned long)addr, | |
219 | entries * sizeof(struct desc_struct) - 1, | |
220 | DESCTYPE_LDT, 0); | |
221 | write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, low, high); | |
222 | vmi_ops._set_ldt(entries ? GDT_ENTRY_LDT*sizeof(struct desc_struct) : 0); | |
223 | } | |
224 | ||
225 | static void vmi_set_tr(void) | |
226 | { | |
227 | vmi_ops.set_tr(GDT_ENTRY_TSS*sizeof(struct desc_struct)); | |
228 | } | |
229 | ||
230 | static void vmi_load_esp0(struct tss_struct *tss, | |
231 | struct thread_struct *thread) | |
232 | { | |
233 | tss->esp0 = thread->esp0; | |
234 | ||
235 | /* This can only happen when SEP is enabled, no need to test "SEP"arately */ | |
236 | if (unlikely(tss->ss1 != thread->sysenter_cs)) { | |
237 | tss->ss1 = thread->sysenter_cs; | |
238 | wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); | |
239 | } | |
240 | vmi_ops.set_kernel_stack(__KERNEL_DS, tss->esp0); | |
241 | } | |
242 | ||
243 | static void vmi_flush_tlb_user(void) | |
244 | { | |
245 | vmi_ops.flush_tlb(VMI_FLUSH_TLB); | |
246 | } | |
247 | ||
248 | static void vmi_flush_tlb_kernel(void) | |
249 | { | |
250 | vmi_ops.flush_tlb(VMI_FLUSH_TLB | VMI_FLUSH_GLOBAL); | |
251 | } | |
252 | ||
253 | /* Stub to do nothing at all; used for delays and unimplemented calls */ | |
254 | static void vmi_nop(void) | |
255 | { | |
256 | } | |
257 | ||
bbab4f3b | 258 | /* For NO_IDLE_HZ, we stop the clock when halting the kernel */ |
bbab4f3b ZA |
259 | static fastcall void vmi_safe_halt(void) |
260 | { | |
261 | int idle = vmi_stop_hz_timer(); | |
262 | vmi_ops.halt(); | |
263 | if (idle) { | |
264 | local_irq_disable(); | |
265 | vmi_account_time_restart_hz_timer(); | |
266 | local_irq_enable(); | |
267 | } | |
268 | } | |
7ce0bcfd ZA |
269 | |
270 | #ifdef CONFIG_DEBUG_PAGE_TYPE | |
271 | ||
272 | #ifdef CONFIG_X86_PAE | |
273 | #define MAX_BOOT_PTS (2048+4+1) | |
274 | #else | |
275 | #define MAX_BOOT_PTS (1024+1) | |
276 | #endif | |
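
/*
 * The bound covers the worst case of one L1 page table per
 * page-directory entry (2048 under PAE, 1024 otherwise) plus the
 * root page(s) themselves.
 */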
277 | ||
278 | /* | |
279 | * During boot, mem_map is not yet available in paging_init, so stash | |
280 | * all the boot page allocations here. | |
281 | */ | |
282 | static struct { | |
283 | u32 pfn; | |
284 | int type; | |
285 | } boot_page_allocations[MAX_BOOT_PTS]; | |
286 | static int num_boot_page_allocations; | |
287 | static int boot_allocations_applied; | |
288 | ||
289 | void vmi_apply_boot_page_allocations(void) | |
290 | { | |
291 | int i; | |
292 | BUG_ON(!mem_map); | |
293 | for (i = 0; i < num_boot_page_allocations; i++) { | |
294 | struct page *page = pfn_to_page(boot_page_allocations[i].pfn); | |
295 | page->type = boot_page_allocations[i].type; | |
296 | page->type = boot_page_allocations[i].type & | |
297 | ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE); | |
298 | } | |
299 | boot_allocations_applied = 1; | |
300 | } | |
301 | ||
302 | static void record_page_type(u32 pfn, int type) | |
303 | { | |
304 | BUG_ON(num_boot_page_allocations >= MAX_BOOT_PTS); | |
305 | boot_page_allocations[num_boot_page_allocations].pfn = pfn; | |
306 | boot_page_allocations[num_boot_page_allocations].type = type; | |
307 | num_boot_page_allocations++; | |
308 | } | |
309 | ||
310 | static void check_zeroed_page(u32 pfn, int type, struct page *page) | |
311 | { | |
312 | u32 *ptr; | |
313 | int i; | |
314 | int limit = PAGE_SIZE / sizeof(int); | |
315 | ||
316 | if (page_address(page)) | |
317 | ptr = (u32 *)page_address(page); | |
318 | else | |
319 | ptr = (u32 *)__va(pfn << PAGE_SHIFT); | |
320 | /* | |
321 | * When cloning the root in non-PAE mode, only the userspace | |
322 | * pdes need to be zeroed. | |
323 | */ | |
324 | if (type & VMI_PAGE_CLONE) | |
325 | limit = USER_PTRS_PER_PGD; | |
326 | for (i = 0; i < limit; i++) | |
327 | BUG_ON(ptr[i]); | |
328 | } | |
329 | ||
330 | /* | |
331 | * We stash the page type into struct page so we can verify the page | |
332 | * types are used properly. | |
333 | */ | |
334 | static void vmi_set_page_type(u32 pfn, int type) | |
335 | { | |
336 | /* PAE can have multiple roots per page - don't track */ | |
337 | if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP)) | |
338 | return; | |
339 | ||
340 | if (boot_allocations_applied) { | |
341 | struct page *page = pfn_to_page(pfn); | |
342 | if (type != VMI_PAGE_NORMAL) | |
343 | BUG_ON(page->type); | |
344 | else | |
345 | BUG_ON(page->type == VMI_PAGE_NORMAL); | |
346 | page->type = type & ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE); | |
347 | if (type & VMI_PAGE_ZEROED) | |
348 | check_zeroed_page(pfn, type, page); | |
349 | } else { | |
350 | record_page_type(pfn, type); | |
351 | } | |
352 | } | |
353 | ||
354 | static void vmi_check_page_type(u32 pfn, int type) | |
355 | { | |
356 | /* PAE can have multiple roots per page - skip checks */ | |
357 | if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP)) | |
358 | return; | |
359 | ||
360 | type &= ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE); | |
361 | if (boot_allocations_applied) { | |
362 | struct page *page = pfn_to_page(pfn); | |
363 | BUG_ON((page->type ^ type) & VMI_PAGE_PAE); | |
364 | BUG_ON(type == VMI_PAGE_NORMAL && page->type); | |
365 | BUG_ON((type & page->type) == 0); | |
366 | } | |
367 | } | |
368 | #else | |
369 | #define vmi_set_page_type(p,t) do { } while (0) | |
370 | #define vmi_check_page_type(p,t) do { } while (0) | |
371 | #endif | |
372 | ||
9a1c13e9 ZA |
373 | static void vmi_map_pt_hook(int type, pte_t *va, u32 pfn) |
374 | { | |
375 | /* | |
376 | * Internally, the VMI ROM must map virtual addresses to physical | |
377 | * addresses for processing MMU updates. By the time MMU updates | |
378 | * are issued, this information is typically already lost. | |
379 | * Fortunately, the VMI provides a cache of mapping slots for active | |
380 | * page tables. | |
381 | * | |
382 | * We use slot zero for the linear mapping of physical memory, and | |
383 | * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1. | |
384 | * | |
385 | * args: SLOT VA COUNT PFN | |
386 | */ | |
387 | BUG_ON(type != KM_PTE0 && type != KM_PTE1); | |
388 | vmi_ops.set_linear_mapping((type - KM_PTE0)+1, (u32)va, 1, pfn); | |
389 | } | |
390 | ||
7ce0bcfd ZA |
391 | static void vmi_allocate_pt(u32 pfn) |
392 | { | |
393 | vmi_set_page_type(pfn, VMI_PAGE_L1); | |
394 | vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0); | |
395 | } | |
396 | ||
397 | static void vmi_allocate_pd(u32 pfn) | |
398 | { | |
399 | /* | |
400 | * This call comes in very early, before mem_map is setup. | |
401 | * It is called only for swapper_pg_dir, which already has | |
402 | * data on it. | |
403 | */ | |
404 | vmi_set_page_type(pfn, VMI_PAGE_L2); | |
405 | vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0); | |
406 | } | |
407 | ||
408 | static void vmi_allocate_pd_clone(u32 pfn, u32 clonepfn, u32 start, u32 count) | |
409 | { | |
410 | vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE); | |
411 | vmi_check_page_type(clonepfn, VMI_PAGE_L2); | |
412 | vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count); | |
413 | } | |
414 | ||
415 | static void vmi_release_pt(u32 pfn) | |
416 | { | |
417 | vmi_ops.release_page(pfn, VMI_PAGE_L1); | |
418 | vmi_set_page_type(pfn, VMI_PAGE_NORMAL); | |
419 | } | |
420 | ||
421 | static void vmi_release_pd(u32 pfn) | |
422 | { | |
423 | vmi_ops.release_page(pfn, VMI_PAGE_L2); | |
424 | vmi_set_page_type(pfn, VMI_PAGE_NORMAL); | |
425 | } | |
426 | ||
427 | /* | |
428 | * Helper macros for MMU update flags. We can defer updates until a flush | |
429 | * or page invalidation only if the update is to the current address space | |
430 | * (otherwise, there is no flush). We must check against init_mm, since | |
431 | * this could be a kernel update, which usually passes init_mm, although | |
432 | * sometimes this check can be skipped if we know the particular function | |
433 | * is only called on user mode PTEs. We could change the kernel to pass | |
434 | * current->active_mm here, but in particular, I was unsure if changing | |
435 | * mm/highmem.c to do this would still be correct on other architectures. | |
436 | */ | |
437 | #define is_current_as(mm, mustbeuser) ((mm) == current->active_mm || \ | |
438 | (!mustbeuser && (mm) == &init_mm)) | |
439 | #define vmi_flags_addr(mm, addr, level, user) \ | |
440 | ((level) | (is_current_as(mm, user) ? \ | |
441 | (VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0)) | |
442 | #define vmi_flags_addr_defer(mm, addr, level, user) \ | |
443 | ((level) | (is_current_as(mm, user) ? \ | |
444 | (VMI_PAGE_DEFER | VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0)) | |
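
/*
 * Example: a PTE update in the current user address space at address
 * 0x08048000 yields VMI_PAGE_PT | VMI_PAGE_CURRENT_AS |
 * (0x08048000 & VMI_PAGE_VA_MASK), letting the hypervisor locate the
 * affected mapping without a separate lookup.
 */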
445 | ||
446 | static void vmi_update_pte(struct mm_struct *mm, u32 addr, pte_t *ptep) | |
447 | { | |
448 | vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE); | |
449 | vmi_ops.update_pte(ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0)); | |
450 | } | |
451 | ||
452 | static void vmi_update_pte_defer(struct mm_struct *mm, u32 addr, pte_t *ptep) | |
453 | { | |
454 | vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE); | |
455 | vmi_ops.update_pte(ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 0)); | |
456 | } | |
457 | ||
458 | static void vmi_set_pte(pte_t *ptep, pte_t pte) | |
459 | { | |
460 | /* XXX because of set_pmd_pte, this can be called on PT or PD layers */ | |
461 | vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE | VMI_PAGE_PD); | |
462 | vmi_ops.set_pte(pte, ptep, VMI_PAGE_PT); | |
463 | } | |
464 | ||
465 | static void vmi_set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pte) | |
466 | { | |
467 | vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE); | |
468 | vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0)); | |
469 | } | |
470 | ||
471 | static void vmi_set_pmd(pmd_t *pmdp, pmd_t pmdval) | |
472 | { | |
473 | #ifdef CONFIG_X86_PAE | |
474 | const pte_t pte = { pmdval.pmd, pmdval.pmd >> 32 }; | |
475 | vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PMD); | |
476 | #else | |
477 | const pte_t pte = { pmdval.pud.pgd.pgd }; | |
478 | vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PGD); | |
479 | #endif | |
480 | vmi_ops.set_pte(pte, (pte_t *)pmdp, VMI_PAGE_PD); | |
481 | } | |
482 | ||
483 | #ifdef CONFIG_X86_PAE | |
484 | ||
485 | static void vmi_set_pte_atomic(pte_t *ptep, pte_t pteval) | |
486 | { | |
487 | /* | |
488 | * XXX This is called from set_pmd_pte, but at both PT | |
489 | * and PD layers so the VMI_PAGE_PT flag is wrong. But | |
490 | * it is only called for large page mapping changes, | |
491 | * the Xen backend, doesn't support large pages, and the | |
492 | * ESX backend doesn't depend on the flag. | |
493 | */ | |
494 | set_64bit((unsigned long long *)ptep,pte_val(pteval)); | |
495 | vmi_ops.update_pte(ptep, VMI_PAGE_PT); | |
496 | } | |
497 | ||
498 | static void vmi_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) | |
499 | { | |
500 | vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE); | |
501 | vmi_ops.set_pte(pte, ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 1)); | |
502 | } | |
503 | ||
504 | static void vmi_set_pud(pud_t *pudp, pud_t pudval) | |
505 | { | |
506 | /* Um, eww */ | |
507 | const pte_t pte = { pudval.pgd.pgd, pudval.pgd.pgd >> 32 }; | |
508 | vmi_check_page_type(__pa(pudp) >> PAGE_SHIFT, VMI_PAGE_PGD); | |
509 | vmi_ops.set_pte(pte, (pte_t *)pudp, VMI_PAGE_PDP); | |
510 | } | |
511 | ||
512 | static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | |
513 | { | |
514 | const pte_t pte = { 0 }; | |
515 | vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE); | |
516 | vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0)); | |
517 | } | |
518 | ||
519 | void vmi_pmd_clear(pmd_t *pmd) | |
520 | { | |
521 | const pte_t pte = { 0 }; | |
522 | vmi_check_page_type(__pa(pmd) >> PAGE_SHIFT, VMI_PAGE_PMD); | |
523 | vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD); | |
524 | } | |
525 | #endif | |
526 | ||
527 | #ifdef CONFIG_SMP | |
528 | struct vmi_ap_state ap; | |
529 | extern void setup_pda(void); | |
530 | ||
531 | static void __init /* XXX cpu hotplug */ | |
532 | vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip, | |
533 | unsigned long start_esp) | |
534 | { | |
535 | /* Default everything to zero. This is fine for most GPRs. */ | |
536 | memset(&ap, 0, sizeof(struct vmi_ap_state)); | |
537 | ||
538 | ap.gdtr_limit = GDT_SIZE - 1; | |
539 | ap.gdtr_base = (unsigned long) get_cpu_gdt_table(phys_apicid); | |
540 | ||
541 | ap.idtr_limit = IDT_ENTRIES * 8 - 1; | |
542 | ap.idtr_base = (unsigned long) idt_table; | |
543 | ||
544 | ap.ldtr = 0; | |
545 | ||
546 | ap.cs = __KERNEL_CS; | |
547 | ap.eip = (unsigned long) start_eip; | |
548 | ap.ss = __KERNEL_DS; | |
549 | ap.esp = (unsigned long) start_esp; | |
550 | ||
551 | ap.ds = __USER_DS; | |
552 | ap.es = __USER_DS; | |
553 | ap.fs = __KERNEL_PDA; | |
554 | ap.gs = 0; | |
555 | ||
556 | ap.eflags = 0; | |
557 | ||
558 | setup_pda(); | |
559 | ||
560 | #ifdef CONFIG_X86_PAE | |
561 | /* efer should match BSP efer. */ | |
562 | if (cpu_has_nx) { | |
563 | unsigned l, h; | |
564 | rdmsr(MSR_EFER, l, h); | |
565 | ap.efer = (unsigned long long) h << 32 | l; | |
566 | } | |
567 | #endif | |
568 | ||
569 | ap.cr3 = __pa(swapper_pg_dir); | |
570 | /* Protected mode, paging, AM, WP, NE, MP. */ | |
571 | ap.cr0 = 0x80050023; | |
572 | ap.cr4 = mmu_cr4_features; | |
573 | vmi_ops.set_initial_ap_state(__pa(&ap), phys_apicid); | |
574 | } | |
575 | #endif | |
576 | ||
577 | static inline int __init check_vmi_rom(struct vrom_header *rom) | |
578 | { | |
579 | struct pci_header *pci; | |
580 | struct pnp_header *pnp; | |
581 | const char *manufacturer = "UNKNOWN"; | |
582 | const char *product = "UNKNOWN"; | |
583 | const char *license = "unspecified"; | |
584 | ||
585 | if (rom->rom_signature != 0xaa55) | |
586 | return 0; | |
587 | if (rom->vrom_signature != VMI_SIGNATURE) | |
588 | return 0; | |
589 | if (rom->api_version_maj != VMI_API_REV_MAJOR || | |
590 | rom->api_version_min+1 < VMI_API_REV_MINOR+1) { | |
591 | printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n", | |
592 | rom->api_version_maj, | |
593 | rom->api_version_min); | |
594 | return 0; | |
595 | } | |
596 | ||
597 | /* | |
598 | * Relying on the VMI_SIGNATURE field is not 100% safe, so check | |
599 | * the PCI header and device type to make sure this is really a | |
600 | * VMI device. | |
601 | */ | |
602 | if (!rom->pci_header_offs) { | |
603 | printk(KERN_WARNING "VMI: ROM does not contain PCI header.\n"); | |
604 | return 0; | |
605 | } | |
606 | ||
607 | pci = (struct pci_header *)((char *)rom+rom->pci_header_offs); | |
608 | if (pci->vendorID != PCI_VENDOR_ID_VMWARE || | |
609 | pci->deviceID != PCI_DEVICE_ID_VMWARE_VMI) { | |
610 | /* Allow it to run... anyways, but warn */ | |
611 | printk(KERN_WARNING "VMI: ROM from unknown manufacturer\n"); | |
612 | } | |
613 | ||
614 | if (rom->pnp_header_offs) { | |
615 | pnp = (struct pnp_header *)((char *)rom+rom->pnp_header_offs); | |
616 | if (pnp->manufacturer_offset) | |
617 | manufacturer = (const char *)rom+pnp->manufacturer_offset; | |
618 | if (pnp->product_offset) | |
619 | product = (const char *)rom+pnp->product_offset; | |
620 | } | |
621 | ||
622 | if (rom->license_offs) | |
623 | license = (char *)rom+rom->license_offs; | |
624 | ||
625 | printk(KERN_INFO "VMI: Found %s %s, API version %d.%d, ROM version %d.%d\n", | |
626 | manufacturer, product, | |
627 | rom->api_version_maj, rom->api_version_min, | |
628 | pci->rom_version_maj, pci->rom_version_min); | |
629 | ||
630 | license_gplok = license_is_gpl_compatible(license); | |
631 | if (!license_gplok) { | |
632 | printk(KERN_WARNING "VMI: ROM license '%s' taints kernel... " | |
633 | "inlining disabled\n", | |
634 | license); | |
635 | add_taint(TAINT_PROPRIETARY_MODULE); | |
636 | } | |
637 | return 1; | |
638 | } | |
639 | ||
640 | /* | |
641 | * Probe for the VMI option ROM | |
642 | */ | |
643 | static inline int __init probe_vmi_rom(void) | |
644 | { | |
645 | unsigned long base; | |
646 | ||
647 | /* VMI ROM is in option ROM area, check signature */ | |
648 | for (base = 0xC0000; base < 0xE0000; base += 2048) { | |
649 | struct vrom_header *romstart; | |
650 | romstart = (struct vrom_header *)isa_bus_to_virt(base); | |
651 | if (check_vmi_rom(romstart)) { | |
652 | vmi_rom = romstart; | |
653 | return 1; | |
654 | } | |
655 | } | |
656 | return 0; | |
657 | } | |
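
/*
 * (Legacy PC option ROMs must begin on 2K boundaries within the
 * C0000h-DFFFFh window, which is why the probe steps by 2048.)
 */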
658 | ||
659 | /* | |
660 | * VMI setup common to all processors | |
661 | */ | |
662 | void vmi_bringup(void) | |
663 | { | |
664 | /* We must establish the lowmem mapping for MMU ops to work */ | |
665 | if (vmi_rom) | |
666 | vmi_ops.set_linear_mapping(0, __PAGE_OFFSET, max_low_pfn, 0); | |
667 | } | |
668 | ||
669 | /* | |
670 | * Return a pointer to the VMI function or a NOP stub | |
671 | */ | |
672 | static void *vmi_get_function(int vmicall) | |
673 | { | |
674 | u64 reloc; | |
675 | const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc; | |
676 | reloc = call_vrom_long_func(vmi_rom, get_reloc, vmicall); | |
677 | BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL); | |
678 | if (rel->type == VMI_RELOCATION_CALL_REL) | |
679 | return (void *)rel->eip; | |
680 | else | |
681 | return (void *)vmi_nop; | |
682 | } | |
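
/*
 * The results of vmi_get_function() are used as C function pointers
 * (called, not jumped to), so a jump-style relocation cannot be
 * accommodated here; hence the BUG_ON.  NOP relocations fall back to
 * the vmi_nop stub.
 */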
683 | ||
684 | /* | |
685 | * Helper macro for making the VMI paravirt-ops fill code readable. | |
686 | * For unimplemented operations, fall back to default. | |
687 | */ | |
688 | #define para_fill(opname, vmicall) \ | |
689 | do { \ | |
690 | reloc = call_vrom_long_func(vmi_rom, get_reloc, \ | |
691 | VMI_CALL_##vmicall); \ | |
692 | if (rel->type != VMI_RELOCATION_NONE) { \ | |
693 | BUG_ON(rel->type != VMI_RELOCATION_CALL_REL); \ | |
694 | paravirt_ops.opname = (void *)rel->eip; \ | |
695 | } \ | |
696 | } while (0) | |
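
/*
 * E.g. para_fill(save_fl, GetInterruptMask) asks the ROM for the
 * relocation of VMI_CALL_GetInterruptMask and, when one is provided,
 * points paravirt_ops.save_fl directly at the ROM entry point;
 * otherwise the native implementation is left in place.
 */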
697 | ||
698 | /* | |
699 | * Activate the VMI interface and switch into paravirtualized mode | |
700 | */ | |
701 | static inline int __init activate_vmi(void) | |
702 | { | |
703 | short kernel_cs; | |
704 | u64 reloc; | |
705 | const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc; | |
706 | ||
707 | if (call_vrom_func(vmi_rom, vmi_init) != 0) { | |
708 | printk(KERN_ERR "VMI ROM failed to initialize!"); | |
709 | return 0; | |
710 | } | |
711 | savesegment(cs, kernel_cs); | |
712 | ||
713 | paravirt_ops.paravirt_enabled = 1; | |
714 | paravirt_ops.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK; | |
715 | ||
716 | paravirt_ops.patch = vmi_patch; | |
717 | paravirt_ops.name = "vmi"; | |
718 | ||
719 | /* | |
720 | * Many of these operations are ABI compatible with VMI. | |
721 | * This means we can fill in the paravirt-ops with direct | |
722 | * pointers into the VMI ROM. If the calling convention for | |
723 | * these operations changes, this code needs to be updated. | |
724 | * | |
725 | * Exceptions | |
726 | * CPUID paravirt-op uses pointers, not the native ISA | |
727 | * halt has no VMI equivalent; all VMI halts are "safe" | |
728 | * no MSR support yet - just trap and emulate. VMI uses the | |
729 | * same ABI as the native ISA, but Linux wants exceptions | |
730 | * from bogus MSR read / write handled | |
731 | * rdpmc is not yet used in Linux | |
732 | */ | |
733 | ||
734 | /* CPUID is special, so very special */ | |
735 | reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_CPUID); | |
736 | if (rel->type != VMI_RELOCATION_NONE) { | |
737 | BUG_ON(rel->type != VMI_RELOCATION_CALL_REL); | |
738 | vmi_ops.cpuid = (void *)rel->eip; | |
739 | paravirt_ops.cpuid = vmi_cpuid; | |
740 | } | |
741 | ||
742 | para_fill(clts, CLTS); | |
743 | para_fill(get_debugreg, GetDR); | |
744 | para_fill(set_debugreg, SetDR); | |
745 | para_fill(read_cr0, GetCR0); | |
746 | para_fill(read_cr2, GetCR2); | |
747 | para_fill(read_cr3, GetCR3); | |
748 | para_fill(read_cr4, GetCR4); | |
749 | para_fill(write_cr0, SetCR0); | |
750 | para_fill(write_cr2, SetCR2); | |
751 | para_fill(write_cr3, SetCR3); | |
752 | para_fill(write_cr4, SetCR4); | |
753 | para_fill(save_fl, GetInterruptMask); | |
754 | para_fill(restore_fl, SetInterruptMask); | |
755 | para_fill(irq_disable, DisableInterrupts); | |
756 | para_fill(irq_enable, EnableInterrupts); | |
757 | /* irq_save_disable !!! sheer pain */ | |
758 | patch_offset(&irq_save_disable_callout[IRQ_PATCH_INT_MASK], | |
759 | (char *)paravirt_ops.save_fl); | |
760 | patch_offset(&irq_save_disable_callout[IRQ_PATCH_DISABLE], | |
761 | (char *)paravirt_ops.irq_disable); | |

	para_fill(wbinvd, WBINVD);
	/* paravirt_ops.read_msr = vmi_rdmsr */
	/* paravirt_ops.write_msr = vmi_wrmsr */
	para_fill(read_tsc, RDTSC);
	/* paravirt_ops.rdpmc = vmi_rdpmc */

	/* TR interface doesn't pass TR value */
	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_SetTR);
	if (rel->type != VMI_RELOCATION_NONE) {
		BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
		vmi_ops.set_tr = (void *)rel->eip;
		paravirt_ops.load_tr_desc = vmi_set_tr;
	}

	/* LDT is special, too */
	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_SetLDT);
	if (rel->type != VMI_RELOCATION_NONE) {
		BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
		vmi_ops._set_ldt = (void *)rel->eip;
		paravirt_ops.set_ldt = vmi_set_ldt;
	}

	para_fill(load_gdt, SetGDT);
	para_fill(load_idt, SetIDT);
	para_fill(store_gdt, GetGDT);
	para_fill(store_idt, GetIDT);
	para_fill(store_tr, GetTR);
	paravirt_ops.load_tls = vmi_load_tls;
	para_fill(write_ldt_entry, WriteLDTEntry);
	para_fill(write_gdt_entry, WriteGDTEntry);
	para_fill(write_idt_entry, WriteIDTEntry);
	reloc = call_vrom_long_func(vmi_rom, get_reloc,
				    VMI_CALL_UpdateKernelStack);
	if (rel->type != VMI_RELOCATION_NONE) {
		BUG_ON(rel->type != VMI_RELOCATION_CALL_REL);
		vmi_ops.set_kernel_stack = (void *)rel->eip;
		paravirt_ops.load_esp0 = vmi_load_esp0;
	}

	para_fill(set_iopl_mask, SetIOPLMask);
	paravirt_ops.io_delay = (void *)vmi_nop;
	if (!disable_nodelay) {
		paravirt_ops.const_udelay = (void *)vmi_nop;
	}

	para_fill(set_lazy_mode, SetLazyMode);

	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_FlushTLB);
	if (rel->type != VMI_RELOCATION_NONE) {
		vmi_ops.flush_tlb = (void *)rel->eip;
		paravirt_ops.flush_tlb_user = vmi_flush_tlb_user;
		paravirt_ops.flush_tlb_kernel = vmi_flush_tlb_kernel;
	}
	para_fill(flush_tlb_single, InvalPage);

	/*
	 * Until a standard flag format can be agreed on, we need to
	 * implement these as wrappers in Linux.  Get the VMI ROM
	 * function pointers for the two backend calls.
	 */
#ifdef CONFIG_X86_PAE
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxELong);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxELong);
#else
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE);
#endif
	vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
	vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
	vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);

	paravirt_ops.map_pt_hook = vmi_map_pt_hook;
	paravirt_ops.alloc_pt = vmi_allocate_pt;
	paravirt_ops.alloc_pd = vmi_allocate_pd;
	paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone;
	paravirt_ops.release_pt = vmi_release_pt;
	paravirt_ops.release_pd = vmi_release_pd;
	paravirt_ops.set_pte = vmi_set_pte;
	paravirt_ops.set_pte_at = vmi_set_pte_at;
	paravirt_ops.set_pmd = vmi_set_pmd;
	paravirt_ops.pte_update = vmi_update_pte;
	paravirt_ops.pte_update_defer = vmi_update_pte_defer;
#ifdef CONFIG_X86_PAE
	paravirt_ops.set_pte_atomic = vmi_set_pte_atomic;
	paravirt_ops.set_pte_present = vmi_set_pte_present;
	paravirt_ops.set_pud = vmi_set_pud;
	paravirt_ops.pte_clear = vmi_pte_clear;
	paravirt_ops.pmd_clear = vmi_pmd_clear;
#endif

	/*
	 * These MUST always be patched.  Don't support indirect jumps
	 * through these operations, as the VMI interface may use either
	 * a jump or a call to get to these operations, depending on
	 * the backend.  They are performance critical anyway, so requiring
	 * a patch is not a big problem.
	 */
	paravirt_ops.irq_enable_sysexit = (void *)0xfeedbab0;
	paravirt_ops.iret = (void *)0xbadbab0;
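	/*
	 * (Recognizable poison values: if a site is ever reached
	 * without having been patched, the fault address makes the
	 * cause obvious.)
	 */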
861 | ||
862 | #ifdef CONFIG_SMP | |
863 | paravirt_ops.startup_ipi_hook = vmi_startup_ipi_hook; | |
864 | vmi_ops.set_initial_ap_state = vmi_get_function(VMI_CALL_SetInitialAPState); | |
865 | #endif | |
866 | ||
867 | #ifdef CONFIG_X86_LOCAL_APIC | |
868 | paravirt_ops.apic_read = vmi_get_function(VMI_CALL_APICRead); | |
869 | paravirt_ops.apic_write = vmi_get_function(VMI_CALL_APICWrite); | |
870 | paravirt_ops.apic_write_atomic = vmi_get_function(VMI_CALL_APICWrite); | |
871 | #endif | |
872 | ||
bbab4f3b ZA |
873 | /* |
874 | * Check for VMI timer functionality by probing for a cycle frequency method | |
875 | */ | |
876 | reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency); | |
877 | if (rel->type != VMI_RELOCATION_NONE) { | |
878 | vmi_timer_ops.get_cycle_frequency = (void *)rel->eip; | |
879 | vmi_timer_ops.get_cycle_counter = | |
880 | vmi_get_function(VMI_CALL_GetCycleCounter); | |
881 | vmi_timer_ops.get_wallclock = | |
882 | vmi_get_function(VMI_CALL_GetWallclockTime); | |
883 | vmi_timer_ops.wallclock_updated = | |
884 | vmi_get_function(VMI_CALL_WallclockUpdated); | |
885 | vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm); | |
886 | vmi_timer_ops.cancel_alarm = | |
887 | vmi_get_function(VMI_CALL_CancelAlarm); | |
888 | paravirt_ops.time_init = vmi_time_init; | |
889 | paravirt_ops.get_wallclock = vmi_get_wallclock; | |
890 | paravirt_ops.set_wallclock = vmi_set_wallclock; | |
891 | #ifdef CONFIG_X86_LOCAL_APIC | |
892 | paravirt_ops.setup_boot_clock = vmi_timer_setup_boot_alarm; | |
893 | paravirt_ops.setup_secondary_clock = vmi_timer_setup_secondary_alarm; | |
894 | #endif | |
6cb9a835 | 895 | paravirt_ops.get_scheduled_cycles = vmi_get_sched_cycles; |
1182d852 | 896 | paravirt_ops.get_cpu_khz = vmi_cpu_khz; |
bbab4f3b | 897 | } |
7507ba34 ZA |
898 | if (!disable_noidle) |
899 | para_fill(safe_halt, Halt); | |
900 | else { | |
901 | vmi_ops.halt = vmi_get_function(VMI_CALL_Halt); | |
902 | paravirt_ops.safe_halt = vmi_safe_halt; | |
903 | } | |

	/*
	 * Alternative instruction rewriting doesn't happen soon enough
	 * to convert VMI_IRET to a call instead of a jump; so we have
	 * to do this before IRQs get reenabled.  Fortunately, it is
	 * idempotent.
	 */
	apply_paravirt(__start_parainstructions, __stop_parainstructions);

	vmi_bringup();

	return 1;
}
917 | ||
918 | #undef para_fill | |
919 | ||
920 | void __init vmi_init(void) | |
921 | { | |
922 | unsigned long flags; | |
923 | ||
924 | if (!vmi_rom) | |
925 | probe_vmi_rom(); | |
926 | else | |
927 | check_vmi_rom(vmi_rom); | |
928 | ||
929 | /* In case probing for or validating the ROM failed, basil */ | |
930 | if (!vmi_rom) | |
931 | return; | |
932 | ||
933 | reserve_top_address(-vmi_rom->virtual_top); | |
934 | ||
935 | local_irq_save(flags); | |
936 | activate_vmi(); | |
7507ba34 ZA |
937 | |
938 | #ifdef CONFIG_X86_IO_APIC | |
7ce0bcfd ZA |
939 | no_timer_check = 1; |
940 | #endif | |
7507ba34 | 941 | |
7ce0bcfd ZA |
942 | local_irq_restore(flags & X86_EFLAGS_IF); |
943 | } | |
944 | ||
945 | static int __init parse_vmi(char *arg) | |
946 | { | |
947 | if (!arg) | |
948 | return -EINVAL; | |
949 | ||
950 | if (!strcmp(arg, "disable_nodelay")) | |
951 | disable_nodelay = 1; | |
952 | else if (!strcmp(arg, "disable_pge")) { | |
953 | clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability); | |
954 | disable_pge = 1; | |
955 | } else if (!strcmp(arg, "disable_pse")) { | |
956 | clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability); | |
957 | disable_pse = 1; | |
958 | } else if (!strcmp(arg, "disable_sep")) { | |
959 | clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability); | |
960 | disable_sep = 1; | |
961 | } else if (!strcmp(arg, "disable_tsc")) { | |
962 | clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability); | |
963 | disable_tsc = 1; | |
964 | } else if (!strcmp(arg, "disable_mtrr")) { | |
965 | clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability); | |
966 | disable_mtrr = 1; | |
7507ba34 ZA |
967 | } else if (!strcmp(arg, "disable_noidle")) |
968 | disable_noidle = 1; | |
7ce0bcfd ZA |
969 | return 0; |
970 | } | |
971 | ||
972 | early_param("vmi", parse_vmi); |
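
/*
 * Usage: boot with e.g. "vmi=disable_noidle".  parse_vmi() matches the
 * whole argument string, so each option must be passed as its own
 * "vmi=" parameter rather than in a comma-separated list.
 */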