/*
 * VMI specific paravirt-ops implementation
 *
 * Copyright (C) 2005, VMware, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to zach@vmware.com
 *
 */

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <asm/vmi.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/vmi_time.h>
#include <asm/kmap_types.h>
#include <asm/setup.h>

/* Convenient for calling VMI functions indirectly in the ROM */
typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);

#define call_vrom_func(rom,func) \
   (((VROMFUNC *)(rom->func))())

#define call_vrom_long_func(rom,func,arg) \
   (((VROMLONGFUNC *)(rom->func)) (arg))

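/*
 * Illustrative sketch (not part of the original file): with the
 * regparm annotations above, a relocation lookup such as
 *
 *	u64 reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_CPUID);
 *
 * compiles to an indirect call through vmi_rom->get_reloc with the
 * call number in %eax (regparm(2) passes the first two arguments in
 * %eax/%edx) and the 64-bit result returned in %edx:%eax, which is
 * then decoded as a struct vmi_relocation_info, as patch_internal()
 * does below.
 */
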
static struct vrom_header *vmi_rom;
static int disable_pge;
static int disable_pse;
static int disable_sep;
static int disable_tsc;
static int disable_mtrr;
static int disable_noidle;
static int disable_vmi_timer;

/* Cached VMI operations */
static struct {
	void (*cpuid)(void /* non-c */);
	void (*_set_ldt)(u32 selector);
	void (*set_tr)(u32 selector);
	void (*write_idt_entry)(struct desc_struct *, int, u32, u32);
	void (*write_gdt_entry)(struct desc_struct *, int, u32, u32);
	void (*write_ldt_entry)(struct desc_struct *, int, u32, u32);
	void (*set_kernel_stack)(u32 selector, u32 sp0);
	void (*allocate_page)(u32, u32, u32, u32, u32);
	void (*release_page)(u32, u32);
	void (*set_pte)(pte_t, pte_t *, unsigned);
	void (*update_pte)(pte_t *, unsigned);
	void (*set_linear_mapping)(int, void *, u32, u32);
	void (*_flush_tlb)(int);
	void (*set_initial_ap_state)(int, int);
	void (*halt)(void);
	void (*set_lazy_mode)(int mode);
} vmi_ops;

/* Cached VMI timer operations */
struct vmi_timer_ops vmi_timer_ops;

/*
 * VMI patching routines.
 */
#define MNEM_CALL 0xe8
#define MNEM_JMP  0xe9
#define MNEM_RET  0xc3

#define IRQ_PATCH_INT_MASK 0
#define IRQ_PATCH_DISABLE  5

static inline void patch_offset(void *insnbuf,
				unsigned long ip, unsigned long dest)
{
	*(unsigned long *)(insnbuf+1) = dest-ip-5;
}

static unsigned patch_internal(int call, unsigned len, void *insnbuf,
			       unsigned long ip)
{
	u64 reloc;
	struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
	reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
	switch(rel->type) {
	case VMI_RELOCATION_CALL_REL:
		BUG_ON(len < 5);
		*(char *)insnbuf = MNEM_CALL;
		patch_offset(insnbuf, ip, (unsigned long)rel->eip);
		return 5;

	case VMI_RELOCATION_JUMP_REL:
		BUG_ON(len < 5);
		*(char *)insnbuf = MNEM_JMP;
		patch_offset(insnbuf, ip, (unsigned long)rel->eip);
		return 5;

	case VMI_RELOCATION_NOP:
		/* obliterate the whole thing */
		return 0;

	case VMI_RELOCATION_NONE:
		/* leave native code in place */
		break;

	default:
		BUG();
	}
	return len;
}

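/*
 * Worked example (addresses made up for illustration): patching a call
 * site at ip 0xc0100000 to reach a ROM entry at rel->eip 0xc0ffe000
 * emits MNEM_CALL followed by the little-endian rel32 operand
 * dest - ip - 5 = 0x00efdffb, i.e. the five bytes e8 fb df ef 00,
 * exactly what patch_offset() computes above.
 */
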
/*
 * Apply patch if appropriate, return length of new instruction
 * sequence.  The caller does nop padding for us.
 */
static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
			  unsigned long ip, unsigned len)
{
	switch (type) {
	case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
		return patch_internal(VMI_CALL_DisableInterrupts, len,
				      insns, ip);
	case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
		return patch_internal(VMI_CALL_EnableInterrupts, len,
				      insns, ip);
	case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
		return patch_internal(VMI_CALL_SetInterruptMask, len,
				      insns, ip);
	case PARAVIRT_PATCH(pv_irq_ops.save_fl):
		return patch_internal(VMI_CALL_GetInterruptMask, len,
				      insns, ip);
	case PARAVIRT_PATCH(pv_cpu_ops.iret):
		return patch_internal(VMI_CALL_IRET, len, insns, ip);
	case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit):
		return patch_internal(VMI_CALL_SYSEXIT, len, insns, ip);
	default:
		break;
	}
	return len;
}

/* CPUID has non-C semantics, and paravirt-ops API doesn't match hardware ISA */
static void vmi_cpuid(unsigned int *ax, unsigned int *bx,
		      unsigned int *cx, unsigned int *dx)
{
	int override = 0;
	if (*ax == 1)
		override = 1;
	asm volatile ("call *%6"
		      : "=a" (*ax),
			"=b" (*bx),
			"=c" (*cx),
			"=d" (*dx)
		      : "0" (*ax), "2" (*cx), "r" (vmi_ops.cpuid));
	if (override) {
		/* The X86_FEATURE_* constants are bit numbers (all in
		 * CPUID word 0, i.e. leaf 1 EDX), not bit masks. */
		if (disable_pse)
			*dx &= ~(1 << X86_FEATURE_PSE);
		if (disable_pge)
			*dx &= ~(1 << X86_FEATURE_PGE);
		if (disable_sep)
			*dx &= ~(1 << X86_FEATURE_SEP);
		if (disable_tsc)
			*dx &= ~(1 << X86_FEATURE_TSC);
		if (disable_mtrr)
			*dx &= ~(1 << X86_FEATURE_MTRR);
	}
}

static inline void vmi_maybe_load_tls(struct desc_struct *gdt, int nr, struct desc_struct *new)
{
	if (gdt[nr].a != new->a || gdt[nr].b != new->b)
		write_gdt_entry(gdt, nr, new, 0);
}

static void vmi_load_tls(struct thread_struct *t, unsigned int cpu)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0]);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1]);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2]);
}

static void vmi_set_ldt(const void *addr, unsigned entries)
{
	unsigned cpu = smp_processor_id();
	struct desc_struct desc;

	pack_descriptor(&desc, (unsigned long)addr,
			entries * sizeof(struct desc_struct) - 1,
			DESC_LDT, 0);
	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, &desc, DESC_LDT);
	vmi_ops._set_ldt(entries ? GDT_ENTRY_LDT*sizeof(struct desc_struct) : 0);
}

static void vmi_set_tr(void)
{
	vmi_ops.set_tr(GDT_ENTRY_TSS*sizeof(struct desc_struct));
}

static void vmi_write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	u32 *idt_entry = (u32 *)g;
	vmi_ops.write_idt_entry(dt, entry, idt_entry[0], idt_entry[1]);
}

static void vmi_write_gdt_entry(struct desc_struct *dt, int entry,
				const void *desc, int type)
{
	u32 *gdt_entry = (u32 *)desc;
	vmi_ops.write_gdt_entry(dt, entry, gdt_entry[0], gdt_entry[1]);
}

static void vmi_write_ldt_entry(struct desc_struct *dt, int entry,
				const void *desc)
{
	u32 *ldt_entry = (u32 *)desc;
	vmi_ops.write_ldt_entry(dt, entry, ldt_entry[0], ldt_entry[1]);
}

static void vmi_load_sp0(struct tss_struct *tss,
			 struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;

	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
	vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.sp0);
}

static void vmi_flush_tlb_user(void)
{
	vmi_ops._flush_tlb(VMI_FLUSH_TLB);
}

static void vmi_flush_tlb_kernel(void)
{
	vmi_ops._flush_tlb(VMI_FLUSH_TLB | VMI_FLUSH_GLOBAL);
}

/* Stub to do nothing at all; used for delays and unimplemented calls */
static void vmi_nop(void)
{
}

#ifdef CONFIG_DEBUG_PAGE_TYPE

#ifdef CONFIG_X86_PAE
#define MAX_BOOT_PTS (2048+4+1)
#else
#define MAX_BOOT_PTS (1024+1)
#endif

/*
 * During boot, mem_map is not yet available in paging_init, so stash
 * all the boot page allocations here.
 */
static struct {
	u32 pfn;
	int type;
} boot_page_allocations[MAX_BOOT_PTS];
static int num_boot_page_allocations;
static int boot_allocations_applied;

void vmi_apply_boot_page_allocations(void)
{
	int i;
	BUG_ON(!mem_map);
	for (i = 0; i < num_boot_page_allocations; i++) {
		struct page *page = pfn_to_page(boot_page_allocations[i].pfn);
		page->type = boot_page_allocations[i].type &
				~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
	}
	boot_allocations_applied = 1;
}

static void record_page_type(u32 pfn, int type)
{
	BUG_ON(num_boot_page_allocations >= MAX_BOOT_PTS);
	boot_page_allocations[num_boot_page_allocations].pfn = pfn;
	boot_page_allocations[num_boot_page_allocations].type = type;
	num_boot_page_allocations++;
}

static void check_zeroed_page(u32 pfn, int type, struct page *page)
{
	u32 *ptr;
	int i;
	int limit = PAGE_SIZE / sizeof(int);

	if (page_address(page))
		ptr = (u32 *)page_address(page);
	else
		ptr = (u32 *)__va(pfn << PAGE_SHIFT);
	/*
	 * When cloning the root in non-PAE mode, only the userspace
	 * pdes need to be zeroed.
	 */
	if (type & VMI_PAGE_CLONE)
		limit = KERNEL_PGD_BOUNDARY;
	for (i = 0; i < limit; i++)
		BUG_ON(ptr[i]);
}

/*
 * We stash the page type into struct page so we can verify the page
 * types are used properly.
 */
static void vmi_set_page_type(u32 pfn, int type)
{
	/* PAE can have multiple roots per page - don't track */
	if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP))
		return;

	if (boot_allocations_applied) {
		struct page *page = pfn_to_page(pfn);
		if (type != VMI_PAGE_NORMAL)
			BUG_ON(page->type);
		else
			BUG_ON(page->type == VMI_PAGE_NORMAL);
		page->type = type & ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
		if (type & VMI_PAGE_ZEROED)
			check_zeroed_page(pfn, type, page);
	} else {
		record_page_type(pfn, type);
	}
}

static void vmi_check_page_type(u32 pfn, int type)
{
	/* PAE can have multiple roots per page - skip checks */
	if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP))
		return;

	type &= ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
	if (boot_allocations_applied) {
		struct page *page = pfn_to_page(pfn);
		BUG_ON((page->type ^ type) & VMI_PAGE_PAE);
		BUG_ON(type == VMI_PAGE_NORMAL && page->type);
		BUG_ON((type & page->type) == 0);
	}
}
#else
#define vmi_set_page_type(p,t) do { } while (0)
#define vmi_check_page_type(p,t) do { } while (0)
#endif

#ifdef CONFIG_HIGHPTE
static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
{
	void *va = kmap_atomic(page, type);

	/*
	 * Internally, the VMI ROM must map virtual addresses to physical
	 * addresses for processing MMU updates.  By the time MMU updates
	 * are issued, this information is typically already lost.
	 * Fortunately, the VMI provides a cache of mapping slots for active
	 * page tables.
	 *
	 * We use slot zero for the linear mapping of physical memory, and
	 * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1.
	 *
	 *  args:                 SLOT                 VA    COUNT PFN
	 */
	BUG_ON(type != KM_PTE0 && type != KM_PTE1);
	vmi_ops.set_linear_mapping((type - KM_PTE0)+1, va, 1, page_to_pfn(page));

	return va;
}
#endif

static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn)
{
	vmi_set_page_type(pfn, VMI_PAGE_L1);
	vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
}

static void vmi_allocate_pmd(struct mm_struct *mm, unsigned long pfn)
{
	/*
	 * This call comes in very early, before mem_map is setup.
	 * It is called only for swapper_pg_dir, which already has
	 * data on it.
	 */
	vmi_set_page_type(pfn, VMI_PAGE_L2);
	vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0);
}

static void vmi_allocate_pmd_clone(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count)
{
	vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE);
	vmi_check_page_type(clonepfn, VMI_PAGE_L2);
	vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count);
}

static void vmi_release_pte(unsigned long pfn)
{
	vmi_ops.release_page(pfn, VMI_PAGE_L1);
	vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
}

static void vmi_release_pmd(unsigned long pfn)
{
	vmi_ops.release_page(pfn, VMI_PAGE_L2);
	vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
}

/*
 * Helper macros for MMU update flags.  We can defer updates until a flush
 * or page invalidation only if the update is to the current address space
 * (otherwise, there is no flush).  We must check against init_mm, since
 * this could be a kernel update, which usually passes init_mm, although
 * sometimes this check can be skipped if we know the particular function
 * is only called on user mode PTEs.  We could change the kernel to pass
 * current->active_mm here, but in particular, I was unsure if changing
 * mm/highmem.c to do this would still be correct on other architectures.
 */
#define is_current_as(mm, mustbeuser) ((mm) == current->active_mm ||	\
				       (!mustbeuser && (mm) == &init_mm))
#define vmi_flags_addr(mm, addr, level, user)				\
	((level) | (is_current_as(mm, user) ?				\
		(VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))
#define vmi_flags_addr_defer(mm, addr, level, user)			\
	((level) | (is_current_as(mm, user) ?				\
		(VMI_PAGE_DEFER | VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))

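/*
 * Illustrative expansion (not part of the original file): for a user
 * PTE in the current address space,
 *
 *	vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0)
 *
 * evaluates to VMI_PAGE_PT | VMI_PAGE_CURRENT_AS | (addr & VMI_PAGE_VA_MASK),
 * so the hypervisor can locate the mapping being changed; for a foreign
 * mm it degenerates to plain VMI_PAGE_PT, since no flush of the current
 * address space would make a deferred update visible.
 */
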
static void vmi_update_pte(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.update_pte(ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_update_pte_defer(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.update_pte(ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_set_pte(pte_t *ptep, pte_t pte)
{
	/* XXX because of set_pmd_pte, this can be called on PT or PD layers */
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE | VMI_PAGE_PD);
	vmi_ops.set_pte(pte, ptep, VMI_PAGE_PT);
}

static void vmi_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
#ifdef CONFIG_X86_PAE
	const pte_t pte = { .pte = pmdval.pmd };
	vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PMD);
#else
	const pte_t pte = { pmdval.pud.pgd.pgd };
	vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PGD);
#endif
	vmi_ops.set_pte(pte, (pte_t *)pmdp, VMI_PAGE_PD);
}

#ifdef CONFIG_X86_PAE

static void vmi_set_pte_atomic(pte_t *ptep, pte_t pteval)
{
	/*
	 * XXX This is called from set_pmd_pte, but at both PT
	 * and PD layers so the VMI_PAGE_PT flag is wrong.  But
	 * it is only called for large page mapping changes;
	 * the Xen backend doesn't support large pages, and the
	 * ESX backend doesn't depend on the flag.
	 */
	set_64bit((unsigned long long *)ptep, pte_val(pteval));
	vmi_ops.update_pte(ptep, VMI_PAGE_PT);
}

static void vmi_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 1));
}

static void vmi_set_pud(pud_t *pudp, pud_t pudval)
{
	/* Um, eww */
	const pte_t pte = { .pte = pudval.pgd.pgd };
	vmi_check_page_type(__pa(pudp) >> PAGE_SHIFT, VMI_PAGE_PGD);
	vmi_ops.set_pte(pte, (pte_t *)pudp, VMI_PAGE_PDP);
}

static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	const pte_t pte = { .pte = 0 };
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_pmd_clear(pmd_t *pmd)
{
	const pte_t pte = { .pte = 0 };
	vmi_check_page_type(__pa(pmd) >> PAGE_SHIFT, VMI_PAGE_PMD);
	vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
}
#endif

#ifdef CONFIG_SMP
static void __devinit
vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
		     unsigned long start_esp)
{
	struct vmi_ap_state ap;

	/* Default everything to zero.  This is fine for most GPRs. */
	memset(&ap, 0, sizeof(struct vmi_ap_state));

	ap.gdtr_limit = GDT_SIZE - 1;
	ap.gdtr_base = (unsigned long) get_cpu_gdt_table(phys_apicid);

	ap.idtr_limit = IDT_ENTRIES * 8 - 1;
	ap.idtr_base = (unsigned long) idt_table;

	ap.ldtr = 0;

	ap.cs = __KERNEL_CS;
	ap.eip = (unsigned long) start_eip;
	ap.ss = __KERNEL_DS;
	ap.esp = (unsigned long) start_esp;

	ap.ds = __USER_DS;
	ap.es = __USER_DS;
	ap.fs = __KERNEL_PERCPU;
	ap.gs = 0;

	ap.eflags = 0;

#ifdef CONFIG_X86_PAE
	/* efer should match BSP efer. */
	if (cpu_has_nx) {
		unsigned l, h;
		rdmsr(MSR_EFER, l, h);
		ap.efer = (unsigned long long) h << 32 | l;
	}
#endif

	ap.cr3 = __pa(swapper_pg_dir);
	/* Protected mode, paging, AM, WP, NE, MP. */
	ap.cr0 = 0x80050023;
	ap.cr4 = mmu_cr4_features;
	vmi_ops.set_initial_ap_state((u32)&ap, phys_apicid);
}
#endif

static void vmi_enter_lazy_cpu(void)
{
	paravirt_enter_lazy_cpu();
	vmi_ops.set_lazy_mode(2);
}

static void vmi_enter_lazy_mmu(void)
{
	paravirt_enter_lazy_mmu();
	vmi_ops.set_lazy_mode(1);
}

static void vmi_leave_lazy(void)
{
	paravirt_leave_lazy(paravirt_get_lazy_mode());
	vmi_ops.set_lazy_mode(0);
}

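/*
 * The raw constants passed to set_lazy_mode() above appear to mirror
 * the kernel's own lazy-mode enumeration of this era (PARAVIRT_LAZY_NONE
 * = 0, PARAVIRT_LAZY_MMU = 1, PARAVIRT_LAZY_CPU = 2); treat that
 * correspondence as an observation, not a documented VMI ABI guarantee.
 */
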
static inline int __init check_vmi_rom(struct vrom_header *rom)
{
	struct pci_header *pci;
	struct pnp_header *pnp;
	const char *manufacturer = "UNKNOWN";
	const char *product = "UNKNOWN";
	const char *license = "unspecified";

	if (rom->rom_signature != 0xaa55)
		return 0;
	if (rom->vrom_signature != VMI_SIGNATURE)
		return 0;
	if (rom->api_version_maj != VMI_API_REV_MAJOR ||
	    rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
		printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
		       rom->api_version_maj,
		       rom->api_version_min);
		return 0;
	}

	/*
	 * Relying on the VMI_SIGNATURE field is not 100% safe, so check
	 * the PCI header and device type to make sure this is really a
	 * VMI device.
	 */
	if (!rom->pci_header_offs) {
		printk(KERN_WARNING "VMI: ROM does not contain PCI header.\n");
		return 0;
	}

	pci = (struct pci_header *)((char *)rom+rom->pci_header_offs);
	if (pci->vendorID != PCI_VENDOR_ID_VMWARE ||
	    pci->deviceID != PCI_DEVICE_ID_VMWARE_VMI) {
		/* Allow it to run anyway, but warn */
		printk(KERN_WARNING "VMI: ROM from unknown manufacturer\n");
	}

	if (rom->pnp_header_offs) {
		pnp = (struct pnp_header *)((char *)rom+rom->pnp_header_offs);
		if (pnp->manufacturer_offset)
			manufacturer = (const char *)rom+pnp->manufacturer_offset;
		if (pnp->product_offset)
			product = (const char *)rom+pnp->product_offset;
	}

	if (rom->license_offs)
		license = (char *)rom+rom->license_offs;

	printk(KERN_INFO "VMI: Found %s %s, API version %d.%d, ROM version %d.%d\n",
	       manufacturer, product,
	       rom->api_version_maj, rom->api_version_min,
	       pci->rom_version_maj, pci->rom_version_min);

	/* Don't allow BSD/MIT here for now because we don't want to end up
	   with any binary only shim layers */
	if (strcmp(license, "GPL") && strcmp(license, "GPL v2")) {
		printk(KERN_WARNING "VMI: Non GPL license `%s' found for ROM. Not used.\n",
		       license);
		return 0;
	}

	return 1;
}

/*
 * Probe for the VMI option ROM
 */
static inline int __init probe_vmi_rom(void)
{
	unsigned long base;

	/* VMI ROM is in option ROM area, check signature */
	for (base = 0xC0000; base < 0xE0000; base += 2048) {
		struct vrom_header *romstart;
		romstart = (struct vrom_header *)isa_bus_to_virt(base);
		if (check_vmi_rom(romstart)) {
			vmi_rom = romstart;
			return 1;
		}
	}
	return 0;
}

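/*
 * The 2048-byte stride in probe_vmi_rom() reflects the legacy BIOS
 * convention of placing expansion ROM images on 2KB boundaries within
 * the C0000h-DFFFFh window, so every possible image start gets probed
 * for the 0xaa55 signature that check_vmi_rom() tests first.
 */
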
/*
 * VMI setup common to all processors
 */
void vmi_bringup(void)
{
	/* We must establish the lowmem mapping for MMU ops to work */
	if (vmi_ops.set_linear_mapping)
		vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, MAXMEM_PFN, 0);
}

/*
 * Return a pointer to a VMI function or NULL if unimplemented
 */
static void *vmi_get_function(int vmicall)
{
	u64 reloc;
	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
	reloc = call_vrom_long_func(vmi_rom, get_reloc, vmicall);
	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);
	if (rel->type == VMI_RELOCATION_CALL_REL)
		return (void *)rel->eip;
	else
		return NULL;
}

/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For unimplemented operations, fall back to default, unless nop
 * is returned by the ROM.
 */
#define para_fill(opname, vmicall)				\
do {								\
	reloc = call_vrom_long_func(vmi_rom, get_reloc,		\
				    VMI_CALL_##vmicall);	\
	if (rel->type == VMI_RELOCATION_CALL_REL)		\
		opname = (void *)rel->eip;			\
	else if (rel->type == VMI_RELOCATION_NOP)		\
		opname = (void *)vmi_nop;			\
	else if (rel->type != VMI_RELOCATION_NONE)		\
		printk(KERN_WARNING "VMI: Unknown relocation "	\
				    "type %d for " #vmicall"\n",\
				    rel->type);			\
} while (0)

/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For cached operations which do not match the VMI ROM ABI and must
 * go through a translation stub.  Ignore NOPs, since it is not clear
 * a NOP VMI function corresponds to a NOP paravirt-op when the
 * functions are not in 1-1 correspondence.
 */
#define para_wrap(opname, wrapper, cache, vmicall)		\
do {								\
	reloc = call_vrom_long_func(vmi_rom, get_reloc,		\
				    VMI_CALL_##vmicall);	\
	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);		\
	if (rel->type == VMI_RELOCATION_CALL_REL) {		\
		opname = wrapper;				\
		vmi_ops.cache = (void *)rel->eip;		\
	}							\
} while (0)

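/*
 * For illustration, using a call that appears later in this file:
 * para_wrap(pv_cpu_ops.load_tr_desc, vmi_set_tr, set_tr, SetTR) looks
 * up VMI_CALL_SetTR; on a CALL_REL relocation it points
 * pv_cpu_ops.load_tr_desc at the vmi_set_tr() wrapper and caches the
 * ROM entry in vmi_ops.set_tr, which the wrapper then invokes with the
 * selector argument the ROM ABI expects.
 */
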
/*
 * Activate the VMI interface and switch into paravirtualized mode
 */
static inline int __init activate_vmi(void)
{
	short kernel_cs;
	u64 reloc;
	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;

	if (call_vrom_func(vmi_rom, vmi_init) != 0) {
		printk(KERN_ERR "VMI ROM failed to initialize!\n");
		return 0;
	}
	savesegment(cs, kernel_cs);

	pv_info.paravirt_enabled = 1;
	pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
	pv_info.name = "vmi";

	pv_init_ops.patch = vmi_patch;

	/*
	 * Many of these operations are ABI compatible with VMI.
	 * This means we can fill in the paravirt-ops with direct
	 * pointers into the VMI ROM.  If the calling convention for
	 * these operations changes, this code needs to be updated.
	 *
	 * Exceptions
	 *  CPUID paravirt-op uses pointers, not the native ISA
	 *  halt has no VMI equivalent; all VMI halts are "safe"
	 *  no MSR support yet - just trap and emulate.  VMI uses the
	 *   same ABI as the native ISA, but Linux wants exceptions
	 *   from bogus MSR read / write handled
	 *  rdpmc is not yet used in Linux
	 */

	/* CPUID is special, so very special it gets wrapped like a present */
	para_wrap(pv_cpu_ops.cpuid, vmi_cpuid, cpuid, CPUID);

	para_fill(pv_cpu_ops.clts, CLTS);
	para_fill(pv_cpu_ops.get_debugreg, GetDR);
	para_fill(pv_cpu_ops.set_debugreg, SetDR);
	para_fill(pv_cpu_ops.read_cr0, GetCR0);
	para_fill(pv_mmu_ops.read_cr2, GetCR2);
	para_fill(pv_mmu_ops.read_cr3, GetCR3);
	para_fill(pv_cpu_ops.read_cr4, GetCR4);
	para_fill(pv_cpu_ops.write_cr0, SetCR0);
	para_fill(pv_mmu_ops.write_cr2, SetCR2);
	para_fill(pv_mmu_ops.write_cr3, SetCR3);
	para_fill(pv_cpu_ops.write_cr4, SetCR4);
	para_fill(pv_irq_ops.save_fl, GetInterruptMask);
	para_fill(pv_irq_ops.restore_fl, SetInterruptMask);
	para_fill(pv_irq_ops.irq_disable, DisableInterrupts);
	para_fill(pv_irq_ops.irq_enable, EnableInterrupts);

	para_fill(pv_cpu_ops.wbinvd, WBINVD);
	para_fill(pv_cpu_ops.read_tsc, RDTSC);

	/* The following we emulate with trap and emulate for now */
	/* paravirt_ops.read_msr = vmi_rdmsr */
	/* paravirt_ops.write_msr = vmi_wrmsr */
	/* paravirt_ops.rdpmc = vmi_rdpmc */

	/* TR interface doesn't pass TR value, wrap */
	para_wrap(pv_cpu_ops.load_tr_desc, vmi_set_tr, set_tr, SetTR);

	/* LDT is special, too */
	para_wrap(pv_cpu_ops.set_ldt, vmi_set_ldt, _set_ldt, SetLDT);

	para_fill(pv_cpu_ops.load_gdt, SetGDT);
	para_fill(pv_cpu_ops.load_idt, SetIDT);
	para_fill(pv_cpu_ops.store_gdt, GetGDT);
	para_fill(pv_cpu_ops.store_idt, GetIDT);
	para_fill(pv_cpu_ops.store_tr, GetTR);
	pv_cpu_ops.load_tls = vmi_load_tls;
	para_wrap(pv_cpu_ops.write_ldt_entry, vmi_write_ldt_entry,
		  write_ldt_entry, WriteLDTEntry);
	para_wrap(pv_cpu_ops.write_gdt_entry, vmi_write_gdt_entry,
		  write_gdt_entry, WriteGDTEntry);
	para_wrap(pv_cpu_ops.write_idt_entry, vmi_write_idt_entry,
		  write_idt_entry, WriteIDTEntry);
	para_wrap(pv_cpu_ops.load_sp0, vmi_load_sp0, set_kernel_stack, UpdateKernelStack);
	para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask);
	para_fill(pv_cpu_ops.io_delay, IODelay);

	para_wrap(pv_cpu_ops.lazy_mode.enter, vmi_enter_lazy_cpu,
		  set_lazy_mode, SetLazyMode);
	para_wrap(pv_cpu_ops.lazy_mode.leave, vmi_leave_lazy,
		  set_lazy_mode, SetLazyMode);

	para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu,
		  set_lazy_mode, SetLazyMode);
	para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy,
		  set_lazy_mode, SetLazyMode);

	/* user and kernel flush are just handled with different flags to FlushTLB */
	para_wrap(pv_mmu_ops.flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB);
	para_wrap(pv_mmu_ops.flush_tlb_kernel, vmi_flush_tlb_kernel, _flush_tlb, FlushTLB);
	para_fill(pv_mmu_ops.flush_tlb_single, InvalPage);

	/*
	 * Until a standard flag format can be agreed on, we need to
	 * implement these as wrappers in Linux.  Get the VMI ROM
	 * function pointers for the two backend calls.
	 */
#ifdef CONFIG_X86_PAE
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxELong);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxELong);
#else
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE);
#endif

	if (vmi_ops.set_pte) {
		pv_mmu_ops.set_pte = vmi_set_pte;
		pv_mmu_ops.set_pte_at = vmi_set_pte_at;
		pv_mmu_ops.set_pmd = vmi_set_pmd;
#ifdef CONFIG_X86_PAE
		pv_mmu_ops.set_pte_atomic = vmi_set_pte_atomic;
		pv_mmu_ops.set_pte_present = vmi_set_pte_present;
		pv_mmu_ops.set_pud = vmi_set_pud;
		pv_mmu_ops.pte_clear = vmi_pte_clear;
		pv_mmu_ops.pmd_clear = vmi_pmd_clear;
#endif
	}

	if (vmi_ops.update_pte) {
		pv_mmu_ops.pte_update = vmi_update_pte;
		pv_mmu_ops.pte_update_defer = vmi_update_pte_defer;
	}

	vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
	if (vmi_ops.allocate_page) {
		pv_mmu_ops.alloc_pte = vmi_allocate_pte;
		pv_mmu_ops.alloc_pmd = vmi_allocate_pmd;
		pv_mmu_ops.alloc_pmd_clone = vmi_allocate_pmd_clone;
	}

	vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
	if (vmi_ops.release_page) {
		pv_mmu_ops.release_pte = vmi_release_pte;
		pv_mmu_ops.release_pmd = vmi_release_pmd;
	}

	/* SetLinearMapping is needed in all cases */
	vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
#ifdef CONFIG_HIGHPTE
	if (vmi_ops.set_linear_mapping)
		pv_mmu_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
#endif

	/*
	 * These MUST always be patched.  Don't support indirect jumps
	 * through these operations, as the VMI interface may use either
	 * a jump or a call to get to these operations, depending on
	 * the backend.  They are performance critical anyway, so requiring
	 * a patch is not a big problem.
	 */
	pv_cpu_ops.irq_enable_sysexit = (void *)0xfeedbab0;
	pv_cpu_ops.iret = (void *)0xbadbab0;

#ifdef CONFIG_SMP
	para_wrap(pv_apic_ops.startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
	para_fill(apic_ops->read, APICRead);
	para_fill(apic_ops->write, APICWrite);
#endif

	/*
	 * Check for VMI timer functionality by probing for a cycle frequency method
	 */
	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency);
	if (!disable_vmi_timer && rel->type != VMI_RELOCATION_NONE) {
		vmi_timer_ops.get_cycle_frequency = (void *)rel->eip;
		vmi_timer_ops.get_cycle_counter =
			vmi_get_function(VMI_CALL_GetCycleCounter);
		vmi_timer_ops.get_wallclock =
			vmi_get_function(VMI_CALL_GetWallclockTime);
		vmi_timer_ops.wallclock_updated =
			vmi_get_function(VMI_CALL_WallclockUpdated);
		vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm);
		vmi_timer_ops.cancel_alarm =
			vmi_get_function(VMI_CALL_CancelAlarm);
		pv_time_ops.time_init = vmi_time_init;
		pv_time_ops.get_wallclock = vmi_get_wallclock;
		pv_time_ops.set_wallclock = vmi_set_wallclock;
#ifdef CONFIG_X86_LOCAL_APIC
		pv_apic_ops.setup_boot_clock = vmi_time_bsp_init;
		pv_apic_ops.setup_secondary_clock = vmi_time_ap_init;
#endif
		pv_time_ops.sched_clock = vmi_sched_clock;
		pv_time_ops.get_tsc_khz = vmi_tsc_khz;

		/* We have true wallclock functions; disable CMOS clock sync */
		no_sync_cmos_clock = 1;
	} else {
		disable_noidle = 1;
		disable_vmi_timer = 1;
	}

	para_fill(pv_irq_ops.safe_halt, Halt);

	/*
	 * Alternative instruction rewriting doesn't happen soon enough
	 * to convert VMI_IRET to a call instead of a jump; so we have
	 * to do this before IRQs get reenabled.  Fortunately, it is
	 * idempotent.
	 */
	apply_paravirt(__parainstructions, __parainstructions_end);

	vmi_bringup();

	return 1;
}

#undef para_fill

void __init vmi_init(void)
{
	unsigned long flags;

	if (!vmi_rom)
		probe_vmi_rom();
	else
		check_vmi_rom(vmi_rom);

	/* In case probing for or validating the ROM failed, bail */
	if (!vmi_rom)
		return;

	reserve_top_address(-vmi_rom->virtual_top);

	local_irq_save(flags);
	activate_vmi();

#ifdef CONFIG_X86_IO_APIC
	/* This is virtual hardware; timer routing is wired correctly */
	no_timer_check = 1;
#endif
	local_irq_restore(flags & X86_EFLAGS_IF);
}

static int __init parse_vmi(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "disable_pge")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
		disable_pge = 1;
	} else if (!strcmp(arg, "disable_pse")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PSE);
		disable_pse = 1;
	} else if (!strcmp(arg, "disable_sep")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_SEP);
		disable_sep = 1;
	} else if (!strcmp(arg, "disable_tsc")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC);
		disable_tsc = 1;
	} else if (!strcmp(arg, "disable_mtrr")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_MTRR);
		disable_mtrr = 1;
	} else if (!strcmp(arg, "disable_timer")) {
		disable_vmi_timer = 1;
		disable_noidle = 1;
	} else if (!strcmp(arg, "disable_noidle"))
		disable_noidle = 1;
	return 0;
}

early_param("vmi", parse_vmi);
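
/*
 * Usage sketch (illustrative): early_param() registers parse_vmi() for
 * the "vmi=" boot option, so booting with e.g.
 *
 *	vmi=disable_tsc
 *
 * hides the corresponding CPUID feature.  The handler compares the whole
 * argument string, so each option needs its own "vmi=" instance rather
 * than a comma-separated list.
 */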