x86: change write_gdt_entry signature.
arch/x86/kernel/vmi_32.c
1/*
2 * VMI specific paravirt-ops implementation
3 *
4 * Copyright (C) 2005, VMware, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
15 * details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 * Send feedback to zach@vmware.com
22 *
23 */
24
25#include <linux/module.h>
26#include <linux/cpu.h>
27#include <linux/bootmem.h>
28#include <linux/mm.h>
eeef9c68 29#include <linux/highmem.h>
fa0aa866 30#include <linux/sched.h>
31#include <asm/vmi.h>
32#include <asm/io.h>
33#include <asm/fixmap.h>
34#include <asm/apicdef.h>
35#include <asm/apic.h>
36#include <asm/processor.h>
37#include <asm/timer.h>
bbab4f3b 38#include <asm/vmi_time.h>
8f485612 39#include <asm/kmap_types.h>
40
41/* Convenient for calling VMI functions indirectly in the ROM */
42typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
43typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
44
45#define call_vrom_func(rom,func) \
46 (((VROMFUNC *)(rom->func))())
47
48#define call_vrom_long_func(rom,func,arg) \
49 (((VROMLONGFUNC *)(rom->func)) (arg))
50
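/*
 * Illustrative note (not part of the original file): the two macros above
 * simply cast a ROM-provided entry point to a regparm function pointer and
 * call through it.  The relocation lookup used throughout this file,
 *
 *	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_CPUID);
 *
 * expands to roughly
 *
 *	reloc = ((VROMLONGFUNC *)(vmi_rom->get_reloc))(VMI_CALL_CPUID);
 *
 * i.e. a regparm call into the option ROM with the VMI call number passed
 * in a register.
 */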
51static struct vrom_header *vmi_rom;
52static int disable_pge;
53static int disable_pse;
54static int disable_sep;
55static int disable_tsc;
56static int disable_mtrr;
7507ba34 57static int disable_noidle;
772205f6 58static int disable_vmi_timer;
59
60/* Cached VMI operations */
30a1528d 61static struct {
62 void (*cpuid)(void /* non-c */);
63 void (*_set_ldt)(u32 selector);
64 void (*set_tr)(u32 selector);
8d947344 65 void (*write_idt_entry)(struct desc_struct *, int, u32, u32);
014b15be 66 void (*write_gdt_entry)(struct desc_struct *, int, u32, u32);
faca6227 67 void (*set_kernel_stack)(u32 selector, u32 sp0);
68 void (*allocate_page)(u32, u32, u32, u32, u32);
69 void (*release_page)(u32, u32);
70 void (*set_pte)(pte_t, pte_t *, unsigned);
71 void (*update_pte)(pte_t *, unsigned);
72 void (*set_linear_mapping)(int, void *, u32, u32);
73 void (*_flush_tlb)(int);
7ce0bcfd 74 void (*set_initial_ap_state)(int, int);
bbab4f3b 75 void (*halt)(void);
49f19710 76 void (*set_lazy_mode)(int mode);
77} vmi_ops;
78
 79/* Cached VMI timer operations */
80struct vmi_timer_ops vmi_timer_ops;
81
82/*
83 * VMI patching routines.
84 */
85#define MNEM_CALL 0xe8
86#define MNEM_JMP 0xe9
87#define MNEM_RET 0xc3
88
89#define IRQ_PATCH_INT_MASK 0
90#define IRQ_PATCH_DISABLE 5
91
ab144f5e 92static inline void patch_offset(void *insnbuf,
65ea5b03 93 unsigned long ip, unsigned long dest)
7ce0bcfd 94{
65ea5b03 95 *(unsigned long *)(insnbuf+1) = dest-ip-5;
96}
97
ab144f5e 98static unsigned patch_internal(int call, unsigned len, void *insnbuf,
65ea5b03 99 unsigned long ip)
100{
101 u64 reloc;
102 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
103 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
104 switch(rel->type) {
105 case VMI_RELOCATION_CALL_REL:
106 BUG_ON(len < 5);
ab144f5e 107 *(char *)insnbuf = MNEM_CALL;
65ea5b03 108 patch_offset(insnbuf, ip, (unsigned long)rel->eip);
109 return 5;
110
111 case VMI_RELOCATION_JUMP_REL:
112 BUG_ON(len < 5);
ab144f5e 113 *(char *)insnbuf = MNEM_JMP;
65ea5b03 114 patch_offset(insnbuf, ip, (unsigned long)rel->eip);
115 return 5;
116
117 case VMI_RELOCATION_NOP:
118 /* obliterate the whole thing */
119 return 0;
120
121 case VMI_RELOCATION_NONE:
122 /* leave native code in place */
123 break;
124
125 default:
126 BUG();
127 }
128 return len;
129}
130
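/*
 * Illustrative sketch (not part of the original file; addresses made up):
 * patch_offset() fills in the rel32 operand of a 5-byte CALL/JMP.  If
 * patch_internal() rewrites a call site at ip 0xc0101000 to target a VMI
 * entry point at 0xc0102340, the emitted bytes are
 *
 *	e8 3b 13 00 00		call 0xc0102340
 *
 * since dest - ip - 5 = 0x133b, stored little-endian right after the
 * MNEM_CALL opcode.
 */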
131/*
132 * Apply patch if appropriate, return length of new instruction
133 * sequence. The callee does nop padding for us.
134 */
ab144f5e 135static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
65ea5b03 136 unsigned long ip, unsigned len)
137{
138 switch (type) {
93b1eab3 139 case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
ab144f5e 140 return patch_internal(VMI_CALL_DisableInterrupts, len,
65ea5b03 141 insns, ip);
93b1eab3 142 case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
ab144f5e 143 return patch_internal(VMI_CALL_EnableInterrupts, len,
65ea5b03 144 insns, ip);
93b1eab3 145 case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
ab144f5e 146 return patch_internal(VMI_CALL_SetInterruptMask, len,
65ea5b03 147 insns, ip);
93b1eab3 148 case PARAVIRT_PATCH(pv_irq_ops.save_fl):
ab144f5e 149 return patch_internal(VMI_CALL_GetInterruptMask, len,
65ea5b03 150 insns, ip);
93b1eab3 151 case PARAVIRT_PATCH(pv_cpu_ops.iret):
65ea5b03 152 return patch_internal(VMI_CALL_IRET, len, insns, ip);
6abcd98f 153 case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_syscall_ret):
65ea5b03 154 return patch_internal(VMI_CALL_SYSEXIT, len, insns, ip);
155 default:
156 break;
157 }
158 return len;
159}
160
161/* CPUID has non-C semantics, and paravirt-ops API doesn't match hardware ISA */
162static void vmi_cpuid(unsigned int *ax, unsigned int *bx,
163 unsigned int *cx, unsigned int *dx)
164{
165 int override = 0;
65ea5b03 166 if (*ax == 1)
167 override = 1;
168 asm volatile ("call *%6"
169 : "=a" (*ax),
170 "=b" (*bx),
171 "=c" (*cx),
172 "=d" (*dx)
173 : "0" (*ax), "2" (*cx), "r" (vmi_ops.cpuid));
174 if (override) {
175 if (disable_pse)
65ea5b03 176 *dx &= ~X86_FEATURE_PSE;
7ce0bcfd 177 if (disable_pge)
65ea5b03 178 *dx &= ~X86_FEATURE_PGE;
7ce0bcfd 179 if (disable_sep)
65ea5b03 180 *dx &= ~X86_FEATURE_SEP;
7ce0bcfd 181 if (disable_tsc)
65ea5b03 182 *dx &= ~X86_FEATURE_TSC;
7ce0bcfd 183 if (disable_mtrr)
65ea5b03 184 *dx &= ~X86_FEATURE_MTRR;
185 }
186}
187
188static inline void vmi_maybe_load_tls(struct desc_struct *gdt, int nr, struct desc_struct *new)
189{
190 if (gdt[nr].a != new->a || gdt[nr].b != new->b)
014b15be 191 write_gdt_entry(gdt, nr, new, 0);
192}
193
194static void vmi_load_tls(struct thread_struct *t, unsigned int cpu)
195{
196 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
197 vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0]);
198 vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1]);
199 vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2]);
200}
201
202static void vmi_set_ldt(const void *addr, unsigned entries)
203{
204 unsigned cpu = smp_processor_id();
014b15be 205 struct desc_struct desc;
7ce0bcfd 206
014b15be 207 pack_descriptor(&desc, (unsigned long)addr,
7ce0bcfd 208 entries * sizeof(struct desc_struct) - 1,
209 DESC_LDT, 0);
210 write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, &desc, DESC_LDT);
211 vmi_ops._set_ldt(entries ? GDT_ENTRY_LDT*sizeof(struct desc_struct) : 0);
212}
213
214static void vmi_set_tr(void)
215{
216 vmi_ops.set_tr(GDT_ENTRY_TSS*sizeof(struct desc_struct));
217}
218
219static void vmi_write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
220{
221 u32 *idt_entry = (u32 *)g;
222 vmi_ops.write_idt_entry(dt, entry, idt_entry[0], idt_entry[2]);
223}
224
225static void vmi_write_gdt_entry(struct desc_struct *dt, int entry,
226 const void *desc, int type)
227{
228 u32 *gdt_entry = (u32 *)desc;
229 vmi_ops.write_gdt_entry(dt, entry, gdt_entry[0], gdt_entry[2]);
230}
231
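/*
 * Illustrative note (not part of the original file): this wrapper exists
 * because of the write_gdt_entry() signature change this file is being
 * converted to.  The kernel now passes an opaque descriptor pointer plus a
 * type instead of two raw words, so a TLS update such as
 *
 *	write_gdt_entry(gdt, GDT_ENTRY_TLS_MIN, &t->tls_array[0], 0);
 *
 * (see vmi_maybe_load_tls() above) lands here, and the wrapper re-splits
 * the 8-byte descriptor into the pair of u32 arguments that the cached
 * vmi_ops.write_gdt_entry ROM call still expects.
 */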
faca6227 232static void vmi_load_sp0(struct tss_struct *tss,
233 struct thread_struct *thread)
234{
faca6227 235 tss->x86_tss.sp0 = thread->sp0;
236
237 /* This can only happen when SEP is enabled, no need to test "SEP"arately */
238 if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
239 tss->x86_tss.ss1 = thread->sysenter_cs;
240 wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
241 }
faca6227 242 vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.sp0);
243}
244
245static void vmi_flush_tlb_user(void)
246{
eeef9c68 247 vmi_ops._flush_tlb(VMI_FLUSH_TLB);
248}
249
250static void vmi_flush_tlb_kernel(void)
251{
eeef9c68 252 vmi_ops._flush_tlb(VMI_FLUSH_TLB | VMI_FLUSH_GLOBAL);
253}
254
255/* Stub to do nothing at all; used for delays and unimplemented calls */
256static void vmi_nop(void)
257{
258}
259
260#ifdef CONFIG_DEBUG_PAGE_TYPE
261
262#ifdef CONFIG_X86_PAE
263#define MAX_BOOT_PTS (2048+4+1)
264#else
265#define MAX_BOOT_PTS (1024+1)
266#endif
267
268/*
269 * During boot, mem_map is not yet available in paging_init, so stash
270 * all the boot page allocations here.
271 */
272static struct {
273 u32 pfn;
274 int type;
275} boot_page_allocations[MAX_BOOT_PTS];
276static int num_boot_page_allocations;
277static int boot_allocations_applied;
278
279void vmi_apply_boot_page_allocations(void)
280{
281 int i;
282 BUG_ON(!mem_map);
283 for (i = 0; i < num_boot_page_allocations; i++) {
284 struct page *page = pfn_to_page(boot_page_allocations[i].pfn);
285 page->type = boot_page_allocations[i].type;
286 page->type = boot_page_allocations[i].type &
287 ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
288 }
289 boot_allocations_applied = 1;
290}
291
292static void record_page_type(u32 pfn, int type)
293{
294 BUG_ON(num_boot_page_allocations >= MAX_BOOT_PTS);
295 boot_page_allocations[num_boot_page_allocations].pfn = pfn;
296 boot_page_allocations[num_boot_page_allocations].type = type;
297 num_boot_page_allocations++;
298}
299
300static void check_zeroed_page(u32 pfn, int type, struct page *page)
301{
302 u32 *ptr;
303 int i;
304 int limit = PAGE_SIZE / sizeof(int);
305
306 if (page_address(page))
307 ptr = (u32 *)page_address(page);
308 else
309 ptr = (u32 *)__va(pfn << PAGE_SHIFT);
310 /*
311 * When cloning the root in non-PAE mode, only the userspace
312 * pdes need to be zeroed.
313 */
314 if (type & VMI_PAGE_CLONE)
315 limit = USER_PTRS_PER_PGD;
316 for (i = 0; i < limit; i++)
317 BUG_ON(ptr[i]);
318}
319
320/*
321 * We stash the page type into struct page so we can verify the page
322 * types are used properly.
323 */
324static void vmi_set_page_type(u32 pfn, int type)
325{
326 /* PAE can have multiple roots per page - don't track */
327 if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP))
328 return;
329
330 if (boot_allocations_applied) {
331 struct page *page = pfn_to_page(pfn);
332 if (type != VMI_PAGE_NORMAL)
333 BUG_ON(page->type);
334 else
335 BUG_ON(page->type == VMI_PAGE_NORMAL);
336 page->type = type & ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
337 if (type & VMI_PAGE_ZEROED)
338 check_zeroed_page(pfn, type, page);
339 } else {
340 record_page_type(pfn, type);
341 }
342}
343
344static void vmi_check_page_type(u32 pfn, int type)
345{
346 /* PAE can have multiple roots per page - skip checks */
347 if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP))
348 return;
349
350 type &= ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
351 if (boot_allocations_applied) {
352 struct page *page = pfn_to_page(pfn);
353 BUG_ON((page->type ^ type) & VMI_PAGE_PAE);
354 BUG_ON(type == VMI_PAGE_NORMAL && page->type);
355 BUG_ON((type & page->type) == 0);
356 }
357}
358#else
359#define vmi_set_page_type(p,t) do { } while (0)
360#define vmi_check_page_type(p,t) do { } while (0)
361#endif
362
363#ifdef CONFIG_HIGHPTE
364static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
9a1c13e9 365{
366 void *va = kmap_atomic(page, type);
367
368 /*
369 * Internally, the VMI ROM must map virtual addresses to physical
370 * addresses for processing MMU updates. By the time MMU updates
371 * are issued, this information is typically already lost.
372 * Fortunately, the VMI provides a cache of mapping slots for active
373 * page tables.
374 *
375 * We use slot zero for the linear mapping of physical memory, and
376 * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1.
377 *
378 * args: SLOT VA COUNT PFN
379 */
380 BUG_ON(type != KM_PTE0 && type != KM_PTE1);
381 vmi_ops.set_linear_mapping((type - KM_PTE0)+1, va, 1, page_to_pfn(page));
382
383 return va;
9a1c13e9 384}
eeef9c68 385#endif
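/*
 * Illustrative note (not part of the original file): with the slot layout
 * described above, a HIGHPTE kmap of a page-table page through KM_PTE0
 * becomes
 *
 *	vmi_ops.set_linear_mapping(1, va, 1, page_to_pfn(page));
 *
 * i.e. slot 1, a one-page mapping at va backed by that pfn; KM_PTE1 uses
 * slot 2.  Slot 0 is reserved for the lowmem linear mapping established in
 * vmi_bringup().
 */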
9a1c13e9 386
fdb4c338 387static void vmi_allocate_pt(struct mm_struct *mm, u32 pfn)
388{
389 vmi_set_page_type(pfn, VMI_PAGE_L1);
390 vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
391}
392
393static void vmi_allocate_pd(u32 pfn)
394{
395 /*
396 * This call comes in very early, before mem_map is setup.
397 * It is called only for swapper_pg_dir, which already has
398 * data on it.
399 */
400 vmi_set_page_type(pfn, VMI_PAGE_L2);
401 vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0);
402}
403
404static void vmi_allocate_pd_clone(u32 pfn, u32 clonepfn, u32 start, u32 count)
405{
406 vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE);
407 vmi_check_page_type(clonepfn, VMI_PAGE_L2);
408 vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count);
409}
410
411static void vmi_release_pt(u32 pfn)
412{
413 vmi_ops.release_page(pfn, VMI_PAGE_L1);
414 vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
415}
416
417static void vmi_release_pd(u32 pfn)
418{
419 vmi_ops.release_page(pfn, VMI_PAGE_L2);
420 vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
421}
422
423/*
424 * Helper macros for MMU update flags. We can defer updates until a flush
425 * or page invalidation only if the update is to the current address space
426 * (otherwise, there is no flush). We must check against init_mm, since
427 * this could be a kernel update, which usually passes init_mm, although
428 * sometimes this check can be skipped if we know the particular function
429 * is only called on user mode PTEs. We could change the kernel to pass
430 * current->active_mm here, but in particular, I was unsure if changing
431 * mm/highmem.c to do this would still be correct on other architectures.
432 */
433#define is_current_as(mm, mustbeuser) ((mm) == current->active_mm || \
434 (!mustbeuser && (mm) == &init_mm))
435#define vmi_flags_addr(mm, addr, level, user) \
436 ((level) | (is_current_as(mm, user) ? \
437 (VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))
438#define vmi_flags_addr_defer(mm, addr, level, user) \
439 ((level) | (is_current_as(mm, user) ? \
440 (VMI_PAGE_DEFER | VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))
441
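/*
 * Illustrative example (not part of the original file): for a PTE update
 * in the current user address space, e.g. from vmi_set_pte_at(), the flags
 * word built by vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0) evaluates to
 *
 *	VMI_PAGE_PT | VMI_PAGE_CURRENT_AS | (addr & VMI_PAGE_VA_MASK)
 *
 * whereas the same update against a foreign mm collapses to plain
 * VMI_PAGE_PT, since no local flush is pending.  The _defer variant only
 * adds VMI_PAGE_DEFER in the current-address-space case.
 */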
3dc494e8 442static void vmi_update_pte(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
443{
444 vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
445 vmi_ops.update_pte(ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
446}
447
3dc494e8 448static void vmi_update_pte_defer(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
449{
450 vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
451 vmi_ops.update_pte(ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 0));
452}
453
454static void vmi_set_pte(pte_t *ptep, pte_t pte)
455{
456 /* XXX because of set_pmd_pte, this can be called on PT or PD layers */
457 vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE | VMI_PAGE_PD);
458 vmi_ops.set_pte(pte, ptep, VMI_PAGE_PT);
459}
460
3dc494e8 461static void vmi_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
462{
463 vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
464 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
465}
466
467static void vmi_set_pmd(pmd_t *pmdp, pmd_t pmdval)
468{
469#ifdef CONFIG_X86_PAE
470 const pte_t pte = { pmdval.pmd, pmdval.pmd >> 32 };
471 vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PMD);
472#else
473 const pte_t pte = { pmdval.pud.pgd.pgd };
474 vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PGD);
475#endif
476 vmi_ops.set_pte(pte, (pte_t *)pmdp, VMI_PAGE_PD);
477}
478
479#ifdef CONFIG_X86_PAE
480
481static void vmi_set_pte_atomic(pte_t *ptep, pte_t pteval)
482{
483 /*
484 * XXX This is called from set_pmd_pte, but at both PT
485 * and PD layers so the VMI_PAGE_PT flag is wrong. But
486 * it is only called for large page mapping changes,
487 * the Xen backend, doesn't support large pages, and the
488 * ESX backend doesn't depend on the flag.
489 */
490 set_64bit((unsigned long long *)ptep,pte_val(pteval));
491 vmi_ops.update_pte(ptep, VMI_PAGE_PT);
492}
493
494static void vmi_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
495{
496 vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
497 vmi_ops.set_pte(pte, ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 1));
498}
499
500static void vmi_set_pud(pud_t *pudp, pud_t pudval)
501{
502 /* Um, eww */
503 const pte_t pte = { pudval.pgd.pgd, pudval.pgd.pgd >> 32 };
504 vmi_check_page_type(__pa(pudp) >> PAGE_SHIFT, VMI_PAGE_PGD);
505 vmi_ops.set_pte(pte, (pte_t *)pudp, VMI_PAGE_PDP);
506}
507
508static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
509{
510 const pte_t pte = { 0 };
511 vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
512 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
513}
514
8eb68fae 515static void vmi_pmd_clear(pmd_t *pmd)
516{
517 const pte_t pte = { 0 };
518 vmi_check_page_type(__pa(pmd) >> PAGE_SHIFT, VMI_PAGE_PMD);
519 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
520}
521#endif
522
523#ifdef CONFIG_SMP
c6b36e9a 524static void __devinit
525vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
526 unsigned long start_esp)
527{
528 struct vmi_ap_state ap;
529
530 /* Default everything to zero. This is fine for most GPRs. */
531 memset(&ap, 0, sizeof(struct vmi_ap_state));
532
533 ap.gdtr_limit = GDT_SIZE - 1;
534 ap.gdtr_base = (unsigned long) get_cpu_gdt_table(phys_apicid);
535
536 ap.idtr_limit = IDT_ENTRIES * 8 - 1;
537 ap.idtr_base = (unsigned long) idt_table;
538
539 ap.ldtr = 0;
540
541 ap.cs = __KERNEL_CS;
542 ap.eip = (unsigned long) start_eip;
543 ap.ss = __KERNEL_DS;
544 ap.esp = (unsigned long) start_esp;
545
546 ap.ds = __USER_DS;
547 ap.es = __USER_DS;
7c3576d2 548 ap.fs = __KERNEL_PERCPU;
549 ap.gs = 0;
550
551 ap.eflags = 0;
552
553#ifdef CONFIG_X86_PAE
554 /* efer should match BSP efer. */
555 if (cpu_has_nx) {
556 unsigned l, h;
557 rdmsr(MSR_EFER, l, h);
558 ap.efer = (unsigned long long) h << 32 | l;
559 }
560#endif
561
562 ap.cr3 = __pa(swapper_pg_dir);
563 /* Protected mode, paging, AM, WP, NE, MP. */
564 ap.cr0 = 0x80050023;
565 ap.cr4 = mmu_cr4_features;
c6b36e9a 566 vmi_ops.set_initial_ap_state((u32)&ap, phys_apicid);
567}
568#endif
569
8965c1c0 570static void vmi_enter_lazy_cpu(void)
49f19710 571{
572 paravirt_enter_lazy_cpu();
573 vmi_ops.set_lazy_mode(2);
574}
49f19710 575
576static void vmi_enter_lazy_mmu(void)
577{
578 paravirt_enter_lazy_mmu();
579 vmi_ops.set_lazy_mode(1);
580}
49f19710 581
582static void vmi_leave_lazy(void)
583{
584 paravirt_leave_lazy(paravirt_get_lazy_mode());
585 vmi_ops.set_lazy_mode(0);
586}
587
588static inline int __init check_vmi_rom(struct vrom_header *rom)
589{
590 struct pci_header *pci;
591 struct pnp_header *pnp;
592 const char *manufacturer = "UNKNOWN";
593 const char *product = "UNKNOWN";
594 const char *license = "unspecified";
595
596 if (rom->rom_signature != 0xaa55)
597 return 0;
598 if (rom->vrom_signature != VMI_SIGNATURE)
599 return 0;
600 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
601 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
602 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
603 rom->api_version_maj,
604 rom->api_version_min);
605 return 0;
606 }
607
608 /*
609 * Relying on the VMI_SIGNATURE field is not 100% safe, so check
610 * the PCI header and device type to make sure this is really a
611 * VMI device.
612 */
613 if (!rom->pci_header_offs) {
614 printk(KERN_WARNING "VMI: ROM does not contain PCI header.\n");
615 return 0;
616 }
617
618 pci = (struct pci_header *)((char *)rom+rom->pci_header_offs);
619 if (pci->vendorID != PCI_VENDOR_ID_VMWARE ||
620 pci->deviceID != PCI_DEVICE_ID_VMWARE_VMI) {
 621 /* Allow it to run... anyway, but warn */
622 printk(KERN_WARNING "VMI: ROM from unknown manufacturer\n");
623 }
624
625 if (rom->pnp_header_offs) {
626 pnp = (struct pnp_header *)((char *)rom+rom->pnp_header_offs);
627 if (pnp->manufacturer_offset)
628 manufacturer = (const char *)rom+pnp->manufacturer_offset;
629 if (pnp->product_offset)
630 product = (const char *)rom+pnp->product_offset;
631 }
632
633 if (rom->license_offs)
634 license = (char *)rom+rom->license_offs;
635
636 printk(KERN_INFO "VMI: Found %s %s, API version %d.%d, ROM version %d.%d\n",
637 manufacturer, product,
638 rom->api_version_maj, rom->api_version_min,
639 pci->rom_version_maj, pci->rom_version_min);
640
641 /* Don't allow BSD/MIT here for now because we don't want to end up
642 with any binary only shim layers */
643 if (strcmp(license, "GPL") && strcmp(license, "GPL v2")) {
644 printk(KERN_WARNING "VMI: Non GPL license `%s' found for ROM. Not used.\n",
645 license);
646 return 0;
647 }
648
649 return 1;
650}
651
652/*
653 * Probe for the VMI option ROM
654 */
655static inline int __init probe_vmi_rom(void)
656{
657 unsigned long base;
658
659 /* VMI ROM is in option ROM area, check signature */
660 for (base = 0xC0000; base < 0xE0000; base += 2048) {
661 struct vrom_header *romstart;
662 romstart = (struct vrom_header *)isa_bus_to_virt(base);
663 if (check_vmi_rom(romstart)) {
664 vmi_rom = romstart;
665 return 1;
666 }
667 }
668 return 0;
669}
670
671/*
672 * VMI setup common to all processors
673 */
674void vmi_bringup(void)
675{
676 /* We must establish the lowmem mapping for MMU ops to work */
772205f6 677 if (vmi_ops.set_linear_mapping)
eeef9c68 678 vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, max_low_pfn, 0);
679}
680
681/*
772205f6 682 * Return a pointer to a VMI function or NULL if unimplemented
683 */
684static void *vmi_get_function(int vmicall)
685{
686 u64 reloc;
687 const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
688 reloc = call_vrom_long_func(vmi_rom, get_reloc, vmicall);
689 BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);
690 if (rel->type == VMI_RELOCATION_CALL_REL)
691 return (void *)rel->eip;
692 else
772205f6 693 return NULL;
694}
695
696/*
697 * Helper macro for making the VMI paravirt-ops fill code readable.
698 * For unimplemented operations, fall back to default, unless nop
699 * is returned by the ROM.
700 */
701#define para_fill(opname, vmicall) \
702do { \
703 reloc = call_vrom_long_func(vmi_rom, get_reloc, \
704 VMI_CALL_##vmicall); \
0492c371 705 if (rel->type == VMI_RELOCATION_CALL_REL) \
93b1eab3 706 opname = (void *)rel->eip; \
0492c371 707 else if (rel->type == VMI_RELOCATION_NOP) \
93b1eab3 708 opname = (void *)vmi_nop; \
709 else if (rel->type != VMI_RELOCATION_NONE) \
710 printk(KERN_WARNING "VMI: Unknown relocation " \
711 "type %d for " #vmicall"\n",\
712 rel->type); \
713} while (0)
714
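/*
 * Illustrative expansion (not part of the original file): a direct fill
 * such as para_fill(pv_cpu_ops.clts, CLTS) below boils down to
 *
 *	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_CLTS);
 *	if (rel->type == VMI_RELOCATION_CALL_REL)
 *		pv_cpu_ops.clts = (void *)rel->eip;
 *	else if (rel->type == VMI_RELOCATION_NOP)
 *		pv_cpu_ops.clts = (void *)vmi_nop;
 *
 * plus a warning printk for unknown relocation types: ABI-compatible ops
 * point straight at the ROM entry, and ops the hypervisor does not care
 * about collapse to vmi_nop.
 */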
715/*
716 * Helper macro for making the VMI paravirt-ops fill code readable.
717 * For cached operations which do not match the VMI ROM ABI and must
 718 * go through a translation stub. Ignore NOPs, since it is not clear
 719 * a NOP VMI function corresponds to a NOP paravirt-op when the
720 * functions are not in 1-1 correspondence.
721 */
722#define para_wrap(opname, wrapper, cache, vmicall) \
723do { \
724 reloc = call_vrom_long_func(vmi_rom, get_reloc, \
725 VMI_CALL_##vmicall); \
726 BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL); \
727 if (rel->type == VMI_RELOCATION_CALL_REL) { \
93b1eab3 728 opname = wrapper; \
772205f6 729 vmi_ops.cache = (void *)rel->eip; \
730 } \
731} while (0)
732
733/*
734 * Activate the VMI interface and switch into paravirtualized mode
735 */
736static inline int __init activate_vmi(void)
737{
738 short kernel_cs;
739 u64 reloc;
740 const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
741
742 if (call_vrom_func(vmi_rom, vmi_init) != 0) {
743 printk(KERN_ERR "VMI ROM failed to initialize!");
744 return 0;
745 }
746 savesegment(cs, kernel_cs);
747
748 pv_info.paravirt_enabled = 1;
749 pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
750 pv_info.name = "vmi";
7ce0bcfd 751
93b1eab3 752 pv_init_ops.patch = vmi_patch;
753
754 /*
755 * Many of these operations are ABI compatible with VMI.
756 * This means we can fill in the paravirt-ops with direct
757 * pointers into the VMI ROM. If the calling convention for
758 * these operations changes, this code needs to be updated.
759 *
760 * Exceptions
761 * CPUID paravirt-op uses pointers, not the native ISA
762 * halt has no VMI equivalent; all VMI halts are "safe"
763 * no MSR support yet - just trap and emulate. VMI uses the
764 * same ABI as the native ISA, but Linux wants exceptions
765 * from bogus MSR read / write handled
766 * rdpmc is not yet used in Linux
767 */
768
772205f6 769 /* CPUID is special, so very special it gets wrapped like a present */
770 para_wrap(pv_cpu_ops.cpuid, vmi_cpuid, cpuid, CPUID);
771
772 para_fill(pv_cpu_ops.clts, CLTS);
773 para_fill(pv_cpu_ops.get_debugreg, GetDR);
774 para_fill(pv_cpu_ops.set_debugreg, SetDR);
775 para_fill(pv_cpu_ops.read_cr0, GetCR0);
776 para_fill(pv_mmu_ops.read_cr2, GetCR2);
777 para_fill(pv_mmu_ops.read_cr3, GetCR3);
778 para_fill(pv_cpu_ops.read_cr4, GetCR4);
779 para_fill(pv_cpu_ops.write_cr0, SetCR0);
780 para_fill(pv_mmu_ops.write_cr2, SetCR2);
781 para_fill(pv_mmu_ops.write_cr3, SetCR3);
782 para_fill(pv_cpu_ops.write_cr4, SetCR4);
783 para_fill(pv_irq_ops.save_fl, GetInterruptMask);
784 para_fill(pv_irq_ops.restore_fl, SetInterruptMask);
785 para_fill(pv_irq_ops.irq_disable, DisableInterrupts);
786 para_fill(pv_irq_ops.irq_enable, EnableInterrupts);
787
788 para_fill(pv_cpu_ops.wbinvd, WBINVD);
789 para_fill(pv_cpu_ops.read_tsc, RDTSC);
790
791 /* The following we emulate with trap and emulate for now */
792 /* paravirt_ops.read_msr = vmi_rdmsr */
793 /* paravirt_ops.write_msr = vmi_wrmsr */
794 /* paravirt_ops.rdpmc = vmi_rdpmc */
795
772205f6 796 /* TR interface doesn't pass TR value, wrap */
93b1eab3 797 para_wrap(pv_cpu_ops.load_tr_desc, vmi_set_tr, set_tr, SetTR);
798
799 /* LDT is special, too */
800 para_wrap(pv_cpu_ops.set_ldt, vmi_set_ldt, _set_ldt, SetLDT);
801
802 para_fill(pv_cpu_ops.load_gdt, SetGDT);
803 para_fill(pv_cpu_ops.load_idt, SetIDT);
804 para_fill(pv_cpu_ops.store_gdt, GetGDT);
805 para_fill(pv_cpu_ops.store_idt, GetIDT);
806 para_fill(pv_cpu_ops.store_tr, GetTR);
807 pv_cpu_ops.load_tls = vmi_load_tls;
808 para_fill(pv_cpu_ops.write_ldt_entry, WriteLDTEntry);
809 para_wrap(pv_cpu_ops.write_gdt_entry, vmi_write_gdt_entry,
810 write_gdt_entry, WriteGDTEntry);
811 para_wrap(pv_cpu_ops.write_idt_entry, vmi_write_idt_entry,
812 write_idt_entry, WriteIDTEntry);
faca6227 813 para_wrap(pv_cpu_ops.load_sp0, vmi_load_sp0, set_kernel_stack, UpdateKernelStack);
814 para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask);
815 para_fill(pv_cpu_ops.io_delay, IODelay);
816
817 para_wrap(pv_cpu_ops.lazy_mode.enter, vmi_enter_lazy_cpu,
818 set_lazy_mode, SetLazyMode);
819 para_wrap(pv_cpu_ops.lazy_mode.leave, vmi_leave_lazy,
820 set_lazy_mode, SetLazyMode);
821
822 para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu,
823 set_lazy_mode, SetLazyMode);
824 para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy,
825 set_lazy_mode, SetLazyMode);
7ce0bcfd 826
772205f6 827 /* user and kernel flush are just handled with different flags to FlushTLB */
828 para_wrap(pv_mmu_ops.flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB);
829 para_wrap(pv_mmu_ops.flush_tlb_kernel, vmi_flush_tlb_kernel, _flush_tlb, FlushTLB);
830 para_fill(pv_mmu_ops.flush_tlb_single, InvalPage);
831
832 /*
833 * Until a standard flag format can be agreed on, we need to
834 * implement these as wrappers in Linux. Get the VMI ROM
835 * function pointers for the two backend calls.
836 */
837#ifdef CONFIG_X86_PAE
838 vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxELong);
839 vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxELong);
840#else
841 vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE);
842 vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE);
843#endif
7ce0bcfd 844
772205f6 845 if (vmi_ops.set_pte) {
846 pv_mmu_ops.set_pte = vmi_set_pte;
847 pv_mmu_ops.set_pte_at = vmi_set_pte_at;
848 pv_mmu_ops.set_pmd = vmi_set_pmd;
7ce0bcfd 849#ifdef CONFIG_X86_PAE
850 pv_mmu_ops.set_pte_atomic = vmi_set_pte_atomic;
851 pv_mmu_ops.set_pte_present = vmi_set_pte_present;
852 pv_mmu_ops.set_pud = vmi_set_pud;
853 pv_mmu_ops.pte_clear = vmi_pte_clear;
854 pv_mmu_ops.pmd_clear = vmi_pmd_clear;
7ce0bcfd 855#endif
856 }
857
858 if (vmi_ops.update_pte) {
859 pv_mmu_ops.pte_update = vmi_update_pte;
860 pv_mmu_ops.pte_update_defer = vmi_update_pte_defer;
861 }
862
863 vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
864 if (vmi_ops.allocate_page) {
865 pv_mmu_ops.alloc_pt = vmi_allocate_pt;
866 pv_mmu_ops.alloc_pd = vmi_allocate_pd;
867 pv_mmu_ops.alloc_pd_clone = vmi_allocate_pd_clone;
868 }
869
870 vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
871 if (vmi_ops.release_page) {
872 pv_mmu_ops.release_pt = vmi_release_pt;
873 pv_mmu_ops.release_pd = vmi_release_pd;
772205f6 874 }
875
876 /* Set linear is needed in all cases */
877 vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
878#ifdef CONFIG_HIGHPTE
879 if (vmi_ops.set_linear_mapping)
93b1eab3 880 pv_mmu_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
a27fe809 881#endif
772205f6 882
883 /*
884 * These MUST always be patched. Don't support indirect jumps
885 * through these operations, as the VMI interface may use either
886 * a jump or a call to get to these operations, depending on
887 * the backend. They are performance critical anyway, so requiring
888 * a patch is not a big problem.
889 */
6abcd98f 890 pv_cpu_ops.irq_enable_syscall_ret = (void *)0xfeedbab0;
93b1eab3 891 pv_cpu_ops.iret = (void *)0xbadbab0;
892
893#ifdef CONFIG_SMP
93b1eab3 894 para_wrap(pv_apic_ops.startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState);
895#endif
896
897#ifdef CONFIG_X86_LOCAL_APIC
898 para_fill(pv_apic_ops.apic_read, APICRead);
899 para_fill(pv_apic_ops.apic_write, APICWrite);
900 para_fill(pv_apic_ops.apic_write_atomic, APICWrite);
901#endif
902
903 /*
904 * Check for VMI timer functionality by probing for a cycle frequency method
905 */
906 reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency);
772205f6 907 if (!disable_vmi_timer && rel->type != VMI_RELOCATION_NONE) {
908 vmi_timer_ops.get_cycle_frequency = (void *)rel->eip;
909 vmi_timer_ops.get_cycle_counter =
910 vmi_get_function(VMI_CALL_GetCycleCounter);
911 vmi_timer_ops.get_wallclock =
912 vmi_get_function(VMI_CALL_GetWallclockTime);
913 vmi_timer_ops.wallclock_updated =
914 vmi_get_function(VMI_CALL_WallclockUpdated);
915 vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm);
916 vmi_timer_ops.cancel_alarm =
917 vmi_get_function(VMI_CALL_CancelAlarm);
918 pv_time_ops.time_init = vmi_time_init;
919 pv_time_ops.get_wallclock = vmi_get_wallclock;
920 pv_time_ops.set_wallclock = vmi_set_wallclock;
bbab4f3b 921#ifdef CONFIG_X86_LOCAL_APIC
922 pv_apic_ops.setup_boot_clock = vmi_time_bsp_init;
923 pv_apic_ops.setup_secondary_clock = vmi_time_ap_init;
bbab4f3b 924#endif
925 pv_time_ops.sched_clock = vmi_sched_clock;
926 pv_time_ops.get_cpu_khz = vmi_cpu_khz;
927
928 /* We have true wallclock functions; disable CMOS clock sync */
929 no_sync_cmos_clock = 1;
930 } else {
931 disable_noidle = 1;
932 disable_vmi_timer = 1;
bbab4f3b 933 }
772205f6 934
93b1eab3 935 para_fill(pv_irq_ops.safe_halt, Halt);
bbab4f3b 936
937 /*
938 * Alternative instruction rewriting doesn't happen soon enough
939 * to convert VMI_IRET to a call instead of a jump; so we have
940 * to do this before IRQs get reenabled. Fortunately, it is
941 * idempotent.
942 */
441d40dc 943 apply_paravirt(__parainstructions, __parainstructions_end);
944
945 vmi_bringup();
946
947 return 1;
948}
949
950#undef para_fill
951
952void __init vmi_init(void)
953{
954 unsigned long flags;
955
956 if (!vmi_rom)
957 probe_vmi_rom();
958 else
959 check_vmi_rom(vmi_rom);
960
 961 /* In case probing for or validating the ROM failed, bail */
962 if (!vmi_rom)
963 return;
964
965 reserve_top_address(-vmi_rom->virtual_top);
966
967 local_irq_save(flags);
968 activate_vmi();
969
970#ifdef CONFIG_X86_IO_APIC
772205f6 971 /* This is virtual hardware; timer routing is wired correctly */
972 no_timer_check = 1;
973#endif
974 local_irq_restore(flags & X86_EFLAGS_IF);
975}
976
977static int __init parse_vmi(char *arg)
978{
979 if (!arg)
980 return -EINVAL;
981
eda08b1b 982 if (!strcmp(arg, "disable_pge")) {
53756d37 983 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
984 disable_pge = 1;
985 } else if (!strcmp(arg, "disable_pse")) {
53756d37 986 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PSE);
987 disable_pse = 1;
988 } else if (!strcmp(arg, "disable_sep")) {
53756d37 989 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_SEP);
990 disable_sep = 1;
991 } else if (!strcmp(arg, "disable_tsc")) {
53756d37 992 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC);
993 disable_tsc = 1;
994 } else if (!strcmp(arg, "disable_mtrr")) {
53756d37 995 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_MTRR);
7ce0bcfd 996 disable_mtrr = 1;
997 } else if (!strcmp(arg, "disable_timer")) {
998 disable_vmi_timer = 1;
999 disable_noidle = 1;
1000 } else if (!strcmp(arg, "disable_noidle"))
1001 disable_noidle = 1;
1002 return 0;
1003}
1004
1005early_param("vmi", parse_vmi);