Commit | Line | Data |
---|---|---|
5ead97c8 JF |
1 | /* |
2 | * Core of Xen paravirt_ops implementation. | |
3 | * | |
4 | * This file contains the xen_paravirt_ops structure itself, and the | |
5 | * implementations for: | |
6 | * - privileged instructions | |
7 | * - interrupt flags | |
8 | * - segment operations | |
9 | * - booting and setup | |
10 | * | |
11 | * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 | |
12 | */ | |
13 | ||
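| /* |
|  * paravirt_ops is a table of function pointers which the core kernel |
|  * calls instead of the corresponding native privileged operations. |
|  * xen_start_kernel() at the bottom of this file installs |
|  * xen_paravirt_ops as that table before handing off to start_kernel(). |
|  */ |
| |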
14 | #include <linux/kernel.h> | |
15 | #include <linux/init.h> | |
16 | #include <linux/smp.h> | |
17 | #include <linux/preempt.h> | |
f120f13e | 18 | #include <linux/hardirq.h> |
5ead97c8 JF |
19 | #include <linux/percpu.h> |
20 | #include <linux/delay.h> | |
21 | #include <linux/start_kernel.h> | |
22 | #include <linux/sched.h> | |
23 | #include <linux/bootmem.h> | |
24 | #include <linux/module.h> | |
f4f97b3e JF |
25 | #include <linux/mm.h> |
26 | #include <linux/page-flags.h> | |
27 | #include <linux/highmem.h> | |
f87e4cac | 28 | #include <linux/smp.h> |
5ead97c8 JF |
29 | |
30 | #include <xen/interface/xen.h> | |
31 | #include <xen/interface/physdev.h> | |
32 | #include <xen/interface/vcpu.h> | |
33 | #include <xen/features.h> | |
34 | #include <xen/page.h> | |
35 | ||
36 | #include <asm/paravirt.h> | |
37 | #include <asm/page.h> | |
38 | #include <asm/xen/hypercall.h> | |
39 | #include <asm/xen/hypervisor.h> | |
40 | #include <asm/fixmap.h> | |
41 | #include <asm/processor.h> | |
42 | #include <asm/setup.h> | |
43 | #include <asm/desc.h> | |
44 | #include <asm/pgtable.h> | |
f87e4cac | 45 | #include <asm/tlbflush.h> |
5ead97c8 JF |
46 | |
47 | #include "xen-ops.h" | |
3b827c1b | 48 | #include "mmu.h" |
5ead97c8 JF |
49 | #include "multicalls.h" |
50 | ||
51 | EXPORT_SYMBOL_GPL(hypercall_page); | |
52 | ||
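| /* |
|  * Per-cpu Xen state: xen_vcpu caches a pointer to this CPU's vcpu_info |
|  * within the shared info page (see xen_vcpu_setup() below); xen_cr3 |
|  * remembers the cr3 value the kernel last loaded, so xen_read_cr3() can |
|  * return it without touching the real (machine-address) register; |
|  * xen_lazy_mode records the current lazy-batching state. |
|  */ |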
53 | DEFINE_PER_CPU(enum paravirt_lazy_mode, xen_lazy_mode); | |
54 | ||
55 | DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); | |
56 | DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info); | |
57 | DEFINE_PER_CPU(unsigned long, xen_cr3); | |
58 | ||
59 | struct start_info *xen_start_info; | |
60 | EXPORT_SYMBOL_GPL(xen_start_info); | |
61 | ||
f87e4cac | 62 | void xen_vcpu_setup(int cpu) |
5ead97c8 JF |
63 | { |
64 | per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; | |
65 | } | |
66 | ||
67 | static void __init xen_banner(void) | |
68 | { | |
69 | printk(KERN_INFO "Booting paravirtualized kernel on %s\n", | |
70 | paravirt_ops.name); | |
71 | printk(KERN_INFO "Hypervisor signature: %s\n", xen_start_info->magic); | |
72 | } | |
73 | ||
74 | static void xen_cpuid(unsigned int *eax, unsigned int *ebx, | |
75 | unsigned int *ecx, unsigned int *edx) | |
76 | { | |
77 | unsigned maskedx = ~0; | |
78 | ||
79 | /* | |
80 | * Mask out inconvenient features, to try and disable as many | |
81 | * unsupported kernel subsystems as possible. | |
82 | */ | |
83 | if (*eax == 1) | |
84 | maskedx = ~((1 << X86_FEATURE_APIC) | /* disable APIC */ | |
85 | (1 << X86_FEATURE_ACPI) | /* disable ACPI */ | |
86 | (1 << X86_FEATURE_ACC)); /* thermal monitoring */ | |
87 | ||
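| /* |
|  * The XEN_EMULATE_PREFIX marker makes Xen trap and emulate the cpuid |
|  * below; a plain cpuid would execute natively and bypass the |
|  * hypervisor's own feature filtering. |
|  */ |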
88 | asm(XEN_EMULATE_PREFIX "cpuid" | |
89 | : "=a" (*eax), | |
90 | "=b" (*ebx), | |
91 | "=c" (*ecx), | |
92 | "=d" (*edx) | |
93 | : "0" (*eax), "2" (*ecx)); | |
94 | *edx &= maskedx; | |
95 | } | |
96 | ||
97 | static void xen_set_debugreg(int reg, unsigned long val) | |
98 | { | |
99 | HYPERVISOR_set_debugreg(reg, val); | |
100 | } | |
101 | ||
102 | static unsigned long xen_get_debugreg(int reg) | |
103 | { | |
104 | return HYPERVISOR_get_debugreg(reg); | |
105 | } | |
106 | ||
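| /* |
|  * A PV guest has no direct control of EFLAGS.IF; event (i.e. interrupt) |
|  * delivery is controlled by vcpu_info->evtchn_upcall_mask instead, |
|  * where mask == 1 means events are blocked.  The helpers below |
|  * translate between that mask and the IF-style flags the rest of the |
|  * kernel expects. |
|  */ |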
107 | static unsigned long xen_save_fl(void) | |
108 | { | |
109 | struct vcpu_info *vcpu; | |
110 | unsigned long flags; | |
111 | ||
5ead97c8 | 112 | vcpu = x86_read_percpu(xen_vcpu); |
f120f13e | 113 | |
5ead97c8 JF |
114 | /* flag has opposite sense of mask */ |
115 | flags = !vcpu->evtchn_upcall_mask; | |
5ead97c8 JF |
116 | |
117 | /* convert to IF type flag | |
118 | -0 -> 0x00000000 | |
119 | -1 -> 0xffffffff | |
120 | */ | |
121 | return (-flags) & X86_EFLAGS_IF; | |
122 | } | |
123 | ||
124 | static void xen_restore_fl(unsigned long flags) | |
125 | { | |
126 | struct vcpu_info *vcpu; | |
127 | ||
5ead97c8 JF |
128 | /* convert from IF type flag */ |
129 | flags = !(flags & X86_EFLAGS_IF); | |
f120f13e JF |
130 | |
131 | /* There's a one instruction preempt window here. We need to | |
132 | make sure we don't switch CPUs between getting the vcpu |
133 | pointer and updating the mask. */ | |
134 | preempt_disable(); | |
5ead97c8 JF |
135 | vcpu = x86_read_percpu(xen_vcpu); |
136 | vcpu->evtchn_upcall_mask = flags; | |
f120f13e | 137 | preempt_enable_no_resched(); |
5ead97c8 | 138 | |
f120f13e JF |
139 | /* Doesn't matter if we get preempted here, because any |
140 | pending event will get dealt with anyway. */ | |
5ead97c8 | 141 | |
f120f13e JF |
142 | if (flags == 0) { |
143 | preempt_check_resched(); | |
144 | barrier(); /* unmask then check (avoid races) */ | |
5ead97c8 JF |
145 | if (unlikely(vcpu->evtchn_upcall_pending)) |
146 | force_evtchn_callback(); | |
f120f13e | 147 | } |
5ead97c8 JF |
148 | } |
149 | ||
150 | static void xen_irq_disable(void) | |
151 | { | |
f120f13e JF |
152 | /* There's a one instruction preempt window here. We need to |
153 | make sure we don't switch CPUs between getting the vcpu |
154 | pointer and updating the mask. */ | |
5ead97c8 | 155 | preempt_disable(); |
f120f13e | 156 | x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1; |
5ead97c8 JF |
157 | preempt_enable_no_resched(); |
158 | } | |
159 | ||
160 | static void xen_irq_enable(void) | |
161 | { | |
162 | struct vcpu_info *vcpu; | |
163 | ||
f120f13e JF |
164 | /* There's a one instruction preempt window here. We need to |
165 | make sure we don't switch CPUs between getting the vcpu |
166 | pointer and updating the mask. */ | |
5ead97c8 JF |
167 | preempt_disable(); |
168 | vcpu = x86_read_percpu(xen_vcpu); | |
169 | vcpu->evtchn_upcall_mask = 0; | |
f120f13e | 170 | preempt_enable_no_resched(); |
5ead97c8 | 171 | |
f120f13e JF |
172 | /* Doesn't matter if we get preempted here, because any |
173 | pending event will get dealt with anyway. */ | |
5ead97c8 | 174 | |
f120f13e | 175 | barrier(); /* unmask then check (avoid races) */ |
5ead97c8 JF |
176 | if (unlikely(vcpu->evtchn_upcall_pending)) |
177 | force_evtchn_callback(); | |
5ead97c8 JF |
178 | } |
179 | ||
180 | static void xen_safe_halt(void) | |
181 | { | |
182 | /* Blocking includes an implicit local_irq_enable(). */ | |
183 | if (HYPERVISOR_sched_op(SCHEDOP_block, 0) != 0) | |
184 | BUG(); | |
185 | } | |
186 | ||
187 | static void xen_halt(void) | |
188 | { | |
189 | if (irqs_disabled()) | |
190 | HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); | |
191 | else | |
192 | xen_safe_halt(); | |
193 | } | |
194 | ||
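| /* |
|  * Lazy modes batch CPU-state and MMU updates: while a lazy mode is |
|  * active, operations queued with xen_mc_entry()/MULTI_*() are buffered |
|  * and only pushed to the hypervisor by xen_mc_flush(), rather than |
|  * costing one hypercall each.  xen_mc_issue() flushes immediately |
|  * unless the matching lazy mode is in force. |
|  */ |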
195 | static void xen_set_lazy_mode(enum paravirt_lazy_mode mode) | |
196 | { | |
f120f13e JF |
197 | BUG_ON(preemptible()); |
198 | ||
5ead97c8 JF |
199 | switch (mode) { |
200 | case PARAVIRT_LAZY_NONE: | |
201 | BUG_ON(x86_read_percpu(xen_lazy_mode) == PARAVIRT_LAZY_NONE); | |
202 | break; | |
203 | ||
204 | case PARAVIRT_LAZY_MMU: | |
205 | case PARAVIRT_LAZY_CPU: | |
206 | BUG_ON(x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE); | |
207 | break; | |
208 | ||
209 | case PARAVIRT_LAZY_FLUSH: | |
210 | /* flush if necessary, but don't change state */ | |
211 | if (x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE) | |
212 | xen_mc_flush(); | |
213 | return; | |
214 | } | |
215 | ||
216 | xen_mc_flush(); | |
217 | x86_write_percpu(xen_lazy_mode, mode); | |
218 | } | |
219 | ||
220 | static unsigned long xen_store_tr(void) | |
221 | { | |
222 | return 0; | |
223 | } | |
224 | ||
225 | static void xen_set_ldt(const void *addr, unsigned entries) | |
226 | { | |
227 | unsigned long linear_addr = (unsigned long)addr; | |
228 | struct mmuext_op *op; | |
229 | struct multicall_space mcs = xen_mc_entry(sizeof(*op)); | |
230 | ||
231 | op = mcs.args; | |
232 | op->cmd = MMUEXT_SET_LDT; | |
233 | if (linear_addr) { | |
234 | /* ldt may be vmalloced, use arbitrary_virt_to_machine */ |
235 | xmaddr_t maddr; | |
236 | maddr = arbitrary_virt_to_machine((unsigned long)addr); | |
237 | linear_addr = (unsigned long)maddr.maddr; | |
238 | } | |
239 | op->arg1.linear_addr = linear_addr; | |
240 | op->arg2.nr_ents = entries; | |
241 | ||
242 | MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); | |
243 | ||
244 | xen_mc_issue(PARAVIRT_LAZY_CPU); | |
245 | } | |
246 | ||
247 | static void xen_load_gdt(const struct Xgt_desc_struct *dtr) | |
248 | { | |
249 | unsigned long *frames; | |
250 | unsigned long va = dtr->address; | |
251 | unsigned int size = dtr->size + 1; | |
252 | unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE; | |
253 | int f; | |
254 | struct multicall_space mcs; | |
255 | ||
256 | /* A GDT can be up to 64k in size, which corresponds to 8192 | |
257 | 8-byte entries, or 16 4k pages. */ | |
258 | ||
259 | BUG_ON(size > 65536); | |
260 | BUG_ON(va & ~PAGE_MASK); | |
261 | ||
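| /* |
|  * Xen validates descriptor tables itself, so each frame backing the |
|  * GDT is converted to a machine frame number and must be mapped |
|  * read-only in the guest before being handed to set_gdt. |
|  */ |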
262 | mcs = xen_mc_entry(sizeof(*frames) * pages); | |
263 | frames = mcs.args; | |
264 | ||
265 | for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { | |
266 | frames[f] = virt_to_mfn(va); | |
267 | make_lowmem_page_readonly((void *)va); | |
268 | } | |
269 | ||
270 | MULTI_set_gdt(mcs.mc, frames, size / sizeof(struct desc_struct)); | |
271 | ||
272 | xen_mc_issue(PARAVIRT_LAZY_CPU); | |
273 | } | |
274 | ||
275 | static void load_TLS_descriptor(struct thread_struct *t, | |
276 | unsigned int cpu, unsigned int i) | |
277 | { | |
278 | struct desc_struct *gdt = get_cpu_gdt_table(cpu); | |
279 | xmaddr_t maddr = virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]); | |
280 | struct multicall_space mc = __xen_mc_entry(0); | |
281 | ||
282 | MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]); | |
283 | } | |
284 | ||
285 | static void xen_load_tls(struct thread_struct *t, unsigned int cpu) | |
286 | { | |
287 | xen_mc_batch(); | |
288 | ||
289 | load_TLS_descriptor(t, cpu, 0); | |
290 | load_TLS_descriptor(t, cpu, 1); | |
291 | load_TLS_descriptor(t, cpu, 2); | |
292 | ||
293 | xen_mc_issue(PARAVIRT_LAZY_CPU); | |
294 | } | |
295 | ||
296 | static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum, | |
297 | u32 low, u32 high) | |
298 | { | |
299 | unsigned long lp = (unsigned long)&dt[entrynum]; | |
300 | xmaddr_t mach_lp = virt_to_machine(lp); | |
301 | u64 entry = (u64)high << 32 | low; | |
302 | ||
f120f13e JF |
303 | preempt_disable(); |
304 | ||
5ead97c8 JF |
305 | xen_mc_flush(); |
306 | if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry)) | |
307 | BUG(); | |
f120f13e JF |
308 | |
309 | preempt_enable(); | |
5ead97c8 JF |
310 | } |
311 | ||
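| /* |
|  * Convert an IDT gate (given as its low/high descriptor words) into a |
|  * Xen trap_info entry.  Only 32-bit trap gates (type 0xf) and |
|  * interrupt gates (type 0xe) are passed through; flags carries the |
|  * DPL, with bit 2 set when event delivery should be disabled on entry |
|  * (the interrupt-gate "clear IF" behaviour).  Returns 1 if the entry |
|  * was converted. |
|  */ |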
312 | static int cvt_gate_to_trap(int vector, u32 low, u32 high, | |
313 | struct trap_info *info) | |
314 | { | |
315 | u8 type, dpl; | |
316 | ||
317 | type = (high >> 8) & 0x1f; | |
318 | dpl = (high >> 13) & 3; | |
319 | ||
320 | if (type != 0xf && type != 0xe) | |
321 | return 0; | |
322 | ||
323 | info->vector = vector; | |
324 | info->address = (high & 0xffff0000) | (low & 0x0000ffff); | |
325 | info->cs = low >> 16; | |
326 | info->flags = dpl; | |
327 | /* interrupt gates clear IF */ | |
328 | if (type == 0xe) | |
329 | info->flags |= 4; | |
330 | ||
331 | return 1; | |
332 | } | |
333 | ||
334 | /* Locations of each CPU's IDT */ | |
335 | static DEFINE_PER_CPU(struct Xgt_desc_struct, idt_desc); | |
336 | ||
337 | /* Set an IDT entry. If the entry is part of the current IDT, then | |
338 | also update Xen. */ | |
339 | static void xen_write_idt_entry(struct desc_struct *dt, int entrynum, | |
340 | u32 low, u32 high) | |
341 | { | |
5ead97c8 | 342 | unsigned long p = (unsigned long)&dt[entrynum]; |
f120f13e JF |
343 | unsigned long start, end; |
344 | ||
345 | preempt_disable(); | |
346 | ||
347 | start = __get_cpu_var(idt_desc).address; | |
348 | end = start + __get_cpu_var(idt_desc).size + 1; | |
5ead97c8 JF |
349 | |
350 | xen_mc_flush(); | |
351 | ||
352 | write_dt_entry(dt, entrynum, low, high); | |
353 | ||
354 | if (p >= start && (p + 8) <= end) { | |
355 | struct trap_info info[2]; | |
356 | ||
357 | info[1].address = 0; | |
358 | ||
359 | if (cvt_gate_to_trap(entrynum, low, high, &info[0])) | |
360 | if (HYPERVISOR_set_trap_table(info)) | |
361 | BUG(); | |
362 | } | |
f120f13e JF |
363 | |
364 | preempt_enable(); | |
5ead97c8 JF |
365 | } |
366 | ||
f87e4cac JF |
367 | static void xen_convert_trap_info(const struct Xgt_desc_struct *desc, |
368 | struct trap_info *traps) | |
5ead97c8 | 369 | { |
5ead97c8 JF |
370 | unsigned in, out, count; |
371 | ||
5ead97c8 JF |
372 | count = (desc->size+1) / 8; |
373 | BUG_ON(count > 256); | |
374 | ||
5ead97c8 JF |
375 | for (in = out = 0; in < count; in++) { |
376 | const u32 *entry = (u32 *)(desc->address + in * 8); | |
377 | ||
378 | if (cvt_gate_to_trap(in, entry[0], entry[1], &traps[out])) | |
379 | out++; | |
380 | } | |
381 | traps[out].address = 0; | |
f87e4cac JF |
382 | } |
383 | ||
384 | void xen_copy_trap_info(struct trap_info *traps) | |
385 | { | |
f120f13e | 386 | const struct Xgt_desc_struct *desc = &__get_cpu_var(idt_desc); |
f87e4cac JF |
387 | |
388 | xen_convert_trap_info(desc, traps); | |
f87e4cac JF |
389 | } |
390 | ||
391 | /* Load a new IDT into Xen. In principle this can be per-CPU, so we | |
392 | hold a spinlock to protect the static traps[] array (static because | |
393 | it avoids allocation, and saves stack space). */ | |
394 | static void xen_load_idt(const struct Xgt_desc_struct *desc) | |
395 | { | |
396 | static DEFINE_SPINLOCK(lock); | |
397 | static struct trap_info traps[257]; | |
f87e4cac JF |
398 | |
399 | spin_lock(&lock); | |
400 | ||
f120f13e JF |
401 | __get_cpu_var(idt_desc) = *desc; |
402 | ||
f87e4cac | 403 | xen_convert_trap_info(desc, traps); |
5ead97c8 JF |
404 | |
405 | xen_mc_flush(); | |
406 | if (HYPERVISOR_set_trap_table(traps)) | |
407 | BUG(); | |
408 | ||
409 | spin_unlock(&lock); | |
410 | } | |
411 | ||
412 | /* Write a GDT descriptor entry. Ignore LDT descriptors, since | |
413 | they're handled differently. */ | |
414 | static void xen_write_gdt_entry(struct desc_struct *dt, int entry, | |
415 | u32 low, u32 high) | |
416 | { | |
f120f13e JF |
417 | preempt_disable(); |
418 | ||
5ead97c8 JF |
419 | switch ((high >> 8) & 0xff) { |
420 | case DESCTYPE_LDT: | |
421 | case DESCTYPE_TSS: | |
422 | /* ignore */ | |
423 | break; | |
424 | ||
425 | default: { | |
426 | xmaddr_t maddr = virt_to_machine(&dt[entry]); | |
427 | u64 desc = (u64)high << 32 | low; | |
428 | ||
429 | xen_mc_flush(); | |
430 | if (HYPERVISOR_update_descriptor(maddr.maddr, desc)) | |
431 | BUG(); | |
432 | } | |
433 | ||
434 | } | |
f120f13e JF |
435 | |
436 | preempt_enable(); | |
5ead97c8 JF |
437 | } |
438 | ||
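| /* |
|  * On native hardware load_esp0 just updates the TSS.  Under Xen the |
|  * hypervisor owns the TSS used for ring transitions, so the kernel |
|  * stack is registered with a stack_switch operation instead (batched |
|  * as a multicall here). |
|  */ |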
439 | static void xen_load_esp0(struct tss_struct *tss, | |
f120f13e | 440 | struct thread_struct *thread) |
5ead97c8 JF |
441 | { |
442 | struct multicall_space mcs = xen_mc_entry(0); | |
443 | MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->esp0); | |
444 | xen_mc_issue(PARAVIRT_LAZY_CPU); | |
445 | } | |
446 | ||
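| /* |
|  * mask is an EFLAGS-style value with IOPL in bits 12-13.  A PV kernel |
|  * does not run in ring 0, so an IOPL of 0 is mapped to 1, presumably |
|  * so the kernel keeps port I/O access while ring 3 still does not. |
|  */ |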
447 | static void xen_set_iopl_mask(unsigned mask) | |
448 | { | |
449 | struct physdev_set_iopl set_iopl; | |
450 | ||
451 | /* Force the change at ring 0. */ | |
452 | set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3; | |
453 | HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl); | |
454 | } | |
455 | ||
456 | static void xen_io_delay(void) | |
457 | { | |
458 | } | |
459 | ||
460 | #ifdef CONFIG_X86_LOCAL_APIC | |
461 | static unsigned long xen_apic_read(unsigned long reg) | |
462 | { | |
463 | return 0; | |
464 | } | |
f87e4cac JF |
465 | |
466 | static void xen_apic_write(unsigned long reg, unsigned long val) | |
467 | { | |
468 | /* Warn to see if there are any stray references */ | |
469 | WARN_ON(1); | |
470 | } | |
5ead97c8 JF |
471 | #endif |
472 | ||
473 | static void xen_flush_tlb(void) | |
474 | { | |
475 | struct mmuext_op op; | |
476 | ||
477 | op.cmd = MMUEXT_TLB_FLUSH_LOCAL; | |
478 | if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) | |
479 | BUG(); | |
480 | } | |
481 | ||
482 | static void xen_flush_tlb_single(unsigned long addr) | |
483 | { | |
484 | struct mmuext_op op; | |
485 | ||
486 | op.cmd = MMUEXT_INVLPG_LOCAL; | |
487 | op.arg1.linear_addr = addr & PAGE_MASK; | |
488 | if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) | |
489 | BUG(); | |
490 | } | |
491 | ||
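| /* |
|  * Remote TLB flushes don't need IPIs under Xen: a single |
|  * MMUEXT_TLB_FLUSH_MULTI or MMUEXT_INVLPG_MULTI hypercall takes a |
|  * vcpu mask and the hypervisor performs the shootdown on the other |
|  * vcpus. |
|  */ |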
f87e4cac JF |
492 | static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm, |
493 | unsigned long va) | |
494 | { | |
495 | struct mmuext_op op; | |
496 | cpumask_t cpumask = *cpus; | |
497 | ||
498 | /* | |
499 | * A couple of (to be removed) sanity checks: | |
500 | * | |
501 | * - current CPU must not be in mask | |
502 | * - mask must exist :) | |
503 | */ | |
504 | BUG_ON(cpus_empty(cpumask)); | |
505 | BUG_ON(cpu_isset(smp_processor_id(), cpumask)); | |
506 | BUG_ON(!mm); | |
507 | ||
508 | /* If a CPU which we ran on has gone down, OK. */ | |
509 | cpus_and(cpumask, cpumask, cpu_online_map); | |
510 | if (cpus_empty(cpumask)) | |
511 | return; | |
512 | ||
513 | if (va == TLB_FLUSH_ALL) { | |
514 | op.cmd = MMUEXT_TLB_FLUSH_MULTI; | |
515 | op.arg2.vcpumask = (void *)cpus; | |
516 | } else { | |
517 | op.cmd = MMUEXT_INVLPG_MULTI; | |
518 | op.arg1.linear_addr = va; | |
519 | op.arg2.vcpumask = (void *)cpus; | |
520 | } | |
521 | ||
522 | if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) | |
523 | BUG(); | |
524 | } | |
525 | ||
5ead97c8 JF |
526 | static unsigned long xen_read_cr2(void) |
527 | { | |
528 | return x86_read_percpu(xen_vcpu)->arch.cr2; | |
529 | } | |
530 | ||
531 | static void xen_write_cr4(unsigned long cr4) | |
532 | { | |
533 | /* never allow TSC to be disabled */ | |
534 | native_write_cr4(cr4 & ~X86_CR4_TSD); | |
535 | } | |
536 | ||
5ead97c8 JF |
537 | static unsigned long xen_read_cr3(void) |
538 | { | |
539 | return x86_read_percpu(xen_cr3); | |
540 | } | |
541 | ||
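| /* |
|  * cr3 here is a guest pseudo-physical address; Xen needs the machine |
|  * frame, so it is converted with pfn_to_mfn() and loaded via |
|  * MMUEXT_NEW_BASEPTR.  The per-cpu xen_cr3 cache is updated first so |
|  * xen_read_cr3() stays consistent. |
|  */ |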
542 | static void xen_write_cr3(unsigned long cr3) | |
543 | { | |
f120f13e JF |
544 | BUG_ON(preemptible()); |
545 | ||
5ead97c8 JF |
546 | if (cr3 == x86_read_percpu(xen_cr3)) { |
547 | /* just a simple tlb flush */ | |
548 | xen_flush_tlb(); | |
549 | return; | |
550 | } | |
551 | ||
552 | x86_write_percpu(xen_cr3, cr3); | |
553 | ||
554 | ||
555 | { | |
556 | struct mmuext_op *op; | |
557 | struct multicall_space mcs = xen_mc_entry(sizeof(*op)); | |
558 | unsigned long mfn = pfn_to_mfn(PFN_DOWN(cr3)); | |
559 | ||
560 | op = mcs.args; | |
561 | op->cmd = MMUEXT_NEW_BASEPTR; | |
562 | op->arg1.mfn = mfn; | |
563 | ||
564 | MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); | |
565 | ||
566 | xen_mc_issue(PARAVIRT_LAZY_CPU); | |
567 | } | |
568 | } | |
569 | ||
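| /* |
|  * Pagetable handling: once a pagetable is pinned, Xen has validated it |
|  * and requires every page in it to be mapped read-only in the guest |
|  * (updates then go through hypercalls).  The hooks below make pages |
|  * read-only as they are attached to a pinned pagetable, and writable |
|  * again when they are released. |
|  */ |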
f4f97b3e JF |
570 | /* Early in boot, while setting up the initial pagetable, assume |
571 | everything is pinned. */ | |
9a4029fd | 572 | static __init void xen_alloc_pt_init(struct mm_struct *mm, u32 pfn) |
5ead97c8 | 573 | { |
f4f97b3e | 574 | BUG_ON(mem_map); /* should only be used early */ |
5ead97c8 JF |
575 | make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); |
576 | } | |
577 | ||
f4f97b3e JF |
578 | /* This needs to make sure the new pte page is pinned iff it's being |
579 | attached to a pinned pagetable. */ | |
580 | static void xen_alloc_pt(struct mm_struct *mm, u32 pfn) | |
5ead97c8 | 581 | { |
f4f97b3e | 582 | struct page *page = pfn_to_page(pfn); |
5ead97c8 | 583 | |
f4f97b3e JF |
584 | if (PagePinned(virt_to_page(mm->pgd))) { |
585 | SetPagePinned(page); | |
586 | ||
587 | if (!PageHighMem(page)) | |
588 | make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); | |
589 | else | |
590 | /* make sure there are no stray mappings of | |
591 | this page */ | |
592 | kmap_flush_unused(); | |
593 | } | |
5ead97c8 JF |
594 | } |
595 | ||
f4f97b3e | 596 | /* This should never happen until we're OK to use struct page */ |
5ead97c8 JF |
597 | static void xen_release_pt(u32 pfn) |
598 | { | |
f4f97b3e JF |
599 | struct page *page = pfn_to_page(pfn); |
600 | ||
601 | if (PagePinned(page)) { | |
602 | if (!PageHighMem(page)) | |
603 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); | |
604 | } | |
5ead97c8 JF |
605 | } |
606 | ||
f4f97b3e JF |
607 | #ifdef CONFIG_HIGHPTE |
608 | static void *xen_kmap_atomic_pte(struct page *page, enum km_type type) | |
5ead97c8 | 609 | { |
f4f97b3e JF |
610 | pgprot_t prot = PAGE_KERNEL; |
611 | ||
612 | if (PagePinned(page)) | |
613 | prot = PAGE_KERNEL_RO; | |
614 | ||
615 | if (0 && PageHighMem(page)) | |
616 | printk("mapping highpte %lx type %d prot %s\n", | |
617 | page_to_pfn(page), type, | |
618 | (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ"); | |
619 | ||
620 | return kmap_atomic_prot(page, type, prot); | |
5ead97c8 | 621 | } |
f4f97b3e | 622 | #endif |
5ead97c8 | 623 | |
9a4029fd JF |
624 | static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) |
625 | { | |
626 | /* If there's an existing pte, then don't allow _PAGE_RW to be set */ | |
627 | if (pte_val_ma(*ptep) & _PAGE_PRESENT) | |
628 | pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & | |
629 | pte_val_ma(pte)); | |
630 | ||
631 | return pte; | |
632 | } | |
633 | ||
634 | /* Init-time set_pte while constructing initial pagetables, which | |
635 | doesn't allow RO pagetable pages to be remapped RW */ | |
636 | static __init void xen_set_pte_init(pte_t *ptep, pte_t pte) | |
637 | { | |
638 | pte = mask_rw_pte(ptep, pte); | |
639 | ||
640 | xen_set_pte(ptep, pte); | |
641 | } | |
642 | ||
5ead97c8 JF |
643 | static __init void xen_pagetable_setup_start(pgd_t *base) |
644 | { | |
645 | pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base; | |
646 | ||
9a4029fd JF |
647 | /* special set_pte for pagetable initialization */ |
648 | paravirt_ops.set_pte = xen_set_pte_init; | |
649 | ||
5ead97c8 JF |
650 | init_mm.pgd = base; |
651 | /* | |
652 | * copy top-level of Xen-supplied pagetable into place. For | |
653 | * !PAE we can use this as-is, but for PAE it is a stand-in | |
654 | * while we copy the pmd pages. | |
655 | */ | |
656 | memcpy(base, xen_pgd, PTRS_PER_PGD * sizeof(pgd_t)); | |
657 | ||
658 | if (PTRS_PER_PMD > 1) { | |
659 | int i; | |
660 | /* | |
661 | * For PAE, need to allocate new pmds, rather than | |
662 | * share Xen's, since Xen doesn't like pmd's being | |
663 | * shared between address spaces. | |
664 | */ | |
665 | for (i = 0; i < PTRS_PER_PGD; i++) { | |
666 | if (pgd_val_ma(xen_pgd[i]) & _PAGE_PRESENT) { | |
667 | pmd_t *pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE); | |
668 | ||
669 | memcpy(pmd, (void *)pgd_page_vaddr(xen_pgd[i]), | |
670 | PAGE_SIZE); | |
671 | ||
f4f97b3e | 672 | make_lowmem_page_readonly(pmd); |
5ead97c8 JF |
673 | |
674 | set_pgd(&base[i], __pgd(1 + __pa(pmd))); | |
675 | } else | |
676 | pgd_clear(&base[i]); | |
677 | } | |
678 | } | |
679 | ||
680 | /* make sure zero_page is mapped RO so we can use it in pagetables */ | |
681 | make_lowmem_page_readonly(empty_zero_page); | |
682 | make_lowmem_page_readonly(base); | |
683 | /* | |
684 | * Switch to new pagetable. This is done before | |
685 | * pagetable_init has done anything so that the new pages | |
686 | * added to the table can be prepared properly for Xen. | |
687 | */ | |
688 | xen_write_cr3(__pa(base)); | |
689 | } | |
690 | ||
691 | static __init void xen_pagetable_setup_done(pgd_t *base) | |
692 | { | |
f4f97b3e JF |
693 | /* This will work as long as patching hasn't happened yet |
694 | (which it hasn't) */ | |
695 | paravirt_ops.alloc_pt = xen_alloc_pt; | |
9a4029fd | 696 | paravirt_ops.set_pte = xen_set_pte; |
f4f97b3e | 697 | |
5ead97c8 JF |
698 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { |
699 | /* | |
700 | * Create a mapping for the shared info page. | |
701 | * Should be set_fixmap(), but shared_info is a machine | |
702 | * address with no corresponding pseudo-phys address. | |
703 | */ | |
5ead97c8 JF |
704 | set_pte_mfn(fix_to_virt(FIX_PARAVIRT_BOOTMAP), |
705 | PFN_DOWN(xen_start_info->shared_info), | |
706 | PAGE_KERNEL); | |
5ead97c8 JF |
707 | |
708 | HYPERVISOR_shared_info = | |
709 | (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP); | |
710 | ||
711 | } else | |
712 | HYPERVISOR_shared_info = | |
713 | (struct shared_info *)__va(xen_start_info->shared_info); | |
714 | ||
f4f97b3e JF |
715 | /* Actually pin the pagetable down, but we can't set PG_pinned |
716 | yet because the page structures don't exist yet. */ | |
717 | { | |
718 | struct mmuext_op op; | |
719 | #ifdef CONFIG_X86_PAE | |
720 | op.cmd = MMUEXT_PIN_L3_TABLE; | |
721 | #else | |
722 | op.cmd = MMUEXT_PIN_L2_TABLE; /* two-level pagetable when !PAE */ | |
723 | #endif | |
724 | op.arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(base))); | |
725 | if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) | |
726 | BUG(); | |
727 | } | |
5ead97c8 JF |
728 | |
729 | xen_vcpu_setup(smp_processor_id()); | |
730 | } | |
731 | ||
732 | static const struct paravirt_ops xen_paravirt_ops __initdata = { | |
733 | .paravirt_enabled = 1, | |
734 | .shared_kernel_pmd = 0, | |
735 | ||
736 | .name = "Xen", | |
737 | .banner = xen_banner, | |
738 | ||
739 | .patch = paravirt_patch_default, | |
740 | ||
741 | .memory_setup = xen_memory_setup, | |
742 | .arch_setup = xen_arch_setup, | |
e46cdb66 | 743 | .init_IRQ = xen_init_IRQ, |
f4f97b3e | 744 | .post_allocator_init = xen_mark_init_mm_pinned, |
5ead97c8 | 745 | |
15c84731 JF |
746 | .time_init = xen_time_init, |
747 | .set_wallclock = xen_set_wallclock, | |
748 | .get_wallclock = xen_get_wallclock, | |
749 | .get_cpu_khz = xen_cpu_khz, | |
ab550288 | 750 | .sched_clock = xen_sched_clock, |
15c84731 | 751 | |
5ead97c8 JF |
752 | .cpuid = xen_cpuid, |
753 | ||
754 | .set_debugreg = xen_set_debugreg, | |
755 | .get_debugreg = xen_get_debugreg, | |
756 | ||
757 | .clts = native_clts, | |
758 | ||
759 | .read_cr0 = native_read_cr0, | |
760 | .write_cr0 = native_write_cr0, | |
761 | ||
762 | .read_cr2 = xen_read_cr2, | |
763 | .write_cr2 = native_write_cr2, | |
764 | ||
765 | .read_cr3 = xen_read_cr3, | |
766 | .write_cr3 = xen_write_cr3, | |
767 | ||
768 | .read_cr4 = native_read_cr4, | |
769 | .read_cr4_safe = native_read_cr4_safe, | |
770 | .write_cr4 = xen_write_cr4, | |
771 | ||
772 | .save_fl = xen_save_fl, | |
773 | .restore_fl = xen_restore_fl, | |
774 | .irq_disable = xen_irq_disable, | |
775 | .irq_enable = xen_irq_enable, | |
776 | .safe_halt = xen_safe_halt, | |
777 | .halt = xen_halt, | |
778 | .wbinvd = native_wbinvd, | |
779 | ||
780 | .read_msr = native_read_msr_safe, | |
781 | .write_msr = native_write_msr_safe, | |
782 | .read_tsc = native_read_tsc, | |
783 | .read_pmc = native_read_pmc, | |
784 | ||
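| /* iret can't atomically restore flags and re-enable event delivery, |
|    so it is routed straight to the iret hypercall stub in the |
|    hypercall page. */ |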
785 | .iret = (void *)&hypercall_page[__HYPERVISOR_iret], | |
786 | .irq_enable_sysexit = NULL, /* never called */ | |
787 | ||
788 | .load_tr_desc = paravirt_nop, | |
789 | .set_ldt = xen_set_ldt, | |
790 | .load_gdt = xen_load_gdt, | |
791 | .load_idt = xen_load_idt, | |
792 | .load_tls = xen_load_tls, | |
793 | ||
794 | .store_gdt = native_store_gdt, | |
795 | .store_idt = native_store_idt, | |
796 | .store_tr = xen_store_tr, | |
797 | ||
798 | .write_ldt_entry = xen_write_ldt_entry, | |
799 | .write_gdt_entry = xen_write_gdt_entry, | |
800 | .write_idt_entry = xen_write_idt_entry, | |
801 | .load_esp0 = xen_load_esp0, | |
802 | ||
803 | .set_iopl_mask = xen_set_iopl_mask, | |
804 | .io_delay = xen_io_delay, | |
805 | ||
806 | #ifdef CONFIG_X86_LOCAL_APIC | |
f87e4cac JF |
807 | .apic_write = xen_apic_write, |
808 | .apic_write_atomic = xen_apic_write, | |
5ead97c8 JF |
809 | .apic_read = xen_apic_read, |
810 | .setup_boot_clock = paravirt_nop, | |
811 | .setup_secondary_clock = paravirt_nop, | |
812 | .startup_ipi_hook = paravirt_nop, | |
813 | #endif | |
814 | ||
815 | .flush_tlb_user = xen_flush_tlb, | |
816 | .flush_tlb_kernel = xen_flush_tlb, | |
817 | .flush_tlb_single = xen_flush_tlb_single, | |
f87e4cac | 818 | .flush_tlb_others = xen_flush_tlb_others, |
5ead97c8 JF |
819 | |
820 | .pte_update = paravirt_nop, | |
821 | .pte_update_defer = paravirt_nop, | |
822 | ||
823 | .pagetable_setup_start = xen_pagetable_setup_start, | |
824 | .pagetable_setup_done = xen_pagetable_setup_done, | |
825 | ||
f4f97b3e | 826 | .alloc_pt = xen_alloc_pt_init, |
5ead97c8 | 827 | .release_pt = xen_release_pt, |
f4f97b3e JF |
828 | .alloc_pd = paravirt_nop, |
829 | .alloc_pd_clone = paravirt_nop, | |
830 | .release_pd = paravirt_nop, | |
831 | ||
832 | #ifdef CONFIG_HIGHPTE | |
833 | .kmap_atomic_pte = xen_kmap_atomic_pte, | |
834 | #endif | |
5ead97c8 | 835 | |
9a4029fd | 836 | .set_pte = NULL, /* see xen_pagetable_setup_* */ |
3b827c1b JF |
837 | .set_pte_at = xen_set_pte_at, |
838 | .set_pmd = xen_set_pmd, | |
839 | ||
840 | .pte_val = xen_pte_val, | |
841 | .pgd_val = xen_pgd_val, | |
842 | ||
843 | .make_pte = xen_make_pte, | |
844 | .make_pgd = xen_make_pgd, | |
845 | ||
846 | #ifdef CONFIG_X86_PAE | |
847 | .set_pte_atomic = xen_set_pte_atomic, | |
848 | .set_pte_present = xen_set_pte_at, | |
849 | .set_pud = xen_set_pud, | |
850 | .pte_clear = xen_pte_clear, | |
851 | .pmd_clear = xen_pmd_clear, | |
852 | ||
853 | .make_pmd = xen_make_pmd, | |
854 | .pmd_val = xen_pmd_val, | |
855 | #endif /* PAE */ | |
856 | ||
857 | .activate_mm = xen_activate_mm, | |
858 | .dup_mmap = xen_dup_mmap, | |
859 | .exit_mmap = xen_exit_mmap, | |
860 | ||
5ead97c8 JF |
861 | .set_lazy_mode = xen_set_lazy_mode, |
862 | }; | |
863 | ||
f87e4cac JF |
864 | #ifdef CONFIG_SMP |
865 | static const struct smp_ops xen_smp_ops __initdata = { | |
866 | .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu, | |
867 | .smp_prepare_cpus = xen_smp_prepare_cpus, | |
868 | .cpu_up = xen_cpu_up, | |
869 | .smp_cpus_done = xen_smp_cpus_done, | |
870 | ||
871 | .smp_send_stop = xen_smp_send_stop, | |
872 | .smp_send_reschedule = xen_smp_send_reschedule, | |
873 | .smp_call_function_mask = xen_smp_call_function_mask, | |
874 | }; | |
875 | #endif /* CONFIG_SMP */ | |
876 | ||
5ead97c8 JF |
877 | /* First C function to be called on Xen boot */ |
878 | asmlinkage void __init xen_start_kernel(void) | |
879 | { | |
880 | pgd_t *pgd; | |
881 | ||
882 | if (!xen_start_info) | |
883 | return; | |
884 | ||
885 | BUG_ON(memcmp(xen_start_info->magic, "xen-3.0", 7) != 0); | |
886 | ||
887 | /* Install Xen paravirt ops */ | |
888 | paravirt_ops = xen_paravirt_ops; | |
f87e4cac JF |
889 | #ifdef CONFIG_SMP |
890 | smp_ops = xen_smp_ops; | |
891 | #endif | |
5ead97c8 JF |
892 | |
893 | xen_setup_features(); | |
894 | ||
895 | /* Get mfn list */ | |
896 | if (!xen_feature(XENFEAT_auto_translated_physmap)) | |
897 | phys_to_machine_mapping = (unsigned long *)xen_start_info->mfn_list; | |
898 | ||
899 | pgd = (pgd_t *)xen_start_info->pt_base; | |
900 | ||
901 | init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE; | |
902 | ||
903 | init_mm.pgd = pgd; /* use the Xen pagetables to start */ | |
904 | ||
905 | /* keep using Xen gdt for now; no urgent need to change it */ | |
906 | ||
907 | x86_write_percpu(xen_cr3, __pa(pgd)); | |
908 | xen_vcpu_setup(0); | |
909 | ||
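| /* |
|  * A 32-bit Xen PV kernel normally runs in ring 1, so kernel segment |
|  * selectors carry RPL 1; with the supervisor_mode_kernel feature the |
|  * kernel really is in ring 0 and the RPL stays 0. |
|  */ |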
910 | paravirt_ops.kernel_rpl = 1; | |
911 | if (xen_feature(XENFEAT_supervisor_mode_kernel)) | |
912 | paravirt_ops.kernel_rpl = 0; | |
913 | ||
914 | /* set the limit of our address space */ | |
915 | reserve_top_address(-HYPERVISOR_VIRT_START + 2 * PAGE_SIZE); | |
916 | ||
917 | /* set up basic CPUID stuff */ | |
918 | cpu_detect(&new_cpu_data); | |
919 | new_cpu_data.hard_math = 1; | |
920 | new_cpu_data.x86_capability[0] = cpuid_edx(1); | |
921 | ||
922 | /* Poke various useful things into boot_params */ | |
923 | LOADER_TYPE = (9 << 4) | 0; | |
924 | INITRD_START = xen_start_info->mod_start ? __pa(xen_start_info->mod_start) : 0; | |
925 | INITRD_SIZE = xen_start_info->mod_len; | |
926 | ||
927 | /* Start the world */ | |
928 | start_kernel(); | |
929 | } |