1 | /* |
2 | * kvm_ia64.c: Basic KVM support on Itanium series processors | |
3 | * | |
4 | * | |
5 | * Copyright (C) 2007, Intel Corporation. | |
6 | * Xiantao Zhang (xiantao.zhang@intel.com) | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify it | |
9 | * under the terms and conditions of the GNU General Public License, | |
10 | * version 2, as published by the Free Software Foundation. | |
11 | * | |
12 | * This program is distributed in the hope it will be useful, but WITHOUT | |
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
15 | * more details. | |
16 | * | |
17 | * You should have received a copy of the GNU General Public License along with | |
18 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | |
19 | * Place - Suite 330, Boston, MA 02111-1307 USA. | |
20 | * | |
21 | */ | |
22 | ||
23 | #include <linux/module.h> | |
24 | #include <linux/errno.h> | |
25 | #include <linux/percpu.h> | |
26 | #include <linux/gfp.h> | |
27 | #include <linux/fs.h> | |
28 | #include <linux/smp.h> | |
29 | #include <linux/kvm_host.h> | |
30 | #include <linux/kvm.h> | |
31 | #include <linux/bitops.h> | |
32 | #include <linux/hrtimer.h> | |
33 | #include <linux/uaccess.h> | |
2381ad24 | 34 | #include <linux/intel-iommu.h> |
35 | |
36 | #include <asm/pgtable.h> | |
37 | #include <asm/gcc_intrin.h> | |
38 | #include <asm/pal.h> | |
39 | #include <asm/cacheflush.h> | |
40 | #include <asm/div64.h> | |
41 | #include <asm/tlb.h> | |
9f726323 | 42 | #include <asm/elf.h> |
43 | |
44 | #include "misc.h" | |
45 | #include "vti.h" | |
46 | #include "iodev.h" | |
47 | #include "ioapic.h" | |
48 | #include "lapic.h" | |
2f749771 | 49 | #include "irq.h" |
50 | |
51 | static unsigned long kvm_vmm_base; | |
52 | static unsigned long kvm_vsa_base; | |
53 | static unsigned long kvm_vm_buffer; | |
54 | static unsigned long kvm_vm_buffer_size; | |
55 | unsigned long kvm_vmm_gp; | |
56 | ||
57 | static long vp_env_info; | |
58 | ||
59 | static struct kvm_vmm_info *kvm_vmm_info; | |
60 | ||
61 | static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu); | |
62 | ||
63 | struct kvm_stats_debugfs_item debugfs_entries[] = { | |
64 | { NULL } | |
65 | }; | |
66 | ||
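/* Flush the cpu's instruction cache over [start, start + len), one 32-byte line at a time, then serialize the instruction stream. */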
67 | static void kvm_flush_icache(unsigned long start, unsigned long len) |
68 | { | |
69 | int l; | |
70 | ||
71 | for (l = 0; l < (len + 32); l += 32) | |
72 | ia64_fc(start + l); | |
73 | ||
74 | ia64_sync_i(); | |
75 | ia64_srlz_i(); | |
76 | } | |
77 | ||
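/* Purge the entire local TLB, walking the ptc.e loop parameters (base, counts, strides) supplied by PAL. */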
78 | static void kvm_flush_tlb_all(void) | |
79 | { | |
80 | unsigned long i, j, count0, count1, stride0, stride1, addr; | |
81 | long flags; | |
82 | ||
83 | addr = local_cpu_data->ptce_base; | |
84 | count0 = local_cpu_data->ptce_count[0]; | |
85 | count1 = local_cpu_data->ptce_count[1]; | |
86 | stride0 = local_cpu_data->ptce_stride[0]; | |
87 | stride1 = local_cpu_data->ptce_stride[1]; | |
88 | ||
89 | local_irq_save(flags); | |
90 | for (i = 0; i < count0; ++i) { | |
91 | for (j = 0; j < count1; ++j) { | |
92 | ia64_ptce(addr); | |
93 | addr += stride1; | |
94 | } | |
95 | addr += stride0; | |
96 | } | |
97 | local_irq_restore(flags); | |
98 | ia64_srlz_i(); /* srlz.i implies srlz.d */ | |
99 | } | |
100 | ||
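/* Thin wrapper around the PAL_VP_CREATE firmware call: registers the vcpu's VPD and the IVT the VMM will run with. */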
101 | long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler) | |
102 | { | |
103 | struct ia64_pal_retval iprv; | |
104 | ||
105 | PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva, | |
106 | (u64)opt_handler); | |
107 | ||
108 | return iprv.status; | |
109 | } | |
110 | ||
111 | static DEFINE_SPINLOCK(vp_lock); | |
112 | ||
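/* Per-cpu hardware enable: temporarily pin the VMM area with a translation register pair and initialize the PAL VP environment; the first cpu to succeed records kvm_vsa_base. */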
113 | void kvm_arch_hardware_enable(void *garbage) | |
114 | { | |
115 | long status; | |
116 | long tmp_base; | |
117 | unsigned long pte; | |
118 | unsigned long saved_psr; | |
119 | int slot; | |
120 | ||
121 | pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), | |
122 | PAGE_KERNEL)); | |
123 | local_irq_save(saved_psr); | |
124 | slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); | |
cab7a1ee | 125 | local_irq_restore(saved_psr); |
126 | if (slot < 0) |
127 | return; | |
128 | |
129 | spin_lock(&vp_lock); | |
130 | status = ia64_pal_vp_init_env(kvm_vsa_base ? | |
131 | VP_INIT_ENV : VP_INIT_ENV_INITALIZE, | |
132 | __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base); | |
133 | if (status != 0) { | |
134 | printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n"); | |
135 | return ; | |
136 | } | |
137 | ||
138 | if (!kvm_vsa_base) { | |
139 | kvm_vsa_base = tmp_base; | |
140 | printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base); | |
141 | } | |
142 | spin_unlock(&vp_lock); | |
143 | ia64_ptr_entry(0x3, slot); | |
144 | } | |
145 | ||
146 | void kvm_arch_hardware_disable(void *garbage) | |
147 | { | |
148 | ||
149 | long status; | |
150 | int slot; | |
151 | unsigned long pte; | |
152 | unsigned long saved_psr; | |
153 | unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA); | |
154 | ||
155 | pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), | |
156 | PAGE_KERNEL)); | |
157 | ||
158 | local_irq_save(saved_psr); | |
159 | slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); | |
cab7a1ee | 160 | local_irq_restore(saved_psr); |
161 | if (slot < 0) |
162 | return; | |
163 | |
164 | status = ia64_pal_vp_exit_env(host_iva); | |
165 | if (status) | |
166 | printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n", | |
167 | status); | |
168 | ia64_ptr_entry(0x3, slot); | |
169 | } | |
170 | ||
171 | void kvm_arch_check_processor_compat(void *rtn) | |
172 | { | |
173 | *(int *)rtn = 0; | |
174 | } | |
175 | ||
176 | int kvm_dev_ioctl_check_extension(long ext) | |
177 | { | |
178 | ||
179 | int r; | |
180 | ||
181 | switch (ext) { | |
182 | case KVM_CAP_IRQCHIP: | |
8c4b537d | 183 | case KVM_CAP_MP_STATE: |
184 | |
185 | r = 1; | |
186 | break; | |
187 | case KVM_CAP_COALESCED_MMIO: |
188 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; | |
189 | break; | |
190 | case KVM_CAP_IOMMU: |
191 | r = intel_iommu_found(); | |
192 | break; | |
193 | default: |
194 | r = 0; | |
195 | } | |
196 | return r; | |
197 | ||
198 | } | |
199 | ||
200 | static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, | |
92760499 | 201 | gpa_t addr, int len, int is_write) |
202 | { |
203 | struct kvm_io_device *dev; | |
204 | ||
92760499 | 205 | dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len, is_write); |
206 | |
207 | return dev; | |
208 | } | |
209 | ||
210 | static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
211 | { | |
212 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | |
213 | kvm_run->hw.hardware_exit_reason = 1; | |
214 | return 0; | |
215 | } | |
216 | ||
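/* MMIO exit handler: accesses that hit the in-kernel IOAPIC page are completed here; everything else is forwarded to userspace via KVM_EXIT_MMIO. */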
217 | static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
218 | { | |
219 | struct kvm_mmio_req *p; | |
220 | struct kvm_io_device *mmio_dev; | |
221 | ||
222 | p = kvm_get_vcpu_ioreq(vcpu); | |
223 | ||
224 | if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS) | |
225 | goto mmio; | |
226 | vcpu->mmio_needed = 1; | |
227 | vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr; | |
228 | vcpu->mmio_size = kvm_run->mmio.len = p->size; | |
229 | vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir; | |
230 | ||
231 | if (vcpu->mmio_is_write) | |
232 | memcpy(vcpu->mmio_data, &p->data, p->size); | |
233 | memcpy(kvm_run->mmio.data, &p->data, p->size); | |
234 | kvm_run->exit_reason = KVM_EXIT_MMIO; | |
235 | return 0; | |
236 | mmio: | |
92760499 | 237 | mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr, p->size, !p->dir); |
238 | if (mmio_dev) { |
239 | if (!p->dir) | |
240 | kvm_iodevice_write(mmio_dev, p->addr, p->size, | |
241 | &p->data); | |
242 | else | |
243 | kvm_iodevice_read(mmio_dev, p->addr, p->size, | |
244 | &p->data); | |
245 | ||
246 | } else | |
247 | printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr); | |
248 | p->state = STATE_IORESP_READY; | |
249 | ||
250 | return 1; | |
251 | } | |
252 | ||
253 | static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
254 | { | |
255 | struct exit_ctl_data *p; | |
256 | ||
257 | p = kvm_get_exit_data(vcpu); | |
258 | ||
259 | if (p->exit_reason == EXIT_REASON_PAL_CALL) | |
260 | return kvm_pal_emul(vcpu, kvm_run); | |
261 | else { | |
262 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | |
263 | kvm_run->hw.hardware_exit_reason = 2; | |
264 | return 0; | |
265 | } | |
266 | } | |
267 | ||
268 | static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
269 | { | |
270 | struct exit_ctl_data *p; | |
271 | ||
272 | p = kvm_get_exit_data(vcpu); | |
273 | ||
274 | if (p->exit_reason == EXIT_REASON_SAL_CALL) { | |
275 | kvm_sal_emul(vcpu); | |
276 | return 1; | |
277 | } else { | |
278 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | |
279 | kvm_run->hw.hardware_exit_reason = 3; | |
280 | return 0; | |
281 | } | |
282 | ||
283 | } | |
284 | ||
285 | /* | |
286 | * dm: delivery mode of the pending IPI. | |
287 | * vector: vector to deliver. | |
288 | */ | |
289 | static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm, | |
290 | uint64_t vector) | |
291 | { | |
292 | switch (dm) { | |
293 | case SAPIC_FIXED: | |
294 | kvm_apic_set_irq(vcpu, vector, 0); | |
295 | break; | |
296 | case SAPIC_NMI: | |
297 | kvm_apic_set_irq(vcpu, 2, 0); | |
298 | break; | |
299 | case SAPIC_EXTINT: | |
300 | kvm_apic_set_irq(vcpu, 0, 0); | |
301 | break; | |
302 | case SAPIC_INIT: | |
303 | case SAPIC_PMI: | |
304 | default: | |
305 | printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n"); | |
306 | break; | |
307 | } | |
308 | } | |
309 | ||
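/* Find the vcpu whose local SAPIC id/eid pair matches the given destination. */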
310 | static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id, | |
311 | unsigned long eid) | |
312 | { | |
313 | union ia64_lid lid; | |
314 | int i; | |
315 | ||
316 | for (i = 0; i < KVM_MAX_VCPUS; i++) { | |
317 | if (kvm->vcpus[i]) { | |
318 | lid.val = VCPU_LID(kvm->vcpus[i]); | |
319 | if (lid.id == id && lid.eid == eid) | |
320 | return kvm->vcpus[i]; | |
321 | } | |
322 | } | |
323 | ||
324 | return NULL; | |
325 | } | |
326 | ||
327 | static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
328 | { | |
329 | struct exit_ctl_data *p = kvm_get_exit_data(vcpu); | |
330 | struct kvm_vcpu *target_vcpu; | |
331 | struct kvm_pt_regs *regs; | |
332 | union ia64_ipi_a addr = p->u.ipi_data.addr; | |
333 | union ia64_ipi_d data = p->u.ipi_data.data; | |
334 | ||
335 | target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid); | |
336 | if (!target_vcpu) | |
337 | return handle_vm_error(vcpu, kvm_run); | |
338 | ||
339 | if (!target_vcpu->arch.launched) { | |
340 | regs = vcpu_regs(target_vcpu); | |
341 | ||
342 | regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip; | |
343 | regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp; | |
344 | ||
a4535290 | 345 | target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; |
346 | if (waitqueue_active(&target_vcpu->wq)) |
347 | wake_up_interruptible(&target_vcpu->wq); | |
348 | } else { | |
349 | vcpu_deliver_ipi(target_vcpu, data.dm, data.vector); | |
350 | if (target_vcpu != vcpu) | |
351 | kvm_vcpu_kick(target_vcpu); | |
352 | } | |
353 | ||
354 | return 1; | |
355 | } | |
356 | ||
357 | struct call_data { | |
358 | struct kvm_ptc_g ptc_g_data; | |
359 | struct kvm_vcpu *vcpu; | |
360 | }; | |
361 | ||
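/* IPI callback for ptc.g emulation: queue the purge request on the target vcpu, or fall back to a full TLB flush if its queue overflows. */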
362 | static void vcpu_global_purge(void *info) | |
363 | { | |
364 | struct call_data *p = (struct call_data *)info; | |
365 | struct kvm_vcpu *vcpu = p->vcpu; | |
366 | ||
367 | if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) | |
368 | return; | |
369 | ||
370 | set_bit(KVM_REQ_PTC_G, &vcpu->requests); | |
371 | if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) { | |
372 | vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] = | |
373 | p->ptc_g_data; | |
374 | } else { | |
375 | clear_bit(KVM_REQ_PTC_G, &vcpu->requests); | |
376 | vcpu->arch.ptc_g_count = 0; | |
377 | set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests); | |
378 | } | |
379 | } | |
380 | ||
381 | static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
382 | { | |
383 | struct exit_ctl_data *p = kvm_get_exit_data(vcpu); | |
384 | struct kvm *kvm = vcpu->kvm; | |
385 | struct call_data call_data; | |
386 | int i; | |
decc9016 | 387 | |
388 | call_data.ptc_g_data = p->u.ptc_g_data; |
389 | ||
390 | for (i = 0; i < KVM_MAX_VCPUS; i++) { | |
391 | if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state == | |
a4535290 | 392 | KVM_MP_STATE_UNINITIALIZED || |
393 | vcpu == kvm->vcpus[i]) |
394 | continue; | |
395 | ||
396 | if (waitqueue_active(&kvm->vcpus[i]->wq)) | |
397 | wake_up_interruptible(&kvm->vcpus[i]->wq); | |
398 | ||
399 | if (kvm->vcpus[i]->cpu != -1) { | |
400 | call_data.vcpu = kvm->vcpus[i]; | |
401 | smp_call_function_single(kvm->vcpus[i]->cpu, | |
2f73ccab | 402 | vcpu_global_purge, &call_data, 1); |
403 | } else |
404 | printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n"); | |
405 | ||
406 | } | |
407 | return 1; | |
408 | } | |
409 | ||
410 | static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
411 | { | |
412 | return 1; | |
413 | } | |
414 | ||
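/* Emulate hlt: arm hlt_timer for the guest's next timer match (vpd->itm), mark the vcpu halted, and block until an interrupt, the timer, or a signal wakes it. */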
415 | int kvm_emulate_halt(struct kvm_vcpu *vcpu) | |
416 | { | |
417 | ||
418 | ktime_t kt; | |
419 | long itc_diff; | |
420 | unsigned long vcpu_now_itc; | |
421 | unsigned long expires; |
422 | struct hrtimer *p_ht = &vcpu->arch.hlt_timer; | |
423 | unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec; | |
424 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | |
425 | ||
decc9016 | 426 | if (irqchip_in_kernel(vcpu->kvm)) { |
b024b793 | 427 | |
decc9016 | 428 | vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset; |
b024b793 | 429 | |
430 | if (time_after(vcpu_now_itc, vpd->itm)) { |
431 | vcpu->arch.timer_check = 1; | |
432 | return 1; | |
433 | } | |
434 | itc_diff = vpd->itm - vcpu_now_itc; | |
435 | if (itc_diff < 0) | |
436 | itc_diff = -itc_diff; | |
437 | ||
438 | expires = div64_u64(itc_diff, cyc_per_usec); | |
439 | kt = ktime_set(0, 1000 * expires); | |
440 | ||
441 | vcpu->arch.ht_active = 1; |
442 | hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS); | |
b024b793 | 443 | |
a4535290 | 444 | vcpu->arch.mp_state = KVM_MP_STATE_HALTED; |
445 | kvm_vcpu_block(vcpu); |
446 | hrtimer_cancel(p_ht); | |
447 | vcpu->arch.ht_active = 0; | |
448 | ||
449 | if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests)) |
450 | if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) | |
451 | vcpu->arch.mp_state = | |
452 | KVM_MP_STATE_RUNNABLE; | |
decc9016 | 453 | |
a4535290 | 454 | if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE) |
455 | return -EINTR; |
456 | return 1; | |
457 | } else { | |
458 | printk(KERN_ERR"kvm: Unsupported userspace halt!"); | |
459 | return 0; | |
460 | } | |
461 | } | |
462 | ||
463 | static int handle_vm_shutdown(struct kvm_vcpu *vcpu, | |
464 | struct kvm_run *kvm_run) | |
465 | { | |
466 | kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; | |
467 | return 0; | |
468 | } | |
469 | ||
470 | static int handle_external_interrupt(struct kvm_vcpu *vcpu, | |
471 | struct kvm_run *kvm_run) | |
472 | { | |
473 | return 1; | |
474 | } | |
475 | ||
476 | static int handle_vcpu_debug(struct kvm_vcpu *vcpu, |
477 | struct kvm_run *kvm_run) | |
478 | { | |
479 | printk("VMM: %s", vcpu->arch.log_buf); | |
480 | return 1; | |
481 | } | |
482 | ||
483 | static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu, |
484 | struct kvm_run *kvm_run) = { | |
485 | [EXIT_REASON_VM_PANIC] = handle_vm_error, | |
486 | [EXIT_REASON_MMIO_INSTRUCTION] = handle_mmio, | |
487 | [EXIT_REASON_PAL_CALL] = handle_pal_call, | |
488 | [EXIT_REASON_SAL_CALL] = handle_sal_call, | |
489 | [EXIT_REASON_SWITCH_RR6] = handle_switch_rr6, | |
490 | [EXIT_REASON_VM_DESTROY] = handle_vm_shutdown, | |
491 | [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, | |
492 | [EXIT_REASON_IPI] = handle_ipi, | |
493 | [EXIT_REASON_PTC_G] = handle_global_purge, | |
7d637978 | 494 | [EXIT_REASON_DEBUG] = handle_vcpu_debug, |
495 | |
496 | }; | |
497 | ||
498 | static const int kvm_vti_max_exit_handlers = | |
499 | sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers); | |
500 | ||
501 | static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu) |
502 | { | |
503 | struct exit_ctl_data *p_exit_data; | |
504 | ||
505 | p_exit_data = kvm_get_exit_data(vcpu); | |
506 | return p_exit_data->exit_reason; | |
507 | } | |
508 | ||
509 | /* | |
510 | * The guest has exited. See if we can fix it or if we need userspace | |
511 | * assistance. | |
512 | */ | |
513 | static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |
514 | { | |
515 | u32 exit_reason = kvm_get_exit_reason(vcpu); | |
516 | vcpu->arch.last_exit = exit_reason; | |
517 | ||
518 | if (exit_reason < kvm_vti_max_exit_handlers | |
519 | && kvm_vti_exit_handlers[exit_reason]) | |
520 | return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run); | |
521 | else { | |
522 | kvm_run->exit_reason = KVM_EXIT_UNKNOWN; | |
523 | kvm_run->hw.hardware_exit_reason = exit_reason; | |
524 | } | |
525 | return 0; | |
526 | } | |
527 | ||
528 | static inline void vti_set_rr6(unsigned long rr6) | |
529 | { | |
530 | ia64_set_rr(RR6, rr6); | |
531 | ia64_srlz_i(); | |
532 | } | |
533 | ||
534 | static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu) | |
535 | { | |
536 | unsigned long pte; | |
537 | struct kvm *kvm = vcpu->kvm; | |
538 | int r; | |
539 | ||
540 | /* Insert a pair of translation registers to map the VMM */ | |
541 | pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL)); | |
542 | r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); | |
543 | if (r < 0) | |
544 | goto out; | |
545 | vcpu->arch.vmm_tr_slot = r; | |
546 | /* Insert a pair of translation registers to map the VM's data area */ | |
547 | pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL)); | |
548 | r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE, | |
549 | pte, KVM_VM_DATA_SHIFT); | |
550 | if (r < 0) | |
551 | goto out; | |
552 | vcpu->arch.vm_tr_slot = r; | |
553 | r = 0; | |
554 | out: | |
555 | return r; | |
556 | ||
557 | } | |
558 | ||
559 | static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu) | |
560 | { | |
561 | ||
562 | ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot); | |
563 | ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot); | |
564 | ||
565 | } | |
566 | ||
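/* Prepare to enter the VMM: flush the local TLB if another vcpu ran on this cpu last, save the host's rr6, switch rr6 to the VMM region id, and map the VMM/VM data areas. */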
567 | static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu) | |
568 | { | |
569 | int cpu = smp_processor_id(); | |
570 | ||
571 | if (vcpu->arch.last_run_cpu != cpu || | |
572 | per_cpu(last_vcpu, cpu) != vcpu) { | |
573 | per_cpu(last_vcpu, cpu) = vcpu; | |
574 | vcpu->arch.last_run_cpu = cpu; | |
575 | kvm_flush_tlb_all(); | |
576 | } | |
577 | ||
578 | vcpu->arch.host_rr6 = ia64_get_rr(RR6); | |
579 | vti_set_rr6(vcpu->arch.vmm_rr); | |
580 | return kvm_insert_vmm_mapping(vcpu); | |
581 | } | |
582 | static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu) | |
583 | { | |
584 | kvm_purge_vmm_mapping(vcpu); | |
585 | vti_set_rr6(vcpu->arch.host_rr6); | |
586 | } | |
587 | ||
588 | static int vti_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
589 | { | |
590 | union context *host_ctx, *guest_ctx; | |
591 | int r; | |
592 | ||
593 | /* Get host and guest context within the guest address space. */ | |
594 | host_ctx = kvm_get_host_context(vcpu); | |
595 | guest_ctx = kvm_get_guest_context(vcpu); | |
596 | ||
597 | r = kvm_vcpu_pre_transition(vcpu); | |
598 | if (r < 0) | |
599 | goto out; | |
600 | kvm_vmm_info->tramp_entry(host_ctx, guest_ctx); | |
601 | kvm_vcpu_post_transition(vcpu); | |
602 | r = 0; | |
603 | out: | |
604 | return r; | |
605 | } | |
606 | ||
607 | static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
608 | { | |
609 | int r; | |
610 | ||
611 | again: | |
612 | preempt_disable(); | |
613 | local_irq_disable(); |
614 | ||
615 | if (signal_pending(current)) { | |
616 | local_irq_enable(); | |
617 | preempt_enable(); | |
618 | r = -EINTR; | |
619 | kvm_run->exit_reason = KVM_EXIT_INTR; | |
620 | goto out; | |
621 | } | |
622 | ||
623 | vcpu->guest_mode = 1; | |
624 | kvm_guest_enter(); | |
decc9016 | 625 | down_read(&vcpu->kvm->slots_lock); |
626 | r = vti_vcpu_run(vcpu, kvm_run); |
627 | if (r < 0) { | |
628 | local_irq_enable(); | |
629 | preempt_enable(); | |
630 | kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; | |
631 | goto out; | |
632 | } | |
633 | ||
634 | vcpu->arch.launched = 1; | |
635 | vcpu->guest_mode = 0; | |
636 | local_irq_enable(); | |
637 | ||
638 | /* | |
639 | * We must have an instruction between local_irq_enable() and | |
640 | * kvm_guest_exit(), so the timer interrupt isn't delayed by | |
641 | * the interrupt shadow. The stat.exits increment will do nicely. | |
642 | * But we need to prevent reordering, hence this barrier(): | |
643 | */ | |
644 | barrier(); | |
b024b793 | 645 | kvm_guest_exit(); |
decc9016 | 646 | up_read(&vcpu->kvm->slots_lock); |
647 | preempt_enable(); |
648 | ||
649 | r = kvm_handle_exit(kvm_run, vcpu); | |
650 | ||
651 | if (r > 0) { | |
652 | if (!need_resched()) | |
653 | goto again; | |
654 | } | |
655 | ||
656 | out: | |
657 | if (r > 0) { | |
658 | kvm_resched(vcpu); | |
659 | goto again; | |
660 | } | |
661 | ||
662 | return r; | |
663 | } | |
664 | ||
665 | static void kvm_set_mmio_data(struct kvm_vcpu *vcpu) | |
666 | { | |
667 | struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu); | |
668 | ||
669 | if (!vcpu->mmio_is_write) | |
670 | memcpy(&p->data, vcpu->mmio_data, 8); | |
671 | p->state = STATE_IORESP_READY; | |
672 | } | |
673 | ||
674 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |
675 | { | |
676 | int r; | |
677 | sigset_t sigsaved; | |
678 | ||
679 | vcpu_load(vcpu); | |
680 | ||
681 | if (vcpu->sigset_active) |
682 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); | |
683 | ||
a4535290 | 684 | if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { |
b024b793 | 685 | kvm_vcpu_block(vcpu); |
decc9016 | 686 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); |
687 | r = -EAGAIN; |
688 | goto out; | |
689 | } |
690 | ||
691 | if (vcpu->mmio_needed) { |
692 | memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); | |
693 | kvm_set_mmio_data(vcpu); | |
694 | vcpu->mmio_read_completed = 1; | |
695 | vcpu->mmio_needed = 0; | |
696 | } | |
697 | r = __vcpu_run(vcpu, kvm_run); | |
a2e4e289 | 698 | out: |
699 | if (vcpu->sigset_active) |
700 | sigprocmask(SIG_SETMASK, &sigsaved, NULL); | |
701 | ||
702 | vcpu_put(vcpu); | |
703 | return r; | |
704 | } | |
705 | ||
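/* Allocate the contiguous VM data area (KVM_VM_DATA_SIZE) and place struct kvm inside it; the VMM later maps this whole block with a single translation register pair. */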
706 | static struct kvm *kvm_alloc_kvm(void) |
707 | { | |
708 | ||
709 | struct kvm *kvm; | |
710 | uint64_t vm_base; | |
711 | ||
712 | BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE); |
713 | ||
714 | vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE)); |
715 | ||
716 | if (!vm_base) | |
717 | return ERR_PTR(-ENOMEM); | |
b024b793 | 718 | |
b024b793 | 719 | memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); |
720 | kvm = (struct kvm *)(vm_base + |
721 | offsetof(struct kvm_vm_data, kvm_vm_struct)); | |
b024b793 | 722 | kvm->arch.vm_base = vm_base; |
a917f7af | 723 | printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base); |
724 | |
725 | return kvm; | |
726 | } | |
727 | ||
728 | struct kvm_io_range { | |
729 | unsigned long start; | |
730 | unsigned long size; | |
731 | unsigned long type; | |
732 | }; | |
733 | ||
734 | static const struct kvm_io_range io_ranges[] = { | |
735 | {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER}, | |
736 | {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO}, | |
737 | {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO}, | |
738 | {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC}, | |
739 | {PIB_START, PIB_SIZE, GPFN_PIB}, | |
740 | }; | |
741 | ||
742 | static void kvm_build_io_pmt(struct kvm *kvm) | |
743 | { | |
744 | unsigned long i, j; | |
745 | ||
746 | /* Mark I/O ranges */ | |
747 | for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range)); | |
748 | i++) { | |
749 | for (j = io_ranges[i].start; | |
750 | j < io_ranges[i].start + io_ranges[i].size; | |
751 | j += PAGE_SIZE) | |
752 | kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT, | |
753 | io_ranges[i].type, 0); | |
754 | } | |
755 | ||
756 | } | |
757 | ||
758 | /* Use otherwise-unused RIDs to virtualize guest RIDs. */ | |
759 | #define GUEST_PHYSICAL_RR0 0x1739 | |
760 | #define GUEST_PHYSICAL_RR4 0x2739 | |
761 | #define VMM_INIT_RR 0x1660 | |
762 | ||
763 | static void kvm_init_vm(struct kvm *kvm) | |
764 | { | |
765 | BUG_ON(!kvm); |
766 | ||
767 | kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0; | |
768 | kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4; | |
769 | kvm->arch.vmm_init_rr = VMM_INIT_RR; | |
770 | ||
771 | /* |
772 | * Fill P2M entries for MMIO/IO ranges | |
773 | */ | |
774 | kvm_build_io_pmt(kvm); | |
775 | ||
2381ad24 | 776 | INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); |
777 | |
778 | /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ | |
779 | set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); | |
780 | } |
781 | ||
782 | struct kvm *kvm_arch_create_vm(void) | |
783 | { | |
784 | struct kvm *kvm = kvm_alloc_kvm(); | |
785 | ||
786 | if (IS_ERR(kvm)) | |
787 | return ERR_PTR(-ENOMEM); | |
788 | kvm_init_vm(kvm); | |
789 | ||
790 | return kvm; | |
791 | ||
792 | } | |
793 | ||
794 | static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, | |
795 | struct kvm_irqchip *chip) | |
796 | { | |
797 | int r; | |
798 | ||
799 | r = 0; | |
800 | switch (chip->chip_id) { | |
801 | case KVM_IRQCHIP_IOAPIC: | |
802 | memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm), | |
803 | sizeof(struct kvm_ioapic_state)); | |
804 | break; | |
805 | default: | |
806 | r = -EINVAL; | |
807 | break; | |
808 | } | |
809 | return r; | |
810 | } | |
811 | ||
812 | static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) | |
813 | { | |
814 | int r; | |
815 | ||
816 | r = 0; | |
817 | switch (chip->chip_id) { | |
818 | case KVM_IRQCHIP_IOAPIC: | |
819 | memcpy(ioapic_irqchip(kvm), | |
820 | &chip->chip.ioapic, | |
821 | sizeof(struct kvm_ioapic_state)); | |
822 | break; | |
823 | default: | |
824 | r = -EINVAL; | |
825 | break; | |
826 | } | |
827 | return r; | |
828 | } | |
829 | ||
830 | #define RESTORE_REGS(_x) vcpu->arch._x = regs->_x | |
831 | ||
832 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |
833 | { | |
834 | int i; | |
835 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | |
836 | int r; | |
837 | ||
838 | vcpu_load(vcpu); | |
839 | ||
840 | for (i = 0; i < 16; i++) { | |
841 | vpd->vgr[i] = regs->vpd.vgr[i]; | |
842 | vpd->vbgr[i] = regs->vpd.vbgr[i]; | |
843 | } | |
844 | for (i = 0; i < 128; i++) | |
845 | vpd->vcr[i] = regs->vpd.vcr[i]; | |
846 | vpd->vhpi = regs->vpd.vhpi; | |
847 | vpd->vnat = regs->vpd.vnat; | |
848 | vpd->vbnat = regs->vpd.vbnat; | |
849 | vpd->vpsr = regs->vpd.vpsr; | |
850 | ||
851 | vpd->vpr = regs->vpd.vpr; | |
852 | ||
853 | r = -EFAULT; | |
854 | r = copy_from_user(&vcpu->arch.guest, regs->saved_guest, | |
855 | sizeof(union context)); | |
856 | if (r) | |
857 | goto out; | |
858 | r = copy_from_user(vcpu + 1, regs->saved_stack + | |
859 | sizeof(struct kvm_vcpu), | |
a917f7af | 860 | KVM_STK_OFFSET - sizeof(struct kvm_vcpu)); |
861 | if (r) |
862 | goto out; | |
863 | vcpu->arch.exit_data = | |
864 | ((struct kvm_vcpu *)(regs->saved_stack))->arch.exit_data; | |
865 | ||
866 | RESTORE_REGS(mp_state); | |
867 | RESTORE_REGS(vmm_rr); | |
868 | memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS); | |
869 | memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS); | |
870 | RESTORE_REGS(itr_regions); | |
871 | RESTORE_REGS(dtr_regions); | |
872 | RESTORE_REGS(tc_regions); | |
873 | RESTORE_REGS(irq_check); | |
874 | RESTORE_REGS(itc_check); | |
875 | RESTORE_REGS(timer_check); | |
876 | RESTORE_REGS(timer_pending); | |
877 | RESTORE_REGS(last_itc); | |
878 | for (i = 0; i < 8; i++) { | |
879 | vcpu->arch.vrr[i] = regs->vrr[i]; | |
880 | vcpu->arch.ibr[i] = regs->ibr[i]; | |
881 | vcpu->arch.dbr[i] = regs->dbr[i]; | |
882 | } | |
883 | for (i = 0; i < 4; i++) | |
884 | vcpu->arch.insvc[i] = regs->insvc[i]; | |
885 | RESTORE_REGS(xtp); | |
886 | RESTORE_REGS(metaphysical_rr0); | |
887 | RESTORE_REGS(metaphysical_rr4); | |
888 | RESTORE_REGS(metaphysical_saved_rr0); | |
889 | RESTORE_REGS(metaphysical_saved_rr4); | |
890 | RESTORE_REGS(fp_psr); | |
891 | RESTORE_REGS(saved_gp); | |
892 | ||
893 | vcpu->arch.irq_new_pending = 1; | |
894 | vcpu->arch.itc_offset = regs->saved_itc - ia64_getreg(_IA64_REG_AR_ITC); | |
895 | set_bit(KVM_REQ_RESUME, &vcpu->requests); | |
896 | ||
897 | vcpu_put(vcpu); | |
898 | r = 0; | |
899 | out: | |
900 | return r; | |
901 | } | |
902 | ||
903 | long kvm_arch_vm_ioctl(struct file *filp, | |
904 | unsigned int ioctl, unsigned long arg) | |
905 | { | |
906 | struct kvm *kvm = filp->private_data; | |
907 | void __user *argp = (void __user *)arg; | |
908 | int r = -EINVAL; | |
909 | ||
910 | switch (ioctl) { | |
911 | case KVM_SET_MEMORY_REGION: { | |
912 | struct kvm_memory_region kvm_mem; | |
913 | struct kvm_userspace_memory_region kvm_userspace_mem; | |
914 | ||
915 | r = -EFAULT; | |
916 | if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem)) | |
917 | goto out; | |
918 | kvm_userspace_mem.slot = kvm_mem.slot; | |
919 | kvm_userspace_mem.flags = kvm_mem.flags; | |
920 | kvm_userspace_mem.guest_phys_addr = | |
921 | kvm_mem.guest_phys_addr; | |
922 | kvm_userspace_mem.memory_size = kvm_mem.memory_size; | |
923 | r = kvm_vm_ioctl_set_memory_region(kvm, | |
924 | &kvm_userspace_mem, 0); | |
925 | if (r) | |
926 | goto out; | |
927 | break; | |
928 | } | |
929 | case KVM_CREATE_IRQCHIP: | |
930 | r = -EFAULT; | |
931 | r = kvm_ioapic_init(kvm); | |
932 | if (r) | |
933 | goto out; | |
934 | break; | |
935 | case KVM_IRQ_LINE: { | |
936 | struct kvm_irq_level irq_event; | |
937 | ||
938 | r = -EFAULT; | |
939 | if (copy_from_user(&irq_event, argp, sizeof irq_event)) | |
940 | goto out; | |
941 | if (irqchip_in_kernel(kvm)) { | |
942 | mutex_lock(&kvm->lock); | |
943 | kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, |
944 | irq_event.irq, irq_event.level); | |
945 | mutex_unlock(&kvm->lock); |
946 | r = 0; | |
947 | } | |
948 | break; | |
949 | } | |
950 | case KVM_GET_IRQCHIP: { | |
951 | /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ | |
952 | struct kvm_irqchip chip; | |
953 | ||
954 | r = -EFAULT; | |
955 | if (copy_from_user(&chip, argp, sizeof chip)) | |
956 | goto out; | |
957 | r = -ENXIO; | |
958 | if (!irqchip_in_kernel(kvm)) | |
959 | goto out; | |
960 | r = kvm_vm_ioctl_get_irqchip(kvm, &chip); | |
961 | if (r) | |
962 | goto out; | |
963 | r = -EFAULT; | |
964 | if (copy_to_user(argp, &chip, sizeof chip)) | |
965 | goto out; | |
966 | r = 0; | |
967 | break; | |
968 | } | |
969 | case KVM_SET_IRQCHIP: { | |
970 | /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ | |
971 | struct kvm_irqchip chip; | |
972 | ||
973 | r = -EFAULT; | |
974 | if (copy_from_user(&chip, argp, sizeof chip)) | |
975 | goto out; | |
976 | r = -ENXIO; | |
977 | if (!irqchip_in_kernel(kvm)) | |
978 | goto out; | |
979 | r = kvm_vm_ioctl_set_irqchip(kvm, &chip); | |
980 | if (r) | |
981 | goto out; | |
982 | r = 0; | |
983 | break; | |
984 | } | |
985 | default: | |
986 | ; | |
987 | } | |
988 | out: | |
989 | return r; | |
990 | } | |
991 | ||
992 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | |
993 | struct kvm_sregs *sregs) | |
994 | { | |
995 | return -EINVAL; | |
996 | } | |
997 | ||
998 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | |
999 | struct kvm_sregs *sregs) | |
1000 | { | |
1001 | return -EINVAL; | |
1002 | ||
1003 | } | |
1004 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, | |
1005 | struct kvm_translation *tr) | |
1006 | { | |
1007 | ||
1008 | return -EINVAL; | |
1009 | } | |
1010 | ||
1011 | static int kvm_alloc_vmm_area(void) | |
1012 | { | |
1013 | if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) { | |
1014 | kvm_vmm_base = __get_free_pages(GFP_KERNEL, | |
1015 | get_order(KVM_VMM_SIZE)); | |
1016 | if (!kvm_vmm_base) | |
1017 | return -ENOMEM; | |
1018 | ||
1019 | memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE); | |
1020 | kvm_vm_buffer = kvm_vmm_base + VMM_SIZE; | |
1021 | ||
1022 | printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n", | |
1023 | kvm_vmm_base, kvm_vm_buffer); | |
1024 | } | |
1025 | ||
1026 | return 0; | |
1027 | } | |
1028 | ||
1029 | static void kvm_free_vmm_area(void) | |
1030 | { | |
1031 | if (kvm_vmm_base) { | |
1032 | /* Zero this area before freeing it, to avoid leaking stale data */ | |
1033 | memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE); | |
1034 | free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE)); | |
1035 | kvm_vmm_base = 0; | |
1036 | kvm_vm_buffer = 0; | |
1037 | kvm_vsa_base = 0; | |
1038 | } | |
1039 | } | |
1040 | ||
1041 | static void vti_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
1042 | { | |
1043 | } | |
1044 | ||
1045 | static int vti_init_vpd(struct kvm_vcpu *vcpu) | |
1046 | { | |
1047 | int i; | |
1048 | union cpuid3_t cpuid3; | |
1049 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | |
1050 | ||
1051 | if (IS_ERR(vpd)) | |
1052 | return PTR_ERR(vpd); | |
1053 | ||
1054 | /* CPUID init */ | |
1055 | for (i = 0; i < 5; i++) | |
1056 | vpd->vcpuid[i] = ia64_get_cpuid(i); | |
1057 | ||
1058 | /* Limit the CPUID number to 5 */ | |
1059 | cpuid3.value = vpd->vcpuid[3]; | |
1060 | cpuid3.number = 4; /* 5 - 1 */ | |
1061 | vpd->vcpuid[3] = cpuid3.value; | |
1062 | ||
1063 | /*Set vac and vdc fields*/ | |
1064 | vpd->vac.a_from_int_cr = 1; | |
1065 | vpd->vac.a_to_int_cr = 1; | |
1066 | vpd->vac.a_from_psr = 1; | |
1067 | vpd->vac.a_from_cpuid = 1; | |
1068 | vpd->vac.a_cover = 1; | |
1069 | vpd->vac.a_bsw = 1; | |
1070 | vpd->vac.a_int = 1; | |
1071 | vpd->vdc.d_vmsw = 1; | |
1072 | ||
1073 | /*Set virtual buffer*/ | |
1074 | vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE; | |
1075 | ||
1076 | return 0; | |
1077 | } | |
1078 | ||
1079 | static int vti_create_vp(struct kvm_vcpu *vcpu) | |
1080 | { | |
1081 | long ret; | |
1082 | struct vpd *vpd = vcpu->arch.vpd; | |
1083 | unsigned long vmm_ivt; | |
1084 | ||
1085 | vmm_ivt = kvm_vmm_info->vmm_ivt; | |
1086 | ||
1087 | printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt); | |
1088 | ||
1089 | ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0); | |
1090 | ||
1091 | if (ret) { | |
1092 | printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n"); | |
1093 | return -EINVAL; | |
1094 | } | |
1095 | return 0; | |
1096 | } | |
1097 | ||
1098 | static void init_ptce_info(struct kvm_vcpu *vcpu) | |
1099 | { | |
1100 | ia64_ptce_info_t ptce = {0}; | |
1101 | ||
1102 | ia64_get_ptce(&ptce); | |
1103 | vcpu->arch.ptce_base = ptce.base; | |
1104 | vcpu->arch.ptce_count[0] = ptce.count[0]; | |
1105 | vcpu->arch.ptce_count[1] = ptce.count[1]; | |
1106 | vcpu->arch.ptce_stride[0] = ptce.stride[0]; | |
1107 | vcpu->arch.ptce_stride[1] = ptce.stride[1]; | |
1108 | } | |
1109 | ||
1110 | static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu) | |
1111 | { | |
1112 | struct hrtimer *p_ht = &vcpu->arch.hlt_timer; | |
1113 | ||
1114 | if (hrtimer_cancel(p_ht)) | |
18dd36af | 1115 | hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS); |
1116 | } |
1117 | ||
1118 | static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data) | |
1119 | { | |
1120 | struct kvm_vcpu *vcpu; | |
1121 | wait_queue_head_t *q; | |
1122 | ||
1123 | vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer); | |
1124 | q = &vcpu->wq; |
1125 | ||
a4535290 | 1126 | if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED) |
1127 | goto out; |
1128 | ||
decc9016 | 1129 | if (waitqueue_active(q)) |
b024b793 | 1130 | wake_up_interruptible(q); |
decc9016 | 1131 | |
b024b793 | 1132 | out: |
decc9016 | 1133 | vcpu->arch.timer_fired = 1; |
1134 | vcpu->arch.timer_check = 1; |
1135 | return HRTIMER_NORESTART; | |
1136 | } | |
1137 | ||
1138 | #define PALE_RESET_ENTRY 0x80000000ffffffb0UL | |
1139 | ||
1140 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |
1141 | { | |
1142 | struct kvm_vcpu *v; | |
1143 | int r; | |
1144 | int i; | |
1145 | long itc_offset; | |
1146 | struct kvm *kvm = vcpu->kvm; | |
1147 | struct kvm_pt_regs *regs = vcpu_regs(vcpu); | |
1148 | ||
1149 | union context *p_ctx = &vcpu->arch.guest; | |
1150 | struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu); | |
1151 | ||
1152 | /*Init vcpu context for first run.*/ | |
1153 | if (IS_ERR(vmm_vcpu)) | |
1154 | return PTR_ERR(vmm_vcpu); | |
1155 | ||
1156 | if (vcpu->vcpu_id == 0) { | |
a4535290 | 1157 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; |
1158 | |
1159 | /*Set entry address for first run.*/ | |
1160 | regs->cr_iip = PALE_RESET_ENTRY; | |
1161 | ||
a917f7af | 1162 | /*Initialize itc offset for vcpus*/ |
b024b793 | 1163 | itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC); |
1164 | for (i = 0; i < KVM_MAX_VCPUS; i++) { |
1165 | v = (struct kvm_vcpu *)((char *)vcpu + | |
1166 | sizeof(struct kvm_vcpu_data) * i); | |
1167 | v->arch.itc_offset = itc_offset; |
1168 | v->arch.last_itc = 0; | |
1169 | } | |
1170 | } else | |
a4535290 | 1171 | vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; |
1172 | |
1173 | r = -ENOMEM; | |
1174 | vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL); | |
1175 | if (!vcpu->arch.apic) | |
1176 | goto out; | |
1177 | vcpu->arch.apic->vcpu = vcpu; | |
1178 | ||
1179 | p_ctx->gr[1] = 0; | |
a917f7af | 1180 | p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET); |
1181 | p_ctx->gr[13] = (unsigned long)vmm_vcpu; |
1182 | p_ctx->psr = 0x1008522000UL; | |
1183 | p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/ | |
1184 | p_ctx->caller_unat = 0; | |
1185 | p_ctx->pr = 0x0; | |
1186 | p_ctx->ar[36] = 0x0; /*unat*/ | |
1187 | p_ctx->ar[19] = 0x0; /*rnat*/ | |
1188 | p_ctx->ar[18] = (unsigned long)vmm_vcpu + | |
1189 | ((sizeof(struct kvm_vcpu)+15) & ~15); | |
1190 | p_ctx->ar[64] = 0x0; /*pfs*/ | |
1191 | p_ctx->cr[0] = 0x7e04UL; | |
1192 | p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt; | |
1193 | p_ctx->cr[8] = 0x3c; | |
1194 | ||
1195 | /* Initialize region registers */ | |
1196 | p_ctx->rr[0] = 0x30; | |
1197 | p_ctx->rr[1] = 0x30; | |
1198 | p_ctx->rr[2] = 0x30; | |
1199 | p_ctx->rr[3] = 0x30; | |
1200 | p_ctx->rr[4] = 0x30; | |
1201 | p_ctx->rr[5] = 0x30; | |
1202 | p_ctx->rr[7] = 0x30; | |
1203 | ||
1204 | /* Initialize branch register 0 */ | |
1205 | p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry; | |
1206 | ||
1207 | vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr; | |
1208 | vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0; | |
1209 | vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4; | |
1210 | ||
1211 | hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | |
1212 | vcpu->arch.hlt_timer.function = hlt_timer_fn; | |
1213 | ||
1214 | vcpu->arch.last_run_cpu = -1; | |
a917f7af | 1215 | vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id); |
1216 | vcpu->arch.vsa_base = kvm_vsa_base; |
1217 | vcpu->arch.__gp = kvm_vmm_gp; | |
1218 | vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock); | |
1219 | vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id); |
1220 | vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id); | |
1221 | init_ptce_info(vcpu); |
1222 | ||
1223 | r = 0; | |
1224 | out: | |
1225 | return r; | |
1226 | } | |
1227 | ||
1228 | static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id) | |
1229 | { | |
1230 | unsigned long psr; | |
1231 | int r; | |
1232 | ||
1233 | local_irq_save(psr); | |
1234 | r = kvm_insert_vmm_mapping(vcpu); | |
1235 | if (r) | |
1236 | goto fail; | |
1237 | r = kvm_vcpu_init(vcpu, vcpu->kvm, id); | |
1238 | if (r) | |
1239 | goto fail; | |
1240 | ||
1241 | r = vti_init_vpd(vcpu); | |
1242 | if (r) { | |
1243 | printk(KERN_DEBUG"kvm: vpd init error!!\n"); | |
1244 | goto uninit; | |
1245 | } | |
1246 | ||
1247 | r = vti_create_vp(vcpu); | |
1248 | if (r) | |
1249 | goto uninit; | |
1250 | ||
1251 | kvm_purge_vmm_mapping(vcpu); | |
1252 | local_irq_restore(psr); | |
1253 | ||
1254 | return 0; | |
1255 | uninit: | |
1256 | kvm_vcpu_uninit(vcpu); | |
1257 | fail: | |
cab7a1ee | 1258 | local_irq_restore(psr); |
1259 | return r; |
1260 | } | |
1261 | ||
1262 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | |
1263 | unsigned int id) | |
1264 | { | |
1265 | struct kvm_vcpu *vcpu; | |
1266 | unsigned long vm_base = kvm->arch.vm_base; | |
1267 | int r; | |
1268 | int cpu; | |
1269 | ||
1270 | BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2); |
1271 | ||
1272 | r = -EINVAL; | |
1273 | if (id >= KVM_MAX_VCPUS) { | |
1274 | printk(KERN_ERR"kvm: Can't configure vcpus > %ld", | |
1275 | KVM_MAX_VCPUS); | |
1276 | goto fail; | |
1277 | } | |
1278 | ||
1279 | r = -ENOMEM; |
1280 | if (!vm_base) { | |
1281 | printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id); | |
1282 | goto fail; | |
1283 | } | |
1284 | vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data, |
1285 | vcpu_data[id].vcpu_struct)); | |
1286 | vcpu->kvm = kvm; |
1287 | ||
1288 | cpu = get_cpu(); | |
1289 | vti_vcpu_load(vcpu, cpu); | |
1290 | r = vti_vcpu_setup(vcpu, id); | |
1291 | put_cpu(); | |
1292 | ||
1293 | if (r) { | |
1294 | printk(KERN_DEBUG"kvm: vcpu_setup error!!\n"); | |
1295 | goto fail; | |
1296 | } | |
1297 | ||
1298 | return vcpu; | |
1299 | fail: | |
1300 | return ERR_PTR(r); | |
1301 | } | |
1302 | ||
1303 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |
1304 | { | |
1305 | return 0; | |
1306 | } | |
1307 | ||
1308 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | |
1309 | { | |
1310 | return -EINVAL; | |
1311 | } | |
1312 | ||
1313 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | |
1314 | { | |
1315 | return -EINVAL; | |
1316 | } | |
1317 | ||
1318 | int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, | |
1319 | struct kvm_debug_guest *dbg) | |
1320 | { | |
1321 | return -EINVAL; | |
1322 | } | |
1323 | ||
1324 | static void free_kvm(struct kvm *kvm) | |
1325 | { | |
1326 | unsigned long vm_base = kvm->arch.vm_base; | |
1327 | ||
1328 | if (vm_base) { | |
1329 | memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); | |
1330 | free_pages(vm_base, get_order(KVM_VM_DATA_SIZE)); | |
1331 | } | |
1332 | ||
1333 | } | |
1334 | ||
1335 | static void kvm_release_vm_pages(struct kvm *kvm) | |
1336 | { | |
1337 | struct kvm_memory_slot *memslot; | |
1338 | int i, j; | |
1339 | unsigned long base_gfn; | |
1340 | ||
1341 | for (i = 0; i < kvm->nmemslots; i++) { | |
1342 | memslot = &kvm->memslots[i]; | |
1343 | base_gfn = memslot->base_gfn; | |
1344 | ||
1345 | for (j = 0; j < memslot->npages; j++) { | |
1346 | if (memslot->rmap[j]) | |
1347 | put_page((struct page *)memslot->rmap[j]); | |
1348 | } | |
1349 | } | |
1350 | } | |
1351 | ||
1352 | void kvm_arch_destroy_vm(struct kvm *kvm) | |
1353 | { | |
1354 | kvm_iommu_unmap_guest(kvm); |
1355 | #ifdef KVM_CAP_DEVICE_ASSIGNMENT | |
1356 | kvm_free_all_assigned_devices(kvm); | |
1357 | #endif | |
1358 | kfree(kvm->arch.vioapic); |
1359 | kvm_release_vm_pages(kvm); | |
1360 | kvm_free_physmem(kvm); | |
1361 | free_kvm(kvm); | |
1362 | } | |
1363 | ||
1364 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | |
1365 | { | |
1366 | } | |
1367 | ||
1368 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |
1369 | { | |
1370 | if (cpu != vcpu->cpu) { | |
1371 | vcpu->cpu = cpu; | |
1372 | if (vcpu->arch.ht_active) | |
1373 | kvm_migrate_hlt_timer(vcpu); | |
1374 | } | |
1375 | } | |
1376 | ||
1377 | #define SAVE_REGS(_x) regs->_x = vcpu->arch._x | |
1378 | ||
1379 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |
1380 | { | |
1381 | int i; | |
1382 | int r; | |
1383 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | |
1384 | vcpu_load(vcpu); | |
1385 | ||
1386 | for (i = 0; i < 16; i++) { | |
1387 | regs->vpd.vgr[i] = vpd->vgr[i]; | |
1388 | regs->vpd.vbgr[i] = vpd->vbgr[i]; | |
1389 | } | |
1390 | for (i = 0; i < 128; i++) | |
1391 | regs->vpd.vcr[i] = vpd->vcr[i]; | |
1392 | regs->vpd.vhpi = vpd->vhpi; | |
1393 | regs->vpd.vnat = vpd->vnat; | |
1394 | regs->vpd.vbnat = vpd->vbnat; | |
1395 | regs->vpd.vpsr = vpd->vpsr; | |
1396 | regs->vpd.vpr = vpd->vpr; | |
1397 | ||
1398 | r = -EFAULT; | |
1399 | r = copy_to_user(regs->saved_guest, &vcpu->arch.guest, | |
1400 | sizeof(union context)); | |
1401 | if (r) | |
1402 | goto out; | |
a917f7af | 1403 | r = copy_to_user(regs->saved_stack, (void *)vcpu, KVM_STK_OFFSET); |
1404 | if (r) |
1405 | goto out; | |
1406 | SAVE_REGS(mp_state); | |
1407 | SAVE_REGS(vmm_rr); | |
1408 | memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS); | |
1409 | memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS); | |
1410 | SAVE_REGS(itr_regions); | |
1411 | SAVE_REGS(dtr_regions); | |
1412 | SAVE_REGS(tc_regions); | |
1413 | SAVE_REGS(irq_check); | |
1414 | SAVE_REGS(itc_check); | |
1415 | SAVE_REGS(timer_check); | |
1416 | SAVE_REGS(timer_pending); | |
1417 | SAVE_REGS(last_itc); | |
1418 | for (i = 0; i < 8; i++) { | |
1419 | regs->vrr[i] = vcpu->arch.vrr[i]; | |
1420 | regs->ibr[i] = vcpu->arch.ibr[i]; | |
1421 | regs->dbr[i] = vcpu->arch.dbr[i]; | |
1422 | } | |
1423 | for (i = 0; i < 4; i++) | |
1424 | regs->insvc[i] = vcpu->arch.insvc[i]; | |
1425 | regs->saved_itc = vcpu->arch.itc_offset + ia64_getreg(_IA64_REG_AR_ITC); | |
1426 | SAVE_REGS(xtp); | |
1427 | SAVE_REGS(metaphysical_rr0); | |
1428 | SAVE_REGS(metaphysical_rr4); | |
1429 | SAVE_REGS(metaphysical_saved_rr0); | |
1430 | SAVE_REGS(metaphysical_saved_rr4); | |
1431 | SAVE_REGS(fp_psr); | |
1432 | SAVE_REGS(saved_gp); | |
1433 | vcpu_put(vcpu); | |
1434 | r = 0; | |
1435 | out: | |
1436 | return r; | |
1437 | } | |
1438 | ||
1439 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) | |
1440 | { | |
1441 | ||
1442 | hrtimer_cancel(&vcpu->arch.hlt_timer); | |
1443 | kfree(vcpu->arch.apic); | |
1444 | } | |
1445 | ||
1446 | ||
1447 | long kvm_arch_vcpu_ioctl(struct file *filp, | |
1448 | unsigned int ioctl, unsigned long arg) | |
1449 | { | |
1450 | return -EINVAL; | |
1451 | } | |
1452 | ||
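/* Populate the guest physmap for a new memory slot: RAM pages get RWX/write-back entries and an rmap reference, MMIO pfns are marked GPFN_PHYS_MMIO and uncacheable. */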
1453 | int kvm_arch_set_memory_region(struct kvm *kvm, | |
1454 | struct kvm_userspace_memory_region *mem, | |
1455 | struct kvm_memory_slot old, | |
1456 | int user_alloc) | |
1457 | { | |
1458 | unsigned long i; | |
1cbea809 | 1459 | unsigned long pfn; |
1460 | int npages = mem->memory_size >> PAGE_SHIFT; |
1461 | struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot]; | |
1462 | unsigned long base_gfn = memslot->base_gfn; | |
1463 | ||
1464 | if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT)) |
1465 | return -ENOMEM; | |
1466 | ||
b024b793 | 1467 | for (i = 0; i < npages; i++) { |
1468 | pfn = gfn_to_pfn(kvm, base_gfn + i); |
1469 | if (!kvm_is_mmio_pfn(pfn)) { | |
1470 | kvm_set_pmt_entry(kvm, base_gfn + i, | |
1471 | pfn << PAGE_SHIFT, | |
b010eb51 | 1472 | _PAGE_AR_RWX | _PAGE_MA_WB); |
1473 | memslot->rmap[i] = (unsigned long)pfn_to_page(pfn); |
1474 | } else { | |
1475 | kvm_set_pmt_entry(kvm, base_gfn + i, | |
b010eb51 | 1476 | GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT), |
1477 | _PAGE_MA_UC); |
1478 | memslot->rmap[i] = 0; | |
1479 | } | |
1480 | } |
1481 | ||
1482 | return 0; | |
1483 | } | |
1484 | ||
1485 | void kvm_arch_flush_shadow(struct kvm *kvm) |
1486 | { | |
1487 | } | |
1488 | |
1489 | long kvm_arch_dev_ioctl(struct file *filp, | |
1490 | unsigned int ioctl, unsigned long arg) | |
1491 | { | |
1492 | return -EINVAL; | |
1493 | } | |
1494 | ||
1495 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | |
1496 | { | |
1497 | kvm_vcpu_uninit(vcpu); | |
1498 | } | |
1499 | ||
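/* Probe PAL for VT-i support and query the VM buffer size the firmware requires. */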
1500 | static int vti_cpu_has_kvm_support(void) | |
1501 | { | |
1502 | long avail = 1, status = 1, control = 1; | |
1503 | long ret; | |
1504 | ||
1505 | ret = ia64_pal_proc_get_features(&avail, &status, &control, 0); | |
1506 | if (ret) | |
1507 | goto out; | |
1508 | ||
1509 | if (!(avail & PAL_PROC_VM_BIT)) | |
1510 | goto out; | |
1511 | ||
1512 | printk(KERN_DEBUG"kvm: Hardware Supports VT\n"); | |
1513 | ||
1514 | ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info); | |
1515 | if (ret) | |
1516 | goto out; | |
1517 | printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size); | |
1518 | ||
1519 | if (!(vp_env_info & VP_OPCODE)) { | |
1520 | printk(KERN_WARNING"kvm: No opcode ability on hardware, " | |
1521 | "vm_env_info:0x%lx\n", vp_env_info); | |
1522 | } | |
1523 | ||
1524 | return 1; | |
1525 | out: | |
1526 | return 0; | |
1527 | } | |
1528 | ||
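/* Copy the VMM module image into the pinned VMM area and patch its function descriptors (entry ip and gp) to the relocated addresses. */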
1529 | static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info, | |
1530 | struct module *module) | |
1531 | { | |
1532 | unsigned long module_base; | |
1533 | unsigned long vmm_size; | |
1534 | ||
1535 | unsigned long vmm_offset, func_offset, fdesc_offset; | |
1536 | struct fdesc *p_fdesc; | |
1537 | ||
1538 | BUG_ON(!module); | |
1539 | ||
1540 | if (!kvm_vmm_base) { | |
1541 | printk("kvm: kvm area hasn't been initilized yet!!\n"); | |
1542 | return -EFAULT; | |
1543 | } | |
1544 | ||
1545 | /*Calculate new position of relocated vmm module.*/ | |
1546 | module_base = (unsigned long)module->module_core; | |
1547 | vmm_size = module->core_size; | |
1548 | if (unlikely(vmm_size > KVM_VMM_SIZE)) | |
1549 | return -EFAULT; | |
1550 | ||
1551 | memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size); | |
1552 | kvm_flush_icache(kvm_vmm_base, vmm_size); | |
1553 | ||
1554 | /*Recalculate kvm_vmm_info based on new VMM*/ | |
1555 | vmm_offset = vmm_info->vmm_ivt - module_base; | |
1556 | kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset; | |
1557 | printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n", | |
1558 | kvm_vmm_info->vmm_ivt); | |
1559 | ||
1560 | fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base; | |
1561 | kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE + | |
1562 | fdesc_offset); | |
1563 | func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base; | |
1564 | p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset); | |
1565 | p_fdesc->ip = KVM_VMM_BASE + func_offset; | |
1566 | p_fdesc->gp = KVM_VMM_BASE+(p_fdesc->gp - module_base); | |
1567 | ||
1568 | printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n", | |
1569 | KVM_VMM_BASE+func_offset); | |
1570 | ||
1571 | fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base; | |
1572 | kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE + | |
1573 | fdesc_offset); | |
1574 | func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base; | |
1575 | p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset); | |
1576 | p_fdesc->ip = KVM_VMM_BASE + func_offset; | |
1577 | p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base); | |
1578 | ||
1579 | kvm_vmm_gp = p_fdesc->gp; | |
1580 | ||
1581 | printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n", | |
1582 | kvm_vmm_info->vmm_entry); | |
1583 | printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n", | |
1584 | KVM_VMM_BASE + func_offset); | |
1585 | ||
1586 | return 0; | |
1587 | } | |
1588 | ||
1589 | int kvm_arch_init(void *opaque) | |
1590 | { | |
1591 | int r; | |
1592 | struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque; | |
1593 | ||
1594 | if (!vti_cpu_has_kvm_support()) { | |
1595 | printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n"); | |
1596 | r = -EOPNOTSUPP; | |
1597 | goto out; | |
1598 | } | |
1599 | ||
1600 | if (kvm_vmm_info) { | |
1601 | printk(KERN_ERR "kvm: Already loaded VMM module!\n"); | |
1602 | r = -EEXIST; | |
1603 | goto out; | |
1604 | } | |
1605 | ||
1606 | r = -ENOMEM; | |
1607 | kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL); | |
1608 | if (!kvm_vmm_info) | |
1609 | goto out; | |
1610 | ||
1611 | if (kvm_alloc_vmm_area()) | |
1612 | goto out_free0; | |
1613 | ||
1614 | r = kvm_relocate_vmm(vmm_info, vmm_info->module); | |
1615 | if (r) | |
1616 | goto out_free1; | |
1617 | ||
1618 | return 0; | |
1619 | ||
1620 | out_free1: | |
1621 | kvm_free_vmm_area(); | |
1622 | out_free0: | |
1623 | kfree(kvm_vmm_info); | |
1624 | out: | |
1625 | return r; | |
1626 | } | |
1627 | ||
1628 | void kvm_arch_exit(void) | |
1629 | { | |
1630 | kvm_free_vmm_area(); | |
1631 | kfree(kvm_vmm_info); | |
1632 | kvm_vmm_info = NULL; | |
1633 | } | |
1634 | ||
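/* Copy the dirty-page bitmap maintained by the VMM in the VM data area into the memslot's bitmap, clearing the VMM's copy as we go. */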
1635 | static int kvm_ia64_sync_dirty_log(struct kvm *kvm, | |
1636 | struct kvm_dirty_log *log) | |
1637 | { | |
1638 | struct kvm_memory_slot *memslot; | |
1639 | int r, i; | |
1640 | long n, base; | |
1641 | unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base + |
1642 | offsetof(struct kvm_vm_data, kvm_mem_dirty_log)); | |
1643 | |
1644 | r = -EINVAL; | |
1645 | if (log->slot >= KVM_MEMORY_SLOTS) | |
1646 | goto out; | |
1647 | ||
1648 | memslot = &kvm->memslots[log->slot]; | |
1649 | r = -ENOENT; | |
1650 | if (!memslot->dirty_bitmap) | |
1651 | goto out; | |
1652 | ||
1653 | n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; | |
1654 | base = memslot->base_gfn / BITS_PER_LONG; | |
1655 | ||
1656 | for (i = 0; i < n/sizeof(long); ++i) { | |
1657 | memslot->dirty_bitmap[i] = dirty_bitmap[base + i]; | |
1658 | dirty_bitmap[base + i] = 0; | |
1659 | } | |
1660 | r = 0; | |
1661 | out: | |
1662 | return r; | |
1663 | } | |
1664 | ||
1665 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | |
1666 | struct kvm_dirty_log *log) | |
1667 | { | |
1668 | int r; | |
1669 | int n; | |
1670 | struct kvm_memory_slot *memslot; | |
1671 | int is_dirty = 0; | |
1672 | ||
1673 | spin_lock(&kvm->arch.dirty_log_lock); | |
1674 | ||
1675 | r = kvm_ia64_sync_dirty_log(kvm, log); | |
1676 | if (r) | |
1677 | goto out; | |
1678 | ||
1679 | r = kvm_get_dirty_log(kvm, log, &is_dirty); | |
1680 | if (r) | |
1681 | goto out; | |
1682 | ||
1683 | /* If nothing is dirty, don't bother messing with page tables. */ | |
1684 | if (is_dirty) { | |
1685 | kvm_flush_remote_tlbs(kvm); | |
1686 | memslot = &kvm->memslots[log->slot]; | |
1687 | n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; | |
1688 | memset(memslot->dirty_bitmap, 0, n); | |
1689 | } | |
1690 | r = 0; | |
1691 | out: | |
1692 | spin_unlock(&kvm->arch.dirty_log_lock); | |
1693 | return r; | |
1694 | } | |
1695 | ||
1696 | int kvm_arch_hardware_setup(void) | |
1697 | { | |
1698 | return 0; | |
1699 | } | |
1700 | ||
1701 | void kvm_arch_hardware_unsetup(void) | |
1702 | { | |
1703 | } | |
1704 | ||
1705 | static void vcpu_kick_intr(void *info) | |
1706 | { | |
1707 | #ifdef DEBUG | |
1708 | struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info; | |
1709 | printk(KERN_DEBUG"vcpu_kick_intr %p \n", vcpu); | |
1710 | #endif | |
1711 | } | |
1712 | ||
1713 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu) | |
1714 | { | |
1715 | int ipi_pcpu = vcpu->cpu; | |
decc9016 | 1716 | int cpu = get_cpu(); |
1717 | |
1718 | if (waitqueue_active(&vcpu->wq)) | |
1719 | wake_up_interruptible(&vcpu->wq); | |
1720 | ||
decc9016 | 1721 | if (vcpu->guest_mode && cpu != ipi_pcpu) |
2f73ccab | 1722 | smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0); |
decc9016 | 1723 | put_cpu(); |
1724 | } |
1725 | ||
1726 | int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig) | |
1727 | { | |
1728 | ||
1729 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | |
1730 | ||
1731 | if (!test_and_set_bit(vec, &vpd->irr[0])) { | |
1732 | vcpu->arch.irq_new_pending = 1; | |
decc9016 | 1733 | kvm_vcpu_kick(vcpu); |
1734 | return 1; |
1735 | } | |
1736 | return 0; | |
1737 | } | |
1738 | ||
1739 | int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest) | |
1740 | { | |
1741 | return apic->vcpu->vcpu_id == dest; | |
1742 | } | |
1743 | ||
1744 | int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda) | |
1745 | { | |
1746 | return 0; | |
1747 | } | |
1748 | ||
1749 | struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector, | |
1750 | unsigned long bitmap) | |
1751 | { | |
1752 | struct kvm_vcpu *lvcpu = kvm->vcpus[0]; | |
1753 | int i; | |
1754 | ||
1755 | for (i = 1; i < KVM_MAX_VCPUS; i++) { | |
1756 | if (!kvm->vcpus[i]) | |
1757 | continue; | |
1758 | if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp) | |
1759 | lvcpu = kvm->vcpus[i]; | |
1760 | } | |
1761 | ||
1762 | return lvcpu; | |
1763 | } | |
1764 | ||
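/* Scan a 256-bit IRR from the top down and return the highest vector that is set, or -1 if none is pending. */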
1765 | static int find_highest_bits(int *dat) | |
1766 | { | |
1767 | u32 bits, bitnum; | |
1768 | int i; | |
1769 | ||
1770 | /* loop for all 256 bits */ | |
1771 | for (i = 7; i >= 0 ; i--) { | |
1772 | bits = dat[i]; | |
1773 | if (bits) { | |
1774 | bitnum = fls(bits); | |
1775 | return i * 32 + bitnum - 1; | |
1776 | } | |
1777 | } | |
1778 | ||
1779 | return -1; | |
1780 | } | |
1781 | ||
1782 | int kvm_highest_pending_irq(struct kvm_vcpu *vcpu) | |
1783 | { | |
1784 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | |
1785 | ||
1786 | if (vpd->irr[0] & (1UL << NMI_VECTOR)) | |
1787 | return NMI_VECTOR; | |
1788 | if (vpd->irr[0] & (1UL << ExtINT_VECTOR)) | |
1789 | return ExtINT_VECTOR; | |
1790 | ||
1791 | return find_highest_bits((int *)&vpd->irr[0]); | |
1792 | } | |
1793 | ||
1794 | int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) | |
1795 | { | |
1796 | if (kvm_highest_pending_irq(vcpu) != -1) | |
1797 | return 1; | |
1798 | return 0; | |
1799 | } | |
1800 | ||
1801 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) |
1802 | { | |
decc9016 | 1803 | return vcpu->arch.timer_fired; |
1804 | } |
1805 | ||
1806 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) |
1807 | { | |
1808 | return gfn; | |
1809 | } | |
1810 | ||
1811 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) | |
1812 | { | |
a4535290 | 1813 | return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE; |
b024b793 | 1814 | } |
1815 | |
1816 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, | |
1817 | struct kvm_mp_state *mp_state) | |
1818 | { | |
1819 | vcpu_load(vcpu); |
1820 | mp_state->mp_state = vcpu->arch.mp_state; | |
1821 | vcpu_put(vcpu); | |
1822 | return 0; | |
1823 | } | |
1824 | ||
1825 | static int vcpu_reset(struct kvm_vcpu *vcpu) | |
1826 | { | |
1827 | int r; | |
1828 | long psr; | |
1829 | local_irq_save(psr); | |
1830 | r = kvm_insert_vmm_mapping(vcpu); | |
1831 | if (r) | |
1832 | goto fail; | |
1833 | ||
1834 | vcpu->arch.launched = 0; | |
1835 | kvm_arch_vcpu_uninit(vcpu); | |
1836 | r = kvm_arch_vcpu_init(vcpu); | |
1837 | if (r) | |
1838 | goto fail; | |
1839 | ||
1840 | kvm_purge_vmm_mapping(vcpu); | |
1841 | r = 0; | |
1842 | fail: | |
1843 | local_irq_restore(psr); | |
1844 | return r; | |
1845 | } |
1846 | ||
1847 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | |
1848 | struct kvm_mp_state *mp_state) | |
1849 | { | |
1850 | int r = 0; |
1851 | ||
1852 | vcpu_load(vcpu); | |
1853 | vcpu->arch.mp_state = mp_state->mp_state; | |
1854 | if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED) | |
1855 | r = vcpu_reset(vcpu); | |
1856 | vcpu_put(vcpu); | |
1857 | return r; | |
62d9f0db | 1858 | } |