/*
 * kvm_ia64.c: Basic KVM support on Itanium series processors
 *
 *
 * Copyright (C) 2007, Intel Corporation.
 *   Xiantao Zhang (xiantao.zhang@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/bitops.h>
#include <linux/hrtimer.h>
#include <linux/uaccess.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>

#include <asm/pgtable.h>
#include <asm/gcc_intrin.h>
#include <asm/pal.h>
#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/tlb.h>
#include <asm/elf.h>

#include "misc.h"
#include "vti.h"
#include "iodev.h"
#include "ioapic.h"
#include "lapic.h"
#include "irq.h"

static unsigned long kvm_vmm_base;
static unsigned long kvm_vsa_base;
static unsigned long kvm_vm_buffer;
static unsigned long kvm_vm_buffer_size;
unsigned long kvm_vmm_gp;

static long vp_env_info;

static struct kvm_vmm_info *kvm_vmm_info;

static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ NULL }
};

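/*
 * Flush the instruction cache for a freshly written range.  The fc
 * loop assumes a minimum cache-line size of 32 bytes; sync.i and
 * srlz.i then serialize the local instruction stream.
 */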
static void kvm_flush_icache(unsigned long start, unsigned long len)
{
	int l;

	for (l = 0; l < (len + 32); l += 32)
		ia64_fc(start + l);

	ia64_sync_i();
	ia64_srlz_i();
}

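/*
 * Purge the entire local TLB with ptc.e, walking the base address and
 * the two nested count/stride pairs that PAL_PTCE_INFO reports for
 * this CPU.
 */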
static void kvm_flush_tlb_all(void)
{
	unsigned long i, j, count0, count1, stride0, stride1, addr;
	long flags;

	addr = local_cpu_data->ptce_base;
	count0 = local_cpu_data->ptce_count[0];
	count1 = local_cpu_data->ptce_count[1];
	stride0 = local_cpu_data->ptce_stride[0];
	stride1 = local_cpu_data->ptce_stride[1];

	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();		/* srlz.i implies srlz.d */
}

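/*
 * Thin wrapper around the PAL_VP_CREATE firmware call: registers the
 * VPD and the host IVA for a virtual processor and returns the PAL
 * status word.
 */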
long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
{
	struct ia64_pal_retval iprv;

	PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva,
			(u64)opt_handler);

	return iprv.status;
}

static DEFINE_SPINLOCK(vp_lock);

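/*
 * Runs on each CPU when hardware virtualization is enabled:
 * temporarily pin the VMM area with a translation register pair, then
 * ask PAL to initialize (or, once kvm_vsa_base is known, join) the VP
 * environment.
 */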
void kvm_arch_hardware_enable(void *garbage)
{
	long status;
	long tmp_base;
	unsigned long pte;
	unsigned long saved_psr;
	int slot;

	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
	local_irq_save(saved_psr);
	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	local_irq_restore(saved_psr);
	if (slot < 0)
		return;

	spin_lock(&vp_lock);
	status = ia64_pal_vp_init_env(kvm_vsa_base ?
				VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
			__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
	if (status != 0) {
		spin_unlock(&vp_lock);
		ia64_ptr_entry(0x3, slot);
		printk(KERN_WARNING"kvm: Failed to enable VT support!\n");
		return;
	}

	if (!kvm_vsa_base) {
		kvm_vsa_base = tmp_base;
		printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base);
	}
	spin_unlock(&vp_lock);
	ia64_ptr_entry(0x3, slot);
}

void kvm_arch_hardware_disable(void *garbage)
{
	long status;
	int slot;
	unsigned long pte;
	unsigned long saved_psr;
	unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA);

	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));

	local_irq_save(saved_psr);
	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	local_irq_restore(saved_psr);
	if (slot < 0)
		return;

	status = ia64_pal_vp_exit_env(host_iva);
	if (status)
		printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n",
				status);
	ia64_ptr_entry(0x3, slot);
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

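/*
 * Report which generic KVM capabilities this architecture implements;
 * anything not listed here is unsupported on ia64.
 */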
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_MP_STATE:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_IOMMU:
		r = iommu_found();
		break;
	default:
		r = 0;
	}
	return r;
}

static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
					gpa_t addr, int len, int is_write)
{
	struct kvm_io_device *dev;

	dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len, is_write);

	return dev;
}

static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	kvm_run->hw.hardware_exit_reason = 1;
	return 0;
}

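/*
 * An MMIO access trapped in the VMM.  Accesses to the in-kernel
 * IOAPIC page are completed here against the registered I/O devices;
 * everything else is forwarded to userspace via KVM_EXIT_MMIO, with
 * the request left in the per-vcpu ioreq until the reply arrives.
 */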
static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct kvm_mmio_req *p;
	struct kvm_io_device *mmio_dev;

	p = kvm_get_vcpu_ioreq(vcpu);

	if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
		goto mmio;
	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
	vcpu->mmio_size = kvm_run->mmio.len = p->size;
	vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;

	if (vcpu->mmio_is_write)
		memcpy(vcpu->mmio_data, &p->data, p->size);
	memcpy(kvm_run->mmio.data, &p->data, p->size);
	kvm_run->exit_reason = KVM_EXIT_MMIO;
	return 0;
mmio:
	mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr, p->size, !p->dir);
	if (mmio_dev) {
		if (!p->dir)
			kvm_iodevice_write(mmio_dev, p->addr, p->size,
						&p->data);
		else
			kvm_iodevice_read(mmio_dev, p->addr, p->size,
						&p->data);
	} else
		printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
	p->state = STATE_IORESP_READY;

	return 1;
}

static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p;

	p = kvm_get_exit_data(vcpu);

	if (p->exit_reason == EXIT_REASON_PAL_CALL)
		return kvm_pal_emul(vcpu, kvm_run);
	else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = 2;
		return 0;
	}
}

static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p;

	p = kvm_get_exit_data(vcpu);

	if (p->exit_reason == EXIT_REASON_SAL_CALL) {
		kvm_sal_emul(vcpu);
		return 1;
	} else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = 3;
		return 0;
	}
}

/*
 * dm:     delivery mode of the IPI.
 * vector: interrupt vector to deliver.
 */
static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
		uint64_t vector)
{
	switch (dm) {
	case SAPIC_FIXED:
		kvm_apic_set_irq(vcpu, vector, 0);
		break;
	case SAPIC_NMI:
		kvm_apic_set_irq(vcpu, 2, 0);
		break;
	case SAPIC_EXTINT:
		kvm_apic_set_irq(vcpu, 0, 0);
		break;
	case SAPIC_INIT:
	case SAPIC_PMI:
	default:
		printk(KERN_ERR"kvm: Unimplemented/reserved IPI delivery mode!\n");
		break;
	}
}

static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
			unsigned long eid)
{
	union ia64_lid lid;
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; i++) {
		if (kvm->vcpus[i]) {
			lid.val = VCPU_LID(kvm->vcpus[i]);
			if (lid.id == id && lid.eid == eid)
				return kvm->vcpus[i];
		}
	}

	return NULL;
}

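/*
 * A guest IPI trapped in the VMM.  An IPI to a vcpu that has not been
 * launched yet acts as a startup: its entry point and gp come from
 * the rendezvous/boot data captured from SAL.  Otherwise the vector
 * is delivered through the local APIC model.
 */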
static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
	struct kvm_vcpu *target_vcpu;
	struct kvm_pt_regs *regs;
	union ia64_ipi_a addr = p->u.ipi_data.addr;
	union ia64_ipi_d data = p->u.ipi_data.data;

	target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid);
	if (!target_vcpu)
		return handle_vm_error(vcpu, kvm_run);

	if (!target_vcpu->arch.launched) {
		regs = vcpu_regs(target_vcpu);

		regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
		regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;

		target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		if (waitqueue_active(&target_vcpu->wq))
			wake_up_interruptible(&target_vcpu->wq);
	} else {
		vcpu_deliver_ipi(target_vcpu, data.dm, data.vector);
		if (target_vcpu != vcpu)
			kvm_vcpu_kick(target_vcpu);
	}

	return 1;
}

struct call_data {
	struct kvm_ptc_g ptc_g_data;
	struct kvm_vcpu *vcpu;
};

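/*
 * Runs on the target vcpu's CPU via smp_call_function_single(): queue
 * one ptc.g request, or fall back to a full TLB flush request once
 * more than MAX_PTC_G_NUM purges are pending.
 */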
static void vcpu_global_purge(void *info)
{
	struct call_data *p = (struct call_data *)info;
	struct kvm_vcpu *vcpu = p->vcpu;

	if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
		return;

	set_bit(KVM_REQ_PTC_G, &vcpu->requests);
	if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) {
		vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] =
							p->ptc_g_data;
	} else {
		clear_bit(KVM_REQ_PTC_G, &vcpu->requests);
		vcpu->arch.ptc_g_count = 0;
		set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
	}
}

static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
	struct kvm *kvm = vcpu->kvm;
	struct call_data call_data;
	int i;

	call_data.ptc_g_data = p->u.ptc_g_data;

	for (i = 0; i < KVM_MAX_VCPUS; i++) {
		if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
						KVM_MP_STATE_UNINITIALIZED ||
					vcpu == kvm->vcpus[i])
			continue;

		if (waitqueue_active(&kvm->vcpus[i]->wq))
			wake_up_interruptible(&kvm->vcpus[i]->wq);

		if (kvm->vcpus[i]->cpu != -1) {
			call_data.vcpu = kvm->vcpus[i];
			smp_call_function_single(kvm->vcpus[i]->cpu,
					vcpu_global_purge, &call_data, 1);
		} else
			printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
	}
	return 1;
}

static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	return 1;
}

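/*
 * The guest executed hlt: sleep until its next timer deadline.  The
 * remaining ITC cycles are converted to an hrtimer interval; e.g.
 * with cyc_per_usec == 1000 (a 1 GHz ITC), itc_diff == 2000000 gives
 * expires == 2000 us, armed below as 2000 * 1000 ns.
 */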
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	ktime_t kt;
	long itc_diff;
	unsigned long vcpu_now_itc;
	unsigned long expires;
	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
	unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (irqchip_in_kernel(vcpu->kvm)) {

		vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;

		if (time_after(vcpu_now_itc, vpd->itm)) {
			vcpu->arch.timer_check = 1;
			return 1;
		}
		itc_diff = vpd->itm - vcpu_now_itc;
		if (itc_diff < 0)
			itc_diff = -itc_diff;

		expires = div64_u64(itc_diff, cyc_per_usec);
		kt = ktime_set(0, 1000 * expires);

		vcpu->arch.ht_active = 1;
		hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);

		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
		kvm_vcpu_block(vcpu);
		hrtimer_cancel(p_ht);
		vcpu->arch.ht_active = 0;

		if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
			if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
				vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
			return -EINTR;
		return 1;
	} else {
		printk(KERN_ERR"kvm: Unsupported userspace halt!");
		return 0;
	}
}

static int handle_vm_shutdown(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

static int handle_external_interrupt(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	return 1;
}

static int handle_vcpu_debug(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	printk("VMM: %s", vcpu->arch.log_buf);
	return 1;
}

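/*
 * Dispatch table indexed by the VMM's exit reason; exit reasons with
 * no entry fall through to the KVM_EXIT_UNKNOWN path in
 * kvm_handle_exit().
 */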
static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run) = {
	[EXIT_REASON_VM_PANIC]			= handle_vm_error,
	[EXIT_REASON_MMIO_INSTRUCTION]		= handle_mmio,
	[EXIT_REASON_PAL_CALL]			= handle_pal_call,
	[EXIT_REASON_SAL_CALL]			= handle_sal_call,
	[EXIT_REASON_SWITCH_RR6]		= handle_switch_rr6,
	[EXIT_REASON_VM_DESTROY]		= handle_vm_shutdown,
	[EXIT_REASON_EXTERNAL_INTERRUPT]	= handle_external_interrupt,
	[EXIT_REASON_IPI]			= handle_ipi,
	[EXIT_REASON_PTC_G]			= handle_global_purge,
	[EXIT_REASON_DEBUG]			= handle_vcpu_debug,
};

static const int kvm_vti_max_exit_handlers =
		ARRAY_SIZE(kvm_vti_exit_handlers);

static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
{
	struct exit_ctl_data *p_exit_data;

	p_exit_data = kvm_get_exit_data(vcpu);
	return p_exit_data->exit_reason;
}

/*
 * The guest has exited.  See if we can fix it or if we need userspace
 * assistance.
 */
static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	u32 exit_reason = kvm_get_exit_reason(vcpu);
	vcpu->arch.last_exit = exit_reason;

	if (exit_reason < kvm_vti_max_exit_handlers
			&& kvm_vti_exit_handlers[exit_reason])
		return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run);
	else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_reason;
	}
	return 0;
}

static inline void vti_set_rr6(unsigned long rr6)
{
	ia64_set_rr(RR6, rr6);
	ia64_srlz_i();
}

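/*
 * Pin the VMM text/data and this VM's data area with translation
 * register pairs so they stay mapped across the world switch into the
 * VMM.
 */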
static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
{
	unsigned long pte;
	struct kvm *kvm = vcpu->kvm;
	int r;

	/* Insert a pair of TRs to map the VMM. */
	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
	r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	if (r < 0)
		goto out;
	vcpu->arch.vmm_tr_slot = r;
	/* Insert a pair of TRs to map the VM's data. */
	pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
	r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
					pte, KVM_VM_DATA_SHIFT);
	if (r < 0)
		goto out;
	vcpu->arch.vm_tr_slot = r;
	r = 0;
out:
	return r;
}

static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
{
	ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
	ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
}

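/*
 * Entered with irqs off on the way into the guest: flush the local
 * TLB if this physical CPU last ran a different vcpu (stale guest
 * translations would otherwise survive), then switch rr6 to the VMM's
 * region and pin the VMM mappings.
 */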
static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	if (vcpu->arch.last_run_cpu != cpu ||
			per_cpu(last_vcpu, cpu) != vcpu) {
		per_cpu(last_vcpu, cpu) = vcpu;
		vcpu->arch.last_run_cpu = cpu;
		kvm_flush_tlb_all();
	}

	vcpu->arch.host_rr6 = ia64_get_rr(RR6);
	vti_set_rr6(vcpu->arch.vmm_rr);
	return kvm_insert_vmm_mapping(vcpu);
}

static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
{
	kvm_purge_vmm_mapping(vcpu);
	vti_set_rr6(vcpu->arch.host_rr6);
}

static int vti_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	union context *host_ctx, *guest_ctx;
	int r;

	/* Get host and guest context with guest address space. */
	host_ctx = kvm_get_host_context(vcpu);
	guest_ctx = kvm_get_guest_context(vcpu);

	r = kvm_vcpu_pre_transition(vcpu);
	if (r < 0)
		goto out;
	kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
	kvm_vcpu_post_transition(vcpu);
	r = 0;
out:
	return r;
}

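/*
 * The vcpu run loop: enter the guest with irqs off, let the VMM
 * trampoline switch worlds, then handle the exit.  A positive return
 * from kvm_handle_exit() loops back in; zero and negative values
 * return to userspace.
 */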
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;

again:
	preempt_disable();
	local_irq_disable();

	if (signal_pending(current)) {
		local_irq_enable();
		preempt_enable();
		r = -EINTR;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		goto out;
	}

	vcpu->guest_mode = 1;
	kvm_guest_enter();
	down_read(&vcpu->kvm->slots_lock);
	r = vti_vcpu_run(vcpu, kvm_run);
	if (r < 0) {
		local_irq_enable();
		preempt_enable();
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		goto out;
	}

	vcpu->arch.launched = 1;
	vcpu->guest_mode = 0;
	local_irq_enable();

	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow.  The stat.exits increment will do nicely.
	 * But we need to prevent reordering, hence this barrier():
	 */
	barrier();
	kvm_guest_exit();
	up_read(&vcpu->kvm->slots_lock);
	preempt_enable();

	r = kvm_handle_exit(kvm_run, vcpu);

	if (r > 0) {
		if (!need_resched())
			goto again;
	}

out:
	if (r > 0) {
		kvm_resched(vcpu);
		goto again;
	}

	return r;
}

static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
{
	struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);

	if (!vcpu->mmio_is_write)
		memcpy(&p->data, vcpu->mmio_data, 8);
	p->state = STATE_IORESP_READY;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		r = -EAGAIN;
		goto out;
	}

	if (vcpu->mmio_needed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		kvm_set_mmio_data(vcpu);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;
	}
	r = __vcpu_run(vcpu, kvm_run);
out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);
	return r;
}

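/*
 * The whole VM (struct kvm, the vcpu structures, the per-vcpu
 * VHPT/VTLB hash tables and the dirty log) lives in one physically
 * contiguous KVM_VM_DATA_SIZE allocation, so struct kvm is carved out
 * of that block at a fixed offset rather than allocated separately.
 */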
static struct kvm *kvm_alloc_kvm(void)
{
	struct kvm *kvm;
	uint64_t vm_base;

	BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE);

	vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));

	if (!vm_base)
		return ERR_PTR(-ENOMEM);

	memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
	kvm = (struct kvm *)(vm_base +
			offsetof(struct kvm_vm_data, kvm_vm_struct));
	kvm->arch.vm_base = vm_base;
	printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base);

	return kvm;
}

struct kvm_io_range {
	unsigned long start;
	unsigned long size;
	unsigned long type;
};

static const struct kvm_io_range io_ranges[] = {
	{VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
	{MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
	{LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
	{IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
	{PIB_START, PIB_SIZE, GPFN_PIB},
};

static void kvm_build_io_pmt(struct kvm *kvm)
{
	unsigned long i, j;

	/* Mark I/O ranges */
	for (i = 0; i < ARRAY_SIZE(io_ranges); i++) {
		for (j = io_ranges[i].start;
				j < io_ranges[i].start + io_ranges[i].size;
				j += PAGE_SIZE)
			kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT,
					io_ranges[i].type, 0);
	}
}

/* Use unused RIDs to virtualize the guest's RID. */
#define GUEST_PHYSICAL_RR0	0x1739
#define GUEST_PHYSICAL_RR4	0x2739
#define VMM_INIT_RR		0x1660

static void kvm_init_vm(struct kvm *kvm)
{
	BUG_ON(!kvm);

	kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
	kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
	kvm->arch.vmm_init_rr = VMM_INIT_RR;

	/*
	 * Fill P2M entries for MMIO/IO ranges.
	 */
	kvm_build_io_pmt(kvm);

	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
}

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm = kvm_alloc_kvm();

	if (IS_ERR(kvm))
		return ERR_PTR(-ENOMEM);
	kvm_init_vm(kvm);

	return kvm;
}

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
		struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_IOAPIC:
		memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm),
				sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_IOAPIC:
		memcpy(ioapic_irqchip(kvm),
				&chip->chip.ioapic,
				sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

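/*
 * KVM_SET_REGS on ia64 restores far more than general registers: the
 * VPD, the saved VMM context, the software TR arrays and the timer
 * state all travel in struct kvm_regs (mirrored by KVM_GET_REGS
 * below), and saved_itc is turned back into a per-vcpu ITC offset.
 */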
#define RESTORE_REGS(_x)	vcpu->arch._x = regs->_x

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < 16; i++) {
		vpd->vgr[i] = regs->vpd.vgr[i];
		vpd->vbgr[i] = regs->vpd.vbgr[i];
	}
	for (i = 0; i < 128; i++)
		vpd->vcr[i] = regs->vpd.vcr[i];
	vpd->vhpi = regs->vpd.vhpi;
	vpd->vnat = regs->vpd.vnat;
	vpd->vbnat = regs->vpd.vbnat;
	vpd->vpsr = regs->vpd.vpsr;

	vpd->vpr = regs->vpd.vpr;

	memcpy(&vcpu->arch.guest, &regs->saved_guest, sizeof(union context));

	RESTORE_REGS(mp_state);
	RESTORE_REGS(vmm_rr);
	memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS);
	memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS);
	RESTORE_REGS(itr_regions);
	RESTORE_REGS(dtr_regions);
	RESTORE_REGS(tc_regions);
	RESTORE_REGS(irq_check);
	RESTORE_REGS(itc_check);
	RESTORE_REGS(timer_check);
	RESTORE_REGS(timer_pending);
	RESTORE_REGS(last_itc);
	for (i = 0; i < 8; i++) {
		vcpu->arch.vrr[i] = regs->vrr[i];
		vcpu->arch.ibr[i] = regs->ibr[i];
		vcpu->arch.dbr[i] = regs->dbr[i];
	}
	for (i = 0; i < 4; i++)
		vcpu->arch.insvc[i] = regs->insvc[i];
	RESTORE_REGS(xtp);
	RESTORE_REGS(metaphysical_rr0);
	RESTORE_REGS(metaphysical_rr4);
	RESTORE_REGS(metaphysical_saved_rr0);
	RESTORE_REGS(metaphysical_saved_rr4);
	RESTORE_REGS(fp_psr);
	RESTORE_REGS(saved_gp);

	vcpu->arch.irq_new_pending = 1;
	vcpu->arch.itc_offset = regs->saved_itc - ia64_getreg(_IA64_REG_AR_ITC);
	set_bit(KVM_REQ_RESUME, &vcpu->requests);

	vcpu_put(vcpu);

	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		kvm_userspace_mem.slot = kvm_mem.slot;
		kvm_userspace_mem.flags = kvm_mem.flags;
		kvm_userspace_mem.guest_phys_addr =
					kvm_mem.guest_phys_addr;
		kvm_userspace_mem.memory_size = kvm_mem.memory_size;
		r = kvm_vm_ioctl_set_memory_region(kvm,
					&kvm_userspace_mem, 0);
		if (r)
			goto out;
		break;
	}
	case KVM_CREATE_IRQCHIP:
		r = kvm_ioapic_init(kvm);
		if (r)
			goto out;
		break;
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof irq_event))
			goto out;
		if (irqchip_in_kernel(kvm)) {
			mutex_lock(&kvm->lock);
			kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
				    irq_event.irq, irq_event.level);
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;
	}
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &chip, sizeof chip))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		;
	}
out:
	return r;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
		struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
		struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
		struct kvm_translation *tr)
{
	return -EINVAL;
}

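/*
 * Reserve the contiguous block that holds the relocated VMM image,
 * with the PAL VM buffer placed VMM_SIZE bytes in (KVM_VMM_SIZE is
 * presumably VMM_SIZE plus KVM_VM_BUFFER_SIZE).
 */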
static int kvm_alloc_vmm_area(void)
{
	if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) {
		kvm_vmm_base = __get_free_pages(GFP_KERNEL,
				get_order(KVM_VMM_SIZE));
		if (!kvm_vmm_base)
			return -ENOMEM;

		memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
		kvm_vm_buffer = kvm_vmm_base + VMM_SIZE;

		printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n",
				kvm_vmm_base, kvm_vm_buffer);
	}

	return 0;
}

static void kvm_free_vmm_area(void)
{
	if (kvm_vmm_base) {
		/* Zero this area before freeing it, to avoid leaking bits. */
		memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
		free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE));
		kvm_vmm_base = 0;
		kvm_vm_buffer = 0;
		kvm_vsa_base = 0;
	}
}

static void vti_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
}

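/*
 * Seed the VPD: mirror the host's CPUID leaves (capped at five),
 * enable the vac/vdc virtualization assists, and point the VPD at the
 * shared virtualization buffer.
 */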
static int vti_init_vpd(struct kvm_vcpu *vcpu)
{
	int i;
	union cpuid3_t cpuid3;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (IS_ERR(vpd))
		return PTR_ERR(vpd);

	/* CPUID init */
	for (i = 0; i < 5; i++)
		vpd->vcpuid[i] = ia64_get_cpuid(i);

	/* Limit the CPUID number to 5 */
	cpuid3.value = vpd->vcpuid[3];
	cpuid3.number = 4;	/* 5 - 1 */
	vpd->vcpuid[3] = cpuid3.value;

	/* Set vac and vdc fields */
	vpd->vac.a_from_int_cr = 1;
	vpd->vac.a_to_int_cr = 1;
	vpd->vac.a_from_psr = 1;
	vpd->vac.a_from_cpuid = 1;
	vpd->vac.a_cover = 1;
	vpd->vac.a_bsw = 1;
	vpd->vac.a_int = 1;
	vpd->vdc.d_vmsw = 1;

	/* Set virtual buffer */
	vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE;

	return 0;
}

static int vti_create_vp(struct kvm_vcpu *vcpu)
{
	long ret;
	struct vpd *vpd = vcpu->arch.vpd;
	unsigned long vmm_ivt;

	vmm_ivt = kvm_vmm_info->vmm_ivt;

	printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt);

	ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0);

	if (ret) {
		printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n");
		return -EINVAL;
	}
	return 0;
}

static void init_ptce_info(struct kvm_vcpu *vcpu)
{
	ia64_ptce_info_t ptce = {0};

	ia64_get_ptce(&ptce);
	vcpu->arch.ptce_base = ptce.base;
	vcpu->arch.ptce_count[0] = ptce.count[0];
	vcpu->arch.ptce_count[1] = ptce.count[1];
	vcpu->arch.ptce_stride[0] = ptce.stride[0];
	vcpu->arch.ptce_stride[1] = ptce.stride[1];
}

static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;

	if (hrtimer_cancel(p_ht))
		hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS);
}

static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
{
	struct kvm_vcpu *vcpu;
	wait_queue_head_t *q;

	vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
	q = &vcpu->wq;

	if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
		goto out;

	if (waitqueue_active(q))
		wake_up_interruptible(q);

out:
	vcpu->arch.timer_fired = 1;
	vcpu->arch.timer_check = 1;
	return HRTIMER_NORESTART;
}

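/*
 * First-time vcpu setup.  vcpu 0 starts runnable at SAL's reset entry
 * point and gives every vcpu a common ITC offset (so the guest's ITC
 * starts near zero); the vcpu structures sit at fixed strides inside
 * the VM data area, which is what the pointer arithmetic below walks.
 * Secondary vcpus stay UNINITIALIZED until they receive a startup IPI.
 */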
#define PALE_RESET_ENTRY	0x80000000ffffffb0UL

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *v;
	int r;
	int i;
	long itc_offset;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	union context *p_ctx = &vcpu->arch.guest;
	struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu);

	/* Init vcpu context for first run. */
	if (IS_ERR(vmm_vcpu))
		return PTR_ERR(vmm_vcpu);

	if (vcpu->vcpu_id == 0) {
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

		/* Set entry address for first run. */
		regs->cr_iip = PALE_RESET_ENTRY;

		/* Initialize itc offset for vcpus */
		itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
		for (i = 0; i < KVM_MAX_VCPUS; i++) {
			v = (struct kvm_vcpu *)((char *)vcpu +
					sizeof(struct kvm_vcpu_data) * i);
			v->arch.itc_offset = itc_offset;
			v->arch.last_itc = 0;
		}
	} else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	r = -ENOMEM;
	vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
	if (!vcpu->arch.apic)
		goto out;
	vcpu->arch.apic->vcpu = vcpu;

	p_ctx->gr[1] = 0;
	p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET);
	p_ctx->gr[13] = (unsigned long)vmm_vcpu;
	p_ctx->psr = 0x1008522000UL;
	p_ctx->ar[40] = FPSR_DEFAULT;	/* fpsr */
	p_ctx->caller_unat = 0;
	p_ctx->pr = 0x0;
	p_ctx->ar[36] = 0x0;	/* unat */
	p_ctx->ar[19] = 0x0;	/* rnat */
	p_ctx->ar[18] = (unsigned long)vmm_vcpu +
				((sizeof(struct kvm_vcpu)+15) & ~15);
	p_ctx->ar[64] = 0x0;	/* pfs */
	p_ctx->cr[0] = 0x7e04UL;
	p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt;
	p_ctx->cr[8] = 0x3c;

	/* Initialize region registers */
	p_ctx->rr[0] = 0x30;
	p_ctx->rr[1] = 0x30;
	p_ctx->rr[2] = 0x30;
	p_ctx->rr[3] = 0x30;
	p_ctx->rr[4] = 0x30;
	p_ctx->rr[5] = 0x30;
	p_ctx->rr[7] = 0x30;

	/* Initialize branch register 0 */
	p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry;

	vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr;
	vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0;
	vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4;

	hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	vcpu->arch.hlt_timer.function = hlt_timer_fn;

	vcpu->arch.last_run_cpu = -1;
	vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id);
	vcpu->arch.vsa_base = kvm_vsa_base;
	vcpu->arch.__gp = kvm_vmm_gp;
	vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
	vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id);
	vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id);
	init_ptce_info(vcpu);

	r = 0;
out:
	return r;
}

static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
{
	unsigned long psr;
	int r;

	local_irq_save(psr);
	r = kvm_insert_vmm_mapping(vcpu);
	if (r)
		goto fail;
	r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
	if (r)
		goto fail;

	r = vti_init_vpd(vcpu);
	if (r) {
		printk(KERN_DEBUG"kvm: vpd init error!!\n");
		goto uninit;
	}

	r = vti_create_vp(vcpu);
	if (r)
		goto uninit;

	kvm_purge_vmm_mapping(vcpu);
	local_irq_restore(psr);

	return 0;
uninit:
	kvm_vcpu_uninit(vcpu);
fail:
	local_irq_restore(psr);
	return r;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
		unsigned int id)
{
	struct kvm_vcpu *vcpu;
	unsigned long vm_base = kvm->arch.vm_base;
	int r;
	int cpu;

	BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2);

	r = -EINVAL;
	if (id >= KVM_MAX_VCPUS) {
		printk(KERN_ERR"kvm: Can't configure vcpus > %ld",
				KVM_MAX_VCPUS);
		goto fail;
	}

	r = -ENOMEM;
	if (!vm_base) {
		printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
		goto fail;
	}
	vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data,
					vcpu_data[id].vcpu_struct));
	vcpu->kvm = kvm;

	cpu = get_cpu();
	vti_vcpu_load(vcpu, cpu);
	r = vti_vcpu_setup(vcpu, id);
	put_cpu();

	if (r) {
		printk(KERN_DEBUG"kvm: vcpu_setup error!!\n");
		goto fail;
	}

	return vcpu;
fail:
	return ERR_PTR(r);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
		struct kvm_debug_guest *dbg)
{
	return -EINVAL;
}

static void free_kvm(struct kvm *kvm)
{
	unsigned long vm_base = kvm->arch.vm_base;

	if (vm_base) {
		memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
		free_pages(vm_base, get_order(KVM_VM_DATA_SIZE));
	}
}

static void kvm_release_vm_pages(struct kvm *kvm)
{
	struct kvm_memory_slot *memslot;
	int i, j;

	for (i = 0; i < kvm->nmemslots; i++) {
		memslot = &kvm->memslots[i];

		for (j = 0; j < memslot->npages; j++) {
			if (memslot->rmap[j])
				put_page((struct page *)memslot->rmap[j]);
		}
	}
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_iommu_unmap_guest(kvm);
#ifdef  KVM_CAP_DEVICE_ASSIGNMENT
	kvm_free_all_assigned_devices(kvm);
#endif
	kfree(kvm->arch.vioapic);
	kvm_release_vm_pages(kvm);
	kvm_free_physmem(kvm);
	free_kvm(kvm);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	if (cpu != vcpu->cpu) {
		vcpu->cpu = cpu;
		if (vcpu->arch.ht_active)
			kvm_migrate_hlt_timer(vcpu);
	}
}

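/* KVM_GET_REGS counterpart of kvm_arch_vcpu_ioctl_set_regs() above. */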
#define SAVE_REGS(_x)	regs->_x = vcpu->arch._x

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < 16; i++) {
		regs->vpd.vgr[i] = vpd->vgr[i];
		regs->vpd.vbgr[i] = vpd->vbgr[i];
	}
	for (i = 0; i < 128; i++)
		regs->vpd.vcr[i] = vpd->vcr[i];
	regs->vpd.vhpi = vpd->vhpi;
	regs->vpd.vnat = vpd->vnat;
	regs->vpd.vbnat = vpd->vbnat;
	regs->vpd.vpsr = vpd->vpsr;
	regs->vpd.vpr = vpd->vpr;

	memcpy(&regs->saved_guest, &vcpu->arch.guest, sizeof(union context));

	SAVE_REGS(mp_state);
	SAVE_REGS(vmm_rr);
	memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
	memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS);
	SAVE_REGS(itr_regions);
	SAVE_REGS(dtr_regions);
	SAVE_REGS(tc_regions);
	SAVE_REGS(irq_check);
	SAVE_REGS(itc_check);
	SAVE_REGS(timer_check);
	SAVE_REGS(timer_pending);
	SAVE_REGS(last_itc);
	for (i = 0; i < 8; i++) {
		regs->vrr[i] = vcpu->arch.vrr[i];
		regs->ibr[i] = vcpu->arch.ibr[i];
		regs->dbr[i] = vcpu->arch.dbr[i];
	}
	for (i = 0; i < 4; i++)
		regs->insvc[i] = vcpu->arch.insvc[i];
	regs->saved_itc = vcpu->arch.itc_offset + ia64_getreg(_IA64_REG_AR_ITC);
	SAVE_REGS(xtp);
	SAVE_REGS(metaphysical_rr0);
	SAVE_REGS(metaphysical_rr4);
	SAVE_REGS(metaphysical_saved_rr0);
	SAVE_REGS(metaphysical_saved_rr4);
	SAVE_REGS(fp_psr);
	SAVE_REGS(saved_gp);

	vcpu_put(vcpu);
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.hlt_timer);
	kfree(vcpu->arch.apic);
}

long kvm_arch_vcpu_ioctl(struct file *filp,
		unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

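/*
 * Populate the VMM's physical-to-machine table for a new slot: RAM
 * pages get cacheable RWX entries with a page reference kept in rmap,
 * while MMIO pages get uncacheable GPFN_PHYS_MMIO entries.
 */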
int kvm_arch_set_memory_region(struct kvm *kvm,
		struct kvm_userspace_memory_region *mem,
		struct kvm_memory_slot old,
		int user_alloc)
{
	unsigned long i;
	unsigned long pfn;
	int npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
	unsigned long base_gfn = memslot->base_gfn;

	if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
		return -ENOMEM;

	for (i = 0; i < npages; i++) {
		pfn = gfn_to_pfn(kvm, base_gfn + i);
		if (!kvm_is_mmio_pfn(pfn)) {
			kvm_set_pmt_entry(kvm, base_gfn + i,
					pfn << PAGE_SHIFT,
					_PAGE_AR_RWX | _PAGE_MA_WB);
			memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
		} else {
			kvm_set_pmt_entry(kvm, base_gfn + i,
					GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
					_PAGE_MA_UC);
			memslot->rmap[i] = 0;
		}
	}

	return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

long kvm_arch_dev_ioctl(struct file *filp,
		unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_uninit(vcpu);
}

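/*
 * Probe firmware for VT-i: PAL_PROC_GET_FEATURES must advertise the
 * VM bit, and PAL_VP_ENV_INFO supplies the buffer size checked by
 * kvm_alloc_vmm_area().  A missing VP_OPCODE assist only warns.
 */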
static int vti_cpu_has_kvm_support(void)
{
	long avail = 1, status = 1, control = 1;
	long ret;

	ret = ia64_pal_proc_get_features(&avail, &status, &control, 0);
	if (ret)
		goto out;

	if (!(avail & PAL_PROC_VM_BIT))
		goto out;

	printk(KERN_DEBUG"kvm: Hardware Supports VT\n");

	ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info);
	if (ret)
		goto out;
	printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size);

	if (!(vp_env_info & VP_OPCODE)) {
		printk(KERN_WARNING"kvm: No opcode ability on hardware, "
				"vm_env_info:0x%lx\n", vp_env_info);
	}

	return 1;
out:
	return 0;
}

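/*
 * Copy the VMM module's image into the pinned VMM area and patch it
 * up.  On ia64 an indirect function "pointer" is really a function
 * descriptor (an ip/gp pair), so vmm_entry and tramp_entry are
 * redirected by rewriting the descriptors at their relocated offsets.
 */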
static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
						struct module *module)
{
	unsigned long module_base;
	unsigned long vmm_size;

	unsigned long vmm_offset, func_offset, fdesc_offset;
	struct fdesc *p_fdesc;

	BUG_ON(!module);

	if (!kvm_vmm_base) {
		printk("kvm: kvm area hasn't been initialized yet!!\n");
		return -EFAULT;
	}

	/* Calculate new position of relocated vmm module. */
	module_base = (unsigned long)module->module_core;
	vmm_size = module->core_size;
	if (unlikely(vmm_size > KVM_VMM_SIZE))
		return -EFAULT;

	memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
	kvm_flush_icache(kvm_vmm_base, vmm_size);

	/* Recalculate kvm_vmm_info based on new VMM */
	vmm_offset = vmm_info->vmm_ivt - module_base;
	kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset;
	printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n",
			kvm_vmm_info->vmm_ivt);

	fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base;
	kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE +
							fdesc_offset);
	func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base;
	p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
	p_fdesc->ip = KVM_VMM_BASE + func_offset;
	p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);

	printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n",
			KVM_VMM_BASE + func_offset);

	fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base;
	kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE +
			fdesc_offset);
	func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base;
	p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
	p_fdesc->ip = KVM_VMM_BASE + func_offset;
	p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);

	kvm_vmm_gp = p_fdesc->gp;

	printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n",
			kvm_vmm_info->vmm_entry);
	printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n",
			KVM_VMM_BASE + func_offset);

	return 0;
}

int kvm_arch_init(void *opaque)
{
	int r;
	struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque;

	if (!vti_cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n");
		r = -EOPNOTSUPP;
		goto out;
	}

	if (kvm_vmm_info) {
		printk(KERN_ERR "kvm: Already loaded VMM module!\n");
		r = -EEXIST;
		goto out;
	}

	r = -ENOMEM;
	kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL);
	if (!kvm_vmm_info)
		goto out;

	if (kvm_alloc_vmm_area())
		goto out_free0;

	r = kvm_relocate_vmm(vmm_info, vmm_info->module);
	if (r)
		goto out_free1;

	return 0;

out_free1:
	kvm_free_vmm_area();
out_free0:
	kfree(kvm_vmm_info);
out:
	return r;
}

void kvm_arch_exit(void)
{
	kvm_free_vmm_area();
	kfree(kvm_vmm_info);
	kvm_vmm_info = NULL;
}

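/*
 * The VMM tracks dirty pages in one global bitmap inside the VM data
 * area; copy this slot's slice into the memslot bitmap and clear it.
 * The caller holds dirty_log_lock.
 */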
static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
		struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	long n, base;
	unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
			offsetof(struct kvm_vm_data, kvm_mem_dirty_log));

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
	base = memslot->base_gfn / BITS_PER_LONG;

	for (i = 0; i < n/sizeof(long); ++i) {
		memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
		dirty_bitmap[base + i] = 0;
	}
	r = 0;
out:
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
		struct kvm_dirty_log *log)
{
	int r;
	int n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	spin_lock(&kvm->arch.dirty_log_lock);

	r = kvm_ia64_sync_dirty_log(kvm, log);
	if (r)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		kvm_flush_remote_tlbs(kvm);
		memslot = &kvm->memslots[log->slot];
		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	spin_unlock(&kvm->arch.dirty_log_lock);
	return r;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

static void vcpu_kick_intr(void *info)
{
#ifdef DEBUG
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
	printk(KERN_DEBUG"vcpu_kick_intr %p \n", vcpu);
#endif
}

void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int ipi_pcpu = vcpu->cpu;
	int cpu = get_cpu();

	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);

	if (vcpu->guest_mode && cpu != ipi_pcpu)
		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
	put_cpu();
}

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (!test_and_set_bit(vec, &vpd->irr[0])) {
		vcpu->arch.irq_new_pending = 1;
		kvm_vcpu_kick(vcpu);
		return 1;
	}
	return 0;
}

int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
{
	return apic->vcpu->vcpu_id == dest;
}

int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
{
	return 0;
}

struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
				       unsigned long bitmap)
{
	struct kvm_vcpu *lvcpu = kvm->vcpus[0];
	int i;

	for (i = 1; i < KVM_MAX_VCPUS; i++) {
		if (!kvm->vcpus[i])
			continue;
		if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)
			lvcpu = kvm->vcpus[i];
	}

	return lvcpu;
}

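/*
 * Scan a 256-bit IRR, stored as eight 32-bit words, from the highest
 * word down.  fls() is 1-based, so e.g. bit 5 set in word 7 yields
 * 7 * 32 + 6 - 1 = 229.
 */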
static int find_highest_bits(int *dat)
{
	u32 bits, bitnum;
	int i;

	/* loop for all 256 bits */
	for (i = 7; i >= 0 ; i--) {
		bits = dat[i];
		if (bits) {
			bitnum = fls(bits);
			return i * 32 + bitnum - 1;
		}
	}

	return -1;
}

int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (vpd->irr[0] & (1UL << NMI_VECTOR))
		return NMI_VECTOR;
	if (vpd->irr[0] & (1UL << ExtINT_VECTOR))
		return ExtINT_VECTOR;

	return find_highest_bits((int *)&vpd->irr[0]);
}

int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	if (kvm_highest_pending_irq(vcpu) != -1)
		return 1;
	return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.timer_fired;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				struct kvm_mp_state *mp_state)
{
	vcpu_load(vcpu);
	mp_state->mp_state = vcpu->arch.mp_state;
	vcpu_put(vcpu);
	return 0;
}

static int vcpu_reset(struct kvm_vcpu *vcpu)
{
	int r;
	long psr;

	local_irq_save(psr);
	r = kvm_insert_vmm_mapping(vcpu);
	if (r)
		goto fail;

	vcpu->arch.launched = 0;
	kvm_arch_vcpu_uninit(vcpu);
	r = kvm_arch_vcpu_init(vcpu);
	if (r)
		goto fail;

	kvm_purge_vmm_mapping(vcpu);
	r = 0;
fail:
	local_irq_restore(psr);
	return r;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				struct kvm_mp_state *mp_state)
{
	int r = 0;

	vcpu_load(vcpu);
	vcpu->arch.mp_state = mp_state->mp_state;
	if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
		r = vcpu_reset(vcpu);
	vcpu_put(vcpu);
	return r;
}