KVM: convert bus to slots_lock
arch/ia64/kvm/kvm-ia64.c
1/*
2 * kvm_ia64.c: Basic KVM support on Itanium series processors
3 *
4 *
5 * Copyright (C) 2007, Intel Corporation.
6 * Xiantao Zhang (xiantao.zhang@intel.com)
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
19 * Place - Suite 330, Boston, MA 02111-1307 USA.
20 *
21 */
22
23#include <linux/module.h>
24#include <linux/errno.h>
25#include <linux/percpu.h>
26#include <linux/gfp.h>
27#include <linux/fs.h>
28#include <linux/smp.h>
29#include <linux/kvm_host.h>
30#include <linux/kvm.h>
31#include <linux/bitops.h>
32#include <linux/hrtimer.h>
33#include <linux/uaccess.h>
34#include <linux/iommu.h>
35#include <linux/intel-iommu.h>
36
37#include <asm/pgtable.h>
38#include <asm/gcc_intrin.h>
39#include <asm/pal.h>
40#include <asm/cacheflush.h>
41#include <asm/div64.h>
42#include <asm/tlb.h>
43#include <asm/elf.h>
44#include <asm/sn/addrs.h>
45#include <asm/sn/clksupport.h>
46#include <asm/sn/shub_mmr.h>
47
48#include "misc.h"
49#include "vti.h"
50#include "iodev.h"
51#include "ioapic.h"
52#include "lapic.h"
53#include "irq.h"
54
55static unsigned long kvm_vmm_base;
56static unsigned long kvm_vsa_base;
57static unsigned long kvm_vm_buffer;
58static unsigned long kvm_vm_buffer_size;
59unsigned long kvm_vmm_gp;
60
61static long vp_env_info;
62
63static struct kvm_vmm_info *kvm_vmm_info;
64
65static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);
66
67struct kvm_stats_debugfs_item debugfs_entries[] = {
68 { NULL }
69};
70
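/*
 * Return the current time source value: the SN2 RTC on sn2 platforms
 * (where the ITC is not stable), otherwise the cpu's ar.itc register.
 */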
71static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu)
72{
73#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
74 if (vcpu->kvm->arch.is_sn2)
75 return rtc_time();
76 else
77#endif
78 return ia64_getreg(_IA64_REG_AR_ITC);
79}
80
81static void kvm_flush_icache(unsigned long start, unsigned long len)
82{
83 int l;
84
85 for (l = 0; l < (len + 32); l += 32)
86 ia64_fc((void *)(start + l));
87
88 ia64_sync_i();
89 ia64_srlz_i();
90}
91
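/*
 * Purge the entire local TLB, walking the ptc.e loop described by the
 * PAL-provided base/count/stride parameters cached in local_cpu_data.
 */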
92static void kvm_flush_tlb_all(void)
93{
94 unsigned long i, j, count0, count1, stride0, stride1, addr;
95 long flags;
96
97 addr = local_cpu_data->ptce_base;
98 count0 = local_cpu_data->ptce_count[0];
99 count1 = local_cpu_data->ptce_count[1];
100 stride0 = local_cpu_data->ptce_stride[0];
101 stride1 = local_cpu_data->ptce_stride[1];
102
103 local_irq_save(flags);
104 for (i = 0; i < count0; ++i) {
105 for (j = 0; j < count1; ++j) {
106 ia64_ptce(addr);
107 addr += stride1;
108 }
109 addr += stride0;
110 }
111 local_irq_restore(flags);
112 ia64_srlz_i(); /* srlz.i implies srlz.d */
113}
114
115long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
116{
117 struct ia64_pal_retval iprv;
118
119 PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva,
120 (u64)opt_handler);
121
122 return iprv.status;
123}
124
125static DEFINE_SPINLOCK(vp_lock);
126
127void kvm_arch_hardware_enable(void *garbage)
128{
129 long status;
130 long tmp_base;
131 unsigned long pte;
132 unsigned long saved_psr;
133 int slot;
134
135 pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
136 local_irq_save(saved_psr);
137 slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
138 local_irq_restore(saved_psr);
139 if (slot < 0)
140 return;
141
142 spin_lock(&vp_lock);
143 status = ia64_pal_vp_init_env(kvm_vsa_base ?
144 VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
145 __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
146 if (status != 0) {
147 printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
148 return ;
149 }
150
151 if (!kvm_vsa_base) {
152 kvm_vsa_base = tmp_base;
153 printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base);
154 }
155 spin_unlock(&vp_lock);
156 ia64_ptr_entry(0x3, slot);
157}
158
159void kvm_arch_hardware_disable(void *garbage)
160{
161
162 long status;
163 int slot;
164 unsigned long pte;
165 unsigned long saved_psr;
166 unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA);
167
168 pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
169 PAGE_KERNEL));
170
171 local_irq_save(saved_psr);
172 slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
173 local_irq_restore(saved_psr);
174 if (slot < 0)
175 return;
176
177 status = ia64_pal_vp_exit_env(host_iva);
178 if (status)
179 printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n",
180 status);
181 ia64_ptr_entry(0x3, slot);
182}
183
184void kvm_arch_check_processor_compat(void *rtn)
185{
186 *(int *)rtn = 0;
187}
188
189int kvm_dev_ioctl_check_extension(long ext)
190{
191
192 int r;
193
194 switch (ext) {
195 case KVM_CAP_IRQCHIP:
196 case KVM_CAP_MP_STATE:
197 case KVM_CAP_IRQ_INJECT_STATUS:
198 r = 1;
199 break;
200 case KVM_CAP_COALESCED_MMIO:
201 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
202 break;
203 case KVM_CAP_IOMMU:
204 r = iommu_found();
205 break;
206 default:
207 r = 0;
208 }
209 return r;
210
211}
212
213static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
214 gpa_t addr, int len, int is_write)
215{
216 struct kvm_io_device *dev;
217
218 dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len, is_write);
219
220 return dev;
221}
222
223static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
224{
225 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
226 kvm_run->hw.hardware_exit_reason = 1;
227 return 0;
228}
229
230static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
231{
232 struct kvm_mmio_req *p;
233 struct kvm_io_device *mmio_dev;
234
235 p = kvm_get_vcpu_ioreq(vcpu);
236
237 if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
238 goto mmio;
239 vcpu->mmio_needed = 1;
240 vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
241 vcpu->mmio_size = kvm_run->mmio.len = p->size;
242 vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;
243
244 if (vcpu->mmio_is_write)
245 memcpy(vcpu->mmio_data, &p->data, p->size);
246 memcpy(kvm_run->mmio.data, &p->data, p->size);
247 kvm_run->exit_reason = KVM_EXIT_MMIO;
248 return 0;
249mmio:
250 mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr, p->size, !p->dir);
251 if (mmio_dev) {
252 if (!p->dir)
253 kvm_iodevice_write(mmio_dev, p->addr, p->size,
254 &p->data);
255 else
256 kvm_iodevice_read(mmio_dev, p->addr, p->size,
257 &p->data);
258
259 } else
260 printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
261 p->state = STATE_IORESP_READY;
262
263 return 1;
264}
265
266static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
267{
268 struct exit_ctl_data *p;
269
270 p = kvm_get_exit_data(vcpu);
271
272 if (p->exit_reason == EXIT_REASON_PAL_CALL)
273 return kvm_pal_emul(vcpu, kvm_run);
274 else {
275 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
276 kvm_run->hw.hardware_exit_reason = 2;
277 return 0;
278 }
279}
280
281static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
282{
283 struct exit_ctl_data *p;
284
285 p = kvm_get_exit_data(vcpu);
286
287 if (p->exit_reason == EXIT_REASON_SAL_CALL) {
288 kvm_sal_emul(vcpu);
289 return 1;
290 } else {
291 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
292 kvm_run->hw.hardware_exit_reason = 3;
293 return 0;
294 }
295
296}
297
298static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector)
299{
300 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
301
302 if (!test_and_set_bit(vector, &vpd->irr[0])) {
303 vcpu->arch.irq_new_pending = 1;
304 kvm_vcpu_kick(vcpu);
305 return 1;
306 }
307 return 0;
308}
309
310/*
311 * offset: address offset to IPI space.
312 * value: deliver value.
313 */
314static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
315 uint64_t vector)
316{
317 switch (dm) {
318 case SAPIC_FIXED:
319 break;
320 case SAPIC_NMI:
321 vector = 2;
322 break;
323 case SAPIC_EXTINT:
324 vector = 0;
325 break;
326 case SAPIC_INIT:
327 case SAPIC_PMI:
328 default:
329 printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n");
330 return;
331 }
332 __apic_accept_irq(vcpu, vector);
333}
334
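/*
 * Find the vcpu whose local SAPIC id/eid pair matches the given IPI target,
 * or NULL if no such vcpu exists.
 */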
335static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
336 unsigned long eid)
337{
338 union ia64_lid lid;
339 int i;
340 struct kvm_vcpu *vcpu;
341
342 kvm_for_each_vcpu(i, vcpu, kvm) {
343 lid.val = VCPU_LID(vcpu);
344 if (lid.id == id && lid.eid == eid)
345 return vcpu;
346 }
347
348 return NULL;
349}
350
351static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
352{
353 struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
354 struct kvm_vcpu *target_vcpu;
355 struct kvm_pt_regs *regs;
356 union ia64_ipi_a addr = p->u.ipi_data.addr;
357 union ia64_ipi_d data = p->u.ipi_data.data;
358
359 target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid);
360 if (!target_vcpu)
361 return handle_vm_error(vcpu, kvm_run);
362
363 if (!target_vcpu->arch.launched) {
364 regs = vcpu_regs(target_vcpu);
365
366 regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
367 regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;
368
369 target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
370 if (waitqueue_active(&target_vcpu->wq))
371 wake_up_interruptible(&target_vcpu->wq);
372 } else {
373 vcpu_deliver_ipi(target_vcpu, data.dm, data.vector);
374 if (target_vcpu != vcpu)
375 kvm_vcpu_kick(target_vcpu);
376 }
377
378 return 1;
379}
380
381struct call_data {
382 struct kvm_ptc_g ptc_g_data;
383 struct kvm_vcpu *vcpu;
384};
385
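/*
 * Runs on the target cpu via smp_call_function_single(): queue the guest's
 * ptc.g purge request on that vcpu, or fall back to requesting a full TLB
 * flush once the per-vcpu queue overflows.
 */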
386static void vcpu_global_purge(void *info)
387{
388 struct call_data *p = (struct call_data *)info;
389 struct kvm_vcpu *vcpu = p->vcpu;
390
391 if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
392 return;
393
394 set_bit(KVM_REQ_PTC_G, &vcpu->requests);
395 if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) {
396 vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] =
397 p->ptc_g_data;
398 } else {
399 clear_bit(KVM_REQ_PTC_G, &vcpu->requests);
400 vcpu->arch.ptc_g_count = 0;
401 set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
402 }
403}
404
405static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
406{
407 struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
408 struct kvm *kvm = vcpu->kvm;
409 struct call_data call_data;
410 int i;
411 struct kvm_vcpu *vcpui;
412
413 call_data.ptc_g_data = p->u.ptc_g_data;
414
415 kvm_for_each_vcpu(i, vcpui, kvm) {
416 if (vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED ||
417 vcpu == vcpui)
418 continue;
419
420 if (waitqueue_active(&vcpui->wq))
421 wake_up_interruptible(&vcpui->wq);
422
423 if (vcpui->cpu != -1) {
424 call_data.vcpu = vcpui;
425 smp_call_function_single(vcpui->cpu,
426 vcpu_global_purge, &call_data, 1);
427 } else
428 printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
429
430 }
431 return 1;
432}
433
434static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
435{
436 return 1;
437}
438
439static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu)
440{
441 unsigned long pte, rtc_phys_addr, map_addr;
442 int slot;
443
444 map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT);
445 rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC;
446 pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC));
447 slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT);
448 vcpu->arch.sn_rtc_tr_slot = slot;
449 if (slot < 0) {
450 printk(KERN_ERR "Mayday mayday! RTC mapping failed!\n");
451 slot = 0;
452 }
453 return slot;
454}
455
456int kvm_emulate_halt(struct kvm_vcpu *vcpu)
457{
458
459 ktime_t kt;
460 long itc_diff;
461 unsigned long vcpu_now_itc;
462 unsigned long expires;
463 struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
464 unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
465 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
466
467 if (irqchip_in_kernel(vcpu->kvm)) {
468
469 vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset;
470
471 if (time_after(vcpu_now_itc, vpd->itm)) {
472 vcpu->arch.timer_check = 1;
473 return 1;
474 }
475 itc_diff = vpd->itm - vcpu_now_itc;
476 if (itc_diff < 0)
477 itc_diff = -itc_diff;
478
479 expires = div64_u64(itc_diff, cyc_per_usec);
480 kt = ktime_set(0, 1000 * expires);
481
482 vcpu->arch.ht_active = 1;
483 hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
484
485 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
486 kvm_vcpu_block(vcpu);
487 hrtimer_cancel(p_ht);
488 vcpu->arch.ht_active = 0;
489
490 if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) ||
491 kvm_cpu_has_pending_timer(vcpu))
492 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
493 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
494
495 if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
496 return -EINTR;
497 return 1;
498 } else {
499 printk(KERN_ERR"kvm: Unsupported userspace halt!");
500 return 0;
501 }
502}
503
504static int handle_vm_shutdown(struct kvm_vcpu *vcpu,
505 struct kvm_run *kvm_run)
506{
507 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
508 return 0;
509}
510
511static int handle_external_interrupt(struct kvm_vcpu *vcpu,
512 struct kvm_run *kvm_run)
513{
514 return 1;
515}
516
517static int handle_vcpu_debug(struct kvm_vcpu *vcpu,
518 struct kvm_run *kvm_run)
519{
520 printk("VMM: %s", vcpu->arch.log_buf);
521 return 1;
522}
523
524static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
525 struct kvm_run *kvm_run) = {
526 [EXIT_REASON_VM_PANIC] = handle_vm_error,
527 [EXIT_REASON_MMIO_INSTRUCTION] = handle_mmio,
528 [EXIT_REASON_PAL_CALL] = handle_pal_call,
529 [EXIT_REASON_SAL_CALL] = handle_sal_call,
530 [EXIT_REASON_SWITCH_RR6] = handle_switch_rr6,
531 [EXIT_REASON_VM_DESTROY] = handle_vm_shutdown,
532 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
533 [EXIT_REASON_IPI] = handle_ipi,
534 [EXIT_REASON_PTC_G] = handle_global_purge,
535 [EXIT_REASON_DEBUG] = handle_vcpu_debug,
536
537};
538
539static const int kvm_vti_max_exit_handlers =
540 sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
541
542static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
543{
544 struct exit_ctl_data *p_exit_data;
545
546 p_exit_data = kvm_get_exit_data(vcpu);
547 return p_exit_data->exit_reason;
548}
549
550/*
551 * The guest has exited. See if we can fix it or if we need userspace
552 * assistance.
553 */
554static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
555{
556 u32 exit_reason = kvm_get_exit_reason(vcpu);
557 vcpu->arch.last_exit = exit_reason;
558
559 if (exit_reason < kvm_vti_max_exit_handlers
560 && kvm_vti_exit_handlers[exit_reason])
561 return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run);
562 else {
563 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
564 kvm_run->hw.hardware_exit_reason = exit_reason;
565 }
566 return 0;
567}
568
569static inline void vti_set_rr6(unsigned long rr6)
570{
571 ia64_set_rr(RR6, rr6);
572 ia64_srlz_i();
573}
574
575static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
576{
577 unsigned long pte;
578 struct kvm *kvm = vcpu->kvm;
579 int r;
580
581 /*Insert a pair of tr to map vmm*/
582 pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
583 r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
584 if (r < 0)
585 goto out;
586 vcpu->arch.vmm_tr_slot = r;
587 /*Insert a pair of tr to map data of vm*/
588 pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
589 r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
590 pte, KVM_VM_DATA_SHIFT);
591 if (r < 0)
592 goto out;
593 vcpu->arch.vm_tr_slot = r;
594
595#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
596 if (kvm->arch.is_sn2) {
597 r = kvm_sn2_setup_mappings(vcpu);
598 if (r < 0)
599 goto out;
600 }
601#endif
602
603 r = 0;
604out:
605 return r;
606}
607
608static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
609{
610 struct kvm *kvm = vcpu->kvm;
611 ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
612 ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
613#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
614 if (kvm->arch.is_sn2)
615 ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot);
616#endif
617}
618
619static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
620{
621 unsigned long psr;
622 int r;
623 int cpu = smp_processor_id();
624
625 if (vcpu->arch.last_run_cpu != cpu ||
626 per_cpu(last_vcpu, cpu) != vcpu) {
627 per_cpu(last_vcpu, cpu) = vcpu;
628 vcpu->arch.last_run_cpu = cpu;
629 kvm_flush_tlb_all();
630 }
631
632 vcpu->arch.host_rr6 = ia64_get_rr(RR6);
633 vti_set_rr6(vcpu->arch.vmm_rr);
634 local_irq_save(psr);
635 r = kvm_insert_vmm_mapping(vcpu);
636 local_irq_restore(psr);
637 return r;
638}
639
640static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
641{
642 kvm_purge_vmm_mapping(vcpu);
643 vti_set_rr6(vcpu->arch.host_rr6);
644}
645
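/*
 * Main guest-entry loop.  kvm->slots_lock is held for read while exits are
 * handled (the mmio bus lookup depends on it) and dropped across the actual
 * transition into the guest.
 */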
646static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
647{
648 union context *host_ctx, *guest_ctx;
649 int r;
650
651 /*
652 * down_read() may sleep and return with interrupts enabled
653 */
654 down_read(&vcpu->kvm->slots_lock);
655
656again:
657 if (signal_pending(current)) {
658 r = -EINTR;
659 kvm_run->exit_reason = KVM_EXIT_INTR;
660 goto out;
661 }
662
663 preempt_disable();
664 local_irq_disable();
665
666 /*Get host and guest context with guest address space.*/
667 host_ctx = kvm_get_host_context(vcpu);
668 guest_ctx = kvm_get_guest_context(vcpu);
669
670 clear_bit(KVM_REQ_KICK, &vcpu->requests);
671
672 r = kvm_vcpu_pre_transition(vcpu);
673 if (r < 0)
674 goto vcpu_run_fail;
675
676 up_read(&vcpu->kvm->slots_lock);
677 kvm_guest_enter();
678
679 /*
680 * Transition to the guest
681 */
682 kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
683
684 kvm_vcpu_post_transition(vcpu);
685
686 vcpu->arch.launched = 1;
687 set_bit(KVM_REQ_KICK, &vcpu->requests);
688 local_irq_enable();
689
690 /*
691 * We must have an instruction between local_irq_enable() and
692 * kvm_guest_exit(), so the timer interrupt isn't delayed by
693 * the interrupt shadow. The stat.exits increment will do nicely.
694 * But we need to prevent reordering, hence this barrier():
695 */
696 barrier();
697 kvm_guest_exit();
698 preempt_enable();
699
700 down_read(&vcpu->kvm->slots_lock);
701
702 r = kvm_handle_exit(kvm_run, vcpu);
703
704 if (r > 0) {
705 if (!need_resched())
706 goto again;
707 }
708
709out:
710 up_read(&vcpu->kvm->slots_lock);
711 if (r > 0) {
712 kvm_resched(vcpu);
713 down_read(&vcpu->kvm->slots_lock);
714 goto again;
715 }
716
717 return r;
718
719vcpu_run_fail:
720 local_irq_enable();
721 preempt_enable();
722 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
723 goto out;
724}
725
726static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
727{
728 struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);
729
730 if (!vcpu->mmio_is_write)
731 memcpy(&p->data, vcpu->mmio_data, 8);
732 p->state = STATE_IORESP_READY;
733}
734
735int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
736{
737 int r;
738 sigset_t sigsaved;
739
740 vcpu_load(vcpu);
741
742 if (vcpu->sigset_active)
743 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
744
745 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
746 kvm_vcpu_block(vcpu);
747 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
748 r = -EAGAIN;
749 goto out;
750 }
751
752 if (vcpu->mmio_needed) {
753 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
754 kvm_set_mmio_data(vcpu);
755 vcpu->mmio_read_completed = 1;
756 vcpu->mmio_needed = 0;
757 }
758 r = __vcpu_run(vcpu, kvm_run);
759out:
760 if (vcpu->sigset_active)
761 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
762
763 vcpu_put(vcpu);
764 return r;
765}
766
767static struct kvm *kvm_alloc_kvm(void)
768{
769
770 struct kvm *kvm;
771 uint64_t vm_base;
772
773 BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE);
774
775 vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
776
777 if (!vm_base)
778 return ERR_PTR(-ENOMEM);
779
780 memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
781 kvm = (struct kvm *)(vm_base +
782 offsetof(struct kvm_vm_data, kvm_vm_struct));
783 kvm->arch.vm_base = vm_base;
784 printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base);
785
786 return kvm;
787}
788
789struct kvm_io_range {
790 unsigned long start;
791 unsigned long size;
792 unsigned long type;
793};
794
795static const struct kvm_io_range io_ranges[] = {
796 {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
797 {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
798 {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
799 {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
800 {PIB_START, PIB_SIZE, GPFN_PIB},
801};
802
803static void kvm_build_io_pmt(struct kvm *kvm)
804{
805 unsigned long i, j;
806
807 /* Mark I/O ranges */
808 for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range));
809 i++) {
810 for (j = io_ranges[i].start;
811 j < io_ranges[i].start + io_ranges[i].size;
812 j += PAGE_SIZE)
813 kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT,
814 io_ranges[i].type, 0);
815 }
816
817}
818
819/*Use unused rids to virtualize guest rid.*/
820#define GUEST_PHYSICAL_RR0 0x1739
821#define GUEST_PHYSICAL_RR4 0x2739
822#define VMM_INIT_RR 0x1660
823
824static void kvm_init_vm(struct kvm *kvm)
825{
826 BUG_ON(!kvm);
827
828 kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
829 kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
830 kvm->arch.vmm_init_rr = VMM_INIT_RR;
831
832 /*
833 *Fill P2M entries for MMIO/IO ranges
834 */
835 kvm_build_io_pmt(kvm);
836
837 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
838
839 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
840 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
841}
842
843struct kvm *kvm_arch_create_vm(void)
844{
845 struct kvm *kvm = kvm_alloc_kvm();
846
847 if (IS_ERR(kvm))
848 return ERR_PTR(-ENOMEM);
849
850 kvm->arch.is_sn2 = ia64_platform_is("sn2");
851
852 kvm_init_vm(kvm);
853
854 return kvm;
855
856}
857
858static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
859 struct kvm_irqchip *chip)
860{
861 int r;
862
863 r = 0;
864 switch (chip->chip_id) {
865 case KVM_IRQCHIP_IOAPIC:
866 memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm),
867 sizeof(struct kvm_ioapic_state));
868 break;
869 default:
870 r = -EINVAL;
871 break;
872 }
873 return r;
874}
875
876static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
877{
878 int r;
879
880 r = 0;
881 switch (chip->chip_id) {
882 case KVM_IRQCHIP_IOAPIC:
883 memcpy(ioapic_irqchip(kvm),
884 &chip->chip.ioapic,
885 sizeof(struct kvm_ioapic_state));
886 break;
887 default:
888 r = -EINVAL;
889 break;
890 }
891 return r;
892}
893
894#define RESTORE_REGS(_x) vcpu->arch._x = regs->_x
895
896int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
897{
898 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
899 int i;
900
901 vcpu_load(vcpu);
902
903 for (i = 0; i < 16; i++) {
904 vpd->vgr[i] = regs->vpd.vgr[i];
905 vpd->vbgr[i] = regs->vpd.vbgr[i];
906 }
907 for (i = 0; i < 128; i++)
908 vpd->vcr[i] = regs->vpd.vcr[i];
909 vpd->vhpi = regs->vpd.vhpi;
910 vpd->vnat = regs->vpd.vnat;
911 vpd->vbnat = regs->vpd.vbnat;
912 vpd->vpsr = regs->vpd.vpsr;
913
914 vpd->vpr = regs->vpd.vpr;
915
916 memcpy(&vcpu->arch.guest, &regs->saved_guest, sizeof(union context));
917
918 RESTORE_REGS(mp_state);
919 RESTORE_REGS(vmm_rr);
920 memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS);
921 memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS);
922 RESTORE_REGS(itr_regions);
923 RESTORE_REGS(dtr_regions);
924 RESTORE_REGS(tc_regions);
925 RESTORE_REGS(irq_check);
926 RESTORE_REGS(itc_check);
927 RESTORE_REGS(timer_check);
928 RESTORE_REGS(timer_pending);
929 RESTORE_REGS(last_itc);
930 for (i = 0; i < 8; i++) {
931 vcpu->arch.vrr[i] = regs->vrr[i];
932 vcpu->arch.ibr[i] = regs->ibr[i];
933 vcpu->arch.dbr[i] = regs->dbr[i];
934 }
935 for (i = 0; i < 4; i++)
936 vcpu->arch.insvc[i] = regs->insvc[i];
937 RESTORE_REGS(xtp);
938 RESTORE_REGS(metaphysical_rr0);
939 RESTORE_REGS(metaphysical_rr4);
940 RESTORE_REGS(metaphysical_saved_rr0);
941 RESTORE_REGS(metaphysical_saved_rr4);
942 RESTORE_REGS(fp_psr);
943 RESTORE_REGS(saved_gp);
944
945 vcpu->arch.irq_new_pending = 1;
946 vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu);
947 set_bit(KVM_REQ_RESUME, &vcpu->requests);
948
949 vcpu_put(vcpu);
950
951 return 0;
952}
953
954long kvm_arch_vm_ioctl(struct file *filp,
955 unsigned int ioctl, unsigned long arg)
956{
957 struct kvm *kvm = filp->private_data;
958 void __user *argp = (void __user *)arg;
959 int r = -EINVAL;
960
961 switch (ioctl) {
962 case KVM_SET_MEMORY_REGION: {
963 struct kvm_memory_region kvm_mem;
964 struct kvm_userspace_memory_region kvm_userspace_mem;
965
966 r = -EFAULT;
967 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
968 goto out;
969 kvm_userspace_mem.slot = kvm_mem.slot;
970 kvm_userspace_mem.flags = kvm_mem.flags;
971 kvm_userspace_mem.guest_phys_addr =
972 kvm_mem.guest_phys_addr;
973 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
974 r = kvm_vm_ioctl_set_memory_region(kvm,
975 &kvm_userspace_mem, 0);
976 if (r)
977 goto out;
978 break;
979 }
980 case KVM_CREATE_IRQCHIP:
981 r = -EFAULT;
982 r = kvm_ioapic_init(kvm);
983 if (r)
984 goto out;
985 r = kvm_setup_default_irq_routing(kvm);
986 if (r) {
987 kfree(kvm->arch.vioapic);
988 goto out;
989 }
990 break;
991 case KVM_IRQ_LINE_STATUS:
992 case KVM_IRQ_LINE: {
993 struct kvm_irq_level irq_event;
994
995 r = -EFAULT;
996 if (copy_from_user(&irq_event, argp, sizeof irq_event))
997 goto out;
998 if (irqchip_in_kernel(kvm)) {
999 __s32 status;
1000 mutex_lock(&kvm->irq_lock);
1001 status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
1002 irq_event.irq, irq_event.level);
1003 mutex_unlock(&kvm->irq_lock);
1004 if (ioctl == KVM_IRQ_LINE_STATUS) {
1005 irq_event.status = status;
1006 if (copy_to_user(argp, &irq_event,
1007 sizeof irq_event))
1008 goto out;
1009 }
1010 r = 0;
1011 }
1012 break;
1013 }
1014 case KVM_GET_IRQCHIP: {
1015 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
1016 struct kvm_irqchip chip;
1017
1018 r = -EFAULT;
1019 if (copy_from_user(&chip, argp, sizeof chip))
1020 goto out;
1021 r = -ENXIO;
1022 if (!irqchip_in_kernel(kvm))
1023 goto out;
1024 r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
1025 if (r)
1026 goto out;
1027 r = -EFAULT;
1028 if (copy_to_user(argp, &chip, sizeof chip))
1029 goto out;
1030 r = 0;
1031 break;
1032 }
1033 case KVM_SET_IRQCHIP: {
1034 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
1035 struct kvm_irqchip chip;
1036
1037 r = -EFAULT;
1038 if (copy_from_user(&chip, argp, sizeof chip))
1039 goto out;
1040 r = -ENXIO;
1041 if (!irqchip_in_kernel(kvm))
1042 goto out;
1043 r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
1044 if (r)
1045 goto out;
1046 r = 0;
1047 break;
1048 }
1049 default:
1050 ;
1051 }
1052out:
1053 return r;
1054}
1055
1056int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1057 struct kvm_sregs *sregs)
1058{
1059 return -EINVAL;
1060}
1061
1062int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1063 struct kvm_sregs *sregs)
1064{
1065 return -EINVAL;
1066
1067}
1068int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1069 struct kvm_translation *tr)
1070{
1071
1072 return -EINVAL;
1073}
1074
1075static int kvm_alloc_vmm_area(void)
1076{
1077 if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) {
1078 kvm_vmm_base = __get_free_pages(GFP_KERNEL,
1079 get_order(KVM_VMM_SIZE));
1080 if (!kvm_vmm_base)
1081 return -ENOMEM;
1082
1083 memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
1084 kvm_vm_buffer = kvm_vmm_base + VMM_SIZE;
1085
1086 printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n",
1087 kvm_vmm_base, kvm_vm_buffer);
1088 }
1089
1090 return 0;
1091}
1092
1093static void kvm_free_vmm_area(void)
1094{
1095 if (kvm_vmm_base) {
1096 /*Zero this area before free to avoid bits leak!!*/
1097 memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
1098 free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE));
1099 kvm_vmm_base = 0;
1100 kvm_vm_buffer = 0;
1101 kvm_vsa_base = 0;
1102 }
1103}
1104
1105static int vti_init_vpd(struct kvm_vcpu *vcpu)
1106{
1107 int i;
1108 union cpuid3_t cpuid3;
1109 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
1110
1111 if (IS_ERR(vpd))
1112 return PTR_ERR(vpd);
1113
1114 /* CPUID init */
1115 for (i = 0; i < 5; i++)
1116 vpd->vcpuid[i] = ia64_get_cpuid(i);
1117
1118 /* Limit the CPUID number to 5 */
1119 cpuid3.value = vpd->vcpuid[3];
1120 cpuid3.number = 4; /* 5 - 1 */
1121 vpd->vcpuid[3] = cpuid3.value;
1122
1123 /*Set vac and vdc fields*/
1124 vpd->vac.a_from_int_cr = 1;
1125 vpd->vac.a_to_int_cr = 1;
1126 vpd->vac.a_from_psr = 1;
1127 vpd->vac.a_from_cpuid = 1;
1128 vpd->vac.a_cover = 1;
1129 vpd->vac.a_bsw = 1;
1130 vpd->vac.a_int = 1;
1131 vpd->vdc.d_vmsw = 1;
1132
1133 /*Set virtual buffer*/
1134 vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE;
1135
1136 return 0;
1137}
1138
1139static int vti_create_vp(struct kvm_vcpu *vcpu)
1140{
1141 long ret;
1142 struct vpd *vpd = vcpu->arch.vpd;
1143 unsigned long vmm_ivt;
1144
1145 vmm_ivt = kvm_vmm_info->vmm_ivt;
1146
1147 printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt);
1148
1149 ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0);
1150
1151 if (ret) {
1152 printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n");
1153 return -EINVAL;
1154 }
1155 return 0;
1156}
1157
1158static void init_ptce_info(struct kvm_vcpu *vcpu)
1159{
1160 ia64_ptce_info_t ptce = {0};
1161
1162 ia64_get_ptce(&ptce);
1163 vcpu->arch.ptce_base = ptce.base;
1164 vcpu->arch.ptce_count[0] = ptce.count[0];
1165 vcpu->arch.ptce_count[1] = ptce.count[1];
1166 vcpu->arch.ptce_stride[0] = ptce.stride[0];
1167 vcpu->arch.ptce_stride[1] = ptce.stride[1];
1168}
1169
1170static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)
1171{
1172 struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
1173
1174 if (hrtimer_cancel(p_ht))
1175 hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS);
1176}
1177
1178static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
1179{
1180 struct kvm_vcpu *vcpu;
1181 wait_queue_head_t *q;
1182
1183 vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
1184 q = &vcpu->wq;
1185
1186 if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
1187 goto out;
1188
1189 if (waitqueue_active(q))
1190 wake_up_interruptible(q);
1191
1192out:
1193 vcpu->arch.timer_fired = 1;
1194 vcpu->arch.timer_check = 1;
1195 return HRTIMER_NORESTART;
1196}
1197
1198#define PALE_RESET_ENTRY 0x80000000ffffffb0UL
1199
1200int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1201{
1202 struct kvm_vcpu *v;
1203 int r;
1204 int i;
1205 long itc_offset;
1206 struct kvm *kvm = vcpu->kvm;
1207 struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1208
1209 union context *p_ctx = &vcpu->arch.guest;
1210 struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu);
1211
1212 /*Init vcpu context for first run.*/
1213 if (IS_ERR(vmm_vcpu))
1214 return PTR_ERR(vmm_vcpu);
1215
1216 if (kvm_vcpu_is_bsp(vcpu)) {
1217 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
1218
1219 /*Set entry address for first run.*/
1220 regs->cr_iip = PALE_RESET_ENTRY;
1221
1222 /*Initialize itc offset for vcpus*/
1223 itc_offset = 0UL - kvm_get_itc(vcpu);
1224 for (i = 0; i < KVM_MAX_VCPUS; i++) {
1225 v = (struct kvm_vcpu *)((char *)vcpu +
1226 sizeof(struct kvm_vcpu_data) * i);
1227 v->arch.itc_offset = itc_offset;
1228 v->arch.last_itc = 0;
1229 }
1230 } else
1231 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
1232
1233 r = -ENOMEM;
1234 vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
1235 if (!vcpu->arch.apic)
1236 goto out;
1237 vcpu->arch.apic->vcpu = vcpu;
1238
1239 p_ctx->gr[1] = 0;
1240 p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET);
1241 p_ctx->gr[13] = (unsigned long)vmm_vcpu;
1242 p_ctx->psr = 0x1008522000UL;
1243 p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/
1244 p_ctx->caller_unat = 0;
1245 p_ctx->pr = 0x0;
1246 p_ctx->ar[36] = 0x0; /*unat*/
1247 p_ctx->ar[19] = 0x0; /*rnat*/
1248 p_ctx->ar[18] = (unsigned long)vmm_vcpu +
1249 ((sizeof(struct kvm_vcpu)+15) & ~15);
1250 p_ctx->ar[64] = 0x0; /*pfs*/
1251 p_ctx->cr[0] = 0x7e04UL;
1252 p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt;
1253 p_ctx->cr[8] = 0x3c;
1254
1255 /*Initialize region register*/
1256 p_ctx->rr[0] = 0x30;
1257 p_ctx->rr[1] = 0x30;
1258 p_ctx->rr[2] = 0x30;
1259 p_ctx->rr[3] = 0x30;
1260 p_ctx->rr[4] = 0x30;
1261 p_ctx->rr[5] = 0x30;
1262 p_ctx->rr[7] = 0x30;
1263
1264 /*Initialize branch register 0*/
1265 p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry;
1266
1267 vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr;
1268 vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0;
1269 vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4;
1270
1271 hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1272 vcpu->arch.hlt_timer.function = hlt_timer_fn;
1273
1274 vcpu->arch.last_run_cpu = -1;
1275 vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id);
1276 vcpu->arch.vsa_base = kvm_vsa_base;
1277 vcpu->arch.__gp = kvm_vmm_gp;
1278 vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
1279 vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id);
1280 vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id);
1281 init_ptce_info(vcpu);
1282
1283 r = 0;
1284out:
1285 return r;
1286}
1287
1288static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
1289{
1290 unsigned long psr;
1291 int r;
1292
1293 local_irq_save(psr);
1294 r = kvm_insert_vmm_mapping(vcpu);
1295 local_irq_restore(psr);
1296 if (r)
1297 goto fail;
1298 r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
1299 if (r)
1300 goto fail;
1301
1302 r = vti_init_vpd(vcpu);
1303 if (r) {
1304 printk(KERN_DEBUG"kvm: vpd init error!!\n");
1305 goto uninit;
1306 }
1307
1308 r = vti_create_vp(vcpu);
1309 if (r)
1310 goto uninit;
1311
1312 kvm_purge_vmm_mapping(vcpu);
1313
1314 return 0;
1315uninit:
1316 kvm_vcpu_uninit(vcpu);
1317fail:
1318 return r;
1319}
1320
1321struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1322 unsigned int id)
1323{
1324 struct kvm_vcpu *vcpu;
1325 unsigned long vm_base = kvm->arch.vm_base;
1326 int r;
1327 int cpu;
1328
1329 BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2);
1330
1331 r = -EINVAL;
1332 if (id >= KVM_MAX_VCPUS) {
1333 printk(KERN_ERR"kvm: Can't configure vcpus > %ld",
1334 KVM_MAX_VCPUS);
1335 goto fail;
1336 }
1337
1338 r = -ENOMEM;
1339 if (!vm_base) {
1340 printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
1341 goto fail;
1342 }
1343 vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data,
1344 vcpu_data[id].vcpu_struct));
1345 vcpu->kvm = kvm;
1346
1347 cpu = get_cpu();
1348 r = vti_vcpu_setup(vcpu, id);
1349 put_cpu();
1350
1351 if (r) {
1352 printk(KERN_DEBUG"kvm: vcpu_setup error!!\n");
1353 goto fail;
1354 }
1355
1356 return vcpu;
1357fail:
1358 return ERR_PTR(r);
1359}
1360
1361int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1362{
1363 return 0;
1364}
1365
1366int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1367{
1368 return -EINVAL;
1369}
1370
1371int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1372{
1373 return -EINVAL;
1374}
1375
1376int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1377 struct kvm_guest_debug *dbg)
1378{
1379 return -EINVAL;
1380}
1381
1382static void free_kvm(struct kvm *kvm)
1383{
1384 unsigned long vm_base = kvm->arch.vm_base;
1385
1386 if (vm_base) {
1387 memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
1388 free_pages(vm_base, get_order(KVM_VM_DATA_SIZE));
1389 }
1390
1391}
1392
1393static void kvm_release_vm_pages(struct kvm *kvm)
1394{
1395 struct kvm_memory_slot *memslot;
1396 int i, j;
1397 unsigned long base_gfn;
1398
1399 for (i = 0; i < kvm->nmemslots; i++) {
1400 memslot = &kvm->memslots[i];
1401 base_gfn = memslot->base_gfn;
1402
1403 for (j = 0; j < memslot->npages; j++) {
1404 if (memslot->rmap[j])
1405 put_page((struct page *)memslot->rmap[j]);
1406 }
1407 }
1408}
1409
1410void kvm_arch_sync_events(struct kvm *kvm)
1411{
1412}
1413
1414void kvm_arch_destroy_vm(struct kvm *kvm)
1415{
1416 kvm_iommu_unmap_guest(kvm);
1417#ifdef KVM_CAP_DEVICE_ASSIGNMENT
1418 kvm_free_all_assigned_devices(kvm);
1419#endif
1420 kfree(kvm->arch.vioapic);
1421 kvm_release_vm_pages(kvm);
1422 kvm_free_physmem(kvm);
1423 free_kvm(kvm);
1424}
1425
1426void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1427{
1428}
1429
1430void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1431{
1432 if (cpu != vcpu->cpu) {
1433 vcpu->cpu = cpu;
1434 if (vcpu->arch.ht_active)
1435 kvm_migrate_hlt_timer(vcpu);
1436 }
1437}
1438
1439#define SAVE_REGS(_x) regs->_x = vcpu->arch._x
1440
1441int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1442{
1443 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
1444 int i;
1445
1446 vcpu_load(vcpu);
1447
1448 for (i = 0; i < 16; i++) {
1449 regs->vpd.vgr[i] = vpd->vgr[i];
1450 regs->vpd.vbgr[i] = vpd->vbgr[i];
1451 }
1452 for (i = 0; i < 128; i++)
1453 regs->vpd.vcr[i] = vpd->vcr[i];
1454 regs->vpd.vhpi = vpd->vhpi;
1455 regs->vpd.vnat = vpd->vnat;
1456 regs->vpd.vbnat = vpd->vbnat;
1457 regs->vpd.vpsr = vpd->vpsr;
1458 regs->vpd.vpr = vpd->vpr;
1459
1460 memcpy(&regs->saved_guest, &vcpu->arch.guest, sizeof(union context));
1461
1462 SAVE_REGS(mp_state);
1463 SAVE_REGS(vmm_rr);
1464 memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
1465 memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS);
1466 SAVE_REGS(itr_regions);
1467 SAVE_REGS(dtr_regions);
1468 SAVE_REGS(tc_regions);
1469 SAVE_REGS(irq_check);
1470 SAVE_REGS(itc_check);
1471 SAVE_REGS(timer_check);
1472 SAVE_REGS(timer_pending);
1473 SAVE_REGS(last_itc);
1474 for (i = 0; i < 8; i++) {
1475 regs->vrr[i] = vcpu->arch.vrr[i];
1476 regs->ibr[i] = vcpu->arch.ibr[i];
1477 regs->dbr[i] = vcpu->arch.dbr[i];
1478 }
1479 for (i = 0; i < 4; i++)
1480 regs->insvc[i] = vcpu->arch.insvc[i];
1481 regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu);
1482 SAVE_REGS(xtp);
1483 SAVE_REGS(metaphysical_rr0);
1484 SAVE_REGS(metaphysical_rr4);
1485 SAVE_REGS(metaphysical_saved_rr0);
1486 SAVE_REGS(metaphysical_saved_rr4);
1487 SAVE_REGS(fp_psr);
1488 SAVE_REGS(saved_gp);
1489
1490 vcpu_put(vcpu);
1491 return 0;
1492}
1493
1494int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu,
1495 struct kvm_ia64_vcpu_stack *stack)
1496{
1497 memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack));
1498 return 0;
1499}
1500
1501int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu,
1502 struct kvm_ia64_vcpu_stack *stack)
1503{
1504 memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu),
1505 sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu));
1506
1507 vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data;
1508 return 0;
1509}
1510
1511void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
1512{
1513
1514 hrtimer_cancel(&vcpu->arch.hlt_timer);
1515 kfree(vcpu->arch.apic);
1516}
1517
1518
1519long kvm_arch_vcpu_ioctl(struct file *filp,
1520 unsigned int ioctl, unsigned long arg)
1521{
1522 struct kvm_vcpu *vcpu = filp->private_data;
1523 void __user *argp = (void __user *)arg;
1524 struct kvm_ia64_vcpu_stack *stack = NULL;
1525 long r;
1526
1527 switch (ioctl) {
1528 case KVM_IA64_VCPU_GET_STACK: {
1529 struct kvm_ia64_vcpu_stack __user *user_stack;
1530 void __user *first_p = argp;
1531
1532 r = -EFAULT;
1533 if (copy_from_user(&user_stack, first_p, sizeof(void *)))
1534 goto out;
1535
1536 if (!access_ok(VERIFY_WRITE, user_stack,
1537 sizeof(struct kvm_ia64_vcpu_stack))) {
1538 printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: "
1539 "Illegal user destination address for stack\n");
1540 goto out;
1541 }
1542 stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
1543 if (!stack) {
1544 r = -ENOMEM;
1545 goto out;
1546 }
1547
1548 r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack);
1549 if (r)
1550 goto out;
1551
1552 if (copy_to_user(user_stack, stack,
1553 sizeof(struct kvm_ia64_vcpu_stack)))
1554 goto out;
1555
1556 break;
1557 }
1558 case KVM_IA64_VCPU_SET_STACK: {
1559 struct kvm_ia64_vcpu_stack __user *user_stack;
1560 void __user *first_p = argp;
1561
1562 r = -EFAULT;
1563 if (copy_from_user(&user_stack, first_p, sizeof(void *)))
1564 goto out;
1565
1566 if (!access_ok(VERIFY_READ, user_stack,
1567 sizeof(struct kvm_ia64_vcpu_stack))) {
1568 printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: "
1569 "Illegal user address for stack\n");
1570 goto out;
1571 }
1572 stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
1573 if (!stack) {
1574 r = -ENOMEM;
1575 goto out;
1576 }
1577 if (copy_from_user(stack, user_stack,
1578 sizeof(struct kvm_ia64_vcpu_stack)))
1579 goto out;
1580
1581 r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack);
1582 break;
1583 }
1584
1585 default:
1586 r = -EINVAL;
1587 }
1588
1589out:
1590 kfree(stack);
1591 return r;
1592}
1593
1594int kvm_arch_set_memory_region(struct kvm *kvm,
1595 struct kvm_userspace_memory_region *mem,
1596 struct kvm_memory_slot old,
1597 int user_alloc)
1598{
1599 unsigned long i;
1600 unsigned long pfn;
1601 int npages = mem->memory_size >> PAGE_SHIFT;
1602 struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
1603 unsigned long base_gfn = memslot->base_gfn;
1604
1605 if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
1606 return -ENOMEM;
1607
1608 for (i = 0; i < npages; i++) {
1609 pfn = gfn_to_pfn(kvm, base_gfn + i);
1610 if (!kvm_is_mmio_pfn(pfn)) {
1611 kvm_set_pmt_entry(kvm, base_gfn + i,
1612 pfn << PAGE_SHIFT,
1613 _PAGE_AR_RWX | _PAGE_MA_WB);
1614 memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
1615 } else {
1616 kvm_set_pmt_entry(kvm, base_gfn + i,
1617 GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
1618 _PAGE_MA_UC);
1619 memslot->rmap[i] = 0;
1620 }
1621 }
1622
1623 return 0;
1624}
1625
1626void kvm_arch_flush_shadow(struct kvm *kvm)
1627{
1628 kvm_flush_remote_tlbs(kvm);
1629}
1630
1631long kvm_arch_dev_ioctl(struct file *filp,
1632 unsigned int ioctl, unsigned long arg)
1633{
1634 return -EINVAL;
1635}
1636
1637void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1638{
1639 kvm_vcpu_uninit(vcpu);
1640}
1641
1642static int vti_cpu_has_kvm_support(void)
1643{
1644 long avail = 1, status = 1, control = 1;
1645 long ret;
1646
1647 ret = ia64_pal_proc_get_features(&avail, &status, &control, 0);
1648 if (ret)
1649 goto out;
1650
1651 if (!(avail & PAL_PROC_VM_BIT))
1652 goto out;
1653
1654 printk(KERN_DEBUG"kvm: Hardware Supports VT\n");
1655
1656 ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info);
1657 if (ret)
1658 goto out;
1659 printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size);
1660
1661 if (!(vp_env_info & VP_OPCODE)) {
1662 printk(KERN_WARNING"kvm: No opcode ability on hardware, "
1663 "vm_env_info:0x%lx\n", vp_env_info);
1664 }
1665
1666 return 1;
1667out:
1668 return 0;
1669}
1670
1671
1672/*
1673 * On SN2, the ITC isn't stable, so copy in fast path code to use the
1674 * SN2 RTC, replacing the ITC based default version.
1675 */
1676static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info,
1677 struct module *module)
1678{
1679 unsigned long new_ar, new_ar_sn2;
1680 unsigned long module_base;
1681
1682 if (!ia64_platform_is("sn2"))
1683 return;
1684
1685 module_base = (unsigned long)module->module_core;
1686
1687 new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base;
1688 new_ar_sn2 = kvm_vmm_base + vmm_info->patch_mov_ar_sn2 - module_base;
1689
1690 printk(KERN_INFO "kvm: Patching ITC emulation to use SGI SN2 RTC "
1691 "as source\n");
1692
1693 /*
1694 * Copy the SN2 version of mov_ar into place. They are both
1695 * the same size, so 6 bundles is sufficient (6 * 0x10).
1696 */
1697 memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60);
1698}
1699
1700static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
1701 struct module *module)
1702{
1703 unsigned long module_base;
1704 unsigned long vmm_size;
1705
1706 unsigned long vmm_offset, func_offset, fdesc_offset;
1707 struct fdesc *p_fdesc;
1708
1709 BUG_ON(!module);
1710
1711 if (!kvm_vmm_base) {
1712 printk("kvm: kvm area hasn't been initialized yet!!\n");
1713 return -EFAULT;
1714 }
1715
1716 /*Calculate new position of relocated vmm module.*/
1717 module_base = (unsigned long)module->module_core;
1718 vmm_size = module->core_size;
1719 if (unlikely(vmm_size > KVM_VMM_SIZE))
1720 return -EFAULT;
1721
1722 memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
1723 kvm_patch_vmm(vmm_info, module);
1724 kvm_flush_icache(kvm_vmm_base, vmm_size);
1725
1726 /*Recalculate kvm_vmm_info based on new VMM*/
1727 vmm_offset = vmm_info->vmm_ivt - module_base;
1728 kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset;
1729 printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n",
1730 kvm_vmm_info->vmm_ivt);
1731
1732 fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base;
1733 kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE +
1734 fdesc_offset);
1735 func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base;
1736 p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
1737 p_fdesc->ip = KVM_VMM_BASE + func_offset;
1738 p_fdesc->gp = KVM_VMM_BASE+(p_fdesc->gp - module_base);
1739
1740 printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n",
1741 KVM_VMM_BASE+func_offset);
1742
1743 fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base;
1744 kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE +
1745 fdesc_offset);
1746 func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base;
1747 p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
1748 p_fdesc->ip = KVM_VMM_BASE + func_offset;
1749 p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);
1750
1751 kvm_vmm_gp = p_fdesc->gp;
1752
1753 printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n",
1754 kvm_vmm_info->vmm_entry);
1755 printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n",
1756 KVM_VMM_BASE + func_offset);
1757
1758 return 0;
1759}
1760
1761int kvm_arch_init(void *opaque)
1762{
1763 int r;
1764 struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque;
1765
1766 if (!vti_cpu_has_kvm_support()) {
1767 printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n");
1768 r = -EOPNOTSUPP;
1769 goto out;
1770 }
1771
1772 if (kvm_vmm_info) {
1773 printk(KERN_ERR "kvm: Already loaded VMM module!\n");
1774 r = -EEXIST;
1775 goto out;
1776 }
1777
1778 r = -ENOMEM;
1779 kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL);
1780 if (!kvm_vmm_info)
1781 goto out;
1782
1783 if (kvm_alloc_vmm_area())
1784 goto out_free0;
1785
1786 r = kvm_relocate_vmm(vmm_info, vmm_info->module);
1787 if (r)
1788 goto out_free1;
1789
1790 return 0;
1791
1792out_free1:
1793 kvm_free_vmm_area();
1794out_free0:
1795 kfree(kvm_vmm_info);
1796out:
1797 return r;
1798}
1799
1800void kvm_arch_exit(void)
1801{
1802 kvm_free_vmm_area();
1803 kfree(kvm_vmm_info);
1804 kvm_vmm_info = NULL;
1805}
1806
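/*
 * Copy the dirty bitmap maintained by the VMM in the vm data area into the
 * memslot's dirty_bitmap, clearing the source words as they are read.
 */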
1807static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
1808 struct kvm_dirty_log *log)
1809{
1810 struct kvm_memory_slot *memslot;
1811 int r, i;
1812 long n, base;
1813 unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
1814 offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
1815
1816 r = -EINVAL;
1817 if (log->slot >= KVM_MEMORY_SLOTS)
1818 goto out;
1819
1820 memslot = &kvm->memslots[log->slot];
1821 r = -ENOENT;
1822 if (!memslot->dirty_bitmap)
1823 goto out;
1824
1825 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
1826 base = memslot->base_gfn / BITS_PER_LONG;
1827
1828 for (i = 0; i < n/sizeof(long); ++i) {
1829 memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
1830 dirty_bitmap[base + i] = 0;
1831 }
1832 r = 0;
1833out:
1834 return r;
1835}
1836
1837int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1838 struct kvm_dirty_log *log)
1839{
1840 int r;
1841 int n;
1842 struct kvm_memory_slot *memslot;
1843 int is_dirty = 0;
1844
1845 spin_lock(&kvm->arch.dirty_log_lock);
1846
1847 r = kvm_ia64_sync_dirty_log(kvm, log);
1848 if (r)
1849 goto out;
1850
1851 r = kvm_get_dirty_log(kvm, log, &is_dirty);
1852 if (r)
1853 goto out;
1854
1855 /* If nothing is dirty, don't bother messing with page tables. */
1856 if (is_dirty) {
1857 kvm_flush_remote_tlbs(kvm);
1858 memslot = &kvm->memslots[log->slot];
1859 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
1860 memset(memslot->dirty_bitmap, 0, n);
1861 }
1862 r = 0;
1863out:
1864 spin_unlock(&kvm->arch.dirty_log_lock);
1865 return r;
1866}
1867
1868int kvm_arch_hardware_setup(void)
1869{
1870 return 0;
1871}
1872
1873void kvm_arch_hardware_unsetup(void)
1874{
1875}
1876
1877void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
1878{
1879 int me;
1880 int cpu = vcpu->cpu;
1881
1882 if (waitqueue_active(&vcpu->wq))
1883 wake_up_interruptible(&vcpu->wq);
1884
1885 me = get_cpu();
1886 if (cpu != me && (unsigned) cpu < nr_cpu_ids && cpu_online(cpu))
1887 if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
1888 smp_send_reschedule(cpu);
1889 put_cpu();
1890}
1891
1892int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
1893{
1894 return __apic_accept_irq(vcpu, irq->vector);
1895}
1896
1897int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
1898{
1899 return apic->vcpu->vcpu_id == dest;
1900}
1901
1902int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
1903{
1904 return 0;
1905}
1906
1907int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
1908{
1909 return vcpu1->arch.xtp - vcpu2->arch.xtp;
1910}
1911
1912int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
1913 int short_hand, int dest, int dest_mode)
1914{
1915 struct kvm_lapic *target = vcpu->arch.apic;
1916 return (dest_mode == 0) ?
1917 kvm_apic_match_physical_addr(target, dest) :
1918 kvm_apic_match_logical_addr(target, dest);
1919}
1920
1921static int find_highest_bits(int *dat)
1922{
1923 u32 bits, bitnum;
1924 int i;
1925
1926 /* loop for all 256 bits */
1927 for (i = 7; i >= 0 ; i--) {
1928 bits = dat[i];
1929 if (bits) {
1930 bitnum = fls(bits);
1931 return i * 32 + bitnum - 1;
1932 }
1933 }
1934
1935 return -1;
1936}
1937
1938int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
1939{
1940 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
1941
1942 if (vpd->irr[0] & (1UL << NMI_VECTOR))
1943 return NMI_VECTOR;
1944 if (vpd->irr[0] & (1UL << ExtINT_VECTOR))
1945 return ExtINT_VECTOR;
1946
1947 return find_highest_bits((int *)&vpd->irr[0]);
1948}
1949
1950int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
1951{
1952 if (kvm_highest_pending_irq(vcpu) != -1)
1953 return 1;
1954 return 0;
1955}
1956
1957int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
1958{
1959 /* do real check here */
1960 return 1;
1961}
1962
3d80840d
MT
1963int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
1964{
1965 return vcpu->arch.timer_fired;
1966}
1967
1968gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
1969{
1970 return gfn;
1971}
1972
1973int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1974{
1975 return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE;
1976}
1977
1978int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1979 struct kvm_mp_state *mp_state)
1980{
1981 vcpu_load(vcpu);
1982 mp_state->mp_state = vcpu->arch.mp_state;
1983 vcpu_put(vcpu);
1984 return 0;
1985}
1986
1987static int vcpu_reset(struct kvm_vcpu *vcpu)
1988{
1989 int r;
1990 long psr;
1991 local_irq_save(psr);
1992 r = kvm_insert_vmm_mapping(vcpu);
1993 local_irq_restore(psr);
1994 if (r)
1995 goto fail;
1996
1997 vcpu->arch.launched = 0;
1998 kvm_arch_vcpu_uninit(vcpu);
1999 r = kvm_arch_vcpu_init(vcpu);
2000 if (r)
2001 goto fail;
2002
2003 kvm_purge_vmm_mapping(vcpu);
2004 r = 0;
2005fail:
2006 return r;
2007}
2008
2009int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2010 struct kvm_mp_state *mp_state)
2011{
2012 int r = 0;
2013
2014 vcpu_load(vcpu);
2015 vcpu->arch.mp_state = mp_state->mp_state;
2016 if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
2017 r = vcpu_reset(vcpu);
2018 vcpu_put(vcpu);
2019 return r;
2020}