arch/ia64/kvm/process.c
/*
 * process.c: handle interruption injection for guests.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Shaofan Li (Susue Li) <susie.li@intel.com>
 * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com>
 * Xuefei Xu (Anthony Xu) <Anthony.xu@intel.com>
 * Xiantao Zhang <xiantao.zhang@intel.com>
 */
#include "vcpu.h"

#include <asm/pal.h>
#include <asm/sal.h>
#include <asm/fpswa.h>
#include <asm/kregs.h>
#include <asm/tlb.h>

fpswa_interface_t *vmm_fpswa_interface;

#define IA64_VHPT_TRANS_VECTOR			0x0000
#define IA64_INST_TLB_VECTOR			0x0400
#define IA64_DATA_TLB_VECTOR			0x0800
#define IA64_ALT_INST_TLB_VECTOR		0x0c00
#define IA64_ALT_DATA_TLB_VECTOR		0x1000
#define IA64_DATA_NESTED_TLB_VECTOR		0x1400
#define IA64_INST_KEY_MISS_VECTOR		0x1800
#define IA64_DATA_KEY_MISS_VECTOR		0x1c00
#define IA64_DIRTY_BIT_VECTOR			0x2000
#define IA64_INST_ACCESS_BIT_VECTOR		0x2400
#define IA64_DATA_ACCESS_BIT_VECTOR		0x2800
#define IA64_BREAK_VECTOR			0x2c00
#define IA64_EXTINT_VECTOR			0x3000
#define IA64_PAGE_NOT_PRESENT_VECTOR		0x5000
#define IA64_KEY_PERMISSION_VECTOR		0x5100
#define IA64_INST_ACCESS_RIGHTS_VECTOR		0x5200
#define IA64_DATA_ACCESS_RIGHTS_VECTOR		0x5300
#define IA64_GENEX_VECTOR			0x5400
#define IA64_DISABLED_FPREG_VECTOR		0x5500
#define IA64_NAT_CONSUMPTION_VECTOR		0x5600
#define IA64_SPECULATION_VECTOR			0x5700 /* UNUSED */
#define IA64_DEBUG_VECTOR			0x5900
#define IA64_UNALIGNED_REF_VECTOR		0x5a00
#define IA64_UNSUPPORTED_DATA_REF_VECTOR	0x5b00
#define IA64_FP_FAULT_VECTOR			0x5c00
#define IA64_FP_TRAP_VECTOR			0x5d00
#define IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR	0x5e00
#define IA64_TAKEN_BRANCH_TRAP_VECTOR		0x5f00
#define IA64_SINGLE_STEP_TRAP_VECTOR		0x6000

/* SDM vol2 5.5 - IVA based interruption handling */
#define INITIAL_PSR_VALUE_AT_INTERRUPTION	(IA64_PSR_UP | IA64_PSR_MFL | \
		IA64_PSR_MFH | IA64_PSR_PK | IA64_PSR_DT | \
		IA64_PSR_RT | IA64_PSR_MC | IA64_PSR_IT)

#define DOMN_PAL_REQUEST	0x110000
#define DOMN_SAL_REQUEST	0x110001

static u64 vec2off[68] = {0x0, 0x400, 0x800, 0xc00, 0x1000, 0x1400, 0x1800,
	0x1c00, 0x2000, 0x2400, 0x2800, 0x2c00, 0x3000, 0x3400, 0x3800, 0x3c00,
	0x4000, 0x4400, 0x4800, 0x4c00, 0x5000, 0x5100, 0x5200, 0x5300, 0x5400,
	0x5500, 0x5600, 0x5700, 0x5800, 0x5900, 0x5a00, 0x5b00, 0x5c00, 0x5d00,
	0x5e00, 0x5f00, 0x6000, 0x6100, 0x6200, 0x6300, 0x6400, 0x6500, 0x6600,
	0x6700, 0x6800, 0x6900, 0x6a00, 0x6b00, 0x6c00, 0x6d00, 0x6e00, 0x6f00,
	0x7000, 0x7100, 0x7200, 0x7300, 0x7400, 0x7500, 0x7600, 0x7700, 0x7800,
	0x7900, 0x7a00, 0x7b00, 0x7c00, 0x7d00, 0x7e00, 0x7f00
};

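/*
 * collect_interruption() mimics what the hardware does on a real
 * interruption (SDM Vol2 5.5): switch to register bank 0, latch the
 * guest's IPSR/IIP/IFS/IIPA into the virtual control registers (when
 * vpsr.ic is set), and build the new guest PSR from the masked old
 * value plus dcr.be/dcr.pp.
 */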
static void collect_interruption(struct kvm_vcpu *vcpu)
{
	u64 ipsr;
	u64 vdcr;
	u64 vifs;
	unsigned long vpsr;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	vpsr = vcpu_get_psr(vcpu);
	vcpu_bsw0(vcpu);
	if (vpsr & IA64_PSR_IC) {

		/* Sync mpsr id/da/dd/ss/ed bits to vipsr, since after
		 * the guest does rfi we still want these bits on in
		 * mpsr.
		 */
		ipsr = regs->cr_ipsr;
		vpsr = vpsr | (ipsr & (IA64_PSR_ID | IA64_PSR_DA
					| IA64_PSR_DD | IA64_PSR_SS
					| IA64_PSR_ED));
		vcpu_set_ipsr(vcpu, vpsr);

		/* Currently, for a trap, we do not advance IIP to the
		 * next instruction; we assume the caller has already
		 * set up IIP correctly.
		 */
		vcpu_set_iip(vcpu, regs->cr_iip);

		/* set vifs.v to zero */
		vifs = VCPU(vcpu, ifs);
		vifs &= ~IA64_IFS_V;
		vcpu_set_ifs(vcpu, vifs);

		vcpu_set_iipa(vcpu, VMX(vcpu, cr_iipa));
	}

	vdcr = VCPU(vcpu, dcr);

	/* Set guest psr:
	 * up/mfl/mfh/pk/dt/rt/mc/it remain unchanged,
	 * be: set to the value of dcr.be,
	 * pp: set to the value of dcr.pp.
	 */
	vpsr &= INITIAL_PSR_VALUE_AT_INTERRUPTION;
	vpsr |= (vdcr & IA64_DCR_BE);

	/* The VDCR pp bit position differs from the VPSR pp bit. */
	if (vdcr & IA64_DCR_PP)
		vpsr |= IA64_PSR_PP;
	else
		vpsr &= ~IA64_PSR_PP;

	vcpu_set_psr(vcpu, vpsr);
}

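/*
 * Inject an interruption into the guest: clear cr.isr.ir, save the
 * interruption state, and redirect the guest to its handler at
 * vIVA + vector offset.
 */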
void inject_guest_interruption(struct kvm_vcpu *vcpu, u64 vec)
{
	u64 viva;
	struct kvm_pt_regs *regs;
	union ia64_isr pt_isr;

	regs = vcpu_regs(vcpu);

	/* clear cr.isr.ir (incomplete register frame) */
	pt_isr.val = VMX(vcpu, cr_isr);
	pt_isr.ir = 0;
	VMX(vcpu, cr_isr) = pt_isr.val;

	collect_interruption(vcpu);

	viva = vcpu_get_iva(vcpu);
	regs->cr_iip = viva + vec;
}

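/*
 * Build the ITIR value reported for a fault on @ifa: only the ps and
 * rid fields of the region register mapping the faulting address are
 * passed on.
 */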
static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa)
{
	union ia64_rr rr, rr1;

	rr.val = vcpu_get_rr(vcpu, ifa);
	rr1.val = 0;
	rr1.ps = rr.ps;
	rr1.rid = rr.rid;
	return (rr1.val);
}

/*
 * Set vIFA & vITIR & vIHA, when vPSR.ic = 1
 * Parameter:
 *  set_ifa: if true, set vIFA
 *  set_itir: if true, set vITIR
 *  set_iha: if true, set vIHA
 */
void set_ifa_itir_iha(struct kvm_vcpu *vcpu, u64 vadr,
		int set_ifa, int set_itir, int set_iha)
{
	long vpsr;
	u64 value;

	vpsr = VCPU(vcpu, vpsr);
	/* Vol2, Table 8-1 */
	if (vpsr & IA64_PSR_IC) {
		if (set_ifa)
			vcpu_set_ifa(vcpu, vadr);
		if (set_itir) {
			value = vcpu_get_itir_on_fault(vcpu, vadr);
			vcpu_set_itir(vcpu, value);
		}

		if (set_iha) {
			value = vcpu_thash(vcpu, vadr);
			vcpu_set_iha(vcpu, value);
		}
	}
}

/*
 * Data TLB Fault
 * @ Data TLB vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void dtlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
{
	/* If vPSR.ic, IFA, ITIR, IHA */
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
	inject_guest_interruption(vcpu, IA64_DATA_TLB_VECTOR);
}

/*
 * Instruction TLB Fault
 * @ Instruction TLB vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
{
	/* If vPSR.ic, IFA, ITIR, IHA */
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
	inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR);
}

/*
 * Data Nested TLB Fault
 * @ Data Nested TLB Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void nested_dtlb(struct kvm_vcpu *vcpu)
{
	inject_guest_interruption(vcpu, IA64_DATA_NESTED_TLB_VECTOR);
}

/*
 * Alternate Data TLB Fault
 * @ Alternate Data TLB vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr)
{
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
	inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR);
}

/*
 * Alternate Instruction TLB Fault
 * @ Alternate Instruction TLB vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void alt_itlb(struct kvm_vcpu *vcpu, u64 vadr)
{
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
	inject_guest_interruption(vcpu, IA64_ALT_INST_TLB_VECTOR);
}

/* Deal with:
 * VHPT Translation Vector
 */
static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
{
	/* If vPSR.ic, IFA, ITIR, IHA */
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
	inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR);
}

/*
 * VHPT Instruction Fault
 * @ VHPT Translation vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
{
	_vhpt_fault(vcpu, vadr);
}

/*
 * VHPT Data Fault
 * @ VHPT Translation vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
{
	_vhpt_fault(vcpu, vadr);
}

/*
 * Deal with:
 * General Exception vector
 */
void _general_exception(struct kvm_vcpu *vcpu)
{
	inject_guest_interruption(vcpu, IA64_GENEX_VECTOR);
}

/*
 * Illegal Operation Fault
 * @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void illegal_op(struct kvm_vcpu *vcpu)
{
	_general_exception(vcpu);
}

/*
 * Illegal Dependency Fault
 * @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void illegal_dep(struct kvm_vcpu *vcpu)
{
	_general_exception(vcpu);
}

/*
 * Reserved Register/Field Fault
 * @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void rsv_reg_field(struct kvm_vcpu *vcpu)
{
	_general_exception(vcpu);
}

/*
 * Privileged Operation Fault
 * @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void privilege_op(struct kvm_vcpu *vcpu)
{
	_general_exception(vcpu);
}

/*
 * Unimplemented Data Address Fault
 * @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void unimpl_daddr(struct kvm_vcpu *vcpu)
{
	_general_exception(vcpu);
}

/*
 * Privileged Register Fault
 * @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void privilege_reg(struct kvm_vcpu *vcpu)
{
	_general_exception(vcpu);
}

/* Deal with
 * Nat consumption vector
 * Parameter:
 *  vadr: only used when t == DATA or INSTRUCTION
 */
static void _nat_consumption_fault(struct kvm_vcpu *vcpu, u64 vadr,
		enum tlb_miss_type t)
{
	/* If vPSR.ic && t == DATA/INST, IFA */
	if (t == DATA || t == INSTRUCTION) {
		/* IFA */
		set_ifa_itir_iha(vcpu, vadr, 1, 0, 0);
	}

	inject_guest_interruption(vcpu, IA64_NAT_CONSUMPTION_VECTOR);
}

/*
 * Instruction Nat Page Consumption Fault
 * @ Nat Consumption Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void inat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr)
{
	_nat_consumption_fault(vcpu, vadr, INSTRUCTION);
}

/*
 * Register Nat Consumption Fault
 * @ Nat Consumption Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void rnat_consumption(struct kvm_vcpu *vcpu)
{
	_nat_consumption_fault(vcpu, 0, REGISTER);
}

/*
 * Data Nat Page Consumption Fault
 * @ Nat Consumption Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr)
{
	_nat_consumption_fault(vcpu, vadr, DATA);
}

/* Deal with
 * Page not present vector
 */
static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
{
	/* If vPSR.ic, IFA, ITIR */
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
	inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
}

void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
{
	__page_not_present(vcpu, vadr);
}

void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
{
	__page_not_present(vcpu, vadr);
}

/* Deal with
 * Data access rights vector
 */
void data_access_rights(struct kvm_vcpu *vcpu, u64 vadr)
{
	/* If vPSR.ic, IFA, ITIR */
	set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
	inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR);
}

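/*
 * Emulate a floating-point operation through the host FPSWA
 * (Floating-Point Software Assist) EFI interface.  Region 7 is
 * temporarily switched to the host mapping so the fpswa handler can
 * be reached.
 */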
fpswa_ret_t vmm_fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
		unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
		unsigned long *ifs, struct kvm_pt_regs *regs)
{
	fp_state_t fp_state;
	fpswa_ret_t ret;
	struct kvm_vcpu *vcpu = current_vcpu;

	uint64_t old_rr7 = ia64_get_rr(7UL << 61);

	if (!vmm_fpswa_interface)
		return (fpswa_ret_t) {-1, 0, 0, 0};

	/*
	 * Just let the fpswa driver use the hardware fp registers.
	 * No fp register is valid in memory.
	 */
	memset(&fp_state, 0, sizeof(fp_state_t));

	/*
	 * unsigned long (*EFI_FPSWA) (
	 *	unsigned long trap_type,
	 *	void *Bundle,
	 *	unsigned long *pipsr,
	 *	unsigned long *pfsr,
	 *	unsigned long *pisr,
	 *	unsigned long *ppreds,
	 *	unsigned long *pifs,
	 *	void *fp_state);
	 */
	/* Call the host fpswa interface directly to virtualize the
	 * guest fpswa request.
	 */
	ia64_set_rr(7UL << 61, vcpu->arch.host.rr[7]);
	ia64_srlz_d();

	ret = (*vmm_fpswa_interface->fpswa) (fp_fault, bundle,
			ipsr, fpsr, isr, pr, ifs, &fp_state);
	ia64_set_rr(7UL << 61, old_rr7);
	ia64_srlz_d();
	return ret;
}

/*
 * Handle floating-point assist faults and traps for the domain.
 */
unsigned long vmm_handle_fpu_swa(int fp_fault, struct kvm_pt_regs *regs,
		unsigned long isr)
{
	struct kvm_vcpu *v = current_vcpu;
	IA64_BUNDLE bundle;
	unsigned long fault_ip;
	fpswa_ret_t ret;

	fault_ip = regs->cr_iip;
	/*
	 * When an FP trap occurs, the trapping instruction has already
	 * completed.  If ipsr.ri == 0, the trapping instruction is in
	 * the previous bundle.
	 */
	if (!fp_fault && (ia64_psr(regs)->ri == 0))
		fault_ip -= 16;

	if (fetch_code(v, fault_ip, &bundle))
		return -EAGAIN;

	if (!bundle.i64[0] && !bundle.i64[1])
		return -EACCES;

	ret = vmm_fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
			&isr, &regs->pr, &regs->cr_ifs, regs);
	return ret.status;
}

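/*
 * Reflect an interruption to the guest.  FP faults and traps are first
 * offered to the FPSWA emulation; everything else is forwarded to the
 * guest handler with ISR/IIPA (plus IIM, or IFA/ITIR/IHA, depending on
 * the vector) set up.
 */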
void reflect_interruption(u64 ifa, u64 isr, u64 iim,
		u64 vec, struct kvm_pt_regs *regs)
{
	u64 vector;
	int status;
	struct kvm_vcpu *vcpu = current_vcpu;
	u64 vpsr = VCPU(vcpu, vpsr);

	vector = vec2off[vec];

	if (!(vpsr & IA64_PSR_IC) && (vector != IA64_DATA_NESTED_TLB_VECTOR)) {
		panic_vm(vcpu, "Interruption with vector:0x%lx occurs "
						"with psr.ic = 0\n", vector);
		return;
	}

	switch (vec) {
	case 32:	/* IA64_FP_FAULT_VECTOR */
		status = vmm_handle_fpu_swa(1, regs, isr);
		if (!status) {
			vcpu_increment_iip(vcpu);
			return;
		} else if (-EAGAIN == status)
			return;
		break;
	case 33:	/* IA64_FP_TRAP_VECTOR */
		status = vmm_handle_fpu_swa(0, regs, isr);
		if (!status)
			return;
		else if (-EAGAIN == status) {
			vcpu_decrement_iip(vcpu);
			return;
		}
		break;
	}

	VCPU(vcpu, isr) = isr;
	VCPU(vcpu, iipa) = regs->cr_iip;
	if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
		VCPU(vcpu, iim) = iim;
	else
		set_ifa_itir_iha(vcpu, ifa, 1, 1, 1);

	inject_guest_interruption(vcpu, vector);
}

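/*
 * PAL/SAL hypercall plumbing: the guest firmware puts the call
 * arguments in general registers before the break; the helpers below
 * copy them into exit_data for the host side and write the results
 * back to the guest's registers afterwards.
 */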
static void set_pal_call_data(struct kvm_vcpu *vcpu)
{
	struct exit_ctl_data *p = &vcpu->arch.exit_data;

	/*
	 * FIXME: For both the static and the stacked convention, the
	 * firmware has put the parameters in gr28-gr31 before breaking
	 * into the vmm!
	 */
	p->u.pal_data.gr28 = vcpu_get_gr(vcpu, 28);
	p->u.pal_data.gr29 = vcpu_get_gr(vcpu, 29);
	p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
	p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31);
	p->exit_reason = EXIT_REASON_PAL_CALL;
}

static void set_pal_call_result(struct kvm_vcpu *vcpu)
{
	struct exit_ctl_data *p = &vcpu->arch.exit_data;

	if (p->exit_reason == EXIT_REASON_PAL_CALL) {
		vcpu_set_gr(vcpu, 8, p->u.pal_data.ret.status, 0);
		vcpu_set_gr(vcpu, 9, p->u.pal_data.ret.v0, 0);
		vcpu_set_gr(vcpu, 10, p->u.pal_data.ret.v1, 0);
		vcpu_set_gr(vcpu, 11, p->u.pal_data.ret.v2, 0);
	} else
		panic_vm(vcpu, "Mis-set exit reason!\n");
}

static void set_sal_call_data(struct kvm_vcpu *vcpu)
{
	struct exit_ctl_data *p = &vcpu->arch.exit_data;

	p->u.sal_data.in0 = vcpu_get_gr(vcpu, 32);
	p->u.sal_data.in1 = vcpu_get_gr(vcpu, 33);
	p->u.sal_data.in2 = vcpu_get_gr(vcpu, 34);
	p->u.sal_data.in3 = vcpu_get_gr(vcpu, 35);
	p->u.sal_data.in4 = vcpu_get_gr(vcpu, 36);
	p->u.sal_data.in5 = vcpu_get_gr(vcpu, 37);
	p->u.sal_data.in6 = vcpu_get_gr(vcpu, 38);
	p->u.sal_data.in7 = vcpu_get_gr(vcpu, 39);
	p->exit_reason = EXIT_REASON_SAL_CALL;
}

static void set_sal_call_result(struct kvm_vcpu *vcpu)
{
	struct exit_ctl_data *p = &vcpu->arch.exit_data;

	if (p->exit_reason == EXIT_REASON_SAL_CALL) {
		vcpu_set_gr(vcpu, 8, p->u.sal_data.ret.r8, 0);
		vcpu_set_gr(vcpu, 9, p->u.sal_data.ret.r9, 0);
		vcpu_set_gr(vcpu, 10, p->u.sal_data.ret.r10, 0);
		vcpu_set_gr(vcpu, 11, p->u.sal_data.ret.r11, 0);
	} else
		panic_vm(vcpu, "Mis-set exit reason!\n");
}

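/*
 * Handle a break instruction from the guest.  Breaks issued at cpl 0
 * with the PAL/SAL request immediates are treated as hypercalls and
 * handed to the host through vmm_transition(); anything else is
 * reflected back as a Break fault (vector 11).
 */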
void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
		unsigned long isr, unsigned long iim)
{
	struct kvm_vcpu *v = current_vcpu;

	if (ia64_psr(regs)->cpl == 0) {
		/* Allow hypercalls only when cpl == 0. */
		if (iim == DOMN_PAL_REQUEST) {
			set_pal_call_data(v);
			vmm_transition(v);
			set_pal_call_result(v);
			vcpu_increment_iip(v);
			return;
		} else if (iim == DOMN_SAL_REQUEST) {
			set_sal_call_data(v);
			vmm_transition(v);
			set_sal_call_result(v);
			vcpu_increment_iip(v);
			return;
		}
	}
	reflect_interruption(ifa, isr, iim, 11, regs);
}

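/*
 * Check for a deliverable pending interrupt.  If the highest pending
 * vector is unmasked and vpsr.i is set, reflect an External Interrupt
 * (vector 12) to the guest; otherwise just keep VHPI in sync.
 */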
void check_pending_irq(struct kvm_vcpu *vcpu)
{
	int mask, h_pending, h_inservice;
	u64 isr;
	unsigned long vpsr;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	h_pending = highest_pending_irq(vcpu);
	if (h_pending == NULL_VECTOR) {
		update_vhpi(vcpu, NULL_VECTOR);
		return;
	}
	h_inservice = highest_inservice_irq(vcpu);

	vpsr = VCPU(vcpu, vpsr);
	mask = irq_masked(vcpu, h_pending, h_inservice);
	if ((vpsr & IA64_PSR_I) && IRQ_NO_MASKED == mask) {
		isr = vpsr & IA64_PSR_RI;
		update_vhpi(vcpu, h_pending);
		reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
	} else if (mask == IRQ_MASKED_BY_INSVC) {
		if (VCPU(vcpu, vhpi))
			update_vhpi(vcpu, NULL_VECTOR);
	} else {
		/* masked by vpsr.i or vtpr */
		update_vhpi(vcpu, h_pending);
	}
}

static void generate_exirq(struct kvm_vcpu *vcpu)
{
	unsigned vpsr;
	uint64_t isr;

	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	vpsr = VCPU(vcpu, vpsr);
	isr = vpsr & IA64_PSR_RI;
	if (!(vpsr & IA64_PSR_IC))
		panic_vm(vcpu, "Trying to inject an IRQ with psr.ic = 0\n");
	reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
}

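/*
 * Compare the highest pending vector (VHPI) against the masking
 * threshold formed from vpsr.i and vtpr.{mmi,mic}, and deliver an
 * external interrupt if it is high enough.
 */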
void vhpi_detection(struct kvm_vcpu *vcpu)
{
	uint64_t threshold, vhpi;
	union ia64_tpr vtpr;
	struct ia64_psr vpsr;

	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
	vtpr.val = VCPU(vcpu, tpr);

	threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
	vhpi = VCPU(vcpu, vhpi);
	if (vhpi > threshold) {
		/* interrupt activated */
		generate_exirq(vcpu);
	}
}

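/*
 * Called on the way back into the guest: pend the guest timer
 * interrupt if ITC has passed ITM, then deliver any newly pended
 * interrupts.
 */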
void leave_hypervisor_tail(void)
{
	struct kvm_vcpu *v = current_vcpu;

	if (VMX(v, timer_check)) {
		VMX(v, timer_check) = 0;
		if (VMX(v, itc_check)) {
			if (vcpu_get_itc(v) > VCPU(v, itm)) {
				if (!(VCPU(v, itv) & (1 << 16))) {
					vcpu_pend_interrupt(v, VCPU(v, itv)
							& 0xff);
					VMX(v, itc_check) = 0;
				} else {
					v->arch.timer_pending = 1;
				}
				VMX(v, last_itc) = VCPU(v, itm) + 1;
			}
		}
	}

	rmb();
	if (v->arch.irq_new_pending) {
		v->arch.irq_new_pending = 0;
		VMX(v, irq_check) = 0;
		check_pending_irq(v);
		return;
	}
	if (VMX(v, irq_check)) {
		VMX(v, irq_check) = 0;
		vhpi_detection(v);
	}
}

static inline void handle_lds(struct kvm_pt_regs *regs)
{
	regs->cr_ipsr |= IA64_PSR_ED;
}

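/*
 * TLB miss while the guest runs in physical mode: install an identity
 * mapping with the write-back memory attribute into the VHPT.
 */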
void physical_tlb_miss(struct kvm_vcpu *vcpu, unsigned long vadr, int type)
{
	unsigned long pte;
	union ia64_rr rr;

	rr.val = ia64_get_rr(vadr);
	pte = vadr & _PAGE_PPN_MASK;
	pte = pte | PHY_PAGE_WB;
	thash_vhpt_insert(vcpu, pte, (u64)(rr.ps << 2), vadr, type);
}

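/*
 * Central TLB-miss/page-fault dispatcher.  Resolve the miss from the
 * VTLB or the guest's VHPT where possible, intercept accesses to I/O
 * space for emulation, and otherwise reflect the appropriate fault
 * (alternate/nested/VHPT/TLB/page-not-present) into the guest.
 */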
void kvm_page_fault(u64 vadr, u64 vec, struct kvm_pt_regs *regs)
{
	unsigned long vpsr;
	int type;

	u64 vhpt_adr, gppa, pteval, rr, itir;
	union ia64_isr misr;
	union ia64_pta vpta;
	struct thash_data *data;
	struct kvm_vcpu *v = current_vcpu;

	vpsr = VCPU(v, vpsr);
	misr.val = VMX(v, cr_isr);

	type = vec;

	if (is_physical_mode(v) && (!(vadr << 1 >> 62))) {
		if (vec == 2) {
			if (__gpfn_is_io((vadr << 1) >> (PAGE_SHIFT + 1))) {
				emulate_io_inst(v, ((vadr << 1) >> 1), 4);
				return;
			}
		}
		physical_tlb_miss(v, vadr, type);
		return;
	}
	data = vtlb_lookup(v, vadr, type);
	if (data != 0) {
		if (type == D_TLB) {
			gppa = (vadr & ((1UL << data->ps) - 1))
				+ (data->ppn >> (data->ps - 12) << data->ps);
			if (__gpfn_is_io(gppa >> PAGE_SHIFT)) {
				if (data->pl >= ((regs->cr_ipsr >>
						IA64_PSR_CPL0_BIT) & 3))
					emulate_io_inst(v, gppa, data->ma);
				else {
					vcpu_set_isr(v, misr.val);
					data_access_rights(v, vadr);
				}
				return;
			}
		}
		thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type);

	} else if (type == D_TLB) {
		if (misr.sp) {
			handle_lds(regs);
			return;
		}

		rr = vcpu_get_rr(v, vadr);
		itir = rr & (RR_RID_MASK | RR_PS_MASK);

		if (!vhpt_enabled(v, vadr, misr.rs ? RSE_REF : DATA_REF)) {
			if (vpsr & IA64_PSR_IC) {
				vcpu_set_isr(v, misr.val);
				alt_dtlb(v, vadr);
			} else {
				nested_dtlb(v);
			}
			return;
		}

		vpta.val = vcpu_get_pta(v);
		/* avoid recursively walking (short format) VHPT */

		vhpt_adr = vcpu_thash(v, vadr);
		if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
			/* VHPT successfully read. */
			if (!(pteval & _PAGE_P)) {
				if (vpsr & IA64_PSR_IC) {
					vcpu_set_isr(v, misr.val);
					dtlb_fault(v, vadr);
				} else {
					nested_dtlb(v);
				}
			} else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) {
				thash_purge_and_insert(v, pteval, itir,
						vadr, D_TLB);
			} else if (vpsr & IA64_PSR_IC) {
				vcpu_set_isr(v, misr.val);
				dtlb_fault(v, vadr);
			} else {
				nested_dtlb(v);
			}
		} else {
			/* Can't read VHPT. */
			if (vpsr & IA64_PSR_IC) {
				vcpu_set_isr(v, misr.val);
				dvhpt_fault(v, vadr);
			} else {
				nested_dtlb(v);
			}
		}
	} else if (type == I_TLB) {
		if (!(vpsr & IA64_PSR_IC))
			misr.ni = 1;
		if (!vhpt_enabled(v, vadr, INST_REF)) {
			vcpu_set_isr(v, misr.val);
			alt_itlb(v, vadr);
			return;
		}

		vpta.val = vcpu_get_pta(v);

		vhpt_adr = vcpu_thash(v, vadr);
		if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
			/* VHPT successfully read. */
			if (pteval & _PAGE_P) {
				if ((pteval & _PAGE_MA_MASK) == _PAGE_MA_ST) {
					vcpu_set_isr(v, misr.val);
					itlb_fault(v, vadr);
					return;
				}
				rr = vcpu_get_rr(v, vadr);
				itir = rr & (RR_RID_MASK | RR_PS_MASK);
				thash_purge_and_insert(v, pteval, itir,
						vadr, I_TLB);
			} else {
				vcpu_set_isr(v, misr.val);
				inst_page_not_present(v, vadr);
			}
		} else {
			vcpu_set_isr(v, misr.val);
			ivhpt_fault(v, vadr);
		}
	}
}

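/* Inject a virtual external interrupt (vector 12) into the guest. */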
void kvm_vexirq(struct kvm_vcpu *vcpu)
{
	u64 vpsr, isr;
	struct kvm_pt_regs *regs;

	regs = vcpu_regs(vcpu);
	vpsr = VCPU(vcpu, vpsr);
	isr = vpsr & IA64_PSR_RI;
	reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
}

void kvm_ia64_handle_irq(struct kvm_vcpu *v)
{
	struct exit_ctl_data *p = &v->arch.exit_data;
	long psr;

	local_irq_save(psr);
	p->exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
	vmm_transition(v);
	local_irq_restore(psr);

	VMX(v, timer_check) = 1;
}

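/*
 * Remote side of a global purge (ptc.ga): temporarily install the
 * region mapping supplied in the request into region 0, purge the
 * matching entries from this vcpu's VHPT/VTLB, then restore the old
 * mapping.
 */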
static void ptc_ga_remote_func(struct kvm_vcpu *v, int pos)
{
	u64 oldrid, moldrid, oldpsbits, vaddr;
	struct kvm_ptc_g *p = &v->arch.ptc_g_data[pos];
	vaddr = p->vaddr;

	oldrid = VMX(v, vrr[0]);
	VMX(v, vrr[0]) = p->rr;
	oldpsbits = VMX(v, psbits[0]);
	VMX(v, psbits[0]) = VMX(v, psbits[REGION_NUMBER(vaddr)]);
	moldrid = ia64_get_rr(0x0);
	ia64_set_rr(0x0, vrrtomrr(p->rr));
	ia64_srlz_d();

	vaddr = PAGEALIGN(vaddr, p->ps);
	thash_purge_entries_remote(v, vaddr, p->ps);

	VMX(v, vrr[0]) = oldrid;
	VMX(v, psbits[0]) = oldpsbits;
	ia64_set_rr(0x0, moldrid);
	ia64_dv_serialize_data();
}

static void vcpu_do_resume(struct kvm_vcpu *vcpu)
{
	/* Re-init the VHPT and VTLB on resume. */
	vcpu->arch.vhpt.num = VHPT_NUM_ENTRIES;
	thash_init(&vcpu->arch.vhpt, VHPT_SHIFT);
	vcpu->arch.vtlb.num = VTLB_NUM_ENTRIES;
	thash_init(&vcpu->arch.vtlb, VTLB_SHIFT);

	ia64_set_pta(vcpu->arch.vhpt.pta.val);
}

static void vmm_sanity_check(struct kvm_vcpu *vcpu)
{
	struct exit_ctl_data *p = &vcpu->arch.exit_data;

	if (!vmm_sanity && p->exit_reason != EXIT_REASON_DEBUG) {
		panic_vm(vcpu, "Failed the vmm sanity check; "
			"it may be caused by a crashed vmm!\n\n");
	}
}

static void kvm_do_resume_op(struct kvm_vcpu *vcpu)
{
	vmm_sanity_check(vcpu); /* Guarantee the vcpu runs on a healthy vmm! */

	if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) {
		vcpu_do_resume(vcpu);
		return;
	}

	if (unlikely(test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))) {
		thash_purge_all(vcpu);
		return;
	}

	if (test_and_clear_bit(KVM_REQ_PTC_G, &vcpu->requests)) {
		while (vcpu->arch.ptc_g_count > 0)
			ptc_ga_remote_func(vcpu, --vcpu->arch.ptc_g_count);
	}
}

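/*
 * Switch from the VMM to the host side of KVM: save the vcpu's VPD
 * state via the PAL virtualization service, trampoline into the host
 * context, restore on the way back, then process any pending resume
 * requests.
 */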
void vmm_transition(struct kvm_vcpu *vcpu)
{
	ia64_call_vsa(PAL_VPS_SAVE, (unsigned long)vcpu->arch.vpd,
			1, 0, 0, 0, 0, 0);
	vmm_trampoline(&vcpu->arch.guest, &vcpu->arch.host);
	ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)vcpu->arch.vpd,
			1, 0, 0, 0, 0, 0);
	kvm_do_resume_op(vcpu);
}

void vmm_panic_handler(u64 vec)
{
	struct kvm_vcpu *vcpu = current_vcpu;

	vmm_sanity = 0;
	panic_vm(vcpu, "Unexpected interruption occurs in VMM, vector:0x%lx\n",
			vec2off[vec]);
}