/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>

#ifdef __LITTLE_ENDIAN__
#error Need to fix lppaca and SLB shadow accesses in little endian mode
#endif

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
        mflr    r0
        std     r0, PPC_LR_STKOFF(r1)
        stdu    r1, -112(r1)
        mfmsr   r10
        LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
        li      r0,MSR_RI
        andc    r0,r10,r0
        li      r6,MSR_IR | MSR_DR
        andc    r6,r10,r6
        mtmsrd  r0,1            /* clear RI in MSR */
        mtsrr0  r5
        mtsrr1  r6
        RFI

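/*
 * Note: the RFI above drops to real mode.  SRR1 holds the current MSR
 * with IR/DR cleared, so translation is off when execution resumes at
 * kvmppc_call_hv_entry.  RI is cleared first because an interrupt
 * taken while SRR0/SRR1 hold the new context would be unrecoverable.
 */
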
kvmppc_call_hv_entry:
        bl      kvmppc_hv_entry

        /* Back from guest - restore host state and return to caller */

        /* Restore host DABR and DABRX */
        ld      r5,HSTATE_DABR(r13)
        li      r6,7
        mtspr   SPRN_DABR,r5
        mtspr   SPRN_DABRX,r6

        /* Restore SPRG3 */
        ld      r3,PACA_SPRG3(r13)
        mtspr   SPRN_SPRG3,r3

        /*
         * Reload DEC.  HDEC interrupts were disabled when
         * we reloaded the host's LPCR value.
         */
        ld      r3, HSTATE_DECEXP(r13)
        mftb    r4
        subf    r4, r4, r3
        mtspr   SPRN_DEC, r4

        /* Reload the host's PMU registers */
        ld      r3, PACALPPACAPTR(r13)  /* is the host using the PMU? */
        lbz     r4, LPPACA_PMCINUSE(r3)
        cmpwi   r4, 0
        beq     23f                     /* skip if not */
        lwz     r3, HSTATE_PMC(r13)
        lwz     r4, HSTATE_PMC + 4(r13)
        lwz     r5, HSTATE_PMC + 8(r13)
        lwz     r6, HSTATE_PMC + 12(r13)
        lwz     r8, HSTATE_PMC + 16(r13)
        lwz     r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
        lwz     r10, HSTATE_PMC + 24(r13)
        lwz     r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        mtspr   SPRN_PMC1, r3
        mtspr   SPRN_PMC2, r4
        mtspr   SPRN_PMC3, r5
        mtspr   SPRN_PMC4, r6
        mtspr   SPRN_PMC5, r8
        mtspr   SPRN_PMC6, r9
BEGIN_FTR_SECTION
        mtspr   SPRN_PMC7, r10
        mtspr   SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        ld      r3, HSTATE_MMCR(r13)
        ld      r4, HSTATE_MMCR + 8(r13)
        ld      r5, HSTATE_MMCR + 16(r13)
        mtspr   SPRN_MMCR1, r4
        mtspr   SPRN_MMCRA, r5
        mtspr   SPRN_MMCR0, r3
        isync
23:

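        /*
         * Note that MMCR0 is written last: MMCR1 and MMCRA are set up
         * while the counters are still frozen, so the PMU never counts
         * with a half-restored host configuration.
         */
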
        /*
         * For external and machine check interrupts, we need
         * to call the Linux handler to process the interrupt.
         * We do that by jumping to absolute address 0x500 for
         * external interrupts, or the machine_check_fwnmi label
         * for machine checks (since firmware might have patched
         * the vector area at 0x200).  The [h]rfid at the end of the
         * handler will return to the book3s_hv_interrupts.S code.
         * For other interrupts we do the rfid to get back
         * to the book3s_hv_interrupts.S code here.
         */
        ld      r8, 112+PPC_LR_STKOFF(r1)
        addi    r1, r1, 112
        ld      r7, HSTATE_HOST_MSR(r13)

        cmpwi   cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
        cmpwi   r12, BOOK3S_INTERRUPT_EXTERNAL
BEGIN_FTR_SECTION
        beq     11f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

        /* RFI into the highmem handler, or branch to interrupt handler */
        mfmsr   r6
        li      r0, MSR_RI
        andc    r6, r6, r0
        mtmsrd  r6, 1                   /* Clear RI in MSR */
        mtsrr0  r8
        mtsrr1  r7
        beqa    0x500                   /* external interrupt (PPC970) */
        beq     cr1, 13f                /* machine check */
        RFI

        /* On POWER7, we have external interrupts set to use HSRR0/1 */
11:     mtspr   SPRN_HSRR0, r8
        mtspr   SPRN_HSRR1, r7
        ba      0x500

13:     b       machine_check_fwnmi


/*
 * We come in here when wakened from nap mode on a secondary hw thread.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
        .globl  kvm_start_guest
kvm_start_guest:
        ld      r1,PACAEMERGSP(r13)
        subi    r1,r1,STACK_FRAME_OVERHEAD
        ld      r2,PACATOC(r13)

        li      r0,KVM_HWTHREAD_IN_KVM
        stb     r0,HSTATE_HWTHREAD_STATE(r13)

        /* NV GPR values from power7_idle() will no longer be valid */
        li      r0,1
        stb     r0,PACA_NAPSTATELOST(r13)

        /* were we napping due to cede? */
        lbz     r0,HSTATE_NAPPING(r13)
        cmpwi   r0,0
        bne     kvm_end_cede

        /*
         * We weren't napping due to cede, so this must be a secondary
         * thread being woken up to run a guest, or being woken up due
         * to a stray IPI.  (Or due to some machine check or hypervisor
         * maintenance interrupt while the core is in KVM.)
         */

        /* Check the wake reason in SRR1 to see why we got here */
        mfspr   r3,SPRN_SRR1
        rlwinm  r3,r3,44-31,0x7         /* extract wake reason field */
        cmpwi   r3,4                    /* was it an external interrupt? */
        bne     27f                     /* if not */
        ld      r5,HSTATE_XICS_PHYS(r13)
        li      r7,XICS_XIRR            /* if it was an external interrupt, */
        lwzcix  r8,r5,r7                /* get and ack the interrupt */
        sync
        clrldi. r9,r8,40                /* get interrupt source ID. */
        beq     28f                     /* none there? */
        cmpwi   r9,XICS_IPI             /* was it an IPI? */
        bne     29f
        li      r0,0xff
        li      r6,XICS_MFRR
        stbcix  r0,r5,r6                /* clear IPI */
        stwcix  r8,r5,r7                /* EOI the interrupt */
        sync                            /* order loading of vcpu after that */

        /* get vcpu pointer, NULL if we have no vcpu to run */
        ld      r4,HSTATE_KVM_VCPU(r13)
        cmpdi   r4,0
        /* if we have no vcpu to run, go back to sleep */
        beq     kvm_no_guest
        b       30f

27:     /* XXX should handle hypervisor maintenance interrupts etc. here */
        b       kvm_no_guest
28:     /* SRR1 said external but ICP said nope?? */
        b       kvm_no_guest
29:     /* External non-IPI interrupt to offline secondary thread? help?? */
        stw     r8,HSTATE_SAVED_XIRR(r13)
        b       kvm_no_guest

30:     bl      kvmppc_hv_entry

        /* Back from the guest, go back to nap */
        /* Clear our vcpu pointer so we don't come back in early */
        li      r0, 0
        std     r0, HSTATE_KVM_VCPU(r13)
        lwsync
        /* Clear any pending IPI - we're an offline thread */
        ld      r5, HSTATE_XICS_PHYS(r13)
        li      r7, XICS_XIRR
        lwzcix  r3, r5, r7              /* ack any pending interrupt */
        rlwinm. r0, r3, 0, 0xffffff     /* any pending? */
        beq     37f
        sync
        li      r0, 0xff
        li      r6, XICS_MFRR
        stbcix  r0, r5, r6              /* clear the IPI */
        stwcix  r3, r5, r7              /* EOI it */
37:     sync

        /* increment the nap count and then go to nap mode */
        ld      r4, HSTATE_KVM_VCORE(r13)
        addi    r4, r4, VCORE_NAP_COUNT
        lwsync                          /* make previous updates visible */
51:     lwarx   r3, 0, r4
        addi    r3, r3, 1
        stwcx.  r3, 0, r4
        bne     51b

kvm_no_guest:
        li      r0, KVM_HWTHREAD_IN_NAP
        stb     r0, HSTATE_HWTHREAD_STATE(r13)
        li      r3, LPCR_PECE0
        mfspr   r4, SPRN_LPCR
        rlwimi  r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
        mtspr   SPRN_LPCR, r4
        isync
        std     r0, HSTATE_SCRATCH0(r13)
        ptesync
        ld      r0, HSTATE_SCRATCH0(r13)
1:      cmpd    r0, r0
        bne     1b
        nap
        b       .
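
/*
 * The store/ptesync/reload of HSTATE_SCRATCH0 above forces all prior
 * stores to complete before the thread naps; the compare of r0 with
 * itself can never branch and presumably serves only as a sync point,
 * while the b . catches any unexpected return from nap.
 */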

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

        .global kvmppc_hv_entry
kvmppc_hv_entry:

        /* Required state:
         *
         * R4 = vcpu pointer
         * MSR = ~IR|DR
         * R13 = PACA
         * R1 = host R1
         * all other volatile GPRS = free
         */
        mflr    r0
        std     r0, PPC_LR_STKOFF(r1)
        stdu    r1, -112(r1)

        /* Set partition DABR */
        /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
        li      r5,3
        ld      r6,VCPU_DABR(r4)
        mtspr   SPRN_DABRX,r5
        mtspr   SPRN_DABR,r6
BEGIN_FTR_SECTION
        isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

        /* Load guest PMU registers */
        /* R4 is live here (vcpu pointer) */
        li      r3, 1
        sldi    r3, r3, 31              /* MMCR0_FC (freeze counters) bit */
        mtspr   SPRN_MMCR0, r3          /* freeze all counters, disable ints */
        isync
        lwz     r3, VCPU_PMC(r4)        /* always load up guest PMU registers */
        lwz     r5, VCPU_PMC + 4(r4)    /* to prevent information leak */
        lwz     r6, VCPU_PMC + 8(r4)
        lwz     r7, VCPU_PMC + 12(r4)
        lwz     r8, VCPU_PMC + 16(r4)
        lwz     r9, VCPU_PMC + 20(r4)
BEGIN_FTR_SECTION
        lwz     r10, VCPU_PMC + 24(r4)
        lwz     r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        mtspr   SPRN_PMC1, r3
        mtspr   SPRN_PMC2, r5
        mtspr   SPRN_PMC3, r6
        mtspr   SPRN_PMC4, r7
        mtspr   SPRN_PMC5, r8
        mtspr   SPRN_PMC6, r9
BEGIN_FTR_SECTION
        mtspr   SPRN_PMC7, r10
        mtspr   SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        ld      r3, VCPU_MMCR(r4)
        ld      r5, VCPU_MMCR + 8(r4)
        ld      r6, VCPU_MMCR + 16(r4)
        ld      r7, VCPU_SIAR(r4)
        ld      r8, VCPU_SDAR(r4)
        mtspr   SPRN_MMCR1, r5
        mtspr   SPRN_MMCRA, r6
        mtspr   SPRN_SIAR, r7
        mtspr   SPRN_SDAR, r8
        mtspr   SPRN_MMCR0, r3
        isync

        /* Load up FP, VMX and VSX registers */
        bl      kvmppc_load_fp

        ld      r14, VCPU_GPR(R14)(r4)
        ld      r15, VCPU_GPR(R15)(r4)
        ld      r16, VCPU_GPR(R16)(r4)
        ld      r17, VCPU_GPR(R17)(r4)
        ld      r18, VCPU_GPR(R18)(r4)
        ld      r19, VCPU_GPR(R19)(r4)
        ld      r20, VCPU_GPR(R20)(r4)
        ld      r21, VCPU_GPR(R21)(r4)
        ld      r22, VCPU_GPR(R22)(r4)
        ld      r23, VCPU_GPR(R23)(r4)
        ld      r24, VCPU_GPR(R24)(r4)
        ld      r25, VCPU_GPR(R25)(r4)
        ld      r26, VCPU_GPR(R26)(r4)
        ld      r27, VCPU_GPR(R27)(r4)
        ld      r28, VCPU_GPR(R28)(r4)
        ld      r29, VCPU_GPR(R29)(r4)
        ld      r30, VCPU_GPR(R30)(r4)
        ld      r31, VCPU_GPR(R31)(r4)

BEGIN_FTR_SECTION
        /* Switch DSCR to guest value */
        ld      r5, VCPU_DSCR(r4)
        mtspr   SPRN_DSCR, r5
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

        /*
         * Set the decrementer to the guest decrementer.
         */
        ld      r8,VCPU_DEC_EXPIRES(r4)
        mftb    r7
        subf    r3,r7,r8
        mtspr   SPRN_DEC,r3
        stw     r3,VCPU_DEC(r4)

        ld      r5, VCPU_SPRG0(r4)
        ld      r6, VCPU_SPRG1(r4)
        ld      r7, VCPU_SPRG2(r4)
        ld      r8, VCPU_SPRG3(r4)
        mtspr   SPRN_SPRG0, r5
        mtspr   SPRN_SPRG1, r6
        mtspr   SPRN_SPRG2, r7
        mtspr   SPRN_SPRG3, r8

        /* Save R1 in the PACA */
        std     r1, HSTATE_HOST_R1(r13)

        /* Load up DAR and DSISR */
        ld      r5, VCPU_DAR(r4)
        lwz     r6, VCPU_DSISR(r4)
        mtspr   SPRN_DAR, r5
        mtspr   SPRN_DSISR, r6

        li      r6, KVM_GUEST_MODE_HOST_HV
        stb     r6, HSTATE_IN_GUEST(r13)

BEGIN_FTR_SECTION
        /* Restore AMR and UAMOR, set AMOR to all 1s */
        ld      r5,VCPU_AMR(r4)
        ld      r6,VCPU_UAMOR(r4)
        li      r7,-1
        mtspr   SPRN_AMR,r5
        mtspr   SPRN_UAMOR,r6
        mtspr   SPRN_AMOR,r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

        /* Clear out SLB */
        li      r6,0
        slbmte  r6,r6
        slbia
        ptesync

BEGIN_FTR_SECTION
        b       30f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        /*
         * POWER7 host -> guest partition switch code.
         * We don't have to lock against concurrent tlbies,
         * but we do have to coordinate across hardware threads.
         */
        /* Increment entry count iff exit count is zero. */
        ld      r5,HSTATE_KVM_VCORE(r13)
        addi    r9,r5,VCORE_ENTRY_EXIT
21:     lwarx   r3,0,r9
        cmpwi   r3,0x100                /* any threads starting to exit? */
        bge     secondary_too_late      /* if so we're too late to the party */
        addi    r3,r3,1
        stwcx.  r3,0,r9
        bne     21b
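        /*
         * Note on entry_exit_count: the low byte counts threads that have
         * entered the guest, and the 0xff00 byte counts threads that have
         * started to exit (the exit path adds 0x100).  The lwarx/stwcx.
         * loop above atomically takes an entry ticket and backs out if
         * any thread has already begun the exit sequence.
         */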

        /* Primary thread switches to guest partition. */
        ld      r9,VCPU_KVM(r4)         /* pointer to struct kvm */
        lwz     r6,VCPU_PTID(r4)
        cmpwi   r6,0
        bne     20f
        ld      r6,KVM_SDR1(r9)
        lwz     r7,KVM_LPID(r9)
        li      r0,LPID_RSVD            /* switch to reserved LPID */
        mtspr   SPRN_LPID,r0
        ptesync
        mtspr   SPRN_SDR1,r6            /* switch to partition page table */
        mtspr   SPRN_LPID,r7
        isync

        /* See if we need to flush the TLB */
        lhz     r6,PACAPACAINDEX(r13)   /* test_bit(cpu, need_tlb_flush) */
        clrldi  r7,r6,64-6              /* extract bit number (6 bits) */
        srdi    r6,r6,6                 /* doubleword number */
        sldi    r6,r6,3                 /* address offset */
        add     r6,r6,r9
        addi    r6,r6,KVM_NEED_FLUSH    /* dword in kvm->arch.need_tlb_flush */
        li      r0,1
        sld     r0,r0,r7
        ld      r7,0(r6)
        and.    r7,r7,r0
        beq     22f
23:     ldarx   r7,0,r6                 /* if set, clear the bit */
        andc    r7,r7,r0
        stdcx.  r7,0,r6
        bne     23b
        li      r6,128                  /* and flush the TLB */
        mtctr   r6
        li      r7,0x800                /* IS field = 0b10 */
        ptesync
28:     tlbiel  r7
        addi    r7,r7,0x1000
        bdnz    28b
        ptesync
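        /*
         * Each tlbiel above invalidates one congruence class of the local
         * TLB; 128 iterations with the index stepped by 0x1000 (and IS set
         * to 0b10) flush the entire TLB of this POWER7 core without
         * broadcasting the invalidation to other cores.
         */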

        /* Add timebase offset onto timebase */
22:     ld      r8,VCORE_TB_OFFSET(r5)
        cmpdi   r8,0
        beq     37f
        mftb    r6              /* current host timebase */
        add     r8,r8,r6
        mtspr   SPRN_TBU40,r8   /* update upper 40 bits */
        mftb    r7              /* check if lower 24 bits overflowed */
        clrldi  r6,r6,40
        clrldi  r7,r7,40
        cmpld   r7,r6
        bge     37f
        addis   r8,r8,0x100     /* if so, increment upper 40 bits */
        mtspr   SPRN_TBU40,r8

        /* Load guest PCR value to select appropriate compat mode */
37:     ld      r7, VCORE_PCR(r5)
        cmpdi   r7, 0
        beq     38f
        mtspr   SPRN_PCR, r7
38:
        li      r0,1
        stb     r0,VCORE_IN_GUEST(r5)   /* signal secondaries to continue */
        b       10f

        /* Secondary threads wait for primary to have done partition switch */
20:     lbz     r0,VCORE_IN_GUEST(r5)
        cmpwi   r0,0
        beq     20b

        /* Set LPCR and RMOR. */
10:     ld      r8,VCORE_LPCR(r5)
        mtspr   SPRN_LPCR,r8
        ld      r8,KVM_RMOR(r9)
        mtspr   SPRN_RMOR,r8
        isync

        /* Increment yield count if they have a VPA */
        ld      r3, VCPU_VPA(r4)
        cmpdi   r3, 0
        beq     25f
        lwz     r5, LPPACA_YIELDCOUNT(r3)
        addi    r5, r5, 1
        stw     r5, LPPACA_YIELDCOUNT(r3)
        li      r6, 1
        stb     r6, VCPU_VPA_DIRTY(r4)
25:
        /* Check if HDEC expires soon */
        mfspr   r3,SPRN_HDEC
        cmpwi   r3,10
        li      r12,BOOK3S_INTERRUPT_HV_DECREMENTER
        mr      r9,r4
        blt     hdec_soon

        /* Save purr/spurr */
        mfspr   r5,SPRN_PURR
        mfspr   r6,SPRN_SPURR
        std     r5,HSTATE_PURR(r13)
        std     r6,HSTATE_SPURR(r13)
        ld      r7,VCPU_PURR(r4)
        ld      r8,VCPU_SPURR(r4)
        mtspr   SPRN_PURR,r7
        mtspr   SPRN_SPURR,r8
        b       31f

        /*
         * PPC970 host -> guest partition switch code.
         * We have to lock against concurrent tlbies,
         * using native_tlbie_lock to lock against host tlbies
         * and kvm->arch.tlbie_lock to lock against guest tlbies.
         * We also have to invalidate the TLB since its
         * entries aren't tagged with the LPID.
         */
30:     ld      r9,VCPU_KVM(r4)         /* pointer to struct kvm */

        /* first take native_tlbie_lock */
        .section ".toc","aw"
toc_tlbie_lock:
        .tc     native_tlbie_lock[TC],native_tlbie_lock
        .previous
        ld      r3,toc_tlbie_lock@toc(2)
#ifdef __BIG_ENDIAN__
        lwz     r8,PACA_LOCK_TOKEN(r13)
#else
        lwz     r8,PACAPACAINDEX(r13)
#endif
24:     lwarx   r0,0,r3
        cmpwi   r0,0
        bne     24b
        stwcx.  r8,0,r3
        bne     24b
        isync

        ld      r5,HSTATE_KVM_VCORE(r13)
        ld      r7,VCORE_LPCR(r5)       /* use vcore->lpcr to store HID4 */
        li      r0,0x18f
        rotldi  r0,r0,HID4_LPID5_SH     /* all lpid bits in HID4 = 1 */
        or      r0,r7,r0
        ptesync
        sync
        mtspr   SPRN_HID4,r0            /* switch to reserved LPID */
        isync
        li      r0,0
        stw     r0,0(r3)                /* drop native_tlbie_lock */

        /* invalidate the whole TLB */
        li      r0,256
        mtctr   r0
        li      r6,0
25:     tlbiel  r6
        addi    r6,r6,0x1000
        bdnz    25b
        ptesync

        /* Take the guest's tlbie_lock */
        addi    r3,r9,KVM_TLBIE_LOCK
24:     lwarx   r0,0,r3
        cmpwi   r0,0
        bne     24b
        stwcx.  r8,0,r3
        bne     24b
        isync
        ld      r6,KVM_SDR1(r9)
        mtspr   SPRN_SDR1,r6            /* switch to partition page table */

        /* Set up HID4 with the guest's LPID etc. */
        sync
        mtspr   SPRN_HID4,r7
        isync

        /* drop the guest's tlbie_lock */
        li      r0,0
        stw     r0,0(r3)

        /* Check if HDEC expires soon */
        mfspr   r3,SPRN_HDEC
        cmpwi   r3,10
        li      r12,BOOK3S_INTERRUPT_HV_DECREMENTER
        mr      r9,r4
        blt     hdec_soon

        /* Enable HDEC interrupts */
        mfspr   r0,SPRN_HID0
        li      r3,1
        rldimi  r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
        sync
        mtspr   SPRN_HID0,r0
        mfspr   r0,SPRN_HID0
        mfspr   r0,SPRN_HID0
        mfspr   r0,SPRN_HID0
        mfspr   r0,SPRN_HID0
        mfspr   r0,SPRN_HID0
        mfspr   r0,SPRN_HID0
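        /*
         * The mtspr to HID0 followed by the string of mfspr reads above is
         * presumably the documented PPC970 sequence for making sure an
         * HID0 update has taken effect before any dependent instruction
         * executes.
         */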

        /* Load up guest SLB entries */
31:     lwz     r5,VCPU_SLB_MAX(r4)
        cmpwi   r5,0
        beq     9f
        mtctr   r5
        addi    r6,r4,VCPU_SLB
1:      ld      r8,VCPU_SLB_E(r6)
        ld      r9,VCPU_SLB_V(r6)
        slbmte  r9,r8
        addi    r6,r6,VCPU_SLB_SIZE
        bdnz    1b
9:

        /* Restore state of CTRL run bit; assume 1 on entry */
        lwz     r5,VCPU_CTRL(r4)
        andi.   r5,r5,1
        bne     4f
        mfspr   r6,SPRN_CTRLF
        clrrdi  r6,r6,1
        mtspr   SPRN_CTRLT,r6
4:
        ld      r6, VCPU_CTR(r4)
        lwz     r7, VCPU_XER(r4)

        mtctr   r6
        mtxer   r7

        ld      r10, VCPU_PC(r4)
        ld      r11, VCPU_MSR(r4)
kvmppc_cede_reentry:            /* r4 = vcpu, r13 = paca */
        ld      r6, VCPU_SRR0(r4)
        ld      r7, VCPU_SRR1(r4)

        /* r11 = vcpu->arch.msr & ~MSR_HV */
        rldicl  r11, r11, 63 - MSR_HV_LG, 1
        rotldi  r11, r11, 1 + MSR_HV_LG
        ori     r11, r11, MSR_ME
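        /* The rldicl/rotldi pair above rotates MSR_HV up to the top bit,
         * masks it off, then rotates the result back into place. */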

        /* Check if we can deliver an external or decrementer interrupt now */
        ld      r0,VCPU_PENDING_EXC(r4)
        lis     r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
        and     r0,r0,r8
        cmpdi   cr1,r0,0
        andi.   r0,r11,MSR_EE
        beq     cr1,11f
BEGIN_FTR_SECTION
        mfspr   r8,SPRN_LPCR
        ori     r8,r8,LPCR_MER
        mtspr   SPRN_LPCR,r8
        isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
        beq     5f
        li      r0,BOOK3S_INTERRUPT_EXTERNAL
12:     mr      r6,r10
        mr      r10,r0
        mr      r7,r11
        li      r11,(MSR_ME << 1) | 1   /* synthesize MSR_SF | MSR_ME */
        rotldi  r11,r11,63
        b       5f
11:     beq     5f
        mfspr   r0,SPRN_DEC
        cmpwi   r0,0
        li      r0,BOOK3S_INTERRUPT_DECREMENTER
        blt     12b

        /* Move SRR0 and SRR1 into the respective regs */
5:      mtspr   SPRN_SRR0, r6
        mtspr   SPRN_SRR1, r7

fast_guest_return:
        li      r0,0
        stb     r0,VCPU_CEDED(r4)       /* cancel cede */
        mtspr   SPRN_HSRR0,r10
        mtspr   SPRN_HSRR1,r11

        /* Activate guest mode, so faults get handled by KVM */
        li      r9, KVM_GUEST_MODE_GUEST_HV
        stb     r9, HSTATE_IN_GUEST(r13)

        /* Enter guest */

BEGIN_FTR_SECTION
        ld      r5, VCPU_CFAR(r4)
        mtspr   SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
        ld      r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

        ld      r5, VCPU_LR(r4)
        lwz     r6, VCPU_CR(r4)
        mtlr    r5
        mtcr    r6

        ld      r1, VCPU_GPR(R1)(r4)
        ld      r2, VCPU_GPR(R2)(r4)
        ld      r3, VCPU_GPR(R3)(r4)
        ld      r5, VCPU_GPR(R5)(r4)
        ld      r6, VCPU_GPR(R6)(r4)
        ld      r7, VCPU_GPR(R7)(r4)
        ld      r8, VCPU_GPR(R8)(r4)
        ld      r9, VCPU_GPR(R9)(r4)
        ld      r10, VCPU_GPR(R10)(r4)
        ld      r11, VCPU_GPR(R11)(r4)
        ld      r12, VCPU_GPR(R12)(r4)
        ld      r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
        mtspr   SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        ld      r0, VCPU_GPR(R0)(r4)
        ld      r4, VCPU_GPR(R4)(r4)

        hrfid
        b       .
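
/*
 * hrfid loads the guest PC and MSR from HSRR0/HSRR1 and enters the
 * guest; the b . after it is never reached and only guards against an
 * erroneous fall-through.
 */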

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
        .globl  kvmppc_interrupt_hv
kvmppc_interrupt_hv:
        /*
         * Register contents:
         * R12          = interrupt vector
         * R13          = PACA
         * guest CR, R12 saved in shadow VCPU SCRATCH1/0
         * guest R13 saved in SPRN_SCRATCH0
         */
        /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
        std     r9, HSTATE_HOST_R2(r13)

        lbz     r9, HSTATE_IN_GUEST(r13)
        cmpwi   r9, KVM_GUEST_MODE_HOST_HV
        beq     kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        cmpwi   r9, KVM_GUEST_MODE_GUEST
        ld      r9, HSTATE_HOST_R2(r13)
        beq     kvmppc_interrupt_pr
#endif
        /* We're now back in the host but in guest MMU context */
        li      r9, KVM_GUEST_MODE_HOST_HV
        stb     r9, HSTATE_IN_GUEST(r13)

        ld      r9, HSTATE_KVM_VCPU(r13)

        /* Save registers */

        std     r0, VCPU_GPR(R0)(r9)
        std     r1, VCPU_GPR(R1)(r9)
        std     r2, VCPU_GPR(R2)(r9)
        std     r3, VCPU_GPR(R3)(r9)
        std     r4, VCPU_GPR(R4)(r9)
        std     r5, VCPU_GPR(R5)(r9)
        std     r6, VCPU_GPR(R6)(r9)
        std     r7, VCPU_GPR(R7)(r9)
        std     r8, VCPU_GPR(R8)(r9)
        ld      r0, HSTATE_HOST_R2(r13)
        std     r0, VCPU_GPR(R9)(r9)
        std     r10, VCPU_GPR(R10)(r9)
        std     r11, VCPU_GPR(R11)(r9)
        ld      r3, HSTATE_SCRATCH0(r13)
        lwz     r4, HSTATE_SCRATCH1(r13)
        std     r3, VCPU_GPR(R12)(r9)
        stw     r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
        ld      r3, HSTATE_CFAR(r13)
        std     r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
        ld      r4, HSTATE_PPR(r13)
        std     r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

        /* Restore R1/R2 so we can handle faults */
        ld      r1, HSTATE_HOST_R1(r13)
        ld      r2, PACATOC(r13)

        mfspr   r10, SPRN_SRR0
        mfspr   r11, SPRN_SRR1
        std     r10, VCPU_SRR0(r9)
        std     r11, VCPU_SRR1(r9)
        andi.   r0, r12, 2              /* need to read HSRR0/1? */
        beq     1f
        mfspr   r10, SPRN_HSRR0
        mfspr   r11, SPRN_HSRR1
        clrrdi  r12, r12, 2
1:      std     r10, VCPU_PC(r9)
        std     r11, VCPU_MSR(r9)

        GET_SCRATCH0(r3)
        mflr    r4
        std     r3, VCPU_GPR(R13)(r9)
        std     r4, VCPU_LR(r9)

        stw     r12,VCPU_TRAP(r9)

        /* Save HEIR (HV emulation assist reg) in last_inst
           if this is an HEI (HV emulation interrupt, e40) */
        li      r3,KVM_INST_FETCH_FAILED
BEGIN_FTR_SECTION
        cmpwi   r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
        bne     11f
        mfspr   r3,SPRN_HEIR
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:     stw     r3,VCPU_LAST_INST(r9)

        /* these are volatile across C function calls */
        mfctr   r3
        mfxer   r4
        std     r3, VCPU_CTR(r9)
        stw     r4, VCPU_XER(r9)

BEGIN_FTR_SECTION
        /* If this is a page table miss then see if it's theirs or ours */
        cmpwi   r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
        beq     kvmppc_hdsi
        cmpwi   r12, BOOK3S_INTERRUPT_H_INST_STORAGE
        beq     kvmppc_hisi
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

        /* See if this is a leftover HDEC interrupt */
        cmpwi   r12,BOOK3S_INTERRUPT_HV_DECREMENTER
        bne     2f
        mfspr   r3,SPRN_HDEC
        cmpwi   r3,0
        bge     ignore_hdec
2:
        /* See if this is an hcall we can handle in real mode */
        cmpwi   r12,BOOK3S_INTERRUPT_SYSCALL
        beq     hcall_try_real_mode

        /* Only handle external interrupts here on arch 206 and later */
BEGIN_FTR_SECTION
        b       ext_interrupt_to_host
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

        /* External interrupt? */
        cmpwi   r12, BOOK3S_INTERRUPT_EXTERNAL
        bne+    ext_interrupt_to_host

        /* External interrupt, first check for host_ipi.  If this is
         * set, we know the host wants us out so let's do it now
         */
do_ext_interrupt:
        bl      kvmppc_read_intr
        cmpdi   r3, 0
        bgt     ext_interrupt_to_host

        /* All right, this looks like an IPI for the guest, so we need to set MER */
        /* Check if any CPU is heading out to the host; if so, head out too */
        ld      r5, HSTATE_KVM_VCORE(r13)
        lwz     r0, VCORE_ENTRY_EXIT(r5)
        cmpwi   r0, 0x100
        bge     ext_interrupt_to_host

        /* See if there is a pending interrupt for the guest */
        mfspr   r8, SPRN_LPCR
        ld      r0, VCPU_PENDING_EXC(r9)
        /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
        rldicl. r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
        rldimi  r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
        beq     2f

        /* And if the guest EE is set, we can deliver immediately, else
         * we return to the guest with MER set
         */
        andi.   r0, r11, MSR_EE
        beq     2f
        mtspr   SPRN_SRR0, r10
        mtspr   SPRN_SRR1, r11
        li      r10, BOOK3S_INTERRUPT_EXTERNAL
        li      r11, (MSR_ME << 1) | 1  /* synthesize MSR_SF | MSR_ME */
        rotldi  r11, r11, 63
2:      mr      r4, r9
        mtspr   SPRN_LPCR, r8
        b       fast_guest_return

ext_interrupt_to_host:

guest_exit_cont:                /* r9 = vcpu, r12 = trap, r13 = paca */
        /* Save more register state */
        mfdar   r6
        mfdsisr r7
        std     r6, VCPU_DAR(r9)
        stw     r7, VCPU_DSISR(r9)
BEGIN_FTR_SECTION
        /* don't overwrite fault_dar/fault_dsisr if HDSI */
        cmpwi   r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
        beq     6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
        std     r6, VCPU_FAULT_DAR(r9)
        stw     r7, VCPU_FAULT_DSISR(r9)

        /* See if it is a machine check */
        cmpwi   r12, BOOK3S_INTERRUPT_MACHINE_CHECK
        beq     machine_check_realmode
mc_cont:

        /* Save guest CTRL register, set runlatch to 1 */
6:      mfspr   r6,SPRN_CTRLF
        stw     r6,VCPU_CTRL(r9)
        andi.   r0,r6,1
        bne     4f
        ori     r6,r6,1
        mtspr   SPRN_CTRLT,r6
4:
        /* Read the guest SLB and save it away */
        lwz     r0,VCPU_SLB_NR(r9)      /* number of entries in SLB */
        mtctr   r0
        li      r6,0
        addi    r7,r9,VCPU_SLB
        li      r5,0
1:      slbmfee r8,r6
        andis.  r0,r8,SLB_ESID_V@h
        beq     2f
        add     r8,r8,r6                /* put index in */
        slbmfev r3,r6
        std     r8,VCPU_SLB_E(r7)
        std     r3,VCPU_SLB_V(r7)
        addi    r7,r7,VCPU_SLB_SIZE
        addi    r5,r5,1
2:      addi    r6,r6,1
        bdnz    1b
        stw     r5,VCPU_SLB_MAX(r9)
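        /*
         * slbmfee/slbmfev above read the ESID and VSID halves of each
         * hardware SLB entry; only valid entries are saved, with the
         * entry index folded into the ESID word so that slbmte can
         * reinstall them on the next guest entry.
         */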

        /*
         * Save the guest PURR/SPURR
         */
BEGIN_FTR_SECTION
        mfspr   r5,SPRN_PURR
        mfspr   r6,SPRN_SPURR
        ld      r7,VCPU_PURR(r9)
        ld      r8,VCPU_SPURR(r9)
        std     r5,VCPU_PURR(r9)
        std     r6,VCPU_SPURR(r9)
        subf    r5,r7,r5
        subf    r6,r8,r6

        /*
         * Restore host PURR/SPURR and add guest times
         * so that the time in the guest gets accounted.
         */
        ld      r3,HSTATE_PURR(r13)
        ld      r4,HSTATE_SPURR(r13)
        add     r3,r3,r5
        add     r4,r4,r6
        mtspr   SPRN_PURR,r3
        mtspr   SPRN_SPURR,r4
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)

        /* Clear out SLB */
        li      r5,0
        slbmte  r5,r5
        slbia
        ptesync

hdec_soon:                      /* r9 = vcpu, r12 = trap, r13 = paca */
BEGIN_FTR_SECTION
        b       32f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        /*
         * POWER7 guest -> host partition switch code.
         * We don't have to lock against tlbies but we do
         * have to coordinate the hardware threads.
         */
        /* Increment the threads-exiting-guest count in the 0xff00
           bits of vcore->entry_exit_count */
        lwsync
        ld      r5,HSTATE_KVM_VCORE(r13)
        addi    r6,r5,VCORE_ENTRY_EXIT
41:     lwarx   r3,0,r6
        addi    r0,r3,0x100
        stwcx.  r0,0,r6
        bne     41b
        lwsync

        /*
         * At this point we have an interrupt that we have to pass
         * up to the kernel or qemu; we can't handle it in real mode.
         * Thus we have to do a partition switch, so we have to
         * collect the other threads, if we are the first thread
         * to take an interrupt.  To do this, we set the HDEC to 0,
         * which causes an HDEC interrupt in all threads within 2ns
         * because the HDEC register is shared between all 4 threads.
         * However, we don't need to bother if this is an HDEC
         * interrupt, since the other threads will already be on their
         * way here in that case.
         */
        cmpwi   r3,0x100        /* Are we the first here? */
        bge     43f
        cmpwi   r3,1            /* Are any other threads in the guest? */
        ble     43f
        cmpwi   r12,BOOK3S_INTERRUPT_HV_DECREMENTER
        beq     40f
        li      r0,0
        mtspr   SPRN_HDEC,r0
40:
        /*
         * Send an IPI to any napping threads, since an HDEC interrupt
         * doesn't wake CPUs up from nap.
         */
        lwz     r3,VCORE_NAPPING_THREADS(r5)
        lwz     r4,VCPU_PTID(r9)
        li      r0,1
        sld     r0,r0,r4
        andc.   r3,r3,r0                /* no sense IPI'ing ourselves */
        beq     43f
        mulli   r4,r4,PACA_SIZE         /* get paca for thread 0 */
        subf    r6,r4,r13
42:     andi.   r0,r3,1
        beq     44f
        ld      r8,HSTATE_XICS_PHYS(r6) /* get thread's XICS reg addr */
        li      r0,IPI_PRIORITY
        li      r7,XICS_MFRR
        stbcix  r0,r7,r8                /* trigger the IPI */
44:     srdi.   r3,r3,1
        addi    r6,r6,PACA_SIZE
        bne     42b

        /* Secondary threads wait for primary to do partition switch */
43:     ld      r4,VCPU_KVM(r9)         /* pointer to struct kvm */
        ld      r5,HSTATE_KVM_VCORE(r13)
        lwz     r3,VCPU_PTID(r9)
        cmpwi   r3,0
        beq     15f
        HMT_LOW
13:     lbz     r3,VCORE_IN_GUEST(r5)
        cmpwi   r3,0
        bne     13b
        HMT_MEDIUM
        b       16f

        /* Primary thread waits for all the secondaries to exit guest */
15:     lwz     r3,VCORE_ENTRY_EXIT(r5)
        srwi    r0,r3,8
        clrldi  r3,r3,56
        cmpw    r3,r0
        bne     15b
        isync

        /* Primary thread switches back to host partition */
        ld      r6,KVM_HOST_SDR1(r4)
        lwz     r7,KVM_HOST_LPID(r4)
        li      r8,LPID_RSVD            /* switch to reserved LPID */
        mtspr   SPRN_LPID,r8
        ptesync
        mtspr   SPRN_SDR1,r6            /* switch to partition page table */
        mtspr   SPRN_LPID,r7
        isync

        /* Subtract timebase offset from timebase */
        ld      r8,VCORE_TB_OFFSET(r5)
        cmpdi   r8,0
        beq     17f
        mftb    r6              /* current guest timebase */
        subf    r8,r8,r6
        mtspr   SPRN_TBU40,r8   /* update upper 40 bits */
        mftb    r7              /* check if lower 24 bits overflowed */
        clrldi  r6,r6,40
        clrldi  r7,r7,40
        cmpld   r7,r6
        bge     17f
        addis   r8,r8,0x100     /* if so, increment upper 40 bits */
        mtspr   SPRN_TBU40,r8

        /* Reset PCR */
17:     ld      r0, VCORE_PCR(r5)
        cmpdi   r0, 0
        beq     18f
        li      r0, 0
        mtspr   SPRN_PCR, r0
18:
        /* Signal secondary CPUs to continue */
        stb     r0,VCORE_IN_GUEST(r5)
        lis     r8,0x7fff               /* MAX_INT@h */
        mtspr   SPRN_HDEC,r8

16:     ld      r8,KVM_HOST_LPCR(r4)
        mtspr   SPRN_LPCR,r8
        isync
        b       33f

        /*
         * PPC970 guest -> host partition switch code.
         * We have to lock against concurrent tlbies, and
         * we have to flush the whole TLB.
         */
32:     ld      r4,VCPU_KVM(r9)         /* pointer to struct kvm */

        /* Take the guest's tlbie_lock */
#ifdef __BIG_ENDIAN__
        lwz     r8,PACA_LOCK_TOKEN(r13)
#else
        lwz     r8,PACAPACAINDEX(r13)
#endif
        addi    r3,r4,KVM_TLBIE_LOCK
24:     lwarx   r0,0,r3
        cmpwi   r0,0
        bne     24b
        stwcx.  r8,0,r3
        bne     24b
        isync

        ld      r7,KVM_HOST_LPCR(r4)    /* use kvm->arch.host_lpcr for HID4 */
        li      r0,0x18f
        rotldi  r0,r0,HID4_LPID5_SH     /* all lpid bits in HID4 = 1 */
        or      r0,r7,r0
        ptesync
        sync
        mtspr   SPRN_HID4,r0            /* switch to reserved LPID */
        isync
        li      r0,0
        stw     r0,0(r3)                /* drop guest tlbie_lock */

        /* invalidate the whole TLB */
        li      r0,256
        mtctr   r0
        li      r6,0
25:     tlbiel  r6
        addi    r6,r6,0x1000
        bdnz    25b
        ptesync

        /* take native_tlbie_lock */
        ld      r3,toc_tlbie_lock@toc(2)
24:     lwarx   r0,0,r3
        cmpwi   r0,0
        bne     24b
        stwcx.  r8,0,r3
        bne     24b
        isync

        ld      r6,KVM_HOST_SDR1(r4)
        mtspr   SPRN_SDR1,r6            /* switch to host page table */

        /* Set up host HID4 value */
        sync
        mtspr   SPRN_HID4,r7
        isync
        li      r0,0
        stw     r0,0(r3)                /* drop native_tlbie_lock */

        lis     r8,0x7fff               /* MAX_INT@h */
        mtspr   SPRN_HDEC,r8

        /* Disable HDEC interrupts */
        mfspr   r0,SPRN_HID0
        li      r3,0
        rldimi  r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
        sync
        mtspr   SPRN_HID0,r0
        mfspr   r0,SPRN_HID0
        mfspr   r0,SPRN_HID0
        mfspr   r0,SPRN_HID0
        mfspr   r0,SPRN_HID0
        mfspr   r0,SPRN_HID0
        mfspr   r0,SPRN_HID0

        /* load host SLB entries */
33:     ld      r8,PACA_SLBSHADOWPTR(r13)

        .rept   SLB_NUM_BOLTED
        ld      r5,SLBSHADOW_SAVEAREA(r8)
        ld      r6,SLBSHADOW_SAVEAREA+8(r8)
        andis.  r7,r5,SLB_ESID_V@h
        beq     1f
        slbmte  r6,r5
1:      addi    r8,r8,16
        .endr
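        /*
         * Only the bolted host SLB entries are reloaded here, from the
         * SLB shadow area pointed to by the PACA; save areas without the
         * valid bit set are skipped.
         */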

        /* Save DEC */
        mfspr   r5,SPRN_DEC
        mftb    r6
        extsw   r5,r5
        add     r5,r5,r6
        std     r5,VCPU_DEC_EXPIRES(r9)

        /* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
        mfspr   r5,SPRN_AMR
        mfspr   r6,SPRN_UAMOR
        std     r5,VCPU_AMR(r9)
        std     r6,VCPU_UAMOR(r9)
        li      r6,0
        mtspr   SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

        /* Unset guest mode */
        li      r0, KVM_GUEST_MODE_NONE
        stb     r0, HSTATE_IN_GUEST(r13)

        /* Switch DSCR back to host value */
BEGIN_FTR_SECTION
        mfspr   r8, SPRN_DSCR
        ld      r7, HSTATE_DSCR(r13)
        std     r8, VCPU_DSCR(r9)
        mtspr   SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

        /* Save non-volatile GPRs */
        std     r14, VCPU_GPR(R14)(r9)
        std     r15, VCPU_GPR(R15)(r9)
        std     r16, VCPU_GPR(R16)(r9)
        std     r17, VCPU_GPR(R17)(r9)
        std     r18, VCPU_GPR(R18)(r9)
        std     r19, VCPU_GPR(R19)(r9)
        std     r20, VCPU_GPR(R20)(r9)
        std     r21, VCPU_GPR(R21)(r9)
        std     r22, VCPU_GPR(R22)(r9)
        std     r23, VCPU_GPR(R23)(r9)
        std     r24, VCPU_GPR(R24)(r9)
        std     r25, VCPU_GPR(R25)(r9)
        std     r26, VCPU_GPR(R26)(r9)
        std     r27, VCPU_GPR(R27)(r9)
        std     r28, VCPU_GPR(R28)(r9)
        std     r29, VCPU_GPR(R29)(r9)
        std     r30, VCPU_GPR(R30)(r9)
        std     r31, VCPU_GPR(R31)(r9)

        /* Save SPRGs */
        mfspr   r3, SPRN_SPRG0
        mfspr   r4, SPRN_SPRG1
        mfspr   r5, SPRN_SPRG2
        mfspr   r6, SPRN_SPRG3
        std     r3, VCPU_SPRG0(r9)
        std     r4, VCPU_SPRG1(r9)
        std     r5, VCPU_SPRG2(r9)
        std     r6, VCPU_SPRG3(r9)

        /* save FP state */
        mr      r3, r9
        bl      .kvmppc_save_fp

        /* Increment yield count if they have a VPA */
        ld      r8, VCPU_VPA(r9)        /* do they have a VPA? */
        cmpdi   r8, 0
        beq     25f
        lwz     r3, LPPACA_YIELDCOUNT(r8)
        addi    r3, r3, 1
        stw     r3, LPPACA_YIELDCOUNT(r8)
        li      r3, 1
        stb     r3, VCPU_VPA_DIRTY(r9)
25:
        /* Save PMU registers if requested */
        /* r8 and cr0.eq are live here */
        li      r3, 1
        sldi    r3, r3, 31              /* MMCR0_FC (freeze counters) bit */
        mfspr   r4, SPRN_MMCR0          /* save MMCR0 */
        mtspr   SPRN_MMCR0, r3          /* freeze all counters, disable ints */
        mfspr   r6, SPRN_MMCRA
BEGIN_FTR_SECTION
        /* On P7, clear MMCRA in order to disable SDAR updates */
        li      r7, 0
        mtspr   SPRN_MMCRA, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
        isync
        beq     21f                     /* if no VPA, save PMU stuff anyway */
        lbz     r7, LPPACA_PMCINUSE(r8)
        cmpwi   r7, 0                   /* did they ask for PMU stuff to be saved? */
        bne     21f
        std     r3, VCPU_MMCR(r9)       /* if not, set saved MMCR0 to FC */
        b       22f
21:     mfspr   r5, SPRN_MMCR1
        mfspr   r7, SPRN_SIAR
        mfspr   r8, SPRN_SDAR
        std     r4, VCPU_MMCR(r9)
        std     r5, VCPU_MMCR + 8(r9)
        std     r6, VCPU_MMCR + 16(r9)
        std     r7, VCPU_SIAR(r9)
        std     r8, VCPU_SDAR(r9)
        mfspr   r3, SPRN_PMC1
        mfspr   r4, SPRN_PMC2
        mfspr   r5, SPRN_PMC3
        mfspr   r6, SPRN_PMC4
        mfspr   r7, SPRN_PMC5
        mfspr   r8, SPRN_PMC6
BEGIN_FTR_SECTION
        mfspr   r10, SPRN_PMC7
        mfspr   r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        stw     r3, VCPU_PMC(r9)
        stw     r4, VCPU_PMC + 4(r9)
        stw     r5, VCPU_PMC + 8(r9)
        stw     r6, VCPU_PMC + 12(r9)
        stw     r7, VCPU_PMC + 16(r9)
        stw     r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
        stw     r10, VCPU_PMC + 24(r9)
        stw     r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
22:
        ld      r0, 112+PPC_LR_STKOFF(r1)
        addi    r1, r1, 112
        mtlr    r0
        blr
secondary_too_late:
        ld      r5,HSTATE_KVM_VCORE(r13)
        HMT_LOW
13:     lbz     r3,VCORE_IN_GUEST(r5)
        cmpwi   r3,0
        bne     13b
        HMT_MEDIUM
        li      r0, KVM_GUEST_MODE_NONE
        stb     r0, HSTATE_IN_GUEST(r13)
        ld      r11,PACA_SLBSHADOWPTR(r13)

        .rept   SLB_NUM_BOLTED
        ld      r5,SLBSHADOW_SAVEAREA(r11)
        ld      r6,SLBSHADOW_SAVEAREA+8(r11)
        andis.  r7,r5,SLB_ESID_V@h
        beq     1f
        slbmte  r6,r5
1:      addi    r11,r11,16
        .endr
        b       22b

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
        mfspr   r4, SPRN_HDAR
        mfspr   r6, SPRN_HDSISR
        /* HPTE not found fault or protection fault? */
        andis.  r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
        beq     1f                      /* if not, send it to the guest */
        andi.   r0, r11, MSR_DR         /* data relocation enabled? */
        beq     3f
        clrrdi  r0, r4, 28
        PPC_SLBFEE_DOT(R5, R0)          /* if so, look up SLB */
        bne     1f                      /* if no SLB entry found */
4:      std     r4, VCPU_FAULT_DAR(r9)
        stw     r6, VCPU_FAULT_DSISR(r9)

        /* Search the hash table. */
        mr      r3, r9                  /* vcpu pointer */
        li      r7, 1                   /* data fault */
        bl      .kvmppc_hpte_hv_fault
        ld      r9, HSTATE_KVM_VCPU(r13)
        ld      r10, VCPU_PC(r9)
        ld      r11, VCPU_MSR(r9)
        li      r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
        cmpdi   r3, 0                   /* retry the instruction */
        beq     6f
        cmpdi   r3, -1                  /* handle in kernel mode */
        beq     guest_exit_cont
        cmpdi   r3, -2                  /* MMIO emulation; need instr word */
        beq     2f

        /* Synthesize a DSI for the guest */
        ld      r4, VCPU_FAULT_DAR(r9)
        mr      r6, r3
1:      mtspr   SPRN_DAR, r4
        mtspr   SPRN_DSISR, r6
        mtspr   SPRN_SRR0, r10
        mtspr   SPRN_SRR1, r11
        li      r10, BOOK3S_INTERRUPT_DATA_STORAGE
        li      r11, (MSR_ME << 1) | 1  /* synthesize MSR_SF | MSR_ME */
        rotldi  r11, r11, 63
fast_interrupt_c_return:
6:      ld      r7, VCPU_CTR(r9)
        lwz     r8, VCPU_XER(r9)
        mtctr   r7
        mtxer   r8
        mr      r4, r9
        b       fast_guest_return

3:      ld      r5, VCPU_KVM(r9)        /* not relocated, use VRMA */
        ld      r5, KVM_VRMA_SLB_V(r5)
        b       4b

        /* If this is for emulated MMIO, load the instruction word */
2:      li      r8, KVM_INST_FETCH_FAILED       /* In case lwz faults */

        /* Set guest mode to 'jump over instruction' so if lwz faults
         * we'll just continue at the next IP. */
        li      r0, KVM_GUEST_MODE_SKIP
        stb     r0, HSTATE_IN_GUEST(r13)

        /* Do the access with MSR:DR enabled */
        mfmsr   r3
        ori     r4, r3, MSR_DR          /* Enable paging for data */
        mtmsrd  r4
        lwz     r8, 0(r10)
        mtmsrd  r3

        /* Store the result */
        stw     r8, VCPU_LAST_INST(r9)

        /* Unset guest mode. */
        li      r0, KVM_GUEST_MODE_HOST_HV
        stb     r0, HSTATE_IN_GUEST(r13)
        b       guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
        andis.  r0, r11, SRR1_ISI_NOPT@h
        beq     1f
        andi.   r0, r11, MSR_IR         /* instruction relocation enabled? */
        beq     3f
        clrrdi  r0, r10, 28
        PPC_SLBFEE_DOT(R5, R0)          /* if so, look up SLB */
        bne     1f                      /* if no SLB entry found */
4:
        /* Search the hash table. */
        mr      r3, r9                  /* vcpu pointer */
        mr      r4, r10
        mr      r6, r11
        li      r7, 0                   /* instruction fault */
        bl      .kvmppc_hpte_hv_fault
        ld      r9, HSTATE_KVM_VCPU(r13)
        ld      r10, VCPU_PC(r9)
        ld      r11, VCPU_MSR(r9)
        li      r12, BOOK3S_INTERRUPT_H_INST_STORAGE
        cmpdi   r3, 0                   /* retry the instruction */
        beq     fast_interrupt_c_return
        cmpdi   r3, -1                  /* handle in kernel mode */
        beq     guest_exit_cont

        /* Synthesize an ISI for the guest */
        mr      r11, r3
1:      mtspr   SPRN_SRR0, r10
        mtspr   SPRN_SRR1, r11
        li      r10, BOOK3S_INTERRUPT_INST_STORAGE
        li      r11, (MSR_ME << 1) | 1  /* synthesize MSR_SF | MSR_ME */
        rotldi  r11, r11, 63
        b       fast_interrupt_c_return

3:      ld      r6, VCPU_KVM(r9)        /* not relocated, use VRMA */
        ld      r5, KVM_VRMA_SLB_V(r6)
        b       4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
        .globl  hcall_try_real_mode
hcall_try_real_mode:
        ld      r3,VCPU_GPR(R3)(r9)
        andi.   r0,r11,MSR_PR
        bne     guest_exit_cont
        clrrdi  r3,r3,2
        cmpldi  r3,hcall_real_table_end - hcall_real_table
        bge     guest_exit_cont
        LOAD_REG_ADDR(r4, hcall_real_table)
        lwax    r3,r3,r4
        cmpwi   r3,0
        beq     guest_exit_cont
        add     r3,r3,r4
        mtctr   r3
        mr      r3,r9           /* get vcpu pointer */
        ld      r4,VCPU_GPR(R4)(r9)
        bctrl
        cmpdi   r3,H_TOO_HARD
        beq     hcall_real_fallback
        ld      r4,HSTATE_KVM_VCPU(r13)
        std     r3,VCPU_GPR(R3)(r4)
        ld      r10,VCPU_PC(r4)
        ld      r11,VCPU_MSR(r4)
        b       fast_guest_return
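
/*
 * hcall numbers are multiples of 4, so after clearing the low two bits
 * the number itself is used as the byte index into hcall_real_table
 * below.  Each table entry is the 32-bit offset of a real-mode handler
 * from the start of the table; a zero entry means there is no real-mode
 * handler and the hcall is passed up to the kernel.
 */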

/* We've attempted a real mode hcall, but it has been punted back
 * to userspace.  We need to restore some clobbered volatiles
 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
        li      r12,BOOK3S_INTERRUPT_SYSCALL
        ld      r9, HSTATE_KVM_VCPU(r13)

        b       guest_exit_cont

        .globl  hcall_real_table
hcall_real_table:
        .long   0               /* 0 - unused */
        .long   .kvmppc_h_remove - hcall_real_table
        .long   .kvmppc_h_enter - hcall_real_table
        .long   .kvmppc_h_read - hcall_real_table
        .long   0               /* 0x10 - H_CLEAR_MOD */
        .long   0               /* 0x14 - H_CLEAR_REF */
        .long   .kvmppc_h_protect - hcall_real_table
        .long   0               /* 0x1c - H_GET_TCE */
        .long   .kvmppc_h_put_tce - hcall_real_table
        .long   0               /* 0x24 - H_SET_SPRG0 */
        .long   .kvmppc_h_set_dabr - hcall_real_table
        .long   0               /* 0x2c */
        .long   0               /* 0x30 */
        .long   0               /* 0x34 */
        .long   0               /* 0x38 */
        .long   0               /* 0x3c */
        .long   0               /* 0x40 */
        .long   0               /* 0x44 */
        .long   0               /* 0x48 */
        .long   0               /* 0x4c */
        .long   0               /* 0x50 */
        .long   0               /* 0x54 */
        .long   0               /* 0x58 */
        .long   0               /* 0x5c */
        .long   0               /* 0x60 */
#ifdef CONFIG_KVM_XICS
        .long   .kvmppc_rm_h_eoi - hcall_real_table
        .long   .kvmppc_rm_h_cppr - hcall_real_table
        .long   .kvmppc_rm_h_ipi - hcall_real_table
        .long   0               /* 0x70 - H_IPOLL */
        .long   .kvmppc_rm_h_xirr - hcall_real_table
#else
        .long   0               /* 0x64 - H_EOI */
        .long   0               /* 0x68 - H_CPPR */
        .long   0               /* 0x6c - H_IPI */
        .long   0               /* 0x70 - H_IPOLL */
        .long   0               /* 0x74 - H_XIRR */
#endif
        .long   0               /* 0x78 */
        .long   0               /* 0x7c */
        .long   0               /* 0x80 */
        .long   0               /* 0x84 */
        .long   0               /* 0x88 */
        .long   0               /* 0x8c */
        .long   0               /* 0x90 */
        .long   0               /* 0x94 */
        .long   0               /* 0x98 */
        .long   0               /* 0x9c */
        .long   0               /* 0xa0 */
        .long   0               /* 0xa4 */
        .long   0               /* 0xa8 */
        .long   0               /* 0xac */
        .long   0               /* 0xb0 */
        .long   0               /* 0xb4 */
        .long   0               /* 0xb8 */
        .long   0               /* 0xbc */
        .long   0               /* 0xc0 */
        .long   0               /* 0xc4 */
        .long   0               /* 0xc8 */
        .long   0               /* 0xcc */
        .long   0               /* 0xd0 */
        .long   0               /* 0xd4 */
        .long   0               /* 0xd8 */
        .long   0               /* 0xdc */
        .long   .kvmppc_h_cede - hcall_real_table
        .long   0               /* 0xe4 */
        .long   0               /* 0xe8 */
        .long   0               /* 0xec */
        .long   0               /* 0xf0 */
        .long   0               /* 0xf4 */
        .long   0               /* 0xf8 */
        .long   0               /* 0xfc */
        .long   0               /* 0x100 */
        .long   0               /* 0x104 */
        .long   0               /* 0x108 */
        .long   0               /* 0x10c */
        .long   0               /* 0x110 */
        .long   0               /* 0x114 */
        .long   0               /* 0x118 */
        .long   0               /* 0x11c */
        .long   0               /* 0x120 */
        .long   .kvmppc_h_bulk_remove - hcall_real_table
hcall_real_table_end:

ignore_hdec:
        mr      r4,r9
        b       fast_guest_return

_GLOBAL(kvmppc_h_set_dabr)
        std     r4,VCPU_DABR(r3)
        /* Work around P7 bug where DABR can get corrupted on mtspr */
1:      mtspr   SPRN_DABR,r4
        mfspr   r5, SPRN_DABR
        cmpd    r4, r5
        bne     1b
        isync
        li      r3,0
        blr

_GLOBAL(kvmppc_h_cede)
        ori     r11,r11,MSR_EE
        std     r11,VCPU_MSR(r3)
        li      r0,1
        stb     r0,VCPU_CEDED(r3)
        sync                    /* order setting ceded vs. testing prodded */
        lbz     r5,VCPU_PRODDED(r3)
        cmpwi   r5,0
        bne     kvm_cede_prodded
        li      r0,0            /* set trap to 0 to say hcall is handled */
        stw     r0,VCPU_TRAP(r3)
        li      r0,H_SUCCESS
        std     r0,VCPU_GPR(R3)(r3)
BEGIN_FTR_SECTION
        b       kvm_cede_exit   /* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

        /*
         * Set our bit in the bitmask of napping threads unless all the
         * other threads are already napping, in which case we send this
         * up to the host.
         */
        ld      r5,HSTATE_KVM_VCORE(r13)
        lwz     r6,VCPU_PTID(r3)
        lwz     r8,VCORE_ENTRY_EXIT(r5)
        clrldi  r8,r8,56
        li      r0,1
        sld     r0,r0,r6
        addi    r6,r5,VCORE_NAPPING_THREADS
31:     lwarx   r4,0,r6
        or      r4,r4,r0
        PPC_POPCNTW(R7,R4)
        cmpw    r7,r8
        bge     kvm_cede_exit
        stwcx.  r4,0,r6
        bne     31b
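        /*
         * The popcount of napping_threads (with our bit set) is compared
         * against the number of threads that entered the guest (the low
         * byte of entry_exit_count, extracted into r8 above); if this
         * thread would be the last one awake, the cede is sent up to the
         * host instead of napping.
         */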
        li      r0,1
        stb     r0,HSTATE_NAPPING(r13)
        /* order napping_threads update vs testing entry_exit_count */
        lwsync
        mr      r4,r3
        lwz     r7,VCORE_ENTRY_EXIT(r5)
        cmpwi   r7,0x100
        bge     33f             /* another thread already exiting */

        /*
         * Although not specifically required by the architecture, POWER7
         * preserves the following registers in nap mode, even if an SMT mode
         * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
         * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
         */
        /* Save non-volatile GPRs */
        std     r14, VCPU_GPR(R14)(r3)
        std     r15, VCPU_GPR(R15)(r3)
        std     r16, VCPU_GPR(R16)(r3)
        std     r17, VCPU_GPR(R17)(r3)
        std     r18, VCPU_GPR(R18)(r3)
        std     r19, VCPU_GPR(R19)(r3)
        std     r20, VCPU_GPR(R20)(r3)
        std     r21, VCPU_GPR(R21)(r3)
        std     r22, VCPU_GPR(R22)(r3)
        std     r23, VCPU_GPR(R23)(r3)
        std     r24, VCPU_GPR(R24)(r3)
        std     r25, VCPU_GPR(R25)(r3)
        std     r26, VCPU_GPR(R26)(r3)
        std     r27, VCPU_GPR(R27)(r3)
        std     r28, VCPU_GPR(R28)(r3)
        std     r29, VCPU_GPR(R29)(r3)
        std     r30, VCPU_GPR(R30)(r3)
        std     r31, VCPU_GPR(R31)(r3)

        /* save FP state */
        bl      .kvmppc_save_fp

        /*
         * Take a nap until a decrementer or external interrupt occurs,
         * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
         */
        li      r0,1
        stb     r0,HSTATE_HWTHREAD_REQ(r13)
        mfspr   r5,SPRN_LPCR
        ori     r5,r5,LPCR_PECE0 | LPCR_PECE1
        mtspr   SPRN_LPCR,r5
        isync
        li      r0, 0
        std     r0, HSTATE_SCRATCH0(r13)
        ptesync
        ld      r0, HSTATE_SCRATCH0(r13)
1:      cmpd    r0, r0
        bne     1b
        nap
        b       .

kvm_end_cede:
        /* get vcpu pointer */
        ld      r4, HSTATE_KVM_VCPU(r13)

        /* Woken by external or decrementer interrupt */
        ld      r1, HSTATE_HOST_R1(r13)

        /* load up FP state */
        bl      kvmppc_load_fp

        /* Load NV GPRS */
        ld      r14, VCPU_GPR(R14)(r4)
        ld      r15, VCPU_GPR(R15)(r4)
        ld      r16, VCPU_GPR(R16)(r4)
        ld      r17, VCPU_GPR(R17)(r4)
        ld      r18, VCPU_GPR(R18)(r4)
        ld      r19, VCPU_GPR(R19)(r4)
        ld      r20, VCPU_GPR(R20)(r4)
        ld      r21, VCPU_GPR(R21)(r4)
        ld      r22, VCPU_GPR(R22)(r4)
        ld      r23, VCPU_GPR(R23)(r4)
        ld      r24, VCPU_GPR(R24)(r4)
        ld      r25, VCPU_GPR(R25)(r4)
        ld      r26, VCPU_GPR(R26)(r4)
        ld      r27, VCPU_GPR(R27)(r4)
        ld      r28, VCPU_GPR(R28)(r4)
        ld      r29, VCPU_GPR(R29)(r4)
        ld      r30, VCPU_GPR(R30)(r4)
        ld      r31, VCPU_GPR(R31)(r4)

        /* clear our bit in vcore->napping_threads */
33:     ld      r5,HSTATE_KVM_VCORE(r13)
        lwz     r3,VCPU_PTID(r4)
        li      r0,1
        sld     r0,r0,r3
        addi    r6,r5,VCORE_NAPPING_THREADS
32:     lwarx   r7,0,r6
        andc    r7,r7,r0
        stwcx.  r7,0,r6
        bne     32b
        li      r0,0
        stb     r0,HSTATE_NAPPING(r13)

        /* Check the wake reason in SRR1 to see why we got here */
        mfspr   r3, SPRN_SRR1
        rlwinm  r3, r3, 44-31, 0x7      /* extract wake reason field */
        cmpwi   r3, 4                   /* was it an external interrupt? */
        li      r12, BOOK3S_INTERRUPT_EXTERNAL
        mr      r9, r4
        ld      r10, VCPU_PC(r9)
        ld      r11, VCPU_MSR(r9)
        beq     do_ext_interrupt        /* if so */

        /* see if any other thread is already exiting */
        lwz     r0,VCORE_ENTRY_EXIT(r5)
        cmpwi   r0,0x100
        blt     kvmppc_cede_reentry     /* if not go back to guest */

        /* some threads are exiting, so go to the guest exit path */
        b       hcall_real_fallback

        /* cede when already previously prodded case */
kvm_cede_prodded:
        li      r0,0
        stb     r0,VCPU_PRODDED(r3)
        sync                    /* order testing prodded vs. clearing ceded */
        stb     r0,VCPU_CEDED(r3)
        li      r3,H_SUCCESS
        blr

        /* we've ceded but we want to give control to the host */
kvm_cede_exit:
        b       hcall_real_fallback

        /* Try to handle a machine check in real mode */
machine_check_realmode:
        mr      r3, r9          /* get vcpu pointer */
        bl      .kvmppc_realmode_machine_check
        nop
        cmpdi   r3, 0           /* continue exiting from guest? */
        ld      r9, HSTATE_KVM_VCPU(r13)
        li      r12, BOOK3S_INTERRUPT_MACHINE_CHECK
        beq     mc_cont
        /* If not, deliver a machine check.  SRR0/1 are already set */
        li      r10, BOOK3S_INTERRUPT_MACHINE_CHECK
        li      r11, (MSR_ME << 1) | 1  /* synthesize MSR_SF | MSR_ME */
        rotldi  r11, r11, 63
        b       fast_interrupt_c_return

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *      0 if no interrupt is pending
 *      1 if an interrupt is pending that needs to be handled by the host
 *      -1 if there was a guest wakeup IPI (which has now been cleared)
 */
kvmppc_read_intr:
        /* see if a host IPI is pending */
        li      r3, 1
        lbz     r0, HSTATE_HOST_IPI(r13)
        cmpwi   r0, 0
        bne     1f

        /* Now read the interrupt from the ICP */
        ld      r6, HSTATE_XICS_PHYS(r13)
        li      r7, XICS_XIRR
        cmpdi   r6, 0
        beq-    1f
        lwzcix  r0, r6, r7
        rlwinm. r3, r0, 0, 0xffffff
        sync
        beq     1f                      /* if nothing pending in the ICP */

        /* We found something in the ICP...
         *
         * If it's not an IPI, stash it in the PACA and return to
         * the host; we don't (yet) direct real external
         * interrupts to the guest
         */
        cmpwi   r3, XICS_IPI            /* if there is, is it an IPI? */
        li      r3, 1
        bne     42f

        /* It's an IPI, clear the MFRR and EOI it */
        li      r3, 0xff
        li      r8, XICS_MFRR
        stbcix  r3, r6, r8              /* clear the IPI */
        stwcix  r0, r6, r7              /* EOI it */
        sync

        /* We need to re-check host IPI now in case it got set in the
         * meantime.  If it's clear, we bounce the interrupt to the
         * guest
         */
        lbz     r0, HSTATE_HOST_IPI(r13)
        cmpwi   r0, 0
        bne-    43f

        /* OK, it's an IPI for us */
        li      r3, -1
1:      blr

42:     /* It's not an IPI, so it's for the host; stash it in the PACA
         * before exit, where it will be picked up by the host ICP driver
         */
        stw     r0, HSTATE_SAVED_XIRR(r13)
        b       1b

43:     /* We raced with the host, we need to resend that IPI, bummer */
        li      r0, IPI_PRIORITY
        stbcix  r0, r6, r8              /* set the IPI */
        sync
        b       1b

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 */
_GLOBAL(kvmppc_save_fp)
        mfmsr   r5
        ori     r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        oris    r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
        oris    r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
        mtmsrd  r8
        isync
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
        reg = 0
        .rept   32
        li      r6,reg*16+VCPU_VSRS
        STXVD2X(reg,R6,R3)
        reg = reg + 1
        .endr
FTR_SECTION_ELSE
#endif
        reg = 0
        .rept   32
        stfd    reg,reg*8+VCPU_FPRS(r3)
        reg = reg + 1
        .endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
        mffs    fr0
        stfd    fr0,VCPU_FPSCR(r3)

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        reg = 0
        .rept   32
        li      r6,reg*16+VCPU_VRS
        stvx    reg,r6,r3
        reg = reg + 1
        .endr
        mfvscr  vr0
        li      r6,VCPU_VSCR
        stvx    vr0,r6,r3
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
        mfspr   r6,SPRN_VRSAVE
        stw     r6,VCPU_VRSAVE(r3)
        mtmsrd  r5
        isync
        blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 */
        .globl  kvmppc_load_fp
kvmppc_load_fp:
        mfmsr   r9
        ori     r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        oris    r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
        oris    r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
        mtmsrd  r8
        isync
        lfd     fr0,VCPU_FPSCR(r4)
        MTFSF_L(fr0)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
        reg = 0
        .rept   32
        li      r7,reg*16+VCPU_VSRS
        LXVD2X(reg,R7,R4)
        reg = reg + 1
        .endr
FTR_SECTION_ELSE
#endif
        reg = 0
        .rept   32
        lfd     reg,reg*8+VCPU_FPRS(r4)
        reg = reg + 1
        .endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        li      r7,VCPU_VSCR
        lvx     vr0,r7,r4
        mtvscr  vr0
        reg = 0
        .rept   32
        li      r7,reg*16+VCPU_VRS
        lvx     reg,r7,r4
        reg = reg + 1
        .endr
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
        lwz     r7,VCPU_VRSAVE(r4)
        mtspr   SPRN_VRSAVE,r7
        blr

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
        b       .