/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>

/*****************************************************************************
 *                                                                           *
 *        Real Mode handlers that need to be in the linear mapping           *
 *                                                                           *
 ****************************************************************************/

	.globl	kvmppc_skip_interrupt
kvmppc_skip_interrupt:
	mfspr	r13,SPRN_SRR0
	addi	r13,r13,4
	mtspr	SPRN_SRR0,r13
	GET_SCRATCH0(r13)
	rfid
	b	.

	.globl	kvmppc_skip_Hinterrupt
kvmppc_skip_Hinterrupt:
	mfspr	r13,SPRN_HSRR0
	addi	r13,r13,4
	mtspr	SPRN_HSRR0,r13
	GET_SCRATCH0(r13)
	hrfid
	b	.

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI
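
/*
 * mtsrr0/mtsrr1 followed by RFI is the usual recipe for changing MSR and
 * PC at the same time: RFI loads PC from SRR0 and MSR from SRR1, so we
 * land in kvmppc_hv_entry with instruction and data translation (IR/DR)
 * turned off.  RI (recoverable interrupt) is cleared first because
 * SRR0/SRR1 are about to be clobbered; any interrupt taken in this
 * window would otherwise look recoverable when it is not.
 */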

#define ULONG_SIZE 		8
#define VCPU_GPR(n)		(VCPU_GPRS + (n * ULONG_SIZE))

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

#define XICS_XIRR		4
#define XICS_QIRR		0xc
#define XICS_IPI		2	/* interrupt source # for IPIs */
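
/*
 * XICS_XIRR and XICS_QIRR are byte offsets into this thread's XICS
 * interrupt presentation controller (ICP) register page, whose real
 * address we keep in HSTATE_XICS_PHYS: the 4-byte XIRR (CPPR plus
 * pending interrupt source number) sits at offset 4, and the 1-byte
 * QIRR/MFRR, which is what IPIs are sent through, at offset 0xc.
 */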

/*
 * We come in here when wakened from nap mode on a secondary hw thread.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD
	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)
	cmpdi	cr1,r4,0

	/* Check the wake reason in SRR1 to see why we got here */
	mfspr	r3,SPRN_SRR1
	rlwinm	r3,r3,44-31,0x7		/* extract wake reason field */
	cmpwi	r3,4			/* was it an external interrupt? */
	bne	27f

	/*
	 * External interrupt - for now assume it is an IPI, since we
	 * should never get any other interrupts sent to offline threads.
	 * Only do this for secondary threads.
	 */
	beq	cr1,25f
	lwz	r3,VCPU_PTID(r4)
	cmpwi	r3,0
	beq	27f
25:	ld	r5,HSTATE_XICS_PHYS(r13)
	li	r0,0xff
	li	r6,XICS_QIRR
	li	r7,XICS_XIRR
	lwzcix	r8,r5,r7		/* get and ack the interrupt */
	sync
	clrldi.	r9,r8,40		/* get interrupt source ID. */
	beq	27f			/* none there? */
	cmpwi	r9,XICS_IPI
	bne	26f
	stbcix	r0,r5,r6		/* clear IPI */
26:	stwcix	r8,r5,r7		/* EOI the interrupt */
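
	/*
	 * In rough C, the IPI check above is (illustrative sketch only,
	 * with in_be32/out_8 standing in for the cache-inhibited real-mode
	 * accesses lwzcix/stbcix):
	 *
	 *	u32 xirr = in_be32(xics + XICS_XIRR);	// accept and ack
	 *	if (xirr & 0xffffff) {			// low 24 bits = source
	 *		if ((xirr & 0xffffff) == XICS_IPI)
	 *			out_8(xics + XICS_QIRR, 0xff);	// clear the IPI
	 *		out_be32(xics + XICS_XIRR, xirr);	// EOI, restore CPPR
	 *	}
	 */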

27:	/* XXX should handle hypervisor maintenance interrupts etc. here */

	/* if we have no vcpu to run, go back to sleep */
	beq	cr1,kvm_no_guest

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,0
	bne	kvm_end_cede

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, HSTATE_VMHANDLER(r13)

	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	li	r5,3
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
BEGIN_FTR_SECTION
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
BEGIN_FTR_SECTION
	lwz	r10, VCPU_PMC + 24(r4)
	lwz	r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_MMCR0, r3
	isync
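
	/*
	 * Ordering note for the block above: MMCR0 is first set to
	 * MMCR0_FC (1 << 31) so that all counters are frozen while the
	 * guest PMC and MMCR1/MMCRA values are written, and the guest's
	 * MMCR0 goes in last so counting only resumes once the rest of
	 * the guest PMU state is in place.  The PMCs are loaded
	 * unconditionally so stale host counts cannot leak to the guest.
	 */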

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(r14)(r4)
	ld	r15, VCPU_GPR(r15)(r4)
	ld	r16, VCPU_GPR(r16)(r4)
	ld	r17, VCPU_GPR(r17)(r4)
	ld	r18, VCPU_GPR(r18)(r4)
	ld	r19, VCPU_GPR(r19)(r4)
	ld	r20, VCPU_GPR(r20)(r4)
	ld	r21, VCPU_GPR(r21)(r4)
	ld	r22, VCPU_GPR(r22)(r4)
	ld	r23, VCPU_GPR(r23)(r4)
	ld	r24, VCPU_GPR(r24)(r4)
	ld	r25, VCPU_GPR(r25)(r4)
	ld	r26, VCPU_GPR(r26)(r4)
	ld	r27, VCPU_GPR(r27)(r4)
	ld	r28, VCPU_GPR(r28)(r4)
	ld	r29, VCPU_GPR(r29)(r4)
	ld	r30, VCPU_GPR(r30)(r4)
	ld	r31, VCPU_GPR(r31)(r4)

BEGIN_FTR_SECTION
	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)
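
	/*
	 * I.e. the DEC is programmed with the time remaining in timebase
	 * ticks:  DEC = vcpu->arch.dec_expires - current_tb.  If the
	 * expiry time is already past, this loads a negative value and
	 * the decrementer interrupt fires (almost) immediately.
	 */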

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	addi	r5, r5, 1
	stw	r5, LPPACA_YIELDCOUNT(r3)
25:
	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

BEGIN_FTR_SECTION
	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

BEGIN_FTR_SECTION
	b	30f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
21:	lwarx	r3,0,r9
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	addi	r3,r3,1
	stwcx.	r3,0,r9
	bne	21b
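
	/*
	 * entry_exit_count packs two byte-sized counts: the number of
	 * threads that have entered the guest in bits 0-7 and the number
	 * that have started exiting in bits 8-15, which is why a value
	 * >= 0x100 means some thread is already on its way out.  The
	 * lwarx/stwcx. loop is the usual atomic read-modify-write; as a
	 * rough C sketch (try_store standing in for the store-conditional):
	 *
	 *	do {
	 *		n = vcore->entry_exit_count;		// lwarx
	 *		if (n >= 0x100)				// exits begun
	 *			goto secondary_too_late;
	 *	} while (!try_store(&vcore->entry_exit_count, n + 1));
	 */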

	/* Primary thread switches to guest partition. */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	lwz	r6,VCPU_PTID(r4)
	cmpwi	r6,0
	bne	20f
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
	b	10f

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)
	cmpwi	r0,0
	beq	20b

	/* Set LPCR and RMOR. */
10:	ld	r8,KVM_LPCR(r9)
	mtspr	SPRN_LPCR,r8
	ld	r8,KVM_RMOR(r9)
	mtspr	SPRN_RMOR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/*
	 * Invalidate the TLB if we could possibly have stale TLB
	 * entries for this partition on this core due to the use
	 * of tlbiel.
	 * XXX maybe only need this on primary thread?
	 */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	lwz	r5,VCPU_VCPUID(r4)
	lhz	r6,PACAPACAINDEX(r13)
	rldimi	r6,r5,0,62		/* XXX map as if threads 1:1 p:v */
	lhz	r8,VCPU_LAST_CPU(r4)
	sldi	r7,r6,1			/* see if this is the same vcpu */
	add	r7,r7,r9		/* as last ran on this pcpu */
	lhz	r0,KVM_LAST_VCPU(r7)
	cmpw	r6,r8			/* on the same cpu core as last time? */
	bne	3f
	cmpw	r0,r5			/* same vcpu as this core last ran? */
	beq	1f
3:	sth	r6,VCPU_LAST_CPU(r4)	/* if not, invalidate partition TLB */
	sth	r5,KVM_LAST_VCPU(r7)
	li	r6,128
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
2:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	2b
	ptesync
1:
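
	/*
	 * The loop above steps the tlbiel operand through 128 TLB
	 * congruence classes, advancing the set-index field by 0x1000 per
	 * iteration; with the IS field set to 0b10 each tlbiel throws
	 * away this core's TLB entries for the current partition in that
	 * set, and the ptesync on either side orders the flush against
	 * surrounding storage accesses.
	 */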

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8
	b	31f

	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.section ".toc","aw"
toc_tlbie_lock:
	.tc	native_tlbie_lock[TC],native_tlbie_lock
	.previous
	ld	r3,toc_tlbie_lock@toc(2)
	lwz	r8,PACA_LOCK_TOKEN(r13)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_LPCR(r9)		/* use kvm->arch.lpcr to store HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* Take the guest's tlbie_lock */
	addi	r3,r9,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync
	ld	r6,KVM_SDR1(r9)
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */
	sync
	mtspr	SPRN_HID4,r7
	isync

	/* drop the guest's tlbie_lock */
	li	r0,0
	stw	r0,0(r3)

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/* Enable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,1
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
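
	/*
	 * The six back-to-back reads of HID0 follow the PPC970's
	 * recommended HID0 update sequence (sync before the mtspr, then
	 * multiple read-backs): they guarantee the new HDICE setting has
	 * taken effect before any code relies on it.  The same pattern
	 * appears again when HDEC interrupts are disabled on the exit
	 * path below.
	 */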

	/* Load up guest SLB entries */
31:	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)	/* r11 = vcpu->arch.msr & ~MSR_HV */

	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME
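
	/*
	 * The rldicl/rotldi pair is a two-instruction way of forcing
	 * MSR_HV off: rotate left by 63 - MSR_HV_LG so the HV bit becomes
	 * the most-significant bit, clear it with the rldicl mask, then
	 * rotate back by 1 + MSR_HV_LG.  MSR_ME is then forced on, since
	 * the guest is always run with machine check interrupts enabled.
	 */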

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0,VCPU_PENDING_EXC(r4)
	li	r8,(1 << BOOK3S_IRQPRIO_EXTERNAL)
	oris	r8,r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
	and	r0,r0,r8
	cmpdi	cr1,r0,0
	andi.	r0,r11,MSR_EE
	beq	cr1,11f
BEGIN_FTR_SECTION
	mfspr	r8,SPRN_LPCR
	ori	r8,r8,LPCR_MER
	mtspr	SPRN_LPCR,r8
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	beq	5f
	li	r0,BOOK3S_INTERRUPT_EXTERNAL
12:	mr	r6,r10
	mr	r10,r0
	mr	r7,r11
	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11,r11,63
	b	5f
11:	beq	5f
	mfspr	r0,SPRN_DEC
	cmpwi	r0,0
	li	r0,BOOK3S_INTERRUPT_DECREMENTER
	blt	12b

	/* Move SRR0 and SRR1 into the respective regs */
5:	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */

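	/*
	 * The "(MSR_ME << 1) | 1, then rotate right by one" idiom used
	 * above (and in the fault-bounce paths below) builds
	 * MSR_SF | MSR_ME in two instructions: the low-order 1 bit
	 * rotates around into bit 63 (MSR_SF) while MSR_ME << 1 rotates
	 * back down to MSR_ME.
	 */
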
fast_guest_return:
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r0, VCPU_GPR(r0)(r4)
	ld	r1, VCPU_GPR(r1)(r4)
	ld	r2, VCPU_GPR(r2)(r4)
	ld	r3, VCPU_GPR(r3)(r4)
	ld	r5, VCPU_GPR(r5)(r4)
	ld	r6, VCPU_GPR(r6)(r4)
	ld	r7, VCPU_GPR(r7)(r4)
	ld	r8, VCPU_GPR(r8)(r4)
	ld	r9, VCPU_GPR(r9)(r4)
	ld	r10, VCPU_GPR(r10)(r4)
	ld	r11, VCPU_GPR(r11)(r4)
	ld	r12, VCPU_GPR(r12)(r4)
	ld	r13, VCPU_GPR(r13)(r4)

	ld	r4, VCPU_GPR(r4)(r4)

	hrfid
	b	.
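
	/*
	 * r4 (the vcpu pointer) is restored last above because it is the
	 * base register for all of the VCPU_GPR loads; hrfid then enters
	 * the guest using the PC and MSR that fast_guest_return placed in
	 * HSRR0/HSRR1.  The trailing "b ." is a guard that should never
	 * execute.
	 */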

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt
kvmppc_interrupt:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
	std	r9, HSTATE_HOST_R2(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(r0)(r9)
	std	r1, VCPU_GPR(r1)(r9)
	std	r2, VCPU_GPR(r2)(r9)
	std	r3, VCPU_GPR(r3)(r9)
	std	r4, VCPU_GPR(r4)(r9)
	std	r5, VCPU_GPR(r5)(r9)
	std	r6, VCPU_GPR(r6)(r9)
	std	r7, VCPU_GPR(r7)(r9)
	std	r8, VCPU_GPR(r8)(r9)
	ld	r0, HSTATE_HOST_R2(r13)
	std	r0, VCPU_GPR(r9)(r9)
	std	r10, VCPU_GPR(r10)(r9)
	std	r11, VCPU_GPR(r11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(r12)(r9)
	stw	r4, VCPU_CR(r9)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(r13)(r9)
	std	r4, VCPU_LR(r9)

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	stw	r12,VCPU_TRAP(r9)
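
	/*
	 * The test on bit 1 of r12 works because the first-level handlers
	 * pass in (vector + 0x2) for interrupts that are delivered through
	 * HSRR0/HSRR1; the clrrdi strips that marker again once the HSRR
	 * values have been captured, leaving the plain vector number for
	 * the dispatch comparisons below.
	 */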

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:	stw	r3,VCPU_LAST_INST(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	stw	r4, VCPU_XER(r9)

BEGIN_FTR_SECTION
	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	bge	ignore_hdec
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Check for mediated interrupts (could be done earlier really ...) */
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_EXTERNAL
	bne+	1f
	andi.	r0,r11,MSR_EE
	beq	1f
	mfspr	r5,SPRN_LPCR
	andi.	r0,r5,LPCR_MER
	bne	bounce_ext_interrupt
1:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

nohpte_cont:
hcall_real_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	std	r5,VCPU_DEC_EXPIRES(r9)

	/* Save more register state */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
BEGIN_FTR_SECTION
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
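
	/*
	 * Net effect of the PURR/SPURR block above: the guest's PURR and
	 * SPURR images are updated, and the host values saved at guest
	 * entry are put back with the guest's consumption added on, i.e.
	 *
	 *	host_purr = saved_host_purr + (purr_now - purr_at_entry);
	 *
	 * so time spent in the guest still shows up in the host's
	 * processor utilisation accounting.
	 */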

	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

hdec_soon:			/* r9 = vcpu, r12 = trap, r13 = paca */
BEGIN_FTR_SECTION
	b	32f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Increment the threads-exiting-guest count in the 0xff00
	   bits of vcore->entry_exit_count */
	lwsync
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT
41:	lwarx	r3,0,r6
	addi	r0,r3,0x100
	stwcx.	r0,0,r6
	bne	41b
	lwsync

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi	r3,0x100	/* Are we the first here? */
	bge	43f
	cmpwi	r3,1		/* Are any other threads in the guest? */
	ble	43f
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	beq	40f
	li	r0,0
	mtspr	SPRN_HDEC,r0
40:
	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	lwz	r4,VCPU_PTID(r9)
	li	r0,1
	sld	r0,r0,r4
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
	beq	43f
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
	subf	r6,r4,r13
42:	andi.	r0,r3,1
	beq	44f
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	li	r0,IPI_PRIORITY
	li	r7,XICS_QIRR
	stbcix	r0,r7,r8		/* trigger the IPI */
44:	srdi.	r3,r3,1
	addi	r6,r6,PACA_SIZE
	bne	42b
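
	/*
	 * The loop above walks the napping_threads bitmask bit by bit:
	 * r6 starts at the paca of thread 0 of this core (our own paca
	 * minus ptid * PACA_SIZE, relying on the pacas being a contiguous
	 * array) and advances one PACA_SIZE per thread, while r3 shifts
	 * right so that bit 0 always describes the thread r6 points at.
	 * Writing IPI_PRIORITY to a thread's QIRR raises an IPI at that
	 * priority, which, unlike an HDEC interrupt, does wake it from
	 * nap.
	 */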

	/* Secondary threads wait for primary to do partition switch */
43:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r9)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	srwi	r0,r3,8
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	li	r0,0
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync
	b	33f

	/*
	 * PPC970 guest -> host partition switch code.
	 * We have to lock against concurrent tlbies, and
	 * we have to flush the whole TLB.
	 */
32:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */

	/* Take the guest's tlbie_lock */
	lwz	r8,PACA_LOCK_TOKEN(r13)
	addi	r3,r4,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_HOST_LPCR(r4)	/* use kvm->arch.host_lpcr for HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop guest tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* take native_tlbie_lock */
	ld	r3,toc_tlbie_lock@toc(2)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r6,KVM_HOST_SDR1(r4)
	mtspr	SPRN_SDR1,r6		/* switch to host page table */

	/* Set up host HID4 value */
	sync
	mtspr	SPRN_HID4,r7
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

	/* Disable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,0
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* load host SLB entries */
33:	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

	/* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)	/* save guest DSCR (r9 = vcpu, not
					   r7, which holds the host value) */
	mtspr	SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(r14)(r9)
	std	r15, VCPU_GPR(r15)(r9)
	std	r16, VCPU_GPR(r16)(r9)
	std	r17, VCPU_GPR(r17)(r9)
	std	r18, VCPU_GPR(r18)(r9)
	std	r19, VCPU_GPR(r19)(r9)
	std	r20, VCPU_GPR(r20)(r9)
	std	r21, VCPU_GPR(r21)(r9)
	std	r22, VCPU_GPR(r22)(r9)
	std	r23, VCPU_GPR(r23)(r9)
	std	r24, VCPU_GPR(r24)(r9)
	std	r25, VCPU_GPR(r25)(r9)
	std	r26, VCPU_GPR(r26)(r9)
	std	r27, VCPU_GPR(r27)(r9)
	std	r28, VCPU_GPR(r28)(r9)
	std	r29, VCPU_GPR(r29)(r9)
	std	r30, VCPU_GPR(r30)(r9)
	std	r31, VCPU_GPR(r31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	.kvmppc_save_fp

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	lwz	r3, LPPACA_YIELDCOUNT(r8)
	addi	r3, r3, 1
	stw	r3, LPPACA_YIELDCOUNT(r8)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
BEGIN_FTR_SECTION
	/* On P7, clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
BEGIN_FTR_SECTION
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	stw	r10, VCPU_PMC + 24(r9)
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
22:

	/* Secondary threads go off to take a nap on POWER7 */
BEGIN_FTR_SECTION
	lwz	r0,VCPU_PTID(r9)
	cmpwi	r0,0
	bne	secondary_nap
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_MMCR0, r3
	isync
23:
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to the interrupt vector address
	 * which we have in r12.  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_interrupts.S code here.
	 */
	ld	r8, HSTATE_VMHANDLER(r13)
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	11f
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK

	/* RFI into the highmem handler, or branch to interrupt handler */
12:	mfmsr	r6
	mtctr	r12
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqctr
	RFI

11:
BEGIN_FTR_SECTION
	b	12b
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500
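
	/*
	 * The beqctr/RFI pair above is a two-way dispatch: cr0 still
	 * holds the machine-check comparison and CTR holds the vector
	 * number (which is also the handler's real address), so a machine
	 * check branches to the Linux 0x200 handler via CTR while
	 * everything else falls through to RFI, which returns to the
	 * saved VM handler address with the host MSR.  External
	 * interrupts instead take the "ba 0x500" path so the host's
	 * external interrupt handler runs as if the interrupt had been
	 * taken in the host.
	 */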

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(r5, r0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	nohpte_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)
	b	nohpte_cont
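
	/*
	 * kvmppc_hpte_hv_fault's return value drives the dispatch above:
	 * 0 means the HPTE has been fixed up and the instruction should
	 * simply be retried, -1 means the fault has to be handled in
	 * kernel mode, -2 means MMIO emulation is required (so the
	 * faulting instruction word is fetched), and any other value is a
	 * DSISR image to present to the guest in the synthesized DSI.
	 */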

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(r5, r0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	nohpte_cont

	/* Synthesize an ISI for the guest */
	mr	r11, r3
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
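/*
 * Dispatch relies on the hcall numbering convention: hcall numbers are
 * multiples of 4, so the (bounds-checked) number is used directly as a
 * byte offset into hcall_real_table, whose entries are 4-byte offsets
 * of each handler relative to the start of the table (0 = no real-mode
 * handler).  Roughly, in C:
 *
 *	req &= ~3UL;					// clrrdi
 *	if (req >= sizeof(hcall_real_table))
 *		goto hcall_real_cont;
 *	s32 off = ((s32 *)hcall_real_table)[req / 4];	// lwzx
 *	if (off == 0)
 *		goto hcall_real_cont;
 *	handler = (char *)hcall_real_table + off;	// add, mtctr
 */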
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(r3)(r9)
	andi.	r0,r11,MSR_PR
	bne	hcall_real_cont
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	hcall_real_cont
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwzx	r3,r3,r4
	cmpwi	r3,0
	beq	hcall_real_cont
	add	r3,r3,r4
	mtctr	r3
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(r4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(r3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it's punted it back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	hcall_real_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	.kvmppc_h_remove - hcall_real_table
	.long	.kvmppc_h_enter - hcall_real_table
	.long	.kvmppc_h_read - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	.kvmppc_h_protect - hcall_real_table
	.long	0		/* 0x1c - H_GET_TCE */
	.long	.kvmppc_h_put_tce - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	.kvmppc_h_set_dabr - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
	.long	0		/* 0x64 */
	.long	0		/* 0x68 */
	.long	0		/* 0x6c */
	.long	0		/* 0x70 */
	.long	0		/* 0x74 */
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	.kvmppc_h_cede - hcall_real_table
	.long	0		/* 0xe4 */
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	.kvmppc_h_bulk_remove - hcall_real_table
hcall_real_table_end:

ignore_hdec:
	mr	r4,r9
	b	fast_guest_return

bounce_ext_interrupt:
	mr	r4,r9
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10,BOOK3S_INTERRUPT_EXTERNAL
	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11,r11,63
	b	fast_guest_return

_GLOBAL(kvmppc_h_set_dabr)
	std	r4,VCPU_DABR(r3)
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

_GLOBAL(kvmppc_h_cede)
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	1f
	li	r0,0		/* set trap to 0 to say hcall is handled */
	stw	r0,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(r3)(r3)
BEGIN_FTR_SECTION
	b	2f		/* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r6,VCPU_PTID(r3)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	PPC_POPCNTW(r7,r4)
	cmpw	r7,r8
	bge	2f
	stwcx.	r4,0,r6
	bne	31b
	li	r0,1
	stb	r0,HSTATE_NAPPING(r13)
	/* order napping_threads update vs testing entry_exit_count */
	lwsync
	mr	r4,r3
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */
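
	/*
	 * That is: r8 holds the number of threads that have entered the
	 * guest (the low byte of entry_exit_count), and PPC_POPCNTW
	 * counts how many threads, including this one, would then be
	 * napping.  If every in-guest thread would be asleep there is no
	 * point in napping at all, so the hcall is sent up to the host
	 * (H_TOO_HARD) instead of committing the new bitmask.
	 */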

	/*
	 * Although not specifically required by the architecture, POWER7
	 * preserves the following registers in nap mode, even if an SMT mode
	 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
	 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
	 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(r14)(r3)
	std	r15, VCPU_GPR(r15)(r3)
	std	r16, VCPU_GPR(r16)(r3)
	std	r17, VCPU_GPR(r17)(r3)
	std	r18, VCPU_GPR(r18)(r3)
	std	r19, VCPU_GPR(r19)(r3)
	std	r20, VCPU_GPR(r20)(r3)
	std	r21, VCPU_GPR(r21)(r3)
	std	r22, VCPU_GPR(r22)(r3)
	std	r23, VCPU_GPR(r23)(r3)
	std	r24, VCPU_GPR(r24)(r3)
	std	r25, VCPU_GPR(r25)(r3)
	std	r26, VCPU_GPR(r26)(r3)
	std	r27, VCPU_GPR(r27)(r3)
	std	r28, VCPU_GPR(r28)(r3)
	std	r29, VCPU_GPR(r29)(r3)
	std	r30, VCPU_GPR(r30)(r3)
	std	r31, VCPU_GPR(r31)(r3)

	/* save FP state */
	bl	.kvmppc_save_fp

	/*
	 * Take a nap until a decrementer or external interrupt occurs,
	 * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
	 */
	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR,r5
	isync
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.
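
	/*
	 * The store/ptesync/load-back/compare dance before the nap (used
	 * again in kvm_no_guest below) is the pre-nap idiom in this file:
	 * loading the just-stored value back and branching on an
	 * always-equal compare forces the store to have performed before
	 * the thread actually goes to sleep.  The bne is never taken,
	 * since cmpd r0,r0 always compares equal.
	 */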

kvm_end_cede:
	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(r14)(r4)
	ld	r15, VCPU_GPR(r15)(r4)
	ld	r16, VCPU_GPR(r16)(r4)
	ld	r17, VCPU_GPR(r17)(r4)
	ld	r18, VCPU_GPR(r18)(r4)
	ld	r19, VCPU_GPR(r19)(r4)
	ld	r20, VCPU_GPR(r20)(r4)
	ld	r21, VCPU_GPR(r21)(r4)
	ld	r22, VCPU_GPR(r22)(r4)
	ld	r23, VCPU_GPR(r23)(r4)
	ld	r24, VCPU_GPR(r24)(r4)
	ld	r25, VCPU_GPR(r25)(r4)
	ld	r26, VCPU_GPR(r26)(r4)
	ld	r27, VCPU_GPR(r27)(r4)
	ld	r28, VCPU_GPR(r28)(r4)
	ld	r29, VCPU_GPR(r29)(r4)
	ld	r30, VCPU_GPR(r30)(r4)
	ld	r31, VCPU_GPR(r31)(r4)

	/* clear our bit in vcore->napping_threads */
33:	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r4)
	li	r0,1
	sld	r0,r0,r3
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	blt	kvmppc_cede_reentry	/* if not go back to guest */

	/* some threads are exiting, so go to the guest exit path */
	b	hcall_real_fallback

	/* cede when already previously prodded case */
1:	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
2:	li	r3,H_TOO_HARD
	blr

secondary_too_late:
	ld	r5,HSTATE_KVM_VCORE(r13)
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	ld	r11,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r11)
	ld	r6,SLBSHADOW_SAVEAREA+8(r11)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r11,r11,16
	.endr

secondary_nap:
	/* Clear any pending IPI - assume we're a secondary thread */
	ld	r5, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	lwzcix	r3, r5, r7		/* ack any pending interrupt */
	rlwinm.	r0, r3, 0, 0xffffff	/* any pending? */
	beq	37f
	sync
	li	r0, 0xff
	li	r6, XICS_QIRR
	stbcix	r0, r5, r6		/* clear the IPI */
	stwcix	r3, r5, r7		/* EOI it */
37:	sync

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
	lwsync				/* make previous updates visible */
51:	lwarx	r3, 0, r4
	addi	r3, r3, 1
	stwcx.	r3, 0, r4
	bne	51b

kvm_no_guest:
	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)

	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	isync
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 */
_GLOBAL(kvmppc_save_fp)
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VSRS
	STXVD2X(reg,r6,r3)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	stfd	reg,reg*8+VCPU_FPRS(r3)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
	mffs	fr0
	stfd	fr0,VCPU_FPSCR(r3)

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VRS
	stvx	reg,r6,r3
	reg = reg + 1
	.endr
	mfvscr	vr0
	li	r6,VCPU_VSCR
	stvx	vr0,r6,r3
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r3)
	mtmsrd	r5
	isync
	blr
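
/*
 * The "reg = 0 / .rept 32 / ... / reg = reg + 1 / .endr" construct is a
 * GNU assembler assembly-time loop: it expands to 32 copies of the body
 * with the assembler symbol reg taking the values 0..31, i.e. one store
 * (or, in kvmppc_load_fp below, one load) per architected FP/VSX/VMX
 * register, with the reg*8 or reg*16 offsets computed at assembly time.
 */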

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 */
	.globl	kvmppc_load_fp
kvmppc_load_fp:
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	lfd	fr0,VCPU_FPSCR(r4)
	MTFSF_L(fr0)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VSRS
	LXVD2X(reg,r7,r4)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	lfd	reg,reg*8+VCPU_FPRS(r4)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	li	r7,VCPU_VSCR
	lvx	vr0,r7,r4
	mtvscr	vr0
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VRS
	lvx	reg,r7,r4
	reg = reg + 1
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r7
	blr