KVM: Add barriers to allow mmu_notifier_retry to be used locklessly
arch/powerpc/kvm/book3s_hv_rm_mmu.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

/*
 * Since this file is built in even if KVM is a module, we need
 * a local copy of this function for the case where kvm_main.c is
 * modular.
 */
static struct kvm_memory_slot *builtin_gfn_to_memslot(struct kvm *kvm,
						      gfn_t gfn)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		if (gfn >= memslot->base_gfn &&
		    gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	return NULL;
}

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;

	p = find_linux_pte(swapper_pg_dir, addr);
	if (!p || !pte_present(*p))
		return NULL;
	/* assume we don't have huge pages in vmalloc space... */
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}

/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			     unsigned long *rmap, long pte_index, int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;

	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.revmap[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.revmap[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		rev->forw = rev->back = pte_index;
		i = pte_index;
	}
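	/*
	 * Order the chain updates above before the store to *rmap below,
	 * which publishes the new head index and, because the lock bit is
	 * not set in the value written, also drops the rmap chain lock.
	 */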
	smp_wmb();
	*rmap = i | KVMPPC_RMAP_REFERENCED | KVMPPC_RMAP_PRESENT;	/* unlock */
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);
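/*
 * The rmap word for a guest page packs a lock bit, flag bits and, in
 * KVMPPC_RMAP_INDEX, the index of the head HPTE of its chain; the chain
 * itself is a doubly-linked list threaded through the revmap entries'
 * forw/back fields.
 */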
/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				unsigned long hpte_v)
{
	struct revmap_entry *rev, *next, *prev;
	unsigned long gfn, ptel, head;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	ptel = rev->guest_rpte;
	gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
	memslot = builtin_gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return;

	rmap = real_vmalloc_addr(&memslot->rmap[gfn - memslot->base_gfn]);
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
	unlock_rmap(rmap);
}

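/*
 * Look up the Linux PTE for a host virtual address.  On entry *pte_sizep
 * is the minimum page size the caller can accept; the lookup fails
 * (returns an empty PTE) if the backing page is smaller than that, or is
 * not present, and otherwise *pte_sizep is updated to the actual size.
 */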
static pte_t lookup_linux_pte(struct kvm_vcpu *vcpu, unsigned long hva,
			      unsigned long *pte_sizep)
{
	pte_t *ptep;
	unsigned long ps = *pte_sizep;
	unsigned int shift;

	ptep = find_linux_pte_or_hugepte(vcpu->arch.pgdir, hva, &shift);
	if (!ptep)
		return __pte(0);
	if (shift)
		*pte_sizep = 1ul << shift;
	else
		*pte_sizep = PAGE_SIZE;
	if (ps > *pte_sizep)
		return __pte(0);
	if (!pte_present(*ptep))
		return __pte(0);
	return kvmppc_read_update_linux_pte(ptep);
}

long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn, hva;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel = ptel;
	struct kvm_memory_slot *memslot;
	unsigned long *physp, pte_size;
	unsigned long is_io;
	unsigned long *rmap;
	pte_t pte;
	unsigned long mmu_seq;
	bool realmode = vcpu->arch.vcore->vcore_state == VCORE_RUNNING;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;
	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

	/* used later to detect if we might have been invalidated */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();
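	/*
	 * The unlocked read of mmu_notifier_seq above, ordered before the
	 * Linux page-table walk below by the rmb, pairs with the
	 * mmu_notifier_retry() check made later under the rmap chain lock:
	 * if the sequence number has changed, an invalidation has begun
	 * and the translation we read may be stale.
	 */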

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = builtin_gfn_to_memslot(kvm, gfn);
	pa = 0;
	is_io = ~0ul;
	rmap = NULL;
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
		/* PPC970 can't do emulated MMIO */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			return H_PARAMETER;
		/* Emulated MMIO - mark this with key=31 */
		pteh |= HPTE_V_ABSENT;
		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
		goto do_insert;
	}

	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->rmap[slot_fn];

	if (!kvm->arch.using_mmu_notifiers) {
		physp = kvm->arch.slot_phys[memslot->id];
		if (!physp)
			return H_PARAMETER;
		physp += slot_fn;
		if (realmode)
			physp = real_vmalloc_addr(physp);
		pa = *physp;
		if (!pa)
			return H_TOO_HARD;
		is_io = pa & (HPTE_R_I | HPTE_R_W);
		pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
		pa &= PAGE_MASK;
	} else {
		/* Translate to host virtual address */
		hva = gfn_to_hva_memslot(memslot, gfn);

		/* Look up the Linux PTE for the backing page */
		pte_size = psize;
		pte = lookup_linux_pte(vcpu, hva, &pte_size);
		if (pte_present(pte)) {
			is_io = hpte_cache_bits(pte_val(pte));
			pa = pte_pfn(pte) << PAGE_SHIFT;
		}
	}
	if (pte_size < psize)
		return H_PARAMETER;
	if (pa && pte_size > psize)
		pa |= gpa & (pte_size - 1);

	ptel &= ~(HPTE_R_PP0 - psize);
	ptel |= pa;

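	/*
	 * If no usable translation was found (pa == 0), enter the HPTE as
	 * absent rather than valid: the guest still sees a successful
	 * H_ENTER, and the page can be faulted in and made valid later.
	 */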
	if (pa)
		pteh |= HPTE_V_VALID;
	else
		pteh |= HPTE_V_ABSENT;

	/* Check WIMG */
	if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {
		if (is_io)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
		ptel |= HPTE_R_M;
	}

	/* Find and lock the HPTEG slot to use */
 do_insert:
	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((*hpte & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
					  HPTE_V_ABSENT))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it.  Try again,
			 * actually locking each slot and checking it.
			 */
			hpte -= 16;
			for (i = 0; i < 8; ++i) {
				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)))
					break;
				*hpte &= ~HPTE_V_HVLOCK;
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
				   HPTE_V_ABSENT)) {
			/* Lock the slot and check again */
			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
				*hpte &= ~HPTE_V_HVLOCK;
				return H_PTEG_FULL;
			}
		}
	}

	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.revmap[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev)
		rev->guest_rpte = g_ptel;

	/* Link HPTE into reverse-map chain */
	if (pteh & HPTE_V_VALID) {
		if (realmode)
			rmap = real_vmalloc_addr(rmap);
		lock_rmap(rmap);
		/* Check for pending invalidations under the rmap chain lock */
		if (kvm->arch.using_mmu_notifiers &&
		    mmu_notifier_retry(vcpu, mmu_seq)) {
			/* inval in progress, write a non-present HPTE */
			pteh |= HPTE_V_ABSENT;
			pteh &= ~HPTE_V_VALID;
			unlock_rmap(rmap);
		} else {
			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
						realmode);
		}
	}
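	/*
	 * Note that on a race with an invalidation we downgrade the entry
	 * to absent rather than failing the hcall; the first guest access
	 * will fault and the page can then be mapped in again.
	 */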

	hpte[1] = ptel;

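	/*
	 * Store ordering matters here: hpte[1] must be visible before the
	 * hpte[0] store that sets the valid (or absent) bit and drops
	 * HPTE_V_HVLOCK.  The eieio below orders the two stores, and the
	 * ptesync waits for the update to reach the page table before we
	 * return.
	 */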
	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	hpte[0] = pteh;
	asm volatile("ptesync" : : : "memory");

	vcpu->arch.gpr[4] = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_enter);

#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))

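/*
 * Acquire helper for kvm->arch.tlbie_lock: a single lwarx/stwcx. attempt
 * that returns nonzero on success; callers spin with cpu_relax() until
 * it succeeds, and release the lock with a plain store of 0.
 */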
static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "	stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "	isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}

long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn,
		     unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	unsigned long v, r, rb;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}
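	/*
	 * Heuristic: with only one vcpu online, the translation should
	 * presumably be in use only on the CPU that vcpu runs on, so the
	 * cheaper local tlbiel (H_LOCAL) path below can be used instead
	 * of a global tlbie broadcast.
	 */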
	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
	vcpu->arch.gpr[5] = r = hpte[1];
	rb = compute_tlbie_rb(v, r, pte_index);
	if (v & HPTE_V_VALID)
		remove_revmap_chain(kvm, pte_index, v);
	smp_wmb();
	hpte[0] = 0;
	if (!(v & HPTE_V_VALID))
		return H_SUCCESS;
	if (!(flags & H_LOCAL)) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
			     : : "r" (rb), "r" (kvm->arch.lpid));
		asm volatile("ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile("tlbiel %0" : : "r" (rb));
		asm volatile("ptesync" : : : "memory");
	}
	return H_SUCCESS;
}

long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	unsigned long *hp, tlbrb[4];
	long int i, found;
	long int n_inval = 0;
	unsigned long flags, req, pte_index;
	long int local = 0;
	long int ret = H_SUCCESS;

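	/*
	 * H_BULK_REMOVE passes up to four (control, match-value) pairs in
	 * gpr[4..11].  The top byte of each control word holds the request
	 * type in its high two bits (1 = request, 3 = end of list) and the
	 * match flags (absolute/andcond/AVPN) in its low two bits.
	 */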
	if (atomic_read(&kvm->online_vcpus) == 1)
		local = 1;
	for (i = 0; i < 4; ++i) {
		pte_index = args[i * 2];
		flags = pte_index >> 56;
		pte_index &= ((1ul << 56) - 1);
		req = flags >> 6;
		flags &= 3;
		if (req == 3)
			break;
		if (req != 1 || flags == 3 ||
		    pte_index >= HPT_NPTE) {
			/* parameter error */
			args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
			ret = H_PARAMETER;
			break;
		}
		hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
			cpu_relax();
		found = 0;
		if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) {
			switch (flags & 3) {
			case 0:		/* absolute */
				found = 1;
				break;
			case 1:		/* andcond */
				if (!(hp[0] & args[i * 2 + 1]))
					found = 1;
				break;
			case 2:		/* AVPN */
				if ((hp[0] & ~0x7fUL) == args[i * 2 + 1])
					found = 1;
				break;
			}
		}
		if (!found) {
			hp[0] &= ~HPTE_V_HVLOCK;
			args[i * 2] = ((0x90 | flags) << 56) + pte_index;
			continue;
		}
		/* insert R and C bits from PTE */
		flags |= (hp[1] >> 5) & 0x0c;
		args[i * 2] = ((0x80 | flags) << 56) + pte_index;
		if (hp[0] & HPTE_V_VALID) {
			tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
			remove_revmap_chain(kvm, pte_index, hp[0]);
		}
		smp_wmb();
		hp[0] = 0;
	}
	if (n_inval == 0)
		return ret;

	if (!local) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile(PPC_TLBIE(%1,%0)
				     : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
		asm volatile("ptesync" : : : "memory");
	}
	return ret;
}

long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;

	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}

	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	v = hpte[0];
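	/*
	 * H_PROTECT carries the new pp0/key/N/pp protection bits in the
	 * low-order bits of flags; shift them into their positions in the
	 * second HPTE doubleword.
	 */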
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
	}
	r = (hpte[1] & ~mask) | bits;

	/* Update HPTE */
	if (v & HPTE_V_VALID) {
		rb = compute_tlbie_rb(v, r, pte_index);
		hpte[0] = v & ~HPTE_V_VALID;
		if (!(flags & H_LOCAL)) {
			while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
				cpu_relax();
			asm volatile("ptesync" : : : "memory");
			asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
				     : : "r" (rb), "r" (kvm->arch.lpid));
			asm volatile("ptesync" : : : "memory");
			kvm->arch.tlbie_lock = 0;
		} else {
			asm volatile("ptesync" : : : "memory");
			asm volatile("tlbiel %0" : : "r" (rb));
			asm volatile("ptesync" : : : "memory");
		}
	}
	hpte[1] = r;
	eieio();
	hpte[0] = v & ~HPTE_V_HVLOCK;
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}

long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte, v, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	if (flags & H_R_XLATE)
		rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		v = hpte[0] & ~HPTE_V_HVLOCK;
		r = hpte[1];
		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
		}
		if (v & HPTE_V_VALID) {
			if (rev)
				r = rev[i].guest_rpte;
			else
				r = hpte[1] | HPTE_R_RPN;
		}
		vcpu->arch.gpr[4 + i * 2] = v;
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}

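/*
 * Invalidate one HPTE and flush the old translation with a global tlbie.
 * The caller must already hold the HPTE lock (HPTE_V_HVLOCK): only the
 * valid bit is cleared here, and the lock bit is left untouched.
 */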
void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
			    unsigned long pte_index)
{
	unsigned long rb;

	hptep[0] &= ~HPTE_V_VALID;
	rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
	while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
		cpu_relax();
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
		     : : "r" (rb), "r" (kvm->arch.lpid));
	asm volatile("ptesync" : : : "memory");
	kvm->arch.tlbie_lock = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);

static int slb_base_page_shift[4] = {
	24,	/* 16M */
	16,	/* 64k */
	34,	/* 16G */
	20,	/* 1M, unsupported */
};

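/*
 * Search the guest hashed page table for an HPTE matching eaddr under
 * the given SLB entry value, trying the primary and then the secondary
 * hash bucket.  On success, returns the HPTE index with the entry still
 * locked (HPTE_V_HVLOCK set); returns -1 if no match is found.
 */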
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
			      unsigned long valid)
{
	unsigned int i;
	unsigned int pshift;
	unsigned long somask;
	unsigned long vsid, hash;
	unsigned long avpn;
	unsigned long *hpte;
	unsigned long mask, val;
	unsigned long v, r;

	/* Get page shift, work out hash and AVPN etc. */
	mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
	val = 0;
	pshift = 12;
	if (slb_v & SLB_VSID_L) {
		mask |= HPTE_V_LARGE;
		val |= HPTE_V_LARGE;
		pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
	}
	if (slb_v & SLB_VSID_B_1T) {
		somask = (1UL << 40) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
		vsid ^= vsid << 25;
	} else {
		somask = (1UL << 28) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
	}
	hash = (vsid ^ ((eaddr & somask) >> pshift)) & HPT_HASH_MASK;
	avpn = slb_v & ~(somask >> 16);	/* also includes B */
	avpn |= (eaddr & somask) >> 16;

	if (pshift >= 24)
		avpn &= ~((1UL << (pshift - 16)) - 1);
	else
		avpn &= ~0x7fUL;
	val |= avpn;

	for (;;) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7));

		for (i = 0; i < 16; i += 2) {
			/* Read the PTE racily */
			v = hpte[i] & ~HPTE_V_HVLOCK;

			/* Check valid/absent, hash, segment size and AVPN */
			if (!(v & valid) || (v & mask) != val)
				continue;

			/* Lock the PTE and read it under the lock */
			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
				cpu_relax();
			v = hpte[i] & ~HPTE_V_HVLOCK;
			r = hpte[i+1];

			/*
			 * Check the HPTE again, including large page size
			 * Since we don't currently allow any MPSS (mixed
			 * page-size segment) page sizes, it is sufficient
			 * to check against the actual page size.
			 */
			if ((v & valid) && (v & mask) == val &&
			    hpte_page_size(v, r) == (1ul << pshift))
				/* Return with the HPTE still locked */
				return (hash << 3) + (i >> 1);

			/* Unlock and move on */
			hpte[i] = v;
		}

		if (val & HPTE_V_SECONDARY)
			break;
		val |= HPTE_V_SECONDARY;
		hash = hash ^ HPT_HASH_MASK;
	}
	return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);

/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page.
 * Returns a possibly modified status (DSISR) value if not
 * (i.e. pass the interrupt to the guest),
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	long int index;
	unsigned long v, r, gr;
	unsigned long *hpte;
	unsigned long valid;
	struct revmap_entry *rev;
	unsigned long pp, key;

	valid = HPTE_V_VALID | HPTE_V_ABSENT;

	index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
	if (index < 0)
		return status;	/* there really was no HPTE */

	hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	v = hpte[0] & ~HPTE_V_HVLOCK;
	r = hpte[1];
	rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
	gr = rev->guest_rpte;

	/* Unlock the HPTE */
	asm volatile("lwsync" : : : "memory");
	hpte[0] = v;

	/* If the HPTE is valid by now, retry the instruction */
	if (v & HPTE_V_VALID)
		return 0;

	/* Check access permissions to the page */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
	if (!data) {
		if (gr & (HPTE_R_N | HPTE_R_G))
			return status | SRR1_ISI_N_OR_G;
		if (!hpte_read_permission(pp, slb_v & key))
			return status | SRR1_ISI_PROT;
	} else if (status & DSISR_ISSTORE) {
		/* check write permission */
		if (!hpte_write_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	} else {
		if (!hpte_read_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	}

	/* Check storage key, if applicable */
	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (status & DSISR_ISSTORE)
			perm >>= 1;
		if (perm & 1)
			return status | DSISR_KEYFAULT;
	}

	/* Save HPTE info for virtual-mode handler */
	vcpu->arch.pgfault_addr = addr;
	vcpu->arch.pgfault_index = index;
	vcpu->arch.pgfault_hpte[0] = v;
	vcpu->arch.pgfault_hpte[1] = r;

	/* Check the storage key to see if it is possibly emulated MMIO */
	if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
	    (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
	     (HPTE_R_KEY_HI | HPTE_R_KEY_LO))
		return -2;	/* MMIO emulation - load instr word */

	return -1;		/* send fault up to host kernel mode */
}