/*
 * arch/powerpc/kvm/book3s_hv_rm_mmu.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

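/*
 * Most of these handlers can run in hypervisor real mode, i.e. with
 * translation off, so structures that live in vmalloc space (such as
 * the revmap array) cannot be dereferenced through their vmalloc
 * addresses; they must first be converted to linear-mapping addresses.
 */
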
/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;

	p = find_linux_pte(swapper_pg_dir, addr);
	if (!p || !pte_present(*p))
		return NULL;
	/* assume we don't have huge pages in vmalloc space... */
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}
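
/*
 * Reverse-map bookkeeping: for each guest real page, the rmap word in
 * the memslot records the index of one HPTE that maps it, plus lock,
 * present, referenced and changed bits; the revmap_entry array (one
 * entry per HPTE) links all HPTEs mapping the same page into a
 * circular doubly-linked list through its forw/back indexes.
 */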

/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			     unsigned long *rmap, long pte_index, int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;

	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.revmap[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.revmap[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		rev->forw = rev->back = pte_index;
		i = pte_index;
	}
	smp_wmb();
	*rmap = i | KVMPPC_RMAP_REFERENCED | KVMPPC_RMAP_PRESENT;	/* unlock */
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);

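/*
 * When an HPTE is unlinked, the hardware-updated referenced (R) and
 * changed (C) bits are folded back into the guest view of the PTE and
 * accumulated in the rmap word, so dirty/referenced state is not lost
 * when the mapping goes away.
 */
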
/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				struct revmap_entry *rev,
				unsigned long hpte_v, unsigned long hpte_r)
{
	struct revmap_entry *next, *prev;
	unsigned long gfn, ptel, head;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	unsigned long rcbits;

	rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
	ptel = rev->guest_rpte |= rcbits;
	gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return;

	rmap = real_vmalloc_addr(&memslot->rmap[gfn - memslot->base_gfn]);
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
	*rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
	unlock_rmap(rmap);
}

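/*
 * Look up the Linux PTE for a host virtual address and return its value
 * and page size.  A zero PTE is returned if the page is absent or is
 * smaller than the size the caller asked for in *pte_sizep; otherwise
 * kvmppc_read_update_linux_pte() is expected to update the accessed
 * (and, for writes, dirty) information on the way out.
 */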
static pte_t lookup_linux_pte(struct kvm_vcpu *vcpu, unsigned long hva,
			      int writing, unsigned long *pte_sizep)
{
	pte_t *ptep;
	unsigned long ps = *pte_sizep;
	unsigned int shift;

	ptep = find_linux_pte_or_hugepte(vcpu->arch.pgdir, hva, &shift);
	if (!ptep)
		return __pte(0);
	if (shift)
		*pte_sizep = 1ul << shift;
	else
		*pte_sizep = PAGE_SIZE;
	if (ps > *pte_sizep)
		return __pte(0);
	if (!pte_present(*ptep))
		return __pte(0);
	return kvmppc_read_update_linux_pte(ptep, writing);
}

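/*
 * HPTEs are locked by atomically setting HPTE_V_HVLOCK in the first
 * doubleword; unlock_hpte() stores a new first doubleword (without the
 * lock bit) behind a release barrier, so updates to the second
 * doubleword are visible before the HPTE appears unlocked.
 */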
static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v)
{
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = hpte_v;
}

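/*
 * H_ENTER hypercall: insert the HPTE given by (pteh, ptel) into the
 * hash table at pte_index (or at a free slot within that HPTE group,
 * unless H_EXACT is set).  The guest's view of the PTE is saved in the
 * revmap entry, while the real HPTE is made to point at the host page
 * backing the guest real address, if any.
 */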
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn, hva;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel = ptel;
	struct kvm_memory_slot *memslot;
	unsigned long *physp, pte_size;
	unsigned long is_io;
	unsigned long *rmap;
	pte_t pte;
	unsigned int writing;
	unsigned long mmu_seq;
	unsigned long rcbits;
	bool realmode = vcpu->arch.vcore->vcore_state == VCORE_RUNNING;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;
	writing = hpte_is_writable(ptel);
	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

	/* used later to detect if we might have been invalidated */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
	pa = 0;
	is_io = ~0ul;
	rmap = NULL;
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
		/* PPC970 can't do emulated MMIO */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			return H_PARAMETER;
		/* Emulated MMIO - mark this with key=31 */
		pteh |= HPTE_V_ABSENT;
		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
		goto do_insert;
	}

	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->rmap[slot_fn];

	if (!kvm->arch.using_mmu_notifiers) {
		physp = kvm->arch.slot_phys[memslot->id];
		if (!physp)
			return H_PARAMETER;
		physp += slot_fn;
		if (realmode)
			physp = real_vmalloc_addr(physp);
		pa = *physp;
		if (!pa)
			return H_TOO_HARD;
		is_io = pa & (HPTE_R_I | HPTE_R_W);
		pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
		pa &= PAGE_MASK;
	} else {
		/* Translate to host virtual address */
		hva = gfn_to_hva_memslot(memslot, gfn);

		/* Look up the Linux PTE for the backing page */
		pte_size = psize;
		pte = lookup_linux_pte(vcpu, hva, writing, &pte_size);
		if (pte_present(pte)) {
			if (writing && !pte_write(pte))
				/* make the actual HPTE be read-only */
				ptel = hpte_make_readonly(ptel);
			is_io = hpte_cache_bits(pte_val(pte));
			pa = pte_pfn(pte) << PAGE_SHIFT;
		}
	}
	if (pte_size < psize)
		return H_PARAMETER;
	if (pa && pte_size > psize)
		pa |= gpa & (pte_size - 1);

	ptel &= ~(HPTE_R_PP0 - psize);
	ptel |= pa;

	if (pa)
		pteh |= HPTE_V_VALID;
	else
		pteh |= HPTE_V_ABSENT;

	/* Check WIMG */
	if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {
		if (is_io)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W | HPTE_R_I | HPTE_R_G);
		ptel |= HPTE_R_M;
	}

	/* Find and lock the HPTEG slot to use */
 do_insert:
	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((*hpte & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
					  HPTE_V_ABSENT))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it.  Try again,
			 * actually locking each slot and checking it.
			 */
			hpte -= 16;
			for (i = 0; i < 8; ++i) {
				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)))
					break;
				*hpte &= ~HPTE_V_HVLOCK;
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
				   HPTE_V_ABSENT)) {
			/* Lock the slot and check again */
			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
				*hpte &= ~HPTE_V_HVLOCK;
				return H_PTEG_FULL;
			}
		}
	}

	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.revmap[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev)
		rev->guest_rpte = g_ptel;

	/* Link HPTE into reverse-map chain */
	if (pteh & HPTE_V_VALID) {
		if (realmode)
			rmap = real_vmalloc_addr(rmap);
		lock_rmap(rmap);
		/* Check for pending invalidations under the rmap chain lock */
		if (kvm->arch.using_mmu_notifiers &&
		    mmu_notifier_retry(vcpu, mmu_seq)) {
			/* inval in progress, write a non-present HPTE */
			pteh |= HPTE_V_ABSENT;
			pteh &= ~HPTE_V_VALID;
			unlock_rmap(rmap);
		} else {
			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
						realmode);
			/* Only set R/C in real HPTE if already set in *rmap */
			rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
			ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
		}
	}

	hpte[1] = ptel;

	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	hpte[0] = pteh;
	asm volatile("ptesync" : : : "memory");

	vcpu->arch.gpr[4] = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_enter);

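/*
 * A single global lock (kvm->arch.tlbie_lock) serializes the broadcast
 * tlbie/eieio/tlbsync sequences issued by these handlers, since only
 * one such sequence may be in progress at a time.  try_lock_tlbie() is
 * a minimal lwarx/stwcx. lock usable from real mode; the owner value it
 * stores is the lock token kept in the PACA.
 */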
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))

static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "	stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "	isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}

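/*
 * H_REMOVE hypercall: invalidate the HPTE at pte_index if it matches
 * the H_AVPN and/or H_ANDCOND conditions, flush it from the TLB, and
 * return the previous PTE contents in GPR4 and GPR5.
 */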
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn,
		     unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	unsigned long v, r, rb;
	struct revmap_entry *rev;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	v = hpte[0] & ~HPTE_V_HVLOCK;
	if (v & HPTE_V_VALID) {
		hpte[0] &= ~HPTE_V_VALID;
		rb = compute_tlbie_rb(v, hpte[1], pte_index);
		if (!(flags & H_LOCAL) && atomic_read(&kvm->online_vcpus) > 1) {
			while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
				cpu_relax();
			asm volatile("ptesync" : : : "memory");
			asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
				     : : "r" (rb), "r" (kvm->arch.lpid));
			asm volatile("ptesync" : : : "memory");
			kvm->arch.tlbie_lock = 0;
		} else {
			asm volatile("ptesync" : : : "memory");
			asm volatile("tlbiel %0" : : "r" (rb));
			asm volatile("ptesync" : : : "memory");
		}
		/* Read PTE low word after tlbie to get final R/C values */
		remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
	}
	r = rev->guest_rpte;
	unlock_hpte(hpte, 0);

	vcpu->arch.gpr[4] = v;
	vcpu->arch.gpr[5] = r;
	return H_SUCCESS;
}

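/*
 * H_BULK_REMOVE hypercall: process up to 4 remove requests passed in
 * GPR4..GPR11, two registers per request:
 *   args[j]   = (control << 56) | pte_index, where control bits 62-63
 *               give the request type (1 = request, 3 = end of list)
 *               and bits 56-57 the match type (0 = absolute,
 *               1 = and-cond, 2 = AVPN);
 *   args[j+1] = the AVPN or and-cond value for the match.
 * A status byte is written back into the top bits of args[j], and the
 * tlbies for the whole batch are issued under a single hold of the
 * tlbie lock.
 */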
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	unsigned long *hp, *hptes[4], tlbrb[4];
	long int i, j, k, n, found, indexes[4];
	unsigned long flags, req, pte_index, rcbits;
	long int local = 0;
	long int ret = H_SUCCESS;
	struct revmap_entry *rev, *revs[4];

	if (atomic_read(&kvm->online_vcpus) == 1)
		local = 1;
	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
		n = 0;
		for (; i < 4; ++i) {
			j = i * 2;
			pte_index = args[j];
			flags = pte_index >> 56;
			pte_index &= ((1ul << 56) - 1);
			req = flags >> 6;
			flags &= 3;
			if (req == 3) {		/* no more requests */
				i = 4;
				break;
			}
			if (req != 1 || flags == 3 || pte_index >= HPT_NPTE) {
				/* parameter error */
				args[j] = ((0xa0 | flags) << 56) + pte_index;
				ret = H_PARAMETER;
				break;
			}
			hp = (unsigned long *)
				(kvm->arch.hpt_virt + (pte_index << 4));
			/* to avoid deadlock, don't spin except for first */
			if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
				if (n)
					break;
				while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
					cpu_relax();
			}
			found = 0;
			if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) {
				switch (flags & 3) {
				case 0:		/* absolute */
					found = 1;
					break;
				case 1:		/* andcond */
					if (!(hp[0] & args[j + 1]))
						found = 1;
					break;
				case 2:		/* AVPN */
					if ((hp[0] & ~0x7fUL) == args[j + 1])
						found = 1;
					break;
				}
			}
			if (!found) {
				hp[0] &= ~HPTE_V_HVLOCK;
				args[j] = ((0x90 | flags) << 56) + pte_index;
				continue;
			}

			args[j] = ((0x80 | flags) << 56) + pte_index;
			rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);

			if (!(hp[0] & HPTE_V_VALID)) {
				/* insert R and C bits from PTE */
				rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
				args[j] |= rcbits << (56 - 5);
				continue;
			}

			hp[0] &= ~HPTE_V_VALID;		/* leave it locked */
			tlbrb[n] = compute_tlbie_rb(hp[0], hp[1], pte_index);
			indexes[n] = j;
			hptes[n] = hp;
			revs[n] = rev;
			++n;
		}

		if (!n)
			break;

		/* Now that we've collected a batch, do the tlbies */
		if (!local) {
			while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
				cpu_relax();
			asm volatile("ptesync" : : : "memory");
			for (k = 0; k < n; ++k)
				asm volatile(PPC_TLBIE(%1,%0) : :
					     "r" (tlbrb[k]),
					     "r" (kvm->arch.lpid));
			asm volatile("eieio; tlbsync; ptesync" : : : "memory");
			kvm->arch.tlbie_lock = 0;
		} else {
			asm volatile("ptesync" : : : "memory");
			for (k = 0; k < n; ++k)
				asm volatile("tlbiel %0" : : "r" (tlbrb[k]));
			asm volatile("ptesync" : : : "memory");
		}

		/* Read PTE low words after tlbie to get final R/C values */
		for (k = 0; k < n; ++k) {
			j = indexes[k];
			pte_index = args[j] & ((1ul << 56) - 1);
			hp = hptes[k];
			rev = revs[k];
			remove_revmap_chain(kvm, pte_index, rev, hp[0], hp[1]);
			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
			args[j] |= rcbits << (56 - 5);
			hp[0] = 0;
		}
	}

	return ret;
}

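/*
 * H_PROTECT hypercall: update the protection and key bits (pp0, pp, N,
 * key hi/lo) of an existing HPTE, invalidating it and flushing the old
 * translation around the change if it is currently valid.
 */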
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;

	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}

	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	v = hpte[0];
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
	}
	r = (hpte[1] & ~mask) | bits;

	/* Update HPTE */
	if (v & HPTE_V_VALID) {
		rb = compute_tlbie_rb(v, r, pte_index);
		hpte[0] = v & ~HPTE_V_VALID;
		if (!(flags & H_LOCAL)) {
			while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
				cpu_relax();
			asm volatile("ptesync" : : : "memory");
			asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
				     : : "r" (rb), "r" (kvm->arch.lpid));
			asm volatile("ptesync" : : : "memory");
			kvm->arch.tlbie_lock = 0;
		} else {
			asm volatile("ptesync" : : : "memory");
			asm volatile("tlbiel %0" : : "r" (rb));
			asm volatile("ptesync" : : : "memory");
		}
	}
	hpte[1] = r;
	eieio();
	hpte[0] = v & ~HPTE_V_HVLOCK;
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}

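/*
 * H_READ hypercall: return the guest view of one HPTE (or of four, if
 * H_READ_4 is set) in GPR4 onwards, presenting HPTE_V_ABSENT entries
 * to the guest as valid and merging in the current R/C bits.
 */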
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte, v, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		v = hpte[0] & ~HPTE_V_HVLOCK;
		r = hpte[1];
		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
		}
		if (v & HPTE_V_VALID)
			r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
		vcpu->arch.gpr[4 + i * 2] = v;
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}

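/*
 * Invalidate an HPTE on behalf of virtual-mode code and flush it from
 * the TLB with a broadcast tlbie; the HPTE is assumed to already be
 * locked by the caller.
 */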
void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
			    unsigned long pte_index)
{
	unsigned long rb;

	hptep[0] &= ~HPTE_V_VALID;
	rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
	while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
		cpu_relax();
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
		     : : "r" (rb), "r" (kvm->arch.lpid));
	asm volatile("ptesync" : : : "memory");
	kvm->arch.tlbie_lock = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);

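/*
 * Clear the referenced (R) bit of an HPTE and flush the stale
 * translation from the TLB, so the bit will be set again on the next
 * access; only a single byte of the HPTE is rewritten, leaving the
 * valid and changed bits undisturbed.
 */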
void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
			   unsigned long pte_index)
{
	unsigned long rb;
	unsigned char rbyte;

	rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
	rbyte = (hptep[1] & ~HPTE_R_R) >> 8;
	/* modify only the second-last byte, which contains the ref bit */
	*((char *)hptep + 14) = rbyte;
	while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
		cpu_relax();
	asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
		     : : "r" (rb), "r" (kvm->arch.lpid));
	asm volatile("ptesync" : : : "memory");
	kvm->arch.tlbie_lock = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);

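/*
 * slb_base_page_shift[] decodes the SLB L||LP bits into a base page
 * shift.  kvmppc_hv_find_lock_hpte() below searches the hash table for
 * an HPTE that translates eaddr under the given SLB entry value,
 * trying the primary and then the secondary hash bucket; on success it
 * returns the HPTE index with the HPTE still locked (HPTE_V_HVLOCK
 * set), or -1 if no match was found.
 */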
static int slb_base_page_shift[4] = {
	24,	/* 16M */
	16,	/* 64k */
	34,	/* 16G */
	20,	/* 1M, unsupported */
};

long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
			      unsigned long valid)
{
	unsigned int i;
	unsigned int pshift;
	unsigned long somask;
	unsigned long vsid, hash;
	unsigned long avpn;
	unsigned long *hpte;
	unsigned long mask, val;
	unsigned long v, r;

	/* Get page shift, work out hash and AVPN etc. */
	mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
	val = 0;
	pshift = 12;
	if (slb_v & SLB_VSID_L) {
		mask |= HPTE_V_LARGE;
		val |= HPTE_V_LARGE;
		pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
	}
	if (slb_v & SLB_VSID_B_1T) {
		somask = (1UL << 40) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
		vsid ^= vsid << 25;
	} else {
		somask = (1UL << 28) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
	}
	hash = (vsid ^ ((eaddr & somask) >> pshift)) & HPT_HASH_MASK;
	avpn = slb_v & ~(somask >> 16);	/* also includes B */
	avpn |= (eaddr & somask) >> 16;

	if (pshift >= 24)
		avpn &= ~((1UL << (pshift - 16)) - 1);
	else
		avpn &= ~0x7fUL;
	val |= avpn;

	for (;;) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7));

		for (i = 0; i < 16; i += 2) {
			/* Read the PTE racily */
			v = hpte[i] & ~HPTE_V_HVLOCK;

			/* Check valid/absent, hash, segment size and AVPN */
			if (!(v & valid) || (v & mask) != val)
				continue;

			/* Lock the PTE and read it under the lock */
			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
				cpu_relax();
			v = hpte[i] & ~HPTE_V_HVLOCK;
			r = hpte[i+1];

			/*
			 * Check the HPTE again, including large page size
			 * Since we don't currently allow any MPSS (mixed
			 * page-size segment) page sizes, it is sufficient
			 * to check against the actual page size.
			 */
			if ((v & valid) && (v & mask) == val &&
			    hpte_page_size(v, r) == (1ul << pshift))
				/* Return with the HPTE still locked */
				return (hash << 3) + (i >> 1);

			/* Unlock and move on */
			hpte[i] = v;
		}

		if (val & HPTE_V_SECONDARY)
			break;
		val |= HPTE_V_SECONDARY;
		hash = hash ^ HPT_HASH_MASK;
	}
	return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);

/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if the fault
 * should be passed through to the guest,
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	long int index;
	unsigned long v, r, gr;
	unsigned long *hpte;
	unsigned long valid;
	struct revmap_entry *rev;
	unsigned long pp, key;

	/* For protection fault, expect to find a valid HPTE */
	valid = HPTE_V_VALID;
	if (status & DSISR_NOHPTE)
		valid |= HPTE_V_ABSENT;

	index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
	if (index < 0) {
		if (status & DSISR_NOHPTE)
			return status;	/* there really was no HPTE */
		return 0;		/* for prot fault, HPTE disappeared */
	}
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	v = hpte[0] & ~HPTE_V_HVLOCK;
	r = hpte[1];
	rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
	gr = rev->guest_rpte;

	unlock_hpte(hpte, v);

	/* For not found, if the HPTE is valid by now, retry the instruction */
	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
		return 0;

	/* Check access permissions to the page */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
	if (!data) {
		if (gr & (HPTE_R_N | HPTE_R_G))
			return status | SRR1_ISI_N_OR_G;
		if (!hpte_read_permission(pp, slb_v & key))
			return status | SRR1_ISI_PROT;
	} else if (status & DSISR_ISSTORE) {
		/* check write permission */
		if (!hpte_write_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	} else {
		if (!hpte_read_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	}

	/* Check storage key, if applicable */
	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (status & DSISR_ISSTORE)
			perm >>= 1;
		if (perm & 1)
			return status | DSISR_KEYFAULT;
	}

	/* Save HPTE info for virtual-mode handler */
	vcpu->arch.pgfault_addr = addr;
	vcpu->arch.pgfault_index = index;
	vcpu->arch.pgfault_hpte[0] = v;
	vcpu->arch.pgfault_hpte[1] = r;

	/* Check the storage key to see if it is possibly emulated MMIO */
	if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
	    (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
	    (HPTE_R_KEY_HI | HPTE_R_KEY_LO))
		return -2;	/* MMIO emulation - load instr word */

	return -1;		/* send fault up to host kernel mode */
}