/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
        unsigned long addr = (unsigned long) x;
        pte_t *p;

        p = find_linux_pte(swapper_pg_dir, addr);
        if (!p || !pte_present(*p))
                return NULL;
        /* assume we don't have huge pages in vmalloc space... */
        addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
        return __va(addr);
}

/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
                             unsigned long *rmap, long pte_index, int realmode)
{
        struct revmap_entry *head, *tail;
        unsigned long i;

        if (*rmap & KVMPPC_RMAP_PRESENT) {
                i = *rmap & KVMPPC_RMAP_INDEX;
                head = &kvm->arch.revmap[i];
                if (realmode)
                        head = real_vmalloc_addr(head);
                tail = &kvm->arch.revmap[head->back];
                if (realmode)
                        tail = real_vmalloc_addr(tail);
                rev->forw = i;
                rev->back = head->back;
                tail->forw = pte_index;
                head->back = pte_index;
        } else {
                rev->forw = rev->back = pte_index;
                *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
                        pte_index | KVMPPC_RMAP_PRESENT;
        }
        unlock_rmap(rmap);
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
                                struct revmap_entry *rev,
                                unsigned long hpte_v, unsigned long hpte_r)
{
        struct revmap_entry *next, *prev;
        unsigned long gfn, ptel, head;
        struct kvm_memory_slot *memslot;
        unsigned long *rmap;
        unsigned long rcbits;

        rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
        ptel = rev->guest_rpte |= rcbits;
        gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
        memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
        if (!memslot)
                return;

        rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
        lock_rmap(rmap);

        head = *rmap & KVMPPC_RMAP_INDEX;
        next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
        prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
        next->back = rev->back;
        prev->forw = rev->forw;
        if (head == pte_index) {
                head = rev->forw;
                if (head == pte_index)
                        *rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
                else
                        *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
        }
        *rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
        unlock_rmap(rmap);
}

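/*
 * Look up the Linux PTE for a host virtual address and update its
 * accessed/dirty bits via kvmppc_read_update_linux_pte().  *pte_sizep
 * is set to the size of the backing page; a zero PTE is returned if
 * there is no present translation of at least the requested size.
 */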
static pte_t lookup_linux_pte(pgd_t *pgdir, unsigned long hva,
                              int writing, unsigned long *pte_sizep)
{
        pte_t *ptep;
        unsigned long ps = *pte_sizep;
        unsigned int shift;

        ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
        if (!ptep)
                return __pte(0);
        if (shift)
                *pte_sizep = 1ul << shift;
        else
                *pte_sizep = PAGE_SIZE;
        if (ps > *pte_sizep)
                return __pte(0);
        if (!pte_present(*ptep))
                return __pte(0);
        return kvmppc_read_update_linux_pte(ptep, writing);
}

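/* Release the HPTE lock by storing a first doubleword with HPTE_V_HVLOCK clear */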
static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v)
{
        asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
        hpte[0] = hpte_v;
}

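/*
 * Common H_ENTER implementation: validate the proposed HPTE, translate
 * the guest real address to a host real address (via the slot_phys
 * array or the Linux page tables), insert the HPTE into the guest
 * hashed page table, link it into the reverse map, and return the
 * chosen index through *pte_idx_ret.  The realmode flag says whether
 * we are running in real mode.
 */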
long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                       long pte_index, unsigned long pteh, unsigned long ptel,
                       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
{
        unsigned long i, pa, gpa, gfn, psize;
        unsigned long slot_fn, hva;
        unsigned long *hpte;
        struct revmap_entry *rev;
        unsigned long g_ptel = ptel;
        struct kvm_memory_slot *memslot;
        unsigned long *physp, pte_size;
        unsigned long is_io;
        unsigned long *rmap;
        pte_t pte;
        unsigned int writing;
        unsigned long mmu_seq;
        unsigned long rcbits;

        psize = hpte_page_size(pteh, ptel);
        if (!psize)
                return H_PARAMETER;
        writing = hpte_is_writable(ptel);
        pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

        /* used later to detect if we might have been invalidated */
        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();

        /* Find the memslot (if any) for this address */
        gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
        gfn = gpa >> PAGE_SHIFT;
        memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
        pa = 0;
        is_io = ~0ul;
        rmap = NULL;
        if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
                /* PPC970 can't do emulated MMIO */
                if (!cpu_has_feature(CPU_FTR_ARCH_206))
                        return H_PARAMETER;
                /* Emulated MMIO - mark this with key=31 */
                pteh |= HPTE_V_ABSENT;
                ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
                goto do_insert;
        }

        /* Check if the requested page fits entirely in the memslot. */
        if (!slot_is_aligned(memslot, psize))
                return H_PARAMETER;
        slot_fn = gfn - memslot->base_gfn;
        rmap = &memslot->arch.rmap[slot_fn];

        if (!kvm->arch.using_mmu_notifiers) {
                physp = memslot->arch.slot_phys;
                if (!physp)
                        return H_PARAMETER;
                physp += slot_fn;
                if (realmode)
                        physp = real_vmalloc_addr(physp);
                pa = *physp;
                if (!pa)
                        return H_TOO_HARD;
                is_io = pa & (HPTE_R_I | HPTE_R_W);
                pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
                pa &= PAGE_MASK;
        } else {
                /* Translate to host virtual address */
                hva = __gfn_to_hva_memslot(memslot, gfn);

                /* Look up the Linux PTE for the backing page */
                pte_size = psize;
                pte = lookup_linux_pte(pgdir, hva, writing, &pte_size);
                if (pte_present(pte)) {
                        if (writing && !pte_write(pte))
                                /* make the actual HPTE be read-only */
                                ptel = hpte_make_readonly(ptel);
                        is_io = hpte_cache_bits(pte_val(pte));
                        pa = pte_pfn(pte) << PAGE_SHIFT;
                }
        }

        if (pte_size < psize)
                return H_PARAMETER;
        if (pa && pte_size > psize)
                pa |= gpa & (pte_size - 1);

        ptel &= ~(HPTE_R_PP0 - psize);
        ptel |= pa;

        if (pa)
                pteh |= HPTE_V_VALID;
        else
                pteh |= HPTE_V_ABSENT;

        /* Check WIMG */
        if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {
                if (is_io)
                        return H_PARAMETER;
                /*
                 * Allow guest to map emulated device memory as
                 * uncacheable, but actually make it cacheable.
                 */
                ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
                ptel |= HPTE_R_M;
        }

        /* Find and lock the HPTEG slot to use */
 do_insert:
        if (pte_index >= kvm->arch.hpt_npte)
                return H_PARAMETER;
        if (likely((flags & H_EXACT) == 0)) {
                pte_index &= ~7UL;
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                for (i = 0; i < 8; ++i) {
                        if ((*hpte & HPTE_V_VALID) == 0 &&
                            try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                                          HPTE_V_ABSENT))
                                break;
                        hpte += 2;
                }
                if (i == 8) {
                        /*
                         * Since try_lock_hpte doesn't retry (not even stdcx.
                         * failures), it could be that there is a free slot
                         * but we transiently failed to lock it.  Try again,
                         * actually locking each slot and checking it.
                         */
                        hpte -= 16;
                        for (i = 0; i < 8; ++i) {
                                while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                        cpu_relax();
                                if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)))
                                        break;
                                *hpte &= ~HPTE_V_HVLOCK;
                                hpte += 2;
                        }
                        if (i == 8)
                                return H_PTEG_FULL;
                }
                pte_index += i;
        } else {
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                                   HPTE_V_ABSENT)) {
                        /* Lock the slot and check again */
                        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                cpu_relax();
                        if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
                                *hpte &= ~HPTE_V_HVLOCK;
                                return H_PTEG_FULL;
                        }
                }
        }

        /* Save away the guest's idea of the second HPTE dword */
        rev = &kvm->arch.revmap[pte_index];
        if (realmode)
                rev = real_vmalloc_addr(rev);
        if (rev)
                rev->guest_rpte = g_ptel;

        /* Link HPTE into reverse-map chain */
        if (pteh & HPTE_V_VALID) {
                if (realmode)
                        rmap = real_vmalloc_addr(rmap);
                lock_rmap(rmap);
                /* Check for pending invalidations under the rmap chain lock */
                if (kvm->arch.using_mmu_notifiers &&
                    mmu_notifier_retry(kvm, mmu_seq)) {
                        /* inval in progress, write a non-present HPTE */
                        pteh |= HPTE_V_ABSENT;
                        pteh &= ~HPTE_V_VALID;
                        unlock_rmap(rmap);
                } else {
                        kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
                                                realmode);
                        /* Only set R/C in real HPTE if already set in *rmap */
                        rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
                        ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
                }
        }

        hpte[1] = ptel;

        /* Write the first HPTE dword, unlocking the HPTE and making it valid */
        eieio();
        hpte[0] = pteh;
        asm volatile("ptesync" : : : "memory");

        *pte_idx_ret = pte_index;
        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);

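/* Real-mode H_ENTER handler; the chosen PTE index is returned to the guest in GPR 4 */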
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                    long pte_index, unsigned long pteh, unsigned long ptel)
{
        return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
                                 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
}

#define LOCK_TOKEN      (*(u32 *)(&get_paca()->lock_token))

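/*
 * Try once to acquire the global tlbie lock with this CPU's paca lock
 * token; returns non-zero on success, zero if the lock is already held.
 */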
static inline int try_lock_tlbie(unsigned int *lock)
{
        unsigned int tmp, old;
        unsigned int token = LOCK_TOKEN;

        asm volatile("1:lwarx   %1,0,%2\n"
                     "  cmpwi   cr0,%1,0\n"
                     "  bne     2f\n"
                     "  stwcx.  %3,0,%2\n"
                     "  bne-    1b\n"
                     "  isync\n"
                     "2:"
                     : "=&r" (tmp), "=&r" (old)
                     : "r" (lock), "r" (token)
                     : "cc", "memory");
        return old == 0;
}

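/*
 * H_REMOVE: if the HPTE at pte_index matches the AVPN/ANDCOND criteria,
 * invalidate and clear it, flush any valid translation from the TLB and
 * unlink it from the reverse map; the old first doubleword and the
 * guest view of the second doubleword are returned in GPR 4 and 5.
 */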
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
                     unsigned long pte_index, unsigned long avpn,
                     unsigned long va)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte;
        unsigned long v, r, rb;
        struct revmap_entry *rev;

        if (pte_index >= kvm->arch.hpt_npte)
                return H_PARAMETER;
        hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
            ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
            ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
                hpte[0] &= ~HPTE_V_HVLOCK;
                return H_NOT_FOUND;
        }

        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        v = hpte[0] & ~HPTE_V_HVLOCK;
        if (v & HPTE_V_VALID) {
                hpte[0] &= ~HPTE_V_VALID;
                rb = compute_tlbie_rb(v, hpte[1], pte_index);
                if (!(flags & H_LOCAL) && atomic_read(&kvm->online_vcpus) > 1) {
                        while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                                cpu_relax();
                        asm volatile("ptesync" : : : "memory");
                        asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
                                     : : "r" (rb), "r" (kvm->arch.lpid));
                        asm volatile("ptesync" : : : "memory");
                        kvm->arch.tlbie_lock = 0;
                } else {
                        asm volatile("ptesync" : : : "memory");
                        asm volatile("tlbiel %0" : : "r" (rb));
                        asm volatile("ptesync" : : : "memory");
                }
                /* Read PTE low word after tlbie to get final R/C values */
                remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
        }
        r = rev->guest_rpte;
        unlock_hpte(hpte, 0);

        vcpu->arch.gpr[4] = v;
        vcpu->arch.gpr[5] = r;
        return H_SUCCESS;
}

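/*
 * H_BULK_REMOVE: process up to four remove requests passed in GPR 4-11,
 * batching the HPTE invalidations so the tlbies are issued under a
 * single hold of the tlbie lock.  Per-request status and the final R/C
 * bits are written back into the argument registers.
 */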
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *args = &vcpu->arch.gpr[4];
        unsigned long *hp, *hptes[4], tlbrb[4];
        long int i, j, k, n, found, indexes[4];
        unsigned long flags, req, pte_index, rcbits;
        long int local = 0;
        long int ret = H_SUCCESS;
        struct revmap_entry *rev, *revs[4];

        if (atomic_read(&kvm->online_vcpus) == 1)
                local = 1;
        for (i = 0; i < 4 && ret == H_SUCCESS; ) {
                n = 0;
                for (; i < 4; ++i) {
                        j = i * 2;
                        pte_index = args[j];
                        flags = pte_index >> 56;
                        pte_index &= ((1ul << 56) - 1);
                        req = flags >> 6;
                        flags &= 3;
                        if (req == 3) {         /* no more requests */
                                i = 4;
                                break;
                        }
                        if (req != 1 || flags == 3 ||
                            pte_index >= kvm->arch.hpt_npte) {
                                /* parameter error */
                                args[j] = ((0xa0 | flags) << 56) + pte_index;
                                ret = H_PARAMETER;
                                break;
                        }
                        hp = (unsigned long *)
                                (kvm->arch.hpt_virt + (pte_index << 4));
                        /* to avoid deadlock, don't spin except for first */
                        if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
                                if (n)
                                        break;
                                while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
                                        cpu_relax();
                        }
                        found = 0;
                        if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) {
                                switch (flags & 3) {
                                case 0:         /* absolute */
                                        found = 1;
                                        break;
                                case 1:         /* andcond */
                                        if (!(hp[0] & args[j + 1]))
                                                found = 1;
                                        break;
                                case 2:         /* AVPN */
                                        if ((hp[0] & ~0x7fUL) == args[j + 1])
                                                found = 1;
                                        break;
                                }
                        }
                        if (!found) {
                                hp[0] &= ~HPTE_V_HVLOCK;
                                args[j] = ((0x90 | flags) << 56) + pte_index;
                                continue;
                        }

                        args[j] = ((0x80 | flags) << 56) + pte_index;
                        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);

                        if (!(hp[0] & HPTE_V_VALID)) {
                                /* insert R and C bits from PTE */
                                rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
                                args[j] |= rcbits << (56 - 5);
                                hp[0] = 0;
                                continue;
                        }

                        hp[0] &= ~HPTE_V_VALID;         /* leave it locked */
                        tlbrb[n] = compute_tlbie_rb(hp[0], hp[1], pte_index);
                        indexes[n] = j;
                        hptes[n] = hp;
                        revs[n] = rev;
                        ++n;
                }

                if (!n)
                        break;

                /* Now that we've collected a batch, do the tlbies */
                if (!local) {
                        while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
                                cpu_relax();
                        asm volatile("ptesync" : : : "memory");
                        for (k = 0; k < n; ++k)
                                asm volatile(PPC_TLBIE(%1,%0) : :
                                             "r" (tlbrb[k]),
                                             "r" (kvm->arch.lpid));
                        asm volatile("eieio; tlbsync; ptesync" : : : "memory");
                        kvm->arch.tlbie_lock = 0;
                } else {
                        asm volatile("ptesync" : : : "memory");
                        for (k = 0; k < n; ++k)
                                asm volatile("tlbiel %0" : : "r" (tlbrb[k]));
                        asm volatile("ptesync" : : : "memory");
                }

                /* Read PTE low words after tlbie to get final R/C values */
                for (k = 0; k < n; ++k) {
                        j = indexes[k];
                        pte_index = args[j] & ((1ul << 56) - 1);
                        hp = hptes[k];
                        rev = revs[k];
                        remove_revmap_chain(kvm, pte_index, rev, hp[0], hp[1]);
                        rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
                        args[j] |= rcbits << (56 - 5);
                        hp[0] = 0;
                }
        }

        return ret;
}

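/*
 * H_PROTECT: update the pp, N and key bits of an existing HPTE and of
 * the guest's view of it, invalidating the old translation in the TLB
 * before the new second doubleword is made visible.
 */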
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                      unsigned long pte_index, unsigned long avpn,
                      unsigned long va)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte;
        struct revmap_entry *rev;
        unsigned long v, r, rb, mask, bits;

        if (pte_index >= kvm->arch.hpt_npte)
                return H_PARAMETER;

        hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
            ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
                hpte[0] &= ~HPTE_V_HVLOCK;
                return H_NOT_FOUND;
        }

        if (atomic_read(&kvm->online_vcpus) == 1)
                flags |= H_LOCAL;
        v = hpte[0];
        bits = (flags << 55) & HPTE_R_PP0;
        bits |= (flags << 48) & HPTE_R_KEY_HI;
        bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

        /* Update guest view of 2nd HPTE dword */
        mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
                HPTE_R_KEY_HI | HPTE_R_KEY_LO;
        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        if (rev) {
                r = (rev->guest_rpte & ~mask) | bits;
                rev->guest_rpte = r;
        }
        r = (hpte[1] & ~mask) | bits;

        /* Update HPTE */
        if (v & HPTE_V_VALID) {
                rb = compute_tlbie_rb(v, r, pte_index);
                hpte[0] = v & ~HPTE_V_VALID;
                if (!(flags & H_LOCAL)) {
                        while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
                                cpu_relax();
                        asm volatile("ptesync" : : : "memory");
                        asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
                                     : : "r" (rb), "r" (kvm->arch.lpid));
                        asm volatile("ptesync" : : : "memory");
                        kvm->arch.tlbie_lock = 0;
                } else {
                        asm volatile("ptesync" : : : "memory");
                        asm volatile("tlbiel %0" : : "r" (rb));
                        asm volatile("ptesync" : : : "memory");
                }
        }
        hpte[1] = r;
        eieio();
        hpte[0] = v & ~HPTE_V_HVLOCK;
        asm volatile("ptesync" : : : "memory");
        return H_SUCCESS;
}

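/*
 * H_READ: return the contents of one HPTE (or four with H_READ_4) in
 * GPR 4 and up, presenting absent HPTEs as valid and substituting the
 * guest's view of the second doubleword.
 */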
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
                   unsigned long pte_index)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte, v, r;
        int i, n = 1;
        struct revmap_entry *rev = NULL;

        if (pte_index >= kvm->arch.hpt_npte)
                return H_PARAMETER;
        if (flags & H_READ_4) {
                pte_index &= ~3;
                n = 4;
        }
        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        for (i = 0; i < n; ++i, ++pte_index) {
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                v = hpte[0] & ~HPTE_V_HVLOCK;
                r = hpte[1];
                if (v & HPTE_V_ABSENT) {
                        v &= ~HPTE_V_ABSENT;
                        v |= HPTE_V_VALID;
                }
                if (v & HPTE_V_VALID)
                        r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
                vcpu->arch.gpr[4 + i * 2] = v;
                vcpu->arch.gpr[5 + i * 2] = r;
        }
        return H_SUCCESS;
}

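/* Clear the valid bit of an HPTE and flush the stale translation from the TLB */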
void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
                        unsigned long pte_index)
{
        unsigned long rb;

        hptep[0] &= ~HPTE_V_VALID;
        rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
        while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                cpu_relax();
        asm volatile("ptesync" : : : "memory");
        asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
                     : : "r" (rb), "r" (kvm->arch.lpid));
        asm volatile("ptesync" : : : "memory");
        kvm->arch.tlbie_lock = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);

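/*
 * Clear the reference (R) bit of an HPTE and flush the translation so
 * that the bit gets set again on the next access.
 */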
void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
                        unsigned long pte_index)
{
        unsigned long rb;
        unsigned char rbyte;

        rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
        rbyte = (hptep[1] & ~HPTE_R_R) >> 8;
        /* modify only the second-last byte, which contains the ref bit */
        *((char *)hptep + 14) = rbyte;
        while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                cpu_relax();
        asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
                     : : "r" (rb), "r" (kvm->arch.lpid));
        asm volatile("ptesync" : : : "memory");
        kvm->arch.tlbie_lock = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);

static int slb_base_page_shift[4] = {
        24,     /* 16M */
        16,     /* 64k */
        34,     /* 16G */
        20,     /* 1M, unsupported */
};

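/*
 * Search the guest hashed page table for an HPTE matching the given
 * effective address and SLB entry contents.  On success the HPTE is
 * left locked (HPTE_V_HVLOCK set) and its global index is returned;
 * -1 is returned if neither the primary nor the secondary hash group
 * contains a match.
 */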
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
                              unsigned long valid)
{
        unsigned int i;
        unsigned int pshift;
        unsigned long somask;
        unsigned long vsid, hash;
        unsigned long avpn;
        unsigned long *hpte;
        unsigned long mask, val;
        unsigned long v, r;

        /* Get page shift, work out hash and AVPN etc. */
        mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
        val = 0;
        pshift = 12;
        if (slb_v & SLB_VSID_L) {
                mask |= HPTE_V_LARGE;
                val |= HPTE_V_LARGE;
                pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
        }
        if (slb_v & SLB_VSID_B_1T) {
                somask = (1UL << 40) - 1;
                vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
                vsid ^= vsid << 25;
        } else {
                somask = (1UL << 28) - 1;
                vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
        }
        hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;
        avpn = slb_v & ~(somask >> 16); /* also includes B */
        avpn |= (eaddr & somask) >> 16;

        if (pshift >= 24)
                avpn &= ~((1UL << (pshift - 16)) - 1);
        else
                avpn &= ~0x7fUL;
        val |= avpn;

        for (;;) {
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7));

                for (i = 0; i < 16; i += 2) {
                        /* Read the PTE racily */
                        v = hpte[i] & ~HPTE_V_HVLOCK;

                        /* Check valid/absent, hash, segment size and AVPN */
                        if (!(v & valid) || (v & mask) != val)
                                continue;

                        /* Lock the PTE and read it under the lock */
                        while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
                                cpu_relax();
                        v = hpte[i] & ~HPTE_V_HVLOCK;
                        r = hpte[i+1];

                        /*
                         * Check the HPTE again, including large page size
                         * Since we don't currently allow any MPSS (mixed
                         * page-size segment) page sizes, it is sufficient
                         * to check against the actual page size.
                         */
                        if ((v & valid) && (v & mask) == val &&
                            hpte_page_size(v, r) == (1ul << pshift))
                                /* Return with the HPTE still locked */
                                return (hash << 3) + (i >> 1);

                        /* Unlock and move on */
                        hpte[i] = v;
                }

                if (val & HPTE_V_SECONDARY)
                        break;
                val |= HPTE_V_SECONDARY;
                hash = hash ^ kvm->arch.hpt_mask;
        }
        return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);

/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if the
 * interrupt should simply be passed to the guest,
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                          unsigned long slb_v, unsigned int status, bool data)
{
        struct kvm *kvm = vcpu->kvm;
        long int index;
        unsigned long v, r, gr;
        unsigned long *hpte;
        unsigned long valid;
        struct revmap_entry *rev;
        unsigned long pp, key;

        /* For protection fault, expect to find a valid HPTE */
        valid = HPTE_V_VALID;
        if (status & DSISR_NOHPTE)
                valid |= HPTE_V_ABSENT;

        index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
        if (index < 0) {
                if (status & DSISR_NOHPTE)
                        return status;  /* there really was no HPTE */
                return 0;               /* for prot fault, HPTE disappeared */
        }
        hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
        v = hpte[0] & ~HPTE_V_HVLOCK;
        r = hpte[1];
        rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
        gr = rev->guest_rpte;

        unlock_hpte(hpte, v);

        /* For not found, if the HPTE is valid by now, retry the instruction */
        if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
                return 0;

        /* Check access permissions to the page */
        pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
        key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
        status &= ~DSISR_NOHPTE;        /* DSISR_NOHPTE == SRR1_ISI_NOPT */
        if (!data) {
                if (gr & (HPTE_R_N | HPTE_R_G))
                        return status | SRR1_ISI_N_OR_G;
                if (!hpte_read_permission(pp, slb_v & key))
                        return status | SRR1_ISI_PROT;
        } else if (status & DSISR_ISSTORE) {
                /* check write permission */
                if (!hpte_write_permission(pp, slb_v & key))
                        return status | DSISR_PROTFAULT;
        } else {
                if (!hpte_read_permission(pp, slb_v & key))
                        return status | DSISR_PROTFAULT;
        }

        /* Check storage key, if applicable */
        if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
                unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
                if (status & DSISR_ISSTORE)
                        perm >>= 1;
                if (perm & 1)
                        return status | DSISR_KEYFAULT;
        }

        /* Save HPTE info for virtual-mode handler */
        vcpu->arch.pgfault_addr = addr;
        vcpu->arch.pgfault_index = index;
        vcpu->arch.pgfault_hpte[0] = v;
        vcpu->arch.pgfault_hpte[1] = r;

        /* Check the storage key to see if it is possibly emulated MMIO */
        if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
            (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
             (HPTE_R_KEY_HI | HPTE_R_KEY_LO))
                return -2;      /* MMIO emulation - load instr word */

        return -1;              /* send fault up to host kernel mode */
}