/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

/*
 * Since this file is built in even if KVM is a module, we need
 * a local copy of this function for the case where kvm_main.c is
 * modular.
 */
static struct kvm_memory_slot *builtin_gfn_to_memslot(struct kvm *kvm,
                                                      gfn_t gfn)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots)
                if (gfn >= memslot->base_gfn &&
                    gfn < memslot->base_gfn + memslot->npages)
                        return memslot;
        return NULL;
}

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
        unsigned long addr = (unsigned long) x;
        pte_t *p;

        p = find_linux_pte(swapper_pg_dir, addr);
        if (!p || !pte_present(*p))
                return NULL;
        /* assume we don't have huge pages in vmalloc space... */
        addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
        return __va(addr);
}

/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
static void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
                        unsigned long *rmap, long pte_index, int realmode)
{
        struct revmap_entry *head, *tail;
        unsigned long i;

        if (*rmap & KVMPPC_RMAP_PRESENT) {
                i = *rmap & KVMPPC_RMAP_INDEX;
                head = &kvm->arch.revmap[i];
                if (realmode)
                        head = real_vmalloc_addr(head);
                tail = &kvm->arch.revmap[head->back];
                if (realmode)
                        tail = real_vmalloc_addr(tail);
                rev->forw = i;
                rev->back = head->back;
                tail->forw = pte_index;
                head->back = pte_index;
        } else {
                rev->forw = rev->back = pte_index;
                i = pte_index;
        }
        smp_wmb();
        *rmap = i | KVMPPC_RMAP_REFERENCED | KVMPPC_RMAP_PRESENT; /* unlock */
}

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
                                unsigned long hpte_v)
{
        struct revmap_entry *rev, *next, *prev;
        unsigned long gfn, ptel, head;
        struct kvm_memory_slot *memslot;
        unsigned long *rmap;

        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        ptel = rev->guest_rpte;
        gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
        memslot = builtin_gfn_to_memslot(kvm, gfn);
        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
                return;

        rmap = real_vmalloc_addr(&memslot->rmap[gfn - memslot->base_gfn]);
        lock_rmap(rmap);

        head = *rmap & KVMPPC_RMAP_INDEX;
        next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
        prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
        next->back = rev->back;
        prev->forw = rev->forw;
        if (head == pte_index) {
                head = rev->forw;
                if (head == pte_index)
                        *rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
                else
                        *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
        }
        unlock_rmap(rmap);
}
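
/*
 * Handle the H_ENTER hypercall: insert an HPTE into the guest hashed
 * page table.  Checks the page size and WIMG bits, translates the guest
 * physical address through the memslot's slot_phys[] array (or marks the
 * entry absent with key=31 for emulated MMIO), picks a free slot in the
 * chosen HPTE group unless H_EXACT was given, links real pages into the
 * reverse-map chain, and then writes the HPTE.  May be called in real
 * mode, hence the real_vmalloc_addr() translations.  On success the
 * chosen pte_index is returned to the guest in GPR4.
 */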
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                    long pte_index, unsigned long pteh, unsigned long ptel)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long i, pa, gpa, gfn, psize;
        unsigned long slot_fn;
        unsigned long *hpte;
        struct revmap_entry *rev;
        unsigned long g_ptel = ptel;
        struct kvm_memory_slot *memslot;
        unsigned long *physp, pte_size;
        unsigned long is_io;
        unsigned long *rmap;
        bool realmode = vcpu->arch.vcore->vcore_state == VCORE_RUNNING;

        psize = hpte_page_size(pteh, ptel);
        if (!psize)
                return H_PARAMETER;
        pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

        /* Find the memslot (if any) for this address */
        gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
        gfn = gpa >> PAGE_SHIFT;
        memslot = builtin_gfn_to_memslot(kvm, gfn);
        pa = 0;
        rmap = NULL;
        if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
                /* PPC970 can't do emulated MMIO */
                if (!cpu_has_feature(CPU_FTR_ARCH_206))
                        return H_PARAMETER;
                /* Emulated MMIO - mark this with key=31 */
                pteh |= HPTE_V_ABSENT;
                ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
                goto do_insert;
        }

        /* Check if the requested page fits entirely in the memslot. */
        if (!slot_is_aligned(memslot, psize))
                return H_PARAMETER;
        slot_fn = gfn - memslot->base_gfn;
        rmap = &memslot->rmap[slot_fn];

        physp = kvm->arch.slot_phys[memslot->id];
        if (!physp)
                return H_PARAMETER;
        physp += slot_fn;
        if (realmode)
                physp = real_vmalloc_addr(physp);
        pa = *physp;
        if (!pa)
                return H_TOO_HARD;
        is_io = pa & (HPTE_R_I | HPTE_R_W);
        pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
        pa &= PAGE_MASK;

        if (pte_size < psize)
                return H_PARAMETER;
        if (pa && pte_size > psize)
                pa |= gpa & (pte_size - 1);

        ptel &= ~(HPTE_R_PP0 - psize);
        ptel |= pa;
        pteh |= HPTE_V_VALID;

        /* Check WIMG */
        if (!hpte_cache_flags_ok(ptel, is_io)) {
                if (is_io)
                        return H_PARAMETER;
                /*
                 * Allow guest to map emulated device memory as
                 * uncacheable, but actually make it cacheable.
                 */
                ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
                ptel |= HPTE_R_M;
        }

 do_insert:
        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;
        if (likely((flags & H_EXACT) == 0)) {
                pte_index &= ~7UL;
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                for (i = 0; i < 8; ++i) {
                        if ((*hpte & HPTE_V_VALID) == 0 &&
                            try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                                          HPTE_V_ABSENT))
                                break;
                        hpte += 2;
                }
                if (i == 8) {
                        /*
                         * Since try_lock_hpte doesn't retry (not even stdcx.
                         * failures), it could be that there is a free slot
                         * but we transiently failed to lock it.  Try again,
                         * actually locking each slot and checking it.
                         */
                        hpte -= 16;
                        for (i = 0; i < 8; ++i) {
                                while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                        cpu_relax();
                                if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)))
                                        break;
                                *hpte &= ~HPTE_V_HVLOCK;
                                hpte += 2;
                        }
                        if (i == 8)
                                return H_PTEG_FULL;
                }
                pte_index += i;
        } else {
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                                   HPTE_V_ABSENT)) {
                        /* Lock the slot and check again */
                        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                cpu_relax();
                        if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
                                *hpte &= ~HPTE_V_HVLOCK;
                                return H_PTEG_FULL;
                        }
                }
        }

        /* Save away the guest's idea of the second HPTE dword */
        rev = &kvm->arch.revmap[pte_index];
        if (realmode)
                rev = real_vmalloc_addr(rev);
        if (rev)
                rev->guest_rpte = g_ptel;

        /* Link HPTE into reverse-map chain */
        if (pteh & HPTE_V_VALID) {
                if (realmode)
                        rmap = real_vmalloc_addr(rmap);
                lock_rmap(rmap);
                kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index, realmode);
        }

        hpte[1] = ptel;

        /* Write the first HPTE dword, unlocking the HPTE and making it valid */
        eieio();
        hpte[0] = pteh;
        asm volatile("ptesync" : : : "memory");

        vcpu->arch.gpr[4] = pte_index;
        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_enter);

#define LOCK_TOKEN      (*(u32 *)(&get_paca()->lock_token))
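
/*
 * Try to acquire the lock that serializes tlbie execution for this guest
 * (kvm->arch.tlbie_lock).  Returns non-zero if the lock was free and has
 * now been taken, zero if it is already held; it does not spin on a held
 * lock, so callers loop with cpu_relax() until it succeeds.  The lock is
 * released by simply storing 0 to it.
 */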
static inline int try_lock_tlbie(unsigned int *lock)
{
        unsigned int tmp, old;
        unsigned int token = LOCK_TOKEN;

        asm volatile("1:lwarx %1,0,%2\n"
                     " cmpwi cr0,%1,0\n"
                     " bne 2f\n"
                     " stwcx. %3,0,%2\n"
                     " bne- 1b\n"
                     " isync\n"
                     "2:"
                     : "=&r" (tmp), "=&r" (old)
                     : "r" (lock), "r" (token)
                     : "cc", "memory");
        return old == 0;
}
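
/*
 * Handle the H_REMOVE hypercall: invalidate one HPTE, subject to the
 * H_AVPN / H_ANDCOND match conditions.  The old HPTE contents are
 * returned in GPR4/GPR5; if the entry was valid it is unlinked from the
 * reverse-map chain and its translation is flushed with tlbie (or tlbiel
 * when only one vcpu is online).
 */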
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
                     unsigned long pte_index, unsigned long avpn,
                     unsigned long va)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte;
        unsigned long v, r, rb;

        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;
        hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
            ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
            ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
                hpte[0] &= ~HPTE_V_HVLOCK;
                return H_NOT_FOUND;
        }
        if (atomic_read(&kvm->online_vcpus) == 1)
                flags |= H_LOCAL;
        vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
        vcpu->arch.gpr[5] = r = hpte[1];
        rb = compute_tlbie_rb(v, r, pte_index);
        if (v & HPTE_V_VALID)
                remove_revmap_chain(kvm, pte_index, v);
        smp_wmb();
        hpte[0] = 0;
        if (!(v & HPTE_V_VALID))
                return H_SUCCESS;
        if (!(flags & H_LOCAL)) {
                while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                        cpu_relax();
                asm volatile("ptesync" : : : "memory");
                asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
                             : : "r" (rb), "r" (kvm->arch.lpid));
                asm volatile("ptesync" : : : "memory");
                kvm->arch.tlbie_lock = 0;
        } else {
                asm volatile("ptesync" : : : "memory");
                asm volatile("tlbiel %0" : : "r" (rb));
                asm volatile("ptesync" : : : "memory");
        }
        return H_SUCCESS;
}
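
/*
 * Handle the H_BULK_REMOVE hypercall: process up to four remove requests
 * packed into GPR4..GPR11, two registers per request.  Each request is
 * checked against its absolute / andcond / AVPN condition; matching HPTEs
 * are cleared and a return code (including the HPTE's R and C bits) is
 * written back into the request's first register.  The required TLB
 * invalidations are batched and issued at the end under the tlbie lock.
 */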
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *args = &vcpu->arch.gpr[4];
        unsigned long *hp, tlbrb[4];
        long int i, found;
        long int n_inval = 0;
        unsigned long flags, req, pte_index;
        long int local = 0;
        long int ret = H_SUCCESS;

        if (atomic_read(&kvm->online_vcpus) == 1)
                local = 1;
        for (i = 0; i < 4; ++i) {
                pte_index = args[i * 2];
                flags = pte_index >> 56;
                pte_index &= ((1ul << 56) - 1);
                req = flags >> 6;
                flags &= 3;
                if (req == 3)
                        break;
                if (req != 1 || flags == 3 ||
                    pte_index >= HPT_NPTE) {
                        /* parameter error */
                        args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
                        ret = H_PARAMETER;
                        break;
                }
                hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
                        cpu_relax();
                found = 0;
                if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) {
                        switch (flags & 3) {
                        case 0:         /* absolute */
                                found = 1;
                                break;
                        case 1:         /* andcond */
                                if (!(hp[0] & args[i * 2 + 1]))
                                        found = 1;
                                break;
                        case 2:         /* AVPN */
                                if ((hp[0] & ~0x7fUL) == args[i * 2 + 1])
                                        found = 1;
                                break;
                        }
                }
                if (!found) {
                        hp[0] &= ~HPTE_V_HVLOCK;
                        args[i * 2] = ((0x90 | flags) << 56) + pte_index;
                        continue;
                }
                /* insert R and C bits from PTE */
                flags |= (hp[1] >> 5) & 0x0c;
                args[i * 2] = ((0x80 | flags) << 56) + pte_index;
                if (hp[0] & HPTE_V_VALID) {
                        tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
                        remove_revmap_chain(kvm, pte_index, hp[0]);
                }
                smp_wmb();
                hp[0] = 0;
        }
        if (n_inval == 0)
                return ret;

        if (!local) {
                while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                        cpu_relax();
                asm volatile("ptesync" : : : "memory");
                for (i = 0; i < n_inval; ++i)
                        asm volatile(PPC_TLBIE(%1,%0)
                                     : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
                asm volatile("eieio; tlbsync; ptesync" : : : "memory");
                kvm->arch.tlbie_lock = 0;
        } else {
                asm volatile("ptesync" : : : "memory");
                for (i = 0; i < n_inval; ++i)
                        asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
                asm volatile("ptesync" : : : "memory");
        }
        return ret;
}
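
/*
 * Handle the H_PROTECT hypercall: update the pp0, pp, N and storage-key
 * bits of an existing HPTE, optionally matching on the AVPN.  Both the
 * guest's view of the second doubleword and the real HPTE are updated,
 * and the old translation is flushed if the HPTE was valid.
 */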
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                      unsigned long pte_index, unsigned long avpn,
                      unsigned long va)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte;
        struct revmap_entry *rev;
        unsigned long v, r, rb, mask, bits;

        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;

        hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
            ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
                hpte[0] &= ~HPTE_V_HVLOCK;
                return H_NOT_FOUND;
        }

        if (atomic_read(&kvm->online_vcpus) == 1)
                flags |= H_LOCAL;
        v = hpte[0];
        bits = (flags << 55) & HPTE_R_PP0;
        bits |= (flags << 48) & HPTE_R_KEY_HI;
        bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

        /* Update guest view of 2nd HPTE dword */
        mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
                HPTE_R_KEY_HI | HPTE_R_KEY_LO;
        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        if (rev) {
                r = (rev->guest_rpte & ~mask) | bits;
                rev->guest_rpte = r;
        }
        r = (hpte[1] & ~mask) | bits;

        /* Update HPTE */
        if (v & HPTE_V_VALID) {
                rb = compute_tlbie_rb(v, r, pte_index);
                hpte[0] = v & ~HPTE_V_VALID;
                if (!(flags & H_LOCAL)) {
                        while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                                cpu_relax();
                        asm volatile("ptesync" : : : "memory");
                        asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
                                     : : "r" (rb), "r" (kvm->arch.lpid));
                        asm volatile("ptesync" : : : "memory");
                        kvm->arch.tlbie_lock = 0;
                } else {
                        asm volatile("ptesync" : : : "memory");
                        asm volatile("tlbiel %0" : : "r" (rb));
                        asm volatile("ptesync" : : : "memory");
                }
        }
        hpte[1] = r;
        eieio();
        hpte[0] = v & ~HPTE_V_HVLOCK;
        asm volatile("ptesync" : : : "memory");
        return H_SUCCESS;
}
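
/*
 * Handle the H_READ hypercall: read one HPTE (or a group of four when
 * H_READ_4 is set) and return the contents in GPR4 onwards.  Entries
 * marked absent are reported to the guest as valid, and with H_R_XLATE
 * the guest's original second doubleword is returned from the reverse
 * map instead of the version containing the real address.
 */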
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
                   unsigned long pte_index)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte, v, r;
        int i, n = 1;
        struct revmap_entry *rev = NULL;

        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;
        if (flags & H_READ_4) {
                pte_index &= ~3;
                n = 4;
        }
        if (flags & H_R_XLATE)
                rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        for (i = 0; i < n; ++i, ++pte_index) {
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                v = hpte[0] & ~HPTE_V_HVLOCK;
                r = hpte[1];
                if (v & HPTE_V_ABSENT) {
                        v &= ~HPTE_V_ABSENT;
                        v |= HPTE_V_VALID;
                }
                if (v & HPTE_V_VALID) {
                        if (rev)
                                r = rev[i].guest_rpte;
                        else
                                r = hpte[1] | HPTE_R_RPN;
                }
                vcpu->arch.gpr[4 + i * 2] = v;
                vcpu->arch.gpr[5 + i * 2] = r;
        }
        return H_SUCCESS;
}
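
/*
 * Base page shift implied by the LP field of a large-page (L = 1) SLB
 * entry; used below to compute the hash value and AVPN.
 */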
static int slb_base_page_shift[4] = {
        24,     /* 16M */
        16,     /* 64k */
        34,     /* 16G */
        20,     /* 1M, unsupported */
};
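
/*
 * Search the guest hashed page table for an HPTE matching the given
 * effective address and SLB entry, looking in both the primary and the
 * secondary hash bucket.  'valid' selects which of HPTE_V_VALID and
 * HPTE_V_ABSENT to accept.  On success the index of the matching HPTE is
 * returned with that HPTE still locked; -1 is returned if there is no
 * match.
 */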
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
                              unsigned long valid)
{
        unsigned int i;
        unsigned int pshift;
        unsigned long somask;
        unsigned long vsid, hash;
        unsigned long avpn;
        unsigned long *hpte;
        unsigned long mask, val;
        unsigned long v, r;

        /* Get page shift, work out hash and AVPN etc. */
        mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
        val = 0;
        pshift = 12;
        if (slb_v & SLB_VSID_L) {
                mask |= HPTE_V_LARGE;
                val |= HPTE_V_LARGE;
                pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
        }
        if (slb_v & SLB_VSID_B_1T) {
                somask = (1UL << 40) - 1;
                vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
                vsid ^= vsid << 25;
        } else {
                somask = (1UL << 28) - 1;
                vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
        }
        hash = (vsid ^ ((eaddr & somask) >> pshift)) & HPT_HASH_MASK;
        avpn = slb_v & ~(somask >> 16); /* also includes B */
        avpn |= (eaddr & somask) >> 16;

        if (pshift >= 24)
                avpn &= ~((1UL << (pshift - 16)) - 1);
        else
                avpn &= ~0x7fUL;
        val |= avpn;

        for (;;) {
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7));

                for (i = 0; i < 16; i += 2) {
                        /* Read the PTE racily */
                        v = hpte[i] & ~HPTE_V_HVLOCK;

                        /* Check valid/absent, hash, segment size and AVPN */
                        if (!(v & valid) || (v & mask) != val)
                                continue;

                        /* Lock the PTE and read it under the lock */
                        while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
                                cpu_relax();
                        v = hpte[i] & ~HPTE_V_HVLOCK;
                        r = hpte[i+1];

                        /*
                         * Check the HPTE again, including large page size
                         * Since we don't currently allow any MPSS (mixed
                         * page-size segment) page sizes, it is sufficient
                         * to check against the actual page size.
                         */
                        if ((v & valid) && (v & mask) == val &&
                            hpte_page_size(v, r) == (1ul << pshift))
                                /* Return with the HPTE still locked */
                                return (hash << 3) + (i >> 1);

                        /* Unlock and move on */
                        hpte[i] = v;
                }

                if (val & HPTE_V_SECONDARY)
                        break;
                val |= HPTE_V_SECONDARY;
                hash = hash ^ HPT_HASH_MASK;
        }
        return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);

/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing an emulated MMIO page.
 * Returns a possibly modified status (DSISR) value if not
 * (i.e. pass the interrupt to the guest),
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word,
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                          unsigned long slb_v, unsigned int status)
{
        struct kvm *kvm = vcpu->kvm;
        long int index;
        unsigned long v, r, gr;
        unsigned long *hpte;
        unsigned long valid;
        struct revmap_entry *rev;
        unsigned long pp, key;

        valid = HPTE_V_VALID | HPTE_V_ABSENT;
        index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
        if (index < 0)
                return status;  /* there really was no HPTE */

        hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
        v = hpte[0] & ~HPTE_V_HVLOCK;
        r = hpte[1];
        rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
        gr = rev->guest_rpte;

        /* Unlock the HPTE */
        asm volatile("lwsync" : : : "memory");
        hpte[0] = v;

        /* If the HPTE is valid by now, retry the instruction */
        if (v & HPTE_V_VALID)
                return 0;

        /* Check access permissions to the page */
        pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
        key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
        if (status & DSISR_ISSTORE) {
                /* check write permission */
                if (!hpte_write_permission(pp, slb_v & key))
                        goto protfault;
        } else {
                if (!hpte_read_permission(pp, slb_v & key))
                        goto protfault;
        }

        /* Check storage key, if applicable */
        if (vcpu->arch.shregs.msr & MSR_DR) {
                unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
                if (status & DSISR_ISSTORE)
                        perm >>= 1;
                if (perm & 1)
                        return (status & ~DSISR_NOHPTE) | DSISR_KEYFAULT;
        }

        /* Save HPTE info for virtual-mode handler */
        vcpu->arch.pgfault_addr = addr;
        vcpu->arch.pgfault_index = index;
        vcpu->arch.pgfault_hpte[0] = v;
        vcpu->arch.pgfault_hpte[1] = r;

        if (vcpu->arch.shregs.msr & MSR_IR)
                return -2;      /* MMIO emulation - load instr word */

        return -1;              /* send fault up to host kernel mode */

 protfault:
        return (status & ~DSISR_NOHPTE) | DSISR_PROTFAULT;
}