KVM: PPC: Keep a record of HV guest view of hashed page table entries
arch/powerpc/kvm/book3s_hv_rm_mmu.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

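/*
 * The hcall handlers in this file run in hypervisor real mode: kernel
 * linear-map addresses work there, but vmalloc addresses (such as
 * pointers into the kvm->arch.revmap array) do not, and must first be
 * translated with real_vmalloc_addr() below.
 */
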
/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;

	p = find_linux_pte(swapper_pg_dir, addr);
	if (!p || !pte_present(*p))
		return NULL;
	/* assume we don't have huge pages in vmalloc space... */
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}

#define HPTE_V_HVLOCK	0x40UL

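/*
 * Try to lock an HPTE by atomically setting HPTE_V_HVLOCK in its first
 * doubleword; the attempt fails if any bit in @bits is already set
 * there.  Callers pass HPTE_V_HVLOCK alone to spin until the entry is
 * unlocked, or HPTE_V_HVLOCK | HPTE_V_VALID to claim only an empty,
 * unlocked slot.  Returns non-zero on success.
 */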
static inline long lock_hpte(unsigned long *hpte, unsigned long bits)
{
	unsigned long tmp, old;

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	ori	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"	/* stdcx. failed: make old non-zero */
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
		     : "cc", "memory");
	return old == 0;
}

long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	unsigned long porder;
	struct kvm *kvm = vcpu->kvm;
	unsigned long i, lpn, pa;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel = ptel;

	/* only handle 4k, 64k and 16M pages for now */
	porder = 12;
	if (pteh & HPTE_V_LARGE) {
		if (cpu_has_feature(CPU_FTR_ARCH_206) &&
		    (ptel & 0xf000) == 0x1000) {
			/* 64k page */
			porder = 16;
		} else if ((ptel & 0xff000) == 0) {
			/* 16M page */
			porder = 24;
			/* lowest AVA bit must be 0 for 16M pages */
			if (pteh & 0x80)
				return H_PARAMETER;
		} else
			return H_PARAMETER;
	}
	/* Translate the guest's logical page number to a host real address */
	lpn = (ptel & HPTE_R_RPN) >> kvm->arch.ram_porder;
	if (lpn >= kvm->arch.ram_npages || porder > kvm->arch.ram_porder)
		return H_PARAMETER;
	pa = kvm->arch.ram_pginfo[lpn].pfn << PAGE_SHIFT;
	if (!pa)
		return H_PARAMETER;
	/* Check WIMG */
	if ((ptel & HPTE_R_WIMG) != HPTE_R_M &&
	    (ptel & HPTE_R_WIMG) != (HPTE_R_W | HPTE_R_I | HPTE_R_M))
		return H_PARAMETER;
	pteh &= ~0x60UL;	/* clear software bits, incl. HPTE_V_HVLOCK */
	ptel &= ~(HPTE_R_PP0 - kvm->arch.ram_psize);
	ptel |= pa;
	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		/* Search all 8 slots of the PTEG for a free, unlocked entry */
		pte_index &= ~7UL;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; ; ++i) {
			if (i == 8)
				return H_PTEG_FULL;
			if ((*hpte & HPTE_V_VALID) == 0 &&
			    lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
				break;
			hpte += 2;
		}
		pte_index += i;
	} else {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
			return H_PTEG_FULL;
	}

	/* Save away the guest's idea of the second HPTE dword */
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	if (rev)
		rev->guest_rpte = g_ptel;
	hpte[1] = ptel;
	eieio();		/* order the 2nd dword before the valid bit */
	hpte[0] = pteh;		/* pteh has HPTE_V_HVLOCK clear: this unlocks */
	asm volatile("ptesync" : : : "memory");
	vcpu->arch.gpr[4] = pte_index;
	return H_SUCCESS;
}

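/*
 * Broadcast tlbie invalidations must not be issued concurrently by
 * multiple CPUs, so a global lock (kvm->arch.tlbie_lock) serializes
 * them.  LOCK_TOKEN is this CPU's owner token, in the same format the
 * generic spinlock code stores into a held lock.
 */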
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))

static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "	stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "	isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}

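/*
 * compute_tlbie_rb() (in asm/kvm_book3s_64.h) reconstructs the RB
 * operand for tlbie/tlbiel from the two HPTE doublewords and the HPT
 * index, encoding the virtual address and page size to invalidate.
 */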
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn,
		     unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	unsigned long v, r, rb;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}
	/* With a single vcpu, the translation can only be in this CPU's TLB */
	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
	vcpu->arch.gpr[5] = r = hpte[1];
	rb = compute_tlbie_rb(v, r, pte_index);
	hpte[0] = 0;
	if (!(flags & H_LOCAL)) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
			     : : "r" (rb), "r" (kvm->arch.lpid));
		asm volatile("ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile("tlbiel %0" : : "r" (rb));
		asm volatile("ptesync" : : : "memory");
	}
	return H_SUCCESS;
}

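/*
 * H_BULK_REMOVE processes up to 4 request pairs from the guest's
 * registers, starting at gpr[4].  Per the PAPR encoding, the high byte
 * of each first doubleword holds a 2-bit request type (1 = remove,
 * 3 = end of list) and 2 bits of match flags (0 = absolute,
 * 1 = andcond, 2 = AVPN); the completion code (0x80 = success,
 * 0x90 = not found, 0xa0 = parameter error) is written back into the
 * same byte.
 */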
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	unsigned long *hp, tlbrb[4];
	long int i, found;
	long int n_inval = 0;
	unsigned long flags, req, pte_index;
	long int local = 0;
	long int ret = H_SUCCESS;

	if (atomic_read(&kvm->online_vcpus) == 1)
		local = 1;
	for (i = 0; i < 4; ++i) {
		pte_index = args[i * 2];
		flags = pte_index >> 56;
		pte_index &= ((1ul << 56) - 1);
		req = flags >> 6;
		flags &= 3;
		if (req == 3)		/* end of list */
			break;
		if (req != 1 || flags == 3 ||
		    pte_index >= HPT_NPTE) {
			/* parameter error */
			args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
			ret = H_PARAMETER;
			break;
		}
		hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		while (!lock_hpte(hp, HPTE_V_HVLOCK))
			cpu_relax();
		found = 0;
		if (hp[0] & HPTE_V_VALID) {
			switch (flags & 3) {
			case 0:		/* absolute */
				found = 1;
				break;
			case 1:		/* andcond */
				if (!(hp[0] & args[i * 2 + 1]))
					found = 1;
				break;
			case 2:		/* AVPN */
				if ((hp[0] & ~0x7fUL) == args[i * 2 + 1])
					found = 1;
				break;
			}
		}
		if (!found) {
			hp[0] &= ~HPTE_V_HVLOCK;
			args[i * 2] = ((0x90 | flags) << 56) + pte_index;
			continue;
		}
		/* insert R and C bits from PTE */
		flags |= (hp[1] >> 5) & 0x0c;
		args[i * 2] = ((0x80 | flags) << 56) + pte_index;
		tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
		hp[0] = 0;
	}
	if (n_inval == 0)
		return ret;

	if (!local) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile(PPC_TLBIE(%1,%0)
				     : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
		asm volatile("ptesync" : : : "memory");
	}
	return ret;
}

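/*
 * H_PROTECT changes the protection (pp0/pp, N) and storage-key bits of
 * an existing HPTE.  The guest's view in the revmap entry is updated
 * with the same bits, so that later H_READ calls with H_R_XLATE return
 * what the guest expects to see.
 */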
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}
	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	v = hpte[0];
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
	}
	r = (hpte[1] & ~mask) | bits;

	/* Update HPTE */
	rb = compute_tlbie_rb(v, r, pte_index);
	/* Invalidate the entry while the old translation is flushed */
	hpte[0] = v & ~HPTE_V_VALID;
	if (!(flags & H_LOCAL)) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
			     : : "r" (rb), "r" (kvm->arch.lpid));
		asm volatile("ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile("tlbiel %0" : : "r" (rb));
		asm volatile("ptesync" : : : "memory");
	}
	hpte[1] = r;
	eieio();
	hpte[0] = v & ~HPTE_V_HVLOCK;	/* revalidate and unlock */
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}

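/*
 * H_READ returns one HPTE (or four with H_READ_4) to the guest in
 * gpr[4] and up.  With H_R_XLATE, the second doubleword of a valid
 * entry comes from the saved guest view in the revmap, so the guest
 * sees its own real addresses rather than the host's; without it, the
 * RPN field is returned with all bits set.
 */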
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	if (flags & H_R_XLATE)
		rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		r = hpte[1];
		if (hpte[0] & HPTE_V_VALID) {
			if (rev)
				r = rev[i].guest_rpte;
			else
				r = hpte[1] | HPTE_R_RPN;
		}
		vcpu->arch.gpr[4 + i * 2] = hpte[0];
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}