KVM: PPC: Handle some PAPR hcalls in the kernel
arch/powerpc/kvm/book3s_hv_rm_mmu.c
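Real-mode MMU hypercall handlers for Book3S HV KVM: H_ENTER, H_REMOVE, H_BULK_REMOVE, H_PROTECT and H_READ are serviced directly in the host kernel, operating on the guest's hashed page table (HPT).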
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

/* For now use fixed-size 16MB page table */
#define HPT_ORDER	24
#define HPT_NPTEG	(1ul << (HPT_ORDER - 7))	/* 128B per pteg */
#define HPT_HASH_MASK	(HPT_NPTEG - 1)

#define HPTE_V_HVLOCK	0x40UL

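/*
 * Attempt to lock an HPTE by atomically setting the HPTE_V_HVLOCK
 * software bit in its first doubleword.  The attempt fails if any bit
 * in @bits is already set (callers pass HPTE_V_HVLOCK, optionally
 * together with HPTE_V_VALID).  Returns 1 on success, 0 on failure;
 * callers either spin with cpu_relax() or treat failure as "slot busy".
 */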
static inline long lock_hpte(unsigned long *hpte, unsigned long bits)
{
	unsigned long tmp, old;

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	ori	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	li	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
		     : "cc", "memory");
	return old == 0;
}

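/*
 * Real-mode handler for the H_ENTER hypercall: insert a guest HPTE into
 * the partition's hashed page table.  The guest real page number in ptel
 * is translated to the host real address backing that page, and the new
 * entry is written either into the first free slot of the PTEG (the
 * default) or into the exact slot requested with H_EXACT.  The index of
 * the slot actually used is returned to the guest in GPR4.
 */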
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	unsigned long porder;
	struct kvm *kvm = vcpu->kvm;
	unsigned long i, lpn, pa;
	unsigned long *hpte;

	/* only handle 4k, 64k and 16M pages for now */
	porder = 12;
	if (pteh & HPTE_V_LARGE) {
		if ((ptel & 0xf000) == 0x1000) {
			/* 64k page */
			porder = 16;
		} else if ((ptel & 0xff000) == 0) {
			/* 16M page */
			porder = 24;
			/* lowest AVA bit must be 0 for 16M pages */
			if (pteh & 0x80)
				return H_PARAMETER;
		} else
			return H_PARAMETER;
	}
	lpn = (ptel & HPTE_R_RPN) >> kvm->arch.ram_porder;
	if (lpn >= kvm->arch.ram_npages || porder > kvm->arch.ram_porder)
		return H_PARAMETER;
	pa = kvm->arch.ram_pginfo[lpn].pfn << PAGE_SHIFT;
	if (!pa)
		return H_PARAMETER;
	/* Check WIMG */
	if ((ptel & HPTE_R_WIMG) != HPTE_R_M &&
	    (ptel & HPTE_R_WIMG) != (HPTE_R_W | HPTE_R_I | HPTE_R_M))
		return H_PARAMETER;
	pteh &= ~0x60UL;
	ptel &= ~(HPTE_R_PP0 - kvm->arch.ram_psize);
	ptel |= pa;
	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; ; ++i) {
			if (i == 8)
				return H_PTEG_FULL;
			if ((*hpte & HPTE_V_VALID) == 0 &&
			    lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
				break;
			hpte += 2;
		}
	} else {
		i = 0;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
			return H_PTEG_FULL;
	}
	hpte[1] = ptel;
	eieio();
	hpte[0] = pteh;
	asm volatile("ptesync" : : : "memory");
	atomic_inc(&kvm->arch.ram_pginfo[lpn].refcnt);
	vcpu->arch.gpr[4] = pte_index + i;
	return H_SUCCESS;
}

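/*
 * Build the RB operand for a tlbie/tlbiel instruction from the two
 * doublewords of an HPTE and its index in the hash table.  The low
 * virtual-address bits that are not kept in the AVA field are recovered
 * by undoing the hash (the PTEG index is the VA bits xor'd with the
 * VSID), and the page-size encoding is placed in the L/LP fields.
 */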
static unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
				       unsigned long pte_index)
{
	unsigned long rb, va_low;

	rb = (v & ~0x7fUL) << 16;		/* AVA field */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/* xor vsid from AVA */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> 12;
	else
		va_low ^= v >> 24;
	va_low &= 0x7ff;
	if (v & HPTE_V_LARGE) {
		rb |= 1;			/* L field */
		if (r & 0xff000) {
			/* non-16MB large page, must be 64k */
			/* (masks depend on page size) */
			rb |= 0x1000;		/* page encoding in LP field */
			rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
			rb |= (va_low & 0xfe);	/* AVAL field (P7 doesn't seem to care) */
		}
	} else {
		/* 4kB page */
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11b of VA */
	}
	rb |= (v >> 54) & 0x300;		/* B field */
	return rb;
}

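/*
 * Simple real-mode spinlock protecting global tlbie sequences,
 * presumably because tlbie/tlbsync must not be issued concurrently by
 * more than one thread of the partition.  The value stored in the lock
 * word is the acquiring CPU's paca lock token.  Returns 1 if the lock
 * was taken, 0 if it was already held.
 */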
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))

static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "	stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "	isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}

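/*
 * Real-mode handler for H_REMOVE: invalidate a single HPTE.  The entry
 * is locked, checked against the optional H_AVPN / H_ANDCOND criteria,
 * then cleared, and the stale translation is flushed with a global tlbie
 * (under kvm->arch.tlbie_lock) or a local tlbiel when only one vcpu is
 * online.  The old first and second doublewords are returned to the
 * guest in GPR4 and GPR5.
 */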
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn,
		     unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	unsigned long v, r, rb;

	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}
	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
	vcpu->arch.gpr[5] = r = hpte[1];
	rb = compute_tlbie_rb(v, r, pte_index);
	hpte[0] = 0;
	if (!(flags & H_LOCAL)) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
			     : : "r" (rb), "r" (kvm->arch.lpid));
		asm volatile("ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile("tlbiel %0" : : "r" (rb));
		asm volatile("ptesync" : : : "memory");
	}
	return H_SUCCESS;
}

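/*
 * Real-mode handler for H_BULK_REMOVE: process up to four remove
 * requests passed in GPR4-GPR11 as (flags|index, AVPN/ANDCOND value)
 * pairs.  Each matching entry is cleared and a completion code,
 * including the R and C bits of the old PTE, is written back into the
 * request word; the collected translations are then flushed in one
 * batch.
 */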
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	unsigned long *hp, tlbrb[4];
	long int i, found;
	long int n_inval = 0;
	unsigned long flags, req, pte_index;
	long int local = 0;
	long int ret = H_SUCCESS;

	if (atomic_read(&kvm->online_vcpus) == 1)
		local = 1;
	for (i = 0; i < 4; ++i) {
		pte_index = args[i * 2];
		flags = pte_index >> 56;
		pte_index &= ((1ul << 56) - 1);
		req = flags >> 6;
		flags &= 3;
		if (req == 3)
			break;
		if (req != 1 || flags == 3 ||
		    pte_index >= (HPT_NPTEG << 3)) {
			/* parameter error */
			args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
			ret = H_PARAMETER;
			break;
		}
		hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		while (!lock_hpte(hp, HPTE_V_HVLOCK))
			cpu_relax();
		found = 0;
		if (hp[0] & HPTE_V_VALID) {
			switch (flags & 3) {
			case 0:		/* absolute */
				found = 1;
				break;
			case 1:		/* andcond */
				if (!(hp[0] & args[i * 2 + 1]))
					found = 1;
				break;
			case 2:		/* AVPN */
				if ((hp[0] & ~0x7fUL) == args[i * 2 + 1])
					found = 1;
				break;
			}
		}
		if (!found) {
			hp[0] &= ~HPTE_V_HVLOCK;
			args[i * 2] = ((0x90 | flags) << 56) + pte_index;
			continue;
		}
		/* insert R and C bits from PTE */
		flags |= (hp[1] >> 5) & 0x0c;
		args[i * 2] = ((0x80 | flags) << 56) + pte_index;
		tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
		hp[0] = 0;
	}
	if (n_inval == 0)
		return ret;

	if (!local) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile(PPC_TLBIE(%1,%0)
				     : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
		asm volatile("ptesync" : : : "memory");
	}
	return ret;
}

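/*
 * Real-mode handler for H_PROTECT: update the pp, N and key bits of an
 * existing HPTE.  The entry is temporarily marked invalid and its old
 * translation is flushed before the new second doubleword is written,
 * so no CPU can cache a translation mixing old and new attributes.
 */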
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	unsigned long v, r, rb;

	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}
	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	v = hpte[0];
	r = hpte[1] & ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
			HPTE_R_KEY_HI | HPTE_R_KEY_LO);
	r |= (flags << 55) & HPTE_R_PP0;
	r |= (flags << 48) & HPTE_R_KEY_HI;
	r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
	rb = compute_tlbie_rb(v, r, pte_index);
	hpte[0] = v & ~HPTE_V_VALID;
	if (!(flags & H_LOCAL)) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
			     : : "r" (rb), "r" (kvm->arch.lpid));
		asm volatile("ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile("tlbiel %0" : : "r" (rb));
		asm volatile("ptesync" : : : "memory");
	}
	hpte[1] = r;
	eieio();
	hpte[0] = v & ~HPTE_V_HVLOCK;
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}

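/*
 * Translate a host real address back into a guest real address by
 * searching the ram_pginfo array for the page frame backing it.  Used
 * by H_READ when the guest requests translated output; returns all 1s
 * in the RPN field if the address is not part of guest RAM.
 */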
static unsigned long reverse_xlate(struct kvm *kvm, unsigned long realaddr)
{
	long int i;
	unsigned long offset, rpn;

	offset = realaddr & (kvm->arch.ram_psize - 1);
	rpn = (realaddr - offset) >> PAGE_SHIFT;
	for (i = 0; i < kvm->arch.ram_npages; ++i)
		if (rpn == kvm->arch.ram_pginfo[i].pfn)
			return (i << PAGE_SHIFT) + offset;
	return HPTE_R_RPN;	/* all 1s in the RPN field */
}

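/*
 * Real-mode handler for H_READ: return the contents of one HPTE (or,
 * with H_READ_4, the four entries starting at a 4-aligned index) in
 * GPR4 onwards.  With H_R_XLATE the real page number of a valid entry
 * is converted back to a guest real address via reverse_xlate().
 */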
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte, r;
	int i, n = 1;

	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		r = hpte[1];
		if ((flags & H_R_XLATE) && (hpte[0] & HPTE_V_VALID))
			r = reverse_xlate(kvm, r & HPTE_R_RPN) |
				(r & ~HPTE_R_RPN);
		vcpu->arch.gpr[4 + i * 2] = hpte[0];
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}