KVM: PPC: Make wakeups work again for Book3S HV guests
[deliverable/linux.git] arch/powerpc/kvm/book3s_hv_rm_mmu.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

/* For now use a fixed-size 16 MB hashed page table */
#define HPT_ORDER	24
#define HPT_NPTEG	(1ul << (HPT_ORDER - 7))	/* 128 bytes per PTEG */
#define HPT_HASH_MASK	(HPT_NPTEG - 1)
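
/*
 * Geometry implied by the sizes above: a 2^24-byte (16 MB) table holds
 * 2^17 PTEGs of 128 bytes each, and each PTEG holds eight 16-byte HPTEs,
 * so valid PTE indices run from 0 to (HPT_NPTEG << 3) - 1.
 */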

/* Bit 0x40 of an HPTE's first doubleword is a software-use bit; we use it as a lock bit */
#define HPTE_V_HVLOCK	0x40UL

/*
 * Atomically set HPTE_V_HVLOCK in the HPTE's first doubleword, failing
 * (without spinning) if any bit in @bits is already set or if the
 * reservation is lost; returns non-zero on success.
 */
static inline long lock_hpte(unsigned long *hpte, unsigned long bits)
{
	unsigned long tmp, old;

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	ori	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	li	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
		     : "cc", "memory");
	return old == 0;
}
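
/*
 * Callers that fail to take the lock spin with cpu_relax() and release
 * it by clearing the bit directly.  A minimal sketch of the release as
 * a helper (illustrative only; this file open-codes it):
 */
static inline void unlock_hpte_sketch(unsigned long *hpte)
{
	hpte[0] &= ~HPTE_V_HVLOCK;	/* drop the software lock bit */
}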

long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	unsigned long porder;
	struct kvm *kvm = vcpu->kvm;
	unsigned long i, lpn, pa;
	unsigned long *hpte;

	/* only handle 4k, 64k and 16M pages for now */
	porder = 12;
	if (pteh & HPTE_V_LARGE) {
		if (cpu_has_feature(CPU_FTR_ARCH_206) &&
		    (ptel & 0xf000) == 0x1000) {
			/* 64k page */
			porder = 16;
		} else if ((ptel & 0xff000) == 0) {
			/* 16M page */
			porder = 24;
			/* lowest AVA bit must be 0 for 16M pages */
			if (pteh & 0x80)
				return H_PARAMETER;
		} else
			return H_PARAMETER;
	}
	lpn = (ptel & HPTE_R_RPN) >> kvm->arch.ram_porder;
	if (lpn >= kvm->arch.ram_npages || porder > kvm->arch.ram_porder)
		return H_PARAMETER;
	pa = kvm->arch.ram_pginfo[lpn].pfn << PAGE_SHIFT;
	if (!pa)
		return H_PARAMETER;
	/* Check WIMG */
	if ((ptel & HPTE_R_WIMG) != HPTE_R_M &&
	    (ptel & HPTE_R_WIMG) != (HPTE_R_W | HPTE_R_I | HPTE_R_M))
		return H_PARAMETER;
	pteh &= ~0x60UL;	/* clear software bits 0x40 and 0x20 */
	ptel &= ~(HPTE_R_PP0 - kvm->arch.ram_psize);
	ptel |= pa;
	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; ; ++i) {
			if (i == 8)
				return H_PTEG_FULL;
			if ((*hpte & HPTE_V_VALID) == 0 &&
			    lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
				break;
			hpte += 2;
		}
	} else {
		i = 0;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
			return H_PTEG_FULL;
	}
	hpte[1] = ptel;
	eieio();
	hpte[0] = pteh;	/* also drops HPTE_V_HVLOCK, since bit 0x40 was masked off pteh */
	asm volatile("ptesync" : : : "memory");
	atomic_inc(&kvm->arch.ram_pginfo[lpn].refcnt);
	vcpu->arch.gpr[4] = pte_index + i;
	return H_SUCCESS;
}
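
/*
 * The "(pte_index << 4)" scaling above relies on each HPTE being two
 * 8-byte doublewords.  A sketch of that address computation factored
 * into a helper (illustrative only; not part of the original file):
 */
static inline unsigned long *hpte_addr_sketch(struct kvm *kvm,
					      unsigned long pte_index)
{
	/* 16 bytes per HPTE: dword 0 (V word) followed by dword 1 (R word) */
	return (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
}
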
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))

static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:	lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "	stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "	isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}
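
/*
 * LOCK_TOKEN records the holder's paca lock token in the lock word, the
 * same convention the arch spinlocks use; the unlock side is simply a
 * store of zero to kvm->arch.tlbie_lock after the final ptesync.  Unlike
 * lock_hpte(), the loop above retries internally on stwcx. failure but
 * still returns immediately if the lock is already held.
 */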

long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn,
		     unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	unsigned long v, r, rb;

	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}
	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
	vcpu->arch.gpr[5] = r = hpte[1];
	rb = compute_tlbie_rb(v, r, pte_index);
	hpte[0] = 0;
	if (!(flags & H_LOCAL)) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
			     : : "r" (rb), "r" (kvm->arch.lpid));
		asm volatile("ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile("tlbiel %0" : : "r" (rb));
		asm volatile("ptesync" : : : "memory");
	}
	return H_SUCCESS;
}
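
/*
 * With a single online vcpu, the guest's translations can be cached only
 * on this CPU, so the H_LOCAL path uses tlbiel (a local invalidation)
 * and avoids the global tlbie lock; otherwise a broadcast tlbie followed
 * by eieio/tlbsync/ptesync is needed.
 */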

long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	unsigned long *hp, tlbrb[4];
	long int i, found;
	long int n_inval = 0;
	unsigned long flags, req, pte_index;
	long int local = 0;
	long int ret = H_SUCCESS;

	if (atomic_read(&kvm->online_vcpus) == 1)
		local = 1;
	for (i = 0; i < 4; ++i) {
		pte_index = args[i * 2];
		flags = pte_index >> 56;
		pte_index &= ((1ul << 56) - 1);
		req = flags >> 6;
		flags &= 3;
		if (req == 3)		/* end-of-list marker */
			break;
		if (req != 1 || flags == 3 ||
		    pte_index >= (HPT_NPTEG << 3)) {
			/* parameter error */
			args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
			ret = H_PARAMETER;
			break;
		}
		hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		while (!lock_hpte(hp, HPTE_V_HVLOCK))
			cpu_relax();
		found = 0;
		if (hp[0] & HPTE_V_VALID) {
			switch (flags & 3) {
			case 0:		/* absolute */
				found = 1;
				break;
			case 1:		/* andcond */
				if (!(hp[0] & args[i * 2 + 1]))
					found = 1;
				break;
			case 2:		/* AVPN */
				if ((hp[0] & ~0x7fUL) == args[i * 2 + 1])
					found = 1;
				break;
			}
		}
		if (!found) {
			hp[0] &= ~HPTE_V_HVLOCK;
			args[i * 2] = ((0x90 | flags) << 56) + pte_index;
			continue;
		}
		/* insert R and C bits from PTE */
		flags |= (hp[1] >> 5) & 0x0c;
		args[i * 2] = ((0x80 | flags) << 56) + pte_index;
		tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
		hp[0] = 0;
	}
	if (n_inval == 0)
		return ret;

	if (!local) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile(PPC_TLBIE(%1,%0)
				     : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
		asm volatile("ptesync" : : : "memory");
	}
	return ret;
}
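
/*
 * Each H_BULK_REMOVE parameter dword comes back with a status byte in
 * bits 56-63: 0x80 = removed, 0x90 = not found, 0xa0 = parameter error,
 * with the HPTE's R and C bits folded into 0x08 and 0x04 of that byte
 * on success.  A sketch of the decode a caller might do (illustrative,
 * not part of the original file):
 */
static inline int bulk_remove_status_sketch(unsigned long retparm)
{
	return (retparm >> 56) & 0xf0;	/* 0x80, 0x90 or 0xa0 */
}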

long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	unsigned long v, r, rb;

	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}
	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	v = hpte[0];
	r = hpte[1] & ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
			HPTE_R_KEY_HI | HPTE_R_KEY_LO);
	r |= (flags << 55) & HPTE_R_PP0;
	r |= (flags << 48) & HPTE_R_KEY_HI;
	r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
	rb = compute_tlbie_rb(v, r, pte_index);
	hpte[0] = v & ~HPTE_V_VALID;
	if (!(flags & H_LOCAL)) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
			     : : "r" (rb), "r" (kvm->arch.lpid));
		asm volatile("ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile("tlbiel %0" : : "r" (rb));
		asm volatile("ptesync" : : : "memory");
	}
	hpte[1] = r;
	eieio();
	hpte[0] = v & ~HPTE_V_HVLOCK;	/* restores the valid bit, drops the lock */
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}
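
/*
 * Note the ordering above: the entry is invalidated and the stale
 * translation flushed before the updated R word is written; only then
 * is the valid bit restored and the lock dropped.  The flag shifts move
 * the PAPR-encoded pp0 and key-high bits into their dword-1 positions.
 */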

static unsigned long reverse_xlate(struct kvm *kvm, unsigned long realaddr)
{
	long int i;
	unsigned long offset, rpn;

	offset = realaddr & (kvm->arch.ram_psize - 1);
	rpn = (realaddr - offset) >> PAGE_SHIFT;
	for (i = 0; i < kvm->arch.ram_npages; ++i)
		if (rpn == kvm->arch.ram_pginfo[i].pfn)
			return (i << PAGE_SHIFT) + offset;
	return HPTE_R_RPN;	/* all 1s in the RPN field */
}
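
/*
 * reverse_xlate() above does a linear O(ram_npages) scan from real page
 * frame back to guest logical page number; HPTE_R_RPN (all ones in the
 * RPN field) signals that no backing page matched.
 */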

long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte, r;
	int i, n = 1;

	if (pte_index >= (HPT_NPTEG << 3))
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		r = hpte[1];
		if ((flags & H_R_XLATE) && (hpte[0] & HPTE_V_VALID))
			r = reverse_xlate(kvm, r & HPTE_R_RPN) |
				(r & ~HPTE_R_RPN);
		vcpu->arch.gpr[4 + i * 2] = hpte[0];
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}
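
/*
 * H_READ returns each HPTE's two doublewords in successive GPR pairs
 * starting at GPR4; with H_READ_4 the index is rounded down to a
 * four-entry boundary and four consecutive entries come back in
 * GPR4/5 through GPR10/11.
 */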