/* arch/powerpc/kvm/book3s_64_mmu_host.c */
/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "trace.h"

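/* Shadow PTEs always map 4K pages; this is the log2 page size for hpt_hash(). */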
#define PTE_SIZE 12

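/* Invalidate one shadow HPTE in the host hash table. */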
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	ppc_md.hpte_invalidate(pte->slot, pte->host_va,
			       MMU_PAGE_4K, MMU_SEGSIZE_256M,
			       false);
}

/*
 * We keep 512 gvsid->hvsid entries, mapping the guest ones to the array
 * using a hash, so we don't waste cycles on looping.
 */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}
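/*
 * Look up the host VSID for a guest VSID. Both the hashed slot and its
 * mirror (SID_MAP_MASK - hash) are checked, matching the collision
 * handling in create_sid_map() below.
 */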
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (vcpu->arch.shared->msr & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

	trace_kvm_book3s_slb_fail(sid_map_mask, gvsid);
	return NULL;
}

int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
	pfn_t hpaddr;
	ulong hash, hpteg, va;
	u64 vsid;
	int ret;
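	/*
	 * 0x192 == HPTE_R_R | HPTE_R_C | HPTE_R_M | PP 0b10: referenced and
	 * changed preset, memory-coherent, read/write. Or-ing in HPTE_R_PP
	 * below flips PP to 0b11 (read-only).
	 */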
	int rflags = 0x192;
	int vflags = 0;
	int attempt = 0;
	struct kvmppc_sid_map *map;
	int r = 0;

	/* Get host physical address for gpa */
	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
	if (is_error_pfn(hpaddr)) {
		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
		       orig_pte->raddr >> PAGE_SHIFT);
		r = -EINVAL;
		goto out;
	}
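	/* Keep the offset of the 4K guest page within a larger host page. */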
	hpaddr <<= PAGE_SHIFT;
	hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
		WARN_ON(ret < 0);
		map = find_sid_vsid(vcpu, vsid);
	}
	if (!map) {
		printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
		       vsid, orig_pte->eaddr);
		WARN_ON(true);
		r = -EINVAL;
		goto out;
	}

	vsid = map->host_vsid;
	va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);

	if (!orig_pte->may_write)
		rflags |= HPTE_R_PP;
	else
		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);

	if (!orig_pte->may_execute)
		rflags |= HPTE_R_N;

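	/* Primary hash for a 4K HPTE in a 256M segment. */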
	hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M);

map_again:
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	/* In case we tried normal mapping already, let's nuke old entries */
	if (attempt > 1 && ppc_md.hpte_remove(hpteg) < 0) {
		r = -1;
		goto out;
	}

	ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags,
				 MMU_PAGE_4K, MMU_SEGSIZE_256M);

	if (ret < 0) {
		/* If we couldn't map a primary PTE, try a secondary */
		hash = ~hash;
		vflags ^= HPTE_V_SECONDARY;
		attempt++;
		goto map_again;
	} else {
		struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu);

		trace_kvm_book3s_64_mmu_map(rflags, hpteg, va, hpaddr, orig_pte);

		/*
		 * The ppc_md code may give us a secondary entry even though we
		 * asked for a primary. Fix up.
		 */
		if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
			hash = ~hash;
			hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
		}

		pte->slot = hpteg + (ret & 7);
		pte->host_va = va;
		pte->pte = *orig_pte;
		pte->pfn = hpaddr >> PAGE_SHIFT;

		kvmppc_mmu_hpte_cache_map(vcpu, pte);
	}

out:
	return r;
}

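/*
 * Allocate a new guest->host VSID mapping. When the proto-VSID space is
 * exhausted, all shadow state (SID map, HPTEs, SLB) is flushed and
 * allocation restarts from the bottom of the range.
 */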
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (vcpu->arch.shared->msr & MSR_PR)
		gvsid |= VSID_PR;

	/*
	 * The hash can collide for different guest VSIDs, so alternate
	 * between the forward and the mirrored slot to keep colliding
	 * entries from always evicting each other.
	 */
	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
		vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
	map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M);

	map->guest_vsid = gvsid;
	map->valid = true;

	trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid);

	return map;
}

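/*
 * Pick a shadow SLB slot for @esid: reuse a matching or previously
 * invalidated entry if possible, otherwise grow slb_max, purging all
 * segments when the shadow SLB is full. Slot 0 is reserved.
 */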
static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	int i;
	int max_slb_size = 64;
	int found_inval = -1;
	int r;

	if (!svcpu->slb_max)
		svcpu->slb_max = 1;

	/* Are we overwriting? */
	for (i = 1; i < svcpu->slb_max; i++) {
		if (!(svcpu->slb[i].esid & SLB_ESID_V))
			found_inval = i;
		else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
			r = i;
			goto out;
		}
	}

	/* Found a spare entry that was invalidated before */
	if (found_inval > 0) {
		r = found_inval;
		goto out;
	}

	/* No spare invalid entry, so create one */

	if (mmu_slb_size < 64)
		max_slb_size = mmu_slb_size;

	/* Overflowing -> purge */
	if ((svcpu->slb_max) == max_slb_size)
		kvmppc_mmu_flush_segments(vcpu);

	r = svcpu->slb_max;
	svcpu->slb_max++;

out:
	svcpu_put(svcpu);
	return r;
}

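/*
 * Map the guest segment containing @eaddr into the shadow SLB, creating
 * a new guest->host VSID mapping if necessary.
 */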
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u64 esid = eaddr >> SID_SHIFT;
	u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
	u64 slb_vsid = SLB_VSID_USER;
	u64 gvsid;
	int slb_index;
	struct kvmppc_sid_map *map;
	int r = 0;

	slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		svcpu->slb[slb_index].esid = 0;
		r = -ENOENT;
		goto out;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;

	slb_vsid |= (map->host_vsid << 12);
	slb_vsid &= ~SLB_VSID_KP;
	slb_esid |= slb_index;

	svcpu->slb[slb_index].esid = slb_esid;
	svcpu->slb[slb_index].vsid = slb_vsid;

	trace_kvm_book3s_slbmte(slb_vsid, slb_esid);

out:
	svcpu_put(svcpu);
	return r;
}

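/* Drop all shadow SLB entries; slot 0 stays reserved. */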
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->slb_max = 1;
	svcpu->slb[0].esid = 0;
	svcpu_put(svcpu);
}

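/* Tear down the shadow MMU state and release the host context. */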
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_hpte_destroy(vcpu);
	__destroy_context(to_book3s(vcpu)->context_id[0]);
}

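/*
 * Allocate a host MMU context and carve this vcpu's proto-VSID range
 * out of that context's user ESID space.
 */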
int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;

	err = __init_new_context();
	if (err < 0)
		return -1;
	vcpu3s->context_id[0] = err;

	vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
				  << USER_ESID_BITS) - 1;
	vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
	vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;

	kvmppc_mmu_hpte_init(vcpu);

	return 0;
}