/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/tlbflush.h>
#include <asm/mmu-44x.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_44x.h>
#include "timing.h"

#include "44x_tlb.h"
#include "trace.h"

#ifndef PPC44x_TLBE_SIZE
#define PPC44x_TLBE_SIZE	PPC44x_TLB_4K
#endif

#define PAGE_SIZE_4K (1<<12)
#define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1))

#define PPC44x_TLB_UATTR_MASK \
	(PPC44x_TLB_U0|PPC44x_TLB_U1|PPC44x_TLB_U2|PPC44x_TLB_U3)
#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)

#ifdef DEBUG
void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *tlbe;
	int i;

	printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
	printk("| %2s | %3s | %8s | %8s | %8s |\n",
			"nr", "tid", "word0", "word1", "word2");

	for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
		tlbe = &vcpu_44x->guest_tlb[i];
		if (tlbe->word0 & PPC44x_TLB_VALID)
			printk(" G%2d |  %02X | %08X | %08X | %08X |\n",
			       i, tlbe->tid, tlbe->word0, tlbe->word1,
			       tlbe->word2);
	}
}
#endif

static inline void kvmppc_44x_tlbie(unsigned int index)
{
	/* 0 <= index < 64, so the V bit is clear and we can use the index as
	 * word0. */
	asm volatile(
		"tlbwe %[index], %[index], 0\n"
	:
	: [index] "r"(index)
	);
}

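/* Read hardware TLB entry 'index' into 'tlbe'. Reading word 0 deposits the
 * entry's translation ID into MMUCR[STID], which is where the tid field is
 * recovered from before words 1 and 2 are read. */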
static inline void kvmppc_44x_tlbre(unsigned int index,
                                    struct kvmppc_44x_tlbe *tlbe)
{
	asm volatile(
		"tlbre %[word0], %[index], 0\n"
		"mfspr %[tid], %[sprn_mmucr]\n"
		"andi. %[tid], %[tid], 0xff\n"
		"tlbre %[word1], %[index], 1\n"
		"tlbre %[word2], %[index], 2\n"
	: [word0] "=r"(tlbe->word0),
	  [word1] "=r"(tlbe->word1),
	  [word2] "=r"(tlbe->word2),
	  [tid] "=r"(tlbe->tid)
	: [index] "r"(index),
	  [sprn_mmucr] "i"(SPRN_MMUCR)
	: "cc"
	);
}

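/* Write 'stlbe' into hardware TLB entry 'index'. The entry's translation ID is
 * supplied through MMUCR[STID], so MMUCR is updated before the tlbwe sequence. */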
static inline void kvmppc_44x_tlbwe(unsigned int index,
                                    struct kvmppc_44x_tlbe *stlbe)
{
	unsigned long tmp;

	asm volatile(
		"mfspr %[tmp], %[sprn_mmucr]\n"
		"rlwimi %[tmp], %[tid], 0, 0xff\n"
		"mtspr %[sprn_mmucr], %[tmp]\n"
		"tlbwe %[word0], %[index], 0\n"
		"tlbwe %[word1], %[index], 1\n"
		"tlbwe %[word2], %[index], 2\n"
	: [tmp] "=&r"(tmp)
	: [word0] "r"(stlbe->word0),
	  [word1] "r"(stlbe->word1),
	  [word2] "r"(stlbe->word2),
	  [tid] "r"(stlbe->tid),
	  [index] "r"(index),
	  [sprn_mmucr] "i"(SPRN_MMUCR)
	);
}

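/* Translate the attribute bits of a guest TLB entry (word2) into the
 * attributes used for the shadow (host) TLB entry, taking into account whether
 * the guest is currently in user or supervisor mode. */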
static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
{
	/* We only care about the guest's permission and user bits. */
	attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_UATTR_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode, so we need to translate guest
		 * supervisor permissions into user permissions. */
		attrib &= ~PPC44x_TLB_USER_PERM_MASK;
		attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
	}

	/* Make sure host can always access this memory. */
	attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;

	/* WIMGE = 0b00100 */
	attrib |= PPC44x_TLB_M;

	return attrib;
}

/* Load shadow TLB back into hardware. */
void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++) {
		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];

		if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
			kvmppc_44x_tlbwe(i, stlbe);
	}
}

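/* Mark a shadow TLB entry as modified, so kvmppc_44x_tlb_put() knows it must
 * re-read that entry from the hardware TLB before saving it. */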
static void kvmppc_44x_tlbe_set_modified(struct kvmppc_vcpu_44x *vcpu_44x,
                                         unsigned int i)
{
	vcpu_44x->shadow_tlb_mod[i] = 1;
}

/* Save hardware TLB to the vcpu, and invalidate all guest mappings. */
void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++) {
		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];

		if (vcpu_44x->shadow_tlb_mod[i])
			kvmppc_44x_tlbre(i, stlbe);

		if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
			kvmppc_44x_tlbie(i);
	}
}

/* Search the guest TLB for a matching entry. */
int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
                         unsigned int as)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
		struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as)
			continue;

		return i;
	}

	return -1;
}

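/* Translate a guest effective address to a guest physical address, using the
 * guest TLB entry found by a previous kvmppc_44x_tlb_index() lookup. */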
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
                       gva_t eaddr)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
	unsigned int pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

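/* Find the guest TLB entry, if any, that would translate an instruction or
 * data access at 'eaddr' under the guest's current PID and address space. */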
int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_IS);

	return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_DS);

	return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
}

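/* Invalidate the hardware TLB entry backing one shadow mapping and drop the
 * reference on the host page it pinned. */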
static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
                                      unsigned int stlb_index)
{
	struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[stlb_index];

	if (!ref->page)
		return;

	/* Discard from the TLB. */
	/* Note: we could actually invalidate a host mapping, if the host overwrote
	 * this TLB entry since we inserted a guest mapping. */
	kvmppc_44x_tlbie(stlb_index);

	/* Now release the page. */
	if (ref->writeable)
		kvm_release_page_dirty(ref->page);
	else
		kvm_release_page_clean(ref->page);

	ref->page = NULL;

	/* XXX set tlb_44x_index to stlb_index? */

	trace_kvm_stlb_inval(stlb_index);
}

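/* Release every shadow mapping when the vcpu's MMU state is torn down. */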
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++)
		kvmppc_44x_shadow_release(vcpu_44x, i);
}

/**
 * kvmppc_mmu_map -- create a host mapping for guest memory
 *
 * If the guest wanted a larger page than the host supports, only the first
 * host page is mapped here and the rest are demand faulted.
 *
 * If the guest wanted a smaller page than the host page size, we map only the
 * guest-size page (i.e. not a full host page mapping).
 *
 * Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB.
 */
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                    unsigned int gtlb_index)
{
	struct kvmppc_44x_tlbe stlbe;
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
	struct kvmppc_44x_shadow_ref *ref;
	struct page *new_page;
	hpa_t hpaddr;
	gfn_t gfn;
	u32 asid = gtlbe->tid;
	u32 flags = gtlbe->word2;
	u32 max_bytes = get_tlb_bytes(gtlbe);
	unsigned int victim;

	/* Select TLB entry to clobber. Indirectly guard against races with the TLB
	 * miss handler by disabling interrupts. */
	local_irq_disable();
	victim = ++tlb_44x_index;
	if (victim > tlb_44x_hwater)
		victim = 0;
	tlb_44x_index = victim;
	local_irq_enable();

	/* Get reference to new page. */
	gfn = gpaddr >> PAGE_SHIFT;
	new_page = gfn_to_page(vcpu->kvm, gfn);
	if (is_error_page(new_page)) {
		printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
		kvm_release_page_clean(new_page);
		return;
	}
	hpaddr = page_to_phys(new_page);

	/* Invalidate any previous shadow mappings. */
	kvmppc_44x_shadow_release(vcpu_44x, victim);

	/* XXX Make sure (va, size) doesn't overlap any other
	 * entries. 440x6 user manual says the result would be
	 * "undefined." */

	/* XXX what about AS? */

	/* Force TS=1 for all guest mappings. */
	stlbe.word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS;

	if (max_bytes >= PAGE_SIZE) {
		/* Guest mapping is larger than or equal to host page size. We can use
		 * a "native" host mapping. */
		stlbe.word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE;
	} else {
		/* Guest mapping is smaller than host page size. We must restrict the
		 * size of the mapping to be at most the smaller of the two, but for
		 * simplicity we fall back to a 4K mapping (this is probably what the
		 * guest is using anyways). */
		stlbe.word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K;

		/* 'hpaddr' is a host page, which is larger than the mapping we're
		 * inserting here. To compensate, we must add the in-page offset to the
		 * sub-page. */
		hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K);
	}

	stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
	stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags,
	                                           vcpu->arch.msr & MSR_PR);
	stlbe.tid = !(asid & 0xff);

	/* Keep track of the reference so we can properly release it later. */
	ref = &vcpu_44x->shadow_refs[victim];
	ref->page = new_page;
	ref->gtlb_index = gtlb_index;
	ref->writeable = !!(stlbe.word2 & PPC44x_TLB_UW);
	ref->tid = stlbe.tid;

	/* Insert shadow mapping into hardware TLB. */
	kvmppc_44x_tlbe_set_modified(vcpu_44x, victim);
	kvmppc_44x_tlbwe(victim, &stlbe);
	trace_kvm_stlb_write(victim, stlbe.tid, stlbe.word0, stlbe.word1,
	                     stlbe.word2);
}

/* For a particular guest TLB entry, invalidate the corresponding host TLB
 * mappings and release the host pages. */
static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu,
                                  unsigned int gtlb_index)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
		struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
		if (ref->gtlb_index == gtlb_index)
			kvmppc_44x_shadow_release(vcpu_44x, i);
	}
}

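/* Guest kernel mappings are shadowed with TID=1 and guest user mappings with
 * TID=0, so switching the guest's privilege level only requires switching the
 * shadow PID the host runs the guest with: 1 in supervisor mode (making the
 * TID=1 kernel mappings visible), 0 in user mode. */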
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
	vcpu->arch.shadow_pid = !usermode;
}

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	if (unlikely(vcpu->arch.pid == new_pid))
		return;

	vcpu->arch.pid = new_pid;

	/* Guest userspace runs with TID=0 mappings and PID=0, to make sure it
	 * can't access guest kernel mappings (TID=1). When we switch to a new
	 * guest PID, which will also use host PID=0, we must discard the old guest
	 * userspace mappings. */
	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
		struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];

		if (ref->tid == 0)
			kvmppc_44x_shadow_release(vcpu_44x, i);
	}
}

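/* Return 1 if this guest TLB entry can be shadowed immediately: it must be
 * valid, match the guest's current address space, and map guest RAM. */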
static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
                             const struct kvmppc_44x_tlbe *tlbe)
{
	gpa_t gpa;

	if (!get_tlb_v(tlbe))
		return 0;

	/* Does it match current guest AS? */
	/* XXX what about IS != DS? */
	if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
		return 0;

	gpa = get_tlb_raddr(tlbe);
	if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
		/* Mapping is not for RAM. */
		return 0;

	return 1;
}

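/* Emulate the guest's tlbwe instruction: 'ra' names the GPR holding the guest
 * TLB index, 'rs' the GPR holding the new value, and 'ws' selects which word
 * (pageid, xlat or attrib) is being written. */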
int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *tlbe;
	unsigned int gtlb_index;

	gtlb_index = kvmppc_get_gpr(vcpu, ra);
	if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
		printk("%s: index %d\n", __func__, gtlb_index);
		kvmppc_dump_vcpu(vcpu);
		return EMULATE_FAIL;
	}

	tlbe = &vcpu_44x->guest_tlb[gtlb_index];

	/* Invalidate shadow mappings for the about-to-be-clobbered TLB entry. */
	if (tlbe->word0 & PPC44x_TLB_VALID)
		kvmppc_44x_invalidate(vcpu, gtlb_index);

	switch (ws) {
	case PPC44x_TLB_PAGEID:
		tlbe->tid = get_mmucr_stid(vcpu);
		tlbe->word0 = kvmppc_get_gpr(vcpu, rs);
		break;

	case PPC44x_TLB_XLAT:
		tlbe->word1 = kvmppc_get_gpr(vcpu, rs);
		break;

	case PPC44x_TLB_ATTRIB:
		tlbe->word2 = kvmppc_get_gpr(vcpu, rs);
		break;

	default:
		return EMULATE_FAIL;
	}

	if (tlbe_is_host_safe(vcpu, tlbe)) {
		gva_t eaddr;
		gpa_t gpaddr;
		u32 bytes;

		eaddr = get_tlb_eaddr(tlbe);
		gpaddr = get_tlb_raddr(tlbe);

		/* Use the advertised page size to mask effective and real addrs. */
		bytes = get_tlb_bytes(tlbe);
		eaddr &= ~(bytes - 1);
		gpaddr &= ~(bytes - 1);

		kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
	}

	trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1,
	                     tlbe->word2);

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}

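/* Emulate the guest's tlbsx instruction: search the guest TLB for the
 * effective address formed from 'ra'/'rb', place the matching index (or -1) in
 * 'rt', and update CR0[EQ] when the record bit ('rc') is set. */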
int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
{
	u32 ea;
	int gtlb_index;
	unsigned int as = get_mmucr_sts(vcpu);
	unsigned int pid = get_mmucr_stid(vcpu);

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

	gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
	if (rc) {
		u32 cr = kvmppc_get_cr(vcpu);

		if (gtlb_index < 0)
			kvmppc_set_cr(vcpu, cr & ~0x20000000);
		else
			kvmppc_set_cr(vcpu, cr | 0x20000000);
	}
	kvmppc_set_gpr(vcpu, rt, gtlb_index);

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}