Commit | Line | Data |
---|---|---|
73196cd3 | 1 | /* |
c7ba7771 | 2 | * Copyright (C) 2010,2012 Freescale Semiconductor, Inc. All rights reserved. |
73196cd3 SW |
3 | * |
4 | * Author: Varun Sethi, <varun.sethi@freescale.com> | |
5 | * | |
6 | * Description: | |
7 | * This file is derived from arch/powerpc/kvm/e500.c, | |
8 | * by Yu Liu <yu.liu@freescale.com>. | |
9 | * | |
10 | * This program is free software; you can redistribute it and/or modify | |
11 | * it under the terms of the GNU General Public License, version 2, as | |
12 | * published by the Free Software Foundation. | |
13 | */ | |
14 | ||
15 | #include <linux/kvm_host.h> | |
16 | #include <linux/slab.h> | |
17 | #include <linux/err.h> | |
18 | #include <linux/export.h> | |
398a76c6 AG |
19 | #include <linux/miscdevice.h> |
20 | #include <linux/module.h> | |
73196cd3 SW |
21 | |
22 | #include <asm/reg.h> | |
23 | #include <asm/cputable.h> | |
24 | #include <asm/tlbflush.h> | |
25 | #include <asm/kvm_ppc.h> | |
26 | #include <asm/dbell.h> | |
27 | ||
28 | #include "booke.h" | |
29 | #include "e500.h" | |
30 | ||
/*
 * Signal a pending interrupt of the given class to a (possibly running)
 * vcpu by sending it a guest doorbell message.
 *
 * The interrupt class is mapped to the matching guest doorbell type;
 * unknown classes are warned about once and dropped.
 */
void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type)
{
	enum ppc_dbell dbell_type;
	unsigned long tag;

	switch (type) {
	case INT_CLASS_NONCRIT:
		dbell_type = PPC_G_DBELL;
		break;
	case INT_CLASS_CRIT:
		dbell_type = PPC_G_DBELL_CRIT;
		break;
	case INT_CLASS_MC:
		dbell_type = PPC_G_DBELL_MC;
		break;
	default:
		WARN_ONCE(1, "%s: unknown int type %d\n", __func__, type);
		return;
	}

	/*
	 * Disable preemption so the lpid read by get_lpid() stays valid
	 * until the doorbell has actually been sent.
	 */
	preempt_disable();
	tag = PPC_DBELL_LPID(get_lpid(vcpu)) | vcpu->vcpu_id;
	mb();	/* order prior memory updates before the doorbell */
	ppc_msgsnd(dbell_type, 0, tag);
	preempt_enable();
}
57 | ||
/* gtlbe must not be mapped by more than one host tlb entry */
void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
			   struct kvm_book3e_206_tlb_entry *gtlbe)
{
	unsigned int tid, ts;
	gva_t eaddr;
	u32 val;
	unsigned long flags;

	ts = get_tlb_ts(gtlbe);
	tid = get_tlb_tid(gtlbe);

	/* We search the host TLB to invalidate its shadow TLB entry */
	val = (tid << 16) | ts;
	eaddr = get_tlb_eaddr(gtlbe);

	/* The MAS register sequence below must not be interrupted. */
	local_irq_save(flags);

	/* Search criteria: PID/AS in MAS6, guest-state + lpid in MAS5. */
	mtspr(SPRN_MAS6, val);
	mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));

	asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr));
	val = mfspr(SPRN_MAS1);
	if (val & MAS1_VALID) {
		/* Found a match: clear the valid bit and write it back. */
		mtspr(SPRN_MAS1, val & ~MAS1_VALID);
		asm volatile("tlbwe");
	}
	/* Restore MAS5 so later host TLB operations target the host. */
	mtspr(SPRN_MAS5, 0);
	/* NOTE: tlbsx also updates mas8, so clear it for host tlbwe */
	mtspr(SPRN_MAS8, 0);
	isync();

	local_irq_restore(flags);
}
92 | ||
/*
 * Invalidate every shadow TLB entry belonging to this vcpu's lpid,
 * using the tlbilxlpid instruction with MAS5 set to the guest lpid.
 */
void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned long flags;

	/* Keep the MAS5 program/flush/restore sequence atomic. */
	local_irq_save(flags);
	mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));
	asm volatile("tlbilxlpid");
	mtspr(SPRN_MAS5, 0);
	local_irq_restore(flags);
}
103 | ||
/* Cache the guest's PID value in the vcpu architecture state. */
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
	vcpu->arch.pid = pid;
}
108 | ||
/* Guest MSR changes need no MMU maintenance here — intentionally empty. */
void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
}
112 | ||
/*
 * We use two lpids per VM.  Per physical cpu, remember the last vcpu
 * that ran under each lpid so vcpu_load can skip the shadow-TLB flush
 * when no other vcpu has touched that lpid on this cpu in the interim.
 */
static DEFINE_PER_CPU(struct kvm_vcpu *[KVMPPC_NR_LPIDS], last_vcpu_of_lpid);
c5e6cb05 | 115 | |
/*
 * Load a vcpu onto this physical cpu: program the hypervisor SPRs
 * (LPID, EPCR, GPIR, MSRP, external-PID contexts) and mirror the
 * guest-visible register shadows into the GS-mode SPRs, then flush
 * the shadow TLB if this lpid may hold stale entries here.
 */
static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	kvmppc_booke_vcpu_load(vcpu, cpu);

	mtspr(SPRN_LPID, get_lpid(vcpu));
	mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
	mtspr(SPRN_GPIR, vcpu->vcpu_id);
	mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp);
	/* External PID load/store contexts: guest state under our lpid. */
	vcpu->arch.eplc = EPC_EGS | (get_lpid(vcpu) << EPC_ELPID_SHIFT);
	vcpu->arch.epsc = vcpu->arch.eplc;
	mtspr(SPRN_EPLC, vcpu->arch.eplc);
	mtspr(SPRN_EPSC, vcpu->arch.epsc);

	mtspr(SPRN_GIVPR, vcpu->arch.ivpr);
	mtspr(SPRN_GIVOR2, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
	mtspr(SPRN_GIVOR8, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
	mtspr(SPRN_GSPRG0, (unsigned long)vcpu->arch.shared->sprg0);
	mtspr(SPRN_GSPRG1, (unsigned long)vcpu->arch.shared->sprg1);
	mtspr(SPRN_GSPRG2, (unsigned long)vcpu->arch.shared->sprg2);
	mtspr(SPRN_GSPRG3, (unsigned long)vcpu->arch.shared->sprg3);

	mtspr(SPRN_GSRR0, vcpu->arch.shared->srr0);
	mtspr(SPRN_GSRR1, vcpu->arch.shared->srr1);

	mtspr(SPRN_GEPR, vcpu->arch.epr);
	mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
	mtspr(SPRN_GESR, vcpu->arch.shared->esr);

	/*
	 * Flush the shadow TLB if stale entries may exist for this lpid:
	 * either the vcpu last ran on a different physical cpu (oldpir
	 * mismatch) or another vcpu used this lpid on this cpu since we
	 * were last loaded.
	 */
	if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
	    __this_cpu_read(last_vcpu_of_lpid[get_lpid(vcpu)]) != vcpu) {
		kvmppc_e500_tlbil_all(vcpu_e500);
		__this_cpu_write(last_vcpu_of_lpid[get_lpid(vcpu)], vcpu);
	}
}
152 | ||
/*
 * Save vcpu state when scheduling off this cpu: read back the
 * external-PID contexts and the GS-mode SPR shadows into the shared
 * page, and record which physical cpu we ran on (oldpir) so vcpu_load
 * can detect a cpu migration.
 */
static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
{
	vcpu->arch.eplc = mfspr(SPRN_EPLC);
	vcpu->arch.epsc = mfspr(SPRN_EPSC);

	vcpu->arch.shared->sprg0 = mfspr(SPRN_GSPRG0);
	vcpu->arch.shared->sprg1 = mfspr(SPRN_GSPRG1);
	vcpu->arch.shared->sprg2 = mfspr(SPRN_GSPRG2);
	vcpu->arch.shared->sprg3 = mfspr(SPRN_GSPRG3);

	vcpu->arch.shared->srr0 = mfspr(SPRN_GSRR0);
	vcpu->arch.shared->srr1 = mfspr(SPRN_GSRR1);

	vcpu->arch.epr = mfspr(SPRN_GEPR);
	vcpu->arch.shared->dar = mfspr(SPRN_GDEAR);
	vcpu->arch.shared->esr = mfspr(SPRN_GESR);

	vcpu->arch.oldpir = mfspr(SPRN_PIR);

	kvmppc_booke_vcpu_put(vcpu);
}
174 | ||
175 | int kvmppc_core_check_processor_compat(void) | |
176 | { | |
177 | int r; | |
178 | ||
179 | if (strcmp(cur_cpu_spec->cpu_name, "e500mc") == 0) | |
180 | r = 0; | |
181 | else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0) | |
182 | r = 0; | |
d2ca32a2 MC |
183 | #ifdef CONFIG_ALTIVEC |
184 | /* | |
446957ba | 185 | * Since guests have the privilege to enable AltiVec, we need AltiVec |
d2ca32a2 MC |
186 | * support in the host to save/restore their context. |
187 | * Don't use CPU_FTR_ALTIVEC to identify cores with AltiVec unit | |
188 | * because it's cleared in the absence of CONFIG_ALTIVEC! | |
189 | */ | |
190 | else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0) | |
191 | r = 0; | |
192 | #endif | |
73196cd3 SW |
193 | else |
194 | r = -ENOTSUPP; | |
195 | ||
196 | return r; | |
197 | } | |
198 | ||
/*
 * One-time vcpu setup: select the shadow EPCR/MSRP bits kept in force
 * while the guest runs, and latch the host PVR/SVR values for the vcpu.
 *
 * Always returns 0.
 */
int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	/* Hypervisor-enforced EPCR bits while in guest state. */
	vcpu->arch.shadow_epcr = SPRN_EPCR_DSIGS | SPRN_EPCR_DGTMI | \
				 SPRN_EPCR_DUVD;
#ifdef CONFIG_64BIT
	/* 64-bit host: take interrupts in 64-bit computation mode. */
	vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM;
#endif
	vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_PMMP;

	/* Guest sees the host's processor and system version registers. */
	vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu_e500->svr = mfspr(SPRN_SVR);

	vcpu->arch.cpu_type = KVM_CPU_E500MC;

	return 0;
}
217 | ||
/*
 * Fill @sregs with e500mc state: the FSL implementation registers
 * (SVR/HID0/MCAR), the e500 TLB configuration, and the high IVORs,
 * then delegate the common IVORs to the generic helper.
 */
static int kvmppc_core_get_sregs_e500mc(struct kvm_vcpu *vcpu,
					struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_PM |
			       KVM_SREGS_E_PC;
	sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;

	sregs->u.e.impl.fsl.features = 0;
	sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
	sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
	sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;

	kvmppc_get_sregs_e500_tlb(vcpu, sregs);

	/* High IVORs: performance monitor and the two doorbell classes. */
	sregs->u.e.ivor_high[3] =
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
	sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
	sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];

	return kvmppc_get_sregs_ivor(vcpu, sregs);
}
241 | ||
/*
 * Apply userspace-supplied sregs: FSL implementation registers first,
 * then the e500 TLB state, then (feature-gated) the high IVORs and the
 * common IVORs via the generic helper.
 *
 * Returns 0 on success or a negative errno from the TLB/IVOR helpers.
 */
static int kvmppc_core_set_sregs_e500mc(struct kvm_vcpu *vcpu,
					struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int ret;

	if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
		vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
		vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
		vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
	}

	ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
	if (ret < 0)
		return ret;

	/* Without the IVOR feature flag there is nothing more to apply. */
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	if (sregs->u.e.features & KVM_SREGS_E_PM) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
			sregs->u.e.ivor_high[3];
	}

	if (sregs->u.e.features & KVM_SREGS_E_PC) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] =
			sregs->u.e.ivor_high[4];
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] =
			sregs->u.e.ivor_high[5];
	}

	return kvmppc_set_sregs_ivor(vcpu, sregs);
}
275 | ||
3a167bea AK |
276 | static int kvmppc_get_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id, |
277 | union kvmppc_one_reg *val) | |
35b299e2 | 278 | { |
28d2f421 BB |
279 | int r = 0; |
280 | ||
281 | switch (id) { | |
282 | case KVM_REG_PPC_SPRG9: | |
283 | *val = get_reg_val(id, vcpu->arch.sprg9); | |
284 | break; | |
285 | default: | |
286 | r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); | |
287 | } | |
288 | ||
a85d2aa2 | 289 | return r; |
35b299e2 MC |
290 | } |
291 | ||
3a167bea AK |
292 | static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id, |
293 | union kvmppc_one_reg *val) | |
35b299e2 | 294 | { |
28d2f421 BB |
295 | int r = 0; |
296 | ||
297 | switch (id) { | |
298 | case KVM_REG_PPC_SPRG9: | |
299 | vcpu->arch.sprg9 = set_reg_val(id, *val); | |
300 | break; | |
301 | default: | |
302 | r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val); | |
303 | } | |
304 | ||
a85d2aa2 | 305 | return r; |
35b299e2 MC |
306 | } |
307 | ||
3a167bea AK |
308 | static struct kvm_vcpu *kvmppc_core_vcpu_create_e500mc(struct kvm *kvm, |
309 | unsigned int id) | |
73196cd3 SW |
310 | { |
311 | struct kvmppc_vcpu_e500 *vcpu_e500; | |
312 | struct kvm_vcpu *vcpu; | |
313 | int err; | |
314 | ||
315 | vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); | |
316 | if (!vcpu_e500) { | |
317 | err = -ENOMEM; | |
318 | goto out; | |
319 | } | |
320 | vcpu = &vcpu_e500->vcpu; | |
321 | ||
322 | /* Invalid PIR value -- this LPID dosn't have valid state on any cpu */ | |
323 | vcpu->arch.oldpir = 0xffffffff; | |
324 | ||
325 | err = kvm_vcpu_init(vcpu, kvm, id); | |
326 | if (err) | |
327 | goto free_vcpu; | |
328 | ||
329 | err = kvmppc_e500_tlb_init(vcpu_e500); | |
330 | if (err) | |
331 | goto uninit_vcpu; | |
332 | ||
333 | vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO); | |
334 | if (!vcpu->arch.shared) | |
335 | goto uninit_tlb; | |
336 | ||
337 | return vcpu; | |
338 | ||
339 | uninit_tlb: | |
340 | kvmppc_e500_tlb_uninit(vcpu_e500); | |
341 | uninit_vcpu: | |
342 | kvm_vcpu_uninit(vcpu); | |
343 | ||
344 | free_vcpu: | |
345 | kmem_cache_free(kvm_vcpu_cache, vcpu_e500); | |
346 | out: | |
347 | return ERR_PTR(err); | |
348 | } | |
349 | ||
/* Tear down a vcpu in the reverse order of its creation. */
static void kvmppc_core_vcpu_free_e500mc(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	free_page((unsigned long)vcpu->arch.shared);
	kvmppc_e500_tlb_uninit(vcpu_e500);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
}
359 | ||
/*
 * Per-VM init: allocate the VM's lpid.
 *
 * Returns 0 on success or the negative errno from the lpid allocator.
 */
static int kvmppc_core_init_vm_e500mc(struct kvm *kvm)
{
	int lpid;

	lpid = kvmppc_alloc_lpid();
	if (lpid < 0)
		return lpid;

	/*
	 * Use two lpids per VM on cores with two threads like e6500. Use
	 * even numbers to speedup vcpu lpid computation with consecutive lpids
	 * per VM. vm1 will use lpids 2 and 3, vm2 lpids 4 and 5, and so on.
	 */
	if (threads_per_core == 2)
		lpid <<= 1;

	kvm->arch.lpid = lpid;
	return 0;
}
379 | ||
/*
 * Per-VM teardown: return the VM's lpid to the allocator, undoing the
 * even-lpid encoding applied by init_vm on dual-threaded cores.
 */
static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm)
{
	int lpid = kvm->arch.lpid;

	/* init_vm stored the allocator's lpid shifted left by one. */
	if (threads_per_core == 2)
		lpid >>= 1;

	kvmppc_free_lpid(lpid);
}
389 | ||
/* e500mc callbacks plugged into the generic PPC KVM core. */
static struct kvmppc_ops kvm_ops_e500mc = {
	.get_sregs = kvmppc_core_get_sregs_e500mc,
	.set_sregs = kvmppc_core_set_sregs_e500mc,
	.get_one_reg = kvmppc_get_one_reg_e500mc,
	.set_one_reg = kvmppc_set_one_reg_e500mc,
	.vcpu_load   = kvmppc_core_vcpu_load_e500mc,
	.vcpu_put    = kvmppc_core_vcpu_put_e500mc,
	.vcpu_create = kvmppc_core_vcpu_create_e500mc,
	.vcpu_free   = kvmppc_core_vcpu_free_e500mc,
	.mmu_destroy  = kvmppc_mmu_destroy_e500,
	.init_vm = kvmppc_core_init_vm_e500mc,
	.destroy_vm = kvmppc_core_destroy_vm_e500mc,
	.emulate_op = kvmppc_core_emulate_op_e500,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
};
406 | ||
73196cd3 SW |
407 | static int __init kvmppc_e500mc_init(void) |
408 | { | |
409 | int r; | |
410 | ||
411 | r = kvmppc_booke_init(); | |
412 | if (r) | |
3a167bea | 413 | goto err_out; |
73196cd3 | 414 | |
188e267c MC |
415 | /* |
416 | * Use two lpids per VM on dual threaded processors like e6500 | |
417 | * to workarround the lack of tlb write conditional instruction. | |
418 | * Expose half the number of available hardware lpids to the lpid | |
419 | * allocator. | |
420 | */ | |
421 | kvmppc_init_lpid(KVMPPC_NR_LPIDS/threads_per_core); | |
73196cd3 SW |
422 | kvmppc_claim_lpid(0); /* host */ |
423 | ||
cbbc58d4 | 424 | r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); |
3a167bea AK |
425 | if (r) |
426 | goto err_out; | |
cbbc58d4 AK |
427 | kvm_ops_e500mc.owner = THIS_MODULE; |
428 | kvmppc_pr_ops = &kvm_ops_e500mc; | |
429 | ||
3a167bea AK |
430 | err_out: |
431 | return r; | |
73196cd3 SW |
432 | } |
433 | ||
/* Module unload: unregister our ops, then tear down common booke state. */
static void __exit kvmppc_e500mc_exit(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_booke_exit();
}
439 | ||
module_init(kvmppc_e500mc_init);
module_exit(kvmppc_e500mc_exit);
/* Bind to the generic /dev/kvm misc device node and its devname alias. */
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");