/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
12 #include <linux/errno.h>
13 #include <linux/err.h>
14 #include <linux/kvm_host.h>
15 #include <linux/module.h>
16 #include <linux/vmalloc.h>
18 #include <linux/bootmem.h>
19 #include <asm/cacheflush.h>
/*
 * Instruction encoding templates used when patching guest code:
 * SYNCI_TEMPLATE is a `synci` with zero base/offset; SYNCI_BASE()/
 * SYNCI_OFFSET() extract the base-register and 16-bit offset fields
 * from a CACHE instruction word.
 *
 * Fix: SYNCI_OFFSET previously expanded to ((x) & 0xffff) without
 * declaring x as a macro parameter, so it silently captured whatever
 * `x` was in scope at the expansion site.
 */
#define SYNCI_TEMPLATE  0x041f0000
#define SYNCI_BASE(x)   (((x) >> 21) & 0x1f)
#define SYNCI_OFFSET(x) ((x) & 0xffff)

#define LW_TEMPLATE     0x8c000000
#define CLEAR_TEMPLATE  0x00000020
#define SW_TEMPLATE     0xac000000
32 * kvm_mips_trans_replace() - Replace trapping instruction in guest memory.
34 * @opc: PC of instruction to replace.
35 * @replace: Instruction to write
37 static int kvm_mips_trans_replace(struct kvm_vcpu
*vcpu
, u32
*opc
, u32 replace
)
39 unsigned long kseg0_opc
, flags
;
41 if (KVM_GUEST_KSEGX(opc
) == KVM_GUEST_KSEG0
) {
43 CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
44 (vcpu
, (unsigned long) opc
));
45 memcpy((void *)kseg0_opc
, (void *)&replace
, sizeof(u32
));
46 local_flush_icache_range(kseg0_opc
, kseg0_opc
+ 32);
47 } else if (KVM_GUEST_KSEGX((unsigned long) opc
) == KVM_GUEST_KSEG23
) {
48 local_irq_save(flags
);
49 memcpy((void *)opc
, (void *)&replace
, sizeof(u32
));
50 local_flush_icache_range((unsigned long)opc
,
51 (unsigned long)opc
+ 32);
52 local_irq_restore(flags
);
54 kvm_err("%s: Invalid address: %p\n", __func__
, opc
);
61 int kvm_mips_trans_cache_index(u32 inst
, u32
*opc
,
62 struct kvm_vcpu
*vcpu
)
64 /* Replace the CACHE instruction, with a NOP */
65 return kvm_mips_trans_replace(vcpu
, opc
, 0x00000000);
69 * Address based CACHE instructions are transformed into synci(s). A little
70 * heavy for just D-cache invalidates, but avoids an expensive trap
72 int kvm_mips_trans_cache_va(u32 inst
, u32
*opc
,
73 struct kvm_vcpu
*vcpu
)
75 u32 synci_inst
= SYNCI_TEMPLATE
, base
, offset
;
77 base
= (inst
>> 21) & 0x1f;
78 offset
= inst
& 0xffff;
79 synci_inst
|= (base
<< 21);
82 return kvm_mips_trans_replace(vcpu
, opc
, synci_inst
);
85 int kvm_mips_trans_mfc0(u32 inst
, u32
*opc
, struct kvm_vcpu
*vcpu
)
90 rt
= (inst
>> 16) & 0x1f;
91 rd
= (inst
>> 11) & 0x1f;
94 if ((rd
== MIPS_CP0_ERRCTL
) && (sel
== 0)) {
95 mfc0_inst
= CLEAR_TEMPLATE
;
96 mfc0_inst
|= ((rt
& 0x1f) << 11);
98 mfc0_inst
= LW_TEMPLATE
;
99 mfc0_inst
|= ((rt
& 0x1f) << 16);
100 mfc0_inst
|= offsetof(struct kvm_mips_commpage
,
104 return kvm_mips_trans_replace(vcpu
, opc
, mfc0_inst
);
107 int kvm_mips_trans_mtc0(u32 inst
, u32
*opc
, struct kvm_vcpu
*vcpu
)
110 u32 mtc0_inst
= SW_TEMPLATE
;
112 rt
= (inst
>> 16) & 0x1f;
113 rd
= (inst
>> 11) & 0x1f;
116 mtc0_inst
|= ((rt
& 0x1f) << 16);
117 mtc0_inst
|= offsetof(struct kvm_mips_commpage
, cop0
.reg
[rd
][sel
]);
119 return kvm_mips_trans_replace(vcpu
, opc
, mtc0_inst
);