/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"
/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;
/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12
/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}
/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	kvm_set_way_flush(vcpu);
	return true;
}
/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);

	BUG_ON(!p->is_write);

	if (!p->is_aarch32) {
		vcpu_sys_reg(vcpu, r->reg) = p->regval;
	} else {
		if (!p->is_32bit) {
			vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
			vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
		} else {
			vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
		}
	}

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}
/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	vgic_v3_dispatch_sgi(vcpu, p->regval);

	return true;
}
static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}
static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = (1 << 3);
		return true;
	}
}
static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u32 val;
		asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
		p->regval = val;
		return true;
	}
}
/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_sys_reg(vcpu, r->reg) = p->regval;
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}
/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	u64 val = p->regval;

	if (p->is_32bit) {
		val &= 0xffffffffUL;
		val |= ((*dbg_reg >> 32) << 32);
	}

	*dbg_reg = val;
	vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
}
static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	p->regval = *dbg_reg;
	if (p->is_32bit)
		p->regval &= 0xffffffffUL;
}
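/*
 * Worked example of the 32 bit semantics above (illustrative values):
 * with *dbg_reg == 0xdeadbeef00000000, a 32 bit write of 0x1234 via
 * reg_to_dbg() stores 0xdeadbeef00001234 (the top half survives), and
 * a subsequent 32 bit read via dbg_to_reg() returns just 0x1234.
 */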
static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
}
static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
}
static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write,
		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
}
static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
}
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair;

	asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
	vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
}
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
}
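/*
 * Worked example of the packing above (illustrative): vcpu_id 0x1234
 * yields Aff0 = 0x4 (0x1234 & 0xf), Aff1 = 0x23 ((0x1234 >> 4) & 0xff)
 * and Aff2 = 0x1 ((0x1234 >> 12) & 0xff), so 16 * 256 * 256 VCPUs can
 * be addressed this way.
 */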
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	asm volatile("mrs %0, pmcr_el0\n" : "=r" (pmcr));
	/*
	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
	 * except PMCR.E resetting to zero.
	 */
	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	vcpu_sys_reg(vcpu, PMCR_EL0) = val;
}
static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);

	return !((reg & ARMV8_PMU_USERENR_EN) || vcpu_mode_priv(vcpu));
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);

	return !((reg & (ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN))
		 || vcpu_mode_priv(vcpu));
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);

	return !((reg & (ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN))
		 || vcpu_mode_priv(vcpu));
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);

	return !((reg & (ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN))
		 || vcpu_mode_priv(vcpu));
}
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/* Only update writeable bits of PMCR */
		val = vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		vcpu_sys_reg(vcpu, PMCR_EL0) = val;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}
static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}
static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (!(p->Op2 & 1))
		asm volatile("mrs %0, pmceid0_el0\n" : "=r" (pmceid));
	else
		asm volatile("mrs %0, pmceid1_el0\n" : "=r" (pmceid));

	p->regval = pmceid;

	return true;
}
static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX)
		return false;

	return true;
}
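/*
 * For instance, with PMCR_EL0.N == 6 the valid event counter indices
 * are 0..5, plus ARMV8_PMU_CYCLE_IDX (31) for the cycle counter.
 */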
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		} else {
			return false;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR (the 64bit AArch32 view) */
		if (pmu_access_cycle_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	} else {
		return false;
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}
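/*
 * Worked example of the PMEVCNTRn_EL0 decoding above (illustrative):
 * PMEVCNTR10_EL0 encodes as CRn == 14, CRm == 0b1001, Op2 == 0b010,
 * so idx = ((0b1001 & 3) << 3) | (0b010 & 7) = (1 << 3) | 2 = 10.
 */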
static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
	} else {
		p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}
static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter(vcpu, val);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter(vcpu, val);
		}
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
	}

	return true;
}
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!vcpu_mode_priv(vcpu))
		return false;

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
	}

	return true;
}
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			kvm_pmu_overflow_set(vcpu, p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
	}

	return true;
}
static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		mask = kvm_pmu_valid_counter_mask(vcpu);
		kvm_pmu_software_increment(vcpu, p->regval & mask);
		return true;
	}

	return false;
}
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu))
			return false;

		vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
						    & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	/* DBGBVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100),	\
	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
	/* DBGBCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101),	\
	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
	/* DBGWVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110),	\
	  trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr },		\
	/* DBGWCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),	\
	  trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	/* PMEVCNTRn_EL0 */						\
	{ Op0(0b11), Op1(0b011), CRn(0b1110),				\
	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	/* PMEVTYPERn_EL0 */						\
	{ Op0(0b11), Op1(0b011), CRn(0b1110),				\
	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
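/*
 * Worked example of the macro encoding (illustrative):
 * PMU_PMEVTYPER_EL0(30) expands to CRm(0b1100 | 3) == 0b1111 and
 * Op2(30 & 7) == 0b110, which the access handlers decode back to
 * idx = ((0b1111 & 3) << 3) | 0b110 = 30.
 */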
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug, none of the
 * OSlock protocol. This should be revisited if we ever encounter a
 * more demanding guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	/* DC ISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
	  access_dcsw },
	/* DC CSW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
	  access_dcsw },
	/* DC CISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
	  access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	/* MDCCINT_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	/* MDSCR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),
	/* MDRAR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  trap_raz_wi },
	/* OSLAR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
	  trap_raz_wi },
	/* OSLSR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
	  trap_oslsr_el1 },
	/* OSDLR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
	  trap_raz_wi },
	/* DBGPRCR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
	  trap_raz_wi },
	/* DBGCLAIMSET_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
	  trap_raz_wi },
	/* DBGCLAIMCLR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
	  trap_raz_wi },
	/* DBGAUTHSTATUS_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
	  trap_dbgauthstatus_el1 },

	/* MDCCSR_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  trap_raz_wi },
	/* DBGDTR_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
	  trap_raz_wi },
	/* DBGDTR[TR]X_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
	  trap_raz_wi },

	/* DBGVCR32_EL2 */
	{ Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
	  NULL, reset_val, DBGVCR32_EL2, 0 },
	/* MPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
	  NULL, reset_mpidr, MPIDR_EL1 },
	/* SCTLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	/* CPACR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
	  NULL, reset_val, CPACR_EL1, 0 },
	/* TTBR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, TTBR0_EL1 },
	/* TTBR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_unknown, TTBR1_EL1 },
	/* TCR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
	  access_vm_reg, reset_val, TCR_EL1, 0 },

	/* AFSR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
	  access_vm_reg, reset_unknown, AFSR0_EL1 },
	/* AFSR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
	  access_vm_reg, reset_unknown, AFSR1_EL1 },
	/* ESR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, ESR_EL1 },
	/* FAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, FAR_EL1 },
	/* PAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
	  NULL, reset_unknown, PAR_EL1 },
	/* PMINTENSET_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
	  access_pminten, reset_unknown, PMINTENSET_EL1 },
	/* PMINTENCLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
	  access_pminten, NULL, PMINTENSET_EL1 },

	/* MAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, MAIR_EL1 },
	/* AMAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
	  access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	/* VBAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, VBAR_EL1, 0 },

	/* ICC_SGI1R_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b101),
	  access_gic_sgi },
	/* ICC_SRE_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
	  access_gic_sre },

	/* CONTEXTIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	/* TPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
	  NULL, reset_unknown, TPIDR_EL1 },

	/* CNTKCTL_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
	  NULL, reset_val, CNTKCTL_EL1, 0},

	/* CSSELR_EL1 */
	{ Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, CSSELR_EL1 },
	/* PMCR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
	  access_pmcr, reset_pmcr, },
	/* PMCNTENSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
	  access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
	/* PMCNTENCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
	  access_pmcnten, NULL, PMCNTENSET_EL0 },
	/* PMOVSCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
	  access_pmovs, NULL, PMOVSSET_EL0 },
	/* PMSWINC_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
	  access_pmswinc, reset_unknown, PMSWINC_EL0 },
	/* PMSELR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
	  access_pmselr, reset_unknown, PMSELR_EL0 },
	/* PMCEID0_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
	  access_pmceid },
	/* PMCEID1_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
	  access_pmceid },
	/* PMCCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
	  access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
	/* PMXEVTYPER_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
	  access_pmu_evtyper },
	/* PMXEVCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
	  access_pmu_evcntr },
	/*
	 * PMUSERENR_EL0
	 * This register resets as unknown in 64bit mode while it resets as
	 * zero in 32bit mode. Here we choose to reset it as zero for
	 * consistency.
	 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
	  access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
	/* PMOVSSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
	  access_pmovs, reset_unknown, PMOVSSET_EL0 },

	/* TPIDR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
	  NULL, reset_unknown, TPIDR_EL0 },
	/* TPIDRRO_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
	  NULL, reset_unknown, TPIDRRO_EL0 },
	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0
	 * This register resets as unknown in 64bit mode while it resets as
	 * zero in 32bit mode. Here we choose to reset it as zero for
	 * consistency.
	 */
	{ Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b1111), Op2(0b111),
	  access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },

	/* DACR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, DACR32_EL2 },
	/* IFSR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
	  NULL, reset_unknown, IFSR32_EL2 },
	/* FPEXC32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
	  NULL, reset_val, FPEXC32_EL2, 0x70 },
};
static bool trap_dbgidr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_system_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);

		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
}
static bool trap_debug32(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_cp14(vcpu, r->reg) = p->regval;
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_cp14(vcpu, r->reg);
	}

	return true;
}
/* AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * All control registers and watchpoint value registers are mapped to
 * the lower 32 bits of their AArch64 equivalents. We share the trap
 * handlers with the above AArch64 code which checks what mode the
 * system is in.
 */
static bool trap_xvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write) {
		u64 val = *dbg_reg;

		val &= 0xffffffffUL;
		val |= p->regval << 32;
		*dbg_reg = val;

		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = *dbg_reg >> 32;
	}

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}
/* Macro to expand the DBG{BCR,BVR,WVR,WCR}n registers in one go */
#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },	\
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							\
	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};
/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};
/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)							\
	/* PMEVCNTRn */							\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)						\
	/* PMEVTYPERn */						\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evtyper }
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },

	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },

	/* PMU */
	PMU_PMEVCNTR(0),
	PMU_PMEVCNTR(1),
	PMU_PMEVCNTR(2),
	PMU_PMEVCNTR(3),
	PMU_PMEVCNTR(4),
	PMU_PMEVCNTR(5),
	PMU_PMEVCNTR(6),
	PMU_PMEVCNTR(7),
	PMU_PMEVCNTR(8),
	PMU_PMEVCNTR(9),
	PMU_PMEVCNTR(10),
	PMU_PMEVCNTR(11),
	PMU_PMEVCNTR(12),
	PMU_PMEVCNTR(13),
	PMU_PMEVCNTR(14),
	PMU_PMEVCNTR(15),
	PMU_PMEVCNTR(16),
	PMU_PMEVCNTR(17),
	PMU_PMEVCNTR(18),
	PMU_PMEVCNTR(19),
	PMU_PMEVCNTR(20),
	PMU_PMEVCNTR(21),
	PMU_PMEVCNTR(22),
	PMU_PMEVCNTR(23),
	PMU_PMEVCNTR(24),
	PMU_PMEVCNTR(25),
	PMU_PMEVCNTR(26),
	PMU_PMEVCNTR(27),
	PMU_PMEVCNTR(28),
	PMU_PMEVCNTR(29),
	PMU_PMEVCNTR(30),
	PMU_PMEVTYPER(0),
	PMU_PMEVTYPER(1),
	PMU_PMEVTYPER(2),
	PMU_PMEVTYPER(3),
	PMU_PMEVTYPER(4),
	PMU_PMEVTYPER(5),
	PMU_PMEVTYPER(6),
	PMU_PMEVTYPER(7),
	PMU_PMEVTYPER(8),
	PMU_PMEVTYPER(9),
	PMU_PMEVTYPER(10),
	PMU_PMEVTYPER(11),
	PMU_PMEVTYPER(12),
	PMU_PMEVTYPER(13),
	PMU_PMEVTYPER(14),
	PMU_PMEVTYPER(15),
	PMU_PMEVTYPER(16),
	PMU_PMEVTYPER(17),
	PMU_PMEVTYPER(18),
	PMU_PMEVTYPER(19),
	PMU_PMEVTYPER(20),
	PMU_PMEVTYPER(21),
	PMU_PMEVTYPER(22),
	PMU_PMEVTYPER(23),
	PMU_PMEVTYPER(24),
	PMU_PMEVTYPER(25),
	PMU_PMEVTYPER(26),
	PMU_PMEVTYPER(27),
	PMU_PMEVTYPER(28),
	PMU_PMEVTYPER(29),
	PMU_PMEVTYPER(30),
	/* PMCCFILTR */
	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
};
static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
};
/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}

/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}
#define reg_to_match_value(x)						\
	({								\
		unsigned long val;					\
		val = (x)->Op0 << 14;					\
		val |= (x)->Op1 << 11;					\
		val |= (x)->CRn << 7;					\
		val |= (x)->CRm << 3;					\
		val |= (x)->Op2;					\
		val;							\
	 })

static int match_sys_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct sys_reg_desc *r = elt;

	return pval - reg_to_match_value(r);
}

static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned long pval = reg_to_match_value(params);

	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}
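/*
 * Worked example of the packed search key (illustrative): for MPIDR_EL1
 * (Op0 = 3, Op1 = 0, CRn = 0, CRm = 0, Op2 = 5) the key is
 * (3 << 14) | 5 = 0xc005. This ordering is why the tables must be
 * sorted ascending by Op0, Op1, CRn, CRm, Op2 for bsearch() to work.
 */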
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
/*
 * emulate_cp --  tries to match a sys_reg access in a handling table, and
 *                call the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		}

		/* Handled */
		return 0;
	}

	/* Not handled */
	return -1;
}
static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (hsr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	kvm_err("Unsupported guest CP%d access at: %08lx\n",
		cp, *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}
/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = (hsr >> 5) & 0xf;
	int Rt2 = (hsr >> 10) & 0xf;

	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
		goto out;
	if (!emulate_cp(vcpu, &params, global, nr_global))
		goto out;

	unhandled_cp_access(vcpu, &params);

out:
	/* Split up the value between registers for the read side */
	if (!params.is_write) {
		vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
		vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
	}

	return 1;
}
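/*
 * Illustrative example of the 64-bit assembly above: a guest mcrr with
 * Rt == 0x11223344 and Rt2 == 0x55667788 produces
 * params.regval == 0x5566778811223344; on the read side the split goes
 * the other way, Rt getting the low word and Rt2 the high word.
 */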
/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = (hsr >> 5) & 0xf;

	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		if (!params.is_write)
			vcpu_set_reg(vcpu, Rt, params.regval);
		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_64(vcpu,
				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
				target_specific, num);
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_32(vcpu,
				cp15_regs, ARRAY_SIZE(cp15_regs),
				target_specific, num);
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_64(vcpu,
				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
				NULL, 0);
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_32(vcpu,
				cp14_regs, ARRAY_SIZE(cp14_regs),
				NULL, 0);
}
static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx\n",
			*vcpu_pc(vcpu));
		print_sys_reg_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}
static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}
/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
	int Rt = (esr >> 5) & 0x1f;
	int ret;

	trace_kvm_handle_sys_reg(esr);

	params.is_aarch32 = false;
	params.is_32bit = false;
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = !(esr & 1);

	ret = emulate_sys_reg(vcpu, &params);

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);
	return ret;
}
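/*
 * Worked example of the ESR decoding above (illustrative): for a guest
 * "mrs x1, mpidr_el1", the ISS encodes Op0 = 3, Op2 = 5, Op1 = 0,
 * CRn = 0, Rt = 1, CRm = 0 and direction = read, i.e. the low ESR bits
 * are 0x3a0021, which the shifts and masks above decode back into the
 * params fields.
 */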
/******************************************************************************
 * Userspace API
 *****************************************************************************/
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM64_SYSREG_OP0_MASK
			      | KVM_REG_ARM64_SYSREG_OP1_MASK
			      | KVM_REG_ARM64_SYSREG_CRN_MASK
			      | KVM_REG_ARM64_SYSREG_CRM_MASK
			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}
/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}
/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		u64 val;						\
									\
		asm volatile("mrs %0, " __stringify(reg) "\n"		\
			     : "=r" (val));				\
		((struct sys_reg_desc *)r)->val = val;			\
	}
FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)
/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, get_midr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
	  NULL, get_revidr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  NULL, get_id_pfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
	  NULL, get_id_pfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
	  NULL, get_id_dfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
	  NULL, get_id_afr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
	  NULL, get_id_mmfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
	  NULL, get_id_mmfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
	  NULL, get_id_mmfr2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
	  NULL, get_id_mmfr3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  NULL, get_id_isar0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
	  NULL, get_id_isar1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  NULL, get_id_isar2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
	  NULL, get_id_isar3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
	  NULL, get_id_isar4_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
	  NULL, get_id_isar5_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_clidr_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
	  NULL, get_aidr_el1 },
	{ Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_ctr_el0 },
};
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}
static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}
static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}
static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}
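/*
 * For example (illustrative): if Ctype1 == 3 (separate I and D caches),
 * CSSELR values 0 (L1 data) and 1 (L1 instruction) are both valid; if
 * Ctype2 == 4 (unified), CSSELR 2 is valid but CSSELR 3 is not.
 */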
static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}
static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	if (r->get_user)
		return (r->get_user)(vcpu, r, reg, uaddr);

	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	if (r->set_user)
		return (r->set_user)(vcpu, r, reg, uaddr);

	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}
static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}
static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}
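/*
 * Note: this is the inverse of index_to_params() above. An index built
 * here decodes back to the same Op0/Op1/CRn/CRm/Op2 tuple, which is
 * what makes the KVM_{GET,SET}_ONE_REG round trip work.
 */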
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}
2133 static int walk_sys_regs(struct kvm_vcpu
*vcpu
, u64 __user
*uind
)
2135 const struct sys_reg_desc
*i1
, *i2
, *end1
, *end2
;
2136 unsigned int total
= 0;
2139 /* We check for duplicates here, to allow arch-specific overrides. */
2140 i1
= get_target_table(vcpu
->arch
.target
, true, &num
);
2143 end2
= sys_reg_descs
+ ARRAY_SIZE(sys_reg_descs
);
2145 BUG_ON(i1
== end1
|| i2
== end2
);
2147 /* Walk carefully, as both tables may refer to the same register. */
2149 int cmp
= cmp_sys_reg(i1
, i2
);
2150 /* target-specific overrides generic entry. */
2152 /* Ignore registers we trap but don't save. */
2154 if (!copy_reg_to_user(i1
, &uind
))
2159 /* Ignore registers we trap but don't save. */
2161 if (!copy_reg_to_user(i2
, &uind
))
2167 if (cmp
<= 0 && ++i1
== end1
)
2169 if (cmp
>= 0 && ++i2
== end2
)
2175 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu
*vcpu
)
2177 return ARRAY_SIZE(invariant_sys_regs
)
2179 + walk_sys_regs(vcpu
, (u64 __user
*)NULL
);
2182 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu
*vcpu
, u64 __user
*uindices
)
2187 /* Then give them all the invariant registers' indices. */
2188 for (i
= 0; i
< ARRAY_SIZE(invariant_sys_regs
); i
++) {
2189 if (put_user(sys_reg_to_index(&invariant_sys_regs
[i
]), uindices
))
2194 err
= walk_sys_regs(vcpu
, uindices
);
2199 return write_demux_regids(uindices
);
static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}
void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}
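/*
 * Illustrative example of the truncation above: for
 * cache_levels == 0b100011 (Ctype1 = 3, Ctype2 = 4, Ctype3 = 0) the
 * loop stops at i == 2, and the mask (1 << 6) - 1 keeps exactly the
 * two valid Ctype fields.
 */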
/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

	for (num = 1; num < NR_SYS_REGS; num++)
		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
			panic("Didn't reset vcpu_sys_reg(%zi)", num);
}