ARM: KVM: get rid of S2_PGD_SIZE
arch/arm/kvm/interrupts_head.S

#include <linux/irqchip/arm-gic.h>

#define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4))
#define VCPU_USR_SP (VCPU_USR_REG(13))
#define VCPU_USR_LR (VCPU_USR_REG(14))
#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4))
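
/*
 * For example, CP15_OFFSET(c1_SCTLR) expands to
 * VCPU_CP15 + c1_SCTLR * 4, the byte offset of the saved SCTLR within
 * the vcpu struct, and can be used directly in addressing:
 *
 *	ldr	r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
 */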

/*
 * Many of these macros need to access the VCPU structure, which is always
 * held in r0. These macros should never clobber r1, as it is used to hold the
 * exception code on the return path (except of course the macro that switches
 * all the registers before the final jump to the VM).
 */
vcpu	.req	r0	@ vcpu pointer always in r0

/* Clobbers {r2-r6} */
.macro store_vfp_state vfp_base
	@ The VFPFMRX and VFPFMXR macros are the VMRS and VMSR instructions
	VFPFMRX r2, FPEXC
	@ Make sure VFP is enabled so we can touch the registers.
	orr r6, r2, #FPEXC_EN
	VFPFMXR FPEXC, r6

	VFPFMRX r3, FPSCR
	tst r2, #FPEXC_EX @ Check for VFP Subarchitecture
	beq 1f
	@ If FPEXC_EX is 0, then FPINST/FPINST2 reads are unpredictable, so
	@ we only need to save them if FPEXC_EX is set.
	VFPFMRX r4, FPINST
	tst r2, #FPEXC_FP2V
	VFPFMRX r5, FPINST2, ne @ vmrsne
	bic r6, r2, #FPEXC_EX @ FPEXC_EX disable
	VFPFMXR FPEXC, r6
1:
	VFPFSTMIA \vfp_base, r6 @ Save VFP registers
	stm \vfp_base, {r2-r5} @ Save FPEXC, FPSCR, FPINST, FPINST2
.endm

/* Assume FPEXC_EN is on and FPEXC_EX is off, clobbers {r2-r6} */
.macro restore_vfp_state vfp_base
	VFPFLDMIA \vfp_base, r6 @ Load VFP registers
	ldm \vfp_base, {r2-r5} @ Load FPEXC, FPSCR, FPINST, FPINST2

	VFPFMXR FPSCR, r3
	tst r2, #FPEXC_EX @ Check for VFP Subarchitecture
	beq 1f
	VFPFMXR FPINST, r4
	tst r2, #FPEXC_FP2V
	VFPFMXR FPINST2, r5, ne
1:
	VFPFMXR FPEXC, r2 @ FPEXC (last, in case !EN)
.endm
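
/*
 * Usage sketch: a world switch saves one VFP context and loads the
 * other, e.g. (the VCPU_VFP_* offsets here are assumed, not defined in
 * this file):
 *
 *	add	r7, vcpu, #VCPU_VFP_HOST	@ assumed offset
 *	store_vfp_state r7
 *	add	r7, vcpu, #VCPU_VFP_GUEST	@ assumed offset
 *	restore_vfp_state r7
 */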

/* These are simply for the macros to work - the values don't have meaning */
.equ usr, 0
.equ svc, 1
.equ abt, 2
.equ und, 3
.equ irq, 4
.equ fiq, 5

.macro push_host_regs_mode mode
	mrs r2, SP_\mode
	mrs r3, LR_\mode
	mrs r4, SPSR_\mode
	push {r2, r3, r4}
.endm
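
/*
 * For example, "push_host_regs_mode svc" uses the Virtualization
 * Extensions' banked-register MRS encoding (legal from Hyp mode) and
 * expands to:
 *
 *	mrs	r2, SP_svc
 *	mrs	r3, LR_svc
 *	mrs	r4, SPSR_svc
 *	push	{r2, r3, r4}
 */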

/*
 * Store all host persistent registers on the stack.
 * Clobbers all registers, in all modes, except r0 and r1.
 */
.macro save_host_regs
	/* Hyp regs. Only ELR_hyp (SPSR_hyp already saved) */
	mrs r2, ELR_hyp
	push {r2}

	/* usr regs */
	push {r4-r12} @ r0-r3 are always clobbered
	mrs r2, SP_usr
	mov r3, lr
	push {r2, r3}

	push_host_regs_mode svc
	push_host_regs_mode abt
	push_host_regs_mode und
	push_host_regs_mode irq

	/* fiq regs */
	mrs r2, r8_fiq
	mrs r3, r9_fiq
	mrs r4, r10_fiq
	mrs r5, r11_fiq
	mrs r6, r12_fiq
	mrs r7, SP_fiq
	mrs r8, LR_fiq
	mrs r9, SPSR_fiq
	push {r2-r9}
.endm
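
/*
 * restore_host_regs below unwinds this exact layout in reverse order:
 * the fiq block first (pushed last), then irq/und/abt/svc, the usr
 * registers, and finally ELR_hyp.
 */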

.macro pop_host_regs_mode mode
	pop {r2, r3, r4}
	msr SP_\mode, r2
	msr LR_\mode, r3
	msr SPSR_\mode, r4
.endm

/*
 * Restore all host registers from the stack.
 * Clobbers all registers, in all modes, except r0 and r1.
 */
.macro restore_host_regs
	pop {r2-r9}
	msr r8_fiq, r2
	msr r9_fiq, r3
	msr r10_fiq, r4
	msr r11_fiq, r5
	msr r12_fiq, r6
	msr SP_fiq, r7
	msr LR_fiq, r8
	msr SPSR_fiq, r9

	pop_host_regs_mode irq
	pop_host_regs_mode und
	pop_host_regs_mode abt
	pop_host_regs_mode svc

	pop {r2, r3}
	msr SP_usr, r2
	mov lr, r3
	pop {r4-r12}

	pop {r2}
	msr ELR_hyp, r2
.endm

/*
 * Restore SP, LR and SPSR for a given mode. offset is the offset of
 * this mode's registers from the VCPU base.
 *
 * Assumes vcpu pointer in vcpu reg
 *
 * Clobbers r1, r2, r3, r4.
 */
.macro restore_guest_regs_mode mode, offset
	add r1, vcpu, \offset
	ldm r1, {r2, r3, r4}
	msr SP_\mode, r2
	msr LR_\mode, r3
	msr SPSR_\mode, r4
.endm

/*
 * Restore all guest registers from the vcpu struct.
 *
 * Assumes vcpu pointer in vcpu reg
 *
 * Clobbers *all* registers.
 */
.macro restore_guest_regs
	restore_guest_regs_mode svc, #VCPU_SVC_REGS
	restore_guest_regs_mode abt, #VCPU_ABT_REGS
	restore_guest_regs_mode und, #VCPU_UND_REGS
	restore_guest_regs_mode irq, #VCPU_IRQ_REGS

	add r1, vcpu, #VCPU_FIQ_REGS
	ldm r1, {r2-r9}
	msr r8_fiq, r2
	msr r9_fiq, r3
	msr r10_fiq, r4
	msr r11_fiq, r5
	msr r12_fiq, r6
	msr SP_fiq, r7
	msr LR_fiq, r8
	msr SPSR_fiq, r9

	@ Load return state
	ldr r2, [vcpu, #VCPU_PC]
	ldr r3, [vcpu, #VCPU_CPSR]
	msr ELR_hyp, r2
	msr SPSR_cxsf, r3

	@ Load user registers
	ldr r2, [vcpu, #VCPU_USR_SP]
	ldr r3, [vcpu, #VCPU_USR_LR]
	msr SP_usr, r2
	mov lr, r3
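	@ Note that the ldm below also overwrites r0, i.e. the vcpu
	@ pointer itself; it is recovered from HTPIDR (see load_vcpu)
	@ on the next exception taken from the guest.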
	add vcpu, vcpu, #(VCPU_USR_REGS)
	ldm vcpu, {r0-r12}
.endm

/*
 * Save SP, LR and SPSR for a given mode. offset is the offset of
 * this mode's registers from the VCPU base.
 *
 * Assumes vcpu pointer in vcpu reg
 *
 * Clobbers r2, r3, r4, r5.
 */
.macro save_guest_regs_mode mode, offset
	add r2, vcpu, \offset
	mrs r3, SP_\mode
	mrs r4, LR_\mode
	mrs r5, SPSR_\mode
	stm r2, {r3, r4, r5}
.endm

/*
 * Save all guest registers to the vcpu struct
 * Expects guest's r0, r1, r2 on the stack.
 *
 * Assumes vcpu pointer in vcpu reg
 *
 * Clobbers r2, r3, r4, r5.
 */
.macro save_guest_regs
	@ Store usr registers
	add r2, vcpu, #VCPU_USR_REG(3)
	stm r2, {r3-r12}
	add r2, vcpu, #VCPU_USR_REG(0)
	pop {r3, r4, r5} @ r0, r1, r2
	stm r2, {r3, r4, r5}
	mrs r2, SP_usr
	mov r3, lr
	str r2, [vcpu, #VCPU_USR_SP]
	str r3, [vcpu, #VCPU_USR_LR]

	@ Store return state
	mrs r2, ELR_hyp
	mrs r3, spsr
	str r2, [vcpu, #VCPU_PC]
	str r3, [vcpu, #VCPU_CPSR]

	@ Store other guest registers
	save_guest_regs_mode svc, #VCPU_SVC_REGS
	save_guest_regs_mode abt, #VCPU_ABT_REGS
	save_guest_regs_mode und, #VCPU_UND_REGS
	save_guest_regs_mode irq, #VCPU_IRQ_REGS
.endm
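
/*
 * Entry sketch for this macro (illustrative, not the literal vector
 * code): a Hyp exception vector is expected to do roughly
 *
 *	push	{r0, r1, r2}	@ free up scratch registers
 *	load_vcpu		@ r0 <- vcpu pointer
 *	save_guest_regs		@ consumes the three stacked registers
 */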

/*
 * Reads cp15 registers from hardware and stores them in memory
 * @store_to_vcpu: If 0, registers are written in-order to the stack,
 *		   otherwise to the VCPU struct pointed to by the vcpu reg
 *
 * Assumes vcpu pointer in vcpu reg
 *
 * Clobbers r2 - r12
 */
.macro read_cp15_state store_to_vcpu
	mrc p15, 0, r2, c1, c0, 0 @ SCTLR
	mrc p15, 0, r3, c1, c0, 2 @ CPACR
	mrc p15, 0, r4, c2, c0, 2 @ TTBCR
	mrc p15, 0, r5, c3, c0, 0 @ DACR
	mrrc p15, 0, r6, r7, c2 @ TTBR 0
	mrrc p15, 1, r8, r9, c2 @ TTBR 1
	mrc p15, 0, r10, c10, c2, 0 @ PRRR
	mrc p15, 0, r11, c10, c2, 1 @ NMRR
	mrc p15, 2, r12, c0, c0, 0 @ CSSELR

	.if \store_to_vcpu == 0
	push {r2-r12} @ Push CP15 registers
	.else
	str r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
	str r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
	str r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
	str r5, [vcpu, #CP15_OFFSET(c3_DACR)]
	add r2, vcpu, #CP15_OFFSET(c2_TTBR0)
	strd r6, r7, [r2]
	add r2, vcpu, #CP15_OFFSET(c2_TTBR1)
	strd r8, r9, [r2]
	str r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
	str r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
	str r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
	.endif

	mrc p15, 0, r2, c13, c0, 1 @ CID
	mrc p15, 0, r3, c13, c0, 2 @ TID_URW
	mrc p15, 0, r4, c13, c0, 3 @ TID_URO
	mrc p15, 0, r5, c13, c0, 4 @ TID_PRIV
	mrc p15, 0, r6, c5, c0, 0 @ DFSR
	mrc p15, 0, r7, c5, c0, 1 @ IFSR
	mrc p15, 0, r8, c5, c1, 0 @ ADFSR
	mrc p15, 0, r9, c5, c1, 1 @ AIFSR
	mrc p15, 0, r10, c6, c0, 0 @ DFAR
	mrc p15, 0, r11, c6, c0, 2 @ IFAR
	mrc p15, 0, r12, c12, c0, 0 @ VBAR

	.if \store_to_vcpu == 0
	push {r2-r12} @ Push CP15 registers
	.else
	str r2, [vcpu, #CP15_OFFSET(c13_CID)]
	str r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
	str r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
	str r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
	str r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
	str r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
	str r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
	str r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
	str r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
	str r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
	str r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
	.endif

	mrc p15, 0, r2, c14, c1, 0 @ CNTKCTL

	.if \store_to_vcpu == 0
	push {r2}
	.else
	str r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
	.endif
.endm

/*
 * Reads cp15 registers from memory and writes them to hardware
 * @read_from_vcpu: If 0, registers are read in-order from the stack,
 *		    otherwise from the VCPU struct pointed to by the vcpu reg
 *
 * Assumes vcpu pointer in vcpu reg
 */
.macro write_cp15_state read_from_vcpu
	.if \read_from_vcpu == 0
	pop {r2}
	.else
	ldr r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
	.endif

	mcr p15, 0, r2, c14, c1, 0 @ CNTKCTL

	.if \read_from_vcpu == 0
	pop {r2-r12}
	.else
	ldr r2, [vcpu, #CP15_OFFSET(c13_CID)]
	ldr r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
	ldr r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
	ldr r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
	ldr r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
	ldr r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
	ldr r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
	ldr r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
	ldr r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
	ldr r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
	ldr r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
	.endif

	mcr p15, 0, r2, c13, c0, 1 @ CID
	mcr p15, 0, r3, c13, c0, 2 @ TID_URW
	mcr p15, 0, r4, c13, c0, 3 @ TID_URO
	mcr p15, 0, r5, c13, c0, 4 @ TID_PRIV
	mcr p15, 0, r6, c5, c0, 0 @ DFSR
	mcr p15, 0, r7, c5, c0, 1 @ IFSR
	mcr p15, 0, r8, c5, c1, 0 @ ADFSR
	mcr p15, 0, r9, c5, c1, 1 @ AIFSR
	mcr p15, 0, r10, c6, c0, 0 @ DFAR
	mcr p15, 0, r11, c6, c0, 2 @ IFAR
	mcr p15, 0, r12, c12, c0, 0 @ VBAR

	.if \read_from_vcpu == 0
	pop {r2-r12}
	.else
	ldr r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
	ldr r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
	ldr r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
	ldr r5, [vcpu, #CP15_OFFSET(c3_DACR)]
	add r12, vcpu, #CP15_OFFSET(c2_TTBR0)
	ldrd r6, r7, [r12]
	add r12, vcpu, #CP15_OFFSET(c2_TTBR1)
	ldrd r8, r9, [r12]
	ldr r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
	ldr r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
	ldr r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
	.endif

	mcr p15, 0, r2, c1, c0, 0 @ SCTLR
	mcr p15, 0, r3, c1, c0, 2 @ CPACR
	mcr p15, 0, r4, c2, c0, 2 @ TTBCR
	mcr p15, 0, r5, c3, c0, 0 @ DACR
	mcrr p15, 0, r6, r7, c2 @ TTBR 0
	mcrr p15, 1, r8, r9, c2 @ TTBR 1
	mcr p15, 0, r10, c10, c2, 0 @ PRRR
	mcr p15, 0, r11, c10, c2, 1 @ NMRR
	mcr p15, 2, r12, c0, c0, 0 @ CSSELR
.endm
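
/*
 * Pairing sketch (illustrative): the stack variants push and pop the
 * register blocks in mirror-image order, so a world switch can combine
 * them, e.g. on guest entry:
 *
 *	read_cp15_state store_to_vcpu=0		@ host cp15 -> stack
 *	write_cp15_state read_from_vcpu=1	@ vcpu cp15 -> hardware
 *
 * and the reverse on exit.
 */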

/*
 * Save the VGIC CPU state into memory
 *
 * Assumes vcpu pointer in vcpu reg
 */
.macro save_vgic_state
#ifdef CONFIG_KVM_ARM_VGIC
	/* Get VGIC VCTRL base into r2 */
	ldr r2, [vcpu, #VCPU_KVM]
	ldr r2, [r2, #KVM_VGIC_VCTRL]
	cmp r2, #0
	beq 2f

	/* Compute the address of struct vgic_cpu */
	add r11, vcpu, #VCPU_VGIC_CPU

	/* Save all interesting registers */
	ldr r3, [r2, #GICH_HCR]
	ldr r4, [r2, #GICH_VMCR]
	ldr r5, [r2, #GICH_MISR]
	ldr r6, [r2, #GICH_EISR0]
	ldr r7, [r2, #GICH_EISR1]
	ldr r8, [r2, #GICH_ELRSR0]
	ldr r9, [r2, #GICH_ELRSR1]
	ldr r10, [r2, #GICH_APR]

	str r3, [r11, #VGIC_CPU_HCR]
	str r4, [r11, #VGIC_CPU_VMCR]
	str r5, [r11, #VGIC_CPU_MISR]
	str r6, [r11, #VGIC_CPU_EISR]
	str r7, [r11, #(VGIC_CPU_EISR + 4)]
	str r8, [r11, #VGIC_CPU_ELRSR]
	str r9, [r11, #(VGIC_CPU_ELRSR + 4)]
	str r10, [r11, #VGIC_CPU_APR]

	/* Clear GICH_HCR */
	mov r5, #0
	str r5, [r2, #GICH_HCR]

	/* Save list registers */
	add r2, r2, #GICH_LR0
	add r3, r11, #VGIC_CPU_LR
	ldr r4, [r11, #VGIC_CPU_NR_LR]
1:	ldr r6, [r2], #4
	str r6, [r3], #4
	subs r4, r4, #1
	bne 1b
2:
#endif
.endm

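/*
 * Note: the list-register copy above is a do-while loop; it relies on
 * the GIC providing at least one list register (the count read through
 * VGIC_CPU_NR_LR ultimately comes from GICH_VTR), so the counter is
 * never zero on entry. The same holds for restore_vgic_state below.
 */
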
/*
 * Restore the VGIC CPU state from memory
 *
 * Assumes vcpu pointer in vcpu reg
 */
.macro restore_vgic_state
#ifdef CONFIG_KVM_ARM_VGIC
	/* Get VGIC VCTRL base into r2 */
	ldr r2, [vcpu, #VCPU_KVM]
	ldr r2, [r2, #KVM_VGIC_VCTRL]
	cmp r2, #0
	beq 2f

	/* Compute the address of struct vgic_cpu */
	add r11, vcpu, #VCPU_VGIC_CPU

	/* We only restore a minimal set of registers */
	ldr r3, [r11, #VGIC_CPU_HCR]
	ldr r4, [r11, #VGIC_CPU_VMCR]
	ldr r8, [r11, #VGIC_CPU_APR]

	str r3, [r2, #GICH_HCR]
	str r4, [r2, #GICH_VMCR]
	str r8, [r2, #GICH_APR]

	/* Restore list registers */
	add r2, r2, #GICH_LR0
	add r3, r11, #VGIC_CPU_LR
	ldr r4, [r11, #VGIC_CPU_NR_LR]
1:	ldr r6, [r3], #4
	str r6, [r2], #4
	subs r4, r4, #1
	bne 1b
2:
#endif
.endm

#define CNTHCTL_PL1PCTEN (1 << 0)
#define CNTHCTL_PL1PCEN (1 << 1)

/*
 * Save the timer state onto the VCPU and allow physical timer/counter access
 * for the host.
 *
 * Assumes vcpu pointer in vcpu reg
 * Clobbers r2-r5
 */
.macro save_timer_state
#ifdef CONFIG_KVM_ARM_TIMER
	ldr r4, [vcpu, #VCPU_KVM]
	ldr r2, [r4, #KVM_TIMER_ENABLED]
	cmp r2, #0
	beq 1f

	mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL
	str r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
	bic r2, #1 @ Clear ENABLE
	mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
	isb

	mrrc p15, 3, r2, r3, c14 @ CNTV_CVAL
	ldr r4, =VCPU_TIMER_CNTV_CVAL
	add r5, vcpu, r4
	strd r2, r3, [r5]

1:
#endif
	@ Allow physical timer/counter access for the host
	mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
	orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
	mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL
.endm
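
/*
 * With both bits set, PL1 (the host) may program the physical timer
 * and read the physical counter directly. restore_timer_state below
 * keeps PL1PCTEN but clears PL1PCEN, so the guest may still read the
 * physical counter while physical timer accesses trap to Hyp.
 */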

/*
 * Load the timer state from the VCPU and deny physical timer access
 * for the host (physical counter access remains allowed).
 *
 * Assumes vcpu pointer in vcpu reg
 * Clobbers r2-r5
 */
.macro restore_timer_state
	@ Disallow physical timer access for the guest
	@ Physical counter access is allowed
	mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
	orr r2, r2, #CNTHCTL_PL1PCTEN
	bic r2, r2, #CNTHCTL_PL1PCEN
	mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL

#ifdef CONFIG_KVM_ARM_TIMER
	ldr r4, [vcpu, #VCPU_KVM]
	ldr r2, [r4, #KVM_TIMER_ENABLED]
	cmp r2, #0
	beq 1f

	ldr r2, [r4, #KVM_TIMER_CNTVOFF]
	ldr r3, [r4, #(KVM_TIMER_CNTVOFF + 4)]
	mcrr p15, 4, r2, r3, c14 @ CNTVOFF

	ldr r4, =VCPU_TIMER_CNTV_CVAL
	add r5, vcpu, r4
	ldrd r2, r3, [r5]
	mcrr p15, 3, r2, r3, c14 @ CNTV_CVAL
	isb

	ldr r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
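	@ Only ENABLE and IMASK (bits 1:0) are writable; ISTATUS (bit 2)
	@ is read-only, hence the mask below.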
	and r2, r2, #3
	mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
1:
#endif
.endm

.equ vmentry, 0
.equ vmexit, 1

/* Configures the HSTR (Hyp System Trap Register) on entry/return
 * (hardware reset value is 0) */
.macro set_hstr operation
	mrc p15, 4, r2, c1, c1, 3
	ldr r3, =HSTR_T(15)
	.if \operation == vmentry
	orr r2, r2, r3 @ Trap CR{15}
	.else
	bic r2, r2, r3 @ Don't trap any CRx accesses
	.endif
	mcr p15, 4, r2, c1, c1, 3
.endm
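
/*
 * Usage sketch: the vmentry/vmexit values above let one macro body
 * serve both directions of the world switch, e.g.:
 *
 *	set_hstr vmentry	@ trap guest accesses to cp15 c15
 *	...
 *	set_hstr vmexit		@ stop trapping for the host
 */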

/* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
 * (hardware reset value is 0). Keep previous value in r2. */
.macro set_hcptr operation, mask
	mrc p15, 4, r2, c1, c1, 2
	ldr r3, =\mask
	.if \operation == vmentry
	orr r3, r2, r3 @ Trap coproc-accesses defined in mask
	.else
	bic r3, r2, r3 @ Don't trap defined coproc-accesses
	.endif
	mcr p15, 4, r3, c1, c1, 2
.endm
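
/*
 * For example, trapping guest accesses to the VFP/NEON coprocessors
 * (cp10/cp11) for lazy FP switching could look like the line below;
 * HCPTR_TCP() is assumed here to be the usual one-bit-per-coprocessor
 * mask helper:
 *
 *	set_hcptr vmentry, (HCPTR_TCP(10) | HCPTR_TCP(11))
 *
 * with the previous HCPTR value left in r2 for the caller.
 */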

/* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
 * (hardware reset value is 0) */
.macro set_hdcr operation
	mrc p15, 4, r2, c1, c1, 1
	ldr r3, =(HDCR_TPM|HDCR_TPMCR)
	.if \operation == vmentry
	orr r2, r2, r3 @ Trap some perfmon accesses
	.else
	bic r2, r2, r3 @ Don't trap any perfmon accesses
	.endif
	mcr p15, 4, r2, c1, c1, 1
.endm

/* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */
.macro configure_hyp_role operation
	mrc p15, 4, r2, c1, c1, 0 @ HCR
	bic r2, r2, #HCR_VIRT_EXCP_MASK
	ldr r3, =HCR_GUEST_MASK
	.if \operation == vmentry
	orr r2, r2, r3
	ldr r3, [vcpu, #VCPU_IRQ_LINES]
	orr r2, r2, r3
	.else
	bic r2, r2, r3
	.endif
	mcr p15, 4, r2, c1, c1, 0
.endm
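
/*
 * Note: on vmentry the word at VCPU_IRQ_LINES is ORed into the HCR so
 * that pending virtual IRQ/FIQ lines (the HCR VI/VF bits) are raised
 * for the guest; HCR_VIRT_EXCP_MASK is cleared first so that stale
 * lines do not survive from a previous run.
 */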

.macro load_vcpu
	mrc p15, 4, vcpu, c13, c0, 2 @ HTPIDR
.endm
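
/*
 * HTPIDR is the Hyp software thread ID register; the vcpu pointer is
 * stashed there so the exception vectors can recover it with a single
 * mrc after the guest's r0 has been saved.
 */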