arch/arm64/kvm/hyp.S
1 /*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18 #include <linux/linkage.h>
19
20 #include <asm/asm-offsets.h>
21 #include <asm/assembler.h>
22 #include <asm/debug-monitors.h>
23 #include <asm/esr.h>
24 #include <asm/fpsimdmacros.h>
25 #include <asm/kvm.h>
26 #include <asm/kvm_arm.h>
27 #include <asm/kvm_asm.h>
28 #include <asm/kvm_mmu.h>
29 #include <asm/memory.h>
30
31 #define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x)
32 #define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
33 #define CPU_SPSR_OFFSET(x) CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
34 #define CPU_SYSREG_OFFSET(x) (CPU_SYSREGS + 8*x)
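// These helpers turn a register index into a byte offset within the
// kvm_cpu_context structure, using the CPU_* constants generated by
// asm-offsets.c. Every GP register, SPSR and system register slot is
// 8 bytes wide, hence the "8*x" scaling.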
35
36 .text
37 .pushsection .hyp.text, "ax"
38 .align PAGE_SHIFT
39
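// save_common_regs/restore_common_regs handle the state shared by the
// host and guest switch paths: the callee-saved registers x19-x28,
// x29/lr, SP_EL0, SP_EL1 and the interrupted context's PC/PSTATE, which
// live in ELR_EL2/SPSR_EL2 while we run at EL2 (hence the "EL1 PC" and
// "EL1 pstate" annotations). The caller-saved x0-x18 need no saving for
// the host and are handled in save_guest_regs for the guest.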
40 .macro save_common_regs
41 // x2: base address for cpu context
42 // x3: tmp register
43
44 add x3, x2, #CPU_XREG_OFFSET(19)
45 stp x19, x20, [x3]
46 stp x21, x22, [x3, #16]
47 stp x23, x24, [x3, #32]
48 stp x25, x26, [x3, #48]
49 stp x27, x28, [x3, #64]
50 stp x29, lr, [x3, #80]
51
52 mrs x19, sp_el0
53 mrs x20, elr_el2 // EL1 PC
54 mrs x21, spsr_el2 // EL1 pstate
55
56 stp x19, x20, [x3, #96]
57 str x21, [x3, #112]
58
59 mrs x22, sp_el1
60 mrs x23, elr_el1
61 mrs x24, spsr_el1
62
63 str x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
64 str x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
65 str x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
66 .endm
67
68 .macro restore_common_regs
69 // x2: base address for cpu context
70 // x3: tmp register
71
72 ldr x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
73 ldr x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
74 ldr x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
75
76 msr sp_el1, x22
77 msr elr_el1, x23
78 msr spsr_el1, x24
79
80 add x3, x2, #CPU_XREG_OFFSET(31) // SP_EL0
81 ldp x19, x20, [x3]
82 ldr x21, [x3, #16]
83
84 msr sp_el0, x19
85 msr elr_el2, x20 // EL1 PC
86 msr spsr_el2, x21 // EL1 pstate
87
88 add x3, x2, #CPU_XREG_OFFSET(19)
89 ldp x19, x20, [x3]
90 ldp x21, x22, [x3, #16]
91 ldp x23, x24, [x3, #32]
92 ldp x25, x26, [x3, #48]
93 ldp x27, x28, [x3, #64]
94 ldp x29, lr, [x3, #80]
95 .endm
96
97 .macro save_host_regs
98 save_common_regs
99 .endm
100
101 .macro restore_host_regs
102 restore_common_regs
103 .endm
104
105 .macro save_fpsimd
106 // x2: cpu context address
107 // x3, x4: tmp regs
108 add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
109 fpsimd_save x3, 4
110 .endm
111
112 .macro restore_fpsimd
113 // x2: cpu context address
114 // x3, x4: tmp regs
115 add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
116 fpsimd_restore x3, 4
117 .endm
118
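// On guest exit the exception vectors (el1_sync/el1_irq) have already
// pushed the guest's x0-x3 onto the hyp stack before clobbering them,
// which is why those four registers are popped and written back to the
// context only after x4-x18 have been saved.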
119 .macro save_guest_regs
120 // x0 is the vcpu address
121 // x1 is the return code, do not corrupt!
122 // x2 is the cpu context
123 // x3 is a tmp register
124 // Guest's x0-x3 are on the stack
125
126 // Compute base to save registers
127 add x3, x2, #CPU_XREG_OFFSET(4)
128 stp x4, x5, [x3]
129 stp x6, x7, [x3, #16]
130 stp x8, x9, [x3, #32]
131 stp x10, x11, [x3, #48]
132 stp x12, x13, [x3, #64]
133 stp x14, x15, [x3, #80]
134 stp x16, x17, [x3, #96]
135 str x18, [x3, #112]
136
137 pop x6, x7 // x2, x3
138 pop x4, x5 // x0, x1
139
140 add x3, x2, #CPU_XREG_OFFSET(0)
141 stp x4, x5, [x3]
142 stp x6, x7, [x3, #16]
143
144 save_common_regs
145 .endm
146
147 .macro restore_guest_regs
148 // x0 is the vcpu address.
149 // x2 is the cpu context
150 // x3 is a tmp register
151
152 // Prepare x0-x3 for later restore
153 add x3, x2, #CPU_XREG_OFFSET(0)
154 ldp x4, x5, [x3]
155 ldp x6, x7, [x3, #16]
156 push x4, x5 // Push x0-x3 on the stack
157 push x6, x7
158
159 // x4-x18
160 ldp x4, x5, [x3, #32]
161 ldp x6, x7, [x3, #48]
162 ldp x8, x9, [x3, #64]
163 ldp x10, x11, [x3, #80]
164 ldp x12, x13, [x3, #96]
165 ldp x14, x15, [x3, #112]
166 ldp x16, x17, [x3, #128]
167 ldr x18, [x3, #144]
168
169 // x19-x29, lr, sp*, elr*, spsr*
170 restore_common_regs
171
172 // Last bits of the 64bit state
173 pop x2, x3
174 pop x0, x1
175
176 // Do not touch any register after this!
177 .endm
178
179 /*
180 * Macros to perform system register save/restore.
181 *
182 * Ordering here is absolutely critical, and must be kept consistent
183 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
184 * and in kvm_asm.h.
185 *
186 * In other words, don't touch any of these unless you know what
187 * you are doing.
188 */
189 .macro save_sysregs
190 // x2: base address for cpu context
191 // x3: tmp register
192
193 add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
194
195 mrs x4, vmpidr_el2
196 mrs x5, csselr_el1
197 mrs x6, sctlr_el1
198 mrs x7, actlr_el1
199 mrs x8, cpacr_el1
200 mrs x9, ttbr0_el1
201 mrs x10, ttbr1_el1
202 mrs x11, tcr_el1
203 mrs x12, esr_el1
204 mrs x13, afsr0_el1
205 mrs x14, afsr1_el1
206 mrs x15, far_el1
207 mrs x16, mair_el1
208 mrs x17, vbar_el1
209 mrs x18, contextidr_el1
210 mrs x19, tpidr_el0
211 mrs x20, tpidrro_el0
212 mrs x21, tpidr_el1
213 mrs x22, amair_el1
214 mrs x23, cntkctl_el1
215 mrs x24, par_el1
216 mrs x25, mdscr_el1
217
218 stp x4, x5, [x3]
219 stp x6, x7, [x3, #16]
220 stp x8, x9, [x3, #32]
221 stp x10, x11, [x3, #48]
222 stp x12, x13, [x3, #64]
223 stp x14, x15, [x3, #80]
224 stp x16, x17, [x3, #96]
225 stp x18, x19, [x3, #112]
226 stp x20, x21, [x3, #128]
227 stp x22, x23, [x3, #144]
228 stp x24, x25, [x3, #160]
229 .endm
230
231 .macro save_debug
232 // x2: base address for cpu context
233 // x3: tmp register
234
235 mrs x26, id_aa64dfr0_el1
236 ubfx x24, x26, #12, #4 // Extract BRPs
237 ubfx x25, x26, #20, #4 // Extract WRPs
238 mov w26, #15
239 sub w24, w26, w24 // How many BPs to skip
240 sub w25, w26, w25 // How many WPs to skip
241
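// Each of the mrs/str blocks below is entered through a computed
// branch: "adr 1f" plus "skip count << 2" jumps over the first few
// 4-byte instructions, so only the breakpoint/watchpoint registers
// this CPU actually implements are ever accessed.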
242 add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
243
244 adr x26, 1f
245 add x26, x26, x24, lsl #2
246 br x26
247 1:
248 mrs x20, dbgbcr15_el1
249 mrs x19, dbgbcr14_el1
250 mrs x18, dbgbcr13_el1
251 mrs x17, dbgbcr12_el1
252 mrs x16, dbgbcr11_el1
253 mrs x15, dbgbcr10_el1
254 mrs x14, dbgbcr9_el1
255 mrs x13, dbgbcr8_el1
256 mrs x12, dbgbcr7_el1
257 mrs x11, dbgbcr6_el1
258 mrs x10, dbgbcr5_el1
259 mrs x9, dbgbcr4_el1
260 mrs x8, dbgbcr3_el1
261 mrs x7, dbgbcr2_el1
262 mrs x6, dbgbcr1_el1
263 mrs x5, dbgbcr0_el1
264
265 adr x26, 1f
266 add x26, x26, x24, lsl #2
267 br x26
268
269 1:
270 str x20, [x3, #(15 * 8)]
271 str x19, [x3, #(14 * 8)]
272 str x18, [x3, #(13 * 8)]
273 str x17, [x3, #(12 * 8)]
274 str x16, [x3, #(11 * 8)]
275 str x15, [x3, #(10 * 8)]
276 str x14, [x3, #(9 * 8)]
277 str x13, [x3, #(8 * 8)]
278 str x12, [x3, #(7 * 8)]
279 str x11, [x3, #(6 * 8)]
280 str x10, [x3, #(5 * 8)]
281 str x9, [x3, #(4 * 8)]
282 str x8, [x3, #(3 * 8)]
283 str x7, [x3, #(2 * 8)]
284 str x6, [x3, #(1 * 8)]
285 str x5, [x3, #(0 * 8)]
286
287 add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
288
289 adr x26, 1f
290 add x26, x26, x24, lsl #2
291 br x26
292 1:
293 mrs x20, dbgbvr15_el1
294 mrs x19, dbgbvr14_el1
295 mrs x18, dbgbvr13_el1
296 mrs x17, dbgbvr12_el1
297 mrs x16, dbgbvr11_el1
298 mrs x15, dbgbvr10_el1
299 mrs x14, dbgbvr9_el1
300 mrs x13, dbgbvr8_el1
301 mrs x12, dbgbvr7_el1
302 mrs x11, dbgbvr6_el1
303 mrs x10, dbgbvr5_el1
304 mrs x9, dbgbvr4_el1
305 mrs x8, dbgbvr3_el1
306 mrs x7, dbgbvr2_el1
307 mrs x6, dbgbvr1_el1
308 mrs x5, dbgbvr0_el1
309
310 adr x26, 1f
311 add x26, x26, x24, lsl #2
312 br x26
313
314 1:
315 str x20, [x3, #(15 * 8)]
316 str x19, [x3, #(14 * 8)]
317 str x18, [x3, #(13 * 8)]
318 str x17, [x3, #(12 * 8)]
319 str x16, [x3, #(11 * 8)]
320 str x15, [x3, #(10 * 8)]
321 str x14, [x3, #(9 * 8)]
322 str x13, [x3, #(8 * 8)]
323 str x12, [x3, #(7 * 8)]
324 str x11, [x3, #(6 * 8)]
325 str x10, [x3, #(5 * 8)]
326 str x9, [x3, #(4 * 8)]
327 str x8, [x3, #(3 * 8)]
328 str x7, [x3, #(2 * 8)]
329 str x6, [x3, #(1 * 8)]
330 str x5, [x3, #(0 * 8)]
331
332 add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
333
334 adr x26, 1f
335 add x26, x26, x25, lsl #2
336 br x26
337 1:
338 mrs x20, dbgwcr15_el1
339 mrs x19, dbgwcr14_el1
340 mrs x18, dbgwcr13_el1
341 mrs x17, dbgwcr12_el1
342 mrs x16, dbgwcr11_el1
343 mrs x15, dbgwcr10_el1
344 mrs x14, dbgwcr9_el1
345 mrs x13, dbgwcr8_el1
346 mrs x12, dbgwcr7_el1
347 mrs x11, dbgwcr6_el1
348 mrs x10, dbgwcr5_el1
349 mrs x9, dbgwcr4_el1
350 mrs x8, dbgwcr3_el1
351 mrs x7, dbgwcr2_el1
352 mrs x6, dbgwcr1_el1
353 mrs x5, dbgwcr0_el1
354
355 adr x26, 1f
356 add x26, x26, x25, lsl #2
357 br x26
358
359 1:
360 str x20, [x3, #(15 * 8)]
361 str x19, [x3, #(14 * 8)]
362 str x18, [x3, #(13 * 8)]
363 str x17, [x3, #(12 * 8)]
364 str x16, [x3, #(11 * 8)]
365 str x15, [x3, #(10 * 8)]
366 str x14, [x3, #(9 * 8)]
367 str x13, [x3, #(8 * 8)]
368 str x12, [x3, #(7 * 8)]
369 str x11, [x3, #(6 * 8)]
370 str x10, [x3, #(5 * 8)]
371 str x9, [x3, #(4 * 8)]
372 str x8, [x3, #(3 * 8)]
373 str x7, [x3, #(2 * 8)]
374 str x6, [x3, #(1 * 8)]
375 str x5, [x3, #(0 * 8)]
376
377 add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
378
379 adr x26, 1f
380 add x26, x26, x25, lsl #2
381 br x26
382 1:
383 mrs x20, dbgwvr15_el1
384 mrs x19, dbgwvr14_el1
385 mrs x18, dbgwvr13_el1
386 mrs x17, dbgwvr12_el1
387 mrs x16, dbgwvr11_el1
388 mrs x15, dbgwvr10_el1
389 mrs x14, dbgwvr9_el1
390 mrs x13, dbgwvr8_el1
391 mrs x12, dbgwvr7_el1
392 mrs x11, dbgwvr6_el1
393 mrs x10, dbgwvr5_el1
394 mrs x9, dbgwvr4_el1
395 mrs x8, dbgwvr3_el1
396 mrs x7, dbgwvr2_el1
397 mrs x6, dbgwvr1_el1
398 mrs x5, dbgwvr0_el1
399
400 adr x26, 1f
401 add x26, x26, x25, lsl #2
402 br x26
403
404 1:
405 str x20, [x3, #(15 * 8)]
406 str x19, [x3, #(14 * 8)]
407 str x18, [x3, #(13 * 8)]
408 str x17, [x3, #(12 * 8)]
409 str x16, [x3, #(11 * 8)]
410 str x15, [x3, #(10 * 8)]
411 str x14, [x3, #(9 * 8)]
412 str x13, [x3, #(8 * 8)]
413 str x12, [x3, #(7 * 8)]
414 str x11, [x3, #(6 * 8)]
415 str x10, [x3, #(5 * 8)]
416 str x9, [x3, #(4 * 8)]
417 str x8, [x3, #(3 * 8)]
418 str x7, [x3, #(2 * 8)]
419 str x6, [x3, #(1 * 8)]
420 str x5, [x3, #(0 * 8)]
421
422 mrs x21, mdccint_el1
423 str x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
424 .endm
425
426 .macro restore_sysregs
427 // x2: base address for cpu context
428 // x3: tmp register
429
430 add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
431
432 ldp x4, x5, [x3]
433 ldp x6, x7, [x3, #16]
434 ldp x8, x9, [x3, #32]
435 ldp x10, x11, [x3, #48]
436 ldp x12, x13, [x3, #64]
437 ldp x14, x15, [x3, #80]
438 ldp x16, x17, [x3, #96]
439 ldp x18, x19, [x3, #112]
440 ldp x20, x21, [x3, #128]
441 ldp x22, x23, [x3, #144]
442 ldp x24, x25, [x3, #160]
443
444 msr vmpidr_el2, x4
445 msr csselr_el1, x5
446 msr sctlr_el1, x6
447 msr actlr_el1, x7
448 msr cpacr_el1, x8
449 msr ttbr0_el1, x9
450 msr ttbr1_el1, x10
451 msr tcr_el1, x11
452 msr esr_el1, x12
453 msr afsr0_el1, x13
454 msr afsr1_el1, x14
455 msr far_el1, x15
456 msr mair_el1, x16
457 msr vbar_el1, x17
458 msr contextidr_el1, x18
459 msr tpidr_el0, x19
460 msr tpidrro_el0, x20
461 msr tpidr_el1, x21
462 msr amair_el1, x22
463 msr cntkctl_el1, x23
464 msr par_el1, x24
465 msr mdscr_el1, x25
466 .endm
467
468 .macro restore_debug
469 // x2: base address for cpu context
470 // x3: tmp register
471
472 mrs x26, id_aa64dfr0_el1
473 ubfx x24, x26, #12, #4 // Extract BRPs
474 ubfx x25, x26, #20, #4 // Extract WRPs
475 mov w26, #15
476 sub w24, w26, w24 // How many BPs to skip
477 sub w25, w26, w25 // How many WPs to skip
478
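// Same computed-branch trick as in save_debug: skip the debug
// registers that are not implemented on this CPU.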
479 add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
480
481 adr x26, 1f
482 add x26, x26, x24, lsl #2
483 br x26
484 1:
485 ldr x20, [x3, #(15 * 8)]
486 ldr x19, [x3, #(14 * 8)]
487 ldr x18, [x3, #(13 * 8)]
488 ldr x17, [x3, #(12 * 8)]
489 ldr x16, [x3, #(11 * 8)]
490 ldr x15, [x3, #(10 * 8)]
491 ldr x14, [x3, #(9 * 8)]
492 ldr x13, [x3, #(8 * 8)]
493 ldr x12, [x3, #(7 * 8)]
494 ldr x11, [x3, #(6 * 8)]
495 ldr x10, [x3, #(5 * 8)]
496 ldr x9, [x3, #(4 * 8)]
497 ldr x8, [x3, #(3 * 8)]
498 ldr x7, [x3, #(2 * 8)]
499 ldr x6, [x3, #(1 * 8)]
500 ldr x5, [x3, #(0 * 8)]
501
502 adr x26, 1f
503 add x26, x26, x24, lsl #2
504 br x26
505 1:
506 msr dbgbcr15_el1, x20
507 msr dbgbcr14_el1, x19
508 msr dbgbcr13_el1, x18
509 msr dbgbcr12_el1, x17
510 msr dbgbcr11_el1, x16
511 msr dbgbcr10_el1, x15
512 msr dbgbcr9_el1, x14
513 msr dbgbcr8_el1, x13
514 msr dbgbcr7_el1, x12
515 msr dbgbcr6_el1, x11
516 msr dbgbcr5_el1, x10
517 msr dbgbcr4_el1, x9
518 msr dbgbcr3_el1, x8
519 msr dbgbcr2_el1, x7
520 msr dbgbcr1_el1, x6
521 msr dbgbcr0_el1, x5
522
523 add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
524
525 adr x26, 1f
526 add x26, x26, x24, lsl #2
527 br x26
528 1:
529 ldr x20, [x3, #(15 * 8)]
530 ldr x19, [x3, #(14 * 8)]
531 ldr x18, [x3, #(13 * 8)]
532 ldr x17, [x3, #(12 * 8)]
533 ldr x16, [x3, #(11 * 8)]
534 ldr x15, [x3, #(10 * 8)]
535 ldr x14, [x3, #(9 * 8)]
536 ldr x13, [x3, #(8 * 8)]
537 ldr x12, [x3, #(7 * 8)]
538 ldr x11, [x3, #(6 * 8)]
539 ldr x10, [x3, #(5 * 8)]
540 ldr x9, [x3, #(4 * 8)]
541 ldr x8, [x3, #(3 * 8)]
542 ldr x7, [x3, #(2 * 8)]
543 ldr x6, [x3, #(1 * 8)]
544 ldr x5, [x3, #(0 * 8)]
545
546 adr x26, 1f
547 add x26, x26, x24, lsl #2
548 br x26
549 1:
550 msr dbgbvr15_el1, x20
551 msr dbgbvr14_el1, x19
552 msr dbgbvr13_el1, x18
553 msr dbgbvr12_el1, x17
554 msr dbgbvr11_el1, x16
555 msr dbgbvr10_el1, x15
556 msr dbgbvr9_el1, x14
557 msr dbgbvr8_el1, x13
558 msr dbgbvr7_el1, x12
559 msr dbgbvr6_el1, x11
560 msr dbgbvr5_el1, x10
561 msr dbgbvr4_el1, x9
562 msr dbgbvr3_el1, x8
563 msr dbgbvr2_el1, x7
564 msr dbgbvr1_el1, x6
565 msr dbgbvr0_el1, x5
566
567 add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
568
569 adr x26, 1f
570 add x26, x26, x25, lsl #2
571 br x26
572 1:
573 ldr x20, [x3, #(15 * 8)]
574 ldr x19, [x3, #(14 * 8)]
575 ldr x18, [x3, #(13 * 8)]
576 ldr x17, [x3, #(12 * 8)]
577 ldr x16, [x3, #(11 * 8)]
578 ldr x15, [x3, #(10 * 8)]
579 ldr x14, [x3, #(9 * 8)]
580 ldr x13, [x3, #(8 * 8)]
581 ldr x12, [x3, #(7 * 8)]
582 ldr x11, [x3, #(6 * 8)]
583 ldr x10, [x3, #(5 * 8)]
584 ldr x9, [x3, #(4 * 8)]
585 ldr x8, [x3, #(3 * 8)]
586 ldr x7, [x3, #(2 * 8)]
587 ldr x6, [x3, #(1 * 8)]
588 ldr x5, [x3, #(0 * 8)]
589
590 adr x26, 1f
591 add x26, x26, x25, lsl #2
592 br x26
593 1:
594 msr dbgwcr15_el1, x20
595 msr dbgwcr14_el1, x19
596 msr dbgwcr13_el1, x18
597 msr dbgwcr12_el1, x17
598 msr dbgwcr11_el1, x16
599 msr dbgwcr10_el1, x15
600 msr dbgwcr9_el1, x14
601 msr dbgwcr8_el1, x13
602 msr dbgwcr7_el1, x12
603 msr dbgwcr6_el1, x11
604 msr dbgwcr5_el1, x10
605 msr dbgwcr4_el1, x9
606 msr dbgwcr3_el1, x8
607 msr dbgwcr2_el1, x7
608 msr dbgwcr1_el1, x6
609 msr dbgwcr0_el1, x5
610
611 add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
612
613 adr x26, 1f
614 add x26, x26, x25, lsl #2
615 br x26
616 1:
617 ldr x20, [x3, #(15 * 8)]
618 ldr x19, [x3, #(14 * 8)]
619 ldr x18, [x3, #(13 * 8)]
620 ldr x17, [x3, #(12 * 8)]
621 ldr x16, [x3, #(11 * 8)]
622 ldr x15, [x3, #(10 * 8)]
623 ldr x14, [x3, #(9 * 8)]
624 ldr x13, [x3, #(8 * 8)]
625 ldr x12, [x3, #(7 * 8)]
626 ldr x11, [x3, #(6 * 8)]
627 ldr x10, [x3, #(5 * 8)]
628 ldr x9, [x3, #(4 * 8)]
629 ldr x8, [x3, #(3 * 8)]
630 ldr x7, [x3, #(2 * 8)]
631 ldr x6, [x3, #(1 * 8)]
632 ldr x5, [x3, #(0 * 8)]
633
634 adr x26, 1f
635 add x26, x26, x25, lsl #2
636 br x26
637 1:
638 msr dbgwvr15_el1, x20
639 msr dbgwvr14_el1, x19
640 msr dbgwvr13_el1, x18
641 msr dbgwvr12_el1, x17
642 msr dbgwvr11_el1, x16
643 msr dbgwvr10_el1, x15
644 msr dbgwvr9_el1, x14
645 msr dbgwvr8_el1, x13
646 msr dbgwvr7_el1, x12
647 msr dbgwvr6_el1, x11
648 msr dbgwvr5_el1, x10
649 msr dbgwvr4_el1, x9
650 msr dbgwvr3_el1, x8
651 msr dbgwvr2_el1, x7
652 msr dbgwvr1_el1, x6
653 msr dbgwvr0_el1, x5
654
655 ldr x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
656 msr mdccint_el1, x21
657 .endm
658
659 .macro skip_32bit_state tmp, target
660 // Skip 32bit state if not needed
661 mrs \tmp, hcr_el2
662 tbnz \tmp, #HCR_RW_SHIFT, \target
663 .endm
664
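// ID_PFR0_EL1[15:12] is the State3 field, which is non-zero when
// ThumbEE is implemented; testing bit 12 alone is sufficient since the
// field is either 0 or 1.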
665 .macro skip_tee_state tmp, target
666 // Skip ThumbEE state if not needed
667 mrs \tmp, id_pfr0_el1
668 tbz \tmp, #12, \target
669 .endm
670
671 .macro skip_debug_state tmp, target
672 ldr \tmp, [x0, #VCPU_DEBUG_FLAGS]
673 tbz \tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
674 .endm
675
676 .macro compute_debug_state target
677 // Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
678 // is set, we do a full save/restore cycle and disable trapping.
679 add x25, x0, #VCPU_CONTEXT
680
681 // Check the state of MDSCR_EL1
682 ldr x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
683 and x26, x25, #DBG_MDSCR_KDE
684 and x25, x25, #DBG_MDSCR_MDE
685 adds xzr, x25, x26
686 b.eq 9998f // Nothing to see there
687
688 	// If any interesting bits were set, we must set the flag
689 mov x26, #KVM_ARM64_DEBUG_DIRTY
690 str x26, [x0, #VCPU_DEBUG_FLAGS]
691 b 9999f // Don't skip restore
692
693 9998:
694 // Otherwise load the flags from memory in case we recently
695 // trapped
696 skip_debug_state x25, \target
697 9999:
698 .endm
699
700 .macro save_guest_32bit_state
701 skip_32bit_state x3, 1f
702
703 add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
704 mrs x4, spsr_abt
705 mrs x5, spsr_und
706 mrs x6, spsr_irq
707 mrs x7, spsr_fiq
708 stp x4, x5, [x3]
709 stp x6, x7, [x3, #16]
710
711 add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
712 mrs x4, dacr32_el2
713 mrs x5, ifsr32_el2
714 mrs x6, fpexc32_el2
715 stp x4, x5, [x3]
716 str x6, [x3, #16]
717
718 skip_debug_state x8, 2f
719 mrs x7, dbgvcr32_el2
720 str x7, [x3, #24]
721 2:
722 skip_tee_state x8, 1f
723
724 add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
725 mrs x4, teecr32_el1
726 mrs x5, teehbr32_el1
727 stp x4, x5, [x3]
728 1:
729 .endm
730
731 .macro restore_guest_32bit_state
732 skip_32bit_state x3, 1f
733
734 add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
735 ldp x4, x5, [x3]
736 ldp x6, x7, [x3, #16]
737 msr spsr_abt, x4
738 msr spsr_und, x5
739 msr spsr_irq, x6
740 msr spsr_fiq, x7
741
742 add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
743 ldp x4, x5, [x3]
744 ldr x6, [x3, #16]
745 msr dacr32_el2, x4
746 msr ifsr32_el2, x5
747 msr fpexc32_el2, x6
748
749 skip_debug_state x8, 2f
750 ldr x7, [x3, #24]
751 msr dbgvcr32_el2, x7
752 2:
753 skip_tee_state x8, 1f
754
755 add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
756 ldp x4, x5, [x3]
757 msr teecr32_el1, x4
758 msr teehbr32_el1, x5
759 1:
760 .endm
761
762 .macro activate_traps
763 ldr x2, [x0, #VCPU_HCR_EL2]
764 msr hcr_el2, x2
765 mov x2, #CPTR_EL2_TTA
766 msr cptr_el2, x2
767
768 mov x2, #(1 << 15) // Trap CP15 Cr=15
769 msr hstr_el2, x2
770
771 mrs x2, mdcr_el2
772 and x2, x2, #MDCR_EL2_HPMN_MASK
773 orr x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
774 orr x2, x2, #(MDCR_EL2_TDRA | MDCR_EL2_TDOSA)
775
776 // Check for KVM_ARM64_DEBUG_DIRTY, and set debug to trap
777 // if not dirty.
778 ldr x3, [x0, #VCPU_DEBUG_FLAGS]
779 tbnz x3, #KVM_ARM64_DEBUG_DIRTY_SHIFT, 1f
780 orr x2, x2, #MDCR_EL2_TDA
781 1:
782 msr mdcr_el2, x2
783 .endm
784
785 .macro deactivate_traps
786 mov x2, #HCR_RW
787 msr hcr_el2, x2
788 msr cptr_el2, xzr
789 msr hstr_el2, xzr
790
791 mrs x2, mdcr_el2
792 and x2, x2, #MDCR_EL2_HPMN_MASK
793 msr mdcr_el2, x2
794 .endm
795
796 .macro activate_vm
797 ldr x1, [x0, #VCPU_KVM]
798 kern_hyp_va x1
799 ldr x2, [x1, #KVM_VTTBR]
800 msr vttbr_el2, x2
801 .endm
802
803 .macro deactivate_vm
804 msr vttbr_el2, xzr
805 .endm
806
807 /*
808 * Call into the vgic backend for state saving
809 */
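// __vgic_sr_vectors (reserved further down in this file) holds the
// save/restore entry points for the GICv2 or GICv3 backend, set up by
// the vgic init code.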
810 .macro save_vgic_state
811 adr x24, __vgic_sr_vectors
812 ldr x24, [x24, VGIC_SAVE_FN]
813 kern_hyp_va x24
814 blr x24
815 mrs x24, hcr_el2
816 mov x25, #HCR_INT_OVERRIDE
817 neg x25, x25
818 and x24, x24, x25
819 msr hcr_el2, x24
820 .endm
821
822 /*
823 * Call into the vgic backend for state restoring
824 */
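// Reflect the host-requested virtual interrupt lines (VCPU_IRQ_LINES)
// and the IMO/FMO overrides in HCR_EL2 before calling the backend's
// restore hook.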
825 .macro restore_vgic_state
826 mrs x24, hcr_el2
827 ldr x25, [x0, #VCPU_IRQ_LINES]
828 orr x24, x24, #HCR_INT_OVERRIDE
829 orr x24, x24, x25
830 msr hcr_el2, x24
831 adr x24, __vgic_sr_vectors
832 ldr x24, [x24, #VGIC_RESTORE_FN]
833 kern_hyp_va x24
834 blr x24
835 .endm
836
837 .macro save_timer_state
838 // x0: vcpu pointer
839 ldr x2, [x0, #VCPU_KVM]
840 kern_hyp_va x2
841 ldr w3, [x2, #KVM_TIMER_ENABLED]
842 cbz w3, 1f
843
844 mrs x3, cntv_ctl_el0
845 and x3, x3, #3
846 str w3, [x0, #VCPU_TIMER_CNTV_CTL]
847 bic x3, x3, #1 // Clear Enable
848 msr cntv_ctl_el0, x3
849
850 isb
851
852 mrs x3, cntv_cval_el0
853 str x3, [x0, #VCPU_TIMER_CNTV_CVAL]
854
855 1:
856 // Allow physical timer/counter access for the host
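	// by setting CNTHCTL_EL2.EL1PCTEN (bit 0) and EL1PCEN (bit 1)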
857 mrs x2, cnthctl_el2
858 orr x2, x2, #3
859 msr cnthctl_el2, x2
860
861 // Clear cntvoff for the host
862 msr cntvoff_el2, xzr
863 .endm
864
865 .macro restore_timer_state
866 // x0: vcpu pointer
867 // Disallow physical timer access for the guest
868 // Physical counter access is allowed
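	// (clear CNTHCTL_EL2.EL1PCEN, set CNTHCTL_EL2.EL1PCTEN)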
869 mrs x2, cnthctl_el2
870 orr x2, x2, #1
871 bic x2, x2, #2
872 msr cnthctl_el2, x2
873
874 ldr x2, [x0, #VCPU_KVM]
875 kern_hyp_va x2
876 ldr w3, [x2, #KVM_TIMER_ENABLED]
877 cbz w3, 1f
878
879 ldr x3, [x2, #KVM_TIMER_CNTVOFF]
880 msr cntvoff_el2, x3
881 ldr x2, [x0, #VCPU_TIMER_CNTV_CVAL]
882 msr cntv_cval_el0, x2
883 isb
884
885 ldr w2, [x0, #VCPU_TIMER_CNTV_CTL]
886 and x2, x2, #3
887 msr cntv_ctl_el0, x2
888 1:
889 .endm
890
891 __save_sysregs:
892 save_sysregs
893 ret
894
895 __restore_sysregs:
896 restore_sysregs
897 ret
898
899 __save_debug:
900 save_debug
901 ret
902
903 __restore_debug:
904 restore_debug
905 ret
906
907 __save_fpsimd:
908 save_fpsimd
909 ret
910
911 __restore_fpsimd:
912 restore_fpsimd
913 ret
914
915 /*
916 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
917 *
918 * This is the world switch. The first half of the function
919 * deals with entering the guest, and anything from __kvm_vcpu_return
920 * to the end of the function deals with reentering the host.
921 * On the enter path, only x0 (vcpu pointer) must be preserved until
922 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
923 * code) must both be preserved until the epilogue.
924 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
925 */
926 ENTRY(__kvm_vcpu_run)
927 kern_hyp_va x0
928 msr tpidr_el2, x0 // Save the vcpu register
929
930 // Host context
931 ldr x2, [x0, #VCPU_HOST_CONTEXT]
932 kern_hyp_va x2
933
934 save_host_regs
935 bl __save_fpsimd
936 bl __save_sysregs
937
938 compute_debug_state 1f
939 bl __save_debug
940 1:
941 activate_traps
942 activate_vm
943
944 restore_vgic_state
945 restore_timer_state
946
947 // Guest context
948 add x2, x0, #VCPU_CONTEXT
949
950 bl __restore_sysregs
951 bl __restore_fpsimd
952
953 skip_debug_state x3, 1f
954 bl __restore_debug
955 1:
956 restore_guest_32bit_state
957 restore_guest_regs
958
959 // That's it, no more messing around.
960 eret
961
962 __kvm_vcpu_return:
963 // Assume x0 is the vcpu pointer, x1 the return code
964 // Guest's x0-x3 are on the stack
965
966 // Guest context
967 add x2, x0, #VCPU_CONTEXT
968
969 save_guest_regs
970 bl __save_fpsimd
971 bl __save_sysregs
972
973 skip_debug_state x3, 1f
974 bl __save_debug
975 1:
976 save_guest_32bit_state
977
978 save_timer_state
979 save_vgic_state
980
981 deactivate_traps
982 deactivate_vm
983
984 // Host context
985 ldr x2, [x0, #VCPU_HOST_CONTEXT]
986 kern_hyp_va x2
987
988 bl __restore_sysregs
989 bl __restore_fpsimd
990
991 skip_debug_state x3, 1f
992 // Clear the dirty flag for the next run, as all the state has
993 // already been saved. Note that we nuke the whole 64bit word.
994 // If we ever add more flags, we'll have to be more careful...
995 str xzr, [x0, #VCPU_DEBUG_FLAGS]
996 bl __restore_debug
997 1:
998 restore_host_regs
999
1000 mov x0, x1
1001 ret
1002 END(__kvm_vcpu_run)
1003
1004 // void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
1005 ENTRY(__kvm_tlb_flush_vmid_ipa)
1006 dsb ishst
1007
1008 kern_hyp_va x0
1009 ldr x2, [x0, #KVM_VTTBR]
1010 msr vttbr_el2, x2
1011 isb
1012
1013 /*
1014 * We could do so much better if we had the VA as well.
1015 * Instead, we invalidate Stage-2 for this IPA, and the
1016 * whole of Stage-1. Weep...
1017 */
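	// The TLBI takes the IPA shifted right by 12 bits.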
1018 lsr x1, x1, #12
1019 tlbi ipas2e1is, x1
1020 /*
1021 * We have to ensure completion of the invalidation at Stage-2,
1022 * since a table walk on another CPU could refill a TLB with a
1023 * complete (S1 + S2) walk based on the old Stage-2 mapping if
1024 * the Stage-1 invalidation happened first.
1025 */
1026 dsb ish
1027 tlbi vmalle1is
1028 dsb ish
1029 isb
1030
1031 msr vttbr_el2, xzr
1032 ret
1033 ENDPROC(__kvm_tlb_flush_vmid_ipa)
1034
1035 /**
1036 * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
1037  * @kvm: pointer to the kvm structure
1038 *
1039  * Invalidates all Stage 1 and Stage 2 TLB entries for the current VMID.
1040 */
1041 ENTRY(__kvm_tlb_flush_vmid)
1042 dsb ishst
1043
1044 kern_hyp_va x0
1045 ldr x2, [x0, #KVM_VTTBR]
1046 msr vttbr_el2, x2
1047 isb
1048
1049 tlbi vmalls12e1is
1050 dsb ish
1051 isb
1052
1053 msr vttbr_el2, xzr
1054 ret
1055 ENDPROC(__kvm_tlb_flush_vmid)
1056
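// Flush everything associated with guest contexts: all Stage 1 and
// Stage 2 TLB entries for all VMIDs, plus the whole instruction cache
// (Inner Shareable). Typically used when the VMID generation rolls
// over and all VMIDs are recycled.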
1057 ENTRY(__kvm_flush_vm_context)
1058 dsb ishst
1059 tlbi alle1is
1060 ic ialluis
1061 dsb ish
1062 ret
1063 ENDPROC(__kvm_flush_vm_context)
1064
1065 // struct vgic_sr_vectors __vgic_sr_vectors;
1066 .align 3
1067 ENTRY(__vgic_sr_vectors)
1068 .skip VGIC_SR_VECTOR_SZ
1069 ENDPROC(__vgic_sr_vectors)
1070
1071 __kvm_hyp_panic:
1072 // Guess the context by looking at VTTBR:
1073 	// If zero, then we're already in host context.
1074 	// Otherwise restore a minimal host context before panicking.
1075 mrs x0, vttbr_el2
1076 cbz x0, 1f
1077
1078 mrs x0, tpidr_el2
1079
1080 deactivate_traps
1081 deactivate_vm
1082
1083 ldr x2, [x0, #VCPU_HOST_CONTEXT]
1084 kern_hyp_va x2
1085
1086 bl __restore_sysregs
1087
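// Translate the hyp VA of the panic string back into a kernel VA,
// using the HYP_PAGE_OFFSET/PAGE_OFFSET pair stored at 2: below, so
// that panic() can dereference the format string once we ERET to EL1.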
1088 1: adr x0, __hyp_panic_str
1089 adr x1, 2f
1090 ldp x2, x3, [x1]
1091 sub x0, x0, x2
1092 add x0, x0, x3
1093 mrs x1, spsr_el2
1094 mrs x2, elr_el2
1095 mrs x3, esr_el2
1096 mrs x4, far_el2
1097 mrs x5, hpfar_el2
1098 mrs x6, par_el1
1099 mrs x7, tpidr_el2
1100
1101 mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
1102 PSR_MODE_EL1h)
1103 msr spsr_el2, lr
1104 ldr lr, =panic
1105 msr elr_el2, lr
1106 eret
1107
1108 .align 3
1109 2: .quad HYP_PAGE_OFFSET
1110 .quad PAGE_OFFSET
1111 ENDPROC(__kvm_hyp_panic)
1112
1113 __hyp_panic_str:
1114 .ascii "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"
1115
1116 .align 2
1117
1118 /*
1119 * u64 kvm_call_hyp(void *hypfn, ...);
1120 *
1121  * This is not really a variadic function in the classic C sense, and care must
1122 * be taken when calling this to ensure parameters are passed in registers
1123 * only, since the stack will change between the caller and the callee.
1124 *
1125 * Call the function with the first argument containing a pointer to the
1126 * function you wish to call in Hyp mode, and subsequent arguments will be
1127 * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
1128 * function pointer can be passed). The function being called must be mapped
1129 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
1130  * passed in x0 and x1.
1131 *
1132 * A function pointer with a value of 0 has a special meaning, and is
1133 * used to implement __hyp_get_vectors in the same way as in
1134 * arch/arm64/kernel/hyp_stub.S.
1135 */
1136 ENTRY(kvm_call_hyp)
1137 hvc #0
1138 ret
1139 ENDPROC(kvm_call_hyp)
1140
1141 .macro invalid_vector label, target
1142 .align 2
1143 \label:
1144 b \target
1145 ENDPROC(\label)
1146 .endm
1147
1148 /* None of these should ever happen */
1149 invalid_vector el2t_sync_invalid, __kvm_hyp_panic
1150 invalid_vector el2t_irq_invalid, __kvm_hyp_panic
1151 invalid_vector el2t_fiq_invalid, __kvm_hyp_panic
1152 invalid_vector el2t_error_invalid, __kvm_hyp_panic
1153 invalid_vector el2h_sync_invalid, __kvm_hyp_panic
1154 invalid_vector el2h_irq_invalid, __kvm_hyp_panic
1155 invalid_vector el2h_fiq_invalid, __kvm_hyp_panic
1156 invalid_vector el2h_error_invalid, __kvm_hyp_panic
1157 invalid_vector el1_sync_invalid, __kvm_hyp_panic
1158 invalid_vector el1_irq_invalid, __kvm_hyp_panic
1159 invalid_vector el1_fiq_invalid, __kvm_hyp_panic
1160 invalid_vector el1_error_invalid, __kvm_hyp_panic
1161
1162 el1_sync: // Guest trapped into EL2
1163 push x0, x1
1164 push x2, x3
1165
1166 mrs x1, esr_el2
1167 lsr x2, x1, #ESR_ELx_EC_SHIFT
1168
1169 cmp x2, #ESR_ELx_EC_HVC64
1170 b.ne el1_trap
1171
1172 mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest
1173 cbnz x3, el1_trap // called HVC
1174
1175 /* Here, we're pretty sure the host called HVC. */
1176 pop x2, x3
1177 pop x0, x1
1178
1179 /* Check for __hyp_get_vectors */
1180 cbnz x0, 1f
1181 mrs x0, vbar_el2
1182 b 2f
1183
1184 1: push lr, xzr
1185
1186 /*
1187 * Compute the function address in EL2, and shuffle the parameters.
1188 */
1189 kern_hyp_va x0
1190 mov lr, x0
1191 mov x0, x1
1192 mov x1, x2
1193 mov x2, x3
1194 blr lr
1195
1196 pop lr, xzr
1197 2: eret
1198
1199 el1_trap:
1200 /*
1201 * x1: ESR
1202 * x2: ESR_EC
1203 */
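	// cmp/ccmp leave the flags as EQ only if the EC is a data or an
	// instruction abort from a lower EL; anything else is not an abort
	// we need to resolve here.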
1204 cmp x2, #ESR_ELx_EC_DABT_LOW
1205 mov x0, #ESR_ELx_EC_IABT_LOW
1206 ccmp x2, x0, #4, ne
1207 b.ne 1f // Not an abort we care about
1208
1209 /* This is an abort. Check for permission fault */
1210 and x2, x1, #ESR_ELx_FSC_TYPE
1211 cmp x2, #FSC_PERM
1212 b.ne 1f // Not a permission fault
1213
1214 /*
1215 * Check for Stage-1 page table walk, which is guaranteed
1216 * to give a valid HPFAR_EL2.
1217 */
1218 tbnz x1, #7, 1f // S1PTW is set
1219
1220 /* Preserve PAR_EL1 */
1221 mrs x3, par_el1
1222 push x3, xzr
1223
1224 /*
1225 * Permission fault, HPFAR_EL2 is invalid.
1226 * Resolve the IPA the hard way using the guest VA.
1227 * Stage-1 translation already validated the memory access rights.
1228 * As such, we can use the EL1 translation regime, and don't have
1229 * to distinguish between EL0 and EL1 access.
1230 */
1231 mrs x2, far_el2
1232 at s1e1r, x2
1233 isb
1234
1235 /* Read result */
1236 mrs x3, par_el1
1237 pop x0, xzr // Restore PAR_EL1 from the stack
1238 msr par_el1, x0
1239 tbnz x3, #0, 3f // Bail out if we failed the translation
1240 ubfx x3, x3, #12, #36 // Extract IPA
1241 lsl x3, x3, #4 // and present it like HPFAR
1242 b 2f
1243
1244 1: mrs x3, hpfar_el2
1245 mrs x2, far_el2
1246
1247 2: mrs x0, tpidr_el2
1248 str w1, [x0, #VCPU_ESR_EL2]
1249 str x2, [x0, #VCPU_FAR_EL2]
1250 str x3, [x0, #VCPU_HPFAR_EL2]
1251
1252 mov x1, #ARM_EXCEPTION_TRAP
1253 b __kvm_vcpu_return
1254
1255 /*
1256 * Translation failed. Just return to the guest and
1257 * let it fault again. Another CPU is probably playing
1258 * behind our back.
1259 */
1260 3: pop x2, x3
1261 pop x0, x1
1262
1263 eret
1264
1265 el1_irq:
1266 push x0, x1
1267 push x2, x3
1268 mrs x0, tpidr_el2
1269 mov x1, #ARM_EXCEPTION_IRQ
1270 b __kvm_vcpu_return
1271
1272 .ltorg
1273
1274 .align 11
1275
1276 ENTRY(__kvm_hyp_vector)
1277 ventry el2t_sync_invalid // Synchronous EL2t
1278 ventry el2t_irq_invalid // IRQ EL2t
1279 ventry el2t_fiq_invalid // FIQ EL2t
1280 ventry el2t_error_invalid // Error EL2t
1281
1282 ventry el2h_sync_invalid // Synchronous EL2h
1283 ventry el2h_irq_invalid // IRQ EL2h
1284 ventry el2h_fiq_invalid // FIQ EL2h
1285 ventry el2h_error_invalid // Error EL2h
1286
1287 ventry el1_sync // Synchronous 64-bit EL1
1288 ventry el1_irq // IRQ 64-bit EL1
1289 ventry el1_fiq_invalid // FIQ 64-bit EL1
1290 ventry el1_error_invalid // Error 64-bit EL1
1291
1292 ventry el1_sync // Synchronous 32-bit EL1
1293 ventry el1_irq // IRQ 32-bit EL1
1294 ventry el1_fiq_invalid // FIQ 32-bit EL1
1295 ventry el1_error_invalid // Error 32-bit EL1
1296 ENDPROC(__kvm_hyp_vector)
1297
1298 .popsection