/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
/*
 * How the whole thing works (courtesy of Christoffer Dall):
 *
 * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
 *   something is pending on the CPU interface.
 * - Interrupts that are pending on the distributor are stored on the
 *   vgic.irq_pending vgic bitmap (this bitmap is updated by both user land
 *   ioctls and guest mmio ops, and other in-kernel peripherals such as the
 *   arch. timers).
 * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
 *   recalculated.
 * - To calculate the oracle, we need info for each cpu from
 *   compute_pending_for_cpu, which considers:
 *   - PPI: dist->irq_pending & dist->irq_enable
 *   - SPI: dist->irq_pending & dist->irq_enable & dist->irq_spi_target
 *   - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
 *     registers, stored on each vcpu. We only keep one bit of
 *     information per interrupt, making sure that only one vcpu can
 *     accept the interrupt.
 * - If any of the above state changes, we must recalculate the oracle.
 * - The same is true when injecting an interrupt, except that we only
 *   consider a single interrupt at a time. The irq_spi_cpu array
 *   contains the target CPU for each SPI.
 *
 * The handling of level interrupts adds some extra complexity. We
 * need to track when the interrupt has been EOIed, so we can sample
 * the 'line' again. This is achieved as such:
 *
 * - When a level interrupt is moved onto a vcpu, the corresponding
 *   bit in irq_queued is set. As long as this bit is set, the line
 *   will be ignored for further interrupts. The interrupt is injected
 *   into the vcpu with the GICH_LR_EOI bit set (generate a
 *   maintenance interrupt on EOI).
 * - When the interrupt is EOIed, the maintenance interrupt fires,
 *   and clears the corresponding bit in irq_queued. This allows the
 *   interrupt line to be sampled again.
 * - Note that level-triggered interrupts can also be set to pending from
 *   writes to GICD_ISPENDRn and lowering the external input line does not
 *   cause the interrupt to become inactive in such a situation.
 *   Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
 *   inactive as long as the external input line is held high.
 */
#define VGIC_ADDR_UNDEF		(-1)
#define IS_VGIC_ADDR_UNDEF(_x)  ((_x) == VGIC_ADDR_UNDEF)

#define PRODUCT_ID_KVM		0x4b	/* ASCII code K */
#define IMPLEMENTER_ARM		0x43b
#define GICC_ARCH_VERSION_V2	0x2

#define ACCESS_READ_VALUE	(1 << 0)
#define ACCESS_READ_RAZ		(0 << 0)
#define ACCESS_READ_MASK(x)	((x) & (1 << 0))
#define ACCESS_WRITE_IGNORED	(0 << 1)
#define ACCESS_WRITE_SETBIT	(1 << 1)
#define ACCESS_WRITE_CLEARBIT	(2 << 1)
#define ACCESS_WRITE_VALUE	(3 << 1)
#define ACCESS_WRITE_MASK(x)	((x) & (3 << 1))
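
/*
 * A handler combines one ACCESS_READ_* flag with one ACCESS_WRITE_* flag;
 * e.g. (ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT) gives the GICD_ISENABLERn
 * semantics implemented below: reads return the backing value, writes OR
 * the written bits into it.
 */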
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
static void vgic_update_state(struct kvm *kvm);
static void vgic_kick_vcpus(struct kvm *kvm);
static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi);
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);

static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;
/*
 * struct vgic_bitmap contains a bitmap made of unsigned longs, but
 * extracts u32s out of them.
 *
 * This does not work on 64-bit BE systems, because the bitmap access
 * will store two consecutive 32-bit words with the higher-addressed
 * register's bits at the lower index and the lower-addressed register's
 * bits at the higher index.
 *
 * Therefore, swizzle the register index when accessing the 32-bit word
 * registers to access the right register's value.
 */
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
#define REG_OFFSET_SWIZZLE	1
#else
#define REG_OFFSET_SWIZZLE	0
#endif
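
/*
 * Example: on a 64-bit BE host the first 32 IRQ bits of an unsigned long
 * live in its higher-addressed 32-bit half, so word 0 of each pair is
 * fetched at u32 index 1 and word 1 at index 0; XOR-ing the word index
 * with REG_OFFSET_SWIZZLE performs exactly this pairwise swap.
 */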
static int vgic_init_bitmap(struct vgic_bitmap *b, int nr_cpus, int nr_irqs)
{
	int nr_longs;

	nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);

	b->private = kzalloc(sizeof(unsigned long) * nr_longs, GFP_KERNEL);
	if (!b->private)
		return -ENOMEM;

	b->shared = b->private + nr_cpus;

	return 0;
}
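
/*
 * The resulting layout is nr_cpus longs of banked private IRQ state,
 * immediately followed by the bits for the shared SPIs; the accessors
 * below rely on this adjacency to reach either half from one pointer.
 */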
static void vgic_free_bitmap(struct vgic_bitmap *b)
{
	kfree(b->private);
	b->private = NULL;
	b->shared = NULL;
}
static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
				int cpuid, u32 offset)
{
	offset >>= 2;
	if (!offset)
		return (u32 *)(x->private + cpuid) + REG_OFFSET_SWIZZLE;
	else
		return (u32 *)(x->shared) + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
}
static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
				   int cpuid, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		return test_bit(irq, x->private + cpuid);

	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
}

static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
				    int irq, int val)
{
	unsigned long *reg;

	if (irq < VGIC_NR_PRIVATE_IRQS) {
		reg = x->private + cpuid;
	} else {
		reg = x->shared;
		irq -= VGIC_NR_PRIVATE_IRQS;
	}

	if (val)
		set_bit(irq, reg);
	else
		clear_bit(irq, reg);
}
static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
{
	return x->private + cpuid;
}

static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
	return x->shared;
}
static int vgic_init_bytemap(struct vgic_bytemap *x, int nr_cpus, int nr_irqs)
{
	int size;

	size  = nr_cpus * VGIC_NR_PRIVATE_IRQS;
	size += nr_irqs - VGIC_NR_PRIVATE_IRQS;

	x->private = kzalloc(size, GFP_KERNEL);
	if (!x->private)
		return -ENOMEM;

	x->shared = x->private + nr_cpus * VGIC_NR_PRIVATE_IRQS / sizeof(u32);

	return 0;
}
static void vgic_free_bytemap(struct vgic_bytemap *b)
{
	kfree(b->private);
	b->private = NULL;
	b->shared = NULL;
}
static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
	u32 *reg;

	if (offset < VGIC_NR_PRIVATE_IRQS) {
		reg = x->private;
		offset += cpuid * VGIC_NR_PRIVATE_IRQS;
	} else {
		reg = x->shared;
		offset -= VGIC_NR_PRIVATE_IRQS;
	}

	return reg + (offset / sizeof(u32));
}
#define VGIC_CFG_LEVEL	0
#define VGIC_CFG_EDGE	1
static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int irq_val;

	irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
	return irq_val == VGIC_CFG_EDGE;
}
static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
}

static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
}

static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1);
}

static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0);
}

static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		set_bit(irq - VGIC_NR_PRIVATE_IRQS,
			vcpu->arch.vgic_cpu.pending_shared);
}

static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
			  vcpu->arch.vgic_cpu.pending_shared);
}

static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
{
	return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq);
}
static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
{
	return le32_to_cpu(*((u32 *)mmio->data)) & mask;
}

static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
{
	*((u32 *)mmio->data) = cpu_to_le32(value) & mask;
}
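
/*
 * The mmio->data buffer is treated as little-endian irrespective of the
 * host byte order, hence the explicit le32 conversions in the two helpers
 * above.
 */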
/**
 * vgic_reg_access - access vgic register
 * @mmio:   pointer to the data describing the mmio access
 * @reg:    pointer to the virtual backing of vgic distributor data
 * @offset: least significant 2 bits used for word offset
 * @mode:   ACCESS_ mode (see defines above)
 *
 * Helper to make vgic register access easier using one of the access
 * modes defined for vgic register access
 * (read,raz,write-ignored,setbit,clearbit,write)
 */
static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
			    phys_addr_t offset, int mode)
{
	int word_offset = (offset & 3) * 8;
	u32 mask = (1UL << (mmio->len * 8)) - 1;
	u32 regval;

	/*
	 * Any alignment fault should have been delivered to the guest
	 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
	 */

	if (reg) {
		regval = *reg;
	} else {
		BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
		regval = 0;
	}

	if (mmio->is_write) {
		u32 data = mmio_data_read(mmio, mask) << word_offset;
		switch (ACCESS_WRITE_MASK(mode)) {
		case ACCESS_WRITE_IGNORED:
			return;

		case ACCESS_WRITE_SETBIT:
			regval |= data;
			break;

		case ACCESS_WRITE_CLEARBIT:
			regval &= ~data;
			break;

		case ACCESS_WRITE_VALUE:
			regval = (regval & ~(mask << word_offset)) | data;
			break;
		}
		*reg = regval;
	} else {
		switch (ACCESS_READ_MASK(mode)) {
		case ACCESS_READ_RAZ:
			regval = 0;
			/* fall through */

		case ACCESS_READ_VALUE:
			mmio_data_write(mmio, mask, regval >> word_offset);
		}
	}
}
static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;
	u32 word_offset = offset & 3;

	switch (offset & ~3) {
	case 0:			/* GICD_CTLR */
		reg = vcpu->kvm->arch.vgic.enabled;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
		if (mmio->is_write) {
			vcpu->kvm->arch.vgic.enabled = reg & 1;
			vgic_update_state(vcpu->kvm);
			return true;
		}
		break;

	case 4:			/* GICD_TYPER */
		reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		reg |= (VGIC_NR_IRQS >> 5) - 1;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;

	case 8:			/* GICD_IIDR */
		reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;
	}

	return false;
}
static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}
static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
	if (mmio->is_write) {
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		if (offset < 4) /* Force SGI enabled */
			*reg |= 0xffff;
		vgic_retire_disabled_irqs(vcpu);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	u32 *reg, orig;
	u32 level_mask;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu->vcpu_id, offset);
	level_mask = (~(*reg));

	/* Mark both level and edge triggered irqs as pending */
	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);

	if (mmio->is_write) {
		/* Set the soft-pending flag only for level-triggered irqs */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu->vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
		*reg &= level_mask;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	u32 *level_active;
	u32 *reg, orig;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		/* Re-set level triggered level-active interrupts */
		level_active = vgic_bitmap_get_reg(&dist->irq_level,
						   vcpu->vcpu_id, offset);
		reg = vgic_bitmap_get_reg(&dist->irq_pending,
					  vcpu->vcpu_id, offset);
		*reg |= *level_active;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		/* Clear soft-pending flags */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu->vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);

		vgic_update_state(vcpu->kvm);
		return true;
	}
	return false;
}
static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
					vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}
#define GICD_ITARGETSR_SIZE	32
#define GICD_CPUTARGETS_BITS	8
#define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;
	u32 val = 0;

	irq -= VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
		val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);

	return val;
}
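
/*
 * Each GICD_ITARGETSRn register covers four SPIs, one target byte per
 * interrupt. For example, with irq_spi_cpu[] = {0, 1, 0, 2} for the four
 * SPIs of a register, the function above returns 0x04010201.
 */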
static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, c;
	unsigned long *bmap;
	u32 target;

	irq -= VGIC_NR_PRIVATE_IRQS;

	/*
	 * Pick the LSB in each byte. This ensures we target exactly
	 * one vcpu per IRQ. If the byte is null, assume we target
	 * CPU0.
	 */
	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
		int shift = i * GICD_CPUTARGETS_BITS;
		target = ffs((val >> shift) & 0xffU);
		target = target ? (target - 1) : 0;
		dist->irq_spi_cpu[irq + i] = target;
		kvm_for_each_vcpu(c, vcpu, kvm) {
			bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
			if (c == target)
				set_bit(irq + i, bmap);
			else
				clear_bit(irq + i, bmap);
		}
	}
}
static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset)
{
	u32 reg;

	/* We treat the banked interrupts targets as read-only */
	if (offset < 32) {
		u32 roreg = 1 << vcpu->vcpu_id;
		roreg |= roreg << 8;
		roreg |= roreg << 16;

		vgic_reg_access(mmio, &roreg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static u32 vgic_cfg_expand(u16 val)
{
	u32 res = 0;
	int i;

	/*
	 * Turn a 16bit value like abcd...mnop into a 32bit word
	 * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);

	return res;
}

static u16 vgic_cfg_compress(u32 val)
{
	u16 res = 0;
	int i;

	/*
	 * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
	 * abcd...mnop which is what we really care about.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;

	return res;
}
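
/*
 * Worked example: vgic_cfg_expand(0x0003) sets bits 1 and 3, i.e. returns
 * 0x0000000a, and vgic_cfg_compress(0x0000000a) recovers 0x0003.
 */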
/*
 * The distributor uses 2 bits per IRQ for the CFG register, but the
 * LSB is always 0. As such, we only keep the upper bit, and use the
 * two above functions to compress/expand the bits
 */
static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 val;
	u32 *reg;

	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				  vcpu->vcpu_id, offset >> 1);

	if (offset & 4)
		val = *reg >> 16;
	else
		val = *reg & 0xffff;

	val = vgic_cfg_expand(val);
	vgic_reg_access(mmio, &val, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		if (offset < 8) {
			*reg = ~0U; /* Force PPIs/SGIs to 1 */
			return false;
		}

		val = vgic_cfg_compress(val);
		if (offset & 4) {
			*reg &= 0xffff;
			*reg |= val << 16;
		} else {
			*reg &= 0xffff << 16;
			*reg |= val;
		}
	}

	return false;
}
static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_dispatch_sgi(vcpu, reg);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
/**
 * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
 * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
 *
 * Move any pending IRQs that have already been assigned to LRs back to the
 * emulated distributor state so that the complete emulated state can be read
 * from the main emulation structures without investigating the LRs.
 *
 * Note that IRQs in the active state in the LRs get their pending state moved
 * to the distributor but the active state stays in the LRs, because we don't
 * track the active state on the distributor side.
 */
static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int vcpu_id = vcpu->vcpu_id;
	int i;

	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
		struct vgic_lr lr = vgic_get_lr(vcpu, i);

		/*
		 * There are three options for the state bits:
		 *
		 * 01: pending
		 * 10: active
		 * 11: pending and active
		 *
		 * If the LR holds only an active interrupt (not pending) then
		 * just leave it alone.
		 */
		if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE)
			continue;

		/*
		 * Reestablish the pending state on the distributor and the
		 * CPU interface.  It may have already been pending, but that
		 * is fine, then we are only setting a few bits that were
		 * already set.
		 */
		vgic_dist_irq_set_pending(vcpu, lr.irq);
		if (lr.irq < VGIC_NR_SGIS)
			*vgic_get_sgi_sources(dist, vcpu_id, lr.irq) |= 1 << lr.source;
		lr.state &= ~LR_STATE_PENDING;
		vgic_set_lr(vcpu, i, lr);

		/*
		 * If there's no state left on the LR (it could still be
		 * active), then the LR does not hold any useful info and can
		 * be marked as free for other use.
		 */
		if (!(lr.state & LR_STATE_MASK)) {
			vgic_retire_lr(i, lr.irq, vcpu);
			vgic_irq_clear_queued(vcpu, lr.irq);
		}

		/* Finally update the VGIC state. */
		vgic_update_state(vcpu->kvm);
	}
}
/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3) * 4;
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg = 0;

	/* Copy source SGIs from distributor side */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		int shift = 8 * (sgi - min_sgi);
		reg |= ((u32)*vgic_get_sgi_sources(dist, vcpu_id, sgi)) << shift;
	}

	mmio_data_write(mmio, ~0, reg);
	return false;
}
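
/*
 * Each GICD_SPENDSGIRn/GICD_CPENDSGIRn register holds one source byte for
 * four SGIs, so a 32-bit access always moves the source bitmaps of four
 * consecutive SGIs between the guest and the distributor state.
 */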
static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset, bool set)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3) * 4;
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg;
	bool updated = false;

	reg = mmio_data_read(mmio, ~0);

	/* Clear pending SGIs on the distributor */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 mask = reg >> (8 * (sgi - min_sgi));
		u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);
		if (set) {
			if ((*src & mask) != mask)
				updated = true;
			*src |= mask;
		} else {
			if (*src & mask)
				updated = true;
			*src &= ~mask;
		}
	}

	if (updated)
		vgic_update_state(vcpu->kvm);

	return updated;
}
static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
}

static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
}
/*
 * I would have liked to use the kvm_bus_io_*() API instead, but it
 * cannot cope with banked registers (only the VM pointer is passed
 * around, and we need the vcpu). One of these days, someone please
 * fix it!
 */
struct mmio_range {
	phys_addr_t base;
	unsigned long len;
	bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
			    phys_addr_t offset);
};
static const struct mmio_range vgic_dist_ranges[] = {
	{
		.base		= GIC_DIST_CTRL,
		.len		= 12,
		.handle_mmio	= handle_mmio_misc,
	},
	{
		.base		= GIC_DIST_IGROUP,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ENABLE_SET,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_set_enable_reg,
	},
	{
		.base		= GIC_DIST_ENABLE_CLEAR,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_clear_enable_reg,
	},
	{
		.base		= GIC_DIST_PENDING_SET,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_set_pending_reg,
	},
	{
		.base		= GIC_DIST_PENDING_CLEAR,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_clear_pending_reg,
	},
	{
		.base		= GIC_DIST_ACTIVE_SET,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ACTIVE_CLEAR,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_PRI,
		.len		= VGIC_NR_IRQS,
		.handle_mmio	= handle_mmio_priority_reg,
	},
	{
		.base		= GIC_DIST_TARGET,
		.len		= VGIC_NR_IRQS,
		.handle_mmio	= handle_mmio_target_reg,
	},
	{
		.base		= GIC_DIST_CONFIG,
		.len		= VGIC_NR_IRQS / 4,
		.handle_mmio	= handle_mmio_cfg_reg,
	},
	{
		.base		= GIC_DIST_SOFTINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_sgi_reg,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_CLEAR,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_clear,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_SET,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_set,
	},
	{}
};
static const
struct mmio_range *find_matching_range(const struct mmio_range *ranges,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	const struct mmio_range *r = ranges;

	while (r->len) {
		if (offset >= r->base &&
		    (offset + mmio->len) <= (r->base + r->len))
			return r;
		r++;
	}

	return NULL;
}
/**
 * vgic_handle_mmio - handle an in-kernel MMIO access
 * @vcpu:	pointer to the vcpu performing the access
 * @run:	pointer to the kvm_run structure
 * @mmio:	pointer to the data describing the access
 *
 * returns true if the MMIO access has been performed in kernel space,
 * and false if it needs to be emulated in user space.
 */
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
		      struct kvm_exit_mmio *mmio)
{
	const struct mmio_range *range;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long base = dist->vgic_dist_base;
	bool updated_state;
	unsigned long offset;

	if (!irqchip_in_kernel(vcpu->kvm) ||
	    mmio->phys_addr < base ||
	    (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
		return false;

	/* We don't support ldrd / strd or ldm / stm to the emulated vgic */
	if (mmio->len > 4) {
		kvm_inject_dabt(vcpu, mmio->phys_addr);
		return true;
	}

	offset = mmio->phys_addr - base;
	range = find_matching_range(vgic_dist_ranges, mmio, offset);
	if (unlikely(!range || !range->handle_mmio)) {
		pr_warn("Unhandled access %d %08llx %d\n",
			mmio->is_write, mmio->phys_addr, mmio->len);
		return false;
	}

	spin_lock(&vcpu->kvm->arch.vgic.lock);
	offset = mmio->phys_addr - range->base - base;
	updated_state = range->handle_mmio(vcpu, mmio, offset);
	spin_unlock(&vcpu->kvm->arch.vgic.lock);
	kvm_prepare_mmio(run, mmio);
	kvm_handle_mmio_return(vcpu, run);

	if (updated_state)
		vgic_kick_vcpus(vcpu->kvm);

	return true;
}
static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
{
	return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
}
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	u8 target_cpus;
	int sgi, mode, c, vcpu_id;

	vcpu_id = vcpu->vcpu_id;

	sgi = reg & 0xf;
	target_cpus = (reg >> 16) & 0xff;
	mode = (reg >> 24) & 3;

	switch (mode) {
	case 0:
		if (!target_cpus)
			return;
		break;

	case 1:
		target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
		break;

	case 2:
		target_cpus = 1 << vcpu_id;
		break;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (target_cpus & 1) {
			/* Flag the SGI as pending */
			vgic_dist_irq_set_pending(vcpu, sgi);
			*vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
			kvm_debug("SGI%d from CPU%d to CPU%d\n",
				  sgi, vcpu_id, c);
		}

		target_cpus >>= 1;
	}
}
static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
	unsigned long pending_private, pending_shared;
	int vcpu_id;

	vcpu_id = vcpu->vcpu_id;
	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
	pend_shared = vcpu->arch.vgic_cpu.pending_shared;

	pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);

	pending = vgic_bitmap_get_shared_map(&dist->irq_pending);
	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
	bitmap_and(pend_shared, pending, enabled, VGIC_NR_SHARED_IRQS);
	bitmap_and(pend_shared, pend_shared,
		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
		   VGIC_NR_SHARED_IRQS);

	pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
	pending_shared = find_first_bit(pend_shared, VGIC_NR_SHARED_IRQS);
	return (pending_private < VGIC_NR_PRIVATE_IRQS ||
		pending_shared < VGIC_NR_SHARED_IRQS);
}
/*
 * Update the interrupt state and determine which CPUs have pending
 * interrupts. Must be called with distributor lock held.
 */
static void vgic_update_state(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int c;

	if (!dist->enabled) {
		set_bit(0, dist->irq_pending_on_cpu);
		return;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (compute_pending_for_cpu(vcpu)) {
			pr_debug("CPU%d has pending interrupts\n", c);
			set_bit(c, dist->irq_pending_on_cpu);
		}
	}
}
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
	return vgic_ops->get_lr(vcpu, lr);
}

static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
			struct vgic_lr vlr)
{
	vgic_ops->set_lr(vcpu, lr, vlr);
}

static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
			       struct vgic_lr vlr)
{
	vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
}

static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_elrsr(vcpu);
}

static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_eisr(vcpu);
}

static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_interrupt_status(vcpu);
}

static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable_underflow(vcpu);
}

static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->disable_underflow(vcpu);
}

static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->get_vmcr(vcpu, vmcr);
}

static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->set_vmcr(vcpu, vmcr);
}

static inline void vgic_enable(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable(vcpu);
}
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);

	vlr.state = 0;
	vgic_set_lr(vcpu, lr_nr, vlr);
	clear_bit(lr_nr, vgic_cpu->lr_used);
	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
}
/*
 * An interrupt may have been disabled after being made pending on the
 * CPU interface (the classic case is a timer running while we're
 * rebooting the guest - the interrupt would kick as soon as the CPU
 * interface gets enabled, with deadly consequences).
 *
 * The solution is to examine already active LRs, and check the
 * interrupt is still enabled. If not, just retire it.
 */
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int lr;

	for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

		if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
			vgic_retire_lr(lr, vlr.irq, vcpu);
			if (vgic_irq_is_queued(vcpu, vlr.irq))
				vgic_irq_clear_queued(vcpu, vlr.irq);
		}
	}
}
/*
 * Queue an interrupt to a CPU virtual interface. Return true on success,
 * or false if it wasn't possible to queue it.
 */
static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr;
	int lr;

	/* Sanitize the input... */
	BUG_ON(sgi_source_id & ~7);
	BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
	BUG_ON(irq >= VGIC_NR_IRQS);

	kvm_debug("Queue IRQ%d\n", irq);

	lr = vgic_cpu->vgic_irq_lr_map[irq];

	/* Do we have an active interrupt for the same CPUID? */
	if (lr != LR_EMPTY) {
		vlr = vgic_get_lr(vcpu, lr);
		if (vlr.source == sgi_source_id) {
			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
			vlr.state |= LR_STATE_PENDING;
			vgic_set_lr(vcpu, lr, vlr);
			return true;
		}
	}

	/* Try to use another LR for this interrupt */
	lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
				 vgic->nr_lr);
	if (lr >= vgic->nr_lr)
		return false;

	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
	vgic_cpu->vgic_irq_lr_map[irq] = lr;
	set_bit(lr, vgic_cpu->lr_used);

	vlr.irq = irq;
	vlr.source = sgi_source_id;
	vlr.state = LR_STATE_PENDING;
	if (!vgic_irq_is_edge(vcpu, irq))
		vlr.state |= LR_EOI_INT;

	vgic_set_lr(vcpu, lr, vlr);

	return true;
}
static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long sources;
	int vcpu_id = vcpu->vcpu_id;
	int c;

	sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);

	for_each_set_bit(c, &sources, VGIC_MAX_CPUS) {
		if (vgic_queue_irq(vcpu, c, irq))
			clear_bit(c, &sources);
	}

	*vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;

	/*
	 * If the sources bitmap has been cleared it means that we
	 * could queue all the SGIs onto link registers (see the
	 * clear_bit above), and therefore we are done with them in
	 * our emulated gic and can get rid of them.
	 */
	if (!sources) {
		vgic_dist_irq_clear_pending(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return true;
	}

	return false;
}
static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
	if (!vgic_can_sample_irq(vcpu, irq))
		return true; /* level interrupt, already queued */

	if (vgic_queue_irq(vcpu, 0, irq)) {
		if (vgic_irq_is_edge(vcpu, irq)) {
			vgic_dist_irq_clear_pending(vcpu, irq);
			vgic_cpu_irq_clear(vcpu, irq);
		} else {
			vgic_irq_set_queued(vcpu, irq);
		}

		return true;
	}

	return false;
}
/*
 * Fill the list registers with pending interrupts before running the
 * guest.
 */
static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i, vcpu_id;
	int overflow = 0;

	vcpu_id = vcpu->vcpu_id;

	/*
	 * We may not have any pending interrupt, or the interrupts
	 * may have been serviced from another vcpu. In all cases,
	 * move along.
	 */
	if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
		pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
		goto epilog;
	}

	/* SGIs */
	for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
		if (!vgic_queue_sgi(vcpu, i))
			overflow = 1;
	}

	/* PPIs */
	for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i))
			overflow = 1;
	}

	/* SPIs */
	for_each_set_bit(i, vgic_cpu->pending_shared, VGIC_NR_SHARED_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
			overflow = 1;
	}

epilog:
	if (overflow) {
		vgic_enable_underflow(vcpu);
	} else {
		vgic_disable_underflow(vcpu);
		/*
		 * We're about to run this VCPU, and we've consumed
		 * everything the distributor had in store for
		 * us. Claim we don't have anything pending. We'll
		 * adjust that if needed while exiting.
		 */
		clear_bit(vcpu_id, dist->irq_pending_on_cpu);
	}
}
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
	u32 status = vgic_get_interrupt_status(vcpu);
	bool level_pending = false;

	kvm_debug("STATUS = %08x\n", status);

	if (status & INT_STATUS_EOI) {
		/*
		 * Some level interrupts have been EOIed. Clear their
		 * active bit.
		 */
		u64 eisr = vgic_get_eisr(vcpu);
		unsigned long *eisr_ptr = (unsigned long *)&eisr;
		int lr;

		for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
			WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));

			vgic_irq_clear_queued(vcpu, vlr.irq);
			WARN_ON(vlr.state & LR_STATE_MASK);
			vlr.state = 0;
			vgic_set_lr(vcpu, lr, vlr);

			/*
			 * If the IRQ was EOIed it was also ACKed and we
			 * therefore assume we can clear the soft pending
			 * state (should it have been set) for this interrupt.
			 *
			 * Note: if the IRQ soft pending state was set after
			 * the IRQ was acked, it actually shouldn't be
			 * cleared, but we have no way of knowing that unless
			 * we start trapping ACKs when the soft-pending state
			 * is set.
			 */
			vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);

			/* Any additional pending interrupt? */
			if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
				vgic_cpu_irq_set(vcpu, vlr.irq);
				level_pending = true;
			} else {
				vgic_dist_irq_clear_pending(vcpu, vlr.irq);
				vgic_cpu_irq_clear(vcpu, vlr.irq);
			}

			/*
			 * Despite being EOIed, the LR may not have
			 * been marked as empty.
			 */
			vgic_sync_lr_elrsr(vcpu, lr, vlr);
		}
	}

	if (status & INT_STATUS_UNDERFLOW)
		vgic_disable_underflow(vcpu);

	return level_pending;
}
/*
 * Sync back the VGIC state after a guest run. The distributor lock is
 * needed so we don't get preempted in the middle of the state processing.
 */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	u64 elrsr;
	unsigned long *elrsr_ptr;
	int lr, pending;
	bool level_pending;

	level_pending = vgic_process_maintenance(vcpu);
	elrsr = vgic_get_elrsr(vcpu);
	elrsr_ptr = (unsigned long *)&elrsr;

	/* Clear mappings for empty LRs */
	for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
		struct vgic_lr vlr;

		if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
			continue;

		vlr = vgic_get_lr(vcpu, lr);

		BUG_ON(vlr.irq >= VGIC_NR_IRQS);
		vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
	}

	/* Check if we still have something up our sleeve... */
	pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
	if (level_pending || pending < vgic->nr_lr)
		set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_flush_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_sync_hwstate(vcpu);
	spin_unlock(&dist->lock);
}
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}
static void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu))
			kvm_vcpu_kick(vcpu);
	}
}
static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
{
	int edge_triggered = vgic_irq_is_edge(vcpu, irq);

	/*
	 * Only inject an interrupt if:
	 * - edge triggered and we have a rising edge
	 * - level triggered and we change level
	 */
	if (edge_triggered) {
		int state = vgic_dist_irq_is_pending(vcpu, irq);
		return level > state;
	} else {
		int state = vgic_dist_irq_get_level(vcpu, irq);
		return level != state;
	}
}
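
/*
 * For example, injecting 'level = 1' for a level-triggered line that is
 * already high is filtered out here, while a 1 -> 0 transition is let
 * through so the pending state can be retired.
 */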
static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
				    unsigned int irq_num, bool level)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int edge_triggered, level_triggered;
	int enabled;
	bool ret = true;

	spin_lock(&dist->lock);

	vcpu = kvm_get_vcpu(kvm, cpuid);
	edge_triggered = vgic_irq_is_edge(vcpu, irq_num);
	level_triggered = !edge_triggered;

	if (!vgic_validate_injection(vcpu, irq_num, level)) {
		ret = false;
		goto out;
	}

	if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
		cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
		vcpu = kvm_get_vcpu(kvm, cpuid);
	}

	kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);

	if (level) {
		if (level_triggered)
			vgic_dist_irq_set_level(vcpu, irq_num);
		vgic_dist_irq_set_pending(vcpu, irq_num);
	} else {
		if (level_triggered) {
			vgic_dist_irq_clear_level(vcpu, irq_num);
			if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
				vgic_dist_irq_clear_pending(vcpu, irq_num);
		} else {
			vgic_dist_irq_clear_pending(vcpu, irq_num);
		}
	}

	enabled = vgic_irq_is_enabled(vcpu, irq_num);

	if (!enabled) {
		ret = false;
		goto out;
	}

	if (!vgic_can_sample_irq(vcpu, irq_num)) {
		/*
		 * Level interrupt in progress, will be picked up
		 * when EOId.
		 */
		ret = false;
		goto out;
	}

	if (level) {
		vgic_cpu_irq_set(vcpu, irq_num);
		set_bit(cpuid, dist->irq_pending_on_cpu);
	}

out:
	spin_unlock(&dist->lock);

	return ret;
}
/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @irq_num: The IRQ number that is assigned to the device
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  activates an interrupt
 *			      false: deactivates an interrupt
 *
 * The GIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
			bool level)
{
	if (likely(vgic_initialized(kvm)) &&
	    vgic_update_irq_pending(kvm, cpuid, irq_num, level))
		vgic_kick_vcpus(kvm);

	return 0;
}
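
/*
 * Illustrative caller: an in-kernel device such as the arch timer raises
 * its PPI on a given vcpu with something like
 * kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, ppi_irq_num, true);
 */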
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_process_maintenance).
	 */
	return IRQ_HANDLED;
}
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	kfree(vgic_cpu->pending_shared);
	kfree(vgic_cpu->vgic_irq_lr_map);
	vgic_cpu->pending_shared = NULL;
	vgic_cpu->vgic_irq_lr_map = NULL;
}
static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
	vgic_cpu->vgic_irq_lr_map = kzalloc(nr_irqs, GFP_KERNEL);

	if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) {
		kvm_vgic_vcpu_destroy(vcpu);
		return -ENOMEM;
	}

	return 0;
}
/**
 * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
 * @vcpu: pointer to the vcpu struct
 *
 * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
 * this vcpu and enable the VGIC for this VCPU
 */
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i;

	if (vcpu->vcpu_id >= VGIC_MAX_CPUS)
		return -EBUSY;

	for (i = 0; i < VGIC_NR_IRQS; i++) {
		if (i < VGIC_NR_PPIS)
			vgic_bitmap_set_irq_val(&dist->irq_enabled,
						vcpu->vcpu_id, i, 1);
		if (i < VGIC_NR_PRIVATE_IRQS)
			vgic_bitmap_set_irq_val(&dist->irq_cfg,
						vcpu->vcpu_id, i, VGIC_CFG_EDGE);

		vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
	}

	/*
	 * Store the number of LRs per vcpu, so we don't have to go
	 * all the way to the distributor structure to find out. Only
	 * assembly code should use this one.
	 */
	vgic_cpu->nr_lr = vgic->nr_lr;

	vgic_enable(vcpu);

	return 0;
}
void kvm_vgic_destroy(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vgic_vcpu_destroy(vcpu);

	vgic_free_bitmap(&dist->irq_enabled);
	vgic_free_bitmap(&dist->irq_level);
	vgic_free_bitmap(&dist->irq_pending);
	vgic_free_bitmap(&dist->irq_soft_pend);
	vgic_free_bitmap(&dist->irq_queued);
	vgic_free_bitmap(&dist->irq_cfg);
	vgic_free_bytemap(&dist->irq_priority);
	if (dist->irq_spi_target) {
		for (i = 0; i < dist->nr_cpus; i++)
			vgic_free_bitmap(&dist->irq_spi_target[i]);
	}
	kfree(dist->irq_sgi_sources);
	kfree(dist->irq_spi_cpu);
	kfree(dist->irq_spi_target);
	kfree(dist->irq_pending_on_cpu);
	dist->irq_sgi_sources = NULL;
	dist->irq_spi_cpu = NULL;
	dist->irq_spi_target = NULL;
	dist->irq_pending_on_cpu = NULL;
}
/*
 * Allocate and initialize the various data structures. Must be called
 * with kvm->lock held!
 */
static int vgic_init_maps(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int nr_cpus, nr_irqs;
	int ret, i;

	nr_cpus = dist->nr_cpus = VGIC_MAX_CPUS;
	nr_irqs = dist->nr_irqs = VGIC_NR_IRQS;

	ret  = vgic_init_bitmap(&dist->irq_enabled, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_level, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
	ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);

	if (ret)
		goto out;

	dist->irq_sgi_sources = kzalloc(nr_cpus * VGIC_NR_SGIS, GFP_KERNEL);
	dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS, GFP_KERNEL);
	dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus,
				       GFP_KERNEL);
	dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
					   GFP_KERNEL);
	if (!dist->irq_sgi_sources ||
	    !dist->irq_spi_cpu ||
	    !dist->irq_spi_target ||
	    !dist->irq_pending_on_cpu) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < nr_cpus; i++)
		ret |= vgic_init_bitmap(&dist->irq_spi_target[i],
					nr_cpus, nr_irqs);

	if (ret)
		goto out;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
		if (ret) {
			kvm_err("VGIC: Failed to allocate vcpu memory\n");
			break;
		}
	}

out:
	if (ret)
		kvm_vgic_destroy(kvm);

	return ret;
}
/**
 * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
 * @kvm: pointer to the kvm struct
 *
 * Map the virtual CPU interface into the VM before running any VCPUs.  We
 * can't do this at creation time, because user space must first set the
 * virtual CPU interface address in the guest physical address space.  Also
 * initialize the ITARGETSRn regs to 0 on the emulated distributor.
 */
int kvm_vgic_init(struct kvm *kvm)
{
	int ret = 0, i;

	if (!irqchip_in_kernel(kvm))
		return 0;

	mutex_lock(&kvm->lock);

	if (vgic_initialized(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
				    vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE);
	if (ret) {
		kvm_err("Unable to remap VGIC CPU to VCPU\n");
		goto out;
	}

	for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4)
		vgic_set_target_reg(kvm, 0, i);

	kvm->arch.vgic.ready = true;
out:
	mutex_unlock(&kvm->lock);
	return ret;
}
int kvm_vgic_create(struct kvm *kvm)
{
	int i, vcpu_lock_idx = -1, ret = 0;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->lock);

	if (kvm->arch.vgic.vctrl_base) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run while we create the vgic.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!mutex_trylock(&vcpu->mutex))
			goto out_unlock;
		vcpu_lock_idx = i;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.has_run_once) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}

	spin_lock_init(&kvm->arch.vgic.lock);
	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;

	ret = vgic_init_maps(kvm);
	if (ret)
		kvm_err("Unable to allocate maps\n");

out_unlock:
	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&vcpu->mutex);
	}

out:
	mutex_unlock(&kvm->lock);
	return ret;
}
static int vgic_ioaddr_overlap(struct kvm *kvm)
{
	phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
	phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;

	if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
		return 0;
	if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
	    (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
		return -EBUSY;
	return 0;
}
static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
			      phys_addr_t addr, phys_addr_t size)
{
	int ret;

	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

	if (addr & (SZ_4K - 1))
		return -EINVAL;

	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;
	if (addr + size < addr)
		return -EINVAL;

	*ioaddr = addr;
	ret = vgic_ioaddr_overlap(kvm);
	if (ret)
		*ioaddr = VGIC_ADDR_UNDEF;

	return ret;
}
/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V2_ADDR_TYPE_XXX
 * @addr:  pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space.  These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;

	mutex_lock(&kvm->lock);
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		if (write) {
			r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
					       *addr, KVM_VGIC_V2_DIST_SIZE);
		} else {
			*addr = vgic->vgic_dist_base;
		}
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		if (write) {
			r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
					       *addr, KVM_VGIC_V2_CPU_SIZE);
		} else {
			*addr = vgic->vgic_cpu_base;
		}
		break;
	default:
		r = -ENODEV;
	}

	mutex_unlock(&kvm->lock);
	return r;
}
static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
				 struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	bool updated = false;
	struct vgic_vmcr vmcr;
	u32 *vmcr_field;
	u32 reg;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (offset & ~0x3) {
	case GIC_CPU_CTRL:
		vmcr_field = &vmcr.ctlr;
		break;
	case GIC_CPU_PRIMASK:
		vmcr_field = &vmcr.pmr;
		break;
	case GIC_CPU_BINPOINT:
		vmcr_field = &vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr_field = &vmcr.abpr;
		break;
	default:
		BUG();
	}

	if (!mmio->is_write) {
		reg = *vmcr_field;
		mmio_data_write(mmio, ~0, reg);
	} else {
		reg = mmio_data_read(mmio, ~0);
		if (reg != *vmcr_field) {
			*vmcr_field = reg;
			vgic_set_vmcr(vcpu, &vmcr);
			updated = true;
		}
	}
	return updated;
}
static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
}

static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	u32 reg;

	if (mmio->is_write)
		return false;

	/* GICC_IIDR */
	reg = (PRODUCT_ID_KVM << 20) |
	      (GICC_ARCH_VERSION_V2 << 16) |
	      (IMPLEMENTER_ARM << 0);
	mmio_data_write(mmio, ~0, reg);
	return false;
}
/*
 * CPU Interface Register accesses - these are not accessed by the VM, but by
 * user space for saving and restoring VGIC state.
 */
static const struct mmio_range vgic_cpu_ranges[] = {
	{
		.base		= GIC_CPU_CTRL,
		.len		= 12,
		.handle_mmio	= handle_cpu_mmio_misc,
	},
	{
		.base		= GIC_CPU_ALIAS_BINPOINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_abpr,
	},
	{
		.base		= GIC_CPU_ACTIVEPRIO,
		.len		= 16,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_CPU_IDENT,
		.len		= 4,
		.handle_mmio	= handle_cpu_mmio_ident,
	},
};
static int vgic_attr_regs_access(struct kvm_device *dev,
				 struct kvm_device_attr *attr,
				 u32 *reg, bool is_write)
{
	const struct mmio_range *r = NULL, *ranges;
	phys_addr_t offset;
	int ret, cpuid, c;
	struct kvm_vcpu *vcpu, *tmp_vcpu;
	struct vgic_dist *vgic;
	struct kvm_exit_mmio mmio;

	offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
		KVM_DEV_ARM_VGIC_CPUID_SHIFT;

	mutex_lock(&dev->kvm->lock);

	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
		ret = -EINVAL;
		goto out;
	}

	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
	vgic = &dev->kvm->arch.vgic;

	mmio.len = 4;
	mmio.is_write = is_write;
	if (is_write)
		mmio_data_write(&mmio, ~0, *reg);
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		mmio.phys_addr = vgic->vgic_dist_base + offset;
		ranges = vgic_dist_ranges;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		mmio.phys_addr = vgic->vgic_cpu_base + offset;
		ranges = vgic_cpu_ranges;
		break;
	default:
		BUG();
	}
	r = find_matching_range(ranges, &mmio, offset);

	if (unlikely(!r || !r->handle_mmio)) {
		ret = -ENXIO;
		goto out;
	}

	spin_lock(&vgic->lock);

	/*
	 * Ensure that no other VCPU is running by checking the vcpu->cpu
	 * field.  If no other VCPUs are running we can safely access the VGIC
	 * state, because even if another VCPU is run after this point, that
	 * VCPU will not touch the vgic state, because it will block on
	 * getting the vgic->lock in kvm_vgic_sync_hwstate().
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
		if (unlikely(tmp_vcpu->cpu != -1)) {
			ret = -EBUSY;
			goto out_vgic_unlock;
		}
	}

	/*
	 * Move all pending IRQs from the LRs on all VCPUs so the pending
	 * state can be properly represented in the register state accessible
	 * through this API.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
		vgic_unqueue_irqs(tmp_vcpu);

	offset -= r->base;
	r->handle_mmio(vcpu, &mmio, offset);

	if (!is_write)
		*reg = mmio_data_read(&mmio, ~0);

	ret = 0;
out_vgic_unlock:
	spin_unlock(&vgic->lock);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}
static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		return (r == -ENODEV) ? -ENXIO : r;
	}

	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_attr_regs_access(dev, attr, &reg, true);
	}

	}

	return -ENXIO;
}
static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}

	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		r = vgic_attr_regs_access(dev, attr, &reg, false);
		if (r)
			return r;
		r = put_user(reg, uaddr);
		break;
	}

	}

	return r;
}
static int vgic_has_attr_regs(const struct mmio_range *ranges,
			      phys_addr_t offset)
{
	struct kvm_exit_mmio dev_attr_mmio;

	dev_attr_mmio.len = 4;
	if (find_matching_range(ranges, &dev_attr_mmio, offset))
		return 0;
	else
		return -ENXIO;
}
static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	phys_addr_t offset;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_dist_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_cpu_ranges, offset);
	}

	return -ENXIO;
}
static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

static int vgic_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm);
}

static struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_set_attr,
	.get_attr = vgic_get_attr,
	.has_attr = vgic_has_attr,
};
)
2292 enable_percpu_irq(vgic
->maint_irq
, 0);
2295 static int vgic_cpu_notify(struct notifier_block
*self
,
2296 unsigned long action
, void *cpu
)
2300 case CPU_STARTING_FROZEN
:
2301 vgic_init_maintenance_interrupt(NULL
);
2304 case CPU_DYING_FROZEN
:
2305 disable_percpu_irq(vgic
->maint_irq
);
2312 static struct notifier_block vgic_cpu_nb
= {
2313 .notifier_call
= vgic_cpu_notify
,
2316 static const struct of_device_id vgic_ids
[] = {
2317 { .compatible
= "arm,cortex-a15-gic", .data
= vgic_v2_probe
, },
2318 { .compatible
= "arm,gic-v3", .data
= vgic_v3_probe
, },
int kvm_vgic_hyp_init(void)
{
	const struct of_device_id *matched_id;
	const int (*vgic_probe)(struct device_node *, const struct vgic_ops **,
				const struct vgic_params **);
	struct device_node *vgic_node;
	int ret;

	vgic_node = of_find_matching_node_and_match(NULL,
						    vgic_ids, &matched_id);
	if (!vgic_node) {
		kvm_err("error: no compatible GIC node found\n");
		return -ENODEV;
	}

	vgic_probe = matched_id->data;
	ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
	if (ret)
		return ret;

	ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
		return ret;
	}

	ret = __register_cpu_notifier(&vgic_cpu_nb);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	/* Callback into for arch code for setup */
	vgic_arch_setup(vgic);

	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);

	return kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
				       KVM_DEV_TYPE_ARM_VGIC_V2);

out_free_irq:
	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
	return ret;
}