/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
/*
 * How the whole thing works (courtesy of Christoffer Dall):
 *
 * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
 *   something is pending on the CPU interface.
 * - Interrupts that are pending on the distributor are stored on the
 *   vgic.irq_pending vgic bitmap (this bitmap is updated by both user land
 *   ioctls and guest mmio ops, and other in-kernel peripherals such as the
 *   arch. timers).
 * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
 *   recalculated.
 * - To calculate the oracle, we need info for each cpu from
 *   compute_pending_for_cpu, which considers:
 *   - PPI: dist->irq_pending & dist->irq_enable
 *   - SPI: dist->irq_pending & dist->irq_enable & dist->irq_spi_target
 *   - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
 *     registers, stored on each vcpu. We only keep one bit of
 *     information per interrupt, making sure that only one vcpu can
 *     accept the interrupt.
 * - If any of the above state changes, we must recalculate the oracle.
 * - The same is true when injecting an interrupt, except that we only
 *   consider a single interrupt at a time. The irq_spi_cpu array
 *   contains the target CPU for each SPI.
 *
 * The handling of level interrupts adds some extra complexity. We
 * need to track when the interrupt has been EOIed, so we can sample
 * the 'line' again. This is achieved as such:
 *
 * - When a level interrupt is moved onto a vcpu, the corresponding
 *   bit in irq_queued is set. As long as this bit is set, the line
 *   will be ignored for further interrupts. The interrupt is injected
 *   into the vcpu with the GICH_LR_EOI bit set (generate a
 *   maintenance interrupt on EOI).
 * - When the interrupt is EOIed, the maintenance interrupt fires,
 *   and clears the corresponding bit in irq_queued. This allows the
 *   interrupt line to be sampled again.
 * - Note that level-triggered interrupts can also be set to pending from
 *   writes to GICD_ISPENDRn and lowering the external input line does not
 *   cause the interrupt to become inactive in such a situation.
 *   Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
 *   inactive as long as the external input line is held high.
 */
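/*
 * As a concrete sketch of the oracle input for one vcpu (this is what
 * compute_pending_for_cpu() below implements with bitmap_and()):
 *
 *	pend_percpu = irq_pending & irq_enabled;		(SGIs/PPIs)
 *	pend_shared = irq_pending & irq_enabled
 *			& irq_spi_target[vcpu];			(SPIs)
 *
 * The vcpu's bit in irq_pending_on_cpu is set iff either result is
 * non-empty.
 */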
#define VGIC_ADDR_UNDEF		(-1)
#define IS_VGIC_ADDR_UNDEF(_x)  ((_x) == VGIC_ADDR_UNDEF)

#define PRODUCT_ID_KVM		0x4b	/* ASCII code K */
#define IMPLEMENTER_ARM		0x43b
#define GICC_ARCH_VERSION_V2	0x2

#define ACCESS_READ_VALUE	(1 << 0)
#define ACCESS_READ_RAZ		(0 << 0)
#define ACCESS_READ_MASK(x)	((x) & (1 << 0))
#define ACCESS_WRITE_IGNORED	(0 << 1)
#define ACCESS_WRITE_SETBIT	(1 << 1)
#define ACCESS_WRITE_CLEARBIT	(2 << 1)
#define ACCESS_WRITE_VALUE	(3 << 1)
#define ACCESS_WRITE_MASK(x)	((x) & (3 << 1))
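/*
 * The read half and the write half combine into a single mode word.
 * For instance, the GICD_ISENABLERn handler below passes
 * (ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT): reads return the backing
 * register, writes OR the written bits into it. RAZ/WI registers use
 * (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED).
 */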
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
static void vgic_update_state(struct kvm *kvm);
static void vgic_kick_vcpus(struct kvm *kvm);
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);

static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;
/*
 * struct vgic_bitmap contains unions that provide two views of
 * the same data. In one case it is an array of registers of
 * u32's, and in the other case it is a bitmap of unsigned
 * longs.
 *
 * This does not work on 64-bit BE systems, because the bitmap access
 * will store two consecutive 32-bit words with the higher-addressed
 * register's bits at the lower index and the lower-addressed register's
 * bits at the higher index.
 *
 * Therefore, swizzle the register index when accessing the 32-bit word
 * registers to access the right register's value.
 */
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
#define REG_OFFSET_SWIZZLE	1
#else
#define REG_OFFSET_SWIZZLE	0
#endif
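/*
 * Example: on a 64-bit BE kernel, u32 registers 0 and 1 share one
 * unsigned long, but the bitmap keeps register 0's bits in the other
 * half of that long. XOR-ing the u32 index with 1 pairs them back up:
 * 0 <-> 1, 2 <-> 3, and so on. On LE (or 32-bit) kernels the index is
 * unchanged (x ^ 0 == x).
 */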
static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
				int cpuid, u32 offset)
{
	offset >>= 2;
	if (!offset)
		return x->percpu[cpuid].reg + (offset ^ REG_OFFSET_SWIZZLE);
	else
		return x->shared.reg + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
}
static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
				   int cpuid, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		return test_bit(irq, x->percpu[cpuid].reg_ul);

	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared.reg_ul);
}
static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
				    int irq, int val)
{
	unsigned long *reg;

	if (irq < VGIC_NR_PRIVATE_IRQS) {
		reg = x->percpu[cpuid].reg_ul;
	} else {
		reg = x->shared.reg_ul;
		irq -= VGIC_NR_PRIVATE_IRQS;
	}

	if (val)
		set_bit(irq, reg);
	else
		clear_bit(irq, reg);
}
static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
{
	if (unlikely(cpuid >= VGIC_MAX_CPUS))
		return NULL;
	return x->percpu[cpuid].reg_ul;
}

static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
	return x->shared.reg_ul;
}
static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
	offset >>= 2;
	BUG_ON(offset > (VGIC_NR_IRQS / 4));
	if (offset < 8)
		return x->percpu[cpuid] + offset;
	else
		return x->shared + offset - 8;
}
#define VGIC_CFG_LEVEL	0
#define VGIC_CFG_EDGE	1

static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int irq_val;

	irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
	return irq_val == VGIC_CFG_EDGE;
}
static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
}

static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
}

static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1);
}

static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0);
}
static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		set_bit(irq - VGIC_NR_PRIVATE_IRQS,
			vcpu->arch.vgic_cpu.pending_shared);
}

static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
			  vcpu->arch.vgic_cpu.pending_shared);
}

static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
{
	return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq);
}
static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
{
	return le32_to_cpu(*((u32 *)mmio->data)) & mask;
}

static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
{
	*((u32 *)mmio->data) = cpu_to_le32(value) & mask;
}
/**
 * vgic_reg_access - access vgic register
 * @mmio:   pointer to the data describing the mmio access
 * @reg:    pointer to the virtual backing of vgic distributor data
 * @offset: least significant 2 bits used for word offset
 * @mode:   ACCESS_ mode (see defines above)
 *
 * Helper to make vgic register access easier using one of the access
 * modes defined for vgic register access
 * (read,raz,write-ignored,setbit,clearbit,write)
 */
static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
			    phys_addr_t offset, int mode)
{
	int word_offset = (offset & 3) * 8;
	u32 mask = (1UL << (mmio->len * 8)) - 1;
	u32 regval;

	/*
	 * Any alignment fault should have been delivered to the guest
	 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
	 */

	if (reg) {
		regval = *reg;
	} else {
		BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
		regval = 0;
	}

	if (mmio->is_write) {
		u32 data = mmio_data_read(mmio, mask) << word_offset;
		switch (ACCESS_WRITE_MASK(mode)) {
		case ACCESS_WRITE_IGNORED:
			return;

		case ACCESS_WRITE_SETBIT:
			regval |= data;
			break;

		case ACCESS_WRITE_CLEARBIT:
			regval &= ~data;
			break;

		case ACCESS_WRITE_VALUE:
			regval = (regval & ~(mask << word_offset)) | data;
			break;
		}
		*reg = regval;
	} else {
		switch (ACCESS_READ_MASK(mode)) {
		case ACCESS_READ_RAZ:
			regval = 0;
			/* fall through */

		case ACCESS_READ_VALUE:
			mmio_data_write(mmio, mask, regval >> word_offset);
		}
	}
}
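/*
 * Example of the sub-word handling above: a one-byte guest write at
 * byte offset 2 within a word has mmio->len == 1, so mask == 0xff and
 * word_offset == 16; with ACCESS_WRITE_VALUE only bits [23:16] of
 * *reg are replaced, and a read of the same byte returns
 * (*reg >> 16) & 0xff.
 */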
static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;
	u32 word_offset = offset & 3;

	switch (offset & ~3) {
	case 0:			/* GICD_CTLR */
		reg = vcpu->kvm->arch.vgic.enabled;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
		if (mmio->is_write) {
			vcpu->kvm->arch.vgic.enabled = reg & 1;
			vgic_update_state(vcpu->kvm);
			return true;
		}
		break;

	case 4:			/* GICD_TYPER */
		reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		reg |= (VGIC_NR_IRQS >> 5) - 1;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;

	case 8:			/* GICD_IIDR */
		reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;
	}

	return false;
}
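/*
 * Example GICD_TYPER encoding: with 4 online VCPUs and VGIC_NR_IRQS ==
 * 256, the register reads back as ((4 - 1) << 5) | ((256 >> 5) - 1) ==
 * 0x67, i.e. CPUNumber 3 (meaning 4 CPUs) and ITLinesNumber 7 (meaning
 * 32 * (7 + 1) == 256 interrupt lines).
 */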
static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}
static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
	if (mmio->is_write) {
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		if (offset < 4) /* Force SGI enabled */
			*reg |= 0xffff;
		vgic_retire_disabled_irqs(vcpu);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	u32 *reg, orig;
	u32 level_mask;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu->vcpu_id, offset);
	level_mask = (~(*reg));

	/* Mark both level and edge triggered irqs as pending */
	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);

	if (mmio->is_write) {
		/* Set the soft-pending flag only for level-triggered irqs */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu->vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
		*reg &= level_mask;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	u32 *level_active;
	u32 *reg, orig;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		/* Re-set level triggered level-active interrupts */
		level_active = vgic_bitmap_get_reg(&dist->irq_level,
						   vcpu->vcpu_id, offset);
		reg = vgic_bitmap_get_reg(&dist->irq_pending,
					  vcpu->vcpu_id, offset);
		*reg |= *level_active;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		/* Clear soft-pending flags */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu->vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);

		vgic_update_state(vcpu->kvm);
		return true;
	}
	return false;
}
static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
					vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}
#define GICD_ITARGETSR_SIZE	32
#define GICD_CPUTARGETS_BITS	8
#define GICD_IRQS_PER_ITARGETSR	(GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;
	u32 val = 0;

	irq -= VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
		val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);

	return val;
}
static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, c;
	unsigned long *bmap;
	u32 target;

	irq -= VGIC_NR_PRIVATE_IRQS;

	/*
	 * Pick the LSB in each byte. This ensures we target exactly
	 * one vcpu per IRQ. If the byte is null, assume we target
	 * CPU0.
	 */
	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
		int shift = i * GICD_CPUTARGETS_BITS;
		target = ffs((val >> shift) & 0xffU);
		target = target ? (target - 1) : 0;
		dist->irq_spi_cpu[irq + i] = target;
		kvm_for_each_vcpu(c, vcpu, kvm) {
			bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
			if (c == target)
				set_bit(irq + i, bmap);
			else
				clear_bit(irq + i, bmap);
		}
	}
}
static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset)
{
	u32 reg;

	/* We treat the banked interrupts targets as read-only */
	if (offset < 32) {
		u32 roreg = 1 << vcpu->vcpu_id;
		roreg |= roreg << 8;
		roreg |= roreg << 16;

		vgic_reg_access(mmio, &roreg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static u32 vgic_cfg_expand(u16 val)
{
	u32 res = 0;
	int i;

	/*
	 * Turn a 16bit value like abcd...mnop into a 32bit word
	 * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);

	return res;
}

static u16 vgic_cfg_compress(u32 val)
{
	u16 res = 0;
	int i;

	/*
	 * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
	 * abcd...mnop which is what we really care about.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;

	return res;
}
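/*
 * Worked example: vgic_cfg_expand(0x0005) sets bits 1 and 5 and
 * returns 0x22, i.e. IRQs 0 and 2 marked edge-triggered in the
 * 2-bits-per-IRQ hardware layout; vgic_cfg_compress(0x22) gives back
 * 0x0005.
 */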
/*
 * The distributor uses 2 bits per IRQ for the CFG register, but the
 * LSB is always 0. As such, we only keep the upper bit, and use the
 * two above functions to compress/expand the bits
 */
static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 val;
	u32 *reg;

	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				  vcpu->vcpu_id, offset >> 1);

	if (offset & 4)
		val = *reg >> 16;
	else
		val = *reg & 0xffff;

	val = vgic_cfg_expand(val);
	vgic_reg_access(mmio, &val, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		if (offset < 8) {
			*reg = ~0U; /* Force PPIs/SGIs to 1 */
			return false;
		}

		val = vgic_cfg_compress(val);
		if (offset & 4) {
			*reg &= 0xffff;
			*reg |= val << 16;
		} else {
			*reg &= 0xffff << 16;
			*reg |= val;
		}
	}

	return false;
}
static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_dispatch_sgi(vcpu, reg);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
/**
 * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
 * @vcpu: pointer to the vcpu struct whose LRs are scanned
 *
 * Move any pending IRQs that have already been assigned to LRs back to the
 * emulated distributor state so that the complete emulated state can be read
 * from the main emulation structures without investigating the LRs.
 *
 * Note that IRQs in the active state in the LRs get their pending state moved
 * to the distributor but the active state stays in the LRs, because we don't
 * track the active state on the distributor side.
 */
static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int vcpu_id = vcpu->vcpu_id;
	int i;

	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
		struct vgic_lr lr = vgic_get_lr(vcpu, i);

		/*
		 * There are three options for the state bits:
		 *
		 * 01: pending
		 * 10: active
		 * 11: pending and active
		 *
		 * If the LR holds only an active interrupt (not pending) then
		 * just leave it alone.
		 */
		if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE)
			continue;

		/*
		 * Reestablish the pending state on the distributor and the
		 * CPU interface. It may have already been pending, but that
		 * is fine, then we are only setting a few bits that were
		 * already set.
		 */
		vgic_dist_irq_set_pending(vcpu, lr.irq);
		if (lr.irq < VGIC_NR_SGIS)
			dist->irq_sgi_sources[vcpu_id][lr.irq] |= 1 << lr.source;
		lr.state &= ~LR_STATE_PENDING;
		vgic_set_lr(vcpu, i, lr);

		/*
		 * If there's no state left on the LR (it could still be
		 * active), then the LR does not hold any useful info and can
		 * be marked as free for other use.
		 */
		if (!(lr.state & LR_STATE_MASK)) {
			vgic_retire_lr(i, lr.irq, vcpu);
			vgic_irq_clear_queued(vcpu, lr.irq);
		}

		/* Finally update the VGIC state. */
		vgic_update_state(vcpu->kvm);
	}
}
/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3) * 4;
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg = 0;

	/* Copy source SGIs from distributor side */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		int shift = 8 * (sgi - min_sgi);
		reg |= (u32)dist->irq_sgi_sources[vcpu_id][sgi] << shift;
	}

	mmio_data_write(mmio, ~0, reg);
	return false;
}
static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset, bool set)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3) * 4;
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg;
	bool updated = false;

	reg = mmio_data_read(mmio, ~0);

	/* Clear pending SGIs on the distributor */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 mask = reg >> (8 * (sgi - min_sgi));
		if (set) {
			if ((dist->irq_sgi_sources[vcpu_id][sgi] & mask) != mask)
				updated = true;
			dist->irq_sgi_sources[vcpu_id][sgi] |= mask;
		} else {
			if (dist->irq_sgi_sources[vcpu_id][sgi] & mask)
				updated = true;
			dist->irq_sgi_sources[vcpu_id][sgi] &= ~mask;
		}
	}

	if (updated)
		vgic_update_state(vcpu->kvm);

	return updated;
}
static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
}

static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
}
/*
 * I would have liked to use the kvm_bus_io_*() API instead, but it
 * cannot cope with banked registers (only the VM pointer is passed
 * around, and we need the vcpu). One of these days, someone please
 * fix it!
 */
struct mmio_range {
	phys_addr_t base;
	unsigned long len;
	bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
			    phys_addr_t offset);
};
[] = {
857 .base
= GIC_DIST_CTRL
,
859 .handle_mmio
= handle_mmio_misc
,
862 .base
= GIC_DIST_IGROUP
,
863 .len
= VGIC_NR_IRQS
/ 8,
864 .handle_mmio
= handle_mmio_raz_wi
,
867 .base
= GIC_DIST_ENABLE_SET
,
868 .len
= VGIC_NR_IRQS
/ 8,
869 .handle_mmio
= handle_mmio_set_enable_reg
,
872 .base
= GIC_DIST_ENABLE_CLEAR
,
873 .len
= VGIC_NR_IRQS
/ 8,
874 .handle_mmio
= handle_mmio_clear_enable_reg
,
877 .base
= GIC_DIST_PENDING_SET
,
878 .len
= VGIC_NR_IRQS
/ 8,
879 .handle_mmio
= handle_mmio_set_pending_reg
,
882 .base
= GIC_DIST_PENDING_CLEAR
,
883 .len
= VGIC_NR_IRQS
/ 8,
884 .handle_mmio
= handle_mmio_clear_pending_reg
,
887 .base
= GIC_DIST_ACTIVE_SET
,
888 .len
= VGIC_NR_IRQS
/ 8,
889 .handle_mmio
= handle_mmio_raz_wi
,
892 .base
= GIC_DIST_ACTIVE_CLEAR
,
893 .len
= VGIC_NR_IRQS
/ 8,
894 .handle_mmio
= handle_mmio_raz_wi
,
897 .base
= GIC_DIST_PRI
,
899 .handle_mmio
= handle_mmio_priority_reg
,
902 .base
= GIC_DIST_TARGET
,
904 .handle_mmio
= handle_mmio_target_reg
,
907 .base
= GIC_DIST_CONFIG
,
908 .len
= VGIC_NR_IRQS
/ 4,
909 .handle_mmio
= handle_mmio_cfg_reg
,
912 .base
= GIC_DIST_SOFTINT
,
914 .handle_mmio
= handle_mmio_sgi_reg
,
917 .base
= GIC_DIST_SGI_PENDING_CLEAR
,
919 .handle_mmio
= handle_mmio_sgi_clear
,
922 .base
= GIC_DIST_SGI_PENDING_SET
,
924 .handle_mmio
= handle_mmio_sgi_set
,
static const
struct mmio_range *find_matching_range(const struct mmio_range *ranges,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	const struct mmio_range *r = ranges;

	while (r->len) {
		if (offset >= r->base &&
		    (offset + mmio->len) <= (r->base + r->len))
			return r;
		r++;
	}

	return NULL;
}
/**
 * vgic_handle_mmio - handle an in-kernel MMIO access
 * @vcpu:	pointer to the vcpu performing the access
 * @run:	pointer to the kvm_run structure
 * @mmio:	pointer to the data describing the access
 *
 * returns true if the MMIO access has been performed in kernel space,
 * and false if it needs to be emulated in user space.
 */
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
		      struct kvm_exit_mmio *mmio)
{
	const struct mmio_range *range;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long base = dist->vgic_dist_base;
	bool updated_state;
	unsigned long offset;

	if (!irqchip_in_kernel(vcpu->kvm) ||
	    mmio->phys_addr < base ||
	    (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
		return false;

	/* We don't support ldrd / strd or ldm / stm to the emulated vgic */
	if (mmio->len > 4) {
		kvm_inject_dabt(vcpu, mmio->phys_addr);
		return true;
	}

	offset = mmio->phys_addr - base;
	range = find_matching_range(vgic_dist_ranges, mmio, offset);
	if (unlikely(!range || !range->handle_mmio)) {
		pr_warn("Unhandled access %d %08llx %d\n",
			mmio->is_write, mmio->phys_addr, mmio->len);
		return false;
	}

	spin_lock(&vcpu->kvm->arch.vgic.lock);
	offset = mmio->phys_addr - range->base - base;
	updated_state = range->handle_mmio(vcpu, mmio, offset);
	spin_unlock(&vcpu->kvm->arch.vgic.lock);
	kvm_prepare_mmio(run, mmio);
	kvm_handle_mmio_return(vcpu, run);

	if (updated_state)
		vgic_kick_vcpus(vcpu->kvm);

	return true;
}
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	u8 target_cpus;
	int sgi, mode, c, vcpu_id;

	vcpu_id = vcpu->vcpu_id;

	sgi = reg & 0xf;
	target_cpus = (reg >> 16) & 0xff;
	mode = (reg >> 24) & 3;

	switch (mode) {
	case 0:
		if (!target_cpus)
			return;
		break;

	case 1:
		target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
		break;

	case 2:
		target_cpus = 1 << vcpu_id;
		break;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (target_cpus & 1) {
			/* Flag the SGI as pending */
			vgic_dist_irq_set_pending(vcpu, sgi);
			dist->irq_sgi_sources[c][sgi] |= 1 << vcpu_id;
			kvm_debug("SGI%d from CPU%d to CPU%d\n",
				  sgi, vcpu_id, c);
		}

		target_cpus >>= 1;
	}
}
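/*
 * Example GICD_SGIR write: reg == 0x000a0003 decodes to mode 0 (use
 * the target list), target_cpus == 0x0a (VCPUs 1 and 3) and SGI number
 * 3, so SGI3 is flagged pending on VCPUs 1 and 3 with the writing VCPU
 * recorded as the source.
 */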
static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
	unsigned long pending_private, pending_shared;
	int vcpu_id;

	vcpu_id = vcpu->vcpu_id;
	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
	pend_shared = vcpu->arch.vgic_cpu.pending_shared;

	pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);

	pending = vgic_bitmap_get_shared_map(&dist->irq_pending);
	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
	bitmap_and(pend_shared, pending, enabled, VGIC_NR_SHARED_IRQS);
	bitmap_and(pend_shared, pend_shared,
		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
		   VGIC_NR_SHARED_IRQS);

	pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
	pending_shared = find_first_bit(pend_shared, VGIC_NR_SHARED_IRQS);
	return (pending_private < VGIC_NR_PRIVATE_IRQS ||
		pending_shared < VGIC_NR_SHARED_IRQS);
}
/*
 * Update the interrupt state and determine which CPUs have pending
 * interrupts. Must be called with distributor lock held.
 */
static void vgic_update_state(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int c;

	if (!dist->enabled) {
		set_bit(0, &dist->irq_pending_on_cpu);
		return;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (compute_pending_for_cpu(vcpu)) {
			pr_debug("CPU%d has pending interrupts\n", c);
			set_bit(c, &dist->irq_pending_on_cpu);
		}
	}
}
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
	return vgic_ops->get_lr(vcpu, lr);
}

static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
			struct vgic_lr vlr)
{
	vgic_ops->set_lr(vcpu, lr, vlr);
}

static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
			       struct vgic_lr vlr)
{
	vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
}

static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_elrsr(vcpu);
}

static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_eisr(vcpu);
}

static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_interrupt_status(vcpu);
}

static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable_underflow(vcpu);
}

static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->disable_underflow(vcpu);
}

static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->get_vmcr(vcpu, vmcr);
}

static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->set_vmcr(vcpu, vmcr);
}

static inline void vgic_enable(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable(vcpu);
}
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);

	vlr.state = 0;
	vgic_set_lr(vcpu, lr_nr, vlr);
	clear_bit(lr_nr, vgic_cpu->lr_used);
	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
}
/*
 * An interrupt may have been disabled after being made pending on the
 * CPU interface (the classic case is a timer running while we're
 * rebooting the guest - the interrupt would kick as soon as the CPU
 * interface gets enabled, with deadly consequences).
 *
 * The solution is to examine already active LRs, and check the
 * interrupt is still enabled. If not, just retire it.
 */
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int lr;

	for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

		if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
			vgic_retire_lr(lr, vlr.irq, vcpu);
			if (vgic_irq_is_queued(vcpu, vlr.irq))
				vgic_irq_clear_queued(vcpu, vlr.irq);
		}
	}
}
/*
 * Queue an interrupt to a CPU virtual interface. Return true on success,
 * or false if it wasn't possible to queue it.
 */
static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr;
	int lr;

	/* Sanitize the input... */
	BUG_ON(sgi_source_id & ~7);
	BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
	BUG_ON(irq >= VGIC_NR_IRQS);

	kvm_debug("Queue IRQ%d\n", irq);

	lr = vgic_cpu->vgic_irq_lr_map[irq];

	/* Do we have an active interrupt for the same CPUID? */
	if (lr != LR_EMPTY) {
		vlr = vgic_get_lr(vcpu, lr);
		if (vlr.source == sgi_source_id) {
			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
			vlr.state |= LR_STATE_PENDING;
			vgic_set_lr(vcpu, lr, vlr);
			return true;
		}
	}

	/* Try to use another LR for this interrupt */
	lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
				 vgic->nr_lr);
	if (lr >= vgic->nr_lr)
		return false;

	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
	vgic_cpu->vgic_irq_lr_map[irq] = lr;
	set_bit(lr, vgic_cpu->lr_used);

	vlr.irq = irq;
	vlr.source = sgi_source_id;
	vlr.state = LR_STATE_PENDING;
	if (!vgic_irq_is_edge(vcpu, irq))
		vlr.state |= LR_EOI_INT;

	vgic_set_lr(vcpu, lr, vlr);

	return true;
}
static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long sources;
	int vcpu_id = vcpu->vcpu_id;
	int c;

	sources = dist->irq_sgi_sources[vcpu_id][irq];

	for_each_set_bit(c, &sources, VGIC_MAX_CPUS) {
		if (vgic_queue_irq(vcpu, c, irq))
			clear_bit(c, &sources);
	}

	dist->irq_sgi_sources[vcpu_id][irq] = sources;

	/*
	 * If the sources bitmap has been cleared it means that we
	 * could queue all the SGIs onto link registers (see the
	 * clear_bit above), and therefore we are done with them in
	 * our emulated gic and can get rid of them.
	 */
	if (!sources) {
		vgic_dist_irq_clear_pending(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return true;
	}

	return false;
}
static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
	if (!vgic_can_sample_irq(vcpu, irq))
		return true; /* level interrupt, already queued */

	if (vgic_queue_irq(vcpu, 0, irq)) {
		if (vgic_irq_is_edge(vcpu, irq)) {
			vgic_dist_irq_clear_pending(vcpu, irq);
			vgic_cpu_irq_clear(vcpu, irq);
		} else {
			vgic_irq_set_queued(vcpu, irq);
		}

		return true;
	}

	return false;
}
/*
 * Fill the list registers with pending interrupts before running the
 * guest.
 */
static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i, vcpu_id;
	int overflow = 0;

	vcpu_id = vcpu->vcpu_id;

	/*
	 * We may not have any pending interrupt, or the interrupts
	 * may have been serviced from another vcpu. In all cases,
	 * move along.
	 */
	if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
		pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
		goto epilog;
	}

	/* SGIs */
	for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
		if (!vgic_queue_sgi(vcpu, i))
			overflow = 1;
	}

	/* PPIs */
	for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i))
			overflow = 1;
	}

	/* SPIs */
	for_each_set_bit(i, vgic_cpu->pending_shared, VGIC_NR_SHARED_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
			overflow = 1;
	}

epilog:
	if (overflow) {
		vgic_enable_underflow(vcpu);
	} else {
		vgic_disable_underflow(vcpu);
		/*
		 * We're about to run this VCPU, and we've consumed
		 * everything the distributor had in store for
		 * us. Claim we don't have anything pending. We'll
		 * adjust that if needed while exiting.
		 */
		clear_bit(vcpu_id, &dist->irq_pending_on_cpu);
	}
}
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
	u32 status = vgic_get_interrupt_status(vcpu);
	bool level_pending = false;

	kvm_debug("STATUS = %08x\n", status);

	if (status & INT_STATUS_EOI) {
		/*
		 * Some level interrupts have been EOIed. Clear their
		 * active bit.
		 */
		u64 eisr = vgic_get_eisr(vcpu);
		unsigned long *eisr_ptr = (unsigned long *)&eisr;
		int lr;

		for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
			WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));

			vgic_irq_clear_queued(vcpu, vlr.irq);
			WARN_ON(vlr.state & LR_STATE_MASK);
			vlr.state = 0;
			vgic_set_lr(vcpu, lr, vlr);

			/*
			 * If the IRQ was EOIed it was also ACKed and we
			 * therefore assume we can clear the soft pending
			 * state (should it have been set) for this interrupt.
			 *
			 * Note: if the IRQ soft pending state was set after
			 * the IRQ was acked, it actually shouldn't be
			 * cleared, but we have no way of knowing that unless
			 * we start trapping ACKs when the soft-pending state
			 * is set.
			 */
			vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);

			/* Any additional pending interrupt? */
			if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
				vgic_cpu_irq_set(vcpu, vlr.irq);
				level_pending = true;
			} else {
				vgic_dist_irq_clear_pending(vcpu, vlr.irq);
				vgic_cpu_irq_clear(vcpu, vlr.irq);
			}

			/*
			 * Despite being EOIed, the LR may not have
			 * been marked as empty.
			 */
			vgic_sync_lr_elrsr(vcpu, lr, vlr);
		}
	}

	if (status & INT_STATUS_UNDERFLOW)
		vgic_disable_underflow(vcpu);

	return level_pending;
}
/*
 * Sync back the VGIC state after a guest run. The distributor lock is
 * needed so we don't get preempted in the middle of the state processing.
 */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	u64 elrsr;
	unsigned long *elrsr_ptr;
	int lr, pending;
	bool level_pending;

	level_pending = vgic_process_maintenance(vcpu);
	elrsr = vgic_get_elrsr(vcpu);
	elrsr_ptr = (unsigned long *)&elrsr;

	/* Clear mappings for empty LRs */
	for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
		struct vgic_lr vlr;

		if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
			continue;

		vlr = vgic_get_lr(vcpu, lr);

		BUG_ON(vlr.irq >= VGIC_NR_IRQS);
		vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
	}

	/* Check if we still have something up our sleeve... */
	pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
	if (level_pending || pending < vgic->nr_lr)
		set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_flush_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_sync_hwstate(vcpu);
	spin_unlock(&dist->lock);
}
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}
static void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu))
			kvm_vcpu_kick(vcpu);
	}
}
static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
{
	int edge_triggered = vgic_irq_is_edge(vcpu, irq);

	/*
	 * Only inject an interrupt if:
	 * - edge triggered and we have a rising edge
	 * - level triggered and we change level
	 */
	if (edge_triggered) {
		int state = vgic_dist_irq_is_pending(vcpu, irq);
		return level > state;
	} else {
		int state = vgic_dist_irq_get_level(vcpu, irq);
		return level != state;
	}
}
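/*
 * Example: an edge IRQ that is already pending (state == 1) filters
 * out a new level == 1 (no rising edge), while a level IRQ only
 * injects when the line actually changes, e.g. level == 1 with
 * state == 0.
 */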
static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
				    unsigned int irq_num, bool level)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int edge_triggered, level_triggered;
	int enabled;
	bool ret = true;

	spin_lock(&dist->lock);

	vcpu = kvm_get_vcpu(kvm, cpuid);
	edge_triggered = vgic_irq_is_edge(vcpu, irq_num);
	level_triggered = !edge_triggered;

	if (!vgic_validate_injection(vcpu, irq_num, level)) {
		ret = false;
		goto out;
	}

	if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
		cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
		vcpu = kvm_get_vcpu(kvm, cpuid);
	}

	kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);

	if (level) {
		if (level_triggered)
			vgic_dist_irq_set_level(vcpu, irq_num);
		vgic_dist_irq_set_pending(vcpu, irq_num);
	} else {
		if (level_triggered) {
			vgic_dist_irq_clear_level(vcpu, irq_num);
			if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
				vgic_dist_irq_clear_pending(vcpu, irq_num);
		} else {
			vgic_dist_irq_clear_pending(vcpu, irq_num);
		}
	}

	enabled = vgic_irq_is_enabled(vcpu, irq_num);

	if (!enabled) {
		ret = false;
		goto out;
	}

	if (!vgic_can_sample_irq(vcpu, irq_num)) {
		/*
		 * Level interrupt in progress, will be picked up
		 * when EOId.
		 */
		ret = false;
		goto out;
	}

	if (level) {
		vgic_cpu_irq_set(vcpu, irq_num);
		set_bit(cpuid, &dist->irq_pending_on_cpu);
	}

out:
	spin_unlock(&dist->lock);

	return ret;
}
/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @irq_num: The IRQ number that is assigned to the device
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  activates an interrupt
 *			      false: deactivates an interrupt
 *
 * The GIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts. You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
			bool level)
{
	if (likely(vgic_initialized(kvm)) &&
	    vgic_update_irq_pending(kvm, cpuid, irq_num, level))
		vgic_kick_vcpus(kvm);

	return 0;
}
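/*
 * Typical in-kernel caller (a sketch only; timer_irq is a stand-in
 * for whatever PPI number the emulated device uses):
 *
 *	kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, timer_irq, true);
 *
 * For SPIs the cpuid argument is superseded by irq_spi_cpu[] above.
 */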
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_process_maintenance).
	 */
	return IRQ_HANDLED;
}
/**
 * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
 * @vcpu: pointer to the vcpu struct
 *
 * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
 * this vcpu and enable the VGIC for this VCPU
 */
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i;

	if (vcpu->vcpu_id >= VGIC_MAX_CPUS)
		return -EBUSY;

	for (i = 0; i < VGIC_NR_IRQS; i++) {
		if (i < VGIC_NR_PPIS)
			vgic_bitmap_set_irq_val(&dist->irq_enabled,
						vcpu->vcpu_id, i, 1);
		if (i < VGIC_NR_PRIVATE_IRQS)
			vgic_bitmap_set_irq_val(&dist->irq_cfg,
						vcpu->vcpu_id, i, VGIC_CFG_EDGE);

		vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
	}

	/*
	 * Store the number of LRs per vcpu, so we don't have to go
	 * all the way to the distributor structure to find out. Only
	 * assembly code should use this one.
	 */
	vgic_cpu->nr_lr = vgic->nr_lr;

	vgic_enable(vcpu);

	return 0;
}
/**
 * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
 * @kvm: pointer to the kvm struct
 *
 * Map the virtual CPU interface into the VM before running any VCPUs. We
 * can't do this at creation time, because user space must first set the
 * virtual CPU interface address in the guest physical address space. Also
 * initialize the ITARGETSRn regs to 0 on the emulated distributor.
 */
int kvm_vgic_init(struct kvm *kvm)
{
	int ret = 0, i;

	if (!irqchip_in_kernel(kvm))
		return 0;

	mutex_lock(&kvm->lock);

	if (vgic_initialized(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
				    vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE);
	if (ret) {
		kvm_err("Unable to remap VGIC CPU to VCPU\n");
		goto out;
	}

	for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4)
		vgic_set_target_reg(kvm, 0, i);

	kvm->arch.vgic.ready = true;
out:
	mutex_unlock(&kvm->lock);
	return ret;
}
int kvm_vgic_create(struct kvm *kvm)
{
	int i, vcpu_lock_idx = -1, ret = 0;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->lock);

	if (kvm->arch.vgic.vctrl_base) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run while we create the vgic.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!mutex_trylock(&vcpu->mutex))
			goto out_unlock;
		vcpu_lock_idx = i;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.has_run_once) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}

	spin_lock_init(&kvm->arch.vgic.lock);
	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;

out_unlock:
	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&vcpu->mutex);
	}

out:
	mutex_unlock(&kvm->lock);
	return ret;
}
static int vgic_ioaddr_overlap(struct kvm *kvm)
{
	phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
	phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;

	if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
		return 0;
	if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
	    (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
		return -EBUSY;
	return 0;
}
static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
			      phys_addr_t addr, phys_addr_t size)
{
	int ret;

	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

	if (addr & (SZ_4K - 1))
		return -EINVAL;

	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;
	if (addr + size < addr)
		return -EINVAL;

	*ioaddr = addr;
	ret = vgic_ioaddr_overlap(kvm);
	if (ret)
		*ioaddr = VGIC_ADDR_UNDEF;

	return ret;
}
/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V2_ADDR_TYPE_XXX
 * @addr:  pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space. These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;

	mutex_lock(&kvm->lock);
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		if (write) {
			r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
					       *addr, KVM_VGIC_V2_DIST_SIZE);
		} else {
			*addr = vgic->vgic_dist_base;
		}
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		if (write) {
			r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
					       *addr, KVM_VGIC_V2_CPU_SIZE);
		} else {
			*addr = vgic->vgic_cpu_base;
		}
		break;
	default:
		r = -ENODEV;
	}

	mutex_unlock(&kvm->lock);
	return r;
}
static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
				 struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	bool updated = false;
	struct vgic_vmcr vmcr;
	u32 *vmcr_field;
	u32 reg;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (offset & ~0x3) {
	case GIC_CPU_CTRL:
		vmcr_field = &vmcr.ctlr;
		break;
	case GIC_CPU_PRIMASK:
		vmcr_field = &vmcr.pmr;
		break;
	case GIC_CPU_BINPOINT:
		vmcr_field = &vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr_field = &vmcr.abpr;
		break;
	default:
		BUG();
	}

	if (!mmio->is_write) {
		reg = *vmcr_field;
		mmio_data_write(mmio, ~0, reg);
	} else {
		reg = mmio_data_read(mmio, ~0);
		if (reg != *vmcr_field) {
			*vmcr_field = reg;
			vgic_set_vmcr(vcpu, &vmcr);
			updated = true;
		}
	}
	return updated;
}

static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
}
static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	u32 reg;

	if (mmio->is_write)
		return false;

	/* GICC_IIDR */
	reg = (PRODUCT_ID_KVM << 20) |
	      (GICC_ARCH_VERSION_V2 << 16) |
	      (IMPLEMENTER_ARM << 0);
	mmio_data_write(mmio, ~0, reg);
	return false;
}
/*
 * CPU Interface Register accesses - these are not accessed by the VM, but by
 * user space for saving and restoring VGIC state.
 */
static const struct mmio_range vgic_cpu_ranges[] = {
	{
		.base		= GIC_CPU_CTRL,
		.len		= 12,
		.handle_mmio	= handle_cpu_mmio_misc,
	},
	{
		.base		= GIC_CPU_ALIAS_BINPOINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_abpr,
	},
	{
		.base		= GIC_CPU_ACTIVEPRIO,
		.len		= 16,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_CPU_IDENT,
		.len		= 4,
		.handle_mmio	= handle_cpu_mmio_ident,
	},
	{}
};
static int vgic_attr_regs_access(struct kvm_device *dev,
				 struct kvm_device_attr *attr,
				 u32 *reg, bool is_write)
{
	const struct mmio_range *r = NULL, *ranges;
	phys_addr_t offset;
	int ret, cpuid, c;
	struct kvm_vcpu *vcpu, *tmp_vcpu;
	struct vgic_dist *vgic;
	struct kvm_exit_mmio mmio;

	offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
		KVM_DEV_ARM_VGIC_CPUID_SHIFT;

	mutex_lock(&dev->kvm->lock);

	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
		ret = -EINVAL;
		goto out;
	}

	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
	vgic = &dev->kvm->arch.vgic;

	mmio.len = 4;
	mmio.is_write = is_write;
	if (is_write)
		mmio_data_write(&mmio, ~0, *reg);
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		mmio.phys_addr = vgic->vgic_dist_base + offset;
		ranges = vgic_dist_ranges;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		mmio.phys_addr = vgic->vgic_cpu_base + offset;
		ranges = vgic_cpu_ranges;
		break;
	default:
		BUG();
	}
	r = find_matching_range(ranges, &mmio, offset);

	if (unlikely(!r || !r->handle_mmio)) {
		ret = -ENXIO;
		goto out;
	}

	spin_lock(&vgic->lock);

	/*
	 * Ensure that no other VCPU is running by checking the vcpu->cpu
	 * field. If no other VCPUs are running we can safely access the VGIC
	 * state, because even if another VCPU is run after this point, that
	 * VCPU will not touch the vgic state, because it will block on
	 * getting the vgic->lock in kvm_vgic_sync_hwstate().
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
		if (unlikely(tmp_vcpu->cpu != -1)) {
			ret = -EBUSY;
			goto out_vgic_unlock;
		}
	}

	/*
	 * Move all pending IRQs from the LRs on all VCPUs so the pending
	 * state can be properly represented in the register state accessible
	 * through this API.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
		vgic_unqueue_irqs(tmp_vcpu);

	offset -= r->base;
	r->handle_mmio(vcpu, &mmio, offset);

	if (!is_write)
		*reg = mmio_data_read(&mmio, ~0);

	ret = 0;
out_vgic_unlock:
	spin_unlock(&vgic->lock);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}
static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		return (r == -ENODEV) ? -ENXIO : r;
	}

	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_attr_regs_access(dev, attr, &reg, true);
	}

	}

	return -ENXIO;
}
static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}

	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		r = vgic_attr_regs_access(dev, attr, &reg, false);
		if (r)
			return r;
		r = put_user(reg, uaddr);
		break;
	}

	}

	return r;
}
*ranges
,
2067 struct kvm_exit_mmio dev_attr_mmio
;
2069 dev_attr_mmio
.len
= 4;
2070 if (find_matching_range(ranges
, &dev_attr_mmio
, offset
))
2076 static int vgic_has_attr(struct kvm_device
*dev
, struct kvm_device_attr
*attr
)
2080 switch (attr
->group
) {
2081 case KVM_DEV_ARM_VGIC_GRP_ADDR
:
2082 switch (attr
->attr
) {
2083 case KVM_VGIC_V2_ADDR_TYPE_DIST
:
2084 case KVM_VGIC_V2_ADDR_TYPE_CPU
:
2088 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS
:
2089 offset
= attr
->attr
& KVM_DEV_ARM_VGIC_OFFSET_MASK
;
2090 return vgic_has_attr_regs(vgic_dist_ranges
, offset
);
2091 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS
:
2092 offset
= attr
->attr
& KVM_DEV_ARM_VGIC_OFFSET_MASK
;
2093 return vgic_has_attr_regs(vgic_cpu_ranges
, offset
);
2098 static void vgic_destroy(struct kvm_device
*dev
)
2103 static int vgic_create(struct kvm_device
*dev
, u32 type
)
2105 return kvm_vgic_create(dev
->kvm
);
static struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_set_attr,
	.get_attr = vgic_get_attr,
	.has_attr = vgic_has_attr,
};
)
2119 enable_percpu_irq(vgic
->maint_irq
, 0);
static int vgic_cpu_notify(struct notifier_block *self,
			   unsigned long action, void *cpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		vgic_init_maintenance_interrupt(NULL);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		disable_percpu_irq(vgic->maint_irq);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block vgic_cpu_nb = {
	.notifier_call = vgic_cpu_notify,
};
static const struct of_device_id vgic_ids[] = {
	{ .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
	{ .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
	{},
};
int kvm_vgic_hyp_init(void)
{
	const struct of_device_id *matched_id;
	const int (*vgic_probe)(struct device_node *, const struct vgic_ops **,
				const struct vgic_params **);
	struct device_node *vgic_node;
	int ret;

	vgic_node = of_find_matching_node_and_match(NULL,
						    vgic_ids, &matched_id);
	if (!vgic_node) {
		kvm_err("error: no compatible GIC node found\n");
		return -ENODEV;
	}

	vgic_probe = matched_id->data;
	ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
	if (ret)
		return ret;

	ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
		return ret;
	}

	ret = __register_cpu_notifier(&vgic_cpu_nb);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	/* Callback into for arch code for setup */
	vgic_arch_setup(vgic);

	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);

	return kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
				       KVM_DEV_TYPE_ARM_VGIC_V2);

out_free_irq:
	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
	return ret;
}