/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
/*
 * How the whole thing works (courtesy of Christoffer Dall):
 *
 * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
 *   something is pending on the CPU interface.
 * - Interrupts that are pending on the distributor are stored on the
 *   vgic.irq_pending vgic bitmap (this bitmap is updated by both user land
 *   ioctls and guest mmio ops, and other in-kernel peripherals such as the
 *   arch. timers).
 * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
 *   recalculated.
 * - To calculate the oracle, we need info for each cpu from
 *   compute_pending_for_cpu, which considers:
 *   - PPI: dist->irq_pending & dist->irq_enable
 *   - SPI: dist->irq_pending & dist->irq_enable & dist->irq_spi_target
 *   - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
 *     registers, stored on each vcpu. We only keep one bit of
 *     information per interrupt, making sure that only one vcpu can
 *     accept the interrupt.
 * - If any of the above state changes, we must recalculate the oracle.
 * - The same is true when injecting an interrupt, except that we only
 *   consider a single interrupt at a time. The irq_spi_cpu array
 *   contains the target CPU for each SPI.
 *
 * The handling of level interrupts adds some extra complexity. We
 * need to track when the interrupt has been EOIed, so we can sample
 * the 'line' again. This is achieved as such:
 *
 * - When a level interrupt is moved onto a vcpu, the corresponding
 *   bit in irq_queued is set. As long as this bit is set, the line
 *   will be ignored for further interrupts. The interrupt is injected
 *   into the vcpu with the GICH_LR_EOI bit set (generate a
 *   maintenance interrupt on EOI).
 * - When the interrupt is EOIed, the maintenance interrupt fires,
 *   and clears the corresponding bit in irq_queued. This allows the
 *   interrupt line to be sampled again.
 * - Note that level-triggered interrupts can also be set to pending from
 *   writes to GICD_ISPENDRn and lowering the external input line does not
 *   cause the interrupt to become inactive in such a situation.
 *   Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
 *   inactive as long as the external input line is held high.
 */
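/*
 * Worked example of the oracle (added for illustration, not part of the
 * original text): assume SPI 40 is both pending and enabled on the
 * distributor, and vcpu0's irq_spi_target bitmap has the bit for SPI 40
 * set. compute_pending_for_cpu(vcpu0) then finds a set bit in the AND of
 * the three shared bitmaps, so bit 0 of dist->irq_pending_on_cpu gets
 * set and vcpu0 is kicked to pick the interrupt up.
 */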
#define VGIC_ADDR_UNDEF		(-1)
#define IS_VGIC_ADDR_UNDEF(_x)	((_x) == VGIC_ADDR_UNDEF)

#define PRODUCT_ID_KVM		0x4b	/* ASCII code K */
#define IMPLEMENTER_ARM		0x43b
#define GICC_ARCH_VERSION_V2	0x2

#define ACCESS_READ_VALUE	(1 << 0)
#define ACCESS_READ_RAZ		(0 << 0)
#define ACCESS_READ_MASK(x)	((x) & (1 << 0))
#define ACCESS_WRITE_IGNORED	(0 << 1)
#define ACCESS_WRITE_SETBIT	(1 << 1)
#define ACCESS_WRITE_CLEARBIT	(2 << 1)
#define ACCESS_WRITE_VALUE	(3 << 1)
#define ACCESS_WRITE_MASK(x)	((x) & (3 << 1))
static int vgic_init(struct kvm *kvm);
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
static void vgic_update_state(struct kvm *kvm);
static void vgic_kick_vcpus(struct kvm *kvm);
static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi);
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);

static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;
static void add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
	vcpu->kvm->arch.vgic.vm_ops.add_sgi_source(vcpu, irq, source);
}

static bool queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	return vcpu->kvm->arch.vgic.vm_ops.queue_sgi(vcpu, irq);
}

int kvm_vgic_map_resources(struct kvm *kvm)
{
	return kvm->arch.vgic.vm_ops.map_resources(kvm, vgic);
}
/*
 * struct vgic_bitmap contains a bitmap made of unsigned longs, but
 * extracts u32s out of them.
 *
 * This does not work on 64-bit BE systems, because the bitmap access
 * will store two consecutive 32-bit words with the higher-addressed
 * register's bits at the lower index and the lower-addressed register's
 * bits at the higher index.
 *
 * Therefore, swizzle the register index when accessing the 32-bit word
 * registers to access the right register's value.
 */
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
#define REG_OFFSET_SWIZZLE	1
#else
#define REG_OFFSET_SWIZZLE	0
#endif
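/*
 * Illustration (added for clarity, assuming the layout described above):
 * with REG_OFFSET_SWIZZLE == 1, the two u32 registers sharing one 64-bit
 * long swap places. In vgic_bitmap_get_reg() below, shared register
 * word 1 maps to array index (1 - 1) ^ 1 = 1 and word 2 to
 * (2 - 1) ^ 1 = 0, undoing the big-endian word order inside each
 * unsigned long.
 */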
static int vgic_init_bitmap(struct vgic_bitmap *b, int nr_cpus, int nr_irqs)
{
	int nr_longs;

	nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);

	b->private = kzalloc(sizeof(unsigned long) * nr_longs, GFP_KERNEL);
	if (!b->private)
		return -ENOMEM;

	b->shared = b->private + nr_cpus;

	return 0;
}

static void vgic_free_bitmap(struct vgic_bitmap *b)
{
	kfree(b->private);
	b->private = NULL;
	b->shared = NULL;
}
/*
 * Call this function to convert a u64 value to an unsigned long * bitmask
 * in a way that works on both 32-bit and 64-bit LE and BE platforms.
 *
 * Warning: Calling this function may modify *val.
 */
static unsigned long *u64_to_bitmask(u64 *val)
{
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
	*val = (*val >> 32) | (*val << 32);
#endif
	return (unsigned long *)val;
}
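/*
 * Example (added): on a 32-bit BE host, a u64 with only bit 32 set
 * (0x0000000100000000) is stored with the high word at the lower
 * address, so a naive bitmap search would wrongly report bit 0.
 * Swapping the two 32-bit halves first makes for_each_set_bit() find
 * bit 32, exactly as it would on a little-endian host.
 */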
static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
				int cpuid, u32 offset)
{
	offset >>= 2;
	if (!offset)
		return (u32 *)(x->private + cpuid) + REG_OFFSET_SWIZZLE;
	else
		return (u32 *)(x->shared) + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
}

static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
				   int cpuid, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		return test_bit(irq, x->private + cpuid);

	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
}

static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
				    int irq, int val)
{
	unsigned long *reg;

	if (irq < VGIC_NR_PRIVATE_IRQS) {
		reg = x->private + cpuid;
	} else {
		reg = x->shared;
		irq -= VGIC_NR_PRIVATE_IRQS;
	}

	if (val)
		set_bit(irq, reg);
	else
		clear_bit(irq, reg);
}
static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
{
	return x->private + cpuid;
}

static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
	return x->shared;
}

static int vgic_init_bytemap(struct vgic_bytemap *x, int nr_cpus, int nr_irqs)
{
	int size;

	size  = nr_cpus * VGIC_NR_PRIVATE_IRQS;
	size += nr_irqs - VGIC_NR_PRIVATE_IRQS;

	x->private = kzalloc(size, GFP_KERNEL);
	if (!x->private)
		return -ENOMEM;

	x->shared = x->private + nr_cpus * VGIC_NR_PRIVATE_IRQS / sizeof(u32);
	return 0;
}

static void vgic_free_bytemap(struct vgic_bytemap *b)
{
	kfree(b->private);
	b->private = NULL;
	b->shared = NULL;
}

static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
	u32 *reg;

	if (offset < VGIC_NR_PRIVATE_IRQS) {
		reg = x->private;
		offset += cpuid * VGIC_NR_PRIVATE_IRQS;
	} else {
		reg = x->shared;
		offset -= VGIC_NR_PRIVATE_IRQS;
	}

	return reg + (offset / sizeof(u32));
}
#define VGIC_CFG_LEVEL	0
#define VGIC_CFG_EDGE	1

static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int irq_val;

	irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
	return irq_val == VGIC_CFG_EDGE;
}

static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
}

static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
}

static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1);
}

static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0);
}

static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		set_bit(irq - VGIC_NR_PRIVATE_IRQS,
			vcpu->arch.vgic_cpu.pending_shared);
}

static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
			  vcpu->arch.vgic_cpu.pending_shared);
}

static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
{
	return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq);
}
static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
{
	return le32_to_cpu(*((u32 *)mmio->data)) & mask;
}

static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
{
	*((u32 *)mmio->data) = cpu_to_le32(value) & mask;
}
/**
 * vgic_reg_access - access vgic register
 * @mmio:   pointer to the data describing the mmio access
 * @reg:    pointer to the virtual backing of vgic distributor data
 * @offset: least significant 2 bits used for word offset
 * @mode:   ACCESS_ mode (see defines above)
 *
 * Helper to make vgic register access easier using one of the access
 * modes defined for vgic register access
 * (read,raz,write-ignored,setbit,clearbit,write)
 */
static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
			    phys_addr_t offset, int mode)
{
	int word_offset = (offset & 3) * 8;
	u32 mask = (1UL << (mmio->len * 8)) - 1;
	u32 regval;

	/*
	 * Any alignment fault should have been delivered to the guest
	 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
	 */

	if (reg) {
		regval = *reg;
	} else {
		BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
		regval = 0;
	}

	if (mmio->is_write) {
		u32 data = mmio_data_read(mmio, mask) << word_offset;
		switch (ACCESS_WRITE_MASK(mode)) {
		case ACCESS_WRITE_IGNORED:
			return;

		case ACCESS_WRITE_SETBIT:
			regval |= data;
			break;

		case ACCESS_WRITE_CLEARBIT:
			regval &= ~data;
			break;

		case ACCESS_WRITE_VALUE:
			regval = (regval & ~(mask << word_offset)) | data;
			break;
		}
		*reg = regval;
	} else {
		switch (ACCESS_READ_MASK(mode)) {
		case ACCESS_READ_RAZ:
			regval = 0;
			/* fall through */

		case ACCESS_READ_VALUE:
			mmio_data_write(mmio, mask, regval >> word_offset);
		}
	}
}
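/*
 * Worked example (added): a one-byte guest write to byte 1 of a
 * register gives word_offset = 8 and mask = 0xff, so under
 * ACCESS_WRITE_VALUE only bits 15:8 of *reg are replaced; the other
 * bytes survive thanks to the (regval & ~(mask << word_offset)) term.
 */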
static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;
	u32 word_offset = offset & 3;

	switch (offset & ~3) {
	case 0:			/* GICD_CTLR */
		reg = vcpu->kvm->arch.vgic.enabled;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
		if (mmio->is_write) {
			vcpu->kvm->arch.vgic.enabled = reg & 1;
			vgic_update_state(vcpu->kvm);
			return true;
		}
		break;

	case 4:			/* GICD_TYPER */
		reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;

	case 8:			/* GICD_IIDR */
		reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;
	}

	return false;
}
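/*
 * Example (added): for a guest with 4 VCPUs and nr_irqs = 128, the
 * emulated GICD_TYPER reads (4 - 1) << 5 | ((128 >> 5) - 1) = 0x63,
 * i.e. CPUNumber = 3 (meaning four CPUs) and ITLinesNumber = 3
 * (meaning 32 * (3 + 1) = 128 interrupt lines).
 */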
static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}
static bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
				   phys_addr_t offset, int vcpu_id, int access)
{
	u32 *reg;
	int mode = ACCESS_READ_VALUE | access;
	struct kvm_vcpu *target_vcpu = kvm_get_vcpu(kvm, vcpu_id);

	reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_enabled, vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset, mode);
	if (mmio->is_write) {
		if (access & ACCESS_WRITE_CLEARBIT) {
			if (offset < 4) /* Force SGI enabled */
				*reg |= 0xffff;
			vgic_retire_disabled_irqs(target_vcpu);
		}
		vgic_update_state(kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
}

static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
}
static bool vgic_handle_set_pending_reg(struct kvm *kvm,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset, int vcpu_id)
{
	u32 *reg, orig;
	u32 level_mask;
	int mode = ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT;
	struct vgic_dist *dist = &kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu_id, offset);
	level_mask = (~(*reg));

	/* Mark both level and edge triggered irqs as pending */
	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset, mode);

	if (mmio->is_write) {
		/* Set the soft-pending flag only for level-triggered irqs */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset, mode);
		*reg &= level_mask;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		vgic_update_state(kvm);
		return true;
	}

	return false;
}
static bool vgic_handle_clear_pending_reg(struct kvm *kvm,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset, int vcpu_id)
{
	u32 *level_active;
	u32 *reg, orig;
	int mode = ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT;
	struct vgic_dist *dist = &kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset, mode);
	if (mmio->is_write) {
		/* Re-set level triggered level-active interrupts */
		level_active = vgic_bitmap_get_reg(&dist->irq_level,
						   vcpu_id, offset);
		reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
		*reg |= *level_active;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		/* Clear soft-pending flags */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset, mode);

		vgic_update_state(kvm);
		return true;
	}
	return false;
}
static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
					   vcpu->vcpu_id);
}

static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
					     vcpu->vcpu_id);
}

static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
					vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}
#define GICD_ITARGETSR_SIZE	32
#define GICD_CPUTARGETS_BITS	8
#define GICD_IRQS_PER_ITARGETSR	(GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;
	u32 val = 0;

	irq -= VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
		val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);

	return val;
}

static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, c;
	unsigned long *bmap;
	u32 target;

	irq -= VGIC_NR_PRIVATE_IRQS;

	/*
	 * Pick the LSB in each byte. This ensures we target exactly
	 * one vcpu per IRQ. If the byte is null, assume we target
	 * CPU0.
	 */
	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
		int shift = i * GICD_CPUTARGETS_BITS;

		target = ffs((val >> shift) & 0xffU);
		target = target ? (target - 1) : 0;
		dist->irq_spi_cpu[irq + i] = target;
		kvm_for_each_vcpu(c, vcpu, kvm) {
			bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
			if (c == target)
				set_bit(irq + i, bmap);
			else
				clear_bit(irq + i, bmap);
		}
	}
}
static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset)
{
	u32 reg;

	/* We treat the banked interrupts targets as read-only */
	if (offset < 32) {
		u32 roreg = 1 << vcpu->vcpu_id;

		roreg |= roreg << 8;
		roreg |= roreg << 16;

		vgic_reg_access(mmio, &roreg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static u32 vgic_cfg_expand(u16 val)
{
	u32 res = 0;
	int i;

	/*
	 * Turn a 16bit value like abcd...mnop into a 32bit word
	 * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);

	return res;
}

static u16 vgic_cfg_compress(u32 val)
{
	u16 res = 0;
	int i;

	/*
	 * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
	 * abcd...mnop which is what we really care about.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;

	return res;
}
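/*
 * Worked example (added): vgic_cfg_expand(0x0005) sets bits
 * 2*0+1 = 1 and 2*2+1 = 5, giving 0x22; vgic_cfg_compress(0x22)
 * returns 0x0005 again, so the two helpers are exact inverses for the
 * one configuration bit per interrupt that the emulation keeps.
 */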
/*
 * The distributor uses 2 bits per IRQ for the CFG register, but the
 * LSB is always 0. As such, we only keep the upper bit, and use the
 * two above functions to compress/expand the bits
 */
static bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
				phys_addr_t offset)
{
	u32 val;

	if (offset & 4)
		val = *reg >> 16;
	else
		val = *reg & 0xffff;

	val = vgic_cfg_expand(val);
	vgic_reg_access(mmio, &val, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		if (offset < 8) {
			*reg = ~0U; /* Force PPIs/SGIs to 1 */
			return false;
		}

		val = vgic_cfg_compress(val);
		if (offset & 4) {
			*reg &= 0xffff;
			*reg |= val << 16;
		} else {
			*reg &= 0xffff << 16;
			*reg |= val;
		}
	}

	return false;
}

static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 *reg;

	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				  vcpu->vcpu_id, offset >> 1);

	return vgic_handle_cfg_reg(reg, mmio, offset);
}
static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_dispatch_sgi(vcpu, reg);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static void vgic_v2_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	*vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source;
}
/**
 * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
 * @vcpu: pointer to the vcpu whose LRs are scanned
 *
 * Move any pending IRQs that have already been assigned to LRs back to the
 * emulated distributor state so that the complete emulated state can be read
 * from the main emulation structures without investigating the LRs.
 *
 * Note that IRQs in the active state in the LRs get their pending state moved
 * to the distributor but the active state stays in the LRs, because we don't
 * track the active state on the distributor side.
 */
static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int i;

	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
		struct vgic_lr lr = vgic_get_lr(vcpu, i);

		/*
		 * There are three options for the state bits:
		 *
		 * 01: pending
		 * 10: active
		 * 11: pending and active
		 *
		 * If the LR holds only an active interrupt (not pending) then
		 * just leave it alone.
		 */
		if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE)
			continue;

		/*
		 * Reestablish the pending state on the distributor and the
		 * CPU interface.  It may have already been pending, but that
		 * is fine, then we are only setting a few bits that were
		 * already set.
		 */
		vgic_dist_irq_set_pending(vcpu, lr.irq);
		if (lr.irq < VGIC_NR_SGIS)
			add_sgi_source(vcpu, lr.irq, lr.source);
		lr.state &= ~LR_STATE_PENDING;
		vgic_set_lr(vcpu, i, lr);

		/*
		 * If there's no state left on the LR (it could still be
		 * active), then the LR does not hold any useful info and can
		 * be marked as free for other use.
		 */
		if (!(lr.state & LR_STATE_MASK)) {
			vgic_retire_lr(i, lr.irq, vcpu);
			vgic_irq_clear_queued(vcpu, lr.irq);
		}

		/* Finally update the VGIC state. */
		vgic_update_state(vcpu->kvm);
	}
}
/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg = 0;

	/* Copy source SGIs from distributor side */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		int shift = 8 * (sgi - min_sgi);

		reg |= ((u32)*vgic_get_sgi_sources(dist, vcpu_id, sgi)) << shift;
	}

	mmio_data_write(mmio, ~0, reg);
	return false;
}

static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset, bool set)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg;
	bool updated = false;

	reg = mmio_data_read(mmio, ~0);

	/* Set or clear pending SGIs on the distributor */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 mask = reg >> (8 * (sgi - min_sgi));
		u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);

		if (set) {
			if ((*src & mask) != mask)
				updated = true;
			*src |= mask;
		} else {
			if (*src & mask)
				updated = true;
			*src &= ~mask;
		}
	}

	if (updated)
		vgic_update_state(vcpu->kvm);

	return updated;
}
static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
}

static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
}
/*
 * I would have liked to use the kvm_bus_io_*() API instead, but it
 * cannot cope with banked registers (only the VM pointer is passed
 * around, and we need the vcpu). One of these days, someone please
 * fix it!
 */
struct mmio_range {
	phys_addr_t base;
	unsigned long len;
	int bits_per_irq;
	bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
			    phys_addr_t offset);
};

static const struct mmio_range vgic_dist_ranges[] = {
	{
		.base		= GIC_DIST_CTRL,
		.len		= 12,
		.bits_per_irq	= 0,
		.handle_mmio	= handle_mmio_misc,
	},
	{
		.base		= GIC_DIST_IGROUP,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ENABLE_SET,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_set_enable_reg,
	},
	{
		.base		= GIC_DIST_ENABLE_CLEAR,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_clear_enable_reg,
	},
	{
		.base		= GIC_DIST_PENDING_SET,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_set_pending_reg,
	},
	{
		.base		= GIC_DIST_PENDING_CLEAR,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_clear_pending_reg,
	},
	{
		.base		= GIC_DIST_ACTIVE_SET,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ACTIVE_CLEAR,
		.len		= VGIC_MAX_IRQS / 8,
		.bits_per_irq	= 1,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_PRI,
		.len		= VGIC_MAX_IRQS,
		.bits_per_irq	= 8,
		.handle_mmio	= handle_mmio_priority_reg,
	},
	{
		.base		= GIC_DIST_TARGET,
		.len		= VGIC_MAX_IRQS,
		.bits_per_irq	= 8,
		.handle_mmio	= handle_mmio_target_reg,
	},
	{
		.base		= GIC_DIST_CONFIG,
		.len		= VGIC_MAX_IRQS / 4,
		.bits_per_irq	= 2,
		.handle_mmio	= handle_mmio_cfg_reg,
	},
	{
		.base		= GIC_DIST_SOFTINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_sgi_reg,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_CLEAR,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_clear,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_SET,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_set,
	},
	{}
};
static const
struct mmio_range *find_matching_range(const struct mmio_range *ranges,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	const struct mmio_range *r = ranges;

	while (r->len) {
		if (offset >= r->base &&
		    (offset + mmio->len) <= (r->base + r->len))
			return r;
		r++;
	}

	return NULL;
}

static bool vgic_validate_access(const struct vgic_dist *dist,
				 const struct mmio_range *range,
				 unsigned long offset)
{
	int irq;

	if (!range->bits_per_irq)
		return true;	/* Not an irq-based access */

	irq = offset * 8 / range->bits_per_irq;
	if (irq >= dist->nr_irqs)
		return false;

	return true;
}
/*
 * Call the respective handler function for the given range.
 * We split up any 64 bit accesses into two consecutive 32 bit
 * handler calls and merge the result afterwards.
 * We do this in a little endian fashion regardless of the host's
 * or guest's endianness, because the GIC is always LE and the rest of
 * the code (vgic_reg_access) also puts it in a LE fashion already.
 * At this point we have already identified the handle function, so
 * range points to that one entry and offset is relative to this.
 */
static bool call_range_handler(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio,
			       unsigned long offset,
			       const struct mmio_range *range)
{
	u32 *data32 = (void *)mmio->data;
	struct kvm_exit_mmio mmio32;
	bool ret;

	if (likely(mmio->len <= 4))
		return range->handle_mmio(vcpu, mmio, offset);

	/*
	 * Any access bigger than 4 bytes (that we currently handle in KVM)
	 * is actually 8 bytes long, caused by a 64-bit access
	 */

	mmio32.len = 4;
	mmio32.is_write = mmio->is_write;

	mmio32.phys_addr = mmio->phys_addr + 4;
	if (mmio->is_write)
		*(u32 *)mmio32.data = data32[1];
	ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
	if (!mmio->is_write)
		data32[1] = *(u32 *)mmio32.data;

	mmio32.phys_addr = mmio->phys_addr;
	if (mmio->is_write)
		*(u32 *)mmio32.data = data32[0];
	ret |= range->handle_mmio(vcpu, &mmio32, offset);
	if (!mmio->is_write)
		data32[0] = *(u32 *)mmio32.data;

	return ret;
}
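/*
 * Example (added): an 8-byte read at offset 0x100 is emulated as a
 * 4-byte call at offset 0x104 filling data32[1], followed by one at
 * 0x100 filling data32[0] -- exactly the little-endian layout of the
 * 64-bit value the guest expects to see.
 */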
/**
 * vgic_handle_mmio_range - handle an in-kernel MMIO access
 * @vcpu:	pointer to the vcpu performing the access
 * @run:	pointer to the kvm_run structure
 * @mmio:	pointer to the data describing the access
 * @ranges:	array of MMIO ranges in a given region
 * @mmio_base:	base address of that region
 *
 * returns true if the MMIO access could be performed
 */
static bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   struct kvm_exit_mmio *mmio,
				   const struct mmio_range *ranges,
				   unsigned long mmio_base)
{
	const struct mmio_range *range;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool updated_state;
	unsigned long offset;

	offset = mmio->phys_addr - mmio_base;
	range = find_matching_range(ranges, mmio, offset);
	if (unlikely(!range || !range->handle_mmio)) {
		pr_warn("Unhandled access %d %08llx %d\n",
			mmio->is_write, mmio->phys_addr, mmio->len);
		return false;
	}

	spin_lock(&vcpu->kvm->arch.vgic.lock);
	offset -= range->base;
	if (vgic_validate_access(dist, range, offset)) {
		updated_state = call_range_handler(vcpu, mmio, offset, range);
	} else {
		if (!mmio->is_write)
			memset(mmio->data, 0, mmio->len);
		updated_state = false;
	}
	spin_unlock(&vcpu->kvm->arch.vgic.lock);
	kvm_prepare_mmio(run, mmio);
	kvm_handle_mmio_return(vcpu, run);

	if (updated_state)
		vgic_kick_vcpus(vcpu->kvm);

	return true;
}
static inline bool is_in_range(phys_addr_t addr, unsigned long len,
			       phys_addr_t baseaddr, unsigned long size)
{
	return (addr >= baseaddr) && (addr + len <= baseaddr + size);
}

static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
				struct kvm_exit_mmio *mmio)
{
	unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;

	if (!is_in_range(mmio->phys_addr, mmio->len, base,
			 KVM_VGIC_V2_DIST_SIZE))
		return false;

	/* GICv2 does not support accesses wider than 32 bits */
	if (mmio->len > 4) {
		kvm_inject_dabt(vcpu, mmio->phys_addr);
		return true;
	}

	return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
}
/**
 * vgic_handle_mmio - handle an in-kernel MMIO access for the GIC emulation
 * @vcpu:	pointer to the vcpu performing the access
 * @run:	pointer to the kvm_run structure
 * @mmio:	pointer to the data describing the access
 *
 * returns true if the MMIO access has been performed in kernel space,
 * and false if it needs to be emulated in user space.
 * Calls the actual handling routine for the selected VGIC model.
 */
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
		      struct kvm_exit_mmio *mmio)
{
	if (!irqchip_in_kernel(vcpu->kvm))
		return false;

	/*
	 * This will currently call either vgic_v2_handle_mmio() or
	 * vgic_v3_handle_mmio(), which in turn will call
	 * vgic_handle_mmio_range() defined above.
	 */
	return vcpu->kvm->arch.vgic.vm_ops.handle_mmio(vcpu, run, mmio);
}
static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
{
	return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
}

static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	u8 target_cpus;
	int sgi, mode, c, vcpu_id;

	vcpu_id = vcpu->vcpu_id;

	sgi = reg & 0xf;
	target_cpus = (reg >> 16) & 0xff;
	mode = (reg >> 24) & 3;

	switch (mode) {
	case 0:
		if (!target_cpus)
			return;
		break;

	case 1:
		target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
		break;

	case 2:
		target_cpus = 1 << vcpu_id;
		break;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (target_cpus & 1) {
			/* Flag the SGI as pending */
			vgic_dist_irq_set_pending(vcpu, sgi);
			*vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
			kvm_debug("SGI%d from CPU%d to CPU%d\n",
				  sgi, vcpu_id, c);
		}

		target_cpus >>= 1;
	}
}
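/*
 * Worked example (added): a guest write of 0x00040001 to GICD_SGIR on
 * vcpu0 decodes as mode = 0, target_cpus = 0x04 and sgi = 1, so SGI 1
 * is flagged pending on vcpu2 with bit 0 (the sender, CPU0) set in its
 * per-SGI source byte.
 */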
static int vgic_nr_shared_irqs(struct vgic_dist *dist)
{
	return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
}

static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
	unsigned long pending_private, pending_shared;
	int nr_shared = vgic_nr_shared_irqs(dist);
	int vcpu_id;

	vcpu_id = vcpu->vcpu_id;
	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
	pend_shared = vcpu->arch.vgic_cpu.pending_shared;

	pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);

	pending = vgic_bitmap_get_shared_map(&dist->irq_pending);
	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
	bitmap_and(pend_shared, pending, enabled, nr_shared);
	bitmap_and(pend_shared, pend_shared,
		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
		   nr_shared);

	pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
	pending_shared = find_first_bit(pend_shared, nr_shared);
	return (pending_private < VGIC_NR_PRIVATE_IRQS ||
		pending_shared < vgic_nr_shared_irqs(dist));
}
/*
 * Update the interrupt state and determine which CPUs have pending
 * interrupts. Must be called with distributor lock held.
 */
static void vgic_update_state(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int c;

	if (!dist->enabled) {
		set_bit(0, dist->irq_pending_on_cpu);
		return;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (compute_pending_for_cpu(vcpu)) {
			pr_debug("CPU%d has pending interrupts\n", c);
			set_bit(c, dist->irq_pending_on_cpu);
		}
	}
}
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
	return vgic_ops->get_lr(vcpu, lr);
}

static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
			struct vgic_lr vlr)
{
	vgic_ops->set_lr(vcpu, lr, vlr);
}

static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
			       struct vgic_lr vlr)
{
	vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
}

static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_elrsr(vcpu);
}

static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_eisr(vcpu);
}

static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_interrupt_status(vcpu);
}

static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable_underflow(vcpu);
}

static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->disable_underflow(vcpu);
}

static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->get_vmcr(vcpu, vmcr);
}

static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->set_vmcr(vcpu, vmcr);
}

static inline void vgic_enable(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable(vcpu);
}
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);

	vlr.state = 0;
	vgic_set_lr(vcpu, lr_nr, vlr);
	clear_bit(lr_nr, vgic_cpu->lr_used);
	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
}

/*
 * An interrupt may have been disabled after being made pending on the
 * CPU interface (the classic case is a timer running while we're
 * rebooting the guest - the interrupt would kick as soon as the CPU
 * interface gets enabled, with deadly consequences).
 *
 * The solution is to examine already active LRs, and check the
 * interrupt is still enabled. If not, just retire it.
 */
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int lr;

	for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

		if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
			vgic_retire_lr(lr, vlr.irq, vcpu);
			if (vgic_irq_is_queued(vcpu, vlr.irq))
				vgic_irq_clear_queued(vcpu, vlr.irq);
		}
	}
}
/*
 * Queue an interrupt to a CPU virtual interface. Return true on success,
 * or false if it wasn't possible to queue it.
 */
static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_lr vlr;
	int lr;

	/* Sanitize the input... */
	BUG_ON(sgi_source_id & ~7);
	BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
	BUG_ON(irq >= dist->nr_irqs);

	kvm_debug("Queue IRQ%d\n", irq);

	lr = vgic_cpu->vgic_irq_lr_map[irq];

	/* Do we have an active interrupt for the same CPUID? */
	if (lr != LR_EMPTY) {
		vlr = vgic_get_lr(vcpu, lr);
		if (vlr.source == sgi_source_id) {
			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
			vlr.state |= LR_STATE_PENDING;
			vgic_set_lr(vcpu, lr, vlr);
			return true;
		}
	}

	/* Try to use another LR for this interrupt */
	lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
				 vgic->nr_lr);
	if (lr >= vgic->nr_lr)
		return false;

	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
	vgic_cpu->vgic_irq_lr_map[irq] = lr;
	set_bit(lr, vgic_cpu->lr_used);

	vlr.irq = irq;
	vlr.source = sgi_source_id;
	vlr.state = LR_STATE_PENDING;
	if (!vgic_irq_is_edge(vcpu, irq))
		vlr.state |= LR_EOI_INT;

	vgic_set_lr(vcpu, lr, vlr);

	return true;
}
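/*
 * Example (added): if SGI 3 from CPU1 already occupies LR5 in the
 * pending state and CPU1 sends SGI 3 again, the vgic_irq_lr_map lookup
 * finds LR5 and merely re-sets LR_STATE_PENDING (the "piggyback" case)
 * instead of consuming a second list register.
 */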
static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long sources;
	int vcpu_id = vcpu->vcpu_id;
	int c;

	sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);

	for_each_set_bit(c, &sources, dist->nr_cpus) {
		if (vgic_queue_irq(vcpu, c, irq))
			clear_bit(c, &sources);
	}

	*vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;

	/*
	 * If the sources bitmap has been cleared it means that we
	 * could queue all the SGIs onto link registers (see the
	 * clear_bit above), and therefore we are done with them in
	 * our emulated gic and can get rid of them.
	 */
	if (!sources) {
		vgic_dist_irq_clear_pending(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return true;
	}

	return false;
}

static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
	if (!vgic_can_sample_irq(vcpu, irq))
		return true; /* level interrupt, already queued */

	if (vgic_queue_irq(vcpu, 0, irq)) {
		if (vgic_irq_is_edge(vcpu, irq)) {
			vgic_dist_irq_clear_pending(vcpu, irq);
			vgic_cpu_irq_clear(vcpu, irq);
		} else {
			vgic_irq_set_queued(vcpu, irq);
		}

		return true;
	}

	return false;
}
/*
 * Fill the list registers with pending interrupts before running the
 * guest.
 */
static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i, vcpu_id;
	int overflow = 0;

	vcpu_id = vcpu->vcpu_id;

	/*
	 * We may not have any pending interrupt, or the interrupts
	 * may have been serviced from another vcpu. In all cases,
	 * move along.
	 */
	if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
		pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
		goto epilog;
	}

	/* SGIs */
	for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
		if (!queue_sgi(vcpu, i))
			overflow = 1;
	}

	/* PPIs */
	for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i))
			overflow = 1;
	}

	/* SPIs */
	for_each_set_bit(i, vgic_cpu->pending_shared, vgic_nr_shared_irqs(dist)) {
		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
			overflow = 1;
	}

epilog:
	if (overflow) {
		vgic_enable_underflow(vcpu);
	} else {
		vgic_disable_underflow(vcpu);
		/*
		 * We're about to run this VCPU, and we've consumed
		 * everything the distributor had in store for
		 * us. Claim we don't have anything pending. We'll
		 * adjust that if needed while exiting.
		 */
		clear_bit(vcpu_id, dist->irq_pending_on_cpu);
	}
}
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
	u32 status = vgic_get_interrupt_status(vcpu);
	bool level_pending = false;

	kvm_debug("STATUS = %08x\n", status);

	if (status & INT_STATUS_EOI) {
		/*
		 * Some level interrupts have been EOIed. Clear their
		 * active state.
		 */
		u64 eisr = vgic_get_eisr(vcpu);
		unsigned long *eisr_ptr = u64_to_bitmask(&eisr);
		int lr;

		for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
			WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));

			vgic_irq_clear_queued(vcpu, vlr.irq);
			WARN_ON(vlr.state & LR_STATE_MASK);
			vlr.state = 0;
			vgic_set_lr(vcpu, lr, vlr);

			/*
			 * If the IRQ was EOIed it was also ACKed and we
			 * therefore assume we can clear the soft pending
			 * state (should it have been set) for this interrupt.
			 *
			 * Note: if the IRQ soft pending state was set after
			 * the IRQ was acked, it actually shouldn't be
			 * cleared, but we have no way of knowing that unless
			 * we start trapping ACKs when the soft-pending state
			 * is set.
			 */
			vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);

			/* Any additional pending interrupt? */
			if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
				vgic_cpu_irq_set(vcpu, vlr.irq);
				level_pending = true;
			} else {
				vgic_dist_irq_clear_pending(vcpu, vlr.irq);
				vgic_cpu_irq_clear(vcpu, vlr.irq);
			}

			/*
			 * Despite being EOIed, the LR may not have
			 * been marked as empty.
			 */
			vgic_sync_lr_elrsr(vcpu, lr, vlr);
		}
	}

	if (status & INT_STATUS_UNDERFLOW)
		vgic_disable_underflow(vcpu);

	return level_pending;
}
/*
 * Sync back the VGIC state after a guest run. The distributor lock is
 * needed so we don't get preempted in the middle of the state processing.
 */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	u64 elrsr;
	unsigned long *elrsr_ptr;
	int lr, pending;
	bool level_pending;

	level_pending = vgic_process_maintenance(vcpu);
	elrsr = vgic_get_elrsr(vcpu);
	elrsr_ptr = u64_to_bitmask(&elrsr);

	/* Clear mappings for empty LRs */
	for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
		struct vgic_lr vlr;

		if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
			continue;

		vlr = vgic_get_lr(vcpu, lr);

		BUG_ON(vlr.irq >= dist->nr_irqs);
		vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
	}

	/* Check if we still have something up our sleeve... */
	pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
	if (level_pending || pending < vgic->nr_lr)
		set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_flush_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_sync_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}
static void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu))
			kvm_vcpu_kick(vcpu);
	}
}

static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
{
	int edge_triggered = vgic_irq_is_edge(vcpu, irq);

	/*
	 * Only inject an interrupt if:
	 * - edge triggered and we have a rising edge
	 * - level triggered and we change level
	 */
	if (edge_triggered) {
		int state = vgic_dist_irq_is_pending(vcpu, irq);

		return level > state;
	} else {
		int state = vgic_dist_irq_get_level(vcpu, irq);

		return level != state;
	}
}
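/*
 * Illustration (added): for an edge-triggered IRQ, level = 1 while the
 * pending state is 0 is a rising edge and validates; level = 1 while
 * already pending does not. For a level-triggered IRQ, any change of
 * the cached line level (0 -> 1 or 1 -> 0) validates the injection.
 */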
static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
				   unsigned int irq_num, bool level)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int edge_triggered, level_triggered;
	int enabled;
	bool ret = true;

	spin_lock(&dist->lock);

	vcpu = kvm_get_vcpu(kvm, cpuid);
	edge_triggered = vgic_irq_is_edge(vcpu, irq_num);
	level_triggered = !edge_triggered;

	if (!vgic_validate_injection(vcpu, irq_num, level)) {
		ret = false;
		goto out;
	}

	if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
		cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
		vcpu = kvm_get_vcpu(kvm, cpuid);
	}

	kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);

	if (level) {
		if (level_triggered)
			vgic_dist_irq_set_level(vcpu, irq_num);
		vgic_dist_irq_set_pending(vcpu, irq_num);
	} else {
		if (level_triggered) {
			vgic_dist_irq_clear_level(vcpu, irq_num);
			if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
				vgic_dist_irq_clear_pending(vcpu, irq_num);
		}

		ret = false;
		goto out;
	}

	enabled = vgic_irq_is_enabled(vcpu, irq_num);

	if (!enabled) {
		ret = false;
		goto out;
	}

	if (!vgic_can_sample_irq(vcpu, irq_num)) {
		/*
		 * Level interrupt in progress, will be picked up
		 * when EOId.
		 */
		ret = false;
		goto out;
	}

	if (level) {
		vgic_cpu_irq_set(vcpu, irq_num);
		set_bit(cpuid, dist->irq_pending_on_cpu);
	}

out:
	spin_unlock(&dist->lock);

	return ret ? cpuid : -EINVAL;
}
/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @irq_num: The IRQ number that is assigned to the device
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  activates an interrupt
 *			      false: deactivates an interrupt
 *
 * The GIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
			bool level)
{
	int ret = 0;
	int vcpu_id;

	if (unlikely(!vgic_initialized(kvm))) {
		/*
		 * We only provide the automatic initialization of the VGIC
		 * for the legacy case of a GICv2. Any other type must
		 * be explicitly initialized once setup with the respective
		 * KVM device call.
		 */
		if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2) {
			ret = -EBUSY;
			goto out;
		}
		mutex_lock(&kvm->lock);
		ret = vgic_init(kvm);
		mutex_unlock(&kvm->lock);

		if (ret)
			goto out;
	}

	vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
	if (vcpu_id >= 0) {
		/* kick the specified vcpu */
		kvm_vcpu_kick(kvm_get_vcpu(kvm, vcpu_id));
	}

out:
	return ret;
}
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_process_maintenance).
	 */
	return IRQ_HANDLED;
}
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	kfree(vgic_cpu->pending_shared);
	kfree(vgic_cpu->vgic_irq_lr_map);
	vgic_cpu->pending_shared = NULL;
	vgic_cpu->vgic_irq_lr_map = NULL;
}

static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
	vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);

	if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) {
		kvm_vgic_vcpu_destroy(vcpu);
		return -ENOMEM;
	}

	memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs);

	/*
	 * Store the number of LRs per vcpu, so we don't have to go
	 * all the way to the distributor structure to find out. Only
	 * assembly code should use this one.
	 */
	vgic_cpu->nr_lr = vgic->nr_lr;

	return 0;
}
/**
 * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
 *
 * The host's GIC naturally limits the maximum amount of VCPUs a guest
 * can use.
 */
int kvm_vgic_get_max_vcpus(void)
{
	return vgic->max_gic_vcpus;
}
void kvm_vgic_destroy(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vgic_vcpu_destroy(vcpu);

	vgic_free_bitmap(&dist->irq_enabled);
	vgic_free_bitmap(&dist->irq_level);
	vgic_free_bitmap(&dist->irq_pending);
	vgic_free_bitmap(&dist->irq_soft_pend);
	vgic_free_bitmap(&dist->irq_queued);
	vgic_free_bitmap(&dist->irq_cfg);
	vgic_free_bytemap(&dist->irq_priority);
	if (dist->irq_spi_target) {
		for (i = 0; i < dist->nr_cpus; i++)
			vgic_free_bitmap(&dist->irq_spi_target[i]);
	}
	kfree(dist->irq_sgi_sources);
	kfree(dist->irq_spi_cpu);
	kfree(dist->irq_spi_target);
	kfree(dist->irq_pending_on_cpu);
	dist->irq_sgi_sources = NULL;
	dist->irq_spi_cpu = NULL;
	dist->irq_spi_target = NULL;
	dist->irq_pending_on_cpu = NULL;
}
static int vgic_v2_init_model(struct kvm *kvm)
{
	int i;

	for (i = VGIC_NR_PRIVATE_IRQS; i < kvm->arch.vgic.nr_irqs; i += 4)
		vgic_set_target_reg(kvm, 0, i);

	return 0;
}
/*
 * Allocate and initialize the various data structures. Must be called
 * with kvm->lock held!
 */
static int vgic_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int nr_cpus, nr_irqs;
	int ret, i, vcpu_id;

	if (vgic_initialized(kvm))
		return 0;

	nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
	if (!nr_cpus)		/* No vcpus? Can't be good... */
		return -ENODEV;

	/*
	 * If nobody configured the number of interrupts, use the
	 * legacy one.
	 */
	if (!dist->nr_irqs)
		dist->nr_irqs = VGIC_NR_IRQS_LEGACY;

	nr_irqs = dist->nr_irqs;

	ret  = vgic_init_bitmap(&dist->irq_enabled, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_level, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
	ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);

	if (ret)
		goto out;

	dist->irq_sgi_sources = kzalloc(nr_cpus * VGIC_NR_SGIS, GFP_KERNEL);
	dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS, GFP_KERNEL);
	dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus,
				       GFP_KERNEL);
	dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
					   GFP_KERNEL);
	if (!dist->irq_sgi_sources ||
	    !dist->irq_spi_cpu ||
	    !dist->irq_spi_target ||
	    !dist->irq_pending_on_cpu) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < nr_cpus; i++)
		ret |= vgic_init_bitmap(&dist->irq_spi_target[i],
					nr_cpus, nr_irqs);

	if (ret)
		goto out;

	ret = kvm->arch.vgic.vm_ops.init_model(kvm);
	if (ret)
		goto out;

	kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
		ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
		if (ret) {
			kvm_err("VGIC: Failed to allocate vcpu memory\n");
			break;
		}

		for (i = 0; i < dist->nr_irqs; i++) {
			if (i < VGIC_NR_PPIS)
				vgic_bitmap_set_irq_val(&dist->irq_enabled,
							vcpu->vcpu_id, i, 1);
			if (i < VGIC_NR_PRIVATE_IRQS)
				vgic_bitmap_set_irq_val(&dist->irq_cfg,
							vcpu->vcpu_id, i,
							VGIC_CFG_EDGE);
		}

		vgic_enable(vcpu);
	}

out:
	if (ret)
		kvm_vgic_destroy(kvm);

	return ret;
}
/**
 * kvm_vgic_map_resources - Configure global VGIC state before running any VCPUs
 * @kvm: pointer to the kvm struct
 *
 * Map the virtual CPU interface into the VM before running any VCPUs.  We
 * can't do this at creation time, because user space must first set the
 * virtual CPU interface address in the guest physical address space.
 */
static int vgic_v2_map_resources(struct kvm *kvm,
				 const struct vgic_params *params)
{
	int ret = 0;

	if (!irqchip_in_kernel(kvm))
		return 0;

	mutex_lock(&kvm->lock);

	if (vgic_ready(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	/*
	 * Initialize the vgic if this hasn't already been done on demand by
	 * accessing the vgic state from userspace.
	 */
	ret = vgic_init(kvm);
	if (ret) {
		kvm_err("Unable to allocate maps\n");
		goto out;
	}

	ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
				    params->vcpu_base, KVM_VGIC_V2_CPU_SIZE,
				    true);
	if (ret) {
		kvm_err("Unable to remap VGIC CPU to VCPU\n");
		goto out;
	}

	kvm->arch.vgic.ready = true;
out:
	if (ret)
		kvm_vgic_destroy(kvm);
	mutex_unlock(&kvm->lock);
	return ret;
}
static void vgic_v2_init_emulation(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	dist->vm_ops.handle_mmio = vgic_v2_handle_mmio;
	dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
	dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source;
	dist->vm_ops.init_model = vgic_v2_init_model;
	dist->vm_ops.map_resources = vgic_v2_map_resources;

	kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
}

static int init_vgic_model(struct kvm *kvm, int type)
{
	switch (type) {
	case KVM_DEV_TYPE_ARM_VGIC_V2:
		vgic_v2_init_emulation(kvm);
		break;
	default:
		return -ENODEV;
	}

	if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus)
		return -E2BIG;

	return 0;
}
int kvm_vgic_create(struct kvm *kvm, u32 type)
{
	int i, vcpu_lock_idx = -1, ret;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->lock);

	if (irqchip_in_kernel(kvm)) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run while we create the vgic.
	 */
	ret = -EBUSY;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!mutex_trylock(&vcpu->mutex))
			goto out_unlock;
		vcpu_lock_idx = i;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.has_run_once)
			goto out_unlock;
	}
	ret = 0;

	ret = init_vgic_model(kvm, type);
	if (ret)
		goto out_unlock;

	spin_lock_init(&kvm->arch.vgic.lock);
	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vgic_model = type;
	kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;

out_unlock:
	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&vcpu->mutex);
	}

out:
	mutex_unlock(&kvm->lock);
	return ret;
}
static int vgic_ioaddr_overlap(struct kvm *kvm)
{
	phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
	phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;

	if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
		return 0;
	if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
	    (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
		return -EBUSY;
	return 0;
}

static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
			      phys_addr_t addr, phys_addr_t size)
{
	int ret;

	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

	if (addr & (SZ_4K - 1))
		return -EINVAL;

	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;
	if (addr + size < addr)
		return -EINVAL;

	*ioaddr = addr;
	ret = vgic_ioaddr_overlap(kvm);
	if (ret)
		*ioaddr = VGIC_ADDR_UNDEF;

	return ret;
}
/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V2_ADDR_TYPE_XXX
 * @addr:  pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space.  These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;

	mutex_lock(&kvm->lock);
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		if (write) {
			r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
					       *addr, KVM_VGIC_V2_DIST_SIZE);
		} else {
			*addr = vgic->vgic_dist_base;
		}
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		if (write) {
			r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
					       *addr, KVM_VGIC_V2_CPU_SIZE);
		} else {
			*addr = vgic->vgic_cpu_base;
		}
		break;
	default:
		r = -ENODEV;
	}

	mutex_unlock(&kvm->lock);
	return r;
}
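/*
 * Userspace view (illustrative sketch added here, not part of this
 * file): the base addresses above are set through the KVM device API
 * before any VCPU runs, e.g. with a GICv2 device fd obtained from
 * KVM_CREATE_DEVICE:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
 *		.attr  = KVM_VGIC_V2_ADDR_TYPE_DIST,
 *		.addr  = (u64)(unsigned long)&dist_base,
 *	};
 *	ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * which reaches kvm_vgic_addr(kvm, type, &addr, true) via
 * vgic_set_attr() below.
 */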
2261 static bool handle_cpu_mmio_misc(struct kvm_vcpu
*vcpu
,
2262 struct kvm_exit_mmio
*mmio
, phys_addr_t offset
)
2264 bool updated
= false;
2265 struct vgic_vmcr vmcr
;
2269 vgic_get_vmcr(vcpu
, &vmcr
);
2271 switch (offset
& ~0x3) {
2273 vmcr_field
= &vmcr
.ctlr
;
2275 case GIC_CPU_PRIMASK
:
2276 vmcr_field
= &vmcr
.pmr
;
2278 case GIC_CPU_BINPOINT
:
2279 vmcr_field
= &vmcr
.bpr
;
2281 case GIC_CPU_ALIAS_BINPOINT
:
2282 vmcr_field
= &vmcr
.abpr
;
2288 if (!mmio
->is_write
) {
2290 mmio_data_write(mmio
, ~0, reg
);
2292 reg
= mmio_data_read(mmio
, ~0);
2293 if (reg
!= *vmcr_field
) {
2295 vgic_set_vmcr(vcpu
, &vmcr
);
2302 static bool handle_mmio_abpr(struct kvm_vcpu
*vcpu
,
2303 struct kvm_exit_mmio
*mmio
, phys_addr_t offset
)
2305 return handle_cpu_mmio_misc(vcpu
, mmio
, GIC_CPU_ALIAS_BINPOINT
);
static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	u32 reg;

	if (mmio->is_write)
		return false;

	/* GICC_IIDR */
	reg = (PRODUCT_ID_KVM << 20) |
	      (GICC_ARCH_VERSION_V2 << 16) |
	      (IMPLEMENTER_ARM << 0);
	mmio_data_write(mmio, ~0, reg);
	return false;
}
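/*
 * The identification value composed above is a compile-time constant:
 *
 *	(PRODUCT_ID_KVM << 20) | (GICC_ARCH_VERSION_V2 << 16) | IMPLEMENTER_ARM
 *	= (0x4b << 20) | (0x2 << 16) | 0x43b
 *	= 0x04b2043b
 *
 * i.e. GICC_IIDR advertises a v2 CPU interface implemented by ARM, with
 * 'K' (for KVM) as the product ID.
 */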
/*
 * CPU Interface Register accesses - these are not accessed by the VM, but by
 * user space for saving and restoring VGIC state.
 */
static const struct mmio_range vgic_cpu_ranges[] = {
	{
		.base		= GIC_CPU_CTRL,
		.len		= 12,
		.handle_mmio	= handle_cpu_mmio_misc,
	},
	{
		.base		= GIC_CPU_ALIAS_BINPOINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_abpr,
	},
	{
		.base		= GIC_CPU_ACTIVEPRIO,
		.len		= 16,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_CPU_IDENT,
		.len		= 4,
		.handle_mmio	= handle_cpu_mmio_ident,
	},
};
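/*
 * Illustrative save sketch (hypothetical 'vgic_fd'): user space can read a
 * CPU interface register of VCPU 1 through the table above, e.g. the
 * priority mask at offset GIC_CPU_PRIMASK (0x04):
 *
 *	uint32_t pmr;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
 *		.attr  = (1ULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) | 0x04,
 *		.addr  = (uint64_t)&pmr,
 *	};
 *	ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
 *
 * The lookup lands in the GIC_CPU_CTRL range and is handled by
 * handle_cpu_mmio_misc() above.
 */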
static int vgic_attr_regs_access(struct kvm_device *dev,
				 struct kvm_device_attr *attr,
				 u32 *reg, bool is_write)
{
	const struct mmio_range *r = NULL, *ranges;
	phys_addr_t offset;
	int ret, cpuid, c;
	struct kvm_vcpu *vcpu, *tmp_vcpu;
	struct vgic_dist *vgic;
	struct kvm_exit_mmio mmio;

	offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
		KVM_DEV_ARM_VGIC_CPUID_SHIFT;

	mutex_lock(&dev->kvm->lock);

	ret = vgic_init(dev->kvm);
	if (ret)
		goto out;

	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
		ret = -EINVAL;
		goto out;
	}

	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
	vgic = &dev->kvm->arch.vgic;

	mmio.len = 4;
	mmio.is_write = is_write;
	if (is_write)
		mmio_data_write(&mmio, ~0, *reg);
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		mmio.phys_addr = vgic->vgic_dist_base + offset;
		ranges = vgic_dist_ranges;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		mmio.phys_addr = vgic->vgic_cpu_base + offset;
		ranges = vgic_cpu_ranges;
		break;
	default:
		BUG();
	}
	r = find_matching_range(ranges, &mmio, offset);

	if (unlikely(!r || !r->handle_mmio)) {
		ret = -ENXIO;
		goto out;
	}

	spin_lock(&vgic->lock);

	/*
	 * Ensure that no other VCPU is running by checking the vcpu->cpu
	 * field.  If no other VCPUs are running we can safely access the VGIC
	 * state, because even if another VCPU is run after this point, that
	 * VCPU will not touch the vgic state, because it will block on
	 * getting the vgic->lock in kvm_vgic_sync_hwstate().
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
		if (unlikely(tmp_vcpu->cpu != -1)) {
			ret = -EBUSY;
			goto out_vgic_unlock;
		}
	}

	/*
	 * Move all pending IRQs from the LRs on all VCPUs so the pending
	 * state can be properly represented in the register state accessible
	 * through this API.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
		vgic_unqueue_irqs(tmp_vcpu);

	offset -= r->base;
	r->handle_mmio(vcpu, &mmio, offset);

	if (!is_write)
		*reg = mmio_data_read(&mmio, ~0);

	ret = 0;
out_vgic_unlock:
	spin_unlock(&vgic->lock);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}
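/*
 * For reference, the attr->attr encoding decoded at the top of this
 * function packs the target VCPU and the register offset into a single
 * 64-bit value.  An illustrative encoding:
 *
 *	attr->attr = (2ULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) | 0x08;
 *
 * selects cpuid == 2 and offset == 8.  The cpuid only matters for banked
 * (per-CPU) registers; for shared distributor state any valid VCPU index
 * yields the same result.
 */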
static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		return (r == -ENODEV) ? -ENXIO : r;
	}
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_attr_regs_access(dev, attr, &reg, true);
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 val;
		int ret = 0;

		if (get_user(val, uaddr))
			return -EFAULT;

		/*
		 * We require:
		 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
		 * - at most 1024 interrupts
		 * - a multiple of 32 interrupts
		 */
		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
		    val > VGIC_MAX_IRQS ||
		    (val & 31))
			return -EINVAL;

		mutex_lock(&dev->kvm->lock);

		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
			ret = -EBUSY;
		else
			dev->kvm->arch.vgic.nr_irqs = val;

		mutex_unlock(&dev->kvm->lock);

		return ret;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			r = vgic_init(dev->kvm);
			return r;
		}
		break;
	}
	}

	return -ENXIO;
}
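/*
 * Worked examples for the KVM_DEV_ARM_VGIC_GRP_NR_IRQS bounds checked in
 * vgic_set_attr() above (VGIC_NR_PRIVATE_IRQS is 32, VGIC_MAX_IRQS is
 * 1024):
 *
 *	val = 64   -> accepted: 32 private IRQs + 32 SPIs, multiple of 32
 *	val = 100  -> rejected: not a multiple of 32
 *	val = 32   -> rejected: leaves no SPIs at all
 *	val = 2048 -> rejected: above VGIC_MAX_IRQS
 */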
static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		r = vgic_attr_regs_access(dev, attr, &reg, false);
		if (r)
			return r;
		r = put_user(reg, uaddr);
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;

		r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr);
		break;
	}
	}

	return r;
}
static int vgic_has_attr_regs(const struct mmio_range *ranges,
			      phys_addr_t offset)
{
	struct kvm_exit_mmio dev_attr_mmio;

	dev_attr_mmio.len = 4;
	if (find_matching_range(ranges, &dev_attr_mmio, offset))
		return 0;
	else
		return -ENXIO;
}
static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	phys_addr_t offset;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_dist_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_cpu_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
		return 0;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
	}
	return -ENXIO;
}
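/*
 * Illustrative probe sketch (hypothetical 'vgic_fd'): user space can ask
 * whether an offset is backed before attempting to read it, e.g. for
 * distributor offset 0 (GICD_CTLR):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
 *		.attr  = 0,
 *	};
 *	if (ioctl(vgic_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0)
 *		; // offset is handled, KVM_GET_DEVICE_ATTR will succeed
 */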
static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}
static int vgic_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}
struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_set_attr,
	.get_attr = vgic_get_attr,
	.has_attr = vgic_has_attr,
};
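/*
 * Dispatch overview (sketch): device fds created via KVM_CREATE_DEVICE
 * with type KVM_DEV_TYPE_ARM_VGIC_V2 route their ioctls through the ops
 * above:
 *
 *	ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);	// -> vgic_set_attr()
 *	ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);	// -> vgic_get_attr()
 *	ioctl(vgic_fd, KVM_HAS_DEVICE_ATTR, &attr);	// -> vgic_has_attr()
 */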
static void vgic_init_maintenance_interrupt(void *info)
{
	enable_percpu_irq(vgic->maint_irq, 0);
}
static int vgic_cpu_notify(struct notifier_block *self,
			   unsigned long action, void *cpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		vgic_init_maintenance_interrupt(NULL);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		disable_percpu_irq(vgic->maint_irq);
		break;
	}

	return NOTIFY_OK;
}
static struct notifier_block vgic_cpu_nb = {
	.notifier_call = vgic_cpu_notify,
};
static const struct of_device_id vgic_ids[] = {
	{ .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
	{ .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
	{},
};
int kvm_vgic_hyp_init(void)
{
	const struct of_device_id *matched_id;
	const int (*vgic_probe)(struct device_node *, const struct vgic_ops **,
				const struct vgic_params **);
	struct device_node *vgic_node;
	int ret;

	vgic_node = of_find_matching_node_and_match(NULL,
						    vgic_ids, &matched_id);
	if (!vgic_node) {
		kvm_err("error: no compatible GIC node found\n");
		return -ENODEV;
	}

	vgic_probe = matched_id->data;
	ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
	if (ret)
		return ret;

	ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
		return ret;
	}

	ret = __register_cpu_notifier(&vgic_cpu_nb);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	/* Callback into arch code for setup */
	vgic_arch_setup(vgic);

	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);

	return 0;

out_free_irq:
	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
	return ret;
}
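/*
 * Matching is driven by the device tree.  A minimal GICv2 node this probe
 * path would accept looks like the following (illustrative addresses and
 * interrupt specifier; the third and fourth reg entries are the GICH/GICV
 * regions the hypervisor needs):
 *
 *	intc: interrupt-controller@2c001000 {
 *		compatible = "arm,cortex-a15-gic";
 *		interrupt-controller;
 *		#interrupt-cells = <3>;
 *		reg = <0x2c001000 0x1000>,	// GICD
 *		      <0x2c002000 0x2000>,	// GICC
 *		      <0x2c004000 0x2000>,	// GICH
 *		      <0x2c006000 0x2000>;	// GICV
 *		interrupts = <1 9 0xf04>;	// maintenance PPI
 *	};
 */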