/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
/*
 * How the whole thing works (courtesy of Christoffer Dall):
 *
 * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
 *   something is pending
 * - VGIC pending interrupts are stored on the vgic.irq_state vgic
 *   bitmap (this bitmap is updated by both user land ioctls and guest
 *   mmio ops, and other in-kernel peripherals such as the
 *   arch. timers) and indicate the 'wire' state.
 * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
 *   recalculated
 * - To calculate the oracle, we need info for each cpu from
 *   compute_pending_for_cpu, which considers:
 *   - PPI: dist->irq_state & dist->irq_enable
 *   - SPI: dist->irq_state & dist->irq_enable & dist->irq_spi_target
 *   - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
 *     registers, stored on each vcpu. We only keep one bit of
 *     information per interrupt, making sure that only one vcpu can
 *     accept the interrupt.
 * - The same is true when injecting an interrupt, except that we only
 *   consider a single interrupt at a time. The irq_spi_cpu array
 *   contains the target CPU for each SPI.
 *
 * The handling of level interrupts adds some extra complexity. We
 * need to track when the interrupt has been EOIed, so we can sample
 * the 'line' again. This is achieved as such:
 *
 * - When a level interrupt is moved onto a vcpu, the corresponding
 *   bit in irq_active is set. As long as this bit is set, the line
 *   will be ignored for further interrupts. The interrupt is injected
 *   into the vcpu with the GICH_LR_EOI bit set (generate a
 *   maintenance interrupt on EOI).
 * - When the interrupt is EOIed, the maintenance interrupt fires,
 *   and clears the corresponding bit in irq_active. This allows the
 *   interrupt line to be sampled again.
 */
#define VGIC_ADDR_UNDEF		(-1)
#define IS_VGIC_ADDR_UNDEF(_x)	((_x) == VGIC_ADDR_UNDEF)

#define PRODUCT_ID_KVM		0x4b	/* ASCII code K */
#define IMPLEMENTER_ARM		0x43b
#define GICC_ARCH_VERSION_V2	0x2

/* Physical address of vgic virtual cpu interface */
static phys_addr_t vgic_vcpu_base;

/* Virtual control interface base address */
static void __iomem *vgic_vctrl_base;

static struct device_node *vgic_node;

#define ACCESS_READ_VALUE	(1 << 0)
#define ACCESS_READ_RAZ		(0 << 0)
#define ACCESS_READ_MASK(x)	((x) & (1 << 0))
#define ACCESS_WRITE_IGNORED	(0 << 1)
#define ACCESS_WRITE_SETBIT	(1 << 1)
#define ACCESS_WRITE_CLEARBIT	(2 << 1)
#define ACCESS_WRITE_VALUE	(3 << 1)
#define ACCESS_WRITE_MASK(x)	((x) & (3 << 1))

static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_update_state(struct kvm *kvm);
static void vgic_kick_vcpus(struct kvm *kvm);
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
static u32 vgic_nr_lr;

static unsigned int vgic_maint_irq;
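
/*
 * vgic_bitmap accessors below: the first 32 interrupt IDs (SGIs and
 * PPIs) are banked per-cpu, so word 0 of a bitmap register block maps
 * to the per-cpu word while higher words index the shared SPI state.
 * For example, a word access at byte offset 4 lands on
 * x->shared.reg[0].
 */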
static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
				int cpuid, u32 offset)
{
	offset >>= 2;
	if (!offset)
		return x->percpu[cpuid].reg;
	else
		return x->shared.reg + offset - 1;
}
static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
				   int cpuid, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		return test_bit(irq, x->percpu[cpuid].reg_ul);

	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared.reg_ul);
}
static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
				    int irq, int val)
{
	unsigned long *reg;

	if (irq < VGIC_NR_PRIVATE_IRQS) {
		reg = x->percpu[cpuid].reg_ul;
	} else {
		reg = x->shared.reg_ul;
		irq -= VGIC_NR_PRIVATE_IRQS;
	}

	if (val)
		set_bit(irq, reg);
	else
		clear_bit(irq, reg);
}
static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
{
	if (unlikely(cpuid >= VGIC_MAX_CPUS))
		return NULL;
	return x->percpu[cpuid].reg_ul;
}
static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
	return x->shared.reg_ul;
}
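
/*
 * Bytemaps (one byte of state per interrupt, e.g. the priority
 * registers) follow the same split as the bitmaps above: the first
 * eight words cover the 32 banked private interrupts, the rest are
 * shared.
 */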
static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
	offset >>= 2;
	BUG_ON(offset > (VGIC_NR_IRQS / 4));
	if (offset < 8)
		return x->percpu[cpuid] + offset;
	else
		return x->shared + offset - 8;
}
#define VGIC_CFG_LEVEL	0
#define VGIC_CFG_EDGE	1

static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int irq_val;

	irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
	return irq_val == VGIC_CFG_EDGE;
}
static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
}
static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
}
static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
}
static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
}
static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_state, vcpu->vcpu_id, irq);
}
static void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 1);
}
static void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 0);
}
static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		set_bit(irq - VGIC_NR_PRIVATE_IRQS,
			vcpu->arch.vgic_cpu.pending_shared);
}
static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
			  vcpu->arch.vgic_cpu.pending_shared);
}
static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
{
	return *((u32 *)mmio->data) & mask;
}

static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
{
	*((u32 *)mmio->data) = value & mask;
}
/**
 * vgic_reg_access - access vgic register
 * @mmio:   pointer to the data describing the mmio access
 * @reg:    pointer to the virtual backing of vgic distributor data
 * @offset: least significant 2 bits used for word offset
 * @mode:   ACCESS_ mode (see defines above)
 *
 * Helper to make vgic register access easier using one of the access
 * modes defined for vgic register access
 * (read,raz,write-ignored,setbit,clearbit,write)
 */
static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
			    phys_addr_t offset, int mode)
{
	int word_offset = (offset & 3) * 8;
	u32 mask = (1UL << (mmio->len * 8)) - 1;
	u32 regval;

	/*
	 * Any alignment fault should have been delivered to the guest
	 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
	 */

	if (reg) {
		regval = *reg;
	} else {
		BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
		regval = 0;
	}

	if (mmio->is_write) {
		u32 data = mmio_data_read(mmio, mask) << word_offset;
		switch (ACCESS_WRITE_MASK(mode)) {
		case ACCESS_WRITE_IGNORED:
			return;

		case ACCESS_WRITE_SETBIT:
			regval |= data;
			break;

		case ACCESS_WRITE_CLEARBIT:
			regval &= ~data;
			break;

		case ACCESS_WRITE_VALUE:
			regval = (regval & ~(mask << word_offset)) | data;
			break;
		}
		*reg = regval;
	} else {
		switch (ACCESS_READ_MASK(mode)) {
		case ACCESS_READ_RAZ:
			regval = 0;
			/* fall through */

		case ACCESS_READ_VALUE:
			mmio_data_write(mmio, mask, regval >> word_offset);
		}
	}
}
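
/*
 * Example for vgic_reg_access(): a halfword guest access with
 * mmio->len == 2 at offset 2 gives mask == 0xffff and
 * word_offset == 16, so only the top halfword of *reg is read or
 * written; the rest of the backing word is preserved.
 */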
static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;
	u32 word_offset = offset & 3;

	switch (offset & ~3) {
	case 0:			/* GICD_CTLR */
		reg = vcpu->kvm->arch.vgic.enabled;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
		if (mmio->is_write) {
			vcpu->kvm->arch.vgic.enabled = reg & 1;
			vgic_update_state(vcpu->kvm);
			return true;
		}
		break;

	case 4:			/* GICD_TYPER */
		reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		reg |= (VGIC_NR_IRQS >> 5) - 1;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;

	case 8:			/* GICD_IIDR */
		reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;
	}

	return false;
}
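
/*
 * Example: assuming 4 online vcpus and VGIC_NR_IRQS == 256, GICD_TYPER
 * above reads as (3 << 5) | 7: a CPU number field of 3 (4 CPUs) and an
 * ITLinesNumber of 7 (32 * (7 + 1) == 256 interrupt IDs).
 */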
static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}
static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
	if (mmio->is_write) {
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		if (offset < 4) /* Force SGI enabled */
			*reg |= 0xffff;
		vgic_retire_disabled_irqs(vcpu);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
	if (mmio->is_write) {
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
					vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}
#define GICD_ITARGETSR_SIZE	32
#define GICD_CPUTARGETS_BITS	8
#define GICD_IRQS_PER_ITARGETSR	(GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;
	u32 val = 0;

	irq -= VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
		val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);

	return val;
}

static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, c;
	unsigned long *bmap;
	u32 target;

	irq -= VGIC_NR_PRIVATE_IRQS;

	/*
	 * Pick the LSB in each byte. This ensures we target exactly
	 * one vcpu per IRQ. If the byte is null, assume we target
	 * CPU0.
	 */
	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
		int shift = i * GICD_CPUTARGETS_BITS;
		target = ffs((val >> shift) & 0xffU);
		target = target ? (target - 1) : 0;
		dist->irq_spi_cpu[irq + i] = target;
		kvm_for_each_vcpu(c, vcpu, kvm) {
			bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
			if (c == target)
				set_bit(irq + i, bmap);
			else
				clear_bit(irq + i, bmap);
		}
	}
}
static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset)
{
	u32 reg;

	/* We treat the banked interrupts targets as read-only */
	if (offset < 32) {
		u32 roreg = 1 << vcpu->vcpu_id;
		roreg |= roreg << 8;
		roreg |= roreg << 16;

		vgic_reg_access(mmio, &roreg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static u32 vgic_cfg_expand(u16 val)
{
	u32 res = 0;
	int i;

	/*
	 * Turn a 16bit value like abcd...mnop into a 32bit word
	 * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);

	return res;
}

static u16 vgic_cfg_compress(u32 val)
{
	u16 res = 0;
	int i;

	/*
	 * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
	 * abcd...mnop which is what we really care about.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;

	return res;
}
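
/*
 * Worked example: vgic_cfg_expand(0x0003) sets bits 1 and 3 and
 * returns 0x0000000a; vgic_cfg_compress(0x0000000a) folds that back
 * to 0x0003.
 */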
/*
 * The distributor uses 2 bits per IRQ for the CFG register, but the
 * LSB is always 0. As such, we only keep the upper bit, and use the
 * two above functions to compress/expand the bits
 */
static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 val;
	u32 *reg;

	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				  vcpu->vcpu_id, offset >> 1);

	if (offset & 4)
		val = *reg >> 16;
	else
		val = *reg & 0xffff;

	val = vgic_cfg_expand(val);
	vgic_reg_access(mmio, &val, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		if (offset < 8) {
			*reg = ~0U; /* Force PPIs/SGIs to 1 */
			return false;
		}

		val = vgic_cfg_compress(val);
		if (offset & 4) {
			*reg &= 0xffff;
			*reg |= val << 16;
		} else {
			*reg &= 0xffff << 16;
			*reg |= val;
		}
	}

	return false;
}
static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_dispatch_sgi(vcpu, reg);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
#define LR_CPUID(lr)	\
	(((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT)
#define LR_IRQID(lr)	\
	((lr) & GICH_LR_VIRTUALID)

static void vgic_retire_lr(int lr_nr, int irq, struct vgic_cpu *vgic_cpu)
{
	clear_bit(lr_nr, vgic_cpu->lr_used);
	vgic_cpu->vgic_lr[lr_nr] &= ~GICH_LR_STATE;
	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
}
/**
 * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
 * @vcpu: Pointer to the VCPU holding the LRs
 *
 * Move any pending IRQs that have already been assigned to LRs back to the
 * emulated distributor state so that the complete emulated state can be read
 * from the main emulation structures without investigating the LRs.
 *
 * Note that IRQs in the active state in the LRs get their pending state moved
 * to the distributor but the active state stays in the LRs, because we don't
 * track the active state on the distributor side.
 */
static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int vcpu_id = vcpu->vcpu_id;
	int i, irq, source_cpu;
	u32 *lr;

	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
		lr = &vgic_cpu->vgic_lr[i];
		irq = LR_IRQID(*lr);
		source_cpu = LR_CPUID(*lr);

		/*
		 * There are three options for the state bits:
		 *
		 * 01: pending
		 * 10: active
		 * 11: pending and active
		 *
		 * If the LR holds only an active interrupt (not pending) then
		 * just leave it alone.
		 */
		if ((*lr & GICH_LR_STATE) == GICH_LR_ACTIVE_BIT)
			continue;

		/*
		 * Reestablish the pending state on the distributor and the
		 * CPU interface.  It may have already been pending, but that
		 * is fine, then we are only setting a few bits that were
		 * already set.
		 */
		vgic_dist_irq_set(vcpu, irq);
		if (irq < VGIC_NR_SGIS)
			dist->irq_sgi_sources[vcpu_id][irq] |= 1 << source_cpu;
		*lr &= ~GICH_LR_PENDING_BIT;

		/*
		 * If there's no state left on the LR (it could still be
		 * active), then the LR does not hold any useful info and can
		 * be marked as free for other use.
		 */
		if (!(*lr & GICH_LR_STATE))
			vgic_retire_lr(i, irq, vgic_cpu);
	}

	/* Finally update the VGIC state. */
	vgic_update_state(vcpu->kvm);
}
/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg = 0;

	/* Copy source SGIs from distributor side */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		int shift = 8 * (sgi - min_sgi);
		reg |= (u32)dist->irq_sgi_sources[vcpu_id][sgi] << shift;
	}

	mmio_data_write(mmio, ~0, reg);
	return false;
}
static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset, bool set)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg;
	bool updated = false;

	reg = mmio_data_read(mmio, ~0);

	/* Set or clear pending SGIs on the distributor */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 mask = reg >> (8 * (sgi - min_sgi));
		if (set) {
			if ((dist->irq_sgi_sources[vcpu_id][sgi] & mask) != mask)
				updated = true;
			dist->irq_sgi_sources[vcpu_id][sgi] |= mask;
		} else {
			if (dist->irq_sgi_sources[vcpu_id][sgi] & mask)
				updated = true;
			dist->irq_sgi_sources[vcpu_id][sgi] &= ~mask;
		}
	}

	if (updated)
		vgic_update_state(vcpu->kvm);

	return updated;
}
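
/*
 * Example: a GICD_SPENDSGIR0 write of 0x00000204 marks SGI0 pending
 * with source CPU2 (byte 0 == 0x04) and SGI1 pending with source
 * CPU1 (byte 1 == 0x02) on this vcpu.
 */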
static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
}

static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
}
/*
 * I would have liked to use the kvm_bus_io_*() API instead, but it
 * cannot cope with banked registers (only the VM pointer is passed
 * around, and we need the vcpu). One of these days, someone please
 * fix it!
 */
struct mmio_range {
	phys_addr_t base;
	unsigned long len;
	bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
			    phys_addr_t offset);
};
static const struct mmio_range vgic_dist_ranges[] = {
	{
		.base		= GIC_DIST_CTRL,
		.len		= 12,
		.handle_mmio	= handle_mmio_misc,
	},
	{
		.base		= GIC_DIST_IGROUP,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ENABLE_SET,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_set_enable_reg,
	},
	{
		.base		= GIC_DIST_ENABLE_CLEAR,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_clear_enable_reg,
	},
	{
		.base		= GIC_DIST_PENDING_SET,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_set_pending_reg,
	},
	{
		.base		= GIC_DIST_PENDING_CLEAR,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_clear_pending_reg,
	},
	{
		.base		= GIC_DIST_ACTIVE_SET,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ACTIVE_CLEAR,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_PRI,
		.len		= VGIC_NR_IRQS,
		.handle_mmio	= handle_mmio_priority_reg,
	},
	{
		.base		= GIC_DIST_TARGET,
		.len		= VGIC_NR_IRQS,
		.handle_mmio	= handle_mmio_target_reg,
	},
	{
		.base		= GIC_DIST_CONFIG,
		.len		= VGIC_NR_IRQS / 4,
		.handle_mmio	= handle_mmio_cfg_reg,
	},
	{
		.base		= GIC_DIST_SOFTINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_sgi_reg,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_CLEAR,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_clear,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_SET,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_set,
	},
	{}
};
static const
struct mmio_range *find_matching_range(const struct mmio_range *ranges,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	const struct mmio_range *r = ranges;

	while (r->len) {
		if (offset >= r->base &&
		    (offset + mmio->len) <= (r->base + r->len))
			return r;
		r++;
	}

	return NULL;
}
/**
 * vgic_handle_mmio - handle an in-kernel MMIO access
 * @vcpu:	pointer to the vcpu performing the access
 * @run:	pointer to the kvm_run structure
 * @mmio:	pointer to the data describing the access
 *
 * returns true if the MMIO access has been performed in kernel space,
 * and false if it needs to be emulated in user space.
 */
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
		      struct kvm_exit_mmio *mmio)
{
	const struct mmio_range *range;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long base = dist->vgic_dist_base;
	bool updated_state;
	unsigned long offset;

	if (!irqchip_in_kernel(vcpu->kvm) ||
	    mmio->phys_addr < base ||
	    (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
		return false;

	/* We don't support ldrd / strd or ldm / stm to the emulated vgic */
	if (mmio->len > 4) {
		kvm_inject_dabt(vcpu, mmio->phys_addr);
		return true;
	}

	offset = mmio->phys_addr - base;
	range = find_matching_range(vgic_dist_ranges, mmio, offset);
	if (unlikely(!range || !range->handle_mmio)) {
		pr_warn("Unhandled access %d %08llx %d\n",
			mmio->is_write, mmio->phys_addr, mmio->len);
		return false;
	}

	spin_lock(&vcpu->kvm->arch.vgic.lock);
	offset = mmio->phys_addr - range->base - base;
	updated_state = range->handle_mmio(vcpu, mmio, offset);
	spin_unlock(&vcpu->kvm->arch.vgic.lock);
	kvm_prepare_mmio(run, mmio);
	kvm_handle_mmio_return(vcpu, run);

	if (updated_state)
		vgic_kick_vcpus(vcpu->kvm);

	return true;
}
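
/*
 * vgic_dispatch_sgi() below decodes a guest GICD_SGIR write: bits
 * [3:0] carry the SGI number, bits [23:16] the CPU target list, and
 * bits [25:24] the target list filter (0: use the list, 1: all but
 * self, 2: self only).
 */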
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	u8 target_cpus;
	int sgi, mode, c, vcpu_id;

	vcpu_id = vcpu->vcpu_id;

	sgi = reg & 0xf;
	target_cpus = (reg >> 16) & 0xff;
	mode = (reg >> 24) & 3;

	switch (mode) {
	case 0:
		if (!target_cpus)
			return;
		break;

	case 1:
		target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
		break;

	case 2:
		target_cpus = 1 << vcpu_id;
		break;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (target_cpus & 1) {
			/* Flag the SGI as pending */
			vgic_dist_irq_set(vcpu, sgi);
			dist->irq_sgi_sources[c][sgi] |= 1 << vcpu_id;
			kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
		}

		target_cpus >>= 1;
	}
}
static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
	unsigned long pending_private, pending_shared;
	int vcpu_id;

	vcpu_id = vcpu->vcpu_id;
	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
	pend_shared = vcpu->arch.vgic_cpu.pending_shared;

	pending = vgic_bitmap_get_cpu_map(&dist->irq_state, vcpu_id);
	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);

	pending = vgic_bitmap_get_shared_map(&dist->irq_state);
	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
	bitmap_and(pend_shared, pending, enabled, VGIC_NR_SHARED_IRQS);
	bitmap_and(pend_shared, pend_shared,
		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
		   VGIC_NR_SHARED_IRQS);

	pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
	pending_shared = find_first_bit(pend_shared, VGIC_NR_SHARED_IRQS);
	return (pending_private < VGIC_NR_PRIVATE_IRQS ||
		pending_shared < VGIC_NR_SHARED_IRQS);
}
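
/*
 * In other words, compute_pending_for_cpu() evaluates the oracle
 * described at the top of this file: a private IRQ must be pending
 * and enabled; a shared IRQ must additionally have this vcpu set in
 * its irq_spi_target bitmap.
 */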
/*
 * Update the interrupt state and determine which CPUs have pending
 * interrupts. Must be called with distributor lock held.
 */
static void vgic_update_state(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int c;

	if (!dist->enabled) {
		set_bit(0, &dist->irq_pending_on_cpu);
		return;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (compute_pending_for_cpu(vcpu)) {
			pr_debug("CPU%d has pending interrupts\n", c);
			set_bit(c, &dist->irq_pending_on_cpu);
		}
	}
}
#define MK_LR_PEND(src, irq)	\
	(GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
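
/*
 * Example: with GICH_LR_PENDING_BIT == (1 << 28) and
 * GICH_LR_PHYSID_CPUID_SHIFT == 10, MK_LR_PEND(2, 5) yields
 * 0x10000805: SGI5 pending, sourced from vcpu 2.
 */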
/*
 * An interrupt may have been disabled after being made pending on the
 * CPU interface (the classic case is a timer running while we're
 * rebooting the guest - the interrupt would kick as soon as the CPU
 * interface gets enabled, with deadly consequences).
 *
 * The solution is to examine already active LRs, and check the
 * interrupt is still enabled. If not, just retire it.
 */
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int lr;

	for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
		int irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;

		if (!vgic_irq_is_enabled(vcpu, irq)) {
			vgic_retire_lr(lr, irq, vgic_cpu);
			if (vgic_irq_is_active(vcpu, irq))
				vgic_irq_clear_active(vcpu, irq);
		}
	}
}
/*
 * Queue an interrupt to a CPU virtual interface. Return true on success,
 * or false if it wasn't possible to queue it.
 */
static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int lr;

	/* Sanitize the input... */
	BUG_ON(sgi_source_id & ~7);
	BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
	BUG_ON(irq >= VGIC_NR_IRQS);

	kvm_debug("Queue IRQ%d\n", irq);

	lr = vgic_cpu->vgic_irq_lr_map[irq];

	/* Do we have an active interrupt for the same CPUID? */
	if (lr != LR_EMPTY &&
	    (LR_CPUID(vgic_cpu->vgic_lr[lr]) == sgi_source_id)) {
		kvm_debug("LR%d piggyback for IRQ%d %x\n",
			  lr, irq, vgic_cpu->vgic_lr[lr]);
		BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
		vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
		return true;
	}

	/* Try to use another LR for this interrupt */
	lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
				 vgic_cpu->nr_lr);
	if (lr >= vgic_cpu->nr_lr)
		return false;

	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
	vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
	vgic_cpu->vgic_irq_lr_map[irq] = lr;
	set_bit(lr, vgic_cpu->lr_used);

	if (!vgic_irq_is_edge(vcpu, irq))
		vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;

	return true;
}
static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long sources;
	int vcpu_id = vcpu->vcpu_id;
	int c;

	sources = dist->irq_sgi_sources[vcpu_id][irq];

	for_each_set_bit(c, &sources, VGIC_MAX_CPUS) {
		if (vgic_queue_irq(vcpu, c, irq))
			clear_bit(c, &sources);
	}

	dist->irq_sgi_sources[vcpu_id][irq] = sources;

	/*
	 * If the sources bitmap has been cleared it means that we
	 * could queue all the SGIs onto link registers (see the
	 * clear_bit above), and therefore we are done with them in
	 * our emulated gic and can get rid of them.
	 */
	if (!sources) {
		vgic_dist_irq_clear(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return true;
	}

	return false;
}
static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
	if (vgic_irq_is_active(vcpu, irq))
		return true; /* level interrupt, already queued */

	if (vgic_queue_irq(vcpu, 0, irq)) {
		if (vgic_irq_is_edge(vcpu, irq)) {
			vgic_dist_irq_clear(vcpu, irq);
			vgic_cpu_irq_clear(vcpu, irq);
		} else {
			vgic_irq_set_active(vcpu, irq);
		}

		return true;
	}

	return false;
}
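
/*
 * Note the asymmetry above: an edge interrupt is consumed as soon as
 * it is queued (its pending state is cleared), while a level
 * interrupt stays pending and is marked active so the line is only
 * resampled after the guest EOIs it.
 */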
/*
 * Fill the list registers with pending interrupts before running the
 * guest.
 */
static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i, vcpu_id;
	int overflow = 0;

	vcpu_id = vcpu->vcpu_id;

	/*
	 * We may not have any pending interrupt, or the interrupts
	 * may have been serviced from another vcpu. In all cases,
	 * move along.
	 */
	if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
		pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
		goto epilog;
	}

	/* SGIs */
	for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
		if (!vgic_queue_sgi(vcpu, i))
			overflow = 1;
	}

	/* PPIs */
	for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i))
			overflow = 1;
	}

	/* SPIs */
	for_each_set_bit(i, vgic_cpu->pending_shared, VGIC_NR_SHARED_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
			overflow = 1;
	}

epilog:
	if (overflow) {
		vgic_cpu->vgic_hcr |= GICH_HCR_UIE;
	} else {
		vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
		/*
		 * We're about to run this VCPU, and we've consumed
		 * everything the distributor had in store for
		 * us. Claim we don't have anything pending. We'll
		 * adjust that if needed while exiting.
		 */
		clear_bit(vcpu_id, &dist->irq_pending_on_cpu);
	}
}
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	bool level_pending = false;

	kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);

	if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
		/*
		 * Some level interrupts have been EOIed. Clear their
		 * active bit.
		 */
		int lr, irq;

		for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_eisr,
				 vgic_cpu->nr_lr) {
			irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;

			vgic_irq_clear_active(vcpu, irq);
			vgic_cpu->vgic_lr[lr] &= ~GICH_LR_EOI;

			/* Any additional pending interrupt? */
			if (vgic_dist_irq_is_pending(vcpu, irq)) {
				vgic_cpu_irq_set(vcpu, irq);
				level_pending = true;
			} else {
				vgic_cpu_irq_clear(vcpu, irq);
			}

			/*
			 * Despite being EOIed, the LR may not have
			 * been marked as empty.
			 */
			set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
			vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
		}
	}

	if (vgic_cpu->vgic_misr & GICH_MISR_U)
		vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;

	return level_pending;
}
/*
 * Sync back the VGIC state after a guest run. The distributor lock is
 * needed so we don't get preempted in the middle of the state processing.
 */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int lr, pending;
	bool level_pending;

	level_pending = vgic_process_maintenance(vcpu);

	/* Clear mappings for empty LRs */
	for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr,
			 vgic_cpu->nr_lr) {
		int irq;

		if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
			continue;

		irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;

		BUG_ON(irq >= VGIC_NR_IRQS);
		vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
	}

	/* Check if we still have something up our sleeve... */
	pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_elrsr,
				      vgic_cpu->nr_lr);
	if (level_pending || pending < vgic_cpu->nr_lr)
		set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_flush_hwstate(vcpu);
	spin_unlock(&dist->lock);
}
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_sync_hwstate(vcpu);
	spin_unlock(&dist->lock);
}
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}
static void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu))
			kvm_vcpu_kick(vcpu);
	}
}
static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
{
	int is_edge = vgic_irq_is_edge(vcpu, irq);
	int state = vgic_dist_irq_is_pending(vcpu, irq);

	/*
	 * Only inject an interrupt if:
	 * - edge triggered and we have a rising edge
	 * - level triggered and we change level
	 */
	if (is_edge)
		return level > state;
	else
		return level != state;
}
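
/*
 * For example, re-asserting level == 1 on a line whose latched state
 * is already 1 is filtered out above, while level == 0 is a genuine
 * falling edge and is let through to clear the line.
 */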
static bool vgic_update_irq_state(struct kvm *kvm, int cpuid,
				  unsigned int irq_num, bool level)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int is_edge, is_level;
	int enabled;
	bool ret = true;

	spin_lock(&dist->lock);

	vcpu = kvm_get_vcpu(kvm, cpuid);
	is_edge = vgic_irq_is_edge(vcpu, irq_num);
	is_level = !is_edge;

	if (!vgic_validate_injection(vcpu, irq_num, level)) {
		ret = false;
		goto out;
	}

	if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
		cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
		vcpu = kvm_get_vcpu(kvm, cpuid);
	}

	kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);

	if (level)
		vgic_dist_irq_set(vcpu, irq_num);
	else
		vgic_dist_irq_clear(vcpu, irq_num);

	enabled = vgic_irq_is_enabled(vcpu, irq_num);

	if (!enabled) {
		ret = false;
		goto out;
	}

	if (is_level && vgic_irq_is_active(vcpu, irq_num)) {
		/*
		 * Level interrupt in progress, will be picked up
		 * when EOId.
		 */
		ret = false;
		goto out;
	}

	if (level) {
		vgic_cpu_irq_set(vcpu, irq_num);
		set_bit(cpuid, &dist->irq_pending_on_cpu);
	}

out:
	spin_unlock(&dist->lock);

	return ret;
}
/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @irq_num: The IRQ number that is assigned to the device
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  activates an interrupt
 *			      false: deactivates an interrupt
 *
 * The GIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
			bool level)
{
	if (vgic_update_irq_state(kvm, cpuid, irq_num, level))
		vgic_kick_vcpus(kvm);

	return 0;
}
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_process_maintenance).
	 */
	return IRQ_HANDLED;
}
/**
 * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
 * @vcpu: pointer to the vcpu struct
 *
 * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
 * this vcpu and enable the VGIC for this VCPU
 */
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i;

	if (vcpu->vcpu_id >= VGIC_MAX_CPUS)
		return -EBUSY;

	for (i = 0; i < VGIC_NR_IRQS; i++) {
		if (i < VGIC_NR_PPIS)
			vgic_bitmap_set_irq_val(&dist->irq_enabled,
						vcpu->vcpu_id, i, 1);
		if (i < VGIC_NR_PRIVATE_IRQS)
			vgic_bitmap_set_irq_val(&dist->irq_cfg,
						vcpu->vcpu_id, i, VGIC_CFG_EDGE);

		vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
	}

	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vgic_cpu->vgic_vmcr = 0;

	vgic_cpu->nr_lr = vgic_nr_lr;
	vgic_cpu->vgic_hcr = GICH_HCR_EN; /* Get the show on the road... */

	return 0;
}
static void vgic_init_maintenance_interrupt(void *info)
{
	enable_percpu_irq(vgic_maint_irq, 0);
}

static int vgic_cpu_notify(struct notifier_block *self,
			   unsigned long action, void *cpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		vgic_init_maintenance_interrupt(NULL);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		disable_percpu_irq(vgic_maint_irq);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block vgic_cpu_nb = {
	.notifier_call = vgic_cpu_notify,
};
int kvm_vgic_hyp_init(void)
{
	int ret;
	struct resource vctrl_res;
	struct resource vcpu_res;

	vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
	if (!vgic_node) {
		kvm_err("error: no compatible vgic node in DT\n");
		return -ENODEV;
	}

	vgic_maint_irq = irq_of_parse_and_map(vgic_node, 0);
	if (!vgic_maint_irq) {
		kvm_err("error getting vgic maintenance irq from DT\n");
		ret = -ENXIO;
		goto out;
	}

	ret = request_percpu_irq(vgic_maint_irq, vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n", vgic_maint_irq);
		goto out;
	}

	ret = __register_cpu_notifier(&vgic_cpu_nb);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
	if (ret) {
		kvm_err("Cannot obtain VCTRL resource\n");
		goto out_free_irq;
	}

	vgic_vctrl_base = of_iomap(vgic_node, 2);
	if (!vgic_vctrl_base) {
		kvm_err("Cannot ioremap VCTRL\n");
		ret = -ENOMEM;
		goto out_free_irq;
	}

	vgic_nr_lr = readl_relaxed(vgic_vctrl_base + GICH_VTR);
	vgic_nr_lr = (vgic_nr_lr & 0x3f) + 1;

	ret = create_hyp_io_mappings(vgic_vctrl_base,
				     vgic_vctrl_base + resource_size(&vctrl_res),
				     vctrl_res.start);
	if (ret) {
		kvm_err("Cannot map VCTRL into hyp\n");
		goto out_unmap;
	}

	if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
		kvm_err("Cannot obtain VCPU resource\n");
		ret = -ENXIO;
		goto out_unmap;
	}

	if (!PAGE_ALIGNED(vcpu_res.start)) {
		kvm_err("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)vcpu_res.start);
		ret = -ENXIO;
		goto out_unmap;
	}

	if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
		kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
			(unsigned long long)resource_size(&vcpu_res),
			PAGE_SIZE);
		ret = -ENXIO;
		goto out_unmap;
	}

	vgic_vcpu_base = vcpu_res.start;

	kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
		 vctrl_res.start, vgic_maint_irq);
	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);

	goto out;

out_unmap:
	iounmap(vgic_vctrl_base);
out_free_irq:
	free_percpu_irq(vgic_maint_irq, kvm_get_running_vcpus());
out:
	of_node_put(vgic_node);
	return ret;
}
/**
 * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
 * @kvm: pointer to the kvm struct
 *
 * Map the virtual CPU interface into the VM before running any VCPUs.  We
 * can't do this at creation time, because user space must first set the
 * virtual CPU interface address in the guest physical address space.  Also
 * initialize the ITARGETSRn regs to 0 on the emulated distributor.
 */
int kvm_vgic_init(struct kvm *kvm)
{
	int ret = 0, i;

	if (!irqchip_in_kernel(kvm))
		return 0;

	mutex_lock(&kvm->lock);

	if (vgic_initialized(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
				    vgic_vcpu_base, KVM_VGIC_V2_CPU_SIZE);
	if (ret) {
		kvm_err("Unable to remap VGIC CPU to VCPU\n");
		goto out;
	}

	for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4)
		vgic_set_target_reg(kvm, 0, i);

	kvm->arch.vgic.ready = true;
out:
	mutex_unlock(&kvm->lock);
	return ret;
}
int kvm_vgic_create(struct kvm *kvm)
{
	int i, vcpu_lock_idx = -1, ret = 0;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->lock);

	if (kvm->arch.vgic.vctrl_base) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run while we create the vgic.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!mutex_trylock(&vcpu->mutex))
			goto out_unlock;
		vcpu_lock_idx = i;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.has_run_once) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}

	spin_lock_init(&kvm->arch.vgic.lock);
	kvm->arch.vgic.vctrl_base = vgic_vctrl_base;
	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;

out_unlock:
	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&vcpu->mutex);
	}

out:
	mutex_unlock(&kvm->lock);
	return ret;
}
static int vgic_ioaddr_overlap(struct kvm *kvm)
{
	phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
	phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;

	if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
		return 0;
	if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
	    (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
		return -EBUSY;
	return 0;
}
static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
			      phys_addr_t addr, phys_addr_t size)
{
	int ret;

	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

	if (addr & (SZ_4K - 1))
		return -EINVAL;

	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;
	if (addr + size < addr)
		return -EINVAL;

	*ioaddr = addr;
	ret = vgic_ioaddr_overlap(kvm);
	if (ret)
		*ioaddr = VGIC_ADDR_UNDEF;

	return ret;
}
/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V2_ADDR_TYPE_XXX
 * @addr:  pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space.  These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;

	mutex_lock(&kvm->lock);
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		if (write) {
			r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
					       *addr, KVM_VGIC_V2_DIST_SIZE);
		} else {
			*addr = vgic->vgic_dist_base;
		}
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		if (write) {
			r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
					       *addr, KVM_VGIC_V2_CPU_SIZE);
		} else {
			*addr = vgic->vgic_cpu_base;
		}
		break;
	default:
		r = -ENODEV;
	}

	mutex_unlock(&kvm->lock);
	return r;
}
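
/*
 * handle_cpu_mmio_misc() below folds GICC_CTLR, GICC_PMR and the two
 * binary point registers in and out of the single shadow GICH_VMCR
 * word: each case selects the mask/shift pair of the VMCR field that
 * backs the register being accessed.
 */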
static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
				 struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u32 reg, mask = 0, shift = 0;
	bool updated = false;

	switch (offset & ~0x3) {
	case GIC_CPU_CTRL:
		mask = GICH_VMCR_CTRL_MASK;
		shift = GICH_VMCR_CTRL_SHIFT;
		break;
	case GIC_CPU_PRIMASK:
		mask = GICH_VMCR_PRIMASK_MASK;
		shift = GICH_VMCR_PRIMASK_SHIFT;
		break;
	case GIC_CPU_BINPOINT:
		mask = GICH_VMCR_BINPOINT_MASK;
		shift = GICH_VMCR_BINPOINT_SHIFT;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		mask = GICH_VMCR_ALIAS_BINPOINT_MASK;
		shift = GICH_VMCR_ALIAS_BINPOINT_SHIFT;
		break;
	}

	if (!mmio->is_write) {
		reg = (vgic_cpu->vgic_vmcr & mask) >> shift;
		mmio_data_write(mmio, ~0, reg);
	} else {
		reg = mmio_data_read(mmio, ~0);
		reg = (reg << shift) & mask;
		if (reg != (vgic_cpu->vgic_vmcr & mask))
			updated = true;
		vgic_cpu->vgic_vmcr &= ~mask;
		vgic_cpu->vgic_vmcr |= reg;
	}
	return updated;
}
static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
}

static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	u32 reg;

	if (mmio->is_write)
		return false;

	/* GICC_IIDR */
	reg = (PRODUCT_ID_KVM << 20) |
	      (GICC_ARCH_VERSION_V2 << 16) |
	      (IMPLEMENTER_ARM << 0);
	mmio_data_write(mmio, ~0, reg);
	return false;
}
/*
 * CPU Interface Register accesses - these are not accessed by the VM, but by
 * user space for saving and restoring VGIC state.
 */
static const struct mmio_range vgic_cpu_ranges[] = {
	{
		.base		= GIC_CPU_CTRL,
		.len		= 12,
		.handle_mmio	= handle_cpu_mmio_misc,
	},
	{
		.base		= GIC_CPU_ALIAS_BINPOINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_abpr,
	},
	{
		.base		= GIC_CPU_ACTIVEPRIO,
		.len		= 16,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_CPU_IDENT,
		.len		= 4,
		.handle_mmio	= handle_cpu_mmio_ident,
	},
};
static int vgic_attr_regs_access(struct kvm_device *dev,
				 struct kvm_device_attr *attr,
				 u32 *reg, bool is_write)
{
	const struct mmio_range *r = NULL, *ranges;
	phys_addr_t offset;
	int ret, cpuid, c;
	struct kvm_vcpu *vcpu, *tmp_vcpu;
	struct vgic_dist *vgic;
	struct kvm_exit_mmio mmio;

	offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
		KVM_DEV_ARM_VGIC_CPUID_SHIFT;

	mutex_lock(&dev->kvm->lock);

	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
		ret = -EINVAL;
		goto out;
	}

	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
	vgic = &dev->kvm->arch.vgic;

	mmio.len = 4;
	mmio.is_write = is_write;
	if (is_write)
		mmio_data_write(&mmio, ~0, *reg);
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		mmio.phys_addr = vgic->vgic_dist_base + offset;
		ranges = vgic_dist_ranges;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		mmio.phys_addr = vgic->vgic_cpu_base + offset;
		ranges = vgic_cpu_ranges;
		break;
	default:
		BUG();
	}
	r = find_matching_range(ranges, &mmio, offset);

	if (unlikely(!r || !r->handle_mmio)) {
		ret = -ENXIO;
		goto out;
	}

	spin_lock(&vgic->lock);

	/*
	 * Ensure that no other VCPU is running by checking the vcpu->cpu
	 * field.  If no other VCPUs are running we can safely access the VGIC
	 * state, because even if another VCPU is run after this point, that
	 * VCPU will not touch the vgic state, because it will block on
	 * getting the vgic->lock in kvm_vgic_sync_hwstate().
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
		if (unlikely(tmp_vcpu->cpu != -1)) {
			ret = -EBUSY;
			goto out_vgic_unlock;
		}
	}

	/*
	 * Move all pending IRQs from the LRs on all VCPUs so the pending
	 * state can be properly represented in the register state accessible
	 * through this API.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
		vgic_unqueue_irqs(tmp_vcpu);

	offset -= r->base;
	r->handle_mmio(vcpu, &mmio, offset);

	if (!is_write)
		*reg = mmio_data_read(&mmio, ~0);

	ret = 0;
out_vgic_unlock:
	spin_unlock(&vgic->lock);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}
static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		return (r == -ENODEV) ? -ENXIO : r;
	}

	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_attr_regs_access(dev, attr, &reg, true);
	}

	}

	return -ENXIO;
}
static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}

	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		r = vgic_attr_regs_access(dev, attr, &reg, false);
		if (r)
			return r;
		r = put_user(reg, uaddr);
		break;
	}

	}

	return r;
}
static int vgic_has_attr_regs(const struct mmio_range *ranges,
			      phys_addr_t offset)
{
	struct kvm_exit_mmio dev_attr_mmio;

	dev_attr_mmio.len = 4;
	if (find_matching_range(ranges, &dev_attr_mmio, offset))
		return 0;
	else
		return -ENXIO;
}

static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	phys_addr_t offset;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_dist_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_cpu_ranges, offset);
	}

	return -ENXIO;
}
static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

static int vgic_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm);
}

struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_set_attr,
	.get_attr = vgic_get_attr,
	.has_attr = vgic_has_attr,
};