/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

/*
 * How the whole thing works (courtesy of Christoffer Dall):
 *
 * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
 *   something is pending
 * - VGIC pending interrupts are stored on the vgic.irq_state vgic
 *   bitmap (this bitmap is updated by both user land ioctls and guest
 *   mmio ops, and other in-kernel peripherals such as the
 *   arch. timers) and indicate the 'wire' state.
 * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
 *   recalculated
 * - To calculate the oracle, we need info for each cpu from
 *   compute_pending_for_cpu, which considers:
 *   - PPI: dist->irq_state & dist->irq_enable
 *   - SPI: dist->irq_state & dist->irq_enable & dist->irq_spi_target
 *   - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
 *     registers, stored on each vcpu. We only keep one bit of
 *     information per interrupt, making sure that only one vcpu can
 *     accept the interrupt.
 * - The same is true when injecting an interrupt, except that we only
 *   consider a single interrupt at a time. The irq_spi_cpu array
 *   contains the target CPU for each SPI.
 *
 * The handling of level interrupts adds some extra complexity. We
 * need to track when the interrupt has been EOIed, so we can sample
 * the 'line' again. This is achieved as such:
 *
 * - When a level interrupt is moved onto a vcpu, the corresponding
 *   bit in irq_active is set. As long as this bit is set, the line
 *   will be ignored for further interrupts. The interrupt is injected
 *   into the vcpu with the GICH_LR_EOI bit set (generate a
 *   maintenance interrupt on EOI).
 * - When the interrupt is EOIed, the maintenance interrupt fires,
 *   and clears the corresponding bit in irq_active. This allows the
 *   interrupt line to be sampled again.
 */

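/*
 * Illustrative sketch (not from the original source) of the oracle
 * computation for a single cpu; the real code lives in
 * compute_pending_for_cpu() below, and the irq_spi_target term only
 * applies to SPIs:
 *
 *	pending = irq_state & irq_enabled;	// & irq_spi_target[cpu] for SPIs
 *	if (pending is non-empty)
 *		set_bit(cpu, &dist->irq_pending_on_cpu);
 */
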
#define VGIC_ADDR_UNDEF		(-1)
#define IS_VGIC_ADDR_UNDEF(_x)	((_x) == VGIC_ADDR_UNDEF)

/* Physical address of vgic virtual cpu interface */
static phys_addr_t vgic_vcpu_base;

/* Virtual control interface base address */
static void __iomem *vgic_vctrl_base;

static struct device_node *vgic_node;

#define ACCESS_READ_VALUE	(1 << 0)
#define ACCESS_READ_RAZ		(0 << 0)
#define ACCESS_READ_MASK(x)	((x) & (1 << 0))
#define ACCESS_WRITE_IGNORED	(0 << 1)
#define ACCESS_WRITE_SETBIT	(1 << 1)
#define ACCESS_WRITE_CLEARBIT	(2 << 1)
#define ACCESS_WRITE_VALUE	(3 << 1)
#define ACCESS_WRITE_MASK(x)	((x) & (3 << 1))

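/*
 * Note (illustrative): the read mode lives in bit 0 and the write mode
 * in bits [2:1], so a handler passes one of each OR-ed together, e.g.
 * the set-enable handler below uses:
 *
 *	ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT
 */
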
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_update_state(struct kvm *kvm);
static void vgic_kick_vcpus(struct kvm *kvm);
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
static u32 vgic_nr_lr;

static unsigned int vgic_maint_irq;

static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
				int cpuid, u32 offset)
{
	offset >>= 2;
	if (!offset)
		return x->percpu[cpuid].reg;
	else
		return x->shared.reg + offset - 1;
}

static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
				   int cpuid, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		return test_bit(irq, x->percpu[cpuid].reg_ul);

	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared.reg_ul);
}

static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
				    int irq, int val)
{
	unsigned long *reg;

	if (irq < VGIC_NR_PRIVATE_IRQS) {
		reg = x->percpu[cpuid].reg_ul;
	} else {
		reg = x->shared.reg_ul;
		irq -= VGIC_NR_PRIVATE_IRQS;
	}

	if (val)
		set_bit(irq, reg);
	else
		clear_bit(irq, reg);
}

static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
{
	if (unlikely(cpuid >= VGIC_MAX_CPUS))
		return NULL;

	return x->percpu[cpuid].reg_ul;
}

static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
	return x->shared.reg_ul;
}

static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
	offset >>= 2;
	BUG_ON(offset > (VGIC_NR_IRQS / 4));
	if (offset < 8)
		return x->percpu[cpuid] + offset;
	else
		return x->shared + offset - 8;
}

#define VGIC_CFG_LEVEL	0
#define VGIC_CFG_EDGE	1

static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int irq_val;

	irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
	return irq_val == VGIC_CFG_EDGE;
}

static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
}

static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
}

static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
}

static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_state, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 0);
}

static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		set_bit(irq - VGIC_NR_PRIVATE_IRQS,
			vcpu->arch.vgic_cpu.pending_shared);
}

static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
			  vcpu->arch.vgic_cpu.pending_shared);
}

static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
{
	return *((u32 *)mmio->data) & mask;
}

static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
{
	*((u32 *)mmio->data) = value & mask;
}

/**
 * vgic_reg_access - access vgic register
 * @mmio:   pointer to the data describing the mmio access
 * @reg:    pointer to the virtual backing of vgic distributor data
 * @offset: least significant 2 bits used for word offset
 * @mode:   ACCESS_ mode (see defines above)
 *
 * Helper to make vgic register access easier using one of the access
 * modes defined for vgic register access
 * (read,raz,write-ignored,setbit,clearbit,write)
 */
static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
			    phys_addr_t offset, int mode)
{
	int word_offset = (offset & 3) * 8;
	u32 mask = (1UL << (mmio->len * 8)) - 1;
	u32 regval;

	/*
	 * Any alignment fault should have been delivered to the guest
	 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
	 */

	if (reg) {
		regval = *reg;
	} else {
		BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
		regval = 0;
	}

	if (mmio->is_write) {
		u32 data = mmio_data_read(mmio, mask) << word_offset;
		switch (ACCESS_WRITE_MASK(mode)) {
		case ACCESS_WRITE_IGNORED:
			return;

		case ACCESS_WRITE_SETBIT:
			regval |= data;
			break;

		case ACCESS_WRITE_CLEARBIT:
			regval &= ~data;
			break;

		case ACCESS_WRITE_VALUE:
			regval = (regval & ~(mask << word_offset)) | data;
			break;
		}
		*reg = regval;
	} else {
		switch (ACCESS_READ_MASK(mode)) {
		case ACCESS_READ_RAZ:
			regval = 0;
			/* fall through */

		case ACCESS_READ_VALUE:
			mmio_data_write(mmio, mask, regval >> word_offset);
		}
	}
}

static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;
	u32 word_offset = offset & 3;

	switch (offset & ~3) {
	case 0:			/* CTLR */
		reg = vcpu->kvm->arch.vgic.enabled;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
		if (mmio->is_write) {
			vcpu->kvm->arch.vgic.enabled = reg & 1;
			vgic_update_state(vcpu->kvm);
			return true;
		}
		break;

	case 4:			/* TYPER */
		reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		reg |= (VGIC_NR_IRQS >> 5) - 1;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;

	case 8:			/* IIDR */
		reg = 0x4B00043B;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;
	}

	return false;
}

static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
	if (mmio->is_write) {
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		if (offset < 4) /* Force SGI enabled */
			*reg |= 0xffff;
		vgic_retire_disabled_irqs(vcpu);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
	if (mmio->is_write) {
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
					vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}

#define GICD_ITARGETSR_SIZE	32
#define GICD_CPUTARGETS_BITS	8
#define GICD_IRQS_PER_ITARGETSR	(GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)

static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, c;
	unsigned long *bmap;
	u32 val = 0;

	irq -= VGIC_NR_PRIVATE_IRQS;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
		for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
			if (test_bit(irq + i, bmap))
				val |= 1 << (c + i * 8);
	}

	return val;
}

static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, c;
	unsigned long *bmap;
	u32 target;

	irq -= VGIC_NR_PRIVATE_IRQS;

	/*
	 * Pick the LSB in each byte. This ensures we target exactly
	 * one vcpu per IRQ. If the byte is null, assume we target
	 * CPU0.
	 */
	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
		int shift = i * GICD_CPUTARGETS_BITS;
		target = ffs((val >> shift) & 0xffU);
		target = target ? (target - 1) : 0;
		dist->irq_spi_cpu[irq + i] = target;
		kvm_for_each_vcpu(c, vcpu, kvm) {
			bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
			if (c == target)
				set_bit(irq + i, bmap);
			else
				clear_bit(irq + i, bmap);
		}
	}
}

static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset)
{
	u32 reg;

	/* We treat the banked interrupts targets as read-only */
	if (offset < 32) {
		u32 roreg = 1 << vcpu->vcpu_id;
		roreg |= roreg << 8;
		roreg |= roreg << 16;

		vgic_reg_access(mmio, &roreg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static u32 vgic_cfg_expand(u16 val)
{
	u32 res = 0;
	int i;

	/*
	 * Turn a 16bit value like abcd...mnop into a 32bit word
	 * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);

	return res;
}

static u16 vgic_cfg_compress(u32 val)
{
	u16 res = 0;
	int i;

	/*
	 * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
	 * abcd...mnop which is what we really care about.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;

	return res;
}

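/*
 * Worked example (illustrative): vgic_cfg_expand(0xffff) yields
 * 0xaaaaaaaa (every odd bit set), and vgic_cfg_compress(0xaaaaaaaa)
 * yields 0xffff again. A single edge bit, e.g. bit 2 of the 16bit
 * value, ends up in bit 2 * 2 + 1 = 5 of the 32bit word.
 */
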
/*
 * The distributor uses 2 bits per IRQ for the CFG register, but the
 * LSB is always 0. As such, we only keep the upper bit, and use the
 * two above functions to compress/expand the bits.
 */
static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 val;
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				       vcpu->vcpu_id, offset >> 1);
	if (offset & 4)
		val = *reg >> 16;
	else
		val = *reg & 0xffff;

	val = vgic_cfg_expand(val);
	vgic_reg_access(mmio, &val, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		if (offset < 8) {
			*reg = ~0U; /* Force PPIs/SGIs to 1 */
			return false;
		}

		val = vgic_cfg_compress(val);
		if (offset & 4) {
			*reg &= 0xffff;
			*reg |= val << 16;
		} else {
			*reg &= 0xffff << 16;
			*reg |= val;
		}
	}

	return false;
}

static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_dispatch_sgi(vcpu, reg);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

/*
 * I would have liked to use the kvm_bus_io_*() API instead, but it
 * cannot cope with banked registers (only the VM pointer is passed
 * around, and we need the vcpu). One of these days, someone please
 * fix it!
 */
struct mmio_range {
	phys_addr_t base;
	unsigned long len;
	bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
			    phys_addr_t offset);
};

static const struct mmio_range vgic_ranges[] = {
	{
		.base		= GIC_DIST_CTRL,
		.len		= 12,
		.handle_mmio	= handle_mmio_misc,
	},
	{
		.base		= GIC_DIST_IGROUP,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ENABLE_SET,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_set_enable_reg,
	},
	{
		.base		= GIC_DIST_ENABLE_CLEAR,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_clear_enable_reg,
	},
	{
		.base		= GIC_DIST_PENDING_SET,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_set_pending_reg,
	},
	{
		.base		= GIC_DIST_PENDING_CLEAR,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_clear_pending_reg,
	},
	{
		.base		= GIC_DIST_ACTIVE_SET,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ACTIVE_CLEAR,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_PRI,
		.len		= VGIC_NR_IRQS,
		.handle_mmio	= handle_mmio_priority_reg,
	},
	{
		.base		= GIC_DIST_TARGET,
		.len		= VGIC_NR_IRQS,
		.handle_mmio	= handle_mmio_target_reg,
	},
	{
		.base		= GIC_DIST_CONFIG,
		.len		= VGIC_NR_IRQS / 4,
		.handle_mmio	= handle_mmio_cfg_reg,
	},
	{
		.base		= GIC_DIST_SOFTINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_sgi_reg,
	},
	{}
};

static const
struct mmio_range *find_matching_range(const struct mmio_range *ranges,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t base)
{
	const struct mmio_range *r = ranges;
	phys_addr_t addr = mmio->phys_addr - base;

	while (r->len) {
		if (addr >= r->base &&
		    (addr + mmio->len) <= (r->base + r->len))
			return r;
		r++;
	}

	return NULL;
}

/**
 * vgic_handle_mmio - handle an in-kernel MMIO access
 * @vcpu:	pointer to the vcpu performing the access
 * @run:	pointer to the kvm_run structure
 * @mmio:	pointer to the data describing the access
 *
 * returns true if the MMIO access has been performed in kernel space,
 * and false if it needs to be emulated in user space.
 */
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
		      struct kvm_exit_mmio *mmio)
{
	const struct mmio_range *range;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long base = dist->vgic_dist_base;
	bool updated_state;
	unsigned long offset;

	if (!irqchip_in_kernel(vcpu->kvm) ||
	    mmio->phys_addr < base ||
	    (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
		return false;

	/* We don't support ldrd / strd or ldm / stm to the emulated vgic */
	if (mmio->len > 4) {
		kvm_inject_dabt(vcpu, mmio->phys_addr);
		return true;
	}

	range = find_matching_range(vgic_ranges, mmio, base);
	if (unlikely(!range || !range->handle_mmio)) {
		pr_warn("Unhandled access %d %08llx %d\n",
			mmio->is_write, mmio->phys_addr, mmio->len);
		return false;
	}

	spin_lock(&vcpu->kvm->arch.vgic.lock);
	offset = mmio->phys_addr - range->base - base;
	updated_state = range->handle_mmio(vcpu, mmio, offset);
	spin_unlock(&vcpu->kvm->arch.vgic.lock);
	kvm_prepare_mmio(run, mmio);
	kvm_handle_mmio_return(vcpu, run);

	if (updated_state)
		vgic_kick_vcpus(vcpu->kvm);

	return true;
}

static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	u8 target_cpus;
	int sgi, mode, c, vcpu_id;

	vcpu_id = vcpu->vcpu_id;

	sgi = reg & 0xf;
	target_cpus = (reg >> 16) & 0xff;
	mode = (reg >> 24) & 3;

	switch (mode) {
	case 0:
		if (!target_cpus)
			return;
		break;

	case 1:
		target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
		break;

	case 2:
		target_cpus = 1 << vcpu_id;
		break;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (target_cpus & 1) {
			/* Flag the SGI as pending */
			vgic_dist_irq_set(vcpu, sgi);
			dist->irq_sgi_sources[c][sgi] |= 1 << vcpu_id;
			kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
		}

		target_cpus >>= 1;
	}
}

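/*
 * Illustrative GICD_SGIR encodings as decoded above (filter in bits
 * [25:24], target list in bits [23:16], SGI number in bits [3:0]):
 *
 *	0x00020003: SGI3 to the CPUs in target list {CPU1}
 *	0x01000007: SGI7 to all CPUs but the requesting one
 *	0x02000000: SGI0 to the requesting CPU only
 */
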
static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
	unsigned long pending_private, pending_shared;
	int vcpu_id;

	vcpu_id = vcpu->vcpu_id;
	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
	pend_shared = vcpu->arch.vgic_cpu.pending_shared;

	pending = vgic_bitmap_get_cpu_map(&dist->irq_state, vcpu_id);
	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);

	pending = vgic_bitmap_get_shared_map(&dist->irq_state);
	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
	bitmap_and(pend_shared, pending, enabled, VGIC_NR_SHARED_IRQS);
	bitmap_and(pend_shared, pend_shared,
		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
		   VGIC_NR_SHARED_IRQS);

	pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
	pending_shared = find_first_bit(pend_shared, VGIC_NR_SHARED_IRQS);
	return (pending_private < VGIC_NR_PRIVATE_IRQS ||
		pending_shared < VGIC_NR_SHARED_IRQS);
}

/*
 * Update the interrupt state and determine which CPUs have pending
 * interrupts. Must be called with distributor lock held.
 */
static void vgic_update_state(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int c;

	if (!dist->enabled) {
		set_bit(0, &dist->irq_pending_on_cpu);
		return;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (compute_pending_for_cpu(vcpu)) {
			pr_debug("CPU%d has pending interrupts\n", c);
			set_bit(c, &dist->irq_pending_on_cpu);
		}
	}
}

#define LR_CPUID(lr)	\
	(((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT)
#define MK_LR_PEND(src, irq)	\
	(GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))

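/*
 * For example (illustrative), MK_LR_PEND(2, 5) builds an LR value with
 * GICH_LR_PENDING_BIT set, source CPU2 in the PHYSID field and virtual
 * ID 5; LR_CPUID() extracts the 2 back out of such a value.
 */
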
/*
 * An interrupt may have been disabled after being made pending on the
 * CPU interface (the classic case is a timer running while we're
 * rebooting the guest - the interrupt would kick as soon as the CPU
 * interface gets enabled, with deadly consequences).
 *
 * The solution is to examine already active LRs, and check whether the
 * interrupt is still enabled. If not, just retire it.
 */
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int lr;

	for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
		int irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;

		if (!vgic_irq_is_enabled(vcpu, irq)) {
			vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
			clear_bit(lr, vgic_cpu->lr_used);
			vgic_cpu->vgic_lr[lr] &= ~GICH_LR_STATE;
			if (vgic_irq_is_active(vcpu, irq))
				vgic_irq_clear_active(vcpu, irq);
		}
	}
}

/*
 * Queue an interrupt to a CPU virtual interface. Return true on success,
 * or false if it wasn't possible to queue it.
 */
static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int lr;

	/* Sanitize the input... */
	BUG_ON(sgi_source_id & ~7);
	BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
	BUG_ON(irq >= VGIC_NR_IRQS);

	kvm_debug("Queue IRQ%d\n", irq);

	lr = vgic_cpu->vgic_irq_lr_map[irq];

	/* Do we have an active interrupt for the same CPUID? */
	if (lr != LR_EMPTY &&
	    (LR_CPUID(vgic_cpu->vgic_lr[lr]) == sgi_source_id)) {
		kvm_debug("LR%d piggyback for IRQ%d %x\n",
			  lr, irq, vgic_cpu->vgic_lr[lr]);
		BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
		vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
		goto out;
	}

	/* Try to use another LR for this interrupt */
	lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
				 vgic_cpu->nr_lr);
	if (lr >= vgic_cpu->nr_lr)
		return false;

	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
	vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
	vgic_cpu->vgic_irq_lr_map[irq] = lr;
	set_bit(lr, vgic_cpu->lr_used);

out:
	if (!vgic_irq_is_edge(vcpu, irq))
		vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;

	return true;
}

static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long sources;
	int vcpu_id = vcpu->vcpu_id;
	int c;

	sources = dist->irq_sgi_sources[vcpu_id][irq];

	for_each_set_bit(c, &sources, VGIC_MAX_CPUS) {
		if (vgic_queue_irq(vcpu, c, irq))
			clear_bit(c, &sources);
	}

	dist->irq_sgi_sources[vcpu_id][irq] = sources;

	/*
	 * If the sources bitmap has been cleared it means that we
	 * could queue all the SGIs onto link registers (see the
	 * clear_bit above), and therefore we are done with them in
	 * our emulated gic and can get rid of them.
	 */
	if (!sources) {
		vgic_dist_irq_clear(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return true;
	}

	return false;
}

static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
	if (vgic_irq_is_active(vcpu, irq))
		return true; /* level interrupt, already queued */

	if (vgic_queue_irq(vcpu, 0, irq)) {
		if (vgic_irq_is_edge(vcpu, irq)) {
			vgic_dist_irq_clear(vcpu, irq);
			vgic_cpu_irq_clear(vcpu, irq);
		} else {
			vgic_irq_set_active(vcpu, irq);
		}

		return true;
	}

	return false;
}

/*
 * Fill the list registers with pending interrupts before running the
 * guest.
 */
static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i, vcpu_id;
	int overflow = 0;

	vcpu_id = vcpu->vcpu_id;

	/*
	 * We may not have any pending interrupt, or the interrupts
	 * may have been serviced from another vcpu. In all cases,
	 * move along.
	 */
	if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
		pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
		goto epilog;
	}

	/* SGIs */
	for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
		if (!vgic_queue_sgi(vcpu, i))
			overflow = 1;
	}

	/* PPIs */
	for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i))
			overflow = 1;
	}

	/* SPIs */
	for_each_set_bit(i, vgic_cpu->pending_shared, VGIC_NR_SHARED_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
			overflow = 1;
	}

epilog:
	if (overflow) {
		vgic_cpu->vgic_hcr |= GICH_HCR_UIE;
	} else {
		vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
		/*
		 * We're about to run this VCPU, and we've consumed
		 * everything the distributor had in store for
		 * us. Claim we don't have anything pending. We'll
		 * adjust that if needed while exiting.
		 */
		clear_bit(vcpu_id, &dist->irq_pending_on_cpu);
	}
}

static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	bool level_pending = false;

	kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);

	/*
	 * We do not need to take the distributor lock here, since the only
	 * action we perform is clearing the irq_active_bit for an EOIed
	 * level interrupt. There is a potential race with
	 * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we
	 * check if the interrupt is already active. Two possibilities:
	 *
	 * - The queuing is occurring on the same vcpu: cannot happen,
	 *   as we're already in the context of this vcpu, and
	 *   executing the handler
	 * - The interrupt has been migrated to another vcpu, and we
	 *   ignore this interrupt for this run. Big deal. It is still
	 *   pending though, and will get considered when this vcpu
	 *   exits.
	 */
	if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
		/*
		 * Some level interrupts have been EOIed. Clear their
		 * active bit.
		 */
		int lr, irq;

		for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_eisr,
				 vgic_cpu->nr_lr) {
			irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;

			vgic_irq_clear_active(vcpu, irq);
			vgic_cpu->vgic_lr[lr] &= ~GICH_LR_EOI;

			/* Any additional pending interrupt? */
			if (vgic_dist_irq_is_pending(vcpu, irq)) {
				vgic_cpu_irq_set(vcpu, irq);
				level_pending = true;
			} else {
				vgic_cpu_irq_clear(vcpu, irq);
			}
		}
	}

	if (vgic_cpu->vgic_misr & GICH_MISR_U)
		vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;

	return level_pending;
}

/*
 * Sync back the VGIC state after a guest run. We do not really touch
 * the distributor here (the irq_pending_on_cpu bit is safe to set),
 * so there is no need for taking its lock.
 */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int lr, pending;
	bool level_pending;

	level_pending = vgic_process_maintenance(vcpu);

	/* Clear mappings for empty LRs */
	for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr,
			 vgic_cpu->nr_lr) {
		int irq;

		if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
			continue;

		irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;

		BUG_ON(irq >= VGIC_NR_IRQS);
		vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
	}

	/* Check if we still have something up our sleeve... */
	pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_elrsr,
				      vgic_cpu->nr_lr);
	if (level_pending || pending < vgic_cpu->nr_lr)
		set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}

void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_flush_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	__kvm_vgic_sync_hwstate(vcpu);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}

static void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu))
			kvm_vcpu_kick(vcpu);
	}
}

static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
{
	int is_edge = vgic_irq_is_edge(vcpu, irq);
	int state = vgic_dist_irq_is_pending(vcpu, irq);

	/*
	 * Only inject an interrupt if:
	 * - edge triggered and we have a rising edge
	 * - level triggered and we change level
	 */
	if (is_edge)
		return level > state;
	else
		return level != state;
}

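/*
 * Worked example (illustrative): an edge interrupt with state == 1 and
 * level == 1 is rejected (no rising edge), while a level interrupt with
 * state == 0 and level == 1 is accepted (the line changed).
 */
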
static bool vgic_update_irq_state(struct kvm *kvm, int cpuid,
				  unsigned int irq_num, bool level)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int is_edge, is_level;
	int enabled;
	bool ret = true;

	spin_lock(&dist->lock);

	vcpu = kvm_get_vcpu(kvm, cpuid);
	is_edge = vgic_irq_is_edge(vcpu, irq_num);
	is_level = !is_edge;

	if (!vgic_validate_injection(vcpu, irq_num, level)) {
		ret = false;
		goto out;
	}

	if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
		cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
		vcpu = kvm_get_vcpu(kvm, cpuid);
	}

	kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);

	if (level)
		vgic_dist_irq_set(vcpu, irq_num);
	else
		vgic_dist_irq_clear(vcpu, irq_num);

	enabled = vgic_irq_is_enabled(vcpu, irq_num);

	if (!enabled) {
		ret = false;
		goto out;
	}

	if (is_level && vgic_irq_is_active(vcpu, irq_num)) {
		/*
		 * Level interrupt in progress, will be picked up
		 * when EOId.
		 */
		ret = false;
		goto out;
	}

	if (level) {
		vgic_cpu_irq_set(vcpu, irq_num);
		set_bit(cpuid, &dist->irq_pending_on_cpu);
	}

out:
	spin_unlock(&dist->lock);

	return ret;
}

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @irq_num: The IRQ number that is assigned to the device
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  activates an interrupt
 *			      false: deactivates an interrupt
 *
 * The GIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts. You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
			bool level)
{
	if (vgic_update_irq_state(kvm, cpuid, irq_num, level))
		vgic_kick_vcpus(kvm);

	return 0;
}

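/*
 * A typical in-kernel caller is the arch timer raising its
 * level-sensitive PPI on its own vcpu, along the lines of (sketch;
 * timer_irq stands for whatever PPI number the VM was configured with):
 *
 *	kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, timer_irq, 1);
 */
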
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_process_maintenance).
	 */
	return IRQ_HANDLED;
}

int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	if (vcpu->vcpu_id >= VGIC_MAX_CPUS)
		return -EBUSY;

	for (i = 0; i < VGIC_NR_IRQS; i++) {
		if (i < VGIC_NR_PPIS)
			vgic_bitmap_set_irq_val(&dist->irq_enabled,
						vcpu->vcpu_id, i, 1);
		if (i < VGIC_NR_PRIVATE_IRQS)
			vgic_bitmap_set_irq_val(&dist->irq_cfg,
						vcpu->vcpu_id, i, VGIC_CFG_EDGE);

		vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
	}

	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vgic_cpu->vgic_vmcr = 0;

	vgic_cpu->nr_lr = vgic_nr_lr;
	vgic_cpu->vgic_hcr = GICH_HCR_EN; /* Get the show on the road... */

	return 0;
}

static void vgic_init_maintenance_interrupt(void *info)
{
	enable_percpu_irq(vgic_maint_irq, 0);
}

static int vgic_cpu_notify(struct notifier_block *self,
			   unsigned long action, void *cpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		vgic_init_maintenance_interrupt(NULL);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		disable_percpu_irq(vgic_maint_irq);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block vgic_cpu_nb = {
	.notifier_call = vgic_cpu_notify,
};

int kvm_vgic_hyp_init(void)
{
	int ret;
	struct resource vctrl_res;
	struct resource vcpu_res;

	vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
	if (!vgic_node) {
		kvm_err("error: no compatible vgic node in DT\n");
		return -ENODEV;
	}

	vgic_maint_irq = irq_of_parse_and_map(vgic_node, 0);
	if (!vgic_maint_irq) {
		kvm_err("error getting vgic maintenance irq from DT\n");
		ret = -ENXIO;
		goto out;
	}

	ret = request_percpu_irq(vgic_maint_irq, vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n", vgic_maint_irq);
		goto out;
	}

	ret = register_cpu_notifier(&vgic_cpu_nb);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
	if (ret) {
		kvm_err("Cannot obtain VCTRL resource\n");
		goto out_free_irq;
	}

	vgic_vctrl_base = of_iomap(vgic_node, 2);
	if (!vgic_vctrl_base) {
		kvm_err("Cannot ioremap VCTRL\n");
		ret = -ENOMEM;
		goto out_free_irq;
	}

	vgic_nr_lr = readl_relaxed(vgic_vctrl_base + GICH_VTR);
	vgic_nr_lr = (vgic_nr_lr & 0x3f) + 1;

	ret = create_hyp_io_mappings(vgic_vctrl_base,
				     vgic_vctrl_base + resource_size(&vctrl_res),
				     vctrl_res.start);
	if (ret) {
		kvm_err("Cannot map VCTRL into hyp\n");
		goto out_unmap;
	}

	kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
		 vctrl_res.start, vgic_maint_irq);
	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);

	if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
		kvm_err("Cannot obtain VCPU resource\n");
		ret = -ENXIO;
		goto out_unmap;
	}
	vgic_vcpu_base = vcpu_res.start;

	goto out;

out_unmap:
	iounmap(vgic_vctrl_base);
out_free_irq:
	free_percpu_irq(vgic_maint_irq, kvm_get_running_vcpus());
out:
	of_node_put(vgic_node);
	return ret;
}

int kvm_vgic_init(struct kvm *kvm)
{
	int ret = 0, i;

	mutex_lock(&kvm->lock);

	if (vgic_initialized(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
				    vgic_vcpu_base, KVM_VGIC_V2_CPU_SIZE);
	if (ret) {
		kvm_err("Unable to remap VGIC CPU to VCPU\n");
		goto out;
	}

	for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4)
		vgic_set_target_reg(kvm, 0, i);

	kvm_timer_init(kvm);
	kvm->arch.vgic.ready = true;
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

int kvm_vgic_create(struct kvm *kvm)
{
	int ret = 0;

	mutex_lock(&kvm->lock);

	if (atomic_read(&kvm->online_vcpus) || kvm->arch.vgic.vctrl_base) {
		ret = -EEXIST;
		goto out;
	}

	spin_lock_init(&kvm->arch.vgic.lock);
	kvm->arch.vgic.vctrl_base = vgic_vctrl_base;
	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;

out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int vgic_ioaddr_overlap(struct kvm *kvm)
{
	phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
	phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;

	if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
		return 0;
	if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
	    (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
		return -EBUSY;
	return 0;
}

static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
			      phys_addr_t addr, phys_addr_t size)
{
	int ret;

	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;
	if (addr + size < addr)
		return -EINVAL;

	ret = vgic_ioaddr_overlap(kvm);
	if (ret)
		return ret;
	*ioaddr = addr;
	return ret;
}

*kvm
, unsigned long type
, u64 addr
)
1482 struct vgic_dist
*vgic
= &kvm
->arch
.vgic
;
1484 if (addr
& ~KVM_PHYS_MASK
)
1487 if (addr
& ~PAGE_MASK
)
1490 mutex_lock(&kvm
->lock
);
1492 case KVM_VGIC_V2_ADDR_TYPE_DIST
:
1493 r
= vgic_ioaddr_assign(kvm
, &vgic
->vgic_dist_base
,
1494 addr
, KVM_VGIC_V2_DIST_SIZE
);
1496 case KVM_VGIC_V2_ADDR_TYPE_CPU
:
1497 r
= vgic_ioaddr_assign(kvm
, &vgic
->vgic_cpu_base
,
1498 addr
, KVM_VGIC_V2_CPU_SIZE
);
1504 mutex_unlock(&kvm
->lock
);