Commit | Line | Data |
---|---|---|
1a89dd91 MZ |
1 | /* |
2 | * Copyright (C) 2012 ARM Ltd. | |
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License version 2 as | |
7 | * published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License | |
15 | * along with this program; if not, write to the Free Software | |
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
17 | */ | |
18 | ||
01ac5e34 | 19 | #include <linux/cpu.h> |
1a89dd91 MZ |
20 | #include <linux/kvm.h> |
21 | #include <linux/kvm_host.h> | |
22 | #include <linux/interrupt.h> | |
23 | #include <linux/io.h> | |
01ac5e34 MZ |
24 | #include <linux/of.h> |
25 | #include <linux/of_address.h> | |
26 | #include <linux/of_irq.h> | |
2a2f3e26 | 27 | #include <linux/uaccess.h> |
01ac5e34 MZ |
28 | |
29 | #include <linux/irqchip/arm-gic.h> | |
30 | ||
1a89dd91 | 31 | #include <asm/kvm_emulate.h> |
01ac5e34 MZ |
32 | #include <asm/kvm_arm.h> |
33 | #include <asm/kvm_mmu.h> | |
1a89dd91 | 34 | |
b47ef92a MZ |
35 | /* |
36 | * How the whole thing works (courtesy of Christoffer Dall): | |
37 | * | |
38 | * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if | |
39 | * something is pending | |
40 | * - VGIC pending interrupts are stored in the vgic.irq_state |
41 | * bitmap (this bitmap is updated by both userland ioctls and guest |
42 | * mmio ops, and other in-kernel peripherals such as the | |
43 | * arch. timers) and indicate the 'wire' state. | |
44 | * - Every time the bitmap changes, the irq_pending_on_cpu oracle is | |
45 | * recalculated | |
46 | * - To calculate the oracle, we need info for each cpu from | |
47 | * compute_pending_for_cpu, which considers: | |
48 | * - PPI: dist->irq_state & dist->irq_enable | |
49 | * - SPI: dist->irq_state & dist->irq_enable & dist->irq_spi_target | |
50 | * - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn |
51 | * registers, stored on each vcpu. We only keep one bit of | |
52 | * information per interrupt, making sure that only one vcpu can | |
53 | * accept the interrupt. | |
54 | * - The same is true when injecting an interrupt, except that we only | |
55 | * consider a single interrupt at a time. The irq_spi_cpu array | |
56 | * contains the target CPU for each SPI. | |
57 | * | |
58 | * The handling of level interrupts adds some extra complexity. We | |
59 | * need to track when the interrupt has been EOIed, so we can sample | |
60 | * the 'line' again. This is achieved as follows: |
61 | * | |
62 | * - When a level interrupt is moved onto a vcpu, the corresponding | |
63 | * bit in irq_active is set. As long as this bit is set, the line | |
64 | * will be ignored for further interrupts. The interrupt is injected | |
65 | * into the vcpu with the GICH_LR_EOI bit set (generate a | |
66 | * maintenance interrupt on EOI). | |
67 | * - When the interrupt is EOIed, the maintenance interrupt fires, | |
68 | * and clears the corresponding bit in irq_active. This allows the |
69 | * interrupt line to be sampled again. | |
70 | */ | |
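/*
 * A minimal sketch of the oracle recalculation described above. The
 * real code is compute_pending_for_cpu()/vgic_update_state() further
 * down; the bitmap operations are abbreviated here for illustration:
 *
 *	pend  = dist->irq_state & dist->irq_enabled;	// 'wire' & enable
 *	pend &= dist->irq_spi_target[cpu];		// SPIs: target filter
 *	if (pend)
 *		set_bit(cpu, &dist->irq_pending_on_cpu);
 */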
71 | ||
330690cd CD |
72 | #define VGIC_ADDR_UNDEF (-1) |
73 | #define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF) | |
74 | ||
fa20f5ae CD |
75 | #define PRODUCT_ID_KVM 0x4b /* ASCII code K */ |
76 | #define IMPLEMENTER_ARM 0x43b | |
77 | #define GICC_ARCH_VERSION_V2 0x2 | |
78 | ||
01ac5e34 MZ |
79 | /* Physical address of vgic virtual cpu interface */ |
80 | static phys_addr_t vgic_vcpu_base; | |
81 | ||
82 | /* Virtual control interface base address */ | |
83 | static void __iomem *vgic_vctrl_base; | |
84 | ||
85 | static struct device_node *vgic_node; | |
86 | ||
1a89dd91 MZ |
87 | #define ACCESS_READ_VALUE (1 << 0) |
88 | #define ACCESS_READ_RAZ (0 << 0) | |
89 | #define ACCESS_READ_MASK(x) ((x) & (1 << 0)) | |
90 | #define ACCESS_WRITE_IGNORED (0 << 1) | |
91 | #define ACCESS_WRITE_SETBIT (1 << 1) | |
92 | #define ACCESS_WRITE_CLEARBIT (2 << 1) | |
93 | #define ACCESS_WRITE_VALUE (3 << 1) | |
94 | #define ACCESS_WRITE_MASK(x) ((x) & (3 << 1)) | |
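/*
 * Example: the set-enable handlers below use
 * (ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT), i.e. mode == 0x3, so
 * ACCESS_READ_MASK(mode) == ACCESS_READ_VALUE and
 * ACCESS_WRITE_MASK(mode) == ACCESS_WRITE_SETBIT.
 */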
95 | ||
a1fcb44e | 96 | static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu); |
8d5c6b06 | 97 | static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu); |
b47ef92a | 98 | static void vgic_update_state(struct kvm *kvm); |
5863c2ce | 99 | static void vgic_kick_vcpus(struct kvm *kvm); |
b47ef92a | 100 | static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg); |
8d5c6b06 MZ |
101 | static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr); |
102 | static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc); | |
beee38b9 MZ |
103 | static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); |
104 | static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); | |
01ac5e34 | 105 | |
beee38b9 | 106 | static u32 vgic_nr_lr; |
01ac5e34 | 107 | static unsigned int vgic_maint_irq; |
b47ef92a MZ |
108 | |
109 | static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, | |
110 | int cpuid, u32 offset) | |
111 | { | |
112 | offset >>= 2; | |
113 | if (!offset) | |
114 | return x->percpu[cpuid].reg; | |
115 | else | |
116 | return x->shared.reg + offset - 1; | |
117 | } | |
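/*
 * Example mapping for a register block whose first word is banked
 * per-cpu (GICD_ISENABLERn-style layout):
 *
 *	offset 0x00 -> x->percpu[cpuid].reg	(IRQs  0-31, banked)
 *	offset 0x04 -> x->shared.reg + 0	(IRQs 32-63)
 *	offset 0x08 -> x->shared.reg + 1	(IRQs 64-95)
 */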
118 | ||
119 | static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x, | |
120 | int cpuid, int irq) | |
121 | { | |
122 | if (irq < VGIC_NR_PRIVATE_IRQS) | |
123 | return test_bit(irq, x->percpu[cpuid].reg_ul); | |
124 | ||
125 | return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared.reg_ul); | |
126 | } | |
127 | ||
128 | static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid, | |
129 | int irq, int val) | |
130 | { | |
131 | unsigned long *reg; | |
132 | ||
133 | if (irq < VGIC_NR_PRIVATE_IRQS) { | |
134 | reg = x->percpu[cpuid].reg_ul; | |
135 | } else { | |
136 | reg = x->shared.reg_ul; | |
137 | irq -= VGIC_NR_PRIVATE_IRQS; | |
138 | } | |
139 | ||
140 | if (val) | |
141 | set_bit(irq, reg); | |
142 | else | |
143 | clear_bit(irq, reg); | |
144 | } | |
145 | ||
146 | static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid) | |
147 | { | |
148 | if (unlikely(cpuid >= VGIC_MAX_CPUS)) | |
149 | return NULL; | |
150 | return x->percpu[cpuid].reg_ul; | |
151 | } | |
152 | ||
153 | static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x) | |
154 | { | |
155 | return x->shared.reg_ul; | |
156 | } | |
157 | ||
158 | static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset) | |
159 | { | |
160 | offset >>= 2; | |
161 | BUG_ON(offset > (VGIC_NR_IRQS / 4)); | |
8d98915b | 162 | if (offset < 8) |
b47ef92a MZ |
163 | return x->percpu[cpuid] + offset; |
164 | else | |
165 | return x->shared + offset - 8; | |
166 | } | |
167 | ||
168 | #define VGIC_CFG_LEVEL 0 | |
169 | #define VGIC_CFG_EDGE 1 | |
170 | ||
171 | static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq) | |
172 | { | |
173 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | |
174 | int irq_val; | |
175 | ||
176 | irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq); | |
177 | return irq_val == VGIC_CFG_EDGE; | |
178 | } | |
179 | ||
180 | static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq) | |
181 | { | |
182 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | |
183 | ||
184 | return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq); | |
185 | } | |
186 | ||
9d949dce MZ |
187 | static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq) |
188 | { | |
189 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | |
190 | ||
191 | return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq); | |
192 | } | |
193 | ||
194 | static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq) | |
195 | { | |
196 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | |
197 | ||
198 | vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1); | |
199 | } | |
200 | ||
201 | static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq) | |
202 | { | |
203 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | |
204 | ||
205 | vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0); | |
206 | } | |
207 | ||
208 | static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq) | |
209 | { | |
210 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | |
211 | ||
212 | return vgic_bitmap_get_irq_val(&dist->irq_state, vcpu->vcpu_id, irq); | |
213 | } | |
214 | ||
b47ef92a MZ |
215 | static void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq) |
216 | { | |
217 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | |
218 | ||
219 | vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 1); | |
220 | } | |
221 | ||
222 | static void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq) | |
223 | { | |
224 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | |
225 | ||
226 | vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 0); | |
227 | } | |
228 | ||
229 | static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq) | |
230 | { | |
231 | if (irq < VGIC_NR_PRIVATE_IRQS) | |
232 | set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu); | |
233 | else | |
234 | set_bit(irq - VGIC_NR_PRIVATE_IRQS, | |
235 | vcpu->arch.vgic_cpu.pending_shared); | |
236 | } | |
237 | ||
238 | static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq) | |
239 | { | |
240 | if (irq < VGIC_NR_PRIVATE_IRQS) | |
241 | clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu); | |
242 | else | |
243 | clear_bit(irq - VGIC_NR_PRIVATE_IRQS, | |
244 | vcpu->arch.vgic_cpu.pending_shared); | |
245 | } | |
246 | ||
1a89dd91 MZ |
247 | static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask) |
248 | { | |
249 | return *((u32 *)mmio->data) & mask; | |
250 | } | |
251 | ||
252 | static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value) | |
253 | { | |
254 | *((u32 *)mmio->data) = value & mask; | |
255 | } | |
256 | ||
257 | /** | |
258 | * vgic_reg_access - access vgic register | |
259 | * @mmio: pointer to the data describing the mmio access | |
260 | * @reg: pointer to the virtual backing of vgic distributor data | |
261 | * @offset: least significant 2 bits used for word offset | |
262 | * @mode: ACCESS_ mode (see defines above) | |
263 | * | |
264 | * Helper to make vgic register access easier using one of the access | |
265 | * modes defined for vgic register access | |
266 | * (read,raz,write-ignored,setbit,clearbit,write) | |
267 | */ | |
268 | static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg, | |
269 | phys_addr_t offset, int mode) | |
270 | { | |
271 | int word_offset = (offset & 3) * 8; | |
272 | u32 mask = (1UL << (mmio->len * 8)) - 1; | |
273 | u32 regval; | |
274 | ||
275 | /* | |
276 | * Any alignment fault should have been delivered to the guest | |
277 | * directly (ARM ARM B3.12.7 "Prioritization of aborts"). | |
278 | */ | |
279 | ||
280 | if (reg) { | |
281 | regval = *reg; | |
282 | } else { | |
283 | BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED)); | |
284 | regval = 0; | |
285 | } | |
286 | ||
287 | if (mmio->is_write) { | |
288 | u32 data = mmio_data_read(mmio, mask) << word_offset; | |
289 | switch (ACCESS_WRITE_MASK(mode)) { | |
290 | case ACCESS_WRITE_IGNORED: | |
291 | return; | |
292 | ||
293 | case ACCESS_WRITE_SETBIT: | |
294 | regval |= data; | |
295 | break; | |
296 | ||
297 | case ACCESS_WRITE_CLEARBIT: | |
298 | regval &= ~data; | |
299 | break; | |
300 | ||
301 | case ACCESS_WRITE_VALUE: | |
302 | regval = (regval & ~(mask << word_offset)) | data; | |
303 | break; | |
304 | } | |
305 | *reg = regval; | |
306 | } else { | |
307 | switch (ACCESS_READ_MASK(mode)) { | |
308 | case ACCESS_READ_RAZ: | |
309 | regval = 0; | |
310 | /* fall through */ | |
311 | ||
312 | case ACCESS_READ_VALUE: | |
313 | mmio_data_write(mmio, mask, regval >> word_offset); | |
314 | } | |
315 | } | |
316 | } | |
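/*
 * Example: a 2-byte guest write at byte offset 2 of a register gives
 * word_offset = 2 * 8 = 16 and mask = (1UL << 16) - 1 = 0xffff, so an
 * ACCESS_WRITE_VALUE access replaces only bits [31:16] of *reg.
 */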
317 | ||
b47ef92a MZ |
318 | static bool handle_mmio_misc(struct kvm_vcpu *vcpu, |
319 | struct kvm_exit_mmio *mmio, phys_addr_t offset) | |
320 | { | |
321 | u32 reg; | |
322 | u32 word_offset = offset & 3; | |
323 | ||
324 | switch (offset & ~3) { | |
fa20f5ae | 325 | case 0: /* GICD_CTLR */ |
b47ef92a MZ |
326 | reg = vcpu->kvm->arch.vgic.enabled; |
327 | vgic_reg_access(mmio, ®, word_offset, | |
328 | ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); | |
329 | if (mmio->is_write) { | |
330 | vcpu->kvm->arch.vgic.enabled = reg & 1; | |
331 | vgic_update_state(vcpu->kvm); | |
332 | return true; | |
333 | } | |
334 | break; | |
335 | ||
fa20f5ae | 336 | case 4: /* GICD_TYPER */ |
b47ef92a MZ |
337 | reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5; |
338 | reg |= (VGIC_NR_IRQS >> 5) - 1; | |
339 | vgic_reg_access(mmio, ®, word_offset, | |
340 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); | |
341 | break; | |
342 | ||
fa20f5ae CD |
343 | case 8: /* GICD_IIDR */ |
344 | reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0); | |
b47ef92a MZ |
345 | vgic_reg_access(mmio, ®, word_offset, |
346 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); | |
347 | break; | |
348 | } | |
349 | ||
350 | return false; | |
351 | } | |
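/*
 * Example GICD_TYPER value: with 4 online vcpus and VGIC_NR_IRQS == 256
 * (the value used by the arm KVM headers here), the guest reads
 * ((4 - 1) << 5) | ((256 >> 5) - 1) == 0x67, i.e. 4 CPUs and
 * 8 * 32 = 256 interrupt IDs.
 */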
352 | ||
353 | static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, | |
354 | struct kvm_exit_mmio *mmio, phys_addr_t offset) | |
355 | { | |
356 | vgic_reg_access(mmio, NULL, offset, | |
357 | ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED); | |
358 | return false; | |
359 | } | |
360 | ||
361 | static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu, | |
362 | struct kvm_exit_mmio *mmio, | |
363 | phys_addr_t offset) | |
364 | { | |
365 | u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled, | |
366 | vcpu->vcpu_id, offset); | |
367 | vgic_reg_access(mmio, reg, offset, | |
368 | ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT); | |
369 | if (mmio->is_write) { | |
370 | vgic_update_state(vcpu->kvm); | |
371 | return true; | |
372 | } | |
373 | ||
374 | return false; | |
375 | } | |
376 | ||
377 | static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu, | |
378 | struct kvm_exit_mmio *mmio, | |
379 | phys_addr_t offset) | |
380 | { | |
381 | u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled, | |
382 | vcpu->vcpu_id, offset); | |
383 | vgic_reg_access(mmio, reg, offset, | |
384 | ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT); | |
385 | if (mmio->is_write) { | |
386 | if (offset < 4) /* Force SGI enabled */ | |
387 | *reg |= 0xffff; | |
a1fcb44e | 388 | vgic_retire_disabled_irqs(vcpu); |
b47ef92a MZ |
389 | vgic_update_state(vcpu->kvm); |
390 | return true; | |
391 | } | |
392 | ||
393 | return false; | |
394 | } | |
395 | ||
396 | static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu, | |
397 | struct kvm_exit_mmio *mmio, | |
398 | phys_addr_t offset) | |
399 | { | |
400 | u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state, | |
401 | vcpu->vcpu_id, offset); | |
402 | vgic_reg_access(mmio, reg, offset, | |
403 | ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT); | |
404 | if (mmio->is_write) { | |
405 | vgic_update_state(vcpu->kvm); | |
406 | return true; | |
407 | } | |
408 | ||
409 | return false; | |
410 | } | |
411 | ||
412 | static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu, | |
413 | struct kvm_exit_mmio *mmio, | |
414 | phys_addr_t offset) | |
415 | { | |
416 | u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state, | |
417 | vcpu->vcpu_id, offset); | |
418 | vgic_reg_access(mmio, reg, offset, | |
419 | ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT); | |
420 | if (mmio->is_write) { | |
421 | vgic_update_state(vcpu->kvm); | |
422 | return true; | |
423 | } | |
424 | ||
425 | return false; | |
426 | } | |
427 | ||
428 | static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu, | |
429 | struct kvm_exit_mmio *mmio, | |
430 | phys_addr_t offset) | |
431 | { | |
432 | u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority, | |
433 | vcpu->vcpu_id, offset); | |
434 | vgic_reg_access(mmio, reg, offset, | |
435 | ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); | |
436 | return false; | |
437 | } | |
438 | ||
439 | #define GICD_ITARGETSR_SIZE 32 | |
440 | #define GICD_CPUTARGETS_BITS 8 | |
441 | #define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS) | |
442 | static u32 vgic_get_target_reg(struct kvm *kvm, int irq) | |
443 | { | |
444 | struct vgic_dist *dist = &kvm->arch.vgic; | |
986af8e0 | 445 | int i; |
b47ef92a MZ |
446 | u32 val = 0; |
447 | ||
448 | irq -= VGIC_NR_PRIVATE_IRQS; | |
449 | ||
986af8e0 MZ |
450 | for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) |
451 | val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8); | |
b47ef92a MZ |
452 | |
453 | return val; | |
454 | } | |
455 | ||
456 | static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq) | |
457 | { | |
458 | struct vgic_dist *dist = &kvm->arch.vgic; | |
459 | struct kvm_vcpu *vcpu; | |
460 | int i, c; | |
461 | unsigned long *bmap; | |
462 | u32 target; | |
463 | ||
464 | irq -= VGIC_NR_PRIVATE_IRQS; | |
465 | ||
466 | /* | |
467 | * Pick the LSB in each byte. This ensures we target exactly | |
468 | * one vcpu per IRQ. If the byte is null, assume we target | |
469 | * CPU0. | |
470 | */ | |
471 | for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) { | |
472 | int shift = i * GICD_CPUTARGETS_BITS; | |
473 | target = ffs((val >> shift) & 0xffU); | |
474 | target = target ? (target - 1) : 0; | |
475 | dist->irq_spi_cpu[irq + i] = target; | |
476 | kvm_for_each_vcpu(c, vcpu, kvm) { | |
477 | bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]); | |
478 | if (c == target) | |
479 | set_bit(irq + i, bmap); | |
480 | else | |
481 | clear_bit(irq + i, bmap); | |
482 | } | |
483 | } | |
484 | } | |
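/*
 * Worked example of the "pick the LSB" rule: a guest write of
 * 0x04060201 to the GICD_ITARGETSR word covering SPIs 32-35 yields
 * irq_spi_cpu[] = { 0, 1, 1, 2 } (the 0x06 byte has two target bits,
 * and only its LSB, CPU1, is kept). vgic_get_target_reg() then reads
 * back 0x04020201: exactly one target bit per byte.
 */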
485 | ||
486 | static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu, | |
487 | struct kvm_exit_mmio *mmio, | |
488 | phys_addr_t offset) | |
489 | { | |
490 | u32 reg; | |
491 | ||
492 | /* We treat the banked interrupt targets as read-only */ |
493 | if (offset < 32) { | |
494 | u32 roreg = 1 << vcpu->vcpu_id; | |
495 | roreg |= roreg << 8; | |
496 | roreg |= roreg << 16; | |
497 | ||
498 | vgic_reg_access(mmio, &roreg, offset, | |
499 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); | |
500 | return false; | |
501 | } | |
502 | ||
503 | reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U); | |
504 | vgic_reg_access(mmio, ®, offset, | |
505 | ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); | |
506 | if (mmio->is_write) { | |
507 | vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U); | |
508 | vgic_update_state(vcpu->kvm); | |
509 | return true; | |
510 | } | |
511 | ||
512 | return false; | |
513 | } | |
514 | ||
515 | static u32 vgic_cfg_expand(u16 val) | |
516 | { | |
517 | u32 res = 0; | |
518 | int i; | |
519 | ||
520 | /* | |
521 | * Turn a 16bit value like abcd...mnop into a 32bit word | |
522 | * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is. | |
523 | */ | |
524 | for (i = 0; i < 16; i++) | |
525 | res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1); | |
526 | ||
527 | return res; | |
528 | } | |
529 | ||
530 | static u16 vgic_cfg_compress(u32 val) | |
531 | { | |
532 | u16 res = 0; | |
533 | int i; | |
534 | ||
535 | /* | |
536 | * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like | |
537 | * abcd...mnop which is what we really care about. | |
538 | */ | |
539 | for (i = 0; i < 16; i++) | |
540 | res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i; | |
541 | ||
542 | return res; | |
543 | } | |
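/*
 * Example round-trip: vgic_cfg_expand(0x8001) == 0x80000002 (bit 15
 * maps to bit 31, bit 0 to bit 1), and vgic_cfg_compress(0x80000002)
 * recovers 0x8001.
 */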
544 | ||
545 | /* | |
546 | * The distributor uses 2 bits per IRQ for the CFG register, but the | |
547 | * LSB is always 0. As such, we only keep the upper bit, and use the | |
548 | * two above functions to compress/expand the bits | |
549 | */ | |
550 | static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu, | |
551 | struct kvm_exit_mmio *mmio, phys_addr_t offset) | |
552 | { | |
553 | u32 val; | |
6545eae3 MZ |
554 | u32 *reg; |
555 | ||
6545eae3 | 556 | reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg, |
f2ae85b2 | 557 | vcpu->vcpu_id, offset >> 1); |
6545eae3 | 558 | |
f2ae85b2 | 559 | if (offset & 4) |
b47ef92a MZ |
560 | val = *reg >> 16; |
561 | else | |
562 | val = *reg & 0xffff; | |
563 | ||
564 | val = vgic_cfg_expand(val); | |
565 | vgic_reg_access(mmio, &val, offset, | |
566 | ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); | |
567 | if (mmio->is_write) { | |
f2ae85b2 | 568 | if (offset < 8) { |
b47ef92a MZ |
569 | *reg = ~0U; /* Force PPIs/SGIs to 1 */ |
570 | return false; | |
571 | } | |
572 | ||
573 | val = vgic_cfg_compress(val); | |
f2ae85b2 | 574 | if (offset & 4) { |
b47ef92a MZ |
575 | *reg &= 0xffff; |
576 | *reg |= val << 16; | |
577 | } else { | |
578 | *reg &= 0xffff << 16; | |
579 | *reg |= val; | |
580 | } | |
581 | } | |
582 | ||
583 | return false; | |
584 | } | |
585 | ||
586 | static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu, | |
587 | struct kvm_exit_mmio *mmio, phys_addr_t offset) | |
588 | { | |
589 | u32 reg; | |
590 | vgic_reg_access(mmio, ®, offset, | |
591 | ACCESS_READ_RAZ | ACCESS_WRITE_VALUE); | |
592 | if (mmio->is_write) { | |
593 | vgic_dispatch_sgi(vcpu, reg); | |
594 | vgic_update_state(vcpu->kvm); | |
595 | return true; | |
596 | } | |
597 | ||
598 | return false; | |
599 | } | |
600 | ||
cbd333a4 CD |
601 | /** |
602 | * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor | |
603 | * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs | |
604 | * | |
605 | * Move any pending IRQs that have already been assigned to LRs back to the | |
606 | * emulated distributor state so that the complete emulated state can be read | |
607 | * from the main emulation structures without investigating the LRs. | |
608 | * | |
609 | * Note that IRQs in the active state in the LRs get their pending state moved | |
610 | * to the distributor but the active state stays in the LRs, because we don't | |
611 | * track the active state on the distributor side. | |
612 | */ | |
613 | static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) | |
614 | { | |
615 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | |
616 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | |
617 | int vcpu_id = vcpu->vcpu_id; | |
8d5c6b06 | 618 | int i; |
cbd333a4 CD |
619 | |
620 | for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) { | |
8d5c6b06 | 621 | struct vgic_lr lr = vgic_get_lr(vcpu, i); |
cbd333a4 CD |
622 | |
623 | /* | |
624 | * There are three options for the state bits: | |
625 | * | |
626 | * 01: pending | |
627 | * 10: active | |
628 | * 11: pending and active | |
629 | * | |
630 | * If the LR holds only an active interrupt (not pending) then | |
631 | * just leave it alone. | |
632 | */ | |
8d5c6b06 | 633 | if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE) |
cbd333a4 CD |
634 | continue; |
635 | ||
636 | /* | |
637 | * Reestablish the pending state on the distributor and the | |
638 | * CPU interface. It may have already been pending, but that | |
639 | * is fine, then we are only setting a few bits that were | |
640 | * already set. | |
641 | */ | |
8d5c6b06 MZ |
642 | vgic_dist_irq_set(vcpu, lr.irq); |
643 | if (lr.irq < VGIC_NR_SGIS) | |
644 | dist->irq_sgi_sources[vcpu_id][lr.irq] |= 1 << lr.source; | |
645 | lr.state &= ~LR_STATE_PENDING; | |
646 | vgic_set_lr(vcpu, i, lr); | |
cbd333a4 CD |
647 | |
648 | /* | |
649 | * If there's no state left on the LR (it could still be | |
650 | * active), then the LR does not hold any useful info and can | |
651 | * be marked as free for other use. | |
652 | */ | |
8d5c6b06 MZ |
653 | if (!(lr.state & LR_STATE_MASK)) |
654 | vgic_retire_lr(i, lr.irq, vcpu); | |
cbd333a4 CD |
655 | |
656 | /* Finally update the VGIC state. */ | |
657 | vgic_update_state(vcpu->kvm); | |
658 | } | |
659 | } | |
660 | ||
90a5355e CD |
661 | /* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */ |
662 | static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu, | |
663 | struct kvm_exit_mmio *mmio, | |
664 | phys_addr_t offset) | |
c07a0191 | 665 | { |
90a5355e CD |
666 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
667 | int sgi; | |
668 | int min_sgi = (offset & ~0x3); |
669 | int max_sgi = min_sgi + 3; | |
670 | int vcpu_id = vcpu->vcpu_id; | |
671 | u32 reg = 0; | |
672 | ||
673 | /* Copy source SGIs from distributor side */ | |
674 | for (sgi = min_sgi; sgi <= max_sgi; sgi++) { | |
675 | int shift = 8 * (sgi - min_sgi); | |
676 | reg |= (u32)dist->irq_sgi_sources[vcpu_id][sgi] << shift; | |
677 | } | |
678 | ||
679 | mmio_data_write(mmio, ~0, reg); | |
c07a0191 CD |
680 | return false; |
681 | } | |
682 | ||
90a5355e CD |
683 | static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu, |
684 | struct kvm_exit_mmio *mmio, | |
685 | phys_addr_t offset, bool set) | |
686 | { | |
687 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | |
688 | int sgi; | |
689 | int min_sgi = (offset & ~0x3); |
690 | int max_sgi = min_sgi + 3; | |
691 | int vcpu_id = vcpu->vcpu_id; | |
692 | u32 reg; | |
693 | bool updated = false; | |
694 | ||
695 | reg = mmio_data_read(mmio, ~0); | |
696 | ||
697 | /* Set or clear pending SGIs on the distributor */ |
698 | for (sgi = min_sgi; sgi <= max_sgi; sgi++) { | |
699 | u8 mask = reg >> (8 * (sgi - min_sgi)); | |
700 | if (set) { | |
701 | if ((dist->irq_sgi_sources[vcpu_id][sgi] & mask) != mask) | |
702 | updated = true; | |
703 | dist->irq_sgi_sources[vcpu_id][sgi] |= mask; | |
704 | } else { | |
705 | if (dist->irq_sgi_sources[vcpu_id][sgi] & mask) | |
706 | updated = true; | |
707 | dist->irq_sgi_sources[vcpu_id][sgi] &= ~mask; | |
708 | } | |
709 | } | |
710 | ||
711 | if (updated) | |
712 | vgic_update_state(vcpu->kvm); | |
713 | ||
714 | return updated; | |
715 | } | |
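/*
 * Example: writing 0x00000202 with set == true records CPU1 as a
 * pending source for SGI0 and SGI1 on this vcpu, since bit n of each
 * per-SGI byte stands for source CPU n.
 */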
716 | ||
c07a0191 CD |
717 | static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu, |
718 | struct kvm_exit_mmio *mmio, | |
719 | phys_addr_t offset) | |
720 | { | |
90a5355e CD |
721 | if (!mmio->is_write) |
722 | return read_set_clear_sgi_pend_reg(vcpu, mmio, offset); | |
723 | else | |
724 | return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true); | |
725 | } | |
726 | ||
727 | static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu, | |
728 | struct kvm_exit_mmio *mmio, | |
729 | phys_addr_t offset) | |
730 | { | |
731 | if (!mmio->is_write) | |
732 | return read_set_clear_sgi_pend_reg(vcpu, mmio, offset); | |
733 | else | |
734 | return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false); | |
c07a0191 CD |
735 | } |
736 | ||
1a89dd91 MZ |
737 | /* |
738 | * I would have liked to use the kvm_bus_io_*() API instead, but it | |
739 | * cannot cope with banked registers (only the VM pointer is passed | |
740 | * around, and we need the vcpu). One of these days, someone please | |
741 | * fix it! | |
742 | */ | |
743 | struct mmio_range { | |
744 | phys_addr_t base; | |
745 | unsigned long len; | |
746 | bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, | |
747 | phys_addr_t offset); | |
748 | }; | |
749 | ||
1006e8cb | 750 | static const struct mmio_range vgic_dist_ranges[] = { |
b47ef92a MZ |
751 | { |
752 | .base = GIC_DIST_CTRL, | |
753 | .len = 12, | |
754 | .handle_mmio = handle_mmio_misc, | |
755 | }, | |
756 | { | |
757 | .base = GIC_DIST_IGROUP, | |
758 | .len = VGIC_NR_IRQS / 8, | |
759 | .handle_mmio = handle_mmio_raz_wi, | |
760 | }, | |
761 | { | |
762 | .base = GIC_DIST_ENABLE_SET, | |
763 | .len = VGIC_NR_IRQS / 8, | |
764 | .handle_mmio = handle_mmio_set_enable_reg, | |
765 | }, | |
766 | { | |
767 | .base = GIC_DIST_ENABLE_CLEAR, | |
768 | .len = VGIC_NR_IRQS / 8, | |
769 | .handle_mmio = handle_mmio_clear_enable_reg, | |
770 | }, | |
771 | { | |
772 | .base = GIC_DIST_PENDING_SET, | |
773 | .len = VGIC_NR_IRQS / 8, | |
774 | .handle_mmio = handle_mmio_set_pending_reg, | |
775 | }, | |
776 | { | |
777 | .base = GIC_DIST_PENDING_CLEAR, | |
778 | .len = VGIC_NR_IRQS / 8, | |
779 | .handle_mmio = handle_mmio_clear_pending_reg, | |
780 | }, | |
781 | { | |
782 | .base = GIC_DIST_ACTIVE_SET, | |
783 | .len = VGIC_NR_IRQS / 8, | |
784 | .handle_mmio = handle_mmio_raz_wi, | |
785 | }, | |
786 | { | |
787 | .base = GIC_DIST_ACTIVE_CLEAR, | |
788 | .len = VGIC_NR_IRQS / 8, | |
789 | .handle_mmio = handle_mmio_raz_wi, | |
790 | }, | |
791 | { | |
792 | .base = GIC_DIST_PRI, | |
793 | .len = VGIC_NR_IRQS, | |
794 | .handle_mmio = handle_mmio_priority_reg, | |
795 | }, | |
796 | { | |
797 | .base = GIC_DIST_TARGET, | |
798 | .len = VGIC_NR_IRQS, | |
799 | .handle_mmio = handle_mmio_target_reg, | |
800 | }, | |
801 | { | |
802 | .base = GIC_DIST_CONFIG, | |
803 | .len = VGIC_NR_IRQS / 4, | |
804 | .handle_mmio = handle_mmio_cfg_reg, | |
805 | }, | |
806 | { | |
807 | .base = GIC_DIST_SOFTINT, | |
808 | .len = 4, | |
809 | .handle_mmio = handle_mmio_sgi_reg, | |
810 | }, | |
c07a0191 CD |
811 | { |
812 | .base = GIC_DIST_SGI_PENDING_CLEAR, | |
813 | .len = VGIC_NR_SGIS, | |
814 | .handle_mmio = handle_mmio_sgi_clear, | |
815 | }, | |
816 | { | |
817 | .base = GIC_DIST_SGI_PENDING_SET, | |
818 | .len = VGIC_NR_SGIS, | |
819 | .handle_mmio = handle_mmio_sgi_set, | |
820 | }, | |
1a89dd91 MZ |
821 | {} |
822 | }; | |
823 | ||
824 | static const | |
825 | struct mmio_range *find_matching_range(const struct mmio_range *ranges, | |
826 | struct kvm_exit_mmio *mmio, | |
1006e8cb | 827 | phys_addr_t offset) |
1a89dd91 MZ |
828 | { |
829 | const struct mmio_range *r = ranges; | |
1a89dd91 MZ |
830 | |
831 | while (r->len) { | |
1006e8cb CD |
832 | if (offset >= r->base && |
833 | (offset + mmio->len) <= (r->base + r->len)) | |
1a89dd91 MZ |
834 | return r; |
835 | r++; | |
836 | } | |
837 | ||
838 | return NULL; | |
839 | } | |
840 | ||
841 | /** | |
842 | * vgic_handle_mmio - handle an in-kernel MMIO access | |
843 | * @vcpu: pointer to the vcpu performing the access | |
844 | * @run: pointer to the kvm_run structure | |
845 | * @mmio: pointer to the data describing the access | |
846 | * | |
847 | * returns true if the MMIO access has been performed in kernel space, | |
848 | * and false if it needs to be emulated in user space. | |
849 | */ | |
850 | bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, | |
851 | struct kvm_exit_mmio *mmio) | |
852 | { | |
b47ef92a MZ |
853 | const struct mmio_range *range; |
854 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | |
855 | unsigned long base = dist->vgic_dist_base; | |
856 | bool updated_state; | |
857 | unsigned long offset; | |
858 | ||
859 | if (!irqchip_in_kernel(vcpu->kvm) || | |
860 | mmio->phys_addr < base || | |
861 | (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE)) | |
862 | return false; | |
863 | ||
864 | /* We don't support ldrd / strd or ldm / stm to the emulated vgic */ | |
865 | if (mmio->len > 4) { | |
866 | kvm_inject_dabt(vcpu, mmio->phys_addr); | |
867 | return true; | |
868 | } | |
869 | ||
1006e8cb CD |
870 | offset = mmio->phys_addr - base; |
871 | range = find_matching_range(vgic_dist_ranges, mmio, offset); | |
b47ef92a MZ |
872 | if (unlikely(!range || !range->handle_mmio)) { |
873 | pr_warn("Unhandled access %d %08llx %d\n", | |
874 | mmio->is_write, mmio->phys_addr, mmio->len); | |
875 | return false; | |
876 | } | |
877 | ||
878 | spin_lock(&vcpu->kvm->arch.vgic.lock); | |
879 | offset = mmio->phys_addr - range->base - base; | |
880 | updated_state = range->handle_mmio(vcpu, mmio, offset); | |
881 | spin_unlock(&vcpu->kvm->arch.vgic.lock); | |
882 | kvm_prepare_mmio(run, mmio); | |
883 | kvm_handle_mmio_return(vcpu, run); | |
884 | ||
5863c2ce MZ |
885 | if (updated_state) |
886 | vgic_kick_vcpus(vcpu->kvm); | |
887 | ||
b47ef92a MZ |
888 | return true; |
889 | } | |
890 | ||
891 | static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg) | |
892 | { | |
893 | struct kvm *kvm = vcpu->kvm; | |
894 | struct vgic_dist *dist = &kvm->arch.vgic; | |
895 | int nrcpus = atomic_read(&kvm->online_vcpus); | |
896 | u8 target_cpus; | |
897 | int sgi, mode, c, vcpu_id; | |
898 | ||
899 | vcpu_id = vcpu->vcpu_id; | |
900 | ||
901 | sgi = reg & 0xf; | |
902 | target_cpus = (reg >> 16) & 0xff; | |
903 | mode = (reg >> 24) & 3; | |
904 | ||
905 | switch (mode) { | |
906 | case 0: | |
907 | if (!target_cpus) | |
908 | return; | |
91021a6c | 909 | break; |
b47ef92a MZ |
910 | |
911 | case 1: | |
912 | target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff; | |
913 | break; | |
914 | ||
915 | case 2: | |
916 | target_cpus = 1 << vcpu_id; | |
917 | break; | |
918 | } | |
919 | ||
920 | kvm_for_each_vcpu(c, vcpu, kvm) { | |
921 | if (target_cpus & 1) { | |
922 | /* Flag the SGI as pending */ | |
923 | vgic_dist_irq_set(vcpu, sgi); | |
924 | dist->irq_sgi_sources[c][sgi] |= 1 << vcpu_id; | |
925 | kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c); | |
926 | } | |
927 | ||
928 | target_cpus >>= 1; | |
929 | } | |
930 | } | |
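/*
 * Example GICD_SGIR encodings ([25:24] mode, [23:16] target list,
 * [3:0] SGI number), as decoded above:
 *
 *	0x00020003 - SGI3 to the CPUs in target list 0x02 (vcpu1)
 *	0x01000004 - SGI4 to all CPUs but the sender
 *	0x02000005 - SGI5 to the sender itself
 */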
931 | ||
932 | static int compute_pending_for_cpu(struct kvm_vcpu *vcpu) | |
933 | { | |
9d949dce MZ |
934 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
935 | unsigned long *pending, *enabled, *pend_percpu, *pend_shared; | |
936 | unsigned long pending_private, pending_shared; | |
937 | int vcpu_id; | |
938 | ||
939 | vcpu_id = vcpu->vcpu_id; | |
940 | pend_percpu = vcpu->arch.vgic_cpu.pending_percpu; | |
941 | pend_shared = vcpu->arch.vgic_cpu.pending_shared; | |
942 | ||
943 | pending = vgic_bitmap_get_cpu_map(&dist->irq_state, vcpu_id); | |
944 | enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id); | |
945 | bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS); | |
946 | ||
947 | pending = vgic_bitmap_get_shared_map(&dist->irq_state); | |
948 | enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled); | |
949 | bitmap_and(pend_shared, pending, enabled, VGIC_NR_SHARED_IRQS); | |
950 | bitmap_and(pend_shared, pend_shared, | |
951 | vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]), | |
952 | VGIC_NR_SHARED_IRQS); | |
953 | ||
954 | pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS); | |
955 | pending_shared = find_first_bit(pend_shared, VGIC_NR_SHARED_IRQS); | |
956 | return (pending_private < VGIC_NR_PRIVATE_IRQS || | |
957 | pending_shared < VGIC_NR_SHARED_IRQS); | |
b47ef92a MZ |
958 | } |
959 | ||
960 | /* | |
961 | * Update the interrupt state and determine which CPUs have pending | |
962 | * interrupts. Must be called with distributor lock held. | |
963 | */ | |
964 | static void vgic_update_state(struct kvm *kvm) | |
965 | { | |
966 | struct vgic_dist *dist = &kvm->arch.vgic; | |
967 | struct kvm_vcpu *vcpu; | |
968 | int c; | |
969 | ||
970 | if (!dist->enabled) { | |
971 | set_bit(0, &dist->irq_pending_on_cpu); | |
972 | return; | |
973 | } | |
974 | ||
975 | kvm_for_each_vcpu(c, vcpu, kvm) { | |
976 | if (compute_pending_for_cpu(vcpu)) { | |
977 | pr_debug("CPU%d has pending interrupts\n", c); | |
978 | set_bit(c, &dist->irq_pending_on_cpu); | |
979 | } | |
980 | } | |
1a89dd91 | 981 | } |
330690cd | 982 | |
8d5c6b06 MZ |
983 | static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr) |
984 | { | |
985 | struct vgic_lr lr_desc; | |
986 | u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr]; | |
987 | ||
988 | lr_desc.irq = val & GICH_LR_VIRTUALID; | |
989 | if (lr_desc.irq <= 15) | |
990 | lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7; | |
991 | else | |
992 | lr_desc.source = 0; | |
993 | lr_desc.state = 0; | |
994 | ||
995 | if (val & GICH_LR_PENDING_BIT) | |
996 | lr_desc.state |= LR_STATE_PENDING; | |
997 | if (val & GICH_LR_ACTIVE_BIT) | |
998 | lr_desc.state |= LR_STATE_ACTIVE; | |
999 | if (val & GICH_LR_EOI) | |
1000 | lr_desc.state |= LR_EOI_INT; | |
1001 | ||
1002 | return lr_desc; | |
1003 | } | |
1004 | ||
1005 | static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr, | |
1006 | struct vgic_lr lr_desc) | |
1007 | { | |
1008 | u32 lr_val = (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) | lr_desc.irq; | |
1009 | ||
1010 | if (lr_desc.state & LR_STATE_PENDING) | |
1011 | lr_val |= GICH_LR_PENDING_BIT; | |
1012 | if (lr_desc.state & LR_STATE_ACTIVE) | |
1013 | lr_val |= GICH_LR_ACTIVE_BIT; | |
1014 | if (lr_desc.state & LR_EOI_INT) | |
1015 | lr_val |= GICH_LR_EOI; | |
1016 | ||
1017 | vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val; | |
1018 | } | |
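/*
 * Example: a pending SGI2 from vcpu1 is encoded in an LR as
 * (1 << GICH_LR_PHYSID_CPUID_SHIFT) | GICH_LR_PENDING_BIT | 2,
 * and vgic_v2_get_lr() decodes it back to
 * { .irq = 2, .source = 1, .state = LR_STATE_PENDING }.
 */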
1019 | ||
69bb2c9f MZ |
1020 | static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, |
1021 | struct vgic_lr lr_desc) | |
1022 | { | |
1023 | if (!(lr_desc.state & LR_STATE_MASK)) | |
1024 | set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr); | |
1025 | } | |
1026 | ||
1027 | static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu) | |
1028 | { | |
1029 | u64 val; | |
1030 | ||
1031 | #if BITS_PER_LONG == 64 | |
1032 | val = vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[1]; | |
1033 | val <<= 32; | |
1034 | val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[0]; | |
1035 | #else | |
1036 | val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr; | |
1037 | #endif | |
1038 | return val; | |
1039 | } | |
1040 | ||
8d6a0313 MZ |
1041 | static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu) |
1042 | { | |
1043 | u64 val; | |
1044 | ||
1045 | #if BITS_PER_LONG == 64 | |
1046 | val = vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[1]; | |
1047 | val <<= 32; | |
1048 | val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[0]; | |
1049 | #else | |
1050 | val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr; | |
1051 | #endif | |
1052 | return val; | |
1053 | } | |
1054 | ||
495dd859 MZ |
1055 | static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu) |
1056 | { | |
1057 | u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr; | |
1058 | u32 ret = 0; | |
1059 | ||
1060 | if (misr & GICH_MISR_EOI) | |
1061 | ret |= INT_STATUS_EOI; | |
1062 | if (misr & GICH_MISR_U) | |
1063 | ret |= INT_STATUS_UNDERFLOW; | |
1064 | ||
1065 | return ret; | |
1066 | } | |
1067 | ||
909d9b50 MZ |
1068 | static void vgic_v2_enable_underflow(struct kvm_vcpu *vcpu) |
1069 | { | |
1070 | vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr |= GICH_HCR_UIE; | |
1071 | } | |
1072 | ||
1073 | static void vgic_v2_disable_underflow(struct kvm_vcpu *vcpu) | |
1074 | { | |
1075 | vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE; | |
1076 | } | |
1077 | ||
beee38b9 MZ |
1078 | static void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) |
1079 | { | |
1080 | u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr; | |
1081 | ||
1082 | vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> GICH_VMCR_CTRL_SHIFT; | |
1083 | vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> GICH_VMCR_ALIAS_BINPOINT_SHIFT; | |
1084 | vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT; | |
1085 | vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> GICH_VMCR_PRIMASK_SHIFT; | |
1086 | } | |
1087 | ||
1088 | static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) | |
1089 | { | |
1090 | u32 vmcr; | |
1091 | ||
1092 | vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK; | |
1093 | vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & GICH_VMCR_ALIAS_BINPOINT_MASK; | |
1094 | vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK; | |
1095 | vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK; | |
1096 | ||
1097 | vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr; | |
1098 | } | |
1099 | ||
8d5c6b06 MZ |
1100 | static const struct vgic_ops vgic_ops = { |
1101 | .get_lr = vgic_v2_get_lr, | |
1102 | .set_lr = vgic_v2_set_lr, | |
69bb2c9f MZ |
1103 | .sync_lr_elrsr = vgic_v2_sync_lr_elrsr, |
1104 | .get_elrsr = vgic_v2_get_elrsr, | |
8d6a0313 | 1105 | .get_eisr = vgic_v2_get_eisr, |
495dd859 | 1106 | .get_interrupt_status = vgic_v2_get_interrupt_status, |
909d9b50 MZ |
1107 | .enable_underflow = vgic_v2_enable_underflow, |
1108 | .disable_underflow = vgic_v2_disable_underflow, | |
beee38b9 MZ |
1109 | .get_vmcr = vgic_v2_get_vmcr, |
1110 | .set_vmcr = vgic_v2_set_vmcr, | |
8d5c6b06 MZ |
1111 | }; |
1112 | ||
1113 | static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr) | |
1114 | { | |
1115 | return vgic_ops.get_lr(vcpu, lr); | |
1116 | } | |
1117 | ||
1118 | static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, | |
1119 | struct vgic_lr vlr) | |
1120 | { | |
1121 | vgic_ops.set_lr(vcpu, lr, vlr); | |
1122 | } | |
1123 | ||
69bb2c9f MZ |
1124 | static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, |
1125 | struct vgic_lr vlr) | |
1126 | { | |
1127 | vgic_ops.sync_lr_elrsr(vcpu, lr, vlr); | |
1128 | } | |
1129 | ||
1130 | static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu) | |
1131 | { | |
1132 | return vgic_ops.get_elrsr(vcpu); | |
1133 | } | |
1134 | ||
8d6a0313 MZ |
1135 | static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu) |
1136 | { | |
1137 | return vgic_ops.get_eisr(vcpu); | |
1138 | } | |
1139 | ||
495dd859 MZ |
1140 | static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu) |
1141 | { | |
1142 | return vgic_ops.get_interrupt_status(vcpu); | |
1143 | } | |
1144 | ||
909d9b50 MZ |
1145 | static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu) |
1146 | { | |
1147 | vgic_ops.enable_underflow(vcpu); | |
1148 | } | |
1149 | ||
1150 | static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu) | |
1151 | { | |
1152 | vgic_ops.disable_underflow(vcpu); | |
1153 | } | |
1154 | ||
beee38b9 MZ |
1155 | static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) |
1156 | { | |
1157 | vgic_ops.get_vmcr(vcpu, vmcr); | |
1158 | } | |
1159 | ||
1160 | static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) | |
1161 | { | |
1162 | vgic_ops.set_vmcr(vcpu, vmcr); | |
1163 | } | |
1164 | ||
8d5c6b06 MZ |
1165 | static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu) |
1166 | { | |
1167 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | |
1168 | struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr); | |
1169 | ||
1170 | vlr.state = 0; | |
1171 | vgic_set_lr(vcpu, lr_nr, vlr); | |
1172 | clear_bit(lr_nr, vgic_cpu->lr_used); | |
1173 | vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY; | |
1174 | } | |
a1fcb44e MZ |
1175 | |
1176 | /* | |
1177 | * An interrupt may have been disabled after being made pending on the | |
1178 | * CPU interface (the classic case is a timer running while we're | |
1179 | * rebooting the guest - the interrupt would kick as soon as the CPU | |
1180 | * interface gets enabled, with deadly consequences). | |
1181 | * | |
1182 | * The solution is to examine already active LRs, and check the | |
1183 | * interrupt is still enabled. If not, just retire it. | |
1184 | */ | |
1185 | static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu) | |
1186 | { | |
1187 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | |
1188 | int lr; | |
1189 | ||
1190 | for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) { | |
8d5c6b06 | 1191 | struct vgic_lr vlr = vgic_get_lr(vcpu, lr); |
a1fcb44e | 1192 | |
8d5c6b06 MZ |
1193 | if (!vgic_irq_is_enabled(vcpu, vlr.irq)) { |
1194 | vgic_retire_lr(lr, vlr.irq, vcpu); | |
1195 | if (vgic_irq_is_active(vcpu, vlr.irq)) | |
1196 | vgic_irq_clear_active(vcpu, vlr.irq); | |
a1fcb44e MZ |
1197 | } |
1198 | } | |
1199 | } | |
1200 | ||
9d949dce MZ |
1201 | /* |
1202 | * Queue an interrupt to a CPU virtual interface. Return true on success, | |
1203 | * or false if it wasn't possible to queue it. | |
1204 | */ | |
1205 | static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) | |
1206 | { | |
1207 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | |
8d5c6b06 | 1208 | struct vgic_lr vlr; |
9d949dce MZ |
1209 | int lr; |
1210 | ||
1211 | /* Sanitize the input... */ | |
1212 | BUG_ON(sgi_source_id & ~7); | |
1213 | BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS); | |
1214 | BUG_ON(irq >= VGIC_NR_IRQS); | |
1215 | ||
1216 | kvm_debug("Queue IRQ%d\n", irq); | |
1217 | ||
1218 | lr = vgic_cpu->vgic_irq_lr_map[irq]; | |
1219 | ||
1220 | /* Do we have an active interrupt for the same CPUID? */ | |
8d5c6b06 MZ |
1221 | if (lr != LR_EMPTY) { |
1222 | vlr = vgic_get_lr(vcpu, lr); | |
1223 | if (vlr.source == sgi_source_id) { | |
1224 | kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq); | |
1225 | BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); | |
1226 | vlr.state |= LR_STATE_PENDING; | |
1227 | vgic_set_lr(vcpu, lr, vlr); | |
1228 | return true; | |
1229 | } | |
9d949dce MZ |
1230 | } |
1231 | ||
1232 | /* Try to use another LR for this interrupt */ | |
1233 | lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used, | |
1234 | vgic_cpu->nr_lr); | |
1235 | if (lr >= vgic_cpu->nr_lr) | |
1236 | return false; | |
1237 | ||
1238 | kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id); | |
9d949dce MZ |
1239 | vgic_cpu->vgic_irq_lr_map[irq] = lr; |
1240 | set_bit(lr, vgic_cpu->lr_used); | |
1241 | ||
8d5c6b06 MZ |
1242 | vlr.irq = irq; |
1243 | vlr.source = sgi_source_id; | |
1244 | vlr.state = LR_STATE_PENDING; | |
9d949dce | 1245 | if (!vgic_irq_is_edge(vcpu, irq)) |
8d5c6b06 MZ |
1246 | vlr.state |= LR_EOI_INT; |
1247 | ||
1248 | vgic_set_lr(vcpu, lr, vlr); | |
9d949dce MZ |
1249 | |
1250 | return true; | |
1251 | } | |
1252 | ||
1253 | static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq) | |
1254 | { | |
1255 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | |
1256 | unsigned long sources; | |
1257 | int vcpu_id = vcpu->vcpu_id; | |
1258 | int c; | |
1259 | ||
1260 | sources = dist->irq_sgi_sources[vcpu_id][irq]; | |
1261 | ||
1262 | for_each_set_bit(c, &sources, VGIC_MAX_CPUS) { | |
1263 | if (vgic_queue_irq(vcpu, c, irq)) | |
1264 | clear_bit(c, &sources); | |
1265 | } | |
1266 | ||
1267 | dist->irq_sgi_sources[vcpu_id][irq] = sources; | |
1268 | ||
1269 | /* | |
1270 | * If the sources bitmap has been cleared it means that we | |
1271 | * could queue all the SGIs onto link registers (see the | |
1272 | * clear_bit above), and therefore we are done with them in | |
1273 | * our emulated gic and can get rid of them. | |
1274 | */ | |
1275 | if (!sources) { | |
1276 | vgic_dist_irq_clear(vcpu, irq); | |
1277 | vgic_cpu_irq_clear(vcpu, irq); | |
1278 | return true; | |
1279 | } | |
1280 | ||
1281 | return false; | |
1282 | } | |
1283 | ||
1284 | static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq) | |
1285 | { | |
1286 | if (vgic_irq_is_active(vcpu, irq)) | |
1287 | return true; /* level interrupt, already queued */ | |
1288 | ||
1289 | if (vgic_queue_irq(vcpu, 0, irq)) { | |
1290 | if (vgic_irq_is_edge(vcpu, irq)) { | |
1291 | vgic_dist_irq_clear(vcpu, irq); | |
1292 | vgic_cpu_irq_clear(vcpu, irq); | |
1293 | } else { | |
1294 | vgic_irq_set_active(vcpu, irq); | |
1295 | } | |
1296 | ||
1297 | return true; | |
1298 | } | |
1299 | ||
1300 | return false; | |
1301 | } | |
1302 | ||
1303 | /* | |
1304 | * Fill the list registers with pending interrupts before running the | |
1305 | * guest. | |
1306 | */ | |
1307 | static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) | |
1308 | { | |
1309 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | |
1310 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | |
1311 | int i, vcpu_id; | |
1312 | int overflow = 0; | |
1313 | ||
1314 | vcpu_id = vcpu->vcpu_id; | |
1315 | ||
1316 | /* | |
1317 | * We may not have any pending interrupt, or the interrupts | |
1318 | * may have been serviced from another vcpu. In all cases, | |
1319 | * move along. | |
1320 | */ | |
1321 | if (!kvm_vgic_vcpu_pending_irq(vcpu)) { | |
1322 | pr_debug("CPU%d has no pending interrupt\n", vcpu_id); | |
1323 | goto epilog; | |
1324 | } | |
1325 | ||
1326 | /* SGIs */ | |
1327 | for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) { | |
1328 | if (!vgic_queue_sgi(vcpu, i)) | |
1329 | overflow = 1; | |
1330 | } | |
1331 | ||
1332 | /* PPIs */ | |
1333 | for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) { | |
1334 | if (!vgic_queue_hwirq(vcpu, i)) | |
1335 | overflow = 1; | |
1336 | } | |
1337 | ||
1338 | /* SPIs */ | |
1339 | for_each_set_bit(i, vgic_cpu->pending_shared, VGIC_NR_SHARED_IRQS) { | |
1340 | if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS)) | |
1341 | overflow = 1; | |
1342 | } | |
1343 | ||
1344 | epilog: | |
1345 | if (overflow) { | |
909d9b50 | 1346 | vgic_enable_underflow(vcpu); |
9d949dce | 1347 | } else { |
909d9b50 | 1348 | vgic_disable_underflow(vcpu); |
9d949dce MZ |
1349 | /* |
1350 | * We're about to run this VCPU, and we've consumed | |
1351 | * everything the distributor had in store for | |
1352 | * us. Claim we don't have anything pending. We'll | |
1353 | * adjust that if needed while exiting. | |
1354 | */ | |
1355 | clear_bit(vcpu_id, &dist->irq_pending_on_cpu); | |
1356 | } | |
1357 | } | |
1358 | ||
1359 | static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) | |
1360 | { | |
1361 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | |
495dd859 | 1362 | u32 status = vgic_get_interrupt_status(vcpu); |
9d949dce MZ |
1363 | bool level_pending = false; |
1364 | ||
495dd859 | 1365 | kvm_debug("STATUS = %08x\n", status); |
9d949dce | 1366 | |
495dd859 | 1367 | if (status & INT_STATUS_EOI) { |
9d949dce MZ |
1368 | /* |
1369 | * Some level interrupts have been EOIed. Clear their | |
1370 | * active bit. | |
1371 | */ | |
8d6a0313 MZ |
1372 | u64 eisr = vgic_get_eisr(vcpu); |
1373 | unsigned long *eisr_ptr = (unsigned long *)&eisr; | |
8d5c6b06 | 1374 | int lr; |
9d949dce | 1375 | |
8d6a0313 | 1376 | for_each_set_bit(lr, eisr_ptr, vgic_cpu->nr_lr) { |
8d5c6b06 | 1377 | struct vgic_lr vlr = vgic_get_lr(vcpu, lr); |
9d949dce | 1378 | |
8d5c6b06 MZ |
1379 | vgic_irq_clear_active(vcpu, vlr.irq); |
1380 | WARN_ON(vlr.state & LR_STATE_MASK); | |
1381 | vlr.state = 0; | |
1382 | vgic_set_lr(vcpu, lr, vlr); | |
9d949dce MZ |
1383 | |
1384 | /* Any additional pending interrupt? */ | |
8d5c6b06 MZ |
1385 | if (vgic_dist_irq_is_pending(vcpu, vlr.irq)) { |
1386 | vgic_cpu_irq_set(vcpu, vlr.irq); | |
9d949dce MZ |
1387 | level_pending = true; |
1388 | } else { | |
8d5c6b06 | 1389 | vgic_cpu_irq_clear(vcpu, vlr.irq); |
9d949dce | 1390 | } |
75da01e1 MZ |
1391 | |
1392 | /* | |
1393 | * Despite being EOIed, the LR may not have | |
1394 | * been marked as empty. | |
1395 | */ | |
69bb2c9f | 1396 | vgic_sync_lr_elrsr(vcpu, lr, vlr); |
9d949dce MZ |
1397 | } |
1398 | } | |
1399 | ||
495dd859 | 1400 | if (status & INT_STATUS_UNDERFLOW) |
909d9b50 | 1401 | vgic_disable_underflow(vcpu); |
9d949dce MZ |
1402 | |
1403 | return level_pending; | |
1404 | } | |
1405 | ||
1406 | /* | |
33c83cb3 MZ |
1407 | * Sync back the VGIC state after a guest run. The distributor lock is |
1408 | * needed so we don't get preempted in the middle of the state processing. | |
9d949dce MZ |
1409 | */ |
1410 | static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) | |
1411 | { | |
1412 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | |
1413 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | |
69bb2c9f MZ |
1414 | u64 elrsr; |
1415 | unsigned long *elrsr_ptr; | |
9d949dce MZ |
1416 | int lr, pending; |
1417 | bool level_pending; | |
1418 | ||
1419 | level_pending = vgic_process_maintenance(vcpu); | |
69bb2c9f MZ |
1420 | elrsr = vgic_get_elrsr(vcpu); |
1421 | elrsr_ptr = (unsigned long *)&elrsr; | |
9d949dce MZ |
1422 | |
1423 | /* Clear mappings for empty LRs */ | |
69bb2c9f | 1424 | for_each_set_bit(lr, elrsr_ptr, vgic_cpu->nr_lr) { |
8d5c6b06 | 1425 | struct vgic_lr vlr; |
9d949dce MZ |
1426 | |
1427 | if (!test_and_clear_bit(lr, vgic_cpu->lr_used)) | |
1428 | continue; | |
1429 | ||
8d5c6b06 | 1430 | vlr = vgic_get_lr(vcpu, lr); |
9d949dce | 1431 | |
8d5c6b06 MZ |
1432 | BUG_ON(vlr.irq >= VGIC_NR_IRQS); |
1433 | vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY; | |
9d949dce MZ |
1434 | } |
1435 | ||
1436 | /* Check if we still have something up our sleeve... */ | |
69bb2c9f | 1437 | pending = find_first_zero_bit(elrsr_ptr, vgic_cpu->nr_lr); |
9d949dce MZ |
1438 | if (level_pending || pending < vgic_cpu->nr_lr) |
1439 | set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu); | |
1440 | } | |
1441 | ||
1442 | void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) | |
1443 | { | |
1444 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | |
1445 | ||
1446 | if (!irqchip_in_kernel(vcpu->kvm)) | |
1447 | return; | |
1448 | ||
1449 | spin_lock(&dist->lock); | |
1450 | __kvm_vgic_flush_hwstate(vcpu); | |
1451 | spin_unlock(&dist->lock); | |
1452 | } | |
1453 | ||
1454 | void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) | |
1455 | { | |
33c83cb3 MZ |
1456 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
1457 | ||
9d949dce MZ |
1458 | if (!irqchip_in_kernel(vcpu->kvm)) |
1459 | return; | |
1460 | ||
33c83cb3 | 1461 | spin_lock(&dist->lock); |
9d949dce | 1462 | __kvm_vgic_sync_hwstate(vcpu); |
33c83cb3 | 1463 | spin_unlock(&dist->lock); |
9d949dce MZ |
1464 | } |
1465 | ||
1466 | int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) | |
1467 | { | |
1468 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | |
1469 | ||
1470 | if (!irqchip_in_kernel(vcpu->kvm)) | |
1471 | return 0; | |
1472 | ||
1473 | return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu); | |
1474 | } | |
1475 | ||
5863c2ce MZ |
1476 | static void vgic_kick_vcpus(struct kvm *kvm) |
1477 | { | |
1478 | struct kvm_vcpu *vcpu; | |
1479 | int c; | |
1480 | ||
1481 | /* | |
1482 | * We've injected an interrupt, time to find out who deserves | |
1483 | * a good kick... | |
1484 | */ | |
1485 | kvm_for_each_vcpu(c, vcpu, kvm) { | |
1486 | if (kvm_vgic_vcpu_pending_irq(vcpu)) | |
1487 | kvm_vcpu_kick(vcpu); | |
1488 | } | |
1489 | } | |
1490 | ||
1491 | static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level) | |
1492 | { | |
1493 | int is_edge = vgic_irq_is_edge(vcpu, irq); | |
1494 | int state = vgic_dist_irq_is_pending(vcpu, irq); | |
1495 | ||
1496 | /* | |
1497 | * Only inject an interrupt if: | |
1498 | * - edge triggered and we have a rising edge | |
1499 | * - level triggered and we change level | |
1500 | */ | |
1501 | if (is_edge) | |
1502 | return level > state; | |
1503 | else | |
1504 | return level != state; | |
1505 | } | |
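/*
 * Examples: for an edge IRQ already pending (state == 1), another
 * level == 1 is rejected (no new rising edge); for a level IRQ with
 * state == 1, level == 0 is a valid change and will clear the
 * pending state.
 */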
1506 | ||
1507 | static bool vgic_update_irq_state(struct kvm *kvm, int cpuid, | |
1508 | unsigned int irq_num, bool level) | |
1509 | { | |
1510 | struct vgic_dist *dist = &kvm->arch.vgic; | |
1511 | struct kvm_vcpu *vcpu; | |
1512 | int is_edge, is_level; | |
1513 | int enabled; | |
1514 | bool ret = true; | |
1515 | ||
1516 | spin_lock(&dist->lock); | |
1517 | ||
1518 | vcpu = kvm_get_vcpu(kvm, cpuid); | |
1519 | is_edge = vgic_irq_is_edge(vcpu, irq_num); | |
1520 | is_level = !is_edge; | |
1521 | ||
1522 | if (!vgic_validate_injection(vcpu, irq_num, level)) { | |
1523 | ret = false; | |
1524 | goto out; | |
1525 | } | |
1526 | ||
1527 | if (irq_num >= VGIC_NR_PRIVATE_IRQS) { | |
1528 | cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS]; | |
1529 | vcpu = kvm_get_vcpu(kvm, cpuid); | |
1530 | } | |
1531 | ||
1532 | kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid); | |
1533 | ||
1534 | if (level) | |
1535 | vgic_dist_irq_set(vcpu, irq_num); | |
1536 | else | |
1537 | vgic_dist_irq_clear(vcpu, irq_num); | |
1538 | ||
1539 | enabled = vgic_irq_is_enabled(vcpu, irq_num); | |
1540 | ||
1541 | if (!enabled) { | |
1542 | ret = false; | |
1543 | goto out; | |
1544 | } | |
1545 | ||
1546 | if (is_level && vgic_irq_is_active(vcpu, irq_num)) { | |
1547 | /* | |
1548 | * Level interrupt in progress, will be picked up | |
1549 | * when EOId. | |
1550 | */ | |
1551 | ret = false; | |
1552 | goto out; | |
1553 | } | |
1554 | ||
1555 | if (level) { | |
1556 | vgic_cpu_irq_set(vcpu, irq_num); | |
1557 | set_bit(cpuid, &dist->irq_pending_on_cpu); | |
1558 | } | |
1559 | ||
1560 | out: | |
1561 | spin_unlock(&dist->lock); | |
1562 | ||
1563 | return ret; | |
1564 | } | |
1565 | ||
1566 | /** | |
1567 | * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic | |
1568 | * @kvm: The VM structure pointer | |
1569 | * @cpuid: The CPU for PPIs | |
1570 | * @irq_num: The IRQ number that is assigned to the device | |
1571 | * @level: Edge-triggered: true: to trigger the interrupt | |
1572 | * false: to ignore the call | |
1573 | * Level-sensitive true: activates an interrupt | |
1574 | * false: deactivates an interrupt | |
1575 | * | |
1576 | * The GIC is not concerned with devices being active-LOW or active-HIGH for | |
1577 | * level-sensitive interrupts. You can think of the level parameter as 1 | |
1578 | * being HIGH and 0 being LOW and all devices being active-HIGH. | |
1579 | */ | |
1580 | int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num, | |
1581 | bool level) | |
1582 | { | |
1583 | if (vgic_update_irq_state(kvm, cpuid, irq_num, level)) | |
1584 | vgic_kick_vcpus(kvm); | |
1585 | ||
1586 | return 0; | |
1587 | } | |
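/*
 * A minimal caller-side sketch (names illustrative): an in-kernel
 * device model such as the arch timer raises its PPI on the VCPU it
 * belongs to with
 *
 *	kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, irq_num, true);
 *
 * and, for a level-sensitive line, lowers it again by passing
 * level == false. For SPIs the cpuid argument is superseded by the
 * irq_spi_cpu[] routing above.
 */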
1588 | ||
01ac5e34 MZ |
1589 | static irqreturn_t vgic_maintenance_handler(int irq, void *data) |
1590 | { | |
1591 | /* | |
1592 | * We cannot rely on the vgic maintenance interrupt to be | |
1593 | * delivered synchronously. This means we can only use it to | |
1594 | * exit the VM, and we perform the handling of EOIed | |
1595 | * interrupts on the exit path (see vgic_process_maintenance). | |
1596 | */ | |
1597 | return IRQ_HANDLED; | |
1598 | } | |
1599 | ||
e1ba0207 CD |
1600 | /** |
1601 | * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state | |
1602 | * @vcpu: pointer to the vcpu struct | |
1603 | * | |
1604 | * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to | |
1605 | * this vcpu and enable the VGIC for this VCPU | |
1606 | */ | |
01ac5e34 MZ |
1607 | int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) |
1608 | { | |
1609 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | |
1610 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | |
1611 | int i; | |
1612 | ||
01ac5e34 MZ |
1613 | if (vcpu->vcpu_id >= VGIC_MAX_CPUS) |
1614 | return -EBUSY; | |
1615 | ||
1616 | for (i = 0; i < VGIC_NR_IRQS; i++) { | |
1617 | if (i < VGIC_NR_PPIS) | |
1618 | vgic_bitmap_set_irq_val(&dist->irq_enabled, | |
1619 | vcpu->vcpu_id, i, 1); | |
1620 | if (i < VGIC_NR_PRIVATE_IRQS) | |
1621 | vgic_bitmap_set_irq_val(&dist->irq_cfg, | |
1622 | vcpu->vcpu_id, i, VGIC_CFG_EDGE); | |
1623 | ||
1624 | vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY; | |
1625 | } | |
1626 | ||
1627 | /* | |
1628 | * By forcing VMCR to zero, the GIC will restore the binary | |
1629 | * points to their reset values. Anything else resets to zero | |
1630 | * anyway. | |
1631 | */ | |
eede821d | 1632 | vgic_cpu->vgic_v2.vgic_vmcr = 0; |
01ac5e34 MZ |
1633 | |
1634 | vgic_cpu->nr_lr = vgic_nr_lr; | |
eede821d | 1635 | vgic_cpu->vgic_v2.vgic_hcr = GICH_HCR_EN; /* Get the show on the road... */ |
01ac5e34 MZ |
1636 | |
1637 | return 0; | |
1638 | } | |
1639 | ||
1640 | static void vgic_init_maintenance_interrupt(void *info) | |
1641 | { | |
1642 | enable_percpu_irq(vgic_maint_irq, 0); | |
1643 | } | |
1644 | ||
1645 | static int vgic_cpu_notify(struct notifier_block *self, | |
1646 | unsigned long action, void *cpu) | |
1647 | { | |
1648 | switch (action) { | |
1649 | case CPU_STARTING: | |
1650 | case CPU_STARTING_FROZEN: | |
1651 | vgic_init_maintenance_interrupt(NULL); | |
1652 | break; | |
1653 | case CPU_DYING: | |
1654 | case CPU_DYING_FROZEN: | |
1655 | disable_percpu_irq(vgic_maint_irq); | |
1656 | break; | |
1657 | } | |
1658 | ||
1659 | return NOTIFY_OK; | |
1660 | } | |
1661 | ||
1662 | static struct notifier_block vgic_cpu_nb = { | |
1663 | .notifier_call = vgic_cpu_notify, | |
1664 | }; | |
1665 | ||
1666 | int kvm_vgic_hyp_init(void) | |
1667 | { | |
1668 | int ret; | |
1669 | struct resource vctrl_res; | |
1670 | struct resource vcpu_res; | |
1671 | ||
1672 | vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic"); | |
1673 | if (!vgic_node) { | |
1674 | kvm_err("error: no compatible vgic node in DT\n"); | |
1675 | return -ENODEV; | |
1676 | } | |
1677 | ||
1678 | vgic_maint_irq = irq_of_parse_and_map(vgic_node, 0); | |
1679 | if (!vgic_maint_irq) { | |
1680 | kvm_err("error getting vgic maintenance irq from DT\n"); | |
1681 | ret = -ENXIO; | |
1682 | goto out; | |
1683 | } | |
1684 | ||
1685 | ret = request_percpu_irq(vgic_maint_irq, vgic_maintenance_handler, | |
1686 | "vgic", kvm_get_running_vcpus()); | |
1687 | if (ret) { | |
1688 | kvm_err("Cannot register interrupt %d\n", vgic_maint_irq); | |
1689 | goto out; | |
1690 | } | |
1691 | ||
553f809e | 1692 | ret = __register_cpu_notifier(&vgic_cpu_nb); |
01ac5e34 MZ |
1693 | if (ret) { |
1694 | kvm_err("Cannot register vgic CPU notifier\n"); | |
1695 | goto out_free_irq; | |
1696 | } | |
1697 | ||
1698 | ret = of_address_to_resource(vgic_node, 2, &vctrl_res); | |
1699 | if (ret) { | |
1700 | kvm_err("Cannot obtain VCTRL resource\n"); | |
1701 | goto out_free_irq; | |
1702 | } | |
1703 | ||
1704 | vgic_vctrl_base = of_iomap(vgic_node, 2); | |
1705 | if (!vgic_vctrl_base) { | |
1706 | kvm_err("Cannot ioremap VCTRL\n"); | |
1707 | ret = -ENOMEM; | |
1708 | goto out_free_irq; | |
1709 | } | |
1710 | ||
1711 | vgic_nr_lr = readl_relaxed(vgic_vctrl_base + GICH_VTR); | |
1712 | vgic_nr_lr = (vgic_nr_lr & 0x3f) + 1; | |
1713 | ||
1714 | ret = create_hyp_io_mappings(vgic_vctrl_base, | |
1715 | vgic_vctrl_base + resource_size(&vctrl_res), | |
1716 | vctrl_res.start); | |
1717 | if (ret) { | |
1718 | kvm_err("Cannot map VCTRL into hyp\n"); | |
1719 | goto out_unmap; | |
1720 | } | |
1721 | ||
1722 | kvm_info("%s@%llx IRQ%d\n", vgic_node->name, | |
1723 | vctrl_res.start, vgic_maint_irq); | |
1724 | on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1); | |
1725 | ||
1726 | if (of_address_to_resource(vgic_node, 3, &vcpu_res)) { | |
1727 | kvm_err("Cannot obtain VCPU resource\n"); | |
1728 | ret = -ENXIO; | |
1729 | goto out_unmap; | |
1730 | } | |
1731 | vgic_vcpu_base = vcpu_res.start; | |
1732 | ||
1733 | goto out; | |
1734 | ||
1735 | out_unmap: | |
1736 | iounmap(vgic_vctrl_base); | |
1737 | out_free_irq: | |
1738 | free_percpu_irq(vgic_maint_irq, kvm_get_running_vcpus()); | |
1739 | out: | |
1740 | of_node_put(vgic_node); | |
1741 | return ret; | |
1742 | } | |
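/*
 * For reference, a sketch of the DT node this probing expects
 * (addresses illustrative); reg entries 2 and 3 are the GICH
 * ("vctrl") and GICV ("vcpu") regions consumed above:
 *
 *	interrupt-controller@2c001000 {
 *		compatible = "arm,cortex-a15-gic";
 *		interrupt-controller;
 *		#interrupt-cells = <3>;
 *		reg = <0x2c001000 0x1000>,	// GICD: distributor
 *		      <0x2c002000 0x2000>,	// GICC: cpu interface
 *		      <0x2c004000 0x2000>,	// GICH: hyp control
 *		      <0x2c006000 0x2000>;	// GICV: virtual cpu if
 *		interrupts = <1 9 0xf04>;	// maintenance PPI
 *	};
 */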
1743 | ||
e1ba0207 CD |
1744 | /** |
1745 | * kvm_vgic_init - Initialize global VGIC state before running any VCPUs | |
1746 | * @kvm: pointer to the kvm struct | |
1747 | * | |
1748 | * Map the virtual CPU interface into the VM before running any VCPUs. We | |
1749 | * can't do this at creation time, because user space must first set the | |
1750 | * virtual CPU interface address in the guest physical address space. Also | |
1751 | * initialize the ITARGETSRn regs to 0 on the emulated distributor. | |
1752 | */ | |
01ac5e34 MZ |
1753 | int kvm_vgic_init(struct kvm *kvm) |
1754 | { | |
1755 | int ret = 0, i; | |
1756 | ||
e1ba0207 CD |
1757 | if (!irqchip_in_kernel(kvm)) |
1758 | return 0; | |
1759 | ||
01ac5e34 MZ |
1760 | mutex_lock(&kvm->lock); |
1761 | ||
1762 | if (vgic_initialized(kvm)) | |
1763 | goto out; | |
1764 | ||
1765 | if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) || | |
1766 | IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) { | |
1767 | kvm_err("Need to set vgic cpu and dist addresses first\n"); | |
1768 | ret = -ENXIO; | |
1769 | goto out; | |
1770 | } | |
1771 | ||
1772 | ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base, | |
1773 | vgic_vcpu_base, KVM_VGIC_V2_CPU_SIZE); | |
1774 | if (ret) { | |
1775 | kvm_err("Unable to remap VGIC CPU to VCPU\n"); | |
1776 | goto out; | |
1777 | } | |
1778 | ||
1779 | for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4) | |
1780 | vgic_set_target_reg(kvm, 0, i); | |
1781 | ||
1782 | kvm->arch.vgic.ready = true; | |
1783 | out: | |
1784 | mutex_unlock(&kvm->lock); | |
1785 | return ret; | |
1786 | } | |
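/*
 * The ordering expected from user space, sketched: create the vgic
 * (KVM_CREATE_DEVICE with KVM_DEV_TYPE_ARM_VGIC_V2, or the legacy
 * KVM_CREATE_IRQCHIP), set the distributor and CPU interface base
 * addresses through the KVM_DEV_ARM_VGIC_GRP_ADDR attribute, then
 * run a VCPU; the first KVM_RUN reaches this function and performs
 * the final mapping.
 */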
1787 | ||
1788 | int kvm_vgic_create(struct kvm *kvm) | |
1789 | { | |
7330672b CD |
1790 | int i, vcpu_lock_idx = -1, ret = 0; |
1791 | struct kvm_vcpu *vcpu; | |
01ac5e34 MZ |
1792 | |
1793 | mutex_lock(&kvm->lock); | |
1794 | ||
7330672b | 1795 | if (kvm->arch.vgic.vctrl_base) { |
01ac5e34 MZ |
1796 | ret = -EEXIST; |
1797 | goto out; | |
1798 | } | |
1799 | ||
7330672b CD |
1800 | /* |
1801 | * Any time a vcpu is run, vcpu_load is called which tries to grab the | |
1802 | * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure | |
1803 | * that no other VCPUs are run while we create the vgic. | |
1804 | */ | |
1805 | kvm_for_each_vcpu(i, vcpu, kvm) { | |
1806 | if (!mutex_trylock(&vcpu->mutex)) | |
1807 | goto out_unlock; | |
1808 | vcpu_lock_idx = i; | |
1809 | } | |
1810 | ||
1811 | kvm_for_each_vcpu(i, vcpu, kvm) { | |
1812 | if (vcpu->arch.has_run_once) { | |
1813 | ret = -EBUSY; | |
1814 | goto out_unlock; | |
1815 | } | |
1816 | } | |
1817 | ||
01ac5e34 MZ |
1818 | spin_lock_init(&kvm->arch.vgic.lock); |
1819 | kvm->arch.vgic.vctrl_base = vgic_vctrl_base; | |
1820 | kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF; | |
1821 | kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF; | |
1822 | ||
7330672b CD |
1823 | out_unlock: |
1824 | for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) { | |
1825 | vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx); | |
1826 | mutex_unlock(&vcpu->mutex); | |
1827 | } | |
1828 | ||
01ac5e34 MZ |
1829 | out: |
1830 | mutex_unlock(&kvm->lock); | |
1831 | return ret; | |
1832 | } | |
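/*
 * From user space this is reached through the generic device API;
 * a minimal sketch (error handling omitted):
 *
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_ARM_VGIC_V2,
 *	};
 *
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *	// cd.fd now refers to the vgic device and accepts the
 *	// KVM_{SET,GET,HAS}_DEVICE_ATTR ioctls handled below.
 */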
1833 | ||
330690cd CD |
1834 | static int vgic_ioaddr_overlap(struct kvm *kvm) | |
1835 | { | |
1836 | phys_addr_t dist = kvm->arch.vgic.vgic_dist_base; | |
1837 | phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base; | |
1838 | ||
1839 | if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu)) | |
1840 | return 0; | |
1841 | if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) || | |
1842 | (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist)) | |
1843 | return -EBUSY; | |
1844 | return 0; | |
1845 | } | |
1846 | ||
1847 | static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr, | |
1848 | phys_addr_t addr, phys_addr_t size) | |
1849 | { | |
1850 | int ret; | |
1851 | ||
ce01e4e8 CD |
1852 | if (addr & ~KVM_PHYS_MASK) |
1853 | return -E2BIG; | |
1854 | ||
1855 | if (addr & (SZ_4K - 1)) | |
1856 | return -EINVAL; | |
1857 | ||
330690cd CD |
1858 | if (!IS_VGIC_ADDR_UNDEF(*ioaddr)) |
1859 | return -EEXIST; | |
1860 | if (addr + size < addr) | |
1861 | return -EINVAL; | |
1862 | ||
30c21170 | 1863 | *ioaddr = addr; |
330690cd CD |
1864 | ret = vgic_ioaddr_overlap(kvm); |
1865 | if (ret) | |
30c21170 HW |
1866 | *ioaddr = VGIC_ADDR_UNDEF; |
1867 | ||
330690cd CD |
1868 | return ret; |
1869 | } | |
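/*
 * A worked example of the overlap check (addresses made up),
 * assuming the GICv2 sizes KVM_VGIC_V2_DIST_SIZE == SZ_4K and
 * KVM_VGIC_V2_CPU_SIZE == SZ_8K: a distributor at 0x08001000
 * followed by a CPU interface at 0x08000000 is rejected with
 * -EBUSY (0x08000000 + SZ_8K overruns 0x08001000), while a CPU
 * interface at 0x07ffe000 leaves the two regions disjoint and
 * is accepted.
 */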
1870 | ||
ce01e4e8 CD |
1871 | /** |
1872 | * kvm_vgic_addr - set or get vgic VM base addresses | |
1873 | * @kvm: pointer to the vm struct | |
1874 | * @type: the VGIC addr type, one of KVM_VGIC_V2_ADDR_TYPE_XXX | |
1875 | * @addr: pointer to address value | |
1876 | * @write: if true set the address in the VM address space, if false read the | |
1877 | * address | |
1878 | * | |
1879 | * Set or get the vgic base addresses for the distributor and the virtual CPU | |
1880 | * interface in the VM physical address space. These addresses are properties | |
1881 | * of the emulated core/SoC and therefore user space initially knows this | |
1882 | * information. | |
1883 | */ | |
1884 | int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write) | |
330690cd CD |
1885 | { |
1886 | int r = 0; | |
1887 | struct vgic_dist *vgic = &kvm->arch.vgic; | |
1888 | ||
330690cd CD |
1889 | mutex_lock(&kvm->lock); |
1890 | switch (type) { | |
1891 | case KVM_VGIC_V2_ADDR_TYPE_DIST: | |
ce01e4e8 CD |
1892 | if (write) { |
1893 | r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base, | |
1894 | *addr, KVM_VGIC_V2_DIST_SIZE); | |
1895 | } else { | |
1896 | *addr = vgic->vgic_dist_base; | |
1897 | } | |
330690cd CD |
1898 | break; |
1899 | case KVM_VGIC_V2_ADDR_TYPE_CPU: | |
ce01e4e8 CD |
1900 | if (write) { |
1901 | r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base, | |
1902 | *addr, KVM_VGIC_V2_CPU_SIZE); | |
1903 | } else { | |
1904 | *addr = vgic->vgic_cpu_base; | |
1905 | } | |
330690cd CD |
1906 | break; |
1907 | default: | |
1908 | r = -ENODEV; | |
1909 | } | |
1910 | ||
1911 | mutex_unlock(&kvm->lock); | |
1912 | return r; | |
1913 | } | |
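/*
 * A minimal user-space sketch of the write path, going through the
 * device-attr handlers below (error handling omitted):
 *
 *	uint64_t dist_addr = 0x08001000;	// example address
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_DEV_ARM_VGIC_GRP_ADDR,
 *		.attr	= KVM_VGIC_V2_ADDR_TYPE_DIST,
 *		.addr	= (uint64_t)&dist_addr,
 *	};
 *
 *	ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */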
7330672b | 1914 | |
c07a0191 CD |
1915 | static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu, |
1916 | struct kvm_exit_mmio *mmio, phys_addr_t offset) | |
1917 | { | |
fa20f5ae | 1918 | bool updated = false; |
beee38b9 MZ |
1919 | struct vgic_vmcr vmcr; |
1920 | u32 *vmcr_field; | |
1921 | u32 reg; | |
1922 | ||
1923 | vgic_get_vmcr(vcpu, &vmcr); | |
fa20f5ae CD |
1924 | |
1925 | switch (offset & ~0x3) { | |
1926 | case GIC_CPU_CTRL: | |
beee38b9 | 1927 | vmcr_field = &vmcr.ctlr; |
fa20f5ae CD |
1928 | break; |
1929 | case GIC_CPU_PRIMASK: | |
beee38b9 | 1930 | vmcr_field = &vmcr.pmr; |
fa20f5ae CD |
1931 | break; |
1932 | case GIC_CPU_BINPOINT: | |
beee38b9 | 1933 | vmcr_field = &vmcr.bpr; |
fa20f5ae CD |
1934 | break; |
1935 | case GIC_CPU_ALIAS_BINPOINT: | |
beee38b9 | 1936 | vmcr_field = &vmcr.abpr; |
fa20f5ae | 1937 | break; |
beee38b9 MZ |
1938 | default: |
1939 | BUG(); | |
fa20f5ae CD |
1940 | } |
1941 | ||
1942 | if (!mmio->is_write) { | |
beee38b9 | 1943 | reg = *vmcr_field; |
fa20f5ae CD |
1944 | mmio_data_write(mmio, ~0, reg); |
1945 | } else { | |
1946 | reg = mmio_data_read(mmio, ~0); | |
beee38b9 MZ |
1947 | if (reg != *vmcr_field) { |
1948 | *vmcr_field = reg; | |
1949 | vgic_set_vmcr(vcpu, &vmcr); | |
fa20f5ae | 1950 | updated = true; |
beee38b9 | 1951 | } |
fa20f5ae CD |
1952 | } |
1953 | return updated; | |
1954 | } | |
1955 | ||
1956 | static bool handle_mmio_abpr(struct kvm_vcpu *vcpu, | |
1957 | struct kvm_exit_mmio *mmio, phys_addr_t offset) | |
1958 | { | |
1959 | return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT); | |
c07a0191 CD |
1960 | } |
1961 | ||
fa20f5ae CD |
1962 | static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu, |
1963 | struct kvm_exit_mmio *mmio, | |
1964 | phys_addr_t offset) | |
1965 | { | |
1966 | u32 reg; | |
1967 | ||
1968 | if (mmio->is_write) | |
1969 | return false; | |
1970 | ||
1971 | /* GICC_IIDR */ | |
1972 | reg = (PRODUCT_ID_KVM << 20) | | |
1973 | (GICC_ARCH_VERSION_V2 << 16) | | |
1974 | (IMPLEMENTER_ARM << 0); | |
1975 | mmio_data_write(mmio, ~0, reg); | |
1976 | return false; | |
1977 | } | |
1978 | ||
1979 | /* | |
1980 | * CPU Interface Register accesses - these are not accessed by the VM, but by | |
1981 | * user space for saving and restoring VGIC state. | |
1982 | */ | |
c07a0191 CD |
1983 | static const struct mmio_range vgic_cpu_ranges[] = { |
1984 | { | |
1985 | .base = GIC_CPU_CTRL, | |
1986 | .len = 12, | |
1987 | .handle_mmio = handle_cpu_mmio_misc, | |
1988 | }, | |
1989 | { | |
1990 | .base = GIC_CPU_ALIAS_BINPOINT, | |
1991 | .len = 4, | |
fa20f5ae | 1992 | .handle_mmio = handle_mmio_abpr, |
c07a0191 CD |
1993 | }, |
1994 | { | |
1995 | .base = GIC_CPU_ACTIVEPRIO, | |
1996 | .len = 16, | |
fa20f5ae | 1997 | .handle_mmio = handle_mmio_raz_wi, |
c07a0191 CD |
1998 | }, |
1999 | { | |
2000 | .base = GIC_CPU_IDENT, | |
2001 | .len = 4, | |
fa20f5ae | 2002 | .handle_mmio = handle_cpu_mmio_ident, |
c07a0191 CD |
2003 | }, |
2004 | }; | |
2005 | ||
2006 | static int vgic_attr_regs_access(struct kvm_device *dev, | |
2007 | struct kvm_device_attr *attr, | |
2008 | u32 *reg, bool is_write) | |
2009 | { | |
2010 | const struct mmio_range *r = NULL, *ranges; | |
2011 | phys_addr_t offset; | |
2012 | int ret, cpuid, c; | |
2013 | struct kvm_vcpu *vcpu, *tmp_vcpu; | |
2014 | struct vgic_dist *vgic; | |
2015 | struct kvm_exit_mmio mmio; | |
2016 | ||
2017 | offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; | |
2018 | cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >> | |
2019 | KVM_DEV_ARM_VGIC_CPUID_SHIFT; | |
2020 | ||
2021 | mutex_lock(&dev->kvm->lock); | |
2022 | ||
2023 | if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) { | |
2024 | ret = -EINVAL; | |
2025 | goto out; | |
2026 | } | |
2027 | ||
2028 | vcpu = kvm_get_vcpu(dev->kvm, cpuid); | |
2029 | vgic = &dev->kvm->arch.vgic; | |
2030 | ||
2031 | mmio.len = 4; | |
2032 | mmio.is_write = is_write; | |
2033 | if (is_write) | |
2034 | mmio_data_write(&mmio, ~0, *reg); | |
2035 | switch (attr->group) { | |
2036 | case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: | |
2037 | mmio.phys_addr = vgic->vgic_dist_base + offset; | |
2038 | ranges = vgic_dist_ranges; | |
2039 | break; | |
2040 | case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: | |
2041 | mmio.phys_addr = vgic->vgic_cpu_base + offset; | |
2042 | ranges = vgic_cpu_ranges; | |
2043 | break; | |
2044 | default: | |
2045 | BUG(); | |
2046 | } | |
2047 | r = find_matching_range(ranges, &mmio, offset); | |
2048 | ||
2049 | if (unlikely(!r || !r->handle_mmio)) { | |
2050 | ret = -ENXIO; | |
2051 | goto out; | |
2052 | } | |
2053 | ||
2055 | spin_lock(&vgic->lock); | |
2056 | ||
2057 | /* | |
2058 | * Ensure that no other VCPU is running by checking the vcpu->cpu | |
2059 | * field. If no other VCPUs are running we can safely access the VGIC | |
2060 | * state, because even if another VCPU is run after this point, that | |
2061 | * VCPU will not touch the vgic state, because it will block on | |
2062 | * getting the vgic->lock in kvm_vgic_sync_hwstate(). | |
2063 | */ | |
2064 | kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) { | |
2065 | if (unlikely(tmp_vcpu->cpu != -1)) { | |
2066 | ret = -EBUSY; | |
2067 | goto out_vgic_unlock; | |
2068 | } | |
2069 | } | |
2070 | ||
cbd333a4 CD |
2071 | /* |
2072 | * Move all pending IRQs from the LRs on all VCPUs so the pending | |
2073 | * state can be properly represented in the register state accessible | |
2074 | * through this API. | |
2075 | */ | |
2076 | kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) | |
2077 | vgic_unqueue_irqs(tmp_vcpu); | |
2078 | ||
c07a0191 CD |
2079 | offset -= r->base; |
2080 | r->handle_mmio(vcpu, &mmio, offset); | |
2081 | ||
2082 | if (!is_write) | |
2083 | *reg = mmio_data_read(&mmio, ~0); | |
2084 | ||
2085 | ret = 0; | |
2086 | out_vgic_unlock: | |
2087 | spin_unlock(&vgic->lock); | |
2088 | out: | |
2089 | mutex_unlock(&dev->kvm->lock); | |
2090 | return ret; | |
2091 | } | |
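/*
 * The attr encoding used above, by example: to read GICC_PMR
 * (offset 0x4 in the CPU interface frame) of VCPU 2, user space
 * builds
 *
 *	attr->group = KVM_DEV_ARM_VGIC_GRP_CPU_REGS;
 *	attr->attr  = ((u64)2 << KVM_DEV_ARM_VGIC_CPUID_SHIFT) | 0x4;
 *
 * which decodes here to cpuid == 2 and offset == 0x4.
 */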
2092 | ||
7330672b CD |
2093 | static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) |
2094 | { | |
ce01e4e8 CD |
2095 | int r; |
2096 | ||
2097 | switch (attr->group) { | |
2098 | case KVM_DEV_ARM_VGIC_GRP_ADDR: { | |
2099 | u64 __user *uaddr = (u64 __user *)(long)attr->addr; | |
2100 | u64 addr; | |
2101 | unsigned long type = (unsigned long)attr->attr; | |
2102 | ||
2103 | if (copy_from_user(&addr, uaddr, sizeof(addr))) | |
2104 | return -EFAULT; | |
2105 | ||
2106 | r = kvm_vgic_addr(dev->kvm, type, &addr, true); | |
2107 | return (r == -ENODEV) ? -ENXIO : r; | |
2108 | } | |
c07a0191 CD |
2109 | |
2110 | case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: | |
2111 | case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: { | |
2112 | u32 __user *uaddr = (u32 __user *)(long)attr->addr; | |
2113 | u32 reg; | |
2114 | ||
2115 | if (get_user(reg, uaddr)) | |
2116 | return -EFAULT; | |
2117 | ||
2118 | return vgic_attr_regs_access(dev, attr, ®, true); | |
2119 | } | |
2120 | ||
ce01e4e8 CD |
2121 | } |
2122 | ||
7330672b CD |
2123 | return -ENXIO; |
2124 | } | |
2125 | ||
2126 | static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | |
2127 | { | |
ce01e4e8 CD |
2128 | int r = -ENXIO; |
2129 | ||
2130 | switch (attr->group) { | |
2131 | case KVM_DEV_ARM_VGIC_GRP_ADDR: { | |
2132 | u64 __user *uaddr = (u64 __user *)(long)attr->addr; | |
2133 | u64 addr; | |
2134 | unsigned long type = (unsigned long)attr->attr; | |
2135 | ||
2136 | r = kvm_vgic_addr(dev->kvm, type, &addr, false); | |
2137 | if (r) | |
2138 | return (r == -ENODEV) ? -ENXIO : r; | |
2139 | ||
2140 | if (copy_to_user(uaddr, &addr, sizeof(addr))) | |
2141 | return -EFAULT; | |
c07a0191 CD |
2142 | break; |
2143 | } | |
2144 | ||
2145 | case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: | |
2146 | case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: { | |
2147 | u32 __user *uaddr = (u32 __user *)(long)attr->addr; | |
2148 | u32 reg = 0; | |
2149 | ||
2150 | r = vgic_attr_regs_access(dev, attr, ®, false); | |
2151 | if (r) | |
2152 | return r; | |
2153 | r = put_user(reg, uaddr); | |
2154 | break; | |
ce01e4e8 | 2155 | } |
c07a0191 | 2156 | |
ce01e4e8 CD |
2157 | } |
2158 | ||
2159 | return r; | |
7330672b CD |
2160 | } |
2161 | ||
c07a0191 CD |
2162 | static int vgic_has_attr_regs(const struct mmio_range *ranges, |
2163 | phys_addr_t offset) | |
2164 | { | |
2165 | struct kvm_exit_mmio dev_attr_mmio; | |
2166 | ||
2167 | dev_attr_mmio.len = 4; | |
2168 | if (find_matching_range(ranges, &dev_attr_mmio, offset)) | |
2169 | return 0; | |
2170 | else | |
2171 | return -ENXIO; | |
2172 | } | |
2173 | ||
7330672b CD |
2174 | static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) |
2175 | { | |
c07a0191 CD |
2176 | phys_addr_t offset; |
2177 | ||
ce01e4e8 CD |
2178 | switch (attr->group) { |
2179 | case KVM_DEV_ARM_VGIC_GRP_ADDR: | |
2180 | switch (attr->attr) { | |
2181 | case KVM_VGIC_V2_ADDR_TYPE_DIST: | |
2182 | case KVM_VGIC_V2_ADDR_TYPE_CPU: | |
2183 | return 0; | |
2184 | } | |
2185 | break; | |
c07a0191 CD |
2186 | case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: |
2187 | offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; | |
2188 | return vgic_has_attr_regs(vgic_dist_ranges, offset); | |
2189 | case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: | |
2190 | offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; | |
2191 | return vgic_has_attr_regs(vgic_cpu_ranges, offset); | |
ce01e4e8 | 2192 | } |
7330672b CD |
2193 | return -ENXIO; |
2194 | } | |
2195 | ||
2196 | static void vgic_destroy(struct kvm_device *dev) | |
2197 | { | |
2198 | kfree(dev); | |
2199 | } | |
2200 | ||
2201 | static int vgic_create(struct kvm_device *dev, u32 type) | |
2202 | { | |
2203 | return kvm_vgic_create(dev->kvm); | |
2204 | } | |
2205 | ||
2206 | struct kvm_device_ops kvm_arm_vgic_v2_ops = { | |
2207 | .name = "kvm-arm-vgic", | |
2208 | .create = vgic_create, | |
2209 | .destroy = vgic_destroy, | |
2210 | .set_attr = vgic_set_attr, | |
2211 | .get_attr = vgic_get_attr, | |
2212 | .has_attr = vgic_has_attr, | |
2213 | }; |