/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irqchip/arm-gic.h>	/* GIC_DIST_* and GICH_* definitions */
#include <asm/kvm_emulate.h>

/*
 * How the whole thing works (courtesy of Christoffer Dall):
 *
 * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
 *   something is pending
 * - VGIC pending interrupts are stored in the vgic.irq_state bitmap
 *   (this bitmap is updated by both user land ioctls and guest
 *   mmio ops, and other in-kernel peripherals such as the
 *   arch. timers) and indicates the 'wire' state.
 * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
 *   recalculated
 * - To calculate the oracle, we need info for each cpu from
 *   compute_pending_for_cpu, which considers:
 *   - PPI: dist->irq_state & dist->irq_enable
 *   - SPI: dist->irq_state & dist->irq_enable & dist->irq_spi_target
 *   - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
 *     registers, stored on each vcpu. We only keep one bit of
 *     information per interrupt, making sure that only one vcpu can
 *     accept the interrupt.
 * - The same is true when injecting an interrupt, except that we only
 *   consider a single interrupt at a time. The irq_spi_cpu array
 *   contains the target CPU for each SPI.
 *
 * The handling of level interrupts adds some extra complexity. We
 * need to track when the interrupt has been EOIed, so we can sample
 * the 'line' again. This is achieved as such:
 *
 * - When a level interrupt is moved onto a vcpu, the corresponding
 *   bit in irq_active is set. As long as this bit is set, the line
 *   will be ignored for further interrupts. The interrupt is injected
 *   into the vcpu with the GICH_LR_EOI bit set (generate a
 *   maintenance interrupt on EOI).
 * - When the interrupt is EOIed, the maintenance interrupt fires,
 *   and clears the corresponding bit in irq_active. This allows the
 *   interrupt line to be sampled again.
 */

#define VGIC_ADDR_UNDEF		(-1)
#define IS_VGIC_ADDR_UNDEF(_x)	((_x) == VGIC_ADDR_UNDEF)

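/*
 * Each access mode is encoded in two bit fields: bit 0 selects the
 * read behaviour (return the register value or read-as-zero), and
 * bits 2:1 select the write behaviour (ignore, set bits, clear bits,
 * or replace the whole value).
 */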
#define ACCESS_READ_VALUE	(1 << 0)
#define ACCESS_READ_RAZ		(0 << 0)
#define ACCESS_READ_MASK(x)	((x) & (1 << 0))
#define ACCESS_WRITE_IGNORED	(0 << 1)
#define ACCESS_WRITE_SETBIT	(1 << 1)
#define ACCESS_WRITE_CLEARBIT	(2 << 1)
#define ACCESS_WRITE_VALUE	(3 << 1)
#define ACCESS_WRITE_MASK(x)	((x) & (3 << 1))

static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_update_state(struct kvm *kvm);
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);

static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
                                int cpuid, u32 offset)
{
        offset >>= 2;
        if (!offset)
                return x->percpu[cpuid].reg;
        else
                return x->shared.reg + offset - 1;
}

static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
                                   int cpuid, int irq)
{
        if (irq < VGIC_NR_PRIVATE_IRQS)
                return test_bit(irq, x->percpu[cpuid].reg_ul);

        return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared.reg_ul);
}

static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
                                    int irq, int val)
{
        unsigned long *reg;

        if (irq < VGIC_NR_PRIVATE_IRQS) {
                reg = x->percpu[cpuid].reg_ul;
        } else {
                reg = x->shared.reg_ul;
                irq -= VGIC_NR_PRIVATE_IRQS;
        }

        if (val)
                set_bit(irq, reg);
        else
                clear_bit(irq, reg);
}

static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
{
        if (unlikely(cpuid >= VGIC_MAX_CPUS))
                return NULL;
        return x->percpu[cpuid].reg_ul;
}

static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
        return x->shared.reg_ul;
}

static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
        offset >>= 2;
        BUG_ON(offset > (VGIC_NR_IRQS / 4));
        if (offset < 8)
                return x->percpu[cpuid] + offset;
        else
                return x->shared + offset - 8;
}

#define VGIC_CFG_LEVEL	0
#define VGIC_CFG_EDGE	1

static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        int irq_val;

        irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
        return irq_val == VGIC_CFG_EDGE;
}

static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
}

static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
}

static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
}

static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        return vgic_bitmap_get_irq_val(&dist->irq_state, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 0);
}

static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
{
        if (irq < VGIC_NR_PRIVATE_IRQS)
                set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
        else
                set_bit(irq - VGIC_NR_PRIVATE_IRQS,
                        vcpu->arch.vgic_cpu.pending_shared);
}

static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
        if (irq < VGIC_NR_PRIVATE_IRQS)
                clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
        else
                clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
                          vcpu->arch.vgic_cpu.pending_shared);
}

static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
{
        return *((u32 *)mmio->data) & mask;
}

static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
{
        *((u32 *)mmio->data) = value & mask;
}

/**
 * vgic_reg_access - access vgic register
 * @mmio:   pointer to the data describing the mmio access
 * @reg:    pointer to the virtual backing of vgic distributor data
 * @offset: offset of the access; the least significant 2 bits give
 *          the byte offset within the accessed word
 * @mode:   ACCESS_ mode (see defines above)
 *
 * Helper to make vgic register access easier using one of the access
 * modes defined for vgic register access
 * (read,raz,write-ignored,setbit,clearbit,write)
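 *
 * Example: a 2-byte read at offset 2 uses word_offset = 16 and
 * mask = 0xffff, and thus returns bits [31:16] of *reg.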
 */
static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
                            phys_addr_t offset, int mode)
{
        int word_offset = (offset & 3) * 8;
        u32 mask = (1UL << (mmio->len * 8)) - 1;
        u32 regval;

        /*
         * Any alignment fault should have been delivered to the guest
         * directly (ARM ARM B3.12.7 "Prioritization of aborts").
         */

        if (reg) {
                regval = *reg;
        } else {
                BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
                regval = 0;
        }

        if (mmio->is_write) {
                u32 data = mmio_data_read(mmio, mask) << word_offset;
                switch (ACCESS_WRITE_MASK(mode)) {
                case ACCESS_WRITE_IGNORED:
                        return;

                case ACCESS_WRITE_SETBIT:
                        regval |= data;
                        break;

                case ACCESS_WRITE_CLEARBIT:
                        regval &= ~data;
                        break;

                case ACCESS_WRITE_VALUE:
                        regval = (regval & ~(mask << word_offset)) | data;
                        break;
                }
                *reg = regval;
        } else {
                switch (ACCESS_READ_MASK(mode)) {
                case ACCESS_READ_RAZ:
                        regval = 0;
                        /* fall through */

                case ACCESS_READ_VALUE:
                        mmio_data_write(mmio, mask, regval >> word_offset);
                }
        }
}

static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
                             struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg;
        u32 word_offset = offset & 3;

        switch (offset & ~3) {
        case 0:			/* CTLR */
                reg = vcpu->kvm->arch.vgic.enabled;
                vgic_reg_access(mmio, &reg, word_offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
                if (mmio->is_write) {
                        vcpu->kvm->arch.vgic.enabled = reg & 1;
                        vgic_update_state(vcpu->kvm);
                        return true;
                }
                break;

        case 4:			/* TYPER */
                reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
                reg |= (VGIC_NR_IRQS >> 5) - 1;
                vgic_reg_access(mmio, &reg, word_offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
                break;

        case 8:			/* IIDR */
                reg = 0x4B00043B;
                vgic_reg_access(mmio, &reg, word_offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
                break;
        }

        return false;
}

static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
                               struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        vgic_reg_access(mmio, NULL, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
        return false;
}

static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
                                       struct kvm_exit_mmio *mmio,
                                       phys_addr_t offset)
{
        u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
                                       vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
        if (mmio->is_write) {
                vgic_update_state(vcpu->kvm);
                return true;
        }

        return false;
}

static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
                                         struct kvm_exit_mmio *mmio,
                                         phys_addr_t offset)
{
        u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
                                       vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
        if (mmio->is_write) {
                if (offset < 4) /* Force SGI enabled */
                        *reg |= 0xffff;
                vgic_retire_disabled_irqs(vcpu);
                vgic_update_state(vcpu->kvm);
                return true;
        }

        return false;
}

static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
                                        struct kvm_exit_mmio *mmio,
                                        phys_addr_t offset)
{
        u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
                                       vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
        if (mmio->is_write) {
                vgic_update_state(vcpu->kvm);
                return true;
        }

        return false;
}

static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
                                          struct kvm_exit_mmio *mmio,
                                          phys_addr_t offset)
{
        u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
                                       vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
        if (mmio->is_write) {
                vgic_update_state(vcpu->kvm);
                return true;
        }

        return false;
}

static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
                                     struct kvm_exit_mmio *mmio,
                                     phys_addr_t offset)
{
        u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
                                        vcpu->vcpu_id, offset);
        vgic_reg_access(mmio, reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        return false;
}

#define GICD_ITARGETSR_SIZE	32
#define GICD_CPUTARGETS_BITS	8
#define GICD_IRQS_PER_ITARGETSR	(GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
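/*
 * Each 32bit GICD_ITARGETSR register covers 4 interrupts, with one
 * target byte per interrupt. Rebuild such a register from the
 * per-vcpu irq_spi_target bitmaps.
 */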
static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        int i, c;
        unsigned long *bmap;
        u32 val = 0;

        irq -= VGIC_NR_PRIVATE_IRQS;

        kvm_for_each_vcpu(c, vcpu, kvm) {
                bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
                for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
                        if (test_bit(irq + i, bmap))
                                val |= 1 << (c + i * 8);
        }

        return val;
}

static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        int i, c;
        unsigned long *bmap;
        u32 target;

        irq -= VGIC_NR_PRIVATE_IRQS;

        /*
         * Pick the LSB in each byte. This ensures we target exactly
         * one vcpu per IRQ. If the byte is zero, assume we target
         * CPU0.
         */
        for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
                int shift = i * GICD_CPUTARGETS_BITS;
                target = ffs((val >> shift) & 0xffU);
                target = target ? (target - 1) : 0;
                dist->irq_spi_cpu[irq + i] = target;
                kvm_for_each_vcpu(c, vcpu, kvm) {
                        bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
                        if (c == target)
                                set_bit(irq + i, bmap);
                        else
                                clear_bit(irq + i, bmap);
                }
        }
}

static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
                                   struct kvm_exit_mmio *mmio,
                                   phys_addr_t offset)
{
        u32 reg;

        /* We treat the banked interrupt targets as read-only */
        if (offset < 32) {
                u32 roreg = 1 << vcpu->vcpu_id;
                roreg |= roreg << 8;
                roreg |= roreg << 16;

                vgic_reg_access(mmio, &roreg, offset,
                                ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
                return false;
        }

        reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        if (mmio->is_write) {
                vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
                vgic_update_state(vcpu->kvm);
                return true;
        }

        return false;
}

static u32 vgic_cfg_expand(u16 val)
{
        u32 res = 0;
        int i;

        /*
         * Turn a 16bit value like abcd...mnop into a 32bit word
         * a0b0c0d0...m0n0o0p0, which is the layout of the HW cfg
         * register.
         */
        for (i = 0; i < 16; i++)
                res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);

        return res;
}

static u16 vgic_cfg_compress(u32 val)
{
        u16 res = 0;
        int i;

        /*
         * Turn a 32bit word a0b0c0d0...m0n0o0p0 into a 16bit value
         * like abcd...mnop, which is what we really care about.
         */
        for (i = 0; i < 16; i++)
                res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;

        return res;
}

/*
 * The distributor uses 2 bits per IRQ for the CFG register, but the
 * LSB is always 0. As such, we only keep the upper bit, and use the
 * two functions above to compress/expand the bits.
 */
static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
                                struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 val;
        u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
                                       vcpu->vcpu_id, offset >> 1);
        /*
         * Each 32bit access covers 16 interrupts, i.e. half of a
         * 32 interrupt bitmap word; bit 2 of the offset selects the
         * half.
         */
        if (offset & 4)
                val = *reg >> 16;
        else
                val = *reg & 0xffff;

        val = vgic_cfg_expand(val);
        vgic_reg_access(mmio, &val, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        if (mmio->is_write) {
                if (offset < 8) {
                        *reg = ~0U; /* Force PPIs/SGIs to 1 */
                        return false;
                }

                val = vgic_cfg_compress(val);
                if (offset & 4) {
                        *reg &= 0xffff;
                        *reg |= val << 16;
                } else {
                        *reg &= 0xffff << 16;
                        *reg |= val;
                }
        }

        return false;
}

static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
                                struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        u32 reg;
        vgic_reg_access(mmio, &reg, offset,
                        ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
        if (mmio->is_write) {
                vgic_dispatch_sgi(vcpu, reg);
                vgic_update_state(vcpu->kvm);
                return true;
        }

        return false;
}

/*
 * I would have liked to use the kvm_bus_io_*() API instead, but it
 * cannot cope with banked registers (only the VM pointer is passed
 * around, and we need the vcpu). One of these days, someone please
 * fix it!
 */
struct mmio_range {
        phys_addr_t base;
        unsigned long len;
        bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
                            phys_addr_t offset);
};

static const struct mmio_range vgic_ranges[] = {
        {
                .base = GIC_DIST_CTRL,
                .len = 12,
                .handle_mmio = handle_mmio_misc,
        },
        {
                .base = GIC_DIST_IGROUP,
                .len = VGIC_NR_IRQS / 8,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GIC_DIST_ENABLE_SET,
                .len = VGIC_NR_IRQS / 8,
                .handle_mmio = handle_mmio_set_enable_reg,
        },
        {
                .base = GIC_DIST_ENABLE_CLEAR,
                .len = VGIC_NR_IRQS / 8,
                .handle_mmio = handle_mmio_clear_enable_reg,
        },
        {
                .base = GIC_DIST_PENDING_SET,
                .len = VGIC_NR_IRQS / 8,
                .handle_mmio = handle_mmio_set_pending_reg,
        },
        {
                .base = GIC_DIST_PENDING_CLEAR,
                .len = VGIC_NR_IRQS / 8,
                .handle_mmio = handle_mmio_clear_pending_reg,
        },
        {
                .base = GIC_DIST_ACTIVE_SET,
                .len = VGIC_NR_IRQS / 8,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GIC_DIST_ACTIVE_CLEAR,
                .len = VGIC_NR_IRQS / 8,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GIC_DIST_PRI,
                .len = VGIC_NR_IRQS,
                .handle_mmio = handle_mmio_priority_reg,
        },
        {
                .base = GIC_DIST_TARGET,
                .len = VGIC_NR_IRQS,
                .handle_mmio = handle_mmio_target_reg,
        },
        {
                .base = GIC_DIST_CONFIG,
                .len = VGIC_NR_IRQS / 4,
                .handle_mmio = handle_mmio_cfg_reg,
        },
        {
                .base = GIC_DIST_SOFTINT,
                .len = 4,
                .handle_mmio = handle_mmio_sgi_reg,
        },
        {}
};

static const
struct mmio_range *find_matching_range(const struct mmio_range *ranges,
                                       struct kvm_exit_mmio *mmio,
                                       phys_addr_t base)
{
        const struct mmio_range *r = ranges;
        phys_addr_t addr = mmio->phys_addr - base;

        while (r->len) {
                if (addr >= r->base &&
                    (addr + mmio->len) <= (r->base + r->len))
                        return r;
                r++;
        }

        return NULL;
}

/**
 * vgic_handle_mmio - handle an in-kernel MMIO access
 * @vcpu:	pointer to the vcpu performing the access
 * @run:	pointer to the kvm_run structure
 * @mmio:	pointer to the data describing the access
 *
 * returns true if the MMIO access has been performed in kernel space,
 * and false if it needs to be emulated in user space.
 */
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
                      struct kvm_exit_mmio *mmio)
{
        const struct mmio_range *range;
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        unsigned long base = dist->vgic_dist_base;
        bool updated_state;
        unsigned long offset;

        if (!irqchip_in_kernel(vcpu->kvm) ||
            mmio->phys_addr < base ||
            (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
                return false;

        /* We don't support ldrd / strd or ldm / stm to the emulated vgic */
        if (mmio->len > 4) {
                kvm_inject_dabt(vcpu, mmio->phys_addr);
                return true;
        }

        range = find_matching_range(vgic_ranges, mmio, base);
        if (unlikely(!range || !range->handle_mmio)) {
                pr_warn("Unhandled access %d %08llx %d\n",
                        mmio->is_write, mmio->phys_addr, mmio->len);
                return false;
        }

        spin_lock(&vcpu->kvm->arch.vgic.lock);
        offset = mmio->phys_addr - range->base - base;
        updated_state = range->handle_mmio(vcpu, mmio, offset);
        spin_unlock(&vcpu->kvm->arch.vgic.lock);
        kvm_prepare_mmio(run, mmio);
        kvm_handle_mmio_return(vcpu, run);

        return true;
}

static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
        struct kvm *kvm = vcpu->kvm;
        struct vgic_dist *dist = &kvm->arch.vgic;
        int nrcpus = atomic_read(&kvm->online_vcpus);
        u8 target_cpus;
        int sgi, mode, c, vcpu_id;

        vcpu_id = vcpu->vcpu_id;

        sgi = reg & 0xf;
        target_cpus = (reg >> 16) & 0xff;
        mode = (reg >> 24) & 3;

        switch (mode) {
        case 0:
                if (!target_cpus)
                        return;
                break;

        case 1:
                target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
                break;

        case 2:
                target_cpus = 1 << vcpu_id;
                break;
        }

        kvm_for_each_vcpu(c, vcpu, kvm) {
                if (target_cpus & 1) {
                        /* Flag the SGI as pending */
                        vgic_dist_irq_set(vcpu, sgi);
                        dist->irq_sgi_sources[c][sgi] |= 1 << vcpu_id;
                        kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
                }

                target_cpus >>= 1;
        }
}

static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
        unsigned long pending_private, pending_shared;
        int vcpu_id;

        vcpu_id = vcpu->vcpu_id;
        pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
        pend_shared = vcpu->arch.vgic_cpu.pending_shared;

        pending = vgic_bitmap_get_cpu_map(&dist->irq_state, vcpu_id);
        enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
        bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);

        pending = vgic_bitmap_get_shared_map(&dist->irq_state);
        enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
        bitmap_and(pend_shared, pending, enabled, VGIC_NR_SHARED_IRQS);
        bitmap_and(pend_shared, pend_shared,
                   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
                   VGIC_NR_SHARED_IRQS);

        pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
        pending_shared = find_first_bit(pend_shared, VGIC_NR_SHARED_IRQS);
        return (pending_private < VGIC_NR_PRIVATE_IRQS ||
                pending_shared < VGIC_NR_SHARED_IRQS);
}

/*
 * Update the interrupt state and determine which CPUs have pending
 * interrupts. Must be called with distributor lock held.
 */
static void vgic_update_state(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        int c;

        if (!dist->enabled) {
                set_bit(0, &dist->irq_pending_on_cpu);
                return;
        }

        kvm_for_each_vcpu(c, vcpu, kvm) {
                if (compute_pending_for_cpu(vcpu)) {
                        pr_debug("CPU%d has pending interrupts\n", c);
                        set_bit(c, &dist->irq_pending_on_cpu);
                }
        }
}

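/*
 * A list register (LR) holds the interrupt number in its low bits
 * and, for SGIs, the source CPU id in the PHYSID field.
 */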
#define LR_CPUID(lr)	\
        (((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT)
#define MK_LR_PEND(src, irq)	\
        (GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))

/*
 * An interrupt may have been disabled after being made pending on the
 * CPU interface (the classic case is a timer running while we're
 * rebooting the guest - the interrupt would kick as soon as the CPU
 * interface gets enabled, with deadly consequences).
 *
 * The solution is to examine the already active LRs, and check that
 * the interrupt is still enabled. If not, just retire it.
 */
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        int lr;

        for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
                int irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;

                if (!vgic_irq_is_enabled(vcpu, irq)) {
                        vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
                        clear_bit(lr, vgic_cpu->lr_used);
                        vgic_cpu->vgic_lr[lr] &= ~GICH_LR_STATE;
                        if (vgic_irq_is_active(vcpu, irq))
                                vgic_irq_clear_active(vcpu, irq);
                }
        }
}

/*
 * Queue an interrupt to a CPU virtual interface. Return true on success,
 * or false if it wasn't possible to queue it.
 */
static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        int lr;

        /* Sanitize the input... */
        BUG_ON(sgi_source_id & ~7);
        BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
        BUG_ON(irq >= VGIC_NR_IRQS);

        kvm_debug("Queue IRQ%d\n", irq);

        lr = vgic_cpu->vgic_irq_lr_map[irq];

        /* Do we have an active interrupt for the same CPUID? */
        if (lr != LR_EMPTY &&
            (LR_CPUID(vgic_cpu->vgic_lr[lr]) == sgi_source_id)) {
                kvm_debug("LR%d piggyback for IRQ%d %x\n",
                          lr, irq, vgic_cpu->vgic_lr[lr]);
                BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
                vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;

                goto out;
        }

        /* Try to use another LR for this interrupt */
        lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
                                 vgic_cpu->nr_lr);
        if (lr >= vgic_cpu->nr_lr)
                return false;

        kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
        vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
        vgic_cpu->vgic_irq_lr_map[irq] = lr;
        set_bit(lr, vgic_cpu->lr_used);

out:
        if (!vgic_irq_is_edge(vcpu, irq))
                vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;

        return true;
}

static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        unsigned long sources;
        int vcpu_id = vcpu->vcpu_id;
        int c;

        sources = dist->irq_sgi_sources[vcpu_id][irq];

        for_each_set_bit(c, &sources, VGIC_MAX_CPUS) {
                if (vgic_queue_irq(vcpu, c, irq))
                        clear_bit(c, &sources);
        }

        dist->irq_sgi_sources[vcpu_id][irq] = sources;

        /*
         * If the sources bitmap has been cleared it means that we
         * could queue all the SGIs onto link registers (see the
         * clear_bit above), and therefore we are done with them in
         * our emulated gic and can get rid of them.
         */
        if (!sources) {
                vgic_dist_irq_clear(vcpu, irq);
                vgic_cpu_irq_clear(vcpu, irq);
                return true;
        }

        return false;
}

static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
        if (vgic_irq_is_active(vcpu, irq))
                return true; /* level interrupt, already queued */

        if (vgic_queue_irq(vcpu, 0, irq)) {
                if (vgic_irq_is_edge(vcpu, irq)) {
                        vgic_dist_irq_clear(vcpu, irq);
                        vgic_cpu_irq_clear(vcpu, irq);
                } else {
                        vgic_irq_set_active(vcpu, irq);
                }

                return true;
        }

        return false;
}

/*
 * Fill the list registers with pending interrupts before running the
 * guest.
 */
static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        int i, vcpu_id;
        int overflow = 0;

        vcpu_id = vcpu->vcpu_id;

        /*
         * We may not have any pending interrupt, or the interrupts
         * may have been serviced from another vcpu. In all cases,
         * move along.
         */
        if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
                pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
                goto epilog;
        }

        /* SGIs */
        for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
                if (!vgic_queue_sgi(vcpu, i))
                        overflow = 1;
        }

        /* PPIs */
        for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
                if (!vgic_queue_hwirq(vcpu, i))
                        overflow = 1;
        }

        /* SPIs */
        for_each_set_bit(i, vgic_cpu->pending_shared, VGIC_NR_SHARED_IRQS) {
                if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
                        overflow = 1;
        }

epilog:
        if (overflow) {
                /*
                 * We could not queue everything: enable the underflow
                 * maintenance interrupt so we are notified when LRs
                 * drain and we can queue the rest.
                 */
                vgic_cpu->vgic_hcr |= GICH_HCR_UIE;
        } else {
                vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
                /*
                 * We're about to run this VCPU, and we've consumed
                 * everything the distributor had in store for
                 * us. Claim we don't have anything pending. We'll
                 * adjust that if needed while exiting.
                 */
                clear_bit(vcpu_id, &dist->irq_pending_on_cpu);
        }
}

static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        bool level_pending = false;

        kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);

        /*
         * We do not need to take the distributor lock here, since the only
         * action we perform is clearing the irq_active bit for an EOIed
         * level interrupt. There is a potential race with
         * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we
         * check if the interrupt is already active. Two possibilities:
         *
         * - The queuing is occurring on the same vcpu: cannot happen,
         *   as we're already in the context of this vcpu, and
         *   executing the handler
         * - The interrupt has been migrated to another vcpu, and we
         *   ignore this interrupt for this run. Big deal. It is still
         *   pending though, and will get considered when this vcpu
         *   exits.
         */
        if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
                /*
                 * Some level interrupts have been EOIed. Clear their
                 * active bit.
                 */
                int lr, irq;

                for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_eisr,
                                 vgic_cpu->nr_lr) {
                        irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;

                        vgic_irq_clear_active(vcpu, irq);
                        vgic_cpu->vgic_lr[lr] &= ~GICH_LR_EOI;

                        /* Any additional pending interrupt? */
                        if (vgic_dist_irq_is_pending(vcpu, irq)) {
                                vgic_cpu_irq_set(vcpu, irq);
                                level_pending = true;
                        } else {
                                vgic_cpu_irq_clear(vcpu, irq);
                        }
                }
        }

        if (vgic_cpu->vgic_misr & GICH_MISR_U)
                vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;

        return level_pending;
}

/*
 * Sync back the VGIC state after a guest run. We do not really touch
 * the distributor here (the irq_pending_on_cpu bit is safe to set),
 * so there is no need for taking its lock.
 */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        int lr, pending;
        bool level_pending;

        level_pending = vgic_process_maintenance(vcpu);

        /* Clear mappings for empty LRs */
        for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr,
                         vgic_cpu->nr_lr) {
                int irq;

                if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
                        continue;

                irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;

                BUG_ON(irq >= VGIC_NR_IRQS);
                vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
        }

        /* Check if we still have something up our sleeve... */
        pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_elrsr,
                                      vgic_cpu->nr_lr);
        if (level_pending || pending < vgic_cpu->nr_lr)
                set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}

void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        if (!irqchip_in_kernel(vcpu->kvm))
                return;

        spin_lock(&dist->lock);
        __kvm_vgic_flush_hwstate(vcpu);
        spin_unlock(&dist->lock);
}

void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
        if (!irqchip_in_kernel(vcpu->kvm))
                return;

        __kvm_vgic_sync_hwstate(vcpu);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        if (!irqchip_in_kernel(vcpu->kvm))
                return 0;

        return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}

static int vgic_ioaddr_overlap(struct kvm *kvm)
{
        phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
        phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;

        if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
                return 0;
        if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
            (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
                return -EBUSY;
        return 0;
}

static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
                              phys_addr_t addr, phys_addr_t size)
{
        int ret;

        if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
                return -EEXIST;
        if (addr + size < addr)
                return -EINVAL;

        ret = vgic_ioaddr_overlap(kvm);
        if (ret)
                return ret;
        *ioaddr = addr;
        return ret;
}

int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
{
        int r = 0;
        struct vgic_dist *vgic = &kvm->arch.vgic;

        if (addr & ~KVM_PHYS_MASK)
                return -E2BIG;

        if (addr & ~PAGE_MASK)
                return -EINVAL;

        mutex_lock(&kvm->lock);
        switch (type) {
        case KVM_VGIC_V2_ADDR_TYPE_DIST:
                r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
                                       addr, KVM_VGIC_V2_DIST_SIZE);
                break;
        case KVM_VGIC_V2_ADDR_TYPE_CPU:
                r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
                                       addr, KVM_VGIC_V2_CPU_SIZE);
                break;
        default:
                r = -ENODEV;
        }

        mutex_unlock(&kvm->lock);
        return r;
}