/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>

#include "vgic.h"

#define CREATE_TRACE_POINTS
#include "trace.h"	/* assumed name of the local tracepoint header */
#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
#else
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif
struct vgic_global __section(.hyp.text) kvm_vgic_global_state;
/*
 * Locking order is always:
 *   vgic_cpu->ap_list_lock
 *     vgic_irq->irq_lock
 *
 * (that is, always take the ap_list_lock before the struct vgic_irq lock).
 *
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first, so:
 *   vcpuX->vcpu_id < vcpuY->vcpu_id:
 *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
 *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 */
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
			      u32 intid)
{
	/* SGIs and PPIs */
	if (intid <= VGIC_MAX_PRIVATE)
		return &vcpu->arch.vgic_cpu.private_irqs[intid];

	/* SPIs */
	if (intid <= VGIC_MAX_SPI)
		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];

	/* LPIs are not yet covered */
	if (intid >= VGIC_MIN_LPI)
		return NULL;

	WARN(1, "Looking up struct vgic_irq for reserved INTID");
	return NULL;
}
/**
 * kvm_vgic_target_oracle - compute the target vcpu for an irq
 *
 * @irq:	The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

	/* If the interrupt is active, it must stay on the current vcpu */
	if (irq->active)
		return irq->vcpu ? : irq->target_vcpu;

	/*
	 * If the IRQ is not active but enabled and pending, we should direct
	 * it to its configured target VCPU.
	 * If the distributor is disabled, pending interrupts shouldn't be
	 * forwarded.
	 */
	if (irq->enabled && irq->pending) {
		if (unlikely(irq->target_vcpu &&
			     !irq->target_vcpu->kvm->arch.vgic.enabled))
			return NULL;

		return irq->target_vcpu;
	}

	/* If neither active nor pending and enabled, then this IRQ should not
	 * be queued to any VCPU.
	 */
	return NULL;
}
/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
	struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
	bool penda, pendb;
	int ret;

	spin_lock(&irqa->irq_lock);
	spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

	if (irqa->active || irqb->active) {
		ret = (int)irqb->active - (int)irqa->active;
		goto out;
	}

	penda = irqa->enabled && irqa->pending;
	pendb = irqb->enabled && irqb->pending;

	if (!penda || !pendb) {
		ret = (int)pendb - (int)penda;
		goto out;
	}

	/* Both pending and enabled, sort by priority */
	ret = irqa->priority - irqb->priority;
out:
	spin_unlock(&irqb->irq_lock);
	spin_unlock(&irqa->irq_lock);
	return ret;
}
/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}
/*
 * Only valid injection if changing level for level-triggered IRQs or for a
 * rising edge.
 */
static bool vgic_validate_injection(struct vgic_irq *irq, bool level)
{
	switch (irq->config) {
	case VGIC_CONFIG_LEVEL:
		return irq->line_level != level;
	case VGIC_CONFIG_EDGE:
		return level;
	}

	return false;
}
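/*
 * For example: re-asserting a level-triggered line that is already high
 * (level == line_level) is not a new injection and is ignored, while an
 * edge-triggered IRQ latches a new pending state on every "level = true"
 * call and ignores "level = false".
 */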
/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

retry:
	vcpu = vgic_target_oracle(irq);
	if (irq->vcpu || !vcpu) {
		/*
		 * If this IRQ is already on a VCPU's ap_list, then it
		 * cannot be moved or modified and there is no more work for
		 * us to do.
		 *
		 * Otherwise, if the irq is not pending and enabled, it does
		 * not need to be inserted into an ap_list and there is also
		 * no more work for us to do.
		 */
		spin_unlock(&irq->irq_lock);
		return false;
	}

	/*
	 * We must unlock the irq lock to take the ap_list_lock where
	 * we are going to insert this new pending interrupt.
	 */
	spin_unlock(&irq->irq_lock);

	/* someone can do stuff here, which we re-check below */

	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
	spin_lock(&irq->irq_lock);

	/*
	 * Did something change behind our backs?
	 *
	 * There are two cases:
	 * 1) The irq lost its pending state or was disabled behind our
	 *    backs and/or it was queued to another VCPU's ap_list.
	 * 2) Someone changed the affinity on this irq behind our
	 *    backs and we are now holding the wrong ap_list_lock.
	 *
	 * In both cases, drop the locks and retry.
	 */
	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
		spin_unlock(&irq->irq_lock);
		spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

		spin_lock(&irq->irq_lock);
		goto retry;
	}

	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
	irq->vcpu = vcpu;

	spin_unlock(&irq->irq_lock);
	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

	kvm_vcpu_kick(vcpu);

	return true;
}
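/*
 * Common helper behind kvm_vgic_inject_irq() and kvm_vgic_inject_mapped_irq():
 * validate the injection, update the pending/line state under the irq_lock,
 * and queue the IRQ on the target VCPU's ap_list via vgic_queue_irq_unlock().
 */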
static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
				   unsigned int intid, bool level,
				   bool mapped_irq)
{
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	int ret;

	trace_vgic_update_irq_pending(cpuid, intid, level);

	ret = vgic_lazy_init(kvm);
	if (ret)
		return ret;

	vcpu = kvm_get_vcpu(kvm, cpuid);
	if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
		return -EINVAL;

	irq = vgic_get_irq(kvm, vcpu, intid);
	if (!irq)
		return -EINVAL;

	if (irq->hw != mapped_irq)
		return -EINVAL;

	spin_lock(&irq->irq_lock);

	if (!vgic_validate_injection(irq, level)) {
		/* Nothing to see here, move along... */
		spin_unlock(&irq->irq_lock);
		return 0;
	}

	if (irq->config == VGIC_CONFIG_LEVEL) {
		irq->line_level = level;
		irq->pending = level || irq->soft_pending;
	} else {
		irq->pending = true;
	}

	vgic_queue_irq_unlock(kvm, irq);

	return 0;
}
/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:   The VM structure pointer
 * @cpuid: The CPU for PPIs
 * @intid: The INTID to inject a new state to.
 * @level: Edge-triggered:  true:  to trigger the interrupt
 *			    false: to ignore the call
 *	   Level-sensitive  true:  raise the input signal
 *			    false: lower the input signal
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
			bool level)
{
	return vgic_update_irq_pending(kvm, cpuid, intid, level, false);
}
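/*
 * Illustrative call sequence for a device model driving a level-sensitive
 * SPI (INTID 40 is a made-up example, not something defined in this file):
 *
 *	kvm_vgic_inject_irq(kvm, 0, 40, true);	- raise the input line
 *	kvm_vgic_inject_irq(kvm, 0, 40, false);	- lower it again
 *
 * For an edge-triggered interrupt only the "level = true" call latches a new
 * pending state; "level = false" is rejected by vgic_validate_injection().
 */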
int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, unsigned int intid,
			       bool level)
{
	return vgic_update_irq_pending(kvm, cpuid, intid, level, true);
}
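/*
 * Associate a virtual INTID with a physical interrupt, so that the GICv2/v3
 * backend can later program the corresponding list register with the HW bit
 * and the physical INTID.
 */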
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);

	BUG_ON(!irq);

	spin_lock(&irq->irq_lock);

	irq->hw = true;
	irq->hwintid = phys_irq;

	spin_unlock(&irq->irq_lock);

	return 0;
}
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);

	BUG_ON(!irq);

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	spin_lock(&irq->irq_lock);

	irq->hw = false;
	irq->hwintid = 0;

	spin_unlock(&irq->irq_lock);

	return 0;
}
/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;

retry:
	spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;

		spin_lock(&irq->irq_lock);

		BUG_ON(vcpu != irq->vcpu);

		target_vcpu = vgic_target_oracle(irq);

		if (!target_vcpu) {
			/*
			 * We don't need to process this interrupt any
			 * further, move it off the list.
			 */
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			spin_unlock(&irq->irq_lock);
			continue;
		}

		if (target_vcpu == vcpu) {
			/* We're on the right CPU */
			spin_unlock(&irq->irq_lock);
			continue;
		}

		/* This interrupt looks like it has to be migrated. */

		spin_unlock(&irq->irq_lock);
		spin_unlock(&vgic_cpu->ap_list_lock);

		/*
		 * Ensure locking order by always locking the smallest
		 * ID first.
		 */
		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
			vcpuA = vcpu;
			vcpuB = target_vcpu;
		} else {
			vcpuA = target_vcpu;
			vcpuB = vcpu;
		}

		spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
				 SINGLE_DEPTH_NESTING);
		spin_lock(&irq->irq_lock);

		/*
		 * If the affinity has been preserved, move the
		 * interrupt around. Otherwise, it means things have
		 * changed while the interrupt was unlocked, and we
		 * need to replay this.
		 *
		 * In all cases, we cannot trust the list not to have
		 * changed, so we restart from the beginning.
		 */
		if (target_vcpu == vgic_target_oracle(irq)) {
			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

			list_del(&irq->ap_list);
			irq->vcpu = target_vcpu;
			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
		}

		spin_unlock(&irq->irq_lock);
		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
		spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		goto retry;
	}

	spin_unlock(&vgic_cpu->ap_list_lock);
}
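/*
 * The helpers below dispatch to the GICv2 or GICv3 specific implementation
 * depending on which hardware backend was detected at probe time
 * (kvm_vgic_global_state.type).
 */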
static inline void vgic_process_maintenance_interrupt(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_process_maintenance(vcpu);
	else
		vgic_v3_process_maintenance(vcpu);
}

static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_fold_lr_state(vcpu);
	else
		vgic_v3_fold_lr_state(vcpu);
}

/* Requires the irq_lock to be held. */
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
				    struct vgic_irq *irq, int lr)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_populate_lr(vcpu, irq, lr);
	else
		vgic_v3_populate_lr(vcpu, irq, lr);
}

static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_clear_lr(vcpu, lr);
	else
		vgic_v3_clear_lr(vcpu, lr);
}

static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_underflow(vcpu);
	else
		vgic_v3_set_underflow(vcpu);
}
/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);
		/* GICv2 SGIs can count for more than one... */
		if (vgic_irq_is_sgi(irq->intid) && irq->source)
			count += hweight8(irq->source);
		else
			count++;
		spin_unlock(&irq->irq_lock);
	}

	return count;
}
/* Requires the VCPU's ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr) {
		vgic_set_underflow(vcpu);
		vgic_sort_ap_list(vcpu);
	}

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);

		if (unlikely(vgic_target_oracle(irq) != vcpu))
			goto next;

		/*
		 * If we get an SGI with multiple sources, try to get
		 * them in all at once.
		 */
		do {
			vgic_populate_lr(vcpu, irq, count++);
		} while (irq->source && count < kvm_vgic_global_state.nr_lr);

next:
		spin_unlock(&irq->irq_lock);

		if (count == kvm_vgic_global_state.nr_lr)
			break;
	}

	vcpu->arch.vgic_cpu.used_lrs = count;

	/* Nuke remaining LRs */
	for ( ; count < kvm_vgic_global_state.nr_lr; count++)
		vgic_clear_lr(vcpu, count);
}
/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	vgic_process_maintenance_interrupt(vcpu);
	vgic_fold_lr_state(vcpu);
	vgic_prune_ap_list(vcpu);
}
/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
	vgic_flush_lr_state(vcpu);
	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
}
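/*
 * Returns true if any interrupt on this VCPU's ap_list is both pending and
 * enabled, i.e. if the guest has something to take as soon as it runs.
 */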
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	bool pending = false;

	if (!vcpu->kvm->arch.vgic.enabled)
		return false;

	spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);
		pending = irq->pending && irq->enabled;
		spin_unlock(&irq->irq_lock);

		if (pending)
			break;
	}

	spin_unlock(&vgic_cpu->ap_list_lock);

	return pending;
}
void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu))
			kvm_vcpu_kick(vcpu);
	}
}
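/*
 * Returns true if this virtual INTID is mapped to a physical interrupt
 * (irq->hw) and is currently in the active state.
 */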
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
	bool map_is_active;

	spin_lock(&irq->irq_lock);
	map_is_active = irq->hw && irq->active;
	spin_unlock(&irq->irq_lock);

	return map_is_active;
}