X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=arch%2Fx86%2Fkvm%2Fi8254.c;h=e5a3e8015e30b61a73b98e8cc07e02659774272f;hb=b69d920f68b119bdc0483f0c33d34fd0c57724f5;hp=b0ea42b78ccdb50879a10c440d5e3a7fe194fbe9;hpb=6606b342febfd470b4a33acb73e360eeaca1d9bb;p=deliverable%2Flinux.git

diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index b0ea42b78ccd..e5a3e8015e30 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -76,8 +76,6 @@ static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
 	struct kvm_kpit_channel_state *c =
 		&kvm->arch.vpit->pit_state.channels[channel];
 
-	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
-
 	switch (c->mode) {
 	default:
 	case 0:
@@ -99,8 +97,6 @@ static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
 
 static int pit_get_gate(struct kvm *kvm, int channel)
 {
-	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
-
 	return kvm->arch.vpit->pit_state.channels[channel].gate;
 }
 
@@ -144,8 +140,6 @@ static int pit_get_count(struct kvm *kvm, int channel)
 	s64 d, t;
 	int counter;
 
-	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
-
 	t = kpit_elapsed(kvm, c, channel);
 	d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);
 
@@ -174,8 +168,6 @@ static int pit_get_out(struct kvm *kvm, int channel)
 	s64 d, t;
 	int out;
 
-	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
-
 	t = kpit_elapsed(kvm, c, channel);
 	d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);
 
@@ -207,8 +199,6 @@ static void pit_latch_count(struct kvm *kvm, int channel)
 	struct kvm_kpit_channel_state *c =
 		&kvm->arch.vpit->pit_state.channels[channel];
 
-	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
-
 	if (!c->count_latched) {
 		c->latched_count = pit_get_count(kvm, channel);
 		c->count_latched = c->rw_mode;
@@ -220,8 +210,6 @@ static void pit_latch_status(struct kvm *kvm, int channel)
 	struct kvm_kpit_channel_state *c =
 		&kvm->arch.vpit->pit_state.channels[channel];
 
-	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
-
 	if (!c->status_latched) {
 		/* TODO: Return NULL COUNT (bit 6). */
 		c->status = ((pit_get_out(kvm, channel) << 7) |
@@ -236,22 +224,14 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
 {
 	struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
						 irq_ack_notifier);
-	int value;
-
-	spin_lock(&ps->inject_lock);
-	value = atomic_dec_return(&ps->pending);
-	if (value < 0)
-		/* spurious acks can be generated if, for example, the
-		 * PIC is being reset. Handle it gracefully here
-		 */
-		atomic_inc(&ps->pending);
-	else if (value > 0)
-		/* in this case, we had multiple outstanding pit interrupts
-		 * that we needed to inject. Reinject
-		 */
+
+	atomic_set(&ps->irq_ack, 1);
+	/* irq_ack should be set before pending is read. Order accesses with
+	 * inc(pending) in pit_timer_fn and xchg(irq_ack, 0) in pit_do_work.
+	 */
+	smp_mb();
+	if (atomic_dec_if_positive(&ps->pending) > 0 && ps->reinject)
 		queue_kthread_work(&ps->pit->worker, &ps->pit->expired);
-	ps->irq_ack = 1;
-	spin_unlock(&ps->inject_lock);
 }
 
 void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
@@ -282,34 +262,25 @@ static void pit_do_work(struct kthread_work *work)
 	struct kvm_vcpu *vcpu;
 	int i;
 	struct kvm_kpit_state *ps = &pit->pit_state;
-	int inject = 0;
 
-	/* Try to inject pending interrupts when
-	 * last one has been acked.
+	if (ps->reinject && !atomic_xchg(&ps->irq_ack, 0))
+		return;
+
+	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1, false);
+	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0, false);
+
+	/*
+	 * Provides NMI watchdog support via Virtual Wire mode.
+	 * The route is: PIT -> LVT0 in NMI mode.
+	 *
+	 * Note: Our Virtual Wire implementation does not follow
+	 * the MP specification. We propagate a PIT interrupt to all
+	 * VCPUs and only when LVT0 is in NMI mode. The interrupt can
+	 * also be simultaneously delivered through PIC and IOAPIC.
 	 */
-	spin_lock(&ps->inject_lock);
-	if (ps->irq_ack) {
-		ps->irq_ack = 0;
-		inject = 1;
-	}
-	spin_unlock(&ps->inject_lock);
-	if (inject) {
-		kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1, false);
-		kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0, false);
-
-		/*
-		 * Provides NMI watchdog support via Virtual Wire mode.
-		 * The route is: PIT -> PIC -> LVT0 in NMI mode.
-		 *
-		 * Note: Our Virtual Wire implementation is simplified, only
-		 * propagating PIT interrupts to all VCPUs when they have set
-		 * LVT0 to NMI delivery. Other PIC interrupts are just sent to
-		 * VCPU0, and only if its LVT0 is in EXTINT mode.
-		 */
-		if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
-			kvm_for_each_vcpu(i, vcpu, kvm)
-				kvm_apic_nmi_wd_deliver(vcpu);
-	}
+	if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
+		kvm_for_each_vcpu(i, vcpu, kvm)
+			kvm_apic_nmi_wd_deliver(vcpu);
 }
 
 static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
@@ -317,10 +288,10 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
 	struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
 	struct kvm_pit *pt = ps->kvm->arch.vpit;
 
-	if (ps->reinject || !atomic_read(&ps->pending)) {
+	if (ps->reinject)
 		atomic_inc(&ps->pending);
-		queue_kthread_work(&pt->worker, &pt->expired);
-	}
+
+	queue_kthread_work(&pt->worker, &pt->expired);
 
 	if (ps->is_periodic) {
 		hrtimer_add_expires_ns(&ps->timer, ps->period);
@@ -329,6 +300,12 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
 		return HRTIMER_NORESTART;
 }
 
+static inline void kvm_pit_reset_reinject(struct kvm_pit *pit)
+{
+	atomic_set(&pit->pit_state.pending, 0);
+	atomic_set(&pit->pit_state.irq_ack, 1);
+}
+
 static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
 {
 	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
@@ -351,8 +328,7 @@ static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
 	ps->timer.function = pit_timer_fn;
 	ps->kvm = ps->pit->kvm;
 
-	atomic_set(&ps->pending, 0);
-	ps->irq_ack = 1;
+	kvm_pit_reset_reinject(ps->pit);
 
 	/*
 	 * Do not allow the guest to program periodic timers with small
@@ -379,8 +355,6 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
 {
 	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
 
-	WARN_ON(!mutex_is_locked(&ps->lock));
-
 	pr_debug("load_count val is %d, channel is %d\n", val, channel);
 
 	/*
@@ -418,6 +392,9 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
 void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val, int hpet_legacy_start)
 {
 	u8 saved_mode;
+
+	WARN_ON_ONCE(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+
 	if (hpet_legacy_start) {
 		/* save existing mode for later reenablement */
 		WARN_ON(channel != 0);
@@ -652,18 +629,15 @@ void kvm_pit_reset(struct kvm_pit *pit)
 	}
 	mutex_unlock(&pit->pit_state.lock);
 
-	atomic_set(&pit->pit_state.pending, 0);
-	pit->pit_state.irq_ack = 1;
+	kvm_pit_reset_reinject(pit);
 }
 
 static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask)
 {
 	struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);
 
-	if (!mask) {
-		atomic_set(&pit->pit_state.pending, 0);
-		pit->pit_state.irq_ack = 1;
-	}
+	if (!mask)
+		kvm_pit_reset_reinject(pit);
 }
 
 static const struct kvm_io_device_ops pit_dev_ops = {
@@ -697,7 +671,6 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
 
 	mutex_init(&pit->pit_state.lock);
 	mutex_lock(&pit->pit_state.lock);
-	spin_lock_init(&pit->pit_state.inject_lock);
 
 	pid = get_pid(task_tgid(current));
 	pid_nr = pid_vnr(pid);
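
The hunks above replace pit_state.inject_lock with a lock-free handshake: pit_timer_fn() increments pending and queues the worker, pit_do_work() injects only when the previous interrupt has been acked (irq_ack), and kvm_pit_ack_irq() re-arms irq_ack and requeues the worker while ticks are still owed, with smp_mb() ordering the irq_ack store before the pending read. Below is a minimal userspace sketch of that handshake, using C11 atomics; queue_work() and inject_irq() are hypothetical stand-ins for queue_kthread_work() and the kvm_set_irq() pulse, and the worker runs inline instead of on a kthread.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int pending;		/* PIT ticks not yet injected       */
static atomic_int irq_ack = 1;		/* 1 = previous injection was acked */
static bool reinject = true;		/* mirrors ps->reinject             */

static void inject_irq(void) { puts("inject"); }	/* kvm_set_irq() pulse   */

static void do_work(void);
static void queue_work(void) { do_work(); }	/* real code queues kthread work */

/* pit_timer_fn(): count the tick, then kick the worker. */
static void timer_fn(void)
{
	if (reinject)
		atomic_fetch_add(&pending, 1);
	queue_work();
}

/* pit_do_work(): in reinject mode, inject only after the last interrupt was acked. */
static void do_work(void)
{
	if (reinject && !atomic_exchange(&irq_ack, 0))
		return;
	inject_irq();
}

/* kvm_pit_ack_irq(): re-arm irq_ack, then reinject any tick still owed. */
static void ack_irq(void)
{
	atomic_store(&irq_ack, 1);
	/* seq_cst atomics already order this store before the read of
	 * pending; the patch uses an explicit smp_mb() for the same effect.
	 */
	int old = atomic_load(&pending);
	while (old > 0 &&
	       !atomic_compare_exchange_weak(&pending, &old, old - 1))
		;	/* open-coded atomic_dec_if_positive() */
	if (old > 1 && reinject)	/* ticks still pending after this one */
		queue_work();
}

int main(void)
{
	timer_fn();	/* tick 1: injected immediately        */
	timer_fn();	/* tick 2: held until tick 1 is acked  */
	ack_irq();	/* ack 1: re-injects the pending tick  */
	ack_irq();	/* ack 2: nothing left to inject       */
	return 0;
}

Two ticks followed by two acks yield exactly two injections, which is the behaviour the removed spin_lock()-serialized ack and inject paths provided.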