/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

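/*
 * "no-kvmapf", "no-steal-acc" and "no-kvmclock-vsyscall" on the kernel
 * command line let a guest opt out of the corresponding paravirt
 * features set up below.
 */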
static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	wait_queue_head_t wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

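/*
 * Called on a host-side "page not present" async page fault: sleep until
 * kvm_async_pf_task_wake() is called for the same token.  If sleeping is
 * not allowed (idle task, or the fault hit a nested/atomic context),
 * busy-halt instead.
 */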
void kvm_async_pf_task_wait(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DEFINE_WAIT(wait);

	rcu_irq_enter();

	spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* a dummy entry exists -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) || preempt_count() > 1;
	init_waitqueue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			rcu_irq_exit();
			native_safe_halt();
			rcu_irq_enter();
			local_irq_disable();
		}
	}
	if (!n.halted)
		finish_wait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (waitqueue_active(&n->wq))
		wake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		spin_unlock(&b->lock);
	}
}

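/*
 * Called on a "page ready" notification from the host: wake the task
 * sleeping on @token.  A token of ~0 wakes every sleeper on this cpu.
 * If the wakeup arrives before the fault was processed, leave a dummy
 * node behind so the waiter can return immediately.
 */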
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * Async PF was not yet handled.
		 * Add a dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while another cpu
			 * handles the async PF.
			 */
			spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_waitqueue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		reason = __this_cpu_read(apf_reason.reason);
		__this_cpu_write(apf_reason.reason, 0);
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

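/*
 * Replacement page-fault handler installed by kvm_apf_trap_init(): decode
 * the async PF reason first; a plain fault falls through to the normal
 * page-fault path, while the two async PF reasons put the current task
 * to sleep or wake the sleeper, respectively.
 */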
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		trace_do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
		exit_idle();
		kvm_async_pf_task_wait((u32)read_cr2());
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		exit_idle();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	/*
	 * KVM isn't paravirt in the sense of paravirt_enabled.  A KVM
	 * guest kernel works like a bare metal kernel with additional
	 * features, and paravirt_enabled is about features that are
	 * missing.
	 */
	pv_info.paravirt_enabled = 0;

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

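/*
 * Tell the host where this CPU's steal-time record lives: write its
 * physical address, with the enable bit set, to MSR_KVM_STEAL_TIME.
 */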
static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	memset(st, 0, sizeof(*st));

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/**
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic_write(APIC_EOI, APIC_EOI_ACK);
}

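/*
 * Per-cpu feature registration: hand the host the physical addresses of
 * this CPU's async PF reason word, PV EOI flag and steal-time record by
 * writing the respective KVM MSRs with the enable bit set.
 */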
void kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
		__this_cpu_write(apf_reason.enabled, 1);
		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into the old kernel's
	 * memory.  The new kernel can re-enable it when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

401
d910f5c1
GC
402static u64 kvm_steal_clock(int cpu)
403{
404 u64 steal;
405 struct kvm_steal_time *src;
406 int version;
407
408 src = &per_cpu(steal_time, cpu);
409 do {
410 version = src->version;
411 rmb();
412 steal = src->steal;
413 rmb();
414 } while ((version & 1) || (version != src->version));
415
416 return steal;
417}
418
419void kvm_disable_steal_time(void)
420{
421 if (!has_steal_clock)
422 return;
423
424 wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
425}
426
#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_online(void *dummy)
{
	kvm_guest_cpu_init();
}

static void kvm_guest_cpu_offline(void *dummy)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_ONLINE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_notify,
};
#endif

static void __init kvm_apf_trap_init(void)
{
	set_intr_gate(14, async_page_fault);
}

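/*
 * Boot-time guest setup: probe the features the host advertises through
 * the KVM CPUID leaves and install the corresponding paravirt hooks
 * (async page faults, steal time, PV EOI, kvmclock vsyscall, spinlocks).
 */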
void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvmclock_vsyscall)
		kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	register_cpu_notifier(&kvm_cpu_notifier);
#else
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}

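/*
 * Guest detection: hypervisor_cpuid_base() scans the hypervisor CPUID
 * range (usually starting at leaf 0x40000000) for the "KVMKVMKVM\0\0\0"
 * signature; the feature bits then live at base + KVM_CPUID_FEATURES.
 */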
static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (cpu_has_hypervisor)
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.x2apic_available	= kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

enum kvm_contention_stat {
	TAKEN_SLOW,
	TAKEN_SLOW_PICKUP,
	RELEASED_SLOW,
	RELEASED_SLOW_KICKED,
	NR_CONTENTION_STATS
};

#ifdef CONFIG_KVM_DEBUG_FS
#define HISTO_BUCKETS	30

static struct kvm_spinlock_stats
{
	u32 contention_stats[NR_CONTENTION_STATS];
	u32 histo_spin_blocked[HISTO_BUCKETS+1];
	u64 time_blocked;
} spinlock_stats;

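/*
 * zero_stats is exposed writable in debugfs; storing a non-zero value
 * arms a reset, and the next stat update races via cmpxchg to clear it
 * and zero the counters.
 */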
static u8 zero_stats;

static inline void check_zero(void)
{
	u8 ret;
	u8 old;

	old = READ_ONCE(zero_stats);
	if (unlikely(old)) {
		ret = cmpxchg(&zero_stats, old, 0);
		/* This ensures only one fellow resets the stat */
		if (ret == old)
			memset(&spinlock_stats, 0, sizeof(spinlock_stats));
	}
}

static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
	check_zero();
	spinlock_stats.contention_stats[var] += val;
}


static inline u64 spin_time_start(void)
{
	return sched_clock();
}

static void __spin_time_accum(u64 delta, u32 *array)
{
	unsigned index;

	index = ilog2(delta);
	check_zero();

	if (index < HISTO_BUCKETS)
		array[index]++;
	else
		array[HISTO_BUCKETS]++;
}

static inline void spin_time_accum_blocked(u64 start)
{
	u32 delta;

	delta = sched_clock() - start;
	__spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
	spinlock_stats.time_blocked += delta;
}

static struct dentry *d_spin_debug;
static struct dentry *d_kvm_debug;

struct dentry *kvm_init_debugfs(void)
{
	d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
	if (!d_kvm_debug)
		printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n");

	return d_kvm_debug;
}

static int __init kvm_spinlock_debugfs(void)
{
	struct dentry *d_kvm;

	d_kvm = kvm_init_debugfs();
	if (d_kvm == NULL)
		return -ENOMEM;

	d_spin_debug = debugfs_create_dir("spinlocks", d_kvm);

	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW]);
	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);

	debugfs_create_u32("released_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW]);
	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);

	debugfs_create_u64("time_blocked", 0444, d_spin_debug,
			   &spinlock_stats.time_blocked);

	debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
				 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

	return 0;
}
fs_initcall(kvm_spinlock_debugfs);
#else  /* !CONFIG_KVM_DEBUG_FS */
static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
}

static inline u64 spin_time_start(void)
{
	return 0;
}

static inline void spin_time_accum_blocked(u64 start)
{
}
#endif  /* CONFIG_KVM_DEBUG_FS */

struct kvm_lock_waiting {
	struct arch_spinlock *lock;
	__ticket_t want;
};

/* cpus 'waiting' on a spinlock to become available */
static cpumask_t waiting_cpus;

/* Track spinlock on which a cpu is waiting */
static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);

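/*
 * Ticket-lock slow path: publish which lock and ticket this cpu waits
 * for, mark the lock contended, re-check for a race with the unlocker,
 * then halt until kvm_unlock_kick() (or any interrupt) wakes us.
 */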
__visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
	struct kvm_lock_waiting *w;
	int cpu;
	u64 start;
	unsigned long flags;
	__ticket_t head;

	if (in_nmi())
		return;

	w = this_cpu_ptr(&klock_waiting);
	cpu = smp_processor_id();
	start = spin_time_start();

	/*
	 * Make sure an interrupt handler can't upset things in a
	 * partially set-up state.
	 */
	local_irq_save(flags);

	/*
	 * The ordering protocol on this is that the "lock" pointer
	 * may only be set non-NULL if the "want" ticket is correct.
	 * If we're updating "want", we must first clear "lock".
	 */
	w->lock = NULL;
	smp_wmb();
	w->want = want;
	smp_wmb();
	w->lock = lock;

	add_stats(TAKEN_SLOW, 1);

	/*
	 * This uses set_bit, which is atomic but we should not rely on its
	 * reordering guarantees. So a barrier is needed after this call.
	 */
	cpumask_set_cpu(cpu, &waiting_cpus);

	barrier();

	/*
	 * Mark entry to slowpath before doing the pickup test to make
	 * sure we don't deadlock with an unlocker.
	 */
	__ticket_enter_slowpath(lock);

	/* make sure enter_slowpath, which is atomic, does not cross the read */
	smp_mb__after_atomic();

	/*
	 * Check again to make sure the lock didn't become free while
	 * we weren't looking.
	 */
	head = READ_ONCE(lock->tickets.head);
	if (__tickets_equal(head, want)) {
		add_stats(TAKEN_SLOW_PICKUP, 1);
		goto out;
	}

	/*
	 * Halt until it's our turn and we are kicked. Note that we do a safe
	 * halt for the irq-enabled case, to avoid hanging when the lock info
	 * is overwritten in the irq spinlock slowpath and no spurious
	 * interrupt occurs to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	cpumask_clear_cpu(cpu, &waiting_cpus);
	w->lock = NULL;
	local_irq_restore(flags);
	spin_time_accum_blocked(start);
}
PV_CALLEE_SAVE_REGS_THUNK(kvm_lock_spinning);

/* Kick vcpu waiting on @lock->head to reach value @ticket */
static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
{
	int cpu;

	add_stats(RELEASED_SLOW, 1);
	for_each_cpu(cpu, &waiting_cpus) {
		const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
		if (READ_ONCE(w->lock) == lock &&
		    READ_ONCE(w->want) == ticket) {
			add_stats(RELEASED_SLOW_KICKED, 1);
			kvm_kick_cpu(cpu);
			break;
		}
	}
}

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
	pv_lock_ops.unlock_kick = kvm_unlock_kick;
}

static __init int kvm_spinlock_init_jump(void)
{
	if (!kvm_para_available())
		return 0;
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return 0;

	static_key_slow_inc(&paravirt_ticketlocks_enabled);
	printk(KERN_INFO "KVM setup paravirtual spinlock\n");

	return 0;
}
early_initcall(kvm_spinlock_init_jump);

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */