x86, kvm: cache the base of the KVM cpuid leaves
arch/x86/kernel/kvm.c
/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	wait_queue_head_t wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

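/*
 * Invoked from the async page fault handler when the host reports
 * KVM_PV_REASON_PAGE_NOT_PRESENT: sleep on a per-token wait queue, or
 * halt when scheduling is not possible, until the matching wake arrives.
 */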
void kvm_async_pf_task_wait(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DEFINE_WAIT(wait);

	rcu_irq_enter();

	spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) || preempt_count() > 1;
	init_waitqueue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			rcu_irq_exit();
			native_safe_halt();
			rcu_irq_enter();
			local_irq_disable();
		}
	}
	if (!n.halted)
		finish_wait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (waitqueue_active(&n->wq))
		wake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		spin_unlock(&b->lock);
	}
}

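/*
 * Invoked when the host reports KVM_PV_REASON_PAGE_READY: wake the task
 * waiting on @token, or leave a dummy node behind if the wake-up arrived
 * before the corresponding wait.
 */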
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_waitqueue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__get_cpu_var(apf_reason).enabled) {
		reason = __get_cpu_var(apf_reason).reason;
		__get_cpu_var(apf_reason).reason = 0;
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);

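/*
 * Paravirtual #PF entry point: dispatch on the reason the host wrote into
 * the per-cpu apf_reason area, falling back to the regular page fault
 * handler when no async PF reason is pending.
 */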
dotraplinkage void __kprobes
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
		exit_idle();
		kvm_async_pf_task_wait((u32)read_cr2());
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		exit_idle();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";
	pv_info.paravirt_enabled = 1;

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	memset(st, 0, sizeof(*st));

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/**
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi)))
		return;
	apic_write(APIC_EOI, APIC_EOI_ACK);
}

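/*
 * Enable the per-cpu paravirtual features advertised by the host
 * (async PF, PV EOI, steal time) by programming the respective MSRs.
 */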
void kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(&__get_cpu_var(apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
		__get_cpu_var(apf_reason).enabled = 1;
		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__get_cpu_var(kvm_apic_eoi) = 0;
		pa = slow_virt_to_phys(&__get_cpu_var(kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__get_cpu_var(apf_reason).enabled)
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__get_cpu_var(apf_reason).enabled = 0;

	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
	 * New kernel can re-enable when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

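/*
 * Read the steal time published by the host for @cpu, retrying while the
 * version is odd or changes underneath us (seqcount-style protocol).
 */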
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		rmb();
		steal = src->steal;
		rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
	WARN_ON(kvm_register_clock("primary cpu clock"));
	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_online(void *dummy)
{
	kvm_guest_cpu_init();
}

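/* Tear down the per-cpu paravirtual features before the CPU goes down. */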
static void kvm_guest_cpu_offline(void *dummy)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_ONLINE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_notify,
};
#endif

static void __init kvm_apf_trap_init(void)
{
	set_intr_gate(14, async_page_fault);
}

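/*
 * Boot-time setup: register the paravirt hooks and the reboot/CPU-hotplug
 * notifiers for the features the host advertises via the KVM cpuid leaves.
 */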
void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvmclock_vsyscall)
		kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	register_cpu_notifier(&kvm_cpu_notifier);
#else
	kvm_guest_cpu_init();
#endif
}

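/*
 * The KVM cpuid base is discovered once via hypervisor_cpuid_base() and
 * cached, so kvm_para_available() does not rescan the cpuid leaves on
 * every call.
 */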
static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (cpu_has_hypervisor)
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.x2apic_available	= kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

enum kvm_contention_stat {
	TAKEN_SLOW,
	TAKEN_SLOW_PICKUP,
	RELEASED_SLOW,
	RELEASED_SLOW_KICKED,
	NR_CONTENTION_STATS
};

#ifdef CONFIG_KVM_DEBUG_FS
#define HISTO_BUCKETS	30

static struct kvm_spinlock_stats
{
	u32 contention_stats[NR_CONTENTION_STATS];
	u32 histo_spin_blocked[HISTO_BUCKETS+1];
	u64 time_blocked;
} spinlock_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	u8 ret;
	u8 old;

	old = ACCESS_ONCE(zero_stats);
	if (unlikely(old)) {
		ret = cmpxchg(&zero_stats, old, 0);
		/* This ensures only one fellow resets the stat */
		if (ret == old)
			memset(&spinlock_stats, 0, sizeof(spinlock_stats));
	}
}

static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
	check_zero();
	spinlock_stats.contention_stats[var] += val;
}


static inline u64 spin_time_start(void)
{
	return sched_clock();
}

static void __spin_time_accum(u64 delta, u32 *array)
{
	unsigned index;

	index = ilog2(delta);
	check_zero();

	if (index < HISTO_BUCKETS)
		array[index]++;
	else
		array[HISTO_BUCKETS]++;
}

static inline void spin_time_accum_blocked(u64 start)
{
	u32 delta;

	delta = sched_clock() - start;
	__spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
	spinlock_stats.time_blocked += delta;
}

static struct dentry *d_spin_debug;
static struct dentry *d_kvm_debug;

struct dentry *kvm_init_debugfs(void)
{
	d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
	if (!d_kvm_debug)
		printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n");

	return d_kvm_debug;
}

static int __init kvm_spinlock_debugfs(void)
{
	struct dentry *d_kvm;

	d_kvm = kvm_init_debugfs();
	if (d_kvm == NULL)
		return -ENOMEM;

	d_spin_debug = debugfs_create_dir("spinlocks", d_kvm);

	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW]);
	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);

	debugfs_create_u32("released_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW]);
	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);

	debugfs_create_u64("time_blocked", 0444, d_spin_debug,
			   &spinlock_stats.time_blocked);

	debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
				 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

	return 0;
}
fs_initcall(kvm_spinlock_debugfs);
#else  /* !CONFIG_KVM_DEBUG_FS */
static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
}

static inline u64 spin_time_start(void)
{
	return 0;
}

static inline void spin_time_accum_blocked(u64 start)
{
}
#endif  /* CONFIG_KVM_DEBUG_FS */

struct kvm_lock_waiting {
	struct arch_spinlock *lock;
	__ticket_t want;
};

/* cpus 'waiting' on a spinlock to become available */
static cpumask_t waiting_cpus;

/* Track spinlock on which a cpu is waiting */
static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);

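/*
 * Ticket lock slow path: publish which lock/ticket this CPU is waiting
 * for, then halt until kvm_unlock_kick() kicks us or the lock is found
 * free at the pickup test.
 */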
static void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
	struct kvm_lock_waiting *w;
	int cpu;
	u64 start;
	unsigned long flags;

	if (in_nmi())
		return;

	w = &__get_cpu_var(klock_waiting);
	cpu = smp_processor_id();
	start = spin_time_start();

	/*
	 * Make sure an interrupt handler can't upset things in a
	 * partially setup state.
	 */
	local_irq_save(flags);

	/*
	 * The ordering protocol on this is that the "lock" pointer
	 * may only be set non-NULL if the "want" ticket is correct.
	 * If we're updating "want", we must first clear "lock".
	 */
	w->lock = NULL;
	smp_wmb();
	w->want = want;
	smp_wmb();
	w->lock = lock;

	add_stats(TAKEN_SLOW, 1);

	/*
	 * This uses set_bit, which is atomic but we should not rely on its
	 * reordering guarantees. So barrier is needed after this call.
	 */
	cpumask_set_cpu(cpu, &waiting_cpus);

	barrier();

	/*
	 * Mark entry to slowpath before doing the pickup test to make
	 * sure we don't deadlock with an unlocker.
	 */
	__ticket_enter_slowpath(lock);

	/*
	 * Check again to make sure it didn't become free while
	 * we weren't looking.
	 */
	if (ACCESS_ONCE(lock->tickets.head) == want) {
		add_stats(TAKEN_SLOW_PICKUP, 1);
		goto out;
	}

	/*
	 * Halt until it's our turn and we are kicked. Note that we do safe
	 * halt for the irq enabled case to avoid hanging when lock info is
	 * overwritten in the irq spinlock slowpath and no spurious interrupt
	 * occurs to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	cpumask_clear_cpu(cpu, &waiting_cpus);
	w->lock = NULL;
	local_irq_restore(flags);
	spin_time_accum_blocked(start);
}
PV_CALLEE_SAVE_REGS_THUNK(kvm_lock_spinning);

/* Kick vcpu waiting on @lock->head to reach value @ticket */
static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
{
	int cpu;

	add_stats(RELEASED_SLOW, 1);
	for_each_cpu(cpu, &waiting_cpus) {
		const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
		if (ACCESS_ONCE(w->lock) == lock &&
		    ACCESS_ONCE(w->want) == ticket) {
			add_stats(RELEASED_SLOW_KICKED, 1);
			kvm_kick_cpu(cpu);
			break;
		}
	}
}

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
	pv_lock_ops.unlock_kick = kvm_unlock_kick;
}

static __init int kvm_spinlock_init_jump(void)
{
	if (!kvm_para_available())
		return 0;
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return 0;

	static_key_slow_inc(&paravirt_ticketlocks_enabled);
	printk(KERN_INFO "KVM setup paravirtual spinlock\n");

	return 0;
}
early_initcall(kvm_spinlock_init_jump);

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */