KVM: Make kvm host use the paravirt clocksource structs
arch/x86/kernel/kvmclock.c
/* KVM paravirtual clock driver. A clocksource implementation
   Copyright (C) 2008 Glauber de Oliveira Costa, Red Hat Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/

#include <linux/clocksource.h>
#include <linux/kvm_para.h>
#include <asm/arch_hooks.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <linux/percpu.h>
#include <asm/reboot.h>

#define KVM_SCALE 22

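/* kvmclock is enabled by default; boot with "no-kvmclock" to turn it off. */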
static int kvmclock = 1;

static int parse_no_kvmclock(char *arg)
{
        kvmclock = 0;
        return 0;
}
early_param("no-kvmclock", parse_no_kvmclock);

/* The hypervisor will put information about time periodically here */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct kvm_vcpu_time_info, hv_clock);
#define get_clock(cpu, field) per_cpu(hv_clock, cpu).field

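/*
 * Scale a raw TSC delta into nanoseconds: the host exports
 * tsc_to_system_mul as a fixed-point multiplier, so
 * ns = (tsc_delta * tsc_to_system_mul) >> KVM_SCALE.
 */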
static inline u64 kvm_get_delta(u64 last_tsc)
{
        int cpu = smp_processor_id();
        u64 delta = native_read_tsc() - last_tsc;
        return (delta * get_clock(cpu, tsc_to_system_mul)) >> KVM_SCALE;
}

static struct kvm_wall_clock wall_clock;
static cycle_t kvm_clock_read(void);
/*
 * The wallclock is the time of day when we booted. Some time may have
 * elapsed since the hypervisor wrote the data, so we account for that
 * with system time.
 */
static unsigned long kvm_get_wallclock(void)
{
        u32 wc_sec, wc_nsec;
        u64 delta;
        struct timespec ts;
        int version, nsec;
        int low, high;

        low = (int)__pa(&wall_clock);
        high = ((u64)__pa(&wall_clock) >> 32);

        delta = kvm_clock_read();

        native_write_msr(MSR_KVM_WALL_CLOCK, low, high);
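        /*
         * Writing MSR_KVM_WALL_CLOCK asks the host to fill in wall_clock.
         * The version field changes around the update (and is odd while the
         * update is in flight), so retry until we read a stable, even version.
         */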
        do {
                version = wall_clock.wc_version;
                rmb();
                wc_sec = wall_clock.wc_sec;
                wc_nsec = wall_clock.wc_nsec;
                rmb();
        } while ((wall_clock.wc_version != version) || (version & 1));

        delta = kvm_clock_read() - delta;
        delta += wc_nsec;
        nsec = do_div(delta, NSEC_PER_SEC);
        set_normalized_timespec(&ts, wc_sec + delta, nsec);
        /*
         * Of all mechanisms of time adjustment I've tested, this one
         * was the champion!
         */
        return ts.tv_sec + 1;
}

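/*
 * This driver does not push a new wall clock time back to the host, so
 * setting the wallclock is a no-op.
 */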
static int kvm_set_wallclock(unsigned long now)
{
        return 0;
}

/*
 * This is our read_clock function. The host puts a tsc timestamp in hv_clock
 * each time it updates the system time. Without the tsc adjustment, we could
 * have a situation in which a vcpu starts running earlier (smaller
 * system_time) but probes the time later than another vcpu, making time
 * appear to go backwards.
 */
static cycle_t kvm_clock_read(void)
{
        u64 last_tsc, now;
        int cpu;

        preempt_disable();
        cpu = smp_processor_id();

        last_tsc = get_clock(cpu, tsc_timestamp);
        now = get_clock(cpu, system_time);

        now += kvm_get_delta(last_tsc);
        preempt_enable();

        return now;
}
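
/*
 * kvm_clock_read() already returns nanoseconds, so mult and shift are chosen
 * to cancel out: the core computes (cycles * mult) >> shift, and
 * (ns << KVM_SCALE) >> KVM_SCALE == ns.
 */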
static struct clocksource kvm_clock = {
        .name = "kvm-clock",
        .read = kvm_clock_read,
        .rating = 400,
        .mask = CLOCKSOURCE_MASK(64),
        .mult = 1 << KVM_SCALE,
        .shift = KVM_SCALE,
        .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

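/*
 * Tell the host where this cpu's hv_clock structure lives by writing its
 * physical address to MSR_KVM_SYSTEM_TIME. Bit 0 of the value is the
 * 'enable' bit (hence the "| 1"); writing a value with it clear, as the
 * shutdown paths below do, unregisters the area again.
 */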
static int kvm_register_clock(void)
{
        int cpu = smp_processor_id();
        int low, high;
        low = (int)__pa(&per_cpu(hv_clock, cpu)) | 1;
        high = ((u64)__pa(&per_cpu(hv_clock, cpu)) >> 32);

        return native_write_msr_safe(MSR_KVM_SYSTEM_TIME, low, high);
}

#ifdef CONFIG_X86_LOCAL_APIC
static void kvm_setup_secondary_clock(void)
{
        /*
         * Now that the first cpu already had this clocksource initialized,
         * we shouldn't fail.
         */
        WARN_ON(kvm_register_clock());
        /* ok, done with our trickery, call native */
        setup_secondary_APIC_clock();
}
#endif

/*
 * After the clock is registered, the host will keep writing to the
 * registered memory location. If the guest happens to shut down, this memory
 * won't be valid. In cases like kexec, in which you install a new kernel,
 * the host would keep writing to a now-random memory location. So before any
 * kind of shutdown from our side, we unregister the clock by writing a value
 * that does not have the 'enable' bit set in the msr.
 */
#ifdef CONFIG_KEXEC
static void kvm_crash_shutdown(struct pt_regs *regs)
{
        native_write_msr_safe(MSR_KVM_SYSTEM_TIME, 0, 0);
        native_machine_crash_shutdown(regs);
}
#endif

static void kvm_shutdown(void)
{
        native_write_msr_safe(MSR_KVM_SYSTEM_TIME, 0, 0);
        native_machine_shutdown();
}

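/*
 * Boot-time setup: if the host advertises KVM_FEATURE_CLOCKSOURCE (and
 * "no-kvmclock" was not passed), register the boot cpu's clock area, hook
 * the paravirt time, apic and shutdown ops, and register the clocksource.
 */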
void __init kvmclock_init(void)
{
        if (!kvm_para_available())
                return;

        if (kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
                if (kvm_register_clock())
                        return;
                pv_time_ops.get_wallclock = kvm_get_wallclock;
                pv_time_ops.set_wallclock = kvm_set_wallclock;
                pv_time_ops.sched_clock = kvm_clock_read;
#ifdef CONFIG_X86_LOCAL_APIC
                pv_apic_ops.setup_secondary_clock = kvm_setup_secondary_clock;
#endif
                machine_ops.shutdown = kvm_shutdown;
#ifdef CONFIG_KEXEC
                machine_ops.crash_shutdown = kvm_crash_shutdown;
#endif
                clocksource_register(&kvm_clock);
        }
}