Commit | Line | Data |
---|---|---|
2aae950b AK |
1 | /* |
2 | * Copyright 2006 Andi Kleen, SUSE Labs. | |
3 | * Subject to the GNU Public License, v.2 | |
4 | * | |
f144a6b4 | 5 | * Fast user context implementation of clock_gettime, gettimeofday, and time. |
2aae950b | 6 | * |
7a59ed41 SS |
7 | * 32 Bit compat layer by Stefani Seibold <stefani@seibold.net> |
8 | * sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany | |
9 | * | |
2aae950b AK |
10 | * The code should have no internal unresolved relocations. |
11 | * Check with readelf after changing. | |
2aae950b AK |
12 | */ |
13 | ||
7a59ed41 | 14 | #include <uapi/linux/time.h> |
2aae950b | 15 | #include <asm/vgtod.h> |
7c03156f | 16 | #include <asm/vvar.h> |
2aae950b | 17 | #include <asm/unistd.h> |
7c03156f | 18 | #include <asm/msr.h> |
76480a6a | 19 | #include <asm/pvclock.h> |
7c03156f SS |
20 | #include <linux/math64.h> |
21 | #include <linux/time.h> | |
76480a6a | 22 | #include <linux/kernel.h> |
2aae950b | 23 | |
8c49d9a7 | 24 | #define gtod (&VVAR(vsyscall_gtod_data)) |
2aae950b | 25 | |
7a59ed41 SS |
26 | extern int __vdso_clock_gettime(clockid_t clock, struct timespec *ts); |
27 | extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz); | |
28 | extern time_t __vdso_time(time_t *t); | |
29 | ||
dac16fba AL |
30 | #ifdef CONFIG_PARAVIRT_CLOCK |
31 | extern u8 pvclock_page | |
32 | __attribute__((visibility("hidden"))); | |
33 | #endif | |
34 | ||
7a59ed41 SS |
35 | #ifndef BUILD_VDSO32 |
36 | ||
/*
 * Slow path: issue a real clock_gettime() syscall when the fast
 * userspace clock read is unavailable.  64-bit syscall ABI:
 * "0"/rax = syscall number, "D"/rdi = clock, "S"/rsi = ts;
 * the "memory" clobber covers the kernel writing *ts.
 */
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;
	asm("syscall" : "=a" (ret) :
	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
	return ret;
}
44 | ||
/*
 * Slow path: issue a real gettimeofday() syscall.  Same register
 * conventions as vdso_fallback_gettime() above.
 */
notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm("syscall" : "=a" (ret) :
	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
	return ret;
}
53 | ||
51c19b4f | 54 | |
76480a6a AL |
55 | #else |
56 | ||
/*
 * 32-bit slow path: call clock_gettime() through __kernel_vsyscall.
 * %ebx carries the first syscall argument but is manually saved in
 * %edx and restored around the call rather than listed as an output
 * constraint (NOTE(review): presumably because %ebx may be reserved
 * as the PIC register — confirm against the build flags).
 */
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;

	asm(
		"mov %%ebx, %%edx \n"
		"mov %2, %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret)
		: "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
		: "memory", "edx");
	return ret;
}
71 | ||
/*
 * 32-bit slow path: call gettimeofday() through __kernel_vsyscall.
 * Same %ebx save/restore dance as the 32-bit clock_gettime fallback.
 */
notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm(
		"mov %%ebx, %%edx \n"
		"mov %2, %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret)
		: "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
		: "memory", "edx");
	return ret;
}
86 | ||
87 | #endif | |
88 | ||
89 | #ifdef CONFIG_PARAVIRT_CLOCK | |
/* Return the pvclock time info mapped into the vDSO at pvclock_page. */
static notrace const struct pvclock_vsyscall_time_info *get_pvti0(void)
{
	return (const struct pvclock_vsyscall_time_info *)&pvclock_page;
}
94 | ||
/*
 * Read the paravirtual clock (KVM/Xen pvclock).  On failure (the
 * hypervisor does not advertise a stable TSC) this clears *mode to
 * VCLOCK_NONE and returns 0 so the caller falls back to a syscall.
 */
static notrace cycle_t vread_pvclock(int *mode)
{
	const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti;
	cycle_t ret;
	u64 tsc, pvti_tsc;
	u64 last, delta, pvti_system_time;
	u32 version, pvti_tsc_to_system_mul, pvti_tsc_shift;

	/*
	 * Note: The kernel and hypervisor must guarantee that cpu ID
	 * number maps 1:1 to per-CPU pvclock time info.
	 *
	 * Because the hypervisor is entirely unaware of guest userspace
	 * preemption, it cannot guarantee that per-CPU pvclock time
	 * info is updated if the underlying CPU changes or that that
	 * version is increased whenever underlying CPU changes.
	 *
	 * On KVM, we are guaranteed that pvti updates for any vCPU are
	 * atomic as seen by *all* vCPUs.  This is an even stronger
	 * guarantee than we get with a normal seqlock.
	 *
	 * On Xen, we don't appear to have that guarantee, but Xen still
	 * supplies a valid seqlock using the version field.
	 *
	 * We only do pvclock vdso timing at all if
	 * PVCLOCK_TSC_STABLE_BIT is set, and we interpret that bit to
	 * mean that all vCPUs have matching pvti and that the TSC is
	 * synced, so we can just look at vCPU 0's pvti.
	 */

	/*
	 * Seqlock-style snapshot: an odd version means an update is in
	 * progress; a changed version means we raced one.  Retry in
	 * either case.  The smp_rmb()s keep the field reads inside the
	 * two version reads.
	 */
	do {
		version = pvti->version;

		smp_rmb();

		if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT))) {
			*mode = VCLOCK_NONE;
			return 0;
		}

		tsc = rdtsc_ordered();
		pvti_tsc_to_system_mul = pvti->tsc_to_system_mul;
		pvti_tsc_shift = pvti->tsc_shift;
		pvti_system_time = pvti->system_time;
		pvti_tsc = pvti->tsc_timestamp;

		/* Make sure that the version double-check is last. */
		smp_rmb();
	} while (unlikely((version & 1) || version != pvti->version));

	/* Scale the TSC delta with the snapshotted mul/shift pair. */
	delta = tsc - pvti_tsc;
	ret = pvti_system_time +
		pvclock_scale_delta(delta, pvti_tsc_to_system_mul,
				    pvti_tsc_shift);

	/* refer to vread_tsc() comment for rationale */
	last = gtod->cycle_last;

	if (likely(ret >= last))
		return ret;

	return last;
}
158 | #endif | |
159 | ||
/*
 * Read the TSC, clamped so we never return a value below
 * gtod->cycle_last (keeps the clock from appearing to jump backwards
 * relative to the last timekeeper update).
 */
notrace static cycle_t vread_tsc(void)
{
	cycle_t ret = (cycle_t)rdtsc_ordered();
	u64 last = gtod->cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead.  I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}
a939e817 | 179 | |
/*
 * Read the active vclock and convert it to a scaled delta:
 * ((cycles - gtod->cycle_last) & gtod->mask) * gtod->mult.
 * Returns 0 when no fast vclock mode is active; vread_pvclock() may
 * additionally downgrade *mode to VCLOCK_NONE on failure.
 */
notrace static inline u64 vgetsns(int *mode)
{
	u64 v;
	cycles_t cycles;

	if (gtod->vclock_mode == VCLOCK_TSC)
		cycles = vread_tsc();
#ifdef CONFIG_PARAVIRT_CLOCK
	else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
		cycles = vread_pvclock(mode);
#endif
	else
		return 0;
	v = (cycles - gtod->cycle_last) & gtod->mask;
	return v * gtod->mult;
}
196 | ||
5f293474 AL |
/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
/*
 * Fill *ts with CLOCK_REALTIME under the gtod seqlock.  Returns the
 * vclock mode that was used; VCLOCK_NONE tells the caller to fall
 * back to a syscall.
 */
notrace static int __always_inline do_realtime(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	do {
		seq = gtod_read_begin(gtod);
		mode = gtod->vclock_mode;
		ts->tv_sec = gtod->wall_time_sec;
		/* snsec holds shifted nanoseconds; note the >>= below. */
		ns = gtod->wall_time_snsec;
		ns += vgetsns(&mode);
		ns >>= gtod->shift;
	} while (unlikely(gtod_read_retry(gtod, seq)));

	/* Normalize: carry whole seconds out of ns into tv_sec. */
	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return mode;
}
218 | ||
/*
 * Fill *ts with CLOCK_MONOTONIC under the gtod seqlock.  Mirrors
 * do_realtime() but uses the monotonic base values.  Returns the
 * vclock mode used (VCLOCK_NONE => caller must use the syscall).
 */
notrace static int __always_inline do_monotonic(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	do {
		seq = gtod_read_begin(gtod);
		mode = gtod->vclock_mode;
		ts->tv_sec = gtod->monotonic_time_sec;
		/* snsec holds shifted nanoseconds; note the >>= below. */
		ns = gtod->monotonic_time_snsec;
		ns += vgetsns(&mode);
		ns >>= gtod->shift;
	} while (unlikely(gtod_read_retry(gtod, seq)));

	/* Normalize: carry whole seconds out of ns into tv_sec. */
	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return mode;
}
239 | ||
ce39c640 | 240 | notrace static void do_realtime_coarse(struct timespec *ts) |
da15cfda | 241 | { |
242 | unsigned long seq; | |
243 | do { | |
7c03156f SS |
244 | seq = gtod_read_begin(gtod); |
245 | ts->tv_sec = gtod->wall_time_coarse_sec; | |
246 | ts->tv_nsec = gtod->wall_time_coarse_nsec; | |
247 | } while (unlikely(gtod_read_retry(gtod, seq))); | |
da15cfda | 248 | } |
249 | ||
ce39c640 | 250 | notrace static void do_monotonic_coarse(struct timespec *ts) |
da15cfda | 251 | { |
91ec87d5 | 252 | unsigned long seq; |
da15cfda | 253 | do { |
7c03156f SS |
254 | seq = gtod_read_begin(gtod); |
255 | ts->tv_sec = gtod->monotonic_time_coarse_sec; | |
256 | ts->tv_nsec = gtod->monotonic_time_coarse_nsec; | |
257 | } while (unlikely(gtod_read_retry(gtod, seq))); | |
da15cfda | 258 | } |
259 | ||
/*
 * vDSO entry point for clock_gettime(2).  Dispatches the supported
 * clock IDs to their fast userspace readers; any unsupported clock,
 * or a reader reporting VCLOCK_NONE, falls back to the real syscall.
 */
notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
	switch (clock) {
	case CLOCK_REALTIME:
		if (do_realtime(ts) == VCLOCK_NONE)
			goto fallback;
		break;
	case CLOCK_MONOTONIC:
		if (do_monotonic(ts) == VCLOCK_NONE)
			goto fallback;
		break;
	case CLOCK_REALTIME_COARSE:
		do_realtime_coarse(ts);
		break;
	case CLOCK_MONOTONIC_COARSE:
		do_monotonic_coarse(ts);
		break;
	default:
		goto fallback;
	}

	return 0;
fallback:
	return vdso_fallback_gettime(clock, ts);
}
int clock_gettime(clockid_t, struct timespec *)
	__attribute__((weak, alias("__vdso_clock_gettime")));
287 | ||
/*
 * vDSO entry point for gettimeofday(2).  do_realtime() fills tv as if
 * it were a timespec, leaving nanoseconds in tv_usec, which the /= 1000
 * converts to microseconds.  NOTE(review): this depends on timeval and
 * timespec sharing the tv_sec/second-field layout — intentional here,
 * but do not "fix" the cast.
 */
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	if (likely(tv != NULL)) {
		if (unlikely(do_realtime((struct timespec *)tv) == VCLOCK_NONE))
			return vdso_fallback_gtod(tv, tz);
		tv->tv_usec /= 1000;
	}
	if (unlikely(tz != NULL)) {
		/* Timezone data is copied straight from the vvar page. */
		tz->tz_minuteswest = gtod->tz_minuteswest;
		tz->tz_dsttime = gtod->tz_dsttime;
	}

	return 0;
}
int gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));
f144a6b4 | 304 | |
/*
 * This will break when the xtime seconds get inaccurate, but that is
 * unlikely
 */
/*
 * vDSO entry point for time(2): a single lockless read of the cached
 * wall-clock seconds, optionally stored through *t as well.
 */
notrace time_t __vdso_time(time_t *t)
{
	/* This is atomic on x86 so we don't need any locks. */
	time_t result = ACCESS_ONCE(gtod->wall_time_sec);

	if (t)
		*t = result;
	return result;
}
int time(time_t *t)
	__attribute__((weak, alias("__vdso_time")));