Commit | Line | Data |
---|---|---|
2aae950b AK |
1 | /* |
2 | * Copyright 2006 Andi Kleen, SUSE Labs. | |
3 | * Subject to the GNU Public License, v.2 | |
4 | * | |
f144a6b4 | 5 | * Fast user context implementation of clock_gettime, gettimeofday, and time. |
2aae950b | 6 | * |
7a59ed41 SS |
7 | * 32 Bit compat layer by Stefani Seibold <stefani@seibold.net> |
8 | * sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany | |
9 | * | |
2aae950b AK |
10 | * The code should have no internal unresolved relocations. |
11 | * Check with readelf after changing. | |
2aae950b AK |
12 | */ |
13 | ||
2b7d0390 | 14 | /* Disable profiling for userspace code: */ |
2ed84eeb | 15 | #define DISABLE_BRANCH_PROFILING |
2b7d0390 | 16 | |
7a59ed41 | 17 | #include <uapi/linux/time.h> |
2aae950b | 18 | #include <asm/vgtod.h> |
2aae950b | 19 | #include <asm/hpet.h> |
7c03156f | 20 | #include <asm/vvar.h> |
2aae950b | 21 | #include <asm/unistd.h> |
7c03156f SS |
22 | #include <asm/msr.h> |
23 | #include <linux/math64.h> | |
24 | #include <linux/time.h> | |
2aae950b | 25 | |
8c49d9a7 | 26 | #define gtod (&VVAR(vsyscall_gtod_data)) |
2aae950b | 27 | |
7a59ed41 SS |
28 | extern int __vdso_clock_gettime(clockid_t clock, struct timespec *ts); |
29 | extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz); | |
30 | extern time_t __vdso_time(time_t *t); | |
31 | ||
7c03156f SS |
32 | #ifdef CONFIG_HPET_TIMER |
33 | static inline u32 read_hpet_counter(const volatile void *addr) | |
34 | { | |
35 | return *(const volatile u32 *) (addr + HPET_COUNTER); | |
36 | } | |
37 | #endif | |
38 | ||
7a59ed41 SS |
39 | #ifndef BUILD_VDSO32 |
40 | ||
7c03156f SS |
41 | #include <linux/kernel.h> |
42 | #include <asm/vsyscall.h> | |
43 | #include <asm/fixmap.h> | |
44 | #include <asm/pvclock.h> | |
45 | ||
/*
 * 64-bit vDSO: the HPET register page is reachable through its
 * vsyscall fixmap slot, so translate the fixmap index to a virtual
 * address and read the main counter there.
 */
static notrace cycle_t vread_hpet(void)
{
	return read_hpet_counter((const void *)fix_to_virt(VSYSCALL_HPET));
}
98d0ac38 | 50 | |
/*
 * Fall back to a real clock_gettime(2) system call when no vDSO
 * clocksource is usable.  Inputs go in the usual x86-64 syscall
 * registers (%rdi = clock, %rsi = ts, %rax = nr); the "memory"
 * clobber accounts for the kernel writing *ts.
 */
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;
	asm("syscall" : "=a" (ret) :
	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
	return ret;
}
58 | ||
/*
 * Fall back to a real gettimeofday(2) system call; same register
 * convention as vdso_fallback_gettime() above.  The kernel writes
 * *tv and *tz, hence the "memory" clobber.
 */
notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm("syscall" : "=a" (ret) :
	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
	return ret;
}
67 | ||
51c19b4f MT |
68 | #ifdef CONFIG_PARAVIRT_CLOCK |
69 | ||
/*
 * Return the pvclock time info entry for @cpu.  Entries are packed
 * PVTI_SIZE bytes apart in pages mapped at the PVCLOCK fixmap range:
 * idx selects the fixmap page, offset the entry within that page.
 * BUG_ON guards against a cpu number beyond the mapped range.
 */
static notrace const struct pvclock_vsyscall_time_info *get_pvti(int cpu)
{
	const struct pvclock_vsyscall_time_info *pvti_base;
	int idx = cpu / (PAGE_SIZE/PVTI_SIZE);
	int offset = cpu % (PAGE_SIZE/PVTI_SIZE);

	BUG_ON(PVCLOCK_FIXMAP_BEGIN + idx > PVCLOCK_FIXMAP_END);

	pvti_base = (struct pvclock_vsyscall_time_info *)
		    __fix_to_virt(PVCLOCK_FIXMAP_BEGIN+idx);

	return &pvti_base[offset];
}
83 | ||
/*
 * Read the paravirt clock for the CPU we are currently running on.
 * Retries until a consistent (cpu, version) snapshot is observed, so
 * that neither a concurrent hypervisor update (odd/changed version)
 * nor a migration between the two __getcpu() calls goes unnoticed.
 * On an unstable TSC, *mode is set to VCLOCK_NONE so the caller takes
 * the syscall fallback.
 */
static notrace cycle_t vread_pvclock(int *mode)
{
	const struct pvclock_vsyscall_time_info *pvti;
	cycle_t ret;
	u64 last;
	u32 version;
	u8 flags;
	unsigned cpu, cpu1;


	/*
	 * Note: hypervisor must guarantee that:
	 * 1. cpu ID number maps 1:1 to per-CPU pvclock time info.
	 * 2. that per-CPU pvclock time info is updated if the
	 *    underlying CPU changes.
	 * 3. that version is increased whenever underlying CPU
	 *    changes.
	 *
	 */
	do {
		cpu = __getcpu() & VGETCPU_CPU_MASK;
		/* TODO: We can put vcpu id into higher bits of pvti.version.
		 * This will save a couple of cycles by getting rid of
		 * __getcpu() calls (Gleb).
		 */

		pvti = get_pvti(cpu);

		version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);

		/*
		 * Test we're still on the cpu as well as the version.
		 * We could have been migrated just after the first
		 * vgetcpu but before fetching the version, so we
		 * wouldn't notice a version change.
		 */
		cpu1 = __getcpu() & VGETCPU_CPU_MASK;
	} while (unlikely(cpu != cpu1 ||
			  (pvti->pvti.version & 1) ||
			  pvti->pvti.version != version));

	/* TSC not stable under the hypervisor: whole vDSO path unusable. */
	if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
		*mode = VCLOCK_NONE;

	/* refer to tsc.c read_tsc() comment for rationale */
	last = gtod->cycle_last;

	if (likely(ret >= last))
		return ret;

	return last;
}
136 | #endif | |
137 | ||
7a59ed41 SS |
138 | #else |
139 | ||
140 | extern u8 hpet_page | |
141 | __attribute__((visibility("hidden"))); | |
142 | ||
#ifdef CONFIG_HPET_TIMER
/*
 * 32-bit compat vDSO: the HPET register page is mapped into the vDSO
 * image itself as hpet_page (hidden-visibility symbol, so no dynamic
 * relocation is needed).
 */
static notrace cycle_t vread_hpet(void)
{
	return read_hpet_counter((const void *)(&hpet_page));
}
#endif
149 | ||
/*
 * 32-bit syscall fallback via __kernel_vsyscall.  %ebx cannot appear
 * in the clobber list here (presumably because it is reserved under
 * PIC -- TODO confirm), so it is manually saved in %edx around the
 * call and the first syscall argument is moved into it by hand.
 */
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;

	asm(
		"mov %%ebx, %%edx \n"
		"mov %2, %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret)
		: "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
		: "memory", "edx");
	return ret;
}
164 | ||
/*
 * 32-bit gettimeofday syscall fallback; same %ebx save/restore dance
 * as vdso_fallback_gettime() above.
 */
notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm(
		"mov %%ebx, %%edx \n"
		"mov %2, %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret)
		: "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
		: "memory", "edx");
	return ret;
}
179 | ||
#ifdef CONFIG_PARAVIRT_CLOCK
/*
 * Paravirt clock is not readable from the 32-bit compat vDSO; report
 * VCLOCK_NONE so vgetsns()'s caller takes the syscall fallback path.
 */
static notrace cycle_t vread_pvclock(int *mode)
{
	*mode = VCLOCK_NONE;
	return 0;
}
#endif
188 | ||
189 | #endif | |
190 | ||
/*
 * Read the TSC, clamped so it never appears to go backwards relative
 * to the last value the timekeeping code recorded (gtod->cycle_last).
 */
notrace static cycle_t vread_tsc(void)
{
	cycle_t ret;
	u64 last;

	/*
	 * Empirically, a fence (of type that depends on the CPU)
	 * before rdtsc is enough to ensure that rdtsc is ordered
	 * with respect to loads.  The various CPU manuals are unclear
	 * as to whether rdtsc can be reordered with later loads,
	 * but no one has ever seen it happen.
	 */
	rdtsc_barrier();
	ret = (cycle_t)__native_read_tsc();

	last = gtod->cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead.  I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}
a939e817 | 222 | |
/*
 * Read the active vDSO clocksource and convert the cycle delta since
 * gtod->cycle_last into shifted nanoseconds (the caller applies
 * gtod->shift).  Returns 0 when no vDSO clocksource is available;
 * *mode is only written by vread_pvclock(), which may downgrade it
 * to VCLOCK_NONE on an unstable TSC.
 */
notrace static inline u64 vgetsns(int *mode)
{
	u64 v;
	cycles_t cycles;

	if (gtod->vclock_mode == VCLOCK_TSC)
		cycles = vread_tsc();
#ifdef CONFIG_HPET_TIMER
	else if (gtod->vclock_mode == VCLOCK_HPET)
		cycles = vread_hpet();
#endif
#ifdef CONFIG_PARAVIRT_CLOCK
	else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
		cycles = vread_pvclock(mode);
#endif
	else
		return 0;
	v = (cycles - gtod->cycle_last) & gtod->mask;
	return v * gtod->mult;
}
243 | ||
5f293474 AL |
/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
/*
 * CLOCK_REALTIME read: seqcount loop snapshots the wall time fields
 * plus the current cycle delta, retrying if the timekeeper updated
 * gtod concurrently.  Returns the vclock mode seen inside the
 * critical section (VCLOCK_NONE tells the caller to fall back to
 * the syscall).
 */
notrace static int __always_inline do_realtime(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	do {
		seq = gtod_read_begin(gtod);
		mode = gtod->vclock_mode;
		ts->tv_sec = gtod->wall_time_sec;
		ns = gtod->wall_time_snsec;
		ns += vgetsns(&mode);
		ns >>= gtod->shift;
	} while (unlikely(gtod_read_retry(gtod, seq)));

	/* Normalize: carry whole seconds out of ns into tv_sec. */
	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return mode;
}
265 | ||
/*
 * CLOCK_MONOTONIC read: identical structure to do_realtime(), but
 * based on the monotonic time fields of gtod.  Returns the vclock
 * mode observed (VCLOCK_NONE => caller must use syscall fallback).
 */
notrace static int __always_inline do_monotonic(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	do {
		seq = gtod_read_begin(gtod);
		mode = gtod->vclock_mode;
		ts->tv_sec = gtod->monotonic_time_sec;
		ns = gtod->monotonic_time_snsec;
		ns += vgetsns(&mode);
		ns >>= gtod->shift;
	} while (unlikely(gtod_read_retry(gtod, seq)));

	/* Normalize: carry whole seconds out of ns into tv_sec. */
	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return mode;
}
286 | ||
/*
 * CLOCK_REALTIME_COARSE: copy the precomputed coarse wall time out of
 * gtod under the seqcount.  No clocksource read, so it cannot fail.
 */
notrace static void do_realtime_coarse(struct timespec *ts)
{
	unsigned long seq;
	do {
		seq = gtod_read_begin(gtod);
		ts->tv_sec = gtod->wall_time_coarse_sec;
		ts->tv_nsec = gtod->wall_time_coarse_nsec;
	} while (unlikely(gtod_read_retry(gtod, seq)));
}
296 | ||
/*
 * CLOCK_MONOTONIC_COARSE: copy the precomputed coarse monotonic time
 * out of gtod under the seqcount.  No clocksource read; cannot fail.
 */
notrace static void do_monotonic_coarse(struct timespec *ts)
{
	unsigned long seq;
	do {
		seq = gtod_read_begin(gtod);
		ts->tv_sec = gtod->monotonic_time_coarse_sec;
		ts->tv_nsec = gtod->monotonic_time_coarse_nsec;
	} while (unlikely(gtod_read_retry(gtod, seq)));
}
306 | ||
23adec55 | 307 | notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts) |
2aae950b | 308 | { |
0d7b8547 AL |
309 | switch (clock) { |
310 | case CLOCK_REALTIME: | |
ce39c640 SS |
311 | if (do_realtime(ts) == VCLOCK_NONE) |
312 | goto fallback; | |
0d7b8547 AL |
313 | break; |
314 | case CLOCK_MONOTONIC: | |
ce39c640 SS |
315 | if (do_monotonic(ts) == VCLOCK_NONE) |
316 | goto fallback; | |
0d7b8547 AL |
317 | break; |
318 | case CLOCK_REALTIME_COARSE: | |
ce39c640 SS |
319 | do_realtime_coarse(ts); |
320 | break; | |
0d7b8547 | 321 | case CLOCK_MONOTONIC_COARSE: |
ce39c640 SS |
322 | do_monotonic_coarse(ts); |
323 | break; | |
324 | default: | |
325 | goto fallback; | |
0d7b8547 AL |
326 | } |
327 | ||
a939e817 | 328 | return 0; |
ce39c640 SS |
329 | fallback: |
330 | return vdso_fallback_gettime(clock, ts); | |
2aae950b AK |
331 | } |
332 | int clock_gettime(clockid_t, struct timespec *) | |
333 | __attribute__((weak, alias("__vdso_clock_gettime"))); | |
334 | ||
/*
 * vDSO entry point for gettimeofday(2).  Reuses do_realtime() by
 * casting timeval to timespec: do_realtime() stores nanoseconds into
 * the tv_usec slot, which is then divided by 1000 to get microseconds.
 * (This relies on struct timeval and struct timespec having matching
 * field layout.)  Timezone data is copied straight from gtod.
 */
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	if (likely(tv != NULL)) {
		if (unlikely(do_realtime((struct timespec *)tv) == VCLOCK_NONE))
			return vdso_fallback_gtod(tv, tz);
		tv->tv_usec /= 1000;
	}
	if (unlikely(tz != NULL)) {
		tz->tz_minuteswest = gtod->tz_minuteswest;
		tz->tz_dsttime = gtod->tz_dsttime;
	}

	return 0;
}
int gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));
f144a6b4 | 351 | |
0d7b8547 AL |
/*
 * This will break when the xtime seconds get inaccurate, but that is
 * unlikely
 */
/*
 * vDSO entry point for time(2): a single load of the cached wall-time
 * seconds.  No seqcount needed -- see the comment below.
 */
notrace time_t __vdso_time(time_t *t)
{
	/* This is atomic on x86 so we don't need any locks. */
	time_t result = ACCESS_ONCE(gtod->wall_time_sec);

	if (t)
		*t = result;
	return result;
}
int time(time_t *t)
	__attribute__((weak, alias("__vdso_time")));