Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux...
[deliverable/linux.git] / arch / x86 / vdso / vclock_gettime.c
1 /*
2 * Copyright 2006 Andi Kleen, SUSE Labs.
3 * Subject to the GNU Public License, v.2
4 *
5 * Fast user context implementation of clock_gettime, gettimeofday, and time.
6 *
7 * The code should have no internal unresolved relocations.
8 * Check with readelf after changing.
9 */
10
11 /* Disable profiling for userspace code: */
12 #define DISABLE_BRANCH_PROFILING
13
14 #include <linux/kernel.h>
15 #include <linux/posix-timers.h>
16 #include <linux/time.h>
17 #include <linux/string.h>
18 #include <asm/vsyscall.h>
19 #include <asm/fixmap.h>
20 #include <asm/vgtod.h>
21 #include <asm/timex.h>
22 #include <asm/hpet.h>
23 #include <asm/unistd.h>
24 #include <asm/io.h>
25
26 #define gtod (&VVAR(vsyscall_gtod_data))
27
/*
 * Read the TSC for timekeeping.  Returns the raw counter value, clamped
 * so it never appears to run backwards relative to cycle_last (the
 * cycle count recorded at the last timekeeping update).  Marked notrace
 * because the vDSO must not call into the kernel's tracing machinery.
 */
notrace static cycle_t vread_tsc(void)
{
	cycle_t ret;
	u64 last;

	/*
	 * Empirically, a fence (of type that depends on the CPU)
	 * before rdtsc is enough to ensure that rdtsc is ordered
	 * with respect to loads. The various CPU manuals are unclear
	 * as to whether rdtsc can be reordered with later loads,
	 * but no one has ever seen it happen.
	 */
	rdtsc_barrier();
	ret = (cycle_t)vget_cycles();

	last = VVAR(vsyscall_gtod_data).clock.cycle_last;

	/* Common case: the TSC has not gone backwards since cycle_last. */
	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead. I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}
59
/*
 * Read the HPET main counter through the fixmapped HPET MMIO page.
 * 0xf0 is the offset of the main counter register (HPET_COUNTER in the
 * IA-PC HPET specification); readl() returns only its low 32 bits,
 * which is sufficient because the mask is applied in vgetsns().
 */
static notrace cycle_t vread_hpet(void)
{
	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
}
64
/*
 * Fall back to the real clock_gettime() system call when no vclock is
 * usable.  The syscall is issued with inline asm following the x86-64
 * ABI: rax = syscall number (the "0" constraint ties it to the output
 * register), rdi = clock, rsi = ts.  The "memory" clobber tells the
 * compiler that *ts is written by the kernel.
 */
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;
	asm("syscall" : "=a" (ret) :
	    "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
	return ret;
}
72
/*
 * Fall back to the real gettimeofday() system call.  Same inline-asm
 * syscall convention as vdso_fallback_gettime(): rax = number,
 * rdi = tv, rsi = tz; "memory" clobber because the kernel writes
 * through both pointers.
 */
notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm("syscall" : "=a" (ret) :
	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
	return ret;
}
81
82
83 notrace static inline u64 vgetsns(void)
84 {
85 long v;
86 cycles_t cycles;
87 if (gtod->clock.vclock_mode == VCLOCK_TSC)
88 cycles = vread_tsc();
89 else if (gtod->clock.vclock_mode == VCLOCK_HPET)
90 cycles = vread_hpet();
91 else
92 return 0;
93 v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
94 return v * gtod->clock.mult;
95 }
96
/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
/*
 * Read CLOCK_REALTIME into *ts.  Returns the vclock mode that was in
 * effect (VCLOCK_NONE means the caller must fall back to a syscall and
 * *ts contents are not usable).  All gtod reads happen inside the
 * seqcount loop and are retried if the kernel updated the data
 * concurrently.
 */
notrace static int __always_inline do_realtime(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	/* tv_nsec is filled in by timespec_add_ns() below. */
	ts->tv_nsec = 0;
	do {
		seq = read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ts->tv_sec = gtod->wall_time_sec;
		/* wall_time_snsec is shifted ns; add shifted ns from the vclock. */
		ns = gtod->wall_time_snsec;
		ns += vgetsns();
		ns >>= gtod->clock.shift;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

	/* Normalize: ns may exceed NSEC_PER_SEC, carry into tv_sec. */
	timespec_add_ns(ts, ns);
	return mode;
}
117
/*
 * Read CLOCK_MONOTONIC into *ts.  Same structure as do_realtime():
 * returns the active vclock mode (VCLOCK_NONE => caller must use the
 * syscall fallback), with all gtod reads protected by the seqcount
 * retry loop.
 */
notrace static int do_monotonic(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	/* tv_nsec is filled in by timespec_add_ns() below. */
	ts->tv_nsec = 0;
	do {
		seq = read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ts->tv_sec = gtod->monotonic_time_sec;
		/* monotonic_time_snsec is shifted ns; add shifted vclock delta. */
		ns = gtod->monotonic_time_snsec;
		ns += vgetsns();
		ns >>= gtod->clock.shift;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	timespec_add_ns(ts, ns);

	return mode;
}
137
/*
 * CLOCK_REALTIME_COARSE: copy the kernel's cached coarse wall time.
 * No hardware clock is read, so this never needs a syscall fallback
 * and always returns 0.  The fields are copied individually rather
 * than by struct assignment/memcpy so the vDSO stays free of
 * out-of-line calls (see the file header: no unresolved relocations).
 */
notrace static int do_realtime_coarse(struct timespec *ts)
{
	unsigned long seq;
	do {
		seq = read_seqcount_begin(&gtod->seq);
		ts->tv_sec = gtod->wall_time_coarse.tv_sec;
		ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	return 0;
}
148
/*
 * CLOCK_MONOTONIC_COARSE: copy the kernel's cached coarse monotonic
 * time under the seqcount retry loop.  Always succeeds (returns 0);
 * fields copied individually to avoid any memcpy call in the vDSO.
 */
notrace static int do_monotonic_coarse(struct timespec *ts)
{
	unsigned long seq;
	do {
		seq = read_seqcount_begin(&gtod->seq);
		ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
		ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

	return 0;
}
160
161 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
162 {
163 int ret = VCLOCK_NONE;
164
165 switch (clock) {
166 case CLOCK_REALTIME:
167 ret = do_realtime(ts);
168 break;
169 case CLOCK_MONOTONIC:
170 ret = do_monotonic(ts);
171 break;
172 case CLOCK_REALTIME_COARSE:
173 return do_realtime_coarse(ts);
174 case CLOCK_MONOTONIC_COARSE:
175 return do_monotonic_coarse(ts);
176 }
177
178 if (ret == VCLOCK_NONE)
179 return vdso_fallback_gettime(clock, ts);
180 return 0;
181 }
182 int clock_gettime(clockid_t, struct timespec *)
183 __attribute__((weak, alias("__vdso_clock_gettime")));
184
/*
 * vDSO entry point for gettimeofday(2).  Fills tv (if non-NULL) from
 * the realtime clock and tz (if non-NULL) from the cached sys_tz.
 * Falls back to the real system call when no vclock is usable.
 */
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	long ret = VCLOCK_NONE;

	if (likely(tv != NULL)) {
		/*
		 * do_realtime() fills a timespec.  struct timeval has the
		 * same size and its tv_usec sits at the same offset as
		 * tv_nsec (verified at compile time below), so we can fill
		 * tv in place and then convert nanoseconds to microseconds.
		 */
		BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
			     offsetof(struct timespec, tv_nsec) ||
			     sizeof(*tv) != sizeof(struct timespec));
		ret = do_realtime((struct timespec *)tv);
		tv->tv_usec /= 1000;
	}
	if (unlikely(tz != NULL)) {
		/* Avoid memcpy. Some old compilers fail to inline it */
		tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
		tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
	}

	/* No usable vclock: the syscall overwrites whatever we wrote above. */
	if (ret == VCLOCK_NONE)
		return vdso_fallback_gtod(tv, tz);
	return 0;
}
int gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));
208
209 /*
210 * This will break when the xtime seconds get inaccurate, but that is
211 * unlikely
212 */
213 notrace time_t __vdso_time(time_t *t)
214 {
215 /* This is atomic on x86_64 so we don't need any locks. */
216 time_t result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);
217
218 if (t)
219 *t = result;
220 return result;
221 }
222 int time(time_t *t)
223 __attribute__((weak, alias("__vdso_time")));
This page took 0.049108 seconds and 5 git commands to generate.