#ifndef _LTTNG_TRACE_CLOCK_H
#define _LTTNG_TRACE_CLOCK_H

/*
 * wrapper/trace-clock.h
 *
 * Contains LTTng trace clock mapping to LTTng trace clock or mainline monotonic
 * clock. This wrapper depends on CONFIG_HIGH_RES_TIMERS=y.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifdef CONFIG_HAVE_TRACE_CLOCK
#include <linux/trace-clock.h>
#else /* CONFIG_HAVE_TRACE_CLOCK */

#include <linux/hardirq.h>
#include <linux/ktime.h>
#include <linux/time.h>
#include <linux/hrtimer.h>
#include <linux/percpu.h>
#include <linux/version.h>
#include <asm/local.h>
#include "../lttng-kernel-version.h"
#include "../lttng-clock.h"
#include "percpu-defs.h"
#include "random.h"

#if ((LTTNG_KERNEL_RANGE(3,10,0, 3,10,14) && !LTTNG_RHEL_KERNEL_RANGE(3,10,0,7,0, 3,10,14,0,0)) \
	|| LTTNG_KERNEL_RANGE(3,11,0, 3,11,3))
#error "Linux kernels 3.10 and 3.11 introduce a deadlock in the timekeeping subsystem. Fixed by commit 7bd36014460f793c19e7d6c94dab67b0afcfcb7f \"timekeeping: Fix HRTICK related deadlock from ntp lock changes\" in Linux."
#endif

extern struct lttng_trace_clock *lttng_trace_clock;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0))

/*
 * Per-CPU cache of the last timestamp returned, used to detect (and
 * compensate for) the monotonic fast clock going slightly backwards on a
 * given CPU.
 */
DECLARE_PER_CPU(local_t, lttng_last_tsc);

#if (BITS_PER_LONG == 32)
/*
 * Fixup "src_now" using the 32 LSB from "last". We need to handle overflow and
 * underflow of the 32nd bit. "last" can be above, below or equal to the 32 LSB
 * of "src_now".
 */
static inline u64 trace_clock_fixup(u64 src_now, u32 last)
{
	u64 now;

	now = src_now & 0xFFFFFFFF00000000ULL;
	now |= (u64) last;
	/* Detect overflow or underflow between now and last. */
	if ((src_now & 0x80000000U) && !(last & 0x80000000U)) {
		/*
		 * If 32nd bit transitions from 1 to 0, and we move forward in
		 * time from "now" to "last", then we have an overflow.
		 */
		if (((s32) src_now - (s32) last) < 0)
			now += 0x0000000100000000ULL;
	} else if (!(src_now & 0x80000000U) && (last & 0x80000000U)) {
		/*
		 * If 32nd bit transitions from 0 to 1, and we move backward in
		 * time from "now" to "last", then we have an underflow.
		 */
		if (((s32) src_now - (s32) last) > 0)
			now -= 0x0000000100000000ULL;
	}
	return now;
}
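/*
 * Example of the overflow case, with made-up values: src_now =
 * 0x00000001FFFFFF00 has its 32nd bit set and last = 0x00000010 has it
 * cleared, meaning the 32 LSB wrapped between the two samples. Splicing
 * gives now = 0x0000000100000010; the signed difference
 * (s32) 0xFFFFFF00 - (s32) 0x00000010 = -272 is negative, so 2^32 is
 * added, yielding the expected time 0x0000000200000010.
 */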
#else /* #if (BITS_PER_LONG == 32) */
/*
 * The fixup is pretty easy on 64-bit architectures: "last" is a 64-bit
 * value, so we can use last directly as current time.
 */
static inline u64 trace_clock_fixup(u64 src_now, u64 last)
{
	return last;
}
#endif /* #else #if (BITS_PER_LONG == 32) */

94 | ||
95 | /* | |
96 | * Always called with preemption disabled. Can be interrupted. | |
97 | */ | |
98 | static inline u64 trace_clock_monotonic_wrapper(void) | |
99 | { | |
100 | u64 now; | |
101 | unsigned long last, result; | |
102 | local_t *last_tsc; | |
103 | ||
104 | /* Use fast nmi-safe monotonic clock provided by the Linux kernel. */ | |
e6b06d7d | 105 | last_tsc = lttng_this_cpu_ptr(<tng_last_tsc); |
b0725207 MD |
106 | last = local_read(last_tsc); |
107 | /* | |
108 | * Read "last" before "now". It is not strictly required, but it ensures | |
109 | * that an interrupt coming in won't artificially trigger a case where | |
110 | * "now" < "last". This kind of situation should only happen if the | |
111 | * mono_fast time source goes slightly backwards. | |
112 | */ | |
113 | barrier(); | |
114 | now = ktime_get_mono_fast_ns(); | |
115 | if (((long) now - (long) last) < 0) | |
116 | now = trace_clock_fixup(now, last); | |
117 | result = local_cmpxchg(last_tsc, last, (unsigned long) now); | |
118 | if (result == last) { | |
119 | /* Update done. */ | |
120 | return now; | |
121 | } else { | |
122 | /* | |
123 | * Update not done, due to concurrent update. We can use | |
124 | * "result", since it has been sampled concurrently with our | |
125 | * time read, so it should not be far from "now". | |
126 | */ | |
127 | return trace_clock_fixup(now, result); | |
128 | } | |
129 | } | |
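/*
 * Example of the concurrent-update path: a thread reads "last", is
 * interrupted before the cmpxchg, and the interrupt handler runs this
 * function to completion, storing a newer timestamp in lttng_last_tsc.
 * The interrupted thread's cmpxchg then fails and returns that newer
 * value as "result"; since "result" was stored after "now" was sampled,
 * it is close to "now", and the returned time stays monotonic for this
 * CPU.
 */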
130 | ||
131 | #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) */ | |
f6c19f6e MD |
132 | static inline u64 trace_clock_monotonic_wrapper(void) |
133 | { | |
134 | ktime_t ktime; | |
135 | ||
136 | /* | |
137 | * Refuse to trace from NMIs with this wrapper, because an NMI could | |
138 | * nest over the xtime write seqlock and deadlock. | |
139 | */ | |
140 | if (in_nmi()) | |
97ca2c54 | 141 | return (u64) -EIO; |
f6c19f6e MD |
142 | |
143 | ktime = ktime_get(); | |
cfaf9f3d | 144 | return ktime_to_ns(ktime); |
f6c19f6e | 145 | } |
b0725207 | 146 | #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) */ |
f6c19f6e | 147 | |
static inline u64 trace_clock_read64_monotonic(void)
{
	return (u64) trace_clock_monotonic_wrapper();
}

static inline u64 trace_clock_freq_monotonic(void)
{
	return (u64) NSEC_PER_SEC;
}

static inline int trace_clock_uuid_monotonic(char *uuid)
{
	return wrapper_get_bootid(uuid);
}

static inline const char *trace_clock_name_monotonic(void)
{
	return "monotonic";
}

static inline const char *trace_clock_description_monotonic(void)
{
	return "Monotonic Clock";
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0))
static inline int get_trace_clock(void)
{
	printk(KERN_WARNING "LTTng: Using mainline kernel monotonic fast clock, which is NMI-safe.\n");
	return 0;
}
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) */
static inline int get_trace_clock(void)
{
	printk(KERN_WARNING "LTTng: Using mainline kernel monotonic clock. NMIs will not be traced.\n");
	return 0;
}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) */

static inline void put_trace_clock(void)
{
}

static inline u64 trace_clock_read64(void)
{
	struct lttng_trace_clock *ltc = ACCESS_ONCE(lttng_trace_clock);

	if (likely(!ltc)) {
		return trace_clock_read64_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->read64();
	}
}

static inline u64 trace_clock_freq(void)
{
	struct lttng_trace_clock *ltc = ACCESS_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_freq_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->freq();
	}
}

static inline int trace_clock_uuid(char *uuid)
{
	struct lttng_trace_clock *ltc = ACCESS_ONCE(lttng_trace_clock);

	read_barrier_depends();	/* load ltc before content */
	/* Use default UUID cb when NULL */
	if (!ltc || !ltc->uuid) {
		return trace_clock_uuid_monotonic(uuid);
	} else {
		return ltc->uuid(uuid);
	}
}

static inline const char *trace_clock_name(void)
{
	struct lttng_trace_clock *ltc = ACCESS_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_name_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->name();
	}
}

static inline const char *trace_clock_description(void)
{
	struct lttng_trace_clock *ltc = ACCESS_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_description_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->description();
	}
}
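/*
 * A clock plugin can override the monotonic clock above by populating a
 * struct lttng_trace_clock and registering it (see ../lttng-clock.h).
 * Minimal sketch, assuming the lttng_clock_register_plugin() entry point
 * declared there; the my_*() callbacks are hypothetical:
 *
 *	static struct lttng_trace_clock my_trace_clock = {
 *		.read64 = my_read64,
 *		.freq = my_freq,
 *		.uuid = NULL,	(NULL falls back to the boot-id UUID)
 *		.name = my_name,
 *		.description = my_description,
 *	};
 *
 *	ret = lttng_clock_register_plugin(&my_trace_clock, THIS_MODULE);
 */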
251 | ||
f6c19f6e MD |
252 | #endif /* CONFIG_HAVE_TRACE_CLOCK */ |
253 | ||
a90917c3 | 254 | #endif /* _LTTNG_TRACE_CLOCK_H */ |