#ifndef _LTTNG_TRACE_CLOCK_H
#define _LTTNG_TRACE_CLOCK_H

/*
 * wrapper/trace-clock.h
 *
 * Contains LTTng trace clock mapping to LTTng trace clock or mainline monotonic
 * clock. This wrapper depends on CONFIG_HIGH_RES_TIMERS=y.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifdef CONFIG_HAVE_TRACE_CLOCK
#include <linux/trace-clock.h>
#else /* CONFIG_HAVE_TRACE_CLOCK */

#include <linux/hardirq.h>
#include <linux/ktime.h>
#include <linux/time.h>
#include <linux/hrtimer.h>
#include <linux/percpu.h>
#include <linux/version.h>
#include <asm/local.h>
#include <lttng-kernel-version.h>
#include <lttng-clock.h>
#include <wrapper/percpu-defs.h>
#include <wrapper/random.h>

#if ((LTTNG_KERNEL_RANGE(3,10,0, 3,10,14) && !LTTNG_RHEL_KERNEL_RANGE(3,10,0,123,0,0, 3,10,14,0,0,0)) \
	|| LTTNG_KERNEL_RANGE(3,11,0, 3,11,3))
#error "Linux kernels 3.10 and 3.11 introduce a deadlock in the timekeeping subsystem. Fixed by commit 7bd36014460f793c19e7d6c94dab67b0afcfcb7f \"timekeeping: Fix HRTICK related deadlock from ntp lock changes\" in Linux."
#endif

extern struct lttng_trace_clock *lttng_trace_clock;
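
/*
 * When lttng_trace_clock is NULL, the accessors at the bottom of this file
 * fall back to the monotonic functions below. A clock plugin overrides the
 * trace clock by publishing a struct lttng_trace_clock (see lttng-clock.h).
 * Minimal sketch of such an override; the callback fields match the ones
 * invoked by the accessors in this header, while all "my_*" identifiers
 * are hypothetical:
 *
 *	static u64 my_read64(void)
 *	{
 *		return my_counter_to_ns();	(hypothetical time source)
 *	}
 *
 *	static struct lttng_trace_clock my_trace_clock = {
 *		.read64 = my_read64,
 *		.freq = my_freq,
 *		.uuid = my_uuid,
 *		.name = my_name,
 *		.description = my_description,
 *	};
 */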

/*
 * Upstream Linux commit 27727df240c7 ("Avoid taking lock in NMI path with
 * CONFIG_DEBUG_TIMEKEEPING") introduces a buggy ktime_get_mono_fast_ns().
 * This is fixed by patch "timekeeping: Fix __ktime_get_fast_ns() regression".
 */
#if (LTTNG_KERNEL_RANGE(4,8,0, 4,8,2) \
	|| LTTNG_KERNEL_RANGE(4,7,4, 4,7,8) \
	|| LTTNG_KERNEL_RANGE(4,4,20, 4,4,25) \
	|| LTTNG_KERNEL_RANGE(4,1,32, 4,1,35))
#define LTTNG_CLOCK_NMI_SAFE_BROKEN
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) \
	&& !defined(LTTNG_CLOCK_NMI_SAFE_BROKEN))

DECLARE_PER_CPU(local_t, lttng_last_tsc);

#if (BITS_PER_LONG == 32)
/*
 * Fixup "src_now" using the 32 LSB from "last". We need to handle overflow and
 * underflow of the 32nd bit. "last" can be above, below or equal to the 32 LSB
 * of "src_now".
 */
static inline u64 trace_clock_fixup(u64 src_now, u32 last)
{
	u64 now;

	now = src_now & 0xFFFFFFFF00000000ULL;
	now |= (u64) last;
	/* Detect overflow or underflow between now and last. */
	if ((src_now & 0x80000000U) && !(last & 0x80000000U)) {
		/*
		 * If 32nd bit transitions from 1 to 0, and we move forward in
		 * time from "now" to "last", then we have an overflow.
		 */
		if (((s32) now - (s32) last) < 0)
			now += 0x0000000100000000ULL;
	} else if (!(src_now & 0x80000000U) && (last & 0x80000000U)) {
		/*
		 * If 32nd bit transitions from 0 to 1, and we move backward in
		 * time from "now" to "last", then we have an underflow.
		 */
		if (((s32) now - (s32) last) > 0)
			now -= 0x0000000100000000ULL;
	}
	return now;
}
#else /* #if (BITS_PER_LONG == 32) */
/*
 * The fixup is pretty easy on 64-bit architectures: "last" is a 64-bit
 * value, so we can use last directly as current time.
 */
static inline u64 trace_clock_fixup(u64 src_now, u64 last)
{
	return last;
}
#endif /* #else #if (BITS_PER_LONG == 32) */

/*
 * Sometimes called with preemption enabled. Can be interrupted.
 */
static inline u64 trace_clock_monotonic_wrapper(void)
{
	u64 now;
	unsigned long last, result;
	local_t *last_tsc;

	/* Use fast nmi-safe monotonic clock provided by the Linux kernel. */
	preempt_disable();
	last_tsc = lttng_this_cpu_ptr(&lttng_last_tsc);
	last = local_read(last_tsc);
	/*
	 * Read "last" before "now". It is not strictly required, but it ensures
	 * that an interrupt coming in won't artificially trigger a case where
	 * "now" < "last". This kind of situation should only happen if the
	 * mono_fast time source goes slightly backwards.
	 */
	barrier();
	now = ktime_get_mono_fast_ns();
	if (((long) now - (long) last) < 0)
		now = trace_clock_fixup(now, last);
	result = local_cmpxchg(last_tsc, last, (unsigned long) now);
	preempt_enable();
	if (result == last) {
		/* Update done. */
		return now;
	} else {
		/*
		 * Update not done, due to concurrent update. We can use
		 * "result", since it has been sampled concurrently with our
		 * time read, so it should not be far from "now".
		 */
		return trace_clock_fixup(now, result);
	}
}
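
/*
 * Design note: local_cmpxchg() returns the previous value of lttng_last_tsc,
 * so "result == last" means our update won any race with a concurrent
 * interrupt on this CPU. Clamping "now" onto the last published value keeps
 * the reported clock from appearing to go backwards on a given CPU even
 * when ktime_get_mono_fast_ns() regresses slightly.
 */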

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) */
static inline u64 trace_clock_monotonic_wrapper(void)
{
	ktime_t ktime;

	/*
	 * Refuse to trace from NMIs with this wrapper, because an NMI could
	 * nest over the xtime write seqlock and deadlock.
	 */
	if (in_nmi())
		return (u64) -EIO;

	ktime = ktime_get();
	return ktime_to_ns(ktime);
}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) */

static inline u64 trace_clock_read64_monotonic(void)
{
	return (u64) trace_clock_monotonic_wrapper();
}

static inline u64 trace_clock_freq_monotonic(void)
{
	return (u64) NSEC_PER_SEC;
}

static inline int trace_clock_uuid_monotonic(char *uuid)
{
	return wrapper_get_bootid(uuid);
}

static inline const char *trace_clock_name_monotonic(void)
{
	return "monotonic";
}

static inline const char *trace_clock_description_monotonic(void)
{
	return "Monotonic Clock";
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0))
static inline int get_trace_clock(void)
{
	printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic fast clock, which is NMI-safe.\n");
	return 0;
}
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) */
static inline int get_trace_clock(void)
{
	printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic clock. NMIs will not be traced.\n");
	return 0;
}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) */

static inline void put_trace_clock(void)
{
}

static inline u64 trace_clock_read64(void)
{
	struct lttng_trace_clock *ltc = ACCESS_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_read64_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->read64();
	}
}
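
/*
 * Note on the pattern above and in the accessors below: read_barrier_depends()
 * (a no-op on all architectures except Alpha) orders the load of the ltc
 * pointer before the loads of its callback fields. This is expected to pair
 * with publication ordering on the plugin registration side (not shown
 * here), so a reader seeing a non-NULL ltc also sees initialized callbacks.
 */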

static inline u64 trace_clock_freq(void)
{
	struct lttng_trace_clock *ltc = ACCESS_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_freq_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->freq();
	}
}

static inline int trace_clock_uuid(char *uuid)
{
	struct lttng_trace_clock *ltc = ACCESS_ONCE(lttng_trace_clock);

	read_barrier_depends();	/* load ltc before content */
	/* Use default UUID cb when NULL */
	if (!ltc || !ltc->uuid) {
		return trace_clock_uuid_monotonic(uuid);
	} else {
		return ltc->uuid(uuid);
	}
}

static inline const char *trace_clock_name(void)
{
	struct lttng_trace_clock *ltc = ACCESS_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_name_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->name();
	}
}

static inline const char *trace_clock_description(void)
{
	struct lttng_trace_clock *ltc = ACCESS_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_description_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->description();
	}
}

#endif /* CONFIG_HAVE_TRACE_CLOCK */

#endif /* _LTTNG_TRACE_CLOCK_H */