/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * wrapper/trace-clock.h
 *
 * Contains LTTng trace clock mapping to LTTng trace clock or mainline monotonic
 * clock. This wrapper depends on CONFIG_HIGH_RES_TIMERS=y.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_TRACE_CLOCK_H
#define _LTTNG_TRACE_CLOCK_H

#ifdef CONFIG_HAVE_TRACE_CLOCK
#include <linux/trace-clock.h>
#else /* CONFIG_HAVE_TRACE_CLOCK */

#include <linux/hardirq.h>
#include <linux/ktime.h>
#include <linux/time.h>
#include <linux/hrtimer.h>
#include <linux/percpu.h>
#include <linux/version.h>
#include <asm/local.h>
#include <lttng-kernel-version.h>
#include <lttng-clock.h>
#include <wrapper/compiler.h>
#include <wrapper/percpu-defs.h>
#include <wrapper/random.h>
#include <blacklist/timekeeping.h>

extern struct lttng_trace_clock *lttng_trace_clock;

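/*
 * Illustrative sketch (added; not part of the original header): the
 * monotonic clock below is only the default. A clock plugin module can
 * override it by publishing a struct lttng_trace_clock, whose callbacks
 * are dispatched through ltc->read64(), ltc->freq(), ltc->uuid(),
 * ltc->name() and ltc->description() at the end of this file. The
 * lttng_clock_register_plugin()/lttng_clock_unregister_plugin() helpers
 * are assumed to come from <lttng-clock.h>; the plugin callbacks below
 * are hypothetical.
 */
#if 0	/* example only, never compiled as part of this header */
static u64 example_clock_read64(void)
{
	/* Current time in this clock's units (here: nanoseconds). */
	return ktime_get_raw_ns();
}

static u64 example_clock_freq(void)
{
	/* Counts per second. */
	return NSEC_PER_SEC;
}

static struct lttng_trace_clock example_clock = {
	.read64 = example_clock_read64,
	.freq = example_clock_freq,
	/*
	 * A real plugin must also fill in .name and .description, since
	 * the accessors below call them unconditionally once a plugin is
	 * registered; only .uuid has a NULL fallback.
	 */
};

static int __init example_clock_init(void)
{
	return lttng_clock_register_plugin(&example_clock, THIS_MODULE);
}
module_init(example_clock_init);

static void __exit example_clock_exit(void)
{
	lttng_clock_unregister_plugin(&example_clock, THIS_MODULE);
}
module_exit(example_clock_exit);
#endif
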
/*
 * Upstream Linux commit 27727df240c7 ("Avoid taking lock in NMI path with
 * CONFIG_DEBUG_TIMEKEEPING") introduces a buggy ktime_get_mono_fast_ns().
 * This is fixed by patch "timekeeping: Fix __ktime_get_fast_ns() regression".
 */
#if (LTTNG_KERNEL_RANGE(4,8,0, 4,8,2) \
	|| LTTNG_KERNEL_RANGE(4,7,4, 4,7,8) \
	|| LTTNG_KERNEL_RANGE(4,4,20, 4,4,25) \
	|| LTTNG_KERNEL_RANGE(4,1,32, 4,1,35))
#define LTTNG_CLOCK_NMI_SAFE_BROKEN
#endif

/*
 * We need clock values to be monotonically increasing per-cpu, which is
 * not strictly guaranteed by ktime_get_mono_fast_ns(). It is
 * straightforward to do on architectures with a 64-bit cmpxchg(), but
 * not so on architectures without 64-bit cmpxchg. For now, only enable
 * this feature on 64-bit architectures.
 */

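/*
 * Worked example (added): if the per-cpu lttng_last_tsc holds 1003 and a
 * slightly backwards mono_fast read returns now = 1000, then now - last
 * computes to (u64)-3, which exceeds U64_MAX / 2. The wrapper below treats
 * this as "time went backwards" and clamps now to last (1003), preserving
 * per-cpu monotonicity.
 */
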
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) \
	&& BITS_PER_LONG == 64 \
	&& !defined(LTTNG_CLOCK_NMI_SAFE_BROKEN))
#define LTTNG_USE_NMI_SAFE_CLOCK
#endif

#ifdef LTTNG_USE_NMI_SAFE_CLOCK

DECLARE_PER_CPU(u64, lttng_last_tsc);

/*
 * Sometimes called with preemption enabled. Can be interrupted.
 */
static inline u64 trace_clock_monotonic_wrapper(void)
{
	u64 now, last, result;
	u64 *last_tsc_ptr;

	/* Use fast nmi-safe monotonic clock provided by the Linux kernel. */
	preempt_disable();
	last_tsc_ptr = lttng_this_cpu_ptr(&lttng_last_tsc);
	last = *last_tsc_ptr;
	/*
	 * Read "last" before "now". It is not strictly required, but it ensures
	 * that an interrupt coming in won't artificially trigger a case where
	 * "now" < "last". This kind of situation should only happen if the
	 * mono_fast time source goes slightly backwards.
	 */
	barrier();
	now = ktime_get_mono_fast_ns();
	/*
	 * "now - last" underflows to a huge unsigned value when now < last;
	 * in that case, clamp "now" to "last" to keep this CPU's clock
	 * monotonic.
	 */
	if (U64_MAX / 2 < now - last)
		now = last;
	result = cmpxchg64_local(last_tsc_ptr, last, now);
	preempt_enable();
	if (result == last) {
		/* Update done. */
		return now;
	} else {
		/*
		 * Update not done, due to concurrent update. We can use
		 * "result", since it has been sampled concurrently with our
		 * time read, so it should not be far from "now".
		 */
		return result;
	}
}

#else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */

static inline u64 trace_clock_monotonic_wrapper(void)
{
	ktime_t ktime;

	/*
	 * Refuse to trace from NMIs with this wrapper, because an NMI could
	 * nest over the xtime write seqlock and deadlock.
	 */
	if (in_nmi())
		return (u64) -EIO;

	ktime = ktime_get();
	return ktime_to_ns(ktime);
}
#endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */

static inline u64 trace_clock_read64_monotonic(void)
{
	return (u64) trace_clock_monotonic_wrapper();
}

static inline u64 trace_clock_freq_monotonic(void)
{
	return (u64) NSEC_PER_SEC;
}

static inline int trace_clock_uuid_monotonic(char *uuid)
{
	return wrapper_get_bootid(uuid);
}

static inline const char *trace_clock_name_monotonic(void)
{
	return "monotonic";
}

static inline const char *trace_clock_description_monotonic(void)
{
	return "Monotonic Clock";
}

#ifdef LTTNG_USE_NMI_SAFE_CLOCK
static inline int get_trace_clock(void)
{
	printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic fast clock, which is NMI-safe.\n");
	return 0;
}
#else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
static inline int get_trace_clock(void)
{
	printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic clock. NMIs will not be traced.\n");
	return 0;
}
#endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */

static inline void put_trace_clock(void)
{
}

static inline u64 trace_clock_read64(void)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	if (likely(!ltc)) {
		return trace_clock_read64_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->read64();
	}
}

static inline u64 trace_clock_freq(void)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_freq_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->freq();
	}
}

static inline int trace_clock_uuid(char *uuid)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	read_barrier_depends();	/* load ltc before content */
	/* Use default UUID cb when NULL */
	if (!ltc || !ltc->uuid) {
		return trace_clock_uuid_monotonic(uuid);
	} else {
		return ltc->uuid(uuid);
	}
}

static inline const char *trace_clock_name(void)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_name_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->name();
	}
}

static inline const char *trace_clock_description(void)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_description_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->description();
	}
}

#endif /* CONFIG_HAVE_TRACE_CLOCK */

#endif /* _LTTNG_TRACE_CLOCK_H */