/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * wrapper/trace-clock.h
 *
 * Contains LTTng trace clock mapping to LTTng trace clock or mainline monotonic
 * clock. This wrapper depends on CONFIG_HIGH_RES_TIMERS=y.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_TRACE_CLOCK_H
#define _LTTNG_TRACE_CLOCK_H

#ifdef CONFIG_HAVE_TRACE_CLOCK
#include <linux/trace-clock.h>
#else /* CONFIG_HAVE_TRACE_CLOCK */

#include <linux/hardirq.h>
#include <linux/ktime.h>
#include <linux/time.h>
#include <linux/hrtimer.h>
#include <linux/percpu.h>
#include <linux/version.h>
#include <linux/percpu-defs.h>
#include <asm/local.h>
#include <lttng-kernel-version.h>
#include <lttng-clock.h>
#include <wrapper/compiler.h>
#include <wrapper/random.h>

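/*
 * lttng_trace_clock points to an optional override clock (see lttng-clock.h).
 * When it is NULL, the accessors below fall back to the mainline monotonic
 * clock wrappers defined in this file.
 */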
extern struct lttng_trace_clock *lttng_trace_clock;

/*
 * We need clock values to be monotonically increasing per-cpu, which is
 * not strictly guaranteed by ktime_get_mono_fast_ns(). It is
 * straightforward to do on architectures with a 64-bit cmpxchg(), but
 * not so on architectures without 64-bit cmpxchg. For now, only enable
 * this feature on 64-bit architectures.
 */

#if BITS_PER_LONG == 64
#define LTTNG_USE_NMI_SAFE_CLOCK
#endif

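/*
 * Without LTTNG_USE_NMI_SAFE_CLOCK, the ktime_get()-based wrapper further
 * below is used instead, and tracing from NMI context is refused.
 */
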
#ifdef LTTNG_USE_NMI_SAFE_CLOCK

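/* Last timestamp returned on each CPU, used to keep per-cpu reads monotonic. */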
DECLARE_PER_CPU(u64, lttng_last_tsc);

/*
 * Sometimes called with preemption enabled. Can be interrupted.
 */
static inline u64 trace_clock_monotonic_wrapper(void)
{
	u64 now, last, result;
	u64 *last_tsc_ptr;

	/* Use fast nmi-safe monotonic clock provided by the Linux kernel. */
	preempt_disable();
	last_tsc_ptr = this_cpu_ptr(&lttng_last_tsc);
	last = *last_tsc_ptr;
	/*
	 * Read "last" before "now". It is not strictly required, but it ensures
	 * that an interrupt coming in won't artificially trigger a case where
	 * "now" < "last". This kind of situation should only happen if the
	 * mono_fast time source goes slightly backwards.
	 */
	barrier();
	now = ktime_get_mono_fast_ns();
	if (U64_MAX / 2 < now - last)
		now = last;
	result = cmpxchg64_local(last_tsc_ptr, last, now);
	preempt_enable();
	if (result == last) {
		/* Update done. */
		return now;
	} else {
		/*
		 * Update not done, due to concurrent update. We can use
		 * "result", since it has been sampled concurrently with our
		 * time read, so it should not be far from "now".
		 */
		return result;
	}
}

#else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
static inline u64 trace_clock_monotonic_wrapper(void)
{
	ktime_t ktime;
	/*
	 * Refuse to trace from NMIs with this wrapper, because an NMI could
	 * nest over the xtime write seqlock and deadlock.
	 */
	if (in_nmi())
		return (u64) -EIO;
	ktime = ktime_get();
	return ktime_to_ns(ktime);
}
#endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */

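/*
 * Default implementations used when no override clock is registered: the
 * monotonic wrapper above for timestamps, NSEC_PER_SEC as frequency, the
 * kernel boot id as UUID, and fixed name/description strings.
 */
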
static inline u64 trace_clock_read64_monotonic(void)
{
	return (u64) trace_clock_monotonic_wrapper();
}

static inline u64 trace_clock_freq_monotonic(void)
{
	return (u64) NSEC_PER_SEC;
}

static inline int trace_clock_uuid_monotonic(char *uuid)
{
	return wrapper_get_bootid(uuid);
}

static inline const char *trace_clock_name_monotonic(void)
{
	return "monotonic";
}

static inline const char *trace_clock_description_monotonic(void)
{
	return "Monotonic Clock";
}

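/*
 * get_trace_clock()/put_trace_clock() are the acquire/release hooks for the
 * clock source. In this wrapper they only emit a one-time informational
 * message; there is no state to pin or release.
 */
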
#ifdef LTTNG_USE_NMI_SAFE_CLOCK
static inline int get_trace_clock(void)
{
	printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic fast clock, which is NMI-safe.\n");
	return 0;
}
#else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
static inline int get_trace_clock(void)
{
	printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic clock. NMIs will not be traced.\n");
	return 0;
}
#endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */

static inline void put_trace_clock(void)
{
}

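/*
 * The accessors below dispatch to the override clock when one is registered
 * through lttng_trace_clock, and fall back to the monotonic definitions
 * above otherwise.
 */
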
static inline u64 trace_clock_read64(void)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	if (likely(!ltc)) {
		return trace_clock_read64_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->read64();
	}
}

static inline u64 trace_clock_freq(void)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_freq_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->freq();
	}
}

static inline int trace_clock_uuid(char *uuid)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	read_barrier_depends();	/* load ltc before content */
	/* Use default UUID cb when NULL */
	if (!ltc || !ltc->uuid) {
		return trace_clock_uuid_monotonic(uuid);
	} else {
		return ltc->uuid(uuid);
	}
}

static inline const char *trace_clock_name(void)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_name_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->name();
	}
}

static inline const char *trace_clock_description(void)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_description_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->description();
	}
}

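/*
 * Illustration only (a sketch, not part of the original header): what a
 * minimal override clock could look like, as implied by the callbacks
 * invoked above. The authoritative struct lttng_trace_clock definition and
 * the plugin registration entry points live in lttng-clock.h; the member
 * prototypes below are inferred solely from how they are called in this
 * file, and the my_* names are hypothetical.
 *
 *	static u64 my_read64(void)
 *	{
 *		return ktime_get_mono_fast_ns();
 *	}
 *
 *	static u64 my_freq(void)
 *	{
 *		return (u64) NSEC_PER_SEC;
 *	}
 *
 *	static int my_uuid(char *uuid)
 *	{
 *		return wrapper_get_bootid(uuid);
 *	}
 *
 *	static const char *my_name(void)
 *	{
 *		return "my_clock";
 *	}
 *
 *	static const char *my_description(void)
 *	{
 *		return "Example override clock";
 *	}
 *
 *	static struct lttng_trace_clock my_trace_clock = {
 *		.read64		= my_read64,
 *		.freq		= my_freq,
 *		.uuid		= my_uuid,
 *		.name		= my_name,
 *		.description	= my_description,
 *	};
 */
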
#endif /* CONFIG_HAVE_TRACE_CLOCK */

#endif /* _LTTNG_TRACE_CLOCK_H */