/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * wrapper/trace-clock.h
 *
 * Maps the LTTng trace clock to the kernel's LTTng trace clock when available
 * (CONFIG_HAVE_TRACE_CLOCK), or to the mainline monotonic clock otherwise.
 * This wrapper depends on CONFIG_HIGH_RES_TIMERS=y.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_TRACE_CLOCK_H
#define _LTTNG_TRACE_CLOCK_H

#ifdef CONFIG_HAVE_TRACE_CLOCK
#include <linux/trace-clock.h>
#else /* CONFIG_HAVE_TRACE_CLOCK */

#include <linux/hardirq.h>
#include <linux/ktime.h>
#include <linux/time.h>
#include <linux/hrtimer.h>
#include <linux/percpu.h>
#include <linux/version.h>
#include <linux/percpu-defs.h>
#include <asm/local.h>
#include <lttng-kernel-version.h>
#include <lttng-clock.h>
#include <wrapper/random.h>

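/*
 * lttng_trace_clock points to an optional clock plugin. When it is NULL
 * (the default), the trace_clock_*() accessors below fall back to the
 * monotonic implementations based on the mainline kernel clock. A plugin
 * module can override the trace clock by filling a struct lttng_trace_clock
 * with its read64/freq/uuid/name/description callbacks and registering it
 * through the API declared in lttng-clock.h. A minimal sketch, assuming the
 * lttng_clock_register_plugin()/lttng_clock_unregister_plugin() helpers:
 *
 *	static struct lttng_trace_clock my_tc = {
 *		.read64 = my_read64,
 *		.freq = my_freq,
 *		.uuid = my_uuid,
 *		.name = my_name,
 *		.description = my_description,
 *	};
 *
 *	ret = lttng_clock_register_plugin(&my_tc, THIS_MODULE);
 *	...
 *	lttng_clock_unregister_plugin(&my_tc, THIS_MODULE);
 */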
extern struct lttng_trace_clock *lttng_trace_clock;

/*
 * We need clock values to be monotonically increasing per-cpu, which is
 * not strictly guaranteed by ktime_get_mono_fast_ns(). It is
 * straightforward to do on architectures with a 64-bit cmpxchg(), but
 * not so on architectures without 64-bit cmpxchg. For now, only enable
 * this feature on 64-bit architectures.
 */

#if BITS_PER_LONG == 64
#define LTTNG_USE_NMI_SAFE_CLOCK
#endif

#ifdef LTTNG_USE_NMI_SAFE_CLOCK

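/*
 * Per-cpu copy of the last timestamp returned on that CPU, used by the
 * NMI-safe wrapper below to keep the clock monotonic per-cpu even if
 * ktime_get_mono_fast_ns() goes slightly backwards.
 */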
DECLARE_PER_CPU(u64, lttng_last_tsc);

/*
 * Sometimes called with preemption enabled. Can be interrupted.
 */
static inline u64 trace_clock_monotonic_wrapper(void)
{
	u64 now, last, result;
	u64 *last_tsc_ptr;

	/* Use fast nmi-safe monotonic clock provided by the Linux kernel. */
	preempt_disable();
	last_tsc_ptr = this_cpu_ptr(&lttng_last_tsc);
	last = *last_tsc_ptr;
	/*
	 * Read "last" before "now". It is not strictly required, but it ensures
	 * that an interrupt coming in won't artificially trigger a case where
	 * "now" < "last". This kind of situation should only happen if the
	 * mono_fast time source goes slightly backwards.
	 */
	barrier();
	now = ktime_get_mono_fast_ns();
	if (U64_MAX / 2 < now - last)
		now = last;
	result = cmpxchg64_local(last_tsc_ptr, last, now);
	preempt_enable();
	if (result == last) {
		/* Update done. */
		return now;
	} else {
		/*
		 * Update not done, due to concurrent update. We can use
		 * "result", since it has been sampled concurrently with our
		 * time read, so it should not be far from "now".
		 */
		return result;
	}
}

#else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
static inline u64 trace_clock_monotonic_wrapper(void)
{
	ktime_t ktime;

	/*
	 * Refuse to trace from NMIs with this wrapper, because an NMI could
	 * nest over the xtime write seqlock and deadlock.
	 */
	if (in_nmi())
		return (u64) -EIO;

	ktime = ktime_get();
	return ktime_to_ns(ktime);
}
#endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */

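/*
 * Default clock callbacks, used when no clock plugin is registered: events
 * are timestamped with the kernel monotonic clock expressed in nanoseconds
 * (hence the 1 GHz reported frequency), and the clock is identified by the
 * kernel boot ID.
 */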
static inline u64 trace_clock_read64_monotonic(void)
{
	return (u64) trace_clock_monotonic_wrapper();
}

static inline u64 trace_clock_freq_monotonic(void)
{
	return (u64) NSEC_PER_SEC;
}

static inline int trace_clock_uuid_monotonic(char *uuid)
{
	return wrapper_get_bootid(uuid);
}

static inline const char *trace_clock_name_monotonic(void)
{
	return "monotonic";
}

static inline const char *trace_clock_description_monotonic(void)
{
	return "Monotonic Clock";
}

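/*
 * get_trace_clock()/put_trace_clock() are called by the tracer around its use
 * of the clock. With the mainline monotonic clock there is nothing to set up
 * or tear down; get_trace_clock() only prints a one-time informational
 * message describing which clock is in use.
 */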
#ifdef LTTNG_USE_NMI_SAFE_CLOCK
static inline int get_trace_clock(void)
{
	printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic fast clock, which is NMI-safe.\n");
	return 0;
}
#else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
static inline int get_trace_clock(void)
{
	printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic clock. NMIs will not be traced.\n");
	return 0;
}
#endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */

static inline void put_trace_clock(void)
{
}

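/*
 * Accessors used by the tracer. Each one dispatches to the registered clock
 * plugin when lttng_trace_clock is non-NULL, and to the monotonic defaults
 * otherwise. The plugin pointer is loaded once with READ_ONCE() and the
 * dependency barrier orders that load before the loads of its callbacks.
 */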
static inline u64 trace_clock_read64(void)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	if (likely(!ltc)) {
		return trace_clock_read64_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->read64();
	}
}

static inline u64 trace_clock_freq(void)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_freq_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->freq();
	}
}

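/*
 * The UUID identifies the clock source so that traces taken with the same
 * clock can be correlated; the monotonic default reuses the kernel boot ID.
 * The plugin ->uuid callback is optional: when it is NULL, the default is
 * used.
 */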
static inline int trace_clock_uuid(char *uuid)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	read_barrier_depends();	/* load ltc before content */
	/* Use default UUID cb when NULL */
	if (!ltc || !ltc->uuid) {
		return trace_clock_uuid_monotonic(uuid);
	} else {
		return ltc->uuid(uuid);
	}
}

static inline const char *trace_clock_name(void)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_name_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->name();
	}
}

static inline const char *trace_clock_description(void)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_description_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->description();
	}
}

#endif /* CONFIG_HAVE_TRACE_CLOCK */

#endif /* _LTTNG_TRACE_CLOCK_H */