Drop support for kernels < 4.4 from 'wrapper/trace-clock.h'
/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * wrapper/trace-clock.h
 *
 * Contains LTTng trace clock mapping to LTTng trace clock or mainline monotonic
 * clock. This wrapper depends on CONFIG_HIGH_RES_TIMERS=y.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_TRACE_CLOCK_H
#define _LTTNG_TRACE_CLOCK_H

#ifdef CONFIG_HAVE_TRACE_CLOCK
#include <linux/trace-clock.h>
#else /* CONFIG_HAVE_TRACE_CLOCK */

#include <linux/hardirq.h>
#include <linux/ktime.h>
#include <linux/time.h>
#include <linux/hrtimer.h>
#include <linux/percpu.h>
#include <linux/percpu-defs.h>

#include <lttng/kernel-version.h>
#include <asm/local.h>
#include <lttng/clock.h>
#include <wrapper/compiler.h>
#include <wrapper/random.h>

extern struct lttng_trace_clock *lttng_trace_clock;

/*
 * Upstream Linux commit 27727df240c7 ("Avoid taking lock in NMI path with
 * CONFIG_DEBUG_TIMEKEEPING") introduces a buggy ktime_get_mono_fast_ns().
 * This is fixed by patch "timekeeping: Fix __ktime_get_fast_ns() regression".
 */
#if (LTTNG_KERNEL_RANGE(4,8,0, 4,8,2) \
	|| LTTNG_KERNEL_RANGE(4,7,4, 4,7,8) \
	|| LTTNG_KERNEL_RANGE(4,4,20, 4,4,25))
#define LTTNG_CLOCK_NMI_SAFE_BROKEN
#endif
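
/*
 * For reference, each LTTNG_KERNEL_RANGE(a,b,c, d,e,f) check above is
 * expected to expand to a half-open version comparison roughly like the
 * sketch below (assuming the definition in lttng/kernel-version.h; verify
 * against that header):
 *
 *	LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(a,b,c) &&
 *	LTTNG_LINUX_VERSION_CODE < LTTNG_KERNEL_VERSION(d,e,f)
 *
 * so only kernels inside the affected [a.b.c, d.e.f) ranges get the
 * LTTNG_CLOCK_NMI_SAFE_BROKEN workaround.
 */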

/*
 * We need clock values to be monotonically increasing per-cpu, which is
 * not strictly guaranteed by ktime_get_mono_fast_ns(). It is
 * straightforward to do on architectures with a 64-bit cmpxchg(), but
 * not so on architectures without 64-bit cmpxchg. For now, only enable
 * this feature on 64-bit architectures.
 */

#if (BITS_PER_LONG == 64 && !defined(LTTNG_CLOCK_NMI_SAFE_BROKEN))
#define LTTNG_USE_NMI_SAFE_CLOCK
#endif

#ifdef LTTNG_USE_NMI_SAFE_CLOCK

DECLARE_PER_CPU(u64, lttng_last_tsc);

/*
 * Sometimes called with preemption enabled. Can be interrupted.
 */
static inline u64 trace_clock_monotonic_wrapper(void)
{
	u64 now, last, result;
	u64 *last_tsc_ptr;

	/* Use fast nmi-safe monotonic clock provided by the Linux kernel. */
	preempt_disable();
	last_tsc_ptr = this_cpu_ptr(&lttng_last_tsc);
	last = *last_tsc_ptr;
	/*
	 * Read "last" before "now". It is not strictly required, but it ensures
	 * that an interrupt coming in won't artificially trigger a case where
	 * "now" < "last". This kind of situation should only happen if the
	 * mono_fast time source goes slightly backwards.
	 */
	barrier();
	now = ktime_get_mono_fast_ns();
	/*
	 * If "now" went backwards from "last" (the unsigned difference wraps
	 * above U64_MAX / 2), keep the previous per-cpu value so the clock
	 * stays monotonic on this cpu.
	 */
	if (U64_MAX / 2 < now - last)
		now = last;
	result = cmpxchg64_local(last_tsc_ptr, last, now);
	preempt_enable();
	if (result == last) {
		/* Update done. */
		return now;
	} else {
		/*
		 * Update not done, due to concurrent update. We can use
		 * "result", since it has been sampled concurrently with our
		 * time read, so it should not be far from "now".
		 */
		return result;
	}
}

#else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
static inline u64 trace_clock_monotonic_wrapper(void)
{
	ktime_t ktime;

	/*
	 * Refuse to trace from NMIs with this wrapper, because an NMI could
	 * nest over the xtime write seqlock and deadlock.
	 */
	if (in_nmi())
		return (u64) -EIO;

	ktime = ktime_get();
	return ktime_to_ns(ktime);
}
#endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */

static inline u64 trace_clock_read64_monotonic(void)
{
	return (u64) trace_clock_monotonic_wrapper();
}

static inline u64 trace_clock_freq_monotonic(void)
{
	return (u64) NSEC_PER_SEC;
}

static inline int trace_clock_uuid_monotonic(char *uuid)
{
	return wrapper_get_bootid(uuid);
}

static inline const char *trace_clock_name_monotonic(void)
{
	return "monotonic";
}

static inline const char *trace_clock_description_monotonic(void)
{
	return "Monotonic Clock";
}

#ifdef LTTNG_USE_NMI_SAFE_CLOCK
static inline int get_trace_clock(void)
{
	printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic fast clock, which is NMI-safe.\n");
	return 0;
}
#else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
static inline int get_trace_clock(void)
{
	printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic clock. NMIs will not be traced.\n");
	return 0;
}
#endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */

static inline void put_trace_clock(void)
{
}

static inline u64 trace_clock_read64(void)
{
	struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);

	if (likely(!ltc)) {
		return trace_clock_read64_monotonic();
	} else {
		return ltc->read64();
	}
}

static inline u64 trace_clock_freq(void)
{
	struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_freq_monotonic();
	} else {
		return ltc->freq();
	}
}

static inline int trace_clock_uuid(char *uuid)
{
	struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);

	/* Use the default UUID callback when the plugin does not provide one. */
	if (!ltc || !ltc->uuid) {
		return trace_clock_uuid_monotonic(uuid);
	} else {
		return ltc->uuid(uuid);
	}
}

static inline const char *trace_clock_name(void)
{
	struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_name_monotonic();
	} else {
		return ltc->name();
	}
}

static inline const char *trace_clock_description(void)
{
	struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_description_monotonic();
	} else {
		return ltc->description();
	}
}
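
/*
 * Illustrative sketch (not part of this header): the accessors above fall
 * back to the monotonic clock only while lttng_trace_clock is NULL. A clock
 * plugin module overrides them by filling a struct lttng_trace_clock and
 * registering it. The registration helpers named below are assumed to be the
 * ones declared in lttng/clock.h; check that header for the exact API.
 */
#if 0	/* example only, never compiled */
static u64 example_clock_read64(void)
{
	/* Any monotonic, per-cpu-consistent time source works here. */
	return ktime_get_raw_ns();
}

static u64 example_clock_freq(void)
{
	return NSEC_PER_SEC;	/* nanosecond resolution */
}

static const char *example_clock_name(void)
{
	return "example_clock";
}

static const char *example_clock_description(void)
{
	return "Example plugin clock";
}

static struct lttng_trace_clock example_clock = {
	.read64 = example_clock_read64,
	.freq = example_clock_freq,
	.uuid = NULL,	/* NULL: trace_clock_uuid() falls back to the boot id */
	.name = example_clock_name,
	.description = example_clock_description,
};

/*
 * From the plugin module init/exit handlers (assumed API):
 *	lttng_clock_register_plugin(&example_clock, THIS_MODULE);
 *	lttng_clock_unregister_plugin(&example_clock, THIS_MODULE);
 */
#endif	/* example */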

#endif /* CONFIG_HAVE_TRACE_CLOCK */

#endif /* _LTTNG_TRACE_CLOCK_H */