wrapper: trace-clock: remove kernel version blacklist
[lttng-modules.git] / wrapper/trace-clock.h

/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * wrapper/trace-clock.h
 *
 * Contains LTTng trace clock mapping to LTTng trace clock or mainline monotonic
 * clock. This wrapper depends on CONFIG_HIGH_RES_TIMERS=y.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_TRACE_CLOCK_H
#define _LTTNG_TRACE_CLOCK_H

#ifdef CONFIG_HAVE_TRACE_CLOCK
#include <linux/trace-clock.h>
#else /* CONFIG_HAVE_TRACE_CLOCK */

#include <linux/hardirq.h>
#include <linux/ktime.h>
#include <linux/time.h>
#include <linux/hrtimer.h>
#include <linux/percpu.h>
#include <linux/version.h>
#include <linux/percpu-defs.h>
#include <asm/local.h>
#include <lttng-kernel-version.h>
#include <lttng-clock.h>
#include <wrapper/compiler.h>
#include <wrapper/random.h>

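/*
 * Clock plugin hook: when a clock plugin registers itself through
 * lttng-clock, this pointer becomes non-NULL and the trace_clock_*()
 * dispatchers below use the plugin callbacks instead of the built-in
 * monotonic clock.
 */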
extern struct lttng_trace_clock *lttng_trace_clock;

/*
 * We need clock values to be monotonically increasing per-cpu, which is
 * not strictly guaranteed by ktime_get_mono_fast_ns(). It is
 * straightforward to do on architectures with a 64-bit cmpxchg(), but
 * not so on architectures without 64-bit cmpxchg. For now, only enable
 * this feature on 64-bit architectures.
 */

#if BITS_PER_LONG == 64
#define LTTNG_USE_NMI_SAFE_CLOCK
#endif

#ifdef LTTNG_USE_NMI_SAFE_CLOCK

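/* Per-cpu copy of the last timestamp returned, used to enforce per-cpu monotonicity. */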
DECLARE_PER_CPU(u64, lttng_last_tsc);

/*
 * Sometimes called with preemption enabled. Can be interrupted.
 */
static inline u64 trace_clock_monotonic_wrapper(void)
{
	u64 now, last, result;
	u64 *last_tsc_ptr;

	/* Use fast nmi-safe monotonic clock provided by the Linux kernel. */
	preempt_disable();
	last_tsc_ptr = this_cpu_ptr(&lttng_last_tsc);
	last = *last_tsc_ptr;
	/*
	 * Read "last" before "now". It is not strictly required, but it ensures
	 * that an interrupt coming in won't artificially trigger a case where
	 * "now" < "last". This kind of situation should only happen if the
	 * mono_fast time source goes slightly backwards.
	 */
	barrier();
	now = ktime_get_mono_fast_ns();
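	/*
	 * If the fast clock went backwards compared to the per-cpu last
	 * value, the unsigned difference "now - last" wraps into the upper
	 * half of the u64 range; in that case keep "last" so the per-cpu
	 * clock never moves backwards.
	 */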
	if (U64_MAX / 2 < now - last)
		now = last;
	result = cmpxchg64_local(last_tsc_ptr, last, now);
	preempt_enable();
	if (result == last) {
		/* Update done. */
		return now;
	} else {
		/*
		 * Update not done, due to concurrent update. We can use
		 * "result", since it has been sampled concurrently with our
		 * time read, so it should not be far from "now".
		 */
		return result;
	}
}

#else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
static inline u64 trace_clock_monotonic_wrapper(void)
{
	ktime_t ktime;

	/*
	 * Refuse to trace from NMIs with this wrapper, because an NMI could
	 * nest over the xtime write seqlock and deadlock.
	 */
	if (in_nmi())
		return (u64) -EIO;

	ktime = ktime_get();
	return ktime_to_ns(ktime);
}
#endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */

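/*
 * Default clock callbacks, used when no clock plugin is registered:
 * mainline monotonic clock, in nanoseconds, identified by the kernel
 * boot id.
 */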
static inline u64 trace_clock_read64_monotonic(void)
{
	return (u64) trace_clock_monotonic_wrapper();
}

static inline u64 trace_clock_freq_monotonic(void)
{
	return (u64) NSEC_PER_SEC;
}

static inline int trace_clock_uuid_monotonic(char *uuid)
{
	return wrapper_get_bootid(uuid);
}

static inline const char *trace_clock_name_monotonic(void)
{
	return "monotonic";
}

static inline const char *trace_clock_description_monotonic(void)
{
	return "Monotonic Clock";
}

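/*
 * The built-in monotonic clock needs no setup or reference counting:
 * get_trace_clock() only warns once about which clock source is in use,
 * and put_trace_clock() is a no-op.
 */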
#ifdef LTTNG_USE_NMI_SAFE_CLOCK
static inline int get_trace_clock(void)
{
	printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic fast clock, which is NMI-safe.\n");
	return 0;
}
#else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
static inline int get_trace_clock(void)
{
	printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic clock. NMIs will not be traced.\n");
	return 0;
}
#endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */

static inline void put_trace_clock(void)
{
}

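/*
 * Dispatchers: use the registered clock plugin when lttng_trace_clock is
 * non-NULL, otherwise fall back on the monotonic callbacks above. The
 * dependency barrier orders the pointer load before the loads of the
 * callback members it points to.
 */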
static inline u64 trace_clock_read64(void)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	if (likely(!ltc)) {
		return trace_clock_read64_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->read64();
	}
}

static inline u64 trace_clock_freq(void)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_freq_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->freq();
	}
}

static inline int trace_clock_uuid(char *uuid)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	read_barrier_depends();	/* load ltc before content */
	/* Use default UUID cb when NULL */
	if (!ltc || !ltc->uuid) {
		return trace_clock_uuid_monotonic(uuid);
	} else {
		return ltc->uuid(uuid);
	}
}

static inline const char *trace_clock_name(void)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_name_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->name();
	}
}

static inline const char *trace_clock_description(void)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_description_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->description();
	}
}

#endif /* CONFIG_HAVE_TRACE_CLOCK */

#endif /* _LTTNG_TRACE_CLOCK_H */