wrapper: trace-clock: remove compatibility code
/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * wrapper/trace-clock.h
 *
 * Contains LTTng trace clock mapping to LTTng trace clock or mainline monotonic
 * clock. This wrapper depends on CONFIG_HIGH_RES_TIMERS=y.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_TRACE_CLOCK_H
#define _LTTNG_TRACE_CLOCK_H

#ifdef CONFIG_HAVE_TRACE_CLOCK
#include <linux/trace-clock.h>
#else /* CONFIG_HAVE_TRACE_CLOCK */

#include <linux/hardirq.h>
#include <linux/ktime.h>
#include <linux/time.h>
#include <linux/hrtimer.h>
#include <linux/percpu.h>
#include <linux/version.h>
#include <linux/percpu-defs.h>
#include <asm/local.h>
#include <lttng-kernel-version.h>
#include <lttng-clock.h>
#include <wrapper/compiler.h>
#include <wrapper/random.h>
#include <blacklist/timekeeping.h>

extern struct lttng_trace_clock *lttng_trace_clock;

/*
 * We need clock values to be monotonically increasing per-cpu, which is
 * not strictly guaranteed by ktime_get_mono_fast_ns(). It is
 * straightforward to do on architectures with a 64-bit cmpxchg(), but
 * not so on architectures without 64-bit cmpxchg. For now, only enable
 * this feature on 64-bit architectures.
 */

#if BITS_PER_LONG == 64
#define LTTNG_USE_NMI_SAFE_CLOCK
#endif

#ifdef LTTNG_USE_NMI_SAFE_CLOCK

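/*
 * Per-CPU snapshot of the last timestamp returned on each CPU, used below
 * to enforce per-cpu monotonicity of the fast monotonic clock.
 */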
DECLARE_PER_CPU(u64, lttng_last_tsc);

/*
 * Sometimes called with preemption enabled. Can be interrupted.
 */
static inline u64 trace_clock_monotonic_wrapper(void)
{
	u64 now, last, result;
	u64 *last_tsc_ptr;

	/* Use fast nmi-safe monotonic clock provided by the Linux kernel. */
	preempt_disable();
	last_tsc_ptr = this_cpu_ptr(&lttng_last_tsc);
	last = *last_tsc_ptr;
	/*
	 * Read "last" before "now". It is not strictly required, but it ensures
	 * that an interrupt coming in won't artificially trigger a case where
	 * "now" < "last". This kind of situation should only happen if the
	 * mono_fast time source goes slightly backwards.
	 */
	barrier();
	now = ktime_get_mono_fast_ns();
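	/*
	 * A backwards-going mono_fast read makes "now - last" underflow to a
	 * huge unsigned value; treat any delta above U64_MAX / 2 as "time
	 * went backwards" and keep the previous per-cpu value.
	 */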
	if (U64_MAX / 2 < now - last)
		now = last;
	result = cmpxchg64_local(last_tsc_ptr, last, now);
	preempt_enable();
	if (result == last) {
		/* Update done. */
		return now;
	} else {
		/*
		 * Update not done, due to concurrent update. We can use
		 * "result", since it has been sampled concurrently with our
		 * time read, so it should not be far from "now".
		 */
		return result;
	}
}

#else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
static inline u64 trace_clock_monotonic_wrapper(void)
{
	ktime_t ktime;

	/*
	 * Refuse to trace from NMIs with this wrapper, because an NMI could
	 * nest over the xtime write seqlock and deadlock.
	 */
	if (in_nmi())
		return (u64) -EIO;

	ktime = ktime_get();
	return ktime_to_ns(ktime);
}
#endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */

static inline u64 trace_clock_read64_monotonic(void)
{
	return (u64) trace_clock_monotonic_wrapper();
}

static inline u64 trace_clock_freq_monotonic(void)
{
	return (u64) NSEC_PER_SEC;
}

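/* The monotonic clock UUID is the kernel boot_id, so it changes on every boot. */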
static inline int trace_clock_uuid_monotonic(char *uuid)
{
	return wrapper_get_bootid(uuid);
}

static inline const char *trace_clock_name_monotonic(void)
{
	return "monotonic";
}

static inline const char *trace_clock_description_monotonic(void)
{
	return "Monotonic Clock";
}

#ifdef LTTNG_USE_NMI_SAFE_CLOCK
static inline int get_trace_clock(void)
{
	printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic fast clock, which is NMI-safe.\n");
	return 0;
}
#else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
static inline int get_trace_clock(void)
{
	printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic clock. NMIs will not be traced.\n");
	return 0;
}
#endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */

static inline void put_trace_clock(void)
{
}

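/*
 * The functions below dispatch to a clock plugin when one is registered
 * (lttng_trace_clock non-NULL), and fall back to the mainline monotonic
 * clock otherwise.
 */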
static inline u64 trace_clock_read64(void)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	if (likely(!ltc)) {
		return trace_clock_read64_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->read64();
	}
}

static inline u64 trace_clock_freq(void)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_freq_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->freq();
	}
}

static inline int trace_clock_uuid(char *uuid)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	read_barrier_depends();	/* load ltc before content */
	/* Use default UUID cb when NULL */
	if (!ltc || !ltc->uuid) {
		return trace_clock_uuid_monotonic(uuid);
	} else {
		return ltc->uuid(uuid);
	}
}

static inline const char *trace_clock_name(void)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_name_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->name();
	}
}

static inline const char *trace_clock_description(void)
{
	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);

	if (!ltc) {
		return trace_clock_description_monotonic();
	} else {
		read_barrier_depends();	/* load ltc before content */
		return ltc->description();
	}
}

#endif /* CONFIG_HAVE_TRACE_CLOCK */

#endif /* _LTTNG_TRACE_CLOCK_H */
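
For context on the dispatch functions above: a clock plugin can override the default monotonic clock by providing a struct lttng_trace_clock whose five callbacks (read64, freq, uuid, name, description) this header calls through the lttng_trace_clock pointer. The sketch below is a minimal, hypothetical plugin module, not code from this repository: it assumes the lttng_clock_register_plugin() / lttng_clock_unregister_plugin() helpers declared in lttng-clock.h as the registration API, and its callbacks simply wrap the mainline monotonic clock with a made-up UUID.

/*
 * Minimal, hypothetical clock plugin sketch (illustration only).
 * Assumes lttng_clock_register_plugin()/lttng_clock_unregister_plugin()
 * from lttng-clock.h as the registration API.
 */
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/timekeeping.h>
#include <linux/string.h>

#include <lttng-clock.h>

static u64 example_read64(void)
{
	/* Same time source as the default wrapper, for illustration. */
	return ktime_get_mono_fast_ns();
}

static u64 example_freq(void)
{
	return NSEC_PER_SEC;	/* values are in nanoseconds, i.e. 1 GHz */
}

static int example_uuid(char *uuid)
{
	/* Made-up, fixed UUID; a real plugin derives a stable identifier. */
	strcpy(uuid, "83c63deb-7aa4-48fb-abda-946f400d76e6");
	return 0;
}

static const char *example_name(void)
{
	return "example_clock";
}

static const char *example_description(void)
{
	return "Example clock plugin";
}

static struct lttng_trace_clock example_ltc = {
	.read64 = example_read64,
	.freq = example_freq,
	.uuid = example_uuid,
	.name = example_name,
	.description = example_description,
};

static int __init example_clock_init(void)
{
	return lttng_clock_register_plugin(&example_ltc, THIS_MODULE);
}
module_init(example_clock_init);

static void __exit example_clock_exit(void)
{
	lttng_clock_unregister_plugin(&example_ltc, THIS_MODULE);
}
module_exit(example_clock_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hypothetical LTTng clock plugin sketch");

Once such a plugin is loaded, trace_clock_read64() and the other dispatchers above pick it up through the READ_ONCE(lttng_trace_clock) load; unloading it restores the monotonic defaults.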