#include <linux/hrtimer.h>
#include <linux/percpu.h>
#include <linux/version.h>
+#include <linux/percpu-defs.h>
#include <asm/local.h>
#include <lttng-kernel-version.h>
#include <lttng-clock.h>
-#include <wrapper/compiler.h>
-#include <wrapper/percpu-defs.h>
#include <wrapper/random.h>
-#include <blacklist/timekeeping.h>
extern struct lttng_trace_clock *lttng_trace_clock;
-/*
- * Upstream Linux commit 27727df240c7 ("Avoid taking lock in NMI path with
- * CONFIG_DEBUG_TIMEKEEPING") introduces a buggy ktime_get_mono_fast_ns().
- * This is fixed by patch "timekeeping: Fix __ktime_get_fast_ns() regression".
- */
-#if (LTTNG_KERNEL_RANGE(4,8,0, 4,8,2) \
- || LTTNG_KERNEL_RANGE(4,7,4, 4,7,8) \
- || LTTNG_KERNEL_RANGE(4,4,20, 4,4,25) \
- || LTTNG_KERNEL_RANGE(4,1,32, 4,1,35))
-#define LTTNG_CLOCK_NMI_SAFE_BROKEN
-#endif
-
/*
* We need clock values to be monotonically increasing per-cpu, which is
 * not strictly guaranteed by ktime_get_mono_fast_ns(). It is
 * straightforward to do on architectures with a 64-bit cmpxchg(), but
 * not so on architectures without 64-bit cmpxchg. For now, only enable
 * this feature on 64-bit architectures.
*/
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) \
- && BITS_PER_LONG == 64 \
- && !defined(LTTNG_CLOCK_NMI_SAFE_BROKEN))
+#if BITS_PER_LONG == 64
#define LTTNG_USE_NMI_SAFE_CLOCK
#endif
/* Use fast nmi-safe monotonic clock provided by the Linux kernel. */
preempt_disable();
- last_tsc_ptr = lttng_this_cpu_ptr(&lttng_last_tsc);
+ last_tsc_ptr = this_cpu_ptr(&lttng_last_tsc);
last = *last_tsc_ptr;
/*
* Read "last" before "now". It is not strictly required, but it ensures