X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=wrapper%2Ftrace-clock.h;h=9f4e366afbcbeecc89302a2cb205b41b3e9012e5;hb=9153ad231edfa178b83bb4c81891b7b812158b08;hp=801e640eda894346554cb6fa2fa9e9324de6b256;hpb=a82c63f10514ca43282498ea4fdee3908576528a;p=lttng-modules.git

diff --git a/wrapper/trace-clock.h b/wrapper/trace-clock.h
index 801e640e..9f4e366a 100644
--- a/wrapper/trace-clock.h
+++ b/wrapper/trace-clock.h
@@ -1,10 +1,11 @@
-/*
- * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
+/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
+ *
+ * wrapper/trace-clock.h
  *
  * Contains LTTng trace clock mapping to LTTng trace clock or mainline monotonic
  * clock. This wrapper depends on CONFIG_HIGH_RES_TIMERS=y.
  *
- * Dual LGPL v2.1/GPL v2 license.
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  */
 
 #ifndef _LTTNG_TRACE_CLOCK_H
@@ -18,8 +19,86 @@
 #include <linux/ktime.h>
 #include <linux/time.h>
 #include <linux/hrtimer.h>
-#include "random.h"
+#include <linux/percpu.h>
+#include <linux/version.h>
+#include <asm/local.h>
+#include <lttng-kernel-version.h>
+#include <lttng-clock.h>
+#include <wrapper/compiler.h>
+#include <wrapper/percpu-defs.h>
+#include <wrapper/random.h>
+
+extern struct lttng_trace_clock *lttng_trace_clock;
+
+/*
+ * Upstream Linux commit 27727df240c7 ("Avoid taking lock in NMI path with
+ * CONFIG_DEBUG_TIMEKEEPING") introduces a buggy ktime_get_mono_fast_ns().
+ * This is fixed by patch "timekeeping: Fix __ktime_get_fast_ns() regression".
+ */
+#if (LTTNG_KERNEL_RANGE(4,8,0, 4,8,2) \
+	|| LTTNG_KERNEL_RANGE(4,7,4, 4,7,8) \
+	|| LTTNG_KERNEL_RANGE(4,4,20, 4,4,25) \
+	|| LTTNG_KERNEL_RANGE(4,1,32, 4,1,35))
+#define LTTNG_CLOCK_NMI_SAFE_BROKEN
+#endif
+
+/*
+ * We need clock values to be monotonically increasing per-cpu, which is
+ * not strictly guaranteed by ktime_get_mono_fast_ns(). It is
+ * straightforward to do on architectures with a 64-bit cmpxchg(), but
+ * not so on architectures without 64-bit cmpxchg. For now, only enable
+ * this feature on 64-bit architectures.
+ */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) \
+	&& BITS_PER_LONG == 64 \
+	&& !defined(LTTNG_CLOCK_NMI_SAFE_BROKEN))
+#define LTTNG_USE_NMI_SAFE_CLOCK
+#endif
+
+#ifdef LTTNG_USE_NMI_SAFE_CLOCK
+
+DECLARE_PER_CPU(u64, lttng_last_tsc);
+
+/*
+ * Sometimes called with preemption enabled. Can be interrupted.
+ */
+static inline u64 trace_clock_monotonic_wrapper(void)
+{
+	u64 now, last, result;
+	u64 *last_tsc_ptr;
+
+	/* Use fast nmi-safe monotonic clock provided by the Linux kernel. */
+	preempt_disable();
+	last_tsc_ptr = lttng_this_cpu_ptr(&lttng_last_tsc);
+	last = *last_tsc_ptr;
+	/*
+	 * Read "last" before "now". It is not strictly required, but it ensures
+	 * that an interrupt coming in won't artificially trigger a case where
+	 * "now" < "last". This kind of situation should only happen if the
+	 * mono_fast time source goes slightly backwards.
+	 */
+	barrier();
+	now = ktime_get_mono_fast_ns();
+	if (U64_MAX / 2 < now - last)
+		now = last;
+	result = cmpxchg64_local(last_tsc_ptr, last, now);
+	preempt_enable();
+	if (result == last) {
+		/* Update done. */
+		return now;
+	} else {
+		/*
+		 * Update not done, due to concurrent update. We can use
+		 * "result", since it has been sampled concurrently with our
+		 * time read, so it should not be far from "now".
+		 */
+		return result;
+	}
+}
+
+#else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
 
 static inline u64 trace_clock_monotonic_wrapper(void)
 {
 	ktime_t ktime;
@@ -32,49 +111,112 @@ static inline u64 trace_clock_monotonic_wrapper(void)
 		return (u64) -EIO;
 
 	ktime = ktime_get();
-	return (u64) ktime.tv64;
+	return ktime_to_ns(ktime);
 }
+#endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
 
-static inline u32 trace_clock_read32(void)
+static inline u64 trace_clock_read64_monotonic(void)
+{
+	return (u64) trace_clock_monotonic_wrapper();
+}
+
+static inline u64 trace_clock_freq_monotonic(void)
+{
+	return (u64) NSEC_PER_SEC;
+}
+
+static inline int trace_clock_uuid_monotonic(char *uuid)
+{
+	return wrapper_get_bootid(uuid);
+}
+
+static inline const char *trace_clock_name_monotonic(void)
+{
+	return "monotonic";
+}
+
+static inline const char *trace_clock_description_monotonic(void)
+{
+	return "Monotonic Clock";
+}
+
+#ifdef LTTNG_USE_NMI_SAFE_CLOCK
+static inline int get_trace_clock(void)
+{
+	printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic fast clock, which is NMI-safe.\n");
+	return 0;
+}
+#else /* #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
+static inline int get_trace_clock(void)
+{
+	printk_once(KERN_WARNING "LTTng: Using mainline kernel monotonic clock. NMIs will not be traced.\n");
+	return 0;
+}
+#endif /* #else #ifdef LTTNG_USE_NMI_SAFE_CLOCK */
+
+static inline void put_trace_clock(void)
 {
-	return (u32) trace_clock_monotonic_wrapper();
 }
 
 static inline u64 trace_clock_read64(void)
 {
-	return (u64) trace_clock_monotonic_wrapper();
+	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
+
+	if (likely(!ltc)) {
+		return trace_clock_read64_monotonic();
+	} else {
+		read_barrier_depends();	/* load ltc before content */
+		return ltc->read64();
+	}
 }
 
 static inline u64 trace_clock_freq(void)
 {
-	return (u64) NSEC_PER_SEC;
+	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
+
+	if (!ltc) {
+		return trace_clock_freq_monotonic();
+	} else {
+		read_barrier_depends();	/* load ltc before content */
+		return ltc->freq();
+	}
 }
 
 static inline int trace_clock_uuid(char *uuid)
 {
-	return wrapper_get_bootid(uuid);
+	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
+
+	read_barrier_depends();	/* load ltc before content */
+	/* Use default UUID cb when NULL */
+	if (!ltc || !ltc->uuid) {
+		return trace_clock_uuid_monotonic(uuid);
+	} else {
+		return ltc->uuid(uuid);
+	}
 }
 
-static inline int get_trace_clock(void)
+static inline const char *trace_clock_name(void)
 {
-	/*
-	 * LTTng: Using mainline kernel monotonic clock. NMIs will not be
-	 * traced, and expect significant performance degradation compared to
-	 * the LTTng trace clocks. Integration of the LTTng 0.x trace clocks
-	 * into LTTng 2.0 is planned in a near future.
-	 */
-	printk(KERN_WARNING "LTTng: Using mainline kernel monotonic clock.\n");
-	printk(KERN_WARNING " * NMIs will not be traced,\n");
-	printk(KERN_WARNING " * expect significant performance degradation compared to the\n");
-	printk(KERN_WARNING "   LTTng trace clocks.\n");
-	printk(KERN_WARNING "Integration of the LTTng 0.x trace clocks into LTTng 2.0 is planned\n");
-	printk(KERN_WARNING "in a near future.\n");
+	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
 
-	return 0;
+	if (!ltc) {
+		return trace_clock_name_monotonic();
+	} else {
+		read_barrier_depends();	/* load ltc before content */
+		return ltc->name();
+	}
 }
 
-static inline void put_trace_clock(void)
+static inline const char *trace_clock_description(void)
 {
+	struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock);
+
+	if (!ltc) {
+		return trace_clock_description_monotonic();
+	} else {
+		read_barrier_depends();	/* load ltc before content */
+		return ltc->description();
+	}
 }
 
 #endif /* CONFIG_HAVE_TRACE_CLOCK */
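
Note on the lttng_trace_clock indirection introduced above: trace_clock_read64(), trace_clock_freq(), trace_clock_uuid(), trace_clock_name() and trace_clock_description() dispatch to the callbacks of an externally provided clock when the lttng_trace_clock pointer is non-NULL, and fall back to the *_monotonic() helpers otherwise. The sketch below shows what a clock plugin module consuming this mechanism could look like. It is only a sketch: the struct lttng_trace_clock field names come from the dispatch code in this diff, but the registration helpers lttng_clock_register_plugin()/lttng_clock_unregister_plugin() are assumed from lttng-clock.h, and every plugin_* callback and example value is hypothetical.

/*
 * Sketch of a clock plugin module (not part of this diff). Assumes the
 * registration API declared in lttng-clock.h; the plugin_* symbols and
 * the values they return are illustrative only.
 */
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/string.h>
#include <lttng-clock.h>

static u64 plugin_read64(void)
{
	/* Example time source: raw monotonic nanoseconds (not NMI-safe). */
	return ktime_get_raw_ns();
}

static u64 plugin_freq(void)
{
	return NSEC_PER_SEC;	/* clock ticks once per nanosecond */
}

static int plugin_uuid(char *uuid)
{
	const char example[] = "83c63deb-7aa4-48fb-abda-946f400d76e6";

	memcpy(uuid, example, sizeof(example));	/* 36 characters + '\0' */
	return 0;
}

static const char *plugin_name(void)
{
	return "example_clock";
}

static const char *plugin_description(void)
{
	return "Example trace clock plugin";
}

static struct lttng_trace_clock ltc = {
	.read64 = plugin_read64,
	.freq = plugin_freq,
	.uuid = plugin_uuid,
	.name = plugin_name,
	.description = plugin_description,
};

static int __init lttng_clock_plugin_init(void)
{
	return lttng_clock_register_plugin(&ltc, THIS_MODULE);
}
module_init(lttng_clock_plugin_init);

static void __exit lttng_clock_plugin_exit(void)
{
	lttng_clock_unregister_plugin(&ltc, THIS_MODULE);
}
module_exit(lttng_clock_plugin_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_DESCRIPTION("Example LTTng clock plugin (sketch)");

Once such a structure is published, the read_barrier_depends() calls in the readers above order the load of the lttng_trace_clock pointer before the loads of the callbacks it points to, which is what lets the clock be overridden at run time without tearing.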