-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rcu
#if !defined(LTTNG_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_RCU_H
-#include <probes/lttng-tracepoint-event.h>
+#include <lttng/tracepoint-event.h>
#include <linux/version.h>
/*
* and "cpuofl", respectively), and a CPU being kicked for being too
* long in dyntick-idle mode ("kick").
*/
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) || \
+ LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
LTTNG_TRACEPOINT_EVENT(rcu_grace_period,
TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent),
* rcu_node structure, and the mask of CPUs that will be waited for.
* All but the type of RCU are extracted from the rcu_node structure.
*/
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) || \
+ LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
LTTNG_TRACEPOINT_EVENT(rcu_grace_period_init,
TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level,
* include SRCU), the grace-period number that the task is blocking
* (the current or the next), and the task's PID.
*/
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) || \
+ LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
LTTNG_TRACEPOINT_EVENT(rcu_preempt_task,
TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq),
* read-side critical section exiting that critical section. Track the
* type of RCU (which one day might include SRCU) and the task's PID.
*/
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) || \
+ LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
LTTNG_TRACEPOINT_EVENT(rcu_unlock_preempted_task,
TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid),
* whether there are any blocked tasks blocking the current grace period.
* All but the type of RCU are extracted from the rcu_node structure.
*/
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) || \
+ LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
LTTNG_TRACEPOINT_EVENT(rcu_quiescent_state_report,
TP_PROTO(const char *rcuname, unsigned long gp_seq,
* or "kick" when kicking a CPU that has been in dyntick-idle mode for
* too long.
*/
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) || \
+ LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
LTTNG_TRACEPOINT_EVENT(rcu_fqs,
TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),
* events use the upper bits of each number, while interrupt-related
* events use the lower bits.
*/
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,16,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) \
+ || LTTNG_KERNEL_RANGE(5,5,6, 5,6,0) \
+ || LTTNG_KERNEL_RANGE(5,4,22, 5,5,0)
+/*
+ * >= 5.6 variant: the kernel tracepoint passes the dynticks counter as a
+ * plain int (the 4.16 branch below receives an atomic_t instead).
+ */
+LTTNG_TRACEPOINT_EVENT(rcu_dyntick,
+
+	TP_PROTO(const char *polarity, long oldnesting, long newnesting, int dynticks),
+
+	TP_ARGS(polarity, oldnesting, newnesting, dynticks),
+
+	TP_FIELDS(
+		ctf_string(polarity, polarity)
+		ctf_integer(long, oldnesting, oldnesting)
+		ctf_integer(long, newnesting, newnesting)
+		ctf_integer(int, dynticks, dynticks)
+	)
+)
+
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,16,0))
LTTNG_TRACEPOINT_EVENT(rcu_dyntick,
TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),
* number of lazy callbacks queued, and the fourth element is the
* total number of callbacks queued.
*/
+/*
+ * rcu_callback: one event definition per upstream tracepoint prototype,
+ * selected by kernel version at build time. >= 5.6 drops the separate
+ * lazy-callback count and keeps only the total queue length.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
+LTTNG_TRACEPOINT_EVENT(rcu_callback,
+
+	TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen),
+
+	TP_ARGS(rcuname, rhp, qlen),
+
+	TP_FIELDS(
+		ctf_string(rcuname, rcuname)
+		ctf_integer_hex(void *, rhp, rhp)
+		ctf_integer_hex(void *, func, rhp->func)
+		ctf_integer(long, qlen, qlen)
+	)
+)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
+/* 3.12 <= version < 5.6: const rcuname, lazy + total queue lengths. */
 LTTNG_TRACEPOINT_EVENT(rcu_callback,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
 	TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy,
 		long qlen),
 	TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
+
+	TP_FIELDS(
+		ctf_string(rcuname, rcuname)
+		ctf_integer_hex(void *, rhp, rhp)
+		ctf_integer_hex(void *, func, rhp->func)
+		ctf_integer(long, qlen_lazy, qlen_lazy)
+		ctf_integer(long, qlen, qlen)
+	)
+)
 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+/* 3.4 <= version < 3.12: same fields, rcuname not const-qualified. */
+LTTNG_TRACEPOINT_EVENT(rcu_callback,
+
 	TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen_lazy,
 		long qlen),
 	TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
+
+	TP_FIELDS(
+		ctf_string(rcuname, rcuname)
+		ctf_integer_hex(void *, rhp, rhp)
+		ctf_integer_hex(void *, func, rhp->func)
+		ctf_integer(long, qlen_lazy, qlen_lazy)
+		ctf_integer(long, qlen, qlen)
+	)
+)
 #else
+/* < 3.4: no lazy-callback count in the upstream prototype. */
+LTTNG_TRACEPOINT_EVENT(rcu_callback,
+
 	TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen),
 	TP_ARGS(rcuname, rhp, qlen),
-#endif
 	TP_FIELDS(
 		ctf_string(rcuname, rcuname)
 		ctf_integer_hex(void *, rhp, rhp)
 		ctf_integer_hex(void *, func, rhp->func)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
-		ctf_integer(long, qlen_lazy, qlen_lazy)
-#endif
 		ctf_integer(long, qlen, qlen)
 	)
 )
+#endif
+
/*
* Tracepoint for the registration of a single RCU callback of the special
* the fourth argument is the number of lazy callbacks queued, and the
* fifth argument is the total number of callbacks queued.
*/
+/*
+ * rcu_kfree_callback: per-kernel-version prototypes. >= 5.6 drops the
+ * lazy-callback count; >= 3.12 const-qualifies rcuname.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
 LTTNG_TRACEPOINT_EVENT(rcu_kfree_callback,
+	TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
+		long qlen),
+
+	TP_ARGS(rcuname, rhp, offset, qlen),
+
+	TP_FIELDS(
+		ctf_string(rcuname, rcuname)
+		ctf_integer_hex(void *, rhp, rhp)
+		ctf_integer_hex(unsigned long, offset, offset)
+		ctf_integer(long, qlen, qlen)
+	)
+)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
+LTTNG_TRACEPOINT_EVENT(rcu_kfree_callback,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
 	TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
 		long qlen_lazy, long qlen),
 	TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
+
+	TP_FIELDS(
+		ctf_string(rcuname, rcuname)
+		ctf_integer_hex(void *, rhp, rhp)
+		ctf_integer_hex(unsigned long, offset, offset)
+		ctf_integer(long, qlen_lazy, qlen_lazy)
+		ctf_integer(long, qlen, qlen)
+	)
+)
 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+/* 3.4 <= version < 3.12: rcuname not const-qualified. */
+LTTNG_TRACEPOINT_EVENT(rcu_kfree_callback,
+
 	TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
 		long qlen_lazy, long qlen),
 	TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
+
+	TP_FIELDS(
+		ctf_string(rcuname, rcuname)
+		ctf_integer_hex(void *, rhp, rhp)
+		ctf_integer_hex(unsigned long, offset, offset)
+		ctf_integer(long, qlen_lazy, qlen_lazy)
+		ctf_integer(long, qlen, qlen)
+	)
+)
 #else
+/* < 3.4: no lazy-callback count. */
+LTTNG_TRACEPOINT_EVENT(rcu_kfree_callback,
+
 	TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
 		long qlen),
 	TP_ARGS(rcuname, rhp, offset, qlen),
-#endif
 	TP_FIELDS(
 		ctf_string(rcuname, rcuname)
 		ctf_integer_hex(void *, rhp, rhp)
 		ctf_integer_hex(unsigned long, offset, offset)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
-		ctf_integer(long, qlen_lazy, qlen_lazy)
-#endif
 		ctf_integer(long, qlen, qlen)
 	)
 )
+#endif
/*
* Tracepoint for marking the beginning rcu_do_batch, performed to start
* the total number of callbacks queued, and the fourth argument is
* the current RCU-callback batch limit.
*/
+/*
+ * rcu_batch_start: per-kernel-version prototypes. >= 5.6 drops qlen_lazy;
+ * >= 3.12 const-qualifies rcuname; >= 3.9 widens blimit from int to long.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
+LTTNG_TRACEPOINT_EVENT(rcu_batch_start,
+
+	TP_PROTO(const char *rcuname, long qlen, long blimit),
+
+	TP_ARGS(rcuname, qlen, blimit),
+
+	TP_FIELDS(
+		ctf_string(rcuname, rcuname)
+		ctf_integer(long, qlen, qlen)
+		ctf_integer(long, blimit, blimit)
+	)
+)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
 LTTNG_TRACEPOINT_EVENT(rcu_batch_start,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
 	TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit),
 	TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
+
+	TP_FIELDS(
+		ctf_string(rcuname, rcuname)
+		ctf_integer(long, qlen_lazy, qlen_lazy)
+		ctf_integer(long, qlen, qlen)
+		ctf_integer(long, blimit, blimit)
+	)
+)
 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+/* 3.9 <= version < 3.12: non-const rcuname, long blimit. */
+LTTNG_TRACEPOINT_EVENT(rcu_batch_start,
+
 	TP_PROTO(char *rcuname, long qlen_lazy, long qlen, long blimit),
 	TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
+
+	TP_FIELDS(
+		ctf_string(rcuname, rcuname)
+		ctf_integer(long, qlen_lazy, qlen_lazy)
+		ctf_integer(long, qlen, qlen)
+		ctf_integer(long, blimit, blimit)
+	)
+)
 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+/* 3.4 <= version < 3.9: blimit is still an int. */
+LTTNG_TRACEPOINT_EVENT(rcu_batch_start,
+
 	TP_PROTO(char *rcuname, long qlen_lazy, long qlen, int blimit),
 	TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
+
+	TP_FIELDS(
+		ctf_string(rcuname, rcuname)
+		ctf_integer(long, qlen_lazy, qlen_lazy)
+		ctf_integer(long, qlen, qlen)
+		ctf_integer(int, blimit, blimit)
+	)
+)
 #else
+/* < 3.4: no qlen_lazy, int blimit. */
+LTTNG_TRACEPOINT_EVENT(rcu_batch_start,
+
 	TP_PROTO(char *rcuname, long qlen, int blimit),
 	TP_ARGS(rcuname, qlen, blimit),
-#endif
 	TP_FIELDS(
 		ctf_string(rcuname, rcuname)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
-		ctf_integer(long, qlen_lazy, qlen_lazy)
-#endif
 		ctf_integer(long, qlen, qlen)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
-		ctf_integer(long, blimit, blimit)
-#else
 		ctf_integer(int, blimit, blimit)
-#endif
 	)
 )
+#endif
/*
* Tracepoint for the invocation of a single RCU callback function.
#else /* #ifdef CONFIG_RCU_TRACE */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) || \
+ LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
#define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0)
#define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \
qsmask) do { } while (0)
#endif /* LTTNG_TRACE_RCU_H */
/* This part must be outside protection */
-#include <probes/define_trace.h>
+#include <lttng/define_trace.h>