diff --git a/instrumentation/events/lttng-module/rcu.h b/instrumentation/events/lttng-module/rcu.h
index 1220fb72..4528a79b 100644
--- a/instrumentation/events/lttng-module/rcu.h
+++ b/instrumentation/events/lttng-module/rcu.h
@@ -1,10 +1,10 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM rcu
 
-#if !defined(_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_RCU_H
+#if !defined(LTTNG_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define LTTNG_TRACE_RCU_H
 
-#include <linux/tracepoint.h>
+#include <probes/lttng-tracepoint-event.h>
 #include <linux/version.h>
 
 /*
@@ -18,7 +18,7 @@
  * An "@" character within "" is a comment character: Data
  * reduction scripts will ignore the "@" and the remainder of the line.
  */
-TRACE_EVENT(rcu_utilization,
+LTTNG_TRACEPOINT_EVENT(rcu_utilization,
 
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
 	TP_PROTO(const char *s),
@@ -28,20 +28,17 @@ TRACE_EVENT(rcu_utilization,
 
 	TP_ARGS(s),
 
-	TP_STRUCT__entry(
-		__string(s, s)
-	),
-
-	TP_fast_assign(
-		tp_strcpy(s, s)
-	),
-
-	TP_printk("%s", __get_str(s))
+	TP_FIELDS(
+		ctf_string(s, s)
+	)
 )
 
 #ifdef CONFIG_RCU_TRACE
 
-#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
+#if defined(CONFIG_TREE_RCU) \
+	|| (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) \
+		&& defined(CONFIG_PREEMPT_RCU)) \
+	|| defined(CONFIG_TREE_PREEMPT_RCU)
 
 /*
  * Tracepoint for grace-period events: starting and ending a grace
@@ -52,26 +49,21 @@ TRACE_EVENT(rcu_utilization,
  * and "cpuofl", respectively), and a CPU being kicked for being too
  * long in dyntick-idle mode ("kick").
  */
-TRACE_EVENT(rcu_grace_period,
+LTTNG_TRACEPOINT_EVENT(rcu_grace_period,
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
+	TP_PROTO(const char *rcuname, unsigned long gpnum, const char *gpevent),
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
 	TP_PROTO(char *rcuname, unsigned long gpnum, char *gpevent),
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
 
 	TP_ARGS(rcuname, gpnum, gpevent),
 
-	TP_STRUCT__entry(
-		__string(rcuname, rcuname)
-		__field(unsigned long, gpnum)
-		__string(gpevent, gpevent)
-	),
-
-	TP_fast_assign(
-		tp_strcpy(rcuname, rcuname)
-		tp_assign(gpnum, gpnum)
-		tp_strcpy(gpevent, gpevent)
-	),
-
-	TP_printk("%s %lu %s",
-		  __get_str(rcuname), __entry->gpnum, __get_str(gpevent))
+	TP_FIELDS(
+		ctf_string(rcuname, rcuname)
+		ctf_integer(unsigned long, gpnum, gpnum)
+		ctf_string(gpevent, gpevent)
+	)
 )
 
 /*
@@ -81,34 +73,26 @@ TRACE_EVENT(rcu_grace_period,
  * rcu_node structure, and the mask of CPUs that will be waited for.
  * All but the type of RCU are extracted from the rcu_node structure.
  */
-TRACE_EVENT(rcu_grace_period_init,
+LTTNG_TRACEPOINT_EVENT(rcu_grace_period_init,
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
+	TP_PROTO(const char *rcuname, unsigned long gpnum, u8 level,
+		 int grplo, int grphi, unsigned long qsmask),
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
 	TP_PROTO(char *rcuname, unsigned long gpnum, u8 level,
 		 int grplo, int grphi, unsigned long qsmask),
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
 
 	TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),
 
-	TP_STRUCT__entry(
-		__string(rcuname, rcuname)
-		__field(unsigned long, gpnum)
-		__field(u8, level)
-		__field(int, grplo)
-		__field(int, grphi)
-		__field(unsigned long, qsmask)
-	),
-
-	TP_fast_assign(
-		tp_strcpy(rcuname, rcuname)
-		tp_assign(gpnum, gpnum)
-		tp_assign(level, level)
-		tp_assign(grplo, grplo)
-		tp_assign(grphi, grphi)
-		tp_assign(qsmask, qsmask)
-	),
-
-	TP_printk("%s %lu %u %d %d %lx",
-		  __get_str(rcuname), __entry->gpnum, __entry->level,
-		  __entry->grplo, __entry->grphi, __entry->qsmask)
+	TP_FIELDS(
+		ctf_string(rcuname, rcuname)
+		ctf_integer(unsigned long, gpnum, gpnum)
+		ctf_integer(u8, level, level)
+		ctf_integer(int, grplo, grplo)
+		ctf_integer(int, grphi, grphi)
+		ctf_integer(unsigned long, qsmask, qsmask)
+	)
 )
 
 /*
@@ -117,26 +101,21 @@ TRACE_EVENT(rcu_grace_period_init,
  * include SRCU), the grace-period number that the task is blocking
  * (the current or the next), and the task's PID.
  */
-TRACE_EVENT(rcu_preempt_task,
+LTTNG_TRACEPOINT_EVENT(rcu_preempt_task,
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
+	TP_PROTO(const char *rcuname, int pid, unsigned long gpnum),
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
 	TP_PROTO(char *rcuname, int pid, unsigned long gpnum),
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
 
 	TP_ARGS(rcuname, pid, gpnum),
 
-	TP_STRUCT__entry(
-		__string(rcuname, rcuname)
-		__field(unsigned long, gpnum)
-		__field(int, pid)
-	),
-
-	TP_fast_assign(
-		tp_strcpy(rcuname, rcuname)
-		tp_assign(gpnum, gpnum)
-		tp_assign(pid, pid)
-	),
-
-	TP_printk("%s %lu %d",
-		  __get_str(rcuname), __entry->gpnum, __entry->pid)
+	TP_FIELDS(
+		ctf_string(rcuname, rcuname)
+		ctf_integer(unsigned long, gpnum, gpnum)
+		ctf_integer(int, pid, pid)
+	)
 )
 
 /*
@@ -144,25 +123,21 @@ TRACE_EVENT(rcu_preempt_task,
  * read-side critical section exiting that critical section. Track the
  * type of RCU (which one day might include SRCU) and the task's PID.
  */
-TRACE_EVENT(rcu_unlock_preempted_task,
+LTTNG_TRACEPOINT_EVENT(rcu_unlock_preempted_task,
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
+	TP_PROTO(const char *rcuname, unsigned long gpnum, int pid),
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
 	TP_PROTO(char *rcuname, unsigned long gpnum, int pid),
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
 
 	TP_ARGS(rcuname, gpnum, pid),
 
-	TP_STRUCT__entry(
-		__string(rcuname, rcuname)
-		__field(unsigned long, gpnum)
-		__field(int, pid)
-	),
-
-	TP_fast_assign(
-		tp_strcpy(rcuname, rcuname)
-		tp_assign(gpnum, gpnum)
-		tp_assign(pid, pid)
-	),
-
-	TP_printk("%s %lu %d", __get_str(rcuname), __entry->gpnum, __entry->pid)
+	TP_FIELDS(
+		ctf_string(rcuname, rcuname)
+		ctf_integer(unsigned long, gpnum, gpnum)
+		ctf_integer(int, pid, pid)
+	)
 )
 
 /*
@@ -173,40 +148,30 @@ TRACE_EVENT(rcu_unlock_preempted_task,
  * whether there are any blocked tasks blocking the current grace period.
  * All but the type of RCU are extracted from the rcu_node structure.
  */
-TRACE_EVENT(rcu_quiescent_state_report,
+LTTNG_TRACEPOINT_EVENT(rcu_quiescent_state_report,
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
+	TP_PROTO(const char *rcuname, unsigned long gpnum,
+		 unsigned long mask, unsigned long qsmask,
+		 u8 level, int grplo, int grphi, int gp_tasks),
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
 	TP_PROTO(char *rcuname, unsigned long gpnum,
 		 unsigned long mask, unsigned long qsmask,
 		 u8 level, int grplo, int grphi, int gp_tasks),
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
 
 	TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),
 
-	TP_STRUCT__entry(
-		__string(rcuname, rcuname)
-		__field(unsigned long, gpnum)
-		__field(unsigned long, mask)
-		__field(unsigned long, qsmask)
-		__field(u8, level)
-		__field(int, grplo)
-		__field(int, grphi)
-		__field(u8, gp_tasks)
-	),
-
-	TP_fast_assign(
-		tp_strcpy(rcuname, rcuname)
-		tp_assign(gpnum, gpnum)
-		tp_assign(mask, mask)
-		tp_assign(qsmask, qsmask)
-		tp_assign(level, level)
-		tp_assign(grplo, grplo)
-		tp_assign(grphi, grphi)
-		tp_assign(gp_tasks, gp_tasks)
-	),
-
-	TP_printk("%s %lu %lx>%lx %u %d %d %u",
-		  __get_str(rcuname), __entry->gpnum,
-		  __entry->mask, __entry->qsmask, __entry->level,
-		  __entry->grplo, __entry->grphi, __entry->gp_tasks)
+	TP_FIELDS(
+		ctf_string(rcuname, rcuname)
+		ctf_integer(unsigned long, gpnum, gpnum)
+		ctf_integer(unsigned long, mask, mask)
+		ctf_integer(unsigned long, qsmask, qsmask)
+		ctf_integer(u8, level, level)
+		ctf_integer(int, grplo, grplo)
+		ctf_integer(int, grphi, grphi)
+		ctf_integer(u8, gp_tasks, gp_tasks)
+	)
 )
 
 /*
@@ -217,32 +182,30 @@ TRACE_EVENT(rcu_quiescent_state_report,
  * or "kick" when kicking a CPU that has been in dyntick-idle mode for
  * too long.
  */
-TRACE_EVENT(rcu_fqs,
+LTTNG_TRACEPOINT_EVENT(rcu_fqs,
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
+	TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent),
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
 	TP_PROTO(char *rcuname, unsigned long gpnum, int cpu, char *qsevent),
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
 
 	TP_ARGS(rcuname, gpnum, cpu, qsevent),
 
-	TP_STRUCT__entry(
-		__string(rcuname, rcuname)
-		__field(unsigned long, gpnum)
-		__field(int, cpu)
-		__string(qsevent, qsevent)
-	),
-
-	TP_fast_assign(
-		tp_strcpy(rcuname, rcuname)
-		tp_assign(gpnum, gpnum)
-		tp_assign(cpu, cpu)
-		tp_strcpy(qsevent, qsevent)
-	),
-
-	TP_printk("%s %lu %d %s",
-		  __get_str(rcuname), __entry->gpnum,
-		  __entry->cpu, __get_str(qsevent))
+	TP_FIELDS(
+		ctf_integer(unsigned long, gpnum, gpnum)
+		ctf_integer(int, cpu, cpu)
+		ctf_string(rcuname, rcuname)
+		ctf_string(qsevent, qsevent)
+	)
 )
 
-#endif /* #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) */
+#endif /*
+ * #if defined(CONFIG_TREE_RCU)
+ *	|| (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)
+ *		&& defined(CONFIG_PREEMPT_RCU))
+ *	|| defined(CONFIG_TREE_PREEMPT_RCU)
+ */
 
 /*
  * Tracepoint for dyntick-idle entry/exit events. These take a string
@@ -257,41 +220,60 @@ TRACE_EVENT(rcu_fqs,
  * events use the upper bits of each number, while interrupt-related
  * events use the lower bits.
  */
-TRACE_EVENT(rcu_dyntick,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,16,0))
+LTTNG_TRACEPOINT_EVENT(rcu_dyntick,
+
+	TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),
+
+	TP_ARGS(polarity, oldnesting, newnesting, dynticks),
+
+	TP_FIELDS(
+		ctf_string(polarity, polarity)
+		ctf_integer(long, oldnesting, oldnesting)
+		ctf_integer(long, newnesting, newnesting)
+		ctf_integer(int, dynticks, atomic_read(&dynticks))
+	)
+)
+
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
+LTTNG_TRACEPOINT_EVENT(rcu_dyntick,
+
+	TP_PROTO(const char *polarity, long long oldnesting, long long newnesting),
+
+	TP_ARGS(polarity, oldnesting, newnesting),
+
+	TP_FIELDS(
+		ctf_string(polarity, polarity)
+		ctf_integer(long long, oldnesting, oldnesting)
+		ctf_integer(long long, newnesting, newnesting)
+	)
+)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+LTTNG_TRACEPOINT_EVENT(rcu_dyntick,
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
 	TP_PROTO(char *polarity, long long oldnesting, long long newnesting),
 
 	TP_ARGS(polarity, oldnesting, newnesting),
+
+	TP_FIELDS(
+		ctf_string(polarity, polarity)
+		ctf_integer(long long, oldnesting, oldnesting)
+		ctf_integer(long long, newnesting, newnesting)
+	)
+)
 #else
+LTTNG_TRACEPOINT_EVENT(rcu_dyntick,
+
 	TP_PROTO(char *polarity),
 
 	TP_ARGS(polarity),
-#endif
 
-	TP_STRUCT__entry(
-		__string(polarity, polarity)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
-		__field(long long, oldnesting)
-		__field(long long, newnesting)
+	TP_FIELDS(
+		ctf_string(polarity, polarity)
+	)
+)
 #endif
-	),
 
-	TP_fast_assign(
-		tp_strcpy(polarity, polarity)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
-		tp_assign(oldnesting, oldnesting)
-		tp_assign(newnesting, newnesting)
-#endif
-	),
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
-	TP_printk("%s %llx %llx", __get_str(polarity),
-		  __entry->oldnesting, __entry->newnesting)
-#else
-	TP_printk("%s", __get_str(polarity))
-#endif
-)
 
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
 /*
@@ -316,21 +298,19 @@ TRACE_EVENT(rcu_dyntick,
  * "Demigrate": Timer fired on wrong CPU, woke up correct CPU.
  * "Cleanup after idle": Idle exited, timer canceled.
  */
-TRACE_EVENT(rcu_prep_idle,
+LTTNG_TRACEPOINT_EVENT(rcu_prep_idle,
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
+	TP_PROTO(const char *reason),
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
 	TP_PROTO(char *reason),
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
 
 	TP_ARGS(reason),
 
-	TP_STRUCT__entry(
-		__string(reason, reason)
-	),
-
-	TP_fast_assign(
-		tp_strcpy(reason, reason)
-	),
-
-	TP_printk("%s", __get_str(reason))
+	TP_FIELDS(
+		ctf_string(reason, reason)
+	)
 )
 
 #endif
@@ -341,9 +321,14 @@ TRACE_EVENT(rcu_prep_idle,
  * number of lazy callbacks queued, and the fourth element is the
  * total number of callbacks queued.
  */
-TRACE_EVENT(rcu_callback,
+LTTNG_TRACEPOINT_EVENT(rcu_callback,
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
+	TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy,
+		 long qlen),
+
+	TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
 	TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen_lazy,
 		 long qlen),
 
@@ -354,35 +339,15 @@ TRACE_EVENT(rcu_callback,
 	TP_ARGS(rcuname, rhp, qlen),
 #endif
 
-	TP_STRUCT__entry(
-		__string(rcuname, rcuname)
-		__field(void *, rhp)
-		__field(void *, func)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
-		__field(long, qlen_lazy)
-#endif
-		__field(long, qlen)
-	),
-
-	TP_fast_assign(
-		tp_strcpy(rcuname, rcuname)
-		tp_assign(rhp, rhp)
-		tp_assign(func, rhp->func)
+	TP_FIELDS(
+		ctf_string(rcuname, rcuname)
+		ctf_integer_hex(void *, rhp, rhp)
+		ctf_integer_hex(void *, func, rhp->func)
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
-		tp_assign(qlen_lazy, qlen_lazy)
-#endif
-		tp_assign(qlen, qlen)
-	),
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
-	TP_printk("%s rhp=%p func=%pf %ld/%ld",
-		  __get_str(rcuname), __entry->rhp, __entry->func,
-		  __entry->qlen_lazy, __entry->qlen)
-#else
-	TP_printk("%s rhp=%p func=%pf %ld",
-		  __get_str(rcuname), __entry->rhp, __entry->func,
-		  __entry->qlen)
+		ctf_integer(long, qlen_lazy, qlen_lazy)
 #endif
+		ctf_integer(long, qlen, qlen)
+	)
 )
 
 /*
@@ -393,9 +358,15 @@ TRACE_EVENT(rcu_callback,
  * the fourth argument is the number of lazy callbacks queued, and the
  * fifth argument is the total number of callbacks queued.
  */
-TRACE_EVENT(rcu_kfree_callback,
+LTTNG_TRACEPOINT_EVENT(rcu_kfree_callback,
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
+	TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
+		 long qlen_lazy, long qlen),
+
+	TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
 	TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
 		 long qlen_lazy, long qlen),
 
@@ -407,35 +378,15 @@ TRACE_EVENT(rcu_kfree_callback,
 	TP_ARGS(rcuname, rhp, offset, qlen),
 #endif
 
-	TP_STRUCT__entry(
-		__string(rcuname, rcuname)
-		__field(void *, rhp)
-		__field(unsigned long, offset)
+	TP_FIELDS(
+		ctf_string(rcuname, rcuname)
+		ctf_integer_hex(void *, rhp, rhp)
+		ctf_integer_hex(unsigned long, offset, offset)
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
-		__field(long, qlen_lazy)
-#endif
-		__field(long, qlen)
-	),
-
-	TP_fast_assign(
-		tp_strcpy(rcuname, rcuname)
-		tp_assign(rhp, rhp)
-		tp_assign(offset, offset)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
-		tp_assign(qlen_lazy, qlen_lazy)
-#endif
-		tp_assign(qlen, qlen)
-	),
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
-	TP_printk("%s rhp=%p func=%ld %ld/%ld",
-		  __get_str(rcuname), __entry->rhp, __entry->offset,
-		  __entry->qlen_lazy, __entry->qlen)
-#else
-	TP_printk("%s rhp=%p func=%ld %ld",
-		  __get_str(rcuname), __entry->rhp, __entry->offset,
-		  __entry->qlen)
+		ctf_integer(long, qlen_lazy, qlen_lazy)
 #endif
+		ctf_integer(long, qlen, qlen)
+	)
 )
 
 /*
@@ -445,9 +396,13 @@ TRACE_EVENT(rcu_kfree_callback,
  * the total number of callbacks queued, and the fourth argument is
  * the current RCU-callback batch limit.
  */
-TRACE_EVENT(rcu_batch_start,
+LTTNG_TRACEPOINT_EVENT(rcu_batch_start,
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
+	TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit),
+
+	TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
 	TP_PROTO(char *rcuname, long qlen_lazy, long qlen, long blimit),
 
 	TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
@@ -461,40 +416,18 @@ TRACE_EVENT(rcu_batch_start,
 	TP_ARGS(rcuname, qlen, blimit),
 #endif
 
-	TP_STRUCT__entry(
-		__string(rcuname, rcuname)
+	TP_FIELDS(
+		ctf_string(rcuname, rcuname)
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
-		__field(long, qlen_lazy)
+		ctf_integer(long, qlen_lazy, qlen_lazy)
 #endif
-		__field(long, qlen)
+		ctf_integer(long, qlen, qlen)
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
-		__field(long, blimit)
+		ctf_integer(long, blimit, blimit)
 #else
-		__field(int, blimit)
-#endif
-	),
-
-	TP_fast_assign(
-		tp_strcpy(rcuname, rcuname)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
-		tp_assign(qlen_lazy, qlen_lazy)
-#endif
-		tp_assign(qlen, qlen)
-		tp_assign(blimit, blimit)
-	),
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
-	TP_printk("%s CBs=%ld/%ld bl=%ld",
-		  __get_str(rcuname), __entry->qlen_lazy, __entry->qlen,
-		  __entry->blimit)
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
-	TP_printk("%s CBs=%ld/%ld bl=%d",
-		  __get_str(rcuname), __entry->qlen_lazy, __entry->qlen,
-		  __entry->blimit)
-#else
-	TP_printk("%s CBs=%ld bl=%d",
-		  __get_str(rcuname), __entry->qlen, __entry->blimit)
+		ctf_integer(int, blimit, blimit)
 #endif
+	)
 )
 
 /*
@@ -502,26 +435,21 @@ TRACE_EVENT(rcu_batch_start,
  * The first argument is the type of RCU, and the second argument is
  * a pointer to the RCU callback itself.
  */
-TRACE_EVENT(rcu_invoke_callback,
+LTTNG_TRACEPOINT_EVENT(rcu_invoke_callback,
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
+	TP_PROTO(const char *rcuname, struct rcu_head *rhp),
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
 	TP_PROTO(char *rcuname, struct rcu_head *rhp),
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
 
 	TP_ARGS(rcuname, rhp),
 
-	TP_STRUCT__entry(
-		__string(rcuname, rcuname)
-		__field(void *, rhp)
-		__field(void *, func)
-	),
-
-	TP_fast_assign(
-		tp_strcpy(rcuname, rcuname)
-		tp_assign(rhp, rhp)
-		tp_assign(func, rhp->func)
-	),
-
-	TP_printk("%s rhp=%p func=%pf",
-		  __get_str(rcuname), __entry->rhp, __entry->func)
+	TP_FIELDS(
+		ctf_string(rcuname, rcuname)
+		ctf_integer_hex(void *, rhp, rhp)
+		ctf_integer_hex(void *, func, rhp->func)
+	)
 )
 
 /*
@@ -531,26 +459,21 @@ TRACE_EVENT(rcu_invoke_callback,
  * is the offset of the callback within the enclosing RCU-protected
 * data structure.
  */
-TRACE_EVENT(rcu_invoke_kfree_callback,
+LTTNG_TRACEPOINT_EVENT(rcu_invoke_kfree_callback,
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
+	TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset),
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
 	TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset),
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
 
 	TP_ARGS(rcuname, rhp, offset),
 
-	TP_STRUCT__entry(
-		__string(rcuname, rcuname)
-		__field(void *, rhp)
-		__field(unsigned long, offset)
-	),
-
-	TP_fast_assign(
-		tp_strcpy(rcuname, rcuname)
-		tp_assign(rhp, rhp)
-		tp_assign(offset, offset)
-	),
-
-	TP_printk("%s rhp=%p func=%ld",
-		  __get_str(rcuname), __entry->rhp, __entry->offset)
+	TP_FIELDS(
+		ctf_string(rcuname, rcuname)
+		ctf_integer_hex(void *, rhp, rhp)
+		ctf_integer(unsigned long, offset, offset)
+	)
 )
 
 /*
@@ -564,9 +487,19 @@ TRACE_EVENT(rcu_invoke_kfree_callback,
  * and the sixth argument (risk) is the return value from
 * rcu_is_callbacks_kthread().
 */
-TRACE_EVENT(rcu_batch_end,
+LTTNG_TRACEPOINT_EVENT(rcu_batch_end,
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
+	TP_PROTO(const char *rcuname, int callbacks_invoked,
+		 char cb, char nr, char iit, char risk),
+
+	TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
+	TP_PROTO(const char *rcuname, int callbacks_invoked,
+		 bool cb, bool nr, bool iit, bool risk),
+
+	TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
 	TP_PROTO(char *rcuname, int callbacks_invoked,
 		 bool cb, bool nr, bool iit, bool risk),
 
 	TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
@@ -577,39 +510,21 @@ TRACE_EVENT(rcu_batch_end,
 	TP_ARGS(rcuname, callbacks_invoked),
 #endif
 
-	TP_STRUCT__entry(
-		__string(rcuname, rcuname)
-		__field(int, callbacks_invoked)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
-		__field(bool, cb)
-		__field(bool, nr)
-		__field(bool, iit)
-		__field(bool, risk)
-#endif
-	),
-
-	TP_fast_assign(
-		tp_strcpy(rcuname, rcuname)
-		tp_assign(callbacks_invoked, callbacks_invoked)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
-		tp_assign(cb, cb)
-		tp_assign(nr, nr)
-		tp_assign(iit, iit)
-		tp_assign(risk, risk)
-#endif
-	),
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
-	TP_printk("%s CBs-invoked=%d idle=%c%c%c%c",
-		  __get_str(rcuname), __entry->callbacks_invoked,
-		  __entry->cb ? 'C' : '.',
-		  __entry->nr ? 'S' : '.',
-		  __entry->iit ? 'I' : '.',
-		  __entry->risk ? 'R' : '.')
-#else
-	TP_printk("%s CBs-invoked=%d",
-		  __get_str(rcuname), __entry->callbacks_invoked)
+	TP_FIELDS(
+		ctf_string(rcuname, rcuname)
+		ctf_integer(int, callbacks_invoked, callbacks_invoked)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
+		ctf_integer(char, cb, cb)
+		ctf_integer(char, nr, nr)
+		ctf_integer(char, iit, iit)
+		ctf_integer(char, risk, risk)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+		ctf_integer(bool, cb, cb)
+		ctf_integer(bool, nr, nr)
+		ctf_integer(bool, iit, iit)
+		ctf_integer(bool, risk, risk)
 #endif
+	)
 )
 
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
 /*
@@ -618,9 +533,14 @@ TRACE_EVENT(rcu_batch_end,
  * of the RCU flavor from rcutorture's viewpoint and the second argument
  * is the callback address.
  */
-TRACE_EVENT(rcu_torture_read,
+LTTNG_TRACEPOINT_EVENT(rcu_torture_read,
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
+	TP_PROTO(const char *rcutorturename, struct rcu_head *rhp,
+		 unsigned long secs, unsigned long c_old, unsigned long c),
+
+	TP_ARGS(rcutorturename, rhp, secs, c_old, c),
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
 	TP_PROTO(char *rcutorturename, struct rcu_head *rhp,
 		 unsigned long secs, unsigned long c_old, unsigned long c),
 
 	TP_ARGS(rcutorturename, rhp, secs, c_old, c),
@@ -631,34 +551,15 @@ TRACE_EVENT(rcu_torture_read,
 	TP_ARGS(rcutorturename, rhp),
 #endif
 
-	TP_STRUCT__entry(
-		__string(rcutorturename, rcutorturename)
-		__field(struct rcu_head *, rhp)
+	TP_FIELDS(
+		ctf_string(rcutorturename, rcutorturename)
+		ctf_integer_hex(struct rcu_head *, rhp, rhp)
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
-		__field(unsigned long, secs)
-		__field(unsigned long, c_old)
-		__field(unsigned long, c)
-#endif
-	),
-
-	TP_fast_assign(
-		tp_strcpy(rcutorturename, rcutorturename)
-		tp_assign(rhp, rhp)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
-		tp_assign(secs, secs)
-		tp_assign(c_old, c_old)
-		tp_assign(c, c)
-#endif
-	),
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
-	TP_printk("%s torture read %p %luus c: %lu %lu",
-		  __entry->rcutorturename, __entry->rhp,
-		  __entry->secs, __entry->c_old, __entry->c)
-#else
-	TP_printk("%s torture read %p",
-		  __get_str(rcutorturename), __entry->rhp)
+		ctf_integer(unsigned long, secs, secs)
+		ctf_integer(unsigned long, c_old, c_old)
+		ctf_integer(unsigned long, c, c)
 #endif
+	)
 )
 
 #endif
@@ -680,31 +581,23 @@ TRACE_EVENT(rcu_torture_read,
  * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
 * is the count of remaining callbacks, and "done" is the piggybacking count.
 */
-TRACE_EVENT(rcu_barrier,
+LTTNG_TRACEPOINT_EVENT(rcu_barrier,
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
+	TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done),
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
 	TP_PROTO(char *rcuname, char *s, int cpu, int cnt, unsigned long done),
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
 
 	TP_ARGS(rcuname, s, cpu, cnt, done),
 
-	TP_STRUCT__entry(
-		__string(rcuname, rcuname)
-		__string(s, s)
-		__field(int, cpu)
-		__field(int, cnt)
-		__field(unsigned long, done)
-	),
-
-	TP_fast_assign(
-		tp_strcpy(rcuname, rcuname)
-		tp_strcpy(s, s)
-		tp_assign(cpu, cpu)
-		tp_assign(cnt, cnt)
-		tp_assign(done, done)
-	),
-
-	TP_printk("%s %s cpu %d remaining %d # %lu",
-		  __get_str(rcuname), __get_str(s), __entry->cpu, __entry->cnt,
-		  __entry->done)
+	TP_FIELDS(
+		ctf_string(rcuname, rcuname)
+		ctf_string(s, s)
+		ctf_integer(int, cpu, cpu)
+		ctf_integer(int, cnt, cnt)
+		ctf_integer(unsigned long, done, done)
+	)
 )
 
 #endif
@@ -757,7 +650,7 @@ TRACE_EVENT(rcu_barrier,
 #endif
 
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
-#endif /* _TRACE_RCU_H */
+#endif /* LTTNG_TRACE_RCU_H */
 
 /* This part must be outside protection */
-#include "../../../probes/define_trace.h"
+#include <probes/define_trace.h>
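
For reference, the shape of the conversion applied throughout this diff can be illustrated with the smallest event above, rcu_utilization. This is only a sketch assembled from the hunks shown here, not an additional change carried by the commit: the TP_STRUCT__entry()/TP_fast_assign()/TP_printk() triplet of the old TRACE_EVENT()-style definition collapses into a single TP_FIELDS() description built from ctf_*() field macros, and the event is declared with LTTNG_TRACEPOINT_EVENT() from the lttng-tracepoint-event probe header included at the top of the file.

/* Before (old LTTng TRACE_EVENT-style definition, removed by this diff): */
TRACE_EVENT(rcu_utilization,

	TP_PROTO(const char *s),

	TP_ARGS(s),

	TP_STRUCT__entry(
		__string(s, s)		/* declare the event payload layout... */
	),

	TP_fast_assign(
		tp_strcpy(s, s)		/* ...copy each argument into it... */
	),

	TP_printk("%s", __get_str(s))	/* ...and describe its textual rendering */
)

/* After (LTTNG_TRACEPOINT_EVENT with TP_FIELDS, added by this diff): */
LTTNG_TRACEPOINT_EVENT(rcu_utilization,

	TP_PROTO(const char *s),

	TP_ARGS(s),

	TP_FIELDS(
		ctf_string(s, s)	/* one macro both declares and records the field */
	)
)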