X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=instrumentation%2Fevents%2Flttng-module%2Fsched.h;h=4364d0a90c03cc720dd8edcfba136b29d18a598b;hb=9486bed4329e2e7a15115c564fede382845ed87a;hp=4e829a33a7838685dbc78459980d88ee1be66c25;hpb=3ee729fef47b3559f2f5de83654b92c57df5ee7d;p=lttng-modules.git

diff --git a/instrumentation/events/lttng-module/sched.h b/instrumentation/events/lttng-module/sched.h
index 4e829a33..4364d0a9 100644
--- a/instrumentation/events/lttng-module/sched.h
+++ b/instrumentation/events/lttng-module/sched.h
@@ -4,7 +4,7 @@
 #if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
 #define LTTNG_TRACE_SCHED_H
 
-#include "../../../probes/lttng-tracepoint-event.h"
+#include <probes/lttng-tracepoint-event.h>
 #include <linux/sched.h>
 #include <linux/pid_namespace.h>
 #include <linux/binfmts.h>
@@ -24,7 +24,63 @@
 #ifndef _TRACE_SCHED_DEF_
 #define _TRACE_SCHED_DEF_
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0))
+
+static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
+{
+	unsigned int state;
+
+#ifdef CONFIG_SCHED_DEBUG
+	BUG_ON(p != current);
+#endif /* CONFIG_SCHED_DEBUG */
+
+	/*
+	 * Preemption ignores task state, therefore preempted tasks are always
+	 * RUNNING (we will not have dequeued if state != RUNNING).
+	 */
+	if (preempt)
+		return TASK_REPORT_MAX;
+
+	/*
+	 * task_state_index() uses fls() and returns a value from 0-8 range.
+	 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
+	 * it for left shift operation to get the correct task->state
+	 * mapping.
+	 */
+	state = task_state_index(p);
+
+	return state ? (1 << (state - 1)) : state;
+}
+
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))
+
+static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
+{
+	unsigned int state;
+
+#ifdef CONFIG_SCHED_DEBUG
+	BUG_ON(p != current);
+#endif /* CONFIG_SCHED_DEBUG */
+
+	/*
+	 * Preemption ignores task state, therefore preempted tasks are always
+	 * RUNNING (we will not have dequeued if state != RUNNING).
+	 */
+	if (preempt)
+		return TASK_REPORT_MAX;
+
+	/*
+	 * __get_task_state() uses fls() and returns a value from 0-8 range.
+	 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
+	 * it for left shift operation to get the correct task->state
+	 * mapping.
+	 */
+	state = __get_task_state(p);
+
+	return state ? (1 << (state - 1)) : state;
+}
+
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
 
 static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
 {
@@ -145,7 +201,14 @@ LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret,
 /*
  * Tracepoint for waking up a task:
  */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) || \
+	LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
+	LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
+	LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
+	LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \
+	LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \
+	LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \
+	LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0))
 LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,
 
 	TP_PROTO(struct task_struct *p),
@@ -155,7 +218,7 @@ LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,
 	TP_FIELDS(
 		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
 		ctf_integer(pid_t, tid, p->pid)
-		ctf_integer(int, prio, p->prio)
+		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
 		ctf_integer(int, target_cpu, task_cpu(p))
 	)
 )
@@ -175,7 +238,7 @@ LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,
 	TP_FIELDS(
 		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
 		ctf_integer(pid_t, tid, p->pid)
-		ctf_integer(int, prio, p->prio)
+		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
 		ctf_integer(int, success, success)
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
 		ctf_integer(int, target_cpu, task_cpu(p))
@@ -184,7 +247,14 @@ LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,
 )
 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) || \
+	LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
+	LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
+	LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
+	LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \
+	LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \
+	LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \
+	LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0))
 
 /*
  * Tracepoint called when waking a task; this tracepoint is guaranteed to be
@@ -314,7 +384,7 @@ LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template,
 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_free,
 	TP_PROTO(struct task_struct *p),
 	TP_ARGS(p))
-	
+
 
 /*
  * Tracepoint for a task exiting:
@@ -538,7 +608,27 @@ LTTNG_TRACEPOINT_EVENT(sched_stat_runtime,
 )
 #endif
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0) || \
+	LTTNG_RT_KERNEL_RANGE(4,9,27,18, 4,10,0,0) || \
+	LTTNG_RT_KERNEL_RANGE(4,11,5,1, 4,12,0,0))
+/*
+ * Tracepoint for showing priority inheritance modifying a tasks
+ * priority.
+ */
+LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,
+
+	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),
+
+	TP_ARGS(tsk, pi_task),
+
+	TP_FIELDS(
+		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
+		ctf_integer(pid_t, tid, tsk->pid)
+		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
+		ctf_integer(int, newprio, pi_task ? pi_task->prio - MAX_RT_PRIO : tsk->prio - MAX_RT_PRIO)
+	)
+)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
 /*
  * Tracepoint for showing priority inheritance modifying a tasks
  * priority.
@@ -561,4 +651,4 @@ LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,
 #endif /* LTTNG_TRACE_SCHED_H */
 
 /* This part must be outside protection */
-#include "../../../probes/define_trace.h"
+#include <probes/define_trace.h>