Migrate tracepoint instrumentation to TP_FIELDS
[lttng-modules.git] / instrumentation / events / lttng-module / sched.h
1 #undef TRACE_SYSTEM
2 #define TRACE_SYSTEM sched
3
4 #if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define LTTNG_TRACE_SCHED_H
6
7 #include "../../../probes/lttng-tracepoint-event.h"
8 #include <linux/sched.h>
9 #include <linux/binfmts.h>
10 #include <linux/version.h>
11 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
12 #include <linux/sched/rt.h>
13 #endif
14
15 #ifndef _TRACE_SCHED_DEF_
16 #define _TRACE_SCHED_DEF_
17
18 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
19
/*
 * Compute the prev_state value recorded by sched_switch (kernels >= 3.19).
 *
 * Starts from p->state.  Under CONFIG_PREEMPT, a task that was preempted
 * while runnable is reported as TASK_RUNNING, tagged with TASK_STATE_MAX
 * so it can be distinguished from a plain TASK_RUNNING value.
 */
static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
#ifdef CONFIG_SCHED_DEBUG
	/* preempt_count() below reads the current task's count — p must be current. */
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (preempt_count() & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif /* CONFIG_PREEMPT */

	return state;
}
37
38 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
39
/*
 * Compute the prev_state value recorded by sched_switch (kernels >= 3.13).
 *
 * Same as the 3.19+ variant, but reads the per-task preempt count via
 * task_preempt_count(p) instead of preempt_count().
 */
static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_preempt_count(p) & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}
54
55 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
56
/*
 * Compute the prev_state value recorded by sched_switch (kernels >= 3.2).
 *
 * Reads the preempt count directly from the task's thread_info;
 * task_preempt_count() does not exist yet on these kernels.
 */
static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}
71
72 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
73
/*
 * Compute the prev_state value recorded by sched_switch (kernels >= 2.6.35).
 *
 * Preempted tasks are reported as plain TASK_RUNNING here, without the
 * TASK_STATE_MAX tag used by the newer variants (presumably because
 * TASK_STATE_MAX is not usable on these kernels — verify against the
 * target kernel headers).
 */
static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING;
#endif

	return state;
}
88
89 #endif
90
91 #endif /* _TRACE_SCHED_DEF_ */
92
93 /*
94 * Tracepoint for calling kthread_stop, performed to end a kthread:
95 */
96 LTTNG_TRACEPOINT_EVENT(sched_kthread_stop,
97
98 TP_PROTO(struct task_struct *t),
99
100 TP_ARGS(t),
101
102 TP_FIELDS(
103 ctf_array_text(char, comm, t->comm, TASK_COMM_LEN)
104 ctf_integer(pid_t, tid, t->pid)
105 )
106 )
107
108 /*
109 * Tracepoint for the return value of the kthread stopping:
110 */
111 LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret,
112
113 TP_PROTO(int ret),
114
115 TP_ARGS(ret),
116
117 TP_FIELDS(
118 ctf_integer(int, ret, ret)
119 )
120 )
121
122 /*
123 * Tracepoint for waking up a task:
124 */
125 LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,
126
127 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
128 TP_PROTO(struct task_struct *p, int success),
129
130 TP_ARGS(p, success),
131 #else
132 TP_PROTO(struct rq *rq, struct task_struct *p, int success),
133
134 TP_ARGS(rq, p, success),
135 #endif
136
137 TP_FIELDS(
138 ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
139 ctf_integer(pid_t, tid, p->pid)
140 ctf_integer(int, prio, p->prio)
141 ctf_integer(int, success, success)
142 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
143 ctf_integer(int, target_cpu, task_cpu(p))
144 #endif
145 )
146 )
147
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))

/*
 * Tracepoint for waking up a task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

/*
 * Tracepoint for waking up a task (pre-2.6.35 prototype with struct rq *):
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
	TP_ARGS(rq, p, success))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
	TP_ARGS(rq, p, success))

#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
175
176 /*
177 * Tracepoint for task switches, performed by the scheduler:
178 */
179 LTTNG_TRACEPOINT_EVENT(sched_switch,
180
181 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
182 TP_PROTO(struct task_struct *prev,
183 struct task_struct *next),
184
185 TP_ARGS(prev, next),
186 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
187 TP_PROTO(struct rq *rq, struct task_struct *prev,
188 struct task_struct *next),
189
190 TP_ARGS(rq, prev, next),
191 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
192
193 TP_FIELDS(
194 ctf_array_text(char, prev_comm, prev->comm, TASK_COMM_LEN)
195 ctf_integer(pid_t, prev_tid, prev->pid)
196 ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO)
197 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
198 ctf_integer(long, prev_state, __trace_sched_switch_state(prev))
199 #else
200 ctf_integer(long, prev_state, prev->state)
201 #endif
202 ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN)
203 ctf_integer(pid_t, next_tid, next->pid)
204 ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO)
205 )
206 )
207
208 /*
209 * Tracepoint for a task being migrated:
210 */
211 LTTNG_TRACEPOINT_EVENT(sched_migrate_task,
212
213 TP_PROTO(struct task_struct *p, int dest_cpu),
214
215 TP_ARGS(p, dest_cpu),
216
217 TP_FIELDS(
218 ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
219 ctf_integer(pid_t, tid, p->pid)
220 ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
221 ctf_integer(int, orig_cpu, task_cpu(p))
222 ctf_integer(int, dest_cpu, dest_cpu)
223 )
224 )
225
/*
 * Common event class for per-task lifecycle events (free, exit, wait):
 * records the task's comm, TID and normalized prio.
 */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
	)
)
238
239 /*
240 * Tracepoint for freeing a task:
241 */
242 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_free,
243 TP_PROTO(struct task_struct *p),
244 TP_ARGS(p))
245
246
247 /*
248 * Tracepoint for a task exiting:
249 */
250 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_exit,
251 TP_PROTO(struct task_struct *p),
252 TP_ARGS(p))
253
254 /*
255 * Tracepoint for waiting on task to unschedule:
256 */
257 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
258 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
259 TP_PROTO(struct task_struct *p),
260 TP_ARGS(p))
261 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
262 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
263 TP_PROTO(struct rq *rq, struct task_struct *p),
264 TP_ARGS(rq, p))
265 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
266
267 /*
268 * Tracepoint for a waiting task:
269 */
270 LTTNG_TRACEPOINT_EVENT(sched_process_wait,
271
272 TP_PROTO(struct pid *pid),
273
274 TP_ARGS(pid),
275
276 TP_FIELDS(
277 ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
278 ctf_integer(pid_t, tid, pid_nr(pid))
279 ctf_integer(int, prio, current->prio - MAX_RT_PRIO)
280 )
281 )
282
283 /*
284 * Tracepoint for do_fork.
285 * Saving both TID and PID information, especially for the child, allows
286 * trace analyzers to distinguish between creation of a new process and
287 * creation of a new thread. Newly created processes will have child_tid
288 * == child_pid, while creation of a thread yields to child_tid !=
289 * child_pid.
290 */
291 LTTNG_TRACEPOINT_EVENT(sched_process_fork,
292
293 TP_PROTO(struct task_struct *parent, struct task_struct *child),
294
295 TP_ARGS(parent, child),
296
297 TP_FIELDS(
298 ctf_array_text(char, parent_comm, parent->comm, TASK_COMM_LEN)
299 ctf_integer(pid_t, parent_tid, parent->pid)
300 ctf_integer(pid_t, parent_pid, parent->tgid)
301 ctf_array_text(char, child_comm, child->comm, TASK_COMM_LEN)
302 ctf_integer(pid_t, child_tid, child->pid)
303 ctf_integer(pid_t, child_pid, child->tgid)
304 )
305 )
306
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
/*
 * Tracepoint for sending a signal (only exists on kernels < 2.6.33):
 * records the signal number and the receiving task's comm and TID.
 */
LTTNG_TRACEPOINT_EVENT(sched_signal_send,

	TP_PROTO(int sig, struct task_struct *p),

	TP_ARGS(sig, p),

	TP_FIELDS(
		ctf_integer(int, sig, sig)
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
	)
)
#endif
324
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
/*
 * Tracepoint for exec (kernels >= 3.4): records the binary's filename
 * from the binprm, the executing task's TID, and the pre-exec pid
 * (old_tid) supplied by the caller.
 */
LTTNG_TRACEPOINT_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_FIELDS(
		ctf_string(filename, bprm->filename)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(pid_t, old_tid, old_pid)
	)
)
#endif
343
344 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
345 /*
346 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
347 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
348 */
349 LTTNG_TRACEPOINT_EVENT_CLASS(sched_stat_template,
350
351 TP_PROTO(struct task_struct *tsk, u64 delay),
352
353 TP_ARGS(tsk, delay),
354
355 TP_FIELDS(
356 ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
357 ctf_integer(pid_t, tid, tsk->pid)
358 ctf_integer(u64, delay, delay)
359 )
360 )
361

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_wait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_sleep,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_iowait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
/*
 * Tracepoint for accounting blocked time (time the task is in
 * uninterruptible sleep).  Only exists on kernels >= 3.3.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_blocked,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))
#endif
395
396 /*
397 * Tracepoint for accounting runtime (time the task is executing
398 * on a CPU).
399 */
400 LTTNG_TRACEPOINT_EVENT(sched_stat_runtime,
401
402 TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
403
404 TP_ARGS(tsk, runtime, vruntime),
405
406 TP_FIELDS(
407 ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
408 ctf_integer(pid_t, tid, tsk->pid)
409 ctf_integer(u64, runtime, runtime)
410 ctf_integer(u64, vruntime, vruntime)
411 )
412 )
413 #endif
414
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority (kernels >= 2.6.37).  oldprio is the task's current prio,
 * newprio the boosted prio being applied; both normalized by MAX_RT_PRIO
 * like the other prio fields in this file.
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
		ctf_integer(int, newprio, newprio - MAX_RT_PRIO)
	)
)
#endif
434
435 #endif /* LTTNG_TRACE_SCHED_H */
436
437 /* This part must be outside protection */
438 #include "../../../probes/define_trace.h"
This page took 0.039927 seconds and 4 git commands to generate.