/*
 * instrumentation sched: add missing fork pid info
 * [lttng-modules.git] / instrumentation / events / lttng-module / sched.h
 */
1 #undef TRACE_SYSTEM
2 #define TRACE_SYSTEM sched
3
4 #if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define _TRACE_SCHED_H
6
7 #include <linux/sched.h>
8 #include <linux/tracepoint.h>
9 #include <linux/binfmts.h>
10 #include <linux/version.h>
11
12 #ifndef _TRACE_SCHED_DEF_
13 #define _TRACE_SCHED_DEF_
14
15 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
16
17 static inline long __trace_sched_switch_state(struct task_struct *p)
18 {
19 long state = p->state;
20
21 #ifdef CONFIG_PREEMPT
22 /*
23 * For all intents and purposes a preempted task is a running task.
24 */
25 if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
26 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
27 state = TASK_RUNNING | TASK_STATE_MAX;
28 #else
29 state = TASK_RUNNING;
30 #endif
31 #endif
32
33 return state;
34 }
35
36 #endif
37
38 #endif /* _TRACE_SCHED_DEF_ */
39
40 /*
41 * Tracepoint for calling kthread_stop, performed to end a kthread:
42 */
43 TRACE_EVENT(sched_kthread_stop,
44
45 TP_PROTO(struct task_struct *t),
46
47 TP_ARGS(t),
48
49 TP_STRUCT__entry(
50 __array_text( char, comm, TASK_COMM_LEN )
51 __field( pid_t, tid )
52 ),
53
54 TP_fast_assign(
55 tp_memcpy(comm, t->comm, TASK_COMM_LEN)
56 tp_assign(tid, t->pid)
57 ),
58
59 TP_printk("comm=%s tid=%d", __entry->comm, __entry->tid)
60 )
61
62 /*
63 * Tracepoint for the return value of the kthread stopping:
64 */
65 TRACE_EVENT(sched_kthread_stop_ret,
66
67 TP_PROTO(int ret),
68
69 TP_ARGS(ret),
70
71 TP_STRUCT__entry(
72 __field( int, ret )
73 ),
74
75 TP_fast_assign(
76 tp_assign(ret, ret)
77 ),
78
79 TP_printk("ret=%d", __entry->ret)
80 )
81
82 /*
83 * Tracepoint for waking up a task:
84 */
85 DECLARE_EVENT_CLASS(sched_wakeup_template,
86
87 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
88 TP_PROTO(struct task_struct *p, int success),
89
90 TP_ARGS(p, success),
91 #else
92 TP_PROTO(struct rq *rq, struct task_struct *p, int success),
93
94 TP_ARGS(rq, p, success),
95 #endif
96
97 TP_STRUCT__entry(
98 __array_text( char, comm, TASK_COMM_LEN )
99 __field( pid_t, tid )
100 __field( int, prio )
101 __field( int, success )
102 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
103 __field( int, target_cpu )
104 #endif
105 ),
106
107 TP_fast_assign(
108 tp_memcpy(comm, p->comm, TASK_COMM_LEN)
109 tp_assign(tid, p->pid)
110 tp_assign(prio, p->prio)
111 tp_assign(success, success)
112 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
113 tp_assign(target_cpu, task_cpu(p))
114 #endif
115 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
116 )
117 TP_perf_assign(
118 __perf_task(p)
119 #endif
120 ),
121
122 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
123 TP_printk("comm=%s tid=%d prio=%d success=%d target_cpu=%03d",
124 __entry->comm, __entry->tid, __entry->prio,
125 __entry->success, __entry->target_cpu)
126 #else
127 TP_printk("comm=%s tid=%d prio=%d success=%d",
128 __entry->comm, __entry->tid, __entry->prio,
129 __entry->success)
130 #endif
131 )
132
133 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
134
135 DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
136 TP_PROTO(struct task_struct *p, int success),
137 TP_ARGS(p, success))
138
139 /*
140 * Tracepoint for waking up a new task:
141 */
142 DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
143 TP_PROTO(struct task_struct *p, int success),
144 TP_ARGS(p, success))
145
146 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
147
148 DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
149 TP_PROTO(struct rq *rq, struct task_struct *p, int success),
150 TP_ARGS(rq, p, success))
151
152 /*
153 * Tracepoint for waking up a new task:
154 */
155 DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
156 TP_PROTO(struct rq *rq, struct task_struct *p, int success),
157 TP_ARGS(rq, p, success))
158
159 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
160
161 /*
162 * Tracepoint for task switches, performed by the scheduler:
163 */
164 TRACE_EVENT(sched_switch,
165
166 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
167 TP_PROTO(struct task_struct *prev,
168 struct task_struct *next),
169
170 TP_ARGS(prev, next),
171 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
172 TP_PROTO(struct rq *rq, struct task_struct *prev,
173 struct task_struct *next),
174
175 TP_ARGS(rq, prev, next),
176 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
177
178 TP_STRUCT__entry(
179 __array_text( char, prev_comm, TASK_COMM_LEN )
180 __field( pid_t, prev_tid )
181 __field( int, prev_prio )
182 __field( long, prev_state )
183 __array_text( char, next_comm, TASK_COMM_LEN )
184 __field( pid_t, next_tid )
185 __field( int, next_prio )
186 ),
187
188 TP_fast_assign(
189 tp_memcpy(next_comm, next->comm, TASK_COMM_LEN)
190 tp_assign(prev_tid, prev->pid)
191 tp_assign(prev_prio, prev->prio - MAX_RT_PRIO)
192 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
193 tp_assign(prev_state, __trace_sched_switch_state(prev))
194 #else
195 tp_assign(prev_state, prev->state)
196 #endif
197 tp_memcpy(prev_comm, prev->comm, TASK_COMM_LEN)
198 tp_assign(next_tid, next->pid)
199 tp_assign(next_prio, next->prio - MAX_RT_PRIO)
200 ),
201
202 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
203 TP_printk("prev_comm=%s prev_tid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_tid=%d next_prio=%d",
204 __entry->prev_comm, __entry->prev_tid, __entry->prev_prio,
205 __entry->prev_state & (TASK_STATE_MAX-1) ?
206 __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
207 { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
208 { 16, "Z" }, { 32, "X" }, { 64, "x" },
209 { 128, "W" }) : "R",
210 __entry->prev_state & TASK_STATE_MAX ? "+" : "",
211 __entry->next_comm, __entry->next_tid, __entry->next_prio)
212 #else
213 TP_printk("prev_comm=%s prev_tid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_tid=%d next_prio=%d",
214 __entry->prev_comm, __entry->prev_tid, __entry->prev_prio,
215 __entry->prev_state ?
216 __print_flags(__entry->prev_state, "|",
217 { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
218 { 16, "Z" }, { 32, "X" }, { 64, "x" },
219 { 128, "W" }) : "R",
220 __entry->next_comm, __entry->next_tid, __entry->next_prio)
221 #endif
222 )
223
224 /*
225 * Tracepoint for a task being migrated:
226 */
227 TRACE_EVENT(sched_migrate_task,
228
229 TP_PROTO(struct task_struct *p, int dest_cpu),
230
231 TP_ARGS(p, dest_cpu),
232
233 TP_STRUCT__entry(
234 __array_text( char, comm, TASK_COMM_LEN )
235 __field( pid_t, tid )
236 __field( int, prio )
237 __field( int, orig_cpu )
238 __field( int, dest_cpu )
239 ),
240
241 TP_fast_assign(
242 tp_memcpy(comm, p->comm, TASK_COMM_LEN)
243 tp_assign(tid, p->pid)
244 tp_assign(prio, p->prio - MAX_RT_PRIO)
245 tp_assign(orig_cpu, task_cpu(p))
246 tp_assign(dest_cpu, dest_cpu)
247 ),
248
249 TP_printk("comm=%s tid=%d prio=%d orig_cpu=%d dest_cpu=%d",
250 __entry->comm, __entry->tid, __entry->prio,
251 __entry->orig_cpu, __entry->dest_cpu)
252 )
253
254 DECLARE_EVENT_CLASS(sched_process_template,
255
256 TP_PROTO(struct task_struct *p),
257
258 TP_ARGS(p),
259
260 TP_STRUCT__entry(
261 __array_text( char, comm, TASK_COMM_LEN )
262 __field( pid_t, tid )
263 __field( int, prio )
264 ),
265
266 TP_fast_assign(
267 tp_memcpy(comm, p->comm, TASK_COMM_LEN)
268 tp_assign(tid, p->pid)
269 tp_assign(prio, p->prio - MAX_RT_PRIO)
270 ),
271
272 TP_printk("comm=%s tid=%d prio=%d",
273 __entry->comm, __entry->tid, __entry->prio)
274 )
275
276 /*
277 * Tracepoint for freeing a task:
278 */
279 DEFINE_EVENT(sched_process_template, sched_process_free,
280 TP_PROTO(struct task_struct *p),
281 TP_ARGS(p))
282
283
284 /*
285 * Tracepoint for a task exiting:
286 */
287 DEFINE_EVENT(sched_process_template, sched_process_exit,
288 TP_PROTO(struct task_struct *p),
289 TP_ARGS(p))
290
291 /*
292 * Tracepoint for waiting on task to unschedule:
293 */
294 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
295 DEFINE_EVENT(sched_process_template, sched_wait_task,
296 TP_PROTO(struct task_struct *p),
297 TP_ARGS(p))
298 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
299 DEFINE_EVENT(sched_process_template, sched_wait_task,
300 TP_PROTO(struct rq *rq, struct task_struct *p),
301 TP_ARGS(rq, p))
302 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
303
304 /*
305 * Tracepoint for a waiting task:
306 */
307 TRACE_EVENT(sched_process_wait,
308
309 TP_PROTO(struct pid *pid),
310
311 TP_ARGS(pid),
312
313 TP_STRUCT__entry(
314 __array_text( char, comm, TASK_COMM_LEN )
315 __field( pid_t, tid )
316 __field( int, prio )
317 ),
318
319 TP_fast_assign(
320 tp_memcpy(comm, current->comm, TASK_COMM_LEN)
321 tp_assign(tid, pid_nr(pid))
322 tp_assign(prio, current->prio - MAX_RT_PRIO)
323 ),
324
325 TP_printk("comm=%s tid=%d prio=%d",
326 __entry->comm, __entry->tid, __entry->prio)
327 )
328
329 /*
330 * Tracepoint for do_fork.
331 * Saving both TID and PID information, especially for the child, allows
332 * trace analyzers to distinguish between creation of a new process and
333 * creation of a new thread. Newly created processes will have child_tid
334 * == child_pid, while creation of a thread yields to child_tid !=
335 * child_pid.
336 */
337 TRACE_EVENT(sched_process_fork,
338
339 TP_PROTO(struct task_struct *parent, struct task_struct *child),
340
341 TP_ARGS(parent, child),
342
343 TP_STRUCT__entry(
344 __array_text( char, parent_comm, TASK_COMM_LEN )
345 __field( pid_t, parent_tid )
346 __field( pid_t, parent_pid )
347 __array_text( char, child_comm, TASK_COMM_LEN )
348 __field( pid_t, child_tid )
349 __field( pid_t, child_pid )
350 ),
351
352 TP_fast_assign(
353 tp_memcpy(parent_comm, parent->comm, TASK_COMM_LEN)
354 tp_assign(parent_tid, parent->pid)
355 tp_assign(parent_pid, parent->tgid)
356 tp_memcpy(child_comm, child->comm, TASK_COMM_LEN)
357 tp_assign(child_tid, child->pid)
358 tp_assign(child_pid, child->tgid)
359 ),
360
361 TP_printk("comm=%s tid=%d child_comm=%s child_tid=%d",
362 __entry->parent_comm, __entry->parent_tid,
363 __entry->child_comm, __entry->child_tid)
364 )
365
366 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
367 /*
368 * Tracepoint for sending a signal:
369 */
370 TRACE_EVENT(sched_signal_send,
371
372 TP_PROTO(int sig, struct task_struct *p),
373
374 TP_ARGS(sig, p),
375
376 TP_STRUCT__entry(
377 __field( int, sig )
378 __array( char, comm, TASK_COMM_LEN )
379 __field( pid_t, pid )
380 ),
381
382 TP_fast_assign(
383 tp_memcpy(comm, p->comm, TASK_COMM_LEN)
384 tp_assign(pid, p->pid)
385 tp_assign(sig, sig)
386 ),
387
388 TP_printk("sig=%d comm=%s pid=%d",
389 __entry->sig, __entry->comm, __entry->pid)
390 )
391 #endif
392
393 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
394 /*
395 * Tracepoint for exec:
396 */
397 TRACE_EVENT(sched_process_exec,
398
399 TP_PROTO(struct task_struct *p, pid_t old_pid,
400 struct linux_binprm *bprm),
401
402 TP_ARGS(p, old_pid, bprm),
403
404 TP_STRUCT__entry(
405 __string( filename, bprm->filename )
406 __field( pid_t, tid )
407 __field( pid_t, old_tid )
408 ),
409
410 TP_fast_assign(
411 tp_strcpy(filename, bprm->filename)
412 tp_assign(tid, p->pid)
413 tp_assign(old_tid, old_pid)
414 ),
415
416 TP_printk("filename=%s tid=%d old_tid=%d", __get_str(filename),
417 __entry->tid, __entry->old_tid)
418 )
419 #endif
420
421 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
422 /*
423 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
424 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
425 */
426 DECLARE_EVENT_CLASS(sched_stat_template,
427
428 TP_PROTO(struct task_struct *tsk, u64 delay),
429
430 TP_ARGS(tsk, delay),
431
432 TP_STRUCT__entry(
433 __array_text( char, comm, TASK_COMM_LEN )
434 __field( pid_t, tid )
435 __field( u64, delay )
436 ),
437
438 TP_fast_assign(
439 tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
440 tp_assign(tid, tsk->pid)
441 tp_assign(delay, delay)
442 )
443 TP_perf_assign(
444 __perf_count(delay)
445 ),
446
447 TP_printk("comm=%s tid=%d delay=%Lu [ns]",
448 __entry->comm, __entry->tid,
449 (unsigned long long)__entry->delay)
450 )
451
452
453 /*
454 * Tracepoint for accounting wait time (time the task is runnable
455 * but not actually running due to scheduler contention).
456 */
457 DEFINE_EVENT(sched_stat_template, sched_stat_wait,
458 TP_PROTO(struct task_struct *tsk, u64 delay),
459 TP_ARGS(tsk, delay))
460
461 /*
462 * Tracepoint for accounting sleep time (time the task is not runnable,
463 * including iowait, see below).
464 */
465 DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
466 TP_PROTO(struct task_struct *tsk, u64 delay),
467 TP_ARGS(tsk, delay))
468
469 /*
470 * Tracepoint for accounting iowait time (time the task is not runnable
471 * due to waiting on IO to complete).
472 */
473 DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
474 TP_PROTO(struct task_struct *tsk, u64 delay),
475 TP_ARGS(tsk, delay))
476
477 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
478 /*
479 * Tracepoint for accounting blocked time (time the task is in uninterruptible).
480 */
481 DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
482 TP_PROTO(struct task_struct *tsk, u64 delay),
483 TP_ARGS(tsk, delay))
484 #endif
485
486 /*
487 * Tracepoint for accounting runtime (time the task is executing
488 * on a CPU).
489 */
490 TRACE_EVENT(sched_stat_runtime,
491
492 TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
493
494 TP_ARGS(tsk, runtime, vruntime),
495
496 TP_STRUCT__entry(
497 __array_text( char, comm, TASK_COMM_LEN )
498 __field( pid_t, tid )
499 __field( u64, runtime )
500 __field( u64, vruntime )
501 ),
502
503 TP_fast_assign(
504 tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
505 tp_assign(tid, tsk->pid)
506 tp_assign(runtime, runtime)
507 tp_assign(vruntime, vruntime)
508 )
509 TP_perf_assign(
510 __perf_count(runtime)
511 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
512 __perf_task(tsk)
513 #endif
514 ),
515
516 TP_printk("comm=%s tid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
517 __entry->comm, __entry->tid,
518 (unsigned long long)__entry->runtime,
519 (unsigned long long)__entry->vruntime)
520 )
521 #endif
522
523 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
524 /*
525 * Tracepoint for showing priority inheritance modifying a tasks
526 * priority.
527 */
528 TRACE_EVENT(sched_pi_setprio,
529
530 TP_PROTO(struct task_struct *tsk, int newprio),
531
532 TP_ARGS(tsk, newprio),
533
534 TP_STRUCT__entry(
535 __array_text( char, comm, TASK_COMM_LEN )
536 __field( pid_t, tid )
537 __field( int, oldprio )
538 __field( int, newprio )
539 ),
540
541 TP_fast_assign(
542 tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
543 tp_assign(tid, tsk->pid)
544 tp_assign(oldprio, tsk->prio - MAX_RT_PRIO)
545 tp_assign(newprio, newprio - MAX_RT_PRIO)
546 ),
547
548 TP_printk("comm=%s tid=%d oldprio=%d newprio=%d",
549 __entry->comm, __entry->tid,
550 __entry->oldprio, __entry->newprio)
551 )
552 #endif
553
554 #endif /* _TRACE_SCHED_H */
555
556 /* This part must be outside protection */
557 #include "../../../probes/define_trace.h"