Update kernel probes to match kernel versions in more detail
[lttng-modules.git] / instrumentation / events / lttng-module / sched.h
1 #undef TRACE_SYSTEM
2 #define TRACE_SYSTEM sched
3
4 #if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define _TRACE_SCHED_H
6
7 #include <linux/sched.h>
8 #include <linux/tracepoint.h>
9 #include <linux/binfmts.h>
10 #include <linux/version.h>
11
12 #ifndef _TRACE_SCHED_DEF_
13 #define _TRACE_SCHED_DEF_
14
15 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
16
17 static inline long __trace_sched_switch_state(struct task_struct *p)
18 {
19 long state = p->state;
20
21 #ifdef CONFIG_PREEMPT
22 /*
23 * For all intents and purposes a preempted task is a running task.
24 */
25 if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
26 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
27 state = TASK_RUNNING | TASK_STATE_MAX;
28 #else
29 state = TASK_RUNNING;
30 #endif
31 #endif
32
33 return state;
34 }
35
36 #endif
37
38 #endif /* _TRACE_SCHED_DEF_ */
39
/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 * records the command name and tid of the thread being stopped.
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array_text(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	tid	)
	),

	TP_fast_assign(
		tp_memcpy(comm, t->comm, TASK_COMM_LEN)
		tp_assign(tid, t->pid)
	),

	TP_printk("comm=%s tid=%d", __entry->comm, __entry->tid)
)
61
/*
 * Tracepoint for the return value of the kthread stopping:
 * captures the integer result passed back by kthread_stop().
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field(	int,	ret	)
	),

	TP_fast_assign(
		tp_assign(ret, ret)
	),

	TP_printk("ret=%d", __entry->ret)
)
81
/*
 * Tracepoint class for waking up a task.
 *
 * Kernels before 2.6.35 pass the runqueue as first argument; it was
 * dropped upstream afterwards, hence the two prototypes below.
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	TP_PROTO(struct task_struct *p, int success),

	TP_ARGS(p, success),
#else
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),

	TP_ARGS(rq, p, success),
#endif

	TP_STRUCT__entry(
		__array_text(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	tid	)
		__field(	int,	prio	)
		__field(	int,	success	)
		/* target_cpu was introduced upstream in 2.6.32. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
		__field(	int,	target_cpu	)
#endif
	),

	TP_fast_assign(
		tp_memcpy(comm, p->comm, TASK_COMM_LEN)
		tp_assign(tid, p->pid)
		/*
		 * NOTE(review): prio is recorded raw here, while most other
		 * events in this file record p->prio - MAX_RT_PRIO — confirm
		 * whether this asymmetry is intended.
		 */
		tp_assign(prio, p->prio)
		tp_assign(success, success)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
		tp_assign(target_cpu, task_cpu(p))
#endif
		/*
		 * On >= 3.6.0, close TP_fast_assign() here and open
		 * TP_perf_assign(); the trailing ")," below then closes
		 * TP_perf_assign() instead of TP_fast_assign(). The parens
		 * only look unbalanced within each branch.
		 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
	)
	TP_perf_assign(
		__perf_task(p)
#endif
	),

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
	TP_printk("comm=%s tid=%d prio=%d success=%d target_cpu=%03d",
		  __entry->comm, __entry->tid, __entry->prio,
		  __entry->success, __entry->target_cpu)
#else
	TP_printk("comm=%s tid=%d prio=%d success=%d",
		  __entry->comm, __entry->tid, __entry->prio,
		  __entry->success)
#endif
)
132
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))

/*
 * Tracepoint for waking up a task (prototype without the runqueue
 * argument, dropped upstream in 2.6.35):
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p, int success),
	     TP_ARGS(p, success))

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p, int success),
	     TP_ARGS(p, success))

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

/*
 * Tracepoint for waking up a task (pre-2.6.35 prototype, with the
 * runqueue as first argument):
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct rq *rq, struct task_struct *p, int success),
	     TP_ARGS(rq, p, success))

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct rq *rq, struct task_struct *p, int success),
	     TP_ARGS(rq, p, success))

#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
160
/*
 * Tracepoint for task switches, performed by the scheduler:
 * records comm/tid/prio of both prev and next, plus prev's state.
 * prio values are normalized by subtracting MAX_RT_PRIO.
 */
TRACE_EVENT(sched_switch,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	TP_PROTO(struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(prev, next),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
	TP_PROTO(struct rq *rq, struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(rq, prev, next),
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

	TP_STRUCT__entry(
		__array_text(	char,	prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	prev_tid	)
		__field(	int,	prev_prio	)
		__field(	long,	prev_state	)
		__array_text(	char,	next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	next_tid	)
		__field(	int,	next_prio	)
	),

	TP_fast_assign(
		tp_memcpy(next_comm, next->comm, TASK_COMM_LEN)
		tp_assign(prev_tid, prev->pid)
		tp_assign(prev_prio, prev->prio - MAX_RT_PRIO)
		/*
		 * >= 2.6.35: let the helper fold preemption into the
		 * recorded state; older kernels record p->state verbatim.
		 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
		tp_assign(prev_state, __trace_sched_switch_state(prev))
#else
		tp_assign(prev_state, prev->state)
#endif
		tp_memcpy(prev_comm, prev->comm, TASK_COMM_LEN)
		tp_assign(next_tid, next->pid)
		tp_assign(next_prio, next->prio - MAX_RT_PRIO)
	),

	/*
	 * State letters mirror /proc conventions (S sleeping, D disk
	 * sleep, T stopped, t traced, Z zombie, X/x dead, W paging,
	 * R running). On >= 3.2 the TASK_STATE_MAX bit prints a "+"
	 * suffix meaning the task was preempted while runnable.
	 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
	TP_printk("prev_comm=%s prev_tid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_tid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_tid, __entry->prev_prio,
		__entry->prev_state & (TASK_STATE_MAX-1) ?
		  __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
				{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
				{ 128, "W" }) : "R",
		__entry->prev_state & TASK_STATE_MAX ? "+" : "",
		__entry->next_comm, __entry->next_tid, __entry->next_prio)
#else
	TP_printk("prev_comm=%s prev_tid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_tid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_tid, __entry->prev_prio,
		__entry->prev_state ?
		  __print_flags(__entry->prev_state, "|",
				{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
				{ 128, "W" }) : "R",
		__entry->next_comm, __entry->next_tid, __entry->next_prio)
#endif
)
223
/*
 * Tracepoint for a task being migrated between CPUs:
 * orig_cpu is read via task_cpu(p) at trace time; dest_cpu is the
 * caller-supplied destination.
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array_text(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	tid	)
		__field(	int,	prio	)
		__field(	int,	orig_cpu	)
		__field(	int,	dest_cpu	)
	),

	TP_fast_assign(
		tp_memcpy(comm, p->comm, TASK_COMM_LEN)
		tp_assign(tid, p->pid)
		tp_assign(prio, p->prio - MAX_RT_PRIO)
		tp_assign(orig_cpu, task_cpu(p))
		tp_assign(dest_cpu, dest_cpu)
	),

	TP_printk("comm=%s tid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->tid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
)
253
/*
 * Common event class for process lifecycle tracepoints: records the
 * task's comm, tid, and normalized priority.
 */
DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array_text(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	tid	)
		__field(	int,	prio	)
	),

	TP_fast_assign(
		tp_memcpy(comm, p->comm, TASK_COMM_LEN)
		tp_assign(tid, p->pid)
		tp_assign(prio, p->prio - MAX_RT_PRIO)
	),

	TP_printk("comm=%s tid=%d prio=%d",
		  __entry->comm, __entry->tid, __entry->prio)
)
275
/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p))


/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p))

/*
 * Tracepoint for waiting on task to unschedule.
 * Pre-2.6.35 kernels pass the runqueue as an extra first argument.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
DEFINE_EVENT(sched_process_template, sched_wait_task,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p))
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	     TP_PROTO(struct rq *rq, struct task_struct *p),
	     TP_ARGS(rq, p))
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
303
/*
 * Tracepoint for a waiting task: comm and prio come from the waiting
 * (current) task, while tid is the pid being waited for.
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array_text(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	tid	)
		__field(	int,	prio	)
	),

	TP_fast_assign(
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
		tp_assign(tid, pid_nr(pid))
		tp_assign(prio, current->prio - MAX_RT_PRIO)
	),

	TP_printk("comm=%s tid=%d prio=%d",
		  __entry->comm, __entry->tid, __entry->prio)
)
328
/*
 * Tracepoint for do_fork:
 * records the comm and tid of both the parent and the new child.
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array_text(	char,	parent_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	parent_tid	)
		__array_text(	char,	child_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	child_tid	)
	),

	TP_fast_assign(
		tp_memcpy(parent_comm, parent->comm, TASK_COMM_LEN)
		tp_assign(parent_tid, parent->pid)
		tp_memcpy(child_comm, child->comm, TASK_COMM_LEN)
		tp_assign(child_tid, child->pid)
	),

	TP_printk("comm=%s tid=%d child_comm=%s child_tid=%d",
		  __entry->parent_comm, __entry->parent_tid,
		  __entry->child_comm, __entry->child_tid)
)
356
357 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
358 /*
359 * Tracepoint for sending a signal:
360 */
361 TRACE_EVENT(sched_signal_send,
362
363 TP_PROTO(int sig, struct task_struct *p),
364
365 TP_ARGS(sig, p),
366
367 TP_STRUCT__entry(
368 __field( int, sig )
369 __array( char, comm, TASK_COMM_LEN )
370 __field( pid_t, pid )
371 ),
372
373 TP_fast_assign(
374 tp_memcpy(comm, p->comm, TASK_COMM_LEN)
375 tp_assign(pid, p->pid)
376 tp_assign(sig, sig)
377 ),
378
379 TP_printk("sig=%d comm=%s pid=%d",
380 __entry->sig, __entry->comm, __entry->pid)
381 )
382 #endif
383
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
/*
 * Tracepoint for exec (available since kernel 3.4): records the
 * executed filename, the new tid, and the pre-exec pid.
 */
TRACE_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string(	filename,	bprm->filename	)
		__field(	pid_t,	tid	)
		__field(	pid_t,	old_tid	)
	),

	TP_fast_assign(
		tp_strcpy(filename, bprm->filename)
		tp_assign(tid, p->pid)
		tp_assign(old_tid, old_pid)
	),

	TP_printk("filename=%s tid=%d old_tid=%d", __get_str(filename),
		  __entry->tid, __entry->old_tid)
)
#endif
411
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
/*
 * Common event class for scheduler statistics: a task plus a delay
 * in nanoseconds. Note the ")" / "TP_perf_assign(" split below:
 * TP_fast_assign() is closed early so the trailing ")," closes
 * TP_perf_assign() instead.
 */
DECLARE_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_STRUCT__entry(
		__array_text(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	tid	)
		__field(	u64,	delay	)
	),

	TP_fast_assign(
		tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
		tp_assign(tid, tsk->pid)
		tp_assign(delay, delay)
	)
	TP_perf_assign(
		__perf_count(delay)
	),

	TP_printk("comm=%s tid=%d delay=%Lu [ns]",
		  __entry->comm, __entry->tid,
		  (unsigned long long)__entry->delay)
)
442
443
/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay))

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
/*
 * Tracepoint for accounting blocked time (time the task is in
 * uninterruptible sleep). Upstream event exists since kernel 3.3.
 */
DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay))
#endif
476
/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU). runtime and vruntime are in nanoseconds.
 */
TRACE_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_STRUCT__entry(
		__array_text(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	tid	)
		__field(	u64,	runtime	)
		__field(	u64,	vruntime	)
	),

	/*
	 * TP_fast_assign() is closed early so TP_perf_assign() can
	 * follow; the trailing ")," closes TP_perf_assign().
	 * __perf_task() only exists on kernels >= 3.6.
	 */
	TP_fast_assign(
		tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
		tp_assign(tid, tsk->pid)
		tp_assign(runtime, runtime)
		tp_assign(vruntime, vruntime)
	)
	TP_perf_assign(
		__perf_count(runtime)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
		__perf_task(tsk)
#endif
	),

	TP_printk("comm=%s tid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
		  __entry->comm, __entry->tid,
		  (unsigned long long)__entry->runtime,
		  (unsigned long long)__entry->vruntime)
)
#endif
513
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
/*
 * Tracepoint for showing priority inheritance modifying a tasks
 * priority (upstream event exists since kernel 2.6.37). Both old
 * and new priorities are normalized by subtracting MAX_RT_PRIO.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_STRUCT__entry(
		__array_text(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	tid	)
		__field(	int,	oldprio	)
		__field(	int,	newprio	)
	),

	TP_fast_assign(
		tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
		tp_assign(tid, tsk->pid)
		tp_assign(oldprio, tsk->prio - MAX_RT_PRIO)
		tp_assign(newprio, newprio - MAX_RT_PRIO)
	),

	TP_printk("comm=%s tid=%d oldprio=%d newprio=%d",
		  __entry->comm, __entry->tid,
		  __entry->oldprio, __entry->newprio)
)
#endif
544
545 #endif /* _TRACE_SCHED_H */
546
547 /* This part must be outside protection */
548 #include "../../../probes/define_trace.h"
This page took 0.04082 seconds and 4 git commands to generate.