Add namespace info in sched_fork and statedump
lttng-modules.git: instrumentation/events/lttng-module/sched.h
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_SCHED_H

#include "../../../probes/lttng-tracepoint-event.h"
#include <linux/sched.h>
#include <linux/pid_namespace.h>
#include <linux/binfmts.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
#include <linux/sched/rt.h>
#endif

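/*
 * On kernels >= 3.19 the pid namespace's proc inode number is reached
 * through ns.inum; older kernels expose it directly as proc_inum.  The
 * lttng_proc_inum macro hides that difference for the *_ns_inum fields
 * recorded below.
 */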
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
#define lttng_proc_inum ns.inum
#else
#define lttng_proc_inum proc_inum
#endif

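/*
 * Upper bound on the number of nested pid namespace levels for which
 * sched_process_fork records a virtual tid; deeper nesting is truncated
 * to this many entries in the vtids sequence.
 */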
#define LTTNG_MAX_PID_NS_LEVEL 32

#ifndef _TRACE_SCHED_DEF_
#define _TRACE_SCHED_DEF_

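/*
 * __trace_sched_switch_state() reports the state of the task being
 * switched out.  For all intents and purposes a preempted task is still
 * a running task, so each variant below checks the preempt count
 * (accessed differently depending on the kernel version) and reports
 * TASK_RUNNING in that case, ORing in TASK_STATE_MAX on kernels >= 3.2
 * to distinguish preemption from a plain TASK_RUNNING state.
 */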
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (preempt_count() & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif /* CONFIG_PREEMPT */

	return state;
}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_preempt_count(p) & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING;
#endif

	return state;
}

#endif

#endif /* _TRACE_SCHED_DEF_ */

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_FIELDS(
		ctf_array_text(char, comm, t->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, t->pid)
	)
)

/*
 * Tracepoint for the return value of the kthread stopping:
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_FIELDS(
		ctf_integer(int, ret, ret)
	)
)

/*
 * Tracepoint for waking up a task:
 */
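/*
 * Note: on kernels older than 2.6.35 the wakeup tracepoints also receive
 * the runqueue as their first argument; the recorded fields are the same
 * in both variants.
 */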
LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	TP_PROTO(struct task_struct *p, int success),

	TP_ARGS(p, success),
#else
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),

	TP_ARGS(rq, p, success),
#endif

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio)
		ctf_integer(int, success, success)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
		ctf_integer(int, target_cpu, task_cpu(p))
#endif
	)
)

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))

LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
	TP_ARGS(rq, p, success))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
	TP_ARGS(rq, p, success))

#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
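/*
 * prev_state records the state of the task being switched out (through
 * __trace_sched_switch_state() on kernels >= 2.6.35).  Priorities are
 * recorded relative to MAX_RT_PRIO, so real-time tasks appear as
 * negative values while normal tasks map to their nice value plus 20
 * (unless priority-boosted).
 */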
LTTNG_TRACEPOINT_EVENT(sched_switch,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	TP_PROTO(struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(prev, next),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
	TP_PROTO(struct rq *rq, struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(rq, prev, next),
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

	TP_FIELDS(
		ctf_array_text(char, prev_comm, prev->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, prev_tid, prev->pid)
		ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
		ctf_integer(long, prev_state, __trace_sched_switch_state(prev))
#else
		ctf_integer(long, prev_state, prev->state)
#endif
		ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, next_tid, next->pid)
		ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO)
	)
)

/*
 * Tracepoint for a task being migrated:
 */
LTTNG_TRACEPOINT_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, orig_cpu, task_cpu(p))
		ctf_integer(int, dest_cpu, dest_cpu)
	)
)

LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
	)
)

/*
 * Tracepoint for freeing a task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_free,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))


/*
 * Tracepoint for a task exiting:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_exit,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for waiting on task to unschedule:
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
	TP_PROTO(struct rq *rq, struct task_struct *p),
	TP_ARGS(rq, p))
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

/*
 * Tracepoint for a waiting task:
 */
LTTNG_TRACEPOINT_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_FIELDS(
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, pid_nr(pid))
		ctf_integer(int, prio, current->prio - MAX_RT_PRIO)
	)
)

/*
 * Tracepoint for do_fork.
 * Saving both TID and PID information, especially for the child, allows
 * trace analyzers to distinguish between creation of a new process and
 * creation of a new thread. Newly created processes will have child_tid
 * == child_pid, while creation of a thread yields child_tid !=
 * child_pid.
 */
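/*
 * Besides the parent/child tid and pid, this event records the child's
 * virtual tid in each pid namespace it belongs to (the vtids sequence,
 * gathered in TP_code before the fields are serialized) and, on kernels
 * >= 3.8, the proc inode number of the active pid namespace of both
 * parent and child, so analyzers can relate tids to namespaces.
 */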
LTTNG_TRACEPOINT_EVENT_CODE(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_locvar(
		pid_t vtids[LTTNG_MAX_PID_NS_LEVEL];
		unsigned int ns_level;
	),

	TP_code(
		if (child) {
			struct pid *child_pid;
			unsigned int i;

			child_pid = task_pid(child);
			tp_locvar->ns_level =
				min_t(unsigned int, child_pid->level + 1,
					LTTNG_MAX_PID_NS_LEVEL);
			for (i = 0; i < tp_locvar->ns_level; i++)
				tp_locvar->vtids[i] = child_pid->numbers[i].nr;
		}
	),

	TP_FIELDS(
		ctf_array_text(char, parent_comm, parent->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, parent_tid, parent->pid)
		ctf_integer(pid_t, parent_pid, parent->tgid)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
		ctf_integer(unsigned int, parent_ns_inum,
			({
				unsigned int parent_ns_inum = 0;

				if (parent) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(parent);
					if (pid_ns)
						parent_ns_inum =
							pid_ns->lttng_proc_inum;
				}
				parent_ns_inum;
			}))
#endif
		ctf_array_text(char, child_comm, child->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, child_tid, child->pid)
		ctf_sequence(pid_t, vtids, tp_locvar->vtids, u8, tp_locvar->ns_level)
		ctf_integer(pid_t, child_pid, child->tgid)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
		ctf_integer(unsigned int, child_ns_inum,
			({
				unsigned int child_ns_inum = 0;

				if (child) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(child);
					if (pid_ns)
						child_ns_inum =
							pid_ns->lttng_proc_inum;
				}
				child_ns_inum;
			}))
#endif
	)
)

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
/*
 * Tracepoint for sending a signal:
 */
LTTNG_TRACEPOINT_EVENT(sched_signal_send,

	TP_PROTO(int sig, struct task_struct *p),

	TP_ARGS(sig, p),

	TP_FIELDS(
		ctf_integer(int, sig, sig)
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
	)
)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
/*
 * Tracepoint for exec:
 */
LTTNG_TRACEPOINT_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_FIELDS(
		ctf_string(filename, bprm->filename)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(pid_t, old_tid, old_pid)
	)
)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
 * tasks; adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
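/*
 * Note: for the sched_stat_* events below, delay is the accumulated time
 * reported by the scheduler's statistics accounting, expressed in
 * nanoseconds (scheduler clock units).
 */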
LTTNG_TRACEPOINT_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(u64, delay, delay)
	)
)


/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_wait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_sleep,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_iowait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
/*
 * Tracepoint for accounting blocked time (time the task is in
 * uninterruptible sleep).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_blocked,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))
#endif

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
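/*
 * runtime is the execution time consumed since the last update, while
 * vruntime is the task's CFS virtual runtime, i.e. the same time
 * weighted by the task's load, which CFS uses for fairness decisions.
 */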
LTTNG_TRACEPOINT_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(u64, runtime, runtime)
		ctf_integer(u64, vruntime, vruntime)
	)
)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
		ctf_integer(int, newprio, newprio - MAX_RT_PRIO)
	)
)
#endif

#endif /* LTTNG_TRACE_SCHED_H */

/* This part must be outside protection */
#include "../../../probes/define_trace.h"