Cleanup: move to kernel style SPDX license identifiers
[lttng-modules.git] / instrumentation / events / lttng-module / sched.h
... / ...
CommitLineData
1/* SPDX-License-Identifier: GPL-2.0 */
2#undef TRACE_SYSTEM
3#define TRACE_SYSTEM sched
4
5#if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
6#define LTTNG_TRACE_SCHED_H
7
8#include <probes/lttng-tracepoint-event.h>
9#include <linux/sched.h>
10#include <linux/pid_namespace.h>
11#include <linux/binfmts.h>
12#include <linux/version.h>
13#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
14#include <linux/sched/rt.h>
15#endif
16
17#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
18#define lttng_proc_inum ns.inum
19#else
20#define lttng_proc_inum proc_inum
21#endif
22
23#define LTTNG_MAX_PID_NS_LEVEL 32
24
25#ifndef _TRACE_SCHED_DEF_
26#define _TRACE_SCHED_DEF_
27
28#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
29
30static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
31{
32#ifdef CONFIG_SCHED_DEBUG
33 BUG_ON(p != current);
34#endif /* CONFIG_SCHED_DEBUG */
35 /*
36 * Preemption ignores task state, therefore preempted tasks are always RUNNING
37 * (we will not have dequeued if state != RUNNING).
38 */
39 return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
40}
41
42#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
43
44static inline long __trace_sched_switch_state(struct task_struct *p)
45{
46 long state = p->state;
47
48#ifdef CONFIG_PREEMPT
49#ifdef CONFIG_SCHED_DEBUG
50 BUG_ON(p != current);
51#endif /* CONFIG_SCHED_DEBUG */
52 /*
53 * For all intents and purposes a preempted task is a running task.
54 */
55 if (preempt_count() & PREEMPT_ACTIVE)
56 state = TASK_RUNNING | TASK_STATE_MAX;
57#endif /* CONFIG_PREEMPT */
58
59 return state;
60}
61
62#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
63
64static inline long __trace_sched_switch_state(struct task_struct *p)
65{
66 long state = p->state;
67
68#ifdef CONFIG_PREEMPT
69 /*
70 * For all intents and purposes a preempted task is a running task.
71 */
72 if (task_preempt_count(p) & PREEMPT_ACTIVE)
73 state = TASK_RUNNING | TASK_STATE_MAX;
74#endif
75
76 return state;
77}
78
79#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
80
81static inline long __trace_sched_switch_state(struct task_struct *p)
82{
83 long state = p->state;
84
85#ifdef CONFIG_PREEMPT
86 /*
87 * For all intents and purposes a preempted task is a running task.
88 */
89 if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
90 state = TASK_RUNNING | TASK_STATE_MAX;
91#endif
92
93 return state;
94}
95
96#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
97
98static inline long __trace_sched_switch_state(struct task_struct *p)
99{
100 long state = p->state;
101
102#ifdef CONFIG_PREEMPT
103 /*
104 * For all intents and purposes a preempted task is a running task.
105 */
106 if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
107 state = TASK_RUNNING;
108#endif
109
110 return state;
111}
112
113#endif
114
115#endif /* _TRACE_SCHED_DEF_ */
116
/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 * records the command name and tid of the kthread being stopped.
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_FIELDS(
		ctf_array_text(char, comm, t->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, t->pid)
	)
)
131
/*
 * Tracepoint for the return value of the kthread stopping:
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_FIELDS(
		/* Value returned by the stopped kthread's threadfn. */
		ctf_integer(int, ret, ret)
	)
)
145
/*
 * Tracepoint for waking up a task:
 *
 * Two layouts exist. Kernels >= 4.3 (and the listed RT kernel ranges,
 * which backported the change) dropped the "success" argument from the
 * wakeup tracepoints; older kernels carry it, and kernels < 2.6.35
 * additionally pass the runqueue.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) || \
	LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0))
LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, target_cpu, task_cpu(p))
	)
)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
/* Older layout: includes the "success" flag (and rq before 2.6.35). */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	TP_PROTO(struct task_struct *p, int success),

	TP_ARGS(p, success),
#else
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),

	TP_ARGS(rq, p, success),
#endif

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, success, success)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
		/* target_cpu only exists on kernels >= 2.6.32. */
		ctf_integer(int, target_cpu, task_cpu(p))
#endif
	)
)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
194
/*
 * Instances of sched_wakeup_template; the set of events and their
 * prototypes follow the kernel layouts selected above.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) || \
	LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0))

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_waking,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))

/* Older layout with "success" argument; no sched_waking event here. */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

/* Kernels < 2.6.35 also pass the runqueue to the probe. */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
	TP_ARGS(rq, p, success))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
	TP_ARGS(rq, p, success))

#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
254
/*
 * Tracepoint for task switches, performed by the scheduler:
 * The upstream prototype changed twice: 4.4 added the explicit
 * "preempt" flag, and kernels < 2.6.35 also passed the runqueue.
 */
LTTNG_TRACEPOINT_EVENT(sched_switch,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(preempt, prev, next),
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	TP_PROTO(struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(prev, next),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
	TP_PROTO(struct rq *rq, struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(rq, prev, next),
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

	TP_FIELDS(
		ctf_array_text(char, prev_comm, prev->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, prev_tid, prev->pid)
		ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO)
		/* prev_state goes through the version-matching helper above. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
		ctf_integer(long, prev_state, __trace_sched_switch_state(preempt, prev))
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
		ctf_integer(long, prev_state, __trace_sched_switch_state(prev))
#else
		ctf_integer(long, prev_state, prev->state)
#endif
		ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, next_tid, next->pid)
		ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO)
	)
)
294
/*
 * Tracepoint for a task being migrated:
 */
LTTNG_TRACEPOINT_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		/* CPU the task is currently on vs. the CPU it moves to. */
		ctf_integer(int, orig_cpu, task_cpu(p))
		ctf_integer(int, dest_cpu, dest_cpu)
	)
)
312
/* Common layout for simple per-task events: comm, tid, priority. */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
	)
)

/*
 * Tracepoint for freeing a task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_free,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))


/*
 * Tracepoint for a task exiting:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_exit,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))
340
/*
 * Tracepoint for waiting on task to unschedule:
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
/* Kernels < 2.6.35 also pass the runqueue to the probe. */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
	TP_PROTO(struct rq *rq, struct task_struct *p),
	TP_ARGS(rq, p))
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
353
/*
 * Tracepoint for a waiting task:
 */
LTTNG_TRACEPOINT_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_FIELDS(
		/*
		 * comm/prio describe the waiting task (current); tid is the
		 * pid being waited for.
		 */
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, pid_nr(pid))
		ctf_integer(int, prio, current->prio - MAX_RT_PRIO)
	)
)
369
/*
 * Tracepoint for do_fork.
 * Saving both TID and PID information, especially for the child, allows
 * trace analyzers to distinguish between creation of a new process and
 * creation of a new thread. Newly created processes will have child_tid
 * == child_pid, while creation of a thread yields to child_tid !=
 * child_pid.
 */
LTTNG_TRACEPOINT_EVENT_CODE(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	/* Per-event scratch space, filled in by TP_code_pre below. */
	TP_locvar(
		pid_t vtids[LTTNG_MAX_PID_NS_LEVEL];	/* child tid per pid ns level */
		unsigned int ns_level;			/* valid entries in vtids */
	),

	TP_code_pre(
		if (child) {
			struct pid *child_pid;
			unsigned int i;

			child_pid = task_pid(child);
			/* Clamp the namespace depth to the vtids array size. */
			tp_locvar->ns_level =
				min_t(unsigned int, child_pid->level + 1,
					LTTNG_MAX_PID_NS_LEVEL);
			for (i = 0; i < tp_locvar->ns_level; i++)
				tp_locvar->vtids[i] = child_pid->numbers[i].nr;
		}
		/*
		 * NOTE(review): TP_FIELDS below dereferences child (and reads
		 * tp_locvar->ns_level) unconditionally, while the guard above
		 * tolerates child == NULL; presumably child is never NULL when
		 * this tracepoint fires — confirm against the caller.
		 */
	),

	TP_FIELDS(
		ctf_array_text(char, parent_comm, parent->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, parent_tid, parent->pid)
		ctf_integer(pid_t, parent_pid, parent->tgid)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
		/* Inode number of the parent's active pid namespace (0 if none). */
		ctf_integer(unsigned int, parent_ns_inum,
			({
				unsigned int parent_ns_inum = 0;

				if (parent) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(parent);
					if (pid_ns)
						parent_ns_inum =
							pid_ns->lttng_proc_inum;
				}
				parent_ns_inum;
			}))
#endif
		ctf_array_text(char, child_comm, child->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, child_tid, child->pid)
		/* Child tid as seen from each of its pid namespace levels. */
		ctf_sequence(pid_t, vtids, tp_locvar->vtids, u8, tp_locvar->ns_level)
		ctf_integer(pid_t, child_pid, child->tgid)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
		/* Inode number of the child's active pid namespace (0 if none). */
		ctf_integer(unsigned int, child_ns_inum,
			({
				unsigned int child_ns_inum = 0;

				if (child) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(child);
					if (pid_ns)
						child_ns_inum =
							pid_ns->lttng_proc_inum;
				}
				child_ns_inum;
			}))
#endif
	),

	TP_code_post()
)
447
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
/*
 * Tracepoint for sending a signal:
 * only present on kernels < 2.6.33.
 */
LTTNG_TRACEPOINT_EVENT(sched_signal_send,

	TP_PROTO(int sig, struct task_struct *p),

	TP_ARGS(sig, p),

	TP_FIELDS(
		/* Signal number, plus comm/tid of the receiving task. */
		ctf_integer(int, sig, sig)
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
	)
)
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)) */
465
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
/*
 * Tracepoint for exec:
 */
LTTNG_TRACEPOINT_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_FIELDS(
		/* Path being executed, current tid, and the pre-exec tid. */
		ctf_string(filename, bprm->filename)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(pid_t, old_tid, old_pid)
	)
)
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
484
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
/* Common layout for the sched_stat_* delay events: comm, tid, delay. */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(u64, delay, delay)
	)
)


/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_wait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_sleep,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_iowait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
/*
 * Tracepoint for accounting blocked time (time the task is in uninterruptible).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_blocked,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) */

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
LTTNG_TRACEPOINT_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(u64, runtime, runtime)
		ctf_integer(u64, vruntime, vruntime)
	)
)
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) */
555
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0) || \
	LTTNG_RT_KERNEL_RANGE(4,9,27,18, 4,10,0,0) || \
	LTTNG_RT_KERNEL_RANGE(4,11,5,1, 4,12,0,0))
/*
 * Tracepoint for showing priority inheritance modifying a tasks
 * priority. Kernels >= 4.12 (and the listed RT ranges) pass the
 * boosting pi_task instead of the raw new priority.
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
		/* pi_task may be NULL: fall back to the task's own priority. */
		ctf_integer(int, newprio, pi_task ? pi_task->prio - MAX_RT_PRIO : tsk->prio - MAX_RT_PRIO)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
/*
 * Tracepoint for showing priority inheritance modifying a tasks
 * priority. Older kernels pass the new priority directly.
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
		ctf_integer(int, newprio, newprio - MAX_RT_PRIO)
	)
)
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) */
595
596#endif /* LTTNG_TRACE_SCHED_H */
597
598/* This part must be outside protection */
599#include <probes/define_trace.h>
This page took 0.024723 seconds and 4 git commands to generate.