f10f94d4d8bfacf61b10f455f7f863e80a541e51
[lttng-modules.git] / include / instrumentation / events / sched.h
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM sched
4
5 #if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define LTTNG_TRACE_SCHED_H
7
8 #include <lttng/tracepoint-event.h>
9 #include <linux/sched.h>
10 #include <linux/pid_namespace.h>
11 #include <linux/binfmts.h>
12 #include <lttng/kernel-version.h>
13 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,9,0))
14 #include <linux/sched/rt.h>
15 #endif
16
17 #define LTTNG_MAX_PID_NS_LEVEL 32
18
19 #ifndef _TRACE_SCHED_DEF_
20 #define _TRACE_SCHED_DEF_
21
22 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,18,0))
23
/*
 * Map the prev_state value passed by the kernel's sched_switch tracepoint
 * (kernel >= 5.18, where prev_state is an explicit tracepoint argument)
 * to the single-bit task state mask recorded in the trace.
 * Preempted tasks are reported as TASK_REPORT_MAX.
 */
static inline long __trace_sched_switch_state(bool preempt,
		unsigned int prev_state,
		struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	/* sched_switch fires in the context of the outgoing task. */
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * __task_state_index() uses fls() and returns a value from 0-8 range.
	 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
	 * it for left shift operation to get the correct task->state
	 * mapping.
	 */
	state = __task_state_index(prev_state, p->exit_state);

	return state ? (1 << (state - 1)) : state;
}
51
52 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,15,0))
53
/*
 * Map the outgoing task's state to the single-bit mask recorded in the
 * trace (kernels 4.15 to 5.17, which expose task_state_index()).
 * Preempted tasks are reported as TASK_REPORT_MAX.
 */
static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	/* sched_switch fires in the context of the outgoing task. */
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * task_state_index() uses fls() and returns a value from 0-8 range.
	 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
	 * it for left shift operation to get the correct task->state
	 * mapping.
	 */
	state = task_state_index(p);

	return state ? (1 << (state - 1)) : state;
}
79
80 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,14,0))
81
/*
 * Map the outgoing task's state to the single-bit mask recorded in the
 * trace (kernel 4.14, which exposes __get_task_state()).
 * Preempted tasks are reported as TASK_REPORT_MAX.
 */
static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	/* sched_switch fires in the context of the outgoing task. */
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * __get_task_state() uses fls() and returns a value from 0-8 range.
	 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
	 * it for left shift operation to get the correct task->state
	 * mapping.
	 */
	state = __get_task_state(p);

	return state ? (1 << (state - 1)) : state;
}
107
108 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,4,0))
109
110 static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
111 {
112 #ifdef CONFIG_SCHED_DEBUG
113 BUG_ON(p != current);
114 #endif /* CONFIG_SCHED_DEBUG */
115 /*
116 * Preemption ignores task state, therefore preempted tasks are always RUNNING
117 * (we will not have dequeued if state != RUNNING).
118 */
119 return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
120 }
121
122 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,19,0))
123
/*
 * Report the outgoing task's state (kernels 3.19 to 4.3).  On these
 * kernels preemption is detected via PREEMPT_ACTIVE in the current
 * preempt count rather than via a tracepoint argument.
 */
static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
#ifdef CONFIG_SCHED_DEBUG
	/* sched_switch fires in the context of the outgoing task. */
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (preempt_count() & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif /* CONFIG_PREEMPT */

	return state;
}
141
142 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,13,0))
143
/*
 * Report the outgoing task's state (kernels 3.13 to 3.18).  Uses
 * task_preempt_count() to detect PREEMPT_ACTIVE on the task itself.
 */
static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_preempt_count(p) & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}
158
159 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,2,0))
160
/*
 * Report the outgoing task's state (kernels 3.2 to 3.12).  Reads the
 * preempt count directly from the task's thread_info.
 */
static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}
175
176 #else
177
178 static inline long __trace_sched_switch_state(struct task_struct *p)
179 {
180 long state = p->state;
181
182 #ifdef CONFIG_PREEMPT
183 /*
184 * For all intents and purposes a preempted task is a running task.
185 */
186 if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
187 state = TASK_RUNNING;
188 #endif
189
190 return state;
191 }
192
193 #endif
194
195 #endif /* _TRACE_SCHED_DEF_ */
196
197 #ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
/*
 * Enumeration of the task state bitmask.
 * Only individual bit flags are enumerated here, not compositions of
 * states; sched_switch's prev_state field is rendered as a bitwise
 * combination of these values.
 */
LTTNG_TRACEPOINT_ENUM(task_state,
	TP_ENUM_VALUES(
		ctf_enum_value("TASK_RUNNING", TASK_RUNNING)
		ctf_enum_value("TASK_INTERRUPTIBLE", TASK_INTERRUPTIBLE)
		ctf_enum_value("TASK_UNINTERRUPTIBLE", TASK_UNINTERRUPTIBLE)
		ctf_enum_value("TASK_STOPPED", __TASK_STOPPED)
		ctf_enum_value("TASK_TRACED", __TASK_TRACED)
		ctf_enum_value("EXIT_DEAD", EXIT_DEAD)
		ctf_enum_value("EXIT_ZOMBIE", EXIT_ZOMBIE)

/* TASK_PARKED was introduced in kernel 3.9. */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,9,0))
		ctf_enum_value("TASK_PARKED", TASK_PARKED)
#endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,9,0)) */

		ctf_enum_value("TASK_DEAD", TASK_DEAD)
		ctf_enum_value("TASK_WAKEKILL", TASK_WAKEKILL)
		ctf_enum_value("TASK_WAKING", TASK_WAKING)

/* TASK_NOLOAD was introduced in kernel 4.2. */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,2,0))
		ctf_enum_value("TASK_NOLOAD", TASK_NOLOAD)
#endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,2,0)) */

/* TASK_NEW was introduced in kernel 4.8. */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,8,0))
		ctf_enum_value("TASK_NEW", TASK_NEW)
#endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,8,0)) */

		ctf_enum_value("TASK_STATE_MAX", TASK_STATE_MAX)
	)
)
231 #endif /* CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM */
232
/*
 * Tracepoint for calling kthread_stop, performed to end a kthread.
 * Records the target kthread's command name and TID.
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_FIELDS(
		ctf_array_text(char, comm, t->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, t->pid)
	)
)
247
/*
 * Tracepoint for the return value of the kthread stopping:
 * the integer result reported back by kthread_stop().
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_FIELDS(
		ctf_integer(int, ret, ret)
	)
)
261
262 /*
263 * Tracepoint for waking up a task:
264 */
265 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0) || \
266 LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
267 LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
268 LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
269 LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \
270 LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \
271 LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \
272 LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0))
/*
 * Wakeup event class for kernels >= 4.3 (and the RT backport ranges
 * checked above), where the kernel tracepoint dropped the 'success'
 * argument.  Records comm, tid, normalized priority and target CPU.
 */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		/* prio is recorded relative to MAX_RT_PRIO. */
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, target_cpu, task_cpu(p))
	)
)
286 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0)) */
/*
 * Wakeup event class for kernels before 4.3, whose tracepoint still
 * carries the 'success' argument alongside the woken task.
 */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p, int success),

	TP_ARGS(p, success),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		/* prio is recorded relative to MAX_RT_PRIO. */
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, success, success)
		ctf_integer(int, target_cpu, task_cpu(p))
	)
)
301 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0)) */
302
303 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0) || \
304 LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
305 LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
306 LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
307 LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \
308 LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \
309 LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \
310 LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0))
311
/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_waking,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))
334
335 #else
336
/*
 * Tracepoint for waking up a task (pre-4.3 'success' prototype):
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))
347
348 #endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0)) */
349
350 /*
351 * Tracepoint for task switches, performed by the scheduler:
352 */
353
354 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,18,0))
/*
 * sched_switch for kernels >= 5.18: the tracepoint passes prev_state
 * explicitly.  prev_state is folded through __trace_sched_switch_state()
 * so preempted tasks are reported as still running.
 */
LTTNG_TRACEPOINT_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next,
		 unsigned int prev_state),

	TP_ARGS(preempt, prev, next, prev_state),

	TP_FIELDS(
		ctf_array_text(char, prev_comm,	prev->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, prev_tid, prev->pid)
		/* Priorities are recorded relative to MAX_RT_PRIO. */
		ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO)
#ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
		ctf_enum(task_state, long, prev_state, __trace_sched_switch_state(preempt, prev_state, prev))
#else
		ctf_integer(long, prev_state, __trace_sched_switch_state(preempt, prev_state, prev))
#endif
		ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, next_tid, next->pid)
		ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO)
	)
)
378
379 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,4,0))
380
/*
 * sched_switch for kernels 4.4 to 5.17: the tracepoint passes a
 * 'preempt' flag and prev_state is derived from the task itself.
 */
LTTNG_TRACEPOINT_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(preempt, prev, next),

	TP_FIELDS(
		ctf_array_text(char, prev_comm,	prev->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, prev_tid, prev->pid)
		/* Priorities are recorded relative to MAX_RT_PRIO. */
		ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO)
#ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
		ctf_enum(task_state, long, prev_state, __trace_sched_switch_state(preempt, prev))
#else
		ctf_integer(long, prev_state, __trace_sched_switch_state(preempt, prev))
#endif
		ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, next_tid, next->pid)
		ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO)
	)
)
403
404 #else
405
/*
 * sched_switch for kernels before 4.4: no 'preempt' argument;
 * preemption is detected inside __trace_sched_switch_state().
 */
LTTNG_TRACEPOINT_EVENT(sched_switch,

	TP_PROTO(struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(prev, next),

	TP_FIELDS(
		ctf_array_text(char, prev_comm,	prev->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, prev_tid, prev->pid)
		/* Priorities are recorded relative to MAX_RT_PRIO. */
		ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO)
#ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
		ctf_enum(task_state, long, prev_state, __trace_sched_switch_state(prev))
#else
		ctf_integer(long, prev_state, __trace_sched_switch_state(prev))
#endif
		ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, next_tid, next->pid)
		ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO)
	)
)
427 #endif
428
/*
 * Tracepoint for a task being migrated:
 * records both the CPU the task currently sits on (orig_cpu) and the
 * destination CPU chosen by the scheduler.
 */
LTTNG_TRACEPOINT_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		/* prio is recorded relative to MAX_RT_PRIO. */
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, orig_cpu, task_cpu(p))
		ctf_integer(int, dest_cpu, dest_cpu)
	)
)
446
/*
 * Common event class for per-task lifecycle events (free, exit,
 * wait_task): records comm, tid and normalized priority.
 */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		/* prio is recorded relative to MAX_RT_PRIO. */
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
	)
)
459
/*
 * Tracepoint for freeing a task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_free,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))


/*
 * Tracepoint for a task exiting:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_exit,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for waiting on task to unschedule:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))
481
/*
 * Tracepoint for a waiting task:
 * comm and prio describe the waiting (current) task; tid is the pid
 * being waited for (pid_nr() of the struct pid argument).
 */
LTTNG_TRACEPOINT_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_FIELDS(
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, pid_nr(pid))
		/* prio is recorded relative to MAX_RT_PRIO. */
		ctf_integer(int, prio, current->prio - MAX_RT_PRIO)
	)
)
497
/*
 * Tracepoint for do_fork.
 * Saving both TID and PID information, especially for the child, allows
 * trace analyzers to distinguish between creation of a new process and
 * creation of a new thread. Newly created processes will have child_tid
 * == child_pid, while creation of a thread yields to child_tid !=
 * child_pid.
 */
LTTNG_TRACEPOINT_EVENT_CODE(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	/* Per-event scratch space: the child's vtid per pid namespace level. */
	TP_locvar(
		pid_t vtids[LTTNG_MAX_PID_NS_LEVEL];
		unsigned int ns_level;
	),

	/*
	 * Pre-capture code: snapshot the child's virtual TIDs across its
	 * pid namespace hierarchy, capped at LTTNG_MAX_PID_NS_LEVEL levels.
	 */
	TP_code_pre(
		if (child) {
			struct pid *child_pid;
			unsigned int i;

			child_pid = task_pid(child);
			tp_locvar->ns_level =
				min_t(unsigned int, child_pid->level + 1,
					LTTNG_MAX_PID_NS_LEVEL);
			for (i = 0; i < tp_locvar->ns_level; i++)
				tp_locvar->vtids[i] = child_pid->numbers[i].nr;
		}
	),

	TP_FIELDS(
		ctf_array_text(char, parent_comm, parent->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, parent_tid, parent->pid)
		ctf_integer(pid_t, parent_pid, parent->tgid)
/* Namespace inode numbers (ns.inum) are only available since 3.8. */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,8,0))
		ctf_integer(unsigned int, parent_ns_inum,
			({
				unsigned int parent_ns_inum = 0;

				if (parent) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(parent);
					if (pid_ns)
						parent_ns_inum =
							pid_ns->ns.inum;
				}
				parent_ns_inum;
			}))
#endif
		ctf_array_text(char, child_comm, child->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, child_tid, child->pid)
		/* One vtid per pid namespace level, captured in TP_code_pre. */
		ctf_sequence(pid_t, vtids, tp_locvar->vtids, u8, tp_locvar->ns_level)
		ctf_integer(pid_t, child_pid, child->tgid)
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,8,0))
		ctf_integer(unsigned int, child_ns_inum,
			({
				unsigned int child_ns_inum = 0;

				if (child) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(child);
					if (pid_ns)
						child_ns_inum =
							pid_ns->ns.inum;
				}
				child_ns_inum;
			}))
#endif
	),

	TP_code_post()
)
575
576 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0))
/*
 * Tracepoint for exec:
 * records the new binary's filename, the task's tid after exec, and
 * the tid it had before (old_pid).
 */
LTTNG_TRACEPOINT_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_FIELDS(
		ctf_string(filename, bprm->filename)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(pid_t, old_tid, old_pid)
	)
)
593 #endif
594
/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
 *
 * Common event class for the sched_stat_* events: records the task's
 * comm, tid and a delay in nanoseconds (u64).
 */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(u64, delay, delay)
	)
)
611
612
/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_wait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_sleep,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_iowait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/* sched_stat_blocked only exists since kernel 3.3. */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,3,0))
/*
 * Tracepoint for accounting blocked time (time the task is in uninterruptible).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_blocked,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))
#endif
645
/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).  Records both wall-clock runtime and the CFS vruntime.
 */
LTTNG_TRACEPOINT_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(u64, runtime, runtime)
		ctf_integer(u64, vruntime, vruntime)
	)
)
663
664 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,12,0) || \
665 LTTNG_RT_KERNEL_RANGE(4,9,27,18, 4,10,0,0) || \
666 LTTNG_RT_KERNEL_RANGE(4,11,5,1, 4,12,0,0))
/*
 * Tracepoint for showing priority inheritance modifying a tasks
 * priority (kernels >= 4.12 and RT backports): the new priority is
 * taken from the PI donor task, or is the task's own priority when
 * there is no donor (pi_task == NULL, i.e. PI boost removed).
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		/* Priorities are recorded relative to MAX_RT_PRIO. */
		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
		ctf_integer(int, newprio, pi_task ? pi_task->prio - MAX_RT_PRIO : tsk->prio - MAX_RT_PRIO)
	)
)
684 #else
/*
 * Tracepoint for showing priority inheritance modifying a tasks
 * priority (kernels before 4.12): the tracepoint passes the new
 * priority directly as an integer.
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		/* Priorities are recorded relative to MAX_RT_PRIO. */
		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
		ctf_integer(int, newprio, newprio - MAX_RT_PRIO)
	)
)
702 #endif
703
704 #endif /* LTTNG_TRACE_SCHED_H */
705
706 /* This part must be outside protection */
707 #include <lttng/define_trace.h>
This page took 0.042763 seconds and 3 git commands to generate.