Fix: update sched prev_state instrumentation for upstream kernel
lttng-modules.git: instrumentation/events/lttng-module/sched.h
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_SCHED_H

#include <probes/lttng-tracepoint-event.h>
#include <linux/sched.h>
#include <linux/pid_namespace.h>
#include <linux/binfmts.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
#include <linux/sched/rt.h>
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
#define lttng_proc_inum ns.inum
#else
#define lttng_proc_inum proc_inum
#endif
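
/*
 * Note: kernel 3.19 moved the pid namespace's procfs inode number into the
 * common "struct ns_common" member (ns.inum); before that it lived directly
 * in struct pid_namespace as proc_inum. The lttng_proc_inum alias above
 * papers over that rename.
 */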

#define LTTNG_MAX_PID_NS_LEVEL 32

#ifndef _TRACE_SCHED_DEF_
#define _TRACE_SCHED_DEF_

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0))

static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * task_state_index() uses fls() and returns a value from the 0-8
	 * range. Decrement it by 1 (except for the TASK_RUNNING state,
	 * i.e. 0) before using it for the left shift operation to get the
	 * correct task->state mapping.
	 */
	state = task_state_index(p);

	return state ? (1 << (state - 1)) : state;
}
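
/*
 * Worked example of the mapping above: TASK_INTERRUPTIBLE (0x1) yields
 * task_state_index() == 1, which 1 << (1 - 1) maps back to 0x1;
 * TASK_UNINTERRUPTIBLE (0x2) yields index 2, mapped back to 1 << 1 == 0x2.
 * A preempted task is reported as TASK_REPORT_MAX, a bit above all of the
 * reconstructed state flags, so trace viewers can tell preemption apart
 * from a blocking state.
 */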

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))

static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * __get_task_state() uses fls() and returns a value from the 0-8
	 * range. Decrement it by 1 (except for the TASK_RUNNING state,
	 * i.e. 0) before using it for the left shift operation to get the
	 * correct task->state mapping.
	 */
	state = __get_task_state(p);

	return state ? (1 << (state - 1)) : state;
}
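
/*
 * Note: __get_task_state() is the 4.14 name of the helper that kernel 4.15
 * renamed to task_state_index(); the state reconstruction logic in this
 * branch is otherwise identical to the 4.15 branch above.
 */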

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))

static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
}
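
/*
 * On these kernels TASK_STATE_MAX is the first bit above all defined TASK_*
 * state flags; OR-ing it into TASK_RUNNING marks the switched-out task as
 * preempted rather than blocked, the same role TASK_REPORT_MAX plays on
 * >= 4.14 kernels above.
 */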

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (preempt_count() & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif /* CONFIG_PREEMPT */

	return state;
}
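
/*
 * Note: task_preempt_count() went away around 3.19, so this branch reads
 * the current CPU's preempt_count() instead; the BUG_ON() above documents
 * that this is only valid because sched_switch fires with prev == current.
 * The PREEMPT_ACTIVE bit itself was removed in 4.4, which is why newer
 * kernels pass an explicit "preempt" bool to the tracepoint instead.
 */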

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_preempt_count(p) & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING;
#endif

	return state;
}

#endif

#endif /* _TRACE_SCHED_DEF_ */

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_FIELDS(
		ctf_array_text(char, comm, t->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, t->pid)
	)
)

/*
 * Tracepoint for the return value of the kthread stopping:
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_FIELDS(
		ctf_integer(int, ret, ret)
	)
)

/*
 * Tracepoint for waking up a task:
 */
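/*
 * Note: kernel 4.3 removed the "success" argument from the wakeup
 * tracepoints and introduced sched_waking; the LTTNG_RT_KERNEL_RANGE()
 * clauses below match the PREEMPT_RT stable branches that backported
 * that change.
 */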
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) || \
	LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0))
LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, target_cpu, task_cpu(p))
	)
)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	TP_PROTO(struct task_struct *p, int success),

	TP_ARGS(p, success),
#else
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),

	TP_ARGS(rq, p, success),
#endif

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, success, success)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
		ctf_integer(int, target_cpu, task_cpu(p))
#endif
	)
)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) || \
	LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0))

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_waking,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))

LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
	TP_ARGS(rq, p, success))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
	TP_ARGS(rq, p, success))

#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
LTTNG_TRACEPOINT_EVENT(sched_switch,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(preempt, prev, next),
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	TP_PROTO(struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(prev, next),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
	TP_PROTO(struct rq *rq, struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(rq, prev, next),
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

	TP_FIELDS(
		ctf_array_text(char, prev_comm, prev->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, prev_tid, prev->pid)
		ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
		ctf_integer(long, prev_state, __trace_sched_switch_state(preempt, prev))
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
		ctf_integer(long, prev_state, __trace_sched_switch_state(prev))
#else
		ctf_integer(long, prev_state, prev->state)
#endif
		ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, next_tid, next->pid)
		ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO)
	)
)
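
/*
 * Trace consumers decode prev_state as a bitmask of TASK_* flags: 0 is
 * TASK_RUNNING, 0x1 TASK_INTERRUPTIBLE, 0x2 TASK_UNINTERRUPTIBLE, and so
 * on, plus the high preemption marker bit described above; the exact
 * marker value depends on the kernel the trace was taken on. The prio
 * fields are normalized by subtracting MAX_RT_PRIO, so a nice-0 task
 * (kernel priority 120) is reported as 20 and RT priorities come out
 * negative.
 */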

/*
 * Tracepoint for a task being migrated:
 */
LTTNG_TRACEPOINT_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, orig_cpu, task_cpu(p))
		ctf_integer(int, dest_cpu, dest_cpu)
	)
)

LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
	)
)

/*
 * Tracepoint for freeing a task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_free,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))


/*
 * Tracepoint for a task exiting:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_exit,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for waiting on task to unschedule:
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
	TP_PROTO(struct rq *rq, struct task_struct *p),
	TP_ARGS(rq, p))
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

/*
 * Tracepoint for a waiting task:
 */
LTTNG_TRACEPOINT_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_FIELDS(
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, pid_nr(pid))
		ctf_integer(int, prio, current->prio - MAX_RT_PRIO)
	)
)

/*
 * Tracepoint for do_fork.
 * Saving both TID and PID information, especially for the child, allows
 * trace analyzers to distinguish between creation of a new process and
 * creation of a new thread. Newly created processes have child_tid ==
 * child_pid, while creation of a thread yields child_tid != child_pid.
 */
LTTNG_TRACEPOINT_EVENT_CODE(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_locvar(
		pid_t vtids[LTTNG_MAX_PID_NS_LEVEL];
		unsigned int ns_level;
	),

	TP_code_pre(
		if (child) {
			struct pid *child_pid;
			unsigned int i;

			child_pid = task_pid(child);
			tp_locvar->ns_level =
				min_t(unsigned int, child_pid->level + 1,
					LTTNG_MAX_PID_NS_LEVEL);
			for (i = 0; i < tp_locvar->ns_level; i++)
				tp_locvar->vtids[i] = child_pid->numbers[i].nr;
		}
	),
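
	/*
	 * The loop above walks struct pid's numbers[] array, which holds the
	 * child's pid value at every pid namespace level, from the initial
	 * namespace (level 0) down to the namespace the child lives in
	 * (child_pid->level), capped at LTTNG_MAX_PID_NS_LEVEL entries. The
	 * vtids sequence emitted below therefore lets analyzers resolve the
	 * child's tid in any enclosing pid namespace.
	 */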

	TP_FIELDS(
		ctf_array_text(char, parent_comm, parent->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, parent_tid, parent->pid)
		ctf_integer(pid_t, parent_pid, parent->tgid)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
		ctf_integer(unsigned int, parent_ns_inum,
			({
				unsigned int parent_ns_inum = 0;

				if (parent) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(parent);
					if (pid_ns)
						parent_ns_inum =
							pid_ns->lttng_proc_inum;
				}
				parent_ns_inum;
			}))
#endif
		ctf_array_text(char, child_comm, child->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, child_tid, child->pid)
		ctf_sequence(pid_t, vtids, tp_locvar->vtids, u8, tp_locvar->ns_level)
		ctf_integer(pid_t, child_pid, child->tgid)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
		ctf_integer(unsigned int, child_ns_inum,
			({
				unsigned int child_ns_inum = 0;

				if (child) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(child);
					if (pid_ns)
						child_ns_inum =
							pid_ns->lttng_proc_inum;
				}
				child_ns_inum;
			}))
#endif
	),

	TP_code_post()
)

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
/*
 * Tracepoint for sending a signal:
 */
LTTNG_TRACEPOINT_EVENT(sched_signal_send,

	TP_PROTO(int sig, struct task_struct *p),

	TP_ARGS(sig, p),

	TP_FIELDS(
		ctf_integer(int, sig, sig)
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
	)
)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
/*
 * Tracepoint for exec:
 */
LTTNG_TRACEPOINT_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_FIELDS(
		ctf_string(filename, bprm->filename)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(pid_t, old_tid, old_pid)
	)
)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
/*
 * XXX: the sched_stat tracepoints below only apply to SCHED_OTHER/BATCH/IDLE;
 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(u64, delay, delay)
	)
)


/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_wait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_sleep,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_iowait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
/*
 * Tracepoint for accounting blocked time (time the task is in
 * uninterruptible sleep).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_blocked,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))
#endif

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
LTTNG_TRACEPOINT_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(u64, runtime, runtime)
		ctf_integer(u64, vruntime, vruntime)
	)
)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0) || \
	LTTNG_RT_KERNEL_RANGE(4,9,27,18, 4,10,0,0) || \
	LTTNG_RT_KERNEL_RANGE(4,11,5,1, 4,12,0,0))
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
		ctf_integer(int, newprio, pi_task ? pi_task->prio - MAX_RT_PRIO : tsk->prio - MAX_RT_PRIO)
	)
)
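
/*
 * Note: kernel 4.12 changed this tracepoint to pass the boosting donor
 * task (pi_task) instead of a precomputed newprio, hence the two variants
 * here; when there is no donor, the task's own priority is recorded as
 * the new priority.
 */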
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
		ctf_integer(int, newprio, newprio - MAX_RT_PRIO)
	)
)
#endif

#endif /* LTTNG_TRACE_SCHED_H */

/* This part must be outside protection */
#include <probes/define_trace.h>