// SPDX-FileCopyrightText: 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
//
// SPDX-License-Identifier: GPL-2.0-only

#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_SCHED_H

#include <lttng/tracepoint-event.h>
#include <linux/sched.h>
#include <linux/pid_namespace.h>
#include <linux/binfmts.h>
#include <lttng/kernel-version.h>
#include <linux/sched/rt.h>

#define LTTNG_MAX_PID_NS_LEVEL 32

#ifndef _TRACE_SCHED_DEF_
#define _TRACE_SCHED_DEF_

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,18,0) \
	|| LTTNG_RHEL_KERNEL_RANGE(5,14,0,162,0,0, 5,15,0,0,0,0))

static inline long __trace_sched_switch_state(bool preempt,
		unsigned int prev_state,
		struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * __task_state_index() uses fls() and returns a value in the 0-8 range.
	 * Decrement it by 1 (except for the TASK_RUNNING state, i.e. 0) before
	 * using it for the left-shift operation to get the correct task->state
	 * mapping.
	 */
	state = __task_state_index(prev_state, p->exit_state);

	return state ? (1 << (state - 1)) : state;
}
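
/*
 * Worked example of the mapping above (a sketch assuming the usual task
 * state bit values from <linux/sched.h>): TASK_INTERRUPTIBLE (0x0001)
 * yields index 1 and is reported as 1 << 0 = 0x0001, TASK_UNINTERRUPTIBLE
 * (0x0002) yields index 2 and is reported as 1 << 1 = 0x0002, while
 * TASK_RUNNING stays 0. A preempted task is reported as TASK_REPORT_MAX
 * regardless of its prev_state.
 */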

#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,15,0))

static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * task_state_index() uses fls() and returns a value in the 0-8 range.
	 * Decrement it by 1 (except for the TASK_RUNNING state, i.e. 0) before
	 * using it for the left-shift operation to get the correct task->state
	 * mapping.
	 */
	state = task_state_index(p);

	return state ? (1 << (state - 1)) : state;
}

#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,14,0))

static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * __get_task_state() uses fls() and returns a value in the 0-8 range.
	 * Decrement it by 1 (except for the TASK_RUNNING state, i.e. 0) before
	 * using it for the left-shift operation to get the correct task->state
	 * mapping.
	 */
	state = __get_task_state(p);

	return state ? (1 << (state - 1)) : state;
}

#else

static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
}
#endif

#endif /* _TRACE_SCHED_DEF_ */

#ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
/*
 * Enumeration of the task state bitmask.
 * Only bit flags are enumerated here, not composition of states.
 */
LTTNG_TRACEPOINT_ENUM(task_state,
	TP_ENUM_VALUES(
		ctf_enum_value("TASK_RUNNING", TASK_RUNNING)
		ctf_enum_value("TASK_INTERRUPTIBLE", TASK_INTERRUPTIBLE)
		ctf_enum_value("TASK_UNINTERRUPTIBLE", TASK_UNINTERRUPTIBLE)
		ctf_enum_value("TASK_STOPPED", __TASK_STOPPED)
		ctf_enum_value("TASK_TRACED", __TASK_TRACED)
		ctf_enum_value("EXIT_DEAD", EXIT_DEAD)
		ctf_enum_value("EXIT_ZOMBIE", EXIT_ZOMBIE)
		ctf_enum_value("TASK_PARKED", TASK_PARKED)
		ctf_enum_value("TASK_DEAD", TASK_DEAD)
		ctf_enum_value("TASK_WAKEKILL", TASK_WAKEKILL)
		ctf_enum_value("TASK_WAKING", TASK_WAKING)
		ctf_enum_value("TASK_NOLOAD", TASK_NOLOAD)

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,8,0))
		ctf_enum_value("TASK_NEW", TASK_NEW)
#endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,8,0)) */

		ctf_enum_value("TASK_STATE_MAX", TASK_STATE_MAX)
	)
)
#endif /* CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM */

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_FIELDS(
		ctf_array_text(char, comm, t->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, t->pid)
	)
)

/*
 * Tracepoint for the return value of the kthread stopping:
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_FIELDS(
		ctf_integer(int, ret, ret)
	)
)

/*
 * Tracepoint for waking up a task:
 */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, target_cpu, task_cpu(p))
	)
)

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_waking,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for task switches, performed by the scheduler:
 */

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,18,0) \
	|| LTTNG_RHEL_KERNEL_RANGE(5,14,0,162,0,0, 5,15,0,0,0,0))
LTTNG_TRACEPOINT_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		struct task_struct *prev,
		struct task_struct *next,
		unsigned int prev_state),

	TP_ARGS(preempt, prev, next, prev_state),

	TP_FIELDS(
		ctf_array_text(char, prev_comm, prev->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, prev_tid, prev->pid)
		ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO)
#ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
		ctf_enum(task_state, long, prev_state, __trace_sched_switch_state(preempt, prev_state, prev))
#else
		ctf_integer(long, prev_state, __trace_sched_switch_state(preempt, prev_state, prev))
#endif
		ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, next_tid, next->pid)
		ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO)
	)
)

#else

LTTNG_TRACEPOINT_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		struct task_struct *prev,
		struct task_struct *next),

	TP_ARGS(preempt, prev, next),

	TP_FIELDS(
		ctf_array_text(char, prev_comm, prev->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, prev_tid, prev->pid)
		ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO)
#ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
		ctf_enum(task_state, long, prev_state, __trace_sched_switch_state(preempt, prev))
#else
		ctf_integer(long, prev_state, __trace_sched_switch_state(preempt, prev))
#endif
		ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, next_tid, next->pid)
		ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO)
	)
)
#endif

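/*
 * Note on interpreting the sched_switch prev_state field above (a sketch
 * assuming the usual task state bit values): 0 means the previous task was
 * still runnable (TASK_RUNNING), 0x0001 that it entered an interruptible
 * sleep, 0x0002 an uninterruptible sleep; on kernels >= 4.14 a task that
 * was preempted while runnable is reported as TASK_REPORT_MAX.
 */
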
/*
 * Tracepoint for a task being migrated:
 */
LTTNG_TRACEPOINT_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, orig_cpu, task_cpu(p))
		ctf_integer(int, dest_cpu, dest_cpu)
	)
)

LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
	)
)

/*
 * Tracepoint for freeing a task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_free,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))


/*
 * Tracepoint for a task exiting:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_exit,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for waiting on task to unschedule:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for a waiting task:
 */
LTTNG_TRACEPOINT_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_FIELDS(
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, pid_nr(pid))
		ctf_integer(int, prio, current->prio - MAX_RT_PRIO)
	)
)

/*
 * Tracepoint for do_fork.
 * Saving both TID and PID information, especially for the child, allows
 * trace analyzers to distinguish between creation of a new process and
 * creation of a new thread. Newly created processes will have child_tid
 * == child_pid, while creation of a thread yields child_tid !=
 * child_pid.
 */
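/*
 * Illustrative example with hypothetical PID values: a fork()/clone() of a
 * new process might record parent_pid = 1200 and child_tid = child_pid =
 * 1300, whereas a pthread_create() in that same process might record
 * child_tid = 1301 while child_pid remains 1200. The vtids sequence below
 * records the child's TID once per PID namespace level, from the root
 * namespace down to the namespace the child lives in.
 */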
LTTNG_TRACEPOINT_EVENT_CODE(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_locvar(
		pid_t vtids[LTTNG_MAX_PID_NS_LEVEL];
		unsigned int ns_level;
	),

	TP_code_pre(
		if (child) {
			struct pid *child_pid;
			unsigned int i;

			child_pid = task_pid(child);
			tp_locvar->ns_level =
				min_t(unsigned int, child_pid->level + 1,
					LTTNG_MAX_PID_NS_LEVEL);
			for (i = 0; i < tp_locvar->ns_level; i++)
				tp_locvar->vtids[i] = child_pid->numbers[i].nr;
		}
	),

	TP_FIELDS(
		ctf_array_text(char, parent_comm, parent->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, parent_tid, parent->pid)
		ctf_integer(pid_t, parent_pid, parent->tgid)
		ctf_integer(unsigned int, parent_ns_inum,
			({
				unsigned int parent_ns_inum = 0;

				if (parent) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(parent);
					if (pid_ns)
						parent_ns_inum =
							pid_ns->ns.inum;
				}
				parent_ns_inum;
			}))
		ctf_array_text(char, child_comm, child->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, child_tid, child->pid)
		ctf_sequence(pid_t, vtids, tp_locvar->vtids, u8, tp_locvar->ns_level)
		ctf_integer(pid_t, child_pid, child->tgid)
		ctf_integer(unsigned int, child_ns_inum,
			({
				unsigned int child_ns_inum = 0;

				if (child) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(child);
					if (pid_ns)
						child_ns_inum =
							pid_ns->ns.inum;
				}
				child_ns_inum;
			}))
	),

	TP_code_post()
)

/*
 * Tracepoint for exec:
 */
LTTNG_TRACEPOINT_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_FIELDS(
		ctf_string(filename, bprm->filename)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(pid_t, old_tid, old_pid)
	)
)

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(u64, delay, delay)
	)
)

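/*
 * The delay recorded by the sched_stat_* instances below is a duration taken
 * from the scheduler's statistics; on mainline kernels it is expressed in
 * nanoseconds (an assumption worth verifying against the kernel version in
 * use).
 */
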
/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_wait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_sleep,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_iowait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting blocked time (time the task is in uninterruptible
 * sleep).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_blocked,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

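/*
 * The upstream sched_stat_runtime tracepoint stopped passing vruntime around
 * Linux 6.8 (as the version check below suggests), hence the two variants of
 * the event definition.
 */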
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(6,8,0) || \
	LTTNG_RHEL_KERNEL_RANGE(5,14,0,452,0,0, 5,15,0,0,0,0))
/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
LTTNG_TRACEPOINT_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime),

	TP_ARGS(tsk, runtime),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(u64, runtime, runtime)
	)
)
#else
/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
LTTNG_TRACEPOINT_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(u64, runtime, runtime)
		ctf_integer(u64, vruntime, vruntime)
	)
)
#endif

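/*
 * Since Linux 4.12 (as the version check below suggests), the upstream
 * sched_pi_setprio tracepoint passes the pi_task donor task instead of an
 * integer priority, so the new priority is derived from pi_task here.
 */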
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,12,0) || \
	LTTNG_RT_KERNEL_RANGE(4,9,27,18, 4,10,0,0) || \
	LTTNG_RT_KERNEL_RANGE(4,11,5,1, 4,12,0,0))
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
		ctf_integer(int, newprio, pi_task ? pi_task->prio - MAX_RT_PRIO : tsk->prio - MAX_RT_PRIO)
	)
)
#else
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
		ctf_integer(int, newprio, newprio - MAX_RT_PRIO)
	)
)
#endif

#endif /* LTTNG_TRACE_SCHED_H */

/* This part must be outside protection */
#include <lttng/define_trace.h>