lttng-modules.git: instrumentation/events/lttng-module/rcu.h
(latest commit: fix: Adjust ranges for Ubuntu HWE 5.0 kernels)
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rcu

#if !defined(LTTNG_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_RCU_H

#include <probes/lttng-tracepoint-event.h>
#include <linux/version.h>

/*
 * Tracepoint for start/end markers used for utilization calculations.
 * By convention, the string is of the following forms:
 *
 * "Start <activity>" -- Mark the start of the specified activity,
 *			 such as "context switch". Nesting is permitted.
 * "End <activity>" -- Mark the end of the specified activity.
 *
 * An "@" character within "<activity>" is a comment character: Data
 * reduction scripts will ignore the "@" and the remainder of the line.
 */
LTTNG_TRACEPOINT_EVENT(rcu_utilization,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *s),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
	TP_PROTO(char *s),
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */

	TP_ARGS(s),

	TP_FIELDS(
		ctf_string(s, s)
	)
)
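
/*
 * Illustrative only (not part of the upstream header): the kernel
 * typically emits this event in matched pairs around an activity,
 * e.g. in the context-switch path, roughly as:
 *
 *	trace_rcu_utilization(TPS("Start context switch"));
 *	...
 *	trace_rcu_utilization(TPS("End context switch"));
 *
 * where TPS() is the kernel's tracepoint-string helper.
 */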

#ifdef CONFIG_RCU_TRACE

#if defined(CONFIG_TREE_RCU) \
	|| (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) \
		&& defined(CONFIG_PREEMPT_RCU)) \
	|| defined(CONFIG_TREE_PREEMPT_RCU)

/*
 * Tracepoint for grace-period events: starting and ending a grace
 * period ("start" and "end", respectively), a CPU noting the start
 * of a new grace period or the end of an old grace period ("cpustart"
 * and "cpuend", respectively), a CPU passing through a quiescent
 * state ("cpuqs"), a CPU coming online or going offline ("cpuonl"
 * and "cpuofl", respectively), and a CPU being kicked for being too
 * long in dyntick-idle mode ("kick").
 */
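/*
 * Note on the version split used for this and the other grace-period
 * related events below: kernel v4.19 replaced the grace-period number
 * (gpnum) with a grace-period sequence counter (gp_seq), a change also
 * carried by RHEL kernels starting at 4.18.0-80 (hence the
 * LTTNG_RHEL_KERNEL_RANGE check), and v3.12 turned the string arguments
 * into const char *.
 */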
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
LTTNG_TRACEPOINT_EVENT(rcu_grace_period,

	TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent),

	TP_ARGS(rcuname, gp_seq, gpevent),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_string(gpevent, gpevent)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_grace_period,

	TP_PROTO(const char *rcuname, unsigned long gpnum, const char *gpevent),

	TP_ARGS(rcuname, gpnum, gpevent),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_string(gpevent, gpevent)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_grace_period,

	TP_PROTO(char *rcuname, unsigned long gpnum, char *gpevent),

	TP_ARGS(rcuname, gpnum, gpevent),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_string(gpevent, gpevent)
	)
)
#endif

/*
 * Tracepoint for grace-period-initialization events. These are
 * distinguished by the type of RCU, the new grace-period number, the
 * rcu_node structure level, the starting and ending CPU covered by the
 * rcu_node structure, and the mask of CPUs that will be waited for.
 * All but the type of RCU are extracted from the rcu_node structure.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
LTTNG_TRACEPOINT_EVENT(rcu_grace_period_init,

	TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level,
		 int grplo, int grphi, unsigned long qsmask),

	TP_ARGS(rcuname, gp_seq, level, grplo, grphi, qsmask),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_integer(u8, level, level)
		ctf_integer(int, grplo, grplo)
		ctf_integer(int, grphi, grphi)
		ctf_integer(unsigned long, qsmask, qsmask)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_grace_period_init,

	TP_PROTO(const char *rcuname, unsigned long gpnum, u8 level,
		 int grplo, int grphi, unsigned long qsmask),

	TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(u8, level, level)
		ctf_integer(int, grplo, grplo)
		ctf_integer(int, grphi, grphi)
		ctf_integer(unsigned long, qsmask, qsmask)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_grace_period_init,

	TP_PROTO(char *rcuname, unsigned long gpnum, u8 level,
		 int grplo, int grphi, unsigned long qsmask),

	TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(u8, level, level)
		ctf_integer(int, grplo, grplo)
		ctf_integer(int, grphi, grphi)
		ctf_integer(unsigned long, qsmask, qsmask)
	)
)
#endif

/*
 * Tracepoint for tasks blocking within preemptible-RCU read-side
 * critical sections. Track the type of RCU (which one day might
 * include SRCU), the grace-period number that the task is blocking
 * (the current or the next), and the task's PID.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
LTTNG_TRACEPOINT_EVENT(rcu_preempt_task,

	TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq),

	TP_ARGS(rcuname, pid, gp_seq),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_integer(int, pid, pid)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_preempt_task,

	TP_PROTO(const char *rcuname, int pid, unsigned long gpnum),

	TP_ARGS(rcuname, pid, gpnum),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(int, pid, pid)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_preempt_task,

	TP_PROTO(char *rcuname, int pid, unsigned long gpnum),

	TP_ARGS(rcuname, pid, gpnum),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(int, pid, pid)
	)
)
#endif

/*
 * Tracepoint for tasks that blocked within a given preemptible-RCU
 * read-side critical section exiting that critical section. Track the
 * type of RCU (which one day might include SRCU) and the task's PID.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
LTTNG_TRACEPOINT_EVENT(rcu_unlock_preempted_task,

	TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid),

	TP_ARGS(rcuname, gp_seq, pid),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_integer(int, pid, pid)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_unlock_preempted_task,

	TP_PROTO(const char *rcuname, unsigned long gpnum, int pid),

	TP_ARGS(rcuname, gpnum, pid),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(int, pid, pid)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_unlock_preempted_task,

	TP_PROTO(char *rcuname, unsigned long gpnum, int pid),

	TP_ARGS(rcuname, gpnum, pid),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(int, pid, pid)
	)
)
#endif

/*
 * Tracepoint for quiescent-state-reporting events. These are
 * distinguished by the type of RCU, the grace-period number, the
 * mask of quiescent lower-level entities, the rcu_node structure level,
 * the starting and ending CPU covered by the rcu_node structure, and
 * whether there are any blocked tasks blocking the current grace period.
 * All but the type of RCU are extracted from the rcu_node structure.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
LTTNG_TRACEPOINT_EVENT(rcu_quiescent_state_report,

	TP_PROTO(const char *rcuname, unsigned long gp_seq,
		 unsigned long mask, unsigned long qsmask,
		 u8 level, int grplo, int grphi, int gp_tasks),

	TP_ARGS(rcuname, gp_seq, mask, qsmask, level, grplo, grphi, gp_tasks),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_integer(unsigned long, mask, mask)
		ctf_integer(unsigned long, qsmask, qsmask)
		ctf_integer(u8, level, level)
		ctf_integer(int, grplo, grplo)
		ctf_integer(int, grphi, grphi)
		ctf_integer(u8, gp_tasks, gp_tasks)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_quiescent_state_report,

	TP_PROTO(const char *rcuname, unsigned long gpnum,
		 unsigned long mask, unsigned long qsmask,
		 u8 level, int grplo, int grphi, int gp_tasks),

	TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(unsigned long, mask, mask)
		ctf_integer(unsigned long, qsmask, qsmask)
		ctf_integer(u8, level, level)
		ctf_integer(int, grplo, grplo)
		ctf_integer(int, grphi, grphi)
		ctf_integer(u8, gp_tasks, gp_tasks)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_quiescent_state_report,

	TP_PROTO(char *rcuname, unsigned long gpnum,
		 unsigned long mask, unsigned long qsmask,
		 u8 level, int grplo, int grphi, int gp_tasks),

	TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(unsigned long, mask, mask)
		ctf_integer(unsigned long, qsmask, qsmask)
		ctf_integer(u8, level, level)
		ctf_integer(int, grplo, grplo)
		ctf_integer(int, grphi, grphi)
		ctf_integer(u8, gp_tasks, gp_tasks)
	)
)
#endif

/*
 * Tracepoint for quiescent states detected by force_quiescent_state().
 * These trace events include the type of RCU, the grace-period number
 * that was blocked by the CPU, the CPU itself, and the type of quiescent
 * state, which can be "dti" for dyntick-idle mode, "ofl" for CPU offline,
 * or "kick" when kicking a CPU that has been in dyntick-idle mode for
 * too long.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
LTTNG_TRACEPOINT_EVENT(rcu_fqs,

	TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),

	TP_ARGS(rcuname, gp_seq, cpu, qsevent),

	TP_FIELDS(
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_integer(int, cpu, cpu)
		ctf_string(rcuname, rcuname)
		ctf_string(qsevent, qsevent)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_fqs,

	TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent),

	TP_ARGS(rcuname, gpnum, cpu, qsevent),

	TP_FIELDS(
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(int, cpu, cpu)
		ctf_string(rcuname, rcuname)
		ctf_string(qsevent, qsevent)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_fqs,

	TP_PROTO(char *rcuname, unsigned long gpnum, int cpu, char *qsevent),

	TP_ARGS(rcuname, gpnum, cpu, qsevent),

	TP_FIELDS(
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(int, cpu, cpu)
		ctf_string(rcuname, rcuname)
		ctf_string(qsevent, qsevent)
	)
)
#endif

#endif /*
	* #if defined(CONFIG_TREE_RCU)
	*	|| (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)
	*		&& defined(CONFIG_PREEMPT_RCU))
	*	|| defined(CONFIG_TREE_PREEMPT_RCU)
	*/

/*
 * Tracepoint for dyntick-idle entry/exit events. These take a string
 * as argument: "Start" for entering dyntick-idle mode, "End" for
 * leaving it, "--=" for events moving towards idle, and "++=" for events
 * moving away from idle. "Error on entry: not idle task" and "Error on
 * exit: not idle task" indicate that a non-idle task is erroneously
 * toying with the idle loop.
 *
 * These events also take a pair of numbers, which indicate the nesting
 * depth before and after the event of interest. Note that task-related
 * events use the upper bits of each number, while interrupt-related
 * events use the lower bits.
 */
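/*
 * The first branch below matches kernels that pass the dynticks counter
 * as a plain int: v5.6 and later, plus the stable (5.4.22+, 5.5.6+) and
 * Ubuntu HWE 5.0 (5.0.0-21.46 and later) kernels that carry the same
 * change. Kernels from v4.16 up to those ranges pass an atomic_t
 * instead, and older kernels do not expose the counter at all.
 */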
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) \
	|| LTTNG_KERNEL_RANGE(5,5,6, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,22, 5,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,46, 5,1,0,0)
LTTNG_TRACEPOINT_EVENT(rcu_dyntick,

	TP_PROTO(const char *polarity, long oldnesting, long newnesting, int dynticks),

	TP_ARGS(polarity, oldnesting, newnesting, dynticks),

	TP_FIELDS(
		ctf_string(polarity, polarity)
		ctf_integer(long, oldnesting, oldnesting)
		ctf_integer(long, newnesting, newnesting)
		ctf_integer(int, dynticks, dynticks)
	)
)

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,16,0))
LTTNG_TRACEPOINT_EVENT(rcu_dyntick,

	TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),

	TP_ARGS(polarity, oldnesting, newnesting, dynticks),

	TP_FIELDS(
		ctf_string(polarity, polarity)
		ctf_integer(long, oldnesting, oldnesting)
		ctf_integer(long, newnesting, newnesting)
		ctf_integer(int, dynticks, atomic_read(&dynticks))
	)
)

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_dyntick,

	TP_PROTO(const char *polarity, long long oldnesting, long long newnesting),

	TP_ARGS(polarity, oldnesting, newnesting),

	TP_FIELDS(
		ctf_string(polarity, polarity)
		ctf_integer(long long, oldnesting, oldnesting)
		ctf_integer(long long, newnesting, newnesting)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
LTTNG_TRACEPOINT_EVENT(rcu_dyntick,

	TP_PROTO(char *polarity, long long oldnesting, long long newnesting),

	TP_ARGS(polarity, oldnesting, newnesting),

	TP_FIELDS(
		ctf_string(polarity, polarity)
		ctf_integer(long long, oldnesting, oldnesting)
		ctf_integer(long long, newnesting, newnesting)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_dyntick,

	TP_PROTO(char *polarity),

	TP_ARGS(polarity),

	TP_FIELDS(
		ctf_string(polarity, polarity)
	)
)
#endif


#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
/*
 * Tracepoint for RCU preparation for idle, the goal being to get RCU
 * processing done so that the current CPU can shut off its scheduling
 * clock and enter dyntick-idle mode. One way to accomplish this is
 * to drain all RCU callbacks from this CPU, and the other is to have
 * done everything RCU requires for the current grace period. In this
 * latter case, the CPU will be awakened at the end of the current grace
 * period in order to process the remainder of its callbacks.
 *
 * These tracepoints take a string as argument:
 *
 *	"No callbacks": Nothing to do, no callbacks on this CPU.
 *	"In holdoff": Nothing to do, holding off after unsuccessful attempt.
 *	"Begin holdoff": Attempt failed, don't retry until next jiffy.
 *	"Dyntick with callbacks": Entering dyntick-idle despite callbacks.
 *	"Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks.
 *	"More callbacks": Still more callbacks, try again to clear them out.
 *	"Callbacks drained": All callbacks processed, off to dyntick idle!
 *	"Timer": Timer fired to cause CPU to continue processing callbacks.
 *	"Demigrate": Timer fired on wrong CPU, woke up correct CPU.
 *	"Cleanup after idle": Idle exited, timer canceled.
 */
LTTNG_TRACEPOINT_EVENT(rcu_prep_idle,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *reason),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
	TP_PROTO(char *reason),
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */

	TP_ARGS(reason),

	TP_FIELDS(
		ctf_string(reason, reason)
	)
)
#endif

/*
 * Tracepoint for the registration of a single RCU callback function.
 * The first argument is the type of RCU, the second argument is
 * a pointer to the RCU callback itself, the third element is the
 * number of lazy callbacks queued, and the fourth element is the
 * total number of callbacks queued.
 */
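/*
 * Kernels since v5.6 no longer pass a separate lazy-callback count
 * (qlen_lazy) to this event; the same applies to rcu_kfree_callback
 * and rcu_batch_start below.
 */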
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
LTTNG_TRACEPOINT_EVENT(rcu_callback,

	TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen),

	TP_ARGS(rcuname, rhp, qlen),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(void *, func, rhp->func)
		ctf_integer(long, qlen, qlen)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_callback,

	TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy,
		 long qlen),

	TP_ARGS(rcuname, rhp, qlen_lazy, qlen),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(void *, func, rhp->func)
		ctf_integer(long, qlen_lazy, qlen_lazy)
		ctf_integer(long, qlen, qlen)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
LTTNG_TRACEPOINT_EVENT(rcu_callback,

	TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen_lazy,
		 long qlen),

	TP_ARGS(rcuname, rhp, qlen_lazy, qlen),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(void *, func, rhp->func)
		ctf_integer(long, qlen_lazy, qlen_lazy)
		ctf_integer(long, qlen, qlen)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_callback,

	TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen),

	TP_ARGS(rcuname, rhp, qlen),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(void *, func, rhp->func)
		ctf_integer(long, qlen, qlen)
	)
)
#endif


/*
 * Tracepoint for the registration of a single RCU callback of the special
 * kfree() form. The first argument is the RCU type, the second argument
 * is a pointer to the RCU callback, the third argument is the offset
 * of the callback within the enclosing RCU-protected data structure,
 * the fourth argument is the number of lazy callbacks queued, and the
 * fifth argument is the total number of callbacks queued.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
LTTNG_TRACEPOINT_EVENT(rcu_kfree_callback,

	TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
		 long qlen),

	TP_ARGS(rcuname, rhp, offset, qlen),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(unsigned long, offset, offset)
		ctf_integer(long, qlen, qlen)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_kfree_callback,

	TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
		 long qlen_lazy, long qlen),

	TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(unsigned long, offset, offset)
		ctf_integer(long, qlen_lazy, qlen_lazy)
		ctf_integer(long, qlen, qlen)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
LTTNG_TRACEPOINT_EVENT(rcu_kfree_callback,

	TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
		 long qlen_lazy, long qlen),

	TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(unsigned long, offset, offset)
		ctf_integer(long, qlen_lazy, qlen_lazy)
		ctf_integer(long, qlen, qlen)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_kfree_callback,

	TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
		 long qlen),

	TP_ARGS(rcuname, rhp, offset, qlen),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(unsigned long, offset, offset)
		ctf_integer(long, qlen, qlen)
	)
)
#endif

/*
 * Tracepoint for marking the beginning rcu_do_batch, performed to start
 * RCU callback invocation. The first argument is the RCU flavor,
 * the second is the number of lazy callbacks queued, the third is
 * the total number of callbacks queued, and the fourth argument is
 * the current RCU-callback batch limit.
 */
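/*
 * Version ladder for rcu_batch_start: the lazy-callback count is only
 * present from v3.4 up to v5.5, the batch limit (blimit) widens from
 * int to long in v3.9, and v3.12 makes the flavor name const char *.
 */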
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
LTTNG_TRACEPOINT_EVENT(rcu_batch_start,

	TP_PROTO(const char *rcuname, long qlen, long blimit),

	TP_ARGS(rcuname, qlen, blimit),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(long, qlen, qlen)
		ctf_integer(long, blimit, blimit)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_batch_start,

	TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit),

	TP_ARGS(rcuname, qlen_lazy, qlen, blimit),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(long, qlen_lazy, qlen_lazy)
		ctf_integer(long, qlen, qlen)
		ctf_integer(long, blimit, blimit)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
LTTNG_TRACEPOINT_EVENT(rcu_batch_start,

	TP_PROTO(char *rcuname, long qlen_lazy, long qlen, long blimit),

	TP_ARGS(rcuname, qlen_lazy, qlen, blimit),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(long, qlen_lazy, qlen_lazy)
		ctf_integer(long, qlen, qlen)
		ctf_integer(long, blimit, blimit)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
LTTNG_TRACEPOINT_EVENT(rcu_batch_start,

	TP_PROTO(char *rcuname, long qlen_lazy, long qlen, int blimit),

	TP_ARGS(rcuname, qlen_lazy, qlen, blimit),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(long, qlen_lazy, qlen_lazy)
		ctf_integer(long, qlen, qlen)
		ctf_integer(int, blimit, blimit)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_batch_start,

	TP_PROTO(char *rcuname, long qlen, int blimit),

	TP_ARGS(rcuname, qlen, blimit),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(long, qlen, qlen)
		ctf_integer(int, blimit, blimit)
	)
)
#endif

/*
 * Tracepoint for the invocation of a single RCU callback function.
 * The first argument is the type of RCU, and the second argument is
 * a pointer to the RCU callback itself.
 */
LTTNG_TRACEPOINT_EVENT(rcu_invoke_callback,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *rcuname, struct rcu_head *rhp),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
	TP_PROTO(char *rcuname, struct rcu_head *rhp),
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */

	TP_ARGS(rcuname, rhp),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(void *, func, rhp->func)
	)
)

/*
 * Tracepoint for the invocation of a single RCU callback of the special
 * kfree() form. The first argument is the RCU flavor, the second
 * argument is a pointer to the RCU callback, and the third argument
 * is the offset of the callback within the enclosing RCU-protected
 * data structure.
 */
LTTNG_TRACEPOINT_EVENT(rcu_invoke_kfree_callback,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
	TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset),
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */

	TP_ARGS(rcuname, rhp, offset),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer(unsigned long, offset, offset)
	)
)

/*
 * Tracepoint for exiting rcu_do_batch after RCU callbacks have been
 * invoked. The first argument is the name of the RCU flavor,
 * the second argument is number of callbacks actually invoked,
 * the third argument (cb) is whether or not any of the callbacks that
 * were ready to invoke at the beginning of this batch are still
 * queued, the fourth argument (nr) is the return value of need_resched(),
 * the fifth argument (iit) is 1 if the current task is the idle task,
 * and the sixth argument (risk) is the return value from
 * rcu_is_callbacks_kthread().
 */
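/*
 * The four status flags (cb, nr, iit, risk) are passed as char on
 * kernels v3.13 and later, as bool on v3.3 through v3.12, and are
 * absent before v3.3, as reflected in the branches below.
 */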
LTTNG_TRACEPOINT_EVENT(rcu_batch_end,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
	TP_PROTO(const char *rcuname, int callbacks_invoked,
		 char cb, char nr, char iit, char risk),

	TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *rcuname, int callbacks_invoked,
		 bool cb, bool nr, bool iit, bool risk),

	TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
	TP_PROTO(char *rcuname, int callbacks_invoked,
		 bool cb, bool nr, bool iit, bool risk),

	TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
#else
	TP_PROTO(char *rcuname, int callbacks_invoked),

	TP_ARGS(rcuname, callbacks_invoked),
#endif

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(int, callbacks_invoked, callbacks_invoked)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
		ctf_integer(char, cb, cb)
		ctf_integer(char, nr, nr)
		ctf_integer(char, iit, iit)
		ctf_integer(char, risk, risk)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
		ctf_integer(bool, cb, cb)
		ctf_integer(bool, nr, nr)
		ctf_integer(bool, iit, iit)
		ctf_integer(bool, risk, risk)
#endif
	)
)

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
/*
 * Tracepoint for rcutorture readers. The first argument is the name
 * of the RCU flavor from rcutorture's viewpoint and the second argument
 * is the callback address.
 */
LTTNG_TRACEPOINT_EVENT(rcu_torture_read,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *rcutorturename, struct rcu_head *rhp,
		 unsigned long secs, unsigned long c_old, unsigned long c),

	TP_ARGS(rcutorturename, rhp, secs, c_old, c),
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
	TP_PROTO(char *rcutorturename, struct rcu_head *rhp,
		 unsigned long secs, unsigned long c_old, unsigned long c),

	TP_ARGS(rcutorturename, rhp, secs, c_old, c),
#else
	TP_PROTO(char *rcutorturename, struct rcu_head *rhp),

	TP_ARGS(rcutorturename, rhp),
#endif

	TP_FIELDS(
		ctf_string(rcutorturename, rcutorturename)
		ctf_integer_hex(struct rcu_head *, rhp, rhp)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
		ctf_integer(unsigned long, secs, secs)
		ctf_integer(unsigned long, c_old, c_old)
		ctf_integer(unsigned long, c, c)
#endif
	)
)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
/*
 * Tracepoint for _rcu_barrier() execution. The string "s" describes
 * the _rcu_barrier phase:
 *	"Begin": rcu_barrier_callback() started.
 *	"Check": rcu_barrier_callback() checking for piggybacking.
 *	"EarlyExit": rcu_barrier_callback() piggybacked, thus early exit.
 *	"Inc1": rcu_barrier_callback() piggyback check counter incremented.
 *	"Offline": rcu_barrier_callback() found offline CPU
 *	"OnlineQ": rcu_barrier_callback() found online CPU with callbacks.
 *	"OnlineNQ": rcu_barrier_callback() found online CPU, no callbacks.
 *	"IRQ": An rcu_barrier_callback() callback posted on remote CPU.
 *	"CB": An rcu_barrier_callback() invoked a callback, not the last.
 *	"LastCB": An rcu_barrier_callback() invoked the last callback.
 *	"Inc2": rcu_barrier_callback() piggyback check counter incremented.
 * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
 * is the count of remaining callbacks, and "done" is the piggybacking count.
 */
LTTNG_TRACEPOINT_EVENT(rcu_barrier,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
	TP_PROTO(char *rcuname, char *s, int cpu, int cnt, unsigned long done),
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */

	TP_ARGS(rcuname, s, cpu, cnt, done),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_string(s, s)
		ctf_integer(int, cpu, cpu)
		ctf_integer(int, cnt, cnt)
		ctf_integer(unsigned long, done, done)
	)
)
#endif

#else /* #ifdef CONFIG_RCU_TRACE */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,80,0,0, 4,19,0,0,0,0))
#define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0)
#define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \
				     qsmask) do { } while (0)
#define trace_rcu_preempt_task(rcuname, pid, gp_seq) do { } while (0)
#define trace_rcu_unlock_preempted_task(rcuname, gp_seq, pid) do { } while (0)
#define trace_rcu_quiescent_state_report(rcuname, gp_seq, mask, qsmask, level, \
					 grplo, grphi, gp_tasks) do { } \
	while (0)
#define trace_rcu_fqs(rcuname, gp_seq, cpu, qsevent) do { } while (0)
#else
#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
				     qsmask) do { } while (0)
#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
					 grplo, grphi, gp_tasks) do { } \
	while (0)
#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,16,0))
#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
#else
#define trace_rcu_dyntick(polarity) do { } while (0)
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
#define trace_rcu_prep_idle(reason) do { } while (0)
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
	do { } while (0)
#define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
	do { } while (0)
#else
#define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
#define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)
#endif
#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
	do { } while (0)
#else
#define trace_rcu_batch_end(rcuname, callbacks_invoked) do { } while (0)
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
#define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
#endif
#endif /* #else #ifdef CONFIG_RCU_TRACE */

#endif /* LTTNG_TRACE_RCU_H */

/* This part must be outside protection */
#include <probes/define_trace.h>