Fix: Convert rcu tracepoints to gp_seq (v4.19)
[lttng-modules.git] / instrumentation/events/lttng-module/rcu.h
1 #undef TRACE_SYSTEM
2 #define TRACE_SYSTEM rcu
3
4 #if !defined(LTTNG_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define LTTNG_TRACE_RCU_H
6
7 #include <probes/lttng-tracepoint-event.h>
8 #include <linux/version.h>
9
10 /*
11 * Tracepoint for start/end markers used for utilization calculations.
12 * By convention, the string is of the following forms:
13 *
14 * "Start <activity>" -- Mark the start of the specified activity,
15 * such as "context switch". Nesting is permitted.
16 * "End <activity>" -- Mark the end of the specified activity.
17 *
18 * An "@" character within "<activity>" is a comment character: Data
19 * reduction scripts will ignore the "@" and the remainder of the line.
20 */
21 LTTNG_TRACEPOINT_EVENT(rcu_utilization,
22
23 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
24 TP_PROTO(const char *s),
25 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
26 TP_PROTO(char *s),
27 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
28
29 TP_ARGS(s),
30
31 TP_FIELDS(
32 ctf_string(s, s)
33 )
34 )
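/*
 * A minimal, purely illustrative sketch of the producer side (the call
 * sites live in the kernel, not in this header), following the
 * "Start <activity>"/"End <activity>" convention described above:
 *
 *	trace_rcu_utilization("Start context switch");
 *	...
 *	trace_rcu_utilization("End context switch");
 */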
35
36 #ifdef CONFIG_RCU_TRACE
37
38 #if defined(CONFIG_TREE_RCU) \
39 || (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) \
40 && defined(CONFIG_PREEMPT_RCU)) \
41 || defined(CONFIG_TREE_PREEMPT_RCU)
42
43 /*
44 * Tracepoint for grace-period events: starting and ending a grace
45 * period ("start" and "end", respectively), a CPU noting the start
46 * of a new grace period or the end of an old grace period ("cpustart"
47 * and "cpuend", respectively), a CPU passing through a quiescent
48 * state ("cpuqs"), a CPU coming online or going offline ("cpuonl"
49 * and "cpuofl", respectively), and a CPU being kicked for being too
50 * long in dyntick-idle mode ("kick").
51 */
52 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
53 LTTNG_TRACEPOINT_EVENT(rcu_grace_period,
54
55 TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent),
56
57 TP_ARGS(rcuname, gp_seq, gpevent),
58
59 TP_FIELDS(
60 ctf_string(rcuname, rcuname)
61 ctf_integer(unsigned long, gp_seq, gp_seq)
62 ctf_string(gpevent, gpevent)
63 )
64 )
65 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
66 LTTNG_TRACEPOINT_EVENT(rcu_grace_period,
67
68 TP_PROTO(const char *rcuname, unsigned long gpnum, const char *gpevent),
69
70 TP_ARGS(rcuname, gpnum, gpevent),
71
72 TP_FIELDS(
73 ctf_string(rcuname, rcuname)
74 ctf_integer(unsigned long, gpnum, gpnum)
75 ctf_string(gpevent, gpevent)
76 )
77 )
78 #else
79 LTTNG_TRACEPOINT_EVENT(rcu_grace_period,
80
81 TP_PROTO(char *rcuname, unsigned long gpnum, char *gpevent),
82
83 TP_ARGS(rcuname, gpnum, gpevent),
84
85 TP_FIELDS(
86 ctf_string(rcuname, rcuname)
87 ctf_integer(unsigned long, gpnum, gpnum)
88 ctf_string(gpevent, gpevent)
89 )
90 )
91 #endif
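/*
 * Hedged sketch of a matching call site on a v4.19+ kernel (variable and
 * flavor names are illustrative, not taken from this tree):
 *
 *	trace_rcu_grace_period("rcu_sched", gp_seq, "start");
 *
 * On v4.19+ the middle argument is the gp_seq sequence counter that
 * replaced gpnum; the older variants above record gpnum instead.
 */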
92
93 /*
94 * Tracepoint for grace-period-initialization events. These are
95 * distinguished by the type of RCU, the new grace-period number, the
96 * rcu_node structure level, the starting and ending CPU covered by the
97 * rcu_node structure, and the mask of CPUs that will be waited for.
98 * All but the type of RCU are extracted from the rcu_node structure.
99 */
100 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
101 LTTNG_TRACEPOINT_EVENT(rcu_grace_period_init,
102
103 TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level,
104 int grplo, int grphi, unsigned long qsmask),
105
106 TP_ARGS(rcuname, gp_seq, level, grplo, grphi, qsmask),
107
108 TP_FIELDS(
109 ctf_string(rcuname, rcuname)
110 ctf_integer(unsigned long, gp_seq, gp_seq)
111 ctf_integer(u8, level, level)
112 ctf_integer(int, grplo, grplo)
113 ctf_integer(int, grphi, grphi)
114 ctf_integer(unsigned long, qsmask, qsmask)
115 )
116 )
117 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
118 LTTNG_TRACEPOINT_EVENT(rcu_grace_period_init,
119
120 TP_PROTO(const char *rcuname, unsigned long gpnum, u8 level,
121 int grplo, int grphi, unsigned long qsmask),
122
123 TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),
124
125 TP_FIELDS(
126 ctf_string(rcuname, rcuname)
127 ctf_integer(unsigned long, gpnum, gpnum)
128 ctf_integer(u8, level, level)
129 ctf_integer(int, grplo, grplo)
130 ctf_integer(int, grphi, grphi)
131 ctf_integer(unsigned long, qsmask, qsmask)
132 )
133 )
134 #else
135 LTTNG_TRACEPOINT_EVENT(rcu_grace_period_init,
136
137 TP_PROTO(char *rcuname, unsigned long gpnum, u8 level,
138 int grplo, int grphi, unsigned long qsmask),
139
140 TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),
141
142 TP_FIELDS(
143 ctf_string(rcuname, rcuname)
144 ctf_integer(unsigned long, gpnum, gpnum)
145 ctf_integer(u8, level, level)
146 ctf_integer(int, grplo, grplo)
147 ctf_integer(int, grphi, grphi)
148 ctf_integer(unsigned long, qsmask, qsmask)
149 )
150 )
151 #endif
152
153 /*
154 * Tracepoint for tasks blocking within preemptible-RCU read-side
155 * critical sections. Track the type of RCU (which one day might
156 * include SRCU), the grace-period number that the task is blocking
157 * (the current or the next), and the task's PID.
158 */
159 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
160 LTTNG_TRACEPOINT_EVENT(rcu_preempt_task,
161
162 TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq),
163
164 TP_ARGS(rcuname, pid, gp_seq),
165
166 TP_FIELDS(
167 ctf_string(rcuname, rcuname)
168 ctf_integer(unsigned long, gp_seq, gp_seq)
169 ctf_integer(int, pid, pid)
170 )
171 )
172 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
173 LTTNG_TRACEPOINT_EVENT(rcu_preempt_task,
174
175 TP_PROTO(const char *rcuname, int pid, unsigned long gpnum),
176
177 TP_ARGS(rcuname, pid, gpnum),
178
179 TP_FIELDS(
180 ctf_string(rcuname, rcuname)
181 ctf_integer(unsigned long, gpnum, gpnum)
182 ctf_integer(int, pid, pid)
183 )
184 )
185 #else
186 LTTNG_TRACEPOINT_EVENT(rcu_preempt_task,
187
188 TP_PROTO(char *rcuname, int pid, unsigned long gpnum),
189
190 TP_ARGS(rcuname, pid, gpnum),
191
192 TP_FIELDS(
193 ctf_string(rcuname, rcuname)
194 ctf_integer(unsigned long, gpnum, gpnum)
195 ctf_integer(int, pid, pid)
196 )
197 )
198 #endif
199
200 /*
201 * Tracepoint for tasks that blocked within a given preemptible-RCU
202 * read-side critical section exiting that critical section. Track the
203 * type of RCU (which one day might include SRCU) and the task's PID.
204 */
205 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
206 LTTNG_TRACEPOINT_EVENT(rcu_unlock_preempted_task,
207
208 TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid),
209
210 TP_ARGS(rcuname, gp_seq, pid),
211
212 TP_FIELDS(
213 ctf_string(rcuname, rcuname)
214 ctf_integer(unsigned long, gp_seq, gp_seq)
215 ctf_integer(int, pid, pid)
216 )
217 )
218 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
219 LTTNG_TRACEPOINT_EVENT(rcu_unlock_preempted_task,
220
221 TP_PROTO(const char *rcuname, unsigned long gpnum, int pid),
222
223 TP_ARGS(rcuname, gpnum, pid),
224
225 TP_FIELDS(
226 ctf_string(rcuname, rcuname)
227 ctf_integer(unsigned long, gpnum, gpnum)
228 ctf_integer(int, pid, pid)
229 )
230 )
231 #else
232 LTTNG_TRACEPOINT_EVENT(rcu_unlock_preempted_task,
233
234 TP_PROTO(char *rcuname, unsigned long gpnum, int pid),
235
236 TP_ARGS(rcuname, gpnum, pid),
237
238 TP_FIELDS(
239 ctf_string(rcuname, rcuname)
240 ctf_integer(unsigned long, gpnum, gpnum)
241 ctf_integer(int, pid, pid)
242 )
243 )
244 #endif
245
246 /*
247 * Tracepoint for quiescent-state-reporting events. These are
248 * distinguished by the type of RCU, the grace-period number, the
249 * mask of quiescent lower-level entities, the rcu_node structure level,
250 * the starting and ending CPU covered by the rcu_node structure, and
251 * whether there are any blocked tasks blocking the current grace period.
252 * All but the type of RCU are extracted from the rcu_node structure.
253 */
254 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
255 LTTNG_TRACEPOINT_EVENT(rcu_quiescent_state_report,
256
257 TP_PROTO(const char *rcuname, unsigned long gp_seq,
258 unsigned long mask, unsigned long qsmask,
259 u8 level, int grplo, int grphi, int gp_tasks),
260
261 TP_ARGS(rcuname, gp_seq, mask, qsmask, level, grplo, grphi, gp_tasks),
262
263 TP_FIELDS(
264 ctf_string(rcuname, rcuname)
265 ctf_integer(unsigned long, gp_seq, gp_seq)
266 ctf_integer(unsigned long, mask, mask)
267 ctf_integer(unsigned long, qsmask, qsmask)
268 ctf_integer(u8, level, level)
269 ctf_integer(int, grplo, grplo)
270 ctf_integer(int, grphi, grphi)
271 ctf_integer(u8, gp_tasks, gp_tasks)
272 )
273 )
274 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
275 LTTNG_TRACEPOINT_EVENT(rcu_quiescent_state_report,
276
277 TP_PROTO(const char *rcuname, unsigned long gpnum,
278 unsigned long mask, unsigned long qsmask,
279 u8 level, int grplo, int grphi, int gp_tasks),
280
281 TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),
282
283 TP_FIELDS(
284 ctf_string(rcuname, rcuname)
285 ctf_integer(unsigned long, gpnum, gpnum)
286 ctf_integer(unsigned long, mask, mask)
287 ctf_integer(unsigned long, qsmask, qsmask)
288 ctf_integer(u8, level, level)
289 ctf_integer(int, grplo, grplo)
290 ctf_integer(int, grphi, grphi)
291 ctf_integer(u8, gp_tasks, gp_tasks)
292 )
293 )
294 #else
295 LTTNG_TRACEPOINT_EVENT(rcu_quiescent_state_report,
296
297 TP_PROTO(char *rcuname, unsigned long gpnum,
298 unsigned long mask, unsigned long qsmask,
299 u8 level, int grplo, int grphi, int gp_tasks),
300
301 TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),
302
303 TP_FIELDS(
304 ctf_string(rcuname, rcuname)
305 ctf_integer(unsigned long, gpnum, gpnum)
306 ctf_integer(unsigned long, mask, mask)
307 ctf_integer(unsigned long, qsmask, qsmask)
308 ctf_integer(u8, level, level)
309 ctf_integer(int, grplo, grplo)
310 ctf_integer(int, grphi, grphi)
311 ctf_integer(u8, gp_tasks, gp_tasks)
312 )
313 )
314 #endif
315
316 /*
317 * Tracepoint for quiescent states detected by force_quiescent_state().
318 * These trace events include the type of RCU, the grace-period number
319 * that was blocked by the CPU, the CPU itself, and the type of quiescent
320 * state, which can be "dti" for dyntick-idle mode, "ofl" for CPU offline,
321 * or "kick" when kicking a CPU that has been in dyntick-idle mode for
322 * too long.
323 */
324 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
325 LTTNG_TRACEPOINT_EVENT(rcu_fqs,
326
327 TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),
328
329 TP_ARGS(rcuname, gp_seq, cpu, qsevent),
330
331 TP_FIELDS(
332 ctf_integer(unsigned long, gp_seq, gp_seq)
333 ctf_integer(int, cpu, cpu)
334 ctf_string(rcuname, rcuname)
335 ctf_string(qsevent, qsevent)
336 )
337 )
338 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
339 LTTNG_TRACEPOINT_EVENT(rcu_fqs,
340
341 TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent),
342
343 TP_ARGS(rcuname, gpnum, cpu, qsevent),
344
345 TP_FIELDS(
346 ctf_integer(unsigned long, gpnum, gpnum)
347 ctf_integer(int, cpu, cpu)
348 ctf_string(rcuname, rcuname)
349 ctf_string(qsevent, qsevent)
350 )
351 )
352 #else
353 LTTNG_TRACEPOINT_EVENT(rcu_fqs,
354
355 TP_PROTO(char *rcuname, unsigned long gpnum, int cpu, char *qsevent),
356
357 TP_ARGS(rcuname, gpnum, cpu, qsevent),
358
359 TP_FIELDS(
360 ctf_integer(unsigned long, gpnum, gpnum)
361 ctf_integer(int, cpu, cpu)
362 ctf_string(rcuname, rcuname)
363 ctf_string(qsevent, qsevent)
364 )
365 )
366 #endif
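/*
 * Illustrative producer for the quiescent-state family (values are
 * hypothetical): when force_quiescent_state() resolves a stalled CPU,
 * the kernel may emit
 *
 *	trace_rcu_fqs("rcu_sched", gp_seq, cpu, "dti");
 *
 * with "dti", "ofl" or "kick" as the qsevent string described above.
 */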
367
368 #endif /*
369 * #if defined(CONFIG_TREE_RCU)
370 * || (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)
371 * && defined(CONFIG_PREEMPT_RCU))
372 * || defined(CONFIG_TREE_PREEMPT_RCU)
373 */
374
375 /*
376 * Tracepoint for dyntick-idle entry/exit events. These take a string
377 * as argument: "Start" for entering dyntick-idle mode, "End" for
378 * leaving it, "--=" for events moving towards idle, and "++=" for events
379 * moving away from idle. "Error on entry: not idle task" and "Error on
380 * exit: not idle task" indicate that a non-idle task is erroneously
381 * toying with the idle loop.
382 *
383 * These events also take a pair of numbers, which indicate the nesting
384 * depth before and after the event of interest. Note that task-related
385 * events use the upper bits of each number, while interrupt-related
386 * events use the lower bits.
387 */
388 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,16,0))
389 LTTNG_TRACEPOINT_EVENT(rcu_dyntick,
390
391 TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),
392
393 TP_ARGS(polarity, oldnesting, newnesting, dynticks),
394
395 TP_FIELDS(
396 ctf_string(polarity, polarity)
397 ctf_integer(long, oldnesting, oldnesting)
398 ctf_integer(long, newnesting, newnesting)
399 ctf_integer(int, dynticks, atomic_read(&dynticks))
400 )
401 )
402
403 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
404 LTTNG_TRACEPOINT_EVENT(rcu_dyntick,
405
406 TP_PROTO(const char *polarity, long long oldnesting, long long newnesting),
407
408 TP_ARGS(polarity, oldnesting, newnesting),
409
410 TP_FIELDS(
411 ctf_string(polarity, polarity)
412 ctf_integer(long long, oldnesting, oldnesting)
413 ctf_integer(long long, newnesting, newnesting)
414 )
415 )
416 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
417 LTTNG_TRACEPOINT_EVENT(rcu_dyntick,
418
419 TP_PROTO(char *polarity, long long oldnesting, long long newnesting),
420
421 TP_ARGS(polarity, oldnesting, newnesting),
422
423 TP_FIELDS(
424 ctf_string(polarity, polarity)
425 ctf_integer(long long, oldnesting, oldnesting)
426 ctf_integer(long long, newnesting, newnesting)
427 )
428 )
429 #else
430 LTTNG_TRACEPOINT_EVENT(rcu_dyntick,
431
432 TP_PROTO(char *polarity),
433
434 TP_ARGS(polarity),
435
436 TP_FIELDS(
437 ctf_string(polarity, polarity)
438 )
439 )
440 #endif
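/*
 * Hedged example of the >= 4.16 form (names illustrative): the dynticks
 * counter is passed by value and sampled with atomic_read() when the
 * field is serialized above, e.g.
 *
 *	trace_rcu_dyntick("Start", oldnesting, newnesting, rdtp->dynticks);
 */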
441
442
443 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
444 /*
445 * Tracepoint for RCU preparation for idle, the goal being to get RCU
446 * processing done so that the current CPU can shut off its scheduling
447 * clock and enter dyntick-idle mode. One way to accomplish this is
448 * to drain all RCU callbacks from this CPU, and the other is to have
449 * done everything RCU requires for the current grace period. In this
450 * latter case, the CPU will be awakened at the end of the current grace
451 * period in order to process the remainder of its callbacks.
452 *
453 * These tracepoints take a string as argument:
454 *
455 * "No callbacks": Nothing to do, no callbacks on this CPU.
456 * "In holdoff": Nothing to do, holding off after unsuccessful attempt.
457 * "Begin holdoff": Attempt failed, don't retry until next jiffy.
458 * "Dyntick with callbacks": Entering dyntick-idle despite callbacks.
459 * "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks.
460 * "More callbacks": Still more callbacks, try again to clear them out.
461 * "Callbacks drained": All callbacks processed, off to dyntick idle!
462 * "Timer": Timer fired to cause CPU to continue processing callbacks.
463 * "Demigrate": Timer fired on wrong CPU, woke up correct CPU.
464 * "Cleanup after idle": Idle exited, timer canceled.
465 */
466 LTTNG_TRACEPOINT_EVENT(rcu_prep_idle,
467
468 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
469 TP_PROTO(const char *reason),
470 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
471 TP_PROTO(char *reason),
472 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
473
474 TP_ARGS(reason),
475
476 TP_FIELDS(
477 ctf_string(reason, reason)
478 )
479 )
480 #endif
481
482 /*
483 * Tracepoint for the registration of a single RCU callback function.
484 * The first argument is the type of RCU, the second argument is
485 * a pointer to the RCU callback itself, the third argument is the
486 * number of lazy callbacks queued, and the fourth argument is the
487 * total number of callbacks queued.
488 */
489 LTTNG_TRACEPOINT_EVENT(rcu_callback,
490
491 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
492 TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy,
493 long qlen),
494
495 TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
496 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
497 TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen_lazy,
498 long qlen),
499
500 TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
501 #else
502 TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen),
503
504 TP_ARGS(rcuname, rhp, qlen),
505 #endif
506
507 TP_FIELDS(
508 ctf_string(rcuname, rcuname)
509 ctf_integer_hex(void *, rhp, rhp)
510 ctf_integer_hex(void *, func, rhp->func)
511 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
512 ctf_integer(long, qlen_lazy, qlen_lazy)
513 #endif
514 ctf_integer(long, qlen, qlen)
515 )
516 )
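/*
 * Illustrative producer (argument names hypothetical): queuing a callback
 * via call_rcu() may emit
 *
 *	trace_rcu_callback("rcu_sched", rhp, qlen_lazy, qlen);
 *
 * and the rhp->func pointer captured above identifies the callback body.
 */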
517
518 /*
519 * Tracepoint for the registration of a single RCU callback of the special
520 * kfree() form. The first argument is the RCU type, the second argument
521 * is a pointer to the RCU callback, the third argument is the offset
522 * of the callback within the enclosing RCU-protected data structure,
523 * the fourth argument is the number of lazy callbacks queued, and the
524 * fifth argument is the total number of callbacks queued.
525 */
526 LTTNG_TRACEPOINT_EVENT(rcu_kfree_callback,
527
528
529 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
530 TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
531 long qlen_lazy, long qlen),
532
533 TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
534 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
535 TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
536 long qlen_lazy, long qlen),
537
538 TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
539 #else
540 TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
541 long qlen),
542
543 TP_ARGS(rcuname, rhp, offset, qlen),
544 #endif
545
546 TP_FIELDS(
547 ctf_string(rcuname, rcuname)
548 ctf_integer_hex(void *, rhp, rhp)
549 ctf_integer_hex(unsigned long, offset, offset)
550 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
551 ctf_integer(long, qlen_lazy, qlen_lazy)
552 #endif
553 ctf_integer(long, qlen, qlen)
554 )
555 )
556
557 /*
558 * Tracepoint for marking the beginning of rcu_do_batch, performed to start
559 * RCU callback invocation. The first argument is the RCU flavor,
560 * the second is the number of lazy callbacks queued, the third is
561 * the total number of callbacks queued, and the fourth argument is
562 * the current RCU-callback batch limit.
563 */
564 LTTNG_TRACEPOINT_EVENT(rcu_batch_start,
565
566 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
567 TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit),
568
569 TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
570 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
571 TP_PROTO(char *rcuname, long qlen_lazy, long qlen, long blimit),
572
573 TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
574 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
575 TP_PROTO(char *rcuname, long qlen_lazy, long qlen, int blimit),
576
577 TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
578 #else
579 TP_PROTO(char *rcuname, long qlen, int blimit),
580
581 TP_ARGS(rcuname, qlen, blimit),
582 #endif
583
584 TP_FIELDS(
585 ctf_string(rcuname, rcuname)
586 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
587 ctf_integer(long, qlen_lazy, qlen_lazy)
588 #endif
589 ctf_integer(long, qlen, qlen)
590 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
591 ctf_integer(long, blimit, blimit)
592 #else
593 ctf_integer(int, blimit, blimit)
594 #endif
595 )
596 )
597
598 /*
599 * Tracepoint for the invocation of a single RCU callback function.
600 * The first argument is the type of RCU, and the second argument is
601 * a pointer to the RCU callback itself.
602 */
603 LTTNG_TRACEPOINT_EVENT(rcu_invoke_callback,
604
605 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
606 TP_PROTO(const char *rcuname, struct rcu_head *rhp),
607 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
608 TP_PROTO(char *rcuname, struct rcu_head *rhp),
609 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
610
611 TP_ARGS(rcuname, rhp),
612
613 TP_FIELDS(
614 ctf_string(rcuname, rcuname)
615 ctf_integer_hex(void *, rhp, rhp)
616 ctf_integer_hex(void *, func, rhp->func)
617 )
618 )
619
620 /*
621 * Tracepoint for the invocation of a single RCU callback of the special
622 * kfree() form. The first argument is the RCU flavor, the second
623 * argument is a pointer to the RCU callback, and the third argument
624 * is the offset of the callback within the enclosing RCU-protected
625 * data structure.
626 */
627 LTTNG_TRACEPOINT_EVENT(rcu_invoke_kfree_callback,
628
629 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
630 TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset),
631 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
632 TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset),
633 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
634
635 TP_ARGS(rcuname, rhp, offset),
636
637 TP_FIELDS(
638 ctf_string(rcuname, rcuname)
639 ctf_integer_hex(void *, rhp, rhp)
640 ctf_integer(unsigned long, offset, offset)
641 )
642 )
643
644 /*
645 * Tracepoint for exiting rcu_do_batch after RCU callbacks have been
646 * invoked. The first argument is the name of the RCU flavor,
647 * the second argument is the number of callbacks actually invoked,
648 * the third argument (cb) is whether or not any of the callbacks that
649 * were ready to invoke at the beginning of this batch are still
650 * queued, the fourth argument (nr) is the return value of need_resched(),
651 * the fifth argument (iit) is 1 if the current task is the idle task,
652 * and the sixth argument (risk) is the return value from
653 * rcu_is_callbacks_kthread().
654 */
655 LTTNG_TRACEPOINT_EVENT(rcu_batch_end,
656
657 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
658 TP_PROTO(const char *rcuname, int callbacks_invoked,
659 char cb, char nr, char iit, char risk),
660
661 TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
662 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
663 TP_PROTO(const char *rcuname, int callbacks_invoked,
664 bool cb, bool nr, bool iit, bool risk),
665
666 TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
667 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
668 TP_PROTO(char *rcuname, int callbacks_invoked,
669 bool cb, bool nr, bool iit, bool risk),
670
671 TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
672 #else
673 TP_PROTO(char *rcuname, int callbacks_invoked),
674
675 TP_ARGS(rcuname, callbacks_invoked),
676 #endif
677
678 TP_FIELDS(
679 ctf_string(rcuname, rcuname)
680 ctf_integer(int, callbacks_invoked, callbacks_invoked)
681 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
682 ctf_integer(char, cb, cb)
683 ctf_integer(char, nr, nr)
684 ctf_integer(char, iit, iit)
685 ctf_integer(char, risk, risk)
686 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
687 ctf_integer(bool, cb, cb)
688 ctf_integer(bool, nr, nr)
689 ctf_integer(bool, iit, iit)
690 ctf_integer(bool, risk, risk)
691 #endif
692 )
693 )
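/*
 * Taken together, rcu_batch_start and rcu_batch_end bracket one pass of
 * rcu_do_batch(); a hedged sketch of the pairing (names illustrative):
 *
 *	trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit);
 *	... invoke up to blimit callbacks ...
 *	trace_rcu_batch_end(rcuname, invoked, cb, need_resched(),
 *			    is_idle_task(current), risk);
 */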
694
695 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
696 /*
697 * Tracepoint for rcutorture readers. The first argument is the name
698 * of the RCU flavor from rcutorture's viewpoint and the second argument
699 * is the callback address.
700 */
701 LTTNG_TRACEPOINT_EVENT(rcu_torture_read,
702
703 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
704 TP_PROTO(const char *rcutorturename, struct rcu_head *rhp,
705 unsigned long secs, unsigned long c_old, unsigned long c),
706
707 TP_ARGS(rcutorturename, rhp, secs, c_old, c),
708 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
709 TP_PROTO(char *rcutorturename, struct rcu_head *rhp,
710 unsigned long secs, unsigned long c_old, unsigned long c),
711
712 TP_ARGS(rcutorturename, rhp, secs, c_old, c),
713 #else
714 TP_PROTO(char *rcutorturename, struct rcu_head *rhp),
715
716 TP_ARGS(rcutorturename, rhp),
717 #endif
718
719 TP_FIELDS(
720 ctf_string(rcutorturename, rcutorturename)
721 ctf_integer_hex(struct rcu_head *, rhp, rhp)
722 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
723 ctf_integer(unsigned long, secs, secs)
724 ctf_integer(unsigned long, c_old, c_old)
725 ctf_integer(unsigned long, c, c)
726 #endif
727 )
728 )
729 #endif
730
731 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
732 /*
733 * Tracepoint for _rcu_barrier() execution. The string "s" describes
734 * the _rcu_barrier phase:
735 * "Begin": rcu_barrier_callback() started.
736 * "Check": rcu_barrier_callback() checking for piggybacking.
737 * "EarlyExit": rcu_barrier_callback() piggybacked, thus early exit.
738 * "Inc1": rcu_barrier_callback() piggyback check counter incremented.
739 * "Offline": rcu_barrier_callback() found offline CPU.
740 * "OnlineQ": rcu_barrier_callback() found online CPU with callbacks.
741 * "OnlineNQ": rcu_barrier_callback() found online CPU, no callbacks.
742 * "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
743 * "CB": An rcu_barrier_callback() invoked a callback, not the last.
744 * "LastCB": An rcu_barrier_callback() invoked the last callback.
745 * "Inc2": rcu_barrier_callback() piggyback check counter incremented.
746 * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
747 * is the count of remaining callbacks, and "done" is the piggybacking count.
748 */
749 LTTNG_TRACEPOINT_EVENT(rcu_barrier,
750
751 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
752 TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done),
753 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
754 TP_PROTO(char *rcuname, char *s, int cpu, int cnt, unsigned long done),
755 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
756
757 TP_ARGS(rcuname, s, cpu, cnt, done),
758
759 TP_FIELDS(
760 ctf_string(rcuname, rcuname)
761 ctf_string(s, s)
762 ctf_integer(int, cpu, cpu)
763 ctf_integer(int, cnt, cnt)
764 ctf_integer(unsigned long, done, done)
765 )
766 )
767 #endif
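/*
 * Illustrative emission during a barrier (values hypothetical): the phase
 * string typically walks through "Begin", then "OnlineQ"/"OnlineNQ" per
 * CPU, then "CB"/"LastCB", e.g.
 *
 *	trace_rcu_barrier("rcu_sched", "OnlineQ", cpu, cnt, done);
 */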
768
769 #else /* #ifdef CONFIG_RCU_TRACE */
770
771 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
772 #define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0)
773 #define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \
774 qsmask) do { } while (0)
775 #define trace_rcu_preempt_task(rcuname, pid, gp_seq) do { } while (0)
776 #define trace_rcu_unlock_preempted_task(rcuname, gp_seq, pid) do { } while (0)
777 #define trace_rcu_quiescent_state_report(rcuname, gp_seq, mask, qsmask, level, \
778 grplo, grphi, gp_tasks) do { } \
779 while (0)
780 #define trace_rcu_fqs(rcuname, gp_seq, cpu, qsevent) do { } while (0)
781 #else
782 #define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
783 #define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
784 qsmask) do { } while (0)
785 #define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
786 #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
787 #define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
788 grplo, grphi, gp_tasks) do { } \
789 while (0)
790 #define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
791 #endif
792
793 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,16,0))
794 #define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
795 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
796 #define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
797 #else
798 #define trace_rcu_dyntick(polarity) do { } while (0)
799 #endif
800 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
801 #define trace_rcu_prep_idle(reason) do { } while (0)
802 #endif
803 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
804 #define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
805 #define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
806 do { } while (0)
807 #define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
808 do { } while (0)
809 #else
810 #define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
811 #define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
812 #define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)
813 #endif
814 #define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
815 #define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
816 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
817 #define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
818 do { } while (0)
819 #else
820 #define trace_rcu_batch_end(rcuname, callbacks_invoked) do { } while (0)
821 #endif
822 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
823 #define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
824 do { } while (0)
825 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
826 #define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
827 #endif
828 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
829 #define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
830 #endif
831 #endif /* #else #ifdef CONFIG_RCU_TRACE */
832
833 #endif /* LTTNG_TRACE_RCU_H */
834
835 /* This part must be outside protection */
836 #include <probes/define_trace.h>