Fix: rcu: Fix data-race due to atomic_t copy-by-value (5.5.6, 5.4.22)
lttng-modules.git: instrumentation/events/lttng-module/rcu.h
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rcu

#if !defined(LTTNG_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_RCU_H

#include <probes/lttng-tracepoint-event.h>
#include <linux/version.h>

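/*
 * Note: like the upstream TRACE_EVENT headers, this file is deliberately
 * parsed more than once.  The LTTNG_TRACE_RCU_H guard together with
 * TRACE_HEADER_MULTI_READ lets <probes/define_trace.h>, included at the
 * bottom of this file, re-read the event descriptions below to generate
 * the probe code.
 */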
/*
 * Tracepoint for start/end markers used for utilization calculations.
 * By convention, the string is of the following forms:
 *
 * "Start <activity>" -- Mark the start of the specified activity,
 *			 such as "context switch".  Nesting is permitted.
 * "End <activity>" -- Mark the end of the specified activity.
 *
 * An "@" character within "<activity>" is a comment character: Data
 * reduction scripts will ignore the "@" and the remainder of the line.
 */
LTTNG_TRACEPOINT_EVENT(rcu_utilization,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *s),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
	TP_PROTO(char *s),
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */

	TP_ARGS(s),

	TP_FIELDS(
		ctf_string(s, s)
	)
)
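/*
 * Illustrative kernel-side call sites only (a sketch; the exact strings
 * and any wrapper macro such as TPS() depend on the kernel version):
 *
 *	trace_rcu_utilization("Start context switch");
 *	...
 *	trace_rcu_utilization("End context switch");
 *
 * The string is recorded verbatim in the "s" field defined above.
 */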

#ifdef CONFIG_RCU_TRACE

#if defined(CONFIG_TREE_RCU) \
	|| (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) \
		&& defined(CONFIG_PREEMPT_RCU)) \
	|| defined(CONFIG_TREE_PREEMPT_RCU)

/*
 * Tracepoint for grace-period events: starting and ending a grace
 * period ("start" and "end", respectively), a CPU noting the start
 * of a new grace period or the end of an old grace period ("cpustart"
 * and "cpuend", respectively), a CPU passing through a quiescent
 * state ("cpuqs"), a CPU coming online or going offline ("cpuonl"
 * and "cpuofl", respectively), and a CPU being kicked for being too
 * long in dyntick-idle mode ("kick").
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
LTTNG_TRACEPOINT_EVENT(rcu_grace_period,

	TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent),

	TP_ARGS(rcuname, gp_seq, gpevent),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_string(gpevent, gpevent)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_grace_period,

	TP_PROTO(const char *rcuname, unsigned long gpnum, const char *gpevent),

	TP_ARGS(rcuname, gpnum, gpevent),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_string(gpevent, gpevent)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_grace_period,

	TP_PROTO(char *rcuname, unsigned long gpnum, char *gpevent),

	TP_ARGS(rcuname, gpnum, gpevent),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_string(gpevent, gpevent)
	)
)
#endif
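/*
 * Note: starting with kernel 4.19 the grace-period counter handed to
 * this tracepoint is the sequence number "gp_seq" rather than the older
 * "gpnum", so the field name recorded in the trace changes accordingly.
 * Analysis scripts should select on whichever field is present instead
 * of assuming a fixed name.
 */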

/*
 * Tracepoint for grace-period-initialization events.  These are
 * distinguished by the type of RCU, the new grace-period number, the
 * rcu_node structure level, the starting and ending CPU covered by the
 * rcu_node structure, and the mask of CPUs that will be waited for.
 * All but the type of RCU are extracted from the rcu_node structure.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
LTTNG_TRACEPOINT_EVENT(rcu_grace_period_init,

	TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level,
		 int grplo, int grphi, unsigned long qsmask),

	TP_ARGS(rcuname, gp_seq, level, grplo, grphi, qsmask),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_integer(u8, level, level)
		ctf_integer(int, grplo, grplo)
		ctf_integer(int, grphi, grphi)
		ctf_integer(unsigned long, qsmask, qsmask)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_grace_period_init,

	TP_PROTO(const char *rcuname, unsigned long gpnum, u8 level,
		 int grplo, int grphi, unsigned long qsmask),

	TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(u8, level, level)
		ctf_integer(int, grplo, grplo)
		ctf_integer(int, grphi, grphi)
		ctf_integer(unsigned long, qsmask, qsmask)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_grace_period_init,

	TP_PROTO(char *rcuname, unsigned long gpnum, u8 level,
		 int grplo, int grphi, unsigned long qsmask),

	TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(u8, level, level)
		ctf_integer(int, grplo, grplo)
		ctf_integer(int, grphi, grphi)
		ctf_integer(unsigned long, qsmask, qsmask)
	)
)
#endif

/*
 * Tracepoint for tasks blocking within preemptible-RCU read-side
 * critical sections.  Track the type of RCU (which one day might
 * include SRCU), the grace-period number that the task is blocking
 * (the current or the next), and the task's PID.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
LTTNG_TRACEPOINT_EVENT(rcu_preempt_task,

	TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq),

	TP_ARGS(rcuname, pid, gp_seq),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_integer(int, pid, pid)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_preempt_task,

	TP_PROTO(const char *rcuname, int pid, unsigned long gpnum),

	TP_ARGS(rcuname, pid, gpnum),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(int, pid, pid)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_preempt_task,

	TP_PROTO(char *rcuname, int pid, unsigned long gpnum),

	TP_ARGS(rcuname, pid, gpnum),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(int, pid, pid)
	)
)
#endif

/*
 * Tracepoint for tasks that blocked within a given preemptible-RCU
 * read-side critical section exiting that critical section.  Track the
 * type of RCU (which one day might include SRCU) and the task's PID.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
LTTNG_TRACEPOINT_EVENT(rcu_unlock_preempted_task,

	TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid),

	TP_ARGS(rcuname, gp_seq, pid),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_integer(int, pid, pid)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_unlock_preempted_task,

	TP_PROTO(const char *rcuname, unsigned long gpnum, int pid),

	TP_ARGS(rcuname, gpnum, pid),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(int, pid, pid)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_unlock_preempted_task,

	TP_PROTO(char *rcuname, unsigned long gpnum, int pid),

	TP_ARGS(rcuname, gpnum, pid),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(int, pid, pid)
	)
)
#endif

/*
 * Tracepoint for quiescent-state-reporting events.  These are
 * distinguished by the type of RCU, the grace-period number, the
 * mask of quiescent lower-level entities, the rcu_node structure level,
 * the starting and ending CPU covered by the rcu_node structure, and
 * whether there are any blocked tasks blocking the current grace period.
 * All but the type of RCU are extracted from the rcu_node structure.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
LTTNG_TRACEPOINT_EVENT(rcu_quiescent_state_report,

	TP_PROTO(const char *rcuname, unsigned long gp_seq,
		 unsigned long mask, unsigned long qsmask,
		 u8 level, int grplo, int grphi, int gp_tasks),

	TP_ARGS(rcuname, gp_seq, mask, qsmask, level, grplo, grphi, gp_tasks),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_integer(unsigned long, mask, mask)
		ctf_integer(unsigned long, qsmask, qsmask)
		ctf_integer(u8, level, level)
		ctf_integer(int, grplo, grplo)
		ctf_integer(int, grphi, grphi)
		ctf_integer(u8, gp_tasks, gp_tasks)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_quiescent_state_report,

	TP_PROTO(const char *rcuname, unsigned long gpnum,
		 unsigned long mask, unsigned long qsmask,
		 u8 level, int grplo, int grphi, int gp_tasks),

	TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(unsigned long, mask, mask)
		ctf_integer(unsigned long, qsmask, qsmask)
		ctf_integer(u8, level, level)
		ctf_integer(int, grplo, grplo)
		ctf_integer(int, grphi, grphi)
		ctf_integer(u8, gp_tasks, gp_tasks)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_quiescent_state_report,

	TP_PROTO(char *rcuname, unsigned long gpnum,
		 unsigned long mask, unsigned long qsmask,
		 u8 level, int grplo, int grphi, int gp_tasks),

	TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(unsigned long, mask, mask)
		ctf_integer(unsigned long, qsmask, qsmask)
		ctf_integer(u8, level, level)
		ctf_integer(int, grplo, grplo)
		ctf_integer(int, grphi, grphi)
		ctf_integer(u8, gp_tasks, gp_tasks)
	)
)
#endif

/*
 * Tracepoint for quiescent states detected by force_quiescent_state().
 * These trace events include the type of RCU, the grace-period number
 * that was blocked by the CPU, the CPU itself, and the type of quiescent
 * state, which can be "dti" for dyntick-idle mode, "ofl" for CPU offline,
 * or "kick" when kicking a CPU that has been in dyntick-idle mode for
 * too long.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
LTTNG_TRACEPOINT_EVENT(rcu_fqs,

	TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),

	TP_ARGS(rcuname, gp_seq, cpu, qsevent),

	TP_FIELDS(
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_integer(int, cpu, cpu)
		ctf_string(rcuname, rcuname)
		ctf_string(qsevent, qsevent)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_fqs,

	TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent),

	TP_ARGS(rcuname, gpnum, cpu, qsevent),

	TP_FIELDS(
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(int, cpu, cpu)
		ctf_string(rcuname, rcuname)
		ctf_string(qsevent, qsevent)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_fqs,

	TP_PROTO(char *rcuname, unsigned long gpnum, int cpu, char *qsevent),

	TP_ARGS(rcuname, gpnum, cpu, qsevent),

	TP_FIELDS(
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(int, cpu, cpu)
		ctf_string(rcuname, rcuname)
		ctf_string(qsevent, qsevent)
	)
)
#endif

#endif /*
	* #if defined(CONFIG_TREE_RCU)
	*	|| (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)
	*		&& defined(CONFIG_PREEMPT_RCU))
	*	|| defined(CONFIG_TREE_PREEMPT_RCU)
	*/

/*
 * Tracepoint for dyntick-idle entry/exit events.  These take a string
 * as argument: "Start" for entering dyntick-idle mode, "End" for
 * leaving it, "--=" for events moving towards idle, and "++=" for events
 * moving away from idle.  "Error on entry: not idle task" and "Error on
 * exit: not idle task" indicate that a non-idle task is erroneously
 * toying with the idle loop.
 *
 * These events also take a pair of numbers, which indicate the nesting
 * depth before and after the event of interest.  Note that task-related
 * events use the upper bits of each number, while interrupt-related
 * events use the lower bits.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) \
	|| LTTNG_KERNEL_RANGE(5,5,6, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,22, 5,5,0)
LTTNG_TRACEPOINT_EVENT(rcu_dyntick,

	TP_PROTO(const char *polarity, long oldnesting, long newnesting, int dynticks),

	TP_ARGS(polarity, oldnesting, newnesting, dynticks),

	TP_FIELDS(
		ctf_string(polarity, polarity)
		ctf_integer(long, oldnesting, oldnesting)
		ctf_integer(long, newnesting, newnesting)
		ctf_integer(int, dynticks, dynticks)
	)
)

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,16,0))
LTTNG_TRACEPOINT_EVENT(rcu_dyntick,

	TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),

	TP_ARGS(polarity, oldnesting, newnesting, dynticks),

	TP_FIELDS(
		ctf_string(polarity, polarity)
		ctf_integer(long, oldnesting, oldnesting)
		ctf_integer(long, newnesting, newnesting)
		ctf_integer(int, dynticks, atomic_read(&dynticks))
	)
)

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_dyntick,

	TP_PROTO(const char *polarity, long long oldnesting, long long newnesting),

	TP_ARGS(polarity, oldnesting, newnesting),

	TP_FIELDS(
		ctf_string(polarity, polarity)
		ctf_integer(long long, oldnesting, oldnesting)
		ctf_integer(long long, newnesting, newnesting)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
LTTNG_TRACEPOINT_EVENT(rcu_dyntick,

	TP_PROTO(char *polarity, long long oldnesting, long long newnesting),

	TP_ARGS(polarity, oldnesting, newnesting),

	TP_FIELDS(
		ctf_string(polarity, polarity)
		ctf_integer(long long, oldnesting, oldnesting)
		ctf_integer(long long, newnesting, newnesting)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_dyntick,

	TP_PROTO(char *polarity),

	TP_ARGS(polarity),

	TP_FIELDS(
		ctf_string(polarity, polarity)
	)
)
#endif
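/*
 * Note on the rcu_dyntick variants above: kernels 5.6+, 5.5.6+ and
 * 5.4.22+ changed this tracepoint to pass the dynticks counter as a
 * plain int already sampled by the caller ("Fix data-race due to
 * atomic_t copy-by-value"), whereas 4.16+ passed a struct atomic_t by
 * value.  The by-value copy is a plain, unannotated load of the
 * counter, which is the data race named in the fix; the 4.16+ probe can
 * only extract the value from its private copy afterwards:
 *
 *	ctf_integer(int, dynticks, atomic_read(&dynticks))
 *
 * The fixed prototype instead records a value the caller obtained with
 * the proper atomic accessor before invoking the tracepoint.
 */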


#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
/*
 * Tracepoint for RCU preparation for idle, the goal being to get RCU
 * processing done so that the current CPU can shut off its scheduling
 * clock and enter dyntick-idle mode.  One way to accomplish this is
 * to drain all RCU callbacks from this CPU, and the other is to have
 * done everything RCU requires for the current grace period.  In this
 * latter case, the CPU will be awakened at the end of the current grace
 * period in order to process the remainder of its callbacks.
 *
 * These tracepoints take a string as argument:
 *
 *	"No callbacks": Nothing to do, no callbacks on this CPU.
 *	"In holdoff": Nothing to do, holding off after unsuccessful attempt.
 *	"Begin holdoff": Attempt failed, don't retry until next jiffy.
 *	"Dyntick with callbacks": Entering dyntick-idle despite callbacks.
 *	"Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks.
 *	"More callbacks": Still more callbacks, try again to clear them out.
 *	"Callbacks drained": All callbacks processed, off to dyntick idle!
 *	"Timer": Timer fired to cause CPU to continue processing callbacks.
 *	"Demigrate": Timer fired on wrong CPU, woke up correct CPU.
 *	"Cleanup after idle": Idle exited, timer canceled.
 */
LTTNG_TRACEPOINT_EVENT(rcu_prep_idle,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *reason),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
	TP_PROTO(char *reason),
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */

	TP_ARGS(reason),

	TP_FIELDS(
		ctf_string(reason, reason)
	)
)
#endif

/*
 * Tracepoint for the registration of a single RCU callback function.
 * The first argument is the type of RCU, the second argument is
 * a pointer to the RCU callback itself, the third element is the
 * number of lazy callbacks queued, and the fourth element is the
 * total number of callbacks queued.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
LTTNG_TRACEPOINT_EVENT(rcu_callback,

	TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen),

	TP_ARGS(rcuname, rhp, qlen),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(void *, func, rhp->func)
		ctf_integer(long, qlen, qlen)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_callback,

	TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy,
		 long qlen),

	TP_ARGS(rcuname, rhp, qlen_lazy, qlen),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(void *, func, rhp->func)
		ctf_integer(long, qlen_lazy, qlen_lazy)
		ctf_integer(long, qlen, qlen)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
LTTNG_TRACEPOINT_EVENT(rcu_callback,

	TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen_lazy,
		 long qlen),

	TP_ARGS(rcuname, rhp, qlen_lazy, qlen),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(void *, func, rhp->func)
		ctf_integer(long, qlen_lazy, qlen_lazy)
		ctf_integer(long, qlen, qlen)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_callback,

	TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen),

	TP_ARGS(rcuname, rhp, qlen),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(void *, func, rhp->func)
		ctf_integer(long, qlen, qlen)
	)
)
#endif
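/*
 * Note: the >= 5.6.0 variant above no longer receives a separate
 * lazy-callback count, so only the total queue length "qlen" is
 * recorded there; the older variants additionally record "qlen_lazy".
 */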


/*
 * Tracepoint for the registration of a single RCU callback of the special
 * kfree() form.  The first argument is the RCU type, the second argument
 * is a pointer to the RCU callback, the third argument is the offset
 * of the callback within the enclosing RCU-protected data structure,
 * the fourth argument is the number of lazy callbacks queued, and the
 * fifth argument is the total number of callbacks queued.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
LTTNG_TRACEPOINT_EVENT(rcu_kfree_callback,

	TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
		 long qlen),

	TP_ARGS(rcuname, rhp, offset, qlen),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(unsigned long, offset, offset)
		ctf_integer(long, qlen, qlen)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_kfree_callback,

	TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
		 long qlen_lazy, long qlen),

	TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(unsigned long, offset, offset)
		ctf_integer(long, qlen_lazy, qlen_lazy)
		ctf_integer(long, qlen, qlen)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
LTTNG_TRACEPOINT_EVENT(rcu_kfree_callback,

	TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
		 long qlen_lazy, long qlen),

	TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(unsigned long, offset, offset)
		ctf_integer(long, qlen_lazy, qlen_lazy)
		ctf_integer(long, qlen, qlen)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_kfree_callback,

	TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
		 long qlen),

	TP_ARGS(rcuname, rhp, offset, qlen),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(unsigned long, offset, offset)
		ctf_integer(long, qlen, qlen)
	)
)
#endif

/*
 * Tracepoint for marking the beginning rcu_do_batch, performed to start
 * RCU callback invocation.  The first argument is the RCU flavor,
 * the second is the number of lazy callbacks queued, the third is
 * the total number of callbacks queued, and the fourth argument is
 * the current RCU-callback batch limit.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
LTTNG_TRACEPOINT_EVENT(rcu_batch_start,

	TP_PROTO(const char *rcuname, long qlen, long blimit),

	TP_ARGS(rcuname, qlen, blimit),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(long, qlen, qlen)
		ctf_integer(long, blimit, blimit)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_batch_start,

	TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit),

	TP_ARGS(rcuname, qlen_lazy, qlen, blimit),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(long, qlen_lazy, qlen_lazy)
		ctf_integer(long, qlen, qlen)
		ctf_integer(long, blimit, blimit)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
LTTNG_TRACEPOINT_EVENT(rcu_batch_start,

	TP_PROTO(char *rcuname, long qlen_lazy, long qlen, long blimit),

	TP_ARGS(rcuname, qlen_lazy, qlen, blimit),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(long, qlen_lazy, qlen_lazy)
		ctf_integer(long, qlen, qlen)
		ctf_integer(long, blimit, blimit)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
LTTNG_TRACEPOINT_EVENT(rcu_batch_start,

	TP_PROTO(char *rcuname, long qlen_lazy, long qlen, int blimit),

	TP_ARGS(rcuname, qlen_lazy, qlen, blimit),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(long, qlen_lazy, qlen_lazy)
		ctf_integer(long, qlen, qlen)
		ctf_integer(int, blimit, blimit)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_batch_start,

	TP_PROTO(char *rcuname, long qlen, int blimit),

	TP_ARGS(rcuname, qlen, blimit),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(long, qlen, qlen)
		ctf_integer(int, blimit, blimit)
	)
)
#endif
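/*
 * Usage sketch (illustration only; assumes lttng-tools and a kernel
 * built with CONFIG_RCU_TRACE so these probes have something to hook):
 *
 *	lttng create rcu-session
 *	lttng enable-event --kernel rcu_batch_start,rcu_invoke_callback,rcu_batch_end
 *	lttng start
 *	... run the workload of interest ...
 *	lttng stop
 *	lttng view
 */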

/*
 * Tracepoint for the invocation of a single RCU callback function.
 * The first argument is the type of RCU, and the second argument is
 * a pointer to the RCU callback itself.
 */
LTTNG_TRACEPOINT_EVENT(rcu_invoke_callback,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *rcuname, struct rcu_head *rhp),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
	TP_PROTO(char *rcuname, struct rcu_head *rhp),
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */

	TP_ARGS(rcuname, rhp),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(void *, func, rhp->func)
	)
)

/*
 * Tracepoint for the invocation of a single RCU callback of the special
 * kfree() form.  The first argument is the RCU flavor, the second
 * argument is a pointer to the RCU callback, and the third argument
 * is the offset of the callback within the enclosing RCU-protected
 * data structure.
 */
LTTNG_TRACEPOINT_EVENT(rcu_invoke_kfree_callback,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
	TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset),
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */

	TP_ARGS(rcuname, rhp, offset),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer(unsigned long, offset, offset)
	)
)

/*
 * Tracepoint for exiting rcu_do_batch after RCU callbacks have been
 * invoked.  The first argument is the name of the RCU flavor,
 * the second argument is number of callbacks actually invoked,
 * the third argument (cb) is whether or not any of the callbacks that
 * were ready to invoke at the beginning of this batch are still
 * queued, the fourth argument (nr) is the return value of need_resched(),
 * the fifth argument (iit) is 1 if the current task is the idle task,
 * and the sixth argument (risk) is the return value from
 * rcu_is_callbacks_kthread().
 */
LTTNG_TRACEPOINT_EVENT(rcu_batch_end,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
	TP_PROTO(const char *rcuname, int callbacks_invoked,
		 char cb, char nr, char iit, char risk),

	TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *rcuname, int callbacks_invoked,
		 bool cb, bool nr, bool iit, bool risk),

	TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
	TP_PROTO(char *rcuname, int callbacks_invoked,
		 bool cb, bool nr, bool iit, bool risk),

	TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
#else
	TP_PROTO(char *rcuname, int callbacks_invoked),

	TP_ARGS(rcuname, callbacks_invoked),
#endif

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(int, callbacks_invoked, callbacks_invoked)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
		ctf_integer(char, cb, cb)
		ctf_integer(char, nr, nr)
		ctf_integer(char, iit, iit)
		ctf_integer(char, risk, risk)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
		ctf_integer(bool, cb, cb)
		ctf_integer(bool, nr, nr)
		ctf_integer(bool, iit, iit)
		ctf_integer(bool, risk, risk)
#endif
	)
)
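/*
 * Note: the cb/nr/iit/risk flags are recorded as char for kernels 3.13+
 * and as bool for 3.3 through 3.12, presumably tracking the upstream
 * prototype change; either way they carry one-byte boolean-like values.
 */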

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
/*
 * Tracepoint for rcutorture readers.  The first argument is the name
 * of the RCU flavor from rcutorture's viewpoint and the second argument
 * is the callback address.
 */
LTTNG_TRACEPOINT_EVENT(rcu_torture_read,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *rcutorturename, struct rcu_head *rhp,
		 unsigned long secs, unsigned long c_old, unsigned long c),

	TP_ARGS(rcutorturename, rhp, secs, c_old, c),
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
	TP_PROTO(char *rcutorturename, struct rcu_head *rhp,
		 unsigned long secs, unsigned long c_old, unsigned long c),

	TP_ARGS(rcutorturename, rhp, secs, c_old, c),
#else
	TP_PROTO(char *rcutorturename, struct rcu_head *rhp),

	TP_ARGS(rcutorturename, rhp),
#endif

	TP_FIELDS(
		ctf_string(rcutorturename, rcutorturename)
		ctf_integer_hex(struct rcu_head *, rhp, rhp)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
		ctf_integer(unsigned long, secs, secs)
		ctf_integer(unsigned long, c_old, c_old)
		ctf_integer(unsigned long, c, c)
#endif
	)
)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
/*
 * Tracepoint for _rcu_barrier() execution.  The string "s" describes
 * the _rcu_barrier phase:
 *	"Begin": rcu_barrier_callback() started.
 *	"Check": rcu_barrier_callback() checking for piggybacking.
 *	"EarlyExit": rcu_barrier_callback() piggybacked, thus early exit.
 *	"Inc1": rcu_barrier_callback() piggyback check counter incremented.
 *	"Offline": rcu_barrier_callback() found offline CPU
 *	"OnlineQ": rcu_barrier_callback() found online CPU with callbacks.
 *	"OnlineNQ": rcu_barrier_callback() found online CPU, no callbacks.
 *	"IRQ": An rcu_barrier_callback() callback posted on remote CPU.
 *	"CB": An rcu_barrier_callback() invoked a callback, not the last.
 *	"LastCB": An rcu_barrier_callback() invoked the last callback.
 *	"Inc2": rcu_barrier_callback() piggyback check counter incremented.
 * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
 * is the count of remaining callbacks, and "done" is the piggybacking count.
 */
LTTNG_TRACEPOINT_EVENT(rcu_barrier,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
	TP_PROTO(char *rcuname, char *s, int cpu, int cnt, unsigned long done),
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */

	TP_ARGS(rcuname, s, cpu, cnt, done),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_string(s, s)
		ctf_integer(int, cpu, cpu)
		ctf_integer(int, cnt, cnt)
		ctf_integer(unsigned long, done, done)
	)
)
#endif

#else /* #ifdef CONFIG_RCU_TRACE */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
#define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0)
#define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \
				    qsmask) do { } while (0)
#define trace_rcu_preempt_task(rcuname, pid, gp_seq) do { } while (0)
#define trace_rcu_unlock_preempted_task(rcuname, gp_seq, pid) do { } while (0)
#define trace_rcu_quiescent_state_report(rcuname, gp_seq, mask, qsmask, level, \
					 grplo, grphi, gp_tasks) do { } \
	while (0)
#define trace_rcu_fqs(rcuname, gp_seq, cpu, qsevent) do { } while (0)
#else
#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
				    qsmask) do { } while (0)
#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
					 grplo, grphi, gp_tasks) do { } \
	while (0)
#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,16,0))
#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
#else
#define trace_rcu_dyntick(polarity) do { } while (0)
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
#define trace_rcu_prep_idle(reason) do { } while (0)
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
	do { } while (0)
#define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
	do { } while (0)
#else
#define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
#define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)
#endif
#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
	do { } while (0)
#else
#define trace_rcu_batch_end(rcuname, callbacks_invoked) do { } while (0)
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
#define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
#endif
#endif /* #else #ifdef CONFIG_RCU_TRACE */

#endif /* LTTNG_TRACE_RCU_H */

/* This part must be outside protection */
#include <probes/define_trace.h>