fix: rcu: Fix data-race due to atomic_t copy-by-value (v5.6)
lttng-modules.git: instrumentation/events/lttng-module/rcu.h
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rcu

#if !defined(LTTNG_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_RCU_H

#include <probes/lttng-tracepoint-event.h>
#include <linux/version.h>

/*
 * Tracepoint for start/end markers used for utilization calculations.
 * By convention, the string takes one of the following forms:
 *
 * "Start <activity>" -- Mark the start of the specified activity,
 *			 such as "context switch". Nesting is permitted.
 * "End <activity>" -- Mark the end of the specified activity.
 *
 * An "@" character within "<activity>" is a comment character: Data
 * reduction scripts will ignore the "@" and the remainder of the line.
 */
LTTNG_TRACEPOINT_EVENT(rcu_utilization,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *s),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
	TP_PROTO(char *s),
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */

	TP_ARGS(s),

	TP_FIELDS(
		ctf_string(s, s)
	)
)
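
/*
 * Illustrative use only (not part of this header): a caller following the
 * "Start <activity>"/"End <activity>" convention above would emit, e.g.
 * (the activity string is an assumption):
 *
 *	trace_rcu_utilization("Start context switch");
 *	trace_rcu_utilization("End context switch");
 */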

#ifdef CONFIG_RCU_TRACE

#if defined(CONFIG_TREE_RCU) \
	|| (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) \
		&& defined(CONFIG_PREEMPT_RCU)) \
	|| defined(CONFIG_TREE_PREEMPT_RCU)

/*
 * Tracepoint for grace-period events: starting and ending a grace
 * period ("start" and "end", respectively), a CPU noting the start
 * of a new grace period or the end of an old grace period ("cpustart"
 * and "cpuend", respectively), a CPU passing through a quiescent
 * state ("cpuqs"), a CPU coming online or going offline ("cpuonl"
 * and "cpuofl", respectively), and a CPU being kicked for being too
 * long in dyntick-idle mode ("kick").
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
LTTNG_TRACEPOINT_EVENT(rcu_grace_period,

	TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent),

	TP_ARGS(rcuname, gp_seq, gpevent),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_string(gpevent, gpevent)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_grace_period,

	TP_PROTO(const char *rcuname, unsigned long gpnum, const char *gpevent),

	TP_ARGS(rcuname, gpnum, gpevent),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_string(gpevent, gpevent)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_grace_period,

	TP_PROTO(char *rcuname, unsigned long gpnum, char *gpevent),

	TP_ARGS(rcuname, gpnum, gpevent),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_string(gpevent, gpevent)
	)
)
#endif
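
/*
 * Illustrative use only (not part of this header): with the event strings
 * documented above, a >= 4.19 kernel caller would emit records such as
 * the following (the flavor name and values are assumptions):
 *
 *	trace_rcu_grace_period("rcu_sched", gp_seq, "start");
 *	trace_rcu_grace_period("rcu_sched", gp_seq, "cpuqs");
 */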

/*
 * Tracepoint for grace-period-initialization events. These are
 * distinguished by the type of RCU, the new grace-period number, the
 * rcu_node structure level, the starting and ending CPU covered by the
 * rcu_node structure, and the mask of CPUs that will be waited for.
 * All but the type of RCU are extracted from the rcu_node structure.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
LTTNG_TRACEPOINT_EVENT(rcu_grace_period_init,

	TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level,
		 int grplo, int grphi, unsigned long qsmask),

	TP_ARGS(rcuname, gp_seq, level, grplo, grphi, qsmask),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_integer(u8, level, level)
		ctf_integer(int, grplo, grplo)
		ctf_integer(int, grphi, grphi)
		ctf_integer(unsigned long, qsmask, qsmask)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_grace_period_init,

	TP_PROTO(const char *rcuname, unsigned long gpnum, u8 level,
		 int grplo, int grphi, unsigned long qsmask),

	TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(u8, level, level)
		ctf_integer(int, grplo, grplo)
		ctf_integer(int, grphi, grphi)
		ctf_integer(unsigned long, qsmask, qsmask)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_grace_period_init,

	TP_PROTO(char *rcuname, unsigned long gpnum, u8 level,
		 int grplo, int grphi, unsigned long qsmask),

	TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(u8, level, level)
		ctf_integer(int, grplo, grplo)
		ctf_integer(int, grphi, grphi)
		ctf_integer(unsigned long, qsmask, qsmask)
	)
)
#endif

/*
 * Tracepoint for tasks blocking within preemptible-RCU read-side
 * critical sections. Track the type of RCU (which one day might
 * include SRCU), the grace-period number that the task is blocking
 * (the current or the next), and the task's PID.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
LTTNG_TRACEPOINT_EVENT(rcu_preempt_task,

	TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq),

	TP_ARGS(rcuname, pid, gp_seq),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_integer(int, pid, pid)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_preempt_task,

	TP_PROTO(const char *rcuname, int pid, unsigned long gpnum),

	TP_ARGS(rcuname, pid, gpnum),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(int, pid, pid)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_preempt_task,

	TP_PROTO(char *rcuname, int pid, unsigned long gpnum),

	TP_ARGS(rcuname, pid, gpnum),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(int, pid, pid)
	)
)
#endif

/*
 * Tracepoint for tasks that blocked within a given preemptible-RCU
 * read-side critical section exiting that critical section. Track the
 * type of RCU (which one day might include SRCU) and the task's PID.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
LTTNG_TRACEPOINT_EVENT(rcu_unlock_preempted_task,

	TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid),

	TP_ARGS(rcuname, gp_seq, pid),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_integer(int, pid, pid)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_unlock_preempted_task,

	TP_PROTO(const char *rcuname, unsigned long gpnum, int pid),

	TP_ARGS(rcuname, gpnum, pid),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(int, pid, pid)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_unlock_preempted_task,

	TP_PROTO(char *rcuname, unsigned long gpnum, int pid),

	TP_ARGS(rcuname, gpnum, pid),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(int, pid, pid)
	)
)
#endif

/*
 * Tracepoint for quiescent-state-reporting events. These are
 * distinguished by the type of RCU, the grace-period number, the
 * mask of quiescent lower-level entities, the rcu_node structure level,
 * the starting and ending CPU covered by the rcu_node structure, and
 * whether there are any blocked tasks blocking the current grace period.
 * All but the type of RCU are extracted from the rcu_node structure.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
LTTNG_TRACEPOINT_EVENT(rcu_quiescent_state_report,

	TP_PROTO(const char *rcuname, unsigned long gp_seq,
		 unsigned long mask, unsigned long qsmask,
		 u8 level, int grplo, int grphi, int gp_tasks),

	TP_ARGS(rcuname, gp_seq, mask, qsmask, level, grplo, grphi, gp_tasks),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_integer(unsigned long, mask, mask)
		ctf_integer(unsigned long, qsmask, qsmask)
		ctf_integer(u8, level, level)
		ctf_integer(int, grplo, grplo)
		ctf_integer(int, grphi, grphi)
		ctf_integer(u8, gp_tasks, gp_tasks)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_quiescent_state_report,

	TP_PROTO(const char *rcuname, unsigned long gpnum,
		 unsigned long mask, unsigned long qsmask,
		 u8 level, int grplo, int grphi, int gp_tasks),

	TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(unsigned long, mask, mask)
		ctf_integer(unsigned long, qsmask, qsmask)
		ctf_integer(u8, level, level)
		ctf_integer(int, grplo, grplo)
		ctf_integer(int, grphi, grphi)
		ctf_integer(u8, gp_tasks, gp_tasks)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_quiescent_state_report,

	TP_PROTO(char *rcuname, unsigned long gpnum,
		 unsigned long mask, unsigned long qsmask,
		 u8 level, int grplo, int grphi, int gp_tasks),

	TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(unsigned long, mask, mask)
		ctf_integer(unsigned long, qsmask, qsmask)
		ctf_integer(u8, level, level)
		ctf_integer(int, grplo, grplo)
		ctf_integer(int, grphi, grphi)
		ctf_integer(u8, gp_tasks, gp_tasks)
	)
)
#endif

/*
 * Tracepoint for quiescent states detected by force_quiescent_state().
 * These trace events include the type of RCU, the grace-period number
 * that was blocked by the CPU, the CPU itself, and the type of quiescent
 * state, which can be "dti" for dyntick-idle mode, "ofl" for CPU offline,
 * or "kick" when kicking a CPU that has been in dyntick-idle mode for
 * too long.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
LTTNG_TRACEPOINT_EVENT(rcu_fqs,

	TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),

	TP_ARGS(rcuname, gp_seq, cpu, qsevent),

	TP_FIELDS(
		ctf_integer(unsigned long, gp_seq, gp_seq)
		ctf_integer(int, cpu, cpu)
		ctf_string(rcuname, rcuname)
		ctf_string(qsevent, qsevent)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_fqs,

	TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent),

	TP_ARGS(rcuname, gpnum, cpu, qsevent),

	TP_FIELDS(
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(int, cpu, cpu)
		ctf_string(rcuname, rcuname)
		ctf_string(qsevent, qsevent)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_fqs,

	TP_PROTO(char *rcuname, unsigned long gpnum, int cpu, char *qsevent),

	TP_ARGS(rcuname, gpnum, cpu, qsevent),

	TP_FIELDS(
		ctf_integer(unsigned long, gpnum, gpnum)
		ctf_integer(int, cpu, cpu)
		ctf_string(rcuname, rcuname)
		ctf_string(qsevent, qsevent)
	)
)
#endif
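
/*
 * Illustrative use only (not part of this header): the qsevent strings
 * documented above ("dti", "ofl", "kick") would appear in calls such as
 * the following; the flavor name and values are assumptions.
 *
 *	trace_rcu_fqs("rcu_sched", gp_seq, cpu, "dti");
 */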

#endif /*
	* #if defined(CONFIG_TREE_RCU)
	*	|| (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)
	*		&& defined(CONFIG_PREEMPT_RCU))
	*	|| defined(CONFIG_TREE_PREEMPT_RCU)
	*/

/*
 * Tracepoint for dyntick-idle entry/exit events. These take a string
 * as argument: "Start" for entering dyntick-idle mode, "End" for
 * leaving it, "--=" for events moving towards idle, and "++=" for events
 * moving away from idle. "Error on entry: not idle task" and "Error on
 * exit: not idle task" indicate that a non-idle task is erroneously
 * toying with the idle loop.
 *
 * These events also take a pair of numbers, which indicate the nesting
 * depth before and after the event of interest. Note that task-related
 * events use the upper bits of each number, while interrupt-related
 * events use the lower bits.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
LTTNG_TRACEPOINT_EVENT(rcu_dyntick,

	TP_PROTO(const char *polarity, long oldnesting, long newnesting, int dynticks),

	TP_ARGS(polarity, oldnesting, newnesting, dynticks),

	TP_FIELDS(
		ctf_string(polarity, polarity)
		ctf_integer(long, oldnesting, oldnesting)
		ctf_integer(long, newnesting, newnesting)
		ctf_integer(int, dynticks, dynticks)
	)
)

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,16,0))
LTTNG_TRACEPOINT_EVENT(rcu_dyntick,

	TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),

	TP_ARGS(polarity, oldnesting, newnesting, dynticks),

	TP_FIELDS(
		ctf_string(polarity, polarity)
		ctf_integer(long, oldnesting, oldnesting)
		ctf_integer(long, newnesting, newnesting)
		ctf_integer(int, dynticks, atomic_read(&dynticks))
	)
)

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
LTTNG_TRACEPOINT_EVENT(rcu_dyntick,

	TP_PROTO(const char *polarity, long long oldnesting, long long newnesting),

	TP_ARGS(polarity, oldnesting, newnesting),

	TP_FIELDS(
		ctf_string(polarity, polarity)
		ctf_integer(long long, oldnesting, oldnesting)
		ctf_integer(long long, newnesting, newnesting)
	)
)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
LTTNG_TRACEPOINT_EVENT(rcu_dyntick,

	TP_PROTO(char *polarity, long long oldnesting, long long newnesting),

	TP_ARGS(polarity, oldnesting, newnesting),

	TP_FIELDS(
		ctf_string(polarity, polarity)
		ctf_integer(long long, oldnesting, oldnesting)
		ctf_integer(long long, newnesting, newnesting)
	)
)
#else
LTTNG_TRACEPOINT_EVENT(rcu_dyntick,

	TP_PROTO(char *polarity),

	TP_ARGS(polarity),

	TP_FIELDS(
		ctf_string(polarity, polarity)
	)
)
#endif
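
/*
 * Note on the variants above: upstream Linux 5.6 changed this tracepoint
 * to receive the dynticks counter as a plain int (the caller performs the
 * atomic_read()), because copying an atomic_t by value into the tracepoint
 * constitutes a data race on the counter. Hence the >= 5.6 probe records
 * the integer directly, while the 4.16-5.5 probe still takes an atomic_t
 * and reads it here. Illustrative >= 5.6 call, for reference only (the
 * variable names are assumptions):
 *
 *	trace_rcu_dyntick("Start", oldnesting, newnesting,
 *			  atomic_read(&rdp->dynticks));
 */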


#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
/*
 * Tracepoint for RCU preparation for idle, the goal being to get RCU
 * processing done so that the current CPU can shut off its scheduling
 * clock and enter dyntick-idle mode. One way to accomplish this is
 * to drain all RCU callbacks from this CPU, and the other is to have
 * done everything RCU requires for the current grace period. In this
 * latter case, the CPU will be awakened at the end of the current grace
 * period in order to process the remainder of its callbacks.
 *
 * These tracepoints take a string as argument:
 *
 * "No callbacks": Nothing to do, no callbacks on this CPU.
 * "In holdoff": Nothing to do, holding off after unsuccessful attempt.
 * "Begin holdoff": Attempt failed, don't retry until next jiffy.
 * "Dyntick with callbacks": Entering dyntick-idle despite callbacks.
 * "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks.
 * "More callbacks": Still more callbacks, try again to clear them out.
 * "Callbacks drained": All callbacks processed, off to dyntick idle!
 * "Timer": Timer fired to cause CPU to continue processing callbacks.
 * "Demigrate": Timer fired on wrong CPU, woke up correct CPU.
 * "Cleanup after idle": Idle exited, timer canceled.
 */
LTTNG_TRACEPOINT_EVENT(rcu_prep_idle,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *reason),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
	TP_PROTO(char *reason),
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */

	TP_ARGS(reason),

	TP_FIELDS(
		ctf_string(reason, reason)
	)
)
#endif
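
/*
 * Illustrative use only (not part of this header): the reason strings
 * listed above are passed verbatim, e.g.
 *
 *	trace_rcu_prep_idle("No callbacks");
 */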

/*
 * Tracepoint for the registration of a single RCU callback function.
 * The first argument is the type of RCU, the second argument is
 * a pointer to the RCU callback itself, the third element is the
 * number of lazy callbacks queued, and the fourth element is the
 * total number of callbacks queued.
 */
LTTNG_TRACEPOINT_EVENT(rcu_callback,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy,
		 long qlen),

	TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
	TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen_lazy,
		 long qlen),

	TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
#else
	TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen),

	TP_ARGS(rcuname, rhp, qlen),
#endif

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(void *, func, rhp->func)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
		ctf_integer(long, qlen_lazy, qlen_lazy)
#endif
		ctf_integer(long, qlen, qlen)
	)
)
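
/*
 * Illustrative use only (not part of this header): on >= 3.12 kernels a
 * callback registration would be recorded roughly as below, with the
 * prototype parameter names standing in for the caller's values and the
 * flavor name an assumption:
 *
 *	trace_rcu_callback("rcu_sched", rhp, qlen_lazy, qlen);
 */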

/*
 * Tracepoint for the registration of a single RCU callback of the special
 * kfree() form. The first argument is the RCU type, the second argument
 * is a pointer to the RCU callback, the third argument is the offset
 * of the callback within the enclosing RCU-protected data structure,
 * the fourth argument is the number of lazy callbacks queued, and the
 * fifth argument is the total number of callbacks queued.
 */
LTTNG_TRACEPOINT_EVENT(rcu_kfree_callback,


#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
		 long qlen_lazy, long qlen),

	TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
	TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
		 long qlen_lazy, long qlen),

	TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
#else
	TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
		 long qlen),

	TP_ARGS(rcuname, rhp, offset, qlen),
#endif

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(unsigned long, offset, offset)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
		ctf_integer(long, qlen_lazy, qlen_lazy)
#endif
		ctf_integer(long, qlen, qlen)
	)
)

/*
 * Tracepoint for marking the beginning of rcu_do_batch, performed to start
 * RCU callback invocation. The first argument is the RCU flavor,
 * the second is the number of lazy callbacks queued, the third is
 * the total number of callbacks queued, and the fourth argument is
 * the current RCU-callback batch limit.
 */
LTTNG_TRACEPOINT_EVENT(rcu_batch_start,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit),

	TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
	TP_PROTO(char *rcuname, long qlen_lazy, long qlen, long blimit),

	TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
	TP_PROTO(char *rcuname, long qlen_lazy, long qlen, int blimit),

	TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
#else
	TP_PROTO(char *rcuname, long qlen, int blimit),

	TP_ARGS(rcuname, qlen, blimit),
#endif

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
		ctf_integer(long, qlen_lazy, qlen_lazy)
#endif
		ctf_integer(long, qlen, qlen)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
		ctf_integer(long, blimit, blimit)
#else
		ctf_integer(int, blimit, blimit)
#endif
	)
)
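
/*
 * Illustrative use only (not part of this header): a batch-start record on
 * a >= 3.12 kernel, with the prototype parameter names standing in for the
 * caller's values and the flavor name an assumption:
 *
 *	trace_rcu_batch_start("rcu_sched", qlen_lazy, qlen, blimit);
 */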

/*
 * Tracepoint for the invocation of a single RCU callback function.
 * The first argument is the type of RCU, and the second argument is
 * a pointer to the RCU callback itself.
 */
LTTNG_TRACEPOINT_EVENT(rcu_invoke_callback,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *rcuname, struct rcu_head *rhp),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
	TP_PROTO(char *rcuname, struct rcu_head *rhp),
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */

	TP_ARGS(rcuname, rhp),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer_hex(void *, func, rhp->func)
	)
)

/*
 * Tracepoint for the invocation of a single RCU callback of the special
 * kfree() form. The first argument is the RCU flavor, the second
 * argument is a pointer to the RCU callback, and the third argument
 * is the offset of the callback within the enclosing RCU-protected
 * data structure.
 */
LTTNG_TRACEPOINT_EVENT(rcu_invoke_kfree_callback,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
	TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset),
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */

	TP_ARGS(rcuname, rhp, offset),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer_hex(void *, rhp, rhp)
		ctf_integer(unsigned long, offset, offset)
	)
)

/*
 * Tracepoint for exiting rcu_do_batch after RCU callbacks have been
 * invoked. The first argument is the name of the RCU flavor,
 * the second argument is the number of callbacks actually invoked,
 * the third argument (cb) is whether or not any of the callbacks that
 * were ready to invoke at the beginning of this batch are still
 * queued, the fourth argument (nr) is the return value of need_resched(),
 * the fifth argument (iit) is 1 if the current task is the idle task,
 * and the sixth argument (risk) is the return value from
 * rcu_is_callbacks_kthread().
 */
LTTNG_TRACEPOINT_EVENT(rcu_batch_end,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
	TP_PROTO(const char *rcuname, int callbacks_invoked,
		 char cb, char nr, char iit, char risk),

	TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *rcuname, int callbacks_invoked,
		 bool cb, bool nr, bool iit, bool risk),

	TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
	TP_PROTO(char *rcuname, int callbacks_invoked,
		 bool cb, bool nr, bool iit, bool risk),

	TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
#else
	TP_PROTO(char *rcuname, int callbacks_invoked),

	TP_ARGS(rcuname, callbacks_invoked),
#endif

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_integer(int, callbacks_invoked, callbacks_invoked)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
		ctf_integer(char, cb, cb)
		ctf_integer(char, nr, nr)
		ctf_integer(char, iit, iit)
		ctf_integer(char, risk, risk)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
		ctf_integer(bool, cb, cb)
		ctf_integer(bool, nr, nr)
		ctf_integer(bool, iit, iit)
		ctf_integer(bool, risk, risk)
#endif
	)
)
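
/*
 * Illustrative use only (not part of this header): on >= 3.13 kernels the
 * four status flags documented above could be filled in as sketched below;
 * the flavor name, "count" and "cb" values are assumptions.
 *
 *	trace_rcu_batch_end("rcu_sched", count, cb, need_resched(),
 *			    is_idle_task(current), rcu_is_callbacks_kthread());
 */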

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
/*
 * Tracepoint for rcutorture readers. The first argument is the name
 * of the RCU flavor from rcutorture's viewpoint and the second argument
 * is the callback address.
 */
LTTNG_TRACEPOINT_EVENT(rcu_torture_read,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *rcutorturename, struct rcu_head *rhp,
		 unsigned long secs, unsigned long c_old, unsigned long c),

	TP_ARGS(rcutorturename, rhp, secs, c_old, c),
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
	TP_PROTO(char *rcutorturename, struct rcu_head *rhp,
		 unsigned long secs, unsigned long c_old, unsigned long c),

	TP_ARGS(rcutorturename, rhp, secs, c_old, c),
#else
	TP_PROTO(char *rcutorturename, struct rcu_head *rhp),

	TP_ARGS(rcutorturename, rhp),
#endif

	TP_FIELDS(
		ctf_string(rcutorturename, rcutorturename)
		ctf_integer_hex(struct rcu_head *, rhp, rhp)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
		ctf_integer(unsigned long, secs, secs)
		ctf_integer(unsigned long, c_old, c_old)
		ctf_integer(unsigned long, c, c)
#endif
	)
)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
/*
 * Tracepoint for _rcu_barrier() execution. The string "s" describes
 * the _rcu_barrier phase:
 * "Begin": rcu_barrier_callback() started.
 * "Check": rcu_barrier_callback() checking for piggybacking.
 * "EarlyExit": rcu_barrier_callback() piggybacked, thus early exit.
 * "Inc1": rcu_barrier_callback() piggyback check counter incremented.
 * "Offline": rcu_barrier_callback() found offline CPU
 * "OnlineQ": rcu_barrier_callback() found online CPU with callbacks.
 * "OnlineNQ": rcu_barrier_callback() found online CPU, no callbacks.
 * "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
 * "CB": An rcu_barrier_callback() invoked a callback, not the last.
 * "LastCB": An rcu_barrier_callback() invoked the last callback.
 * "Inc2": rcu_barrier_callback() piggyback check counter incremented.
 * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
 * is the count of remaining callbacks, and "done" is the piggybacking count.
 */
LTTNG_TRACEPOINT_EVENT(rcu_barrier,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
	TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
	TP_PROTO(char *rcuname, char *s, int cpu, int cnt, unsigned long done),
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */

	TP_ARGS(rcuname, s, cpu, cnt, done),

	TP_FIELDS(
		ctf_string(rcuname, rcuname)
		ctf_string(s, s)
		ctf_integer(int, cpu, cpu)
		ctf_integer(int, cnt, cnt)
		ctf_integer(unsigned long, done, done)
	)
)
#endif
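
/*
 * Illustrative use only (not part of this header): a barrier phase record
 * using one of the phase strings listed above; the flavor name and values
 * are assumptions.
 *
 *	trace_rcu_barrier("rcu_sched", "Begin", -1, cnt, done);
 */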

#else /* #ifdef CONFIG_RCU_TRACE */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
#define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0)
#define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \
		qsmask) do { } while (0)
#define trace_rcu_preempt_task(rcuname, pid, gp_seq) do { } while (0)
#define trace_rcu_unlock_preempted_task(rcuname, gp_seq, pid) do { } while (0)
#define trace_rcu_quiescent_state_report(rcuname, gp_seq, mask, qsmask, level, \
		grplo, grphi, gp_tasks) do { } \
		while (0)
#define trace_rcu_fqs(rcuname, gp_seq, cpu, qsevent) do { } while (0)
#else
#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
		qsmask) do { } while (0)
#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
		grplo, grphi, gp_tasks) do { } \
		while (0)
#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,16,0))
#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
#else
#define trace_rcu_dyntick(polarity) do { } while (0)
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
#define trace_rcu_prep_idle(reason) do { } while (0)
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
	do { } while (0)
#define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
	do { } while (0)
#else
#define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
#define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)
#endif
#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
	do { } while (0)
#else
#define trace_rcu_batch_end(rcuname, callbacks_invoked) do { } while (0)
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
#define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
#endif
#endif /* #else #ifdef CONFIG_RCU_TRACE */

#endif /* LTTNG_TRACE_RCU_H */

/* This part must be outside protection */
#include <probes/define_trace.h>