/*
 * lttng-modules.git: instrumentation/events/lttng-module/rcu.h
 * (captured at commit "Implement filtering infrastructure")
 */
1 #undef TRACE_SYSTEM
2 #define TRACE_SYSTEM rcu
3
4 #if !defined(LTTNG_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define LTTNG_TRACE_RCU_H
6
7 #include "../../../probes/lttng-tracepoint-event.h"
8 #include <linux/version.h>
9
10 /*
11 * Tracepoint for start/end markers used for utilization calculations.
12 * By convention, the string is of the following forms:
13 *
14 * "Start <activity>" -- Mark the start of the specified activity,
15 * such as "context switch". Nesting is permitted.
16 * "End <activity>" -- Mark the end of the specified activity.
17 *
18 * An "@" character within "<activity>" is a comment character: Data
19 * reduction scripts will ignore the "@" and the remainder of the line.
20 */
21 LTTNG_TRACEPOINT_EVENT(rcu_utilization,
22
23 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
24 TP_PROTO(const char *s),
25 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
26 TP_PROTO(char *s),
27 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
28
29 TP_ARGS(s),
30
31 TP_STRUCT__entry(
32 __string(s, s)
33 ),
34
35 TP_fast_assign(
36 tp_strcpy(s, s)
37 ),
38
39 TP_printk("%s", __get_str(s))
40 )
41
42 #ifdef CONFIG_RCU_TRACE
43
44 #if defined(CONFIG_TREE_RCU) \
45 || (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) \
46 && defined(CONFIG_PREEMPT_RCU)) \
47 || defined(CONFIG_TREE_PREEMPT_RCU)
48
49 /*
50 * Tracepoint for grace-period events: starting and ending a grace
51 * period ("start" and "end", respectively), a CPU noting the start
52 * of a new grace period or the end of an old grace period ("cpustart"
53 * and "cpuend", respectively), a CPU passing through a quiescent
54 * state ("cpuqs"), a CPU coming online or going offline ("cpuonl"
55 * and "cpuofl", respectively), and a CPU being kicked for being too
56 * long in dyntick-idle mode ("kick").
57 */
58 LTTNG_TRACEPOINT_EVENT(rcu_grace_period,
59
60 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
61 TP_PROTO(const char *rcuname, unsigned long gpnum, const char *gpevent),
62 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
63 TP_PROTO(char *rcuname, unsigned long gpnum, char *gpevent),
64 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
65
66 TP_ARGS(rcuname, gpnum, gpevent),
67
68 TP_STRUCT__entry(
69 __string(rcuname, rcuname)
70 __field(unsigned long, gpnum)
71 __string(gpevent, gpevent)
72 ),
73
74 TP_fast_assign(
75 tp_strcpy(rcuname, rcuname)
76 tp_assign(gpnum, gpnum)
77 tp_strcpy(gpevent, gpevent)
78 ),
79
80 TP_printk("%s %lu %s",
81 __get_str(rcuname), __entry->gpnum, __get_str(gpevent))
82 )
83
84 /*
85 * Tracepoint for grace-period-initialization events. These are
86 * distinguished by the type of RCU, the new grace-period number, the
87 * rcu_node structure level, the starting and ending CPU covered by the
88 * rcu_node structure, and the mask of CPUs that will be waited for.
89 * All but the type of RCU are extracted from the rcu_node structure.
90 */
91 LTTNG_TRACEPOINT_EVENT(rcu_grace_period_init,
92
93 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
94 TP_PROTO(const char *rcuname, unsigned long gpnum, u8 level,
95 int grplo, int grphi, unsigned long qsmask),
96 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
97 TP_PROTO(char *rcuname, unsigned long gpnum, u8 level,
98 int grplo, int grphi, unsigned long qsmask),
99 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
100
101 TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),
102
103 TP_STRUCT__entry(
104 __string(rcuname, rcuname)
105 __field(unsigned long, gpnum)
106 __field(u8, level)
107 __field(int, grplo)
108 __field(int, grphi)
109 __field(unsigned long, qsmask)
110 ),
111
112 TP_fast_assign(
113 tp_strcpy(rcuname, rcuname)
114 tp_assign(gpnum, gpnum)
115 tp_assign(level, level)
116 tp_assign(grplo, grplo)
117 tp_assign(grphi, grphi)
118 tp_assign(qsmask, qsmask)
119 ),
120
121 TP_printk("%s %lu %u %d %d %lx",
122 __get_str(rcuname), __entry->gpnum, __entry->level,
123 __entry->grplo, __entry->grphi, __entry->qsmask)
124 )
125
126 /*
127 * Tracepoint for tasks blocking within preemptible-RCU read-side
128 * critical sections. Track the type of RCU (which one day might
129 * include SRCU), the grace-period number that the task is blocking
130 * (the current or the next), and the task's PID.
131 */
132 LTTNG_TRACEPOINT_EVENT(rcu_preempt_task,
133
134 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
135 TP_PROTO(const char *rcuname, int pid, unsigned long gpnum),
136 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
137 TP_PROTO(char *rcuname, int pid, unsigned long gpnum),
138 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
139
140 TP_ARGS(rcuname, pid, gpnum),
141
142 TP_STRUCT__entry(
143 __string(rcuname, rcuname)
144 __field(unsigned long, gpnum)
145 __field(int, pid)
146 ),
147
148 TP_fast_assign(
149 tp_strcpy(rcuname, rcuname)
150 tp_assign(gpnum, gpnum)
151 tp_assign(pid, pid)
152 ),
153
154 TP_printk("%s %lu %d",
155 __get_str(rcuname), __entry->gpnum, __entry->pid)
156 )
157
158 /*
159 * Tracepoint for tasks that blocked within a given preemptible-RCU
160 * read-side critical section exiting that critical section. Track the
161 * type of RCU (which one day might include SRCU) and the task's PID.
162 */
163 LTTNG_TRACEPOINT_EVENT(rcu_unlock_preempted_task,
164
165 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
166 TP_PROTO(const char *rcuname, unsigned long gpnum, int pid),
167 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
168 TP_PROTO(char *rcuname, unsigned long gpnum, int pid),
169 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
170
171 TP_ARGS(rcuname, gpnum, pid),
172
173 TP_STRUCT__entry(
174 __string(rcuname, rcuname)
175 __field(unsigned long, gpnum)
176 __field(int, pid)
177 ),
178
179 TP_fast_assign(
180 tp_strcpy(rcuname, rcuname)
181 tp_assign(gpnum, gpnum)
182 tp_assign(pid, pid)
183 ),
184
185 TP_printk("%s %lu %d", __get_str(rcuname), __entry->gpnum, __entry->pid)
186 )
187
188 /*
189 * Tracepoint for quiescent-state-reporting events. These are
190 * distinguished by the type of RCU, the grace-period number, the
191 * mask of quiescent lower-level entities, the rcu_node structure level,
192 * the starting and ending CPU covered by the rcu_node structure, and
193 * whether there are any blocked tasks blocking the current grace period.
194 * All but the type of RCU are extracted from the rcu_node structure.
195 */
196 LTTNG_TRACEPOINT_EVENT(rcu_quiescent_state_report,
197
198 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
199 TP_PROTO(const char *rcuname, unsigned long gpnum,
200 unsigned long mask, unsigned long qsmask,
201 u8 level, int grplo, int grphi, int gp_tasks),
202 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
203 TP_PROTO(char *rcuname, unsigned long gpnum,
204 unsigned long mask, unsigned long qsmask,
205 u8 level, int grplo, int grphi, int gp_tasks),
206 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
207
208 TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),
209
210 TP_STRUCT__entry(
211 __string(rcuname, rcuname)
212 __field(unsigned long, gpnum)
213 __field(unsigned long, mask)
214 __field(unsigned long, qsmask)
215 __field(u8, level)
216 __field(int, grplo)
217 __field(int, grphi)
218 __field(u8, gp_tasks)
219 ),
220
221 TP_fast_assign(
222 tp_strcpy(rcuname, rcuname)
223 tp_assign(gpnum, gpnum)
224 tp_assign(mask, mask)
225 tp_assign(qsmask, qsmask)
226 tp_assign(level, level)
227 tp_assign(grplo, grplo)
228 tp_assign(grphi, grphi)
229 tp_assign(gp_tasks, gp_tasks)
230 ),
231
232 TP_printk("%s %lu %lx>%lx %u %d %d %u",
233 __get_str(rcuname), __entry->gpnum,
234 __entry->mask, __entry->qsmask, __entry->level,
235 __entry->grplo, __entry->grphi, __entry->gp_tasks)
236 )
237
238 /*
239 * Tracepoint for quiescent states detected by force_quiescent_state().
240 * These trace events include the type of RCU, the grace-period number
241 * that was blocked by the CPU, the CPU itself, and the type of quiescent
242 * state, which can be "dti" for dyntick-idle mode, "ofl" for CPU offline,
243 * or "kick" when kicking a CPU that has been in dyntick-idle mode for
244 * too long.
245 */
246 LTTNG_TRACEPOINT_EVENT(rcu_fqs,
247
248 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
249 TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent),
250 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
251 TP_PROTO(char *rcuname, unsigned long gpnum, int cpu, char *qsevent),
252 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
253
254 TP_ARGS(rcuname, gpnum, cpu, qsevent),
255
256 TP_STRUCT__entry(
257 __string(rcuname, rcuname)
258 __field(unsigned long, gpnum)
259 __field(int, cpu)
260 __string(qsevent, qsevent)
261 ),
262
263 TP_fast_assign(
264 tp_strcpy(rcuname, rcuname)
265 tp_assign(gpnum, gpnum)
266 tp_assign(cpu, cpu)
267 tp_strcpy(qsevent, qsevent)
268 ),
269
270 TP_printk("%s %lu %d %s",
271 __get_str(rcuname), __entry->gpnum,
272 __entry->cpu, __get_str(qsevent))
273 )
274
275 #endif /*
276 * #if defined(CONFIG_TREE_RCU)
277 * || (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)
278 * && defined(CONFIG_PREEMPT_RCU))
279 * || defined(CONFIG_TREE_PREEMPT_RCU)
280 */
281
282 /*
283 * Tracepoint for dyntick-idle entry/exit events. These take a string
284 * as argument: "Start" for entering dyntick-idle mode, "End" for
285 * leaving it, "--=" for events moving towards idle, and "++=" for events
286 * moving away from idle. "Error on entry: not idle task" and "Error on
287 * exit: not idle task" indicate that a non-idle task is erroneously
288 * toying with the idle loop.
289 *
290 * These events also take a pair of numbers, which indicate the nesting
291 * depth before and after the event of interest. Note that task-related
292 * events use the upper bits of each number, while interrupt-related
293 * events use the lower bits.
294 */
295 LTTNG_TRACEPOINT_EVENT(rcu_dyntick,
296
297
298 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
299 TP_PROTO(const char *polarity, long long oldnesting, long long newnesting),
300
301 TP_ARGS(polarity, oldnesting, newnesting),
302 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
303 TP_PROTO(char *polarity, long long oldnesting, long long newnesting),
304
305 TP_ARGS(polarity, oldnesting, newnesting),
306 #else
307 TP_PROTO(char *polarity),
308
309 TP_ARGS(polarity),
310 #endif
311
312 TP_STRUCT__entry(
313 __string(polarity, polarity)
314 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
315 __field(long long, oldnesting)
316 __field(long long, newnesting)
317 #endif
318 ),
319
320 TP_fast_assign(
321 tp_strcpy(polarity, polarity)
322 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
323 tp_assign(oldnesting, oldnesting)
324 tp_assign(newnesting, newnesting)
325 #endif
326 ),
327
328 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
329 TP_printk("%s %llx %llx", __get_str(polarity),
330 __entry->oldnesting, __entry->newnesting)
331 #else
332 TP_printk("%s", __get_str(polarity))
333 #endif
334 )
335
336 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
337 /*
338 * Tracepoint for RCU preparation for idle, the goal being to get RCU
339 * processing done so that the current CPU can shut off its scheduling
340 * clock and enter dyntick-idle mode. One way to accomplish this is
341 * to drain all RCU callbacks from this CPU, and the other is to have
342 * done everything RCU requires for the current grace period. In this
343 * latter case, the CPU will be awakened at the end of the current grace
344 * period in order to process the remainder of its callbacks.
345 *
346 * These tracepoints take a string as argument:
347 *
348 * "No callbacks": Nothing to do, no callbacks on this CPU.
349 * "In holdoff": Nothing to do, holding off after unsuccessful attempt.
350 * "Begin holdoff": Attempt failed, don't retry until next jiffy.
351 * "Dyntick with callbacks": Entering dyntick-idle despite callbacks.
352 * "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks.
353 * "More callbacks": Still more callbacks, try again to clear them out.
354 * "Callbacks drained": All callbacks processed, off to dyntick idle!
355 * "Timer": Timer fired to cause CPU to continue processing callbacks.
356 * "Demigrate": Timer fired on wrong CPU, woke up correct CPU.
357 * "Cleanup after idle": Idle exited, timer canceled.
358 */
359 LTTNG_TRACEPOINT_EVENT(rcu_prep_idle,
360
361 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
362 TP_PROTO(const char *reason),
363 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
364 TP_PROTO(char *reason),
365 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
366
367 TP_ARGS(reason),
368
369 TP_STRUCT__entry(
370 __string(reason, reason)
371 ),
372
373 TP_fast_assign(
374 tp_strcpy(reason, reason)
375 ),
376
377 TP_printk("%s", __get_str(reason))
378 )
379 #endif
380
381 /*
382 * Tracepoint for the registration of a single RCU callback function.
383 * The first argument is the type of RCU, the second argument is
384 * a pointer to the RCU callback itself, the third element is the
385 * number of lazy callbacks queued, and the fourth element is the
386 * total number of callbacks queued.
387 */
388 LTTNG_TRACEPOINT_EVENT(rcu_callback,
389
390 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
391 TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy,
392 long qlen),
393
394 TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
395 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
396 TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen_lazy,
397 long qlen),
398
399 TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
400 #else
401 TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen),
402
403 TP_ARGS(rcuname, rhp, qlen),
404 #endif
405
406 TP_STRUCT__entry(
407 __string(rcuname, rcuname)
408 __field(void *, rhp)
409 __field(void *, func)
410 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
411 __field(long, qlen_lazy)
412 #endif
413 __field(long, qlen)
414 ),
415
416 TP_fast_assign(
417 tp_strcpy(rcuname, rcuname)
418 tp_assign(rhp, rhp)
419 tp_assign(func, rhp->func)
420 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
421 tp_assign(qlen_lazy, qlen_lazy)
422 #endif
423 tp_assign(qlen, qlen)
424 ),
425
426 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
427 TP_printk("%s rhp=%p func=%pf %ld/%ld",
428 __get_str(rcuname), __entry->rhp, __entry->func,
429 __entry->qlen_lazy, __entry->qlen)
430 #else
431 TP_printk("%s rhp=%p func=%pf %ld",
432 __get_str(rcuname), __entry->rhp, __entry->func,
433 __entry->qlen)
434 #endif
435 )
436
437 /*
438 * Tracepoint for the registration of a single RCU callback of the special
439 * kfree() form. The first argument is the RCU type, the second argument
440 * is a pointer to the RCU callback, the third argument is the offset
441 * of the callback within the enclosing RCU-protected data structure,
442 * the fourth argument is the number of lazy callbacks queued, and the
443 * fifth argument is the total number of callbacks queued.
444 */
445 LTTNG_TRACEPOINT_EVENT(rcu_kfree_callback,
446
447
448 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
449 TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
450 long qlen_lazy, long qlen),
451
452 TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
453 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
454 TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
455 long qlen_lazy, long qlen),
456
457 TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
458 #else
459 TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
460 long qlen),
461
462 TP_ARGS(rcuname, rhp, offset, qlen),
463 #endif
464
465 TP_STRUCT__entry(
466 __string(rcuname, rcuname)
467 __field(void *, rhp)
468 __field(unsigned long, offset)
469 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
470 __field(long, qlen_lazy)
471 #endif
472 __field(long, qlen)
473 ),
474
475 TP_fast_assign(
476 tp_strcpy(rcuname, rcuname)
477 tp_assign(rhp, rhp)
478 tp_assign(offset, offset)
479 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
480 tp_assign(qlen_lazy, qlen_lazy)
481 #endif
482 tp_assign(qlen, qlen)
483 ),
484
485 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
486 TP_printk("%s rhp=%p func=%ld %ld/%ld",
487 __get_str(rcuname), __entry->rhp, __entry->offset,
488 __entry->qlen_lazy, __entry->qlen)
489 #else
490 TP_printk("%s rhp=%p func=%ld %ld",
491 __get_str(rcuname), __entry->rhp, __entry->offset,
492 __entry->qlen)
493 #endif
494 )
495
496 /*
497 * Tracepoint for marking the beginning rcu_do_batch, performed to start
498 * RCU callback invocation. The first argument is the RCU flavor,
499 * the second is the number of lazy callbacks queued, the third is
500 * the total number of callbacks queued, and the fourth argument is
501 * the current RCU-callback batch limit.
502 */
503 LTTNG_TRACEPOINT_EVENT(rcu_batch_start,
504
505 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
506 TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit),
507
508 TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
509 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
510 TP_PROTO(char *rcuname, long qlen_lazy, long qlen, long blimit),
511
512 TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
513 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
514 TP_PROTO(char *rcuname, long qlen_lazy, long qlen, int blimit),
515
516 TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
517 #else
518 TP_PROTO(char *rcuname, long qlen, int blimit),
519
520 TP_ARGS(rcuname, qlen, blimit),
521 #endif
522
523 TP_STRUCT__entry(
524 __string(rcuname, rcuname)
525 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
526 __field(long, qlen_lazy)
527 #endif
528 __field(long, qlen)
529 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
530 __field(long, blimit)
531 #else
532 __field(int, blimit)
533 #endif
534 ),
535
536 TP_fast_assign(
537 tp_strcpy(rcuname, rcuname)
538 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
539 tp_assign(qlen_lazy, qlen_lazy)
540 #endif
541 tp_assign(qlen, qlen)
542 tp_assign(blimit, blimit)
543 ),
544
545 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
546 TP_printk("%s CBs=%ld/%ld bl=%ld",
547 __get_str(rcuname), __entry->qlen_lazy, __entry->qlen,
548 __entry->blimit)
549 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
550 TP_printk("%s CBs=%ld/%ld bl=%d",
551 __get_str(rcuname), __entry->qlen_lazy, __entry->qlen,
552 __entry->blimit)
553 #else
554 TP_printk("%s CBs=%ld bl=%d",
555 __get_str(rcuname), __entry->qlen, __entry->blimit)
556 #endif
557 )
558
559 /*
560 * Tracepoint for the invocation of a single RCU callback function.
561 * The first argument is the type of RCU, and the second argument is
562 * a pointer to the RCU callback itself.
563 */
564 LTTNG_TRACEPOINT_EVENT(rcu_invoke_callback,
565
566 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
567 TP_PROTO(const char *rcuname, struct rcu_head *rhp),
568 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
569 TP_PROTO(char *rcuname, struct rcu_head *rhp),
570 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
571
572 TP_ARGS(rcuname, rhp),
573
574 TP_STRUCT__entry(
575 __string(rcuname, rcuname)
576 __field(void *, rhp)
577 __field(void *, func)
578 ),
579
580 TP_fast_assign(
581 tp_strcpy(rcuname, rcuname)
582 tp_assign(rhp, rhp)
583 tp_assign(func, rhp->func)
584 ),
585
586 TP_printk("%s rhp=%p func=%pf",
587 __get_str(rcuname), __entry->rhp, __entry->func)
588 )
589
590 /*
591 * Tracepoint for the invocation of a single RCU callback of the special
592 * kfree() form. The first argument is the RCU flavor, the second
593 * argument is a pointer to the RCU callback, and the third argument
594 * is the offset of the callback within the enclosing RCU-protected
595 * data structure.
596 */
597 LTTNG_TRACEPOINT_EVENT(rcu_invoke_kfree_callback,
598
599 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
600 TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset),
601 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
602 TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset),
603 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
604
605 TP_ARGS(rcuname, rhp, offset),
606
607 TP_STRUCT__entry(
608 __string(rcuname, rcuname)
609 __field(void *, rhp)
610 __field(unsigned long, offset)
611 ),
612
613 TP_fast_assign(
614 tp_strcpy(rcuname, rcuname)
615 tp_assign(rhp, rhp)
616 tp_assign(offset, offset)
617 ),
618
619 TP_printk("%s rhp=%p func=%ld",
620 __get_str(rcuname), __entry->rhp, __entry->offset)
621 )
622
623 /*
624 * Tracepoint for exiting rcu_do_batch after RCU callbacks have been
625 * invoked. The first argument is the name of the RCU flavor,
626 * the second argument is number of callbacks actually invoked,
627 * the third argument (cb) is whether or not any of the callbacks that
628 * were ready to invoke at the beginning of this batch are still
629 * queued, the fourth argument (nr) is the return value of need_resched(),
630 * the fifth argument (iit) is 1 if the current task is the idle task,
631 * and the sixth argument (risk) is the return value from
632 * rcu_is_callbacks_kthread().
633 */
634 LTTNG_TRACEPOINT_EVENT(rcu_batch_end,
635
636 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
637 TP_PROTO(const char *rcuname, int callbacks_invoked,
638 char cb, char nr, char iit, char risk),
639
640 TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
641 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
642 TP_PROTO(const char *rcuname, int callbacks_invoked,
643 bool cb, bool nr, bool iit, bool risk),
644
645 TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
646 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
647 TP_PROTO(char *rcuname, int callbacks_invoked,
648 bool cb, bool nr, bool iit, bool risk),
649
650 TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
651 #else
652 TP_PROTO(char *rcuname, int callbacks_invoked),
653
654 TP_ARGS(rcuname, callbacks_invoked),
655 #endif
656
657 TP_STRUCT__entry(
658 __string(rcuname, rcuname)
659 __field(int, callbacks_invoked)
660 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
661 __field(char, cb)
662 __field(char, nr)
663 __field(char, iit)
664 __field(char, risk)
665 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
666 __field(bool, cb)
667 __field(bool, nr)
668 __field(bool, iit)
669 __field(bool, risk)
670 #endif
671 ),
672
673 TP_fast_assign(
674 tp_strcpy(rcuname, rcuname)
675 tp_assign(callbacks_invoked, callbacks_invoked)
676 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
677 tp_assign(cb, cb)
678 tp_assign(nr, nr)
679 tp_assign(iit, iit)
680 tp_assign(risk, risk)
681 #endif
682 ),
683
684 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
685 TP_printk("%s CBs-invoked=%d idle=%c%c%c%c",
686 __get_str(rcuname), __entry->callbacks_invoked,
687 __entry->cb ? 'C' : '.',
688 __entry->nr ? 'S' : '.',
689 __entry->iit ? 'I' : '.',
690 __entry->risk ? 'R' : '.')
691 #else
692 TP_printk("%s CBs-invoked=%d",
693 __get_str(rcuname), __entry->callbacks_invoked)
694 #endif
695 )
696
697 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
698 /*
699 * Tracepoint for rcutorture readers. The first argument is the name
700 * of the RCU flavor from rcutorture's viewpoint and the second argument
701 * is the callback address.
702 */
703 LTTNG_TRACEPOINT_EVENT(rcu_torture_read,
704
705 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
706 TP_PROTO(const char *rcutorturename, struct rcu_head *rhp,
707 unsigned long secs, unsigned long c_old, unsigned long c),
708
709 TP_ARGS(rcutorturename, rhp, secs, c_old, c),
710 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
711 TP_PROTO(char *rcutorturename, struct rcu_head *rhp,
712 unsigned long secs, unsigned long c_old, unsigned long c),
713
714 TP_ARGS(rcutorturename, rhp, secs, c_old, c),
715 #else
716 TP_PROTO(char *rcutorturename, struct rcu_head *rhp),
717
718 TP_ARGS(rcutorturename, rhp),
719 #endif
720
721 TP_STRUCT__entry(
722 __string(rcutorturename, rcutorturename)
723 __field(struct rcu_head *, rhp)
724 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
725 __field(unsigned long, secs)
726 __field(unsigned long, c_old)
727 __field(unsigned long, c)
728 #endif
729 ),
730
731 TP_fast_assign(
732 tp_strcpy(rcutorturename, rcutorturename)
733 tp_assign(rhp, rhp)
734 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
735 tp_assign(secs, secs)
736 tp_assign(c_old, c_old)
737 tp_assign(c, c)
738 #endif
739 ),
740
741 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
742 TP_printk("%s torture read %p %luus c: %lu %lu",
743 __entry->rcutorturename, __entry->rhp,
744 __entry->secs, __entry->c_old, __entry->c)
745 #else
746 TP_printk("%s torture read %p",
747 __get_str(rcutorturename), __entry->rhp)
748 #endif
749 )
750 #endif
751
752 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
753 /*
754 * Tracepoint for _rcu_barrier() execution. The string "s" describes
755 * the _rcu_barrier phase:
756 * "Begin": rcu_barrier_callback() started.
757 * "Check": rcu_barrier_callback() checking for piggybacking.
758 * "EarlyExit": rcu_barrier_callback() piggybacked, thus early exit.
759 * "Inc1": rcu_barrier_callback() piggyback check counter incremented.
760 * "Offline": rcu_barrier_callback() found offline CPU
761 * "OnlineQ": rcu_barrier_callback() found online CPU with callbacks.
762 * "OnlineNQ": rcu_barrier_callback() found online CPU, no callbacks.
763 * "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
764 * "CB": An rcu_barrier_callback() invoked a callback, not the last.
765 * "LastCB": An rcu_barrier_callback() invoked the last callback.
766 * "Inc2": rcu_barrier_callback() piggyback check counter incremented.
767 * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
768 * is the count of remaining callbacks, and "done" is the piggybacking count.
769 */
770 LTTNG_TRACEPOINT_EVENT(rcu_barrier,
771
772 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
773 TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done),
774 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
775 TP_PROTO(char *rcuname, char *s, int cpu, int cnt, unsigned long done),
776 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
777
778 TP_ARGS(rcuname, s, cpu, cnt, done),
779
780 TP_STRUCT__entry(
781 __string(rcuname, rcuname)
782 __string(s, s)
783 __field(int, cpu)
784 __field(int, cnt)
785 __field(unsigned long, done)
786 ),
787
788 TP_fast_assign(
789 tp_strcpy(rcuname, rcuname)
790 tp_strcpy(s, s)
791 tp_assign(cpu, cpu)
792 tp_assign(cnt, cnt)
793 tp_assign(done, done)
794 ),
795
796 TP_printk("%s %s cpu %d remaining %d # %lu",
797 __get_str(rcuname), __get_str(s), __entry->cpu, __entry->cnt,
798 __entry->done)
799 )
800 #endif
801
802 #else /* #ifdef CONFIG_RCU_TRACE */
803
804 #define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
805 #define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
806 qsmask) do { } while (0)
807 #define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
808 #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
809 #define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
810 grplo, grphi, gp_tasks) do { } \
811 while (0)
812 #define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
813 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
814 #define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
815 #else
816 #define trace_rcu_dyntick(polarity) do { } while (0)
817 #endif
818 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
819 #define trace_rcu_prep_idle(reason) do { } while (0)
820 #endif
821 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
822 #define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
823 #define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
824 do { } while (0)
825 #define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
826 do { } while (0)
827 #else
828 #define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
829 #define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
830 #define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)
831 #endif
832 #define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
833 #define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
834 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
835 #define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
836 do { } while (0)
837 #else
838 #define trace_rcu_batch_end(rcuname, callbacks_invoked) do { } while (0)
839 #endif
840 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
841 #define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
842 do { } while (0)
843 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
844 #define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
845 #endif
846 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
847 #define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
848 #endif
849 #endif /* #else #ifdef CONFIG_RCU_TRACE */
850
851 #endif /* LTTNG_TRACE_RCU_H */
852
853 /* This part must be outside protection */
854 #include "../../../probes/define_trace.h"
/* (gitweb page-generation footer removed; not part of the source file) */