/*
 * lttng-modules: instrumentation/events/lttng-module/kmem.h
 * (commit baf4176aa1192ed0f46b48a25390f356158a5e17)
 */
1 #undef TRACE_SYSTEM
2 #define TRACE_SYSTEM kmem
3
4 #if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define _TRACE_KMEM_H
6
7 #include <linux/types.h>
8 #include <linux/tracepoint.h>
9 #include <linux/version.h>
10 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
11 #include <trace/events/gfpflags.h>
12 #endif
13
/*
 * kmem_alloc - common event class for the slab allocation tracepoints
 * (kmalloc, kmem_cache_alloc).
 *
 * Recorded fields:
 *   call_site   - address of the caller that requested the allocation
 *   ptr         - address of the object returned by the allocator
 *   bytes_req   - number of bytes the caller asked for
 *   bytes_alloc - number of bytes actually handed out (may exceed
 *                 bytes_req, e.g. due to size-class rounding)
 *   gfp_flags   - GFP allocation flags; pretty-printed at read time via
 *                 show_gfp_flags() (provided by trace/events/gfpflags.h,
 *                 which is only included for kernels >= 2.6.36 above —
 *                 NOTE(review): presumably older kernels define it
 *                 elsewhere; confirm against the build for < 2.6.36)
 */
14 DECLARE_EVENT_CLASS(kmem_alloc,
15 
16 	TP_PROTO(unsigned long call_site,
17 		 const void *ptr,
18 		 size_t bytes_req,
19 		 size_t bytes_alloc,
20 		 gfp_t gfp_flags),
21 
22 	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
23 
24 	TP_STRUCT__entry(
25 		__field(	unsigned long,	call_site	)
26 		__field(	const void *,	ptr		)
27 		__field(	size_t,		bytes_req	)
28 		__field(	size_t,		bytes_alloc	)
29 		__field(	gfp_t,		gfp_flags	)
30 	),
31 
	/* Straight copy of every argument into the event record
	 * (lttng's tp_assign, not the upstream __entry-> assignment). */
32 	TP_fast_assign(
33 		tp_assign(call_site, call_site)
34 		tp_assign(ptr, ptr)
35 		tp_assign(bytes_req, bytes_req)
36 		tp_assign(bytes_alloc, bytes_alloc)
37 		tp_assign(gfp_flags, gfp_flags)
38 	),
39 
40 	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
41 		__entry->call_site,
42 		__entry->ptr,
43 		__entry->bytes_req,
44 		__entry->bytes_alloc,
45 		show_gfp_flags(__entry->gfp_flags))
46 )
47
/* kmalloc - allocation event for kmalloc(); layout defined by the
 * kmem_alloc event class. */
48 DEFINE_EVENT(kmem_alloc, kmalloc,
49 
50 	TP_PROTO(unsigned long call_site, const void *ptr,
51 		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
52 
53 	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
54 )
55
/* kmem_cache_alloc - allocation event for kmem_cache_alloc(); layout
 * defined by the kmem_alloc event class. */
56 DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
57 
58 	TP_PROTO(unsigned long call_site, const void *ptr,
59 		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
60 
61 	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
62 )
63
/*
 * kmem_alloc_node - event class for the NUMA-node-aware slab allocation
 * tracepoints (kmalloc_node, kmem_cache_alloc_node).
 *
 * Identical to the kmem_alloc class plus one extra field:
 *   node - NUMA node id the allocation was requested on
 */
64 DECLARE_EVENT_CLASS(kmem_alloc_node,
65 
66 	TP_PROTO(unsigned long call_site,
67 		 const void *ptr,
68 		 size_t bytes_req,
69 		 size_t bytes_alloc,
70 		 gfp_t gfp_flags,
71 		 int node),
72 
73 	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
74 
75 	TP_STRUCT__entry(
76 		__field(	unsigned long,	call_site	)
77 		__field(	const void *,	ptr		)
78 		__field(	size_t,		bytes_req	)
79 		__field(	size_t,		bytes_alloc	)
80 		__field(	gfp_t,		gfp_flags	)
81 		__field(	int,		node		)
82 	),
83 
	/* Straight copy of every argument into the event record. */
84 	TP_fast_assign(
85 		tp_assign(call_site, call_site)
86 		tp_assign(ptr, ptr)
87 		tp_assign(bytes_req, bytes_req)
88 		tp_assign(bytes_alloc, bytes_alloc)
89 		tp_assign(gfp_flags, gfp_flags)
90 		tp_assign(node, node)
91 	),
92 
93 	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
94 		__entry->call_site,
95 		__entry->ptr,
96 		__entry->bytes_req,
97 		__entry->bytes_alloc,
98 		show_gfp_flags(__entry->gfp_flags),
99 		__entry->node)
100 )
101
/* kmalloc_node - allocation event for kmalloc_node(); layout defined by
 * the kmem_alloc_node event class. */
102 DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
103 
104 	TP_PROTO(unsigned long call_site, const void *ptr,
105 		 size_t bytes_req, size_t bytes_alloc,
106 		 gfp_t gfp_flags, int node),
107 
108 	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
109 )
110
/* kmem_cache_alloc_node - allocation event for kmem_cache_alloc_node();
 * layout defined by the kmem_alloc_node event class. */
111 DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
112 
113 	TP_PROTO(unsigned long call_site, const void *ptr,
114 		 size_t bytes_req, size_t bytes_alloc,
115 		 gfp_t gfp_flags, int node),
116 
117 	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
118 )
119
/*
 * kmem_free - event class for the slab free tracepoints
 * (kfree, kmem_cache_free).
 *
 * Recorded fields:
 *   call_site - address of the caller performing the free
 *   ptr       - address of the object being freed
 */
120 DECLARE_EVENT_CLASS(kmem_free,
121 
122 	TP_PROTO(unsigned long call_site, const void *ptr),
123 
124 	TP_ARGS(call_site, ptr),
125 
126 	TP_STRUCT__entry(
127 		__field(	unsigned long,	call_site	)
128 		__field(	const void *,	ptr		)
129 	),
130 
131 	TP_fast_assign(
132 		tp_assign(call_site, call_site)
133 		tp_assign(ptr, ptr)
134 	),
135 
136 	TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
137 )
138
/* kfree - free event for kfree(); layout defined by the kmem_free
 * event class. */
139 DEFINE_EVENT(kmem_free, kfree,
140 
141 	TP_PROTO(unsigned long call_site, const void *ptr),
142 
143 	TP_ARGS(call_site, ptr)
144 )
145
/* kmem_cache_free - free event for kmem_cache_free(); layout defined by
 * the kmem_free event class. */
146 DEFINE_EVENT(kmem_free, kmem_cache_free,
147 
148 	TP_PROTO(unsigned long call_site, const void *ptr),
149 
150 	TP_ARGS(call_site, ptr)
151 )
152
153 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
/*
 * Page free event. Upstream renamed this tracepoint from
 * mm_page_free_direct to mm_page_free in kernel 3.3, so the event name
 * is selected by kernel version; the payload is identical either way.
 *
 * Recorded fields:
 *   page  - struct page pointer being freed (pfn derived at read time)
 *   order - allocation order of the freed pages (2^order pages)
 */
154 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
155 TRACE_EVENT(mm_page_free,
156 #else
157 TRACE_EVENT(mm_page_free_direct,
158 #endif
159 
160 	TP_PROTO(struct page *page, unsigned int order),
161 
162 	TP_ARGS(page, order),
163 
164 	TP_STRUCT__entry(
165 		__field(	struct page *,	page		)
166 		__field(	unsigned int,	order		)
167 	),
168 
169 	TP_fast_assign(
170 		tp_assign(page, page)
171 		tp_assign(order, order)
172 	),
173 
	/* NOTE(review): page_to_pfn() is applied without a NULL check here,
	 * unlike mm_page_alloc below — presumably page is never NULL on the
	 * free path; confirm against the callers. */
174 	TP_printk("page=%p pfn=%lu order=%d",
175 			__entry->page,
176 			page_to_pfn(__entry->page),
177 			__entry->order)
178 )
179
/*
 * Batched (pagevec) page free event. Upstream renamed this tracepoint
 * from mm_pagevec_free to mm_page_free_batched in kernel 3.3; the
 * payload is identical either way.
 *
 * Recorded fields:
 *   page - struct page pointer being freed (always order 0 on this path,
 *          hence the hard-coded "order=0" in the output format)
 *   cold - cold-page hint passed by the caller
 */
180 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
181 TRACE_EVENT(mm_page_free_batched,
182 #else
183 TRACE_EVENT(mm_pagevec_free,
184 #endif
185 
186 	TP_PROTO(struct page *page, int cold),
187 
188 	TP_ARGS(page, cold),
189 
190 	TP_STRUCT__entry(
191 		__field(	struct page *,	page		)
192 		__field(	int,		cold		)
193 	),
194 
195 	TP_fast_assign(
196 		tp_assign(page, page)
197 		tp_assign(cold, cold)
198 	),
199 
200 	TP_printk("page=%p pfn=%lu order=0 cold=%d",
201 			__entry->page,
202 			page_to_pfn(__entry->page),
203 			__entry->cold)
204 )
205
/*
 * mm_page_alloc - page allocator event fired when pages are handed out
 * by the buddy allocator.
 *
 * Recorded fields:
 *   page        - struct page pointer returned (may be NULL on failure;
 *                 the printk guards page_to_pfn() accordingly)
 *   order       - allocation order (2^order contiguous pages)
 *   gfp_flags   - GFP allocation flags, decoded via show_gfp_flags()
 *   migratetype - migrate type of the pageblock the pages came from
 */
206 TRACE_EVENT(mm_page_alloc,
207 
208 	TP_PROTO(struct page *page, unsigned int order,
209 			gfp_t gfp_flags, int migratetype),
210 
211 	TP_ARGS(page, order, gfp_flags, migratetype),
212 
213 	TP_STRUCT__entry(
214 		__field(	struct page *,	page		)
215 		__field(	unsigned int,	order		)
216 		__field(	gfp_t,		gfp_flags	)
217 		__field(	int,		migratetype	)
218 	),
219 
220 	TP_fast_assign(
221 		tp_assign(page, page)
222 		tp_assign(order, order)
223 		tp_assign(gfp_flags, gfp_flags)
224 		tp_assign(migratetype, migratetype)
225 	),
226 
227 	TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
228 		__entry->page,
		/* pfn reported as 0 when the allocation failed (page == NULL) */
229 		__entry->page ? page_to_pfn(__entry->page) : 0,
230 		__entry->order,
231 		__entry->migratetype,
232 		show_gfp_flags(__entry->gfp_flags))
233 )
234
/*
 * mm_page - event class for per-zone / per-cpu page allocator events
 * (mm_page_alloc_zone_locked, mm_page_pcpu_drain).
 *
 * Recorded fields:
 *   page        - struct page pointer (may be NULL; printk guards
 *                 page_to_pfn())
 *   order       - allocation order
 *   migratetype - migrate type of the source pageblock
 *
 * The "percpu_refill" value in the output is not stored: it is derived
 * at read time as (order == 0), since only order-0 allocations refill
 * the per-cpu lists.
 */
235 DECLARE_EVENT_CLASS(mm_page,
236 
237 	TP_PROTO(struct page *page, unsigned int order, int migratetype),
238 
239 	TP_ARGS(page, order, migratetype),
240 
241 	TP_STRUCT__entry(
242 		__field(	struct page *,	page		)
243 		__field(	unsigned int,	order		)
244 		__field(	int,		migratetype	)
245 	),
246 
247 	TP_fast_assign(
248 		tp_assign(page, page)
249 		tp_assign(order, order)
250 		tp_assign(migratetype, migratetype)
251 	),
252 
253 	TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
254 		__entry->page,
255 		__entry->page ? page_to_pfn(__entry->page) : 0,
256 		__entry->order,
257 		__entry->migratetype,
258 		__entry->order == 0)
259 )
260
/* mm_page_alloc_zone_locked - pages taken from the zone free lists while
 * holding the zone lock; layout defined by the mm_page event class. */
261 DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
262 
263 	TP_PROTO(struct page *page, unsigned int order, int migratetype),
264 
265 	TP_ARGS(page, order, migratetype)
266 )
267
/*
 * mm_page_pcpu_drain - pages drained from the per-cpu lists back to the
 * buddy allocator. Reuses the mm_page class fields but overrides the
 * output format (no percpu_refill value). The prototype's order type
 * tracks the upstream signature change in 2.6.33 (int -> unsigned int).
 */
268 DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,
269 
270 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
271 	TP_PROTO(struct page *page, unsigned int order, int migratetype),
272 #else
273 	TP_PROTO(struct page *page, int order, int migratetype),
274 #endif
275 
276 	TP_ARGS(page, order, migratetype),
277 
278 	TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
279 		__entry->page, page_to_pfn(__entry->page),
280 		__entry->order, __entry->migratetype)
281 )
282
/*
 * mm_page_alloc_extfrag - fired when the allocator falls back to a
 * pageblock of a different migrate type, a potential source of external
 * fragmentation.
 *
 * Recorded fields:
 *   page                 - first page stolen from the fallback block
 *   alloc_order          - order originally requested
 *   fallback_order       - order of the block actually taken
 *   alloc_migratetype    - migrate type the request wanted
 *   fallback_migratetype - migrate type of the block it fell back to
 *
 * Derived at read time (not stored):
 *   fragmenting      - fallback_order < pageblock_order, i.e. the steal
 *                      splits a pageblock rather than taking it whole
 *   change_ownership - alloc_migratetype == fallback_migratetype
 */
283 TRACE_EVENT(mm_page_alloc_extfrag,
284 
285 	TP_PROTO(struct page *page,
286 			int alloc_order, int fallback_order,
287 			int alloc_migratetype, int fallback_migratetype),
288 
289 	TP_ARGS(page,
290 		alloc_order, fallback_order,
291 		alloc_migratetype, fallback_migratetype),
292 
293 	TP_STRUCT__entry(
294 		__field(	struct page *,	page			)
295 		__field(	int,		alloc_order		)
296 		__field(	int,		fallback_order		)
297 		__field(	int,		alloc_migratetype	)
298 		__field(	int,		fallback_migratetype	)
299 	),
300 
301 	TP_fast_assign(
302 		tp_assign(page, page)
303 		tp_assign(alloc_order, alloc_order)
304 		tp_assign(fallback_order, fallback_order)
305 		tp_assign(alloc_migratetype, alloc_migratetype)
306 		tp_assign(fallback_migratetype, fallback_migratetype)
307 	),
308 
309 	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
310 		__entry->page,
311 		page_to_pfn(__entry->page),
312 		__entry->alloc_order,
313 		__entry->fallback_order,
314 		pageblock_order,
315 		__entry->alloc_migratetype,
316 		__entry->fallback_migratetype,
317 		__entry->fallback_order < pageblock_order,
318 		__entry->alloc_migratetype == __entry->fallback_migratetype)
319 )
320 #endif
321
322 #endif /* _TRACE_KMEM_H */
323
324 /* This part must be outside protection */
325 #include "../../../probes/define_trace.h"