/*
 * kmem.h — mainline-kernel memory-allocation tracepoint definitions
 * (lttng-modules copy of instrumentation/events/mainline/kmem.h).
 */
1 #undef TRACE_SYSTEM
2 #define TRACE_SYSTEM kmem
3
4 #if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define _TRACE_KMEM_H
6
7 #include <linux/types.h>
8 #include <linux/tracepoint.h>
9 #include "gfpflags.h"
10
/*
 * Event class shared by the kmalloc and kmem_cache_alloc tracepoints.
 *
 * Records:
 *   call_site   - address of the caller that requested the allocation
 *   ptr         - pointer to the object returned by the allocator
 *   bytes_req   - number of bytes the caller asked for
 *   bytes_alloc - number of bytes the allocator actually reserved
 *   gfp_flags   - GFP allocation flags; pretty-printed by
 *                 show_gfp_flags() (declared in "gfpflags.h")
 */
DECLARE_EVENT_CLASS(kmem_alloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
	),

	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
		__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags))
);
44
/* kmalloc event: instance of the kmem_alloc class (same fields/format). */
DEFINE_EVENT(kmem_alloc, kmalloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);
52
/* kmem_cache_alloc event: instance of the kmem_alloc class. */
DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);
60
/*
 * Event class for the NUMA-node-aware allocation tracepoints
 * (kmalloc_node / kmem_cache_alloc_node).
 *
 * Identical to kmem_alloc with one extra field:
 *   node - NUMA node the allocation was requested on
 */
DECLARE_EVENT_CLASS(kmem_alloc_node,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
		__field(	int,		node		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
		__entry->node		= node;
	),

	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
		__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node)
);
98
/* kmalloc_node event: instance of the kmem_alloc_node class. */
DEFINE_EVENT(kmem_alloc_node, kmalloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);
107
/* kmem_cache_alloc_node event: instance of the kmem_alloc_node class. */
DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);
116
/*
 * Event class shared by the kfree and kmem_cache_free tracepoints.
 * Records only the caller address and the pointer being freed.
 */
DECLARE_EVENT_CLASS(kmem_free,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
	),

	TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
);
135
/* kfree event: instance of the kmem_free class. */
DEFINE_EVENT(kmem_free, kfree,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr)
);
142
/* kmem_cache_free event: instance of the kmem_free class. */
DEFINE_EVENT(kmem_free, kmem_cache_free,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr)
);
149
/*
 * Page freed directly back to the buddy allocator.
 * Records the struct page pointer and the allocation order; the pfn in
 * the printk output is derived at print time via page_to_pfn().
 *
 * NOTE(review): "order=%d" formats the unsigned 'order' field with a
 * signed specifier — mirrors the mainline header; harmless for the
 * small order values the allocator uses.
 */
TRACE_EVENT(mm_page_free_direct,

	TP_PROTO(struct page *page, unsigned int order),

	TP_ARGS(page, order),

	TP_STRUCT__entry(
		__field(	struct page *,	page		)
		__field(	unsigned int,	order		)
	),

	TP_fast_assign(
		__entry->page		= page;
		__entry->order		= order;
	),

	TP_printk("page=%p pfn=%lu order=%d",
		__entry->page,
		page_to_pfn(__entry->page),
		__entry->order)
);
171
/*
 * Page freed via a pagevec.  Records the page pointer and the 'cold'
 * hint; the format string hard-codes order=0, since pagevecs carry
 * only order-0 pages.
 */
TRACE_EVENT(mm_pagevec_free,

	TP_PROTO(struct page *page, int cold),

	TP_ARGS(page, cold),

	TP_STRUCT__entry(
		__field(	struct page *,	page		)
		__field(	int,		cold		)
	),

	TP_fast_assign(
		__entry->page		= page;
		__entry->cold		= cold;
	),

	TP_printk("page=%p pfn=%lu order=0 cold=%d",
		__entry->page,
		page_to_pfn(__entry->page),
		__entry->cold)
);
193
/*
 * Page allocated from the buddy allocator.
 * Records the page pointer, allocation order, GFP flags (pretty-printed
 * via show_gfp_flags()), and the migrate type of the allocation.
 */
TRACE_EVENT(mm_page_alloc,

	TP_PROTO(struct page *page, unsigned int order,
			gfp_t gfp_flags, int migratetype),

	TP_ARGS(page, order, gfp_flags, migratetype),

	TP_STRUCT__entry(
		__field(	struct page *,	page		)
		__field(	unsigned int,	order		)
		__field(	gfp_t,		gfp_flags	)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->page		= page;
		__entry->order		= order;
		__entry->gfp_flags	= gfp_flags;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
		__entry->page,
		page_to_pfn(__entry->page),
		__entry->order,
		__entry->migratetype,
		show_gfp_flags(__entry->gfp_flags))
);
222
/*
 * Event class shared by mm_page_alloc_zone_locked and
 * mm_page_pcpu_drain.  Records page pointer, order, and migrate type.
 * 'percpu_refill' is not stored; it is derived at print time as
 * (order == 0), since only order-0 allocations refill the per-cpu
 * lists.
 */
DECLARE_EVENT_CLASS(mm_page,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field(	struct page *,	page		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->page		= page;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
		__entry->page,
		page_to_pfn(__entry->page),
		__entry->order,
		__entry->migratetype,
		__entry->order == 0)
);
248
/* mm_page_alloc_zone_locked event: instance of the mm_page class. */
DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype)
);
255
/*
 * mm_page_pcpu_drain event: instance of the mm_page class with its own
 * print format (no percpu_refill field, and order printed with %d).
 */
DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
		__entry->page, page_to_pfn(__entry->page),
		__entry->order, __entry->migratetype)
);
266
/*
 * External-fragmentation event: an allocation fell back from its
 * preferred migrate type to another free list.
 *
 * Stored fields: page pointer, the requested (alloc) order and the
 * order of the block actually taken (fallback), plus both migrate
 * types.  Derived at print time:
 *   pageblock_order  - external kernel symbol, not stored per-event
 *   fragmenting      - fallback_order < pageblock_order (a block
 *                      smaller than a pageblock was split)
 *   change_ownership - alloc_migratetype == fallback_migratetype
 *                      NOTE(review): equality test mirrors this
 *                      mainline version of the header; confirm against
 *                      the kernel release being instrumented.
 */
TRACE_EVENT(mm_page_alloc_extfrag,

	TP_PROTO(struct page *page,
			int alloc_order, int fallback_order,
			int alloc_migratetype, int fallback_migratetype),

	TP_ARGS(page,
		alloc_order, fallback_order,
		alloc_migratetype, fallback_migratetype),

	TP_STRUCT__entry(
		__field(	struct page *,	page			)
		__field(	int,		alloc_order		)
		__field(	int,		fallback_order		)
		__field(	int,		alloc_migratetype	)
		__field(	int,		fallback_migratetype	)
	),

	TP_fast_assign(
		__entry->page			= page;
		__entry->alloc_order		= alloc_order;
		__entry->fallback_order		= fallback_order;
		__entry->alloc_migratetype	= alloc_migratetype;
		__entry->fallback_migratetype	= fallback_migratetype;
	),

	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
		__entry->page,
		page_to_pfn(__entry->page),
		__entry->alloc_order,
		__entry->fallback_order,
		pageblock_order,
		__entry->alloc_migratetype,
		__entry->fallback_migratetype,
		__entry->fallback_order < pageblock_order,
		__entry->alloc_migratetype == __entry->fallback_migratetype)
);
304
305 #endif /* _TRACE_KMEM_H */
306
307 /* This part must be outside protection */
308 #include <trace/define_trace.h>