/*
 * lttng-modules: instrumentation/events/lttng-module/arch/x86/kvm/mmutrace.h
 */
/*
 * LTTng instrumentation for the x86 KVM MMU tracepoints (TRACE_SYSTEM kvmmmu).
 *
 * Multi-read guard: TRACE_HEADER_MULTI_READ lets the tracepoint expansion
 * machinery re-include this header several times to generate the event
 * probes from the definitions below.
 */
#if !defined(LTTNG_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_KVMMMU_H

#include "../../../../../../probes/lttng-tracepoint-event.h"
#include <linux/version.h>

/* The ftrace event header was renamed in kernel 4.2. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
#include <linux/trace_events.h>
#else /* if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) */
#include <linux/ftrace_event.h>
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvmmmu
15
/*
 * Shared field layout (KVM_MMU_PAGE_FIELDS) and field assignments
 * (KVM_MMU_PAGE_ASSIGN) for every event below that records a
 * struct kvm_mmu_page *.  Note that the page role is recorded as the
 * raw role.word value.
 *
 * Kernels >= 3.11 additionally have the sp->mmu_valid_gen generation
 * counter in struct kvm_mmu_page; on older kernels that member does
 * not exist, so the field is compiled out.
 */
#undef KVM_MMU_PAGE_FIELDS
#undef KVM_MMU_PAGE_ASSIGN

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))

#define KVM_MMU_PAGE_FIELDS \
	__field(unsigned long, mmu_valid_gen) \
	__field(__u64, gfn) \
	__field(__u32, role) \
	__field(__u32, root_count) \
	__field(bool, unsync)

#define KVM_MMU_PAGE_ASSIGN(sp) \
	tp_assign(mmu_valid_gen, sp->mmu_valid_gen) \
	tp_assign(gfn, sp->gfn) \
	tp_assign(role, sp->role.word) \
	tp_assign(root_count, sp->root_count) \
	tp_assign(unsync, sp->unsync)

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */

#define KVM_MMU_PAGE_FIELDS \
	__field(__u64, gfn) \
	__field(__u32, role) \
	__field(__u32, root_count) \
	__field(bool, unsync)

#define KVM_MMU_PAGE_ASSIGN(sp) \
	tp_assign(gfn, sp->gfn) \
	tp_assign(role, sp->role.word) \
	tp_assign(root_count, sp->root_count) \
	tp_assign(unsync, sp->unsync)

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
50
/*
 * Page-fault error code bits as (mask, symbol) pairs, consumed by
 * __print_flags() in the TP_printk formats below:
 * Present, Write, User, Reserved-bit violation, instruction Fetch.
 */
#define kvm_mmu_trace_pferr_flags \
	{ PFERR_PRESENT_MASK, "P" }, \
	{ PFERR_WRITE_MASK, "W" }, \
	{ PFERR_USER_MASK, "U" }, \
	{ PFERR_RSVD_MASK, "RSVD" }, \
	{ PFERR_FETCH_MASK, "F" }
57
/*
 * A pagetable walk has started.
 * Records the faulting address and the page-fault error code; the error
 * code is decoded bit-by-bit via kvm_mmu_trace_pferr_flags in the
 * printk format.
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, u32 pferr),
	TP_ARGS(addr, pferr),

	TP_STRUCT__entry(
		__field(__u64, addr)
		__field(__u32, pferr)
	),

	TP_fast_assign(
		tp_assign(addr, addr)
		tp_assign(pferr, pferr)
	),

	TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
)
79
80
/*
 * We just walked a paging element.
 * Records the raw pte value and the paging level it was found at
 * (level is an int in the prototype but stored as __u32).
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_paging_element,
	TP_PROTO(u64 pte, int level),
	TP_ARGS(pte, level),

	TP_STRUCT__entry(
		__field(__u64, pte)
		__field(__u32, level)
	),

	TP_fast_assign(
		tp_assign(pte, pte)
		tp_assign(level, level)
	),

	TP_printk("pte %llx level %u", __entry->pte, __entry->level)
)
99
/*
 * Event class shared by the accessed/dirty bit events below.
 * Only a single gpa field is recorded, reconstructed from the table's
 * guest frame number plus the byte offset of the entry within the
 * table: (table_gfn << PAGE_SHIFT) + index * size.
 */
LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_set_bit_class,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size),

	TP_STRUCT__entry(
		__field(__u64, gpa)
	),

	TP_fast_assign(
		tp_assign(gpa, ((u64)table_gfn << PAGE_SHIFT)
				+ index * size)
	),

	TP_printk("gpa %llx", __entry->gpa)
)
117
/* We set a pte accessed bit (instance of kvm_mmu_set_bit_class). */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
)
125
/* We set a pte dirty bit (instance of kvm_mmu_set_bit_class). */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
)
133
/*
 * A guest pagetable walk failed.
 * Records only the page-fault error code, decoded with
 * kvm_mmu_trace_pferr_flags in the printk format.
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_walker_error,
	TP_PROTO(u32 pferr),
	TP_ARGS(pferr),

	TP_STRUCT__entry(
		__field(__u32, pferr)
	),

	TP_fast_assign(
		tp_assign(pferr, pferr)
	),

	TP_printk("pferr %x %s", __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
)
150
/*
 * A shadow page was looked up; records the common kvm_mmu_page fields
 * plus 'created', which distinguishes a freshly created page from one
 * that was found.
 *
 * NOTE(review): TP_printk() is left empty here — presumably the format
 * string is not used by the LTTng probe machinery; confirm against
 * lttng-tracepoint-event.h.
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_get_page,
	TP_PROTO(struct kvm_mmu_page *sp, bool created),
	TP_ARGS(sp, created),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
		__field(bool, created)
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
		tp_assign(created, created)
	),

	TP_printk()
)
168
/*
 * Event class for events that record nothing but the common
 * kvm_mmu_page fields (sync/unsync/zap instances below).
 */
LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_page_class,

	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
	),

	TP_printk()
)
184
/* A shadow page is synchronized with the guest pagetable. */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_sync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)

/* A shadow page is marked unsynchronized. */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_unsync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)

/* A shadow page is queued for zapping. */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)
202
/*
 * An MMIO spte was installed.  Kernel 3.11 added a fourth 'gen'
 * (generation) argument to the tracepoint prototype, so two variants of
 * the event are defined, selected at build time by kernel version.
 * Note: in the >= 3.11 variant the gen field is recorded but not
 * included in the printk format string.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))

LTTNG_TRACEPOINT_EVENT(
	mark_mmio_spte,
	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
	TP_ARGS(sptep, gfn, access, gen),

	TP_STRUCT__entry(
		__field(void *, sptep)
		__field(gfn_t, gfn)
		__field(unsigned, access)
		__field(unsigned int, gen)
	),

	TP_fast_assign(
		tp_assign(sptep, sptep)
		tp_assign(gfn, gfn)
		tp_assign(access, access)
		tp_assign(gen, gen)
	),

	TP_printk("sptep:%p gfn %llx access %x", __entry->sptep, __entry->gfn,
		  __entry->access)
)

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */

LTTNG_TRACEPOINT_EVENT(
	mark_mmio_spte,
	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access),
	TP_ARGS(sptep, gfn, access),

	TP_STRUCT__entry(
		__field(void *, sptep)
		__field(gfn_t, gfn)
		__field(unsigned, access)
	),

	TP_fast_assign(
		tp_assign(sptep, sptep)
		tp_assign(gfn, gfn)
		tp_assign(access, access)
	),

	TP_printk("sptep:%p gfn %llx access %x", __entry->sptep, __entry->gfn,
		  __entry->access)
)

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
252
/*
 * An MMIO page fault was handled.
 * Records the faulting address, the guest frame number and the access
 * permission bits.
 */
LTTNG_TRACEPOINT_EVENT(
	handle_mmio_page_fault,
	TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
	TP_ARGS(addr, gfn, access),

	TP_STRUCT__entry(
		__field(u64, addr)
		__field(gfn_t, gfn)
		__field(unsigned, access)
	),

	TP_fast_assign(
		tp_assign(addr, addr)
		tp_assign(gfn, gfn)
		tp_assign(access, access)
	),

	TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
		  __entry->access)
)
273
/*
 * __spte_satisfied() expands inside the TP_printk format below and
 * refers to __entry fields: true when the fault path asked for a retry
 * and the named recorded spte is writable.
 */
#define __spte_satisfied(__spte) \
	(__entry->retry && is_writable_pte(__entry->__spte))

/*
 * A fast page fault was attempted.
 * Records the vcpu id, faulting gva, error code, the spte pointer, the
 * spte value before the attempt (old_spte), the value re-read through
 * sptep after the attempt (new_spte), and whether a retry was requested.
 * The printk format derives "spurious"/"fixed" from old/new spte
 * writability via __spte_satisfied().
 */
LTTNG_TRACEPOINT_EVENT(
	fast_page_fault,
	TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
		 u64 *sptep, u64 old_spte, bool retry),
	TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(gva_t, gva)
		__field(u32, error_code)
		__field(u64 *, sptep)
		__field(u64, old_spte)
		__field(u64, new_spte)
		__field(bool, retry)
	),

	TP_fast_assign(
		tp_assign(vcpu_id, vcpu->vcpu_id)
		tp_assign(gva, gva)
		tp_assign(error_code, error_code)
		tp_assign(sptep, sptep)
		tp_assign(old_spte, old_spte)
		tp_assign(new_spte, *sptep)
		tp_assign(retry, retry)
	),

	TP_printk("vcpu %d gva %lx error_code %s sptep %p old %#llx"
		  " new %llx spurious %d fixed %d", __entry->vcpu_id,
		  __entry->gva, __print_flags(__entry->error_code, "|",
		  kvm_mmu_trace_pferr_flags), __entry->sptep,
		  __entry->old_spte, __entry->new_spte,
		  __spte_satisfied(old_spte), __spte_satisfied(new_spte)
	)
)
#endif /* LTTNG_TRACE_KVMMMU_H */

/* Tell define_trace.h where to find this header when it re-includes it. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module/arch/x86/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace

/* This part must be outside protection */
#include "../../../../../../probes/define_trace.h"