Add mmu_valid_gen field to mmutrace events
[lttng-modules.git] / instrumentation / events / lttng-module / arch / x86 / kvm / mmutrace.h
1 #if !defined(LTTNG_TRACE_KVM_MMU_H) || defined(TRACE_HEADER_MULTI_READ)
2 #define LTTNG_TRACE_KVM_MMU_H
3
4 #include "../../../../../../probes/lttng-tracepoint-event.h"
5 #include <linux/ftrace_event.h>
6 #include <linux/version.h>
7
8 #undef TRACE_SYSTEM
9 #define TRACE_SYSTEM kvm_mmu
10
/*
 * Common field list shared by all events that record a
 * struct kvm_mmu_page (referenced as "sp" at expansion site).
 * Kernels >= 3.11 have an mmu_valid_gen member in struct kvm_mmu_page,
 * so that field is only emitted on those kernels.
 * NOTE: comments must stay outside the macro bodies below — a comment
 * on a backslash-continued line would break the continuation.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))

#define LTTNG_KVM_MMU_PAGE_FIELDS \
	ctf_integer(unsigned long, mmu_valid_gen, (sp)->mmu_valid_gen) \
	ctf_integer(__u64, gfn, (sp)->gfn) \
	ctf_integer(__u32, role, (sp)->role.word) \
	ctf_integer(__u32, root_count, (sp)->root_count) \
	ctf_integer(bool, unsync, (sp)->unsync)

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */

#define LTTNG_KVM_MMU_PAGE_FIELDS \
	ctf_integer(__u64, gfn, (sp)->gfn) \
	ctf_integer(__u32, role, (sp)->role.word) \
	ctf_integer(__u32, root_count, (sp)->root_count) \
	ctf_integer(bool, unsync, (sp)->unsync)

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
29
30 /*
31 * A pagetable walk has started
32 */
33 LTTNG_TRACEPOINT_EVENT(
34 kvm_mmu_pagetable_walk,
35 TP_PROTO(u64 addr, u32 pferr),
36 TP_ARGS(addr, pferr),
37
38 TP_FIELDS(
39 ctf_integer(__u64, addr, addr)
40 ctf_integer(__u32, pferr, pferr)
41 )
42 )
43
44
/*
 * We just walked a paging element: records the pte value read and the
 * paging level it was found at.
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_paging_element,
	TP_PROTO(u64 pte, int level),
	TP_ARGS(pte, level),

	TP_FIELDS(
		ctf_integer(__u64, pte, pte)
		ctf_integer(__u32, level, level)
	)
)
56
/*
 * Event class for pte bit updates. The recorded gpa is reconstructed
 * from the table's gfn plus the byte offset of the entry
 * (index * size) within that page.
 */
LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_set_bit_class,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size),

	TP_FIELDS(
		ctf_integer(__u64, gpa,
			((u64)table_gfn << PAGE_SHIFT) + index * size)
	)
)
68
/* We set a pte accessed bit (instance of kvm_mmu_set_bit_class). */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
)
76
/* We set a pte dirty bit (instance of kvm_mmu_set_bit_class). */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
)
84
/* A pagetable walk failed; records only the page-fault error code. */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_walker_error,
	TP_PROTO(u32 pferr),
	TP_ARGS(pferr),

	TP_FIELDS(
		ctf_integer(__u32, pferr, pferr)
	)
)
94
/*
 * A shadow page was looked up or allocated. "created" distinguishes a
 * freshly allocated page from a cache hit; the rest of the payload is
 * the shared kvm_mmu_page field list.
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_get_page,
	TP_PROTO(struct kvm_mmu_page *sp, bool created),
	TP_ARGS(sp, created),

	TP_FIELDS(
		LTTNG_KVM_MMU_PAGE_FIELDS
		ctf_integer(bool, created, created)
	)
)
105
/*
 * Event class for events whose only payload is a struct kvm_mmu_page,
 * serialized via the shared LTTNG_KVM_MMU_PAGE_FIELDS list.
 */
LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_page_class,

	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp),

	TP_FIELDS(
		LTTNG_KVM_MMU_PAGE_FIELDS
	)
)
115
/* A shadow page is being synchronized (instance of kvm_mmu_page_class). */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_sync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)
121
/* A shadow page is being marked unsync (instance of kvm_mmu_page_class). */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_unsync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)
127
/* A shadow page is being prepared for zapping (instance of kvm_mmu_page_class). */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)
133
/*
 * An MMIO spte was installed. Mapped to the LTTng event name
 * kvm_mmu_mark_mmio_spte. Kernels >= 3.11 pass an extra "gen"
 * argument (the MMIO spte generation counter), so the prototype and
 * field list are version-gated to match the kernel tracepoint.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))

LTTNG_TRACEPOINT_EVENT_MAP(
	mark_mmio_spte,

	kvm_mmu_mark_mmio_spte,

	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
	TP_ARGS(sptep, gfn, access, gen),

	TP_FIELDS(
		ctf_integer(void *, sptep, sptep)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, access)
		ctf_integer(unsigned int, gen, gen)
	)
)

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */

LTTNG_TRACEPOINT_EVENT_MAP(
	mark_mmio_spte,

	kvm_mmu_mark_mmio_spte,

	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access),
	TP_ARGS(sptep, gfn, access),

	TP_FIELDS(
		ctf_integer(void *, sptep, sptep)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, access)
	)
)

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
170
/*
 * An MMIO page fault is being handled. Mapped to the LTTng event name
 * kvm_mmu_handle_mmio_page_fault; records the faulting address and the
 * gfn/access bits decoded from the MMIO spte.
 */
LTTNG_TRACEPOINT_EVENT_MAP(
	handle_mmio_page_fault,

	kvm_mmu_handle_mmio_page_fault,

	TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
	TP_ARGS(addr, gfn, access),

	TP_FIELDS(
		ctf_integer(u64, addr, addr)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, access)
	)
)
185
/*
 * Fast (lockless) page fault path. Mapped to the LTTng event name
 * kvm_mmu_fast_page_fault. new_spte is read by dereferencing sptep at
 * trace time, i.e. after the fast-path update, so it reflects the spte
 * value the handler left in place; "retry" reports whether the path
 * asked for the fault to be retried.
 */
LTTNG_TRACEPOINT_EVENT_MAP(
	fast_page_fault,

	kvm_mmu_fast_page_fault,

	TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
		u64 *sptep, u64 old_spte, bool retry),
	TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),

	TP_FIELDS(
		ctf_integer(int, vcpu_id, vcpu->vcpu_id)
		ctf_integer(gva_t, gva, gva)
		ctf_integer(u32, error_code, error_code)
		ctf_integer(u64 *, sptep, sptep)
		ctf_integer(u64, old_spte, old_spte)
		ctf_integer(u64, new_spte, *sptep)
		ctf_integer(bool, retry, retry)
	)
)
205 #endif /* LTTNG_TRACE_KVM_MMU_H */
206
207 #undef TRACE_INCLUDE_PATH
208 #define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module/arch/x86/kvm
209 #undef TRACE_INCLUDE_FILE
210 #define TRACE_INCLUDE_FILE mmutrace
211
212 /* This part must be outside protection */
213 #include "../../../../../../probes/define_trace.h"
This page took 0.035216 seconds and 5 git commands to generate.