Drop support for kernels < 4.4 from kvm instrumentation
[lttng-modules.git] / include / instrumentation / events / arch / x86 / kvm / mmutrace.h
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 #if !defined(LTTNG_TRACE_KVM_MMU_H) || defined(TRACE_HEADER_MULTI_READ)
3 #define LTTNG_TRACE_KVM_MMU_H
4
5 #include <lttng/tracepoint-event.h>
6 #include <lttng/kernel-version.h>
7
8 #include <linux/trace_events.h>
9
10 #undef TRACE_SYSTEM
11 #define TRACE_SYSTEM kvm_mmu
12
/*
 * Payload fields common to every event below that takes a
 * struct kvm_mmu_page *sp argument (kvm_mmu_get_page and the
 * kvm_mmu_page_class instances).  On kernels >= 5.1, or the matching
 * RHEL 4.18 backport range, the recorded fields are gfn, role.word,
 * root_count and unsync; older kernels additionally record
 * mmu_valid_gen (presumably the field was dropped/changed in the
 * upstream struct around 5.1 — confirm against the kernel headers).
 */
13 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,1,0) || \
14 LTTNG_RHEL_KERNEL_RANGE(4,18,0,147,0,0, 4,19,0,0,0,0))
15
16 #define LTTNG_KVM_MMU_PAGE_FIELDS \
17 ctf_integer(__u64, gfn, (sp)->gfn) \
18 ctf_integer(__u32, role, (sp)->role.word) \
19 ctf_integer(__u32, root_count, (sp)->root_count) \
20 ctf_integer(bool, unsync, (sp)->unsync)
21
22 #else
23
/* Pre-5.1 kernels: also capture the page's mmu_valid_gen generation. */
24 #define LTTNG_KVM_MMU_PAGE_FIELDS \
25 ctf_integer(unsigned long, mmu_valid_gen, (sp)->mmu_valid_gen) \
26 ctf_integer(__u64, gfn, (sp)->gfn) \
27 ctf_integer(__u32, role, (sp)->role.word) \
28 ctf_integer(__u32, root_count, (sp)->root_count) \
29 ctf_integer(bool, unsync, (sp)->unsync)
30
31 #endif
32
33 /*
34 * A pagetable walk has started
 *
 * Records the address being translated (hex) and the page-fault
 * error code (pferr) passed in by the instrumented kernel call site.
35 */
36 LTTNG_TRACEPOINT_EVENT(
37 kvm_mmu_pagetable_walk,
38 TP_PROTO(u64 addr, u32 pferr),
39 TP_ARGS(addr, pferr),
40
41 TP_FIELDS(
42 ctf_integer_hex(__u64, addr, addr)
43 ctf_integer(__u32, pferr, pferr)
44 )
45 )
46
47 /* We just walked a paging element */
/* Captures the raw PTE value and the paging level it was read at. */
48 LTTNG_TRACEPOINT_EVENT(
49 kvm_mmu_paging_element,
50 TP_PROTO(u64 pte, int level),
51 TP_ARGS(pte, level),
52
53 TP_FIELDS(
54 ctf_integer(__u64, pte, pte)
55 ctf_integer(__u32, level, level)
56 )
57 )
58
/*
 * Event class shared by the set_accessed_bit / set_dirty_bit events.
 * Only one field is recorded: the guest physical address of the PTE,
 * computed as (table_gfn << PAGE_SHIFT) + index * size.
 */
59 LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_set_bit_class,
60
61 TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
62
63 TP_ARGS(table_gfn, index, size),
64
65 TP_FIELDS(
66 ctf_integer(__u64, gpa,
67 ((u64)table_gfn << PAGE_SHIFT) + index * size)
68 )
69 )
70
71 /* We set a pte accessed bit */
/* Instance of kvm_mmu_set_bit_class; payload is the computed gpa. */
72 LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,
73
74 TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
75
76 TP_ARGS(table_gfn, index, size)
77 )
78
79 /* We set a pte dirty bit */
/* Instance of kvm_mmu_set_bit_class; payload is the computed gpa. */
80 LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,
81
82 TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
83
84 TP_ARGS(table_gfn, index, size)
85 )
86
/*
 * A guest pagetable walk failed; only the page-fault error code
 * (pferr) is recorded.
 */
87 LTTNG_TRACEPOINT_EVENT(
88 kvm_mmu_walker_error,
89 TP_PROTO(u32 pferr),
90 TP_ARGS(pferr),
91
92 TP_FIELDS(
93 ctf_integer(__u32, pferr, pferr)
94 )
95 )
96
/*
 * A shadow page was looked up.  Records the common page fields
 * (LTTNG_KVM_MMU_PAGE_FIELDS, which expands against 'sp') plus a
 * 'created' flag distinguishing a freshly allocated page from a
 * cache hit.
 */
97 LTTNG_TRACEPOINT_EVENT(
98 kvm_mmu_get_page,
99 TP_PROTO(struct kvm_mmu_page *sp, bool created),
100 TP_ARGS(sp, created),
101
102 TP_FIELDS(
103 LTTNG_KVM_MMU_PAGE_FIELDS
104 ctf_integer(bool, created, created)
105 )
106 )
107
/*
 * Event class for events whose entire payload is the common shadow
 * page fields of 'sp' (sync_page, unsync_page, prepare_zap_page).
 */
108 LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_page_class,
109
110 TP_PROTO(struct kvm_mmu_page *sp),
111 TP_ARGS(sp),
112
113 TP_FIELDS(
114 LTTNG_KVM_MMU_PAGE_FIELDS
115 )
116 )
117
/* Instance of kvm_mmu_page_class; payload is the sp page fields. */
118 LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_sync_page,
119 TP_PROTO(struct kvm_mmu_page *sp),
120
121 TP_ARGS(sp)
122 )
123
/* Instance of kvm_mmu_page_class; payload is the sp page fields. */
124 LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_unsync_page,
125 TP_PROTO(struct kvm_mmu_page *sp),
126
127 TP_ARGS(sp)
128 )
129
/* Instance of kvm_mmu_page_class; payload is the sp page fields. */
130 LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
131 TP_PROTO(struct kvm_mmu_page *sp),
132
133 TP_ARGS(sp)
134 )
135
/*
 * mark_mmio_spte (mapped to event name kvm_mmu_mark_mmio_spte).
 *
 * On kernels >= 5.10 the kernel tracepoint passes the raw spte value,
 * so the access bits and MMIO generation are derived here from the
 * spte (via ACC_ALL and get_mmio_spte_generation()); on older kernels
 * the tracepoint passes 'access' and 'gen' as separate arguments.
 * Both variants emit the same four fields: sptep, gfn, access, gen.
 */
136 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,10,0))
137
138 LTTNG_TRACEPOINT_EVENT_MAP(
139 mark_mmio_spte,
140
141 kvm_mmu_mark_mmio_spte,
142
143 TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
144 TP_ARGS(sptep, gfn, spte),
145
146 TP_FIELDS(
147 ctf_integer_hex(void *, sptep, sptep)
148 ctf_integer(gfn_t, gfn, gfn)
149 ctf_integer(unsigned, access, spte & ACC_ALL)
150 ctf_integer(unsigned int, gen, get_mmio_spte_generation(spte))
151 )
152 )
153
154 #else
155
156 LTTNG_TRACEPOINT_EVENT_MAP(
157 mark_mmio_spte,
158
159 kvm_mmu_mark_mmio_spte,
160
161 TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
162 TP_ARGS(sptep, gfn, access, gen),
163
164 TP_FIELDS(
165 ctf_integer_hex(void *, sptep, sptep)
166 ctf_integer(gfn_t, gfn, gfn)
167 ctf_integer(unsigned, access, access)
168 ctf_integer(unsigned int, gen, gen)
169 )
170 )
171 #endif
172
/*
 * handle_mmio_page_fault (mapped to kvm_mmu_handle_mmio_page_fault).
 * Records the faulting address, the gfn and the access bits supplied
 * by the kernel tracepoint.
 */
173 LTTNG_TRACEPOINT_EVENT_MAP(
174 handle_mmio_page_fault,
175
176 kvm_mmu_handle_mmio_page_fault,
177
178 TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
179 TP_ARGS(addr, gfn, access),
180
181 TP_FIELDS(
182 ctf_integer_hex(u64, addr, addr)
183 ctf_integer(gfn_t, gfn, gfn)
184 ctf_integer(unsigned, access, access)
185 )
186 )
187
/*
 * fast_page_fault (mapped to kvm_mmu_fast_page_fault).
 *
 * The upstream tracepoint prototype changed several times, hence the
 * version ladder below (presumably tracking KVM's lockless fast page
 * fault path — confirm against the kernel's mmutrace.h):
 *   - >= 5.16: arguments bundled in struct kvm_page_fault; int ret
 *   - >= 5.10: gpa_t cr2_or_gpa + u32 error_code; int ret
 *   - >= 5.6, plus the listed stable/Ubuntu backport ranges:
 *     same cr2_or_gpa form but a bool 'retry' instead of 'ret'
 *   - older: gva_t gva instead of cr2_or_gpa; bool 'retry'
 * All variants also dereference sptep at trace time to record the
 * updated spte value as 'new_spte'.
 */
188 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,16,0))
189 LTTNG_TRACEPOINT_EVENT_MAP(
190 fast_page_fault,
191
192 kvm_mmu_fast_page_fault,
193
194 TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
195 u64 *sptep, u64 old_spte, int ret),
196 TP_ARGS(vcpu, fault, sptep, old_spte, ret),
197
198 TP_FIELDS(
199 ctf_integer(int, vcpu_id, vcpu->vcpu_id)
200 ctf_integer(gpa_t, cr2_or_gpa, fault->addr)
201 ctf_integer(u32, error_code, fault->error_code)
202 ctf_integer_hex(u64 *, sptep, sptep)
203 ctf_integer(u64, old_spte, old_spte)
204 ctf_integer(u64, new_spte, *sptep)
205 ctf_integer(int, ret, ret)
206 )
207 )
208 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,10,0))
209 LTTNG_TRACEPOINT_EVENT_MAP(
210 fast_page_fault,
211
212 kvm_mmu_fast_page_fault,
213
214 TP_PROTO(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 error_code,
215 u64 *sptep, u64 old_spte, int ret),
216 TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, ret),
217
218 TP_FIELDS(
219 ctf_integer(int, vcpu_id, vcpu->vcpu_id)
220 ctf_integer(gpa_t, cr2_or_gpa, cr2_or_gpa)
221 ctf_integer(u32, error_code, error_code)
222 ctf_integer_hex(u64 *, sptep, sptep)
223 ctf_integer(u64, old_spte, old_spte)
224 ctf_integer(u64, new_spte, *sptep)
225 ctf_integer(int, ret, ret)
226 )
227 )
228 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0) || \
229 LTTNG_KERNEL_RANGE(4,19,103, 4,20,0) || \
230 LTTNG_KERNEL_RANGE(5,4,19, 5,5,0) || \
231 LTTNG_KERNEL_RANGE(5,5,3, 5,6,0) || \
232 LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,92, 4,16,0,0) || \
233 LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,44, 5,1,0,0) || \
234 LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,43, 5,3,18,45) || \
235 LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,46, 5,4,0,0))
236 LTTNG_TRACEPOINT_EVENT_MAP(
237 fast_page_fault,
238
239 kvm_mmu_fast_page_fault,
240
241 TP_PROTO(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 error_code,
242 u64 *sptep, u64 old_spte, bool retry),
243 TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, retry),
244
245 TP_FIELDS(
246 ctf_integer(int, vcpu_id, vcpu->vcpu_id)
247 ctf_integer(gpa_t, cr2_or_gpa, cr2_or_gpa)
248 ctf_integer(u32, error_code, error_code)
249 ctf_integer_hex(u64 *, sptep, sptep)
250 ctf_integer(u64, old_spte, old_spte)
251 ctf_integer(u64, new_spte, *sptep)
252 ctf_integer(bool, retry, retry)
253 )
254 )
255 #else
256 LTTNG_TRACEPOINT_EVENT_MAP(
257 fast_page_fault,
258
259 kvm_mmu_fast_page_fault,
260
261 TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
262 u64 *sptep, u64 old_spte, bool retry),
263 TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),
264
265 TP_FIELDS(
266 ctf_integer(int, vcpu_id, vcpu->vcpu_id)
267 ctf_integer(gva_t, gva, gva)
268 ctf_integer(u32, error_code, error_code)
269 ctf_integer_hex(u64 *, sptep, sptep)
270 ctf_integer(u64, old_spte, old_spte)
271 ctf_integer(u64, new_spte, *sptep)
272 ctf_integer(bool, retry, retry)
273 )
274 )
275 #endif
276
277 #endif /* LTTNG_TRACE_KVM_MMU_H */
278
279 #undef TRACE_INCLUDE_PATH
280 #define TRACE_INCLUDE_PATH instrumentation/events/arch/x86/kvm
281 #undef TRACE_INCLUDE_FILE
282 #define TRACE_INCLUDE_FILE mmutrace
283
284 /* This part must be outside protection */
285 #include <lttng/define_trace.h>
This page took 0.036513 seconds and 4 git commands to generate.