/*
 * lttng-modules: instrumentation/events/lttng-module/arch/x86/kvm/mmutrace.h
 * Tracks upstream change: Revert "KVM: MMU: show mmu_valid_gen..." (v5.1)
 */
/* Multi-read guard: define_trace.h re-includes this header with
 * TRACE_HEADER_MULTI_READ set, so plain include guards do not apply then. */
#if !defined(LTTNG_TRACE_KVM_MMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_KVM_MMU_H

#include <probes/lttng-tracepoint-event.h>
#include <linux/version.h>

/* linux/trace_events.h replaced linux/ftrace_event.h in kernel 4.2. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
#include <linux/trace_events.h>
#else /* if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) */
#include <linux/ftrace_event.h>
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_mmu
15
/*
 * Common per-shadow-page payload shared by the kvm_mmu page events below;
 * (sp) is the struct kvm_mmu_page pointer named in the event prototype.
 * Kernels in [3.11, 5.1) expose sp->mmu_valid_gen; v5.1 removed that field
 * (revert of "KVM: MMU: show mmu_valid_gen..."), hence three variants.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))

/* v5.1+: no mmu_valid_gen field in struct kvm_mmu_page. */
#define LTTNG_KVM_MMU_PAGE_FIELDS \
	ctf_integer(__u64, gfn, (sp)->gfn) \
	ctf_integer(__u32, role, (sp)->role.word) \
	ctf_integer(__u32, root_count, (sp)->root_count) \
	ctf_integer(bool, unsync, (sp)->unsync)

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))

/* 3.11 <= kernel < 5.1: mmu_valid_gen is available, record it too. */
#define LTTNG_KVM_MMU_PAGE_FIELDS \
	ctf_integer(unsigned long, mmu_valid_gen, (sp)->mmu_valid_gen) \
	ctf_integer(__u64, gfn, (sp)->gfn) \
	ctf_integer(__u32, role, (sp)->role.word) \
	ctf_integer(__u32, root_count, (sp)->root_count) \
	ctf_integer(bool, unsync, (sp)->unsync)

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */

/* Pre-3.11 kernels: mmu_valid_gen did not exist yet. */
#define LTTNG_KVM_MMU_PAGE_FIELDS \
	ctf_integer(__u64, gfn, (sp)->gfn) \
	ctf_integer(__u32, role, (sp)->role.word) \
	ctf_integer(__u32, root_count, (sp)->root_count) \
	ctf_integer(bool, unsync, (sp)->unsync)

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
42
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
/*
 * A pagetable walk has started.
 * addr: guest address being translated; pferr: page-fault error code.
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, u32 pferr),
	TP_ARGS(addr, pferr),

	TP_FIELDS(
		ctf_integer(__u64, addr, addr)
		ctf_integer(__u32, pferr, pferr)
	)
)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)) */
/*
 * A pagetable walk has started.
 * Pre-3.6 kernels pass the individual fault flags instead of a combined
 * error code; rebuild pferr here using the PFERR_* bit positions
 * (write = bit 1, user = bit 2, fetch = bit 4) so the recorded field
 * stays compatible with the 3.6+ variant above.
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, int write_fault, int user_fault, int fetch_fault),
	TP_ARGS(addr, write_fault, user_fault, fetch_fault),

	TP_FIELDS(
		ctf_integer(__u64, addr, addr)
		ctf_integer(__u32, pferr,
			(!!write_fault << 1) | (!!user_fault << 2)
			| (!!fetch_fault << 4))
	)
)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)) */
74
/* We just walked a paging element: records the pte value and the
 * paging level it was found at. */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_paging_element,
	TP_PROTO(u64 pte, int level),
	TP_ARGS(pte, level),

	TP_FIELDS(
		ctf_integer(__u64, pte, pte)
		ctf_integer(__u32, level, level)
	)
)
86
/*
 * Event class shared by the accessed/dirty bit events below: records the
 * guest physical address of the modified entry, computed as the page table
 * base (table_gfn shifted to a byte address) plus entry index * entry size.
 */
LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_set_bit_class,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size),

	TP_FIELDS(
		ctf_integer(__u64, gpa,
			((u64)table_gfn << PAGE_SHIFT) + index * size)
	)
)
98
/* We set a pte accessed bit; payload is the gpa computed by
 * kvm_mmu_set_bit_class. */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
)
106
/* We set a pte dirty bit; payload is the gpa computed by
 * kvm_mmu_set_bit_class. */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
)
114
/* The pagetable walker terminated with an error; pferr carries the
 * page-fault error code. */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_walker_error,
	TP_PROTO(u32 pferr),
	TP_ARGS(pferr),

	TP_FIELDS(
		ctf_integer(__u32, pferr, pferr)
	)
)
124
/*
 * A shadow page was fetched: records the common page fields
 * (LTTNG_KVM_MMU_PAGE_FIELDS) plus whether the page was newly
 * created, as reported by the caller.
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_get_page,
	TP_PROTO(struct kvm_mmu_page *sp, bool created),
	TP_ARGS(sp, created),

	TP_FIELDS(
		LTTNG_KVM_MMU_PAGE_FIELDS
		ctf_integer(bool, created, created)
	)
)
135
/* Event class for events whose only payload is a struct kvm_mmu_page
 * (the common LTTNG_KVM_MMU_PAGE_FIELDS). */
LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_page_class,

	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp),

	TP_FIELDS(
		LTTNG_KVM_MMU_PAGE_FIELDS
	)
)
145
/* A shadow page is being synced. */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_sync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)
151
/* A shadow page is being marked unsync. */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_unsync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)
157
/* A shadow page is being prepared for zapping (teardown). */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)
163
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))

/*
 * An MMIO spte was marked: records the spte pointer, the gfn it maps,
 * and the access bits. Kernel 3.11+ also passes the MMIO generation
 * number, recorded as `gen`. The kernel tracepoint `mark_mmio_spte`
 * is mapped to the LTTng event name `kvm_mmu_mark_mmio_spte`.
 */
LTTNG_TRACEPOINT_EVENT_MAP(
	mark_mmio_spte,

	kvm_mmu_mark_mmio_spte,

	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
	TP_ARGS(sptep, gfn, access, gen),

	TP_FIELDS(
		ctf_integer_hex(void *, sptep, sptep)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, access)
		ctf_integer(unsigned int, gen, gen)
	)
)

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */

/* Pre-3.11 variant: no MMIO generation argument on the kernel side. */
LTTNG_TRACEPOINT_EVENT_MAP(
	mark_mmio_spte,

	kvm_mmu_mark_mmio_spte,

	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access),
	TP_ARGS(sptep, gfn, access),

	TP_FIELDS(
		ctf_integer_hex(void *, sptep, sptep)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, access)
	)
)

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
200
/*
 * An MMIO page fault is being handled: records the faulting address,
 * the gfn, and the access bits. Mapped to the LTTng event name
 * kvm_mmu_handle_mmio_page_fault.
 */
LTTNG_TRACEPOINT_EVENT_MAP(
	handle_mmio_page_fault,

	kvm_mmu_handle_mmio_page_fault,

	TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
	TP_ARGS(addr, gfn, access),

	TP_FIELDS(
		ctf_integer(u64, addr, addr)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, access)
	)
)
215
/*
 * Fast page fault path: records the vcpu id, faulting gva, error code,
 * the spte pointer with its old value and new value (new_spte is
 * re-read through sptep at event time), and whether the fault will be
 * retried. Mapped to the LTTng event name kvm_mmu_fast_page_fault.
 */
LTTNG_TRACEPOINT_EVENT_MAP(
	fast_page_fault,

	kvm_mmu_fast_page_fault,

	TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
		u64 *sptep, u64 old_spte, bool retry),
	TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),

	TP_FIELDS(
		ctf_integer(int, vcpu_id, vcpu->vcpu_id)
		ctf_integer(gva_t, gva, gva)
		ctf_integer(u32, error_code, error_code)
		ctf_integer_hex(u64 *, sptep, sptep)
		ctf_integer(u64, old_spte, old_spte)
		ctf_integer(u64, new_spte, *sptep)
		ctf_integer(bool, retry, retry)
	)
)
#endif /* LTTNG_TRACE_KVM_MMU_H */
236
/* Tell define_trace.h where to find this header when it re-includes it
 * to generate the probe definitions. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH instrumentation/events/lttng-module/arch/x86/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace

/* This part must be outside protection */
#include <probes/define_trace.h>