Fix: build kvm probe on EL 8.4+
lttng-modules.git: instrumentation/events/lttng-module/arch/x86/kvm/trace.h
/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(LTTNG_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_KVM_H

#include <probes/lttng-tracepoint-event.h>
#include <asm/vmx.h>
#include <asm/svm.h>
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,8,0))
#include <asm/clocksource.h>
#endif
#include <linux/kvm_host.h>
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,7,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,305,0,0, 4,19,0,0,0,0))
#include <kvm_emulate.h>
#endif
#include <lttng-kernel-version.h>
#include <../arch/x86/kvm/lapic.h>
#include <../arch/x86/kvm/kvm_cache_regs.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_x86

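/*
 * LTTng-modules instrumentation for the kernel's kvm_x86 trace events.
 * Each LTTNG_TRACEPOINT_EVENT_MAP() below hooks the in-kernel tracepoint
 * named by its first argument and records it under the kvm_x86_-prefixed
 * LTTng event name given as its second argument.
 */
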
/*
 * Tracepoint for guest mode entry.
 */
LTTNG_TRACEPOINT_EVENT_MAP(kvm_entry, kvm_x86_entry,
	TP_PROTO(unsigned int vcpu_id),
	TP_ARGS(vcpu_id),

	TP_FIELDS(
		ctf_integer(unsigned int, vcpu_id, vcpu_id)
	)
)

/*
 * Tracepoint for hypercall.
 */
LTTNG_TRACEPOINT_EVENT_MAP(kvm_hypercall, kvm_x86_hypercall,
	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3),
	TP_ARGS(nr, a0, a1, a2, a3),

	TP_FIELDS(
		ctf_integer(unsigned long, nr, nr)
		ctf_integer(unsigned long, a0, a0)
		ctf_integer(unsigned long, a1, a1)
		ctf_integer(unsigned long, a2, a2)
		ctf_integer(unsigned long, a3, a3)
	)
)

/*
 * Tracepoint for Hyper-V hypercall.
 */
LTTNG_TRACEPOINT_EVENT_MAP(kvm_hv_hypercall, kvm_x86_hv_hypercall,
	TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx,
		__u64 ingpa, __u64 outgpa),
	TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),

	TP_FIELDS(
		ctf_integer(__u16, rep_cnt, rep_cnt)
		ctf_integer(__u16, rep_idx, rep_idx)
		ctf_integer(__u64, ingpa, ingpa)
		ctf_integer(__u64, outgpa, outgpa)
		ctf_integer(__u16, code, code)
		ctf_integer(bool, fast, fast)
	)
)

/*
 * Tracepoint for PIO.
 */
LTTNG_TRACEPOINT_EVENT_MAP(kvm_pio, kvm_x86_pio,
	TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
		unsigned int count),
	TP_ARGS(rw, port, size, count),

	TP_FIELDS(
		ctf_integer(unsigned int, rw, rw)
		ctf_integer(unsigned int, port, port)
		ctf_integer(unsigned int, size, size)
		ctf_integer(unsigned int, count, count)
	)
)

/*
 * Tracepoint for cpuid.
 */
LTTNG_TRACEPOINT_EVENT_MAP(kvm_cpuid, kvm_x86_cpuid,
	TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx,
		unsigned long rcx, unsigned long rdx),
	TP_ARGS(function, rax, rbx, rcx, rdx),

	TP_FIELDS(
		ctf_integer(unsigned int, function, function)
		ctf_integer(unsigned long, rax, rax)
		ctf_integer(unsigned long, rbx, rbx)
		ctf_integer(unsigned long, rcx, rcx)
		ctf_integer(unsigned long, rdx, rdx)
	)
)

/*
 * Tracepoint for apic access.
 */
LTTNG_TRACEPOINT_EVENT_MAP(kvm_apic, kvm_x86_apic,
	TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
	TP_ARGS(rw, reg, val),

	TP_FIELDS(
		ctf_integer(unsigned int, rw, rw)
		ctf_integer(unsigned int, reg, reg)
		ctf_integer(unsigned int, val, val)
	)
)

#define trace_kvm_apic_read(reg, val) trace_kvm_apic(0, reg, val)
#define trace_kvm_apic_write(reg, val) trace_kvm_apic(1, reg, val)

/*
 * Tracepoint for kvm guest exit:
 */
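/*
 * The exit tracepoint prototype and the kvm_x86_ops get_exit_info()
 * callback changed several times upstream: before 5.7, kvm_x86_ops is a
 * pointer; from 5.10 (and the RHEL 4.18.0-240 backport), get_exit_info()
 * also returns intr_info and error_code; and from 5.16, the exit reason
 * itself is fetched through get_exit_info() instead of being passed to
 * the tracepoint. The branches below follow those kernel variations.
 */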
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,16,0))
LTTNG_TRACEPOINT_EVENT_CODE_MAP(kvm_exit, kvm_x86_exit,
	TP_PROTO(struct kvm_vcpu *vcpu, u32 isa),
	TP_ARGS(vcpu, isa),

	TP_locvar(
		u32 reason;
		u64 info1, info2;
		u32 intr_info, error_code;
	),

	TP_code_pre(
		kvm_x86_ops.get_exit_info(vcpu, &tp_locvar->reason,
				&tp_locvar->info1,
				&tp_locvar->info2,
				&tp_locvar->intr_info,
				&tp_locvar->error_code);
	),

	TP_FIELDS(
		ctf_integer(u32, exit_reason, tp_locvar->reason)
		ctf_integer(unsigned long, guest_rip, kvm_rip_read(vcpu))
		ctf_integer(u32, isa, isa)
		ctf_integer(u64, info1, tp_locvar->info1)
		ctf_integer(u64, info2, tp_locvar->info2)
		ctf_integer(u32, intr_info, tp_locvar->intr_info)
		ctf_integer(u32, error_code, tp_locvar->error_code)
		ctf_integer(unsigned int, vcpu_id, vcpu->vcpu_id)
	),

	TP_code_post()
)
#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,10,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,240,0,0, 4,19,0,0,0,0))
LTTNG_TRACEPOINT_EVENT_CODE_MAP(kvm_exit, kvm_x86_exit,
	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
	TP_ARGS(exit_reason, vcpu, isa),

	TP_locvar(
		u64 info1, info2;
		u32 intr_info, error_code;
	),

	TP_code_pre(
		kvm_x86_ops.get_exit_info(vcpu, &tp_locvar->info1,
				&tp_locvar->info2,
				&tp_locvar->intr_info,
				&tp_locvar->error_code);
	),

	TP_FIELDS(
		ctf_integer(unsigned int, exit_reason, exit_reason)
		ctf_integer(unsigned long, guest_rip, kvm_rip_read(vcpu))
		ctf_integer(u32, isa, isa)
		ctf_integer(u64, info1, tp_locvar->info1)
		ctf_integer(u64, info2, tp_locvar->info2)
		ctf_integer(u32, intr_info, tp_locvar->intr_info)
		ctf_integer(u32, error_code, tp_locvar->error_code)
		ctf_integer(unsigned int, vcpu_id, vcpu->vcpu_id)
	),

	TP_code_post()
)
#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,7,0))
LTTNG_TRACEPOINT_EVENT_CODE_MAP(kvm_exit, kvm_x86_exit,
	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
	TP_ARGS(exit_reason, vcpu, isa),

	TP_locvar(
		u64 info1, info2;
	),

	TP_code_pre(
		kvm_x86_ops.get_exit_info(vcpu, &tp_locvar->info1,
				&tp_locvar->info2);
	),

	TP_FIELDS(
		ctf_integer(unsigned int, exit_reason, exit_reason)
		ctf_integer(unsigned long, guest_rip, kvm_rip_read(vcpu))
		ctf_integer(u32, isa, isa)
		ctf_integer(u64, info1, tp_locvar->info1)
		ctf_integer(u64, info2, tp_locvar->info2)
	),

	TP_code_post()
)
#else
LTTNG_TRACEPOINT_EVENT_CODE_MAP(kvm_exit, kvm_x86_exit,
	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
	TP_ARGS(exit_reason, vcpu, isa),

	TP_locvar(
		u64 info1, info2;
	),

	TP_code_pre(
		kvm_x86_ops->get_exit_info(vcpu, &tp_locvar->info1,
				&tp_locvar->info2);
	),

	TP_FIELDS(
		ctf_integer(unsigned int, exit_reason, exit_reason)
		ctf_integer(unsigned long, guest_rip, kvm_rip_read(vcpu))
		ctf_integer(u32, isa, isa)
		ctf_integer(u64, info1, tp_locvar->info1)
		ctf_integer(u64, info2, tp_locvar->info2)
	),

	TP_code_post()
)
#endif

/*
 * Tracepoint for kvm interrupt injection:
 */
LTTNG_TRACEPOINT_EVENT_MAP(kvm_inj_virq, kvm_x86_inj_virq,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq),

	TP_FIELDS(
		ctf_integer(unsigned int, irq, irq)
	)
)

/*
 * Tracepoint for kvm exception injection:
 */
LTTNG_TRACEPOINT_EVENT_MAP(kvm_inj_exception, kvm_x86_inj_exception,
	TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
	TP_ARGS(exception, has_error, error_code),

	TP_FIELDS(
		ctf_integer(u8, exception, exception)
		ctf_integer(u8, has_error, has_error)
		ctf_integer(u32, error_code, error_code)
	)
)

/*
 * Tracepoint for page fault.
 */
LTTNG_TRACEPOINT_EVENT_MAP(kvm_page_fault, kvm_x86_page_fault,
	TP_PROTO(unsigned long fault_address, unsigned int error_code),
	TP_ARGS(fault_address, error_code),

	TP_FIELDS(
		ctf_integer(unsigned long, fault_address, fault_address)
		ctf_integer(unsigned int, error_code, error_code)
	)
)

/*
 * Tracepoint for guest MSR access.
 */
LTTNG_TRACEPOINT_EVENT_MAP(kvm_msr, kvm_x86_msr,
	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
	TP_ARGS(write, ecx, data, exception),

	TP_FIELDS(
		ctf_integer(unsigned, write, write)
		ctf_integer(u32, ecx, ecx)
		ctf_integer(u64, data, data)
		ctf_integer(u8, exception, exception)
	)
)

#define trace_kvm_msr_read(ecx, data) trace_kvm_msr(0, ecx, data, false)
#define trace_kvm_msr_write(ecx, data) trace_kvm_msr(1, ecx, data, false)
#define trace_kvm_msr_read_ex(ecx) trace_kvm_msr(0, ecx, 0, true)
#define trace_kvm_msr_write_ex(ecx, data) trace_kvm_msr(1, ecx, data, true)

/*
 * Tracepoint for guest CR access.
 */
LTTNG_TRACEPOINT_EVENT_MAP(kvm_cr, kvm_x86_cr,
	TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
	TP_ARGS(rw, cr, val),

	TP_FIELDS(
		ctf_integer(unsigned int, rw, rw)
		ctf_integer(unsigned int, cr, cr)
		ctf_integer(unsigned long, val, val)
	)
)

#define trace_kvm_cr_read(cr, val) trace_kvm_cr(0, cr, val)
#define trace_kvm_cr_write(cr, val) trace_kvm_cr(1, cr, val)

LTTNG_TRACEPOINT_EVENT_MAP(kvm_pic_set_irq, kvm_x86_pic_set_irq,
	TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
	TP_ARGS(chip, pin, elcr, imr, coalesced),

	TP_FIELDS(
		ctf_integer(__u8, chip, chip)
		ctf_integer(__u8, pin, pin)
		ctf_integer(__u8, elcr, elcr)
		ctf_integer(__u8, imr, imr)
		ctf_integer(bool, coalesced, coalesced)
	)
)

LTTNG_TRACEPOINT_EVENT_MAP(kvm_apic_ipi, kvm_x86_apic_ipi,
	TP_PROTO(__u32 icr_low, __u32 dest_id),
	TP_ARGS(icr_low, dest_id),

	TP_FIELDS(
		ctf_integer(__u32, icr_low, icr_low)
		ctf_integer(__u32, dest_id, dest_id)
	)
)

LTTNG_TRACEPOINT_EVENT_MAP(kvm_apic_accept_irq, kvm_x86_apic_accept_irq,
	TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec, bool coalesced),
	TP_ARGS(apicid, dm, tm, vec, coalesced),

	TP_FIELDS(
		ctf_integer(__u32, apicid, apicid)
		ctf_integer(__u16, dm, dm)
		ctf_integer(__u8, tm, tm)
		ctf_integer(__u8, vec, vec)
		ctf_integer(bool, coalesced, coalesced)
	)
)

LTTNG_TRACEPOINT_EVENT_MAP(kvm_eoi, kvm_x86_eoi,
	TP_PROTO(struct kvm_lapic *apic, int vector),
	TP_ARGS(apic, vector),

	TP_FIELDS(
		ctf_integer(__u32, apicid, apic->vcpu->vcpu_id)
		ctf_integer(int, vector, vector)
	)
)

LTTNG_TRACEPOINT_EVENT_MAP(kvm_pv_eoi, kvm_x86_pv_eoi,
	TP_PROTO(struct kvm_lapic *apic, int vector),
	TP_ARGS(apic, vector),

	TP_FIELDS(
		ctf_integer(__u32, apicid, apic->vcpu->vcpu_id)
		ctf_integer(int, vector, vector)
	)
)

/*
 * Tracepoint for nested VMRUN
 */
LTTNG_TRACEPOINT_EVENT_MAP(kvm_nested_vmrun, kvm_x86_nested_vmrun,
	TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
		__u32 event_inj, bool npt),
	TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt),

	TP_FIELDS(
		ctf_integer(__u64, rip, rip)
		ctf_integer(__u64, vmcb, vmcb)
		ctf_integer(__u64, nested_rip, nested_rip)
		ctf_integer(__u32, int_ctl, int_ctl)
		ctf_integer(__u32, event_inj, event_inj)
		ctf_integer(bool, npt, npt)
	)
)

LTTNG_TRACEPOINT_EVENT_MAP(kvm_nested_intercepts, kvm_x86_nested_intercepts,
	TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept),
	TP_ARGS(cr_read, cr_write, exceptions, intercept),

	TP_FIELDS(
		ctf_integer(__u16, cr_read, cr_read)
		ctf_integer(__u16, cr_write, cr_write)
		ctf_integer(__u32, exceptions, exceptions)
		ctf_integer(__u64, intercept, intercept)
	)
)

/*
 * Tracepoint for #VMEXIT while nested
 */
LTTNG_TRACEPOINT_EVENT_MAP(kvm_nested_vmexit, kvm_x86_nested_vmexit,
	TP_PROTO(__u64 rip, __u32 exit_code,
		__u64 exit_info1, __u64 exit_info2,
		__u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
	TP_ARGS(rip, exit_code, exit_info1, exit_info2,
		exit_int_info, exit_int_info_err, isa),

	TP_FIELDS(
		ctf_integer(__u64, rip, rip)
		ctf_integer(__u32, exit_code, exit_code)
		ctf_integer(__u64, exit_info1, exit_info1)
		ctf_integer(__u64, exit_info2, exit_info2)
		ctf_integer(__u32, exit_int_info, exit_int_info)
		ctf_integer(__u32, exit_int_info_err, exit_int_info_err)
		ctf_integer(__u32, isa, isa)
	)
)

/*
 * Tracepoint for #VMEXIT reinjected to the guest
 */
LTTNG_TRACEPOINT_EVENT_MAP(kvm_nested_vmexit_inject, kvm_x86_nested_vmexit_inject,
	TP_PROTO(__u32 exit_code,
		__u64 exit_info1, __u64 exit_info2,
		__u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
	TP_ARGS(exit_code, exit_info1, exit_info2,
		exit_int_info, exit_int_info_err, isa),

	TP_FIELDS(
		ctf_integer(__u32, exit_code, exit_code)
		ctf_integer(__u64, exit_info1, exit_info1)
		ctf_integer(__u64, exit_info2, exit_info2)
		ctf_integer(__u32, exit_int_info, exit_int_info)
		ctf_integer(__u32, exit_int_info_err, exit_int_info_err)
		ctf_integer(__u32, isa, isa)
	)
)

/*
 * Tracepoint for nested #vmexit because of interrupt pending
 */
LTTNG_TRACEPOINT_EVENT_MAP(kvm_nested_intr_vmexit, kvm_x86_nested_intr_vmexit,
	TP_PROTO(__u64 rip),
	TP_ARGS(rip),

	TP_FIELDS(
		ctf_integer(__u64, rip, rip)
	)
)

/*
 * Tracepoint for the INVLPGA instruction
 */
LTTNG_TRACEPOINT_EVENT_MAP(kvm_invlpga, kvm_x86_invlpga,
	TP_PROTO(__u64 rip, int asid, u64 address),
	TP_ARGS(rip, asid, address),

	TP_FIELDS(
		ctf_integer(__u64, rip, rip)
		ctf_integer(int, asid, asid)
		ctf_integer(__u64, address, address)
	)
)

/*
 * Tracepoint for the SKINIT instruction
 */
LTTNG_TRACEPOINT_EVENT_MAP(kvm_skinit, kvm_x86_skinit,
	TP_PROTO(__u64 rip, __u32 slb),
	TP_ARGS(rip, slb),

	TP_FIELDS(
		ctf_integer(__u64, rip, rip)
		ctf_integer(__u32, slb, slb)
	)
)

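/*
 * The flag bits and kei_decode_mode() below mirror the emulator-mode
 * encoding used by the kernel's kvm trace events: the x86 emulation mode
 * (real, vm86, 16/32/64-bit protected) is folded into a small flag byte
 * recorded with each kvm_x86_emulate_insn event.
 */
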
#define KVM_EMUL_INSN_F_CR0_PE	(1 << 0)
#define KVM_EMUL_INSN_F_EFL_VM	(1 << 1)
#define KVM_EMUL_INSN_F_CS_D	(1 << 2)
#define KVM_EMUL_INSN_F_CS_L	(1 << 3)

#define kvm_trace_symbol_emul_flags \
	{ 0, "real" }, \
	{ KVM_EMUL_INSN_F_CR0_PE \
	  | KVM_EMUL_INSN_F_EFL_VM, "vm16" }, \
	{ KVM_EMUL_INSN_F_CR0_PE, "prot16" }, \
	{ KVM_EMUL_INSN_F_CR0_PE \
	  | KVM_EMUL_INSN_F_CS_D, "prot32" }, \
	{ KVM_EMUL_INSN_F_CR0_PE \
	  | KVM_EMUL_INSN_F_CS_L, "prot64" }

#define kei_decode_mode(mode) ({ \
	u8 flags = 0xff; \
	switch (mode) { \
	case X86EMUL_MODE_REAL: \
		flags = 0; \
		break; \
	case X86EMUL_MODE_VM86: \
		flags = KVM_EMUL_INSN_F_EFL_VM; \
		break; \
	case X86EMUL_MODE_PROT16: \
		flags = KVM_EMUL_INSN_F_CR0_PE; \
		break; \
	case X86EMUL_MODE_PROT32: \
		flags = KVM_EMUL_INSN_F_CR0_PE \
			| KVM_EMUL_INSN_F_CS_D; \
		break; \
	case X86EMUL_MODE_PROT64: \
		flags = KVM_EMUL_INSN_F_CR0_PE \
			| KVM_EMUL_INSN_F_CS_L; \
		break; \
	} \
	flags; \
	})

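/*
 * The layout of vcpu->arch.emulate_ctxt changed repeatedly upstream: the
 * decode state lived in a "decode" sub-struct before 3.1, the fetch
 * window was tracked with fetch.start before 3.17 and with
 * fetch.ptr/fetch.data afterwards, and since 5.7 (or the RHEL 4.18.0-305
 * backport) emulate_ctxt is a pointer and kvm_x86_ops a plain struct.
 * Each branch below reads the instruction bytes accordingly.
 */
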
LTTNG_TRACEPOINT_EVENT_MAP(kvm_emulate_insn, kvm_x86_emulate_insn,
	TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
	TP_ARGS(vcpu, failed),

	TP_FIELDS(
#if (LTTNG_LINUX_VERSION_CODE < LTTNG_KERNEL_VERSION(3,1,0))
		ctf_integer(__u64, rip, vcpu->arch.emulate_ctxt.decode.fetch.start)
		ctf_integer(__u32, csbase, kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS))
		ctf_integer(__u8, len, vcpu->arch.emulate_ctxt.decode.eip
				- vcpu->arch.emulate_ctxt.decode.fetch.start)
		ctf_array(__u8, insn, vcpu->arch.emulate_ctxt.decode.fetch.data, 15)
		ctf_integer(__u8, flags, kei_decode_mode(vcpu->arch.emulate_ctxt.mode))
#elif (LTTNG_LINUX_VERSION_CODE < LTTNG_KERNEL_VERSION(3,17,0))
		ctf_integer(__u64, rip, vcpu->arch.emulate_ctxt.fetch.start)
		ctf_integer(__u32, csbase, kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS))
		ctf_integer(__u8, len, vcpu->arch.emulate_ctxt._eip
				- vcpu->arch.emulate_ctxt.fetch.start)
		ctf_array(__u8, insn, vcpu->arch.emulate_ctxt.fetch.data, 15)
		ctf_integer(__u8, flags, kei_decode_mode(vcpu->arch.emulate_ctxt.mode))
#elif (LTTNG_LINUX_VERSION_CODE < LTTNG_KERNEL_VERSION(5,7,0) && \
	!LTTNG_RHEL_KERNEL_RANGE(4,18,0,305,0,0, 4,19,0,0,0,0))
		ctf_integer(__u64, rip, vcpu->arch.emulate_ctxt._eip -
				(vcpu->arch.emulate_ctxt.fetch.ptr -
				 vcpu->arch.emulate_ctxt.fetch.data))
		ctf_integer(__u32, csbase, kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS))
		ctf_integer(__u8, len, vcpu->arch.emulate_ctxt.fetch.ptr -
				vcpu->arch.emulate_ctxt.fetch.data)
		ctf_array(__u8, insn, vcpu->arch.emulate_ctxt.fetch.data, 15)
		ctf_integer(__u8, flags, kei_decode_mode(vcpu->arch.emulate_ctxt.mode))
#elif (LTTNG_LINUX_VERSION_CODE < LTTNG_KERNEL_VERSION(5,18,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,305,0,0, 4,19,0,0,0,0))
		ctf_integer(__u64, rip, vcpu->arch.emulate_ctxt->_eip -
				(vcpu->arch.emulate_ctxt->fetch.ptr -
				 vcpu->arch.emulate_ctxt->fetch.data))
		ctf_integer(__u32, csbase, kvm_x86_ops.get_segment_base(vcpu, VCPU_SREG_CS))
		ctf_integer(__u8, len, vcpu->arch.emulate_ctxt->fetch.ptr -
				vcpu->arch.emulate_ctxt->fetch.data)
		ctf_array(__u8, insn, vcpu->arch.emulate_ctxt->fetch.data, 15)
		ctf_integer(__u8, flags, kei_decode_mode(vcpu->arch.emulate_ctxt->mode))
#else
		ctf_integer(__u64, rip, vcpu->arch.emulate_ctxt->_eip -
				(vcpu->arch.emulate_ctxt->fetch.ptr -
				 vcpu->arch.emulate_ctxt->fetch.data))
		ctf_integer(__u32, csbase, kvm_x86_ops.get_segment_base(vcpu, VCPU_SREG_CS))
		ctf_integer(__u8, len, vcpu->arch.emulate_ctxt->fetch.ptr -
				vcpu->arch.emulate_ctxt->fetch.data)
		ctf_array(__u8, insn, vcpu->arch.emulate_ctxt->fetch.data, 15)
		ctf_integer(__u8, flags, kei_decode_mode(vcpu->arch.emulate_ctxt->mode))
#endif
		ctf_integer(__u8, failed, failed)
	)
)

#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)

LTTNG_TRACEPOINT_EVENT_MAP(
	vcpu_match_mmio, kvm_x86_vcpu_match_mmio,
	TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
	TP_ARGS(gva, gpa, write, gpa_match),

	TP_FIELDS(
		ctf_integer(gva_t, gva, gva)
		ctf_integer(gpa_t, gpa, gpa)
		ctf_integer(bool, write, write)
		ctf_integer(bool, gpa_match, gpa_match)
	)
)

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,11,0))
LTTNG_TRACEPOINT_EVENT_MAP(kvm_write_tsc_offset, kvm_x86_write_tsc_offset,
	TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset,
		__u64 next_tsc_offset),
	TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset),

	TP_FIELDS(
		ctf_integer(unsigned int, vcpu_id, vcpu_id)
		ctf_integer(__u64, previous_tsc_offset, previous_tsc_offset)
		ctf_integer(__u64, next_tsc_offset, next_tsc_offset)
	)
)
#endif

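/*
 * The master-clock/TSC-tracking events below are only defined on x86-64
 * kernels since 3.8, presumably matching when KVM's masterclock support
 * was added upstream, hence the version and CONFIG_X86_64 guards.
 */
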
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,8,0))
#ifdef CONFIG_X86_64

LTTNG_TRACEPOINT_EVENT_MAP(kvm_update_master_clock, kvm_x86_update_master_clock,
	TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
	TP_ARGS(use_master_clock, host_clock, offset_matched),

	TP_FIELDS(
		ctf_integer(bool, use_master_clock, use_master_clock)
		ctf_integer(unsigned int, host_clock, host_clock)
		ctf_integer(bool, offset_matched, offset_matched)
	)
)

LTTNG_TRACEPOINT_EVENT_MAP(kvm_track_tsc, kvm_x86_track_tsc,
	TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched,
		unsigned int online_vcpus, bool use_master_clock,
		unsigned int host_clock),
	TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock,
		host_clock),

	TP_FIELDS(
		ctf_integer(unsigned int, vcpu_id, vcpu_id)
		ctf_integer(unsigned int, nr_vcpus_matched_tsc, nr_matched)
		ctf_integer(unsigned int, online_vcpus, online_vcpus)
		ctf_integer(bool, use_master_clock, use_master_clock)
		ctf_integer(unsigned int, host_clock, host_clock)
	)
)

#endif /* CONFIG_X86_64 */
#endif /* LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,8,0) */

#endif /* LTTNG_TRACE_KVM_H */

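/*
 * TRACE_INCLUDE_PATH/FILE are redefined so that define_trace.h can
 * re-include this header (via TRACE_HEADER_MULTI_READ) and expand the
 * event descriptions above into the actual probe code.
 */
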
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH instrumentation/events/lttng-module/arch/x86/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

/* This part must be outside protection */
#include <probes/define_trace.h>