fix: KVM: MMU: change tracepoints arguments to kvm_page_fault (v5.16)
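The upstream v5.16 change folds the fast_page_fault tracepoint arguments into struct kvm_page_fault, so the probe now reads fault->addr and fault->error_code instead of the former cr2_or_gpa and error_code parameters. A minimal, self-contained sketch of that shape change follows; the struct layout and helpers here are illustrative stand-ins, not the kernel definitions.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gpa_t;

    /* Illustrative stand-in only; the real struct lives in arch/x86/kvm/mmu. */
    struct kvm_page_fault {
            gpa_t addr;             /* faulting GPA, previously the cr2_or_gpa argument */
            uint32_t error_code;    /* x86 #PF error code, previously a separate argument */
    };

    /* >= 5.16 shape: one struct carries what used to be two scalar arguments. */
    static void trace_fast_page_fault_new(const struct kvm_page_fault *fault)
    {
            printf("cr2_or_gpa=%#llx error_code=%#x\n",
                   (unsigned long long)fault->addr, (unsigned)fault->error_code);
    }

    /* < 5.16 shape, kept for comparison. */
    static void trace_fast_page_fault_old(gpa_t cr2_or_gpa, uint32_t error_code)
    {
            printf("cr2_or_gpa=%#llx error_code=%#x\n",
                   (unsigned long long)cr2_or_gpa, (unsigned)error_code);
    }

    int main(void)
    {
            struct kvm_page_fault f = { .addr = 0xdead000ULL, .error_code = 0x2 };

            trace_fast_page_fault_new(&f);
            trace_fast_page_fault_old(f.addr, f.error_code);
            return 0;
    }

The probe bodies below mirror this: the >= 5.16 branch dereferences the fault struct, while the older branches keep the scalar prototypes.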
instrumentation/events/lttng-module/arch/x86/kvm/mmutrace.h
index 6bf6beeb36aa23f995eb482157803f97729fecf5..3116328f6557cb810b6078a939e404f21776f553 100644
@@ -1,14 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #if !defined(LTTNG_TRACE_KVM_MMU_H) || defined(TRACE_HEADER_MULTI_READ)
 #define LTTNG_TRACE_KVM_MMU_H
 
-#include "../../../../../../probes/lttng-tracepoint-event.h"
+#include <probes/lttng-tracepoint-event.h>
+#include <lttng-kernel-version.h>
+
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,2,0))
+#include <linux/trace_events.h>
+#else /* if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,2,0)) */
 #include <linux/ftrace_event.h>
-#include <linux/version.h>
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,2,0)) */
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm_mmu
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,1,0) || \
+       LTTNG_RHEL_KERNEL_RANGE(4,18,0,147,0,0, 4,19,0,0,0,0))
+
+#define LTTNG_KVM_MMU_PAGE_FIELDS \
+       ctf_integer(__u64, gfn, (sp)->gfn) \
+       ctf_integer(__u32, role, (sp)->role.word) \
+       ctf_integer(__u32, root_count, (sp)->root_count) \
+       ctf_integer(bool, unsync, (sp)->unsync)
+
+#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,11,0))
 
 #define LTTNG_KVM_MMU_PAGE_FIELDS \
        ctf_integer(unsigned long, mmu_valid_gen, (sp)->mmu_valid_gen) \
@@ -17,7 +32,7 @@
        ctf_integer(__u32, root_count, (sp)->root_count) \
        ctf_integer(bool, unsync, (sp)->unsync)
 
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,11,0)) */
 
 #define LTTNG_KVM_MMU_PAGE_FIELDS \
        ctf_integer(__u64, gfn, (sp)->gfn) \
@@ -25,8 +40,9 @@
        ctf_integer(__u32, root_count, (sp)->root_count) \
        ctf_integer(bool, unsync, (sp)->unsync)
 
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,11,0)) */
 
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,6,0))
 /*
  * A pagetable walk has started
  */
@@ -40,7 +56,23 @@ LTTNG_TRACEPOINT_EVENT(
                ctf_integer(__u32, pferr, pferr)
        )
 )
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,6,0)) */
+/*
+ * A pagetable walk has started
+ */
+LTTNG_TRACEPOINT_EVENT(
+       kvm_mmu_pagetable_walk,
+       TP_PROTO(u64 addr, int write_fault, int user_fault, int fetch_fault),
+       TP_ARGS(addr, write_fault, user_fault, fetch_fault),
 
+       TP_FIELDS(
+               ctf_integer(__u64, addr, addr)
+               ctf_integer(__u32, pferr,
+                       (!!write_fault << 1) | (!!user_fault << 2)
+                       | (!!fetch_fault << 4))
+       )
+)
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,6,0)) */
 
 /* We just walked a paging element */
 LTTNG_TRACEPOINT_EVENT(
@@ -131,7 +163,25 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
        TP_ARGS(sp)
 )
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,10,0))
+
+LTTNG_TRACEPOINT_EVENT_MAP(
+       mark_mmio_spte,
+
+       kvm_mmu_mark_mmio_spte,
+
+       TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
+       TP_ARGS(sptep, gfn, spte),
+
+       TP_FIELDS(
+               ctf_integer_hex(void *, sptep, sptep)
+               ctf_integer(gfn_t, gfn, gfn)
+               ctf_integer(unsigned, access, spte & ACC_ALL)
+               ctf_integer(unsigned int, gen, get_mmio_spte_generation(spte))
+       )
+)
+
+#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,11,0))
 
 LTTNG_TRACEPOINT_EVENT_MAP(
        mark_mmio_spte,
@@ -142,14 +192,14 @@ LTTNG_TRACEPOINT_EVENT_MAP(
        TP_ARGS(sptep, gfn, access, gen),
 
        TP_FIELDS(
-               ctf_integer(void *, sptep, sptep)
+               ctf_integer_hex(void *, sptep, sptep)
                ctf_integer(gfn_t, gfn, gfn)
                ctf_integer(unsigned, access, access)
                ctf_integer(unsigned int, gen, gen)
        )
 )
 
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,11,0)) */
 
 LTTNG_TRACEPOINT_EVENT_MAP(
        mark_mmio_spte,
@@ -160,13 +210,13 @@ LTTNG_TRACEPOINT_EVENT_MAP(
        TP_ARGS(sptep, gfn, access),
 
        TP_FIELDS(
-               ctf_integer(void *, sptep, sptep)
+               ctf_integer_hex(void *, sptep, sptep)
                ctf_integer(gfn_t, gfn, gfn)
                ctf_integer(unsigned, access, access)
        )
 )
 
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,11,0)) */
 
 LTTNG_TRACEPOINT_EVENT_MAP(
        handle_mmio_page_fault,
@@ -183,6 +233,74 @@ LTTNG_TRACEPOINT_EVENT_MAP(
        )
 )
 
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,16,0))
+LTTNG_TRACEPOINT_EVENT_MAP(
+       fast_page_fault,
+
+       kvm_mmu_fast_page_fault,
+
+       TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
+                u64 *sptep, u64 old_spte, int ret),
+       TP_ARGS(vcpu, fault, sptep, old_spte, ret),
+
+       TP_FIELDS(
+               ctf_integer(int, vcpu_id, vcpu->vcpu_id)
+               ctf_integer(gpa_t, cr2_or_gpa, fault->addr)
+               ctf_integer(u32, error_code, fault->error_code)
+               ctf_integer_hex(u64 *, sptep, sptep)
+               ctf_integer(u64, old_spte, old_spte)
+               ctf_integer(u64, new_spte, *sptep)
+               ctf_integer(int, ret, ret)
+       )
+)
+#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,10,0))
+LTTNG_TRACEPOINT_EVENT_MAP(
+       fast_page_fault,
+
+       kvm_mmu_fast_page_fault,
+
+       TP_PROTO(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 error_code,
+                u64 *sptep, u64 old_spte, int ret),
+       TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, ret),
+
+       TP_FIELDS(
+               ctf_integer(int, vcpu_id, vcpu->vcpu_id)
+               ctf_integer(gpa_t, cr2_or_gpa, cr2_or_gpa)
+               ctf_integer(u32, error_code, error_code)
+               ctf_integer_hex(u64 *, sptep, sptep)
+               ctf_integer(u64, old_spte, old_spte)
+               ctf_integer(u64, new_spte, *sptep)
+               ctf_integer(int, ret, ret)
+       )
+)
+#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0) || \
+       LTTNG_KERNEL_RANGE(4,19,103, 4,20,0) || \
+       LTTNG_KERNEL_RANGE(5,4,19, 5,5,0) || \
+       LTTNG_KERNEL_RANGE(5,5,3, 5,6,0) || \
+       LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,92, 4,16,0,0) || \
+       LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,44, 5,1,0,0) || \
+       LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,43, 5,3,18,45) || \
+       LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,46, 5,4,0,0))
+LTTNG_TRACEPOINT_EVENT_MAP(
+       fast_page_fault,
+
+       kvm_mmu_fast_page_fault,
+
+       TP_PROTO(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 error_code,
+                u64 *sptep, u64 old_spte, bool retry),
+       TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, retry),
+
+       TP_FIELDS(
+               ctf_integer(int, vcpu_id, vcpu->vcpu_id)
+               ctf_integer(gpa_t, cr2_or_gpa, cr2_or_gpa)
+               ctf_integer(u32, error_code, error_code)
+               ctf_integer_hex(u64 *, sptep, sptep)
+               ctf_integer(u64, old_spte, old_spte)
+               ctf_integer(u64, new_spte, *sptep)
+               ctf_integer(bool, retry, retry)
+       )
+)
+#else
 LTTNG_TRACEPOINT_EVENT_MAP(
        fast_page_fault,
 
@@ -196,18 +314,20 @@ LTTNG_TRACEPOINT_EVENT_MAP(
                ctf_integer(int, vcpu_id, vcpu->vcpu_id)
                ctf_integer(gva_t, gva, gva)
                ctf_integer(u32, error_code, error_code)
-               ctf_integer(u64 *, sptep, sptep)
+               ctf_integer_hex(u64 *, sptep, sptep)
                ctf_integer(u64, old_spte, old_spte)
                ctf_integer(u64, new_spte, *sptep)
                ctf_integer(bool, retry, retry)
        )
 )
+#endif
+
 #endif /* LTTNG_TRACE_KVM_MMU_H */
 
 #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module/arch/x86/kvm
+#define TRACE_INCLUDE_PATH instrumentation/events/lttng-module/arch/x86/kvm
 #undef TRACE_INCLUDE_FILE
 #define TRACE_INCLUDE_FILE mmutrace
 
 /* This part must be outside protection */
-#include "../../../../../../probes/define_trace.h"
+#include <probes/define_trace.h>
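
Side note on the pre-3.6 compatibility branch of kvm_mmu_pagetable_walk above: older kernels passed write/user/fetch as separate flags, and the probe packs them into the same pferr layout that newer kernels pass directly. Bits 1, 2 and 4 correspond to the write, user and fetch bits of the x86 page-fault error code. A small standalone sketch of that packing (assertions and names are illustrative, not the kernel's PFERR_* macros):

    #include <assert.h>
    #include <stdint.h>

    /* Pack the discrete fault flags into x86 #PF error-code bit positions. */
    static uint32_t pack_pferr(int write_fault, int user_fault, int fetch_fault)
    {
            return ((uint32_t)!!write_fault << 1) |
                   ((uint32_t)!!user_fault  << 2) |
                   ((uint32_t)!!fetch_fault << 4);
    }

    int main(void)
    {
            /* A write fault from user mode sets bits 1 and 2: 0x6. */
            assert(pack_pferr(1, 1, 0) == 0x6);
            /* An instruction-fetch fault sets bit 4: 0x10. */
            assert(pack_pferr(0, 0, 1) == 0x10);
            return 0;
    }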