-#if !defined(_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_KVMMMU_H
+#if !defined(LTTNG_TRACE_KVM_MMU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define LTTNG_TRACE_KVM_MMU_H
-#include <linux/tracepoint.h>
+#include <probes/lttng-tracepoint-event.h>
+#include <linux/version.h>
+
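+/* linux/ftrace_event.h was renamed to linux/trace_events.h in kernel 4.2. */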
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
+#include <linux/trace_events.h>
+#else /* if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) */
#include <linux/ftrace_event.h>
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) */
#undef TRACE_SYSTEM
-#define TRACE_SYSTEM kvmmmu
-
-#define KVM_MMU_PAGE_FIELDS \
- __field(__u64, gfn) \
- __field(__u32, role) \
- __field(__u32, root_count) \
- __field(bool, unsync)
-
-#define KVM_MMU_PAGE_ASSIGN(sp) \
- tp_assign(gfn, sp->gfn) \
- tp_assign(role, sp->role.word) \
- tp_assign(root_count, sp->root_count) \
- tp_assign(unsync, sp->unsync)
-
-#define KVM_MMU_PAGE_PRINTK() ({ \
- const char *ret = p->buffer + p->len; \
- static const char *access_str[] = { \
- "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \
- }; \
- union kvm_mmu_page_role role; \
- \
- role.word = __entry->role; \
- \
- trace_seq_printf(p, "sp gfn %llx %u%s q%u%s %s%s" \
- " %snxe root %u %s%c", \
- __entry->gfn, role.level, \
- role.cr4_pae ? " pae" : "", \
- role.quadrant, \
- role.direct ? " direct" : "", \
- access_str[role.access], \
- role.invalid ? " invalid" : "", \
- role.nxe ? "" : "!", \
- __entry->root_count, \
- __entry->unsync ? "unsync" : "sync", 0); \
- ret; \
- })
-
-#define kvm_mmu_trace_pferr_flags \
- { PFERR_PRESENT_MASK, "P" }, \
- { PFERR_WRITE_MASK, "W" }, \
- { PFERR_USER_MASK, "U" }, \
- { PFERR_RSVD_MASK, "RSVD" }, \
- { PFERR_FETCH_MASK, "F" }
+#define TRACE_SYSTEM kvm_mmu
+
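+/* Kernels >= 3.11 add the mmu_valid_gen field to struct kvm_mmu_page, so record it only there. */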
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
+
+#define LTTNG_KVM_MMU_PAGE_FIELDS \
+ ctf_integer(unsigned long, mmu_valid_gen, (sp)->mmu_valid_gen) \
+ ctf_integer(__u64, gfn, (sp)->gfn) \
+ ctf_integer(__u32, role, (sp)->role.word) \
+ ctf_integer(__u32, root_count, (sp)->root_count) \
+ ctf_integer(bool, unsync, (sp)->unsync)
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
+
+#define LTTNG_KVM_MMU_PAGE_FIELDS \
+ ctf_integer(__u64, gfn, (sp)->gfn) \
+ ctf_integer(__u32, role, (sp)->role.word) \
+ ctf_integer(__u32, root_count, (sp)->root_count) \
+ ctf_integer(bool, unsync, (sp)->unsync)
+
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
/*
* A pagetable walk has started
*/
-TRACE_EVENT(
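+/*
+ * LTTNG_TRACEPOINT_EVENT declares fields once with TP_FIELDS()/ctf_integer();
+ * the TP_STRUCT__entry, TP_fast_assign and TP_printk blocks used by the
+ * ftrace macros are not needed for CTF output.
+ */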
+LTTNG_TRACEPOINT_EVENT(
kvm_mmu_pagetable_walk,
TP_PROTO(u64 addr, u32 pferr),
TP_ARGS(addr, pferr),
- TP_STRUCT__entry(
- __field(__u64, addr)
- __field(__u32, pferr)
- ),
-
- TP_fast_assign(
- tp_assign(addr, addr)
- tp_assign(pferr, pferr)
- ),
-
- TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
- __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
+ TP_FIELDS(
+ ctf_integer(__u64, addr, addr)
+ ctf_integer(__u32, pferr, pferr)
+ )
)
/* We just walked a paging element */
-TRACE_EVENT(
+LTTNG_TRACEPOINT_EVENT(
kvm_mmu_paging_element,
TP_PROTO(u64 pte, int level),
TP_ARGS(pte, level),
- TP_STRUCT__entry(
- __field(__u64, pte)
- __field(__u32, level)
- ),
-
- TP_fast_assign(
- tp_assign(pte, pte)
- tp_assign(level, level)
- ),
-
- TP_printk("pte %llx level %u", __entry->pte, __entry->level)
+ TP_FIELDS(
+ ctf_integer(__u64, pte, pte)
+ ctf_integer(__u32, level, level)
+ )
)
-DECLARE_EVENT_CLASS(kvm_mmu_set_bit_class,
+LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_set_bit_class,
TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
TP_ARGS(table_gfn, index, size),
- TP_STRUCT__entry(
- __field(__u64, gpa)
- ),
-
- TP_fast_assign(
- tp_assign(gpa, ((u64)table_gfn << PAGE_SHIFT)
- + index * size)
- ),
-
- TP_printk("gpa %llx", __entry->gpa)
+ TP_FIELDS(
+ ctf_integer(__u64, gpa,
+ ((u64)table_gfn << PAGE_SHIFT) + index * size)
+ )
)
/* We set a pte accessed bit */
-DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,
+LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,
 TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
 TP_ARGS(table_gfn, index, size)
)
/* We set a pte dirty bit */
-DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,
+LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,
TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
TP_ARGS(table_gfn, index, size)
)
-TRACE_EVENT(
+LTTNG_TRACEPOINT_EVENT(
kvm_mmu_walker_error,
TP_PROTO(u32 pferr),
TP_ARGS(pferr),
- TP_STRUCT__entry(
- __field(__u32, pferr)
- ),
-
- TP_fast_assign(
- tp_assign(pferr, pferr)
- ),
-
- TP_printk("pferr %x %s", __entry->pferr,
- __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
+ TP_FIELDS(
+ ctf_integer(__u32, pferr, pferr)
+ )
)
-TRACE_EVENT(
+LTTNG_TRACEPOINT_EVENT(
kvm_mmu_get_page,
TP_PROTO(struct kvm_mmu_page *sp, bool created),
TP_ARGS(sp, created),
- TP_STRUCT__entry(
- KVM_MMU_PAGE_FIELDS
- __field(bool, created)
- ),
-
- TP_fast_assign(
- KVM_MMU_PAGE_ASSIGN(sp)
- tp_assign(created, created)
- ),
-
- TP_printk("%s %s", KVM_MMU_PAGE_PRINTK(),
- __entry->created ? "new" : "existing")
+ TP_FIELDS(
+ LTTNG_KVM_MMU_PAGE_FIELDS
+ ctf_integer(bool, created, created)
+ )
)
-DECLARE_EVENT_CLASS(kvm_mmu_page_class,
+LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_page_class,
TP_PROTO(struct kvm_mmu_page *sp),
TP_ARGS(sp),
- TP_STRUCT__entry(
- KVM_MMU_PAGE_FIELDS
- ),
-
- TP_fast_assign(
- KVM_MMU_PAGE_ASSIGN(sp)
- ),
-
- TP_printk("%s", KVM_MMU_PAGE_PRINTK())
+ TP_FIELDS(
+ LTTNG_KVM_MMU_PAGE_FIELDS
+ )
)
-DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_sync_page,
+LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_sync_page,
TP_PROTO(struct kvm_mmu_page *sp),
TP_ARGS(sp)
)
-DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_unsync_page,
+LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_unsync_page,
TP_PROTO(struct kvm_mmu_page *sp),
TP_ARGS(sp)
)
-DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
+LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
TP_PROTO(struct kvm_mmu_page *sp),
TP_ARGS(sp)
)
-TRACE_EVENT(
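+/* Since kernel 3.11, mark_mmio_spte carries an extra mmio generation argument. */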
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
+
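+/*
+ * The _MAP variant hooks the kernel's mark_mmio_spte tracepoint but emits
+ * the event under the kvm_mmu_-prefixed LTTng name.
+ */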
+LTTNG_TRACEPOINT_EVENT_MAP(
mark_mmio_spte,
- TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access),
- TP_ARGS(sptep, gfn, access),
- TP_STRUCT__entry(
- __field(void *, sptep)
- __field(gfn_t, gfn)
- __field(unsigned, access)
- ),
+ kvm_mmu_mark_mmio_spte,
- TP_fast_assign(
- tp_assign(sptep, sptep)
- tp_assign(gfn, gfn)
- tp_assign(access, access)
- ),
+ TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
+ TP_ARGS(sptep, gfn, access, gen),
- TP_printk("sptep:%p gfn %llx access %x", __entry->sptep, __entry->gfn,
- __entry->access)
+ TP_FIELDS(
+ ctf_integer_hex(void *, sptep, sptep)
+ ctf_integer(gfn_t, gfn, gfn)
+ ctf_integer(unsigned, access, access)
+ ctf_integer(unsigned int, gen, gen)
+ )
)
-TRACE_EVENT(
- handle_mmio_page_fault,
- TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
- TP_ARGS(addr, gfn, access),
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
+
+LTTNG_TRACEPOINT_EVENT_MAP(
+ mark_mmio_spte,
- TP_STRUCT__entry(
- __field(u64, addr)
- __field(gfn_t, gfn)
- __field(unsigned, access)
- ),
+ kvm_mmu_mark_mmio_spte,
- TP_fast_assign(
- tp_assign(addr, addr)
- tp_assign(gfn, gfn)
- tp_assign(access, access)
- ),
+ TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access),
+ TP_ARGS(sptep, gfn, access),
- TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
- __entry->access)
+ TP_FIELDS(
+ ctf_integer_hex(void *, sptep, sptep)
+ ctf_integer(gfn_t, gfn, gfn)
+ ctf_integer(unsigned, access, access)
+ )
)
-#define __spte_satisfied(__spte) \
- (__entry->retry && is_writable_pte(__entry->__spte))
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
-TRACE_EVENT(
+LTTNG_TRACEPOINT_EVENT_MAP(
+ handle_mmio_page_fault,
+
+ kvm_mmu_handle_mmio_page_fault,
+
+ TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
+ TP_ARGS(addr, gfn, access),
+
+ TP_FIELDS(
+ ctf_integer(u64, addr, addr)
+ ctf_integer(gfn_t, gfn, gfn)
+ ctf_integer(unsigned, access, access)
+ )
+)
+
+LTTNG_TRACEPOINT_EVENT_MAP(
fast_page_fault,
+
+ kvm_mmu_fast_page_fault,
+
TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
u64 *sptep, u64 old_spte, bool retry),
TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),
- TP_STRUCT__entry(
- __field(int, vcpu_id)
- __field(gva_t, gva)
- __field(u32, error_code)
- __field(u64 *, sptep)
- __field(u64, old_spte)
- __field(u64, new_spte)
- __field(bool, retry)
- ),
-
- TP_fast_assign(
- tp_assign(vcpu_id, vcpu->vcpu_id)
- tp_assign(gva, gva)
- tp_assign(error_code, error_code)
- tp_assign(sptep, sptep)
- tp_assign(old_spte, old_spte)
- tp_assign(new_spte, *sptep)
- tp_assign(retry, retry)
- ),
-
- TP_printk("vcpu %d gva %lx error_code %s sptep %p old %#llx"
- " new %llx spurious %d fixed %d", __entry->vcpu_id,
- __entry->gva, __print_flags(__entry->error_code, "|",
- kvm_mmu_trace_pferr_flags), __entry->sptep,
- __entry->old_spte, __entry->new_spte,
- __spte_satisfied(old_spte), __spte_satisfied(new_spte)
+ TP_FIELDS(
+ ctf_integer(int, vcpu_id, vcpu->vcpu_id)
+ ctf_integer(gva_t, gva, gva)
+ ctf_integer(u32, error_code, error_code)
+ ctf_integer_hex(u64 *, sptep, sptep)
+ ctf_integer(u64, old_spte, old_spte)
+ ctf_integer(u64, new_spte, *sptep)
+ ctf_integer(bool, retry, retry)
)
)
-#endif /* _TRACE_KVMMMU_H */
+#endif /* LTTNG_TRACE_KVM_MMU_H */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module/arch/x86/kvm
+#define TRACE_INCLUDE_PATH instrumentation/events/lttng-module/arch/x86/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace
/* This part must be outside protection */
-#include "../../../../../../probes/define_trace.h"
+#include <probes/define_trace.h>