KVM: x86: add tracepoints around __direct_map and FNAME(fetch)
author Paolo Bonzini <pbonzini@redhat.com>
Thu, 4 Jul 2019 09:14:13 +0000 (05:14 -0400)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 12 Nov 2019 18:19:07 +0000 (19:19 +0100)
commit 335e192a3fa415e1202c8b9ecdaaecd643f823cc upstream.

These are useful in debugging shadow paging.
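
As a quick illustration (not part of the patch itself), the new events can be
enabled from user space through tracefs once a kernel with this change is
running.  A minimal sketch in C, assuming tracefs is mounted at
/sys/kernel/tracing and that the events are exported under the kvmmmu trace
system defined in mmutrace.h:

  #include <stdio.h>

  /* Write "1" to one kvmmmu event's enable file; returns 0 on success. */
  static int enable_event(const char *name)
  {
          char path[256];
          FILE *f;

          snprintf(path, sizeof(path),
                   "/sys/kernel/tracing/events/kvmmmu/%s/enable", name);
          f = fopen(path, "w");
          if (!f)
                  return -1;
          fputs("1", f);
          fclose(f);
          return 0;
  }

  int main(void)
  {
          /* Event names match the TRACE_EVENTs added by this patch. */
          enable_event("kvm_mmu_spte_requested");
          enable_event("kvm_mmu_set_spte");
          return 0;
  }

The resulting records then show up in /sys/kernel/tracing/trace in the format
given by the TP_printk() strings below.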

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/kvm/mmu.c
arch/x86/kvm/mmutrace.h
arch/x86/kvm/paging_tmpl.h

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 17c2a123521572bab647f366c950d1a6fed35cba..d2c4d7615aa26222269bbedc086305f08d0d718a 100644
@@ -139,9 +139,6 @@ module_param(dbg, bool, 0644);
 
 #include <trace/events/kvm.h>
 
-#define CREATE_TRACE_POINTS
-#include "mmutrace.h"
-
 #define SPTE_HOST_WRITEABLE    (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
 #define SPTE_MMU_WRITEABLE     (1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))
 
@@ -244,6 +241,11 @@ static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
 
 static void mmu_spte_set(u64 *sptep, u64 spte);
 static void mmu_free_roots(struct kvm_vcpu *vcpu);
+static bool is_executable_pte(u64 spte);
+
+#define CREATE_TRACE_POINTS
+#include "mmutrace.h"
+
 
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
 {
@@ -2909,10 +2911,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
                ret = RET_PF_EMULATE;
 
        pgprintk("%s: setting spte %llx\n", __func__, *sptep);
-       pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
-                is_large_pte(*sptep)? "2MB" : "4kB",
-                *sptep & PT_WRITABLE_MASK ? "RW" : "R", gfn,
-                *sptep, sptep);
+       trace_kvm_mmu_set_spte(level, gfn, sptep);
        if (!was_rmapped && is_large_pte(*sptep))
                ++vcpu->kvm->stat.lpages;
 
@@ -3023,6 +3022,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                return RET_PF_RETRY;
 
+       trace_kvm_mmu_spte_requested(gpa, level, pfn);
        for_each_shadow_entry(vcpu, gpa, it) {
                base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
                if (it.level == level)
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index c73bf4e4988cb5c84065dd324b50b93cbe12c2e1..918b0d5bf2724c66953addcc6b0bfd5b776c6c45 100644
@@ -325,6 +325,65 @@ TRACE_EVENT(
                  __entry->kvm_gen == __entry->spte_gen
        )
 );
+
+TRACE_EVENT(
+       kvm_mmu_set_spte,
+       TP_PROTO(int level, gfn_t gfn, u64 *sptep),
+       TP_ARGS(level, gfn, sptep),
+
+       TP_STRUCT__entry(
+               __field(u64, gfn)
+               __field(u64, spte)
+               __field(u64, sptep)
+               __field(u8, level)
+               /* These depend on page entry type, so compute them now.  */
+               __field(bool, r)
+               __field(bool, x)
+               __field(u8, u)
+       ),
+
+       TP_fast_assign(
+               __entry->gfn = gfn;
+               __entry->spte = *sptep;
+               __entry->sptep = virt_to_phys(sptep);
+               __entry->level = level;
+               __entry->r = shadow_present_mask || (__entry->spte & PT_PRESENT_MASK);
+               __entry->x = is_executable_pte(__entry->spte);
+               __entry->u = shadow_user_mask ? !!(__entry->spte & shadow_user_mask) : -1;
+       ),
+
+       TP_printk("gfn %llx spte %llx (%s%s%s%s) level %d at %llx",
+                 __entry->gfn, __entry->spte,
+                 __entry->r ? "r" : "-",
+                 __entry->spte & PT_WRITABLE_MASK ? "w" : "-",
+                 __entry->x ? "x" : "-",
+                 __entry->u == -1 ? "" : (__entry->u ? "u" : "-"),
+                 __entry->level, __entry->sptep
+       )
+);
+
+TRACE_EVENT(
+       kvm_mmu_spte_requested,
+       TP_PROTO(gpa_t addr, int level, kvm_pfn_t pfn),
+       TP_ARGS(addr, level, pfn),
+
+       TP_STRUCT__entry(
+               __field(u64, gfn)
+               __field(u64, pfn)
+               __field(u8, level)
+       ),
+
+       TP_fast_assign(
+               __entry->gfn = addr >> PAGE_SHIFT;
+               __entry->pfn = pfn | (__entry->gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
+               __entry->level = level;
+       ),
+
+       TP_printk("gfn %llx pfn %llx level %d",
+                 __entry->gfn, __entry->pfn, __entry->level
+       )
+);
+
 #endif /* _TRACE_KVMMMU_H */
 
 #undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 7fa2e95149ee7b5b3fb450fc9394f28826ae0183..b6b0fb60e0a75bd187f2e2bbc498b9b20d610dc6 100644
@@ -649,6 +649,8 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 
        base_gfn = gw->gfn;
 
+       trace_kvm_mmu_spte_requested(addr, gw->level, pfn);
+
        for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
                clear_sp_write_flooding_count(it.sptep);
                base_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);