AUDIT_PRE_PAGE_FAULT,
AUDIT_POST_PAGE_FAULT,
AUDIT_PRE_PTE_WRITE,
- AUDIT_POST_PTE_WRITE
+ AUDIT_POST_PTE_WRITE,
+ AUDIT_PRE_SYNC,
+ AUDIT_POST_SYNC
};

char *audit_point_name[] = {
"pre page fault",
"post page fault",
"pre pte write",
- "post pte write"
+ "post pte write",
+ "pre sync",
+ "post sync"
};

#undef MMU_DEBUG
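The two tables have to stay in lock-step: every new AUDIT_* value needs a matching string, because audit reports are prefixed by indexing audit_point_name[] with the point currently being checked. A minimal sketch of that consumer, assuming a file-local audit_point variable and an audit_printk() helper shaped like the one the later hunks call:

	/* Sketch only, not the exact kernel code: audit_point is assumed to be
	 * set to one of the AUDIT_* values before each audit walk. */
	static int audit_point;

	#define audit_printk(fmt, args...) \
		printk(KERN_ERR "audit: (%s) " fmt, \
		       audit_point_name[audit_point], ##args)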
if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
return;
+
+ trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
hpa_t root = vcpu->arch.mmu.root_hpa;
sp = page_header(root);
mmu_sync_children(vcpu, sp);
}
}
+ trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
}

void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
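With the two new points, the root-sync path (mmu_sync_roots()) is bracketed: AUDIT_PRE_SYNC fires before any child shadow page is synced, AUDIT_POST_SYNC once every root has been walked. The trace_kvm_mmu_audit() call is only the hand-off; assuming the audit code attaches a probe to that tracepoint, the handler side looks roughly like the sketch below (audit_all_active_sps() and the exact probe signature are assumptions, not part of this patch):

	/* Sketch (assumption): probe attached to the kvm_mmu_audit tracepoint. */
	static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
	{
		audit_point = point;			/* consumed by audit_printk() */
		audit_all_active_sps(vcpu->kvm);	/* per-shadow-page checks */
		audit_vcpu_spte(vcpu);			/* per-SPTE checks, incl. the new post-sync check */
	}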
inspect_spte_has_rmap(vcpu->kvm, sptep);
}

+static void audit_spte_after_sync(struct kvm_vcpu *vcpu, u64 *sptep, int level)
+{
+ struct kvm_mmu_page *sp = page_header(__pa(sptep));
+
+ if (audit_point == AUDIT_POST_SYNC && sp->unsync)
+ audit_printk("meet unsync sp(%p) after sync root.\n", sp);
+}
+
static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
{
int i;
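The new helper is the point of the patch: once the roots have been synced, no shadow page reachable from the current root should still carry sp->unsync, so any SPTE whose containing page is still unsync during the AUDIT_POST_SYNC walk points at a missed sync. With an audit_printk() along the lines sketched above, a hit would show up in the kernel log roughly as:

	audit: (post sync) meet unsync sp(<sp address>) after sync root.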
}
}

-void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
+static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
{
struct kvm_memory_slot *slot;
unsigned long *rmapp;

static void audit_spte(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
audit_sptes_have_rmaps(vcpu, sptep, level);
audit_mappings(vcpu, sptep, level);
+ audit_spte_after_sync(vcpu, sptep, level);
}
static void audit_vcpu_spte(struct kvm_vcpu *vcpu)