static const char *audit_msg;
-static gva_t canonicalize(gva_t gva)
-{
-#ifdef CONFIG_X86_64
- gva = (long long)(gva << 16) >> 16;
-#endif
- return gva;
-}
-
-
typedef void (*inspect_spte_fn) (struct kvm *kvm, u64 *sptep);
static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
			    inspect_spte_fn fn)

static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
- u64 ent = pt[i];
+ u64 *sptep = pt + i;
+ struct kvm_mmu_page *sp;
+ gfn_t gfn;
+ pfn_t pfn;
+ hpa_t hpa;
- if (ent == shadow_trap_nonpresent_pte)
- continue;
+ sp = page_header(__pa(sptep));
- va = canonicalize(va);
- if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
- audit_mappings_page(vcpu, ent, va, level - 1);
- else {
- gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, va, NULL);
- gfn_t gfn = gpa >> PAGE_SHIFT;
- pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
- hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
+ if (sp->unsync) {
+ if (level != PT_PAGE_TABLE_LEVEL) {
+ printk(KERN_ERR "audit: (%s) error: unsync sp: %p level = %d\n",
+ audit_msg, sp, level);
+ return;
+ }
- if (is_error_pfn(pfn)) {
- kvm_release_pfn_clean(pfn);
- continue;
+ if (*sptep == shadow_notrap_nonpresent_pte) {
+ printk(KERN_ERR "audit: (%s) error: notrap spte in unsync sp: %p\n",
+ audit_msg, sp);
+ return;
}
+ }
- if (is_shadow_present_pte(ent)
- && (ent & PT64_BASE_ADDR_MASK) != hpa)
- printk(KERN_ERR "xx audit error: (%s) levels %d"
- " gva %lx gpa %llx hpa %llx ent %llx %d\n",
- audit_msg, vcpu->arch.mmu.root_level,
- va, gpa, hpa, ent,
- is_shadow_present_pte(ent));
- else if (ent == shadow_notrap_nonpresent_pte
- && !is_error_hpa(hpa))
- printk(KERN_ERR "audit: (%s) notrap shadow,"
- " valid guest gva %lx\n", audit_msg, va);
- kvm_release_pfn_clean(pfn);
+ if (sp->role.direct && *sptep == shadow_notrap_nonpresent_pte) {
+ printk(KERN_ERR "audit: (%s) error: notrap spte in direct sp: %p\n",
+ audit_msg, sp);
+ return;
+ }
+
+ if (!is_shadow_present_pte(*sptep) ||
+ !is_last_spte(*sptep, level))
+ return;
+
+ gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
+ pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);
+ if (is_error_pfn(pfn)) {
+ kvm_release_pfn_clean(pfn);
+ return;
}
+
+ hpa = pfn << PAGE_SHIFT;
+
+ if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
+ printk(KERN_ERR "xx audit error: (%s) levels %d"
+ " gva %lx pfn %llx hpa %llx ent %llxn",
+ audit_msg, vcpu->arch.mmu.root_level,
+ va, pfn, hpa, *sptep);
}
}
}
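For reference, the essence of the rewritten check: the audit no longer translates the guest virtual address, it derives the expected gfn from the shadow page's own bookkeeping and resolves it without sleeping. A minimal sketch of that comparison, factored into a hypothetical helper (spte_maps_expected_pfn is not part of the patch), assuming the same helpers used in the hunk above:

/*
 * Illustration only: the core of the new per-spte audit check.  The gfn
 * comes from the shadow page's metadata instead of a guest VA walk, and
 * the pfn lookup is the non-sleeping variant added in the hunk below.
 */
static bool spte_maps_expected_pfn(struct kvm *kvm, struct kvm_mmu_page *sp,
				   u64 *sptep, int level)
{
	gfn_t gfn;
	pfn_t pfn;
	bool match;

	if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))
		return true;	/* no final mapping to verify */

	gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
	pfn = gfn_to_pfn_atomic(kvm, gfn);
	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return true;	/* gfn has no backing host page right now */
	}

	match = (*sptep & PT64_BASE_ADDR_MASK) == ((hpa_t)pfn << PAGE_SHIFT);
	kvm_release_pfn_clean(pfn);
	return match;
}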
EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);
-pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
+static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic)
{
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
}
- return hva_to_pfn(kvm, addr, false);
+ return hva_to_pfn(kvm, addr, atomic);
+}
+
+pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
+{
+ return __gfn_to_pfn(kvm, gfn, true);
+}
+EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
+
+pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
+{
+ return __gfn_to_pfn(kvm, gfn, false);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);
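__gfn_to_pfn() just threads the atomic flag through to hva_to_pfn(), so gfn_to_pfn_atomic() resolves a gfn without faulting pages in or sleeping, while gfn_to_pfn() keeps the old behaviour. A rough usage sketch, assuming a caller that already holds a spinlock such as kvm->mmu_lock (check_one_gfn is illustrative, not part of the patch):

/*
 * Illustration only: under kvm->mmu_lock the caller must not sleep, so it
 * uses the atomic variant; an error pfn simply means the gfn is not
 * currently backed by a host page.
 */
static void check_one_gfn(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	spin_lock(&kvm->mmu_lock);
	pfn = gfn_to_pfn_atomic(kvm, gfn);	/* never sleeps */
	if (!is_error_pfn(pfn)) {
		/* ... compare against the shadow page tables ... */
	}
	kvm_release_pfn_clean(pfn);
	spin_unlock(&kvm->mmu_lock);
}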