KVM: Return EFAULT from kvm ioctl when guest accesses bad area
author Gleb Natapov <gleb@redhat.com>
Wed, 7 Jul 2010 17:16:45 +0000 (20:16 +0300)
committer Avi Kivity <avi@redhat.com>
Mon, 2 Aug 2010 03:40:33 +0000 (06:40 +0300)
Currently, if the guest accesses an address that belongs to a memory slot
but is not backed by a page, or whose backing page is read-only, KVM treats
the access like an MMIO access. Remove that capability. It was never part
of the interface and should not be relied upon.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/kvm/mmu.c
include/linux/kvm_host.h
virt/kvm/kvm_main.c
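
In userspace terms, the change means a vcpu that touches such an area now
gets an error back from the KVM_RUN ioctl instead of a spurious MMIO exit.
A minimal sketch of how a VMM run loop might react, assuming vcpu_fd is an
already-configured vcpu file descriptor (setup omitted):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Sketch only: reacting to the new -EFAULT from KVM_RUN. */
    static int run_vcpu(int vcpu_fd)
    {
            for (;;) {
                    if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
                            if (errno == EINTR)
                                    continue;   /* interrupted by a signal */
                            if (errno == EFAULT) {
                                    /* Guest accessed a memslot address with
                                     * no usable backing page. */
                                    fprintf(stderr, "bad guest memory access\n");
                                    return -1;
                            }
                            return -1;          /* other ioctl failure */
                    }
                    /* ... dispatch on kvm_run->exit_reason as usual ... */
            }
    }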

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d8d48329cb82cb54664e0fdc0f2f92d9219dbe2b..89d7a2cae53bf0da45b887e33d6423cd4c1a6eb0 100644
@@ -2078,7 +2078,9 @@ static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn)
        if (is_hwpoison_pfn(pfn)) {
                kvm_send_hwpoison_signal(kvm, gfn);
                return 0;
-       }
+       } else if (is_fault_pfn(pfn))
+               return -EFAULT;
+
        return 1;
 }
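
After this hunk, kvm_handle_bad_page() has a three-way return: 0 when the
page is hwpoisoned (after kvm_send_hwpoison_signal() has notified the task),
the new -EFAULT for the fault sentinel, and 1 for any other error page. A
hedged sketch of a consuming fault path (the real callers sit outside this
diff):

    /* Illustrative only; not part of this commit. */
    static int example_page_fault(struct kvm_vcpu *vcpu, gfn_t gfn)
    {
            pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);

            if (is_error_pfn(pfn))
                    /* Pass the verdict up: 0 (hwpoison handled),
                     * -EFAULT (bad area), or 1 (remaining bad_page case). */
                    return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);

            /* ... install pfn in the shadow/TDP page tables ... */
            return 0;
    }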
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e796326f364628935e690b20f09c6e606ad7eb4e..8055067b6becc86969aa543a338aced141f4d882 100644
@@ -269,6 +269,7 @@ extern pfn_t bad_pfn;
 int is_error_page(struct page *page);
 int is_error_pfn(pfn_t pfn);
 int is_hwpoison_pfn(pfn_t pfn);
+int is_fault_pfn(pfn_t pfn);
 int kvm_is_error_hva(unsigned long addr);
 int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 630d1224f18793e5f0756807c482171118ad8216..b78b794c1039df394b6ccfc43ae34fb2ff6fc47a 100644
@@ -96,6 +96,9 @@ static bool largepages_enabled = true;
 static struct page *hwpoison_page;
 static pfn_t hwpoison_pfn;
 
+static struct page *fault_page;
+static pfn_t fault_pfn;
+
 inline int kvm_is_mmio_pfn(pfn_t pfn)
 {
        if (pfn_valid(pfn)) {
@@ -815,13 +818,13 @@ EXPORT_SYMBOL_GPL(kvm_disable_largepages);
 
 int is_error_page(struct page *page)
 {
-       return page == bad_page || page == hwpoison_page;
+       return page == bad_page || page == hwpoison_page || page == fault_page;
 }
 EXPORT_SYMBOL_GPL(is_error_page);
 
 int is_error_pfn(pfn_t pfn)
 {
-       return pfn == bad_pfn || pfn == hwpoison_pfn;
+       return pfn == bad_pfn || pfn == hwpoison_pfn || pfn == fault_pfn;
 }
 EXPORT_SYMBOL_GPL(is_error_pfn);
 
@@ -831,6 +834,12 @@ int is_hwpoison_pfn(pfn_t pfn)
 }
 EXPORT_SYMBOL_GPL(is_hwpoison_pfn);
 
+int is_fault_pfn(pfn_t pfn)
+{
+       return pfn == fault_pfn;
+}
+EXPORT_SYMBOL_GPL(is_fault_pfn);
+
 static inline unsigned long bad_hva(void)
 {
        return PAGE_OFFSET;
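
fault_page follows the established sentinel pattern of bad_page and
hwpoison_page: a real, zeroed page whose pfn serves as an in-band error code
for gfn_to_pfn()-style lookups. A small sketch of the resulting
classification, using only the helpers declared above:

    /* Sketch: every sentinel satisfies is_error_pfn(); the specific
     * helpers then tell the error cases apart. */
    static const char *classify_pfn(pfn_t pfn)
    {
            if (!is_error_pfn(pfn))
                    return "valid mapping";
            if (is_hwpoison_pfn(pfn))
                    return "hwpoisoned";    /* SIGBUS path */
            if (is_fault_pfn(pfn))
                    return "fault";         /* new -EFAULT path */
            return "bad";                   /* remaining bad_page case */
    }
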
@@ -959,8 +968,8 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
                if (vma == NULL || addr < vma->vm_start ||
                    !(vma->vm_flags & VM_PFNMAP)) {
                        up_read(&current->mm->mmap_sem);
-                       get_page(bad_page);
-                       return page_to_pfn(bad_page);
+                       get_page(fault_page);
+                       return page_to_pfn(fault_page);
                }
 
                pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
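
The unchanged tail of this hunk is the VM_PFNMAP translation: the target pfn
is the mapping's base pfn (vm_pgoff) plus the page index of addr within the
VMA. A worked example with hypothetical values:

    /* Hypothetical numbers:
     *   vma->vm_start = 0x7f0000000000
     *   vma->vm_pgoff = 0x100            (mapping starts at pfn 0x100)
     *   addr          = 0x7f0000003000
     *
     *   (addr - vma->vm_start) >> PAGE_SHIFT  =  0x3000 >> 12  =  3
     *   pfn = 3 + 0x100 = 0x103
     */
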
@@ -2226,6 +2235,15 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 
        hwpoison_pfn = page_to_pfn(hwpoison_page);
 
+       fault_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+
+       if (fault_page == NULL) {
+               r = -ENOMEM;
+               goto out_free_0;
+       }
+
+       fault_pfn = page_to_pfn(fault_page);
+
        if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
                r = -ENOMEM;
                goto out_free_0;
@@ -2298,6 +2316,8 @@ out_free_1:
 out_free_0a:
        free_cpumask_var(cpus_hardware_enabled);
 out_free_0:
+       if (fault_page)
+               __free_page(fault_page);
        if (hwpoison_page)
                __free_page(hwpoison_page);
        __free_page(bad_page);
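
Taken together, the last two hunks extend kvm_init()'s goto-based unwind
ladder: fault_page is allocated after hwpoison_page, and every later failure
funnels through out_free_0, which is also reached when fault_page's own
allocation fails, hence the NULL check. A condensed sketch of the assumed
shape (only the fault_page rung is actually visible in this diff):

    /* Sketch only; label names mirror the hunks above. */
    static int init_sentinel_pages(void)
    {
            int r = -ENOMEM;

            bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
            if (bad_page == NULL)
                    goto out;
            hwpoison_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
            if (hwpoison_page == NULL)
                    goto out_free_0;
            fault_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
            if (fault_page == NULL)
                    goto out_free_0;
            return 0;

    out_free_0:
            /* May be reached with the later rungs still NULL. */
            if (fault_page)
                    __free_page(fault_page);
            if (hwpoison_page)
                    __free_page(hwpoison_page);
            __free_page(bad_page);
    out:
            return r;
    }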