slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
if (!slot)
- return get_fault_pfn();
+ return KVM_PFN_ERR_FAULT;
hva = gfn_to_hva_memslot(slot, gfn);
#define KVM_MAX_MMIO_FRAGMENTS \
(KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS)
+#define KVM_PFN_ERR_FAULT (-EFAULT)
+
/*
* vcpu->requests bit members
*/
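
Not introduced by this patch, but useful context for the define above: pfn_t is an unsigned 64-bit type, so a negative errno such as (-EFAULT) wraps to a value far above any real physical frame number and cannot collide with a valid pfn. A minimal sketch of how a caller might test for the new error value follows; the helper name is hypothetical, not part of this series:

	/* Hypothetical helper, illustration only: an error pfn is a
	 * negative errno stored in an unsigned pfn_t, so it compares
	 * equal to the macro once the macro is cast to pfn_t.
	 */
	static inline bool kvm_pfn_is_fault(pfn_t pfn)
	{
		return pfn == (pfn_t)KVM_PFN_ERR_FAULT;
	}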
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);
-pfn_t get_fault_pfn(void);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int len);
return -ENOENT;
}
-pfn_t get_fault_pfn(void)
-{
- return -EFAULT;
-}
-EXPORT_SYMBOL_GPL(get_fault_pfn);
-
static pfn_t get_hwpoison_pfn(void)
{
return -EHWPOISON;
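
get_hwpoison_pfn() just below follows the same errno-as-pfn pattern, so the same conversion presumably applies to it as well; a hedged sketch of what that could look like (the macro name is a guess, not something this patch defines):

	/* Sketch only: the hwpoison case converted the same way. */
	#define KVM_PFN_ERR_HWPOISON	(-EHWPOISON)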
struct vm_area_struct *vma;
if (atomic)
- return get_fault_pfn();
+ return KVM_PFN_ERR_FAULT;
down_read(&current->mm->mmap_sem);
if (npages == -EHWPOISON ||
vma = find_vma_intersection(current->mm, addr, addr+1);
if (vma == NULL)
- pfn = get_fault_pfn();
+ pfn = KVM_PFN_ERR_FAULT;
else if ((vma->vm_flags & VM_PFNMAP)) {
pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
vma->vm_pgoff;
} else {
if (async && (vma->vm_flags & VM_WRITE))
*async = true;
- pfn = get_fault_pfn();
+ pfn = KVM_PFN_ERR_FAULT;
}
up_read(&current->mm->mmap_sem);
} else
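
For clarity, a worked example of the VM_PFNMAP arithmetic in the hunk above, with illustrative numbers and assuming PAGE_SHIFT == 12:

	/* Illustrative numbers only.  For a VM_PFNMAP vma with
	 *   vma->vm_start = 0x7f0000000000, vma->vm_pgoff = 0x100,
	 * a fault at addr = 0x7f0000003000 gives
	 *   (addr - vma->vm_start) >> PAGE_SHIFT = 0x3000 >> 12 = 3
	 *   pfn = 3 + 0x100 = 0x103
	 * i.e. the mapping's base frame plus the page index of addr
	 * within the vma.
	 */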