stlbe = &vcpu->arch.shadow_tlb[victim];
/* Get reference to new page. */
- down_read(&current->mm->mmap_sem);
new_page = gfn_to_page(vcpu->kvm, gfn);
- up_read(&current->mm->mmap_sem);
if (is_error_page(new_page)) {
printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
kvm_release_page_clean(new_page);
{
struct vm_area_struct *vma;
unsigned long addr;
+ int ret = 0;
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr))
- return 0;
+ return ret;
+ down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, addr);
if (vma && is_vm_hugetlb_page(vma))
- return 1;
+ ret = 1;
+ up_read(&current->mm->mmap_sem);
- return 0;
+ return ret;
}
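
Taken together, this hunk moves the mmap_sem read lock inside the hugepage check itself: find_vma() must run under mmap_sem, so the helper now brackets just that lookup instead of leaning on its callers. Reconstructed from the hunk, the resulting helper reads roughly as below (the host_largepage_backed name and the kvm parameter are assumptions from context):

	/*
	 * Returns 1 if the gfn's host mapping sits in a hugetlb VMA.
	 * find_vma() requires mmap_sem held for read, so take it here
	 * rather than in every caller. (Name assumed for illustration.)
	 */
	static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
	{
		struct vm_area_struct *vma;
		unsigned long addr;
		int ret = 0;

		addr = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(addr))
			return ret;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, addr);
		if (vma && is_vm_hugetlb_page(vma))
			ret = 1;
		up_read(&current->mm->mmap_sem);

		return ret;
	}
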
static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
if (gpa == UNMAPPED_GVA)
return NULL;
- down_read(&current->mm->mmap_sem);
page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
- up_read(&current->mm->mmap_sem);
return page;
}
pfn_t pfn;
unsigned long mmu_seq;
- down_read(&current->mm->mmap_sem);
if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
gfn &= ~(KVM_PAGES_PER_HPAGE-1);
largepage = 1;
}
mmu_seq = vcpu->kvm->mmu_notifier_seq;
- /* implicit mb(), we'll read before PT lock is unlocked */
+ smp_rmb();
pfn = gfn_to_pfn(vcpu->kvm, gfn);
- up_read(&current->mm->mmap_sem);
/* mmio */
if (is_error_pfn(pfn)) {
if (r)
return r;
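
In each fault path the explicit smp_rmb() replaces the ordering that down_read(&current->mm->mmap_sem) used to provide for free: the mmu_notifier_seq sample must be read before gfn_to_pfn() walks anything, so that a racing invalidation is caught by the recheck done later under mmu_lock. A condensed sketch of the full pattern, assuming the mmu_notifier_retry() helper from this tree and an illustrative retry label:

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();	/* order the seq read before the pfn lookup */
	pfn = gfn_to_pfn(vcpu->kvm, gfn);	/* may sleep */

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq)) {
		/* an invalidation ran while we slept: drop and retry */
		spin_unlock(&vcpu->kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		goto retry;
	}
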
- down_read(&current->mm->mmap_sem);
if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
gfn &= ~(KVM_PAGES_PER_HPAGE-1);
largepage = 1;
}
mmu_seq = vcpu->kvm->mmu_notifier_seq;
- /* implicit mb(), we'll read before PT lock is unlocked */
+ smp_rmb();
pfn = gfn_to_pfn(vcpu->kvm, gfn);
- up_read(&current->mm->mmap_sem);
if (is_error_pfn(pfn)) {
kvm_release_pfn_clean(pfn);
return 1;
return;
gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
- down_read(&current->mm->mmap_sem);
if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
gfn &= ~(KVM_PAGES_PER_HPAGE-1);
vcpu->arch.update_pte.largepage = 1;
}
vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
- /* implicit mb(), we'll read before PT lock is unlocked */
+ smp_rmb();
pfn = gfn_to_pfn(vcpu->kvm, gfn);
- up_read(&current->mm->mmap_sem);
if (is_error_pfn(pfn)) {
kvm_release_pfn_clean(pfn);
pt_element_t *table;
struct page *page;
- down_read(&current->mm->mmap_sem);
page = gfn_to_page(kvm, table_gfn);
- up_read(&current->mm->mmap_sem);
table = kmap_atomic(page, KM_USER0);
-
ret = CMPXCHG(&table[index], orig_pte, new_pte);
-
kunmap_atomic(table, KM_USER0);
kvm_release_page_dirty(page);
return 0;
}
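
The guest-pte cmpxchg path gets the same unlocking treatment, and its stray blank lines go too. The sequence is worth spelling out: the page table page is pinned with gfn_to_page(), which may sleep and therefore must run before the atomic kmap; CMPXCHG then performs an atomic read-modify-write, so a concurrent guest update of the pte is detected by the comparison against orig_pte:

	page = gfn_to_page(kvm, table_gfn);	/* may sleep; no mmap_sem now */
	table = kmap_atomic(page, KM_USER0);	/* no sleeping past this point */
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table, KM_USER0);
	kvm_release_page_dirty(page);
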
- down_read(&current->mm->mmap_sem);
if (walker.level == PT_DIRECTORY_LEVEL) {
gfn_t large_gfn;
large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
}
}
mmu_seq = vcpu->kvm->mmu_notifier_seq;
- /* implicit mb(), we'll read before PT lock is unlocked */
+ smp_rmb();
pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
- up_read(&current->mm->mmap_sem);
/* mmio */
if (is_error_pfn(pfn)) {
if (r)
goto out;
- down_read(&current->mm->mmap_sem);
kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
- up_read(&current->mm->mmap_sem);
out:
up_write(&kvm->slots_lock);
return r;
if (r)
goto out;
- down_read(&current->mm->mmap_sem);
kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
VMX_EPT_IDENTITY_PAGETABLE_ADDR >> PAGE_SHIFT);
- up_read(&current->mm->mmap_sem);
out:
up_write(&kvm->slots_lock);
return r;
/* ...but clean it before doing the actual write */
vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
- down_read(&current->mm->mmap_sem);
vcpu->arch.time_page =
gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
- up_read(&current->mm->mmap_sem);
if (is_error_page(vcpu->arch.time_page)) {
kvm_release_page_clean(vcpu->arch.time_page);
val = *(u64 *)new;
- down_read(&current->mm->mmap_sem);
page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
- up_read(&current->mm->mmap_sem);
kaddr = kmap_atomic(page, KM_USER0);
set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
if (!apic || !apic->vapic_addr)
return;
- down_read(&current->mm->mmap_sem);
page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
- up_read(&current->mm->mmap_sem);
vcpu->arch.apic->vapic_page = page;
}
}
EXPORT_SYMBOL_GPL(gfn_to_hva);
-/*
- * Requires current->mm->mmap_sem to be held
- */
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
struct page *page[1];
return page_to_pfn(bad_page);
}
- npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
- NULL);
+ npages = get_user_pages_fast(addr, 1, 1, page);
if (unlikely(npages != 1)) {
struct vm_area_struct *vma;
+ down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, addr);
+
if (vma == NULL || addr < vma->vm_start ||
!(vma->vm_flags & VM_PFNMAP)) {
+ up_read(&current->mm->mmap_sem);
get_page(bad_page);
return page_to_pfn(bad_page);
}
pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+ up_read(&current->mm->mmap_sem);
BUG_ON(!is_mmio_pfn(pfn));
} else
pfn = page_to_pfn(page[0]);
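
The payoff is in gfn_to_pfn() itself: the stale "requires mmap_sem" comment is gone, get_user_pages_fast() pins the page without any semaphore on the common path, and only the failure path takes mmap_sem, strictly around the find_vma() inspection of VM_PFNMAP (mmio) mappings. Call sites shrink accordingly; a minimal sketch of the caller contract before and after (error handling varies per call site):

	/* Before this series, every caller had to bracket the lookup: */
	down_read(&current->mm->mmap_sem);
	pfn = gfn_to_pfn(kvm, gfn);
	up_read(&current->mm->mmap_sem);

	/* After it, the lookup is self-contained: */
	pfn = gfn_to_pfn(kvm, gfn);
	if (is_error_pfn(pfn))
		kvm_release_pfn_clean(pfn);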