 	return page;
 }
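+/*
+ * Stub: unlinking the children of a non-leaf shadow page is not
+ * implemented yet, so reaching this is a bug.  A follow-up change is
+ * expected to walk the page's sptes and drop each child reference.
+ */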
+static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
+					 struct kvm_mmu_page *page)
+{
+	BUG();
+}
+
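+/*
+ * Drop a single parent spte's reference to @page.  Freeing the page
+ * itself is left to kvm_mmu_zap_page(), once all parents are gone.
+ */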
 static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
 			     struct kvm_mmu_page *page,
 			     u64 *parent_pte)
 {
 	mmu_page_remove_parent_pte(page, parent_pte);
+}
+
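+/*
+ * Remove every reference to @page: clear each parent spte pointing at
+ * it (walking the pte chain when the page is multimapped), then unhash
+ * the page and return it to the vcpu's free list.
+ */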
+static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
+			     struct kvm_mmu_page *page)
+{
+	u64 *parent_pte;
+
+	while (page->multimapped || page->parent_pte) {
+		if (!page->multimapped)
+			parent_pte = page->parent_pte;
+		else {
+			struct kvm_pte_chain *chain;
+
+			chain = container_of(page->parent_ptes.first,
+					     struct kvm_pte_chain, link);
+			parent_pte = chain->parent_ptes[0];
+		}
+		kvm_mmu_put_page(vcpu, page, parent_pte);
+		*parent_pte = 0;
+	}
+	/*
+	 * All parent sptes are gone.  Unhash and free the page exactly
+	 * once here, rather than once per parent in kvm_mmu_put_page():
+	 * doing it inside the loop would call hlist_del() repeatedly on
+	 * a multimapped page, corrupting the hash chain.
+	 */
+	if (page->role.level > PT_PAGE_TABLE_LEVEL)
+		kvm_mmu_page_unlink_children(vcpu, page);
+	hlist_del(&page->hash_link);
+	list_move(&page->link, &vcpu->free_pages);
+}
+
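+/*
+ * Zap all shadow pages shadowing @gfn so the guest may write the page
+ * without further write tracing.  Returns 1 if at least one shadow
+ * page was found and zapped.
+ */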
+static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	unsigned index;
+	struct hlist_head *bucket;
+	struct kvm_mmu_page *page;
+	struct hlist_node *node, *n;
+	int r;
+
+	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
+	r = 0;
+	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+	bucket = &vcpu->kvm->mmu_page_hash[index];
+	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
+		if (page->gfn == gfn && !page->role.metaphysical) {
+			kvm_mmu_zap_page(vcpu, page);
+			r = 1;
+		}
+	return r;
 }
 static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
 {
 }
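+/*
+ * As above, but for callers that only have a guest virtual address
+ * (e.g. the instruction emulation path): translate through the current
+ * mmu context, then unprotect the containing page.
+ */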
+int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
+{
+	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+
+	return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
+}
+
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
 	while (!list_empty(&vcpu->free_pages)) {