When the dirty bit is set on a guest pte during a write fault, notify the mmu
of the pte update via kvm_mmu_pte_write(). Since the mmu uses different shadow
pages for dirty large pages and clean large pages, this allows the mmu to drop
shadow ptes that are now invalid.
Signed-off-by: Avi Kivity <avi@qumranet.com>
pt_element_t *ptep,
gfn_t table_gfn)
{
+ gpa_t pte_gpa;
+
if (write_fault && !is_dirty_pte(*ptep)) {
mark_page_dirty(vcpu->kvm, table_gfn);
*ptep |= PT_DIRTY_MASK;
+ pte_gpa = ((gpa_t)table_gfn << PAGE_SHIFT);
+ pte_gpa += offset_in_page(ptep);
+ kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)ptep, sizeof(*ptep));
}
}