KVM: MMU: Make setting shadow ptes atomic on i386
author Avi Kivity <avi@qumranet.com>
Thu, 31 May 2007 12:46:04 +0000 (15:46 +0300)
committer Avi Kivity <avi@qumranet.com>
Mon, 16 Jul 2007 09:05:44 +0000 (12:05 +0300)
Signed-off-by: Avi Kivity <avi@qumranet.com>
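
Background on the change (editorial note, not part of the commit): a shadow pte is a 64-bit quantity (u64) even on 32-bit hosts, and the hardware page-table walker reads the shadow tables concurrently with the VMM updating them. A plain C assignment to a u64 typically compiles to two 32-bit stores on i386, so the walker can observe a torn entry. The patch therefore routes every shadow pte update through a new set_shadow_pte() helper built on set_64bit(), which is a single 64-bit move on x86_64 and a cmpxchg8b-based store on i386. The sketch below is illustrative only: the helper names are made up, and the GCC __sync builtin (which needs an i586-class target for 8-byte operands) merely stands in for the kernel's set_64bit().

/*
 * Illustrative sketch (plain C with GCC builtins, not the kernel's
 * set_64bit()): why the shadow pte store must be atomic on i386, and
 * how a cmpxchg8b-style primitive fixes it.
 */

/* Hazard: on a 32-bit target this one C statement typically becomes
 * two 32-bit stores, so the hardware page-table walker can see a pte
 * whose low and high halves come from different values. */
static void torn_spte_store(volatile unsigned long long *sptep,
			    unsigned long long spte)
{
	*sptep = spte;
}

/* Fix: an 8-byte compare-and-exchange (what cmpxchg8b provides, and
 * what CONFIG_X86_CMPXCHG64 guarantees) publishes the new value in a
 * single atomic step, the same effect the kernel's set_64bit() has. */
static void atomic_spte_store(unsigned long long *sptep,
			      unsigned long long spte)
{
	unsigned long long old = *sptep;

	/* Retry until the whole 64-bit value is installed in one shot. */
	while (!__sync_bool_compare_and_swap(sptep, old, spte))
		old = *sptep;
}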
drivers/kvm/Kconfig
drivers/kvm/mmu.c
drivers/kvm/paging_tmpl.h

diff --git a/drivers/kvm/Kconfig b/drivers/kvm/Kconfig
index 2f661e5f0dae67f37209879e34bbf50546d4486b..33fa28a8c1993d5dd3cc258ec3896e41b0941cbb 100644
--- a/drivers/kvm/Kconfig
+++ b/drivers/kvm/Kconfig
@@ -11,6 +11,7 @@ if VIRTUALIZATION
 config KVM
        tristate "Kernel-based Virtual Machine (KVM) support"
        depends on X86 && EXPERIMENTAL
+       depends on X86_CMPXCHG64 || 64BIT
        ---help---
          Support hosting fully virtualized guest machines using hardware
          virtualization extensions.  You will need a fairly recent
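
The new Kconfig dependency reflects how the helper is implemented: on 32-bit x86, set_64bit() relies on the cmpxchg8b instruction, which is only guaranteed when CONFIG_X86_CMPXCHG64 is set (Pentium-class CPUs and later), while on 64BIT hosts an aligned 64-bit store is already atomic and the dependency is trivially satisfied.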
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 3cdbf687df25d8e57aff64ebab1fbeb07c91fbd3..f24b540148aa3def67b832e4dacbd3fefe6715e0 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -22,6 +22,7 @@
 #include <linux/mm.h>
 #include <linux/highmem.h>
 #include <linux/module.h>
+#include <asm/cmpxchg.h>
 
 #include "vmx.h"
 #include "kvm.h"
@@ -204,6 +205,15 @@ static int is_rmap_pte(u64 pte)
                == (PT_WRITABLE_MASK | PT_PRESENT_MASK);
 }
 
+static void set_shadow_pte(u64 *sptep, u64 spte)
+{
+#ifdef CONFIG_X86_64
+       set_64bit((unsigned long *)sptep, spte);
+#else
+       set_64bit((unsigned long long *)sptep, spte);
+#endif
+}
+
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  struct kmem_cache *base_cache, int min,
                                  gfp_t gfp_flags)
@@ -446,7 +456,7 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
                rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
                rmap_remove(vcpu, spte);
                kvm_arch_ops->tlb_flush(vcpu);
-               *spte &= ~(u64)PT_WRITABLE_MASK;
+               set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
        }
 }
 
@@ -699,7 +709,7 @@ static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
                }
                BUG_ON(!parent_pte);
                kvm_mmu_put_page(vcpu, page, parent_pte);
-               *parent_pte = 0;
+               set_shadow_pte(parent_pte, 0);
        }
        kvm_mmu_page_unlink_children(vcpu, page);
        if (!page->root_count) {
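
In mmu.c, the two places that previously updated sptes with plain assignments, write-protecting an entry in rmap_write_protect() and clearing a parent pte in kvm_mmu_zap_page(), now go through the helper, so the modified entry becomes visible in a single 64-bit store. The #ifdef inside set_shadow_pte() exists only to match the pointer type set_64bit() expects on each architecture (unsigned long * on x86_64, unsigned long long * on i386); the operation is the same on both.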
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 397a4039eaad34eba84ca2c432aa4b475dbe51c7..fabc2c9093cd825d9228f4c43f42b8f901527616 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -234,7 +234,7 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
                spte |= gaddr;
                spte |= PT_SHADOW_IO_MARK;
                spte &= ~PT_PRESENT_MASK;
-               *shadow_pte = spte;
+               set_shadow_pte(shadow_pte, spte);
                return;
        }
 
@@ -280,7 +280,7 @@ unshadowed:
        if (access_bits & PT_WRITABLE_MASK)
                mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);
 
-       *shadow_pte = spte;
+       set_shadow_pte(shadow_pte, spte);
        page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
        if (!was_rmapped)
                rmap_add(vcpu, shadow_pte);
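
In paging_tmpl.h, FNAME(set_pte_common)() likewise installs both the not-present pte used to mark an I/O mapping (PT_SHADOW_IO_MARK) and the final, fully populated shadow pte through set_shadow_pte(), closing the same torn-write window on the page-fault path.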