KVM: MMU: Split the main body of rmap_write_protect() off from others
author    Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
          Thu, 1 Mar 2012 10:31:22 +0000 (19:31 +0900)
committer Avi Kivity <avi@redhat.com>
          Sun, 8 Apr 2012 09:49:56 +0000 (12:49 +0300)
We will use this in the following patch to implement another function
which needs to write-protect pages using the rmap information; a rough
sketch of such a caller is given below.

Note that there is a small change in debug printing for large pages:
we do not differentiate them from others to avoid duplicating code.
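
For illustration, a later user of the helper might look roughly like
the sketch below.  This is only a hypothetical example, not the actual
follow-up patch: write_protect_gfn_range() and its signature are
invented here; only __rmap_write_protect() and __gfn_to_rmap() come
from this patch.

	/*
	 * Hypothetical caller (sketch only): write-protect a range of
	 * gfns in a slot by walking the rmap at every page-size level.
	 */
	static int write_protect_gfn_range(struct kvm *kvm,
					   struct kvm_memory_slot *slot,
					   gfn_t gfn, unsigned long npages)
	{
		unsigned long *rmapp;
		int i, write_protected = 0;

		for (; npages; --npages, ++gfn) {
			for (i = PT_PAGE_TABLE_LEVEL;
			     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
				rmapp = __gfn_to_rmap(gfn, i, slot);
				write_protected |=
					__rmap_write_protect(kvm, rmapp, i);
			}
		}

		return write_protected;
	}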

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4cb164268846302ac5acc122bc434246b4175605..c8b5694d1a48ab1c87c8f132381c5c817cb34c8e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1010,42 +1010,43 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
                rmap_remove(kvm, sptep);
 }
 
-int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
-                              struct kvm_memory_slot *slot)
+static int __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level)
 {
-       unsigned long *rmapp;
-       u64 *spte;
-       int i, write_protected = 0;
+       u64 *spte = NULL;
+       int write_protected = 0;
 
-       rmapp = __gfn_to_rmap(gfn, PT_PAGE_TABLE_LEVEL, slot);
-       spte = rmap_next(rmapp, NULL);
-       while (spte) {
+       while ((spte = rmap_next(rmapp, spte))) {
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
-               if (is_writable_pte(*spte)) {
+
+               if (!is_writable_pte(*spte))
+                       continue;
+
+               if (level == PT_PAGE_TABLE_LEVEL) {
                        mmu_spte_update(spte, *spte & ~PT_WRITABLE_MASK);
-                       write_protected = 1;
+               } else {
+                       BUG_ON(!is_large_pte(*spte));
+                       drop_spte(kvm, spte);
+                       --kvm->stat.lpages;
+                       spte = NULL;
                }
-               spte = rmap_next(rmapp, spte);
+
+               write_protected = 1;
        }
 
-       /* check for huge page mappings */
-       for (i = PT_DIRECTORY_LEVEL;
+       return write_protected;
+}
+
+int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
+                              struct kvm_memory_slot *slot)
+{
+       unsigned long *rmapp;
+       int i, write_protected = 0;
+
+       for (i = PT_PAGE_TABLE_LEVEL;
             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
                rmapp = __gfn_to_rmap(gfn, i, slot);
-               spte = rmap_next(rmapp, NULL);
-               while (spte) {
-                       BUG_ON(!(*spte & PT_PRESENT_MASK));
-                       BUG_ON(!is_large_pte(*spte));
-                       pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
-                       if (is_writable_pte(*spte)) {
-                               drop_spte(kvm, spte);
-                               --kvm->stat.lpages;
-                               spte = NULL;
-                               write_protected = 1;
-                       }
-                       spte = rmap_next(rmapp, spte);
-               }
+               write_protected |= __rmap_write_protect(kvm, rmapp, i);
        }
 
        return write_protected;
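
A note on the design of the split: at PT_PAGE_TABLE_LEVEL the helper
merely clears the writable bit with mmu_spte_update(), but for
large-page levels it drops the spte altogether and decrements
kvm->stat.lpages, since a writable large page has to be broken up to
write-protect its contents at 4K granularity.  Resetting spte to NULL
after drop_spte() restarts the rmap walk, because dropping the entry
mutates the pte list that rmap_next() is iterating over.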