KVM: Avoid checking huge page mappings in get_dirty_log()
authorTakuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Thu, 1 Mar 2012 10:32:16 +0000 (19:32 +0900)
committerAvi Kivity <avi@redhat.com>
Sun, 8 Apr 2012 09:49:58 +0000 (12:49 +0300)
Such mappings were dropped when we enabled dirty logging, and no new
ones will be created until we stop the logging.

For this we introduce a new function which can be used to write protect
a range of PT level pages: although we do not need to care about a range
of pages at this point, the following patch will need this feature to
optimize the write protection of many pages.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu.c
arch/x86/kvm/x86.c

index e216ba066e798cbd7be31892ac76afcc78055c0b..f624ca72ea245ce504af892e572756baff292646 100644 (file)
@@ -712,8 +712,9 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
-int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
-                              struct kvm_memory_slot *slot);
+void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
+                                    struct kvm_memory_slot *slot,
+                                    gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
index c8b5694d1a48ab1c87c8f132381c5c817cb34c8e..dc5f2459db6c1b85b3df682274e3a0c0c8f403d2 100644 (file)
@@ -1037,27 +1037,47 @@ static int __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level
        return write_protected;
 }
 
-int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
-                              struct kvm_memory_slot *slot)
+/**
+ * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
+ * @kvm: kvm instance
+ * @slot: slot to protect
+ * @gfn_offset: start of the BITS_PER_LONG pages we care about
+ * @mask: indicates which pages we should protect
+ *
+ * Used when we do not need to care about huge page mappings: e.g. during dirty
+ * logging we do not have any such mappings.
+ */
+void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
+                                    struct kvm_memory_slot *slot,
+                                    gfn_t gfn_offset, unsigned long mask)
 {
        unsigned long *rmapp;
-       int i, write_protected = 0;
 
-       for (i = PT_PAGE_TABLE_LEVEL;
-            i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
-               rmapp = __gfn_to_rmap(gfn, i, slot);
-               write_protected |= __rmap_write_protect(kvm, rmapp, i);
-       }
+       while (mask) {
+               rmapp = &slot->rmap[gfn_offset + __ffs(mask)];
+               __rmap_write_protect(kvm, rmapp, PT_PAGE_TABLE_LEVEL);
 
-       return write_protected;
+               /* clear the first set bit */
+               mask &= mask - 1;
+       }
 }
 
 static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 {
        struct kvm_memory_slot *slot;
+       unsigned long *rmapp;
+       int i;
+       int write_protected = 0;
 
        slot = gfn_to_memslot(kvm, gfn);
-       return kvm_mmu_rmap_write_protect(kvm, gfn, slot);
+
+       for (i = PT_PAGE_TABLE_LEVEL;
+            i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+               rmapp = __gfn_to_rmap(gfn, i, slot);
+               write_protected |= __rmap_write_protect(kvm, rmapp, i);
+       }
+
+       return write_protected;
 }
 
 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
index 99b738028fc0a0d726c39d85794b59c1941b095d..813ebf1e55a00fa02500a3b4ad7cbc5c5158185a 100644 (file)
@@ -3095,13 +3095,11 @@ static void write_protect_slot(struct kvm *kvm,
 
        /* Not many dirty pages compared to # of shadow pages. */
        if (nr_dirty_pages < kvm->arch.n_used_mmu_pages) {
-               unsigned long gfn_offset;
+               gfn_t offset;
 
-               for_each_set_bit(gfn_offset, dirty_bitmap, memslot->npages) {
-                       unsigned long gfn = memslot->base_gfn + gfn_offset;
+               for_each_set_bit(offset, dirty_bitmap, memslot->npages)
+                       kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, 1);
 
-                       kvm_mmu_rmap_write_protect(kvm, gfn, memslot);
-               }
                kvm_flush_remote_tlbs(kvm);
        } else
                kvm_mmu_slot_remove_write_access(kvm, memslot->id);