KVM: MMU: apply page track notifier
author Xiao Guangrong <guangrong.xiao@linux.intel.com>
Wed, 24 Feb 2016 09:51:16 +0000 (17:51 +0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Thu, 3 Mar 2016 13:36:24 +0000 (14:36 +0100)
Register the notifier to receive write-track events so that we can update
our shadow page table.

This makes kvm_mmu_pte_write() the write callback of the notifier; there is
no functional change.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu.c
arch/x86/kvm/x86.c

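For readers unfamiliar with the page-track notifier API introduced earlier in
this series, here is a minimal sketch of a consumer following the same pattern
the MMU adopts below in kvm_mmu_init_vm()/kvm_mmu_uninit_vm(). The example_*
names are hypothetical; only kvm_page_track_register_notifier(),
kvm_page_track_unregister_notifier(), and the track_write hook come from the
real API.

	#include <linux/kvm_host.h>
	#include <asm/kvm_page_track.h>

	/*
	 * Hypothetical consumer of the write-track notifier.  The node must
	 * stay alive for as long as it is registered, so in practice it is
	 * embedded in a per-VM structure (a static is used here for brevity).
	 */
	static void example_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
					const u8 *new, int bytes)
	{
		/*
		 * Called after the guest writes to a write-tracked gfn;
		 * react by invalidating whatever state was derived from
		 * that page.
		 */
	}

	static struct kvm_page_track_notifier_node example_tracker = {
		.track_write = example_track_write,
	};

	static void example_init(struct kvm *kvm)
	{
		kvm_page_track_register_notifier(kvm, &example_tracker);
	}

	static void example_uninit(struct kvm *kvm)
	{
		kvm_page_track_unregister_notifier(kvm, &example_tracker);
	}

The patch below wires the MMU into exactly this mechanism: kvm_mmu_pte_write()
becomes static and is installed as the track_write callback of
kvm->arch.mmu_sp_tracker, and the direct calls to it in x86.c are dropped in
favor of kvm_page_track_write().
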
index eb68e6aca0cf5e8eab2332706e573db5bcc99b73..1c3e390993a2b75167cd11e74040fab0dd841aae 100644 (file)
@@ -704,6 +704,7 @@ struct kvm_arch {
         */
        struct list_head active_mmu_pages;
        struct list_head zapped_obsolete_pages;
+       struct kvm_page_track_notifier_node mmu_sp_tracker;
        struct kvm_page_track_notifier_head track_notifier_head;
 
        struct list_head assigned_dev_head;
@@ -1001,6 +1002,8 @@ void kvm_mmu_module_exit(void);
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
 void kvm_mmu_setup(struct kvm_vcpu *vcpu);
+void kvm_mmu_init_vm(struct kvm *kvm);
+void kvm_mmu_uninit_vm(struct kvm *kvm);
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
                u64 dirty_mask, u64 nx_mask, u64 x_mask);
 
@@ -1140,8 +1143,6 @@ void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu);
 
-void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-                      const u8 *new, int bytes);
 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
index d6b264b492c90ca7aa1a11142ef311eefcc798bf..42ca0acc1c4e9ab93d928c47c69b7248b1db2801 100644 (file)
@@ -4302,8 +4302,8 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
        return spte;
 }
 
-void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-                      const u8 *new, int bytes)
+static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+                             const u8 *new, int bytes)
 {
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct kvm_mmu_page *sp;
@@ -4517,6 +4517,21 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu)
        init_kvm_mmu(vcpu);
 }
 
+void kvm_mmu_init_vm(struct kvm *kvm)
+{
+       struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
+
+       node->track_write = kvm_mmu_pte_write;
+       kvm_page_track_register_notifier(kvm, node);
+}
+
+void kvm_mmu_uninit_vm(struct kvm *kvm)
+{
+       struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
+
+       kvm_page_track_unregister_notifier(kvm, node);
+}
+
 /* The return value indicates if tlb flush on all vcpus is needed. */
 typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
 
index b81c14ef1e1d46f035b97334d99cea88a8346bdf..f4654e4150b05858134b5c7dc130fc7a6900b1b6 100644 (file)
@@ -4345,7 +4345,6 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
        ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
        if (ret < 0)
                return 0;
-       kvm_mmu_pte_write(vcpu, gpa, val, bytes);
        kvm_page_track_write(vcpu, gpa, val, bytes);
        return 1;
 }
@@ -4604,7 +4603,6 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
                return X86EMUL_CMPXCHG_FAILED;
 
        kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
-       kvm_mmu_pte_write(vcpu, gpa, new, bytes);
        kvm_page_track_write(vcpu, gpa, new, bytes);
 
        return X86EMUL_CONTINUE;
@@ -7727,6 +7725,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
 
        kvm_page_track_init(kvm);
+       kvm_mmu_init_vm(kvm);
 
        return 0;
 }
@@ -7854,6 +7853,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
        kfree(kvm->arch.vioapic);
        kvm_free_vcpus(kvm);
        kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
+       kvm_mmu_uninit_vm(kvm);
 }
 
 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,