kvm/page_track: call notifiers with kvm_page_track_notifier_node
author Jike Song <jike.song@intel.com>
Tue, 25 Oct 2016 07:50:42 +0000 (15:50 +0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 4 Nov 2016 11:13:20 +0000 (12:13 +0100)
The user of page_track might need extra information, so pass
the kvm_page_track_notifier_node to the callbacks.

Signed-off-by: Jike Song <jike.song@intel.com>
Reviewed-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
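
For context, not part of the patch: with its own node passed back in, a notifier
implementation can recover the structure it is embedded in via container_of().
A minimal sketch with hypothetical names (my_gpu_tracker, my_track_write):

#include <linux/kvm_host.h>
#include <asm/kvm_page_track.h>

/* Hypothetical consumer embedding the notifier node. */
struct my_gpu_tracker {
	struct kvm_page_track_notifier_node node;
	void *private_state;
};

static void my_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
			   int bytes, struct kvm_page_track_notifier_node *node)
{
	/* Recover the embedding structure from the node pointer. */
	struct my_gpu_tracker *t =
		container_of(node, struct my_gpu_tracker, node);

	/* Per-tracker state is reachable without any global lookup. */
	pr_debug("page_track write at %llx, %d bytes, state %p\n",
		 gpa, bytes, t->private_state);
}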
arch/x86/include/asm/kvm_page_track.h
arch/x86/kvm/mmu.c
arch/x86/kvm/page_track.c

index 5f66597c71c08a36e5b16a3609bed87adfaa578b..d74747b031ecd2e20dcf437944195a37e7c6bb3b 100644 (file)
@@ -29,17 +29,20 @@ struct kvm_page_track_notifier_node {
         * @gpa: the physical address written by guest.
         * @new: the data was written to the address.
         * @bytes: the written length.
+        * @node: this node
         */
        void (*track_write)(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
-                           int bytes);
+                           int bytes, struct kvm_page_track_notifier_node *node);
        /*
         * It is called when memory slot is being moved or removed
         * users can drop write-protection for the pages in that memory slot
         *
         * @kvm: the kvm where memory slot being moved or removed
         * @slot: the memory slot being moved or removed
+        * @node: this node
         */
-       void (*track_flush_slot)(struct kvm *kvm, struct kvm_memory_slot *slot);
+       void (*track_flush_slot)(struct kvm *kvm, struct kvm_memory_slot *slot,
+                           struct kvm_page_track_notifier_node *node);
 };
 
 void kvm_page_track_init(struct kvm *kvm);
index 8ac1fb858b2f7bc2ac72ef88bfb65abfde72614e..87c5880ba3b7d2c296e5cfda8a0c198e8efd7170 100644 (file)
@@ -4405,7 +4405,8 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
 }
 
 static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-                             const u8 *new, int bytes)
+                             const u8 *new, int bytes,
+                             struct kvm_page_track_notifier_node *node)
 {
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct kvm_mmu_page *sp;
@@ -4618,7 +4619,8 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu)
 }
 
 static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
-                       struct kvm_memory_slot *slot)
+                       struct kvm_memory_slot *slot,
+                       struct kvm_page_track_notifier_node *node)
 {
        kvm_mmu_invalidate_zap_all_pages(kvm);
 }
index e79bb256d17771eb700e4aeb87ea0ae8da0bbbf1..3dae0e3d6a583830beeeae63241756fc9133cb65 100644 (file)
@@ -222,7 +222,7 @@ void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
        idx = srcu_read_lock(&head->track_srcu);
        hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
                if (n->track_write)
-                       n->track_write(vcpu, gpa, new, bytes);
+                       n->track_write(vcpu, gpa, new, bytes, n);
        srcu_read_unlock(&head->track_srcu, idx);
 }
 
@@ -247,6 +247,6 @@ void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
        idx = srcu_read_lock(&head->track_srcu);
        hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
                if (n->track_flush_slot)
-                       n->track_flush_slot(kvm, slot);
+                       n->track_flush_slot(kvm, slot, n);
        srcu_read_unlock(&head->track_srcu, idx);
 }
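
For illustration only, reusing the hypothetical my_gpu_tracker above and
assuming the registration helper declared in kvm_page_track.h: the node is
registered once, and the same &tracker.node pointer is what
kvm_page_track_write() and kvm_page_track_flush_slot() now hand back to the
callbacks.

static struct my_gpu_tracker tracker = {
	.node.track_write	= my_track_write,
	/* .node.track_flush_slot can be wired up the same way. */
};

static void my_tracker_attach(struct kvm *kvm)
{
	/* Hypothetical setup: puts &tracker.node on kvm's notifier list. */
	kvm_page_track_register_notifier(kvm, &tracker.node);
}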