KVM: MMU: check kvm_mmu_pages and mmu_page_path indices
Author:     Xiao Guangrong <guangrong.xiao@linux.intel.com>
AuthorDate: Wed, 24 Feb 2016 08:46:06 +0000 (09:46 +0100)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Fri, 4 Mar 2016 11:35:24 +0000 (12:35 +0100)
Give a special invalid index to the root of the walk, so that we
can check the consistency of kvm_mmu_pages and mmu_page_path.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
[Extracted from a bigger patch proposed by Guangrong. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
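
For illustration, a standalone sketch of the idea (not kernel code; the struct
and function names below are invented for the example): the root of the walk is
tagged with a sentinel index, so a later pass over the page vector can verify
that entry 0 really is the root and that no child entry is ever handled as if
it were the root.

/*
 * Minimal sketch: tag the root entry with a sentinel index and check
 * the vector's consistency afterwards. Names are hypothetical.
 */
#include <assert.h>
#include <stdio.h>

#define INVALID_INDEX (-1)
#define MAX_PAGES 16

struct page_entry {
	int id;		/* stand-in for the shadow page */
	int idx;	/* index in the parent, or INVALID_INDEX for the root */
};

struct page_vec {
	unsigned nr;
	struct page_entry page[MAX_PAGES];
};

static void pages_add(struct page_vec *pvec, int id, int idx)
{
	pvec->page[pvec->nr].id = id;
	pvec->page[pvec->nr].idx = idx;
	pvec->nr++;
}

static void pages_check(const struct page_vec *pvec)
{
	unsigned i;

	if (pvec->nr == 0)
		return;

	/* Entry 0 must be the root: it carries the sentinel index. */
	assert(pvec->page[0].idx == INVALID_INDEX);

	/* Every other entry must record a real slot in its parent. */
	for (i = 1; i < pvec->nr; i++)
		assert(pvec->page[i].idx != INVALID_INDEX);
}

int main(void)
{
	struct page_vec pvec = { 0 };

	pages_add(&pvec, 100, INVALID_INDEX);	/* root of the walk */
	pages_add(&pvec, 101, 3);		/* child at parent slot 3 */
	pages_add(&pvec, 102, 7);		/* child at parent slot 7 */

	pages_check(&pvec);
	printf("page vector is consistent\n");
	return 0;
}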
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 609fa5322f6a7925e831f7ddc29595cf9af50e8a..0a4dc9b541810c58eb6c88bfead1ba45323fc894 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1870,6 +1870,8 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
        return nr_unsync_leaf;
 }
 
+#define INVALID_INDEX (-1)
+
 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
                           struct kvm_mmu_pages *pvec)
 {
@@ -1877,7 +1879,7 @@ static int mmu_unsync_walk(struct kvm_mmu_page *sp,
        if (!sp->unsync_children)
                return 0;
 
-       mmu_pages_add(pvec, sp, 0);
+       mmu_pages_add(pvec, sp, INVALID_INDEX);
        return __mmu_unsync_walk(sp, pvec);
 }
 
@@ -2026,6 +2028,8 @@ static int mmu_pages_first(struct kvm_mmu_pages *pvec,
        if (pvec->nr == 0)
                return 0;
 
+       WARN_ON(pvec->page[0].idx != INVALID_INDEX);
+
        sp = pvec->page[0].sp;
        level = sp->role.level;
        WARN_ON(level == PT_PAGE_TABLE_LEVEL);
@@ -2050,6 +2054,7 @@ static void mmu_pages_clear_parents(struct mmu_page_path *parents)
                if (!sp)
                        return;
 
+               WARN_ON(idx == INVALID_INDEX);
                clear_unsync_child_bit(sp, idx);
                level++;
        } while (!sp->unsync_children);