KVM: PPC: Book3S HV: Clear the key field of HPTE when the page is paged out
author Yongji Xie <xyjxie@linux.vnet.ibm.com>
Fri, 4 Nov 2016 05:55:11 +0000 (13:55 +0800)
committer Paul Mackerras <paulus@ozlabs.org>
Mon, 21 Nov 2016 04:17:55 +0000 (15:17 +1100)
Currently we mark an HPTE for emulated MMIO with the HPTE_V_ABSENT bit
set as well as key 0x1f. However, such an HPTE can conflict with the
HPTE for a real guest RAM page that also uses key 0x1f once that page
gets paged out.
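
A minimal user-space sketch of the ambiguity (not kernel code; the mask
values are copied from the powerpc mmu-hash / kvm_book3s_64 headers for
self-containment, and looks_like_mmio() is an illustrative helper, not a
kernel function):

#include <stdint.h>
#include <stdio.h>

/* Mask values copied from the arch/powerpc asm headers. */
#define HPTE_V_ABSENT   0x20ULL                  /* KVM software bit: HPTE dropped from hw table */
#define HPTE_R_KEY_HI   0x3000000000000000ULL    /* protection key, high 2 bits */
#define HPTE_R_KEY_LO   0x0000000000000e00ULL    /* protection key, low 3 bits */
#define HPTE_R_KEY      (HPTE_R_KEY_HI | HPTE_R_KEY_LO)

/* KVM's convention: "absent" HPTE with key 0x1f means emulated MMIO. */
static int looks_like_mmio(uint64_t v, uint64_t r)
{
	return (v & HPTE_V_ABSENT) && (r & HPTE_R_KEY) == HPTE_R_KEY;
}

int main(void)
{
	/* HPTE deliberately created as an emulated-MMIO marker. */
	uint64_t mmio_v = HPTE_V_ABSENT, mmio_r = HPTE_R_KEY;

	/* HPTE for a real RAM page whose guest key happens to be 0x1f, now paged out. */
	uint64_t ram_v = HPTE_V_ABSENT, ram_r = HPTE_R_KEY | 0x110ULL; /* R + M bits, illustrative */

	printf("MMIO marker:        %d\n", looks_like_mmio(mmio_v, mmio_r));  /* 1 */
	printf("paged-out RAM page: %d\n", looks_like_mmio(ram_v, ram_r));    /* 1 -- ambiguous */
	return 0;
}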

This patch clears the key field of the HPTE when the page is paged out,
then restores it when the HPTE is re-established.
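
The fault-path hunk below can be sanity-checked with a stand-alone sketch
(again user-space only, constants copied from the mmu-hash header): the
existing mask ~(HPTE_R_PP0 - psize) keeps HPTE_R_KEY_LO in the low bits but
drops HPTE_R_KEY_HI along with the RPN field, which is why the fix ORs
r & HPTE_R_KEY_HI back in explicitly when the HPTE is re-established:

#include <stdint.h>
#include <stdio.h>

/* Mask values copied from arch/powerpc/include/asm/book3s/64/mmu-hash.h. */
#define HPTE_R_PP0      0x8000000000000000ULL
#define HPTE_R_KEY_HI   0x3000000000000000ULL
#define HPTE_R_KEY_LO   0x0000000000000e00ULL

int main(void)
{
	uint64_t psize = 0x1000;		/* 4K base page size */
	uint64_t pfn   = 0x12345;		/* new host pfn, illustrative */
	/* Guest view of the PTE (rev->guest_rpte): key 0x1f plus pp bits. */
	uint64_t r = HPTE_R_KEY_HI | HPTE_R_KEY_LO | 0x3ULL;

	/* Before the patch: KEY_HI falls inside the range cleared by the mask. */
	uint64_t r_old = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << 12) & ~(psize - 1));

	/* After the patch: KEY_HI is carried over explicitly. */
	uint64_t r_new = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) |
					((pfn << 12) & ~(psize - 1));

	printf("old: KEY_HI %s, KEY_LO %s\n",
	       (r_old & HPTE_R_KEY_HI) ? "kept" : "lost",
	       (r_old & HPTE_R_KEY_LO) ? "kept" : "lost");	/* lost, kept */
	printf("new: KEY_HI %s, KEY_LO %s\n",
	       (r_new & HPTE_R_KEY_HI) ? "kept" : "lost",
	       (r_new & HPTE_R_KEY_LO) ? "kept" : "lost");	/* kept, kept */
	return 0;
}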

Signed-off-by: Yongji Xie <xyjxie@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_hv_rm_mmu.c

index 95abca69b1685da999b9e905aca1f12b01d5dae6..33a7d1f9167bbaee3dfdf2a94a04b0b23a4b2300 100644
@@ -575,7 +575,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
         */
        if (psize < PAGE_SIZE)
                psize = PAGE_SIZE;
-       r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1));
+       r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) |
+                                       ((pfn << PAGE_SHIFT) & ~(psize - 1));
        if (hpte_is_writable(r) && !write_ok)
                r = hpte_make_readonly(r);
        ret = RESUME_GUEST;
@@ -758,6 +759,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
                    hpte_rpn(ptel, psize) == gfn) {
                        hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
                        kvmppc_invalidate_hpte(kvm, hptep, i);
+                       hptep[1] &= ~cpu_to_be64(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
                        /* Harvest R and C */
                        rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
                        *rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
index 6b3d01b024d784d6c1955cac0c4c8b466b2c2509..e960c831fd15f4e2b53c1c77c2b1400e4c8cdc35 100644
@@ -265,8 +265,10 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 
        if (pa)
                pteh |= HPTE_V_VALID;
-       else
+       else {
                pteh |= HPTE_V_ABSENT;
+               ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
+       }
 
        /*If we had host pte mapping then  Check WIMG */
        if (ptep && !hpte_cache_flags_ok(ptel, is_ci)) {
@@ -352,6 +354,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                        /* inval in progress, write a non-present HPTE */
                        pteh |= HPTE_V_ABSENT;
                        pteh &= ~HPTE_V_VALID;
+                       ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
                        unlock_rmap(rmap);
                } else {
                        kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,