KVM: s390: use new mm defines instead of magic values
author Heiko Carstens <heiko.carstens@de.ibm.com>
Wed, 5 Jul 2017 05:37:14 +0000 (07:37 +0200)
committer Martin Schwidefsky <schwidefsky@de.ibm.com>
Wed, 26 Jul 2017 06:25:10 +0000 (08:25 +0200)
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: Janosch Frank <frankja@linux.vnet.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
arch/s390/kvm/diag.c
arch/s390/kvm/gaccess.c
arch/s390/kvm/priv.c
arch/s390/kvm/vsie.c
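
The magic numbers removed below are the page size and the amount of address space covered by one entry at each level of the s390 DAT hierarchy. As a minimal sketch, the mm defines used as replacements are expected to resolve to the values shown here; this is inferred from the shift counts (11/12, 20, 31, 42, 53) that the removed constants encode, while the authoritative definitions live in arch/s390/include/asm/page.h and arch/s390/include/asm/pgtable.h.

  /* Sketch only: expected values of the defines this patch relies on. */
  #define PAGE_SHIFT     12
  #define PAGE_SIZE      (1UL << PAGE_SHIFT)   /* 4 KiB page/frame */
  #define PAGE_MASK      (~(PAGE_SIZE - 1))
  #define _SEGMENT_SIZE  (1UL << 20)           /* 1 MiB mapped per segment table entry */
  #define _REGION3_SIZE  (1UL << 31)           /* 2 GiB mapped per region third table entry */
  #define _REGION2_SIZE  (1UL << 42)           /* 4 TiB mapped per region second table entry */
  #define _REGION1_SIZE  (1UL << 53)           /* 8 PiB mapped per region first table entry */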

arch/s390/kvm/diag.c
index ce865bd4f81d9696e7c9907d7bcaedede8f7cf43..e4d36094aceb53eed6a542c7c1f145870e9595f3 100644
@@ -27,7 +27,7 @@ static int diag_release_pages(struct kvm_vcpu *vcpu)
        unsigned long prefix  = kvm_s390_get_prefix(vcpu);
 
        start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
-       end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;
+       end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE;
        vcpu->stat.diagnose_10++;
 
        if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
@@ -51,9 +51,9 @@ static int diag_release_pages(struct kvm_vcpu *vcpu)
                 */
                gmap_discard(vcpu->arch.gmap, start, prefix);
                if (start <= prefix)
-                       gmap_discard(vcpu->arch.gmap, 0, 4096);
-               if (end > prefix + 4096)
-                       gmap_discard(vcpu->arch.gmap, 4096, 8192);
+                       gmap_discard(vcpu->arch.gmap, 0, PAGE_SIZE);
+               if (end > prefix + PAGE_SIZE)
+                       gmap_discard(vcpu->arch.gmap, PAGE_SIZE, 2 * PAGE_SIZE);
                gmap_discard(vcpu->arch.gmap, prefix + 2 * PAGE_SIZE, end);
        }
        return 0;
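
The validity check kept as context in the first hunk, `start & ~PAGE_MASK || end & ~PAGE_MASK`, relies on the same defines: `addr & ~PAGE_MASK` is the byte offset within a page, so a non-zero result means the address is not page aligned. A hypothetical helper spelling out the idiom (not part of this patch):

  /* Hypothetical helper, not kernel code: non-zero if addr is page aligned.
   * Note ~PAGE_MASK == PAGE_SIZE - 1, i.e. exactly the in-page offset bits. */
  static inline int addr_is_page_aligned(unsigned long addr)
  {
          return (addr & ~PAGE_MASK) == 0;
  }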
arch/s390/kvm/gaccess.c
index 653cae5e1ee1f97b6de07f665cf2b90ba8619d8e..3cc77391a10229cfd1eb1f3d375327005bdd0c33 100644
@@ -629,7 +629,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
        iep = ctlreg0.iep && test_kvm_facility(vcpu->kvm, 130);
        if (asce.r)
                goto real_address;
-       ptr = asce.origin * 4096;
+       ptr = asce.origin * PAGE_SIZE;
        switch (asce.dt) {
        case ASCE_TYPE_REGION1:
                if (vaddr.rfx01 > asce.tl)
@@ -674,7 +674,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
                        return PGM_REGION_SECOND_TRANS;
                if (edat1)
                        dat_protection |= rfte.p;
-               ptr = rfte.rto * 4096 + vaddr.rsx * 8;
+               ptr = rfte.rto * PAGE_SIZE + vaddr.rsx * 8;
        }
                /* fallthrough */
        case ASCE_TYPE_REGION2: {
@@ -692,7 +692,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
                        return PGM_REGION_THIRD_TRANS;
                if (edat1)
                        dat_protection |= rste.p;
-               ptr = rste.rto * 4096 + vaddr.rtx * 8;
+               ptr = rste.rto * PAGE_SIZE + vaddr.rtx * 8;
        }
                /* fallthrough */
        case ASCE_TYPE_REGION3: {
@@ -720,7 +720,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
                        return PGM_SEGMENT_TRANSLATION;
                if (edat1)
                        dat_protection |= rtte.fc0.p;
-               ptr = rtte.fc0.sto * 4096 + vaddr.sx * 8;
+               ptr = rtte.fc0.sto * PAGE_SIZE + vaddr.sx * 8;
        }
                /* fallthrough */
        case ASCE_TYPE_SEGMENT: {
@@ -743,7 +743,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
                        goto absolute_address;
                }
                dat_protection |= ste.fc0.p;
-               ptr = ste.fc0.pto * 2048 + vaddr.px * 8;
+               ptr = ste.fc0.pto * (PAGE_SIZE / 2) + vaddr.px * 8;
        }
        }
        if (kvm_is_error_gpa(vcpu->kvm, ptr))
@@ -993,7 +993,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
        parent = sg->parent;
        vaddr.addr = saddr;
        asce.val = sg->orig_asce;
-       ptr = asce.origin * 4096;
+       ptr = asce.origin * PAGE_SIZE;
        if (asce.r) {
                *fake = 1;
                ptr = 0;
@@ -1029,7 +1029,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
                union region1_table_entry rfte;
 
                if (*fake) {
-                       ptr += (unsigned long) vaddr.rfx << 53;
+                       ptr += vaddr.rfx * _REGION1_SIZE;
                        rfte.val = ptr;
                        goto shadow_r2t;
                }
@@ -1044,7 +1044,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
                        return PGM_REGION_SECOND_TRANS;
                if (sg->edat_level >= 1)
                        *dat_protection |= rfte.p;
-               ptr = rfte.rto << 12UL;
+               ptr = rfte.rto * PAGE_SIZE;
 shadow_r2t:
                rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake);
                if (rc)
@@ -1055,7 +1055,7 @@ shadow_r2t:
                union region2_table_entry rste;
 
                if (*fake) {
-                       ptr += (unsigned long) vaddr.rsx << 42;
+                       ptr += vaddr.rsx * _REGION2_SIZE;
                        rste.val = ptr;
                        goto shadow_r3t;
                }
@@ -1070,7 +1070,7 @@ shadow_r2t:
                        return PGM_REGION_THIRD_TRANS;
                if (sg->edat_level >= 1)
                        *dat_protection |= rste.p;
-               ptr = rste.rto << 12UL;
+               ptr = rste.rto * PAGE_SIZE;
 shadow_r3t:
                rste.p |= *dat_protection;
                rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake);
@@ -1082,7 +1082,7 @@ shadow_r3t:
                union region3_table_entry rtte;
 
                if (*fake) {
-                       ptr += (unsigned long) vaddr.rtx << 31;
+                       ptr += vaddr.rtx * _REGION3_SIZE;
                        rtte.val = ptr;
                        goto shadow_sgt;
                }
@@ -1098,7 +1098,7 @@ shadow_r3t:
                if (rtte.fc && sg->edat_level >= 2) {
                        *dat_protection |= rtte.fc0.p;
                        *fake = 1;
-                       ptr = rtte.fc1.rfaa << 31UL;
+                       ptr = rtte.fc1.rfaa * _REGION3_SIZE;
                        rtte.val = ptr;
                        goto shadow_sgt;
                }
@@ -1106,7 +1106,7 @@ shadow_r3t:
                        return PGM_SEGMENT_TRANSLATION;
                if (sg->edat_level >= 1)
                        *dat_protection |= rtte.fc0.p;
-               ptr = rtte.fc0.sto << 12UL;
+               ptr = rtte.fc0.sto * PAGE_SIZE;
 shadow_sgt:
                rtte.fc0.p |= *dat_protection;
                rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake);
@@ -1118,7 +1118,7 @@ shadow_sgt:
                union segment_table_entry ste;
 
                if (*fake) {
-                       ptr += (unsigned long) vaddr.sx << 20;
+                       ptr += vaddr.sx * _SEGMENT_SIZE;
                        ste.val = ptr;
                        goto shadow_pgt;
                }
@@ -1134,11 +1134,11 @@ shadow_sgt:
                *dat_protection |= ste.fc0.p;
                if (ste.fc && sg->edat_level >= 1) {
                        *fake = 1;
-                       ptr = ste.fc1.sfaa << 20UL;
+                       ptr = ste.fc1.sfaa * _SEGMENT_SIZE;
                        ste.val = ptr;
                        goto shadow_pgt;
                }
-               ptr = ste.fc0.pto << 11UL;
+               ptr = ste.fc0.pto * (PAGE_SIZE / 2);
 shadow_pgt:
                ste.fc0.p |= *dat_protection;
                rc = gmap_shadow_pgt(sg, saddr, ste.val, *fake);
@@ -1187,8 +1187,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
 
        vaddr.addr = saddr;
        if (fake) {
-               /* offset in 1MB guest memory block */
-               pte.val = pgt + ((unsigned long) vaddr.px << 12UL);
+               pte.val = pgt + vaddr.px * PAGE_SIZE;
                goto shadow_page;
        }
        if (!rc)
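
Two patterns recur in the gaccess.c hunks. The non-fake paths compute `table origin * PAGE_SIZE + index * 8`: the origin fields hold 4 KiB frame numbers and each DAT table entry is 8 bytes wide, so this is the byte address of the selected entry (the page table origin is scaled by PAGE_SIZE / 2 because an s390 page table is 2 KiB). The fake, large-frame paths have no real lower-level table, so the pointer instead advances by the amount of address space one entry would map. A minimal sketch of both computations, with hypothetical names:

  /* Hypothetical sketch, not kernel code. */
  static unsigned long dat_entry_address(unsigned long origin_pfn, unsigned long index)
  {
          /* origin is a 4 KiB frame number, each entry is 8 bytes wide */
          return origin_pfn * PAGE_SIZE + index * 8;
  }

  static unsigned long fake_table_entry(unsigned long base, unsigned long index,
                                        unsigned long bytes_per_entry)
  {
          /* e.g. bytes_per_entry == _SEGMENT_SIZE for a fake segment table */
          return base + index * bytes_per_entry;
  }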
arch/s390/kvm/priv.c
index 8a1dac793d6b0ad0685ffd7a35743ca511274035..785ad028bde602de4699d987d1a4c7552211ba39 100644
@@ -329,7 +329,7 @@ static int handle_sske(struct kvm_vcpu *vcpu)
        start = kvm_s390_logical_to_effective(vcpu, start);
        if (m3 & SSKE_MB) {
                /* start already designates an absolute address */
-               end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
+               end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
        } else {
                start = kvm_s390_real_to_abs(vcpu, start);
                end = start + PAGE_SIZE;
@@ -893,10 +893,10 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
        case 0x00000000:
                /* only 4k frames specify a real address */
                start = kvm_s390_real_to_abs(vcpu, start);
-               end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
+               end = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
                break;
        case 0x00001000:
-               end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
+               end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
                break;
        case 0x00002000:
                /* only support 2G frame size if EDAT2 is available and we are
@@ -904,7 +904,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
                if (!test_kvm_facility(vcpu->kvm, 78) ||
                    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
                        return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-               end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
+               end = (start + _REGION3_SIZE) & ~(_REGION3_SIZE - 1);
                break;
        default:
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
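
In the SSKE and PFMF hunks the expression `(start + size) & ~(size - 1)` rounds up to the end of the naturally aligned frame containing start; this only works because PAGE_SIZE, _SEGMENT_SIZE and _REGION3_SIZE are powers of two. A small sketch with a hypothetical helper name:

  /* Hypothetical helper: address one past the aligned frame containing addr.
   * frame_size must be a power of two. */
  static unsigned long frame_end(unsigned long addr, unsigned long frame_size)
  {
          return (addr + frame_size) & ~(frame_size - 1);
  }

  /* frame_end(0x12345, PAGE_SIZE)     == 0x13000
   * frame_end(0x13000, PAGE_SIZE)     == 0x14000
   * frame_end(0x12345, _SEGMENT_SIZE) == 0x100000 */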
arch/s390/kvm/vsie.c
index 715c19c45d9adbf565c28839d6f9d45cdb627b15..ba8203e4d516da2ec13c10d45beb1f1744580243 100644
@@ -1069,7 +1069,7 @@ int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
-       BUILD_BUG_ON(sizeof(struct vsie_page) != 4096);
+       BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE);
        scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);
 
        /* 512 byte alignment */
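
BUILD_BUG_ON() breaks the build when its condition is true, so the check above now pins struct vsie_page to exactly one page via PAGE_SIZE instead of a literal 4096. Outside the kernel the same guarantee can be written as a C11 static assertion; a minimal sketch with a stand-in structure:

  /* Sketch only: stand-in for struct vsie_page plus an equivalent compile-time check. */
  #include <assert.h>

  #define PAGE_SIZE 4096UL

  struct vsie_page_stub { unsigned char data[PAGE_SIZE]; };

  static_assert(sizeof(struct vsie_page_stub) == PAGE_SIZE,
                "vsie_page must occupy exactly one page");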