KVM: s390: vsie: support guest-storage-limit-suppression
author David Hildenbrand <dahi@linux.vnet.ibm.com>
Tue, 24 Nov 2015 15:41:33 +0000 (16:41 +0100)
committer Christian Borntraeger <borntraeger@de.ibm.com>
Tue, 21 Jun 2016 07:43:41 +0000 (09:43 +0200)
We can easily forward guest-storage-limit-suppression if available.

One thing to care about is keeping the prefix properly mapped when
gsls is toggled on/off or the mso changes in between. Therefore we
remap the prefix on any mso change, just like we already do when the
prefix itself changes.

Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
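
The remapping argument boils down to a bit of address arithmetic. A rough
sketch, not part of the patch (prefix_g2_addr() and prefix_needs_remap() are
made-up names, and the encoding of the real SCB prefix field is ignored):

    /*
     * Illustration only. Without gsls, guest-3 storage is placed at
     * offset mso inside the guest-2 address space, so the guest-3
     * prefix area sits at mso + prefix there; with gsls forwarded
     * (CPUSTAT_SM set in the shadow cpuflags) no such offset applies,
     * which is equivalent to an mso of 0. The host mapping shadowing
     * the prefix therefore depends on both values, and toggling gsls
     * or changing the mso makes the old shadow mapping stale.
     */
    static unsigned long prefix_g2_addr(unsigned long mso, unsigned long prefix)
    {
            return mso + prefix;
    }

    static bool prefix_needs_remap(unsigned long old_mso, unsigned long new_mso,
                                   unsigned long old_prefix, unsigned long new_prefix)
    {
            /* be conservative: remap whenever either component changed */
            return old_mso != new_mso || old_prefix != new_prefix;
    }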
arch/s390/include/uapi/asm/kvm.h
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/vsie.c

diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 98526ac114bfd7b19d8ae135ef67a61524b40c64..9ed07479714f01b13c5b65b9240afc01b1cc54de 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -102,6 +102,7 @@ struct kvm_s390_vm_cpu_machine {
 #define KVM_S390_VM_CPU_FEAT_64BSCAO   2
 #define KVM_S390_VM_CPU_FEAT_SIIF      3
 #define KVM_S390_VM_CPU_FEAT_GPERE     4
+#define KVM_S390_VM_CPU_FEAT_GSLS      5
 struct kvm_s390_vm_cpu_feat {
        __u64 feat[16];
 };
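
For reference, this is roughly how userspace would test the new bit once it
has fetched the feat[] bitmap through the CPU-model attribute interface. A
minimal sketch, assuming the usual s390 MSB-0 (big-endian) bit numbering over
the byte representation of feat[]; cpu_feat_set() is a made-up helper, and the
authoritative layout is whatever the KVM API documentation specifies:

    /*
     * Sketch only: test a KVM_S390_VM_CPU_FEAT_* number against the
     * feat[] array of struct kvm_s390_vm_cpu_feat shown above,
     * assuming MSB-0 bit numbering (feature 0 is the top bit of the
     * first byte). KVM_S390_VM_CPU_FEAT_GSLS (5) would thus be mask
     * 0x04 of byte 0.
     */
    static bool cpu_feat_set(const struct kvm_s390_vm_cpu_feat *feat,
                             unsigned int nr)
    {
            const unsigned char *bytes = (const unsigned char *)feat->feat;

            return bytes[nr / 8] & (0x80 >> (nr % 8));
    }

    /* e.g.: if (cpu_feat_set(&machine_feat, KVM_S390_VM_CPU_FEAT_GSLS)) ... */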
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 175752877c0d73e79e3aae2c0b03b954b17132fa..ce9813afd5028086e797c4e58a06c3e1b2e8bab2 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -271,6 +271,8 @@ static void kvm_s390_cpu_feat_init(void)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
        if (sclp.has_gpere)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
+       if (sclp.has_gsls)
+               allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
 }
 
 int kvm_arch_init(void *opaque)
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index b8792ef0103077dc9518d1ed6bcc4f0934dd9e43..ea65bf2f02011c57334e3da1fc8a644166547336 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -109,6 +109,8 @@ static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
        }
        if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
                newflags |= cpuflags & CPUSTAT_P;
+       if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
+               newflags |= cpuflags & CPUSTAT_SM;
 
        atomic_set(&scb_s->cpuflags, newflags);
        return 0;
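
The GSLS line follows the same forwarding pattern as the GPERE line above it:
a cpuflag requested by the guest-2 hypervisor only makes it into the shadow
control block if the matching CPU feature is available to the VM; otherwise
the bit simply isn't set for the nested guest. A generic sketch of that guard
(illustration only, forward_cpuflag() is not a real helper):

    /*
     * Sketch of the guard used in prepare_cpuflags(): forward a
     * guest-2 controlled cpuflag into the shadow SCB only if the
     * matching KVM_S390_VM_CPU_FEAT_* bit was enabled for the VM.
     */
    static int forward_cpuflag(struct kvm *kvm, int guest_cpuflags,
                               int newflags, unsigned long feat_nr, int flag)
    {
            if (test_kvm_cpu_feat(kvm, feat_nr))
                    newflags |= guest_cpuflags & flag;
            return newflags;
    }

    /*
     * e.g.: newflags = forward_cpuflag(vcpu->kvm, cpuflags, newflags,
     *                                  KVM_S390_VM_CPU_FEAT_GSLS, CPUSTAT_SM);
     */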
@@ -242,7 +244,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
        struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
        struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
        bool had_tx = scb_s->ecb & 0x10U;
-       unsigned long new_mso;
+       unsigned long new_mso = 0;
        int rc;
 
        /* make sure we don't have any leftovers when reusing the scb */
@@ -284,7 +286,8 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
        scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
        scb_s->icpua = scb_o->icpua;
 
-       new_mso = scb_o->mso & 0xfffffffffff00000UL;
+       if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
+               new_mso = scb_o->mso & 0xfffffffffff00000UL;
        /* if the hva of the prefix changes, we have to remap the prefix */
        if (scb_s->mso != new_mso || scb_s->prefix != scb_o->prefix)
                prefix_unmapped(vsie_page);
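
Two details of the shadow_scb() change are worth spelling out: new_mso is now
initialized to 0 because its assignment became conditional, so with CPUSTAT_SM
forwarded the shadow mso is deterministically 0 rather than a leftover stack
value, and the mask in the other branch keeps the origin 1 MB aligned. A
hedged restatement (shadow_mso() is a made-up name):

    /*
     * Illustration only, mirroring the hunk above: with gsls
     * forwarded (CPUSTAT_SM set) no storage offset applies, so the
     * shadow mso stays 0; otherwise the guest-2 supplied mso is
     * taken, masked to 1 MB alignment. The mask clears the low
     * 20 bits, e.g.:
     *
     *   0x0000000120134567 & 0xfffffffffff00000 = 0x0000000120100000
     */
    static unsigned long shadow_mso(int shadow_cpuflags, unsigned long mso_o)
    {
            if (shadow_cpuflags & CPUSTAT_SM)
                    return 0;
            return mso_o & 0xfffffffffff00000UL;
    }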