	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
+		/* enable CMMA only for z10 and later (EDAT_1) */
+		ret = -EINVAL;
+		if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
+			break;
+
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);
-	if (kvm_s390_cmma_enabled(vcpu->kvm))
+	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));
}
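
For reference, after this change enabling CMMA is purely a userspace decision: the VMM flips the KVM_S390_VM_MEM_ENABLE_CMMA attribute on the VM file descriptor before creating any VCPU. A minimal sketch of that call, assuming an already-created vm_fd (a placeholder name) and eliding error handling; the constants come from <linux/kvm.h> on s390:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: request CMMA for this VM. Must run before the first
 * KVM_CREATE_VCPU or the -EBUSY path above fires; on non-LPAR or
 * pre-z10 (no EDAT-1) machines the new -EINVAL path fires. */
static int enable_cmma(int vm_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_MEM_CTRL,
		.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
	};

	if (ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr))
		return -1;	/* kernel does not offer the attribute */
	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}
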
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
-	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
+	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
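
kvm_s390_vcpu_setup_cmma() and kvm_s390_vcpu_unsetup_cmma() are not part of this diff; roughly, they manage the per-VCPU collection buffer (CBRLO) that the hardware ESSA interpretation logs released page frames into. A sketch of their shape in kernels of this vintage (kernel context assumed; 0x80 is the CMMA-interpretation bit in the SIE ECB2 field):

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;
	vcpu->arch.sie_block->ecb2 |= 0x80;	/* interpret ESSA in hardware */
	return 0;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}
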
-bool kvm_s390_cmma_enabled(struct kvm *kvm)
-{
-	if (!MACHINE_IS_LPAR)
-		return false;
-	/* only enable for z10 and later */
-	if (!MACHINE_HAS_EDAT1)
-		return false;
-	if (!kvm->arch.use_cmma)
-		return false;
-	return true;
-}
-
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
-/* is cmma enabled */
-bool kvm_s390_cmma_enabled(struct kvm *kvm);
unsigned long kvm_s390_fac_list_mask_size(void);
extern unsigned long kvm_s390_fac_list_mask[];

	VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
-	if (!kvm_s390_cmma_enabled(vcpu->kvm))
+	if (!vcpu->kvm->arch.use_cmma)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
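
The intercept above exists because the guest drives CMMA state transitions with the ESSA instruction; once use_cmma is the only gate, a guest issuing ESSA against a VM that never enabled the attribute gets an operation exception. A guest-side sketch, modeled on the guest kernel's arch/s390/mm/page-states.c (ESSA is opcode 0xb9ab, emitted via .insn):

#define ESSA_SET_STABLE		1
#define ESSA_SET_UNUSED		2

/* Tell the host the content of this page frame is disposable. */
static inline void set_page_unused(unsigned long paddr)
{
	int rc;

	asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
		     : "=&d" (rc)
		     : "a" (paddr), "i" (ESSA_SET_UNUSED));
}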