int ipte_lock_held(struct kvm_vcpu *vcpu)
{
- union ipte_control *ic = kvm_s390_get_ipte_control(vcpu->kvm);
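+ /* SIIF in use (eca bit 0): the keep bit lives in the shared (E)SCA, so
+  * read it under sca_lock - the SCA may be switched concurrently. */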
+ if (vcpu->arch.sie_block->eca & 1) {
+ int rc;
- if (vcpu->arch.sie_block->eca & 1)
- return ic->kh != 0;
+ read_lock(&vcpu->kvm->arch.sca_lock);
+ rc = kvm_s390_get_ipte_control(vcpu->kvm)->kh != 0;
+ read_unlock(&vcpu->kvm->arch.sca_lock);
+ return rc;
+ }
return vcpu->kvm->arch.ipte_lock_count != 0;
}
vcpu->kvm->arch.ipte_lock_count++;
if (vcpu->kvm->arch.ipte_lock_count > 1)
goto out;
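+ /*
+  * Drop sca_lock before rescheduling and start over: a concurrent switch
+  * to the ESCA may have replaced the ipte control block in the meantime.
+  */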
+retry:
+ read_lock(&vcpu->kvm->arch.sca_lock);
ic = kvm_s390_get_ipte_control(vcpu->kvm);
do {
old = READ_ONCE(*ic);
- while (old.k) {
+ if (old.k) {
+ read_unlock(&vcpu->kvm->arch.sca_lock);
cond_resched();
- old = READ_ONCE(*ic);
+ goto retry;
}
new = old;
new.k = 1;
} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+ read_unlock(&vcpu->kvm->arch.sca_lock);
out:
mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
}
vcpu->kvm->arch.ipte_lock_count--;
if (vcpu->kvm->arch.ipte_lock_count)
goto out;
+ read_lock(&vcpu->kvm->arch.sca_lock);
ic = kvm_s390_get_ipte_control(vcpu->kvm);
do {
old = READ_ONCE(*ic);
new = old;
new.k = 0;
} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+ read_unlock(&vcpu->kvm->arch.sca_lock);
wake_up(&vcpu->kvm->arch.ipte_wq);
out:
mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
static void ipte_lock_siif(struct kvm_vcpu *vcpu)
{
union ipte_control old, new, *ic;
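+ /* same retry pattern as above: never cond_resched() under sca_lock */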
+retry:
+ read_lock(&vcpu->kvm->arch.sca_lock);
ic = kvm_s390_get_ipte_control(vcpu->kvm);
do {
old = READ_ONCE(*ic);
- while (old.kg) {
+ if (old.kg) {
+ read_unlock(&vcpu->kvm->arch.sca_lock);
cond_resched();
- old = READ_ONCE(*ic);
+ goto retry;
}
new = old;
new.k = 1;
new.kh++;
} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+ read_unlock(&vcpu->kvm->arch.sca_lock);
}
static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
{
union ipte_control old, new, *ic;
+ read_lock(&vcpu->kvm->arch.sca_lock);
ic = kvm_s390_get_ipte_control(vcpu->kvm);
do {
old = READ_ONCE(*ic);
new = old;
new.kh--;
if (!new.kh)
new.k = 0;
} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+ read_unlock(&vcpu->kvm->arch.sca_lock);
if (!new.kh)
wake_up(&vcpu->kvm->arch.ipte_wq);
}
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
int c, scn;
+ read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
union esca_sigp_ctrl sigp_ctrl =
sca->cpu[vcpu->vcpu_id].sigp_ctrl;
c = sigp_ctrl.c;
scn = sigp_ctrl.scn;
}
+ read_unlock(&vcpu->kvm->arch.sca_lock);
if (src_id)
*src_id = scn;
static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
int expect, rc;
+ read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
union esca_sigp_ctrl *sigp_ctrl =
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
new_val.scn = src_id;
new_val.c = 1;
old_val.c = 0;
expect = old_val.value;
rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
}
+ read_unlock(&vcpu->kvm->arch.sca_lock);
if (rc != expect) {
/* another external call is pending */
static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
int rc, expect;
atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
+ read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
union esca_sigp_ctrl *sigp_ctrl =
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
union esca_sigp_ctrl old = *sigp_ctrl;
expect = old.value;
rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
}
+ read_unlock(&vcpu->kvm->arch.sca_lock);
WARN_ON(rc != expect); /* cannot clear? */
}
static void sca_dispose(struct kvm *kvm)
{
if (kvm->arch.use_esca)
- BUG(); /* not implemented yet */
+ free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
else
free_page((unsigned long)(kvm->arch.sca));
kvm->arch.sca = NULL;
rc = -ENOMEM;
kvm->arch.use_esca = 0; /* start with basic SCA */
+ rwlock_init(&kvm->arch.sca_lock);
kvm->arch.sca = (struct bsca_block *) get_zeroed_page(GFP_KERNEL);
if (!kvm->arch.sca)
goto out_err;
static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
+ read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
if (sca->cpu[vcpu->vcpu_id].sda == (__u64) vcpu->arch.sie_block)
sca->cpu[vcpu->vcpu_id].sda = 0;
}
+ read_unlock(&vcpu->kvm->arch.sca_lock);
}
static void sca_add_vcpu(struct kvm_vcpu *vcpu, struct kvm *kvm,
unsigned int id)
{
+ read_lock(&kvm->arch.sca_lock);
if (kvm->arch.use_esca) {
struct esca_block *sca = kvm->arch.sca;
vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
set_bit_inv(id, (unsigned long *) &sca->mcn);
}
+ read_unlock(&kvm->arch.sca_lock);
+}
+
+/* Basic SCA to Extended SCA data copy routines */
+static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
+{
+ d->sda = s->sda;
+ d->sigp_ctrl.c = s->sigp_ctrl.c;
+ d->sigp_ctrl.scn = s->sigp_ctrl.scn;
+}
+
+static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
+{
+ int i;
+
+ d->ipte_control = s->ipte_control;
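+ /* the basic SCA tracks at most 64 CPUs; their mcn bits all fit into mcn[0] */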
+ d->mcn[0] = s->mcn;
+ for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
+ sca_copy_entry(&d->cpu[i], &s->cpu[i]);
+}
+
+static int sca_switch_to_extended(struct kvm *kvm)
+{
+ struct bsca_block *old_sca = kvm->arch.sca;
+ struct esca_block *new_sca;
+ struct kvm_vcpu *vcpu;
+ unsigned int vcpu_idx;
+ u32 scaol, scaoh;
+
+ new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
+ if (!new_sca)
+ return -ENOMEM;
+
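+ /* SIE takes the 64-byte aligned SCA origin as two 32-bit halves */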
+ scaoh = (u32)((u64)(new_sca) >> 32);
+ scaol = (u32)(u64)(new_sca) & ~0x3fU;
+
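+ /*
+  * Block all VCPUs (no SIE entry) and take sca_lock as writer, so neither
+  * the hardware nor a lockless reader can see a half-switched SCA.
+  */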
+ kvm_s390_vcpu_block_all(kvm);
+ write_lock(&kvm->arch.sca_lock);
+
+ sca_copy_b_to_e(new_sca, old_sca);
+
+ kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
+ vcpu->arch.sie_block->scaoh = scaoh;
+ vcpu->arch.sie_block->scaol = scaol;
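+ /* ecb2 bit 0x04 tells SIE to interpret the SCA as an ESCA */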
+ vcpu->arch.sie_block->ecb2 |= 0x04U;
+ }
+ kvm->arch.sca = new_sca;
+ kvm->arch.use_esca = 1;
+
+ write_unlock(&kvm->arch.sca_lock);
+ kvm_s390_vcpu_unblock_all(kvm);
+
+ free_page((unsigned long)old_sca);
+
+ VM_EVENT(kvm, 2, "Switched to ESCA (%p -> %p)", old_sca, kvm->arch.sca);
+ return 0;
}
static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
- return id < KVM_MAX_VCPUS;
+ int rc;
+
+ if (id < KVM_S390_BSCA_CPU_SLOTS)
+ return true;
+ if (!sclp.has_esca)
+ return false;
+
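+ /* switch to the ESCA on first demand, serialized by kvm->lock */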
+ mutex_lock(&kvm->lock);
+ rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
+ mutex_unlock(&kvm->lock);
+
+ return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)