#include <asm/fpu/api.h>
#include <asm/isc.h>
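+/*
+ * A basic SCA (bsca) provides 64 CPU entries, an extended SCA (esca)
+ * provides 248; KVM_MAX_VCPUS stays at the basic limit for now.
+ */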
-#define KVM_MAX_VCPUS 64
+#define KVM_S390_BSCA_CPU_SLOTS 64
+#define KVM_S390_ESCA_CPU_SLOTS 248
+#define KVM_MAX_VCPUS KVM_S390_BSCA_CPU_SLOTS
#define KVM_USER_MEM_SLOTS 32
#define SIGP_CTRL_C 0x80
#define SIGP_CTRL_SCN_MASK 0x3f
-struct sca_entry {
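+/*
+ * SIGP control formats: in the basic SCA, the external-call-pending bit
+ * (c) and a 6-bit source CPU number (scn) share a single byte, matching
+ * SIGP_CTRL_C (0x80) and SIGP_CTRL_SCN_MASK (0x3f) above.
+ */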
+union bsca_sigp_ctrl {
+ __u8 value;
+ struct {
+ __u8 c : 1;
+ __u8 r : 1;
+ __u8 scn : 6;
+ };
+} __packed;
+
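+/* extended format: c gets its own byte and scn grows to a full 8 bits */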
+union esca_sigp_ctrl {
+ __u16 value;
+ struct {
+ __u8 c : 1;
+ __u8 reserved: 7;
+ __u8 scn;
+ };
+} __packed;
+
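+/* 64-byte ESCA entry; sda holds the address of this CPU's SIE control block */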
+struct esca_entry {
+ union esca_sigp_ctrl sigp_ctrl;
+ __u16 reserved1[3];
+ __u64 sda;
+ __u64 reserved2[6];
+} __packed;
+
+struct bsca_entry {
__u8 reserved0;
- __u8 sigp_ctrl;
+ union bsca_sigp_ctrl sigp_ctrl;
__u16 reserved[3];
__u64 sda;
__u64 reserved2[2];
};
};
-struct sca_block {
+struct bsca_block {
union ipte_control ipte_control;
__u64 reserved[5];
__u64 mcn;
__u64 reserved2;
- struct sca_entry cpu[64];
+ struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS];
} __attribute__((packed));
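+/*
+ * Extended SCA: mcn grows to a 4-word bitmap so it can cover all 248
+ * entries (cf. clear_bit_inv() in sca_del_vcpu() below).
+ */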
+struct esca_block {
+ union ipte_control ipte_control;
+ __u64 reserved1[7];
+ __u64 mcn[4];
+ __u64 reserved2[20];
+ struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS];
+} __packed;
+
#define CPUSTAT_STOPPED 0x80000000
#define CPUSTAT_WAIT 0x10000000
#define CPUSTAT_ECALL_PEND 0x08000000
};
struct kvm_arch {
- struct sca_block *sca;
+ struct bsca_block *sca;
debug_info_t *dbf;
struct kvm_s390_float_interrupt float_int;
struct kvm_device *flic;
/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
- struct sca_block *sca = vcpu->kvm->arch.sca;
- uint8_t sigp_ctrl = sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+ struct bsca_block *sca = vcpu->kvm->arch.sca;
+ union bsca_sigp_ctrl sigp_ctrl = sca->cpu[vcpu->vcpu_id].sigp_ctrl;
if (src_id)
- *src_id = sigp_ctrl & SIGP_CTRL_SCN_MASK;
+ *src_id = sigp_ctrl.scn;
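+
+ /* pending only if both the c bit and CPUSTAT_ECALL_PEND are set */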
- return sigp_ctrl & SIGP_CTRL_C &&
+ return sigp_ctrl.c &&
atomic_read(&vcpu->arch.sie_block->cpuflags) &
CPUSTAT_ECALL_PEND;
}
static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
- struct sca_block *sca = vcpu->kvm->arch.sca;
- uint8_t *sigp_ctrl = &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
- uint8_t new_val = SIGP_CTRL_C | (src_id & SIGP_CTRL_SCN_MASK);
- uint8_t old_val = *sigp_ctrl & ~SIGP_CTRL_C;
+ int expect, rc;
+ struct bsca_block *sca = vcpu->kvm->arch.sca;
+ union bsca_sigp_ctrl *sigp_ctrl = &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+ union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
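+
+ /* flag an external call as pending, but only if none is pending yet */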
+ new_val.scn = src_id;
+ new_val.c = 1;
+ old_val.c = 0;
+
+ expect = old_val.value;
+ rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
+
- if (cmpxchg(sigp_ctrl, old_val, new_val) != old_val) {
+ if (rc != expect) {
/* another external call is pending */
return -EBUSY;
}
static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
- struct sca_block *sca = vcpu->kvm->arch.sca;
+ struct bsca_block *sca = vcpu->kvm->arch.sca;
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- uint8_t *sigp_ctrl = &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+ union bsca_sigp_ctrl *sigp_ctrl = &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
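+
+ /* drop the pending indication from the cpuflags and the SCA entry */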
atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
- *sigp_ctrl = 0;
+ sigp_ctrl->value = 0;
}
int psw_extint_disabled(struct kvm_vcpu *vcpu)
rc = -ENOMEM;
- kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
+ kvm->arch.sca = (struct bsca_block *) get_zeroed_page(GFP_KERNEL);
if (!kvm->arch.sca)
goto out_err;
spin_lock(&kvm_lock);
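+ /*
+  * Stagger SCAs within the page in 16-byte steps, presumably so that
+  * the hot leading cache lines of different VMs' SCAs do not collide.
+  */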
sca_offset += 16;
- if (sca_offset + sizeof(struct sca_block) > PAGE_SIZE)
+ if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
sca_offset = 0;
- kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
+ kvm->arch.sca = (struct bsca_block *)
+ ((char *) kvm->arch.sca + sca_offset);
spin_unlock(&kvm_lock);
sprintf(debug_name, "kvm-%u", current->pid);
trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
kvm_s390_clear_local_irqs(vcpu);
kvm_clear_async_pf_completion_queue(vcpu);
- if (!kvm_is_ucontrol(vcpu->kvm)) {
+ if (!kvm_is_ucontrol(vcpu->kvm))
sca_del_vcpu(vcpu);
- }
smp_mb();
if (kvm_is_ucontrol(vcpu->kvm))
static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
- struct sca_block *sca = vcpu->kvm->arch.sca;
+ struct bsca_block *sca = vcpu->kvm->arch.sca;
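+
+ /* clear this CPU's bit in mcn, presumably the mask of valid entries */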
clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
if (sca->cpu[vcpu->vcpu_id].sda == (__u64) vcpu->arch.sie_block)
static void sca_add_vcpu(struct kvm_vcpu *vcpu, struct kvm *kvm,
unsigned int id)
{
- struct sca_block *sca = kvm->arch.sca;
+ struct bsca_block *sca = kvm->arch.sca;
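+
+ /* claim the entry only if it is still free, i.e. no sda set yet */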
if (!sca->cpu[id].sda)
sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
/* support for Basic/Extended SCA handling */
static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
{
- return &kvm->arch.sca->ipte_control;
+ struct bsca_block *sca = kvm->arch.sca; /* SCA version doesn't matter */
+
+ return &sca->ipte_control;
}
#endif