struct kvm_s390_sie_block {
atomic_t cpuflags; /* 0x0000 */
- __u32 prefix; /* 0x0004 */
+ __u32 : 1; /* 0x0004 */
+ __u32 prefix : 18;
+ __u32 : 13;
__u8 reserved08[4]; /* 0x0008 */
#define PROG_IN_SIE (1<<0)
__u32 prog0c; /* 0x000c */
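
The single 32-bit prefix word at SIE-block offset 0x0004 is split into an explicit layout: one reserved bit, an 18-bit prefix field, and 13 reserved low-order bits. Assuming the usual MSB-first bit-field allocation on big-endian s390, the 18-bit field covers bits 1-18 of the word, so storing the prefix origin shifted right by 13 (the GUEST_PREFIX_SHIFT introduced in the kvm-s390.h hunk below) leaves the bytes at offset 0x0004 the same as before, when the 0x7fffe000-masked address was written directly. A minimal user-space sketch of that equivalence; the helper name and values are illustrative stand-ins, not kernel code:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pack an 18-bit prefix field into bits 1-18 of a 32-bit word (IBM bit
 * numbering, bit 0 = most significant), mimicking MSB-first allocation. */
static uint32_t pack_prefix_word(uint32_t prefix_field)
{
	return (prefix_field & 0x3ffffu) << 13;		/* bits 19-31 stay zero */
}

int main(void)
{
	uint32_t prefix = 0x12346000u;			/* an 8K-aligned prefix origin */
	uint32_t old_word = prefix & 0x7fffe000u;	/* pre-patch encoding */
	uint32_t new_word = pack_prefix_word(prefix >> 13);

	assert(old_word == new_word);			/* in-memory format unchanged */
	printf("word at +0x4: 0x%08x\n", (unsigned)new_word);
	return 0;
}
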
static int diag_release_pages(struct kvm_vcpu *vcpu)
{
unsigned long start, end;
- unsigned long prefix = vcpu->arch.sie_block->prefix;
+ unsigned long prefix = kvm_s390_get_prefix(vcpu);
start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;
static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
unsigned long gra)
{
- unsigned long prefix = vcpu->arch.sie_block->prefix;
+ unsigned long prefix = kvm_s390_get_prefix(vcpu);
if (gra < 2 * PAGE_SIZE)
gra += prefix;
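
For context, s390 prefixing relocates the first two pages of guest real memory (the lowcore) to the prefix area, which is why converting a real address to an absolute one adds the value returned by kvm_s390_get_prefix(). A small sketch of just the branch shown above; PAGE_SIZE and the prefix value are placeholders, and the inverse mapping of the prefix pages themselves is not modeled:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Relocate lowcore accesses: real addresses in the first two pages are
 * moved up to the prefix area (only the branch from the hunk above). */
static unsigned long real_to_abs(unsigned long gra, unsigned long prefix)
{
	if (gra < 2 * PAGE_SIZE)
		gra += prefix;
	return gra;
}

int main(void)
{
	unsigned long prefix = 0x20000;		/* example 8K-aligned prefix */

	assert(real_to_abs(0x1000, prefix) == 0x21000);	/* lowcore moves up */
	assert(real_to_abs(0x50000, prefix) == 0x50000);	/* others untouched */
	printf("real 0x1000 -> abs 0x%lx\n", real_to_abs(0x1000, prefix));
	return 0;
}
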
unsigned long __gpa; \
\
__gpa = (unsigned long)(gra); \
- __gpa += __vcpu->arch.sie_block->prefix; \
+ __gpa += kvm_s390_get_prefix(__vcpu); \
kvm_write_guest(__vcpu->kvm, __gpa, &__x, sizeof(__x)); \
})
int write_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
unsigned long len)
{
- unsigned long gpa = gra + vcpu->arch.sie_block->prefix;
+ unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);
return kvm_write_guest(vcpu->kvm, gpa, data, len);
}
int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
unsigned long len)
{
- unsigned long gpa = gra + vcpu->arch.sie_block->prefix;
+ unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);
return kvm_read_guest(vcpu->kvm, gpa, data, len);
}
kvm_for_each_vcpu(i, vcpu, kvm) {
/* match against both prefix pages */
- if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
+ if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
exit_sie_sync(vcpu);
if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
int rc;
rc = gmap_ipte_notify(vcpu->arch.gmap,
- vcpu->arch.sie_block->prefix,
+ kvm_s390_get_prefix(vcpu),
PAGE_SIZE * 2);
if (rc)
return rc;
kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
- kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
+ kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
if (vcpu->sigset_active)
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
unsigned char archmode = 1;
+ unsigned int px;
u64 clkcomp;
int rc;
vcpu->run->s.regs.gprs, 128);
rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
&vcpu->arch.sie_block->gpsw, 16);
+ px = kvm_s390_get_prefix(vcpu);
rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
- &vcpu->arch.sie_block->prefix, 4);
+ &px, 4);
rc |= write_guest_abs(vcpu,
gpa + offsetof(struct save_area, fp_ctrl_reg),
&vcpu->arch.guest_fpregs.fpc, 4);
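
The local px is needed here for two reasons: the value written to the save area has to be the expanded byte address rather than the raw 18-bit field, and C forbids taking the address of a bit-field, so the old &vcpu->arch.sie_block->prefix would no longer compile. A rough user-space sketch of the pattern; write_abs() and the toy struct are stand-ins for write_guest_abs() and the SIE block:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for write_guest_abs(): just copies bytes to a destination. */
static void write_abs(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
}

struct toy_sie_block {
	uint32_t reserved0 : 1;
	uint32_t prefix : 18;
	uint32_t reserved19 : 13;
};

int main(void)
{
	struct toy_sie_block sie = { .prefix = 0x12346000u >> 13 };
	uint8_t pref_reg_save[4];
	uint32_t px;

	/* &sie.prefix would not compile (address of a bit-field), and the raw
	 * field is not the byte address anyway: expand into a local first. */
	px = (uint32_t)sie.prefix << 13;
	write_abs(pref_reg_save, &px, sizeof(px));
	printf("stored prefix 0x%x\n", (unsigned)px);
	return 0;
}
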
#endif
}
+#define GUEST_PREFIX_SHIFT 13
+static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;
+}
+
static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
{
- vcpu->arch.sie_block->prefix = prefix & 0x7fffe000u;
+ vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
vcpu->arch.sie_block->ihcpu = 0xffff;
kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
}
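
With the field now holding the prefix origin divided by 8K, reading sie_block->prefix directly would be off by a factor of 8192, which is why every reader above was switched to kvm_s390_get_prefix(). A minimal round-trip sketch under the same GUEST_PREFIX_SHIFT assumption; the toy_* names are illustrative only, and the bit placement within the word is left to the compiler since only the field width matters here:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GUEST_PREFIX_SHIFT 13

struct toy_sie_block {
	uint32_t reserved0 : 1;
	uint32_t prefix : 18;		/* prefix origin >> 13 */
	uint32_t reserved19 : 13;
};

static uint32_t toy_get_prefix(const struct toy_sie_block *sie)
{
	return (uint32_t)sie->prefix << GUEST_PREFIX_SHIFT;
}

static void toy_set_prefix(struct toy_sie_block *sie, uint32_t prefix)
{
	sie->prefix = prefix >> GUEST_PREFIX_SHIFT;	/* 18-bit field drops bit 31 */
}

int main(void)
{
	struct toy_sie_block sie = { 0 };

	toy_set_prefix(&sie, 0x12346000u);
	assert(sie.prefix == (0x12346000u >> GUEST_PREFIX_SHIFT));	/* raw field */
	assert(toy_get_prefix(&sie) == 0x12346000u);			/* byte address */
	printf("raw 0x%x, accessor 0x%x\n",
	       (unsigned)sie.prefix, (unsigned)toy_get_prefix(&sie));
	return 0;
}
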
if (operand2 & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- address = vcpu->arch.sie_block->prefix;
- address = address & 0x7fffe000u;
+ address = kvm_s390_get_prefix(vcpu);
/* get the value */
rc = write_guest(vcpu, operand2, &address, sizeof(address));