};
};
-void save_fpu_regs(struct fpu *fpu);
+void save_fpu_regs(void);
#define is_vx_fpu(fpu) (!!((fpu)->flags & FPU_USE_VX))
#define is_vx_task(tsk) (!!((tsk)->thread.fpu.flags & FPU_USE_VX))
#define switch_to(prev,next,last) do { \
if (prev->mm) { \
- save_fpu_regs(&prev->thread.fpu); \
+ save_fpu_regs(); \
save_access_regs(&prev->thread.acrs[0]); \
save_ri_cb(prev->thread.ri_cb); \
} \
DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
BLANK();
DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp));
- DEFINE(__THREAD_fpu, offsetof(struct task_struct, thread.fpu));
+ DEFINE(__THREAD_FPU_fpc, offsetof(struct thread_struct, fpu.fpc));
+ DEFINE(__THREAD_FPU_flags, offsetof(struct thread_struct, fpu.flags));
+ DEFINE(__THREAD_FPU_regs, offsetof(struct thread_struct, fpu.regs));
DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause));
DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address));
DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid));
DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb));
BLANK();
- DEFINE(__FPU_fpc, offsetof(struct fpu, fpc));
- DEFINE(__FPU_flags, offsetof(struct fpu, flags));
- DEFINE(__FPU_regs, offsetof(struct fpu, regs));
- BLANK();
DEFINE(__TI_task, offsetof(struct thread_info, task));
DEFINE(__TI_flags, offsetof(struct thread_info, flags));
DEFINE(__TI_sysc_table, offsetof(struct thread_info, sys_call_table));
static void store_sigregs(void)
{
save_access_regs(current->thread.acrs);
-	save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
}
/* Load registers after signal return */
if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
goto badframe;
set_current_blocked(&set);
-	save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
if (restore_sigregs32(regs, &frame->sregs))
goto badframe;
if (restore_sigregs_ext32(regs, &frame->sregs_ext))
set_current_blocked(&set);
if (compat_restore_altstack(&frame->uc.uc_stack))
goto badframe;
-	save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
goto badframe;
if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
xc __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason
tm __LC_CPU_FLAGS+7,_CIF_FPU # load guest fp/vx registers ?
jno .Lsie_load_guest_gprs
- lg %r12,__LC_THREAD_INFO # load fp/vx regs save area
brasl %r14,load_fpu_regs # load guest fp/vx regs
.Lsie_load_guest_gprs:
lmg %r0,%r13,0(%r3) # load guest gprs 0-13
* of the register contents at system call or io return.
*/
ENTRY(save_fpu_regs)
+ lg %r2,__LC_CURRENT
+ aghi %r2,__TASK_thread
tm __LC_CPU_FLAGS+7,_CIF_FPU
bor %r14
- stfpc __FPU_fpc(%r2)
+ stfpc __THREAD_FPU_fpc(%r2)
.Lsave_fpu_regs_fpc_end:
- lg %r3,__FPU_regs(%r2)
+ lg %r3,__THREAD_FPU_regs(%r2)
ltgr %r3,%r3
jz .Lsave_fpu_regs_done # no save area -> set CIF_FPU
- tm __FPU_flags+3(%r2),FPU_USE_VX
+ tm __THREAD_FPU_flags+3(%r2),FPU_USE_VX
jz .Lsave_fpu_regs_fp # no -> store FP regs
.Lsave_fpu_regs_vx_low:
VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3)
* FP/VX state, the vector-enablement control, CR0.46, is either set or cleared.
*
* There are special calling conventions to fit into sysc and io return work:
- * %r12: __LC_THREAD_INFO
* %r15: <kernel stack>
* The function requires:
* %r4 and __SF_EMPTY+32(%r15)
*/
load_fpu_regs:
+ lg %r4,__LC_CURRENT
+ aghi %r4,__TASK_thread
tm __LC_CPU_FLAGS+7,_CIF_FPU
bnor %r14
- lg %r4,__TI_task(%r12)
- la %r4,__THREAD_fpu(%r4)
- lfpc __FPU_fpc(%r4)
+ lfpc __THREAD_FPU_fpc(%r4)
stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0
- tm __FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ?
- lg %r4,__FPU_regs(%r4) # %r4 <- reg save area
+ tm __THREAD_FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ?
+ lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
jz .Lload_fpu_regs_fp_ctl # -> no VX, load FP regs
.Lload_fpu_regs_vx_ctl:
tm __SF_EMPTY+32+5(%r15),2 # test VX control
jhe 2f
clg %r9,BASED(.Lcleanup_save_fpu_fpc_end)
jhe 1f
+	lg	%r2,__LC_CURRENT
+	aghi	%r2,__TASK_thread
0: # Store floating-point controls
- stfpc __FPU_fpc(%r2)
+ stfpc __THREAD_FPU_fpc(%r2)
1: # Load register save area and check if VX is active
- lg %r3,__FPU_regs(%r2)
+ lg %r3,__THREAD_FPU_regs(%r2)
ltgr %r3,%r3
jz 5f # no save area -> set CIF_FPU
- tm __FPU_flags+3(%r2),FPU_USE_VX
+ tm __THREAD_FPU_flags+3(%r2),FPU_USE_VX
jz 4f # no VX -> store FP regs
2: # Store vector registers (V0-V15)
VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3)
jhe 5f
clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_ctl)
jhe 6f
- lg %r4,__TI_task(%r12)
- la %r4,__THREAD_fpu(%r4)
- lfpc __FPU_fpc(%r4)
- tm __FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ?
- lg %r4,__FPU_regs(%r4) # %r4 <- reg save area
+	lg	%r4,__LC_CURRENT
+	aghi	%r4,__TASK_thread
+ lfpc __THREAD_FPU_fpc(%r4)
+ tm __THREAD_FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ?
+ lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
jz 3f # -> no VX, load FP regs
6: # Set VX-enablement control
stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0
* The CIF_FPU flag is set in any case to lazy clear or restore a saved
* state when switching to a different task or returning to user space.
*/
-	save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
dst->thread.fpu.fpc = current->thread.fpu.fpc;
if (is_vx_task(current))
convert_vx_to_fp(dst->thread.fpu.fprs,
*/
int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
{
-	save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
fpregs->fpc = current->thread.fpu.fpc;
fpregs->pad = 0;
if (is_vx_task(current))
_s390_fp_regs fp_regs;
if (target == current)
- save_fpu_regs(&target->thread.fpu);
+ save_fpu_regs();
fp_regs.fpc = target->thread.fpu.fpc;
fpregs_store(&fp_regs, &target->thread.fpu);
freg_t fprs[__NUM_FPRS];
if (target == current)
- save_fpu_regs(&target->thread.fpu);
+ save_fpu_regs();
/* If setting FPC, must validate it first. */
if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
return -ENODEV;
if (is_vx_task(target)) {
if (target == current)
- save_fpu_regs(&target->thread.fpu);
+ save_fpu_regs();
for (i = 0; i < __NUM_VXRS_LOW; i++)
vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
} else
if (rc)
return rc;
} else if (target == current)
- save_fpu_regs(&target->thread.fpu);
+ save_fpu_regs();
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
if (rc == 0)
return -ENODEV;
if (is_vx_task(target)) {
if (target == current)
- save_fpu_regs(&target->thread.fpu);
+ save_fpu_regs();
memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW,
sizeof(vxrs));
} else
if (rc)
return rc;
} else if (target == current)
- save_fpu_regs(&target->thread.fpu);
+ save_fpu_regs();
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
static void store_sigregs(void)
{
save_access_regs(current->thread.acrs);
-	save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
}
/* Load registers after signal return */
if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
goto badframe;
set_current_blocked(&set);
-	save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
if (restore_sigregs(regs, &frame->sregs))
goto badframe;
if (restore_sigregs_ext(regs, &frame->sregs_ext))
set_current_blocked(&set);
if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
-	save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
if (restore_sigregs(regs, &frame->uc.uc_mcontext))
goto badframe;
if (restore_sigregs_ext(regs, &frame->uc.uc_mcontext_ext))
return -ENOMEM;
preempt_disable();
if (tsk == current)
- save_fpu_regs(&tsk->thread.fpu);
+ save_fpu_regs();
/* Copy the 16 floating point registers */
convert_fp_to_vx(vxrs, tsk->thread.fpu.fprs);
fprs = tsk->thread.fpu.fprs;
}
/* get vector interrupt code from fpc */
-	save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
vic = (current->thread.fpu.fpc & 0xf00) >> 8;
switch (vic) {
case 1: /* invalid vector operation */
location = get_trap_ip(regs);
-	save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
/* Check for vector register enablement */
if (MACHINE_HAS_VX && !is_vx_task(current) &&
(current->thread.fpu.fpc & FPC_DXC_MASK) == 0xfe00) {
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
/* Save host register state */
-	save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
save_fpu_to(&vcpu->arch.host_fpregs);
if (test_kvm_facility(vcpu->kvm, 129)) {
atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
gmap_disable(vcpu->arch.gmap);
-	save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
if (test_kvm_facility(vcpu->kvm, 129))
/*
return -EINVAL;
memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
vcpu->arch.guest_fpregs.fpc = fpu->fpc;
-	save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
load_fpu_from(&vcpu->arch.guest_fpregs);
return 0;
}
* copying in vcpu load/put. Lets update our copies before we save
* it into the save area
*/
-	save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
if (test_kvm_facility(vcpu->kvm, 129)) {
/*
* If the vector extension is available, the vector registers
*
* Let's update our copies before we save it into the save area.
*/
-	save_fpu_regs(&current->thread.fpu);
+ save_fpu_regs();
return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
}