/*
 *  linux/arch/arm/vfp/vfpmodule.c
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/user.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/system_info.h>
#include <asm/thread_notify.h>
#include <asm/vfp.h>

#include "vfpinstr.h"
#include "vfp.h"
/*
 * Our undef handlers (in entry.S)
 */
void vfp_testing_entry(void);
void vfp_support_entry(void);
void vfp_null_entry(void);

void (*vfp_vector)(void) = vfp_null_entry;
/*
 * Dual-use variable.
 * Used in startup: set to non-zero if VFP checks fail
 * After startup, holds VFP architecture
 */
unsigned int VFP_arch;
/*
 * The pointer to the vfpstate structure of the thread which currently
 * owns the context held in the VFP hardware, or NULL if the hardware
 * context is invalid.
 *
 * For UP, this is sufficient to tell which thread owns the VFP context.
 * However, for SMP, we also need to check the CPU number stored in the
 * saved state too to catch migrations.
 */
union vfp_state *vfp_current_hw_state[NR_CPUS];
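
/*
 * Illustration of the SMP hole the hard.cpu field closes: a thread can
 * load its context into CPU0's VFP, migrate away and modify its context
 * on another CPU, then migrate back while vfp_current_hw_state[0] still
 * points at its vfpstate -- the hardware copy on CPU0 is now stale.
 * vfp_state_in_hw() below therefore checks the saved CPU number as well
 * as the pointer.
 */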
/*
 * Is 'thread's most up to date state stored in this CPU's hardware?
 * Must be called from non-preemptible context.
 */
static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
{
#ifdef CONFIG_SMP
	if (thread->vfpstate.hard.cpu != cpu)
		return false;
#endif
	return vfp_current_hw_state[cpu] == &thread->vfpstate;
}
/*
 * Force a reload of the VFP context from the thread structure.  We do
 * this by ensuring that access to the VFP hardware is disabled, and
 * clear vfp_current_hw_state.  Must be called from non-preemptible context.
 */
static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
{
	if (vfp_state_in_hw(cpu, thread)) {
#ifndef CONFIG_VFP_OPT
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
#endif
		vfp_current_hw_state[cpu] = NULL;
	}
#ifdef CONFIG_SMP
	thread->vfpstate.hard.cpu = NR_CPUS;
#endif
}
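
/*
 * Callers: vfp_flush_hwstate() uses this before rewriting a thread's
 * saved state, and the hotplug notifier uses it when a CPU dies.  With
 * FPEXC_EN clear, the next VFP instruction traps to the support code,
 * which then reloads the context from the thread structure.
 */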
/*
 * Per-thread VFP initialization.
 */
static void vfp_thread_flush(struct thread_info *thread)
{
	union vfp_state *vfp = &thread->vfpstate;
	unsigned int cpu;

	/*
	 * Disable VFP to ensure we initialize it first.  We must ensure
	 * that the modification of vfp_current_hw_state[] and hardware
	 * disable are done for the same CPU and without preemption.
	 *
	 * Do this first to ensure that preemption won't overwrite our
	 * state saving should access to the VFP be enabled at this point.
	 */
	cpu = get_cpu();
	if (vfp_current_hw_state[cpu] == vfp)
		vfp_current_hw_state[cpu] = NULL;
#ifndef CONFIG_VFP_OPT
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
#endif
	put_cpu();

	memset(vfp, 0, sizeof(union vfp_state));

	vfp->hard.fpexc = FPEXC_EN;
	vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
#ifdef CONFIG_SMP
	vfp->hard.cpu = NR_CPUS;
#endif
}
static void vfp_thread_exit(struct thread_info *thread)
{
	/* release case: Per-thread VFP cleanup. */
	union vfp_state *vfp = &thread->vfpstate;
	unsigned int cpu = get_cpu();

	if (vfp_current_hw_state[cpu] == vfp)
		vfp_current_hw_state[cpu] = NULL;
	put_cpu();
}
static void vfp_thread_copy(struct thread_info *thread)
{
	struct thread_info *parent = current_thread_info();

	vfp_sync_hwstate(parent);
	thread->vfpstate = parent->vfpstate;
#ifdef CONFIG_SMP
	thread->vfpstate.hard.cpu = NR_CPUS;
#endif
}
/*
 * When this function is called with the following 'cmd's, the following
 * is true while this function is being run:
 *  THREAD_NOTIFY_SWITCH:
 *   - the previously running thread will not be scheduled onto another CPU.
 *   - the next thread to be run (v) will not be running on another CPU.
 *   - thread->cpu is the local CPU number
 *   - not preemptible as we're called in the middle of a thread switch
 *  THREAD_NOTIFY_FLUSH:
 *   - the thread (v) will be running on the local CPU, so
 *     v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *     but may change at any time.
 *   - we could be preempted if tree preempt rcu is enabled, so
 *     it is unsafe to use thread->cpu.
 *  THREAD_NOTIFY_EXIT:
 *   - the thread (v) will be running on the local CPU, so
 *     v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *     but may change at any time.
 *   - we could be preempted if tree preempt rcu is enabled, so
 *     it is unsafe to use thread->cpu.
 */
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	struct thread_info *thread = v;
	u32 fpexc;
#ifndef CONFIG_VFP_OPT
#ifdef CONFIG_SMP
	unsigned int cpu;
#endif
#endif

	switch (cmd) {
	case THREAD_NOTIFY_SWITCH:
		fpexc = fmrx(FPEXC);
#ifndef CONFIG_VFP_OPT
#ifdef CONFIG_SMP
		cpu = thread->cpu;

		/*
		 * On SMP, if VFP is enabled, save the old state in
		 * case the thread migrates to a different CPU. The
		 * restoring is done lazily.
		 */
		if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
			vfp_save_state(vfp_current_hw_state[cpu], fpexc);
#endif

		/*
		 * Always disable VFP so we can lazily save/restore the
		 * old state.
		 */
		fmxr(FPEXC, fpexc & ~FPEXC_EN);
#endif
		break;

	case THREAD_NOTIFY_FLUSH:
		vfp_thread_flush(thread);
		break;

	case THREAD_NOTIFY_EXIT:
		vfp_thread_exit(thread);
		break;

	case THREAD_NOTIFY_COPY:
		vfp_thread_copy(thread);
		break;
	}

	return NOTIFY_DONE;
}
static struct notifier_block vfp_notifier_block = {
	.notifier_call	= vfp_notifier,
};
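
/*
 * This block is registered from vfp_init() via thread_register_notifier()
 * once working VFP hardware has been detected.
 */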
/*
 * Raise a SIGFPE for the current process.
 * sicode describes the signal being raised.
 */
static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
{
	siginfo_t info;

	memset(&info, 0, sizeof(info));

	info.si_signo = SIGFPE;
	info.si_code = sicode;
	info.si_addr = (void __user *)(instruction_pointer(regs) - 4);

	/*
	 * This is the same as NWFPE, because it's not clear what
	 * this should be.
	 */
	current->thread.error_code = 0;
	current->thread.trap_no = 6;

	send_sig_info(SIGFPE, &info, current);
}
static void vfp_panic(char *reason, u32 inst)
{
	int i;

	pr_err("VFP: Error: %s\n", reason);
	pr_err("VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
		fmrx(FPEXC), fmrx(FPSCR), inst);
	for (i = 0; i < 32; i += 2)
		pr_err("VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
		       i, vfp_get_float(i), i+1, vfp_get_float(i+1));
}
/*
 * Process bitmask of exception conditions.
 */
static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
{
	int si_code = 0;

	pr_debug("VFP: raising exceptions %08x\n", exceptions);

	if (exceptions == VFP_EXCEPTION_ERROR) {
		vfp_panic("unhandled bounce", inst);
		vfp_raise_sigfpe(0, regs);
		return;
	}

	/*
	 * If any of the status flags are set, update the FPSCR.
	 * Comparison instructions always return at least one of
	 * these flags set.
	 */
	if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V))
		fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V);

	fpscr |= exceptions;

	fmxr(FPSCR, fpscr);
#define RAISE(stat,en,sig)				\
	if (exceptions & stat && fpscr & en)		\
		si_code = sig;
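
	/*
	 * For example, RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV) expands to:
	 *
	 *	if (exceptions & FPSCR_DZC && fpscr & FPSCR_DZE)
	 *		si_code = FPE_FLTDIV;
	 *
	 * i.e. a cumulative exception flag only raises a signal when the
	 * corresponding trap enable bit is also set.
	 */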
	/*
	 * These are arranged in priority order, lowest to highest.
	 */
	RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV);
	RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES);
	RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND);
	RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
	RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);

	if (si_code)
		vfp_raise_sigfpe(si_code, regs);
}
/*
 * Emulate a VFP instruction.
 */
static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
{
	u32 exceptions = VFP_EXCEPTION_ERROR;

	pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr);

	if (INST_CPRTDO(inst)) {
		if (!INST_CPRT(inst)) {
			/*
			 * CPDO
			 */
			if (vfp_single(inst)) {
				exceptions = vfp_single_cpdo(inst, fpscr);
			} else {
				exceptions = vfp_double_cpdo(inst, fpscr);
			}
		} else {
			/*
			 * A CPRT instruction can not appear in FPINST2, nor
			 * can it cause an exception.  Therefore, we do not
			 * have to emulate it.
			 */
		}
	} else {
		/*
		 * A CPDT instruction can not appear in FPINST2, nor can
		 * it cause an exception.  Therefore, we do not have to
		 * emulate it.
		 */
	}
	return exceptions & ~VFP_NAN_FLAG;
}
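
/*
 * Sketch of the caller contract, as used by VFP_bounce() below:
 *
 *	exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
 *	if (exceptions)
 *		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
 *
 * A return of VFP_EXCEPTION_ERROR marks an instruction the emulator
 * cannot handle; vfp_raise_exceptions() turns that into vfp_panic()
 * output plus a SIGFPE.
 */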
/*
 * Package up a bounce condition.
 */
void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
	u32 fpscr, orig_fpscr, fpsid, exceptions;

	pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);

	/*
	 * At this point, FPEXC can have the following configuration:
	 *
	 *  EX DEX IXE
	 *  0   1   x   - synchronous exception
	 *  1   x   0   - asynchronous exception
	 *  1   x   1   - synchronous on VFP subarch 1 and asynchronous on later
	 *  0   0   1   - synchronous on VFP9 (non-standard subarch 1
	 *                implementation), undefined otherwise
	 *
	 * Clear various bits and enable access to the VFP so we can
	 * handle the bounce.
	 */
	fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK));

	fpsid = fmrx(FPSID);
	orig_fpscr = fpscr = fmrx(FPSCR);
	/*
	 * Check for the special VFP subarch 1 and FPSCR.IXE bit case
	 */
	if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT)
	    && (fpscr & FPSCR_IXE)) {
		/*
		 * Synchronous exception, emulate the trigger instruction
		 */
		goto emulate;
	}

	if (fpexc & FPEXC_EX) {
#ifndef CONFIG_CPU_FEROCEON
		/*
		 * Asynchronous exception. The instruction is read from FPINST
		 * and the interrupted instruction has to be restarted.
		 */
		trigger = fmrx(FPINST);
		regs->ARM_pc -= 4;
#endif
	} else if (!(fpexc & FPEXC_DEX)) {
		/*
		 * Illegal combination of bits. It can be caused by an
		 * unallocated VFP instruction but with FPSCR.IXE set and not
		 * on VFP subarch 1.
		 */
		vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
		goto exit;
	}
	/*
	 * Modify fpscr to indicate the number of iterations remaining.
	 * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates
	 * whether FPEXC.VECITR or FPSCR.LEN is used.
	 */
	if (fpexc & (FPEXC_EX | FPEXC_VV)) {
		u32 len;

		len = fpexc + (1 << FPEXC_LENGTH_BIT);

		fpscr &= ~FPSCR_LENGTH_MASK;
		fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);
	}
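
	/*
	 * The arithmetic above works on the FPEXC[LENGTH] field in place:
	 * adding 1 << FPEXC_LENGTH_BIT increments the field without a
	 * separate extract step, and the masked result is then shifted
	 * into the FPSCR LEN position.
	 */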
	/*
	 * Handle the first FP instruction.  We used to take note of the
	 * FPEXC bounce reason, but this appears to be unreliable.
	 * Emulate the bounced instruction instead.
	 */
	exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
	/*
	 * If there isn't a second FP instruction, exit now. Note that
	 * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
	 */
	if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
		goto exit;

	/*
	 * The barrier() here prevents fpinst2 being read
	 * before the condition above.
	 */
	barrier();
	trigger = fmrx(FPINST2);

 emulate:
	exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
 exit:
	preempt_enable();
}
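
/*
 * VFP_bounce() is reached from vfp_support_entry (one of the undef
 * handlers declared at the top of this file) with preemption disabled,
 * which is what the preempt_enable() on the exit path pairs with.
 */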
static void vfp_enable(void *unused)
{
	u32 access;

	BUG_ON(preemptible());
	access = get_copro_access();

	/*
	 * Enable full access to VFP (cp10 and cp11)
	 */
	set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
}

#ifdef CONFIG_CPU_PM
static int vfp_pm_suspend(void)
{
	struct thread_info *ti = current_thread_info();
	u32 fpexc = fmrx(FPEXC);

	/* if vfp is on, then save state for resumption */
	if (fpexc & FPEXC_EN) {
		pr_debug("%s: saving vfp state\n", __func__);
		vfp_save_state(&ti->vfpstate, fpexc);

#ifndef CONFIG_VFP_OPT
		/* disable, just in case */
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
#endif
	} else if (vfp_current_hw_state[ti->cpu]) {
#ifndef CONFIG_SMP
		fmxr(FPEXC, fpexc | FPEXC_EN);
		vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
#ifndef CONFIG_VFP_OPT
		fmxr(FPEXC, fpexc);
#endif
#endif
	}

	/* clear any information we had about last context state */
	vfp_current_hw_state[ti->cpu] = NULL;

	return 0;
}
static void vfp_pm_resume(void)
{
#ifdef CONFIG_VFP_OPT
	struct thread_info *ti = current_thread_info();
	u32 *vfpstate = (u32 *)(&ti->vfpstate);
	u32 temp = 0;
	u32 fpexc = 0, fpscr = 0, fpinst = 0, fpinst2 = 0;
#endif

	/* ensure we have access to the vfp */
	vfp_enable(NULL);

#ifndef CONFIG_VFP_OPT
	/* and disable it to ensure the next usage restores the state */
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
#else
	/* restore VFP registers and state */
	asm volatile(
		"LDC p11, cr0, [%0],#32*4\n"	/* load d0-d15 */
		//"VFPFMRX \tmp, MVFR0\n"
		"MRC p10, 7, %1, cr7, cr0, 0\n"	/* read MVFR0 */
		"and %1, %1, %6\n"		/* extract the A_SIMD field */
		"cmp %1, #2\n"			/* 32 double registers? */
		"ldceql p11, cr0, [%0],#32*4\n"	/* then also load d16-d31 */
		"addne %0, %0, #32*4\n"		/* else skip their slots */
		"ldmia %0, {%2, %3, %4, %5}\n"	/* fpexc/fpscr/fpinst/fpinst2 */
		//"VFPFMXR FPSCR, %3\n"
		"MCR p10, 7, %3, cr1, cr0, 0"	/* write FPSCR */
		: "+r"(vfpstate), "+r"(temp), "+r"(fpexc), "+r"(fpscr), "+r"(fpinst), "+r"(fpinst2)
		: "r" (MVFR0_A_SIMD_MASK)
		: "cc");
#endif
}
static int vfp_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
	void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		vfp_pm_suspend();
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		vfp_pm_resume();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block vfp_cpu_pm_notifier_block = {
	.notifier_call = vfp_cpu_pm_notifier,
};
static void vfp_pm_init(void)
{
	cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block);
}

#else
static inline void vfp_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
/*
 * Ensure that the VFP state stored in 'thread->vfpstate' is up to date
 * with the hardware state.
 */
void vfp_sync_hwstate(struct thread_info *thread)
{
	unsigned int cpu = get_cpu();

	if (vfp_state_in_hw(cpu, thread)) {
		u32 fpexc = fmrx(FPEXC);

		/*
		 * Save the last VFP state on this CPU.
		 */
		fmxr(FPEXC, fpexc | FPEXC_EN);
		vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
#ifndef CONFIG_VFP_OPT
		fmxr(FPEXC, fpexc);
#endif
	}

	put_cpu();
}
/* Ensure that the thread reloads the hardware VFP state on the next use. */
void vfp_flush_hwstate(struct thread_info *thread)
{
	unsigned int cpu = get_cpu();

	vfp_force_reload(cpu, thread);

	put_cpu();
}
/*
 * Save the current VFP state into the provided structures and prepare
 * for entry into a new function (signal handler).
 */
int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
				    struct user_vfp_exc __user *ufp_exc)
{
	struct thread_info *thread = current_thread_info();
	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
	int err = 0;

	/* Ensure that the saved hwstate is up-to-date. */
	vfp_sync_hwstate(thread);

	/*
	 * Copy the floating point registers. There can be unused
	 * registers; see asm/hwcap.h for details.
	 */
	err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
			      sizeof(hwstate->fpregs));
	/*
	 * Copy the status and control register.
	 */
	__put_user_error(hwstate->fpscr, &ufp->fpscr, err);

	/*
	 * Copy the exception registers.
	 */
	__put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
	__put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
	__put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);

	if (err)
		return -EFAULT;

	/* Ensure that VFP is disabled. */
	vfp_flush_hwstate(thread);

	/*
	 * As per the PCS, clear the length and stride bits for function
	 * entry.
	 */
	hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);

	return 0;
}
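
/*
 * Intended pairing (assuming the usual signal-frame callers): the
 * function above spills VFP state to the user stack at signal delivery,
 * and vfp_restore_user_hwstate() below reloads it at sigreturn.
 */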
/* Sanitise and restore the current VFP state from the provided structures. */
int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
			     struct user_vfp_exc __user *ufp_exc)
{
	struct thread_info *thread = current_thread_info();
	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
	unsigned long fpexc;
	int err = 0;

	/* Disable VFP to avoid corrupting the new thread state. */
	vfp_flush_hwstate(thread);

	/*
	 * Copy the floating point registers. There can be unused
	 * registers; see asm/hwcap.h for details.
	 */
	err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
				sizeof(hwstate->fpregs));
	/*
	 * Copy the status and control register.
	 */
	__get_user_error(hwstate->fpscr, &ufp->fpscr, err);

	/*
	 * Sanitise and restore the exception registers.
	 */
	__get_user_error(fpexc, &ufp_exc->fpexc, err);

	/* Ensure the VFP is enabled. */
	fpexc |= FPEXC_EN;

	/* Ensure FPINST2 is invalid and the exception flag is cleared. */
	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
	hwstate->fpexc = fpexc;

	__get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
	__get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);

	return err ? -EFAULT : 0;
}
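
/*
 * Clearing FPEXC_EX and FPEXC_FP2V above also means a forged signal
 * frame cannot make the kernel treat stale FPINST/FPINST2 values as a
 * pending exception when this state is next loaded into the hardware.
 */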
/*
 * VFP hardware can lose all context when a CPU goes offline.
 * As we will be running in SMP mode with CPU hotplug, we will save the
 * hardware state at every thread switch.  We clear our held state when
 * a CPU has been killed, indicating that the VFP hardware doesn't contain
 * a thread's VFP state.  When a CPU starts up, we re-enable access to the
 * VFP hardware.
 *
 * Both CPU_DYING and CPU_STARTING are called on the CPU which
 * is being offlined/onlined.
 */
static int vfp_hotplug(struct notifier_block *b, unsigned long action,
	void *hcpu)
{
	if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
		vfp_force_reload((long)hcpu, current_thread_info());
	} else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		vfp_enable(NULL);
	return NOTIFY_OK;
}
/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();

	if (cpu_arch >= CPU_ARCH_ARMv6)
		on_each_cpu(vfp_enable, NULL, 1);

	/*
	 * First check that there is a VFP that we can use.
	 * The handler is already setup to just log calls, so
	 * we just need to read the VFPSID register.
	 */
	vfp_vector = vfp_testing_entry;
	barrier();
	vfpsid = fmrx(FPSID);
	barrier();
	vfp_vector = vfp_null_entry;

	pr_info("VFP support v0.3: ");
	if (VFP_arch)
		pr_cont("not present\n");
	else if (vfpsid & FPSID_NODOUBLE) {
		pr_cont("no double precision support\n");
	} else {
		hotcpu_notifier(vfp_hotplug, 0);
		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
		pr_cont("implementor %02x architecture %d part %02x variant %x rev %x\n",
			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
			(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
			(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

		vfp_vector = vfp_support_entry;

		thread_register_notifier(&vfp_notifier_block);
		vfp_pm_init();
		/*
		 * We detected VFP, and the support code is
		 * in place; report VFP support to userspace.
		 */
		elf_hwcap |= HWCAP_VFP;
#ifdef CONFIG_VFPv3
		if (VFP_arch >= 2) {
			elf_hwcap |= HWCAP_VFPv3;

			/*
			 * Check for VFPv3 D16 and VFPv4 D16.  CPUs in
			 * this configuration only have 16 x 64bit
			 * registers.
			 */
			if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
				elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
			else
				elf_hwcap |= HWCAP_VFPD32;
		}
#endif
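
		/*
		 * (The MVFR0 A_SIMD field encodes the size of the register
		 * bank: 1 means 16 double-precision registers, 2 means 32;
		 * hence the == 1 test above.)
		 */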
		/*
		 * Check for the presence of the Advanced SIMD
		 * load/store instructions, integer and single
		 * precision floating point operations. Only check
		 * for NEON if the hardware has the MVFR registers.
		 */
		if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
#ifdef CONFIG_NEON
			if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
				elf_hwcap |= HWCAP_NEON;
#endif
#ifdef CONFIG_VFPv3
			if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
				elf_hwcap |= HWCAP_VFPv4;
#endif
		}
	}
	return 0;
}

late_initcall(vfp_init);