[ARM] 4111/1: Allow VFP to work with thread migration on SMP
author    Catalin Marinas <catalin.marinas@arm.com>
          Wed, 24 Jan 2007 17:47:08 +0000 (18:47 +0100)
committer Russell King <rmk+kernel@arm.linux.org.uk>
          Thu, 25 Jan 2007 16:35:29 +0000 (16:35 +0000)
The current lazy saving of the VFP registers is no longer possible
with thread migration on SMP. This patch implements a per-CPU
vfp-state pointer and the saving of the VFP registers at every context
switch. Restoring the registers is still performed lazily.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
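
In outline, the patch turns the single last_VFP_context pointer into a per-CPU
array and splits the lazy scheme: saving becomes eager at every context switch,
while restoring stays lazy via the disabled-VFP trap. The sketch below is a
minimal stand-alone C model of the switch-time bookkeeping, not kernel code:
read_fpexc()/write_fpexc() and vfp_thread_switch() are hypothetical stand-ins
for the fmrx/fmxr accessors and the thread notifier changed by this patch.

/* Simplified model of the per-CPU lazy VFP bookkeeping in this patch.
 * NR_CPUS, FPEXC_ENABLE and the pointer array mirror the diff below;
 * the extern helpers are placeholders for the hardware accessors.
 */
#define NR_CPUS       4
#define FPEXC_ENABLE  (1u << 30)

struct vfp_state {
	unsigned int cpu;	/* CPU that last loaded this state */
	/* ... register dump ... */
};

static struct vfp_state *last_VFP_context[NR_CPUS];

extern unsigned int read_fpexc(void);
extern void write_fpexc(unsigned int fpexc);
extern void vfp_save_state(struct vfp_state *st, unsigned int fpexc);

/* Called on every context switch, before 'next' starts running. */
static void vfp_thread_switch(struct vfp_state *next, unsigned int cpu)
{
	unsigned int fpexc = read_fpexc();

	/* Save eagerly: the outgoing thread may migrate, so its VFP
	 * registers must be in memory before another CPU reloads them. */
	if ((fpexc & FPEXC_ENABLE) && last_VFP_context[cpu]) {
		vfp_save_state(last_VFP_context[cpu], fpexc);
		last_VFP_context[cpu]->cpu = cpu;
	}

	/* If 'next' last ran on a different CPU, this CPU's VFP registers
	 * hold stale data; force a lazy reload on the next VFP use. */
	if (next->cpu != cpu)
		last_VFP_context[cpu] = NULL;

	/* Disable VFP so the first VFP instruction traps and the state
	 * is restored lazily in the exception entry path. */
	write_fpexc(fpexc & ~FPEXC_ENABLE);
}

The eager save is what makes migration safe: once the registers are in memory,
any CPU can reload them, and clearing last_VFP_context[cpu] for a migrated
thread guarantees that the reload actually happens instead of trusting whatever
the hardware registers still contain.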
arch/arm/vfp/entry.S
arch/arm/vfp/vfp.h
arch/arm/vfp/vfphw.S
arch/arm/vfp/vfpmodule.c
include/asm-arm/fpstate.h

diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index 7b595547c1c80c9a0333a58a75cc00283d3c8fb9..ca2a5ad19ea6a65ce9ee1c871a5b06fdcc9fab86 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -25,6 +25,7 @@
 do_vfp:
        enable_irq
        ldr     r4, .LCvfp
+       ldr     r11, [r10, #TI_CPU]     @ CPU number
        add     r10, r10, #TI_VFPSTATE  @ r10 = workspace
        ldr     pc, [r4]                @ call VFP entry point
 
diff --git a/arch/arm/vfp/vfp.h b/arch/arm/vfp/vfp.h
index f2797896e6d5dbcc488f5c9b0be9f1ce497d7843..54a2ad6d9ca25a314514b6e99543c392a37ddde9 100644
--- a/arch/arm/vfp/vfp.h
+++ b/arch/arm/vfp/vfp.h
@@ -370,3 +370,7 @@ struct op {
        u32 (* const fn)(int dd, int dn, int dm, u32 fpscr);
        u32 flags;
 };
+
+#ifdef CONFIG_SMP
+extern void vfp_save_state(void *location, u32 fpexc);
+#endif
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index e51e6679c402b39b461e16a2a0de65ca3326e789..d4b7b229631d5b3fe9db079a3aed284f656beadf 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -65,6 +65,7 @@
 @  r2  = faulted PC+4
 @  r9  = successful return
 @  r10 = vfp_state union
+@  r11 = CPU number
 @  lr  = failure return
 
        .globl  vfp_support_entry
@@ -79,7 +80,7 @@ vfp_support_entry:
        DBGSTR1 "enable %x", r10
        ldr     r3, last_VFP_context_address
        orr     r1, r1, #FPEXC_ENABLE   @ user FPEXC has the enable bit set
-       ldr     r4, [r3]                @ last_VFP_context pointer
+       ldr     r4, [r3, r11, lsl #2]   @ last_VFP_context pointer
        bic     r5, r1, #FPEXC_EXCEPTION @ make sure exceptions are disabled
        cmp     r4, r10
        beq     check_for_exception     @ we are returning to the same
@@ -91,7 +92,9 @@ vfp_support_entry:
                                        @ exceptions, so we can get at the
                                        @ rest of it
 
+#ifndef CONFIG_SMP
        @ Save out the current registers to the old thread state
+       @ Not needed on SMP since the save is done at every context switch
 
        DBGSTR1 "save old state %p", r4
        cmp     r4, #0
@@ -105,10 +108,11 @@ vfp_support_entry:
        stmia   r4, {r1, r5, r6, r8}    @ save FPEXC, FPSCR, FPINST, FPINST2
                                        @ and point r4 at the word at the
                                        @ start of the register dump
+#endif
 
 no_old_VFP_process:
        DBGSTR1 "load state %p", r10
-       str     r10, [r3]               @ update the last_VFP_context pointer
+       str     r10, [r3, r11, lsl #2]  @ update the last_VFP_context pointer
                                        @ Load the saved state back into the VFP
        VFPFLDMIA r10                   @ reload the working registers while
                                        @ FPEXC is in a safe state
@@ -162,6 +166,24 @@ process_exception:
                                        @ required. If not, the user code will
                                        @ retry the faulted instruction
 
+#ifdef CONFIG_SMP
+       .globl  vfp_save_state
+       .type   vfp_save_state, %function
+vfp_save_state:
+       @ Save the current VFP state
+       @ r0 - save location
+       @ r1 - FPEXC
+       DBGSTR1 "save VFP state %p", r0
+       VFPFMRX r2, FPSCR               @ current status
+       VFPFMRX r3, FPINST              @ FPINST (always there, rev0 onwards)
+       tst     r1, #FPEXC_FPV2         @ is there an FPINST2 to read?
+       VFPFMRX r12, FPINST2, NE        @ FPINST2 if needed - avoids reading
+                                       @ nonexistent reg on rev0
+       VFPFSTMIA r0                    @ save the working registers
+       stmia   r0, {r1, r2, r3, r12}   @ save FPEXC, FPSCR, FPINST, FPINST2
+       mov     pc, lr
+#endif
+
 last_VFP_context_address:
        .word   last_VFP_context
 
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 490d9d18a7d1c166dbef16e988050ec870e406a8..f1e5951dc72188ce86c9ae880ccbef79621ae327 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -28,7 +28,7 @@ void vfp_testing_entry(void);
 void vfp_support_entry(void);
 
 void (*vfp_vector)(void) = vfp_testing_entry;
-union vfp_state *last_VFP_context;
+union vfp_state *last_VFP_context[NR_CPUS];
 
 /*
  * Dual-use variable.
@@ -41,13 +41,35 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 {
        struct thread_info *thread = v;
        union vfp_state *vfp;
+       __u32 cpu = thread->cpu;
 
        if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
+               u32 fpexc = fmrx(FPEXC);
+
+#ifdef CONFIG_SMP
+               /*
+                * On SMP, if VFP is enabled, save the old state in
+                * case the thread migrates to a different CPU. The
+                * restoring is done lazily.
+                */
+               if ((fpexc & FPEXC_ENABLE) && last_VFP_context[cpu]) {
+                       vfp_save_state(last_VFP_context[cpu], fpexc);
+                       last_VFP_context[cpu]->hard.cpu = cpu;
+               }
+               /*
+                * Thread migration, just force the reloading of the
+                * state on the new CPU in case the VFP registers
+                * contain stale data.
+                */
+               if (thread->vfpstate.hard.cpu != cpu)
+                       last_VFP_context[cpu] = NULL;
+#endif
+
                /*
                 * Always disable VFP so we can lazily save/restore the
                 * old state.
                 */
-               fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_ENABLE);
+               fmxr(FPEXC, fpexc & ~FPEXC_ENABLE);
                return NOTIFY_DONE;
        }
 
@@ -68,8 +90,8 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
        }
 
        /* flush and release case: Per-thread VFP cleanup. */
-       if (last_VFP_context == vfp)
-               last_VFP_context = NULL;
+       if (last_VFP_context[cpu] == vfp)
+               last_VFP_context[cpu] = NULL;
 
        return NOTIFY_DONE;
 }
diff --git a/include/asm-arm/fpstate.h b/include/asm-arm/fpstate.h
index 6af4e6bd1290c261dcba28096949fe6262145c8a..f31cda5a55eeb04d7031c227ec7b1071c77338fd 100644
--- a/include/asm-arm/fpstate.h
+++ b/include/asm-arm/fpstate.h
@@ -35,6 +35,9 @@ struct vfp_hard_struct {
         */
        __u32 fpinst;
        __u32 fpinst2;
+#ifdef CONFIG_SMP
+       __u32 cpu;
+#endif
 };
 
 union vfp_state {