{
#if defined(CONFIG_M523x) || defined(CONFIG_M527x)
__asm__ __volatile__ (
- "movel #0x81400100, %%d0\n\t"
+ "movel #0x81400110, %%d0\n\t"
"movec %%d0, %%CACR\n\t"
"nop\n\t"
: : : "d0" );
#endif /* CONFIG_M523x || CONFIG_M527x */
#ifdef CONFIG_M532x
__asm__ __volatile__ (
- "movel #0x81000200, %%d0\n\t"
+ "movel #0x81000210, %%d0\n\t"
"movec %%d0, %%CACR\n\t"
"nop\n\t"
: : : "d0" );
#endif /* CONFIG_M532x */
#ifdef CONFIG_COLDFIRE
+#ifdef CONFIG_COLDFIRE_SW_A7
/*
- * This is made a little more tricky on the ColdFire. There is no
- * separate kernel and user stack pointers. Need to artificially
+ * This is made a little more tricky on older ColdFires. There is no
+ * separate supervisor and user stack pointers. Need to artificially
* construct a usp in software... When doing this we need to disable
- * interrupts, otherwise bad things could happen.
+ * interrupts, otherwise bad things will happen.
*/
+.globl sw_usp
+.globl sw_ksp
+
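+/*
+ * Roughly: while the CPU is running in the kernel, %sp is the kernel
+ * stack and sw_usp holds the task's user stack pointer; sw_ksp is
+ * refreshed with the kernel stack pointer on the return-to-user path.
+ */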
.macro SAVE_ALL
move #0x2700,%sr /* disable intrs */
btst #5,%sp@(2) /* from user? */
7:
.endm
-.macro RESTORE_ALL
- btst #5,%sp@(PT_SR) /* going user? */
- bnes 8f /* no, skip */
+.macro RESTORE_USER
move #0x2700,%sr /* disable intrs */
movel sw_usp,%a0 /* get usp */
movel %sp@(PT_OFF_PC),%a0@- /* copy exception program counter */
subql #8,sw_usp /* set exception */
movel sw_usp,%sp /* restore usp */
rte
- 8:
- moveml %sp@,%d1-%d5/%a0-%a2
- lea %sp@(32),%sp /* space for 8 regs */
- movel %sp@+,%d0
- addql #4,%sp /* orig d0 */
- addl %sp@+,%sp /* stkadj */
- rte
.endm
+.macro RDUSP
+ movel sw_usp,%a2
+.endm
+
+.macro WRUSP
+ movel %a0,sw_usp
+.endm
+
+#else /* !CONFIG_COLDFIRE_SW_A7 */
/*
- * Quick exception save, use current stack only.
+ * Modern ColdFire parts have separate supervisor and user stack
+ * pointers. Simple load and restore macros for this case.
*/
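+/*
+ * (With the CACR EUSP bit set the CPU keeps separate supervisor and
+ * user a7 registers and takes exceptions on the supervisor stack
+ * automatically, so no software stack switching is needed here.)
+ */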
-.macro SAVE_LOCAL
+.macro SAVE_ALL
move #0x2700,%sr /* disable intrs */
clrl %sp@- /* stkadj */
movel %d0,%sp@- /* orig d0 */
moveml %d1-%d5/%a0-%a2,%sp@
.endm
-.macro RESTORE_LOCAL
+.macro RESTORE_USER
moveml %sp@,%d1-%d5/%a0-%a2
lea %sp@(32),%sp /* space for 8 regs */
movel %sp@+,%d0
rte
.endm
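+/*
+ * RDUSP/WRUSP hand-encode the privileged move to/from %usp instructions
+ * as raw opcode words, presumably so this assembles without requiring
+ * ISA-specific assembler options.
+ */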
+.macro RDUSP
+ /*move %usp,%a2*/
+ .word 0x4e6a
+.endm
+
+.macro WRUSP
+ /*move %a0,%usp*/
+ .word 0x4e60
+.endm
+
+#endif /* !CONFIG_COLDFIRE_SW_A7 */
+
.macro SAVE_SWITCH_STACK
lea %sp@(-24),%sp /* 6 regs */
moveml %a3-%a6/%d6-%d7,%sp@
.endm

.macro RESTORE_SWITCH_STACK
moveml %sp@,%a3-%a6/%d6-%d7
lea %sp@(24),%sp /* 6 regs */
.endm
-/*
- * Software copy of the user and kernel stack pointers... Ugh...
- * Need these to get around ColdFire not having separate kernel
- * and user stack pointers.
- */
-.globl sw_usp
-.globl sw_ksp
-
#else /* !CONFIG_COLDFIRE */
.macro RESTORE_SWITCH_STACK
moveml %sp@+,%a3-%a6/%d6-%d7
.endm
#endif /* !CONFIG_COLDFIRE */
#endif /* __ASSEMBLY__ */
#endif /* __M68KNOMMU_ENTRY_H */
#define CACR_IHLCK 0x00000800 /* Instruction cache half lock */
#define CACR_IDCM 0x00000400 /* Instruction cache inhibit */
#define CACR_ICINVA 0x00000100 /* Invalidate instr cache */
+#define CACR_EUSP 0x00000020 /* Enable separate user a7 */
#define ACR_BASE_POS 24 /* Address Base */
#define ACR_MASK_POS 16 /* Address Mask */
/* Enable data store buffer */
/* outside ACRs : No cache, precise */
/* Enable instruction+branch caches */
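+/*
+ * The MCF5407 appears to lack the EUSP feature (it selects
+ * COLDFIRE_SW_A7 instead), so CACR_EUSP is only added for the other
+ * parts covered here.
+ */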
+#if defined(CONFIG_M5407)
#define CACHE_MODE (CACR_DEC+CACR_DESB+CACR_DDCM_P+CACR_BEC+CACR_IEC)
+#else
+#define CACHE_MODE (CACR_DEC+CACR_DESB+CACR_DDCM_P+CACR_BEC+CACR_IEC+CACR_EUSP)
+#endif
#define DATA_CACHE_MODE (ACR_ENABLE+ACR_ANY+ACR_CM_WT)
movec %d0,%ACR0
movel #0x00000000,%d0 /* no other regions cached */
movec %d0,%ACR1
- movel #0x80400100,%d0 /* configure cache */
+ movel #0x80400110,%d0 /* configure cache */
movec %d0,%CACR /* enable cache */
nop
.endm
movec %d0,%ACR0
movel #0x00000000,%d0 /* no other regions cached */
movec %d0,%ACR1
- movel #0x80000200,%d0 /* setup cache mask */
+ movel #0x80000210,%d0 /* setup cache mask */
movec %d0,%CACR /* enable cache */
nop
.endm
movec %d0,%ACR0
move.l #0x00000000,%d0 /* no other regions cached */
movec %d0,%ACR1
- move.l #0x80400000,%d0 /* enable 8K instruction cache */
+ move.l #0x80400010,%d0 /* enable 8K instruction cache */
movec %d0,%CACR
nop
.endm
static inline unsigned long rdusp(void)
{
-#ifdef CONFIG_COLDFIRE
+#ifdef CONFIG_COLDFIRE_SW_A7
extern unsigned int sw_usp;
return sw_usp;
#else
- unsigned long usp;
- __asm__ __volatile__("move %/usp,%0" : "=a" (usp));
+ register unsigned long usp __asm__("a0");
+ /* move %usp,%a0 */
+ __asm__ __volatile__(".word 0x4e68" : "=a" (usp));
return usp;
#endif
}
static inline void wrusp(unsigned long usp)
{
-#ifdef CONFIG_COLDFIRE
+#ifdef CONFIG_COLDFIRE_SW_A7
extern unsigned int sw_usp;
sw_usp = usp;
#else
- __asm__ __volatile__("move %0,%/usp" : : "a" (usp));
+ register unsigned long a0 __asm__("a0") = usp;
+ /* move %a0,%usp */
+ __asm__ __volatile__(".word 0x4e60" : : "a" (a0) );
#endif
}
config NO_IOPORT
def_bool y
+config COLDFIRE_SW_A7
+ bool
+ default n
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
config M5206
bool "MCF5206"
+ select COLDFIRE_SW_A7
help
Motorola ColdFire 5206 processor support.
config M5206e
bool "MCF5206e"
+ select COLDFIRE_SW_A7
help
Motorola ColdFire 5206e processor support.
config M5249
bool "MCF5249"
+ select COLDFIRE_SW_A7
help
Motorola ColdFire 5249 processor support.
config M5272
bool "MCF5272"
+ select COLDFIRE_SW_A7
help
Motorola ColdFire 5272 processor support.
config M5307
bool "MCF5307"
+ select COLDFIRE_SW_A7
help
Motorola ColdFire 5307 processor support.
config M5407
bool "MCF5407"
+ select COLDFIRE_SW_A7
help
Motorola ColdFire 5407 processor support.
#include <asm/asm-offsets.h>
#include <asm/entry.h>
+#ifdef CONFIG_COLDFIRE_SW_A7
+/*
+ * Define software copies of the supervisor and user stack pointers.
+ */
.bss
-
sw_ksp:
.long 0
-
sw_usp:
.long 0
+#endif /* CONFIG_COLDFIRE_SW_A7 */
.text
.globl ret_from_signal
.globl sys_call_table
.globl inthandler
+.globl fasthandler
enosys:
mov.l #sys_ni_syscall,%d3
jne Lwork_to_do /* still work to do */
Lreturn:
- move #0x2700,%sr /* disable intrs */
- movel sw_usp,%a0 /* get usp */
- movel %sp@(PT_OFF_PC),%a0@- /* copy exception program counter */
- movel %sp@(PT_OFF_FORMATVEC),%a0@- /* copy exception format/vector/sr */
- moveml %sp@,%d1-%d5/%a0-%a2
- lea %sp@(32),%sp /* space for 8 regs */
- movel %sp@+,%d0
- addql #4,%sp /* orig d0 */
- addl %sp@+,%sp /* stk adj */
- addql #8,%sp /* remove exception */
- movel %sp,sw_ksp /* save ksp */
- subql #8,sw_usp /* set exception */
- movel sw_usp,%sp /* restore usp */
- rte
+ RESTORE_USER
Lwork_to_do:
movel %a0@(TI_FLAGS),%d1 /* get thread_info->flags */
*/
ENTRY(resume)
movel %a0, %d1 /* get prev thread in d1 */
-
- movel sw_usp,%d0 /* save usp */
- movel %d0,%a0@(TASK_THREAD+THREAD_USP)
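+ /* note: RDUSP returns the user stack pointer in %a2, WRUSP takes it in %a0 */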
+ RDUSP
+ movel %a2,%a0@(TASK_THREAD+THREAD_USP)
SAVE_SWITCH_STACK
movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */
RESTORE_SWITCH_STACK
movel %a1@(TASK_THREAD+THREAD_USP),%a0 /* restore thread user stack */
- movel %a0, sw_usp
+ WRUSP
rts