sh: Split out system.h into _32 and _64 variants.
author Paul Mundt <lethal@linux-sh.org>
Sat, 10 Nov 2007 10:46:31 +0000 (19:46 +0900)
committer Paul Mundt <lethal@linux-sh.org>
Mon, 28 Jan 2008 04:18:42 +0000 (13:18 +0900)
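
include/asm-sh/system.h carried both the SH-3/SH-4 and the SH-5 definitions.
Split the width-specific parts (switch_to(), the P2 mapping helpers) out into
system_32.h and system_64.h, and have the common header pull in the right one.
The dispatch at the tail of system.h, restating the hunk below rather than
adding anything new, is simply:

	#ifdef CONFIG_SUPERH32
	# include "system_32.h"	/* SH-3/SH-4: asm switch_to(), jump_to_P2()/back_to_P1() */
	#else
	# include "system_64.h"	/* SH-5: switch_to() via sh64_switch_to(), P2 no-ops */
	#endif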
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
include/asm-sh/system.h
include/asm-sh/system_32.h [new file with mode: 0644]
include/asm-sh/system_64.h [new file with mode: 0644]
include/asm-sh64/system.h [deleted file]

diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 288abeb5476fcf112449ac3f8d203379972a06de..0cfa96aa58443cae6e4878a02f472612ba6add21 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
 #include <asm/types.h>
 #include <asm/ptrace.h>
 
-struct task_struct *__switch_to(struct task_struct *prev,
-                               struct task_struct *next);
-
 #define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
-/*
- *     switch_to() should switch tasks to task nr n, first
- */
-
-#define switch_to(prev, next, last) do {                               \
- struct task_struct *__last;                                           \
- register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp;      \
- register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc;      \
- register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
- register unsigned long *__ts5 __asm__ ("r5") = (unsigned long *)next; \
- register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp;      \
- register unsigned long __ts7 __asm__ ("r7") = next->thread.pc;                \
- __asm__ __volatile__ (".balign 4\n\t"                                         \
-                      "stc.l   gbr, @-r15\n\t"                         \
-                      "sts.l   pr, @-r15\n\t"                          \
-                      "mov.l   r8, @-r15\n\t"                          \
-                      "mov.l   r9, @-r15\n\t"                          \
-                      "mov.l   r10, @-r15\n\t"                         \
-                      "mov.l   r11, @-r15\n\t"                         \
-                      "mov.l   r12, @-r15\n\t"                         \
-                      "mov.l   r13, @-r15\n\t"                         \
-                      "mov.l   r14, @-r15\n\t"                         \
-                      "mov.l   r15, @r1        ! save SP\n\t"          \
-                      "mov.l   @r6, r15        ! change to new stack\n\t" \
-                      "mova    1f, %0\n\t"                             \
-                      "mov.l   %0, @r2         ! save PC\n\t"          \
-                      "mov.l   2f, %0\n\t"                             \
-                      "jmp     @%0             ! call __switch_to\n\t" \
-                      " lds    r7, pr          !  with return to new PC\n\t" \
-                      ".balign 4\n"                                    \
-                      "2:\n\t"                                         \
-                      ".long   __switch_to\n"                          \
-                      "1:\n\t"                                         \
-                      "mov.l   @r15+, r14\n\t"                         \
-                      "mov.l   @r15+, r13\n\t"                         \
-                      "mov.l   @r15+, r12\n\t"                         \
-                      "mov.l   @r15+, r11\n\t"                         \
-                      "mov.l   @r15+, r10\n\t"                         \
-                      "mov.l   @r15+, r9\n\t"                          \
-                      "mov.l   @r15+, r8\n\t"                          \
-                      "lds.l   @r15+, pr\n\t"                          \
-                      "ldc.l   @r15+, gbr\n\t"                         \
-                      : "=z" (__last)                                  \
-                      : "r" (__ts1), "r" (__ts2), "r" (__ts4),         \
-                        "r" (__ts5), "r" (__ts6), "r" (__ts7)          \
-                      : "r3", "t");                                    \
-       last = __last;                                                  \
-} while (0)
 
-#ifdef CONFIG_CPU_SH4A
+#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
 #define __icbi()                       \
 {                                      \
        unsigned long __addr;           \
@@ -91,7 +40,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
  * Historically we have only done this type of barrier for the MMUCR, but
  * it's also necessary for the CCR, so we make it generic here instead.
  */
-#ifdef CONFIG_CPU_SH4A
+#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
 #define mb()           __asm__ __volatile__ ("synco": : :"memory")
 #define rmb()          mb()
 #define wmb()          __asm__ __volatile__ ("synco": : :"memory")
@@ -119,42 +68,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
 
 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
 
-/*
- * Jump to P2 area.
- * When handling TLB or caches, we need to do it from P2 area.
- */
-#define jump_to_P2()                   \
-do {                                   \
-       unsigned long __dummy;          \
-       __asm__ __volatile__(           \
-               "mov.l  1f, %0\n\t"     \
-               "or     %1, %0\n\t"     \
-               "jmp    @%0\n\t"        \
-               " nop\n\t"              \
-               ".balign 4\n"           \
-               "1:     .long 2f\n"     \
-               "2:"                    \
-               : "=&r" (__dummy)       \
-               : "r" (0x20000000));    \
-} while (0)
-
-/*
- * Back to P1 area.
- */
-#define back_to_P1()                                   \
-do {                                                   \
-       unsigned long __dummy;                          \
-       ctrl_barrier();                                 \
-       __asm__ __volatile__(                           \
-               "mov.l  1f, %0\n\t"                     \
-               "jmp    @%0\n\t"                        \
-               " nop\n\t"                              \
-               ".balign 4\n"                           \
-               "1:     .long 2f\n"                     \
-               "2:"                                    \
-               : "=&r" (__dummy));                     \
-} while (0)
-
 static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 {
        unsigned long flags, retval;
@@ -281,4 +194,10 @@ asmlinkage void bug_trap_handler(unsigned long r4, unsigned long r5,
 
 #define arch_align_stack(x) (x)
 
+#ifdef CONFIG_SUPERH32
+# include "system_32.h"
+#else
+# include "system_64.h"
+#endif
+
 #endif
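
Note: with this change the synco-based barriers above are used on SH-5 as
well as SH-4A. For reference, the usual pairing they implement, as a minimal
hypothetical producer/consumer sketch (not code from this patch):

	/* Writer: publish the payload, then the flag. */
	shared_data = value;
	wmb();			/* order the data store before the flag store */
	data_ready = 1;

	/* Reader: observe the flag, then the payload. */
	if (data_ready) {
		rmb();		/* order the flag load before the data load */
		value = shared_data;
	}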
diff --git a/include/asm-sh/system_32.h b/include/asm-sh/system_32.h
new file mode 100644
index 0000000..ad37e8d
--- /dev/null
+++ b/include/asm-sh/system_32.h
@@ -0,0 +1,97 @@
+#ifndef __ASM_SH_SYSTEM_32_H
+#define __ASM_SH_SYSTEM_32_H
+
+#include <linux/types.h>
+
+struct task_struct *__switch_to(struct task_struct *prev,
+                               struct task_struct *next);
+
+/*
+ *	switch_to(prev, next, last): switch from prev to next, storing
+ *	the previously-running task in last.
+ */
+#define switch_to(prev, next, last)                            \
+do {                                                           \
+       register u32 *__ts1 __asm__ ("r1") = &prev->thread.sp;  \
+       register u32 *__ts2 __asm__ ("r2") = &prev->thread.pc;  \
+       register u32 *__ts4 __asm__ ("r4") = (u32 *)prev;       \
+       register u32 *__ts5 __asm__ ("r5") = (u32 *)next;       \
+       register u32 *__ts6 __asm__ ("r6") = &next->thread.sp;  \
+       register u32 __ts7 __asm__ ("r7") = next->thread.pc;    \
+       struct task_struct *__last;                             \
+                                                               \
+       __asm__ __volatile__ (                                  \
+               ".balign 4\n\t"                                 \
+               "stc.l  gbr, @-r15\n\t"                         \
+               "sts.l  pr, @-r15\n\t"                          \
+               "mov.l  r8, @-r15\n\t"                          \
+               "mov.l  r9, @-r15\n\t"                          \
+               "mov.l  r10, @-r15\n\t"                         \
+               "mov.l  r11, @-r15\n\t"                         \
+               "mov.l  r12, @-r15\n\t"                         \
+               "mov.l  r13, @-r15\n\t"                         \
+               "mov.l  r14, @-r15\n\t"                         \
+               "mov.l  r15, @r1\t! save SP\n\t"                \
+               "mov.l  @r6, r15\t! change to new stack\n\t"    \
+               "mova   1f, %0\n\t"                             \
+               "mov.l  %0, @r2\t! save PC\n\t"                 \
+               "mov.l  2f, %0\n\t"                             \
+               "jmp    @%0\t! call __switch_to\n\t"            \
+               " lds   r7, pr\t!  with return to new PC\n\t"   \
+               ".balign        4\n"                            \
+               "2:\n\t"                                        \
+               ".long  __switch_to\n"                          \
+               "1:\n\t"                                        \
+               "mov.l  @r15+, r14\n\t"                         \
+               "mov.l  @r15+, r13\n\t"                         \
+               "mov.l  @r15+, r12\n\t"                         \
+               "mov.l  @r15+, r11\n\t"                         \
+               "mov.l  @r15+, r10\n\t"                         \
+               "mov.l  @r15+, r9\n\t"                          \
+               "mov.l  @r15+, r8\n\t"                          \
+               "lds.l  @r15+, pr\n\t"                          \
+               "ldc.l  @r15+, gbr\n\t"                         \
+               : "=z" (__last)                                 \
+               : "r" (__ts1), "r" (__ts2), "r" (__ts4),        \
+                 "r" (__ts5), "r" (__ts6), "r" (__ts7)         \
+               : "r3", "t");                                   \
+                                                               \
+       last = __last;                                          \
+} while (0)
+
+/*
+ * Jump to P2 area.
+ * When handling TLB or caches, we need to do it from P2 area.
+ */
+#define jump_to_P2()                   \
+do {                                   \
+       unsigned long __dummy;          \
+       __asm__ __volatile__(           \
+               "mov.l  1f, %0\n\t"     \
+               "or     %1, %0\n\t"     \
+               "jmp    @%0\n\t"        \
+               " nop\n\t"              \
+               ".balign 4\n"           \
+               "1:     .long 2f\n"     \
+               "2:"                    \
+               : "=&r" (__dummy)       \
+               : "r" (0x20000000));    \
+} while (0)
+
+/*
+ * Back to P1 area.
+ */
+#define back_to_P1()                                   \
+do {                                                   \
+       unsigned long __dummy;                          \
+       ctrl_barrier();                                 \
+       __asm__ __volatile__(                           \
+               "mov.l  1f, %0\n\t"                     \
+               "jmp    @%0\n\t"                        \
+               " nop\n\t"                              \
+               ".balign 4\n"                           \
+               "1:     .long 2f\n"                     \
+               "2:"                                    \
+               : "=&r" (__dummy));                     \
+} while (0)
+
+#endif /* __ASM_SH_SYSTEM_32_H */
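
Note: jump_to_P2()/back_to_P1() move over verbatim. They exist because cache
and TLB control registers must not be modified while fetching through the
cached P1 mapping. A hypothetical usage sketch (ctrl_outl() and CCR are
assumed from elsewhere in asm-sh, not part of this patch):

	jump_to_P2();			/* continue executing uncached: P1 | 0x20000000 */
	ctrl_outl(ccr_bits, CCR);	/* no cached ifetch while the CCR is rewritten */
	back_to_P1();			/* ctrl_barrier(), then back to the cached mapping */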
diff --git a/include/asm-sh/system_64.h b/include/asm-sh/system_64.h
new file mode 100644
index 0000000..0e466e9
--- /dev/null
+++ b/include/asm-sh/system_64.h
@@ -0,0 +1,39 @@
+#ifndef __ASM_SH_SYSTEM_64_H
+#define __ASM_SH_SYSTEM_64_H
+
+/*
+ * include/asm-sh/system_64.h
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003  Paul Mundt
+ * Copyright (C) 2004  Richard Curnow
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <asm/processor.h>
+
+/*
+ *	switch_to() for sh64: defer to sh64_switch_to(), which takes
+ *	both the task and thread structures of prev and next.
+ */
+struct task_struct *sh64_switch_to(struct task_struct *prev,
+                                  struct thread_struct *prev_thread,
+                                  struct task_struct *next,
+                                  struct thread_struct *next_thread);
+
+#define switch_to(prev,next,last)                              \
+do {                                                           \
+       if (last_task_used_math != next) {                      \
+               struct pt_regs *regs = next->thread.uregs;      \
+               if (regs) regs->sr |= SR_FD;                    \
+       }                                                       \
+       last = sh64_switch_to(prev, &prev->thread, next,        \
+                             &next->thread);                   \
+} while (0)
+
+/* No segmentation.. */
+#define jump_to_P2()   do { } while (0)
+#define back_to_P1()   do { } while (0)
+
+#endif /* __ASM_SH_SYSTEM_64_H */
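
Note: the last_task_used_math test in the sh64 switch_to() is the lazy FPU
handoff. An annotated restatement (the trap-side restore is assumed; it is
not part of this patch):

	if (last_task_used_math != next) {
		/*
		 * next's FPU state is not live in the hardware; set SR.FD in
		 * its saved SR so its first FP insn traps and the context can
		 * be reloaded lazily by the FPU-disabled handler.
		 */
		struct pt_regs *regs = next->thread.uregs;
		if (regs)
			regs->sr |= SR_FD;
	}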
diff --git a/include/asm-sh64/system.h b/include/asm-sh64/system.h
deleted file mode 100644
index be2a15f..0000000
--- a/include/asm-sh64/system.h
+++ /dev/null
@@ -1,190 +0,0 @@
-#ifndef __ASM_SH64_SYSTEM_H
-#define __ASM_SH64_SYSTEM_H
-
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * include/asm-sh64/system.h
- *
- * Copyright (C) 2000, 2001  Paolo Alberelli
- * Copyright (C) 2003  Paul Mundt
- * Copyright (C) 2004  Richard Curnow
- *
- */
-
-#include <asm/registers.h>
-#include <asm/processor.h>
-
-/*
- *     switch_to() should switch tasks to task nr n, first
- */
-
-typedef struct {
-       unsigned long seg;
-} mm_segment_t;
-
-extern struct task_struct *sh64_switch_to(struct task_struct *prev,
-                                         struct thread_struct *prev_thread,
-                                         struct task_struct *next,
-                                         struct thread_struct *next_thread);
-
-#define switch_to(prev,next,last) \
-       do {\
-               if (last_task_used_math != next) {\
-                       struct pt_regs *regs = next->thread.uregs;\
-                       if (regs) regs->sr |= SR_FD;\
-               }\
-               last = sh64_switch_to(prev, &prev->thread, next, &next->thread);\
-       } while(0)
-
-#define nop() __asm__ __volatile__ ("nop")
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-extern void __xchg_called_with_bad_pointer(void);
-
-#define mb()   __asm__ __volatile__ ("synco": : :"memory")
-#define rmb()  mb()
-#define wmb()  __asm__ __volatile__ ("synco": : :"memory")
-#define read_barrier_depends() do { } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb()       mb()
-#define smp_rmb()      rmb()
-#define smp_wmb()      wmb()
-#define smp_read_barrier_depends()     read_barrier_depends()
-#else
-#define smp_mb()       barrier()
-#define smp_rmb()      barrier()
-#define smp_wmb()      barrier()
-#define smp_read_barrier_depends()     do { } while (0)
-#endif /* CONFIG_SMP */
-
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
-
-/* Interrupt Control */
-#ifndef HARD_CLI
-#define SR_MASK_L 0x000000f0L
-#define SR_MASK_LL 0x00000000000000f0LL
-#else
-#define SR_MASK_L 0x10000000L
-#define SR_MASK_LL 0x0000000010000000LL
-#endif
-
-static __inline__ void local_irq_enable(void)
-{
-       /* cli/sti based on SR.BL */
-       unsigned long long __dummy0, __dummy1=~SR_MASK_LL;
-
-       __asm__ __volatile__("getcon    " __SR ", %0\n\t"
-                            "and       %0, %1, %0\n\t"
-                            "putcon    %0, " __SR "\n\t"
-                            : "=&r" (__dummy0)
-                            : "r" (__dummy1));
-}
-
-static __inline__ void local_irq_disable(void)
-{
-       /* cli/sti based on SR.BL */
-       unsigned long long __dummy0, __dummy1=SR_MASK_LL;
-       __asm__ __volatile__("getcon    " __SR ", %0\n\t"
-                            "or        %0, %1, %0\n\t"
-                            "putcon    %0, " __SR "\n\t"
-                            : "=&r" (__dummy0)
-                            : "r" (__dummy1));
-}
-
-#define local_save_flags(x)                                            \
-(__extension__ ({      unsigned long long __dummy=SR_MASK_LL;          \
-       __asm__ __volatile__(                                           \
-               "getcon " __SR ", %0\n\t"                               \
-               "and    %0, %1, %0"                                     \
-               : "=&r" (x)                                             \
-               : "r" (__dummy));}))
-
-#define local_irq_save(x)                                              \
-(__extension__ ({      unsigned long long __d2=SR_MASK_LL, __d1;       \
-       __asm__ __volatile__(                                           \
-               "getcon " __SR ", %1\n\t"                               \
-               "or     %1, r63, %0\n\t"                                \
-               "or     %1, %2, %1\n\t"                                 \
-               "putcon %1, " __SR "\n\t"                               \
-               "and    %0, %2, %0"                                     \
-               : "=&r" (x), "=&r" (__d1)                               \
-               : "r" (__d2));}));
-
-#define local_irq_restore(x) do {                                      \
-       if ( ((x) & SR_MASK_L) == 0 )           /* dropping to 0 ? */   \
-               local_irq_enable();             /* yes...re-enable */   \
-} while (0)
-
-#define irqs_disabled()                        \
-({                                     \
-       unsigned long flags;            \
-       local_save_flags(flags);        \
-       (flags != 0);                   \
-})
-
-static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
-{
-       unsigned long flags, retval;
-
-       local_irq_save(flags);
-       retval = *m;
-       *m = val;
-       local_irq_restore(flags);
-       return retval;
-}
-
-static inline unsigned long xchg_u8(volatile unsigned char * m, unsigned long val)
-{
-       unsigned long flags, retval;
-
-       local_irq_save(flags);
-       retval = *m;
-       *m = val & 0xff;
-       local_irq_restore(flags);
-       return retval;
-}
-
-static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-       switch (size) {
-       case 4:
-               return xchg_u32(ptr, x);
-               break;
-       case 1:
-               return xchg_u8(ptr, x);
-               break;
-       }
-       __xchg_called_with_bad_pointer();
-       return x;
-}
-
-/* XXX
- * disable hlt during certain critical i/o operations
- */
-#define HAVE_DISABLE_HLT
-void disable_hlt(void);
-void enable_hlt(void);
-
-
-#define smp_mb()        barrier()
-#define smp_rmb()       barrier()
-#define smp_wmb()       barrier()
-
-#ifdef CONFIG_SH_ALPHANUMERIC
-/* This is only used for debugging. */
-extern void print_seg(char *file,int line);
-#define PLS() print_seg(__FILE__,__LINE__)
-#else  /* CONFIG_SH_ALPHANUMERIC */
-#define PLS()
-#endif /* CONFIG_SH_ALPHANUMERIC */
-
-#define PL() printk("@ <%s,%s:%d>\n",__FILE__,__FUNCTION__,__LINE__)
-
-#define arch_align_stack(x) (x)
-
-#endif /* __ASM_SH64_SYSTEM_H */