Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _M68KNOMMU_SYSTEM_H |
2 | #define _M68KNOMMU_SYSTEM_H | |
3 | ||
1da177e4 LT |
4 | #include <linux/linkage.h> |
5 | #include <asm/segment.h> | |
6 | #include <asm/entry.h> | |
7 | ||
/*
 * switch_to(n) should switch tasks to task ptr, first checking that
 * ptr isn't the current task, in which case it does nothing. This
 * also clears the TS-flag if the task we switched to has used the
 * math co-processor latest.
 */
/*
 * switch_to() saves the extra registers, that are not saved
 * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
 * a0-a1. Some of these are used by schedule() and its predecessors
 * and so we might get see unexpected behaviors when a task returns
 * with unexpected register values.
 *
 * syscall stores these registers itself and none of them are used
 * by syscall after the function in the syscall has been called.
 *
 * Beware that resume now expects *next to be in d1 and the offset of
 * tss to be in a1. This saves a few instructions as we no longer have
 * to push them onto the stack and read them back right after.
 *
 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
 *
 * Changed 96/09/19 by Andreas Schwab
 * pass prev in a0, next in a1, offset of tss in d1, and whether
 * the mm structures are shared in d2 (to avoid atc flushing).
 */
/* Low-level context switch, implemented in assembly (entry.S). */
asmlinkage void resume(void);

/*
 * Hand prev/next to resume() in %a0/%a1 and collect the task that was
 * actually switched away from out of %d1 into `last`.  Every register
 * resume() may trash (d0-d5, a0-a1, flags) is listed as clobbered so
 * the compiler does not cache values across the switch.
 */
#define switch_to(prev,next,last)				\
{								\
	void *_last;						\
	__asm__ __volatile__(					\
		"movel	%1, %%a0\n\t"				\
		"movel	%2, %%a1\n\t"				\
		"jbsr	resume\n\t"				\
		"movel	%%d1, %0\n\t"				\
		: "=d" (_last)					\
		: "d" (prev), "d" (next)			\
		: "cc", "d0", "d1", "d2", "d3", "d4", "d5", "a0", "a1"); \
	(last) = _last;						\
}
48 | ||
#ifdef CONFIG_COLDFIRE
/*
 * ColdFire cores lack the andiw/oriw-to-SR forms, so the interrupt
 * priority level (IPL, bits 8-10 of %sr) is manipulated by staging the
 * status register through %d0.  %d0 is therefore a clobber in each of
 * these macros.
 */
/* Enable interrupts: clear the IPL field (mask 0xf8ff keeps all other SR bits). */
#define local_irq_enable() __asm__ __volatile__ (		\
	"move %/sr,%%d0\n\t"					\
	"andi.l #0xf8ff,%%d0\n\t"				\
	"move %%d0,%/sr\n"					\
	: /* no outputs */					\
	:							\
	: "cc", "%d0", "memory")
/* Disable interrupts: raise the IPL field to 7 (0x0700 = mask everything maskable). */
#define local_irq_disable() __asm__ __volatile__ (		\
	"move %/sr,%%d0\n\t"					\
	"ori.l #0x0700,%%d0\n\t"				\
	"move %%d0,%/sr\n"					\
	: /* no outputs */					\
	:							\
	: "cc", "%d0", "memory")
/* For spinlocks etc */
/* Save the current SR into x, then disable interrupts (IPL 7), in one sequence. */
#define local_irq_save(x) __asm__ __volatile__ (		\
	"movew %%sr,%0\n\t"					\
	"movew #0x0700,%%d0\n\t"				\
	"or.l %0,%%d0\n\t"					\
	"movew %%d0,%/sr"					\
	: "=d" (x)						\
	:							\
	: "cc", "%d0", "memory")
#else

/* portable version */ /* FIXME - see entry.h*/
/* Mask that clears the IPL bits of %sr, i.e. "all interrupts allowed". */
#define ALLOWINT 0xf8ff

/* Classic 68k can modify %sr directly with immediate and/or forms. */
#define local_irq_enable() asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory")
#define local_irq_disable() asm volatile ("oriw #0x0700,%%sr": : : "memory")
#endif
81 | ||
/* Read the whole status register into x / write x back to it. */
#define local_save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory")
#define local_irq_restore(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory")

/* For spinlocks etc */
/*
 * Generic fallback: save flags, then disable.  Only used when the
 * ColdFire block above has not already defined local_irq_save() as a
 * single asm sequence.
 */
#ifndef local_irq_save
#define local_irq_save(x) do { local_save_flags(x); local_irq_disable(); } while (0)
#endif

/*
 * True only when the IPL field is at its maximum (7).  NOTE(review):
 * a partially raised mask (IPL 1-6) reports "enabled" here -- presumably
 * intentional since the kernel only ever sets IPL 0 or 7; confirm.
 */
#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	((flags & 0x0700) == 0x0700);	\
})

/* Return from exception; clobbers sp/cc by definition of rte. */
#define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc")
98 | ||
/*
 * Force strict CPU ordering.
 * Not really required on m68k...
 *
 * These uniprocessor cores do not reorder memory accesses, so the
 * barriers only need to stop the *compiler* from reordering: an empty
 * asm with a "memory" clobber.
 */
#define nop() asm volatile ("nop"::)
#define mb() asm volatile ("" : : :"memory")
#define rmb() asm volatile ("" : : :"memory")
#define wmb() asm volatile ("" : : :"memory")
/* Assign and order: the atomicity of xchg() provides the barrier. */
#define set_mb(var, value) do { xchg(&var, value); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif

/*
 * Type-preserving front end for __xchg(): the access width is taken
 * from sizeof(*(ptr)) and the result cast back to the pointee type.
 */
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

/*
 * Oversized dummy type for the "m" asm operands below: telling GCC the
 * asm touches a large object prevents it from assuming only a single
 * word at *ptr is accessed.
 */
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
125 | ||
#ifndef CONFIG_RMW_INSNS
/*
 * Atomically exchange *ptr with x and return the old value.
 *
 * Variant for cores without the cas instruction: on this uniprocessor
 * configuration, masking interrupts around the load/store pair is
 * sufficient for atomicity.  size is the access width in bytes (1, 2
 * or 4); callers must go through the xchg() macro, which supplies
 * sizeof(*(ptr)) -- any other size falls through the switch and
 * returns an uninitialized tmp.
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	unsigned long tmp, flags;

	local_irq_save(flags);

	switch (size) {
	case 1:
		/* "=&d" (earlyclobber) keeps tmp out of the register holding x */
		__asm__ __volatile__
			("moveb %2,%0\n\t"
			 "moveb %1,%2"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("movew %2,%0\n\t"
			 "movew %1,%2"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("movel %2,%0\n\t"
			 "movel %1,%2"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	local_irq_restore(flags);
	return tmp;
}
#else
/*
 * Variant for cores with read-modify-write instructions: load the old
 * value, then retry cas until the store succeeds with no intervening
 * modification.  On success x holds the previous contents of *ptr.
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__
			("moveb %2,%0\n\t"
			 "1:\n\t"
			 "casb %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("movew %2,%0\n\t"
			 "1:\n\t"
			 "casw %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("movel %2,%0\n\t"
			 "1:\n\t"
			 "casl %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	return x;
}
#endif
188 | ||
/*
 * Atomic compare-and-exchange.  If *p still holds `old`, write `new`
 * into it; either way, return what *p contained on entry.  The caller
 * detects success by comparing the return value with `old`.
 *
 * Interrupt masking is all that is needed for atomicity on this
 * uniprocessor port.
 */
#define __HAVE_ARCH_CMPXCHG	1

static __inline__ unsigned long
cmpxchg(volatile int *p, int old, int new)
{
	unsigned long flags;
	int seen;

	local_irq_save(flags);
	seen = *p;
	if (seen == old)
		*p = new;
	local_irq_restore(flags);
	return seen;
}
208 | ||
209 | ||
#ifdef CONFIG_M68332
/*
 * Hard-reset a 68332 board: disable interrupts, poke the SIM
 * (write to 0xfffa6a), issue reset, clear %vbr, then reload the
 * initial stack pointer and entry point from vectors 0 and 1 and
 * jump there.  Never returns.
 */
#define HARD_RESET_NOW() ({		\
	local_irq_disable();		\
	asm("				\
	movew #0x0000, 0xfffa6a;	\
	reset;				\
	/*movew #0x1557, 0xfffa44;*/	\
	/*movew #0x0155, 0xfffa46;*/	\
	moveal #0, %a0;			\
	movec %a0, %vbr;		\
	moveal 0, %sp;			\
	moveal 4, %a0;			\
	jmp (%a0);			\
	");				\
})
#endif

#if defined( CONFIG_M68328 ) || defined( CONFIG_M68EZ328 ) || \
	defined (CONFIG_M68360) || defined( CONFIG_M68VZ328 )
/*
 * Hard-reset a DragonBall/68360 board: fetch the initial %sp and
 * entry point from the vector table at 0x10c00000 (after a write to
 * the 0xFFFFF300 control register) and jump to the entry point.
 * The magic addresses are board/SoC specific.  Never returns.
 */
#define HARD_RESET_NOW() ({		\
	local_irq_disable();		\
	asm("				\
	moveal #0x10c00000, %a0;	\
	moveb #0, 0xFFFFF300;		\
	moveal 0(%a0), %sp;		\
	moveal 4(%a0), %a0;		\
	jmp (%a0);			\
	");				\
})
#endif
240 | ||
#ifdef CONFIG_COLDFIRE
#if defined(CONFIG_M5272) && defined(CONFIG_NETtel)
/*
 * Need to account for broken early mask of 5272 silicon. So don't
 * jump through the original start address. Jump strait into the
 * known start of the FLASH code.
 */
#define HARD_RESET_NOW() ({		\
	asm("				\
	movew #0x2700, %sr;		\
	jmp 0xf0000400;			\
	");				\
})
#elif defined(CONFIG_NETtel) || defined(CONFIG_eLIA) || \
	defined(CONFIG_SECUREEDGEMP3) || defined(CONFIG_CLEOPATRA)
/*
 * Board-specific reset: mask all interrupts (IPL 7), poke two MMIO
 * locations (0x10000044 / 0x10000001 -- board registers; exact
 * semantics not visible here), then jump through the reset vector
 * held at 0xf0000004 in flash.  Never returns.
 */
#define HARD_RESET_NOW() ({		\
	asm("				\
	movew #0x2700, %sr;		\
	moveal #0x10000044, %a0;	\
	movel #0xffffffff, (%a0);	\
	moveal #0x10000001, %a0;	\
	moveb #0x00, (%a0);		\
	moveal #0xf0000004, %a0;	\
	moveal (%a0), %a0;		\
	jmp (%a0);			\
	");				\
})
#elif defined(CONFIG_M5272)
/*
 * Retrieve the boot address in flash using CSBR0 and CSOR0
 * find the reset vector at flash_address + 4 (e.g. 0x400)
 * remap it in the flash's current location (e.g. 0xf0000400)
 * and jump there.
 */
#define HARD_RESET_NOW() ({					\
	asm("							\
	movew #0x2700, %%sr;					\
	move.l %0+0x40,%%d0;					\
	and.l %0+0x44,%%d0;					\
	andi.l #0xfffff000,%%d0;				\
	mov.l %%d0,%%a0;					\
	or.l 4(%%a0),%%d0;					\
	mov.l %%d0,%%a0;					\
	jmp (%%a0);"						\
	: /* No output */					\
	: "o" (*(char *)MCF_MBAR) );				\
})
#elif defined(CONFIG_M528x)
/*
 * The MCF528x has a bit (SOFTRST) in memory (Reset Control Register RCR),
 * that when set, resets the MCF528x.
 */
#define HARD_RESET_NOW()		\
({					\
	unsigned char volatile *reset;	\
	asm("move.w #0x2700, %sr");	\
	reset = ((volatile unsigned char *)(MCF_IPSBAR + 0x110000)); \
	while(1)			\
		*reset |= (0x01 << 7);	\
})
#elif defined(CONFIG_M523x)
/*
 * MCF523x reset: mask interrupts, reset %sp, then set bit 7 of the
 * register at IPSBAR offset 0x40110000 to trigger the soft reset.
 */
#define HARD_RESET_NOW() ({		\
	asm("				\
	movew #0x2700, %sr;		\
	movel #0x01000000, %sp;		\
	moveal #0x40110000, %a0;	\
	moveb #0x80, (%a0);		\
	");				\
})
#elif defined(CONFIG_M520x)
/*
 * The MCF5208 has a bit (SOFTRST) in memory (Reset Control Register
 * RCR), that when set, resets the MCF5208.
 */
#define HARD_RESET_NOW()		\
({					\
	unsigned char volatile *reset;	\
	asm("move.w #0x2700, %sr");	\
	reset = ((volatile unsigned char *)(MCF_IPSBAR + 0xA0000)); \
	while(1)			\
		*reset |= 0x80;		\
})
#else
/*
 * Generic ColdFire fallback: mask interrupts and jump through the
 * reset vector at address 4.  Never returns.
 */
#define HARD_RESET_NOW() ({		\
	asm("				\
	movew #0x2700, %sr;		\
	moveal #0x4, %a0;		\
	moveal (%a0), %a0;		\
	jmp (%a0);			\
	");				\
})
#endif
#endif

/* No stack randomization on this architecture: identity mapping. */
#define arch_align_stack(x) (x)
335 | ||
336 | #endif /* _M68KNOMMU_SYSTEM_H */ |