#ifndef __SPARC64_SYSTEM_H
#define __SPARC64_SYSTEM_H

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/visasm.h>

#ifndef __ASSEMBLY__

#include <linux/irqflags.h>
#include <asm-generic/cmpxchg-local.h>

/*
 * Sparc (general) CPU types
 */
enum sparc_cpu {
	sun4		= 0x00,
	sun4c		= 0x01,
	sun4m		= 0x02,
	sun4d		= 0x03,
	sun4e		= 0x04,
	sun4u		= 0x05, /* V8 ploos ploos */
	sun_unknown	= 0x06,
	ap1000		= 0x07, /* almost a sun4m */
};

#define sparc_cpu_model sun4u

/* This cannot ever be a sun4c :) That's just history. */
#define ARCH_SUN4C 0

extern char reboot_command[];

/* These are here in an effort to more fully work around Spitfire Errata
 * #51. Essentially, if a memory barrier occurs soon after a mispredicted
 * branch, the chip can stop executing instructions until a trap occurs.
 * Therefore, if interrupts are disabled, the chip can hang forever.
 *
 * It used to be believed that the memory barrier had to be right in the
 * delay slot, but a case has been traced recently wherein the memory barrier
 * was one instruction after the branch delay slot and the chip still hung.
 * The offending sequence was the following in sym_wakeup_done() of the
 * sym53c8xx_2 driver:
 *
 *	call	sym_ccb_from_dsa, 0
 *	 movge	%icc, 0, %l0
 *	brz,pn	%o0, .LL1303
 *	 mov	%o0, %l2
 *	membar	#LoadLoad
 *
 * The branch has to be mispredicted for the bug to occur. Therefore, we put
 * the memory barrier explicitly into a "branch always, predicted taken"
 * delay slot to avoid the problem case.
 */
#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
			     " membar	" type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)

/* The kernel always executes in TSO memory model these days,
 * and furthermore most sparc64 chips implement more stringent
 * memory ordering than required by the specifications.
 */
#define mb()	membar_safe("#StoreLoad")
#define rmb()	__asm__ __volatile__("":::"memory")
#define wmb()	__asm__ __volatile__("":::"memory")

#endif

#define nop()		__asm__ __volatile__ ("nop")

#define read_barrier_depends()		do { } while(0)
#define set_mb(__var, __value) \
	do { __var = __value; membar_safe("#StoreLoad"); } while(0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	__asm__ __volatile__("":::"memory")
#define smp_rmb()	__asm__ __volatile__("":::"memory")
#define smp_wmb()	__asm__ __volatile__("":::"memory")
#endif

#define smp_read_barrier_depends()	do { } while(0)

#define flushi(addr)	__asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")

#define flushw_all()	__asm__ __volatile__("flushw")

/* Performance counter register access. */
#define read_pcr(__p)	__asm__ __volatile__("rd %%pcr, %0" : "=r" (__p))
#define write_pcr(__p)	__asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (__p))
#define read_pic(__p)	__asm__ __volatile__("rd %%pic, %0" : "=r" (__p))

/* Blackbird errata workaround. See commentary in
 * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
 * for more information.
 */
#define write_pic(__p) \
	__asm__ __volatile__("ba,pt %%xcc, 99f\n\t" \
			     " nop\n\t" \
			     ".align 64\n" \
			  "99:wr %0, 0x0, %%pic\n\t" \
			     "rd %%pic, %%g0" : : "r" (__p))
#define reset_pic()	write_pic(0)

#ifndef __ASSEMBLY__

extern void sun_do_break(void);
extern int stop_a_enabled;
extern int scons_pwroff;

extern void fault_in_user_windows(void);
extern void synchronize_user_stack(void);

extern void __flushw_user(void);
#define flushw_user() __flushw_user()

#define flush_user_windows flushw_user
#define flush_register_windows flushw_all

/* Don't hold the runqueue lock over context switch */
#define __ARCH_WANT_UNLOCKED_CTXSW
#define prepare_arch_switch(next) \
do { \
	flushw_all(); \
} while (0)

/* See what happens when you design the chip correctly?
 *
 * We tell gcc we clobber all non-fixed-usage registers except
 * for l0/l1. It will use one for 'next' and the other to hold
 * the output value of 'last'. 'next' is not referenced again
 * past the invocation of switch_to in the scheduler, so we need
 * not preserve its value. Hairy, but it lets us remove 2 loads
 * and 2 stores in this critical code path. -DaveM
 */
#define switch_to(prev, next, last) \
do {	flush_tlb_pending(); \
	save_and_clear_fpu(); \
	/* If you are tempted to conditionalize the following */ \
	/* so that ASI is only written if it changes, think again. */ \
	__asm__ __volatile__("wr %%g0, %0, %%asi" \
	: : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
	trap_block[current_thread_info()->cpu].thread = \
		task_thread_info(next); \
	__asm__ __volatile__( \
	"mov	%%g4, %%g7\n\t" \
	"stx	%%i6, [%%sp + 2047 + 0x70]\n\t" \
	"stx	%%i7, [%%sp + 2047 + 0x78]\n\t" \
	"rdpr	%%wstate, %%o5\n\t" \
	"stx	%%o6, [%%g6 + %6]\n\t" \
	"stb	%%o5, [%%g6 + %5]\n\t" \
	"rdpr	%%cwp, %%o5\n\t" \
	"stb	%%o5, [%%g6 + %8]\n\t" \
	"wrpr	%%g0, 15, %%pil\n\t" \
	"mov	%4, %%g6\n\t" \
	"ldub	[%4 + %8], %%g1\n\t" \
	"wrpr	%%g1, %%cwp\n\t" \
	"ldx	[%%g6 + %6], %%o6\n\t" \
	"ldub	[%%g6 + %5], %%o5\n\t" \
	"ldub	[%%g6 + %7], %%o7\n\t" \
	"wrpr	%%o5, 0x0, %%wstate\n\t" \
	"ldx	[%%sp + 2047 + 0x70], %%i6\n\t" \
	"ldx	[%%sp + 2047 + 0x78], %%i7\n\t" \
	"ldx	[%%g6 + %9], %%g4\n\t" \
	"wrpr	%%g0, 14, %%pil\n\t" \
	"brz,pt %%o7, switch_to_pc\n\t" \
	" mov	%%g7, %0\n\t" \
	"sethi	%%hi(ret_from_syscall), %%g1\n\t" \
	"jmpl	%%g1 + %%lo(ret_from_syscall), %%g0\n\t" \
	" nop\n\t" \
	".globl switch_to_pc\n\t" \
	"switch_to_pc:\n\t" \
	: "=&r" (last), "=r" (current), "=r" (current_thread_info_reg), \
	  "=r" (__local_per_cpu_offset) \
	: "0" (task_thread_info(next)), \
	  "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \
	  "i" (TI_CWP), "i" (TI_TASK) \
	: "cc", \
	  "g1", "g2", "g3", "g7", \
	  "l1", "l2", "l3", "l4", "l5", "l6", "l7", \
	  "i0", "i1", "i2", "i3", "i4", "i5", \
	  "o0", "o1", "o2", "o3", "o4", "o5", "o7"); \
} while(0)

static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	mov	%0, %1\n"
"1:	lduw	[%4], %2\n"
"	cas	[%4], %2, %0\n"
"	cmp	%2, %0\n"
"	bne,a,pn %%icc, 1b\n"
"	 mov	%1, %0\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	mov	%0, %1\n"
"1:	ldx	[%4], %2\n"
"	casx	[%4], %2, %0\n"
"	cmp	%2, %0\n"
"	bne,a,pn %%xcc, 1b\n"
"	 mov	%1, %0\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
				   int size)
{
	switch (size) {
	case 4:
		return xchg32(ptr, x);
	case 8:
		return xchg64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
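
/* Illustrative sketch of how a caller might use xchg() (not part of the
 * original header): the macro returns the previously stored value, so it
 * can serve as a simple test-and-set style claim. The name
 * example_try_claim is hypothetical and used only for illustration.
 */
static inline int example_try_claim(volatile unsigned int *flag)
{
	/* Only the caller that flips the flag from 0 to 1 sees zero back. */
	return xchg(flag, 1) == 0;
}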
extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));

/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	__asm__ __volatile__("cas [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	__asm__ __volatile__("casx [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n) \
({ \
	__typeof__(*(ptr)) _o_ = (o); \
	__typeof__(*(ptr)) _n_ = (n); \
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
				       (unsigned long)_n_, sizeof(*(ptr))); \
})
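
/* Illustrative sketch of the caller-side convention described above (not
 * part of the original header): success is detected by comparing the
 * returned value with the 'old' value that was passed in, and the update
 * is retried otherwise. The name example_atomic_add is hypothetical.
 */
static inline int example_atomic_add(volatile int *p, int inc)
{
	int old, new;

	do {
		old = *p;
		new = old + inc;
	} while (cmpxchg(p, old, new) != old);

	return new;
}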

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	switch (size) {
	case 4:
	case 8:	return __cmpxchg(ptr, old, new, size);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

#define cmpxchg_local(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
					     (unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) \
({ \
	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
	cmpxchg_local((ptr), (o), (n)); \
})

#endif /* !(__ASSEMBLY__) */

#define arch_align_stack(x) (x)

#endif /* !(__SPARC64_SYSTEM_H) */