/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_SYSTEM_H
#define _ASM_POWERPC_SYSTEM_H

#include <linux/kernel.h>

#include <asm/hw_irq.h>
#include <asm/atomic.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *	across this point (nop on PPC).
 *
 * We have to use the sync instructions for mb(), since lwsync doesn't
 * order loads with respect to previous stores.  Lwsync is fine for
 * rmb(), though.  Note that lwsync is interpreted as sync by
 * 32-bit and older 64-bit CPUs.
 *
 * For wmb(), we use sync since wmb is used in drivers to order
 * stores to system memory with respect to writes to the device.
 * However, smp_wmb() can be a lighter-weight eieio barrier on
 * SMP since it is only used to order updates to system memory.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("lwsync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; mb(); } while (0)
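
/*
 * Illustrative use (sketch, not part of this header): a producer and a
 * consumer sharing a buffer and a 'ready' flag must pair smp_wmb()
 * with smp_rmb() so the flag can never be observed before the data:
 *
 *	producer:	buf = data; smp_wmb(); ready = 1;
 *	consumer:	if (ready) { smp_rmb(); use(buf); }
 */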

#ifdef __KERNEL__
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */

/*
 * This is a barrier which prevents following instructions from being
 * started until the value of the argument x is known.  For example, if
 * x is a variable loaded from memory, this prevents following
 * instructions from being executed until the load has been performed.
 * (No trailing semicolon in the expansion, so callers can safely use
 * data_barrier(x); as an ordinary statement.)
 */
#define data_barrier(x)	\
	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory")

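/*
 * Illustrative use (sketch; in_be32() and the register layout are
 * assumptions, not defined here): make sure a device status value has
 * actually been loaded before a following delay starts counting:
 *
 *	status = in_be32(&regs->status);
 *	data_barrier(status);
 *	udelay(10);
 */
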
struct task_struct;
struct pt_regs;

#ifdef CONFIG_DEBUGGER

extern int (*__debugger)(struct pt_regs *regs);
extern int (*__debugger_ipi)(struct pt_regs *regs);
extern int (*__debugger_bpt)(struct pt_regs *regs);
extern int (*__debugger_sstep)(struct pt_regs *regs);
extern int (*__debugger_iabr_match)(struct pt_regs *regs);
extern int (*__debugger_dabr_match)(struct pt_regs *regs);
extern int (*__debugger_fault_handler)(struct pt_regs *regs);

#define DEBUGGER_BOILERPLATE(__NAME) \
static inline int __NAME(struct pt_regs *regs) \
{ \
	if (unlikely(__ ## __NAME)) \
		return __ ## __NAME(regs); \
	return 0; \
}

DEBUGGER_BOILERPLATE(debugger)
DEBUGGER_BOILERPLATE(debugger_ipi)
DEBUGGER_BOILERPLATE(debugger_bpt)
DEBUGGER_BOILERPLATE(debugger_sstep)
DEBUGGER_BOILERPLATE(debugger_iabr_match)
DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)

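/*
 * For reference, DEBUGGER_BOILERPLATE(debugger) expands to a wrapper
 * that only calls through the hook when a debugger has installed one:
 *
 *	static inline int debugger(struct pt_regs *regs)
 *	{
 *		if (unlikely(__debugger))
 *			return __debugger(regs);
 *		return 0;
 *	}
 */
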
#ifdef CONFIG_XMON
extern void xmon_init(int enable);
#endif

#else
static inline int debugger(struct pt_regs *regs) { return 0; }
static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif

extern int set_dabr(unsigned long dabr);
extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);

#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while(0)
#define _set_L3CR(val)	do { } while(0)
#endif

extern void via_cuda_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void disable_kernel_fp(void);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern int emulate_altivec(struct pt_regs *);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
extern void cvt_df(double *from, float *to, struct thread_struct *thread);

#ifndef CONFIG_SMP
extern void discard_lazy_cpu_state(void);
#else
static inline void discard_lazy_cpu_state(void)
{
}
#endif

#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif

#ifdef CONFIG_SPE
extern void flush_spe_to_thread(struct task_struct *);
#else
static inline void flush_spe_to_thread(struct task_struct *t)
{
}
#endif

extern int call_rtas(const char *, int, int, unsigned long *, ...);
extern void cacheable_memzero(void *p, unsigned int nb);
extern void *cacheable_memcpy(void *, const void *, unsigned int);
extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern int die(const char *, struct pt_regs *, long);
extern void _exception(int, struct pt_regs *, int, unsigned long);
#ifdef CONFIG_BOOKE_WDT
extern u32 booke_wdt_enabled;
extern u32 booke_wdt_period;
#endif /* CONFIG_BOOKE_WDT */

struct device_node;
extern void note_scsi_host(struct device_node *, void *);

extern struct task_struct *__switch_to(struct task_struct *,
		struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))

struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

extern unsigned int rtas_data;
extern int mem_init_done;	/* set on boot once kmalloc can be called */
extern unsigned long memory_limit;
extern unsigned long klimit;

extern int powersave_nap;	/* set if nap mode can be used in idle loop */

/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __inline__ unsigned long
__xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}
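
/*
 * The lwarx/stwcx. pair above is a load-reserve/store-conditional
 * loop: the store fails and the loop retries if another CPU touched
 * the reservation granule in between.  Non-atomic C equivalent
 * (sketch, for reading the asm only):
 *
 *	prev = *(unsigned int *)p;
 *	*(unsigned int *)p = val;
 *	return prev;
 */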

#ifdef CONFIG_PPC64
static __inline__ unsigned long
__xchg_u64(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}
#endif

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x)							     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })

#define tas(ptr) (xchg((ptr),1))
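
/*
 * Illustrative use (sketch; lock_word is a hypothetical variable):
 * tas() makes a crude test-and-set lock by atomically swapping in 1
 * and spinning while the previous value was non-zero:
 *
 *	static volatile unsigned int lock_word;
 *
 *	while (tas(&lock_word))
 *		barrier();
 */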

/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
#define __HAVE_ARCH_CMPXCHG	1

static __inline__ unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __inline__ unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
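
/*
 * Illustrative use (sketch; v is a hypothetical shared counter): a
 * lock-free increment retries until no other CPU changed the word
 * between the read and the conditional store:
 *
 *	unsigned int old, cur = v;
 *	do {
 *		old = cur;
 *		cur = cmpxchg(&v, old, old + 1);
 *	} while (cur != old);
 */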

#ifdef CONFIG_PPC64
/*
 * We handle most unaligned accesses in hardware.  On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
 * cacheline alignment of buffers.
 */
#define NET_IP_ALIGN	0
#define NET_SKB_PAD	L1_CACHE_BYTES
#endif

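/*
 * For context (sketch of the usual driver pattern, not from this
 * header): drivers consume NET_IP_ALIGN via skb_reserve() when
 * allocating receive buffers, so defining it as 0 here removes the
 * padding on ppc64:
 *
 *	skb = dev_alloc_skb(len + NET_IP_ALIGN);
 *	skb_reserve(skb, NET_IP_ALIGN);
 */
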
#define arch_align_stack(x) (x)

/* Used in very early kernel initialization. */
extern unsigned long reloc_offset(void);
extern unsigned long add_reloc_offset(unsigned long);
extern void reloc_got2(unsigned long);

#define PTRRELOC(x)	((typeof(x)) add_reloc_offset((unsigned long)(x)))

static inline void create_instruction(unsigned long addr, unsigned int instr)
{
	unsigned int *p;
	p = (unsigned int *)addr;
	*p = instr;
	/* Push the store out of the data cache and invalidate the
	 * icache line, so the CPU fetches the new instruction rather
	 * than a stale copy. */
	asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (p));
}

/* Flags for create_branch:
 * "b"   == create_branch(addr, target, 0);
 * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
 * "bl"  == create_branch(addr, target, BRANCH_SET_LINK);
 * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
 */
#define BRANCH_SET_LINK	0x1
#define BRANCH_ABSOLUTE	0x2

static inline void create_branch(unsigned long addr,
		unsigned long target, int flags)
{
	unsigned int instruction;

	if (! (flags & BRANCH_ABSOLUTE))
		target = target - addr;

	/* Mask out the flags and target, so they don't step on each other. */
	instruction = 0x48000000 | (flags & 0x3) | (target & 0x03FFFFFC);

	create_instruction(addr, instruction);
}

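/*
 * Worked example: a relative branch 8 bytes forward encodes as
 * 0x48000000 | 0x8 = 0x48000008, i.e. "b +8"; OR-ing in
 * BRANCH_SET_LINK (0x1) gives 0x48000009, i.e. "bl +8".
 */
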
static inline void create_function_call(unsigned long addr, void * func)
{
	unsigned long func_addr;

#ifdef CONFIG_PPC64
	/*
	 * On PPC64 the function pointer actually points to the function's
	 * descriptor. The first entry in the descriptor is the address
	 * of the function text.
	 */
	func_addr = *(unsigned long *)func;
#else
	func_addr = (unsigned long)func;
#endif
	create_branch(addr, func_addr, BRANCH_SET_LINK);
}

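/*
 * Illustrative use (sketch; patch_site and my_hook are hypothetical
 * names): patch the instruction at patch_site into a "bl my_hook":
 *
 *	extern void my_hook(void);
 *	create_function_call(patch_site, my_hook);
 */
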
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_system_vtime(struct task_struct *);
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYSTEM_H */