/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_SYSTEM_H
#define _ASM_POWERPC_SYSTEM_H

#include <linux/kernel.h>
#include <linux/irqflags.h>

#include <asm/hw_irq.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *	across this point (nop on PPC).
 *
 * We have to use the sync instructions for mb(), since lwsync doesn't
 * order loads with respect to previous stores.  Lwsync is fine for
 * rmb(), though.  Note that rmb() actually uses a sync on 32-bit
 * architectures.
 *
 * For wmb(), we use sync since wmb is used in drivers to order
 * stores to system memory with respect to writes to the device.
 * However, smp_wmb() can be a lighter-weight lwsync or eieio barrier
 * on SMP since it is only used to order updates to system memory.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; mb(); } while (0)

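/*
 * Illustrative sketch, not part of the original header: a typical
 * producer/consumer pairing of the barriers above.  'buf' and 'flag'
 * are hypothetical shared variables.
 *
 *	producer:			consumer:
 *		buf = data;			while (!flag)
 *		wmb();					;
 *		flag = 1;			rmb();
 *						use(buf);
 *
 * wmb() keeps the store to buf ahead of the store to flag; rmb() keeps
 * the load of flag ahead of the loads from buf.
 */
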
#ifdef __KERNEL__
#define AT_VECTOR_SIZE_ARCH 6 /* entries in ARCH_DLINFO */
#ifdef CONFIG_SMP

#ifdef __SUBARCH_HAS_LWSYNC
#    define SMPWMB      lwsync
#else
#    define SMPWMB      eieio
#endif

#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	__asm__ __volatile__ (__stringify(SMPWMB) : : :"memory")
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */

/*
 * This is a barrier which prevents following instructions from being
 * started until the value of the argument x is known.  For example, if
 * x is a variable loaded from memory, this prevents following
 * instructions from being executed until the load has been performed.
 */
#define data_barrier(x)	\
	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");

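/*
 * Usage sketch (illustrative, not from the original file): make sure a
 * load has actually happened before a timed delay begins.
 *
 *	val = *status;		// 'status' is a hypothetical pointer
 *	data_barrier(val);	// nothing below starts until val is known
 *	udelay(100);		// the delay now begins after the load
 */
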
struct task_struct;
struct pt_regs;

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)

extern int (*__debugger)(struct pt_regs *regs);
extern int (*__debugger_ipi)(struct pt_regs *regs);
extern int (*__debugger_bpt)(struct pt_regs *regs);
extern int (*__debugger_sstep)(struct pt_regs *regs);
extern int (*__debugger_iabr_match)(struct pt_regs *regs);
extern int (*__debugger_dabr_match)(struct pt_regs *regs);
extern int (*__debugger_fault_handler)(struct pt_regs *regs);

#define DEBUGGER_BOILERPLATE(__NAME) \
static inline int __NAME(struct pt_regs *regs) \
{ \
	if (unlikely(__ ## __NAME)) \
		return __ ## __NAME(regs); \
	return 0; \
}

DEBUGGER_BOILERPLATE(debugger)
DEBUGGER_BOILERPLATE(debugger_ipi)
DEBUGGER_BOILERPLATE(debugger_bpt)
DEBUGGER_BOILERPLATE(debugger_sstep)
DEBUGGER_BOILERPLATE(debugger_iabr_match)
DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)

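/*
 * For reference, DEBUGGER_BOILERPLATE(debugger) above expands to:
 *
 *	static inline int debugger(struct pt_regs *regs)
 *	{
 *		if (unlikely(__debugger))
 *			return __debugger(regs);
 *		return 0;
 *	}
 *
 * i.e. each hook calls the registered handler if one is installed and
 * is a no-op returning 0 otherwise.
 */
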
#else
static inline int debugger(struct pt_regs *regs) { return 0; }
static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif

extern int set_dabr(unsigned long dabr);
extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);

#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while(0)
#define _set_L3CR(val)	do { } while(0)
#endif

extern void via_cuda_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void disable_kernel_fp(void);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern int emulate_altivec(struct pt_regs *);
extern void giveup_vsx(struct task_struct *);
extern void enable_kernel_spe(void);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
extern void cvt_df(double *from, float *to, struct thread_struct *thread);

#ifndef CONFIG_SMP
extern void discard_lazy_cpu_state(void);
#else
static inline void discard_lazy_cpu_state(void)
{
}
#endif

#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif

#ifdef CONFIG_VSX
extern void flush_vsx_to_thread(struct task_struct *);
#else
static inline void flush_vsx_to_thread(struct task_struct *t)
{
}
#endif

#ifdef CONFIG_SPE
extern void flush_spe_to_thread(struct task_struct *);
#else
static inline void flush_spe_to_thread(struct task_struct *t)
{
}
#endif

extern int call_rtas(const char *, int, int, unsigned long *, ...);
extern void cacheable_memzero(void *p, unsigned int nb);
extern void *cacheable_memcpy(void *, const void *, unsigned int);
extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern int die(const char *, struct pt_regs *, long);
extern void _exception(int, struct pt_regs *, int, unsigned long);
extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);

#ifdef CONFIG_BOOKE_WDT
extern u32 booke_wdt_enabled;
extern u32 booke_wdt_period;
#endif /* CONFIG_BOOKE_WDT */

struct device_node;
extern void note_scsi_host(struct device_node *, void *);

extern struct task_struct *__switch_to(struct task_struct *,
	struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))

struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);

extern unsigned int rtas_data;
extern int mem_init_done;	/* set on boot once kmalloc can be called */
extern int init_bootmem_done;	/* set on !NUMA once bootmem is available */
extern unsigned long memory_limit;
extern unsigned long klimit;

extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);

extern int powersave_nap;	/* set if nap mode can be used in idle loop */

/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __always_inline unsigned long
__xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

/*
 * Atomic exchange, "local" variant.
 *
 * Same as __xchg_u32(), but without the SMP memory barriers: the
 * exchange is only guaranteed atomic with respect to the local CPU.
 */
static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__xchg_u64(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}
#endif

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}

static __always_inline unsigned long
__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32_local(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_local(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}
#define xchg(ptr,x)							\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_,		\
				    sizeof(*(ptr)));			\
})

#define xchg_local(ptr,x)						\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg_local((ptr),			\
				    (unsigned long)_x_, sizeof(*(ptr))); \
})

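/*
 * Usage sketch (illustrative, not part of the original header):
 *
 *	unsigned long old = xchg(&flag, 1);	// 'flag' is hypothetical
 *
 * stores 1 into flag and returns its previous value as a single atomic
 * operation, ordered by the LWSYNC_ON_SMP/ISYNC_ON_SMP pair in the
 * implementations above; xchg_local() is the same minus that ordering.
 */
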
/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
#define __HAVE_ARCH_CMPXCHG	1

static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
			unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
			unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#endif

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern void __cmpxchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

static __always_inline unsigned long
__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_local(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_local(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_, sizeof(*(ptr))); \
})

#define cmpxchg_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_, sizeof(*(ptr))); \
})

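/*
 * Usage sketch (illustrative, not from the original file): the classic
 * cmpxchg retry loop, here incrementing a hypothetical shared counter.
 *
 *	unsigned int old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 *
 * The loop retries whenever another CPU changed counter between the
 * read and the compare-and-exchange.
 */
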
#ifdef CONFIG_PPC64
/*
 * We handle most unaligned accesses in hardware.  On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
 * cacheline alignment of buffers.
 */
#define NET_IP_ALIGN	0
#define NET_SKB_PAD	L1_CACHE_BYTES

#define cmpxchg64(ptr, o, n)						\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg((ptr), (o), (n));					\
})
#define cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})
#else
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif

#define arch_align_stack(x) (x)

/* Used in very early kernel initialization. */
extern unsigned long reloc_offset(void);
extern unsigned long add_reloc_offset(unsigned long);
extern void reloc_got2(unsigned long);

#define PTRRELOC(x)	((typeof(x)) add_reloc_offset((unsigned long)(x)))

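/*
 * Usage sketch (illustrative): while the kernel still runs below its
 * linked address, globals must be reached through PTRRELOC, e.g.
 *
 *	*PTRRELOC(&early_flag) = 1;	// 'early_flag' is hypothetical
 */
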
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_system_vtime(struct task_struct *);
#endif

extern struct dentry *powerpc_debugfs_root;

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYSTEM_H */