/* arch/parisc/include/asm/ldcw.h */
#ifndef __PARISC_LDCW_H
#define __PARISC_LDCW_H

#ifndef CONFIG_PA20
/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
   and GCC only guarantees 8-byte alignment for stack locals, we can't
   be assured of 16-byte alignment for atomic lock data even if we
   specify "__attribute__((aligned(16)))" in the type declaration.  So,
   we use a struct containing an array of four ints for the atomic lock
   type and dynamically select the 16-byte aligned int from the array
   for the semaphore. */

#define __PA_LDCW_ALIGNMENT 16
#define __ldcw_align(a) ({ \
        unsigned long __ret = (unsigned long) &(a)->lock[0]; \
        __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \
                & ~(__PA_LDCW_ALIGNMENT - 1); \
        (volatile unsigned int *) __ret; \
})
#define __LDCW "ldcw"
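
/* Illustrative sketch (an editorial addition, not part of the kernel API):
   with a lock type shaped like parisc's pre-PA2.0 arch_spinlock_t, i.e. a
   16-byte array of four ints, __ldcw_align() rounds the array base up to
   the next 16-byte boundary.  Because the base is at least 8-byte aligned
   and the array spans 16 bytes, the rounded address is guaranteed to land
   on a word inside the array:

        struct example_lock { volatile unsigned int lock[4]; } s;
        volatile unsigned int *w = __ldcw_align(&s);
        // If &s.lock[0] were 0x1008, the array would span 0x1008..0x1017,
        // and w would be 0x1010, 16-byte aligned and still within bounds.
*/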

#else /*CONFIG_PA20*/
/* From: "Jim Hull" <jim.hull of hp.com>
   I've attached a summary of the change, but basically, for PA 2.0, as
   long as the ",CO" (coherent operation) completer is specified, then the
   16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
   they only require "natural" alignment (4-byte for ldcw, 8-byte for
   ldcd). */

#define __PA_LDCW_ALIGNMENT 4
#define __ldcw_align(a) (&(a)->slock)
#define __LDCW "ldcw,co"

#endif /*!CONFIG_PA20*/

/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
   We don't explicitly expose that "*a" may be written, as reload
   fails to find a register in class R1_REGS when "a" needs to be
   reloaded when generating 64-bit PIC code.  Instead, we clobber
   memory to indicate to the compiler that the assembly code reads
   or writes to items other than those listed in the input and output
   operands.  This may pessimize the code somewhat but __ldcw is
   usually used within code blocks surrounded by memory barriers. */
#define __ldcw(a) ({ \
        unsigned __ret; \
        __asm__ __volatile__(__LDCW " 0(%1),%0" \
                : "=r" (__ret) : "r" (a) : "memory"); \
        __ret; \
})
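
/* Usage sketch (an editorial addition modeled on the parisc spinlock
   code, not a definitive implementation): ldcw atomically loads a word
   and writes zero back to it, so a free lock holds a nonzero value and
   a zero return means the lock is already owned.  A minimal acquire
   loop looks roughly like this:

        static inline void example_spin_lock(arch_spinlock_t *x)
        {
                volatile unsigned int *a = __ldcw_align(x);
                while (__ldcw(a) == 0)          // 0: lock already held
                        while (*a == 0)         // read-only spin, no stores
                                cpu_relax();
        }
*/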

#ifdef CONFIG_SMP
# define __lock_aligned __attribute__((__section__(".data..lock_aligned")))
#endif
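
/* Usage sketch (an editorial addition, mirroring how statically
   allocated lock data can use this attribute): the tag lets the linker
   script group such data in .data..lock_aligned with the alignment the
   ldcw variants above require, e.g. a hypothetical table of locks:

        static arch_spinlock_t example_locks[4] __lock_aligned;
*/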

#endif /* __PARISC_LDCW_H */