#ifndef __ASM_ARM_CMPXCHG_H
#define __ASM_ARM_CMPXCHG_H

#include <linux/irqflags.h>
#include <asm/barrier.h>

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif
25 | ||
26 | static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) | |
27 | { | |
28 | extern void __bad_xchg(volatile void *, int); | |
29 | unsigned long ret; | |
30 | #ifdef swp_is_buggy | |
31 | unsigned long flags; | |
32 | #endif | |
33 | #if __LINUX_ARM_ARCH__ >= 6 | |
34 | unsigned int tmp; | |
35 | #endif | |
36 | ||
37 | smp_mb(); | |
38 | ||
39 | switch (size) { | |
40 | #if __LINUX_ARM_ARCH__ >= 6 | |
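        /*
         * Exclusive load/store retry loop: ldrex marks the address in the
         * exclusive monitor, and strex performs the store only if the
         * monitor is still held, writing 0 to its status operand on
         * success and 1 on failure.  The teq/bne pair retries until the
         * store wins, giving an atomic swap without disabling interrupts.
         */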
        case 1:
                asm volatile("@ __xchg1\n"
                "1:     ldrexb  %0, [%3]\n"
                "       strexb  %1, %2, [%3]\n"
                "       teq     %1, #0\n"
                "       bne     1b"
                        : "=&r" (ret), "=&r" (tmp)
                        : "r" (x), "r" (ptr)
                        : "memory", "cc");
                break;
        case 4:
                asm volatile("@ __xchg4\n"
                "1:     ldrex   %0, [%3]\n"
                "       strex   %1, %2, [%3]\n"
                "       teq     %1, #0\n"
                "       bne     1b"
                        : "=&r" (ret), "=&r" (tmp)
                        : "r" (x), "r" (ptr)
                        : "memory", "cc");
                break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
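        /*
         * With interrupts disabled, the load/store pair below cannot be
         * preempted on this CPU, so the swap is atomic on UP.  Another
         * processor could still interleave, which is why SMP is rejected
         * above.
         */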
        case 1:
                raw_local_irq_save(flags);
                ret = *(volatile unsigned char *)ptr;
                *(volatile unsigned char *)ptr = x;
                raw_local_irq_restore(flags);
                break;

        case 4:
                raw_local_irq_save(flags);
                ret = *(volatile unsigned long *)ptr;
                *(volatile unsigned long *)ptr = x;
                raw_local_irq_restore(flags);
                break;
#else
        case 1:
                asm volatile("@ __xchg1\n"
                "       swpb    %0, %1, [%2]"
                        : "=&r" (ret)
                        : "r" (x), "r" (ptr)
                        : "memory", "cc");
                break;
        case 4:
                asm volatile("@ __xchg4\n"
                "       swp     %0, %1, [%2]"
                        : "=&r" (ret)
                        : "r" (x), "r" (ptr)
                        : "memory", "cc");
                break;
#endif
        default:
                __bad_xchg(ptr, size), ret = 0;
                break;
        }
        smp_mb();

        return ret;
}

#define xchg(ptr,x) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
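
/*
 * Usage sketch (illustrative only, not part of this header): xchg()
 * atomically stores the new value and returns the previous contents,
 * e.g. handing off a flag between an IRQ handler and process context
 * (names below are hypothetical):
 *
 *      static unsigned long pending;           // hypothetical flag word
 *      ...
 *      if (xchg(&pending, 0) != 0)
 *              process_pending();              // hypothetical helper
 */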
105 | ||
106 | #include <asm-generic/cmpxchg-local.h> | |
107 | ||
108 | #if __LINUX_ARM_ARCH__ < 6 | |
109 | /* min ARCH < ARMv6 */ | |
110 | ||
111 | #ifdef CONFIG_SMP | |
112 | #error "SMP is not supported on this platform" | |
113 | #endif | |
114 | ||
115 | /* | |
116 | * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make | |
117 | * them available. | |
118 | */ | |
119 | #define cmpxchg_local(ptr, o, n) \ | |
120 | ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ | |
121 | (unsigned long)(n), sizeof(*(ptr)))) | |
122 | #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) | |
123 | ||
124 | #ifndef CONFIG_SMP | |
125 | #include <asm-generic/cmpxchg.h> | |
126 | #endif | |
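
/*
 * On these UP-only pre-ARMv6 builds (SMP was rejected above), cmpxchg()
 * comes from asm-generic/cmpxchg.h, which implements the operation by
 * briefly disabling interrupts around a plain load, compare and store.
 */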
127 | ||
128 | #else /* min ARCH >= ARMv6 */ | |
129 | ||
130 | extern void __bad_cmpxchg(volatile void *ptr, int size); | |
131 | ||
132 | /* | |
133 | * cmpxchg only support 32-bits operands on ARMv6. | |
134 | */ | |
135 | ||
136 | static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | |
137 | unsigned long new, int size) | |
138 | { | |
139 | unsigned long oldval, res; | |
140 | ||
141 | switch (size) { | |
142 | #ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */ | |
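        /*
         * Byte and halfword exclusives (ldrexb/strexb, ldrexh/strexh)
         * were only introduced with ARMv6K; plain ARMv6 has just the
         * word-sized ldrex/strex, so the sub-word cases are compiled
         * out there.
         */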
        case 1:
                do {
                        asm volatile("@ __cmpxchg1\n"
                        "       ldrexb  %1, [%2]\n"
                        "       mov     %0, #0\n"
                        "       teq     %1, %3\n"
                        "       strexbeq %0, %4, [%2]\n"
                                : "=&r" (res), "=&r" (oldval)
                                : "r" (ptr), "Ir" (old), "r" (new)
                                : "memory", "cc");
                } while (res);
                break;
        case 2:
                do {
                        asm volatile("@ __cmpxchg2\n"
                        "       ldrexh  %1, [%2]\n"
                        "       mov     %0, #0\n"
                        "       teq     %1, %3\n"
                        "       strexheq %0, %4, [%2]\n"
                                : "=&r" (res), "=&r" (oldval)
                                : "r" (ptr), "Ir" (old), "r" (new)
                                : "memory", "cc");
                } while (res);
                break;
#endif
        case 4:
                do {
                        asm volatile("@ __cmpxchg4\n"
                        "       ldrex   %1, [%2]\n"
                        "       mov     %0, #0\n"
                        "       teq     %1, %3\n"
                        "       strexeq %0, %4, [%2]\n"
                                : "=&r" (res), "=&r" (oldval)
                                : "r" (ptr), "Ir" (old), "r" (new)
                                : "memory", "cc");
                } while (res);
                break;
        default:
                __bad_cmpxchg(ptr, size);
                oldval = 0;
        }

        return oldval;
}
187 | ||
188 | static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, | |
189 | unsigned long new, int size) | |
190 | { | |
191 | unsigned long ret; | |
192 | ||
193 | smp_mb(); | |
194 | ret = __cmpxchg(ptr, old, new, size); | |
195 | smp_mb(); | |
196 | ||
197 | return ret; | |
198 | } | |
199 | ||
#define cmpxchg(ptr,o,n)                                                \
        ((__typeof__(*(ptr)))__cmpxchg_mb((ptr),                        \
                                          (unsigned long)(o),           \
                                          (unsigned long)(n),           \
                                          sizeof(*(ptr))))
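
/*
 * Usage sketch (illustrative only, not part of this header): the
 * classic compare-and-swap update loop, here bumping a shared counter
 * without locks (names below are hypothetical):
 *
 *      unsigned int old, new;
 *      do {
 *              old = counter;                  // hypothetical shared word
 *              new = old + 1;
 *      } while (cmpxchg(&counter, old, new) != old);
 */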
205 | ||
206 | static inline unsigned long __cmpxchg_local(volatile void *ptr, | |
207 | unsigned long old, | |
208 | unsigned long new, int size) | |
209 | { | |
210 | unsigned long ret; | |
211 | ||
212 | switch (size) { | |
213 | #ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */ | |
214 | case 1: | |
215 | case 2: | |
216 | ret = __cmpxchg_local_generic(ptr, old, new, size); | |
217 | break; | |
218 | #endif | |
219 | default: | |
220 | ret = __cmpxchg(ptr, old, new, size); | |
221 | } | |
222 | ||
223 | return ret; | |
224 | } | |
225 | ||
226 | #define cmpxchg_local(ptr,o,n) \ | |
227 | ((__typeof__(*(ptr)))__cmpxchg_local((ptr), \ | |
228 | (unsigned long)(o), \ | |
229 | (unsigned long)(n), \ | |
230 | sizeof(*(ptr)))) | |
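
/*
 * The _local variants omit the smp_mb() pair: they are atomic only with
 * respect to the current CPU (e.g. for per-CPU data) and provide no
 * ordering guarantees against other processors.
 */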
231 | ||
3e0f5a15 WD |
232 | #define cmpxchg64(ptr, o, n) \ |
233 | ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \ | |
234 | atomic64_t, \ | |
235 | counter), \ | |
236 | (unsigned long)(o), \ | |
237 | (unsigned long)(n))) | |
238 | ||
239 | #define cmpxchg64_local(ptr, o, n) \ | |
240 | ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \ | |
241 | local64_t, \ | |
242 | a), \ | |
243 | (unsigned long)(o), \ | |
244 | (unsigned long)(n))) | |
9f97da78 DH |
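
/*
 * The container_of() trick above relies on atomic64_t and local64_t
 * wrapping a single 64-bit counter as their only payload, so a pointer
 * to the caller's 64-bit value can be treated as a pointer to that
 * member and handed to the atomic64/local64 cmpxchg routines.
 */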

#endif  /* __LINUX_ARM_ARCH__ >= 6 */

#endif  /* __ASM_ARM_CMPXCHG_H */