/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
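
/*
 * Illustration only (not part of this header): assuming, purely for the
 * arithmetic, that L1_CACHE_BYTES is 64, an atomic_t at address 0x10040
 * picks lock slot (0x10040 / 64) & 3 == 1, and so does one at 0x10050 in
 * the same cacheline; an atomic_t at 0x100c0 picks slot (0x100c0 / 64) & 3
 * == 3.  Objects sharing a cacheline therefore serialize on a single lock,
 * while unrelated objects usually spread across the four locks.
 */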

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
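
/*
 * Illustration only (not part of this header): the generic wrappers in
 * <linux/atomic.h> are built on this primitive, roughly as
 *
 *	atomic_add_unless(v, a, u)  ->  __atomic_add_unless(v, a, u) != u
 *	atomic_inc_not_zero(v)      ->  atomic_add_unless(v, 1, 0)
 *
 * which is the usual way to take a reference only while an object's
 * refcount is still non-zero.
 */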

#define ATOMIC_OP(op, c_op)						\
static __inline__ void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}									\

#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
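
/*
 * Illustration only: ATOMIC_OPS(add, +=) above generates three helpers,
 * equivalent to
 *
 *	static __inline__ void atomic_add(int i, atomic_t *v);        - v->counter += i
 *	static __inline__ int atomic_add_return(int i, atomic_t *v);  - returns the new value
 *	static __inline__ int atomic_fetch_add(int i, atomic_t *v);   - returns the old value
 *
 * each taking the hashed spinlock (or just masking interrupts on UP)
 * around the read-modify-write.
 */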

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_inc(v)	(atomic_add(   1,(v)))
#define atomic_dec(v)	(atomic_add(  -1,(v)))

#define atomic_inc_return(v)	(atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)
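
/*
 * Illustration only (hypothetical caller, not part of this header): the
 * classic reference-counting put pattern built on atomic_dec_and_test():
 *
 *	if (atomic_dec_and_test(&obj->refcount))
 *		kfree(obj);		- last reference dropped
 *
 * Only the thread that observes the counter reach zero frees the object.
 */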

#define ATOMIC_INIT(i)	{ (i) }

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)						\
static __inline__ void atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}									\

#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

#define atomic64_inc(v)		(atomic64_add(   1,(v)))
#define atomic64_dec(v)		(atomic64_add(  -1,(v)))

#define atomic64_inc_return(v)		(atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the add happened (@v was not @u), zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
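
/*
 * Illustration only (hypothetical caller, not part of this header): a
 * countdown of available slots where going below zero must be refused:
 *
 *	if (atomic64_dec_if_positive(&pool->slots) < 0)
 *		return -EBUSY;		- no slot left, counter unchanged
 *
 * The counter is decremented only when the old value was positive, so it
 * never drops below zero.
 */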

#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */