/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

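/*
 * Worked example of the hash above (illustration only: the real
 * L1_CACHE_BYTES value depends on the kernel configuration).  Assuming
 * L1_CACHE_BYTES == 64, an atomic_t at address 0x10000 selects lock
 * (0x10000 / 64) & 3 == 0, while one at 0x10040 selects lock
 * (0x10040 / 64) & 3 == 1.  Atomics on different cachelines therefore
 * tend to take different locks, while every access to one particular
 * atomic_t always contends on the same lock.
 */
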
/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

#define atomic_set_release(v, i)	atomic_set((v), (i))

static __inline__ int atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
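/*
 * Illustrative use (example only; the field name is made up, and the
 * atomic_add_unless() wrapper is the one provided by <linux/atomic.h>
 * in this kernel generation, returning non-zero only when the add
 * actually happened):
 *
 *	if (!atomic_add_unless(&obj->refcnt, 1, 0))
 *		return NULL;
 *
 * i.e. take a reference only while the refcount has not already
 * dropped to zero.
 */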

#define ATOMIC_OP(op, c_op)						\
static __inline__ void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}									\

#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

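/*
 * The two invocations above expand into atomic_add(), atomic_add_return(),
 * atomic_fetch_add(), atomic_sub(), atomic_sub_return() and
 * atomic_fetch_sub(), each a spinlocked read-modify-write built from the
 * ATOMIC_OP* templates above.
 */
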
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_inc(v)	(atomic_add(   1,(v)))
#define atomic_dec(v)	(atomic_add(  -1,(v)))

#define atomic_inc_return(v)	(atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	{ (i) }

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)						\
static __inline__ void atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}									\

#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return ACCESS_ONCE((v)->counter);
}

#define atomic64_inc(v)		(atomic64_add(   1,(v)))
#define atomic64_dec(v)		(atomic64_add(  -1,(v)))

#define atomic64_inc_return(v)		(atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}
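/*
 * Unlike __atomic_add_unless() above, this returns a boolean result
 * rather than the old value.  Example use (illustrative only):
 * atomic64_add_unless(v, 1, 0) increments @v only if it is currently
 * non-zero, which is exactly the atomic64_inc_not_zero() helper
 * defined below.
 */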

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
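/*
 * Illustrative use (example only; the variable name is hypothetical):
 * consume one unit of a credit counter without letting it go negative:
 *
 *	if (atomic64_dec_if_positive(&credits) < 0)
 *		return -EBUSY;
 *
 * The counter is decremented only when it was still positive; a return
 * value below zero means it was already zero and was left untouched.
 */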

#endif /* !CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */