/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
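
/*
 * Illustrative sketch of the bucketing (hypothetical addresses, and
 * assuming L1_CACHE_BYTES == 32): atomics in the same cacheline share
 * a lock, the next cacheline uses the next bucket.
 *
 *	ATOMIC_HASH((void *)0x1000)	// (0x1000/32) & 3 == 0 -> __atomic_hash[0]
 *	ATOMIC_HASH((void *)0x101c)	// same cacheline       -> __atomic_hash[0]
 *	ATOMIC_HASH((void *)0x1020)	// next cacheline       -> __atomic_hash[1]
 */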

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);


/* __xchg8/32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef CONFIG_64BIT
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif

/* optimizer better get rid of switch since size is a constant */
static __inline__ unsigned long
__xchg(unsigned long x, __volatile__ void * ptr, int size)
{
	switch(size) {
#ifdef CONFIG_64BIT
	case 8: return __xchg64(x, (unsigned long *) ptr);
#endif
	case 4: return __xchg32((int) x, (int *) ptr);
	case 1: return __xchg8((char) x, (char *) ptr);
	}
	__xchg_called_with_bad_pointer();
	return x;
}


/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could CONFIG_64BIT code use LDCD too?
**
** if (__builtin_constant_p(x) && (x == NULL))
**	if (((unsigned long)p & 0xf) == 0)
**		return __ldcw(p);
*/
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
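
/*
 * Usage sketch (illustrative only; the flag and helper below are
 * hypothetical): xchg() stores the new value and hands back the old
 * one atomically, which suits "claim once" style flags.
 *
 *	static int pending;
 *
 *	int claim_pending(void)
 *	{
 *		// old value: non-zero means somebody set it before us
 *		return xchg(&pending, 1);
 *	}
 */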


#define __HAVE_ARCH_CMPXCHG 1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);

/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch(size) {
#ifdef CONFIG_64BIT
	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						\
	({								\
		__typeof__(*(ptr)) _o_ = (o);				\
		__typeof__(*(ptr)) _n_ = (n);				\
		(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
				    (unsigned long)_n_, sizeof(*(ptr))); \
	})
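
/*
 * Usage sketch (illustrative only; the counter and helper are
 * hypothetical): the classic compare-and-swap retry loop, here used to
 * saturate a counter at a ceiling instead of letting it wrap.
 *
 *	static int usage;
 *
 *	void bump_capped(int cap)
 *	{
 *		int old, newval;
 *		do {
 *			old = usage;
 *			newval = (old < cap) ? old + 1 : cap;
 *		} while (cmpxchg(&usage, old, newval) != old);
 *	}
 */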

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
				      unsigned long old,
				      unsigned long new_, int size)
{
	switch (size) {
#ifdef CONFIG_64BIT
	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4: return __cmpxchg_u32(ptr, old, new_);
	default:
		return __cmpxchg_local_generic(ptr, old, new_, size);
	}
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
#ifdef CONFIG_64BIT
#define cmpxchg64_local(ptr, o, n)					\
	({								\
		BUILD_BUG_ON(sizeof(*(ptr)) != 8);			\
		cmpxchg_local((ptr), (o), (n));				\
	})
#else
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

/* It's possible to reduce all atomic operations to __atomic_add_return,
 * atomic_set and atomic_read (the latter is there only for consistency).
 */

static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return (*(volatile int *)&(v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}
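
/*
 * Usage sketch (illustrative only; the refcount below is hypothetical):
 * the common pattern is "take a reference only if the object is still
 * live", i.e. skip the increment when the count has already hit zero.
 *
 *	static atomic_t refs = ATOMIC_INIT(1);
 *
 *	int try_get_ref(void)
 *	{
 *		// non-zero on success, zero if refs was already 0
 *		return atomic_add_unless(&refs, 1, 0);
 *	}
 */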

#define atomic_add(i,v)	((void)(__atomic_add_return( (i),(v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-(i),(v))))
#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( (i),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-(i),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	((atomic_t) { (i) })
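
/*
 * Usage sketch (illustrative only; the counter and release_thing() are
 * hypothetical): the usual reference-count idiom built from the macros
 * above - only the last dropper sees atomic_dec_and_test() return true.
 *
 *	static atomic_t nusers = ATOMIC_INIT(1);
 *
 *	void put_thing(void)
 *	{
 *		if (atomic_dec_and_test(&nusers))
 *			release_thing();	// we were the last user
 *	}
 */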

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })

static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
	s64 ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return (*(volatile long *)&(v)->counter);
}

#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)(i)),(v))))
#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)(i)),(v))))
#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))

#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)(i)),(v)))
#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)(i)),(v)))
#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
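
/*
 * Usage sketch (illustrative only; the byte counter is hypothetical):
 * 64-bit atomics are typically used for statistics that would quickly
 * overflow a 32-bit counter, e.g. accumulating transferred bytes.
 *
 *	static atomic64_t bytes_xferred = ATOMIC64_INIT(0);
 *
 *	void account_xfer(long len)
 *	{
 *		atomic64_add(len, &bytes_xferred);
 *	}
 */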

#else /* CONFIG_64BIT */

#include <asm-generic/atomic64.h>

#endif /* !CONFIG_64BIT */

#include <asm-generic/atomic-long.h>

#endif /* _ASM_PARISC_ATOMIC_H_ */