[PATCH] Numerous fixes to kernel-doc info in source files.
include/asm-i386/atomic.h
#ifndef __ARCH_I386_ATOMIC__
#define __ARCH_I386_ATOMIC__

#include <linux/compiler.h>
#include <asm/processor.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)		(((v)->counter) = (i))

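/*
 * Example (illustrative sketch, not part of the original header): declaring
 * and initializing an atomic counter, then reading and setting it. The
 * variable name is hypothetical.
 *
 *	static atomic_t active_users = ATOMIC_INIT(0);
 *
 *	int n = atomic_read(&active_users);
 *	atomic_set(&active_users, 0);
 */
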
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "addl %1,%0"
		:"+m" (v->counter)
		:"ir" (i));
}

/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "subl %1,%0"
		:"+m" (v->counter)
		:"ir" (i));
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "subl %2,%0; sete %1"
		:"+m" (v->counter), "=qm" (c)
		:"ir" (i) : "memory");
	return c;
}

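/*
 * Example (illustrative sketch, not part of the original header): dropping
 * several references at once and freeing on the last one; obj, nr_refs and
 * free_object() are hypothetical.
 *
 *	if (atomic_sub_and_test(nr_refs, &obj->refcnt))
 *		free_object(obj);
 */
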
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "incl %0"
		:"+m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "decl %0"
		:"+m" (v->counter));
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "decl %0; sete %1"
		:"+m" (v->counter), "=qm" (c)
		: : "memory");
	return c != 0;
}

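/*
 * Example (illustrative sketch, not part of the original header): the
 * classic reference-count pattern built from atomic_inc() and
 * atomic_dec_and_test(); obj and free_object() are hypothetical.
 *
 *	atomic_inc(&obj->refcnt);
 *	...
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		free_object(obj);
 */
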
/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "incl %0; sete %1"
		:"+m" (v->counter), "=qm" (c)
		: : "memory");
	return c != 0;
}

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "addl %2,%0; sets %1"
		:"+m" (v->counter), "=qm" (c)
		:"ir" (i) : "memory");
	return c;
}

/**
 * atomic_add_return - add and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	int __i;
#ifdef CONFIG_M386
	unsigned long flags;
	if (unlikely(boot_cpu_data.x86 == 3))
		goto no_xadd;
#endif
	/* Modern 486+ processor */
	__i = i;
	__asm__ __volatile__(
		LOCK_PREFIX "xaddl %0, %1"
		:"+r" (i), "+m" (v->counter)
		: : "memory");
	return i + __i;

#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
	local_irq_save(flags);
	__i = atomic_read(v);
	atomic_set(v, i + __i);
	local_irq_restore(flags);
	return i + __i;
#endif
}

static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	return atomic_add_return(-i, v);
}

#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

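/*
 * Example (illustrative sketch, not part of the original header): an
 * open-coded compare-and-swap retry loop that atomically doubles a
 * counter. This is the same pattern atomic_add_unless() below is built on.
 *
 *	atomic_t v = ATOMIC_INIT(1);
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(&v);
 *		new = old * 2;
 *	} while (atomic_cmpxchg(&v, old, new) != old);
 */
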
/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	for (;;) {						\
		if (unlikely(c == (u)))				\
			break;					\
		old = atomic_cmpxchg((v), c, c + (a));		\
		if (likely(old == c))				\
			break;					\
		c = old;					\
	}							\
	c != (u);						\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

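/*
 * Example (illustrative sketch, not part of the original header): the usual
 * lookup-side use of atomic_inc_not_zero() - only take a reference if the
 * count has not already dropped to zero; obj is hypothetical.
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	(object is already being torn down)
 */
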
#define atomic_inc_return(v)	(atomic_add_return(1,v))
#define atomic_dec_return(v)	(atomic_sub_return(1,v))

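/*
 * Example (illustrative sketch, not part of the original header): the
 * *_return variants yield the post-operation value, e.g. for handing out
 * unique ids; next_id is hypothetical.
 *
 *	static atomic_t next_id = ATOMIC_INIT(0);
 *
 *	int id = atomic_inc_return(&next_id);	(yields 1, 2, 3, ...)
 */
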
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)), "m" (*(addr)) : "memory")

#define atomic_set_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
: : "r" (mask), "m" (*(addr)) : "memory")

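/*
 * Example (illustrative sketch, not part of the original header): atomically
 * setting and clearing a flag bit. Note that unlike the functions above,
 * these helpers operate on a plain 32-bit word rather than an atomic_t;
 * flags_word is hypothetical.
 *
 *	unsigned long flags_word = 0;
 *
 *	atomic_set_mask(0x1, &flags_word);
 *	atomic_clear_mask(0x1, &flags_word);
 */
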
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

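/*
 * Example (illustrative sketch, not part of the original header): on i386
 * these are only compiler barriers, since LOCKed instructions already
 * serialize, but portable code still pairs them with the atomic operation
 * so the ordering holds on weaker architectures; obj is hypothetical.
 *
 *	obj->dead = 1;
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->pending);
 */
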
#include <asm-generic/atomic.h>
#endif /* __ARCH_I386_ATOMIC__ */