#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

#define ATOMIC_INIT(i)		( (atomic_t) { (i) } )
#define ATOMIC64_INIT(i)	( (atomic64_t) { (i) } )

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic64_read(v)	(*(volatile long *)&(v)->counter)

#define atomic_set(v,i)		((v)->counter = (i))
#define atomic64_set(v,i)	((v)->counter = (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"		/* load-locked:  temp = v->counter */
	"	addl %0,%2,%0\n"	/* temp += i */
	"	stl_c %0,%1\n"		/* store-conditional; temp = 0 on failure */
	"	beq %0,2f\n"		/* store failed: retry via out-of-line branch */
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}
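
/*
 * Illustrative sketch (not part of this header): a counter that is
 * safe against concurrent updaters without a lock.
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	atomic_add(1, &nr_events);	// on any CPU
 *	n = atomic_read(&nr_events);
 *
 * ("nr_events" and "n" are made-up names for illustration only.)
 */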
46 | ||
47 | static __inline__ void atomic64_add(long i, atomic64_t * v) | |
48 | { | |
49 | unsigned long temp; | |
50 | __asm__ __volatile__( | |
51 | "1: ldq_l %0,%1\n" | |
52 | " addq %0,%2,%0\n" | |
53 | " stq_c %0,%1\n" | |
54 | " beq %0,2f\n" | |
55 | ".subsection 2\n" | |
56 | "2: br 1b\n" | |
57 | ".previous" | |
58 | :"=&r" (temp), "=m" (v->counter) | |
59 | :"Ir" (i), "m" (v->counter)); | |
60 | } | |
61 | ||
62 | static __inline__ void atomic_sub(int i, atomic_t * v) | |
63 | { | |
64 | unsigned long temp; | |
65 | __asm__ __volatile__( | |
66 | "1: ldl_l %0,%1\n" | |
67 | " subl %0,%2,%0\n" | |
68 | " stl_c %0,%1\n" | |
69 | " beq %0,2f\n" | |
70 | ".subsection 2\n" | |
71 | "2: br 1b\n" | |
72 | ".previous" | |
73 | :"=&r" (temp), "=m" (v->counter) | |
74 | :"Ir" (i), "m" (v->counter)); | |
75 | } | |
76 | ||
77 | static __inline__ void atomic64_sub(long i, atomic64_t * v) | |
78 | { | |
79 | unsigned long temp; | |
80 | __asm__ __volatile__( | |
81 | "1: ldq_l %0,%1\n" | |
82 | " subq %0,%2,%0\n" | |
83 | " stq_c %0,%1\n" | |
84 | " beq %0,2f\n" | |
85 | ".subsection 2\n" | |
86 | "2: br 1b\n" | |
87 | ".previous" | |
88 | :"=&r" (temp), "=m" (v->counter) | |
89 | :"Ir" (i), "m" (v->counter)); | |
90 | } | |
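
/*
 * None of the void-returning operations above imply memory barriers
 * on Alpha; callers needing ordering must add barriers explicitly
 * (see the smp_mb__* macros at the bottom of this file).
 */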
91 | ||
92 | ||
93 | /* | |
94 | * Same as above, but return the result value | |
95 | */ | |
26a6e661 | 96 | static inline int atomic_add_return(int i, atomic_t *v) |
1da177e4 LT |
97 | { |
98 | long temp, result; | |
d475f3f4 | 99 | smp_mb(); |
1da177e4 LT |
100 | __asm__ __volatile__( |
101 | "1: ldl_l %0,%1\n" | |
102 | " addl %0,%3,%2\n" | |
103 | " addl %0,%3,%0\n" | |
104 | " stl_c %0,%1\n" | |
105 | " beq %0,2f\n" | |
1da177e4 LT |
106 | ".subsection 2\n" |
107 | "2: br 1b\n" | |
108 | ".previous" | |
109 | :"=&r" (temp), "=m" (v->counter), "=&r" (result) | |
110 | :"Ir" (i), "m" (v->counter) : "memory"); | |
d475f3f4 | 111 | smp_mb(); |
1da177e4 LT |
112 | return result; |
113 | } | |
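
/*
 * The smp_mb() before and after the ll/sc sequence is what gives the
 * value-returning atomics the full memory-barrier semantics that the
 * kernel's atomic-ops rules require on SMP.
 */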
114 | ||
1da177e4 LT |
115 | static __inline__ long atomic64_add_return(long i, atomic64_t * v) |
116 | { | |
117 | long temp, result; | |
d475f3f4 | 118 | smp_mb(); |
1da177e4 LT |
119 | __asm__ __volatile__( |
120 | "1: ldq_l %0,%1\n" | |
121 | " addq %0,%3,%2\n" | |
122 | " addq %0,%3,%0\n" | |
123 | " stq_c %0,%1\n" | |
124 | " beq %0,2f\n" | |
1da177e4 LT |
125 | ".subsection 2\n" |
126 | "2: br 1b\n" | |
127 | ".previous" | |
128 | :"=&r" (temp), "=m" (v->counter), "=&r" (result) | |
129 | :"Ir" (i), "m" (v->counter) : "memory"); | |
d475f3f4 | 130 | smp_mb(); |
1da177e4 LT |
131 | return result; |
132 | } | |
133 | ||
134 | static __inline__ long atomic_sub_return(int i, atomic_t * v) | |
135 | { | |
136 | long temp, result; | |
d475f3f4 | 137 | smp_mb(); |
1da177e4 LT |
138 | __asm__ __volatile__( |
139 | "1: ldl_l %0,%1\n" | |
140 | " subl %0,%3,%2\n" | |
141 | " subl %0,%3,%0\n" | |
142 | " stl_c %0,%1\n" | |
143 | " beq %0,2f\n" | |
1da177e4 LT |
144 | ".subsection 2\n" |
145 | "2: br 1b\n" | |
146 | ".previous" | |
147 | :"=&r" (temp), "=m" (v->counter), "=&r" (result) | |
148 | :"Ir" (i), "m" (v->counter) : "memory"); | |
d475f3f4 | 149 | smp_mb(); |
1da177e4 LT |
150 | return result; |
151 | } | |
152 | ||
153 | static __inline__ long atomic64_sub_return(long i, atomic64_t * v) | |
154 | { | |
155 | long temp, result; | |
d475f3f4 | 156 | smp_mb(); |
1da177e4 LT |
157 | __asm__ __volatile__( |
158 | "1: ldq_l %0,%1\n" | |
159 | " subq %0,%3,%2\n" | |
160 | " subq %0,%3,%0\n" | |
161 | " stq_c %0,%1\n" | |
162 | " beq %0,2f\n" | |
1da177e4 LT |
163 | ".subsection 2\n" |
164 | "2: br 1b\n" | |
165 | ".previous" | |
166 | :"=&r" (temp), "=m" (v->counter), "=&r" (result) | |
167 | :"Ir" (i), "m" (v->counter) : "memory"); | |
d475f3f4 | 168 | smp_mb(); |
1da177e4 LT |
169 | return result; |
170 | } | |
171 | ||
e96e6994 MD |
172 | #define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) |
173 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) | |
174 | ||
175 | #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) | |
ffbf670f | 176 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) |
4a6dae6d | 177 | |
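
/*
 * cmpxchg() returns the value that was in *ptr before the operation;
 * the exchange took place iff that value equals "old".  The
 * atomic*_add_unless() helpers below are the canonical retry loops
 * built on top of atomic*_cmpxchg().
 */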

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}
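
/*
 * Illustrative sketch (not part of this header): the classic "take a
 * reference unless the object is already dead" idiom.
 *
 *	if (!atomic_add_unless(&obj->refcnt, 1, 0))
 *		return NULL;	// refcnt was 0: object going away
 *
 * ("obj->refcnt" is a hypothetical field, for illustration only.)
 */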
201 | ||
8426e1f6 | 202 | |
e96e6994 MD |
203 | /** |
204 | * atomic64_add_unless - add unless the number is a given value | |
205 | * @v: pointer of type atomic64_t | |
206 | * @a: the amount to add to v... | |
207 | * @u: ...unless v is equal to u. | |
208 | * | |
209 | * Atomically adds @a to @v, so long as it was not @u. | |
210 | * Returns non-zero if @v was not @u, and zero otherwise. | |
211 | */ | |
2856f5e3 MD |
212 | static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) |
213 | { | |
214 | long c, old; | |
215 | c = atomic64_read(v); | |
216 | for (;;) { | |
217 | if (unlikely(c == (u))) | |
218 | break; | |
219 | old = atomic64_cmpxchg((v), c, c + (a)); | |
220 | if (likely(old == c)) | |
221 | break; | |
222 | c = old; | |
223 | } | |
224 | return c != (u); | |
225 | } | |
226 | ||
e96e6994 MD |
227 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) |
228 | ||
7c72aaf2 HD |
229 | #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) |
230 | #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) | |
231 | ||
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#include <asm-generic/atomic-long.h>
#endif /* _ALPHA_ATOMIC_H */