#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

/*
 * Copyright 1999,2009 IBM Corp.
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in SMP environment.
 *
 */

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)	{ (i) }

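/*
 * __CS_LOOP implements the classic compare-and-swap retry loop: load
 * the counter, compute the new value with the given instruction
 * (e.g. "ar" to add), then try CS (Compare and Swap).  CS replaces the
 * counter only if it still holds the old value; otherwise the loop
 * branches back and retries with the freshly loaded value.
 */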
#define __CS_LOOP(ptr, op_val, op_string) ({			\
	int old_val, new_val;					\
	asm volatile(						\
		"	l	%0,%2\n"			\
		"0:	lr	%1,%0\n"			\
		op_string "	%1,%3\n"			\
		"	cs	%0,%1,%2\n"			\
		"	jl	0b"				\
		: "=&d" (old_val), "=&d" (new_val),		\
		  "=Q" (((atomic_t *)(ptr))->counter)		\
		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
		: "cc", "memory");				\
	new_val;						\
})
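
/*
 * atomic_read() and atomic_set() are plain 4-byte load (L) and store
 * (ST) instructions; on s390 these are atomic by themselves for a
 * naturally aligned counter, so no compare-and-swap loop is needed.
 */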

static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		"	st	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "ar");
}
#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add_return(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "sr");
}
#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub_return(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)

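/*
 * atomic_set_mask() ORs the given bits into the counter ("or"), while
 * atomic_clear_mask() ANDs the counter with the complement of the mask
 * ("nr"), both via the same __CS_LOOP retry sequence.
 */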
static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, ~mask, "nr");
}

static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, mask, "or");
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

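/*
 * atomic_cmpxchg() is a single CS instruction: it stores "new" only if
 * the counter still contains "old", and in either case returns the
 * value the counter held before the operation, so the caller can tell
 * whether the swap happened by comparing the result with "old".
 */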
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	asm volatile(
		"	cs	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
	return old;
}

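/*
 * atomic_add_unless() adds "a" to the counter unless it currently
 * equals "u": it keeps retrying cmpxchg until either the counter is
 * seen to equal "u" (nothing is added) or the swap succeeds.  The
 * return value is non-zero iff the add was performed.
 */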
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

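/*
 * Example (hypothetical caller, obj->refcount is an illustrative
 * atomic_t reference count): atomic_inc_not_zero() is the usual
 * building block for "take a reference only if the object is still
 * live":
 *
 *	if (!atomic_inc_not_zero(&obj->refcount))
 *		return NULL;
 */
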
#undef __CS_LOOP

#define ATOMIC64_INIT(i)  { (i) }

#ifdef CONFIG_64BIT

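/*
 * 64-bit variant of the compare-and-swap loop: the same retry pattern
 * as __CS_LOOP, but built on the 64-bit LG/LGR/CSG instructions.
 */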
#define __CSG_LOOP(ptr, op_val, op_string) ({			\
	long long old_val, new_val;				\
	asm volatile(						\
		"	lg	%0,%2\n"			\
		"0:	lgr	%1,%0\n"			\
		op_string "	%1,%3\n"			\
		"	csg	%0,%1,%2\n"			\
		"	jl	0b"				\
		: "=&d" (old_val), "=&d" (new_val),		\
		  "=Q" (((atomic_t *)(ptr))->counter)		\
		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
		: "cc", "memory");				\
	new_val;						\
})

static inline long long atomic64_read(const atomic64_t *v)
{
	long long c;

	asm volatile(
		"	lg	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	asm volatile(
		"	stg	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "agr");
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "sgr");
}

static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, ~mask, "ngr");
}

static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, mask, "ogr");
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	asm volatile(
		"	csg	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
	return old;
}

#undef __CSG_LOOP

#else /* CONFIG_64BIT */

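/*
 * On 31-bit kernels there is no 64-bit CS instruction, so atomic64_t
 * is emulated with CDS (Compare Double and Swap), which operates on an
 * even/odd register pair holding the two 32-bit halves of the value.
 */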
typedef struct {
	long long counter;
} atomic64_t;

static inline long long atomic64_read(const atomic64_t *v)
{
	register_pair rp;

	asm volatile(
		"	lm	%0,%N0,%1"
		: "=&d" (rp) : "Q" (v->counter) );
	return rp.pair;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	register_pair rp = {.pair = i};

	asm volatile(
		"	stm	%1,%N1,%0"
		: "=Q" (v->counter) : "d" (rp) );
}

static inline long long atomic64_xchg(atomic64_t *v, long long new)
{
	register_pair rp_new = {.pair = new};
	register_pair rp_old;

	asm volatile(
		"	lm	%0,%N0,%1\n"
		"0:	cds	%0,%2,%1\n"
		"	jl	0b\n"
		: "=&d" (rp_old), "=Q" (v->counter)
		: "d" (rp_new), "Q" (v->counter)
		: "cc");
	return rp_old.pair;
}
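
/*
 * CDS updates rp_old with the current counter value whenever the
 * compare fails, so the loop above retries until one swap succeeds and
 * then returns the value that was actually replaced.
 */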

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	register_pair rp_old = {.pair = old};
	register_pair rp_new = {.pair = new};

	asm volatile(
		"	cds	%0,%2,%1"
		: "+&d" (rp_old), "=Q" (v->counter)
		: "d" (rp_new), "Q" (v->counter)
		: "cc");
	return rp_old.pair;
}
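
/*
 * The remaining 31-bit operations are all built from the same
 * read/compute/cmpxchg retry pattern on top of the CDS-based
 * atomic64_cmpxchg() above.
 */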

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old + i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old - i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old | mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old & mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

#endif /* CONFIG_64BIT */

static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}
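
/*
 * atomic64_dec_if_positive() decrements the counter only if the result
 * stays non-negative: it returns the new value on success, or the
 * negative would-be result when the counter was already <= 0.
 */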
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#include <asm-generic/atomic-long.h>

#endif /* __ARCH_S390_ATOMIC__ */