/* atomic.h: atomic operation emulation for FR-V
 *
 * For an explanation of how atomic ops work in this arch, see:
 *   Documentation/frv/atomic-ops.txt
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#ifdef CONFIG_SMP
#error not SMP safe
#endif

#include <asm/atomic_defs.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * We do not have SMP systems, so we don't have to deal with that.
 */

#define ATOMIC_INIT(i)		{ (i) }
#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))
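/*
 * READ_ONCE()/WRITE_ONCE() force a single, complete access to ->counter
 * and stop the compiler from caching or refetching the value; on this
 * UP-only architecture that is all atomic_read()/atomic_set() need.
 */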

static inline int atomic_inc_return(atomic_t *v)
{
	return __atomic_add_return(1, &v->counter);
}

static inline int atomic_dec_return(atomic_t *v)
{
	return __atomic_sub_return(1, &v->counter);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __atomic_add_return(i, &v->counter);
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	return __atomic_sub_return(i, &v->counter);
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static inline void atomic_inc(atomic_t *v)
{
	atomic_inc_return(v);
}

static inline void atomic_dec(atomic_t *v)
{
	atomic_dec_return(v);
}

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
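
/*
 * A minimal usage sketch for the *_and_test() helpers, assuming a
 * hypothetical refcounted object (struct foo and foo_put() are
 * illustrative names, not existing API):
 *
 *	struct foo {
 *		atomic_t refcount;
 *	};
 *
 *	void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refcount))
 *			kfree(f);	// last reference dropped
 *	}
 */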

/*
 * 64-bit atomic ops
 */
typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i)	{ (i) }

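/*
 * FR-V's ldd/std instructions transfer a 64-bit doubleword through an
 * even/odd general-register pair (the "e" asm constraint) in a single
 * memory access, so a plain load/store suffices for atomic64 read/set
 * on this uniprocessor-only architecture.
 */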
static inline long long atomic64_read(const atomic64_t *v)
{
	long long counter;

	asm("ldd%I1 %M1,%0"
	    : "=e"(counter)
	    : "m"(v->counter));

	return counter;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	asm volatile("std%I0 %1,%M0"
		     : "=m"(v->counter)
		     : "e"(i));
}

static inline long long atomic64_inc_return(atomic64_t *v)
{
	return __atomic64_add_return(1, &v->counter);
}

static inline long long atomic64_dec_return(atomic64_t *v)
{
	return __atomic64_sub_return(1, &v->counter);
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __atomic64_add_return(i, &v->counter);
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	return __atomic64_sub_return(i, &v->counter);
}

static inline long long atomic64_add_negative(long long i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}

static inline void atomic64_inc(atomic64_t *v)
{
	atomic64_inc_return(v);
}

static inline void atomic64_dec(atomic64_t *v)
{
	atomic64_dec_return(v);
}

#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_inc_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

#define atomic_cmpxchg(v, old, new)	(cmpxchg(&(v)->counter, old, new))
#define atomic_xchg(v, new)		(xchg(&(v)->counter, new))
#define atomic64_cmpxchg(v, old, new)	(__cmpxchg_64(old, new, &(v)->counter))
#define atomic64_xchg(v, new)		(__xchg_64(new, &(v)->counter))

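/*
 * Standard compare-and-swap retry loop: sample the counter, then keep
 * trying to replace c with c + a until either the cmpxchg succeeds
 * (nothing changed the value in between) or the counter hits the
 * forbidden value u.  The old value is returned so callers can tell
 * whether the add happened (result != u).
 */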
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
{
	long long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + i);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

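/*
 * Decrement the counter unless that would take it below zero.  Returns
 * the new value on success, or a negative number (the failed c - 1
 * result) if the counter was already zero or negative and was left
 * untouched.
 */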
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg(v, c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

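/*
 * Template for the bitwise/arithmetic ops: each ATOMIC_OP(op) expansion
 * generates four helpers on top of the atomic_defs.h primitives --
 * atomic_fetch_op()/atomic64_fetch_op(), which return the value the
 * counter held before the operation, and atomic_op()/atomic64_op(),
 * which apply the operation and discard that result.
 */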
#define ATOMIC_OP(op)							\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	return __atomic32_fetch_##op(i, &v->counter);			\
}									\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	(void)__atomic32_fetch_##op(i, &v->counter);			\
}									\
									\
static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \
{									\
	return __atomic64_fetch_##op(i, &v->counter);			\
}									\
static inline void atomic64_##op(long long i, atomic64_t *v)		\
{									\
	(void)__atomic64_fetch_##op(i, &v->counter);			\
}

ATOMIC_OP(or)
ATOMIC_OP(and)
ATOMIC_OP(xor)
ATOMIC_OP(add)
ATOMIC_OP(sub)

#undef ATOMIC_OP
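
/*
 * Usage sketch for the generated helpers (the mask values here are
 * purely illustrative):
 *
 *	atomic_t flags = ATOMIC_INIT(0);
 *
 *	atomic_or(0x01, &flags);		   // flags is now 0x01
 *	int old = atomic_fetch_and(~0x01, &flags); // old == 0x01, flags now 0
 */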

#endif /* _ASM_ATOMIC_H */