#ifndef _X86_SPINLOCK_H_
#define _X86_SPINLOCK_H_

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
typedef char _slock_t;
# define LOCK_INS_DEC "decb"
# define LOCK_INS_XCH "xchgb"
# define LOCK_INS_MOV "movb"
# define LOCK_INS_CMP "cmpb"
# define LOCK_PTR_REG "a"
#else
typedef int _slock_t;
# define LOCK_INS_DEC "decl"
# define LOCK_INS_XCH "xchgl"
# define LOCK_INS_MOV "movl"
# define LOCK_INS_CMP "cmpl"
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif
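
/*
 * Concretely: with UNLOCK_LOCK_PREFIX expanding to LOCK_PREFIX, the
 * "incb"/"incw" in __raw_spin_unlock below becomes a locked, fully
 * serializing RMW, which the PPro erratum requires; everywhere else the
 * unlock stays a plain unlocked increment, since x86 stores are not
 * reordered against each other.
 */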

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourself to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of line.
 */
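
/*
 * Worked example for the NR_CPUS < 256 layout below (low byte = head,
 * high byte = tail; values are illustrative):
 *
 *	slock = 0x0000	unlocked (head == tail == 0)
 *	CPU A: xaddw 0x0100 returns 0x0000, slock = 0x0100;
 *	       ticket 0 == head 0, so A owns the lock
 *	CPU B: xaddw 0x0100 returns 0x0100, slock = 0x0200;
 *	       ticket 1 != head 0, so B spins
 *	CPU A: unlock (incb) makes slock = 0x0201 (head = 1)
 *	CPU B: reloads head, sees 1 == ticket 1, and owns the lock
 */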
#if (NR_CPUS < 256)
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = *(volatile signed int *)(&(lock)->slock);

	return (((tmp >> 8) & 0xff) != (tmp & 0xff));
}

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = *(volatile signed int *)(&(lock)->slock);

	return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
}

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	short inc = 0x0100;

	__asm__ __volatile__ (
		LOCK_PREFIX "xaddw %w0, %1\n"
		"1:\t"
		"cmpb %h0, %b0\n\t"
		"je 2f\n\t"
		"rep ; nop\n\t"
		"movb %1, %b0\n\t"
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"
		"2:"
		:"+Q" (inc), "+m" (lock->slock)
		:
		:"memory", "cc");
}
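
/*
 * Roughly equivalent C for the asm above (an illustrative sketch only;
 * xaddw() stands for a hypothetical atomic fetch-and-add helper):
 *
 *	unsigned short old = xaddw(&lock->slock, 0x0100);
 *	unsigned char ticket = old >> 8;
 *	while ((unsigned char)lock->slock != ticket)
 *		cpu_relax();
 *
 * The "+Q" constraint keeps 'inc' in a, b, c or d so that the %h0
 * (bits 8-15, our ticket) and %b0 (bits 0-7, the head) sub-registers
 * are addressable, and "rep ; nop" is the encoding of the pause
 * instruction, which eases power and pipeline pressure while spinning.
 */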

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	int tmp;
	short new;

	asm volatile(
		"movw %2,%w0\n\t"
		"cmpb %h0,%b0\n\t"
		"jne 1f\n\t"
		"movw %w0,%w1\n\t"
		"incb %h1\n\t"
		"lock ; cmpxchgw %w1,%2\n\t"
		"1:"
		"sete %b1\n\t"
		"movzbl %b1,%0\n\t"
		:"=&a" (tmp), "=Q" (new), "+m" (lock->slock)
		:
		: "memory", "cc");

	return tmp;
}
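
/*
 * The "sete %b1" after label 1 reuses whichever instruction last set ZF:
 * the "cmpb" (ZF clear, lock busy) when we branch straight to 1, or the
 * "cmpxchgw" (ZF set only if our incremented tail was installed).  The
 * "movzbl" then widens that 0/1 into the return value.
 */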

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
		UNLOCK_LOCK_PREFIX "incb %0"
		:"+m" (lock->slock)
		:
		:"memory", "cc");
}
#else
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = *(volatile signed int *)(&(lock)->slock);

	return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
}

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = *(volatile signed int *)(&(lock)->slock);

	return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
}

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	int inc = 0x00010000;
	int tmp;

	__asm__ __volatile__ (
		"lock ; xaddl %0, %1\n"
		"movzwl %w0, %2\n\t"
		"shrl $16, %0\n\t"
		"1:\t"
		"cmpl %0, %2\n\t"
		"je 2f\n\t"
		"rep ; nop\n\t"
		"movzwl %1, %2\n\t"
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"
		"2:"
		:"+Q" (inc), "+m" (lock->slock), "=r" (tmp)
		:
		:"memory", "cc");
}
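
/*
 * Same ticket scheme widened to 16-bit head/tail: "xaddl" bumps the tail
 * in the high word and returns the old value, "shrl $16" extracts our
 * ticket from it, and the loop keeps reloading the low-word head with
 * "movzwl %1, %2" until it matches.
 */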

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	int tmp;
	int new;

	asm volatile(
		"movl %2,%0\n\t"
		"movl %0,%1\n\t"
		"roll $16, %0\n\t"
		"cmpl %0,%1\n\t"
		"jne 1f\n\t"
		"addl $0x00010000, %1\n\t"
		"lock ; cmpxchgl %1,%2\n\t"
		"1:"
		"sete %b1\n\t"
		"movzbl %b1,%0\n\t"
		:"=&a" (tmp), "=r" (new), "+m" (lock->slock)
		:
		: "memory", "cc");

	return tmp;
}
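
/*
 * The "roll $16" trick: rotating the snapshot by 16 bits swaps head and
 * tail, so the following "cmpl" sets ZF exactly when head == tail, i.e.
 * the lock is free.  Only then do we try to install tail+1 with
 * "cmpxchgl"; as in the 8-bit version, "sete" turns the final ZF into
 * the return value.
 */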

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
		UNLOCK_LOCK_PREFIX "incw %0"
		:"+m" (lock->slock)
		:
		:"memory", "cc");
}
#endif

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
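
/*
 * Concretely (RW_LOCK_BIAS is 0x01000000, from asm/rwlock.h):
 *
 *	lock == RW_LOCK_BIAS		unlocked
 *	lock == RW_LOCK_BIAS - n	held by n readers
 *	lock == 0			held by a writer
 *	lock <  0			contended: a writer holds or wants
 *					the lock while others are queued
 *
 * Readers subtract 1, writers subtract the whole bias, so the sign bit
 * flags contention for the slow paths.
 */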

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
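
/*
 * Both trylocks are optimistic: they charge the count first and back the
 * change out on failure.  Note the atomic_dec()/atomic_read() pair in
 * __raw_read_trylock() is not a single atomic operation, so it can fail
 * even if a read lock was momentarily available - acceptable, since a
 * trylock is always allowed to fail.
 */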

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif