drivers: power: report battery voltage in AOSP compatible format
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / lib / spinlock_debug.c
CommitLineData
fb1c8f93
IM
1/*
2 * Copyright 2005, Red Hat, Inc., Ingo Molnar
3 * Released under the General Public License (GPL).
4 *
5 * This file contains the spinlock/rwlock implementations for
6 * DEBUG_SPINLOCK.
7 */
8
fb1c8f93 9#include <linux/spinlock.h>
bb81a09e 10#include <linux/nmi.h>
fb1c8f93 11#include <linux/interrupt.h>
9a11b49a 12#include <linux/debug_locks.h>
fb1c8f93 13#include <linux/delay.h>
8bc3bcc9 14#include <linux/export.h>
6fa3eb70
S
15#include <linux/kernel.h>
16#include <linux/aee.h>
17
18extern int InDumpAllStack;
19
fb1c8f93 20
c2f21ce2
TG
21void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
22 struct lock_class_key *key)
8a25d5de
IM
23{
24#ifdef CONFIG_DEBUG_LOCK_ALLOC
25 /*
26 * Make sure we are not reinitializing a held lock:
27 */
28 debug_check_no_locks_freed((void *)lock, sizeof(*lock));
4dfbb9d8 29 lockdep_init_map(&lock->dep_map, name, key, 0);
8a25d5de 30#endif
edc35bd7 31 lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8a25d5de
IM
32 lock->magic = SPINLOCK_MAGIC;
33 lock->owner = SPINLOCK_OWNER_INIT;
34 lock->owner_cpu = -1;
35}
36
c2f21ce2 37EXPORT_SYMBOL(__raw_spin_lock_init);
8a25d5de
IM
38
39void __rwlock_init(rwlock_t *lock, const char *name,
40 struct lock_class_key *key)
41{
42#ifdef CONFIG_DEBUG_LOCK_ALLOC
43 /*
44 * Make sure we are not reinitializing a held lock:
45 */
46 debug_check_no_locks_freed((void *)lock, sizeof(*lock));
4dfbb9d8 47 lockdep_init_map(&lock->dep_map, name, key, 0);
8a25d5de 48#endif
fb3a6bbc 49 lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
8a25d5de
IM
50 lock->magic = RWLOCK_MAGIC;
51 lock->owner = SPINLOCK_OWNER_INIT;
52 lock->owner_cpu = -1;
53}
54
55EXPORT_SYMBOL(__rwlock_init);
56
4e101b0e 57static void spin_dump(raw_spinlock_t *lock, const char *msg)
fb1c8f93 58{
fb1c8f93
IM
59 struct task_struct *owner = NULL;
60
9a11b49a
IM
61 if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
62 owner = lock->owner;
63 printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
64 msg, raw_smp_processor_id(),
ba25f9dc 65 current->comm, task_pid_nr(current));
4b068148 66 printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
6fa3eb70 67 ".owner_cpu: %d value:0x%08x\n",
9a11b49a
IM
68 lock, lock->magic,
69 owner ? owner->comm : "<none>",
ba25f9dc 70 owner ? task_pid_nr(owner) : -1,
6fa3eb70 71 lock->owner_cpu, *((unsigned int *)&lock->raw_lock));
9a11b49a 72 dump_stack();
fb1c8f93
IM
73}
74
4e101b0e
AM
75static void spin_bug(raw_spinlock_t *lock, const char *msg)
76{
6fa3eb70
S
77 char aee_str[50];
78// if (!debug_locks_off())
79// return;
80
81 spin_dump(lock, msg);
82 snprintf( aee_str, 50, "Spinlock %s :%s\n", current->comm, msg);
83 if(!strcmp(msg,"bad magic")){
84 printk("[spin lock debug] bad magic:%08x, should be %08x, may use an un-initial spin_lock or mem corrupt\n", lock->magic, SPINLOCK_MAGIC);
85 printk(">>>>>>>>>>>>>> Let's KE <<<<<<<<<<<<<<\n");
86 BUG_ON(1);
87 }
88 aee_kernel_warning_api(__FILE__, __LINE__, DB_OPT_DUMMY_DUMP | DB_OPT_FTRACE, aee_str,"spinlock debugger\n");
4e101b0e
AM
89}
90
fb1c8f93
IM
/*
 * Report a spinlock bug via spin_bug() when @cond is true.  Wrapped in
 * do { } while (0) so the macro expands to a single statement and is
 * safe inside unbraced if/else bodies (no dangling-else hazard).
 */
#define SPIN_BUG_ON(cond, lock, msg)			\
	do {						\
		if (unlikely(cond))			\
			spin_bug(lock, msg);		\
	} while (0)
92
/*
 * Sanity checks before taking @lock: it must carry the init magic, and
 * neither the current task nor the current CPU may already own it --
 * either would mean a guaranteed self-deadlock.
 */
static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}
101
c2f21ce2 102static inline void debug_spin_lock_after(raw_spinlock_t *lock)
fb1c8f93
IM
103{
104 lock->owner_cpu = raw_smp_processor_id();
105 lock->owner = current;
106}
107
/*
 * Validate a release of @lock: it must be initialized, actually held,
 * and released by the same task/CPU that acquired it.  Ownership is
 * cleared here, before the arch-level unlock in the caller.
 */
static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
118
6fa3eb70
S
/*
 * Scale the inner busy-wait loop count so one pass takes roughly 1~2
 * seconds regardless of the configured HZ.
 */
#if HZ == 100
#define LOOP_HZ 100	// temp 10
#elif HZ == 10
#define LOOP_HZ 2	// temp 2
#else
#define LOOP_HZ HZ
#endif
/* Nanoseconds.  Lockup reports are rate-limited to one per 3x this
 * interval (i.e. every ~3 seconds), see the t2 check below. */
#define WARNING_TIME 1000000000
/*
 * Slow path for do_raw_spin_lock(): spin on trylock and, when the lock
 * cannot be taken for a long time, report a suspected lockup.
 *
 * With CONFIG_MTK_MUTATION this loops forever on trylock, periodically
 * logging spin time and (once) dumping lock state, backtraces and an
 * AEE warning; reports are suppressed while an oops or MTK stack dump
 * is in progress, since printk itself may then take spinlocks.
 * Without it, upstream behavior: one timed trylock loop, a single
 * report, then fall through to a blocking arch_spin_lock().
 */
static void __spin_lock_debug(raw_spinlock_t *lock)
{
#ifdef CONFIG_MTK_MUTATION
	u64 i;
	u64 loops = loops_per_jiffy * LOOP_HZ;
	int print_once = 1;	/* heavyweight dump happens only once */
	char aee_str[50];
	unsigned long long t1,t2;	/* t1: spin start; t2: last report */
	t1 = sched_clock();
	t2 = t1;
	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_spin_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* rate-limit reports to one per ~3 seconds */
		if(sched_clock() - t2 < WARNING_TIME*3) continue;
		t2 = sched_clock();
#ifdef CONFIG_MTK_AEE_FEATURE
		if((oops_in_progress != 0) || (InDumpAllStack == 1)) continue; // in exception follow, printk maybe spinlock error
#else
		if(oops_in_progress != 0) continue; // in exception follow, printk maybe spinlock error
#endif
		/* lockup suspected: */
		printk("spin time: %llu ns(start:%llu ns, lpj:%lu, LPHZ:%d), value: 0x%08x\n", sched_clock() - t1, t1, loops_per_jiffy, (int)LOOP_HZ, *((unsigned int *)&lock->raw_lock));
		if (print_once) {
			print_once = 0;
			spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
			trigger_all_cpu_backtrace();
#endif
			debug_show_all_locks();
			snprintf( aee_str, 50, "Spinlock lockup:%s\n", current->comm);
			aee_kernel_warning_api(__FILE__, __LINE__, DB_OPT_DUMMY_DUMP | DB_OPT_FTRACE, aee_str,"spinlock debugger\n");
		}
	}
#else //CONFIG_MTK_MUTATION
	u64 i;
	u64 loops = loops_per_jiffy * HZ;

	for (i = 0; i < loops; i++) {
		if (arch_spin_trylock(&lock->raw_lock))
			return;
		__delay(1);
	}
	/* lockup suspected: */
	spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
	trigger_all_cpu_backtrace();
#endif

	/*
	 * The trylock above was causing a livelock. Give the lower level arch
	 * specific lock code a chance to acquire the lock. We have already
	 * printed a warning/backtrace at this point. The non-debug arch
	 * specific code might actually succeed in acquiring the lock. If it is
	 * not successful, the end-result is the same - there is no forward
	 * progress.
	 */
	arch_spin_lock(&lock->raw_lock);
#endif //CONFIG_MTK_MUTATION
}
192
/*
 * Acquire @lock with debug checking.  Fast path is a single trylock;
 * on contention fall into the __spin_lock_debug() watchdog slow path.
 * Ownership is recorded only after the lock is actually held.
 */
void do_raw_spin_lock(raw_spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
		__spin_lock_debug(lock);
	debug_spin_lock_after(lock);
}
200
/*
 * Try once to acquire @lock without spinning.  Returns non-zero on
 * success (and records ownership), 0 if the lock is contended.
 */
int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&lock->raw_lock);

	if (ret)
		debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP: with one CPU and IRQs off, nobody
	 * else can be holding the lock.
	 */
	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
215
/*
 * Release @lock: run the ownership/magic checks (which also clear the
 * owner fields) and then perform the arch-level unlock.
 */
void do_raw_spin_unlock(raw_spinlock_t *lock)
{
	debug_spin_unlock(lock);
	arch_spin_unlock(&lock->raw_lock);
}
221
222static void rwlock_bug(rwlock_t *lock, const char *msg)
223{
9a11b49a
IM
224 if (!debug_locks_off())
225 return;
226
227 printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
228 msg, raw_smp_processor_id(), current->comm,
ba25f9dc 229 task_pid_nr(current), lock);
9a11b49a 230 dump_stack();
fb1c8f93
IM
231}
232
/*
 * Report an rwlock bug via rwlock_bug() when @cond is true.  Wrapped
 * in do { } while (0) so the macro expands to a single statement and
 * is safe inside unbraced if/else bodies (no dangling-else hazard).
 */
#define RWLOCK_BUG_ON(cond, lock, msg)			\
	do {						\
		if (unlikely(cond))			\
			rwlock_bug(lock, msg);		\
	} while (0)
234
#if 0		/* __write_lock_debug() can lock up - maybe this can too? */
/*
 * Disabled slow path: spin on read_trylock and print one report when a
 * lockup is suspected.  Compiled out for the reason above; kept for
 * reference only.
 */
static void __read_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_read_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack();
		}
	}
}
#endif
fb1c8f93 260
/*
 * Acquire @lock for reading.  Only the magic is checked: read locks
 * are shared, so per-task owner tracking does not apply.
 */
void do_raw_read_lock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_lock(&lock->raw_lock);
}
266
/*
 * Try once to acquire @lock for reading.  Returns non-zero on success,
 * 0 if the lock is write-held.
 */
int do_raw_read_trylock(rwlock_t *lock)
{
	int ret = arch_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP: with one CPU, no writer can be
	 * holding the lock concurrently.
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
279
/*
 * Release a read hold on @lock after verifying the debug magic.
 */
void do_raw_read_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_unlock(&lock->raw_lock);
}
285
/*
 * Sanity checks before write-locking @lock: it must carry the init
 * magic, and neither the current task nor the current CPU may already
 * own it for writing -- either would self-deadlock.
 */
static inline void debug_write_lock_before(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}
293
294static inline void debug_write_lock_after(rwlock_t *lock)
295{
296 lock->owner_cpu = raw_smp_processor_id();
297 lock->owner = current;
298}
299
/*
 * Validate a write-release of @lock: it must be initialized and be
 * released by the same task/CPU that write-locked it.  Ownership is
 * cleared here, before the arch-level unlock in the caller.
 */
static inline void debug_write_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
309
#if 0		/* This can cause lockups */
/*
 * Disabled slow path: spin on write_trylock and print one report when
 * a lockup is suspected.  Compiled out for the reason above; kept for
 * reference only.
 */
static void __write_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_write_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack();
		}
	}
}
#endif
fb1c8f93 335
/*
 * Acquire @lock for writing with debug checking.  Unlike the spinlock
 * path there is no trylock watchdog here: the arch lock is taken
 * directly, then ownership is recorded.
 */
void do_raw_write_lock(rwlock_t *lock)
{
	debug_write_lock_before(lock);
	arch_write_lock(&lock->raw_lock);
	debug_write_lock_after(lock);
}
342
/*
 * Try once to acquire @lock for writing.  Returns non-zero on success
 * (and records ownership), 0 if the lock is held.
 */
int do_raw_write_trylock(rwlock_t *lock)
{
	int ret = arch_write_trylock(&lock->raw_lock);

	if (ret)
		debug_write_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP: with one CPU, nobody else can be
	 * holding the lock concurrently.
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
357
/*
 * Release a write hold on @lock: run the ownership/magic checks (which
 * also clear the owner fields), then perform the arch-level unlock.
 */
void do_raw_write_unlock(rwlock_t *lock)
{
	debug_write_unlock(lock);
	arch_write_unlock(&lock->raw_lock);
}