lib/spinlock_debug.c
/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */
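
/*
 * Orientation sketch (editorial note, not upstream text): with
 * CONFIG_DEBUG_SPINLOCK=y, the generic locking entry points resolve to
 * the do_raw_*() hooks defined in this file, roughly:
 *
 *   spin_lock(&lock)
 *     -> raw_spin_lock(&lock->rlock)
 *       -> _raw_spin_lock()       (generic spinlock API layer)
 *         -> do_raw_spin_lock()   (this file: magic/owner checks, then
 *                                  the arch_spin_* primitives)
 */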

#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/aee.h>

extern int InDumpAllStack;

void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
			  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	lock->magic = SPINLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
EXPORT_SYMBOL(__raw_spin_lock_init);

void __rwlock_init(rwlock_t *lock, const char *name,
		   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->raw_lock = (arch_rwlock_t)__ARCH_RW_LOCK_UNLOCKED;
	lock->magic = RWLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
EXPORT_SYMBOL(__rwlock_init);

static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
	struct task_struct *owner = NULL;

	if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
		owner = lock->owner;
	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
		msg, raw_smp_processor_id(),
		current->comm, task_pid_nr(current));
	printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
			".owner_cpu: %d, value: 0x%08x\n",
		lock, lock->magic,
		owner ? owner->comm : "<none>",
		owner ? task_pid_nr(owner) : -1,
		lock->owner_cpu, *((unsigned int *)&lock->raw_lock));
	dump_stack();
}

static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
	char aee_str[50];

	/*
	 * The upstream debug_locks_off() early return is deliberately
	 * disabled in this tree:
	 *
	 * if (!debug_locks_off())
	 *	return;
	 */
	spin_dump(lock, msg);
	snprintf(aee_str, 50, "Spinlock %s: %s\n", current->comm, msg);
	if (!strcmp(msg, "bad magic")) {
		printk(KERN_EMERG "[spin lock debug] bad magic: %08x, should be %08x; an uninitialized spinlock may be in use, or memory may be corrupted\n",
			lock->magic, SPINLOCK_MAGIC);
		printk(KERN_EMERG ">>>>>>>>>>>>>> Let's KE <<<<<<<<<<<<<<\n");
		BUG_ON(1);
	}
	aee_kernel_warning_api(__FILE__, __LINE__,
			DB_OPT_DUMMY_DUMP | DB_OPT_FTRACE,
			aee_str, "spinlock debugger\n");
}

#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)

static inline void debug_spin_lock_before(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}

static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}

static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

/*
 * Select a loop count that busy-waits for roughly 1~2 seconds.
 */
#if HZ == 100
#define LOOP_HZ 100	/* temp 10 */
#elif HZ == 10
#define LOOP_HZ 2	/* temp 2 */
#else
#define LOOP_HZ HZ
#endif
#define WARNING_TIME 1000000000	/* warning interval: 1 second, in ns */
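
/*
 * Rough worked example (editorial sketch; the numbers are an estimate,
 * not measured): loops_per_jiffy calls of __delay(1) take about one
 * jiffy, so the inner trylock loop below spins for roughly
 *
 *   loops / loops_per_jiffy = LOOP_HZ jiffies ~= 1 second at HZ == 100
 *
 * between lockup checks. That matches WARNING_TIME above (1e9 ns), and
 * the report itself only fires once WARNING_TIME * 3 has elapsed.
 */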

static void __spin_lock_debug(raw_spinlock_t *lock)
{
#ifdef CONFIG_MTK_MUTATION
	u64 i;
	u64 loops = loops_per_jiffy * LOOP_HZ;
	int print_once = 1;
	char aee_str[50];
	unsigned long long t1, t2;

	t1 = sched_clock();
	t2 = t1;
	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_spin_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		if (sched_clock() - t2 < WARNING_TIME * 3)
			continue;
		t2 = sched_clock();
#ifdef CONFIG_MTK_AEE_FEATURE
		/* in exception flow, printk itself may trip spinlock errors */
		if ((oops_in_progress != 0) || (InDumpAllStack == 1))
			continue;
#else
		/* in exception flow, printk itself may trip spinlock errors */
		if (oops_in_progress != 0)
			continue;
#endif
		/* lockup suspected: */
		printk(KERN_EMERG "spin time: %llu ns (start: %llu ns, lpj: %lu, LPHZ: %d), value: 0x%08x\n",
			sched_clock() - t1, t1, loops_per_jiffy,
			(int)LOOP_HZ, *((unsigned int *)&lock->raw_lock));
		if (print_once) {
			print_once = 0;
			spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
			trigger_all_cpu_backtrace();
#endif
			debug_show_all_locks();
			snprintf(aee_str, 50, "Spinlock lockup:%s\n",
				 current->comm);
			aee_kernel_warning_api(__FILE__, __LINE__,
					DB_OPT_DUMMY_DUMP | DB_OPT_FTRACE,
					aee_str, "spinlock debugger\n");
		}
	}
#else /* !CONFIG_MTK_MUTATION */
	u64 i;
	u64 loops = loops_per_jiffy * HZ;

	for (i = 0; i < loops; i++) {
		if (arch_spin_trylock(&lock->raw_lock))
			return;
		__delay(1);
	}
	/* lockup suspected: */
	spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
	trigger_all_cpu_backtrace();
#endif

	/*
	 * The trylock above was causing a livelock. Give the lower level arch
	 * specific lock code a chance to acquire the lock. We have already
	 * printed a warning/backtrace at this point. The non-debug arch
	 * specific code might actually succeed in acquiring the lock. If it is
	 * not successful, the end-result is the same - there is no forward
	 * progress.
	 */
	arch_spin_lock(&lock->raw_lock);
#endif /* CONFIG_MTK_MUTATION */
}

void do_raw_spin_lock(raw_spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
		__spin_lock_debug(lock);
	debug_spin_lock_after(lock);
}

int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&lock->raw_lock);

	if (ret)
		debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_spin_unlock(raw_spinlock_t *lock)
{
	debug_spin_unlock(lock);
	arch_spin_unlock(&lock->raw_lock);
}
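
/*
 * Usage sketch (hypothetical caller; editorial note): the hooks above
 * are never called directly. Ordinary locking code reaches them through
 * the normal spinlock API, e.g.:
 *
 *   static DEFINE_SPINLOCK(demo_lock);	// demo_lock is illustrative
 *   unsigned long flags;
 *
 *   spin_lock_irqsave(&demo_lock, flags);
 *   // critical section; owner/owner_cpu were recorded by
 *   // debug_spin_lock_after(), and debug_spin_unlock() will verify
 *   // them when we release the lock:
 *   spin_unlock_irqrestore(&demo_lock, flags);
 */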

static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
		msg, raw_smp_processor_id(), current->comm,
		task_pid_nr(current), lock);
	dump_stack();
}

#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)

#if 0		/* __write_lock_debug() can lock up - maybe this can too? */
static void __read_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_read_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack();
		}
	}
}
#endif

void do_raw_read_lock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_lock(&lock->raw_lock);
}

int do_raw_read_trylock(rwlock_t *lock)
{
	int ret = arch_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_read_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_unlock(&lock->raw_lock);
}

static inline void debug_write_lock_before(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}

static inline void debug_write_lock_after(rwlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}

static inline void debug_write_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

#if 0		/* This can cause lockups */
static void __write_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_write_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack();
		}
	}
}
#endif

void do_raw_write_lock(rwlock_t *lock)
{
	debug_write_lock_before(lock);
	arch_write_lock(&lock->raw_lock);
	debug_write_lock_after(lock);
}

int do_raw_write_trylock(rwlock_t *lock)
{
	int ret = arch_write_trylock(&lock->raw_lock);

	if (ret)
		debug_write_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_write_unlock(rwlock_t *lock)
{
	debug_write_unlock(lock);
	arch_write_unlock(&lock->raw_lock);
}
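
/*
 * rwlock counterpart sketch (hypothetical caller; editorial note): the
 * read side above validates only the magic value, while the write side
 * gets the full owner/CPU recursion checks, e.g.:
 *
 *   static DEFINE_RWLOCK(demo_rwlock);	// demo_rwlock is illustrative
 *
 *   read_lock(&demo_rwlock);	// -> do_raw_read_lock(): magic check only
 *   read_unlock(&demo_rwlock);
 *
 *   write_lock(&demo_rwlock);	// -> do_raw_write_lock(): owner checks
 *   write_unlock(&demo_rwlock);
 */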