/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */
#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/aee.h>
extern int InDumpAllStack;
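/*
 * Set while an all-task stack dump is in progress (MTK AEE path);
 * __spin_lock_debug() checks it so the lockup-warning path does not
 * printk from inside that exception flow.
 */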
void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
			  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	lock->magic = SPINLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__raw_spin_lock_init);
void __rwlock_init(rwlock_t *lock, const char *name,
		   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
	lock->magic = RWLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__rwlock_init);
static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
	struct task_struct *owner = NULL;

	if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
		owner = lock->owner;
	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
		msg, raw_smp_processor_id(),
		current->comm, task_pid_nr(current));
	printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
			".owner_cpu: %d value:0x%08x\n",
		lock, lock->magic,
		owner ? owner->comm : "<none>",
		owner ? task_pid_nr(owner) : -1,
		lock->owner_cpu, *((unsigned int *)&lock->raw_lock));
	dump_stack();
}
static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
	char aee_str[50];

//	if (!debug_locks_off())
//		return;

	spin_dump(lock, msg);
	snprintf(aee_str, 50, "Spinlock %s :%s\n", current->comm, msg);
	if (!strcmp(msg, "bad magic")) {
		printk("[spin lock debug] bad magic:%08x, should be %08x, may be using an uninitialized spin_lock, or memory is corrupted\n",
			lock->magic, SPINLOCK_MAGIC);
		printk(">>>>>>>>>>>>>> Let's KE <<<<<<<<<<<<<<\n");
	}
	aee_kernel_warning_api(__FILE__, __LINE__,
		DB_OPT_DUMMY_DUMP | DB_OPT_FTRACE, aee_str,
		"spinlock debugger\n");
}
#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
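/*
 * Pre-acquire checks below: the magic must be valid, and neither the
 * current task nor the current CPU may already hold the lock.  The
 * lock_after/unlock helpers keep owner and owner_cpu in sync so these
 * checks stay meaningful.
 */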
static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}
static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}
static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
/*
 * Select loop counts that give roughly 1~2 sec of spinning.
 */
#if HZ == 100			/* assumption: LOOP_HZ is selected by HZ */
#define LOOP_HZ 100		/* temp 10 */
#else
#define LOOP_HZ 2		/* temp 2 */
#endif
#define WARNING_TIME 1000000000	/* warning threshold: 1 second, in ns */
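/*
 * MTK flavour of the lockup detector: spin with trylock in bursts of
 * loops_per_jiffy * LOOP_HZ iterations.  Once the lock has been contended
 * for more than WARNING_TIME * 3 ns, dump the lock state, backtrace all
 * CPUs and raise an AEE warning, then keep spinning until the lock is
 * finally acquired.
 */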
static void __spin_lock_debug(raw_spinlock_t *lock)
{
#ifdef CONFIG_MTK_MUTATION
	u64 i;
	u64 loops = loops_per_jiffy * LOOP_HZ;
	char aee_str[50];
	unsigned long long t1, t2;

	t1 = sched_clock();
	t2 = t1;
	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_spin_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		if (sched_clock() - t2 < WARNING_TIME * 3)
			continue;
		t2 = sched_clock();
#ifdef CONFIG_MTK_AEE_FEATURE
		/* in exception flow, printk itself may hit spinlock errors */
		if ((oops_in_progress != 0) || (InDumpAllStack == 1))
			continue;
#else
		/* in exception flow, printk itself may hit spinlock errors */
		if (oops_in_progress != 0)
			continue;
#endif
		/* lockup suspected: */
		printk("spin time: %llu ns(start:%llu ns, lpj:%lu, LPHZ:%d), value: 0x%08x\n",
			sched_clock() - t1, t1, loops_per_jiffy, (int)LOOP_HZ,
			*((unsigned int *)&lock->raw_lock));
		spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
		trigger_all_cpu_backtrace();
#endif
		debug_show_all_locks();
		snprintf(aee_str, 50, "Spinlock lockup:%s\n", current->comm);
		aee_kernel_warning_api(__FILE__, __LINE__,
			DB_OPT_DUMMY_DUMP | DB_OPT_FTRACE, aee_str,
			"spinlock debugger\n");
	}
#else /* CONFIG_MTK_MUTATION */
	u64 i;
	u64 loops = loops_per_jiffy * HZ;

	for (i = 0; i < loops; i++) {
		if (arch_spin_trylock(&lock->raw_lock))
			return;
		__delay(1);
	}
	/* lockup suspected: */
	spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
	trigger_all_cpu_backtrace();
#endif

	/*
	 * The trylock above was causing a livelock.  Give the lower level arch
	 * specific lock code a chance to acquire the lock. We have already
	 * printed a warning/backtrace at this point. The non-debug arch
	 * specific code might actually succeed in acquiring the lock.  If it is
	 * not successful, the end-result is the same - there is no forward
	 * progress.
	 */
	arch_spin_lock(&lock->raw_lock);
#endif /* CONFIG_MTK_MUTATION */
}
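/*
 * Lock/trylock/unlock entry points used by the generic spinlock code:
 * the fast path is a single trylock; the debug spin loop above is only
 * entered when the lock is already contended.
 */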
void do_raw_spin_lock(raw_spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
		__spin_lock_debug(lock);
	debug_spin_lock_after(lock);
}
int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&lock->raw_lock);

	if (ret)
		debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
void do_raw_spin_unlock(raw_spinlock_t *lock)
{
	debug_spin_unlock(lock);
	arch_spin_unlock(&lock->raw_lock);
}
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
		msg, raw_smp_processor_id(), current->comm,
		task_pid_nr(current), lock);
	dump_stack();
}
#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
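/*
 * Only the write side records an owner (see debug_write_lock_after), so
 * the read-lock paths below can only validate the magic value.
 */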
#if 0		/* __write_lock_debug() can lock up - maybe this can too? */
static void __read_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_read_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				task_pid_nr(current), lock);
			dump_stack();
		}
	}
}
#endif
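/*
 * With the loop above disabled, a contended read lock is simply taken via
 * arch_read_lock(); only the magic value is sanity-checked first.
 */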
void do_raw_read_lock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_lock(&lock->raw_lock);
}
int do_raw_read_trylock(rwlock_t *lock)
{
	int ret = arch_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
void do_raw_read_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_unlock(&lock->raw_lock);
}
static inline void debug_write_lock_before(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}
static inline void debug_write_lock_after(rwlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}
static inline void debug_write_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
#if 0		/* This can cause lockups */
static void __write_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_write_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				task_pid_nr(current), lock);
			dump_stack();
		}
	}
}
#endif
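/*
 * As with read locks, a contended write lock falls through to
 * arch_write_lock() directly; the debug spin loop above is disabled
 * because it can itself cause lockups.
 */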
void do_raw_write_lock(rwlock_t *lock)
{
	debug_write_lock_before(lock);
	arch_write_lock(&lock->raw_lock);
	debug_write_lock_after(lock);
}
int do_raw_write_trylock(rwlock_t *lock)
{
	int ret = arch_write_trylock(&lock->raw_lock);

	if (ret)
		debug_write_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
void do_raw_write_unlock(rwlock_t *lock)
{
	debug_write_unlock(lock);
	arch_write_unlock(&lock->raw_lock);
}