lockstat: hook into spinlock_t, rwlock_t, rwsem and mutex
author     Peter Zijlstra <a.p.zijlstra@chello.nl>
Thu, 19 Jul 2007 08:48:58 +0000 (01:48 -0700)
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>
Thu, 19 Jul 2007 17:04:49 +0000 (10:04 -0700)
Call the new lockstat tracking functions from the various lock primitives.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jason Baron <jbaron@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
kernel/mutex.c
kernel/rwsem.c
kernel/spinlock.c
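
The rwsem, rwlock and spinlock call sites below all funnel through the LOCK_CONTENDED() helper. As a rough sketch (inferred from the call sites in this patch rather than quoted from <linux/lockdep.h>), it probes with the cheap trylock first and only touches the statistics when that fails:

#ifdef CONFIG_LOCK_STAT
/*
 * Sketch: try the lock once; only on failure record the contention
 * point, take the lock the slow way, then record the acquisition.
 */
#define LOCK_CONTENDED(_lock, try, lock)				\
do {									\
	if (!try(_lock)) {						\
		lock_contended(&(_lock)->dep_map, _RET_IP_);		\
		lock(_lock);						\
		lock_acquired(&(_lock)->dep_map);			\
	}								\
} while (0)
#else
/* Without lock statistics the helper collapses to the plain lock call. */
#define LOCK_CONTENDED(_lock, try, lock)	lock(_lock)
#endif

This keeps the uncontended fast path free of bookkeeping: a lock that is always taken on the first attempt never shows up in the contention statistics.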

diff --git a/kernel/mutex.c b/kernel/mutex.c
index 303eab18484b1c63b7a678bf2b24a928cfc847fa..7a3f32761f2674e814e461d7a4039235b1dcb33f 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -139,6 +139,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
        list_add_tail(&waiter.list, &lock->wait_list);
        waiter.task = task;
 
+       old_val = atomic_xchg(&lock->count, -1);
+       if (old_val == 1)
+               goto done;
+
+       lock_contended(&lock->dep_map, _RET_IP_);
+
        for (;;) {
                /*
                 * Lets try to take the lock again - this is needed even if
@@ -174,6 +180,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
                spin_lock_mutex(&lock->wait_lock, flags);
        }
 
+       lock_acquired(&lock->dep_map);
+done:
        /* got the lock - rejoice! */
        mutex_remove_waiter(lock, &waiter, task_thread_info(task));
        debug_mutex_set_owner(lock, task_thread_info(task));
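
The mutex hunk above does not go through LOCK_CONTENDED(): __mutex_lock_common() already holds lock->wait_lock and runs its own wait loop, so the hooks are placed by hand. Condensed from the hunk (an illustration of the control flow, not the full function):

	old_val = atomic_xchg(&lock->count, -1);
	if (old_val == 1)
		goto done;		/* got it on the retry: record nothing */

	lock_contended(&lock->dep_map, _RET_IP_);	/* note where we started waiting */

	for (;;) {
		/* retry the xchg, otherwise schedule() and loop */
	}

	lock_acquired(&lock->dep_map);	/* the wait is over */
done:
	/* got the lock - rejoice! */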
diff --git a/kernel/rwsem.c b/kernel/rwsem.c
index 9a87886b022eb42ab486cd4749a5da1043a91c36..1ec620c03064d109a5f1de5f925f00ae07cd946a 100644
--- a/kernel/rwsem.c
+++ b/kernel/rwsem.c
@@ -20,7 +20,7 @@ void down_read(struct rw_semaphore *sem)
        might_sleep();
        rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
 
-       __down_read(sem);
+       LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
 }
 
 EXPORT_SYMBOL(down_read);
@@ -47,7 +47,7 @@ void down_write(struct rw_semaphore *sem)
        might_sleep();
        rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
 
-       __down_write(sem);
+       LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
 }
 
 EXPORT_SYMBOL(down_write);
@@ -111,7 +111,7 @@ void down_read_nested(struct rw_semaphore *sem, int subclass)
        might_sleep();
        rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
 
-       __down_read(sem);
+       LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
 }
 
 EXPORT_SYMBOL(down_read_nested);
@@ -130,7 +130,7 @@ void down_write_nested(struct rw_semaphore *sem, int subclass)
        might_sleep();
        rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
 
-       __down_write_nested(sem, subclass);
+       LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
 }
 
 EXPORT_SYMBOL(down_write_nested);
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index cd93bfe3f10d9772206feda45570ab68b0cfb190..cd72424c26625765085e0b2306345d474cb9476d 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -72,7 +72,7 @@ void __lockfunc _read_lock(rwlock_t *lock)
 {
        preempt_disable();
        rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-       _raw_read_lock(lock);
+       LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
 }
 EXPORT_SYMBOL(_read_lock);
 
@@ -89,7 +89,7 @@ unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
         * that interrupts are not re-enabled during lock-acquire:
         */
 #ifdef CONFIG_LOCKDEP
-       _raw_spin_lock(lock);
+       LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 #else
        _raw_spin_lock_flags(lock, &flags);
 #endif
@@ -102,7 +102,7 @@ void __lockfunc _spin_lock_irq(spinlock_t *lock)
        local_irq_disable();
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-       _raw_spin_lock(lock);
+       LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 }
 EXPORT_SYMBOL(_spin_lock_irq);
 
@@ -111,7 +111,7 @@ void __lockfunc _spin_lock_bh(spinlock_t *lock)
        local_bh_disable();
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-       _raw_spin_lock(lock);
+       LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 }
 EXPORT_SYMBOL(_spin_lock_bh);
 
@@ -122,7 +122,7 @@ unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
        local_irq_save(flags);
        preempt_disable();
        rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-       _raw_read_lock(lock);
+       LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
        return flags;
 }
 EXPORT_SYMBOL(_read_lock_irqsave);
@@ -132,7 +132,7 @@ void __lockfunc _read_lock_irq(rwlock_t *lock)
        local_irq_disable();
        preempt_disable();
        rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-       _raw_read_lock(lock);
+       LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
 }
 EXPORT_SYMBOL(_read_lock_irq);
 
@@ -141,7 +141,7 @@ void __lockfunc _read_lock_bh(rwlock_t *lock)
        local_bh_disable();
        preempt_disable();
        rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-       _raw_read_lock(lock);
+       LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
 }
 EXPORT_SYMBOL(_read_lock_bh);
 
@@ -152,7 +152,7 @@ unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
        local_irq_save(flags);
        preempt_disable();
        rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-       _raw_write_lock(lock);
+       LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
        return flags;
 }
 EXPORT_SYMBOL(_write_lock_irqsave);
@@ -162,7 +162,7 @@ void __lockfunc _write_lock_irq(rwlock_t *lock)
        local_irq_disable();
        preempt_disable();
        rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-       _raw_write_lock(lock);
+       LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
 }
 EXPORT_SYMBOL(_write_lock_irq);
 
@@ -171,7 +171,7 @@ void __lockfunc _write_lock_bh(rwlock_t *lock)
        local_bh_disable();
        preempt_disable();
        rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-       _raw_write_lock(lock);
+       LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
 }
 EXPORT_SYMBOL(_write_lock_bh);
 
@@ -179,7 +179,7 @@ void __lockfunc _spin_lock(spinlock_t *lock)
 {
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-       _raw_spin_lock(lock);
+       LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 }
 
 EXPORT_SYMBOL(_spin_lock);
@@ -188,7 +188,7 @@ void __lockfunc _write_lock(rwlock_t *lock)
 {
        preempt_disable();
        rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-       _raw_write_lock(lock);
+       LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
 }
 
 EXPORT_SYMBOL(_write_lock);
@@ -289,7 +289,7 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
 {
        preempt_disable();
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-       _raw_spin_lock(lock);
+       LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 }
 
 EXPORT_SYMBOL(_spin_lock_nested);
@@ -306,7 +306,7 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas
         * that interrupts are not re-enabled during lock-acquire:
         */
 #ifdef CONFIG_LOCKDEP
-       _raw_spin_lock(lock);
+       LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 #else
        _raw_spin_lock_flags(lock, &flags);
 #endif