From e4d919188554a77c798a267e098059bc9aa39726 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 3 Jul 2006 00:24:34 -0700
Subject: [PATCH] [PATCH] lockdep: locking init debugging improvement

Locking init improvement:

- introduce and use __SPIN_LOCK_UNLOCKED for array initializations,
  to pass in the name string of locks, used by debugging

Signed-off-by: Ingo Molnar
Signed-off-by: Arjan van de Ven
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 drivers/char/random.c          |  6 +++---
 fs/dcache.c                    |  2 +-
 include/linux/idr.h            |  2 +-
 include/linux/init_task.h      | 10 +++++-----
 include/linux/notifier.h       |  2 +-
 include/linux/seqlock.h        | 12 ++++++++++--
 include/linux/spinlock_types.h | 15 +++++++++------
 include/linux/wait.h           |  2 +-
 kernel/rcupdate.c              |  4 ++--
 kernel/timer.c                 |  2 +-
 mm/swap_state.c                |  2 +-
 net/ipv4/tcp_ipv4.c            |  2 +-
 net/ipv4/tcp_minisocks.c       |  2 +-
 13 files changed, 37 insertions(+), 26 deletions(-)

diff --git a/drivers/char/random.c b/drivers/char/random.c
index 164bddae047..4c3a5ca9d8f 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -416,7 +416,7 @@ static struct entropy_store input_pool = {
 	.poolinfo = &poolinfo_table[0],
 	.name = "input",
 	.limit = 1,
-	.lock = SPIN_LOCK_UNLOCKED,
+	.lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock),
 	.pool = input_pool_data
 };
 
@@ -425,7 +425,7 @@ static struct entropy_store blocking_pool = {
 	.name = "blocking",
 	.limit = 1,
 	.pull = &input_pool,
-	.lock = SPIN_LOCK_UNLOCKED,
+	.lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock),
 	.pool = blocking_pool_data
 };
 
@@ -433,7 +433,7 @@ static struct entropy_store nonblocking_pool = {
 	.poolinfo = &poolinfo_table[1],
 	.name = "nonblocking",
 	.pull = &input_pool,
-	.lock = SPIN_LOCK_UNLOCKED,
+	.lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock),
 	.pool = nonblocking_pool_data
 };
 
diff --git a/fs/dcache.c b/fs/dcache.c
index c6e3535be19..bec4de176c8 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -38,7 +38,7 @@ int sysctl_vfs_cache_pressure __read_mostly = 100;
 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
 
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
-static seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
+static __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
 
 EXPORT_SYMBOL(dcache_lock);
 
diff --git a/include/linux/idr.h b/include/linux/idr.h
index f559a719dbe..826803449db 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -66,7 +66,7 @@ struct idr {
 	.id_free	= NULL,						\
 	.layers		= 0,						\
 	.id_free_cnt	= 0,						\
-	.lock		= SPIN_LOCK_UNLOCKED,				\
+	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
 }
 #define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)
 
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 678c1a90380..1b7bb37624b 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -21,7 +21,7 @@
 	.count		= ATOMIC_INIT(1),			\
 	.fdt		= &init_files.fdtab,			\
 	.fdtab		= INIT_FDTABLE,				\
-	.file_lock	= SPIN_LOCK_UNLOCKED,			\
+	.file_lock	= __SPIN_LOCK_UNLOCKED(init_task.file_lock),	\
 	.next_fd	= 0,					\
 	.close_on_exec_init = { { 0, } },			\
 	.open_fds_init	= { { 0, } },				\
@@ -36,7 +36,7 @@
 	.user_id	= 0,					\
 	.next		= NULL,					\
 	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER(name.wait),	\
-	.ctx_lock	= SPIN_LOCK_UNLOCKED,			\
+	.ctx_lock	= __SPIN_LOCK_UNLOCKED(name.ctx_lock),	\
 	.reqs_active	= 0U,					\
 	.max_reqs	= ~0U,					\
 }
@@ -48,7 +48,7 @@
 	.mm_users	= ATOMIC_INIT(2),			\
 	.mm_count	= ATOMIC_INIT(1),			\
 	.mmap_sem	= __RWSEM_INITIALIZER(name.mmap_sem),	\
-	.page_table_lock = SPIN_LOCK_UNLOCKED,			\
+	.page_table_lock = __SPIN_LOCK_UNLOCKED(name.page_table_lock),	\
 	.mmlist		= LIST_HEAD_INIT(name.mmlist),		\
 	.cpu_vm_mask	= CPU_MASK_ALL,				\
 }
@@ -69,7 +69,7 @@
 #define INIT_SIGHAND(sighand) {					\
 	.count		= ATOMIC_INIT(1),			\
 	.action		= { { { .sa_handler = NULL, } }, },	\
-	.siglock	= SPIN_LOCK_UNLOCKED,			\
+	.siglock	= __SPIN_LOCK_UNLOCKED(sighand.siglock),	\
 }
 
 extern struct group_info init_groups;
@@ -119,7 +119,7 @@ extern struct group_info init_groups;
 		.list = LIST_HEAD_INIT(tsk.pending.list),	\
 		.signal = {{0}}},				\
 	.blocked	= {{0}},				\
-	.alloc_lock	= SPIN_LOCK_UNLOCKED,			\
+	.alloc_lock	= __SPIN_LOCK_UNLOCKED(tsk.alloc_lock),	\
 	.journal_info	= NULL,					\
 	.cpu_timers	= INIT_CPU_TIMERS(tsk.cpu_timers),	\
 	.fs_excl	= ATOMIC_INIT(0),			\
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 51dbab9710c..7ff386a6ae8 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -65,7 +65,7 @@ struct raw_notifier_head {
 	} while (0)
 
 #define ATOMIC_NOTIFIER_INIT(name) {				\
-		.lock = SPIN_LOCK_UNLOCKED,			\
+		.lock = __SPIN_LOCK_UNLOCKED(name.lock),	\
 		.head = NULL }
 #define BLOCKING_NOTIFIER_INIT(name) {				\
 		.rwsem = __RWSEM_INITIALIZER((name).rwsem),	\
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 7bc5c7c12b5..46000936f8f 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -38,9 +38,17 @@ typedef struct {
  * These macros triggered gcc-3.x compile-time problems.  We think these are
  * OK now.  Be cautious.
  */
-#define SEQLOCK_UNLOCKED { 0, SPIN_LOCK_UNLOCKED }
-#define seqlock_init(x)	do { *(x) = (seqlock_t) SEQLOCK_UNLOCKED; } while (0)
+#define __SEQLOCK_UNLOCKED(lockname) \
+		{ 0, __SPIN_LOCK_UNLOCKED(lockname) }
+#define SEQLOCK_UNLOCKED \
+		__SEQLOCK_UNLOCKED(old_style_seqlock_init)
+
+#define seqlock_init(x) \
+		do { *(x) = (seqlock_t) __SEQLOCK_UNLOCKED(x); } while (0)
+
+#define DEFINE_SEQLOCK(x) \
+		seqlock_t x = __SEQLOCK_UNLOCKED(x)
 
 
 /* Lock out other writers and update the count.
  * Acts like a normal spin_lock/unlock.
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 9cb51e07039..f5d4ed7bc78 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -44,24 +44,27 @@ typedef struct {
 #define SPINLOCK_OWNER_INIT	((void *)-1L)
 
 #ifdef CONFIG_DEBUG_SPINLOCK
-# define SPIN_LOCK_UNLOCKED					\
+# define __SPIN_LOCK_UNLOCKED(lockname)				\
 	(spinlock_t)	{	.raw_lock = __RAW_SPIN_LOCK_UNLOCKED,	\
 				.magic = SPINLOCK_MAGIC,		\
 				.owner = SPINLOCK_OWNER_INIT,		\
 				.owner_cpu = -1 }
-#define RW_LOCK_UNLOCKED					\
+#define __RW_LOCK_UNLOCKED(lockname)				\
 	(rwlock_t)	{	.raw_lock = __RAW_RW_LOCK_UNLOCKED,	\
 				.magic = RWLOCK_MAGIC,			\
 				.owner = SPINLOCK_OWNER_INIT,		\
 				.owner_cpu = -1 }
 #else
-# define SPIN_LOCK_UNLOCKED \
+# define __SPIN_LOCK_UNLOCKED(lockname) \
 	(spinlock_t)	{	.raw_lock = __RAW_SPIN_LOCK_UNLOCKED }
-#define RW_LOCK_UNLOCKED \
+#define __RW_LOCK_UNLOCKED(lockname) \
 	(rwlock_t)	{	.raw_lock = __RAW_RW_LOCK_UNLOCKED }
 #endif
 
-#define DEFINE_SPINLOCK(x)	spinlock_t x = SPIN_LOCK_UNLOCKED
-#define DEFINE_RWLOCK(x)	rwlock_t x = RW_LOCK_UNLOCKED
+#define SPIN_LOCK_UNLOCKED	__SPIN_LOCK_UNLOCKED(old_style_spin_init)
+#define RW_LOCK_UNLOCKED	__RW_LOCK_UNLOCKED(old_style_rw_init)
+
+#define DEFINE_SPINLOCK(x)	spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+#define DEFINE_RWLOCK(x)	rwlock_t x = __RW_LOCK_UNLOCKED(x)
 
 #endif /* __LINUX_SPINLOCK_TYPES_H */
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 544e855c7c0..bc4f389c49b 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -68,7 +68,7 @@ struct task_struct;
 	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
 
 #define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
-	.lock		= SPIN_LOCK_UNLOCKED,				\
+	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
 	.task_list	= { &(name).task_list, &(name).task_list } }
 
 #define DECLARE_WAIT_QUEUE_HEAD(name) \
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index f464f5ae3f1..759805c9859 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -53,13 +53,13 @@
 static struct rcu_ctrlblk rcu_ctrlblk = {
 	.cur = -300,
 	.completed = -300,
-	.lock = SPIN_LOCK_UNLOCKED,
+	.lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
 	.cpumask = CPU_MASK_NONE,
 };
 static struct rcu_ctrlblk rcu_bh_ctrlblk = {
 	.cur = -300,
 	.completed = -300,
-	.lock = SPIN_LOCK_UNLOCKED,
+	.lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
 	.cpumask = CPU_MASK_NONE,
 };
 
diff --git a/kernel/timer.c b/kernel/timer.c
index 5a896025306..4dd9a10d67d 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1208,7 +1208,7 @@ unsigned long wall_jiffies = INITIAL_JIFFIES;
  * playing with xtime and avenrun.
  */
 #ifndef ARCH_HAVE_XTIME_LOCK
-seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
+__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
 
 EXPORT_SYMBOL(xtime_lock);
 #endif
diff --git a/mm/swap_state.c b/mm/swap_state.c
index fccbd9bba77..5f7cf2a4cb5 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -38,7 +38,7 @@ static struct backing_dev_info swap_backing_dev_info = {
 
 struct address_space swapper_space = {
 	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
-	.tree_lock	= RW_LOCK_UNLOCKED,
+	.tree_lock	= __RW_LOCK_UNLOCKED(swapper_space.tree_lock),
 	.a_ops		= &swap_aops,
 	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
 	.backing_dev_info = &swap_backing_dev_info,
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 8355b729fa9..823717285c6 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -90,7 +90,7 @@ static struct socket *tcp_socket;
 void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
 
 struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
-	.lhash_lock	= RW_LOCK_UNLOCKED,
+	.lhash_lock	= __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock),
 	.lhash_users	= ATOMIC_INIT(0),
 	.lhash_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
 };
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index e0851697ad5..0ccb7cb22b1 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -40,7 +40,7 @@ int sysctl_tcp_abort_on_overflow;
 struct inet_timewait_death_row tcp_death_row = {
 	.sysctl_max_tw_buckets = NR_FILE * 2,
 	.period		= TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
-	.death_lock	= SPIN_LOCK_UNLOCKED,
+	.death_lock	= __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
 	.hashinfo	= &tcp_hashinfo,
 	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
 					    (unsigned long)&tcp_death_row),
-- 
2.20.1
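
Usage sketch (illustrative only, not part of the patch; the foo_stats structure
and the foo_* lock names are made up): statically allocated locks now pass their
own name into the initializer, and the DEFINE_SPINLOCK()/DEFINE_SEQLOCK() helpers
expand to the same named form, so code in a tree with this patch applied would
look roughly like:

	#include <linux/spinlock.h>
	#include <linux/seqlock.h>

	/* Hypothetical structure with an embedded lock. */
	struct foo_stats {
		spinlock_t	lock;
		unsigned long	events;
	};

	/* Old style: anonymous initializer, nothing to name the lock by. */
	static struct foo_stats foo_old = {
		.lock	= SPIN_LOCK_UNLOCKED,
	};

	/* New style: the initializer is told which lock it is initializing. */
	static struct foo_stats foo_new = {
		.lock	= __SPIN_LOCK_UNLOCKED(foo_new.lock),
	};

	/* File-scope locks keep using the DEFINE_* helpers, which now
	 * expand to the named initializers as well. */
	static DEFINE_SPINLOCK(foo_list_lock);
	static DEFINE_SEQLOCK(foo_seq);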