brlocks/lglocks: turn into functions
author Andi Kleen <ak@linux.intel.com>
Tue, 8 May 2012 04:02:24 +0000 (13:32 +0930)
committer Al Viro <viro@zeniv.linux.org.uk>
Wed, 30 May 2012 03:28:41 +0000 (23:28 -0400)
lglocks and brlocks are currently generated with some complicated macros
in lglock.h.  But there's no reason not to just use common utility
functions and put all the data into a common data structure.

Since there are at least two users it makes sense to share this code in a
library.  This is also easier to maintain than a macro forest.
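
As an illustration (editor's sketch, not part of the original commit
message; the example_* function names are invented), the shared API is
used like this:

#include <linux/lglock.h>

/* creates the per-CPU spinlocks plus a struct lglock */
DEFINE_LGLOCK(files_lglock);

static void example_read_side(void)
{
	lg_local_lock(&files_lglock);	/* takes this CPU's spinlock only */
	/* ... operate on this CPU's share of the protected data ... */
	lg_local_unlock(&files_lglock);
}

static void example_write_side(void)
{
	lg_global_lock(&files_lglock);	/* takes every CPU's spinlock */
	/* ... exclusive access across all CPUs ... */
	lg_global_unlock(&files_lglock);
}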

This will also make it possible later to dynamically allocate lglocks and
to use them in modules (both would still need some additional, but now
straightforward, code).
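
A hypothetical sketch of that additional code (lg_alloc is an invented
name, NOT part of this patch; lockdep class keys normally live in static
storage, so CONFIG_DEBUG_LOCK_ALLOC would need extra care):

#include <linux/lglock.h>
#include <linux/percpu.h>
#include <linux/slab.h>

static struct lglock *lg_alloc(char *name)
{
	int i;
	struct lglock *lg = kzalloc(sizeof(*lg), GFP_KERNEL);

	if (!lg)
		return NULL;
	/* allocate the per-CPU spinlocks at runtime */
	lg->lock = alloc_percpu(arch_spinlock_t);
	if (!lg->lock) {
		kfree(lg);
		return NULL;
	}
	for_each_possible_cpu(i)
		*per_cpu_ptr(lg->lock, i) =
			(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	lg_lock_init(lg, name);		/* sets up the lockdep map */
	return lg;
}

Freeing would be the reverse (free_percpu(lg->lock), then kfree(lg)) once
no CPU can still hold the lock.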

[akpm@linux-foundation.org: checkpatch fixes]
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
fs/file_table.c
fs/internal.h
include/linux/lglock.h
kernel/Makefile
kernel/lglock.c [new file with mode: 0644]

index 70f2a0fd6aec62b28724d46e356dc0ff871f88b8..f5c67c59ec10ce4d9ed80dbfa5814e76f316eebf 100644
@@ -34,7 +34,6 @@ struct files_stat_struct files_stat = {
        .max_files = NR_FILE
 };
 
-DECLARE_LGLOCK(files_lglock);
 DEFINE_LGLOCK(files_lglock);
 
 /* SLAB cache for file structures */
index 9962c59ba280b1c75d78adc55b8491733075a5e0..8040af489c782941adf9dae2f2aa0a7257bf9000 100644
@@ -56,7 +56,7 @@ extern int sb_prepare_remount_readonly(struct super_block *);
 
 extern void __init mnt_init(void);
 
-DECLARE_BRLOCK(vfsmount_lock);
+extern struct lglock vfsmount_lock;
 
 
 /*
index 0fdd821e77b702e224dd7f23e9b5072dad2a12b7..f01e5f6d1f07a4966927bb7acd5707f8f77904c8 100644
 #include <linux/lockdep.h>
 #include <linux/percpu.h>
 #include <linux/cpu.h>
+#include <linux/notifier.h>
 
 /* can make br locks by using local lock for read side, global lock for write */
-#define br_lock_init(name)     name##_lock_init()
-#define br_read_lock(name)     name##_local_lock()
-#define br_read_unlock(name)   name##_local_unlock()
-#define br_write_lock(name)    name##_global_lock()
-#define br_write_unlock(name)  name##_global_unlock()
+#define br_lock_init(name)     lg_lock_init(name, #name)
+#define br_read_lock(name)     lg_local_lock(name)
+#define br_read_unlock(name)   lg_local_unlock(name)
+#define br_write_lock(name)    lg_global_lock(name)
+#define br_write_unlock(name)  lg_global_unlock(name)
 
-#define DECLARE_BRLOCK(name)   DECLARE_LGLOCK(name)
 #define DEFINE_BRLOCK(name)    DEFINE_LGLOCK(name)
 
-
-#define lg_lock_init(name)     name##_lock_init()
-#define lg_local_lock(name)    name##_local_lock()
-#define lg_local_unlock(name)  name##_local_unlock()
-#define lg_local_lock_cpu(name, cpu)   name##_local_lock_cpu(cpu)
-#define lg_local_unlock_cpu(name, cpu) name##_local_unlock_cpu(cpu)
-#define lg_global_lock(name)   name##_global_lock()
-#define lg_global_unlock(name) name##_global_unlock()
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 #define LOCKDEP_INIT_MAP lockdep_init_map
 
 #define DEFINE_LGLOCK_LOCKDEP(name)
 #endif
 
-
-#define DECLARE_LGLOCK(name)                                           \
- extern void name##_lock_init(void);                                   \
- extern void name##_local_lock(void);                                  \
- extern void name##_local_unlock(void);                                        \
- extern void name##_local_lock_cpu(int cpu);                           \
- extern void name##_local_unlock_cpu(int cpu);                         \
- extern void name##_global_lock(void);                                 \
- extern void name##_global_unlock(void);                               \
+struct lglock {
+       arch_spinlock_t __percpu *lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       struct lock_class_key lock_key;
+       struct lockdep_map    lock_dep_map;
+#endif
+};
 
 #define DEFINE_LGLOCK(name)                                            \
-                                                                       \
- DEFINE_SPINLOCK(name##_cpu_lock);                                     \
- DEFINE_PER_CPU(arch_spinlock_t, name##_lock);                         \
- DEFINE_LGLOCK_LOCKDEP(name);                                          \
-                                                                       \
- void name##_lock_init(void) {                                         \
-       int i;                                                          \
-       LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
-       for_each_possible_cpu(i) {                                      \
-               arch_spinlock_t *lock;                                  \
-               lock = &per_cpu(name##_lock, i);                        \
-               *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;     \
-       }                                                               \
- }                                                                     \
- EXPORT_SYMBOL(name##_lock_init);                                      \
-                                                                       \
- void name##_local_lock(void) {                                                \
-       arch_spinlock_t *lock;                                          \
-       preempt_disable();                                              \
-       rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);     \
-       lock = &__get_cpu_var(name##_lock);                             \
-       arch_spin_lock(lock);                                           \
- }                                                                     \
- EXPORT_SYMBOL(name##_local_lock);                                     \
-                                                                       \
- void name##_local_unlock(void) {                                      \
-       arch_spinlock_t *lock;                                          \
-       rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);             \
-       lock = &__get_cpu_var(name##_lock);                             \
-       arch_spin_unlock(lock);                                         \
-       preempt_enable();                                               \
- }                                                                     \
- EXPORT_SYMBOL(name##_local_unlock);                                   \
-                                                                       \
- void name##_local_lock_cpu(int cpu) {                                 \
-       arch_spinlock_t *lock;                                          \
-       preempt_disable();                                              \
-       rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);     \
-       lock = &per_cpu(name##_lock, cpu);                              \
-       arch_spin_lock(lock);                                           \
- }                                                                     \
- EXPORT_SYMBOL(name##_local_lock_cpu);                                 \
-                                                                       \
- void name##_local_unlock_cpu(int cpu) {                               \
-       arch_spinlock_t *lock;                                          \
-       rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);             \
-       lock = &per_cpu(name##_lock, cpu);                              \
-       arch_spin_unlock(lock);                                         \
-       preempt_enable();                                               \
- }                                                                     \
- EXPORT_SYMBOL(name##_local_unlock_cpu);                               \
-                                                                       \
- void name##_global_lock(void) {                                       \
-       int i;                                                          \
-       preempt_disable();                                              \
-       rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);           \
-       for_each_possible_cpu(i) {                                      \
-               arch_spinlock_t *lock;                                  \
-               lock = &per_cpu(name##_lock, i);                        \
-               arch_spin_lock(lock);                                   \
-       }                                                               \
- }                                                                     \
- EXPORT_SYMBOL(name##_global_lock);                                    \
-                                                                       \
- void name##_global_unlock(void) {                                     \
-       int i;                                                          \
-       rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);              \
-       for_each_possible_cpu(i) {                                      \
-               arch_spinlock_t *lock;                                  \
-               lock = &per_cpu(name##_lock, i);                        \
-               arch_spin_unlock(lock);                                 \
-       }                                                               \
-       preempt_enable();                                               \
- }                                                                     \
- EXPORT_SYMBOL(name##_global_unlock);
+       DEFINE_LGLOCK_LOCKDEP(name);                                    \
+       DEFINE_PER_CPU(arch_spinlock_t, name ## _lock)                  \
+       = __ARCH_SPIN_LOCK_UNLOCKED;                                    \
+       struct lglock name = { .lock = &name ## _lock }
+
+void lg_lock_init(struct lglock *lg, char *name);
+void lg_local_lock(struct lglock *lg);
+void lg_local_unlock(struct lglock *lg);
+void lg_local_lock_cpu(struct lglock *lg, int cpu);
+void lg_local_unlock_cpu(struct lglock *lg, int cpu);
+void lg_global_lock(struct lglock *lg);
+void lg_global_unlock(struct lglock *lg);
+
 #endif
index 6c07f30fa9b7e678e23b2038474a5b508b7fa661..296132c19a57861dfb247e1cc0527c2deb870e99 100644
@@ -10,7 +10,7 @@ obj-y     = fork.o exec_domain.o panic.o printk.o \
            kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
            hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
            notifier.o ksysfs.o cred.o \
-           async.o range.o groups.o
+           async.o range.o groups.o lglock.o
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace debug files and internal ftrace files
diff --git a/kernel/lglock.c b/kernel/lglock.c
new file mode 100644
index 0000000..6535a66
--- /dev/null
+++ b/kernel/lglock.c
@@ -0,0 +1,89 @@
+/* See include/linux/lglock.h for description */
+#include <linux/module.h>
+#include <linux/lglock.h>
+#include <linux/cpu.h>
+#include <linux/string.h>
+
+/*
+ * Note there is no uninit, so lglocks cannot be defined in
+ * modules (but it's fine to use them from there).
+ * An uninit could be added, though; it would just undo lg_lock_init.
+ */
+
+void lg_lock_init(struct lglock *lg, char *name)
+{
+       LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
+}
+EXPORT_SYMBOL(lg_lock_init);
+
+void lg_local_lock(struct lglock *lg)
+{
+       arch_spinlock_t *lock;
+
+       preempt_disable();
+       rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+       lock = this_cpu_ptr(lg->lock);
+       arch_spin_lock(lock);
+}
+EXPORT_SYMBOL(lg_local_lock);
+
+void lg_local_unlock(struct lglock *lg)
+{
+       arch_spinlock_t *lock;
+
+       rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+       lock = this_cpu_ptr(lg->lock);
+       arch_spin_unlock(lock);
+       preempt_enable();
+}
+EXPORT_SYMBOL(lg_local_unlock);
+
+void lg_local_lock_cpu(struct lglock *lg, int cpu)
+{
+       arch_spinlock_t *lock;
+
+       preempt_disable();
+       rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+       lock = per_cpu_ptr(lg->lock, cpu);
+       arch_spin_lock(lock);
+}
+EXPORT_SYMBOL(lg_local_lock_cpu);
+
+void lg_local_unlock_cpu(struct lglock *lg, int cpu)
+{
+       arch_spinlock_t *lock;
+
+       rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+       lock = per_cpu_ptr(lg->lock, cpu);
+       arch_spin_unlock(lock);
+       preempt_enable();
+}
+EXPORT_SYMBOL(lg_local_unlock_cpu);
+
+void lg_global_lock(struct lglock *lg)
+{
+       int i;
+
+       preempt_disable();
+       rwlock_acquire(&lg->lock_dep_map, 0, 0, _RET_IP_);
+       for_each_possible_cpu(i) {
+               arch_spinlock_t *lock;
+               lock = per_cpu_ptr(lg->lock, i);
+               arch_spin_lock(lock);
+       }
+}
+EXPORT_SYMBOL(lg_global_lock);
+
+void lg_global_unlock(struct lglock *lg)
+{
+       int i;
+
+       rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+       for_each_possible_cpu(i) {
+               arch_spinlock_t *lock;
+               lock = per_cpu_ptr(lg->lock, i);
+               arch_spin_unlock(lock);
+       }
+       preempt_enable();
+}
+EXPORT_SYMBOL(lg_global_unlock);