locking: Rename __RAW_SPIN_LOCK_UNLOCKED to __ARCH_SPIN_LOCK_UNLOCKED
author    Thomas Gleixner <tglx@linutronix.de>
          Thu, 3 Dec 2009 11:38:57 +0000 (12:38 +0100)
committer Thomas Gleixner <tglx@linutronix.de>
          Mon, 14 Dec 2009 22:55:32 +0000 (23:55 +0100)
Further namespace cleanup. No functional change.
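
As a minimal before/after sketch of what the rename means at a use
site (mirroring the arch/x86/kernel/dumpstack.c hunk below):

    /* before this commit */
    static arch_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;

    /* after this commit */
    static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;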

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
25 files changed:
arch/alpha/include/asm/spinlock_types.h
arch/arm/include/asm/spinlock_types.h
arch/blackfin/include/asm/spinlock_types.h
arch/ia64/include/asm/spinlock_types.h
arch/m32r/include/asm/spinlock_types.h
arch/mips/include/asm/spinlock_types.h
arch/parisc/include/asm/spinlock_types.h
arch/parisc/lib/bitops.c
arch/powerpc/include/asm/spinlock_types.h
arch/powerpc/kernel/rtas.c
arch/s390/include/asm/spinlock_types.h
arch/sh/include/asm/spinlock_types.h
arch/sparc/include/asm/spinlock_types.h
arch/x86/include/asm/spinlock_types.h
arch/x86/kernel/dumpstack.c
arch/x86/kernel/tsc_sync.c
include/linux/spinlock_types.h
include/linux/spinlock_types_up.h
kernel/lockdep.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_clock.c
kernel/trace/trace_sched_wakeup.c
kernel/trace/trace_stack.c
lib/spinlock_debug.c

diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h
index bb94a51e53d2b12411126749db51e9c97ed81acb..08975ee0a10026249f1ed1a7099b152d39143605 100644
@@ -9,7 +9,7 @@ typedef struct {
        volatile unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED      { 0 }
 
 typedef struct {
        volatile unsigned int lock;
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index 5e9d3eadd1675232feabf26ef9c0ef924cf570c6..9622e126a8decb6c52ec8105df303b90e36afbea 100644
@@ -9,7 +9,7 @@ typedef struct {
        volatile unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED      { 0 }
 
 typedef struct {
        volatile unsigned int lock;
diff --git a/arch/blackfin/include/asm/spinlock_types.h b/arch/blackfin/include/asm/spinlock_types.h
index 03b377abf5c07f11724ed250e4c686e7131f0137..c8a3928a58c57a9dc9129e4106fada5d319aef18 100644
@@ -17,7 +17,7 @@ typedef struct {
        volatile unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED      { 0 }
 
 typedef struct {
        volatile unsigned int lock;
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index 447ccc6ca7a84012fa327c58ce898adcd763de6e..6a11b65fa66da8a211570b570cb6f49ab4cfd256 100644
@@ -9,7 +9,7 @@ typedef struct {
        volatile unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED      { 0 }
 
 typedef struct {
        volatile unsigned int read_counter      : 31;
diff --git a/arch/m32r/include/asm/spinlock_types.h b/arch/m32r/include/asm/spinlock_types.h
index 17d15bd6322d37af48586619e8645a0b8d6677eb..5873a8701107b1ed5fca48b842809db0f907639d 100644
@@ -9,7 +9,7 @@ typedef struct {
        volatile int slock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED       { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED      { 1 }
 
 typedef struct {
        volatile int lock;
diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h
index 2e1060892d3bca10c2e02bb70700f5b5b2ca14c6..b4c5efaadb9c706af9d86ef2857db9b3849b89f3 100644
@@ -14,7 +14,7 @@ typedef struct {
        unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED      { 0 }
 
 typedef struct {
        volatile unsigned int lock;
diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
index 735caafb81f5531239f3449e8761df71017040d3..396d2746ca570106108a5241f9679a47ec323f88 100644
@@ -4,10 +4,10 @@
 typedef struct {
 #ifdef CONFIG_PA20
        volatile unsigned int slock;
-# define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
 #else
        volatile unsigned int lock[4];
-# define __RAW_SPIN_LOCK_UNLOCKED      { { 1, 1, 1, 1 } }
+# define __ARCH_SPIN_LOCK_UNLOCKED     { { 1, 1, 1, 1 } }
 #endif
 } arch_spinlock_t;
 
@@ -16,6 +16,6 @@ typedef struct {
        volatile int counter;
 } raw_rwlock_t;
 
-#define __RAW_RW_LOCK_UNLOCKED         { __RAW_SPIN_LOCK_UNLOCKED, 0 }
+#define __RAW_RW_LOCK_UNLOCKED         { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
 
 #endif
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index fdd7f583de540cbce8cebc0193c835aa9eae3e37..353963d42059aa409eb36c15a00bbfb5b49068a0 100644
@@ -13,7 +13,7 @@
 
 #ifdef CONFIG_SMP
 arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
-       [0 ... (ATOMIC_HASH_SIZE-1)]  = __RAW_SPIN_LOCK_UNLOCKED
+       [0 ... (ATOMIC_HASH_SIZE-1)]  = __ARCH_SPIN_LOCK_UNLOCKED
 };
 #endif
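
For context, this hash table backs parisc's atomic operations on SMP:
an atomic variable's address is hashed to pick one of the
ATOMIC_HASH_SIZE locks. A rough sketch of such a lookup (illustrative
only; the real ATOMIC_HASH macro in arch/parisc/include/asm/atomic.h
may differ in detail):

    /* Illustrative address-hash lock lookup, not the exact parisc macro. */
    #define EXAMPLE_ATOMIC_HASH(addr) \
        (&__atomic_hash[((unsigned long)(addr) / L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE - 1)])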
 
diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h
index 4312e5baaf88610fb0871730007ad12a10557928..f5f39d82711fdae30968ea5e8c85fe33036ba5da 100644
@@ -9,7 +9,7 @@ typedef struct {
        volatile unsigned int slock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED      { 0 }
 
 typedef struct {
        volatile signed int lock;
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 579069c1215283519a6090b2b14ec742d4f906e6..57dfa414cfb8d8abc08ed9432a4ef93e78c2483b 100644
@@ -42,7 +42,7 @@
 #include <asm/mmu.h>
 
 struct rtas_t rtas = {
-       .lock = __RAW_SPIN_LOCK_UNLOCKED
+       .lock = __ARCH_SPIN_LOCK_UNLOCKED
 };
 EXPORT_SYMBOL(rtas);
 
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index a93638eee3f79174e769b4680e5f99f9be05c063..e25c0370f6cd3c89acd2e8a63b8a820c15cd8ffe 100644
@@ -9,7 +9,7 @@ typedef struct {
        volatile unsigned int owner_cpu;
 } __attribute__ ((aligned (4))) arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED      { 0 }
 
 typedef struct {
        volatile unsigned int lock;
diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h
index 37712c32ba9906fa06be096ade98ad660c1310c1..a3be2db960ed47e90b52fe3c991bb4077683cf4a 100644
@@ -9,7 +9,7 @@ typedef struct {
        volatile unsigned int lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED               { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED              { 1 }
 
 typedef struct {
        volatile unsigned int lock;
diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h
index 41d9a8fec13d8ff99ebe3610f0e974b62a0a8fb0..c145e63a5d66ad76534a1ffe5ddd0ef7f68a530b 100644
@@ -9,7 +9,7 @@ typedef struct {
        volatile unsigned char lock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED      { 0 }
 
 typedef struct {
        volatile unsigned int lock;
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 2ae7637ed5248b2644788464ef955c3be29ad4b4..696f8364a4f37bd1093fc7fd02224c2579336af1 100644
@@ -9,7 +9,7 @@ typedef struct arch_spinlock {
        unsigned int slock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED       { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED      { 0 }
 
 typedef struct {
        unsigned int lock;
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 0862d9d89c9239da2b79e329d339bcc6d437c412..5b75afac8a38baa1646fc964ac1e0c9215f5c6f7 100644
@@ -188,7 +188,7 @@ void dump_stack(void)
 }
 EXPORT_SYMBOL(dump_stack);
 
-static arch_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 static int die_owner = -1;
 static unsigned int die_nest_count;
 
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 9f908b9d1abe8b3cdb444144fe024d071b3ce503..f1714697a09aaef20d6c70585b0345b5b510c362 100644
@@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count;
  * we want to have the fastest, inlined, non-debug version
  * of a critical section, to be able to prove TSC time-warps:
  */
-static __cpuinitdata arch_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
 static __cpuinitdata cycles_t last_tsc;
 static __cpuinitdata cycles_t max_warp;
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index d4af2d7a86ea00420cc3b5ee710a02603769d45d..7dadce303ebf0b47e7587aefdc1810e09ca9dcc2 100644
@@ -43,14 +43,14 @@ typedef struct {
 
 #ifdef CONFIG_DEBUG_SPINLOCK
 # define __SPIN_LOCK_UNLOCKED(lockname)                                        \
-       (spinlock_t)    {       .raw_lock = __RAW_SPIN_LOCK_UNLOCKED,   \
+       (spinlock_t)    {       .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,  \
                                .magic = SPINLOCK_MAGIC,                \
                                .owner = SPINLOCK_OWNER_INIT,           \
                                .owner_cpu = -1,                        \
                                SPIN_DEP_MAP_INIT(lockname) }
 #else
 # define __SPIN_LOCK_UNLOCKED(lockname) \
-       (spinlock_t)    {       .raw_lock = __RAW_SPIN_LOCK_UNLOCKED,   \
+       (spinlock_t)    {       .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,  \
                                SPIN_DEP_MAP_INIT(lockname) }
 #endif
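
With the hunk above applied, and with CONFIG_DEBUG_SPINLOCK and
lockdep disabled (so SPIN_DEP_MAP_INIT() expands to nothing), a static
definition boils down to roughly this sketch (my_lock is just an
example name):

    spinlock_t my_lock = (spinlock_t) { .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, };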
 
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index 34d36691c4ec28c863b5c9d6818071283bf30856..10db021f4875a62446a42ad66a84b41c14d08ea3 100644
@@ -18,13 +18,13 @@ typedef struct {
        volatile unsigned int slock;
 } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
 
 #else
 
 typedef struct { } arch_spinlock_t;
 
-#define __RAW_SPIN_LOCK_UNLOCKED { }
+#define __ARCH_SPIN_LOCK_UNLOCKED { }
 
 #endif
 
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 7cc50c62af598fd420577a344b6fc1291e648835..2389e3f85cf6861d3c1eb7c71ae00d0e9cc41db1 100644
@@ -73,7 +73,7 @@ module_param(lock_stat, int, 0644);
  * to use a raw spinlock - we really dont want the spinlock
  * code to recurse back into the lockdep code...
  */
-static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static int graph_lock(void)
 {
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 5ac8ee0a9e351455134fef43ab2b7e7db9718eb5..fb7a0fa508b9e6077f37a46af3e00e671f6877e0 100644
@@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
        cpu_buffer->buffer = buffer;
        spin_lock_init(&cpu_buffer->reader_lock);
        lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
-       cpu_buffer->lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+       cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
        bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
                            GFP_KERNEL, cpu_to_node(cpu));
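
A note on the cast pattern used here and in the lockdep/tracing hunks:
__ARCH_SPIN_LOCK_UNLOCKED is a bare brace initializer, which is only
valid when initializing a declaration; the (arch_spinlock_t) cast
forms a C99 compound literal, so the value can also be assigned to an
already-declared lock:

    arch_spinlock_t lock;
    /* plain '{ 0 }' would not compile here; the compound literal does */
    lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;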
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 7d56cecc2c6ea8bba5bc87d5d1e86d4e2c393c6a..63bc1cc3821979dfb6663d3d28af57579174a48a 100644
@@ -501,7 +501,7 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
  * CONFIG_TRACER_MAX_TRACE.
  */
 static arch_spinlock_t ftrace_max_lock =
-       (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+       (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly    tracing_max_latency;
@@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
-static arch_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
 /* temporary disable recording */
 static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -1252,7 +1252,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
        static arch_spinlock_t trace_buf_lock =
-               (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+               (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
        static u32 trace_buf[TRACE_BUF_SIZE];
 
        struct ftrace_event_call *call = &event_bprint;
@@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
                        unsigned long ip, const char *fmt, va_list args)
 {
-       static arch_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+       static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
        static char trace_buf[TRACE_BUF_SIZE];
 
        struct ftrace_event_call *call = &event_print;
@@ -4308,7 +4308,7 @@ trace_printk_seq(struct trace_seq *s)
 static void __ftrace_dump(bool disable_tracing)
 {
        static arch_spinlock_t ftrace_dump_lock =
-               (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+               (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
        /* use static because iter can be a bit big for the stack */
        static struct trace_iterator iter;
        unsigned int old_userobj;
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 206ec3d4b3c25650f701c6b6351896e9d84b1dc8..433e2eda2d01c1be10a89c15c04cdc02dd80578b 100644
@@ -74,7 +74,7 @@ static struct {
        arch_spinlock_t lock;
 } trace_clock_struct ____cacheline_aligned_in_smp =
        {
-               .lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
+               .lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
        };
 
 u64 notrace trace_clock_global(void)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 4cf7e83ec2357ff8f5f954a14ab29af929939ea3..e347853564e952beddb3f42439782b76fcf74470 100644
@@ -29,7 +29,7 @@ static unsigned                       wakeup_prio = -1;
 static int                     wakeup_rt;
 
 static arch_spinlock_t wakeup_lock =
-       (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+       (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static void __wakeup_reset(struct trace_array *tr);
 
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 9a82d568fdec604bb72e03c36f7bebea606c82bf..728c352214834eeabbb6967df7befcc28d391639 100644
@@ -28,7 +28,7 @@ static struct stack_trace max_stack_trace = {
 
 static unsigned long max_stack_size;
 static arch_spinlock_t max_stack_lock =
-       (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+       (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static int stack_trace_disabled __read_mostly;
 static DEFINE_PER_CPU(int, trace_active);
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 2acd501b382602baf9aa2c8879f1de4777d24986..f73004137141fdb275bb09c1afc88df2150a2007 100644
@@ -23,7 +23,7 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
-       lock->raw_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+       lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
        lock->magic = SPINLOCK_MAGIC;
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;