From 4af57b787b4be09419a2bb48aa705fa87ef41cca Mon Sep 17 00:00:00 2001
From: Tim Abbott
Date: Sat, 20 Feb 2010 01:03:34 +0100
Subject: [PATCH] Rename .data.cacheline_aligned to .data..cacheline_aligned.

Signed-off-by: Tim Abbott
Cc: Sam Ravnborg
Signed-off-by: Denys Vlasenko
Signed-off-by: Michal Marek
---
 arch/powerpc/kernel/vmlinux.lds.S | 2 +-
 arch/x86/kernel/init_task.c       | 2 +-
 include/asm-generic/vmlinux.lds.h | 2 +-
 include/linux/cache.h             | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index dcd01c82e701..3229c0622161 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -231,7 +231,7 @@ SECTIONS
 		PAGE_ALIGNED_DATA(PAGE_SIZE)
 	}
 
-	.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
+	.data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
 		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
 	}
 
diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
index 3a54dcb9cd0e..43e9ccf44947 100644
--- a/arch/x86/kernel/init_task.c
+++ b/arch/x86/kernel/init_task.c
@@ -34,7 +34,7 @@ EXPORT_SYMBOL(init_task);
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's. The TSS size is kept cacheline-aligned
- * so they are allowed to end up in the .data.cacheline_aligned
+ * so they are allowed to end up in the .data..cacheline_aligned
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 67e652068e0e..78450aaab9ef 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -189,7 +189,7 @@
 
 #define CACHELINE_ALIGNED_DATA(align)				\
 	. = ALIGN(align);					\
-	*(.data.cacheline_aligned)
+	*(.data..cacheline_aligned)
 
 #define INIT_TASK_DATA(align)					\
 	. = ALIGN(align);					\
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 97e24881c4c6..4c570653ab84 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -31,7 +31,7 @@
 #ifndef __cacheline_aligned
 #define __cacheline_aligned				\
   __attribute__((__aligned__(SMP_CACHE_BYTES),		\
-		 __section__(".data.cacheline_aligned")))
+		 __section__(".data..cacheline_aligned")))
 #endif /* __cacheline_aligned */
 
 #ifndef __cacheline_aligned_in_smp
-- 
2.20.1
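
For context: the double dot follows the kernel convention of naming its own
special sections .data..foo, so that they can never collide with the
.data.<symbol> sections that gcc emits under -fdata-sections. The
__cacheline_aligned attribute patched above in include/linux/cache.h is what
places variables into this section. A minimal usage sketch follows; the
struct and symbol names are illustrative, not taken from this patch:

    #include <linux/cache.h>

    /*
     * Illustrative only: a hot, frequently written structure.
     * __cacheline_aligned aligns the variable to SMP_CACHE_BYTES and
     * places it in the .data..cacheline_aligned section, so it does
     * not share a cache line with unrelated data (no false sharing).
     */
    static struct {
            unsigned long hits;
            unsigned long misses;
    } example_stats __cacheline_aligned;

After this rename, something like "objdump -t vmlinux | grep example_stats"
should report the symbol in .data..cacheline_aligned, the section that the
CACHELINE_ALIGNED_DATA() macro in the generic linker script collects on an
L1-cacheline boundary.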