Previously this used a hardcoded 32; use L1_CACHE_BYTES for
cacheline alignment instead.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
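
(For reference -- a minimal sketch only, not part of the patch: L1_CACHE_BYTES
is conventionally derived from the cache-line shift in <asm/cache.h>, so the
alignment tracks the configured CPU instead of a magic number. The concrete
shift value below is an assumption for illustration.)

	/* Sketch of the usual derivation; the actual L1_CACHE_SHIFT value
	 * is CPU-dependent and assumed here. */
	#define L1_CACHE_SHIFT	5			/* assumed: 2^5 = 32-byte lines */
	#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
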
* Written by Niibe Yutaka
*/
#include <asm/thread_info.h>
+#include <asm/cache.h>
#include <asm-generic/vmlinux.lds.h>
#ifdef CONFIG_CPU_LITTLE_ENDIAN
. = ALIGN(PAGE_SIZE);
.data.page_aligned : { *(.data.page_aligned) }
- . = ALIGN(32);
+ . = ALIGN(L1_CACHE_BYTES);
__per_cpu_start = .;
.data.percpu : { *(.data.percpu) }
__per_cpu_end = .;
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
+#ifndef __ASSEMBLY__
struct cache_info {
unsigned int ways; /* Number of cache ways */
unsigned int sets; /* Number of cache sets */
unsigned long flags;
};
-
+#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHE_H */
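
(As a quick illustration of the L1_CACHE_ALIGN() macro above -- a standalone
sketch, not part of the patch, assuming a 32-byte line size: adding
L1_CACHE_BYTES-1 and masking with ~(L1_CACHE_BYTES-1) rounds a size up to the
next multiple of the cache-line size.)

	#include <stdio.h>

	#define L1_CACHE_BYTES	32	/* assumed line size for the example */
	#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))

	int main(void)
	{
		printf("%lu\n", L1_CACHE_ALIGN(1UL));	/* -> 32 */
		printf("%lu\n", L1_CACHE_ALIGN(32UL));	/* -> 32 */
		printf("%lu\n", L1_CACHE_ALIGN(33UL));	/* -> 64 */
		return 0;
	}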