From feaf7cf153335fe7100b65ed6f4585c3574fe69a Mon Sep 17 00:00:00 2001
From: Becky Bruce
Date: Thu, 22 Sep 2005 14:20:04 -0500
Subject: [PATCH] powerpc: merge atomic.h, memory.h

powerpc: Merge atomic.h and memory.h into powerpc

Merged atomic.h into include/asm-powerpc.  Moved asm-style HMT_
defines from memory.h into ppc_asm.h, where there were already HMT
defines; moved C-style HMT_ defines to processor.h.  Renamed memory.h
to synch.h to better reflect its contents.

Signed-off-by: Kumar Gala
Signed-off-by: Becky Bruce
Signed-off-by: Jon Loeliger
Signed-off-by: Paul Mackerras
---
 include/{asm-ppc => asm-powerpc}/atomic.h |  45 +++--
 include/asm-powerpc/ppc_asm.h             |   3 +
 include/asm-powerpc/synch.h               |  51 ++++++
 include/asm-ppc/io.h                      |  11 +-
 include/asm-ppc64/atomic.h                | 197 ----------------------
 include/asm-ppc64/bitops.h                |   2 +-
 include/asm-ppc64/futex.h                 |   2 +-
 include/asm-ppc64/io.h                    |   2 +-
 include/asm-ppc64/memory.h                |  61 -------
 include/asm-ppc64/processor.h             |   8 +
 include/asm-ppc64/system.h                |   4 +-
 11 files changed, 88 insertions(+), 298 deletions(-)
 rename include/{asm-ppc => asm-powerpc}/atomic.h (85%)
 create mode 100644 include/asm-powerpc/synch.h
 delete mode 100644 include/asm-ppc64/atomic.h
 delete mode 100644 include/asm-ppc64/memory.h

diff --git a/include/asm-ppc/atomic.h b/include/asm-powerpc/atomic.h
similarity index 85%
rename from include/asm-ppc/atomic.h
rename to include/asm-powerpc/atomic.h
index eeafd505836e..ed4b345ed75d 100644
--- a/include/asm-ppc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -1,29 +1,20 @@
+#ifndef _ASM_POWERPC_ATOMIC_H_
+#define _ASM_POWERPC_ATOMIC_H_
+
 /*
  * PowerPC atomic operations
  */
 
-#ifndef _ASM_PPC_ATOMIC_H_
-#define _ASM_PPC_ATOMIC_H_
-
 typedef struct { volatile int counter; } atomic_t;
 
 #ifdef __KERNEL__
+#include <asm/synch.h>
 
-#define ATOMIC_INIT(i)	{ (i) }
+#define ATOMIC_INIT(i)	{ (i) }
 
 #define atomic_read(v)		((v)->counter)
 #define atomic_set(v,i)		(((v)->counter) = (i))
 
-extern void atomic_clear_mask(unsigned long mask, unsigned long *addr);
-
-#ifdef CONFIG_SMP
-#define SMP_SYNC	"sync"
-#define SMP_ISYNC	"\n\tisync"
-#else
-#define SMP_SYNC	""
-#define SMP_ISYNC
-#endif
-
 /* Erratum #77 on the 405 means we need a sync or dcbt before every stwcx.
  * The old ATOMIC_SYNC_FIX covered some but not all of this.
  */
@@ -53,12 +44,13 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
+	EIEIO_ON_SMP
 "1:	lwarx	%0,0,%2		# atomic_add_return\n\
 	add	%0,%1,%0\n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%0,0,%2 \n\
 	bne-	1b"
-	SMP_ISYNC
+	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -88,12 +80,13 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
+	EIEIO_ON_SMP
 "1:	lwarx	%0,0,%2		# atomic_sub_return\n\
 	subf	%0,%1,%0\n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%0,0,%2 \n\
 	bne-	1b"
-	SMP_ISYNC
+	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -121,12 +114,13 @@ static __inline__ int atomic_inc_return(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
+	EIEIO_ON_SMP
 "1:	lwarx	%0,0,%1		# atomic_inc_return\n\
 	addic	%0,%0,1\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1 \n\
 	bne-	1b"
-	SMP_ISYNC
+	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "memory");
@@ -164,12 +158,13 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
+	EIEIO_ON_SMP
 "1:	lwarx	%0,0,%1		# atomic_dec_return\n\
 	addic	%0,%0,-1\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1\n\
 	bne-	1b"
-	SMP_ISYNC
+	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "memory");
@@ -189,13 +184,14 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 	int t;
 
 	__asm__ __volatile__(
+	EIEIO_ON_SMP
 "1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
 	addic.	%0,%0,-1\n\
 	blt-	2f\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1\n\
 	bne-	1b"
-	SMP_ISYNC
+	ISYNC_ON_SMP
 	"\n\
 2:"	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "memory");
@@ -204,11 +200,10 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 	return t;
 }
 
-#define __MB	__asm__ __volatile__ (SMP_SYNC : : : "memory")
-#define smp_mb__before_atomic_dec()	__MB
-#define smp_mb__after_atomic_dec()	__MB
-#define smp_mb__before_atomic_inc()	__MB
-#define smp_mb__after_atomic_inc()	__MB
+#define smp_mb__before_atomic_dec()	smp_mb()
+#define smp_mb__after_atomic_dec()	smp_mb()
+#define smp_mb__before_atomic_inc()	smp_mb()
+#define smp_mb__after_atomic_inc()	smp_mb()
 
 #endif /* __KERNEL__ */
-#endif /* _ASM_PPC_ATOMIC_H_ */
+#endif /* _ASM_POWERPC_ATOMIC_H_ */
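
A note on the atomic.h hunks above: replacing SMP_ISYNC with the
EIEIO_ON_SMP/ISYNC_ON_SMP pair is more than a rename.  On SMP the
lwarx/stwcx. loop is now bracketed by eieio before and isync after, so
the value-returning atomics behave as full barriers, matching the
ppc64 implementation being deleted further down.  A minimal sketch of
the kind of driver code that relies on that guarantee; struct widget
and widget_put() are hypothetical, not from this patch:

#include <linux/slab.h>
#include <asm/atomic.h>

/* Hypothetical refcount drop that depends on atomic_dec_return()
 * being a full barrier on SMP: all earlier stores to the object are
 * ordered before the decrement, so whichever CPU sees the count hit
 * zero also sees the object's final contents.
 */
struct widget {
	atomic_t refcount;
	int data;
};

static void widget_put(struct widget *w)
{
	if (atomic_dec_return(&w->refcount) == 0)
		kfree(w);
}
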
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index 553035cda00e..4efa71878fa9 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -75,8 +75,11 @@
 #define REST_32EVRS(n,s,base)	REST_16EVRS(n,s,base); REST_16EVRS(n+16,s,base)
 
 /* Macros to adjust thread priority for Iseries hardware multithreading */
+#define HMT_VERY_LOW	or	31,31,31	# very low priority
 #define HMT_LOW		or	1,1,1
+#define HMT_MEDIUM_LOW	or	6,6,6		# medium low priority
 #define HMT_MEDIUM	or	2,2,2
+#define HMT_MEDIUM_HIGH	or	5,5,5		# medium high priority
 #define HMT_HIGH	or	3,3,3
 
 /* handle instructions that older assemblers may not know */
diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h
new file mode 100644
index 000000000000..4660c0394a77
--- /dev/null
+++ b/include/asm-powerpc/synch.h
@@ -0,0 +1,51 @@
+#ifndef _ASM_POWERPC_SYNCH_H
+#define _ASM_POWERPC_SYNCH_H
+
+#include <linux/stringify.h>
+
+#ifdef __powerpc64__
+#define __SUBARCH_HAS_LWSYNC
+#endif
+
+#ifdef __SUBARCH_HAS_LWSYNC
+# define LWSYNC	lwsync
+#else
+# define LWSYNC	sync
+#endif
+
+
+/*
+ * Arguably the bitops and *xchg operations don't imply any memory barrier
+ * or SMP ordering, but in fact a lot of drivers expect them to imply
+ * both, since they do on x86 cpus.
+ */
+#ifdef CONFIG_SMP
+#define EIEIO_ON_SMP	"eieio\n"
+#define ISYNC_ON_SMP	"\n\tisync"
+#define SYNC_ON_SMP	__stringify(LWSYNC) "\n"
+#else
+#define EIEIO_ON_SMP
+#define ISYNC_ON_SMP
+#define SYNC_ON_SMP
+#endif
+
+static inline void eieio(void)
+{
+	__asm__ __volatile__ ("eieio" : : : "memory");
+}
+
+static inline void isync(void)
+{
+	__asm__ __volatile__ ("isync" : : : "memory");
+}
+
+#ifdef CONFIG_SMP
+#define eieio_on_smp()	eieio()
+#define isync_on_smp()	isync()
+#else
+#define eieio_on_smp()	__asm__ __volatile__("": : :"memory")
+#define isync_on_smp()	__asm__ __volatile__("": : :"memory")
+#endif
+
+#endif /* _ASM_POWERPC_SYNCH_H */
+
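
The new synch.h keeps two flavors of each barrier: string macros
(EIEIO_ON_SMP, ISYNC_ON_SMP, SYNC_ON_SMP) for pasting into inline
assembly, and C helpers (eieio(), isync(), and the *_on_smp() forms)
for ordinary code.  Note also that SYNC_ON_SMP now expands through
__stringify(LWSYNC), giving lwsync on 64-bit and plain sync on 32-bit
parts that lack lwsync.  A hedged sketch of how the string form is
typically spliced into an lwarx/stwcx. sequence; my_trylock() is a
made-up example, not an interface added by this patch:

#include <asm/synch.h>

/* Hypothetical acquire-style test-and-set: ISYNC_ON_SMP after a
 * successful stwcx. keeps later loads from running ahead of taking
 * the lock; on UP builds the macro expands to nothing.
 */
static inline int my_trylock(volatile unsigned int *lock)
{
	unsigned int old;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%1		# my_trylock\n\
	cmpwi	0,%0,0\n\
	bne-	2f\n\
	stwcx.	%2,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n2:"
	: "=&r" (old)
	: "r" (lock), "r" (1)
	: "cr0", "memory");

	return old == 0;	/* nonzero: we now hold the lock */
}
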
diff --git a/include/asm-ppc/io.h b/include/asm-ppc/io.h
index 7eb7cf6360bd..39caf067a31b 100644
--- a/include/asm-ppc/io.h
+++ b/include/asm-ppc/io.h
@@ -8,6 +8,7 @@
 #include
 #include
+#include <asm/synch.h>
 #include
 
 #define SIO_CONFIG_RA	0x398
@@ -440,16 +441,6 @@ extern inline void * phys_to_virt(unsigned long address)
 #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
 #define page_to_bus(page)	(page_to_phys(page) + PCI_DRAM_OFFSET)
 
-/*
- * Enforce In-order Execution of I/O:
- * Acts as a barrier to ensure all previous I/O accesses have
- * completed before any further ones are issued.
- */
-extern inline void eieio(void)
-{
-	__asm__ __volatile__ ("eieio" : : : "memory");
-}
-
 /* Enforce in-order execution of data I/O.
  * No distinction between read/write on PPC; use eieio for all three.
  */
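
With eieio() now coming from asm/synch.h, asm-ppc/io.h loses its
private copy but drivers see the same semantics: eieio (Enforce
In-order Execution of I/O) orders accesses to device memory.  A hedged
sketch of typical use; the device, register offsets, and start_dma()
are invented for illustration:

#include <asm/io.h>
#include <asm/synch.h>

/* Hypothetical MMIO sequence: eieio() prevents the "go" write from
 * being reordered ahead of the address setup write.
 */
static void start_dma(void __iomem *regs, unsigned int busaddr)
{
	out_be32(regs + 0x00, busaddr);	/* DMA address register */
	eieio();			/* order the two device stores */
	out_be32(regs + 0x04, 1);	/* command register: go */
}
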
diff --git a/include/asm-ppc64/atomic.h b/include/asm-ppc64/atomic.h
deleted file mode 100644
index 0e5f25e83bc0..000000000000
--- a/include/asm-ppc64/atomic.h
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * PowerPC64 atomic operations
- *
- * Copyright (C) 2001 Paul Mackerras , IBM
- * Copyright (C) 2001 Anton Blanchard , IBM
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _ASM_PPC64_ATOMIC_H_
-#define _ASM_PPC64_ATOMIC_H_
-
-#include <asm/memory.h>
-
-typedef struct { volatile int counter; } atomic_t;
-
-#define ATOMIC_INIT(i)	{ (i) }
-
-#define atomic_read(v)		((v)->counter)
-#define atomic_set(v,i)		(((v)->counter) = (i))
-
-static __inline__ void atomic_add(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%3		# atomic_add\n\
-	add	%0,%2,%0\n\
-	stwcx.	%0,0,%3\n\
-	bne-	1b"
-	: "=&r" (t), "=m" (v->counter)
-	: "r" (a), "r" (&v->counter), "m" (v->counter)
-	: "cc");
-}
-
-static __inline__ int atomic_add_return(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-	EIEIO_ON_SMP
-"1:	lwarx	%0,0,%2		# atomic_add_return\n\
-	add	%0,%1,%0\n\
-	stwcx.	%0,0,%2\n\
-	bne-	1b"
-	ISYNC_ON_SMP
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
-}
-
-#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
-
-static __inline__ void atomic_sub(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%3		# atomic_sub\n\
-	subf	%0,%2,%0\n\
-	stwcx.	%0,0,%3\n\
-	bne-	1b"
-	: "=&r" (t), "=m" (v->counter)
-	: "r" (a), "r" (&v->counter), "m" (v->counter)
-	: "cc");
-}
-
-static __inline__ int atomic_sub_return(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-	EIEIO_ON_SMP
-"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
-	subf	%0,%1,%0\n\
-	stwcx.	%0,0,%2\n\
-	bne-	1b"
-	ISYNC_ON_SMP
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
-}
-
-static __inline__ void atomic_inc(atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%2		# atomic_inc\n\
-	addic	%0,%0,1\n\
-	stwcx.	%0,0,%2\n\
-	bne-	1b"
-	: "=&r" (t), "=m" (v->counter)
-	: "r" (&v->counter), "m" (v->counter)
-	: "cc");
-}
-
-static __inline__ int atomic_inc_return(atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-	EIEIO_ON_SMP
-"1:	lwarx	%0,0,%1		# atomic_inc_return\n\
-	addic	%0,%0,1\n\
-	stwcx.	%0,0,%1\n\
-	bne-	1b"
-	ISYNC_ON_SMP
-	: "=&r" (t)
-	: "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
-}
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-static __inline__ void atomic_dec(atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%2		# atomic_dec\n\
-	addic	%0,%0,-1\n\
-	stwcx.	%0,0,%2\n\
-	bne-	1b"
-	: "=&r" (t), "=m" (v->counter)
-	: "r" (&v->counter), "m" (v->counter)
-	: "cc");
-}
-
-static __inline__ int atomic_dec_return(atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-	EIEIO_ON_SMP
-"1:	lwarx	%0,0,%1		# atomic_dec_return\n\
-	addic	%0,%0,-1\n\
-	stwcx.	%0,0,%1\n\
-	bne-	1b"
-	ISYNC_ON_SMP
-	: "=&r" (t)
-	: "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
-}
-
-#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
-#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)
-
-/*
- * Atomically test *v and decrement if it is greater than 0.
- * The function returns the old value of *v minus 1.
- */
-static __inline__ int atomic_dec_if_positive(atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-	EIEIO_ON_SMP
-"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
-	addic.	%0,%0,-1\n\
-	blt-	2f\n\
-	stwcx.	%0,0,%1\n\
-	bne-	1b"
-	ISYNC_ON_SMP
-	"\n\
-2:"	: "=&r" (t)
-	: "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
-}
-
-#define smp_mb__before_atomic_dec()	smp_mb()
-#define smp_mb__after_atomic_dec()	smp_mb()
-#define smp_mb__before_atomic_inc()	smp_mb()
-#define smp_mb__after_atomic_inc()	smp_mb()
-
-#endif /* _ASM_PPC64_ATOMIC_H_ */
diff --git a/include/asm-ppc64/bitops.h b/include/asm-ppc64/bitops.h
index a0f831224f96..dbfa42ef4a99 100644
--- a/include/asm-ppc64/bitops.h
+++ b/include/asm-ppc64/bitops.h
@@ -42,7 +42,7 @@
 
 #ifdef __KERNEL__
 
-#include <asm/memory.h>
+#include <asm/synch.h>
 
 /*
  * clear_bit doesn't imply a memory barrier
diff --git a/include/asm-ppc64/futex.h b/include/asm-ppc64/futex.h
index cb2640b3a408..266b460de44e 100644
--- a/include/asm-ppc64/futex.h
+++ b/include/asm-ppc64/futex.h
@@ -5,7 +5,7 @@
 
 #include
 #include
-#include <asm/memory.h>
+#include <asm/synch.h>
 #include
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
diff --git a/include/asm-ppc64/io.h b/include/asm-ppc64/io.h
index 59c958aea4db..bd7c9532d77b 100644
--- a/include/asm-ppc64/io.h
+++ b/include/asm-ppc64/io.h
@@ -15,7 +15,7 @@
 #ifdef CONFIG_PPC_ISERIES
 #include
 #endif
-#include <asm/memory.h>
+#include <asm/synch.h>
 #include
 #include
 
diff --git a/include/asm-ppc64/memory.h b/include/asm-ppc64/memory.h
deleted file mode 100644
index af53ffb55726..000000000000
--- a/include/asm-ppc64/memory.h
+++ /dev/null
@@ -1,61 +0,0 @@
-#ifndef _ASM_PPC64_MEMORY_H_
-#define _ASM_PPC64_MEMORY_H_
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include
-
-/*
- * Arguably the bitops and *xchg operations don't imply any memory barrier
- * or SMP ordering, but in fact a lot of drivers expect them to imply
- * both, since they do on x86 cpus.
- */
-#ifdef CONFIG_SMP
-#define EIEIO_ON_SMP	"eieio\n"
-#define ISYNC_ON_SMP	"\n\tisync"
-#define SYNC_ON_SMP	"lwsync\n\t"
-#else
-#define EIEIO_ON_SMP
-#define ISYNC_ON_SMP
-#define SYNC_ON_SMP
-#endif
-
-static inline void eieio(void)
-{
-	__asm__ __volatile__ ("eieio" : : : "memory");
-}
-
-static inline void isync(void)
-{
-	__asm__ __volatile__ ("isync" : : : "memory");
-}
-
-#ifdef CONFIG_SMP
-#define eieio_on_smp()	eieio()
-#define isync_on_smp()	isync()
-#else
-#define eieio_on_smp()	__asm__ __volatile__("": : :"memory")
-#define isync_on_smp()	__asm__ __volatile__("": : :"memory")
-#endif
-
-/* Macros for adjusting thread priority (hardware multi-threading) */
-#define HMT_very_low()	 asm volatile("or 31,31,31	# very low priority")
-#define HMT_low()	 asm volatile("or 1,1,1		# low priority")
-#define HMT_medium_low() asm volatile("or 6,6,6	# medium low priority")
-#define HMT_medium()	 asm volatile("or 2,2,2		# medium priority")
-#define HMT_medium_high() asm volatile("or 5,5,5	# medium high priority")
-#define HMT_high()	 asm volatile("or 3,3,3		# high priority")
-
-#define HMT_VERY_LOW	"\tor	31,31,31	# very low priority\n"
-#define HMT_LOW		"\tor	1,1,1		# low priority\n"
-#define HMT_MEDIUM_LOW	"\tor	6,6,6		# medium low priority\n"
-#define HMT_MEDIUM	"\tor	2,2,2		# medium priority\n"
-#define HMT_MEDIUM_HIGH	"\tor	5,5,5		# medium high priority\n"
-#define HMT_HIGH	"\tor	3,3,3		# high priority\n"
-
-#endif
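
The HMT macros removed with memory.h are not lost: the asm-usable
forms move to ppc_asm.h (above) and the C forms reappear in
processor.h (below).  On hardware-multithreaded processors these
or-to-self instructions tell the core to favor the sibling thread.  A
sketch of the usual pattern; wait_for_flag() is a hypothetical
function, not part of this patch:

#include <asm/processor.h>

/* Hypothetical busy-wait: drop hardware thread priority while
 * spinning, restore it once there is real work to do.
 */
static void wait_for_flag(volatile int *flag)
{
	while (!*flag)
		HMT_low();	/* favor the sibling hardware thread */
	HMT_medium();		/* back to normal priority */
}
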
diff --git a/include/asm-ppc64/processor.h b/include/asm-ppc64/processor.h
index 4146189006e3..e5fc18531ec1 100644
--- a/include/asm-ppc64/processor.h
+++ b/include/asm-ppc64/processor.h
@@ -368,6 +368,14 @@ GLUE(.,name):
 #define mfasr()		({unsigned long rval; \
 			asm volatile("mfasr %0" : "=r" (rval)); rval;})
 
+/* Macros for adjusting thread priority (hardware multi-threading) */
+#define HMT_very_low()	 asm volatile("or 31,31,31	# very low priority")
+#define HMT_low()	 asm volatile("or 1,1,1		# low priority")
+#define HMT_medium_low() asm volatile("or 6,6,6	# medium low priority")
+#define HMT_medium()	 asm volatile("or 2,2,2		# medium priority")
+#define HMT_medium_high() asm volatile("or 5,5,5	# medium high priority")
+#define HMT_high()	 asm volatile("or 3,3,3		# high priority")
+
 static inline void set_tb(unsigned int upper, unsigned int lower)
 {
 	mttbl(0);
diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h
index 375015c62f20..1fbdc9f0590c 100644
--- a/include/asm-ppc64/system.h
+++ b/include/asm-ppc64/system.h
@@ -13,7 +13,7 @@
 #include
 #include
 #include
-#include <asm/memory.h>
+#include <asm/synch.h>
 
 /*
  * Memory barrier.
@@ -48,7 +48,7 @@
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
-#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
+#define smp_wmb()	eieio()
 #define smp_read_barrier_depends()	read_barrier_depends()
 #else
 #define smp_mb()	__asm__ __volatile__("": : :"memory")
-- 
2.20.1
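
One last note, on the system.h hunk: the new smp_wmb() is
behavior-preserving, since eieio() from synch.h emits the same eieio
instruction with a "memory" clobber that the open-coded asm did.  The
pattern these barriers serve, sketched with invented names:

#include <asm/system.h>

/* Hypothetical producer/consumer: smp_wmb() keeps the payload store
 * ordered before the flag store, so a consumer that observes
 * ready != 0 and issues its own read barrier also sees the payload.
 */
static int payload;
static volatile int ready;

static void producer(void)
{
	payload = 42;
	smp_wmb();	/* expands to eieio() on SMP */
	ready = 1;
}

static int consumer(void)
{
	while (!ready)
		;
	smp_rmb();	/* pairs with the producer's smp_wmb() */
	return payload;
}
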