From: Mathieu Desnoyers
Date: Thu, 7 Feb 2008 08:16:24 +0000 (-0800)
Subject: Add cmpxchg_local to sparc, move __cmpxchg to system.h
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=405321d3ab2b41960e2874f2f74609daffbc7a4c;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

Add cmpxchg_local to sparc, move __cmpxchg to system.h

Move cmpxchg and add cmpxchg_local to system.h.  Use the new generic
cmpxchg_local (which disables interrupts).

Signed-off-by: Mathieu Desnoyers
Cc: William Lee Irwin III
Cc: "David S. Miller"
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h
index 3328950dbfe6..5c944b5a8040 100644
--- a/include/asm-sparc/atomic.h
+++ b/include/asm-sparc/atomic.h
@@ -17,42 +17,6 @@ typedef struct { volatile int counter; } atomic_t;
 
 #ifdef __KERNEL__
 
-/* Emulate cmpxchg() the same way we emulate atomics,
- * by hashing the object address and indexing into an array
- * of spinlocks to get a bit of performance...
- *
- * See arch/sparc/lib/atomic32.c for implementation.
- *
- * Cribbed from <asm-parisc/atomic.h>
- */
-#define __HAVE_ARCH_CMPXCHG 1
-
-/* bug catcher for when unsupported size is used - won't link */
-extern void __cmpxchg_called_with_bad_pointer(void);
-/* we only need to support cmpxchg of a u32 on sparc */
-extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
-
-/* don't worry...optimizer will get rid of most of this */
-static inline unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
-{
-	switch(size) {
-	case 4:
-		return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
-	default:
-		__cmpxchg_called_with_bad_pointer();
-		break;
-	}
-	return old;
-}
-
-#define cmpxchg(ptr,o,n) ({ \
-	__typeof__(*(ptr)) _o_ = (o); \
-	__typeof__(*(ptr)) _n_ = (n); \
-	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
-		(unsigned long)_n_, sizeof(*(ptr))); \
-})
-
 #define ATOMIC_INIT(i)  { (i) }
 
 extern int __atomic_add_return(int, atomic_t *);
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index 2655d142b22d..45e47c159a6e 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -225,6 +225,54 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
 	return x;
 }
 
+/* Emulate cmpxchg() the same way we emulate atomics,
+ * by hashing the object address and indexing into an array
+ * of spinlocks to get a bit of performance...
+ *
+ * See arch/sparc/lib/atomic32.c for implementation.
+ *
+ * Cribbed from <asm-parisc/atomic.h>
+ */
+#define __HAVE_ARCH_CMPXCHG 1
+
+/* bug catcher for when unsupported size is used - won't link */
+extern void __cmpxchg_called_with_bad_pointer(void);
+/* we only need to support cmpxchg of a u32 on sparc */
+extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
+
+/* don't worry...optimizer will get rid of most of this */
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
+{
+	switch (size) {
+	case 4:
+		return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
+	default:
+		__cmpxchg_called_with_bad_pointer();
+		break;
+	}
+	return old;
+}
+
+#define cmpxchg(ptr, o, n)						\
+({									\
+	__typeof__(*(ptr)) _o_ = (o);					\
+	__typeof__(*(ptr)) _n_ = (n);					\
+	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
+			(unsigned long)_n_, sizeof(*(ptr)));		\
+})
+
+#include <asm-generic/cmpxchg-local.h>
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n)					\
+	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+			(unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
 extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));
 
 #endif /* __KERNEL__ */
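
For readers unfamiliar with the new primitive, the following is a minimal usage
sketch (not part of the patch) of how cmpxchg_local() might be used once this
change is applied.  The per-CPU variable and the function name are hypothetical;
the point is only that cmpxchg_local() is suitable for data touched by a single
CPU, where the interrupt-disabling generic fallback used here on sparc is
sufficient:

	#include <linux/percpu.h>
	#include <asm/system.h>	/* provides cmpxchg_local() after this patch */

	/* Hypothetical per-CPU counter; only the local CPU ever updates it. */
	static DEFINE_PER_CPU(unsigned long, sample_count);

	static void add_samples(unsigned long n)
	{
		unsigned long old, new;
		unsigned long *p = &get_cpu_var(sample_count);

		/* Retry until the CPU-local compare-and-swap succeeds. */
		do {
			old = *p;
			new = old + n;
		} while (cmpxchg_local(p, old, new) != old);

		put_cpu_var(sample_count);
	}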