#ifndef _ASM_GENERIC_LOCAL_H
#define _ASM_GENERIC_LOCAL_H

#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/atomic.h>
#include <asm/types.h>

/*
 * A signed long type for operations which are atomic for a single CPU.
 * Usually used in combination with per-cpu variables.
 *
 * This is the default implementation, which uses atomic_long_t.  Which is
 * rather pointless.  The whole point behind local_t is that some processors
 * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
 * running on this CPU.  local_t allows exploitation of such capabilities.
 */
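/*
 * Usage sketch (illustrative; "counters" and "count_event" are hypothetical
 * names, not from this file): a per-cpu event counter.  get_cpu_var()
 * disables preemption so the task cannot migrate between taking the per-cpu
 * address and performing the update; put_cpu_var() re-enables it.
 *
 *	static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);
 *
 *	static void count_event(void)
 *	{
 *		local_inc(&get_cpu_var(counters));
 *		put_cpu_var(counters);
 *	}
 */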
/* Implement in terms of atomics. */

/* Don't use typedef: don't want them to be mixed with atomic_t's. */
typedef struct
{
	atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l,i)	atomic_long_set((&(l)->a),(i))
#define local_inc(l)	atomic_long_inc(&(l)->a)
#define local_dec(l)	atomic_long_dec(&(l)->a)
#define local_add(i,l)	atomic_long_add((i),(&(l)->a))
#define local_sub(i,l)	atomic_long_sub((i),(&(l)->a))

#define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
#define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
#define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
#define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
#define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
#define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
#define local_inc_return(l) atomic_long_inc_return(&(l)->a)

#define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
#define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
#define local_add_unless(l, a, u) atomic_long_add_unless((&(l)->a), (a), (u))
#define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)

/* Non-atomic variants, i.e. preemption disabled and won't be touched
 * in interrupt, etc.  Some archs can optimize this case well. */
#define __local_inc(l)		local_set((l), local_read(l) + 1)
#define __local_dec(l)		local_set((l), local_read(l) - 1)
#define __local_add(i,l)	local_set((l), local_read(l) + (i))
#define __local_sub(i,l)	local_set((l), local_read(l) - (i))
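/*
 * Sketch for the __local_* forms (hypothetical struct and names): these
 * expand to plain read-modify-write sequences, so the caller must guarantee
 * that preemption is off and that the counter is never modified from
 * interrupt context.
 *
 *	struct my_stats { local_t bytes; };
 *
 *	preempt_disable();
 *	__local_add(len, &stats->bytes);
 *	preempt_enable();
 */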
/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than these naive implementations.  Note they take
 * a variable (e.g. mystruct.foo), not an address.
 */
#define cpu_local_read(l)	local_read(&__get_cpu_var(l))
#define cpu_local_set(l, i)	local_set(&__get_cpu_var(l), (i))
#define cpu_local_inc(l)	local_inc(&__get_cpu_var(l))
#define cpu_local_dec(l)	local_dec(&__get_cpu_var(l))
#define cpu_local_add(i, l)	local_add((i), &__get_cpu_var(l))
#define cpu_local_sub(i, l)	local_sub((i), &__get_cpu_var(l))
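/*
 * Sketch (reusing the hypothetical "counters" per-cpu variable from the
 * earlier example): the per-cpu variable itself is passed, not its address.
 * Since __get_cpu_var() does not disable preemption, the caller must
 * already be pinned to a CPU.
 *
 *	cpu_local_inc(counters);
 */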
/* Non-atomic increments, i.e. preemption disabled and won't be touched
 * in interrupt, etc.  Some archs can optimize this case well.
 */
#define __cpu_local_inc(l)	__local_inc(&__get_cpu_var(l))
#define __cpu_local_dec(l)	__local_dec(&__get_cpu_var(l))
#define __cpu_local_add(i, l)	__local_add((i), &__get_cpu_var(l))
#define __cpu_local_sub(i, l)	__local_sub((i), &__get_cpu_var(l))
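/*
 * Sketch: the __cpu_local_* forms combine both relaxations above, e.g. for
 * a counter only ever updated with preemption disabled and never from
 * interrupt context:
 *
 *	preempt_disable();
 *	__cpu_local_inc(counters);
 *	preempt_enable();
 */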

#endif /* _ASM_GENERIC_LOCAL_H */