#ifndef __ASM_SH64_ATOMIC_H
#define __ASM_SH64_ATOMIC_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/atomic.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		((v)->counter = (i))
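
/*
 * A minimal usage sketch (illustrative only; the variable name is
 * hypothetical): declare and initialise a counter, then update and
 * read it back.
 *
 *	static atomic_t nr_active = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_active, 5);
 *	if (atomic_read(&nr_active) > 0)
 *		...
 */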

#include <asm/system.h>

/*
 * The sh64 port has no SMP support, so these operations need only be
 * atomic with respect to interrupts: disabling local interrupts around
 * the read-modify-write sequence is sufficient.
 */

static __inline__ void atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	v->counter += i;
	local_irq_restore(flags);
}

static __inline__ void atomic_sub(int i, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	v->counter -= i;
	local_irq_restore(flags);
}

static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	int temp;
	unsigned long flags;

	local_irq_save(flags);
	temp = v->counter;
	temp += i;
	v->counter = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
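
/*
 * Illustrative sketch (names are hypothetical): atomic_add_return()
 * yields the post-add value, so a bounded counter can be checked and
 * backed off in one pass.
 *
 *	if (atomic_add_return(1, &pool_used) > POOL_MAX) {
 *		atomic_sub(1, &pool_used);
 *		return -EBUSY;
 *	}
 */
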
static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	int temp;
	unsigned long flags;

	local_irq_save(flags);
	temp = v->counter;
	temp -= i;
	v->counter = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)

#define atomic_inc(v)	atomic_add(1, (v))
#define atomic_dec(v)	atomic_sub(1, (v))
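
/*
 * Sketch of the classic refcounting pattern these helpers support (the
 * object and its release function are hypothetical):
 *
 *	atomic_inc(&obj->refcnt);
 *	...
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		release_obj(obj);
 */
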
static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	v->counter &= ~mask;
	local_irq_restore(flags);
}

static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	v->counter |= mask;
	local_irq_restore(flags);
}
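
/*
 * Illustrative sketch (the flag values and variable are hypothetical):
 * the mask helpers let independent bits of one word be set and cleared
 * without a lock.
 *
 *	#define FLAG_PENDING	0x01
 *	#define FLAG_MASKED	0x02
 *
 *	atomic_set_mask(FLAG_PENDING, &chip_flags);
 *	atomic_clear_mask(FLAG_MASKED, &chip_flags);
 */
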
/* Atomic operations are already serializing on SH */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
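
/*
 * Sketch of the intended barrier pairing (the object layout and its
 * release function are hypothetical): order a plain store before the
 * reference drop that may hand the object to another context.
 *
 *	obj->dead = 1;
 *	smp_mb__before_atomic_dec();
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		release_obj(obj);
 */
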
#endif /* __ASM_SH64_ATOMIC_H */