Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 LT |
2 | #ifndef __ASM_SPINLOCK_H |
3 | #define __ASM_SPINLOCK_H | |
4 | ||
1cab4201 REB |
5 | #include <asm/barrier.h> |
6 | #include <asm/ldcw.h> | |
fb1c8f93 IM |
7 | #include <asm/processor.h> |
8 | #include <asm/spinlock_types.h> | |
1da177e4 | 9 | |
0199c4e6 | 10 | static inline int arch_spin_is_locked(arch_spinlock_t *x) |
1da177e4 LT |
11 | { |
12 | volatile unsigned int *a = __ldcw_align(x); | |
13 | return *a == 0; | |
14 | } | |
15 | ||
0199c4e6 | 16 | #define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0) |
726328d9 | 17 | |
0199c4e6 | 18 | static inline void arch_spin_lock_flags(arch_spinlock_t *x, |
08dc2ca6 | 19 | unsigned long flags) |
1da177e4 LT |
20 | { |
21 | volatile unsigned int *a; | |
22 | ||
1da177e4 LT |
23 | a = __ldcw_align(x); |
24 | while (__ldcw(a) == 0) | |
08dc2ca6 JB |
25 | while (*a == 0) |
26 | if (flags & PSW_SM_I) { | |
27 | local_irq_enable(); | |
28 | cpu_relax(); | |
29 | local_irq_disable(); | |
30 | } else | |
31 | cpu_relax(); | |
1da177e4 LT |
32 | } |
33 | ||
0199c4e6 | 34 | static inline void arch_spin_unlock(arch_spinlock_t *x) |
1da177e4 LT |
35 | { |
36 | volatile unsigned int *a; | |
6541d98d | 37 | |
1da177e4 | 38 | a = __ldcw_align(x); |
1da177e4 | 39 | mb(); |
6541d98d | 40 | *a = 1; |
1da177e4 LT |
41 | } |
42 | ||
0199c4e6 | 43 | static inline int arch_spin_trylock(arch_spinlock_t *x) |
1da177e4 LT |
44 | { |
45 | volatile unsigned int *a; | |
46 | int ret; | |
47 | ||
1da177e4 LT |
48 | a = __ldcw_align(x); |
49 | ret = __ldcw(a) != 0; | |
1da177e4 LT |
50 | |
51 | return ret; | |
52 | } | |
1da177e4 LT |
53 | |
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Linux rwlocks are unfair to writers; they can be starved for an indefinite
 * time by readers. With care, they can also be taken in interrupt context.
 *
 * In the PA-RISC implementation, we have a spinlock and a counter.
 * Readers use the lock to serialise their access to the counter (which
 * records how many readers currently hold the lock).
 * Writers hold the spinlock, preventing any readers or other writers from
 * grabbing the rwlock.
 */
1da177e4 | 65 | |
65ee8f0a MW |
66 | /* Note that we have to ensure interrupts are disabled in case we're |
67 | * interrupted by some other code that wants to grab the same read lock */ | |
e5931943 | 68 | static __inline__ void arch_read_lock(arch_rwlock_t *rw) |
1da177e4 | 69 | { |
6e071852 MW |
70 | unsigned long flags; |
71 | local_irq_save(flags); | |
0199c4e6 | 72 | arch_spin_lock_flags(&rw->lock, flags); |
1da177e4 | 73 | rw->counter++; |
0199c4e6 | 74 | arch_spin_unlock(&rw->lock); |
6e071852 | 75 | local_irq_restore(flags); |
1da177e4 | 76 | } |
1da177e4 | 77 | |
65ee8f0a MW |
78 | /* Note that we have to ensure interrupts are disabled in case we're |
79 | * interrupted by some other code that wants to grab the same read lock */ | |
e5931943 | 80 | static __inline__ void arch_read_unlock(arch_rwlock_t *rw) |
1da177e4 | 81 | { |
6e071852 MW |
82 | unsigned long flags; |
83 | local_irq_save(flags); | |
0199c4e6 | 84 | arch_spin_lock_flags(&rw->lock, flags); |
1da177e4 | 85 | rw->counter--; |
0199c4e6 | 86 | arch_spin_unlock(&rw->lock); |
6e071852 | 87 | local_irq_restore(flags); |
1da177e4 LT |
88 | } |
89 | ||
65ee8f0a MW |
90 | /* Note that we have to ensure interrupts are disabled in case we're |
91 | * interrupted by some other code that wants to grab the same read lock */ | |
e5931943 | 92 | static __inline__ int arch_read_trylock(arch_rwlock_t *rw) |
6e071852 MW |
93 | { |
94 | unsigned long flags; | |
95 | retry: | |
96 | local_irq_save(flags); | |
0199c4e6 | 97 | if (arch_spin_trylock(&rw->lock)) { |
6e071852 | 98 | rw->counter++; |
0199c4e6 | 99 | arch_spin_unlock(&rw->lock); |
6e071852 MW |
100 | local_irq_restore(flags); |
101 | return 1; | |
102 | } | |
103 | ||
104 | local_irq_restore(flags); | |
105 | /* If write-locked, we fail to acquire the lock */ | |
106 | if (rw->counter < 0) | |
107 | return 0; | |
108 | ||
109 | /* Wait until we have a realistic chance at the lock */ | |
0199c4e6 | 110 | while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0) |
6e071852 MW |
111 | cpu_relax(); |
112 | ||
113 | goto retry; | |
114 | } | |
1da177e4 | 115 | |
65ee8f0a MW |
116 | /* Note that we have to ensure interrupts are disabled in case we're |
117 | * interrupted by some other code that wants to read_trylock() this lock */ | |
e5931943 | 118 | static __inline__ void arch_write_lock(arch_rwlock_t *rw) |
1da177e4 | 119 | { |
6e071852 | 120 | unsigned long flags; |
1da177e4 | 121 | retry: |
6e071852 | 122 | local_irq_save(flags); |
0199c4e6 | 123 | arch_spin_lock_flags(&rw->lock, flags); |
1da177e4 | 124 | |
6e071852 | 125 | if (rw->counter != 0) { |
0199c4e6 | 126 | arch_spin_unlock(&rw->lock); |
6e071852 | 127 | local_irq_restore(flags); |
1da177e4 | 128 | |
fb1c8f93 IM |
129 | while (rw->counter != 0) |
130 | cpu_relax(); | |
1da177e4 LT |
131 | |
132 | goto retry; | |
133 | } | |
134 | ||
6e071852 MW |
135 | rw->counter = -1; /* mark as write-locked */ |
136 | mb(); | |
137 | local_irq_restore(flags); | |
1da177e4 | 138 | } |
1da177e4 | 139 | |
e5931943 | 140 | static __inline__ void arch_write_unlock(arch_rwlock_t *rw) |
1da177e4 LT |
141 | { |
142 | rw->counter = 0; | |
0199c4e6 | 143 | arch_spin_unlock(&rw->lock); |
1da177e4 LT |
144 | } |
145 | ||
65ee8f0a MW |
146 | /* Note that we have to ensure interrupts are disabled in case we're |
147 | * interrupted by some other code that wants to read_trylock() this lock */ | |
e5931943 | 148 | static __inline__ int arch_write_trylock(arch_rwlock_t *rw) |
1da177e4 | 149 | { |
6e071852 MW |
150 | unsigned long flags; |
151 | int result = 0; | |
152 | ||
153 | local_irq_save(flags); | |
0199c4e6 | 154 | if (arch_spin_trylock(&rw->lock)) { |
6e071852 MW |
155 | if (rw->counter == 0) { |
156 | rw->counter = -1; | |
157 | result = 1; | |
158 | } else { | |
159 | /* Read-locked. Oh well. */ | |
0199c4e6 | 160 | arch_spin_unlock(&rw->lock); |
6e071852 | 161 | } |
1da177e4 | 162 | } |
6e071852 | 163 | local_irq_restore(flags); |
1da177e4 | 164 | |
6e071852 | 165 | return result; |
1da177e4 | 166 | } |
1da177e4 | 167 | |
bc8846c5 KM |
168 | /* |
169 | * read_can_lock - would read_trylock() succeed? | |
170 | * @lock: the rwlock in question. | |
171 | */ | |
e5931943 | 172 | static __inline__ int arch_read_can_lock(arch_rwlock_t *rw) |
1da177e4 | 173 | { |
bc8846c5 | 174 | return rw->counter >= 0; |
1da177e4 LT |
175 | } |
176 | ||
bc8846c5 KM |
177 | /* |
178 | * write_can_lock - would write_trylock() succeed? | |
179 | * @lock: the rwlock in question. | |
180 | */ | |
e5931943 | 181 | static __inline__ int arch_write_can_lock(arch_rwlock_t *rw) |
1da177e4 | 182 | { |
bc8846c5 | 183 | return !rw->counter; |
1da177e4 LT |
184 | } |
185 | ||
e5931943 TG |
186 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) |
187 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | |
f5f7eac4 | 188 | |
1da177e4 | 189 | #endif /* __ASM_SPINLOCK_H */ |