/*
 * Hardware spinlock public header
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef __LINUX_HWSPINLOCK_H
#define __LINUX_HWSPINLOCK_H

#include <linux/err.h>
#include <linux/sched.h>
#include <linux/device.h>

/* hwspinlock mode argument */
#define HWLOCK_IRQSTATE	0x01	/* Disable interrupts, save state */
#define HWLOCK_IRQ	0x02	/* Disable interrupts, don't save state */

struct hwspinlock;
struct hwspinlock_device;
struct hwspinlock_ops;

/**
 * struct hwspinlock_pdata - platform data for hwspinlock drivers
 * @base_id: base id for this hwspinlock device
 *
 * hwspinlock devices provide system-wide hardware locks that are used
 * by remote processors that have no other way to achieve synchronization.
 *
 * To achieve that, each physical lock must have a system-wide id number
 * that is agreed upon, otherwise remote processors can't possibly assume
 * they're using the same hardware lock.
 *
 * Usually boards have a single hwspinlock device, which provides several
 * hwspinlocks, and in this case, they can be trivially numbered 0 to
 * (num-of-locks - 1).
 *
 * In case boards have several hwspinlock devices, a different base id
 * should be used for each hwspinlock device (they can't all use 0 as
 * a starting id!).
 *
 * This platform data structure should be used to provide the base id
 * for each device (which is trivially 0 when only a single hwspinlock
 * device exists). It can be shared between different platforms, hence
 * its location.
 */
struct hwspinlock_pdata {
	int base_id;
};
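
/*
 * Illustrative sketch (not part of the original header): a board that
 * carries two hwspinlock banks must hand each bank a distinct base id.
 * Assuming bank 0 exposes 32 locks, bank 1 would start numbering at 32.
 * The initializers below are hypothetical and would normally live in a
 * board file, attached to each bank's platform device:
 *
 *	static const struct hwspinlock_pdata bank0_pdata = { .base_id = 0 };
 *	static const struct hwspinlock_pdata bank1_pdata = { .base_id = 32 };
 */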

#if defined(CONFIG_HWSPINLOCK) || defined(CONFIG_HWSPINLOCK_MODULE)

int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks);
int hwspin_lock_unregister(struct hwspinlock_device *bank);
struct hwspinlock *hwspin_lock_request(void);
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
int hwspin_lock_free(struct hwspinlock *hwlock);
int hwspin_lock_get_id(struct hwspinlock *hwlock);
int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
							unsigned long *);
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);

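/*
 * Illustrative sketch (not part of the original header): a controller
 * driver's probe path typically feeds the platform-provided base id into
 * hwspin_lock_register(). struct hwspinlock_device is opaque at this
 * level (its layout is private to the framework), so the bank, the ops
 * table and the lock count below are hypothetical driver-specific names:
 *
 *	struct hwspinlock_pdata *pdata = pdev->dev.platform_data;
 *
 *	ret = hwspin_lock_register(bank, &pdev->dev, &my_hwspinlock_ops,
 *				   pdata->base_id, num_locks);
 */
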
#else /* !CONFIG_HWSPINLOCK */

/*
 * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
 * enabled. We prefer to silently succeed in this case, and let the
 * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not
 * required on a given setup, code that uses the API will still build
 * and run.
 *
 * The only exception is hwspin_lock_register/hwspin_lock_unregister, with
 * which we _do_ want users to fail (there is no point in registering
 * hwspinlock instances if the framework is not available).
 *
 * Note: ERR_PTR(-ENODEV) will still be considered a success for
 * NULL-checking users. Callers that do care can still detect the
 * error with IS_ERR().
 */
static inline struct hwspinlock *hwspin_lock_request(void)
{
	return ERR_PTR(-ENODEV);
}

static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	return ERR_PTR(-ENODEV);
}

static inline int hwspin_lock_free(struct hwspinlock *hwlock)
{
	return 0;
}

static inline
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
			  int mode, unsigned long *flags)
{
	return 0;
}

static inline
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	return 0;
}

static inline
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	/* void function: nothing to return; silently succeed */
}

static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	return 0;
}

#endif /* !CONFIG_HWSPINLOCK */
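
/*
 * Illustrative sketch (not part of the original header): because the
 * !CONFIG_HWSPINLOCK stubs above return ERR_PTR(-ENODEV) rather than
 * NULL, callers that need to distinguish "framework absent" from success
 * must test with IS_ERR(), not with a NULL check. This helper and its
 * name are hypothetical:
 */
static inline struct hwspinlock *hwspinlock_example_request(void)
{
	struct hwspinlock *hwlock = hwspin_lock_request();

	/* a NULL check would wrongly treat ERR_PTR(-ENODEV) as a valid lock */
	if (IS_ERR(hwlock))
		return NULL;

	return hwlock;
}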

/**
 * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled (the previous interrupt state is saved at @flags),
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline
int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
}

/**
 * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled, so the caller must not sleep, and is advised
 * to release the hwspinlock as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible. This is required in order to minimize remote cores
 * polling on the hardware interconnect.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, 0, NULL);
}

/**
 * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled (and the previous interrupt state is saved), so the caller must
 * not sleep, and is advised to release the hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
				unsigned int to, unsigned long *flags)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
}

/**
 * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled, so the caller must not sleep, and is advised to release the
 * hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, 0, NULL);
}

/**
 * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @flags: previous caller's interrupt state to restore
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * restore the previous state of the local interrupts. It should be used
 * to undo, e.g., hwspin_trylock_irqsave().
 *
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 */
static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
					unsigned long *flags)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
}
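
/*
 * Illustrative sketch (not part of the original header): a complete
 * trylock/unlock pair around a short critical section shared with a
 * remote core. The helper name and the shared counter are hypothetical:
 */
static inline int hwspinlock_example_trylock(struct hwspinlock *hwlock,
					     unsigned int *shared_counter)
{
	unsigned long flags;
	int ret;

	ret = hwspin_trylock_irqsave(hwlock, &flags);
	if (ret)
		return ret;	/* e.g. -EBUSY: a remote core holds the lock */

	(*shared_counter)++;	/* keep the critical section short */

	hwspin_unlock_irqrestore(hwlock, &flags);
	return 0;
}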

/**
 * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * enable local interrupts. It should be used to undo, e.g.,
 * hwspin_trylock_irq().
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_unlock() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock and re-enable
 * preemption.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
 * this function: it is a bug to call unlock on a @hwlock that is already
 * unlocked.
 */
static inline void hwspin_unlock(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, 0, NULL);
}
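
/*
 * Illustrative sketch (not part of the original header): the timeout
 * variant busy-waits for up to the given number of msecs before failing
 * with -ETIMEDOUT. The helper name and the 100 msecs budget are
 * hypothetical:
 */
static inline int hwspinlock_example_timeout(struct hwspinlock *hwlock)
{
	int ret;

	/* spin for at most 100 msecs; this never sleeps */
	ret = hwspin_lock_timeout(hwlock, 100);
	if (ret)
		return ret;	/* most notably -ETIMEDOUT */

	/* ... access the shared resource here ... */

	hwspin_unlock(hwlock);
	return 0;
}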

#endif /* __LINUX_HWSPINLOCK_H */