/*
 * Generic waiting primitives.
 *
 * (C) 2004 William Irwin, Oracle
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>

void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_tail(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__remove_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);


/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void fastcall
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	/*
	 * don't alter the task state if this is just going to
	 * queue an async wait queue callback
	 */
	if (is_sync_wait(wait))
		set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
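
/*
 * Example (sketch): the canonical open-coded wait loop that the
 * ordering comment above is about.  "my_queue" and "condition" are
 * hypothetical stand-ins for a caller's waitqueue and wakeup condition:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_queue, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_queue, &wait);
 *
 * The wakeup side sets "condition" and then calls wake_up(&my_queue);
 * the barrier implied by set_current_state() in prepare_to_wait()
 * ensures this loop cannot miss that wakeup.
 */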

void fastcall
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue_tail(q, wait);
	/*
	 * don't alter the task state if this is just going to
	 * queue an async wait queue callback
	 */
	if (is_sync_wait(wait))
		set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPUs that we haven't seen yet (and that might
	 *    still change the stack area),
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wait->task_list)) {
		spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wait, mode, sync, key);

	if (ret)
		list_del_init(&wait->task_list);
	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue *wait_bit
		= container_of(wait, struct wait_bit_queue, wait);

	if (wait_bit->key.flags != key->flags ||
			wait_bit->key.bit_nr != key->bit_nr ||
			test_bit(key->bit_nr, key->flags))
		return 0;
	else
		return autoremove_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);

/*
 * To allow interruptible and asynchronous (i.e. nonblocking) waiting,
 * the action functions passed to __wait_on_bit() and __wait_on_bit_lock()
 * may return nonzero.  A nonzero return code halts waiting and is
 * returned to the caller.
 */
int __sched fastcall
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
			int (*action)(void *), unsigned mode)
{
	int ret = 0;

	do {
		prepare_to_wait(wq, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags))
			ret = (*action)(q->key.flags);
	} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
	finish_wait(wq, &q->wait);
	return ret;
}
EXPORT_SYMBOL(__wait_on_bit);
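
/*
 * Example (sketch): a typical "action" callback of the kind described
 * above.  The name is hypothetical; an interruptible waiter might pass
 * something along these lines, while an uninterruptible one would
 * simply schedule and return 0:
 *
 *	static int my_bit_wait(void *word)
 *	{
 *		if (signal_pending(current))
 *			return -EINTR;
 *		schedule();
 *		return 0;
 *	}
 *
 * Returning -EINTR makes __wait_on_bit() stop waiting and propagate
 * the error to its caller.
 */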

int __sched fastcall out_of_line_wait_on_bit(void *word, int bit,
					int (*action)(void *), unsigned mode)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);

int __sched fastcall
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
			int (*action)(void *), unsigned mode)
{
	int ret = 0;

	do {
		prepare_to_wait_exclusive(wq, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags)) {
			if ((ret = (*action)(q->key.flags)))
				break;
		}
	} while (test_and_set_bit(q->key.bit_nr, q->key.flags));
	finish_wait(wq, &q->wait);
	return ret;
}
EXPORT_SYMBOL(__wait_on_bit_lock);

int __sched fastcall out_of_line_wait_on_bit_lock(void *word, int bit,
					int (*action)(void *), unsigned mode)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit_lock(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
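
/*
 * Example (sketch): using the _lock variant to take a simple bit lock,
 * with hypothetical names.  The fast path tries test_and_set_bit();
 * only on contention does the caller sleep until it has set the bit
 * itself:
 *
 *	if (test_and_set_bit(MY_LOCK_BIT, &my_flags))
 *		out_of_line_wait_on_bit_lock(&my_flags, MY_LOCK_BIT,
 *					     my_bit_wait, TASK_UNINTERRUPTIBLE);
 */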

void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);

/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be done prior to calling
 * this. Typically, this will be smp_mb__after_clear_bit(), but in some
 * cases where bitflags are manipulated non-atomically under a lock, one
 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
 * because spin_unlock() does not guarantee a memory barrier.
 */
void fastcall wake_up_bit(void *word, int bit)
{
	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);
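
/*
 * Example (sketch): the clear-then-wake sequence described in the
 * comment above, for a hypothetical flags word and bit number:
 *
 *	clear_bit(MY_BIT, &my_flags);
 *	smp_mb__after_clear_bit();
 *	wake_up_bit(&my_flags, MY_BIT);
 *
 * Without the barrier, the waitqueue_active() check inside
 * __wake_up_bit() could be reordered before the clear, and a waiter
 * that still sees the bit set would miss its wakeup.
 */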

fastcall wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
	const int shift = BITS_PER_LONG == 32 ? 5 : 6;
	const struct zone *zone = page_zone(virt_to_page(word));
	unsigned long val = (unsigned long)word << shift | bit;

	return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
}
EXPORT_SYMBOL(bit_waitqueue);