#ifndef _LINUX_RCULIST_H
#define _LINUX_RCULIST_H

#ifdef __KERNEL__

/*
 * RCU-protected list version
 */
#include <linux/list.h>

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head *new,
		struct list_head *prev, struct list_head *next)
{
	new->next = next;
	new->prev = prev;
	smp_wmb();
	next->prev = new;
	prev->next = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
	__list_add_rcu(new, head, head->next);
}
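
/*
 * Example (illustrative sketch, not part of this header): a writer
 * adding an element at the head of an RCU-protected list.  "struct foo",
 * foo_lock and foo_head are hypothetical names; any scheme that
 * serializes updaters will do.
 *
 *	struct foo {
 *		int data;
 *		struct list_head list;
 *		struct rcu_head rcu;
 *	};
 *
 *	spin_lock(&foo_lock);			// exclude other updaters
 *	list_add_rcu(&new_foo->list, &foo_head);
 *	spin_unlock(&foo_lock);			// readers may see new_foo now
 */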

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
					struct list_head *head)
{
	__list_add_rcu(new, head->prev, head);
}
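
/*
 * Example (illustrative sketch): enqueueing at the tail, again with the
 * hypothetical foo_lock serializing updaters while readers traverse
 * concurrently.
 *
 *	spin_lock(&foo_lock);
 *	list_add_tail_rcu(&new_foo->list, &foo_head);
 *	spin_unlock(&foo_lock);
 */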

/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on @entry does not return true after this;
 * the entry is in an undefined state.  It is useful for RCU-based
 * lockfree traversal.
 *
 * In particular, it means that we cannot poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry.  Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->prev = LIST_POISON2;
}
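
/*
 * Example (illustrative sketch): deleting an element and deferring its
 * reclamation past a grace period.  foo_reclaim() and the rcu field
 * are hypothetical, following the struct foo sketched above.
 *
 *	static void foo_reclaim(struct rcu_head *rp)
 *	{
 *		kfree(container_of(rp, struct foo, rcu));
 *	}
 *
 *	spin_lock(&foo_lock);
 *	list_del_rcu(&victim->list);
 *	spin_unlock(&foo_lock);
 *	call_rcu(&victim->rcu, foo_reclaim);
 *	// or, if sleeping is allowed: synchronize_rcu(); kfree(victim);
 */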

/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 * Note: @old must be on a list; it must not be an empty list head.
 */
static inline void list_replace_rcu(struct list_head *old,
			struct list_head *new)
{
	new->next = old->next;
	new->prev = old->prev;
	smp_wmb();
	new->next->prev = new;
	new->prev->next = new;
	old->prev = LIST_POISON2;
}
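
/*
 * Example (illustrative sketch): publishing an updated copy of an
 * element.  Readers see either the old or the new version; the old
 * copy may be freed only after a grace period.
 *
 *	spin_lock(&foo_lock);
 *	list_replace_rcu(&old_foo->list, &new_foo->list);
 *	spin_unlock(&foo_lock);
 *	synchronize_rcu();
 *	kfree(old_foo);
 */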

/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list.
 * @list: the RCU-protected list to splice
 * @head: the place in the list to splice the first list into
 * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
 *
 * @head can be traversed by RCU readers concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to
 * prevent any other updates to @head.  In principle, it is possible
 * to modify the list as soon as sync() begins execution.
 * If this sort of thing becomes necessary, an alternative version
 * based on call_rcu() could be created.  But only if -really-
 * needed -- there is no shortage of RCU API members.
 */
static inline void list_splice_init_rcu(struct list_head *list,
					struct list_head *head,
					void (*sync)(void))
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;
	struct list_head *at = head->next;

	if (list_empty(list))
		return;

	/*
	 * "first" and "last" now track the spliced-out list body, so
	 * reinitialize the (detached) source list head.
	 */
	INIT_LIST_HEAD(list);

	/*
	 * At this point, the list body still points to the source list.
	 * Wait for any readers to finish using the list before splicing
	 * the list body into the new list.  Any new readers will see
	 * an empty list.
	 */
	sync();

	/*
	 * Readers are finished with the source list, so perform splice.
	 * The order is important if the new list is global and accessible
	 * to concurrent RCU readers.  Note that RCU readers are not
	 * permitted to traverse the prev pointers without excluding
	 * this function.
	 */
	last->next = at;
	smp_wmb();
	head->next = first;
	first->prev = head;
	at->prev = last;
}
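
/*
 * Example (illustrative sketch): draining a private pending list into
 * the globally visible foo_head list.  A sleeping lock (mutex) is used
 * because the sync function, here synchronize_rcu(), blocks.
 *
 *	mutex_lock(&foo_mutex);
 *	list_splice_init_rcu(&pending_list, &foo_head, synchronize_rcu);
 *	mutex_unlock(&foo_mutex);
 */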

/**
 * list_for_each_rcu - iterate over an rcu-protected list
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_rcu(pos, head) \
	for (pos = (head)->next; \
		prefetch(rcu_dereference(pos)->next), pos != (head); \
		pos = pos->next)

#define __list_for_each_rcu(pos, head) \
	for (pos = (head)->next; \
		rcu_dereference(pos) != (head); \
		pos = pos->next)

/**
 * list_for_each_safe_rcu
 * @pos: the &struct list_head to use as a loop cursor.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 *
 * Iterate over an rcu-protected list, safe against removal of list entry.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_safe_rcu(pos, n, head) \
	for (pos = (head)->next; \
		n = rcu_dereference(pos)->next, pos != (head); \
		pos = n)

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
	for (pos = list_entry((head)->next, typeof(*pos), member); \
		prefetch(rcu_dereference(pos)->member.next), \
			&pos->member != (head); \
		pos = list_entry(pos->member.next, typeof(*pos), member))
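
/*
 * Example (illustrative sketch): a reader traversing the hypothetical
 * foo_head list from the examples above.  rcu_read_lock() must cover
 * the entire traversal, and elements must not be used after
 * rcu_read_unlock().
 *
 *	struct foo *f;
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(f, &foo_head, list)
 *		do_something_with(f->data);
 *	rcu_read_unlock();
 */
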
/**
 * list_for_each_continue_rcu
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 *
 * Iterate over an rcu-protected list, continuing after current point.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_continue_rcu(pos, head) \
	for ((pos) = (pos)->next; \
		prefetch(rcu_dereference((pos))->next), (pos) != (head); \
		(pos) = (pos)->next)

/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on @n does not return true after this;
 * the entry is in an undefined state.  It is useful for RCU-based
 * lockfree traversal.
 *
 * In particular, it means that we cannot poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	n->pprev = LIST_POISON2;
}
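
/*
 * Example (illustrative sketch): as with list_del_rcu(), freeing must
 * be deferred past a grace period.  foo_lock, foo_reclaim() and the
 * hnode/rcu fields are hypothetical, as in the earlier examples.
 *
 *	spin_lock(&foo_lock);
 *	hlist_del_rcu(&victim->hnode);
 *	spin_unlock(&foo_lock);
 *	call_rcu(&victim->rcu, foo_reclaim);
 */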

/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
					struct hlist_node *new)
{
	struct hlist_node *next = old->next;

	new->next = next;
	new->pprev = old->pprev;
	smp_wmb();
	if (next)
		new->next->pprev = &new->next;
	*new->pprev = new;
	old->pprev = LIST_POISON2;
}

/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
					struct hlist_head *h)
{
	struct hlist_node *first = h->first;
	n->next = first;
	n->pprev = &h->first;
	smp_wmb();
	if (first)
		first->pprev = &n->next;
	h->first = n;
}
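
/*
 * Example (illustrative sketch): inserting into one bucket of a simple
 * RCU-protected hash table.  foo_table, foo_hash() and foo_lock are
 * hypothetical.
 *
 *	struct hlist_head foo_table[FOO_TABLE_SIZE];
 *
 *	spin_lock(&foo_lock);
 *	hlist_add_head_rcu(&new_foo->hnode,
 *			   &foo_table[foo_hash(new_foo->key)]);
 *	spin_unlock(&foo_lock);
 */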

/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
					struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	smp_wmb();
	next->pprev = &n->next;
	*(n->pprev) = n;
}

/**
 * hlist_add_after_rcu
 * @prev: the existing element to add the new element after.
 * @n: the new element to add to the hash list.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_after_rcu(struct hlist_node *prev,
					struct hlist_node *n)
{
	n->next = prev->next;
	n->pprev = &prev->next;
	smp_wmb();
	prev->next = n;
	if (n->next)
		n->next->pprev = &n->next;
}

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(tpos, pos, head, member)		 \
	for (pos = (head)->first;					 \
	     rcu_dereference(pos) && ({ prefetch(pos->next); 1; }) &&	 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
	     pos = pos->next)
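
/*
 * Example (illustrative sketch): an RCU reader looking up a key in the
 * hypothetical hash table above.  The traversal, and any use of the
 * returned element, must stay inside the rcu_read_lock() section.
 *
 *	struct foo *found = NULL;
 *	struct hlist_node *node;
 *	struct foo *f;
 *
 *	rcu_read_lock();
 *	hlist_for_each_entry_rcu(f, node, &foo_table[foo_hash(key)], hnode) {
 *		if (f->key == key) {
 *			found = f;
 *			break;
 *		}
 *	}
 *	// ... use "found" here, before rcu_read_unlock() ...
 *	rcu_read_unlock();
 */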

#endif	/* __KERNEL__ */
#endif