srcu: Simplify __srcu_read_unlock() via this_cpu_dec()
kernel/srcu.c

/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *         Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *         Documentation/RCU/ *.txt
 *
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/srcu.h>

#include <trace/events/rcu.h>

#include "rcu.h"

/*
 * Initialize an rcu_batch structure to empty.
 */
static inline void rcu_batch_init(struct rcu_batch *b)
{
        b->head = NULL;
        b->tail = &b->head;
}

/*
 * Enqueue a callback onto the tail of the specified rcu_batch structure.
 */
static inline void rcu_batch_queue(struct rcu_batch *b, struct rcu_head *head)
{
        *b->tail = head;
        b->tail = &head->next;
}

/*
 * Is the specified rcu_batch structure empty?
 */
static inline bool rcu_batch_empty(struct rcu_batch *b)
{
        return b->tail == &b->head;
}

/*
 * Remove the callback at the head of the specified rcu_batch structure
 * and return a pointer to it, or return NULL if the structure is empty.
 */
static inline struct rcu_head *rcu_batch_dequeue(struct rcu_batch *b)
{
        struct rcu_head *head;

        if (rcu_batch_empty(b))
                return NULL;

        head = b->head;
        b->head = head->next;
        if (b->tail == &head->next)
                rcu_batch_init(b);

        return head;
}

/*
 * Move all callbacks from the rcu_batch structure specified by "from" to
 * the structure specified by "to".
 */
static inline void rcu_batch_move(struct rcu_batch *to, struct rcu_batch *from)
{
        if (!rcu_batch_empty(from)) {
                *to->tail = from->head;
                to->tail = from->tail;
                rcu_batch_init(from);
        }
}

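/*
 * For illustration, a simplified sketch of how these helpers move
 * callbacks through the grace-period pipeline (the batch names and
 * the callback "cb" below are hypothetical, for exposition only):
 *
 *	struct rcu_batch newly_queued, ready;
 *	struct rcu_head cb;
 *	struct rcu_head *head;
 *
 *	rcu_batch_init(&newly_queued);
 *	rcu_batch_init(&ready);
 *	rcu_batch_queue(&newly_queued, &cb);	// callback registered
 *	rcu_batch_move(&ready, &newly_queued);	// grace period elapsed
 *	while ((head = rcu_batch_dequeue(&ready)) != NULL)
 *		head->func(head);		// invoke the callback
 */
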
static int init_srcu_struct_fields(struct srcu_struct *sp)
{
        sp->completed = 0;
        spin_lock_init(&sp->queue_lock);
        sp->running = false;
        rcu_batch_init(&sp->batch_queue);
        rcu_batch_init(&sp->batch_check0);
        rcu_batch_init(&sp->batch_check1);
        rcu_batch_init(&sp->batch_done);
        INIT_DELAYED_WORK(&sp->work, process_srcu);
        sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
        return sp->per_cpu_ref ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
                       struct lock_class_key *key)
{
        /* Don't re-initialize a lock while it is held. */
        debug_check_no_locks_freed((void *)sp, sizeof(*sp));
        lockdep_init_map(&sp->dep_map, name, key, 0);
        return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
        return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * Returns approximate total of the readers' ->seq[] values for the
 * rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_seq_idx(struct srcu_struct *sp, int idx)
{
        int cpu;
        unsigned long sum = 0;
        unsigned long t;

        for_each_possible_cpu(cpu) {
                t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]);
                sum += t;
        }
        return sum;
}

/*
 * Returns approximate number of readers active on the specified rank
 * of the per-CPU ->c[] counters.
 */
static unsigned long srcu_readers_active_idx(struct srcu_struct *sp, int idx)
{
        int cpu;
        unsigned long sum = 0;
        unsigned long t;

        for_each_possible_cpu(cpu) {
                t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]);
                sum += t;
        }
        return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be stably zero.  An example unstable zero can occur if the call
 * to srcu_readers_active_idx() misses an __srcu_read_lock() increment,
 * but due to task migration, sees the corresponding __srcu_read_unlock()
 * decrement.  This can happen because srcu_readers_active_idx() takes
 * time to sum the array, and might in fact be interrupted or preempted
 * partway through the summation.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
        unsigned long seq;

        seq = srcu_readers_seq_idx(sp, idx);

        /*
         * The following smp_mb() A pairs with the smp_mb() B located in
         * __srcu_read_lock().  This pairing ensures that if an
         * __srcu_read_lock() increments its counter after the summation
         * in srcu_readers_active_idx(), then the corresponding SRCU read-side
         * critical section will see any changes made prior to the start
         * of the current SRCU grace period.
         *
         * Also, if the above call to srcu_readers_seq_idx() saw the
         * increment of ->seq[], then the call to srcu_readers_active_idx()
         * must see the increment of ->c[].
         */
        smp_mb(); /* A */

        /*
         * Note that srcu_readers_active_idx() can incorrectly return
         * zero even though there is a pre-existing reader throughout.
         * To see this, suppose that task A is in a very long SRCU
         * read-side critical section that started on CPU 0, and that
         * no other reader exists, so that the sum of the counters
         * is equal to one.  Then suppose that task B starts executing
         * srcu_readers_active_idx(), summing up to CPU 1, and then that
         * task C starts reading on CPU 0, so that its increment is not
         * summed, but finishes reading on CPU 2, so that its decrement
         * -is- summed.  Then when task B completes its sum, it will
         * incorrectly get zero, despite the fact that task A has been
         * in its SRCU read-side critical section the whole time.
         *
         * We therefore do a validation step should srcu_readers_active_idx()
         * return zero.
         */
        if (srcu_readers_active_idx(sp, idx) != 0)
                return false;

        /*
         * The remainder of this function is the validation step.
         * The following smp_mb() D pairs with the smp_mb() C in
         * __srcu_read_unlock().  If the __srcu_read_unlock() was seen
         * by srcu_readers_active_idx() above, then any destructive
         * operation performed after the grace period will happen after
         * the corresponding SRCU read-side critical section.
         *
         * Note that there can be at most NR_CPUS worth of readers using
         * the old index, which is not enough to overflow even a 32-bit
         * integer.  (Yes, this does mean that systems having more than
         * a billion or so CPUs need to be 64-bit systems.)  Therefore,
         * the sum of the ->seq[] counters cannot possibly overflow.
         * Therefore, the only way that the return values of the two
         * calls to srcu_readers_seq_idx() can be equal is if there were
         * no increments of the corresponding rank of ->seq[] counts
         * in the interim.  But the missed-increment scenario laid out
         * above includes an increment of the ->seq[] counter by
         * the corresponding __srcu_read_lock().  Therefore, if this
         * scenario occurs, the return values from the two calls to
         * srcu_readers_seq_idx() will differ, and thus the validation
         * step below suffices.
         */
        smp_mb(); /* D */

        return srcu_readers_seq_idx(sp, idx) == seq;
}

/**
 * srcu_readers_active - returns approximate number of readers.
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static int srcu_readers_active(struct srcu_struct *sp)
{
        int cpu;
        unsigned long sum = 0;

        for_each_possible_cpu(cpu) {
                sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]);
                sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]);
        }
        return sum;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
        int sum;

        sum = srcu_readers_active(sp);
        WARN_ON(sum);  /* Leakage unless caller handles error. */
        if (sum != 0)
                return;
        free_percpu(sp->per_cpu_ref);
        sp->per_cpu_ref = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

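/*
 * For illustration, a simplified sketch of an SRCU domain's lifecycle
 * using the functions above (the names "my_srcu", my_init(), and
 * my_exit() are hypothetical, for exposition only):
 *
 *	static struct srcu_struct my_srcu;
 *
 *	static int __init my_init(void)
 *	{
 *		return init_srcu_struct(&my_srcu);	// 0 or -ENOMEM
 *	}
 *
 *	static void my_exit(void)
 *	{
 *		// All readers and callbacks must have finished by now.
 *		cleanup_srcu_struct(&my_srcu);
 *	}
 */
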
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.  Must be called from process context.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
        int idx;

        preempt_disable();
        idx = rcu_dereference_index_check(sp->completed,
                                          rcu_read_lock_sched_held()) & 0x1;
        ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
        smp_mb(); /* B */  /* Avoid leaking the critical section. */
        ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
        preempt_enable();
        return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 * Must be called from process context.
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
        smp_mb(); /* C */  /* Avoid leaking the critical section. */
        this_cpu_dec(sp->per_cpu_ref->c[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

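/*
 * For illustration, a simplified reader sketch built on the
 * srcu_read_lock()/srcu_read_unlock() wrappers around the two functions
 * above (struct foo, the SRCU-protected pointer "gp", and
 * do_something_with() are hypothetical, for exposition only):
 *
 *	int idx;
 *	struct foo *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	if (p)
 *		do_something_with(p);	// SRCU readers are allowed to sleep
 *	srcu_read_unlock(&my_srcu, idx);
 */
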
/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after 10 microseconds,
 * we repeatedly block for 1-millisecond time periods.  This approach
 * has done well in testing, so there is no need for a config parameter.
 */
#define SRCU_RETRY_CHECK_DELAY		5
#define SYNCHRONIZE_SRCU_TRYCOUNT	2
#define SYNCHRONIZE_SRCU_EXP_TRYCOUNT	12

/*
 * Wait until all pre-existing readers complete.  Such readers
 * will have used the index specified by "idx".  The caller must
 * ensure that ->completed does not change while checking, and that
 * idx == (->completed & 1) ^ 1.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
        for (;;) {
                if (srcu_readers_active_idx_check(sp, idx))
                        return true;
                if (--trycount <= 0)
                        return false;
                udelay(SRCU_RETRY_CHECK_DELAY);
        }
}

/*
 * Increment the ->completed counter so that future SRCU readers will
 * use the other rank of the ->c[] and ->seq[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
        sp->completed++;
}

/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
               void (*func)(struct rcu_head *head))
{
        unsigned long flags;

        head->next = NULL;
        head->func = func;
        spin_lock_irqsave(&sp->queue_lock, flags);
        rcu_batch_queue(&sp->batch_queue, head);
        if (!sp->running) {
                sp->running = true;
                schedule_delayed_work(&sp->work, 0);
        }
        spin_unlock_irqrestore(&sp->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(call_srcu);

struct rcu_synchronize {
        struct rcu_head head;
        struct completion completion;
};

/*
 * Awaken the corresponding synchronize_srcu() instance now that a
 * grace period has elapsed.
 */
static void wakeme_after_rcu(struct rcu_head *head)
{
        struct rcu_synchronize *rcu;

        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
}

static void srcu_advance_batches(struct srcu_struct *sp, int trycount);
static void srcu_reschedule(struct srcu_struct *sp);

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
{
        struct rcu_synchronize rcu;
        struct rcu_head *head = &rcu.head;
        bool done = false;

        rcu_lockdep_assert(!lock_is_held(&sp->dep_map) &&
                           !lock_is_held(&rcu_bh_lock_map) &&
                           !lock_is_held(&rcu_lock_map) &&
                           !lock_is_held(&rcu_sched_lock_map),
                           "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");

        init_completion(&rcu.completion);

        head->next = NULL;
        head->func = wakeme_after_rcu;
        spin_lock_irq(&sp->queue_lock);
        if (!sp->running) {
                /* steal the processing owner */
                sp->running = true;
                rcu_batch_queue(&sp->batch_check0, head);
                spin_unlock_irq(&sp->queue_lock);

                srcu_advance_batches(sp, trycount);
                if (!rcu_batch_empty(&sp->batch_done)) {
                        BUG_ON(sp->batch_done.head != head);
                        rcu_batch_dequeue(&sp->batch_done);
                        done = true;
                }
                /* give the processing owner to work_struct */
                srcu_reschedule(sp);
        } else {
                rcu_batch_queue(&sp->batch_queue, head);
                spin_unlock_irq(&sp->queue_lock);
        }

        if (!done)
                wait_for_completion(&rcu.completion);
}

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Flip the completed counter, and wait for the old count to drain to zero.
 * As with classic RCU, the updater must use some separate means of
 * synchronizing concurrent updates.  Can block; must be called from
 * process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
        __synchronize_srcu(sp, rcu_expedited
                           ? SYNCHRONIZE_SRCU_EXP_TRYCOUNT
                           : SYNCHRONIZE_SRCU_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);

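/*
 * For illustration, the classic synchronous replace-and-reclaim pattern
 * built on synchronize_srcu() (struct foo, the pointers "gp"/"new_p",
 * and "update_lock" are hypothetical, for exposition only):
 *
 *	struct foo *old;
 *
 *	spin_lock(&update_lock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&update_lock));
 *	rcu_assign_pointer(gp, new_p);	// publish the replacement
 *	spin_unlock(&update_lock);	// cannot block inside the spinlock
 *	synchronize_srcu(&my_srcu);	// wait for pre-existing readers
 *	kfree(old);			// now safe to reclaim
 */
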
/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that it is illegal to call this function while holding any lock
 * that is acquired by a CPU-hotplug notifier.  It is also illegal to call
 * synchronize_srcu_expedited() from the corresponding SRCU read-side
 * critical section; doing so will result in deadlock.  However, it is
 * perfectly legal to call synchronize_srcu_expedited() on one srcu_struct
 * from some other srcu_struct's read-side critical section, as long as
 * the resulting graph of srcu_structs is acyclic.
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
        __synchronize_srcu(sp, SYNCHRONIZE_SRCU_EXP_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 *
 * Because this implementation invokes callbacks in queue order, waiting
 * for a full SRCU grace period also waits for all previously queued
 * callbacks to be invoked.
 */
void srcu_barrier(struct srcu_struct *sp)
{
        synchronize_srcu(sp);
}
EXPORT_SYMBOL_GPL(srcu_barrier);

/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
long srcu_batches_completed(struct srcu_struct *sp)
{
        return sp->completed;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

#define SRCU_CALLBACK_BATCH	10
#define SRCU_INTERVAL		1

/*
 * Move any new SRCU callbacks to the first stage of the SRCU grace
 * period pipeline.
 */
static void srcu_collect_new(struct srcu_struct *sp)
{
        if (!rcu_batch_empty(&sp->batch_queue)) {
                spin_lock_irq(&sp->queue_lock);
                rcu_batch_move(&sp->batch_check0, &sp->batch_queue);
                spin_unlock_irq(&sp->queue_lock);
        }
}

/*
 * Core SRCU state machine.  Advance callbacks from ->batch_check0 to
 * ->batch_check1 and then to ->batch_done as readers drain.
 */
static void srcu_advance_batches(struct srcu_struct *sp, int trycount)
{
        int idx = 1 ^ (sp->completed & 1);

        /*
         * Because readers might be delayed for an extended period after
         * fetching ->completed for their index, at any point in time there
         * might well be readers using both idx=0 and idx=1.  We therefore
         * need to wait for readers to clear from both index values before
         * invoking a callback.
         */

        if (rcu_batch_empty(&sp->batch_check0) &&
            rcu_batch_empty(&sp->batch_check1))
                return; /* no callbacks need to be advanced */

        if (!try_check_zero(sp, idx, trycount))
                return; /* failed to advance, will try after SRCU_INTERVAL */

        /*
         * The callbacks in ->batch_check1 already completed their first
         * zero check and flip, back when they were enqueued on
         * ->batch_check0 in a previous invocation of srcu_advance_batches().
         * (Presumably try_check_zero() returned false during that
         * invocation, leaving the callbacks stranded on ->batch_check1.)
         * They are therefore ready to invoke, so move them to ->batch_done.
         */
        rcu_batch_move(&sp->batch_done, &sp->batch_check1);

        if (rcu_batch_empty(&sp->batch_check0))
                return; /* no callbacks need to be advanced */
        srcu_flip(sp);

        /*
         * The callbacks in ->batch_check0 just finished their
         * first zero check and flip, so move them to ->batch_check1
         * for future checking on the other idx.
         */
        rcu_batch_move(&sp->batch_check1, &sp->batch_check0);

        /*
         * SRCU read-side critical sections are normally short, so check
         * at least twice in quick succession after a flip.
         */
        trycount = trycount < 2 ? 2 : trycount;
        if (!try_check_zero(sp, idx^1, trycount))
                return; /* failed to advance, will try after SRCU_INTERVAL */

        /*
         * The callbacks in ->batch_check1 have now waited for all
         * pre-existing readers using both idx values.  They are therefore
         * ready to invoke, so move them to ->batch_done.
         */
        rcu_batch_move(&sp->batch_done, &sp->batch_check1);
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.
 */
static void srcu_invoke_callbacks(struct srcu_struct *sp)
{
        int i;
        struct rcu_head *head;

        for (i = 0; i < SRCU_CALLBACK_BATCH; i++) {
                head = rcu_batch_dequeue(&sp->batch_done);
                if (!head)
                        break;
                local_bh_disable();
                head->func(head);
                local_bh_enable();
        }
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp)
{
        bool pending = true;

        if (rcu_batch_empty(&sp->batch_done) &&
            rcu_batch_empty(&sp->batch_check1) &&
            rcu_batch_empty(&sp->batch_check0) &&
            rcu_batch_empty(&sp->batch_queue)) {
                spin_lock_irq(&sp->queue_lock);
                if (rcu_batch_empty(&sp->batch_done) &&
                    rcu_batch_empty(&sp->batch_check1) &&
                    rcu_batch_empty(&sp->batch_check0) &&
                    rcu_batch_empty(&sp->batch_queue)) {
                        sp->running = false;
                        pending = false;
                }
                spin_unlock_irq(&sp->queue_lock);
        }

        if (pending)
                schedule_delayed_work(&sp->work, SRCU_INTERVAL);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
void process_srcu(struct work_struct *work)
{
        struct srcu_struct *sp;

        sp = container_of(work, struct srcu_struct, work.work);

        srcu_collect_new(sp);
        srcu_advance_batches(sp, 1);
        srcu_invoke_callbacks(sp);
        srcu_reschedule(sp);
}
EXPORT_SYMBOL_GPL(process_srcu);