static int init_srcu_struct_fields(struct srcu_struct *sp)
{
sp->completed = 0;
+ sp->srcu_gp_seq = 0;
spin_lock_init(&sp->queue_lock);
sp->srcu_state = SRCU_STATE_IDLE;
rcu_batch_init(&sp->batch_queue);
#define SYNCHRONIZE_SRCU_TRYCOUNT 2
#define SYNCHRONIZE_SRCU_EXP_TRYCOUNT 12
+/*
+ * Start an SRCU grace period: move the grace-period state machine into
+ * its first read-side scan phase (SRCU_STATE_SCAN1) and mark the
+ * ->srcu_gp_seq sequence counter as in-progress via rcu_seq_start().
+ * NOTE(review): callers in this patch invoke this either under
+ * ->queue_lock or after taking over grace-period ownership -- confirm
+ * that exclusivity is guaranteed on every call path.
+ */
+static void srcu_gp_start(struct srcu_struct *sp)
+{
+	WRITE_ONCE(sp->srcu_state, SRCU_STATE_SCAN1);
+	rcu_seq_start(&sp->srcu_gp_seq);
+}
+
/*
* @@@ Wait until all pre-existing readers complete. Such readers
* will have used the index specified by "idx".
smp_mb(); /* D */ /* Pairs with C. */
}
+/*
+ * End an SRCU grace period: complete the ->srcu_gp_seq sequence counter
+ * via rcu_seq_end(), then return the state machine to SRCU_STATE_DONE.
+ * The seq-counter update deliberately precedes the state write so that
+ * observers seeing DONE also see the finished sequence number --
+ * NOTE(review): ordering relies on rcu_seq_end()'s internal barriers;
+ * confirm against the rcu_seq_*() implementation.
+ */
+static void srcu_gp_end(struct srcu_struct *sp)
+{
+	rcu_seq_end(&sp->srcu_gp_seq);
+	WRITE_ONCE(sp->srcu_state, SRCU_STATE_DONE);
+}
+
/*
* Enqueue an SRCU callback on the specified srcu_struct structure,
* initiating grace-period processing if it is not already running.
smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */
rcu_batch_queue(&sp->batch_queue, head);
if (READ_ONCE(sp->srcu_state) == SRCU_STATE_IDLE) {
- WRITE_ONCE(sp->srcu_state, SRCU_STATE_SCAN1);
+ srcu_gp_start(sp);
queue_delayed_work(system_power_efficient_wq, &sp->work, 0);
}
spin_unlock_irqrestore(&sp->queue_lock, flags);
smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */
if (READ_ONCE(sp->srcu_state) == SRCU_STATE_IDLE) {
/* steal the processing owner */
- WRITE_ONCE(sp->srcu_state, SRCU_STATE_SCAN1);
+ srcu_gp_start(sp);
rcu_batch_queue(&sp->batch_check0, head);
spin_unlock_irq(&sp->queue_lock);
/* give the processing owner to work_struct */
*/
if (sp->srcu_state == SRCU_STATE_DONE)
- WRITE_ONCE(sp->srcu_state, SRCU_STATE_SCAN1);
+ srcu_gp_start(sp);
if (sp->srcu_state == SRCU_STATE_SCAN1) {
idx = 1 ^ (sp->completed & 1);
*/
rcu_batch_move(&sp->batch_done, &sp->batch_check1);
- WRITE_ONCE(sp->srcu_state, SRCU_STATE_DONE);
+ srcu_gp_end(sp);
}
}