.orphan_nxttail = &sname##_state.orphan_nxtlist, \
.orphan_donetail = &sname##_state.orphan_donelist, \
.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
---- -- .onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \
.name = RCU_STATE_NAME(sname), \
.abbr = sabbr, \
- -----}; \
- -----DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data)
+ +++++}
/*
 * Instantiate the per-flavor rcu_state structures via the initializer
 * macro defined above (sname##_state plus its callback function).
 */
RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
* Compute the per-level fanout, either using the exact fanout specified
* or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
*/
-- ----#ifdef CONFIG_RCU_FANOUT_EXACT
-- - --static void __init rcu_init_levelspread(struct rcu_state *rsp)
-- - --{
-- - -- int i;
-- - --
-- - -- rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
-- - -- for (i = rcu_num_lvls - 2; i >= 0; i--)
-- - -- rsp->levelspread[i] = CONFIG_RCU_FANOUT;
-- - --}
-- - --#else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
static void __init rcu_init_levelspread(struct rcu_state *rsp)
{
-- - -- int ccur;
-- - -- int cprv;
int i;
- rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
- for (i = rcu_num_lvls - 2; i >= 0; i--)
- rsp->levelspread[i] = CONFIG_RCU_FANOUT;
- }
- #else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
- static void __init rcu_init_levelspread(struct rcu_state *rsp)
- {
- int ccur;
- int cprv;
- int i;
-
-- ---- cprv = nr_cpu_ids;
-- ---- for (i = rcu_num_lvls - 1; i >= 0; i--) {
-- ---- ccur = rsp->levelcnt[i];
-- ---- rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
-- ---- cprv = ccur;
++ ++++ if (IS_ENABLED(CONFIG_RCU_FANOUT_EXACT)) {
++ ++++ rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
++ ++++ for (i = rcu_num_lvls - 2; i >= 0; i--)
++ ++++ rsp->levelspread[i] = CONFIG_RCU_FANOUT;
++ ++++ } else {
++ ++++ int ccur;
++ ++++ int cprv;
++ ++++
++ ++++ cprv = nr_cpu_ids;
++ ++++ for (i = rcu_num_lvls - 1; i >= 0; i--) {
++ ++++ ccur = rsp->levelcnt[i];
++ ++++ rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
++ ++++ cprv = ccur;
++ ++++ }
}
}
-- ----#endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */
/*
* Helper function for rcu_init() that initializes one rcu_state structure.
/* force all RCU readers onto ->blkd_tasks lists. */
synchronize_sched_expedited();
---- -- /* Initialize ->expmask for all non-leaf rcu_node structures. */
---- -- rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
---- -- raw_spin_lock_irqsave(&rnp->lock, flags);
---- -- smp_mb__after_unlock_lock();
---- -- rnp->expmask = rnp->qsmaskinit;
---- -- raw_spin_unlock_irqrestore(&rnp->lock, flags);
---- -- }
---- --
---- -- /* Snapshot current state of ->blkd_tasks lists. */
++++ ++ /*
++++ ++ * Snapshot current state of ->blkd_tasks lists into ->expmask.
++++ ++ * Phase 1 sets bits and phase 2 permits rcu_read_unlock_special()
++++ ++ * to start clearing them. Doing this in one phase leads to
++++ ++ * strange races between setting and clearing bits, so just say "no"!
++++ ++ */
++ + ++ rcu_for_each_leaf_node(rsp, rnp)
- sync_rcu_preempt_exp_init(rsp, rnp);
- if (NUM_RCU_NODES > 1)
- sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
++++ ++ sync_rcu_preempt_exp_init1(rsp, rnp);
+ rcu_for_each_leaf_node(rsp, rnp)
-- - -- sync_rcu_preempt_exp_init(rsp, rnp);
-- - -- if (NUM_RCU_NODES > 1)
-- - -- sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
++++ ++ sync_rcu_preempt_exp_init2(rsp, rnp);
put_online_cpus();