sched: Use lockdep-based checking on rcu_dereference()
author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>
           Tue, 23 Feb 2010 01:04:50 +0000 (17:04 -0800)
committer  Ingo Molnar <mingo@elte.hu>
           Thu, 25 Feb 2010 09:34:26 +0000 (10:34 +0100)
Update the rcu_dereference() usages to take advantage of the new
lockdep-based checking.
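
As an illustrative sketch (not part of the patch), the pattern the hunks
below apply is: replace rcu_dereference(p) with rcu_dereference_check(p, c),
where the extra argument c names every condition under which the
dereference is known to be safe; when lockdep checking is enabled, a
splat is issued if none of the conditions hold at the call site.  The
names "struct foo", "foo_lock" and "global_foo" here are invented for
illustration only:

    #include <linux/rcupdate.h>
    #include <linux/lockdep.h>
    #include <linux/spinlock.h>

    struct foo {
            int val;
    };

    static DEFINE_SPINLOCK(foo_lock);
    static struct foo *global_foo;

    /*
     * The dereference is legitimate either inside an RCU read-side
     * critical section or while holding foo_lock, and the check
     * expression says so explicitly.
     */
    static struct foo *get_global_foo(void)
    {
            return rcu_dereference_check(global_foo,
                                         rcu_read_lock_held() ||
                                         lockdep_is_held(&foo_lock));
    }

Call sites that are protected by other means entirely use
rcu_dereference_raw() (no checking), and code that relies on preemption
being disabled rather than rcu_read_lock() uses rcu_dereference_sched(),
as the kernel/notifier.c and kernel/sched.c hunks below do.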

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1266887105-1528-6-git-send-email-paulmck@linux.vnet.ibm.com>
[ -v2: fix allmodconfig missing symbol export build failure on x86 ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/linux/cgroup.h
include/linux/cred.h
init/main.c
kernel/cgroup.c
kernel/exit.c
kernel/fork.c
kernel/notifier.c
kernel/pid.c
kernel/sched.c

diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 0008dee665148850f2b46af2918b23635cedc7c0..c9bbcb2a75aecbacdb1463cf08a2b0a26d0f264b 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -28,6 +28,7 @@ struct css_id;
 extern int cgroup_init_early(void);
 extern int cgroup_init(void);
 extern void cgroup_lock(void);
+extern int cgroup_lock_is_held(void);
 extern bool cgroup_lock_live_group(struct cgroup *cgrp);
 extern void cgroup_unlock(void);
 extern void cgroup_fork(struct task_struct *p);
@@ -486,7 +487,9 @@ static inline struct cgroup_subsys_state *cgroup_subsys_state(
 static inline struct cgroup_subsys_state *task_subsys_state(
        struct task_struct *task, int subsys_id)
 {
-       return rcu_dereference(task->cgroups->subsys[subsys_id]);
+       return rcu_dereference_check(task->cgroups->subsys[subsys_id],
+                                    rcu_read_lock_held() ||
+                                    cgroup_lock_is_held());
 }
 
 static inline struct cgroup* task_cgroup(struct task_struct *task,
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 4e3387a89cb92a8549e2c2479c37ac7fd2443956..4db09f89b637539e3d3e76bdc8565d51c0f11a7b 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -280,7 +280,7 @@ static inline void put_cred(const struct cred *_cred)
  * task or by holding tasklist_lock to prevent it from being unlinked.
  */
 #define __task_cred(task) \
-       ((const struct cred *)(rcu_dereference((task)->real_cred)))
+       ((const struct cred *)(rcu_dereference_check((task)->real_cred, rcu_read_lock_held() || lockdep_is_held(&tasklist_lock))))
 
 /**
  * get_task_cred - Get another task's objective credentials
diff --git a/init/main.c b/init/main.c
index 4cb47a159f028fa48a114efbf74cf79a9d828883..c75dcd6eef091061e78b306ce3944958b5b9b30d 100644
--- a/init/main.c
+++ b/init/main.c
@@ -416,7 +416,9 @@ static noinline void __init_refok rest_init(void)
        kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
        numa_default_policy();
        pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
+       rcu_read_lock();
        kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
+       rcu_read_unlock();
        unlock_kernel();
 
        /*
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index aa3bee56644675d4c707b03d97fcf0bb47306646..b1a0f5a528fe6563badc393f96db9e4cfa2d9255 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -166,6 +166,20 @@ static DEFINE_SPINLOCK(hierarchy_id_lock);
  */
 static int need_forkexit_callback __read_mostly;
 
+#ifdef CONFIG_PROVE_LOCKING
+int cgroup_lock_is_held(void)
+{
+       return lockdep_is_held(&cgroup_mutex);
+}
+#else /* #ifdef CONFIG_PROVE_LOCKING */
+int cgroup_lock_is_held(void)
+{
+       return mutex_is_locked(&cgroup_mutex);
+}
+#endif /* #else #ifdef CONFIG_PROVE_LOCKING */
+
+EXPORT_SYMBOL_GPL(cgroup_lock_is_held);
+
 /* convenient tests for these bits */
 inline int cgroup_is_removed(const struct cgroup *cgrp)
 {
diff --git a/kernel/exit.c b/kernel/exit.c
index 546774a31a66752a698faa02c4e54f7c39fbef9e..45ed043b8bf59b8704b7cc13f19b1b13824b7ce4 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -85,7 +85,9 @@ static void __exit_signal(struct task_struct *tsk)
        BUG_ON(!sig);
        BUG_ON(!atomic_read(&sig->count));
 
-       sighand = rcu_dereference(tsk->sighand);
+       sighand = rcu_dereference_check(tsk->sighand,
+                                       rcu_read_lock_held() ||
+                                       lockdep_is_held(&tasklist_lock));
        spin_lock(&sighand->siglock);
 
        posix_cpu_timers_exit(tsk);
@@ -170,8 +172,10 @@ void release_task(struct task_struct * p)
 repeat:
        tracehook_prepare_release_task(p);
        /* don't need to get the RCU readlock here - the process is dead and
-        * can't be modifying its own credentials */
+        * can't be modifying its own credentials. But shut RCU-lockdep up */
+       rcu_read_lock();
        atomic_dec(&__task_cred(p)->user->processes);
+       rcu_read_unlock();
 
        proc_flush_task(p);
 
@@ -473,9 +477,11 @@ static void close_files(struct files_struct * files)
        /*
         * It is safe to dereference the fd table without RCU or
         * ->file_lock because this is the last reference to the
-        * files structure.
+        * files structure.  But use RCU to shut RCU-lockdep up.
         */
+       rcu_read_lock();
        fdt = files_fdtable(files);
+       rcu_read_unlock();
        for (;;) {
                unsigned long set;
                i = j * __NFDBITS;
@@ -521,10 +527,12 @@ void put_files_struct(struct files_struct *files)
                 * at the end of the RCU grace period. Otherwise,
                 * you can free files immediately.
                 */
+               rcu_read_lock();
                fdt = files_fdtable(files);
                if (fdt != &files->fdtab)
                        kmem_cache_free(files_cachep, files);
                free_fdtable(fdt);
+               rcu_read_unlock();
        }
 }
 
diff --git a/kernel/fork.c b/kernel/fork.c
index f88bd984df35fc937a7ee0c2ab89ac091ce5141d..17bbf093356d085123b83fa93c05a89e380fb53e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -86,6 +86,7 @@ int max_threads;              /* tunable limit on nr_threads */
 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 
 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
+EXPORT_SYMBOL_GPL(tasklist_lock);
 
 int nr_processes(void)
 {
diff --git a/kernel/notifier.c b/kernel/notifier.c
index acd24e7643eb8ebdb9a0386b65931294b37c7eaf..2488ba7eb568784eb5c13b57247918409c4caf13 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -78,10 +78,10 @@ static int __kprobes notifier_call_chain(struct notifier_block **nl,
        int ret = NOTIFY_DONE;
        struct notifier_block *nb, *next_nb;
 
-       nb = rcu_dereference(*nl);
+       nb = rcu_dereference_raw(*nl);
 
        while (nb && nr_to_call) {
-               next_nb = rcu_dereference(nb->next);
+               next_nb = rcu_dereference_raw(nb->next);
 
 #ifdef CONFIG_DEBUG_NOTIFIERS
                if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) {
@@ -309,7 +309,7 @@ int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
         * racy then it does not matter what the result of the test
         * is, we re-check the list after having taken the lock anyway:
         */
-       if (rcu_dereference(nh->head)) {
+       if (rcu_dereference_raw(nh->head)) {
                down_read(&nh->rwsem);
                ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
                                        nr_calls);
diff --git a/kernel/pid.c b/kernel/pid.c
index 2e17c9c92cbec3ff3ab5afbb9ec30ce0fbafcb21..b08e697cd83f0944747d5624698d356cf0288090 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -367,7 +367,7 @@ struct task_struct *pid_task(struct pid *pid, enum pid_type type)
        struct task_struct *result = NULL;
        if (pid) {
                struct hlist_node *first;
-               first = rcu_dereference(pid->tasks[type].first);
+               first = rcu_dereference_check(pid->tasks[type].first, rcu_read_lock_held() || lockdep_is_held(&tasklist_lock));
                if (first)
                        result = hlist_entry(first, struct task_struct, pids[(type)].node);
        }
diff --git a/kernel/sched.c b/kernel/sched.c
index 3a8fb30a91b1bba639e164d8f34209a0497ae9fd..70ae68680d4c45e82e7f6d4962f4b55373d16392 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -645,6 +645,11 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
+#define for_each_domain_rd(p) \
+       rcu_dereference_check((p), \
+                             rcu_read_lock_sched_held() || \
+                             lockdep_is_held(&sched_domains_mutex))
+
 /*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
  * See detach_destroy_domains: synchronize_sched for details.
@@ -653,7 +658,7 @@ static inline int cpu_of(struct rq *rq)
  * preempt-disabled sections.
  */
 #define for_each_domain(cpu, __sd) \
-       for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
+       for (__sd = for_each_domain_rd(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
 
 #define cpu_rq(cpu)            (&per_cpu(runqueues, (cpu)))
 #define this_rq()              (&__get_cpu_var(runqueues))
@@ -1531,7 +1536,7 @@ static unsigned long target_load(int cpu, int type)
 
 static struct sched_group *group_of(int cpu)
 {
-       struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
+       struct sched_domain *sd = rcu_dereference_sched(cpu_rq(cpu)->sd);
 
        if (!sd)
                return NULL;
@@ -4888,7 +4893,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 
 static inline int on_null_domain(int cpu)
 {
-       return !rcu_dereference(cpu_rq(cpu)->sd);
+       return !rcu_dereference_sched(cpu_rq(cpu)->sd);
 }
 
 /*