[PATCH] Make RCU task_struct safe for oprofile
Author:     Paul E. McKenney <paulmck@us.ibm.com>
AuthorDate: Sun, 8 Jan 2006 09:01:35 +0000 (01:01 -0800)
Commit:     Linus Torvalds <torvalds@g5.osdl.org>
CommitDate: Mon, 9 Jan 2006 04:13:40 +0000 (20:13 -0800)
Applying RCU to the task structure broke oprofile, because
task_free_notify() can now be called from softirq.  This means that the
task_mortuary lock must be acquired with irqs disabled in order to avoid
intermittent self-deadlock: a softirq raised on a CPU that already holds
the lock in process context would spin on it forever.  Since irqs are now
disabled while the lock is held, the critical section within
process_task_mortuary() has been restructured to be O(1) in order to
maximize scalability and minimize realtime latency degradation.
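
For illustration, a minimal sketch of the self-deadlock pattern being
closed here (the names are made up; this is not oprofile code):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/*
 * Process context: with plain spin_lock(), a softirq raised on this
 * CPU while the lock is held would spin on example_lock forever.
 * Disabling local irqs also keeps softirqs from running here.
 */
static void example_process_side(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... touch the shared list ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

/* Softirq context, e.g. run from an RCU callback. */
static void example_softirq_side(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... queue an element ... */
	spin_unlock_irqrestore(&example_lock, flags);
}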

Kudos to Wu Fengguang for finding this problem!

Cc: Wu Fengguang <wfg@mail.ustc.edu.cn>
Cc: Philippe Elie <phil.el@wanadoo.fr>
Cc: John Levon <levon@movementarian.org>
Signed-off-by: "Paul E. McKenney" <paulmck@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 531b0731314162e4afea759d8e52ada9bb11a1f8..b2e8e49c865987e42f4a3e9a9d9c0e5f353df1e4 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -43,13 +43,16 @@ static void process_task_mortuary(void);
  * list for processing. Only after two full buffer syncs
  * does the task eventually get freed, because by then
  * we are sure we will not reference it again.
+ * Can be invoked from softirq via RCU callback due to
+ * call_rcu() of the task struct, hence the _irqsave.
  */
 static int task_free_notify(struct notifier_block * self, unsigned long val, void * data)
 {
+       unsigned long flags;
        struct task_struct * task = data;
-       spin_lock(&task_mortuary);
+       spin_lock_irqsave(&task_mortuary, flags);
        list_add(&task->tasks, &dying_tasks);
-       spin_unlock(&task_mortuary);
+       spin_unlock_irqrestore(&task_mortuary, flags);
        return NOTIFY_OK;
 }
 
@@ -431,25 +434,22 @@ static void increment_tail(struct oprofile_cpu_buffer * b)
  */
 static void process_task_mortuary(void)
 {
-       struct list_head * pos;
-       struct list_head * pos2;
+       unsigned long flags;
+       LIST_HEAD(local_dead_tasks);
        struct task_struct * task;
+       struct task_struct * ttask;
 
-       spin_lock(&task_mortuary);
+       spin_lock_irqsave(&task_mortuary, flags);
 
-       list_for_each_safe(pos, pos2, &dead_tasks) {
-               task = list_entry(pos, struct task_struct, tasks);
-               list_del(&task->tasks);
-               free_task(task);
-       }
+       list_splice_init(&dead_tasks, &local_dead_tasks);
+       list_splice_init(&dying_tasks, &dead_tasks);
 
-       list_for_each_safe(pos, pos2, &dying_tasks) {
-               task = list_entry(pos, struct task_struct, tasks);
+       spin_unlock_irqrestore(&task_mortuary, flags);
+
+       list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
                list_del(&task->tasks);
-               list_add_tail(&task->tasks, &dead_tasks);
+               free_task(task);
        }
-
-       spin_unlock(&task_mortuary);
 }
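
The splice-based rewrite above is an instance of a general pattern:
list_splice_init() relinks an entire list with a constant number of
pointer updates, so the time spent with the lock held (and irqs
disabled) no longer depends on how many tasks are queued.  A minimal
sketch of the pattern with made-up names (free_task() is the same
helper the patch calls):

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/*
 * Drain a shared list in O(1) lock-hold time: splice the whole chain
 * onto a private list under the lock, then do the per-entry work
 * (here, freeing) with the lock dropped and irqs enabled again.
 */
static void example_drain(struct list_head *shared, spinlock_t *lock)
{
	unsigned long flags;
	LIST_HEAD(local);
	struct task_struct *task, *tmp;

	spin_lock_irqsave(lock, flags);
	list_splice_init(shared, &local);	/* constant time */
	spin_unlock_irqrestore(lock, flags);

	list_for_each_entry_safe(task, tmp, &local, tasks) {
		list_del(&task->tasks);
		free_task(task);
	}
}

In the patch itself, the second splice (dying_tasks onto dead_tasks)
preserves the two-buffer-sync deferral described in the comment above
task_free_notify(): a task is only freed once we are sure it will not
be referenced again.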