From dfcce791fb0ad06f3f0b745a23160b9d8858fe39 Mon Sep 17 00:00:00 2001
From: Kirill Tkhai
Date: Thu, 16 Apr 2015 12:48:01 -0700
Subject: [PATCH] fs/exec.c:de_thread: move notify_count write under lock

We set sig->notify_count = -1 between the RELEASE and ACQUIRE operations:

	spin_unlock_irq(lock);
	...
	if (!thread_group_leader(tsk)) {
		...
		for (;;) {
			sig->notify_count = -1;
			write_lock_irq(&tasklist_lock);

There is no ordering restriction on this plain store, so other
processors may see it mixed in with the STOREs made inside either of
the two regions bounded by the spinlocks.  In particular, it may be
reordered with the earlier

	sig->group_exit_task = tsk;
	sig->notify_count = zap_other_threads(tsk);

Set it with tasklist_lock held to be sure nothing is reordered.

Signed-off-by: Kirill Tkhai
Acked-by: Oleg Nesterov
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 fs/exec.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/fs/exec.c b/fs/exec.c
index a5fef835ebc5..02bfd980a40c 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -926,10 +926,14 @@ static int de_thread(struct task_struct *tsk)
 	if (!thread_group_leader(tsk)) {
 		struct task_struct *leader = tsk->group_leader;
 
-		sig->notify_count = -1;	/* for exit_notify() */
 		for (;;) {
 			threadgroup_change_begin(tsk);
 			write_lock_irq(&tasklist_lock);
+			/*
+			 * Do this under tasklist_lock to ensure that
+			 * exit_notify() can't miss ->group_exit_task
+			 */
+			sig->notify_count = -1;
 			if (likely(leader->exit_state))
 				break;
 			__set_current_state(TASK_KILLABLE);
-- 
2.20.1
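
For reference, below is a minimal userspace sketch of the ordering
problem the changelog describes, using C11 release/acquire atomics in
place of the kernel's spinlock primitives.  The lock_a,
tasklist_lock_sub, and notify_count variables and the writer() function
are illustrative stand-ins, not code from fs/exec.c; the sketch only
shows why a plain store sitting between a RELEASE and an ACQUIRE is
ordered by neither, which is the window the patch closes by moving the
store inside the locked region.

	#include <stdatomic.h>

	/* Illustrative stand-ins for the two kernel locks and the flag. */
	static atomic_int lock_a;            /* plays the role of "lock"            */
	static atomic_int tasklist_lock_sub; /* plays the role of tasklist_lock     */
	static int notify_count;             /* plays the role of sig->notify_count */

	static void writer(void)
	{
		/*
		 * "spin_unlock_irq(lock)": a RELEASE orders only the accesses
		 * that come before it; later accesses may be hoisted above it.
		 */
		atomic_store_explicit(&lock_a, 0, memory_order_release);

		/*
		 * Plain store in the unlocked window: neither the release
		 * above nor the acquire below pins it, so other CPUs may
		 * observe it as if it happened inside either critical section.
		 */
		notify_count = -1;

		/*
		 * "write_lock_irq(&tasklist_lock)": an ACQUIRE orders only the
		 * accesses that come after it; earlier accesses may sink below it.
		 */
		while (atomic_exchange_explicit(&tasklist_lock_sub, 1,
						memory_order_acquire))
			;	/* spin */

		/*
		 * The patch moves the notify_count store to this point, inside
		 * the locked region, so a reader that takes the same lock (as
		 * exit_notify() does) observes it together with the other
		 * stores made under that lock.
		 */

		atomic_store_explicit(&tasklist_lock_sub, 0, memory_order_release);
	}

	int main(void)
	{
		writer();
		return 0;
	}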