Move mm->core_waiters into "struct core_state" allocated on stack. This
shrinks mm_struct a little bit and allows further changes.
This patch mostly does s/core_waiters/core_state. The only essential
change is that coredump_wait() must clear mm->core_state before return.
coredump_wait()'s path is uglified and .text grows by 30 bytes; this is fixed by the next patch.
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
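For illustration, a minimal userspace sketch of the handshake this patch sets up: the dump-inducing thread keeps the bookkeeping in a struct on its own stack, publishes a pointer to it, waits for the other threads to check in, and clears the pointer before returning. This is an analogue of the pattern, not kernel code; pthreads stand in for mmap_sem and struct completion, and the name "published" is invented for the example.

#include <pthread.h>
#include <stdio.h>

struct core_state {
	int nr_threads;			/* threads still to check in */
	pthread_mutex_t lock;		/* kernel: mmap_sem */
	pthread_cond_t startup;		/* kernel: struct completion */
};

static struct core_state *published;	/* kernel: mm->core_state */

static void *other_thread(void *arg)
{
	struct core_state *cs = arg;

	/* kernel: exit_mm() decrements ->nr_threads and completes startup */
	pthread_mutex_lock(&cs->lock);
	if (!--cs->nr_threads)
		pthread_cond_signal(&cs->startup);
	pthread_mutex_unlock(&cs->lock);
	return NULL;
}

int main(void)
{
	struct core_state core_state = {	/* on the inducer's stack */
		.nr_threads = 2,
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.startup = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t[2];

	published = &core_state;		/* mm->core_state = &core_state */

	for (int i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, other_thread, &core_state);

	/* kernel: wait_for_completion(&core_state.startup) */
	pthread_mutex_lock(&core_state.lock);
	while (core_state.nr_threads)
		pthread_cond_wait(&core_state.startup, &core_state.lock);
	pthread_mutex_unlock(&core_state.lock);

	/* the essential change: clear the pointer before the frame dies */
	published = NULL;
	printf("all threads checked in\n");

	for (int i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	return 0;
}

Because the inducer cannot leave coredump_wait() until every counted thread has checked in, pointing other tasks at an on-stack struct is safe, provided the pointer is cleared before the function returns.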
* Make sure that if there is a core dump in progress
* for the old mm, we get out and die instead of going
* through with the exec. We must hold mmap_sem around
- * checking core_waiters and changing tsk->mm. The
- * core-inducing thread will increment core_waiters for
- * each thread whose ->mm == old_mm.
+ * checking core_state and changing tsk->mm.
*/
down_read(&old_mm->mmap_sem);
- if (unlikely(old_mm->core_waiters)) {
+ if (unlikely(old_mm->core_state)) {
up_read(&old_mm->mmap_sem);
return -EINTR;
}
t = start;
do {
if (t != current && t->mm) {
- t->mm->core_waiters++;
+ t->mm->core_state->nr_threads++;
sigaddset(&t->pending.signal, SIGKILL);
signal_wake_up(t, 1);
}
- if (atomic_read(&mm->mm_users) == mm->core_waiters + 1)
+ if (atomic_read(&mm->mm_users) == mm->core_state->nr_threads + 1)
goto done;
/*
* We should find and kill all tasks which use this mm, and we should
- * count them correctly into mm->core_waiters. We don't take tasklist
+ * count them correctly into ->nr_threads. We don't take tasklist
* lock, but this is safe wrt:
*
* fork:
}
rcu_read_unlock();
done:
- return mm->core_waiters;
+ return mm->core_state->nr_threads;
}
static int coredump_wait(int exit_code)
init_completion(&mm->core_done);
init_completion(&core_state.startup);
+ core_state.nr_threads = 0;
mm->core_state = &core_state;
core_waiters = zap_threads(tsk, mm, exit_code);
+ if (core_waiters < 0)
+ mm->core_state = NULL;
up_write(&mm->mmap_sem);
if (unlikely(core_waiters < 0))
if (core_waiters)
wait_for_completion(&core_state.startup);
- BUG_ON(mm->core_waiters);
+ mm->core_state = NULL;
/*
* If another thread got here first, or we are not dumpable, bail out.
*/
- if (mm->core_waiters || !get_dumpable(mm)) {
+ if (mm->core_state || !get_dumpable(mm)) {
up_write(&mm->mmap_sem);
goto fail;
}
+ int nr_threads;
struct completion startup;
};
atomic_t mm_users; /* How many users with user space? */
atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
int map_count; /* number of VMAs */
- int core_waiters;
struct rw_semaphore mmap_sem;
spinlock_t page_table_lock; /* Protects page tables and some counters */
return;
/*
* Serialize with any possible pending coredump.
- * We must hold mmap_sem around checking core_waiters
+ * We must hold mmap_sem around checking core_state
* and clearing tsk->mm. The core-inducing thread
- * will increment core_waiters for each thread in the
+ * will increment ->nr_threads for each thread in the
* group with ->mm != NULL.
*/
down_read(&mm->mmap_sem);
- if (mm->core_waiters) {
+ if (mm->core_state) {
up_read(&mm->mmap_sem);
down_write(&mm->mmap_sem);
- if (!--mm->core_waiters)
+ if (!--mm->core_state->nr_threads)
complete(&mm->core_state->startup);
up_write(&mm->mmap_sem);
INIT_LIST_HEAD(&mm->mmlist);
mm->flags = (current->mm) ? current->mm->flags
: MMF_DUMP_FILTER_DEFAULT;
- mm->core_waiters = 0;
mm->nr_ptes = 0;
set_mm_counter(mm, file_rss, 0);
set_mm_counter(mm, anon_rss, 0);
* is a deadlock situation, and pointless because our tracer
* is dead so don't allow us to stop.
* If SIGKILL was already sent before the caller unlocked
- * ->siglock we must see ->core_waiters != 0. Otherwise it
+ * ->siglock we must see ->core_state != NULL. Otherwise it
* is safe to enter schedule().
*/
- if (unlikely(current->mm->core_waiters) &&
+ if (unlikely(current->mm->core_state) &&
unlikely(current->mm == current->parent->mm))
return 0;