From 9edff4ab1f8d82675277a04e359d0ed8bf14a7b7 Mon Sep 17 00:00:00 2001
From: Manfred Spraul
Date: Tue, 29 Apr 2008 01:00:57 -0700
Subject: [PATCH] ipc: sysvsem: implement sys_unshare(CLONE_SYSVSEM)

sys_unshare(CLONE_NEWIPC) doesn't handle the undo lists properly, this can
cause a kernel memory corruption. CLONE_NEWIPC must detach from the
existing undo lists.

Fix, part 1: add support for sys_unshare(CLONE_SYSVSEM)

The original reason to not support it was the potential (inevitable?)
confusion due to the fact that sys_unshare(CLONE_SYSVSEM) has the inverse
meaning of clone(CLONE_SYSVSEM).

Our two most reasonable options then appear to be (1) fully support
CLONE_SYSVSEM, or (2) continue to refuse explicit CLONE_SYSVSEM, but
always do it anyway on unshare(CLONE_NEWIPC). This patch does (1).

Changelog:
	Apr 16: SEH: switch to Manfred's alternative patch which
		removes the unshare_semundo() function which
		always refused CLONE_SYSVSEM.

Signed-off-by: Manfred Spraul
Signed-off-by: Serge E. Hallyn
Acked-by: "Eric W. Biederman"
Cc: Pavel Emelyanov
Cc: Michael Kerrisk
Cc: Pierre Peiffer
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 ipc/sem.c     |  1 +
 kernel/fork.c | 29 +++++++++++------------------
 2 files changed, 12 insertions(+), 18 deletions(-)

diff --git a/ipc/sem.c b/ipc/sem.c
index d56d3ab6bb8..e9418df5ff3 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1250,6 +1250,7 @@ void exit_sem(struct task_struct *tsk)
 	undo_list = tsk->sysvsem.undo_list;
 	if (!undo_list)
 		return;
+	tsk->sysvsem.undo_list = NULL;
 
 	if (!atomic_dec_and_test(&undo_list->refcnt))
 		return;
diff --git a/kernel/fork.c b/kernel/fork.c
index 156db96ff75..01666979bea 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1668,18 +1668,6 @@ static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp
 	return 0;
 }
 
-/*
- * Unsharing of semundo for tasks created with CLONE_SYSVSEM is not
- * supported yet
- */
-static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **new_ulistp)
-{
-	if (unshare_flags & CLONE_SYSVSEM)
-		return -EINVAL;
-
-	return 0;
-}
-
 /*
  * unshare allows a process to 'unshare' part of the process
  * context which was originally shared using clone. copy_*
@@ -1695,8 +1683,8 @@ asmlinkage long sys_unshare(unsigned long unshare_flags)
 	struct sighand_struct *new_sigh = NULL;
 	struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
 	struct files_struct *fd, *new_fd = NULL;
-	struct sem_undo_list *new_ulist = NULL;
 	struct nsproxy *new_nsproxy = NULL;
+	int do_sysvsem = 0;
 
 	check_unshare_flags(&unshare_flags);
 
@@ -1708,6 +1696,8 @@ asmlinkage long sys_unshare(unsigned long unshare_flags)
 				CLONE_NEWNET))
 		goto bad_unshare_out;
 
+	if (unshare_flags & CLONE_SYSVSEM)
+		do_sysvsem = 1;
 	if ((err = unshare_thread(unshare_flags)))
 		goto bad_unshare_out;
 	if ((err = unshare_fs(unshare_flags, &new_fs)))
@@ -1718,13 +1708,17 @@ asmlinkage long sys_unshare(unsigned long unshare_flags)
 		goto bad_unshare_cleanup_sigh;
 	if ((err = unshare_fd(unshare_flags, &new_fd)))
 		goto bad_unshare_cleanup_vm;
-	if ((err = unshare_semundo(unshare_flags, &new_ulist)))
-		goto bad_unshare_cleanup_fd;
 	if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
 			new_fs)))
-		goto bad_unshare_cleanup_semundo;
+		goto bad_unshare_cleanup_fd;
 
-	if (new_fs || new_mm || new_fd || new_ulist || new_nsproxy) {
+	if (new_fs || new_mm || new_fd || do_sysvsem || new_nsproxy) {
+		if (do_sysvsem) {
+			/*
+			 * CLONE_SYSVSEM is equivalent to sys_exit().
+			 */
+			exit_sem(current);
+		}
 
 		if (new_nsproxy) {
 			switch_task_namespaces(current, new_nsproxy);
@@ -1760,7 +1754,6 @@ asmlinkage long sys_unshare(unsigned long unshare_flags)
 	if (new_nsproxy)
 		put_nsproxy(new_nsproxy);
 
-bad_unshare_cleanup_semundo:
bad_unshare_cleanup_fd:
 	if (new_fd)
 		put_files_struct(new_fd);
-- 
2.20.1
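For illustration only (not part of the patch): a minimal userspace sketch of
how the newly supported flag might be exercised, assuming a kernel with this
patch applied and glibc's unshare(2) wrapper; the semaphore setup and the
printed messages are purely hypothetical.

/* demo.c - hypothetical test for unshare(CLONE_SYSVSEM) */
#define _GNU_SOURCE
#include <sched.h>		/* unshare(), CLONE_SYSVSEM */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	/* Private semaphore set with one semaphore, initial value 0. */
	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
	if (semid < 0) {
		perror("semget");
		return EXIT_FAILURE;
	}

	/* Bump the semaphore with SEM_UNDO so this task gets an entry
	 * on its semaphore undo list. */
	struct sembuf up = { .sem_num = 0, .sem_op = 1, .sem_flg = SEM_UNDO };
	if (semop(semid, &up, 1) < 0)
		perror("semop");

	/* Before this patch the kernel rejected the flag with -EINVAL
	 * (see the removed unshare_semundo()); with it, the caller is
	 * detached from its undo list via exit_sem(current). */
	if (unshare(CLONE_SYSVSEM) < 0)
		perror("unshare(CLONE_SYSVSEM)");
	else
		printf("unshare(CLONE_SYSVSEM) succeeded\n");

	semctl(semid, 0, IPC_RMID);	/* remove the semaphore set */
	return EXIT_SUCCESS;
}

In a single-threaded caller the undo list's refcount drops to zero, so the
remainder of exit_sem() (not shown in the hunk above) also applies any
pending SEM_UNDO adjustments before the task continues with fresh sysvsem
state.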