From 56cd697366b6d6f67acb6c58ac7f3b185d11ef07 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@kernel.org>
Date: Mon, 6 Feb 2017 10:57:33 +0100
Subject: [PATCH] sched/headers: Move the task_lock()/unlock() APIs to
 <linux/sched/task.h>

The task_lock()/task_unlock() APIs are not related to core scheduling;
they are task lifetime APIs, i.e. they belong in <linux/sched/task.h>.

Move them.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 include/linux/sched.h      | 20 --------------------
 include/linux/sched/task.h | 20 ++++++++++++++++++++
 kernel/cgroup/cgroup-v1.c  |  1 +
 kernel/cgroup/namespace.c  |  2 +-
 4 files changed, 22 insertions(+), 21 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4ab105a2a8f9..d481c129a822 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1526,26 +1526,6 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
 }
 #endif
 
-/*
- * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
- * subscriptions and synchronises with wait4(). Also used in procfs. Also
- * pins the final release of task.io_context. Also protects ->cpuset and
- * ->cgroup.subsys[]. And ->vfork_done.
- *
- * Nests both inside and outside of read_lock(&tasklist_lock).
- * It must not be nested with write_lock_irq(&tasklist_lock),
- * neither inside nor outside.
- */
-static inline void task_lock(struct task_struct *p)
-{
-	spin_lock(&p->alloc_lock);
-}
-
-static inline void task_unlock(struct task_struct *p)
-{
-	spin_unlock(&p->alloc_lock);
-}
-
 /* set thread flags in other task's structures
  * - see asm/thread_info.h for TIF_xxxx flags available
  */
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 1be049a18d1b..2be9fde588a7 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -91,4 +91,24 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
 }
 #endif
 
+/*
+ * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
+ * subscriptions and synchronises with wait4(). Also used in procfs. Also
+ * pins the final release of task.io_context. Also protects ->cpuset and
+ * ->cgroup.subsys[]. And ->vfork_done.
+ *
+ * Nests both inside and outside of read_lock(&tasklist_lock).
+ * It must not be nested with write_lock_irq(&tasklist_lock),
+ * neither inside nor outside.
+ */
+static inline void task_lock(struct task_struct *p)
+{
+	spin_lock(&p->alloc_lock);
+}
+
+static inline void task_unlock(struct task_struct *p)
+{
+	spin_unlock(&p->alloc_lock);
+}
+
 #endif /* _LINUX_SCHED_TASK_H */
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 08d2cb605101..abc585858685 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -6,6 +6,7 @@
 #include
 #include
 #include
+#include <linux/sched/task.h>
 #include
 #include
 #include
diff --git a/kernel/cgroup/namespace.c b/kernel/cgroup/namespace.c
index cff7ea62c38f..96d38dab6fb2 100644
--- a/kernel/cgroup/namespace.c
+++ b/kernel/cgroup/namespace.c
@@ -1,6 +1,6 @@
 #include "cgroup-internal.h"
 
-#include <linux/sched.h>
+#include <linux/sched/task.h>
 #include
 #include
 #include
-- 
2.20.1
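
A note on the API being moved (illustrative, not part of the patch): task_lock() takes the per-task ->alloc_lock spinlock, and callers hold it across short sections that touch the fields listed in the moved comment. A minimal sketch of the usual pattern, modeled on the in-tree get_task_comm() helper; the function name sample_get_comm is made up for illustration:

#include <linux/sched.h>
#include <linux/sched/task.h>	/* task_lock()/task_unlock() after this patch */
#include <linux/string.h>

/*
 * Illustrative only: copy p->comm out consistently. ->comm is one of
 * the fields the moved comment says ->alloc_lock protects, so holding
 * task_lock() here excludes concurrent updaters such as set_task_comm().
 */
static char *sample_get_comm(char *buf, struct task_struct *p)
{
	task_lock(p);
	strncpy(buf, p->comm, sizeof(p->comm));	/* buf must hold TASK_COMM_LEN bytes */
	task_unlock(p);
	return buf;
}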
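
The nesting rule in the moved comment can also be made concrete. A hypothetical sketch (sample_walk_children is made up; the locking pattern is the point):

#include <linux/sched.h>
#include <linux/sched/task.h>	/* tasklist_lock, task_lock()/task_unlock() */

/*
 * task_lock() may nest inside read_lock(&tasklist_lock), as here, or be
 * taken outside it -- but it must never be combined with
 * write_lock_irq(&tasklist_lock) in either nesting order.
 */
static void sample_walk_children(struct task_struct *parent)
{
	struct task_struct *child;

	read_lock(&tasklist_lock);
	list_for_each_entry(child, &parent->children, sibling) {
		task_lock(child);
		/* ... inspect fields protected by ->alloc_lock ... */
		task_unlock(child);
	}
	read_unlock(&tasklist_lock);
}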