- The "cgroup.clone_children" file is removed.
+- /proc/PID/cgroup keeps reporting the cgroup that a zombie belonged
+ to before exiting. If the cgroup is removed before the zombie is
+ reaped, " (deleted)" is appended to the path.
+
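Not part of the patch, but a minimal userspace sketch of the behaviour
described above, with error handling omitted: a parent can observe its
zombie child's cgroup membership before reaping it.

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/types.h>
	#include <sys/wait.h>

	int main(void)
	{
		char path[64], line[256];
		FILE *fp;
		pid_t pid = fork();

		if (pid == 0)
			_exit(0);		/* child exits and becomes a zombie */

		sleep(1);			/* let it exit, but don't reap it yet */

		snprintf(path, sizeof(path), "/proc/%d/cgroup", pid);
		fp = fopen(path, "r");
		while (fp && fgets(line, sizeof(line), fp))
			fputs(line, stdout);	/* still shows the pre-exit cgroup */
		if (fp)
			fclose(fp);

		waitpid(pid, NULL, 0);		/* now reap the zombie */
		return 0;
	}

On the default hierarchy, the path printed is the cgroup the child
exited in; if that cgroup has been rmdir'd in the meantime, the line
carries the " (deleted)" suffix.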
5-3. Controller File Conventions
int (*can_fork)(struct task_struct *task, void **priv_p);
void (*cancel_fork)(struct task_struct *task, void *priv);
void (*fork)(struct task_struct *task, void *priv);
- void (*exit)(struct cgroup_subsys_state *css,
- struct cgroup_subsys_state *old_css,
- struct task_struct *task);
+ void (*exit)(struct task_struct *task);
void (*bind)(struct cgroup_subsys_state *root_css);
int early_init;
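With the slimmed-down signature, a controller derives the css from the
task itself rather than receiving it as an argument. A hypothetical
skeleton for illustration: foo_cgroup, foo_exit and friends are made-up
names, and foo_cgrp_id would only exist given a matching (not actually
added) SUBSYS() entry in linux/cgroup_subsys.h.

	#include <linux/cgroup.h>
	#include <linux/err.h>
	#include <linux/slab.h>

	struct foo_cgroup {
		struct cgroup_subsys_state css;
	};

	static struct cgroup_subsys_state *
	foo_css_alloc(struct cgroup_subsys_state *parent_css)
	{
		struct foo_cgroup *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

		return foo ? &foo->css : ERR_PTR(-ENOMEM);
	}

	static void foo_css_free(struct cgroup_subsys_state *css)
	{
		kfree(container_of(css, struct foo_cgroup, css));
	}

	static void foo_exit(struct task_struct *task)
	{
		/*
		 * The exiting task still points at its css_set, which
		 * stays pinned until cgroup_free(), so the css can be
		 * looked up from @task directly.
		 */
		struct cgroup_subsys_state *css = task_css(task, foo_cgrp_id);

		pr_debug("task %d exited in css %p\n", task->pid, css);
	}

	struct cgroup_subsys foo_cgrp_subsys = {
		.css_alloc	= foo_css_alloc,
		.css_free	= foo_css_free,
		.exit		= foo_exit,
	};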
extern void cgroup_post_fork(struct task_struct *p,
void *old_ss_priv[CGROUP_CANFORK_COUNT]);
void cgroup_exit(struct task_struct *p);
+void cgroup_free(struct task_struct *p);
int cgroup_init_early(void);
int cgroup_init(void);
static inline void cgroup_post_fork(struct task_struct *p,
void *ss_priv[CGROUP_CANFORK_COUNT]) {}
static inline void cgroup_exit(struct task_struct *p) {}
+static inline void cgroup_free(struct task_struct *p) {}
static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
seq_printf(m, "%sname=%s", count ? "," : "",
root->name);
seq_putc(m, ':');
+
cgrp = task_cgroup_from_root(tsk, root);
- path = cgroup_path(cgrp, buf, PATH_MAX);
- if (!path) {
- retval = -ENAMETOOLONG;
- goto out_unlock;
+
+ /*
+ * On traditional hierarchies, all zombie tasks show up as
+ * belonging to the root cgroup. On the default hierarchy,
+ * while a zombie doesn't show up in "cgroup.procs" and
+ * thus can't be migrated, its /proc/PID/cgroup keeps
+ * reporting the cgroup it belonged to before exiting. If
+ * the cgroup is removed before the zombie is reaped,
+ * " (deleted)" is appended to the cgroup path.
+ */
+ if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) {
+ path = cgroup_path(cgrp, buf, PATH_MAX);
+ if (!path) {
+ retval = -ENAMETOOLONG;
+ goto out_unlock;
+ }
+ } else {
+ path = "/";
}
+
seq_puts(m, path);
- seq_putc(m, '\n');
+
+ if (cgroup_on_dfl(cgrp) && cgroup_is_dead(cgrp))
+ seq_puts(m, " (deleted)\n");
+ else
+ seq_putc(m, '\n');
}
retval = 0;
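Taken together, the two branches yield entries like the following for a
zombie whose cgroup has since been removed. Hierarchy IDs and paths are
invented for illustration:

	2:pids:/                     <- legacy hierarchy: zombies report "/"
	0::/jobs/batch1 (deleted)    <- default hierarchy: former, now-removed cgroup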
{
struct cgroup_subsys *ss;
struct css_set *cset;
- bool put_cset = false;
int i;
/*
spin_lock_bh(&css_set_lock);
css_set_move_task(tsk, cset, NULL, false);
spin_unlock_bh(&css_set_lock);
- put_cset = true;
+ } else {
+ get_css_set(cset);
}
- /* Reassign the task to the init_css_set. */
- RCU_INIT_POINTER(tsk->cgroups, &init_css_set);
-
/* see cgroup_post_fork() for details */
- for_each_subsys_which(ss, i, &have_exit_callback) {
- struct cgroup_subsys_state *old_css = cset->subsys[i];
- struct cgroup_subsys_state *css = task_css(tsk, i);
+ for_each_subsys_which(ss, i, &have_exit_callback)
+ ss->exit(tsk);
+}
- ss->exit(css, old_css, tsk);
- }
+void cgroup_free(struct task_struct *task)
+{
+ struct css_set *cset = task_css_set(task);
- if (put_cset)
- put_css_set(cset);
+ put_css_set(cset);
}
static void check_for_release(struct cgroup *cgrp)
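The cgroup_exit()/cgroup_free() split is what pins a zombie to its
cgroup. A task sitting on its css_set's task list already owns a
reference, and css_set_move_task() leaves that reference in place when
it unlinks the task; a task that was never linked takes one in the new
else branch. Either way the task exits still holding a css_set
reference, and the unconditional put in cgroup_free() balances it only
when the task_struct itself is freed. In sketch form:

	/*
	 * css_set reference across a task's lifetime, after this patch:
	 *
	 *   cgroup_post_fork():	linked on cg_list, holds a css_set ref
	 *   cgroup_exit():		unlinked from cg_list, ref deliberately
	 *				kept (or taken, if never linked)
	 *   cgroup_free():		put_css_set(), the point where a
	 *				removed cgroup can finally go away
	 */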
css_put(old_css);
}
-static void pids_exit(struct cgroup_subsys_state *css,
- struct cgroup_subsys_state *old_css,
- struct task_struct *task)
+static void pids_exit(struct task_struct *task)
{
- struct pids_cgroup *pids = css_pids(old_css);
+ struct pids_cgroup *pids = css_pids(task_css(task, pids_cgrp_id));
pids_uncharge(pids, 1);
}
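Note that task_css(task, pids_cgrp_id) is correct here: the exiting
task's css_set is pinned until cgroup_free(), and since the task no
longer migrates to init_css_set on exit, the css it reports is the one
holding its charge, which is exactly what the removed @old_css
parameter used to convey.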
task_function_call(task, __perf_cgroup_move, task);
}
-static void perf_cgroup_exit(struct cgroup_subsys_state *css,
- struct cgroup_subsys_state *old_css,
- struct task_struct *task)
-{
- /*
- * cgroup_exit() is called in the copy_process() failure path.
- * Ignore this case since the task hasn't ran yet, this avoids
- * trying to poke a half freed task state from generic code.
- */
- if (!(task->flags & PF_EXITING))
- return;
-
- task_function_call(task, __perf_cgroup_move, task);
-}
-
struct cgroup_subsys perf_event_cgrp_subsys = {
.css_alloc = perf_cgroup_css_alloc,
.css_free = perf_cgroup_css_free,
- .exit = perf_cgroup_exit,
.attach = perf_cgroup_attach,
};
#endif /* CONFIG_CGROUP_PERF */
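perf's exit callback existed only to chase the exit-time reassignment
to init_css_set. Since an exiting task now stays where it is, there is
no cgroup change for perf to react to, so both the callback and its
PF_EXITING guard against the copy_process() failure path can be
dropped; ->attach alone continues to handle real migrations.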
WARN_ON(atomic_read(&tsk->usage));
WARN_ON(tsk == current);
+ cgroup_free(tsk);
task_numa_free(tsk);
security_task_free(tsk);
exit_creds(tsk);
sched_move_task(task);
}
-static void cpu_cgroup_exit(struct cgroup_subsys_state *css,
- struct cgroup_subsys_state *old_css,
- struct task_struct *task)
-{
- /*
- * cgroup_exit() is called in the copy_process() failure path.
- * Ignore this case since the task hasn't ran yet, this avoids
- * trying to poke a half freed task state from generic code.
- */
- if (!(task->flags & PF_EXITING))
- return;
-
- sched_move_task(task);
-}
-
#ifdef CONFIG_FAIR_GROUP_SCHED
static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
struct cftype *cftype, u64 shareval)
.fork = cpu_cgroup_fork,
.can_attach = cpu_cgroup_can_attach,
.attach = cpu_cgroup_attach,
- .exit = cpu_cgroup_exit,
.legacy_cftypes = cpu_files,
.early_init = 1,
};
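The cpu controller's exit callback goes away for the same reason as
perf's: cpu_cgroup_exit() called sched_move_task() only to follow the
task's exit-time move out of its task group, and that move no longer
happens.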