/*
 * Generic process-grouping system.
 *
 * Based originally on the cpuset system, extracted by Paul Menage
 * Copyright (C) 2006 Google, Inc
 *
 * Notifications support
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Copyright notices from the original cpuset code:
 * --------------------------------------------------
 * Copyright (C) 2003 BULL SA.
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 * Portions derived from Patrick Mochel's sysfs code.
 * sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 * 2003-10-10 Written by Simon Derr.
 * 2003-10-22 Updates by Stephen Hemminger.
 * 2004 May-July Rework by Paul Jackson.
 * ---------------------------------------------------
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */

#include <linux/cgroup.h>
#include <linux/cred.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/backing-dev.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/magic.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>
#include <linux/hashtable.h>
#include <linux/namei.h>
#include <linux/pid_namespace.h>
#include <linux/idr.h>
#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/flex_array.h> /* used in cgroup_attach_task */
#include <linux/kthread.h>

#include <linux/atomic.h>

/* css deactivation bias, makes css->refcnt negative to deny new trygets */
#define CSS_DEACT_BIAS		INT_MIN

/*
 * cgroup_mutex is the master lock. Any modification to cgroup or its
 * hierarchy must be performed while holding it.
 *
 * cgroup_root_mutex nests inside cgroup_mutex and should be held to modify
 * cgroupfs_root of any cgroup hierarchy - subsys list, flags,
 * release_agent_path and so on. Modifying requires both cgroup_mutex and
 * cgroup_root_mutex. Readers can acquire either of the two. This is to
 * break the following locking order cycle.
 *
 *  A. cgroup_mutex -> cred_guard_mutex -> s_type->i_mutex_key -> namespace_sem
 *  B. namespace_sem -> cgroup_mutex
 *
 * B happens only through cgroup_show_options() and using cgroup_root_mutex
 * breaks it.
 */
#ifdef CONFIG_PROVE_RCU
DEFINE_MUTEX(cgroup_mutex);
EXPORT_SYMBOL_GPL(cgroup_mutex);	/* only for task_subsys_state_check() */
#else
static DEFINE_MUTEX(cgroup_mutex);
#endif

static DEFINE_MUTEX(cgroup_root_mutex);

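/*
 * For example (a sketch of the rules above, mirroring what
 * cgroup_remount() actually does), a writer that needs to modify root
 * state nests the two locks as:
 *
 *	mutex_lock(&cgroup_mutex);
 *	mutex_lock(&cgroup_root_mutex);
 *	... modify subsys list, flags, release_agent_path ...
 *	mutex_unlock(&cgroup_root_mutex);
 *	mutex_unlock(&cgroup_mutex);
 *
 * while a reader may hold either of the two mutexes alone.
 */
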
/*
 * cgroup destruction makes heavy use of work items and there can be a lot
 * of concurrent destructions. Use a separate workqueue so that cgroup
 * destruction work items don't end up filling up max_active of system_wq
 * which may lead to deadlock.
 */
static struct workqueue_struct *cgroup_destroy_wq;

/*
 * Generate an array of cgroup subsystem pointers. At boot time, this is
 * populated with the built in subsystems, and modular subsystems are
 * registered after that. The mutable section of this array is protected by
 * cgroup_mutex.
 */
#define SUBSYS(_x) [_x ## _subsys_id] = &_x ## _subsys,
#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
static struct cgroup_subsys *subsys[CGROUP_SUBSYS_COUNT] = {
#include <linux/cgroup_subsys.h>
};

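/*
 * For instance, assuming the cpuset controller is built in (it is listed
 * in cgroup_subsys.h under CONFIG_CPUSETS), the SUBSYS() entry above
 * expands to:
 *
 *	[cpuset_subsys_id] = &cpuset_subsys,
 */
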
/*
 * The "rootnode" hierarchy is the "dummy hierarchy", reserved for the
 * subsystems that are otherwise unattached - it never has more than a
 * single cgroup, and all tasks are part of that cgroup.
 */
static struct cgroupfs_root rootnode;

/*
 * cgroupfs file entry, pointed to from leaf dentry->d_fsdata.
 */
struct cfent {
	struct list_head node;
	struct dentry *dentry;
	struct cftype *type;

	/* file xattrs */
	struct simple_xattrs xattrs;
};

/*
 * CSS ID -- ID per subsys's Cgroup Subsys State (CSS). Used only when
 * cgroup_subsys->use_id != 0.
 */
#define CSS_ID_MAX	(65535)
struct css_id {
	/*
	 * The css to which this ID points. This pointer is set to a valid
	 * value after the cgroup is populated. If the cgroup is removed,
	 * this will be NULL. This pointer is expected to be RCU-safe
	 * because destroy() is called after synchronize_rcu(). But for
	 * safe use, css_tryget() should be used to avoid races.
	 */
	struct cgroup_subsys_state __rcu *css;
	/*
	 * ID of this css.
	 */
	unsigned short id;
	/*
	 * Depth in the hierarchy which this ID belongs to.
	 */
	unsigned short depth;
	/*
	 * ID is freed by RCU. (and lookup routine is RCU safe.)
	 */
	struct rcu_head rcu_head;
	/*
	 * Hierarchy this CSS ID belongs to.
	 */
	unsigned short stack[0]; /* array of length (depth+1) */
};

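/*
 * For example (illustrative only, inferred from the depth/stack layout
 * above): a css at depth 2 records the IDs of its ancestors from the
 * root down, so stack[0] holds the root's ID, stack[1] the parent's,
 * and stack[2] == id, which makes ancestry tests a simple array lookup.
 */
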
/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct cgroup_event {
	/*
	 * Cgroup which the event belongs to.
	 */
	struct cgroup *cgrp;
	/*
	 * Control file the event is associated with.
	 */
	struct cftype *cft;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * All of the fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

/* The list of hierarchy roots */

static LIST_HEAD(roots);
static int root_count;

static DEFINE_IDA(hierarchy_ida);
static int next_hierarchy_id;
static DEFINE_SPINLOCK(hierarchy_id_lock);

/* dummytop is a shorthand for the dummy hierarchy's top cgroup */
#define dummytop (&rootnode.top_cgroup)

static struct cgroup_name root_cgroup_name = { .name = "/" };

/* This flag indicates whether tasks in the fork and exit paths should
 * check for fork/exit handlers to call. This avoids us having to do
 * extra work in the fork/exit path if none of the subsystems need to
 * be called.
 */
static int need_forkexit_callback __read_mostly;

static int cgroup_destroy_locked(struct cgroup *cgrp);
static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys,
			      struct cftype cfts[], bool is_add);

static int css_unbias_refcnt(int refcnt)
{
	return refcnt >= 0 ? refcnt : refcnt - CSS_DEACT_BIAS;
}

/* the current nr of refs, always >= 0 whether @css is deactivated or not */
static int css_refcnt(struct cgroup_subsys_state *css)
{
	int v = atomic_read(&css->refcnt);

	return css_unbias_refcnt(v);
}

/* convenient tests for these bits */
inline int cgroup_is_removed(const struct cgroup *cgrp)
{
	return test_bit(CGRP_REMOVED, &cgrp->flags);
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor. It also returns %true
 * if @cgrp == @ancestor. This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor)
{
	while (cgrp) {
		if (cgrp == ancestor)
			return true;
		cgrp = cgrp->parent;
	}
	return false;
}
EXPORT_SYMBOL_GPL(cgroup_is_descendant);

static int cgroup_is_releasable(const struct cgroup *cgrp)
{
	const int bits =
		(1 << CGRP_RELEASABLE) |
		(1 << CGRP_NOTIFY_ON_RELEASE);
	return (cgrp->flags & bits) == bits;
}

static int notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

/*
 * for_each_subsys() allows you to iterate on each subsystem attached to
 * an active hierarchy
 */
#define for_each_subsys(_root, _ss) \
list_for_each_entry(_ss, &_root->subsys_list, sibling)

/* for_each_active_root() allows you to iterate across the active hierarchies */
#define for_each_active_root(_root) \
list_for_each_entry(_root, &roots, root_list)

static inline struct cgroup *__d_cgrp(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

static inline struct cfent *__d_cfe(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

static inline struct cftype *__d_cft(struct dentry *dentry)
{
	return __d_cfe(dentry)->type;
}

/**
 * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
 * @cgrp: the cgroup to be checked for liveness
 *
 * On success, returns true; the caller is responsible for unlocking
 * cgroup_mutex later. On failure, returns false with no lock held.
 */
static bool cgroup_lock_live_group(struct cgroup *cgrp)
{
	mutex_lock(&cgroup_mutex);
	if (cgroup_is_removed(cgrp)) {
		mutex_unlock(&cgroup_mutex);
		return false;
	}
	return true;
}

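/*
 * For example, a typical caller (this is the pattern the control-file
 * handlers later in this file follow) looks like:
 *
 *	if (!cgroup_lock_live_group(cgrp))
 *		return -ENODEV;
 *	... operate on the live cgroup ...
 *	mutex_unlock(&cgroup_mutex);
 */
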
/* the list of cgroups eligible for automatic release. Protected by
 * release_list_lock */
static LIST_HEAD(release_list);
static DEFINE_RAW_SPINLOCK(release_list_lock);
static void cgroup_release_agent(struct work_struct *work);
static DECLARE_WORK(release_agent_work, cgroup_release_agent);
static void check_for_release(struct cgroup *cgrp);

/* Link structure for associating css_set objects with cgroups */
struct cg_cgroup_link {
	/*
	 * List running through cg_cgroup_links associated with a
	 * cgroup, anchored on cgroup->css_sets
	 */
	struct list_head cgrp_link_list;
	struct cgroup *cgrp;
	/*
	 * List running through cg_cgroup_links pointing at a
	 * single css_set object, anchored on css_set->cg_links
	 */
	struct list_head cg_link_list;
	struct css_set *cg;
};

/* The default css_set - used by init and its children prior to any
 * hierarchies being mounted. It contains a pointer to the root state
 * for each subsystem. Also used to anchor the list of css_sets. Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */

static struct css_set init_css_set;
static struct cg_cgroup_link init_css_set_link;

static int cgroup_init_idr(struct cgroup_subsys *ss,
			   struct cgroup_subsys_state *css);

/* css_set_lock protects the list of css_set objects, and the
 * chain of tasks off each css_set. Nests outside task->alloc_lock
 * due to cgroup_iter_start() */
static DEFINE_RWLOCK(css_set_lock);
static int css_set_count;

/*
 * hash table for css_set objects. This improves the performance of finding
 * an existing css_set. This hash doesn't (currently) take into
 * account cgroups in empty hierarchies.
 */
#define CSS_SET_HASH_BITS	7
static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);

static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
{
	int i;
	unsigned long key = 0UL;

	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++)
		key += (unsigned long)css[i];
	key = (key >> 16) ^ key;

	return key;
}

/* We don't maintain the lists running through each css_set to its
 * tasks until after the first call to cgroup_iter_start(). This
 * reduces the fork()/exit() overhead for people who have cgroups
 * compiled into their kernel but not actually in use */
static int use_task_css_set_links __read_mostly;

static void __put_css_set(struct css_set *cg, int taskexit)
{
	struct cg_cgroup_link *link;
	struct cg_cgroup_link *saved_link;
	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it. Similar to atomic_dec_and_lock(), but for an
	 * rwlock
	 */
	if (atomic_add_unless(&cg->refcount, -1, 1))
		return;
	write_lock(&css_set_lock);
	if (!atomic_dec_and_test(&cg->refcount)) {
		write_unlock(&css_set_lock);
		return;
	}

	/* This css_set is dead. Unlink it and release cgroup refcounts */
	hash_del(&cg->hlist);
	css_set_count--;

	list_for_each_entry_safe(link, saved_link, &cg->cg_links,
				 cg_link_list) {
		struct cgroup *cgrp = link->cgrp;
		list_del(&link->cg_link_list);
		list_del(&link->cgrp_link_list);

		/*
		 * We may not be holding cgroup_mutex, and if cgrp->count is
		 * dropped to 0 the cgroup can be destroyed at any time, hence
		 * rcu_read_lock is used to keep it alive.
		 */
		rcu_read_lock();
		if (atomic_dec_and_test(&cgrp->count) &&
		    notify_on_release(cgrp)) {
			if (taskexit)
				set_bit(CGRP_RELEASABLE, &cgrp->flags);
			check_for_release(cgrp);
		}
		rcu_read_unlock();

		kfree(link);
	}

	write_unlock(&css_set_lock);
	kfree_rcu(cg, rcu_head);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cg)
{
	atomic_inc(&cg->refcount);
}

static inline void put_css_set(struct css_set *cg)
{
	__put_css_set(cg, 0);
}

static inline void put_css_set_taskexit(struct css_set *cg)
{
	__put_css_set(cg, 1);
}

/*
 * compare_css_sets - helper function for find_existing_css_set().
 * @cg: candidate css_set being tested
 * @old_cg: existing css_set for a task
 * @new_cgrp: cgroup that's being entered by the task
 * @template: desired set of css pointers in css_set (pre-calculated)
 *
 * Returns true if "cg" matches "old_cg" except for the hierarchy
 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
 */
static bool compare_css_sets(struct css_set *cg,
			     struct css_set *old_cg,
			     struct cgroup *new_cgrp,
			     struct cgroup_subsys_state *template[])
{
	struct list_head *l1, *l2;

	if (memcmp(template, cg->subsys, sizeof(cg->subsys))) {
		/* Not all subsystems matched */
		return false;
	}

	/*
	 * Compare cgroup pointers in order to distinguish between
	 * different cgroups in hierarchies with no subsystems. We
	 * could get by with just this check alone (and skip the
	 * memcmp above) but on most setups the memcmp check will
	 * avoid the need for this more expensive check on almost all
	 * candidates.
	 */

	l1 = &cg->cg_links;
	l2 = &old_cg->cg_links;
	while (1) {
		struct cg_cgroup_link *cgl1, *cgl2;
		struct cgroup *cg1, *cg2;

		l1 = l1->next;
		l2 = l2->next;
		/* See if we reached the end - both lists are the same length. */
		if (l1 == &cg->cg_links) {
			BUG_ON(l2 != &old_cg->cg_links);
			break;
		} else {
			BUG_ON(l2 == &old_cg->cg_links);
		}
		/* Locate the cgroups associated with these links. */
		cgl1 = list_entry(l1, struct cg_cgroup_link, cg_link_list);
		cgl2 = list_entry(l2, struct cg_cgroup_link, cg_link_list);
		cg1 = cgl1->cgrp;
		cg2 = cgl2->cgrp;
		/* Hierarchies should be linked in the same order. */
		BUG_ON(cg1->root != cg2->root);

		/*
		 * If this hierarchy is the hierarchy of the cgroup
		 * that's changing, then we need to check that this
		 * css_set points to the new cgroup; if it's any other
		 * hierarchy, then this css_set should point to the
		 * same cgroup as the old css_set.
		 */
		if (cg1->root == new_cgrp->root) {
			if (cg1 != new_cgrp)
				return false;
		} else {
			if (cg1 != cg2)
				return false;
		}
	}
	return true;
}

/*
 * find_existing_css_set() is a helper for
 * find_css_set(), and checks to see whether an existing
 * css_set is suitable.
 *
 * oldcg: the css_set that we're using before the cgroup
 * transition
 *
 * cgrp: the cgroup that we're moving into
 *
 * template: location in which to build the desired set of subsystem
 * state objects for the new css_set
 */
static struct css_set *find_existing_css_set(
	struct css_set *oldcg,
	struct cgroup *cgrp,
	struct cgroup_subsys_state *template[])
{
	int i;
	struct cgroupfs_root *root = cgrp->root;
	struct css_set *cg;
	unsigned long key;

	/*
	 * Build the set of subsystem state objects that we want to see in the
	 * new css_set. While subsystems can change globally, the entries here
	 * won't change, so no need for locking.
	 */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		if (root->subsys_mask & (1UL << i)) {
			/* Subsystem is in this hierarchy. So we want
			 * the subsystem state from the new
			 * cgroup */
			template[i] = cgrp->subsys[i];
		} else {
			/* Subsystem is not in this hierarchy, so we
			 * don't want to change the subsystem state */
			template[i] = oldcg->subsys[i];
		}
	}

	key = css_set_hash(template);
	hash_for_each_possible(css_set_table, cg, hlist, key) {
		if (!compare_css_sets(cg, oldcg, cgrp, template))
			continue;

		/* This css_set matches what we need */
		return cg;
	}

	/* No existing css_set matched */
	return NULL;
}

static void free_cg_links(struct list_head *tmp)
{
	struct cg_cgroup_link *link;
	struct cg_cgroup_link *saved_link;

	list_for_each_entry_safe(link, saved_link, tmp, cgrp_link_list) {
		list_del(&link->cgrp_link_list);
		kfree(link);
	}
}

/*
 * allocate_cg_links() allocates "count" cg_cgroup_link structures
 * and chains them on tmp through their cgrp_link_list fields. Returns 0 on
 * success or a negative error
 */
static int allocate_cg_links(int count, struct list_head *tmp)
{
	struct cg_cgroup_link *link;
	int i;
	INIT_LIST_HEAD(tmp);
	for (i = 0; i < count; i++) {
		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			free_cg_links(tmp);
			return -ENOMEM;
		}
		list_add(&link->cgrp_link_list, tmp);
	}
	return 0;
}

/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_cg_links: cg_cgroup_link objects allocated by allocate_cg_links()
 * @cg: the css_set to be linked
 * @cgrp: the destination cgroup
 */
static void link_css_set(struct list_head *tmp_cg_links,
			 struct css_set *cg, struct cgroup *cgrp)
{
	struct cg_cgroup_link *link;

	BUG_ON(list_empty(tmp_cg_links));
	link = list_first_entry(tmp_cg_links, struct cg_cgroup_link,
				cgrp_link_list);
	link->cg = cg;
	link->cgrp = cgrp;
	atomic_inc(&cgrp->count);
	list_move(&link->cgrp_link_list, &cgrp->css_sets);
	/*
	 * Always add links to the tail of the list so that the list
	 * is sorted by order of hierarchy creation
	 */
	list_add_tail(&link->cg_link_list, &cg->cg_links);
}

/*
 * find_css_set() takes an existing css_set and a
 * cgroup object, and returns a css_set object that's
 * equivalent to the old set, but with the given cgroup
 * substituted into the appropriate hierarchy. Must be called with
 * cgroup_mutex held
 */
static struct css_set *find_css_set(
	struct css_set *oldcg, struct cgroup *cgrp)
{
	struct css_set *res;
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];

	struct list_head tmp_cg_links;

	struct cg_cgroup_link *link;
	unsigned long key;

	/* First see if we already have a css_set that matches
	 * the desired set */
	read_lock(&css_set_lock);
	res = find_existing_css_set(oldcg, cgrp, template);
	if (res)
		get_css_set(res);
	read_unlock(&css_set_lock);

	if (res)
		return res;

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	/* Allocate all the cg_cgroup_link objects that we'll need */
	if (allocate_cg_links(root_count, &tmp_cg_links) < 0) {
		kfree(res);
		return NULL;
	}

	atomic_set(&res->refcount, 1);
	INIT_LIST_HEAD(&res->cg_links);
	INIT_LIST_HEAD(&res->tasks);
	INIT_HLIST_NODE(&res->hlist);

	/* Copy the set of subsystem state objects generated in
	 * find_existing_css_set() */
	memcpy(res->subsys, template, sizeof(res->subsys));

	write_lock(&css_set_lock);
	/* Add reference counts and links from the new css_set. */
	list_for_each_entry(link, &oldcg->cg_links, cg_link_list) {
		struct cgroup *c = link->cgrp;
		if (c->root == cgrp->root)
			c = cgrp;
		link_css_set(&tmp_cg_links, res, c);
	}

	BUG_ON(!list_empty(&tmp_cg_links));

	css_set_count++;

	/* Add this css_set to the hash table */
	key = css_set_hash(res->subsys);
	hash_add(css_set_table, &res->hlist, key);

	write_unlock(&css_set_lock);

	return res;
}

/*
 * Return the cgroup for "task" from the given hierarchy. Must be
 * called with cgroup_mutex held.
 */
static struct cgroup *task_cgroup_from_root(struct task_struct *task,
					    struct cgroupfs_root *root)
{
	struct css_set *css;
	struct cgroup *res = NULL;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));
	read_lock(&css_set_lock);
	/*
	 * No need to lock the task - since we hold cgroup_mutex the
	 * task can't change groups, so the only thing that can happen
	 * is that it exits and its css is set back to init_css_set.
	 */
	css = task->cgroups;
	if (css == &init_css_set) {
		res = &root->top_cgroup;
	} else {
		struct cg_cgroup_link *link;
		list_for_each_entry(link, &css->cg_links, cg_link_list) {
			struct cgroup *c = link->cgrp;
			if (c->root == root) {
				res = c;
				break;
			}
		}
	}
	read_unlock(&css_set_lock);
	BUG_ON(!res);
	return res;
}

/*
 * There is one global cgroup mutex. We also require taking
 * task_lock() when dereferencing a task's cgroup subsys pointers.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing. However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again. Because a count of zero
 * means that no tasks are currently attached, therefore there is no
 * way a task attached to that cgroup can fork (the other way to
 * increment the count). So code holding cgroup_mutex can safely
 * assume that if the count is zero, it will stay zero. Similarly, if
 * a task holds cgroup_mutex on a cgroup with zero count, it
 * knows that the cgroup won't be removed, as cgroup_rmdir()
 * needs that mutex.
 *
 * The fork and exit callbacks, cgroup_fork() and cgroup_exit(), don't
 * (usually) take cgroup_mutex. These are the two most performance
 * critical pieces of code here. The exception occurs on cgroup_exit(),
 * when a task in a notify_on_release cgroup exits. Then cgroup_mutex
 * is taken, and if the cgroup count is zero, a usermode call is made
 * to the release agent with the name of the cgroup (path relative to
 * the root of the cgroup file system) as the argument.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty. Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), therefore, top_cgroup
 * always has either children cgroups and/or using tasks. So we don't
 * need a special hack to ensure that top_cgroup cannot be deleted.
 *
 * The task_lock() exception
 *
 * The need for this exception arises from the action of
 * cgroup_attach_task(), which overwrites one task's cgroup pointer with
 * another. It does so using cgroup_mutex, however there are
 * several performance critical places that need to reference
 * task->cgroup without the expense of grabbing a system global
 * mutex. Therefore except as noted below, when dereferencing or, as
 * in cgroup_attach_task(), modifying a task's cgroup pointer we use
 * task_lock(), which acts on a spinlock (task->alloc_lock) already in
 * the task_struct routinely used for such matters.
 *
 * P.S. One more locking exception. RCU is used to guard the
 * update of a task's cgroup pointer by cgroup_attach_task()
 */

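/*
 * For example (a sketch of the rules above, not code from this file), a
 * stable dereference of a task's cgroup pointer looks like
 *
 *	task_lock(tsk);
 *	cg = tsk->cgroups;
 *	... use cg ...
 *	task_unlock(tsk);
 *
 * while a read-side path that only needs RCU protection can do
 *
 *	rcu_read_lock();
 *	cg = rcu_dereference(tsk->cgroups);
 *	... use cg, without blocking ...
 *	rcu_read_unlock();
 */
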
/*
 * A couple of forward declarations are required, due to a cyclic
 * reference loop:
 * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir ->
 * cgroup_add_file -> cgroup_create_file -> cgroup_dir_inode_operations
 * -> cgroup_mkdir.
 */

static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
static struct dentry *cgroup_lookup(struct inode *, struct dentry *, unsigned int);
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
static int cgroup_populate_dir(struct cgroup *cgrp, bool base_files,
			       unsigned long subsys_mask);
static const struct inode_operations cgroup_dir_inode_operations;
static const struct file_operations proc_cgroupstats_operations;

static struct backing_dev_info cgroup_backing_dev_info = {
	.name		= "cgroup",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

static int alloc_css_id(struct cgroup_subsys *ss,
			struct cgroup *parent, struct cgroup *child);

static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = current_fsgid();
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info;
	}
	return inode;
}

static struct cgroup_name *cgroup_alloc_name(struct dentry *dentry)
{
	struct cgroup_name *name;

	name = kmalloc(sizeof(*name) + dentry->d_name.len + 1, GFP_KERNEL);
	if (!name)
		return NULL;
	strcpy(name->name, dentry->d_name.name);
	return name;
}

static void cgroup_free_fn(struct work_struct *work)
{
	struct cgroup *cgrp = container_of(work, struct cgroup, free_work);
	struct cgroup_subsys *ss;

	mutex_lock(&cgroup_mutex);
	/*
	 * Release the subsystem state objects.
	 */
	for_each_subsys(cgrp->root, ss)
		ss->css_free(cgrp);

	cgrp->root->number_of_cgroups--;
	mutex_unlock(&cgroup_mutex);

	/*
	 * We get a ref to the parent's dentry, and put the ref when
	 * this cgroup is being freed, so it's guaranteed that the
	 * parent won't be destroyed before its children.
	 */
	dput(cgrp->parent->dentry);

	ida_simple_remove(&cgrp->root->cgroup_ida, cgrp->id);

	/*
	 * Drop the active superblock reference that we took when we
	 * created the cgroup. This will free cgrp->root, if we are
	 * holding the last reference to @sb.
	 */
	deactivate_super(cgrp->root->sb);

	/*
	 * If we're getting rid of the cgroup, refcount should ensure
	 * that there are no pidlists left.
	 */
	BUG_ON(!list_empty(&cgrp->pidlists));

	simple_xattrs_free(&cgrp->xattrs);

	kfree(rcu_dereference_raw(cgrp->name));
	kfree(cgrp);
}

static void cgroup_free_rcu(struct rcu_head *head)
{
	struct cgroup *cgrp = container_of(head, struct cgroup, rcu_head);

	queue_work(cgroup_destroy_wq, &cgrp->free_work);
}

static void cgroup_diput(struct dentry *dentry, struct inode *inode)
{
	/* is dentry a directory? If so, free the associated cgroup */
	if (S_ISDIR(inode->i_mode)) {
		struct cgroup *cgrp = dentry->d_fsdata;

		BUG_ON(!(cgroup_is_removed(cgrp)));
		call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
	} else {
		struct cfent *cfe = __d_cfe(dentry);
		struct cgroup *cgrp = dentry->d_parent->d_fsdata;

		WARN_ONCE(!list_empty(&cfe->node) &&
			  cgrp != &cgrp->root->top_cgroup,
			  "cfe still linked for %s\n", cfe->type->name);
		simple_xattrs_free(&cfe->xattrs);
		kfree(cfe);
	}
	iput(inode);
}

static int cgroup_delete(const struct dentry *d)
{
	return 1;
}

static void remove_dir(struct dentry *d)
{
	struct dentry *parent = dget(d->d_parent);

	d_delete(d);
	simple_rmdir(parent->d_inode, d);
	dput(parent);
}

static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
{
	struct cfent *cfe;

	lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex);
	lockdep_assert_held(&cgroup_mutex);

	/*
	 * If we're doing cleanup due to failure of cgroup_create(),
	 * the corresponding @cfe may not exist.
	 */
	list_for_each_entry(cfe, &cgrp->files, node) {
		struct dentry *d = cfe->dentry;

		if (cft && cfe->type != cft)
			continue;

		dget(d);
		d_delete(d);
		simple_unlink(cgrp->dentry->d_inode, d);
		list_del_init(&cfe->node);
		dput(d);

		break;
	}
}

/**
 * cgroup_clear_directory - selective removal of base and subsystem files
 * @dir: directory containing the files
 * @base_files: true if the base files should be removed
 * @subsys_mask: mask of the subsystem ids whose files should be removed
 */
static void cgroup_clear_directory(struct dentry *dir, bool base_files,
				   unsigned long subsys_mask)
{
	struct cgroup *cgrp = __d_cgrp(dir);
	struct cgroup_subsys *ss;

	for_each_subsys(cgrp->root, ss) {
		struct cftype_set *set;
		if (!test_bit(ss->subsys_id, &subsys_mask))
			continue;
		list_for_each_entry(set, &ss->cftsets, node)
			cgroup_addrm_files(cgrp, NULL, set->cfts, false);
	}
	if (base_files) {
		while (!list_empty(&cgrp->files))
			cgroup_rm_file(cgrp, NULL);
	}
}

/*
 * NOTE: the dentry must have been dget()'ed
 */
static void cgroup_d_remove_dir(struct dentry *dentry)
{
	struct dentry *parent;
	struct cgroupfs_root *root = dentry->d_sb->s_fs_info;

	cgroup_clear_directory(dentry, true, root->subsys_mask);

	parent = dentry->d_parent;
	spin_lock(&parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	list_del_init(&dentry->d_child);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);
	remove_dir(dentry);
}

/*
 * Call with cgroup_mutex held. Drops reference counts on modules, including
 * any duplicate ones that parse_cgroupfs_options took. If this function
 * returns an error, no reference counts are touched.
 */
static int rebind_subsystems(struct cgroupfs_root *root,
			     unsigned long final_subsys_mask)
{
	unsigned long added_mask, removed_mask;
	struct cgroup *cgrp = &root->top_cgroup;
	int i;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));
	BUG_ON(!mutex_is_locked(&cgroup_root_mutex));

	removed_mask = root->actual_subsys_mask & ~final_subsys_mask;
	added_mask = final_subsys_mask & ~root->actual_subsys_mask;
	/* Check that any added subsystems are currently free */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		unsigned long bit = 1UL << i;
		struct cgroup_subsys *ss = subsys[i];
		if (!(bit & added_mask))
			continue;
		/*
		 * Nobody should tell us to rebind a subsystem that doesn't
		 * exist: parse_cgroupfs_options should catch that case and
		 * refcounts ensure that subsystems won't disappear once
		 * selected.
		 */
		BUG_ON(ss == NULL);
		if (ss->root != &rootnode) {
			/* Subsystem isn't free */
			return -EBUSY;
		}
	}

	/* Currently we don't handle adding/removing subsystems when
	 * any child cgroups exist. This is theoretically supportable
	 * but involves complex error handling, so it's being left until
	 * later */
	if (root->number_of_cgroups > 1)
		return -EBUSY;

	/* Process each subsystem */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		unsigned long bit = 1UL << i;
		if (bit & added_mask) {
			/* We're binding this subsystem to this hierarchy */
			BUG_ON(ss == NULL);
			BUG_ON(cgrp->subsys[i]);
			BUG_ON(!dummytop->subsys[i]);
			BUG_ON(dummytop->subsys[i]->cgroup != dummytop);
			cgrp->subsys[i] = dummytop->subsys[i];
			cgrp->subsys[i]->cgroup = cgrp;
			list_move(&ss->sibling, &root->subsys_list);
			ss->root = root;
			if (ss->bind)
				ss->bind(cgrp);
			/* refcount was already taken, and we're keeping it */
		} else if (bit & removed_mask) {
			/* We're removing this subsystem */
			BUG_ON(ss == NULL);
			BUG_ON(cgrp->subsys[i] != dummytop->subsys[i]);
			BUG_ON(cgrp->subsys[i]->cgroup != cgrp);
			if (ss->bind)
				ss->bind(dummytop);
			dummytop->subsys[i]->cgroup = dummytop;
			cgrp->subsys[i] = NULL;
			subsys[i]->root = &rootnode;
			list_move(&ss->sibling, &rootnode.subsys_list);
			/* subsystem is now free - drop reference on module */
			module_put(ss->module);
		} else if (bit & final_subsys_mask) {
			/* Subsystem state should already exist */
			BUG_ON(ss == NULL);
			BUG_ON(!cgrp->subsys[i]);
			/*
			 * a refcount was taken, but we already had one, so
			 * drop the extra reference.
			 */
			module_put(ss->module);
#ifdef CONFIG_MODULE_UNLOAD
			BUG_ON(ss->module && !module_refcount(ss->module));
#endif
		} else {
			/* Subsystem state shouldn't exist */
			BUG_ON(cgrp->subsys[i]);
		}
	}
	root->subsys_mask = root->actual_subsys_mask = final_subsys_mask;

	return 0;
}

static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry)
{
	struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
	struct cgroup_subsys *ss;

	mutex_lock(&cgroup_root_mutex);
	for_each_subsys(root, ss)
		seq_printf(seq, ",%s", ss->name);
	if (root->flags & CGRP_ROOT_SANE_BEHAVIOR)
		seq_puts(seq, ",sane_behavior");
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");
	if (strlen(root->release_agent_path))
		seq_printf(seq, ",release_agent=%s", root->release_agent_path);
	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->top_cgroup.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_printf(seq, ",name=%s", root->name);
	mutex_unlock(&cgroup_root_mutex);
	return 0;
}

struct cgroup_sb_opts {
	unsigned long subsys_mask;
	unsigned long flags;
	char *release_agent;
	bool cpuset_clone_children;
	char *name;
	/* User explicitly requested empty subsystem */
	bool none;

	struct cgroupfs_root *new_root;
};

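/*
 * For example, a mount like
 *
 *	mount -t cgroup -o cpuset,memory,name=foo cgroup /mnt
 *
 * (assuming the cpuset and memory controllers are built in) is parsed by
 * parse_cgroupfs_options() below into subsys_mask with the cpuset and
 * memory bits set, name == "foo", and none == false.
 */
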
/*
 * Convert a hierarchy specifier into a bitmask of subsystems and flags. Call
 * with cgroup_mutex held to protect the subsys[] array. This function takes
 * refcounts on subsystems to be used, unless it returns error, in which case
 * no refcounts are taken.
 */
static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	unsigned long mask = (unsigned long)-1;
	int i;
	bool module_pin_failed = false;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));

#ifdef CONFIG_CPUSETS
	mask = ~(1UL << cpuset_subsys_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "__DEVEL__sane_behavior")) {
			opts->flags |= CGRP_ROOT_SANE_BEHAVIOR;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			opts->flags |= CGRP_ROOT_NOPREFIX;
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->cpuset_clone_children = true;
			continue;
		}
		if (!strcmp(token, "xattr")) {
			opts->flags |= CGRP_ROOT_XATTR;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;

			continue;
		}

		for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];
			if (ss == NULL)
				continue;
			if (strcmp(token, ss->name))
				continue;
			if (ss->disabled)
				continue;

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
			set_bit(i, &opts->subsys_mask);
			one_ss = true;

			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}

	/*
	 * If the 'all' option was specified, select all the subsystems;
	 * otherwise, if the 'none', 'name=' and subsystem name options
	 * were not specified, default to 'all'.
	 */
	if (all_ss || (!one_ss && !opts->none && !opts->name)) {
		for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];
			if (ss == NULL)
				continue;
			if (ss->disabled)
				continue;
			set_bit(i, &opts->subsys_mask);
		}
	}

	/* Consistency checks */

	if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) {
		pr_warning("cgroup: sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n");

		if (opts->flags & CGRP_ROOT_NOPREFIX) {
			pr_err("cgroup: sane_behavior: noprefix is not allowed\n");
			return -EINVAL;
		}

		if (opts->cpuset_clone_children) {
			pr_err("cgroup: sane_behavior: clone_children is not allowed\n");
			return -EINVAL;
		}
	}

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
		return -EINVAL;


	/* Can't specify "none" and some subsystems */
	if (opts->subsys_mask && opts->none)
		return -EINVAL;

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!opts->subsys_mask && !opts->name)
		return -EINVAL;

	/*
	 * Grab references on all the modules we'll need, so the subsystems
	 * don't dance around before rebind_subsystems attaches them. This may
	 * take duplicate reference counts on a subsystem that's already used,
	 * but rebind_subsystems handles this case.
	 */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		unsigned long bit = 1UL << i;

		if (!(bit & opts->subsys_mask))
			continue;
		if (!try_module_get(subsys[i]->module)) {
			module_pin_failed = true;
			break;
		}
	}
	if (module_pin_failed) {
		/*
		 * Oops, one of the modules was going away. This means that we
		 * raced with a module_delete call, and to the user this is
		 * essentially a "subsystem doesn't exist" case.
		 */
		for (i--; i >= 0; i--) {
			/* drop refcounts only on the ones we took */
			unsigned long bit = 1UL << i;

			if (!(bit & opts->subsys_mask))
				continue;
			module_put(subsys[i]->module);
		}
		return -ENOENT;
	}

	return 0;
}

static void drop_parsed_module_refcounts(unsigned long subsys_mask)
{
	int i;
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		unsigned long bit = 1UL << i;

		if (!(bit & subsys_mask))
			continue;
		module_put(subsys[i]->module);
	}
}

static int cgroup_remount(struct super_block *sb, int *flags, char *data)
{
	int ret = 0;
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	struct cgroup_sb_opts opts;
	unsigned long added_mask, removed_mask;

	if (root->flags & CGRP_ROOT_SANE_BEHAVIOR) {
		pr_err("cgroup: sane_behavior: remount is not allowed\n");
		return -EINVAL;
	}

	mutex_lock(&cgrp->dentry->d_inode->i_mutex);
	mutex_lock(&cgroup_mutex);
	mutex_lock(&cgroup_root_mutex);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	if (opts.subsys_mask != root->actual_subsys_mask || opts.release_agent)
		pr_warning("cgroup: option changes via remount are deprecated (pid=%d comm=%s)\n",
			   task_tgid_nr(current), current->comm);

	added_mask = opts.subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~opts.subsys_mask;

	/* Don't allow flags or name to change at remount */
	if (opts.flags != root->flags ||
	    (opts.name && strcmp(opts.name, root->name))) {
		ret = -EINVAL;
		drop_parsed_module_refcounts(opts.subsys_mask);
		goto out_unlock;
	}

	/*
	 * Clear out the files of subsystems that should be removed, do
	 * this before rebind_subsystems, since rebind_subsystems may
	 * change this hierarchy's subsys_list.
	 */
	cgroup_clear_directory(cgrp->dentry, false, removed_mask);

	ret = rebind_subsystems(root, opts.subsys_mask);
	if (ret) {
		/* rebind_subsystems failed, re-populate the removed files */
		cgroup_populate_dir(cgrp, false, removed_mask);
		drop_parsed_module_refcounts(opts.subsys_mask);
		goto out_unlock;
	}

	/* re-populate subsystem files */
	cgroup_populate_dir(cgrp, false, added_mask);

	if (opts.release_agent)
		strcpy(root->release_agent_path, opts.release_agent);
out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_root_mutex);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
	return ret;
}

static const struct super_operations cgroup_ops = {
	.statfs = simple_statfs,
	.drop_inode = generic_delete_inode,
	.show_options = cgroup_show_options,
	.remount_fs = cgroup_remount,
};

static void init_cgroup_housekeeping(struct cgroup *cgrp)
{
	INIT_LIST_HEAD(&cgrp->sibling);
	INIT_LIST_HEAD(&cgrp->children);
	INIT_LIST_HEAD(&cgrp->files);
	INIT_LIST_HEAD(&cgrp->css_sets);
	INIT_LIST_HEAD(&cgrp->allcg_node);
	INIT_LIST_HEAD(&cgrp->release_list);
	INIT_LIST_HEAD(&cgrp->pidlists);
	INIT_WORK(&cgrp->free_work, cgroup_free_fn);
	mutex_init(&cgrp->pidlist_mutex);
	INIT_LIST_HEAD(&cgrp->event_list);
	spin_lock_init(&cgrp->event_list_lock);
	simple_xattrs_init(&cgrp->xattrs);
}

static void init_cgroup_root(struct cgroupfs_root *root)
{
	struct cgroup *cgrp = &root->top_cgroup;

	INIT_LIST_HEAD(&root->subsys_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->allcg_list);
	root->number_of_cgroups = 1;
	cgrp->root = root;
	cgrp->name = &root_cgroup_name;
	init_cgroup_housekeeping(cgrp);
	list_add_tail(&cgrp->allcg_node, &root->allcg_list);
}

static bool init_root_id(struct cgroupfs_root *root)
{
	int ret = 0;

	do {
		if (!ida_pre_get(&hierarchy_ida, GFP_KERNEL))
			return false;
		spin_lock(&hierarchy_id_lock);
		/* Try to allocate the next unused ID */
		ret = ida_get_new_above(&hierarchy_ida, next_hierarchy_id,
					&root->hierarchy_id);
		if (ret == -ENOSPC)
			/* Try again starting from 0 */
			ret = ida_get_new(&hierarchy_ida, &root->hierarchy_id);
		if (!ret) {
			next_hierarchy_id = root->hierarchy_id + 1;
		} else if (ret != -EAGAIN) {
			/* Can only get here if the 31-bit IDR is full ... */
			BUG_ON(ret);
		}
		spin_unlock(&hierarchy_id_lock);
	} while (ret);
	return true;
}

static int cgroup_test_super(struct super_block *sb, void *data)
{
	struct cgroup_sb_opts *opts = data;
	struct cgroupfs_root *root = sb->s_fs_info;

	/* If we asked for a name then it must match */
	if (opts->name && strcmp(opts->name, root->name))
		return 0;

	/*
	 * If we asked for subsystems (or explicitly for no
	 * subsystems) then they must match
	 */
	if ((opts->subsys_mask || opts->none)
	    && (opts->subsys_mask != root->subsys_mask))
		return 0;

	return 1;
}

static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
{
	struct cgroupfs_root *root;

	if (!opts->subsys_mask && !opts->none)
		return NULL;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	if (!init_root_id(root)) {
		kfree(root);
		return ERR_PTR(-ENOMEM);
	}
	init_cgroup_root(root);

	root->subsys_mask = opts->subsys_mask;
	root->flags = opts->flags;
	ida_init(&root->cgroup_ida);
	if (opts->release_agent)
		strcpy(root->release_agent_path, opts->release_agent);
	if (opts->name)
		strcpy(root->name, opts->name);
	if (opts->cpuset_clone_children)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->top_cgroup.flags);
	return root;
}

static void cgroup_drop_root(struct cgroupfs_root *root)
{
	if (!root)
		return;

	BUG_ON(!root->hierarchy_id);
	spin_lock(&hierarchy_id_lock);
	ida_remove(&hierarchy_ida, root->hierarchy_id);
	spin_unlock(&hierarchy_id_lock);
	ida_destroy(&root->cgroup_ida);
	kfree(root);
}

static int cgroup_set_super(struct super_block *sb, void *data)
{
	int ret;
	struct cgroup_sb_opts *opts = data;

	/* If we don't have a new root, we can't set up a new sb */
	if (!opts->new_root)
		return -EINVAL;

	BUG_ON(!opts->subsys_mask && !opts->none);

	ret = set_anon_super(sb, NULL);
	if (ret)
		return ret;

	sb->s_fs_info = opts->new_root;
	opts->new_root->sb = sb;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = CGROUP_SUPER_MAGIC;
	sb->s_op = &cgroup_ops;

	return 0;
}

static int cgroup_get_rootdir(struct super_block *sb)
{
	static const struct dentry_operations cgroup_dops = {
		.d_iput = cgroup_diput,
		.d_delete = cgroup_delete,
	};

	struct inode *inode =
		cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb);

	if (!inode)
		return -ENOMEM;

	inode->i_fop = &simple_dir_operations;
	inode->i_op = &cgroup_dir_inode_operations;
	/* directories start off with i_nlink == 2 (for "." entry) */
	inc_nlink(inode);
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	/* for everything else we want ->d_op set */
	sb->s_d_op = &cgroup_dops;
	return 0;
}

static struct dentry *cgroup_mount(struct file_system_type *fs_type,
				   int flags, const char *unused_dev_name,
				   void *data)
{
	struct cgroup_sb_opts opts;
	struct cgroupfs_root *root;
	int ret = 0;
	struct super_block *sb;
	struct cgroupfs_root *new_root;
	struct inode *inode;

	/* First find the desired set of subsystems */
	mutex_lock(&cgroup_mutex);
	ret = parse_cgroupfs_options(data, &opts);
	mutex_unlock(&cgroup_mutex);
	if (ret)
		goto out_err;

	/*
	 * Allocate a new cgroup root. We may not need it if we're
	 * reusing an existing hierarchy.
	 */
	new_root = cgroup_root_from_opts(&opts);
	if (IS_ERR(new_root)) {
		ret = PTR_ERR(new_root);
		goto drop_modules;
	}
	opts.new_root = new_root;

	/* Locate an existing or new sb for this hierarchy */
	sb = sget(fs_type, cgroup_test_super, cgroup_set_super, 0, &opts);
	if (IS_ERR(sb)) {
		ret = PTR_ERR(sb);
		cgroup_drop_root(opts.new_root);
		goto drop_modules;
	}

	root = sb->s_fs_info;
	BUG_ON(!root);
	if (root == opts.new_root) {
		/* We used the new root structure, so this is a new hierarchy */
		struct list_head tmp_cg_links;
		struct cgroup *root_cgrp = &root->top_cgroup;
		struct cgroupfs_root *existing_root;
		const struct cred *cred;
		int i;
		struct css_set *cg;

		BUG_ON(sb->s_root != NULL);

		ret = cgroup_get_rootdir(sb);
		if (ret)
			goto drop_new_super;
		inode = sb->s_root->d_inode;

		mutex_lock(&inode->i_mutex);
		mutex_lock(&cgroup_mutex);
		mutex_lock(&cgroup_root_mutex);

		/* Check for name clashes with existing mounts */
		ret = -EBUSY;
		if (strlen(root->name))
			for_each_active_root(existing_root)
				if (!strcmp(existing_root->name, root->name))
					goto unlock_drop;

		/*
		 * We're accessing css_set_count without locking
		 * css_set_lock here, but that's OK - it can only be
		 * increased by someone holding cgroup_lock, and
		 * that's us. The worst that can happen is that we
		 * have some link structures left over
		 */
		ret = allocate_cg_links(css_set_count, &tmp_cg_links);
		if (ret)
			goto unlock_drop;

		ret = rebind_subsystems(root, root->subsys_mask);
		if (ret == -EBUSY) {
			free_cg_links(&tmp_cg_links);
			goto unlock_drop;
		}
		/*
		 * There must be no failure case after here, since rebinding
		 * takes care of subsystems' refcounts, which are explicitly
		 * dropped in the failure exit path.
		 */

		/* EBUSY should be the only error here */
		BUG_ON(ret);

		list_add(&root->root_list, &roots);
		root_count++;

		sb->s_root->d_fsdata = root_cgrp;
		root->top_cgroup.dentry = sb->s_root;

		/* Link the top cgroup in this hierarchy into all
		 * the css_set objects */
		write_lock(&css_set_lock);
		hash_for_each(css_set_table, i, cg, hlist)
			link_css_set(&tmp_cg_links, cg, root_cgrp);
		write_unlock(&css_set_lock);

		free_cg_links(&tmp_cg_links);

		BUG_ON(!list_empty(&root_cgrp->children));
		BUG_ON(root->number_of_cgroups != 1);

		cred = override_creds(&init_cred);
		cgroup_populate_dir(root_cgrp, true, root->subsys_mask);
		revert_creds(cred);
		mutex_unlock(&cgroup_root_mutex);
		mutex_unlock(&cgroup_mutex);
		mutex_unlock(&inode->i_mutex);
	} else {
		/*
		 * We re-used an existing hierarchy - the new root (if
		 * any) is not needed
		 */
		cgroup_drop_root(opts.new_root);

		if (root->flags != opts.flags) {
			if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) {
				pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n");
				ret = -EINVAL;
				goto drop_new_super;
			} else {
				pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n");
			}
		}

		/* no subsys rebinding, so refcounts don't change */
		drop_parsed_module_refcounts(opts.subsys_mask);
	}

	kfree(opts.release_agent);
	kfree(opts.name);
	return dget(sb->s_root);

unlock_drop:
	mutex_unlock(&cgroup_root_mutex);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&inode->i_mutex);
drop_new_super:
	deactivate_locked_super(sb);
drop_modules:
	drop_parsed_module_refcounts(opts.subsys_mask);
out_err:
	kfree(opts.release_agent);
	kfree(opts.name);
	return ERR_PTR(ret);
}

static void cgroup_kill_sb(struct super_block *sb)
{
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	int ret;
	struct cg_cgroup_link *link;
	struct cg_cgroup_link *saved_link;

	BUG_ON(!root);

	BUG_ON(root->number_of_cgroups != 1);
	BUG_ON(!list_empty(&cgrp->children));

	mutex_lock(&cgroup_mutex);
	mutex_lock(&cgroup_root_mutex);

	/* Rebind all subsystems back to the default hierarchy */
	ret = rebind_subsystems(root, 0);
	/* Shouldn't be able to fail ... */
	BUG_ON(ret);

	/*
	 * Release all the links from css_sets to this hierarchy's
	 * root cgroup
	 */
	write_lock(&css_set_lock);

	list_for_each_entry_safe(link, saved_link, &cgrp->css_sets,
				 cgrp_link_list) {
		list_del(&link->cg_link_list);
		list_del(&link->cgrp_link_list);
		kfree(link);
	}
	write_unlock(&css_set_lock);

	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		root_count--;
	}

	mutex_unlock(&cgroup_root_mutex);
	mutex_unlock(&cgroup_mutex);

	simple_xattrs_free(&cgrp->xattrs);

	kill_litter_super(sb);
	cgroup_drop_root(root);
}

static struct file_system_type cgroup_fs_type = {
	.name = "cgroup",
	.mount = cgroup_mount,
	.kill_sb = cgroup_kill_sb,
};

static struct kobject *cgroup_kobj;

/**
 * cgroup_path - generate the path of a cgroup
 * @cgrp: the cgroup in question
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Writes path of cgroup into buf. Returns 0 on success, -errno on error.
 *
 * We can't generate the cgroup path using dentry->d_name, as accessing
 * dentry->d_name must be protected by the irq-unsafe dentry->d_lock or the
 * parent inode's i_mutex, while on the other hand cgroup_path() can be
 * called with some irq-safe spinlocks held.
 */
int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
{
	int ret = -ENAMETOOLONG;
	char *start;

	if (!cgrp->parent) {
		if (strlcpy(buf, "/", buflen) >= buflen)
			return -ENAMETOOLONG;
		return 0;
	}

	start = buf + buflen - 1;
	*start = '\0';

	rcu_read_lock();
	do {
		const char *name = cgroup_name(cgrp);
		int len;

		len = strlen(name);
		if ((start -= len) < buf)
			goto out;
		memcpy(start, name, len);

		if (--start < buf)
			goto out;
		*start = '/';

		cgrp = cgrp->parent;
	} while (cgrp->parent);
	ret = 0;
	memmove(buf, start, buf + buflen - start);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(cgroup_path);

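/*
 * For example, a caller that just wants to log the path (illustrative
 * only, not a snippet from this file):
 *
 *	char buf[PATH_MAX];
 *
 *	if (!cgroup_path(cgrp, buf, PATH_MAX))
 *		pr_info("cgroup path: %s\n", buf);
 *
 * The path is built from the leaf toward the root, which is why the
 * function above fills the buffer from the end and memmove()s the result.
 */
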
1836 /*
1837 * Control Group taskset
1838 */
1839 struct task_and_cgroup {
1840 struct task_struct *task;
1841 struct cgroup *cgrp;
1842 struct css_set *cg;
1843 };
1844
1845 struct cgroup_taskset {
1846 struct task_and_cgroup single;
1847 struct flex_array *tc_array;
1848 int tc_array_len;
1849 int idx;
1850 struct cgroup *cur_cgrp;
1851 };
1852
1853 /**
1854 * cgroup_taskset_first - reset taskset and return the first task
1855 * @tset: taskset of interest
1856 *
1857 * @tset iteration is initialized and the first task is returned.
1858 */
1859 struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset)
1860 {
1861 if (tset->tc_array) {
1862 tset->idx = 0;
1863 return cgroup_taskset_next(tset);
1864 } else {
1865 tset->cur_cgrp = tset->single.cgrp;
1866 return tset->single.task;
1867 }
1868 }
1869 EXPORT_SYMBOL_GPL(cgroup_taskset_first);
1870
1871 /**
1872 * cgroup_taskset_next - iterate to the next task in taskset
1873 * @tset: taskset of interest
1874 *
1875 * Return the next task in @tset. Iteration must have been initialized
1876 * with cgroup_taskset_first().
1877 */
1878 struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
1879 {
1880 struct task_and_cgroup *tc;
1881
1882 if (!tset->tc_array || tset->idx >= tset->tc_array_len)
1883 return NULL;
1884
1885 tc = flex_array_get(tset->tc_array, tset->idx++);
1886 tset->cur_cgrp = tc->cgrp;
1887 return tc->task;
1888 }
1889 EXPORT_SYMBOL_GPL(cgroup_taskset_next);
1890
1891 /**
1892 * cgroup_taskset_cur_cgroup - return the matching cgroup for the current task
1893 * @tset: taskset of interest
1894 *
1895 * Return the cgroup for the current (last returned) task of @tset. This
1896 * function must be preceded by either cgroup_taskset_first() or
1897 * cgroup_taskset_next().
1898 */
1899 struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset)
1900 {
1901 return tset->cur_cgrp;
1902 }
1903 EXPORT_SYMBOL_GPL(cgroup_taskset_cur_cgroup);
1904
1905 /**
1906 * cgroup_taskset_size - return the number of tasks in taskset
1907 * @tset: taskset of interest
1908 */
1909 int cgroup_taskset_size(struct cgroup_taskset *tset)
1910 {
1911 return tset->tc_array ? tset->tc_array_len : 1;
1912 }
1913 EXPORT_SYMBOL_GPL(cgroup_taskset_size);
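/*
 * Illustrative sketch: a subsystem's ->can_attach() method would
 * typically walk the taskset with the iterators above. my_can_attach()
 * and my_task_allowed() are hypothetical names.
 *
 *	static int my_can_attach(struct cgroup *cgrp,
 *				 struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *
 *		for (task = cgroup_taskset_first(tset); task;
 *		     task = cgroup_taskset_next(tset))
 *			if (!my_task_allowed(task))
 *				return -EPERM;
 *		return 0;
 *	}
 */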
1914
1915
1916 /*
1917 * cgroup_task_migrate - move a task from one cgroup to another.
1918 *
1919 * Must be called with cgroup_mutex and threadgroup locked.
1920 */
1921 static void cgroup_task_migrate(struct cgroup *oldcgrp,
1922 struct task_struct *tsk, struct css_set *newcg)
1923 {
1924 struct css_set *oldcg;
1925
1926 /*
1927 * We are synchronized through threadgroup_lock() against PF_EXITING
1928 * setting such that we can't race against cgroup_exit() changing the
1929 * css_set to init_css_set and dropping the old one.
1930 */
1931 WARN_ON_ONCE(tsk->flags & PF_EXITING);
1932 oldcg = tsk->cgroups;
1933
1934 task_lock(tsk);
1935 rcu_assign_pointer(tsk->cgroups, newcg);
1936 task_unlock(tsk);
1937
1938 /* Update the css_set linked lists if we're using them */
1939 write_lock(&css_set_lock);
1940 if (!list_empty(&tsk->cg_list))
1941 list_move(&tsk->cg_list, &newcg->tasks);
1942 write_unlock(&css_set_lock);
1943
1944 /*
1945 * We just gained a reference on oldcg by taking it from the task. As
1946 * trading it for newcg is protected by cgroup_mutex, we're safe to drop
1947 * it here; it will be freed under RCU.
1948 */
1949 set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
1950 put_css_set(oldcg);
1951 }
1952
1953 /**
1954 * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
1955 * @cgrp: the cgroup to attach to
1956 * @tsk: the task or the leader of the threadgroup to be attached
1957 * @threadgroup: attach the whole threadgroup?
1958 *
1959 * Call holding cgroup_mutex and the group_rwsem of the leader. Will take
1960 * task_lock of @tsk or each thread in the threadgroup individually in turn.
1961 */
1962 static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
1963 bool threadgroup)
1964 {
1965 int retval, i, group_size;
1966 struct cgroup_subsys *ss, *failed_ss = NULL;
1967 struct cgroupfs_root *root = cgrp->root;
1968 /* threadgroup list cursor and array */
1969 struct task_struct *leader = tsk;
1970 struct task_and_cgroup *tc;
1971 struct flex_array *group;
1972 struct cgroup_taskset tset = { };
1973
1974 /*
1975 * step 0: in order to do expensive, possibly blocking operations for
1976 * every thread, we cannot iterate the thread group list, since it needs
1977 * rcu or tasklist locked. instead, build an array of all threads in the
1978 * group - group_rwsem prevents new threads from appearing, and if
1979 * threads exit, this will just be an over-estimate.
1980 */
1981 if (threadgroup)
1982 group_size = get_nr_threads(tsk);
1983 else
1984 group_size = 1;
1985 /* flex_array supports very large thread-groups better than kmalloc. */
1986 group = flex_array_alloc(sizeof(*tc), group_size, GFP_KERNEL);
1987 if (!group)
1988 return -ENOMEM;
1989 /* pre-allocate to guarantee space while iterating in rcu read-side. */
1990 retval = flex_array_prealloc(group, 0, group_size, GFP_KERNEL);
1991 if (retval)
1992 goto out_free_group_list;
1993
1994 i = 0;
1995 /*
1996 * Prevent freeing of tasks while we take a snapshot. Tasks that are
1997 * already PF_EXITING could be freed from underneath us unless we
1998 * take an rcu_read_lock.
1999 */
2000 rcu_read_lock();
2001 do {
2002 struct task_and_cgroup ent;
2003
2004 /* @tsk either already exited or can't exit until the end */
2005 if (tsk->flags & PF_EXITING)
2006 goto next;
2007
2008 /* as per above, nr_threads may decrease, but not increase. */
2009 BUG_ON(i >= group_size);
2010 ent.task = tsk;
2011 ent.cgrp = task_cgroup_from_root(tsk, root);
2012 /* nothing to do if this task is already in the cgroup */
2013 if (ent.cgrp == cgrp)
2014 goto next;
2015 /*
2016 * saying GFP_ATOMIC has no effect here because we did prealloc
2017 * earlier, but it's good form to communicate our expectations.
2018 */
2019 retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
2020 BUG_ON(retval != 0);
2021 i++;
2022 next:
2023 if (!threadgroup)
2024 break;
2025 } while_each_thread(leader, tsk);
2026 rcu_read_unlock();
2027 /* remember the number of threads in the array for later. */
2028 group_size = i;
2029 tset.tc_array = group;
2030 tset.tc_array_len = group_size;
2031
2032 /* methods shouldn't be called if no task is actually migrating */
2033 retval = 0;
2034 if (!group_size)
2035 goto out_free_group_list;
2036
2037 /*
2038 * step 1: check that we can legitimately attach to the cgroup.
2039 */
2040 for_each_subsys(root, ss) {
2041 if (ss->can_attach) {
2042 retval = ss->can_attach(cgrp, &tset);
2043 if (retval) {
2044 failed_ss = ss;
2045 goto out_cancel_attach;
2046 }
2047 }
2048 }
2049
2050 /*
2051 * step 2: make sure css_sets exist for all threads to be migrated.
2052 * we use find_css_set, which allocates a new one if necessary.
2053 */
2054 for (i = 0; i < group_size; i++) {
2055 tc = flex_array_get(group, i);
2056 tc->cg = find_css_set(tc->task->cgroups, cgrp);
2057 if (!tc->cg) {
2058 retval = -ENOMEM;
2059 goto out_put_css_set_refs;
2060 }
2061 }
2062
2063 /*
2064 * step 3: now that we're guaranteed success wrt the css_sets,
2065 * proceed to move all tasks to the new cgroup. There are no
2066 * failure cases after here, so this is the commit point.
2067 */
2068 for (i = 0; i < group_size; i++) {
2069 tc = flex_array_get(group, i);
2070 cgroup_task_migrate(tc->cgrp, tc->task, tc->cg);
2071 }
2072 /* nothing is sensitive to fork() after this point. */
2073
2074 /*
2075 * step 4: do subsystem attach callbacks.
2076 */
2077 for_each_subsys(root, ss) {
2078 if (ss->attach)
2079 ss->attach(cgrp, &tset);
2080 }
2081
2082 /*
2083 * step 5: success! and cleanup
2084 */
2085 retval = 0;
2086 out_put_css_set_refs:
2087 if (retval) {
2088 for (i = 0; i < group_size; i++) {
2089 tc = flex_array_get(group, i);
2090 if (!tc->cg)
2091 break;
2092 put_css_set(tc->cg);
2093 }
2094 }
2095 out_cancel_attach:
2096 if (retval) {
2097 for_each_subsys(root, ss) {
2098 if (ss == failed_ss)
2099 break;
2100 if (ss->cancel_attach)
2101 ss->cancel_attach(cgrp, &tset);
2102 }
2103 }
2104 out_free_group_list:
2105 flex_array_free(group);
2106 return retval;
2107 }
2108
2109 /*
2110 * Find the task_struct of the task to attach by vpid and pass it along to the
2111 * function to attach either it or all tasks in its threadgroup. Will lock
2112 * cgroup_mutex and threadgroup; may take task_lock of task.
2113 */
2114 static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
2115 {
2116 struct task_struct *tsk;
2117 const struct cred *cred = current_cred(), *tcred;
2118 int ret;
2119
2120 if (!cgroup_lock_live_group(cgrp))
2121 return -ENODEV;
2122
2123 retry_find_task:
2124 rcu_read_lock();
2125 if (pid) {
2126 tsk = find_task_by_vpid(pid);
2127 if (!tsk) {
2128 rcu_read_unlock();
2129 ret = -ESRCH;
2130 goto out_unlock_cgroup;
2131 }
2132 /*
2133 * even if we're attaching all tasks in the thread group, we
2134 * only need to check permissions on one of them.
2135 */
2136 tcred = __task_cred(tsk);
2137 if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
2138 !uid_eq(cred->euid, tcred->uid) &&
2139 !uid_eq(cred->euid, tcred->suid)) {
2140 rcu_read_unlock();
2141 ret = -EACCES;
2142 goto out_unlock_cgroup;
2143 }
2144 } else
2145 tsk = current;
2146
2147 if (threadgroup)
2148 tsk = tsk->group_leader;
2149
2150 /*
2151 * Workqueue threads may acquire PF_NO_SETAFFINITY and become
2152 * trapped in a cpuset, or an RT worker may be born in a cgroup
2153 * with no rt_runtime allocated. Just say no.
2154 */
2155 if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
2156 ret = -EINVAL;
2157 rcu_read_unlock();
2158 goto out_unlock_cgroup;
2159 }
2160
2161 get_task_struct(tsk);
2162 rcu_read_unlock();
2163
2164 threadgroup_lock(tsk);
2165 if (threadgroup) {
2166 if (!thread_group_leader(tsk)) {
2167 /*
2168 * a race with de_thread from another thread's exec()
2169 * may strip us of our leadership; if this happens,
2170 * there is no choice but to throw this task away and
2171 * try again. This is
2172 * "double-double-toil-and-trouble-check locking".
2173 */
2174 threadgroup_unlock(tsk);
2175 put_task_struct(tsk);
2176 goto retry_find_task;
2177 }
2178 }
2179
2180 ret = cgroup_attach_task(cgrp, tsk, threadgroup);
2181
2182 threadgroup_unlock(tsk);
2183
2184 put_task_struct(tsk);
2185 out_unlock_cgroup:
2186 mutex_unlock(&cgroup_mutex);
2187 return ret;
2188 }
2189
2190 /**
2191 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
2192 * @from: attach to all cgroups of a given task
2193 * @tsk: the task to be attached
2194 */
2195 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
2196 {
2197 struct cgroupfs_root *root;
2198 int retval = 0;
2199
2200 mutex_lock(&cgroup_mutex);
2201 for_each_active_root(root) {
2202 struct cgroup *from_cg = task_cgroup_from_root(from, root);
2203
2204 retval = cgroup_attach_task(from_cg, tsk, false);
2205 if (retval)
2206 break;
2207 }
2208 mutex_unlock(&cgroup_mutex);
2209
2210 return retval;
2211 }
2212 EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
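/*
 * Illustrative sketch: this is meant for kernel code that spawns a
 * helper task and wants it co-located with an existing one, roughly
 * (worker_task is hypothetical):
 *
 *	ret = cgroup_attach_task_all(current, worker_task);
 *	if (ret)
 *		pr_warn("failed to inherit cgroups: %d\n", ret);
 */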
2213
2214 static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
2215 {
2216 return attach_task_by_pid(cgrp, pid, false);
2217 }
2218
2219 static int cgroup_procs_write(struct cgroup *cgrp, struct cftype *cft, u64 tgid)
2220 {
2221 return attach_task_by_pid(cgrp, tgid, true);
2222 }
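/*
 * Illustrative sketch of the userspace side of the two handlers above,
 * assuming a hierarchy mounted at the hypothetical path
 * /sys/fs/cgroup/foo. Writing a pid to "cgroup.procs" moves the whole
 * thread group; writing a tid to "tasks" moves a single thread.
 *
 *	int fd = open("/sys/fs/cgroup/foo/cgroup.procs", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		dprintf(fd, "%d", pid);
 *		close(fd);
 *	}
 */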
2223
2224 static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
2225 const char *buffer)
2226 {
2227 BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
2228 if (strlen(buffer) >= PATH_MAX)
2229 return -EINVAL;
2230 if (!cgroup_lock_live_group(cgrp))
2231 return -ENODEV;
2232 mutex_lock(&cgroup_root_mutex);
2233 strcpy(cgrp->root->release_agent_path, buffer);
2234 mutex_unlock(&cgroup_root_mutex);
2235 mutex_unlock(&cgroup_mutex);
2236 return 0;
2237 }
2238
2239 static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft,
2240 struct seq_file *seq)
2241 {
2242 if (!cgroup_lock_live_group(cgrp))
2243 return -ENODEV;
2244 seq_puts(seq, cgrp->root->release_agent_path);
2245 seq_putc(seq, '\n');
2246 mutex_unlock(&cgroup_mutex);
2247 return 0;
2248 }
2249
2250 static int cgroup_sane_behavior_show(struct cgroup *cgrp, struct cftype *cft,
2251 struct seq_file *seq)
2252 {
2253 seq_printf(seq, "%d\n", cgroup_sane_behavior(cgrp));
2254 return 0;
2255 }
2256
2257 /* A buffer size big enough for numbers or short strings */
2258 #define CGROUP_LOCAL_BUFFER_SIZE 64
2259
2260 static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
2261 struct file *file,
2262 const char __user *userbuf,
2263 size_t nbytes, loff_t *unused_ppos)
2264 {
2265 char buffer[CGROUP_LOCAL_BUFFER_SIZE];
2266 int retval = 0;
2267 char *end;
2268
2269 if (!nbytes)
2270 return -EINVAL;
2271 if (nbytes >= sizeof(buffer))
2272 return -E2BIG;
2273 if (copy_from_user(buffer, userbuf, nbytes))
2274 return -EFAULT;
2275
2276 buffer[nbytes] = 0; /* nul-terminate */
2277 if (cft->write_u64) {
2278 u64 val = simple_strtoull(strstrip(buffer), &end, 0);
2279 if (*end)
2280 return -EINVAL;
2281 retval = cft->write_u64(cgrp, cft, val);
2282 } else {
2283 s64 val = simple_strtoll(strstrip(buffer), &end, 0);
2284 if (*end)
2285 return -EINVAL;
2286 retval = cft->write_s64(cgrp, cft, val);
2287 }
2288 if (!retval)
2289 retval = nbytes;
2290 return retval;
2291 }
2292
2293 static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
2294 struct file *file,
2295 const char __user *userbuf,
2296 size_t nbytes, loff_t *unused_ppos)
2297 {
2298 char local_buffer[CGROUP_LOCAL_BUFFER_SIZE];
2299 int retval = 0;
2300 size_t max_bytes = cft->max_write_len;
2301 char *buffer = local_buffer;
2302
2303 if (!max_bytes)
2304 max_bytes = sizeof(local_buffer) - 1;
2305 if (nbytes >= max_bytes)
2306 return -E2BIG;
2307 /* Allocate a dynamic buffer if we need one */
2308 if (nbytes >= sizeof(local_buffer)) {
2309 buffer = kmalloc(nbytes + 1, GFP_KERNEL);
2310 if (buffer == NULL)
2311 return -ENOMEM;
2312 }
2313 if (nbytes && copy_from_user(buffer, userbuf, nbytes)) {
2314 retval = -EFAULT;
2315 goto out;
2316 }
2317
2318 buffer[nbytes] = 0; /* nul-terminate */
2319 retval = cft->write_string(cgrp, cft, strstrip(buffer));
2320 if (!retval)
2321 retval = nbytes;
2322 out:
2323 if (buffer != local_buffer)
2324 kfree(buffer);
2325 return retval;
2326 }
2327
2328 static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
2329 size_t nbytes, loff_t *ppos)
2330 {
2331 struct cftype *cft = __d_cft(file->f_dentry);
2332 struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
2333
2334 if (cgroup_is_removed(cgrp))
2335 return -ENODEV;
2336 if (cft->write)
2337 return cft->write(cgrp, cft, file, buf, nbytes, ppos);
2338 if (cft->write_u64 || cft->write_s64)
2339 return cgroup_write_X64(cgrp, cft, file, buf, nbytes, ppos);
2340 if (cft->write_string)
2341 return cgroup_write_string(cgrp, cft, file, buf, nbytes, ppos);
2342 if (cft->trigger) {
2343 int ret = cft->trigger(cgrp, (unsigned int)cft->private);
2344 return ret ? ret : nbytes;
2345 }
2346 return -EINVAL;
2347 }
2348
2349 static ssize_t cgroup_read_u64(struct cgroup *cgrp, struct cftype *cft,
2350 struct file *file,
2351 char __user *buf, size_t nbytes,
2352 loff_t *ppos)
2353 {
2354 char tmp[CGROUP_LOCAL_BUFFER_SIZE];
2355 u64 val = cft->read_u64(cgrp, cft);
2356 int len = sprintf(tmp, "%llu\n", (unsigned long long) val);
2357
2358 return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
2359 }
2360
2361 static ssize_t cgroup_read_s64(struct cgroup *cgrp, struct cftype *cft,
2362 struct file *file,
2363 char __user *buf, size_t nbytes,
2364 loff_t *ppos)
2365 {
2366 char tmp[CGROUP_LOCAL_BUFFER_SIZE];
2367 s64 val = cft->read_s64(cgrp, cft);
2368 int len = sprintf(tmp, "%lld\n", (long long) val);
2369
2370 return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
2371 }
2372
2373 static ssize_t cgroup_file_read(struct file *file, char __user *buf,
2374 size_t nbytes, loff_t *ppos)
2375 {
2376 struct cftype *cft = __d_cft(file->f_dentry);
2377 struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
2378
2379 if (cgroup_is_removed(cgrp))
2380 return -ENODEV;
2381
2382 if (cft->read)
2383 return cft->read(cgrp, cft, file, buf, nbytes, ppos);
2384 if (cft->read_u64)
2385 return cgroup_read_u64(cgrp, cft, file, buf, nbytes, ppos);
2386 if (cft->read_s64)
2387 return cgroup_read_s64(cgrp, cft, file, buf, nbytes, ppos);
2388 return -EINVAL;
2389 }
2390
2391 /*
2392 * seqfile ops/methods for returning structured data. Currently just
2393 * supports string->u64 maps, but can be extended in future.
2394 */
2395
2396 struct cgroup_seqfile_state {
2397 struct cftype *cft;
2398 struct cgroup *cgroup;
2399 };
2400
2401 static int cgroup_map_add(struct cgroup_map_cb *cb, const char *key, u64 value)
2402 {
2403 struct seq_file *sf = cb->state;
2404 return seq_printf(sf, "%s %llu\n", key, (unsigned long long)value);
2405 }
2406
2407 static int cgroup_seqfile_show(struct seq_file *m, void *arg)
2408 {
2409 struct cgroup_seqfile_state *state = m->private;
2410 struct cftype *cft = state->cft;
2411 if (cft->read_map) {
2412 struct cgroup_map_cb cb = {
2413 .fill = cgroup_map_add,
2414 .state = m,
2415 };
2416 return cft->read_map(state->cgroup, cft, &cb);
2417 }
2418 return cft->read_seq_string(state->cgroup, cft, m);
2419 }
2420
2421 static int cgroup_seqfile_release(struct inode *inode, struct file *file)
2422 {
2423 struct seq_file *seq = file->private_data;
2424 kfree(seq->private);
2425 return single_release(inode, file);
2426 }
2427
2428 static const struct file_operations cgroup_seqfile_operations = {
2429 .read = seq_read,
2430 .write = cgroup_file_write,
2431 .llseek = seq_lseek,
2432 .release = cgroup_seqfile_release,
2433 };
2434
2435 static int cgroup_file_open(struct inode *inode, struct file *file)
2436 {
2437 int err;
2438 struct cftype *cft;
2439
2440 err = generic_file_open(inode, file);
2441 if (err)
2442 return err;
2443 cft = __d_cft(file->f_dentry);
2444
2445 if (cft->read_map || cft->read_seq_string) {
2446 struct cgroup_seqfile_state *state =
2447 kzalloc(sizeof(*state), GFP_USER);
2448 if (!state)
2449 return -ENOMEM;
2450 state->cft = cft;
2451 state->cgroup = __d_cgrp(file->f_dentry->d_parent);
2452 file->f_op = &cgroup_seqfile_operations;
2453 err = single_open(file, cgroup_seqfile_show, state);
2454 if (err < 0)
2455 kfree(state);
2456 } else if (cft->open)
2457 err = cft->open(inode, file);
2458 else
2459 err = 0;
2460
2461 return err;
2462 }
2463
2464 static int cgroup_file_release(struct inode *inode, struct file *file)
2465 {
2466 struct cftype *cft = __d_cft(file->f_dentry);
2467 if (cft->release)
2468 return cft->release(inode, file);
2469 return 0;
2470 }
2471
2472 /*
2473 * cgroup_rename - Only allow simple rename of directories in place.
2474 */
2475 static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
2476 struct inode *new_dir, struct dentry *new_dentry)
2477 {
2478 int ret;
2479 struct cgroup_name *name, *old_name;
2480 struct cgroup *cgrp;
2481
2482 /*
2483 * It's convenient to use the parent dir's i_mutex to protect
2484 * cgrp->name.
2485 */
2486 lockdep_assert_held(&old_dir->i_mutex);
2487
2488 if (!S_ISDIR(old_dentry->d_inode->i_mode))
2489 return -ENOTDIR;
2490 if (new_dentry->d_inode)
2491 return -EEXIST;
2492 if (old_dir != new_dir)
2493 return -EIO;
2494
2495 cgrp = __d_cgrp(old_dentry);
2496
2497 name = cgroup_alloc_name(new_dentry);
2498 if (!name)
2499 return -ENOMEM;
2500
2501 ret = simple_rename(old_dir, old_dentry, new_dir, new_dentry);
2502 if (ret) {
2503 kfree(name);
2504 return ret;
2505 }
2506
2507 old_name = cgrp->name;
2508 rcu_assign_pointer(cgrp->name, name);
2509
2510 kfree_rcu(old_name, rcu_head);
2511 return 0;
2512 }
2513
2514 static struct simple_xattrs *__d_xattrs(struct dentry *dentry)
2515 {
2516 if (S_ISDIR(dentry->d_inode->i_mode))
2517 return &__d_cgrp(dentry)->xattrs;
2518 else
2519 return &__d_cfe(dentry)->xattrs;
2520 }
2521
2522 static inline int xattr_enabled(struct dentry *dentry)
2523 {
2524 struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
2525 return root->flags & CGRP_ROOT_XATTR;
2526 }
2527
2528 static bool is_valid_xattr(const char *name)
2529 {
2530 if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
2531 !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN))
2532 return true;
2533 return false;
2534 }
2535
2536 static int cgroup_setxattr(struct dentry *dentry, const char *name,
2537 const void *val, size_t size, int flags)
2538 {
2539 if (!xattr_enabled(dentry))
2540 return -EOPNOTSUPP;
2541 if (!is_valid_xattr(name))
2542 return -EINVAL;
2543 return simple_xattr_set(__d_xattrs(dentry), name, val, size, flags);
2544 }
2545
2546 static int cgroup_removexattr(struct dentry *dentry, const char *name)
2547 {
2548 if (!xattr_enabled(dentry))
2549 return -EOPNOTSUPP;
2550 if (!is_valid_xattr(name))
2551 return -EINVAL;
2552 return simple_xattr_remove(__d_xattrs(dentry), name);
2553 }
2554
2555 static ssize_t cgroup_getxattr(struct dentry *dentry, const char *name,
2556 void *buf, size_t size)
2557 {
2558 if (!xattr_enabled(dentry))
2559 return -EOPNOTSUPP;
2560 if (!is_valid_xattr(name))
2561 return -EINVAL;
2562 return simple_xattr_get(__d_xattrs(dentry), name, buf, size);
2563 }
2564
2565 static ssize_t cgroup_listxattr(struct dentry *dentry, char *buf, size_t size)
2566 {
2567 if (!xattr_enabled(dentry))
2568 return -EOPNOTSUPP;
2569 return simple_xattr_list(__d_xattrs(dentry), buf, size);
2570 }
2571
2572 static const struct file_operations cgroup_file_operations = {
2573 .read = cgroup_file_read,
2574 .write = cgroup_file_write,
2575 .llseek = generic_file_llseek,
2576 .open = cgroup_file_open,
2577 .release = cgroup_file_release,
2578 };
2579
2580 static const struct inode_operations cgroup_file_inode_operations = {
2581 .setxattr = cgroup_setxattr,
2582 .getxattr = cgroup_getxattr,
2583 .listxattr = cgroup_listxattr,
2584 .removexattr = cgroup_removexattr,
2585 };
2586
2587 static const struct inode_operations cgroup_dir_inode_operations = {
2588 .lookup = cgroup_lookup,
2589 .mkdir = cgroup_mkdir,
2590 .rmdir = cgroup_rmdir,
2591 .rename = cgroup_rename,
2592 .setxattr = cgroup_setxattr,
2593 .getxattr = cgroup_getxattr,
2594 .listxattr = cgroup_listxattr,
2595 .removexattr = cgroup_removexattr,
2596 };
2597
2598 static struct dentry *cgroup_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
2599 {
2600 if (dentry->d_name.len > NAME_MAX)
2601 return ERR_PTR(-ENAMETOOLONG);
2602 d_add(dentry, NULL);
2603 return NULL;
2604 }
2605
2606 /*
2607 * Check if a file is a control file
2608 */
2609 static inline struct cftype *__file_cft(struct file *file)
2610 {
2611 if (file_inode(file)->i_fop != &cgroup_file_operations)
2612 return ERR_PTR(-EINVAL);
2613 return __d_cft(file->f_dentry);
2614 }
2615
2616 static int cgroup_create_file(struct dentry *dentry, umode_t mode,
2617 struct super_block *sb)
2618 {
2619 struct inode *inode;
2620
2621 if (!dentry)
2622 return -ENOENT;
2623 if (dentry->d_inode)
2624 return -EEXIST;
2625
2626 inode = cgroup_new_inode(mode, sb);
2627 if (!inode)
2628 return -ENOMEM;
2629
2630 if (S_ISDIR(mode)) {
2631 inode->i_op = &cgroup_dir_inode_operations;
2632 inode->i_fop = &simple_dir_operations;
2633
2634 /* start off with i_nlink == 2 (for "." entry) */
2635 inc_nlink(inode);
2636 inc_nlink(dentry->d_parent->d_inode);
2637
2638 /*
2639 * Control reaches here with cgroup_mutex held.
2640 * @inode->i_mutex should nest outside cgroup_mutex but we
2641 * want to populate it immediately without releasing
2642 * cgroup_mutex. As @inode isn't visible to anyone else
2643 * yet, trylock will always succeed without affecting
2644 * lockdep checks.
2645 */
2646 WARN_ON_ONCE(!mutex_trylock(&inode->i_mutex));
2647 } else if (S_ISREG(mode)) {
2648 inode->i_size = 0;
2649 inode->i_fop = &cgroup_file_operations;
2650 inode->i_op = &cgroup_file_inode_operations;
2651 }
2652 d_instantiate(dentry, inode);
2653 dget(dentry); /* Extra count - pin the dentry in core */
2654 return 0;
2655 }
2656
2657 /**
2658 * cgroup_file_mode - deduce file mode of a control file
2659 * @cft: the control file in question
2660 *
2661 * returns cft->mode if ->mode is not 0
2662 * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
2663 * returns S_IRUGO if it has only a read handler
2664 * returns S_IWUSR if it has only a write handler
2665 */
2666 static umode_t cgroup_file_mode(const struct cftype *cft)
2667 {
2668 umode_t mode = 0;
2669
2670 if (cft->mode)
2671 return cft->mode;
2672
2673 if (cft->read || cft->read_u64 || cft->read_s64 ||
2674 cft->read_map || cft->read_seq_string)
2675 mode |= S_IRUGO;
2676
2677 if (cft->write || cft->write_u64 || cft->write_s64 ||
2678 cft->write_string || cft->trigger)
2679 mode |= S_IWUSR;
2680
2681 return mode;
2682 }
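/*
 * For example, a cftype with only .read_u64 set gets S_IRUGO (0444),
 * and one with both .read_u64 and .write_u64 gets S_IRUGO | S_IWUSR
 * (0644), unless an explicit .mode overrides the deduction.
 */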
2683
2684 static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
2685 struct cftype *cft)
2686 {
2687 struct dentry *dir = cgrp->dentry;
2688 struct cgroup *parent = __d_cgrp(dir);
2689 struct dentry *dentry;
2690 struct cfent *cfe;
2691 int error;
2692 umode_t mode;
2693 char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };
2694
2695 if (subsys && !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) {
2696 strcpy(name, subsys->name);
2697 strcat(name, ".");
2698 }
2699 strcat(name, cft->name);
2700
2701 BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex));
2702
2703 cfe = kzalloc(sizeof(*cfe), GFP_KERNEL);
2704 if (!cfe)
2705 return -ENOMEM;
2706
2707 dentry = lookup_one_len(name, dir, strlen(name));
2708 if (IS_ERR(dentry)) {
2709 error = PTR_ERR(dentry);
2710 goto out;
2711 }
2712
2713 cfe->type = (void *)cft;
2714 cfe->dentry = dentry;
2715 dentry->d_fsdata = cfe;
2716 simple_xattrs_init(&cfe->xattrs);
2717
2718 mode = cgroup_file_mode(cft);
2719 error = cgroup_create_file(dentry, mode | S_IFREG, cgrp->root->sb);
2720 if (!error) {
2721 list_add_tail(&cfe->node, &parent->files);
2722 cfe = NULL;
2723 }
2724 dput(dentry);
2725 out:
2726 kfree(cfe);
2727 return error;
2728 }
2729
2730 static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys,
2731 struct cftype cfts[], bool is_add)
2732 {
2733 struct cftype *cft;
2734 int err, ret = 0;
2735
2736 for (cft = cfts; cft->name[0] != '\0'; cft++) {
2737 /* does cft->flags tell us to skip this file on @cgrp? */
2738 if ((cft->flags & CFTYPE_INSANE) && cgroup_sane_behavior(cgrp))
2739 continue;
2740 if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgrp->parent)
2741 continue;
2742 if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgrp->parent)
2743 continue;
2744
2745 if (is_add) {
2746 err = cgroup_add_file(cgrp, subsys, cft);
2747 if (err)
2748 pr_warn("cgroup_addrm_files: failed to add %s, err=%d\n",
2749 cft->name, err);
2750 ret = err;
2751 } else {
2752 cgroup_rm_file(cgrp, cft);
2753 }
2754 }
2755 return ret;
2756 }
2757
2758 static DEFINE_MUTEX(cgroup_cft_mutex);
2759
2760 static void cgroup_cfts_prepare(void)
2761 __acquires(&cgroup_cft_mutex) __acquires(&cgroup_mutex)
2762 {
2763 /*
2764 * Thanks to the entanglement with vfs inode locking, we can't walk
2765 * the existing cgroups under cgroup_mutex and create files.
2766 * Instead, we increment reference on all cgroups and build list of
2767 * them using @cgrp->cft_q_node. Grab cgroup_cft_mutex to ensure
2768 * exclusive access to the field.
2769 */
2770 mutex_lock(&cgroup_cft_mutex);
2771 mutex_lock(&cgroup_mutex);
2772 }
2773
2774 static void cgroup_cfts_commit(struct cgroup_subsys *ss,
2775 struct cftype *cfts, bool is_add)
2776 __releases(&cgroup_mutex) __releases(&cgroup_cft_mutex)
2777 {
2778 LIST_HEAD(pending);
2779 struct cgroup *cgrp, *n;
2780 struct super_block *sb = ss->root->sb;
2781
2782 /* %NULL @cfts indicates abort and don't bother if @ss isn't attached */
2783 if (cfts && ss->root != &rootnode &&
2784 atomic_inc_not_zero(&sb->s_active)) {
2785 list_for_each_entry(cgrp, &ss->root->allcg_list, allcg_node) {
2786 dget(cgrp->dentry);
2787 list_add_tail(&cgrp->cft_q_node, &pending);
2788 }
2789 } else {
2790 sb = NULL;
2791 }
2792
2793 mutex_unlock(&cgroup_mutex);
2794
2795 /*
2796 * All new cgroups will see @cfts update on @ss->cftsets. Add/rm
2797 * files for all cgroups which were created before.
2798 */
2799 list_for_each_entry_safe(cgrp, n, &pending, cft_q_node) {
2800 struct inode *inode = cgrp->dentry->d_inode;
2801
2802 mutex_lock(&inode->i_mutex);
2803 mutex_lock(&cgroup_mutex);
2804 if (!cgroup_is_removed(cgrp))
2805 cgroup_addrm_files(cgrp, ss, cfts, is_add);
2806 mutex_unlock(&cgroup_mutex);
2807 mutex_unlock(&inode->i_mutex);
2808
2809 list_del_init(&cgrp->cft_q_node);
2810 dput(cgrp->dentry);
2811 }
2812
2813 if (sb)
2814 deactivate_super(sb);
2815
2816 mutex_unlock(&cgroup_cft_mutex);
2817 }
2818
2819 /**
2820 * cgroup_add_cftypes - add an array of cftypes to a subsystem
2821 * @ss: target cgroup subsystem
2822 * @cfts: zero-length name terminated array of cftypes
2823 *
2824 * Register @cfts to @ss. Files described by @cfts are created for all
2825 * existing cgroups to which @ss is attached and all future cgroups will
2826 * have them too. This function can be called anytime whether @ss is
2827 * attached or not.
2828 *
2829 * Returns 0 on successful registration, -errno on failure. Note that this
2830 * function currently returns 0 as long as @cfts registration is successful
2831 * even if some file creation attempts on existing cgroups fail.
2832 */
2833 int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
2834 {
2835 struct cftype_set *set;
2836
2837 set = kzalloc(sizeof(*set), GFP_KERNEL);
2838 if (!set)
2839 return -ENOMEM;
2840
2841 cgroup_cfts_prepare();
2842 set->cfts = cfts;
2843 list_add_tail(&set->node, &ss->cftsets);
2844 cgroup_cfts_commit(ss, cfts, true);
2845
2846 return 0;
2847 }
2848 EXPORT_SYMBOL_GPL(cgroup_add_cftypes);
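/*
 * Illustrative sketch: a controller would typically register its files
 * once at init time. my_subsys, the "usage" file and my_read_u64() are
 * hypothetical; the array is terminated by an entry with an empty name.
 *
 *	static struct cftype my_files[] = {
 *		{
 *			.name = "usage",
 *			.read_u64 = my_read_u64,
 *		},
 *		{ }
 *	};
 *
 *	err = cgroup_add_cftypes(&my_subsys, my_files);
 */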
2849
2850 /**
2851 * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
2852 * @ss: target cgroup subsystem
2853 * @cfts: zero-length name terminated array of cftypes
2854 *
2855 * Unregister @cfts from @ss. Files described by @cfts are removed from
2856 * all existing cgroups to which @ss is attached and all future cgroups
2857 * won't have them either. This function can be called anytime whether @ss
2858 * is attached or not.
2859 *
2860 * Returns 0 on successful unregistration, -ENOENT if @cfts is not
2861 * registered with @ss.
2862 */
2863 int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
2864 {
2865 struct cftype_set *set;
2866
2867 cgroup_cfts_prepare();
2868
2869 list_for_each_entry(set, &ss->cftsets, node) {
2870 if (set->cfts == cfts) {
2871 list_del_init(&set->node);
2872 cgroup_cfts_commit(ss, cfts, false);
2873 return 0;
2874 }
2875 }
2876
2877 cgroup_cfts_commit(ss, NULL, false);
2878 return -ENOENT;
2879 }
2880
2881 /**
2882 * cgroup_task_count - count the number of tasks in a cgroup.
2883 * @cgrp: the cgroup in question
2884 *
2885 * Return the number of tasks in the cgroup.
2886 */
2887 int cgroup_task_count(const struct cgroup *cgrp)
2888 {
2889 int count = 0;
2890 struct cg_cgroup_link *link;
2891
2892 read_lock(&css_set_lock);
2893 list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) {
2894 count += atomic_read(&link->cg->refcount);
2895 }
2896 read_unlock(&css_set_lock);
2897 return count;
2898 }
2899
2900 /*
2901 * Advance a list_head iterator. The iterator should be positioned at
2902 * the start of a css_set.
2903 */
2904 static void cgroup_advance_iter(struct cgroup *cgrp,
2905 struct cgroup_iter *it)
2906 {
2907 struct list_head *l = it->cg_link;
2908 struct cg_cgroup_link *link;
2909 struct css_set *cg;
2910
2911 /* Advance to the next non-empty css_set */
2912 do {
2913 l = l->next;
2914 if (l == &cgrp->css_sets) {
2915 it->cg_link = NULL;
2916 return;
2917 }
2918 link = list_entry(l, struct cg_cgroup_link, cgrp_link_list);
2919 cg = link->cg;
2920 } while (list_empty(&cg->tasks));
2921 it->cg_link = l;
2922 it->task = cg->tasks.next;
2923 }
2924
2925 /*
2926 * To reduce the fork() overhead for systems that are not actually
2927 * using their cgroups capability, we don't maintain the lists running
2928 * through each css_set to its tasks until we see the list actually
2929 * used - in other words after the first call to cgroup_iter_start().
2930 */
2931 static void cgroup_enable_task_cg_lists(void)
2932 {
2933 struct task_struct *p, *g;
2934 write_lock(&css_set_lock);
2935 use_task_css_set_links = 1;
2936 /*
2937 * We need tasklist_lock because RCU is not safe against
2938 * while_each_thread(). Besides, a forking task that has passed
2939 * cgroup_post_fork() without seeing use_task_css_set_links = 1
2940 * is not guaranteed to have its child immediately visible in the
2941 * tasklist if we walk through it with RCU.
2942 */
2943 read_lock(&tasklist_lock);
2944 do_each_thread(g, p) {
2945 task_lock(p);
2946 /*
2947 * We should check if the process is exiting, otherwise
2948 * it will race with cgroup_exit() in that the list
2949 * entry won't be deleted though the process has exited.
2950 */
2951 if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
2952 list_add(&p->cg_list, &p->cgroups->tasks);
2953 task_unlock(p);
2954 } while_each_thread(g, p);
2955 read_unlock(&tasklist_lock);
2956 write_unlock(&css_set_lock);
2957 }
2958
2959 /**
2960 * cgroup_next_descendant_pre - find the next descendant for pre-order walk
2961 * @pos: the current position (%NULL to initiate traversal)
2962 * @cgroup: cgroup whose descendants to walk
2963 *
2964 * To be used by cgroup_for_each_descendant_pre(). Find the next
2965 * descendant to visit for pre-order traversal of @cgroup's descendants.
2966 */
2967 struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
2968 struct cgroup *cgroup)
2969 {
2970 struct cgroup *next;
2971
2972 WARN_ON_ONCE(!rcu_read_lock_held());
2973
2974 /* if first iteration, pretend we just visited @cgroup */
2975 if (!pos)
2976 pos = cgroup;
2977
2978 /* visit the first child if exists */
2979 next = list_first_or_null_rcu(&pos->children, struct cgroup, sibling);
2980 if (next)
2981 return next;
2982
2983 /* no child, visit my or the closest ancestor's next sibling */
2984 while (pos != cgroup) {
2985 next = list_entry_rcu(pos->sibling.next, struct cgroup,
2986 sibling);
2987 if (&next->sibling != &pos->parent->children)
2988 return next;
2989
2990 pos = pos->parent;
2991 }
2992
2993 return NULL;
2994 }
2995 EXPORT_SYMBOL_GPL(cgroup_next_descendant_pre);
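/*
 * Illustrative sketch: callers normally use the
 * cgroup_for_each_descendant_pre() wrapper around this function, under
 * rcu_read_lock(). visit() is a hypothetical stand-in for the real work.
 *
 *	struct cgroup *pos;
 *
 *	rcu_read_lock();
 *	cgroup_for_each_descendant_pre(pos, cgrp)
 *		visit(pos);
 *	rcu_read_unlock();
 */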
2996
2997 /**
2998 * cgroup_rightmost_descendant - return the rightmost descendant of a cgroup
2999 * @pos: cgroup of interest
3000 *
3001 * Return the rightmost descendant of @pos. If there's no descendant,
3002 * @pos is returned. This can be used during pre-order traversal to skip
3003 * subtree of @pos.
3004 */
3005 struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos)
3006 {
3007 struct cgroup *last, *tmp;
3008
3009 WARN_ON_ONCE(!rcu_read_lock_held());
3010
3011 do {
3012 last = pos;
3013 /* ->prev isn't RCU safe, walk ->next till the end */
3014 pos = NULL;
3015 list_for_each_entry_rcu(tmp, &last->children, sibling)
3016 pos = tmp;
3017 } while (pos);
3018
3019 return last;
3020 }
3021 EXPORT_SYMBOL_GPL(cgroup_rightmost_descendant);
3022
3023 static struct cgroup *cgroup_leftmost_descendant(struct cgroup *pos)
3024 {
3025 struct cgroup *last;
3026
3027 do {
3028 last = pos;
3029 pos = list_first_or_null_rcu(&pos->children, struct cgroup,
3030 sibling);
3031 } while (pos);
3032
3033 return last;
3034 }
3035
3036 /**
3037 * cgroup_next_descendant_post - find the next descendant for post-order walk
3038 * @pos: the current position (%NULL to initiate traversal)
3039 * @cgroup: cgroup whose descendants to walk
3040 *
3041 * To be used by cgroup_for_each_descendant_post(). Find the next
3042 * descendant to visit for post-order traversal of @cgroup's descendants.
3043 */
3044 struct cgroup *cgroup_next_descendant_post(struct cgroup *pos,
3045 struct cgroup *cgroup)
3046 {
3047 struct cgroup *next;
3048
3049 WARN_ON_ONCE(!rcu_read_lock_held());
3050
3051 /* if first iteration, visit the leftmost descendant */
3052 if (!pos) {
3053 next = cgroup_leftmost_descendant(cgroup);
3054 return next != cgroup ? next : NULL;
3055 }
3056
3057 /* if there's an unvisited sibling, visit its leftmost descendant */
3058 next = list_entry_rcu(pos->sibling.next, struct cgroup, sibling);
3059 if (&next->sibling != &pos->parent->children)
3060 return cgroup_leftmost_descendant(next);
3061
3062 /* no sibling left, visit parent */
3063 next = pos->parent;
3064 return next != cgroup ? next : NULL;
3065 }
3066 EXPORT_SYMBOL_GPL(cgroup_next_descendant_post);
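/*
 * The post-order variant (cgroup_for_each_descendant_post()) visits
 * every child before its parent, which suits teardown-style walks that
 * must finish with the leaves before touching their ancestors.
 */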
3067
3068 void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it)
3069 __acquires(css_set_lock)
3070 {
3071 /*
3072 * The first time anyone tries to iterate across a cgroup,
3073 * we need to enable the list linking each css_set to its
3074 * tasks, and fix up all existing tasks.
3075 */
3076 if (!use_task_css_set_links)
3077 cgroup_enable_task_cg_lists();
3078
3079 read_lock(&css_set_lock);
3080 it->cg_link = &cgrp->css_sets;
3081 cgroup_advance_iter(cgrp, it);
3082 }
3083
3084 struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
3085 struct cgroup_iter *it)
3086 {
3087 struct task_struct *res;
3088 struct list_head *l = it->task;
3089 struct cg_cgroup_link *link;
3090
3091 /* If the iterator's cg_link is NULL, we have no tasks */
3092 if (!it->cg_link)
3093 return NULL;
3094 res = list_entry(l, struct task_struct, cg_list);
3095 /* Advance iterator to find next entry */
3096 l = l->next;
3097 link = list_entry(it->cg_link, struct cg_cgroup_link, cgrp_link_list);
3098 if (l == &link->cg->tasks) {
3099 /* We reached the end of this task list - move on to
3100 * the next cg_cgroup_link */
3101 cgroup_advance_iter(cgrp, it);
3102 } else {
3103 it->task = l;
3104 }
3105 return res;
3106 }
3107
3108 void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it)
3109 __releases(css_set_lock)
3110 {
3111 read_unlock(&css_set_lock);
3112 }
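/*
 * Illustrative sketch of the iterator triple above; do_something() is
 * hypothetical. css_set_lock is read-held from _start to _end, so the
 * loop body must not sleep.
 *
 *	struct cgroup_iter it;
 *	struct task_struct *tsk;
 *
 *	cgroup_iter_start(cgrp, &it);
 *	while ((tsk = cgroup_iter_next(cgrp, &it)))
 *		do_something(tsk);
 *	cgroup_iter_end(cgrp, &it);
 */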
3113
3114 static inline int started_after_time(struct task_struct *t1,
3115 struct timespec *time,
3116 struct task_struct *t2)
3117 {
3118 int start_diff = timespec_compare(&t1->start_time, time);
3119 if (start_diff > 0) {
3120 return 1;
3121 } else if (start_diff < 0) {
3122 return 0;
3123 } else {
3124 /*
3125 * Arbitrarily, if two processes started at the same
3126 * time, we'll say that the lower pointer value
3127 * started first. Note that t2 may have exited by now
3128 * so this may not be a valid pointer any longer, but
3129 * that's fine - it still serves to distinguish
3130 * between two tasks started (effectively) simultaneously.
3131 */
3132 return t1 > t2;
3133 }
3134 }
3135
3136 /*
3137 * This function is a callback from heap_insert() and is used to order
3138 * the heap.
3139 * In this case we order the heap in descending task start time.
3140 */
3141 static inline int started_after(void *p1, void *p2)
3142 {
3143 struct task_struct *t1 = p1;
3144 struct task_struct *t2 = p2;
3145 return started_after_time(t1, &t2->start_time, t2);
3146 }
3147
3148 /**
3149 * cgroup_scan_tasks - iterate though all the tasks in a cgroup
3150 * @scan: struct cgroup_scanner containing arguments for the scan
3151 *
3152 * Arguments include pointers to callback functions test_task() and
3153 * process_task().
3154 * Iterate through all the tasks in a cgroup, calling test_task() for each,
3155 * and if it returns true, call process_task() for it also.
3156 * The test_task pointer may be NULL, meaning always true (select all tasks).
3157 * Effectively duplicates cgroup_iter_{start,next,end}()
3158 * but does not lock css_set_lock for the call to process_task().
3159 * The struct cgroup_scanner may be embedded in any structure of the caller's
3160 * creation.
3161 * It is guaranteed that process_task() will act on every task that
3162 * is a member of the cgroup for the duration of this call. This
3163 * function may or may not call process_task() for tasks that exit
3164 * or move to a different cgroup during the call, or are forked or
3165 * move into the cgroup during the call.
3166 *
3167 * Note that test_task() may be called with locks held, and may in some
3168 * situations be called multiple times for the same task, so it should
3169 * be cheap.
3170 * If the heap pointer in the struct cgroup_scanner is non-NULL, a heap has been
3171 * pre-allocated and will be used for heap operations (and its "gt" member will
3172 * be overwritten), else a temporary heap will be used (allocation of which
3173 * may cause this function to fail).
3174 */
3175 int cgroup_scan_tasks(struct cgroup_scanner *scan)
3176 {
3177 int retval, i;
3178 struct cgroup_iter it;
3179 struct task_struct *p, *dropped;
3180 /* Never dereference latest_task, since it's not refcounted */
3181 struct task_struct *latest_task = NULL;
3182 struct ptr_heap tmp_heap;
3183 struct ptr_heap *heap;
3184 struct timespec latest_time = { 0, 0 };
3185
3186 if (scan->heap) {
3187 /* The caller supplied our heap and pre-allocated its memory */
3188 heap = scan->heap;
3189 heap->gt = &started_after;
3190 } else {
3191 /* We need to allocate our own heap memory */
3192 heap = &tmp_heap;
3193 retval = heap_init(heap, PAGE_SIZE, GFP_KERNEL, &started_after);
3194 if (retval)
3195 /* cannot allocate the heap */
3196 return retval;
3197 }
3198
3199 again:
3200 /*
3201 * Scan tasks in the cgroup, using the scanner's "test_task" callback
3202 * to determine which are of interest, and using the scanner's
3203 * "process_task" callback to process any of them that need an update.
3204 * Since we don't want to hold any locks during the task updates,
3205 * gather tasks to be processed in a heap structure.
3206 * The heap is sorted by descending task start time.
3207 * If the statically-sized heap fills up, we overflow tasks that
3208 * started later, and in future iterations only consider tasks that
3209 * started after the latest task in the previous pass. This
3210 * guarantees forward progress and that we don't miss any tasks.
3211 */
3212 heap->size = 0;
3213 cgroup_iter_start(scan->cg, &it);
3214 while ((p = cgroup_iter_next(scan->cg, &it))) {
3215 /*
3216 * Only affect tasks that qualify per the caller's callback,
3217 * if one was provided.
3218 */
3219 if (scan->test_task && !scan->test_task(p, scan))
3220 continue;
3221 /*
3222 * Only process tasks that started after the last task
3223 * we processed
3224 */
3225 if (!started_after_time(p, &latest_time, latest_task))
3226 continue;
3227 dropped = heap_insert(heap, p);
3228 if (dropped == NULL) {
3229 /*
3230 * The new task was inserted; the heap wasn't
3231 * previously full
3232 */
3233 get_task_struct(p);
3234 } else if (dropped != p) {
3235 /*
3236 * The new task was inserted, and pushed out a
3237 * different task
3238 */
3239 get_task_struct(p);
3240 put_task_struct(dropped);
3241 }
3242 /*
3243 * Else the new task was newer than anything already in
3244 * the heap and wasn't inserted
3245 */
3246 }
3247 cgroup_iter_end(scan->cg, &it);
3248
3249 if (heap->size) {
3250 for (i = 0; i < heap->size; i++) {
3251 struct task_struct *q = heap->ptrs[i];
3252 if (i == 0) {
3253 latest_time = q->start_time;
3254 latest_task = q;
3255 }
3256 /* Process the task per the caller's callback */
3257 scan->process_task(q, scan);
3258 put_task_struct(q);
3259 }
3260 /*
3261 * If we had to process any tasks at all, scan again
3262 * in case some of them were in the middle of forking
3263 * children that didn't get processed.
3264 * Not the most efficient way to do it, but it avoids
3265 * having to take callback_mutex in the fork path
3266 */
3267 goto again;
3268 }
3269 if (heap == &tmp_heap)
3270 heap_free(&tmp_heap);
3271 return 0;
3272 }
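/*
 * cgroup_transfer_tasks() below doubles as a minimal example of how a
 * cgroup_scanner is wired up: set ->cg, ->test_task, ->process_task,
 * ->heap and ->data, then hand it to cgroup_scan_tasks().
 */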
3273
3274 static void cgroup_transfer_one_task(struct task_struct *task,
3275 struct cgroup_scanner *scan)
3276 {
3277 struct cgroup *new_cgroup = scan->data;
3278
3279 mutex_lock(&cgroup_mutex);
3280 cgroup_attach_task(new_cgroup, task, false);
3281 mutex_unlock(&cgroup_mutex);
3282 }
3283
3284 /**
3285 * cgroup_transfer_tasks - move tasks from one cgroup to another
3286 * @to: cgroup to which the tasks will be moved
3287 * @from: cgroup in which the tasks currently reside
3288 */
3289 int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
3290 {
3291 struct cgroup_scanner scan;
3292
3293 scan.cg = from;
3294 scan.test_task = NULL; /* select all tasks in cgroup */
3295 scan.process_task = cgroup_transfer_one_task;
3296 scan.heap = NULL;
3297 scan.data = to;
3298
3299 return cgroup_scan_tasks(&scan);
3300 }
3301
3302 /*
3303 * Stuff for reading the 'tasks'/'procs' files.
3304 *
3305 * Reading this file can return large amounts of data if a cgroup has
3306 * *lots* of attached tasks. So it may need several calls to read(),
3307 * but we cannot guarantee that the information we produce is correct
3308 * unless we produce it entirely atomically.
3309 *
3310 */
3311
3312 /* which pidlist file are we talking about? */
3313 enum cgroup_filetype {
3314 CGROUP_FILE_PROCS,
3315 CGROUP_FILE_TASKS,
3316 };
3317
3318 /*
3319 * A pidlist is a list of pids that virtually represents the contents of one
3320 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
3321 * a pair (one each for procs, tasks) for each pid namespace that's relevant
3322 * to the cgroup.
3323 */
3324 struct cgroup_pidlist {
3325 /*
3326 * used to find which pidlist is wanted. doesn't change as long as
3327 * this particular list stays in the list.
3328 */
3329 struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
3330 /* array of xids */
3331 pid_t *list;
3332 /* how many elements the above list has */
3333 int length;
3334 /* how many files are using the current array */
3335 int use_count;
3336 /* each of these stored in a list by its cgroup */
3337 struct list_head links;
3338 /* pointer to the cgroup we belong to, for list removal purposes */
3339 struct cgroup *owner;
3340 /* protects the other fields */
3341 struct rw_semaphore mutex;
3342 };
3343
3344 /*
3345 * The following two functions "fix" the issue where there are more pids
3346 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
3347 * TODO: replace with a kernel-wide solution to this problem
3348 */
3349 #define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
3350 static void *pidlist_allocate(int count)
3351 {
3352 if (PIDLIST_TOO_LARGE(count))
3353 return vmalloc(count * sizeof(pid_t));
3354 else
3355 return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
3356 }
3357 static void pidlist_free(void *p)
3358 {
3359 if (is_vmalloc_addr(p))
3360 vfree(p);
3361 else
3362 kfree(p);
3363 }
3364
3365 /*
3366 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
3367 * Returns the number of unique elements.
3368 */
3369 static int pidlist_uniq(pid_t *list, int length)
3370 {
3371 int src, dest = 1;
3372
3373 /*
3374 * we presume the 0th element is unique, so src starts at 1. trivial
3375 * edge cases first; no work needs to be done for either.
3376 */
3377 if (length == 0 || length == 1)
3378 return length;
3379 /* src and dest walk down the list; dest counts unique elements */
3380 for (src = 1; src < length; src++) {
3381 /* find next unique element */
3382 while (list[src] == list[src-1]) {
3383 src++;
3384 if (src == length)
3385 goto after;
3386 }
3387 /* dest always points to where the next unique element goes */
3388 list[dest] = list[src];
3389 dest++;
3390 }
3391 after:
3392 return dest;
3393 }
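/*
 * For example, the sorted input { 1, 1, 2, 3, 3, 3 } is compacted in
 * place so the first three slots hold { 1, 2, 3 } and 3 is returned;
 * slots past the returned length are left stale.
 */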
3394
3395 static int cmppid(const void *a, const void *b)
3396 {
3397 return *(pid_t *)a - *(pid_t *)b;
3398 }
3399
3400 /*
3401 * find the appropriate pidlist for our purpose (given procs vs tasks)
3402 * returns with the lock on that pidlist already held, and takes care
3403 * of the use count, or returns NULL with no locks held if we're out of
3404 * memory.
3405 */
3406 static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
3407 enum cgroup_filetype type)
3408 {
3409 struct cgroup_pidlist *l;
3410 /* don't need task_nsproxy() if we're looking at ourselves */
3411 struct pid_namespace *ns = task_active_pid_ns(current);
3412
3413 /*
3414 * We can't drop the pidlist_mutex before taking the l->mutex in case
3415 * the last ref-holder is trying to remove l from the list at the same
3416 * time. Holding the pidlist_mutex precludes somebody taking whichever
3417 * list we find out from under us - compare release_pid_array().
3418 */
3419 mutex_lock(&cgrp->pidlist_mutex);
3420 list_for_each_entry(l, &cgrp->pidlists, links) {
3421 if (l->key.type == type && l->key.ns == ns) {
3422 /* make sure l doesn't vanish out from under us */
3423 down_write(&l->mutex);
3424 mutex_unlock(&cgrp->pidlist_mutex);
3425 return l;
3426 }
3427 }
3428 /* entry not found; create a new one */
3429 l = kmalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
3430 if (!l) {
3431 mutex_unlock(&cgrp->pidlist_mutex);
3432 return l;
3433 }
3434 init_rwsem(&l->mutex);
3435 down_write(&l->mutex);
3436 l->key.type = type;
3437 l->key.ns = get_pid_ns(ns);
3438 l->use_count = 0; /* don't increment here */
3439 l->list = NULL;
3440 l->owner = cgrp;
3441 list_add(&l->links, &cgrp->pidlists);
3442 mutex_unlock(&cgrp->pidlist_mutex);
3443 return l;
3444 }
3445
3446 /*
3447 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
3448 */
3449 static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
3450 struct cgroup_pidlist **lp)
3451 {
3452 pid_t *array;
3453 int length;
3454 int pid, n = 0; /* used for populating the array */
3455 struct cgroup_iter it;
3456 struct task_struct *tsk;
3457 struct cgroup_pidlist *l;
3458
3459 /*
3460 * If cgroup gets more users after we read count, we won't have
3461 * enough space - tough. This race is indistinguishable to the
3462 * caller from the case that the additional cgroup users didn't
3463 * show up until sometime later on.
3464 */
3465 length = cgroup_task_count(cgrp);
3466 array = pidlist_allocate(length);
3467 if (!array)
3468 return -ENOMEM;
3469 /* now, populate the array */
3470 cgroup_iter_start(cgrp, &it);
3471 while ((tsk = cgroup_iter_next(cgrp, &it))) {
3472 if (unlikely(n == length))
3473 break;
3474 /* get tgid or pid for procs or tasks file respectively */
3475 if (type == CGROUP_FILE_PROCS)
3476 pid = task_tgid_vnr(tsk);
3477 else
3478 pid = task_pid_vnr(tsk);
3479 if (pid > 0) /* make sure to only use valid results */
3480 array[n++] = pid;
3481 }
3482 cgroup_iter_end(cgrp, &it);
3483 length = n;
3484 /* now sort & (if procs) strip out duplicates */
3485 sort(array, length, sizeof(pid_t), cmppid, NULL);
3486 if (type == CGROUP_FILE_PROCS)
3487 length = pidlist_uniq(array, length);
3488 l = cgroup_pidlist_find(cgrp, type);
3489 if (!l) {
3490 pidlist_free(array);
3491 return -ENOMEM;
3492 }
3493 /* store array, freeing old if necessary - lock already held */
3494 pidlist_free(l->list);
3495 l->list = array;
3496 l->length = length;
3497 l->use_count++;
3498 up_write(&l->mutex);
3499 *lp = l;
3500 return 0;
3501 }
3502
3503 /**
3504 * cgroupstats_build - build and fill cgroupstats
3505 * @stats: cgroupstats to fill information into
3506 * @dentry: A dentry entry belonging to the cgroup for which stats have
3507 * been requested.
3508 *
3509 * Build and fill cgroupstats so that taskstats can export it to user
3510 * space.
3511 */
3512 int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
3513 {
3514 int ret = -EINVAL;
3515 struct cgroup *cgrp;
3516 struct cgroup_iter it;
3517 struct task_struct *tsk;
3518
3519 /*
3520 * Validate dentry by checking the superblock operations,
3521 * and make sure it's a directory.
3522 */
3523 if (dentry->d_sb->s_op != &cgroup_ops ||
3524 !S_ISDIR(dentry->d_inode->i_mode))
3525 goto err;
3526
3527 ret = 0;
3528 cgrp = dentry->d_fsdata;
3529
3530 cgroup_iter_start(cgrp, &it);
3531 while ((tsk = cgroup_iter_next(cgrp, &it))) {
3532 switch (tsk->state) {
3533 case TASK_RUNNING:
3534 stats->nr_running++;
3535 break;
3536 case TASK_INTERRUPTIBLE:
3537 stats->nr_sleeping++;
3538 break;
3539 case TASK_UNINTERRUPTIBLE:
3540 stats->nr_uninterruptible++;
3541 break;
3542 case TASK_STOPPED:
3543 stats->nr_stopped++;
3544 break;
3545 default:
3546 if (delayacct_is_task_waiting_on_io(tsk))
3547 stats->nr_io_wait++;
3548 break;
3549 }
3550 }
3551 cgroup_iter_end(cgrp, &it);
3552
3553 err:
3554 return ret;
3555 }
3556
3557
3558 /*
3559 * seq_file methods for the tasks/procs files. The seq_file position is the
3560 * next pid to display; the seq_file iterator is a pointer to the pid
3561 * in the cgroup->l->list array.
3562 */
3563
3564 static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
3565 {
3566 /*
3567 * Initially we receive a position value that corresponds to
3568 * one more than the last pid shown (or 0 on the first call or
3569 * after a seek to the start). Use a binary search to find the
3570 * next pid to display, if any.
3571 */
3572 struct cgroup_pidlist *l = s->private;
3573 int index = 0, pid = *pos;
3574 int *iter;
3575
3576 down_read(&l->mutex);
3577 if (pid) {
3578 int end = l->length;
3579
3580 while (index < end) {
3581 int mid = (index + end) / 2;
3582 if (l->list[mid] == pid) {
3583 index = mid;
3584 break;
3585 } else if (l->list[mid] <= pid)
3586 index = mid + 1;
3587 else
3588 end = mid;
3589 }
3590 }
3591 /* If we're off the end of the array, we're done */
3592 if (index >= l->length)
3593 return NULL;
3594 /* Update the abstract position to be the actual pid that we found */
3595 iter = l->list + index;
3596 *pos = *iter;
3597 return iter;
3598 }
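/*
 * For example, with l->list = { 3, 5, 9 }: a restart at *pos == 6 lands
 * on index 2 and resumes with pid 9, while *pos == 10 walks off the end
 * and ends the sequence.
 */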
3599
3600 static void cgroup_pidlist_stop(struct seq_file *s, void *v)
3601 {
3602 struct cgroup_pidlist *l = s->private;
3603 up_read(&l->mutex);
3604 }
3605
3606 static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
3607 {
3608 struct cgroup_pidlist *l = s->private;
3609 pid_t *p = v;
3610 pid_t *end = l->list + l->length;
3611 /*
3612 * Advance to the next pid in the array. If this goes off the
3613 * end, we're done
3614 */
3615 p++;
3616 if (p >= end) {
3617 return NULL;
3618 } else {
3619 *pos = *p;
3620 return p;
3621 }
3622 }
3623
3624 static int cgroup_pidlist_show(struct seq_file *s, void *v)
3625 {
3626 return seq_printf(s, "%d\n", *(int *)v);
3627 }
3628
3629 /*
3630 * seq_operations functions for iterating on pidlists through seq_file -
3631 * independent of whether it's tasks or procs
3632 */
3633 static const struct seq_operations cgroup_pidlist_seq_operations = {
3634 .start = cgroup_pidlist_start,
3635 .stop = cgroup_pidlist_stop,
3636 .next = cgroup_pidlist_next,
3637 .show = cgroup_pidlist_show,
3638 };
3639
3640 static void cgroup_release_pid_array(struct cgroup_pidlist *l)
3641 {
3642 /*
3643 * the case where we're the last user of this particular pidlist will
3644 * have us remove it from the cgroup's list, which entails taking the
3645 * mutex. since cgroup_pidlist_find() takes l->mutex while already
3646 * holding cgroup->pidlist_mutex, we have to take pidlist_mutex first.
3647 */
3648 mutex_lock(&l->owner->pidlist_mutex);
3649 down_write(&l->mutex);
3650 BUG_ON(!l->use_count);
3651 if (!--l->use_count) {
3652 /* we're the last user if refcount is 0; remove and free */
3653 list_del(&l->links);
3654 mutex_unlock(&l->owner->pidlist_mutex);
3655 pidlist_free(l->list);
3656 put_pid_ns(l->key.ns);
3657 up_write(&l->mutex);
3658 kfree(l);
3659 return;
3660 }
3661 mutex_unlock(&l->owner->pidlist_mutex);
3662 up_write(&l->mutex);
3663 }
3664
3665 static int cgroup_pidlist_release(struct inode *inode, struct file *file)
3666 {
3667 struct cgroup_pidlist *l;
3668 if (!(file->f_mode & FMODE_READ))
3669 return 0;
3670 /*
3671 * the seq_file is only set up if the file was opened for reading;
3672 * the FMODE_READ check above means private_data is valid here.
3673 */
3674 l = ((struct seq_file *)file->private_data)->private;
3675 cgroup_release_pid_array(l);
3676 return seq_release(inode, file);
3677 }
3678
3679 static const struct file_operations cgroup_pidlist_operations = {
3680 .read = seq_read,
3681 .llseek = seq_lseek,
3682 .write = cgroup_file_write,
3683 .release = cgroup_pidlist_release,
3684 };
3685
3686 /*
3687 * The following functions handle opens on a file that displays a pidlist
3688 * (tasks or procs). Prepare an array of the process/thread IDs of whoever's
3689 * in the cgroup.
3690 */
3691 /* helper function for the two below it */
3692 static int cgroup_pidlist_open(struct file *file, enum cgroup_filetype type)
3693 {
3694 struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
3695 struct cgroup_pidlist *l;
3696 int retval;
3697
3698 /* Nothing to do for write-only files */
3699 if (!(file->f_mode & FMODE_READ))
3700 return 0;
3701
3702 /* have the array populated */
3703 retval = pidlist_array_load(cgrp, type, &l);
3704 if (retval)
3705 return retval;
3706 /* configure file information */
3707 file->f_op = &cgroup_pidlist_operations;
3708
3709 retval = seq_open(file, &cgroup_pidlist_seq_operations);
3710 if (retval) {
3711 cgroup_release_pid_array(l);
3712 return retval;
3713 }
3714 ((struct seq_file *)file->private_data)->private = l;
3715 return 0;
3716 }
3717 static int cgroup_tasks_open(struct inode *unused, struct file *file)
3718 {
3719 return cgroup_pidlist_open(file, CGROUP_FILE_TASKS);
3720 }
3721 static int cgroup_procs_open(struct inode *unused, struct file *file)
3722 {
3723 return cgroup_pidlist_open(file, CGROUP_FILE_PROCS);
3724 }
3725
3726 static u64 cgroup_read_notify_on_release(struct cgroup *cgrp,
3727 struct cftype *cft)
3728 {
3729 return notify_on_release(cgrp);
3730 }
3731
3732 static int cgroup_write_notify_on_release(struct cgroup *cgrp,
3733 struct cftype *cft,
3734 u64 val)
3735 {
3736 clear_bit(CGRP_RELEASABLE, &cgrp->flags);
3737 if (val)
3738 set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
3739 else
3740 clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
3741 return 0;
3742 }
3743
3744 /*
3745 * When dput() is called asynchronously, if umount has already been done
3746 * and deactivate_super() in cgroup_free_fn() has killed the superblock,
3747 * there's a small window where the VFS will see the root dentry with a
3748 * non-zero refcnt and trigger a BUG().
3749 *
3750 * That's why we hold a reference before dput() and drop it right after.
3751 */
3752 static void cgroup_dput(struct cgroup *cgrp)
3753 {
3754 struct super_block *sb = cgrp->root->sb;
3755
3756 atomic_inc(&sb->s_active);
3757 dput(cgrp->dentry);
3758 deactivate_super(sb);
3759 }
3760
3761 /*
3762 * Unregister event and free resources.
3763 *
3764 * Gets called from workqueue.
3765 */
3766 static void cgroup_event_remove(struct work_struct *work)
3767 {
3768 struct cgroup_event *event = container_of(work, struct cgroup_event,
3769 remove);
3770 struct cgroup *cgrp = event->cgrp;
3771
3772 remove_wait_queue(event->wqh, &event->wait);
3773
3774 event->cft->unregister_event(cgrp, event->cft, event->eventfd);
3775
3776 /* Notify userspace the event is going away. */
3777 eventfd_signal(event->eventfd, 1);
3778
3779 eventfd_ctx_put(event->eventfd);
3780 kfree(event);
3781 cgroup_dput(cgrp);
3782 }
3783
3784 /*
3785 * Gets called on POLLHUP on eventfd when user closes it.
3786 *
3787 * Called with wqh->lock held and interrupts disabled.
3788 */
3789 static int cgroup_event_wake(wait_queue_t *wait, unsigned mode,
3790 int sync, void *key)
3791 {
3792 struct cgroup_event *event = container_of(wait,
3793 struct cgroup_event, wait);
3794 struct cgroup *cgrp = event->cgrp;
3795 unsigned long flags = (unsigned long)key;
3796
3797 if (flags & POLLHUP) {
3798 /*
3799 * If the event has been detached at cgroup removal, we
3800 * can simply return knowing the other side will cleanup
3801 * for us.
3802 *
3803 * We can't race against event freeing since the other
3804 * side will require wqh->lock via remove_wait_queue(),
3805 * which we hold.
3806 */
3807 spin_lock(&cgrp->event_list_lock);
3808 if (!list_empty(&event->list)) {
3809 list_del_init(&event->list);
3810 /*
3811 * We are in atomic context, but cgroup_event_remove()
3812 * may sleep, so we have to run it from a workqueue.
3813 */
3814 schedule_work(&event->remove);
3815 }
3816 spin_unlock(&cgrp->event_list_lock);
3817 }
3818
3819 return 0;
3820 }
3821
3822 static void cgroup_event_ptable_queue_proc(struct file *file,
3823 wait_queue_head_t *wqh, poll_table *pt)
3824 {
3825 struct cgroup_event *event = container_of(pt,
3826 struct cgroup_event, pt);
3827
3828 event->wqh = wqh;
3829 add_wait_queue(wqh, &event->wait);
3830 }
3831
3832 /*
3833 * Parse input and register new cgroup event handler.
3834 *
3835 * Input must be in format '<event_fd> <control_fd> <args>'.
3836 * Interpretation of args is defined by control file implementation.
3837 */
3838 static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
3839 const char *buffer)
3840 {
3841 struct cgroup_event *event = NULL;
3842 struct cgroup *cgrp_cfile;
3843 unsigned int efd, cfd;
3844 struct file *efile = NULL;
3845 struct file *cfile = NULL;
3846 char *endp;
3847 int ret;
3848
3849 efd = simple_strtoul(buffer, &endp, 10);
3850 if (*endp != ' ')
3851 return -EINVAL;
3852 buffer = endp + 1;
3853
3854 cfd = simple_strtoul(buffer, &endp, 10);
3855 if ((*endp != ' ') && (*endp != '\0'))
3856 return -EINVAL;
3857 buffer = endp + 1;
3858
3859 event = kzalloc(sizeof(*event), GFP_KERNEL);
3860 if (!event)
3861 return -ENOMEM;
3862 event->cgrp = cgrp;
3863 INIT_LIST_HEAD(&event->list);
3864 init_poll_funcptr(&event->pt, cgroup_event_ptable_queue_proc);
3865 init_waitqueue_func_entry(&event->wait, cgroup_event_wake);
3866 INIT_WORK(&event->remove, cgroup_event_remove);
3867
3868 efile = eventfd_fget(efd);
3869 if (IS_ERR(efile)) {
3870 ret = PTR_ERR(efile);
3871 goto fail;
3872 }
3873
3874 event->eventfd = eventfd_ctx_fileget(efile);
3875 if (IS_ERR(event->eventfd)) {
3876 ret = PTR_ERR(event->eventfd);
3877 goto fail;
3878 }
3879
3880 cfile = fget(cfd);
3881 if (!cfile) {
3882 ret = -EBADF;
3883 goto fail;
3884 }
3885
3886 /* the process needs read permission on the control file */
3887 /* AV: shouldn't we check that it's been opened for read instead? */
3888 ret = inode_permission(file_inode(cfile), MAY_READ);
3889 if (ret < 0)
3890 goto fail;
3891
3892 event->cft = __file_cft(cfile);
3893 if (IS_ERR(event->cft)) {
3894 ret = PTR_ERR(event->cft);
3895 goto fail;
3896 }
3897
3898 /*
3899 * The file to be monitored must be in the same cgroup as
3900 * cgroup.event_control is.
3901 */
3902 cgrp_cfile = __d_cgrp(cfile->f_dentry->d_parent);
3903 if (cgrp_cfile != cgrp) {
3904 ret = -EINVAL;
3905 goto fail;
3906 }
3907
3908 if (!event->cft->register_event || !event->cft->unregister_event) {
3909 ret = -EINVAL;
3910 goto fail;
3911 }
3912
3913 ret = event->cft->register_event(cgrp, event->cft,
3914 event->eventfd, buffer);
3915 if (ret)
3916 goto fail;
3917
3918 efile->f_op->poll(efile, &event->pt);
3919
3920 /*
3921 * Events should be removed after rmdir of cgroup directory, but before
3922 * destroying subsystem state objects. Let's take a reference to the
3923 * cgroup directory dentry to do that.
3924 */
3925 dget(cgrp->dentry);
3926
3927 spin_lock(&cgrp->event_list_lock);
3928 list_add(&event->list, &cgrp->event_list);
3929 spin_unlock(&cgrp->event_list_lock);
3930
3931 fput(cfile);
3932 fput(efile);
3933
3934 return 0;
3935
3936 fail:
3937 if (cfile)
3938 fput(cfile);
3939
3940 if (event && event->eventfd && !IS_ERR(event->eventfd))
3941 eventfd_ctx_put(event->eventfd);
3942
3943 if (!IS_ERR_OR_NULL(efile))
3944 fput(efile);
3945
3946 kfree(event);
3947
3948 return ret;
3949 }
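
/*
 * Illustrative userspace sketch of the registration above; the eventfd
 * and the control-file name/threshold are hypothetical examples:
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("memory.usage_in_bytes", O_RDONLY);
 *	int wfd = open("cgroup.event_control", O_WRONLY);
 *	char buf[32];
 *	uint64_t cnt;
 *
 *	snprintf(buf, sizeof(buf), "%d %d %s", efd, cfd, "4194304");
 *	write(wfd, buf, strlen(buf));	(writes "<event_fd> <control_fd> <args>")
 *	read(efd, &cnt, sizeof(cnt));	(blocks until the event triggers)
 */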
3950
3951 static u64 cgroup_clone_children_read(struct cgroup *cgrp,
3952 struct cftype *cft)
3953 {
3954 return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
3955 }
3956
3957 static int cgroup_clone_children_write(struct cgroup *cgrp,
3958 struct cftype *cft,
3959 u64 val)
3960 {
3961 if (val)
3962 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
3963 else
3964 clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
3965 return 0;
3966 }
3967
3968 /*
3969 * for the common functions, 'private' gives the type of file
3970 */
3971 /* for hysterical raisins, we can't put this on the older files */
3972 #define CGROUP_FILE_GENERIC_PREFIX "cgroup."
3973 static struct cftype files[] = {
3974 {
3975 .name = "tasks",
3976 .open = cgroup_tasks_open,
3977 .write_u64 = cgroup_tasks_write,
3978 .release = cgroup_pidlist_release,
3979 .mode = S_IRUGO | S_IWUSR,
3980 },
3981 {
3982 .name = CGROUP_FILE_GENERIC_PREFIX "procs",
3983 .open = cgroup_procs_open,
3984 .write_u64 = cgroup_procs_write,
3985 .release = cgroup_pidlist_release,
3986 .mode = S_IRUGO | S_IWUSR,
3987 },
3988 {
3989 .name = "notify_on_release",
3990 .read_u64 = cgroup_read_notify_on_release,
3991 .write_u64 = cgroup_write_notify_on_release,
3992 },
3993 {
3994 .name = CGROUP_FILE_GENERIC_PREFIX "event_control",
3995 .write_string = cgroup_write_event_control,
3996 .mode = S_IWUGO,
3997 },
3998 {
3999 .name = "cgroup.clone_children",
4000 .flags = CFTYPE_INSANE,
4001 .read_u64 = cgroup_clone_children_read,
4002 .write_u64 = cgroup_clone_children_write,
4003 },
4004 {
4005 .name = "cgroup.sane_behavior",
4006 .flags = CFTYPE_ONLY_ON_ROOT,
4007 .read_seq_string = cgroup_sane_behavior_show,
4008 },
4009 {
4010 .name = "release_agent",
4011 .flags = CFTYPE_ONLY_ON_ROOT,
4012 .read_seq_string = cgroup_release_agent_show,
4013 .write_string = cgroup_release_agent_write,
4014 .max_write_len = PATH_MAX,
4015 },
4016 { } /* terminate */
4017 };
4018
4019 /**
4020 * cgroup_populate_dir - selectively create files in a directory
4021 * @cgrp: target cgroup
4022 * @base_files: true if the base files should be added
4023 * @subsys_mask: mask of the subsystem ids whose files should be added
4024 */
4025 static int cgroup_populate_dir(struct cgroup *cgrp, bool base_files,
4026 unsigned long subsys_mask)
4027 {
4028 int err;
4029 struct cgroup_subsys *ss;
4030
4031 if (base_files) {
4032 err = cgroup_addrm_files(cgrp, NULL, files, true);
4033 if (err < 0)
4034 return err;
4035 }
4036
4037 /* process cftsets of each subsystem */
4038 for_each_subsys(cgrp->root, ss) {
4039 struct cftype_set *set;
4040 if (!test_bit(ss->subsys_id, &subsys_mask))
4041 continue;
4042
4043 list_for_each_entry(set, &ss->cftsets, node)
4044 cgroup_addrm_files(cgrp, ss, set->cfts, true);
4045 }
4046
4047 /* This cgroup is ready now */
4048 for_each_subsys(cgrp->root, ss) {
4049 struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
4050 /*
4051 * Update id->css pointer and make this css visible from
4052 * CSS ID functions. This pointer will be dereferenced
4053 * from the RCU read side without locks.
4054 */
4055 if (css->id)
4056 rcu_assign_pointer(css->id->css, css);
4057 }
4058
4059 return 0;
4060 }
4061
4062 static void css_dput_fn(struct work_struct *work)
4063 {
4064 struct cgroup_subsys_state *css =
4065 container_of(work, struct cgroup_subsys_state, dput_work);
4066
4067 cgroup_dput(css->cgroup);
4068 }
4069
4070 static void init_cgroup_css(struct cgroup_subsys_state *css,
4071 struct cgroup_subsys *ss,
4072 struct cgroup *cgrp)
4073 {
4074 css->cgroup = cgrp;
4075 atomic_set(&css->refcnt, 1);
4076 css->flags = 0;
4077 css->id = NULL;
4078 if (cgrp == dummytop)
4079 css->flags |= CSS_ROOT;
4080 BUG_ON(cgrp->subsys[ss->subsys_id]);
4081 cgrp->subsys[ss->subsys_id] = css;
4082
4083 /*
4084 * css holds an extra ref to @cgrp->dentry which is put on the last
4085 * css_put(). dput() requires process context, which css_put() may
4086 * be called without. @css->dput_work will be used to invoke
4087 * dput() asynchronously from css_put().
4088 */
4089 INIT_WORK(&css->dput_work, css_dput_fn);
4090 }
4091
4092 /* invoke ->post_create() on a new CSS and mark it online if successful */
4093 static int online_css(struct cgroup_subsys *ss, struct cgroup *cgrp)
4094 {
4095 int ret = 0;
4096
4097 lockdep_assert_held(&cgroup_mutex);
4098
4099 if (ss->css_online)
4100 ret = ss->css_online(cgrp);
4101 if (!ret)
4102 cgrp->subsys[ss->subsys_id]->flags |= CSS_ONLINE;
4103 return ret;
4104 }
4105
4106 /* if the CSS is online, invoke ->css_offline() on it and mark it offline */
4107 static void offline_css(struct cgroup_subsys *ss, struct cgroup *cgrp)
4108 __releases(&cgroup_mutex) __acquires(&cgroup_mutex)
4109 {
4110 struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
4111
4112 lockdep_assert_held(&cgroup_mutex);
4113
4114 if (!(css->flags & CSS_ONLINE))
4115 return;
4116
4117 if (ss->css_offline)
4118 ss->css_offline(cgrp);
4119
4120 cgrp->subsys[ss->subsys_id]->flags &= ~CSS_ONLINE;
4121 }
4122
4123 /*
4124 * cgroup_create - create a cgroup
4125 * @parent: cgroup that will be parent of the new cgroup
4126 * @dentry: dentry of the new cgroup
4127 * @mode: mode to set on new inode
4128 *
4129 * Must be called with the mutex on the parent inode held
4130 */
4131 static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
4132 umode_t mode)
4133 {
4134 struct cgroup *cgrp;
4135 struct cgroup_name *name;
4136 struct cgroupfs_root *root = parent->root;
4137 int err = 0;
4138 struct cgroup_subsys *ss;
4139 struct super_block *sb = root->sb;
4140
4141 /* allocate the cgroup and its ID, 0 is reserved for the root */
4142 cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
4143 if (!cgrp)
4144 return -ENOMEM;
4145
4146 name = cgroup_alloc_name(dentry);
4147 if (!name)
4148 goto err_free_cgrp;
4149 rcu_assign_pointer(cgrp->name, name);
4150
4151 cgrp->id = ida_simple_get(&root->cgroup_ida, 1, 0, GFP_KERNEL);
4152 if (cgrp->id < 0)
4153 goto err_free_name;
4154
4155 /*
4156 * Only live parents can have children. Note that the liveness
4157 * check isn't strictly necessary because cgroup_mkdir() and
4158 * cgroup_rmdir() are fully synchronized by i_mutex; however, do it
4159 * anyway so that locking is contained inside cgroup proper and we
4160 * don't get nasty surprises if we ever grow another caller.
4161 */
4162 if (!cgroup_lock_live_group(parent)) {
4163 err = -ENODEV;
4164 goto err_free_id;
4165 }
4166
4167 /* Grab a reference on the superblock so the hierarchy doesn't
4168 * get deleted on unmount if there are child cgroups. This
4169 * can be done outside cgroup_mutex, since the sb can't
4170 * disappear while someone has an open control file on the
4171 * fs */
4172 atomic_inc(&sb->s_active);
4173
4174 init_cgroup_housekeeping(cgrp);
4175
4176 dentry->d_fsdata = cgrp;
4177 cgrp->dentry = dentry;
4178
4179 cgrp->parent = parent;
4180 cgrp->root = parent->root;
4181
4182 if (notify_on_release(parent))
4183 set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
4184
4185 if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
4186 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
4187
4188 for_each_subsys(root, ss) {
4189 struct cgroup_subsys_state *css;
4190
4191 css = ss->css_alloc(cgrp);
4192 if (IS_ERR(css)) {
4193 err = PTR_ERR(css);
4194 goto err_free_all;
4195 }
4196 init_cgroup_css(css, ss, cgrp);
4197 if (ss->use_id) {
4198 err = alloc_css_id(ss, parent, cgrp);
4199 if (err)
4200 goto err_free_all;
4201 }
4202 }
4203
4204 /*
4205 * Create directory. cgroup_create_file() returns with the new
4206 * directory locked on success so that it can be populated without
4207 * dropping cgroup_mutex.
4208 */
4209 err = cgroup_create_file(dentry, S_IFDIR | mode, sb);
4210 if (err < 0)
4211 goto err_free_all;
4212 lockdep_assert_held(&dentry->d_inode->i_mutex);
4213
4214 /* allocation complete, commit to creation */
4215 list_add_tail(&cgrp->allcg_node, &root->allcg_list);
4216 list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
4217 root->number_of_cgroups++;
4218
4219 /* each css holds a ref to the cgroup's dentry */
4220 for_each_subsys(root, ss)
4221 dget(dentry);
4222
4223 /* hold a ref to the parent's dentry */
4224 dget(parent->dentry);
4225
4226 /* creation succeeded, notify subsystems */
4227 for_each_subsys(root, ss) {
4228 err = online_css(ss, cgrp);
4229 if (err)
4230 goto err_destroy;
4231
4232 if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
4233 parent->parent) {
4234 pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
4235 current->comm, current->pid, ss->name);
4236 if (!strcmp(ss->name, "memory"))
4237 pr_warning("cgroup: \"memory\" requires setting use_hierarchy to 1 on the root.\n");
4238 ss->warned_broken_hierarchy = true;
4239 }
4240 }
4241
4242 err = cgroup_populate_dir(cgrp, true, root->subsys_mask);
4243 if (err)
4244 goto err_destroy;
4245
4246 mutex_unlock(&cgroup_mutex);
4247 mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
4248
4249 return 0;
4250
4251 err_free_all:
4252 for_each_subsys(root, ss) {
4253 if (cgrp->subsys[ss->subsys_id])
4254 ss->css_free(cgrp);
4255 }
4256 mutex_unlock(&cgroup_mutex);
4257 /* Release the reference count that we took on the superblock */
4258 deactivate_super(sb);
4259 err_free_id:
4260 ida_simple_remove(&root->cgroup_ida, cgrp->id);
4261 err_free_name:
4262 kfree(rcu_dereference_raw(cgrp->name));
4263 err_free_cgrp:
4264 kfree(cgrp);
4265 return err;
4266
4267 err_destroy:
4268 cgroup_destroy_locked(cgrp);
4269 mutex_unlock(&cgroup_mutex);
4270 mutex_unlock(&dentry->d_inode->i_mutex);
4271 return err;
4272 }
4273
4274 static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
4275 {
4276 struct cgroup *c_parent = dentry->d_parent->d_fsdata;
4277
4278 /* the vfs holds inode->i_mutex already */
4279 return cgroup_create(c_parent, dentry, mode | S_IFDIR);
4280 }
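
/*
 * From userspace this path is reached by an ordinary mkdir(2) on a
 * mounted hierarchy; the mount point and names below are illustrative:
 *
 *	mount -t cgroup -o cpu none /sys/fs/cgroup/cpu
 *	mkdir /sys/fs/cgroup/cpu/mygroup
 *		-> cgroup_mkdir() -> cgroup_create()
 */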
4281
4282 static int cgroup_destroy_locked(struct cgroup *cgrp)
4283 __releases(&cgroup_mutex) __acquires(&cgroup_mutex)
4284 {
4285 struct dentry *d = cgrp->dentry;
4286 struct cgroup *parent = cgrp->parent;
4287 struct cgroup_event *event, *tmp;
4288 struct cgroup_subsys *ss;
4289
4290 lockdep_assert_held(&d->d_inode->i_mutex);
4291 lockdep_assert_held(&cgroup_mutex);
4292
4293 if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children))
4294 return -EBUSY;
4295
4296 /*
4297 * Block new css_tryget() by deactivating refcnt and mark @cgrp
4298 * removed. This makes future css_tryget() and child creation
4299 * attempts fail thus maintaining the removal conditions verified
4300 * above.
4301 */
4302 for_each_subsys(cgrp->root, ss) {
4303 struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
4304
4305 WARN_ON(atomic_read(&css->refcnt) < 0);
4306 atomic_add(CSS_DEACT_BIAS, &css->refcnt);
4307 }
4308 set_bit(CGRP_REMOVED, &cgrp->flags);
4309
4310 /* tell subsystems to initiate destruction */
4311 for_each_subsys(cgrp->root, ss)
4312 offline_css(ss, cgrp);
4313
4314 /*
4315 * Put all the base refs. Each css holds an extra reference to the
4316 * cgroup's dentry and cgroup removal proceeds regardless of css
4317 * refs. On the last put of each css, whenever that may be, the
4318 * extra dentry ref is put so that dentry destruction happens only
4319 * after all css's are released.
4320 */
4321 for_each_subsys(cgrp->root, ss)
4322 css_put(cgrp->subsys[ss->subsys_id]);
4323
4324 raw_spin_lock(&release_list_lock);
4325 if (!list_empty(&cgrp->release_list))
4326 list_del_init(&cgrp->release_list);
4327 raw_spin_unlock(&release_list_lock);
4328
4329 /* delete this cgroup from parent->children */
4330 list_del_rcu(&cgrp->sibling);
4331 list_del_init(&cgrp->allcg_node);
4332
4333 dget(d);
4334 cgroup_d_remove_dir(d);
4335 dput(d);
4336
4337 set_bit(CGRP_RELEASABLE, &parent->flags);
4338 check_for_release(parent);
4339
4340 /*
4341 * Unregister events and notify userspace.
4342 * Notify userspace about cgroup removing only after rmdir of cgroup
4343 * directory to avoid race between userspace and kernelspace.
4344 */
4345 spin_lock(&cgrp->event_list_lock);
4346 list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) {
4347 list_del_init(&event->list);
4348 schedule_work(&event->remove);
4349 }
4350 spin_unlock(&cgrp->event_list_lock);
4351
4352 return 0;
4353 }
4354
4355 static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
4356 {
4357 int ret;
4358
4359 mutex_lock(&cgroup_mutex);
4360 ret = cgroup_destroy_locked(dentry->d_fsdata);
4361 mutex_unlock(&cgroup_mutex);
4362
4363 return ret;
4364 }
4365
4366 static void __init_or_module cgroup_init_cftsets(struct cgroup_subsys *ss)
4367 {
4368 INIT_LIST_HEAD(&ss->cftsets);
4369
4370 /*
4371 * base_cftset is embedded in subsys itself, no need to worry about
4372 * deregistration.
4373 */
4374 if (ss->base_cftypes) {
4375 ss->base_cftset.cfts = ss->base_cftypes;
4376 list_add_tail(&ss->base_cftset.node, &ss->cftsets);
4377 }
4378 }
4379
4380 static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
4381 {
4382 struct cgroup_subsys_state *css;
4383
4384 printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);
4385
4386 mutex_lock(&cgroup_mutex);
4387
4388 /* init base cftset */
4389 cgroup_init_cftsets(ss);
4390
4391 /* Create the top cgroup state for this subsystem */
4392 list_add(&ss->sibling, &rootnode.subsys_list);
4393 ss->root = &rootnode;
4394 css = ss->css_alloc(dummytop);
4395 /* We don't handle early failures gracefully */
4396 BUG_ON(IS_ERR(css));
4397 init_cgroup_css(css, ss, dummytop);
4398
4399 /* Update the init_css_set to contain a subsys
4400 * pointer to this state - since the subsystem is
4401 * newly registered, all tasks and hence the
4402 * init_css_set is in the subsystem's top cgroup. */
4403 init_css_set.subsys[ss->subsys_id] = css;
4404
4405 need_forkexit_callback |= ss->fork || ss->exit;
4406
4407 /* At system boot, before all subsystems have been
4408 * registered, no tasks have been forked, so we don't
4409 * need to invoke fork callbacks here. */
4410 BUG_ON(!list_empty(&init_task.tasks));
4411
4412 BUG_ON(online_css(ss, dummytop));
4413
4414 mutex_unlock(&cgroup_mutex);
4415
4416 /* this function shouldn't be used with modular subsystems, since they
4417 * need to register a subsys_id, among other things */
4418 BUG_ON(ss->module);
4419 }
4420
4421 /**
4422 * cgroup_load_subsys: load and register a modular subsystem at runtime
4423 * @ss: the subsystem to load
4424 *
4425 * This function should be called in a modular subsystem's initcall. If the
4426 * subsystem is built as a module, it will be assigned a new subsys_id and set
4427 * up for use. If the subsystem is built-in anyway, work is delegated to the
4428 * simpler cgroup_init_subsys.
4429 */
4430 int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
4431 {
4432 struct cgroup_subsys_state *css;
4433 int i, ret;
4434 struct hlist_node *tmp;
4435 struct css_set *cg;
4436 unsigned long key;
4437
4438 /* check name and function validity */
4439 if (ss->name == NULL || strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN ||
4440 ss->css_alloc == NULL || ss->css_free == NULL)
4441 return -EINVAL;
4442
4443 /*
4444 * we don't support callbacks in modular subsystems. this check is
4445 * before the ss->module check for consistency; a subsystem that could
4446 * be a module should still have no callbacks even if the user isn't
4447 * compiling it as one.
4448 */
4449 if (ss->fork || ss->exit)
4450 return -EINVAL;
4451
4452 /*
4453 * an optionally modular subsystem is built-in: we want to do nothing,
4454 * since cgroup_init_subsys will have already taken care of it.
4455 */
4456 if (ss->module == NULL) {
4457 /* a sanity check */
4458 BUG_ON(subsys[ss->subsys_id] != ss);
4459 return 0;
4460 }
4461
4462 /* init base cftset */
4463 cgroup_init_cftsets(ss);
4464
4465 mutex_lock(&cgroup_mutex);
4466 subsys[ss->subsys_id] = ss;
4467
4468 /*
4469 * no ss->css_alloc seems to need anything important in the ss
4470 * struct, so this can happen first (i.e. before the rootnode
4471 * attachment).
4472 */
4473 css = ss->css_alloc(dummytop);
4474 if (IS_ERR(css)) {
4475 /* failure case - need to deassign the subsys[] slot. */
4476 subsys[ss->subsys_id] = NULL;
4477 mutex_unlock(&cgroup_mutex);
4478 return PTR_ERR(css);
4479 }
4480
4481 list_add(&ss->sibling, &rootnode.subsys_list);
4482 ss->root = &rootnode;
4483
4484 /* our new subsystem will be attached to the dummy hierarchy. */
4485 init_cgroup_css(css, ss, dummytop);
4486 /* init_idr must be after init_cgroup_css because it sets css->id. */
4487 if (ss->use_id) {
4488 ret = cgroup_init_idr(ss, css);
4489 if (ret)
4490 goto err_unload;
4491 }
4492
4493 /*
4494 * Now we need to entangle the css into the existing css_sets. unlike
4495 * in cgroup_init_subsys, there are now multiple css_sets, so each one
4496 * will need a new pointer to it; done by iterating the css_set_table.
4497 * furthermore, modifying the existing css_sets will corrupt the hash
4498 * table state, so each changed css_set will need its hash recomputed.
4499 * this is all done under the css_set_lock.
4500 */
4501 write_lock(&css_set_lock);
4502 hash_for_each_safe(css_set_table, i, tmp, cg, hlist) {
4503 /* skip entries that we already rehashed */
4504 if (cg->subsys[ss->subsys_id])
4505 continue;
4506 /* remove existing entry */
4507 hash_del(&cg->hlist);
4508 /* set new value */
4509 cg->subsys[ss->subsys_id] = css;
4510 /* recompute hash and restore entry */
4511 key = css_set_hash(cg->subsys);
4512 hash_add(css_set_table, &cg->hlist, key);
4513 }
4514 write_unlock(&css_set_lock);
4515
4516 ret = online_css(ss, dummytop);
4517 if (ret)
4518 goto err_unload;
4519
4520 /* success! */
4521 mutex_unlock(&cgroup_mutex);
4522 return 0;
4523
4524 err_unload:
4525 mutex_unlock(&cgroup_mutex);
4526 /* @ss can't be mounted here as try_module_get() would fail */
4527 cgroup_unload_subsys(ss);
4528 return ret;
4529 }
4530 EXPORT_SYMBOL_GPL(cgroup_load_subsys);
4531
4532 /**
4533 * cgroup_unload_subsys: unload a modular subsystem
4534 * @ss: the subsystem to unload
4535 *
4536 * This function should be called in a modular subsystem's exitcall. When this
4537 * function is invoked, the refcount on the subsystem's module will be 0, so
4538 * the subsystem will not be attached to any hierarchy.
4539 */
4540 void cgroup_unload_subsys(struct cgroup_subsys *ss)
4541 {
4542 struct cg_cgroup_link *link;
4543
4544 BUG_ON(ss->module == NULL);
4545
4546 /*
4547 * we shouldn't be called if the subsystem is in use, and the use of
4548 * try_module_get in parse_cgroupfs_options should ensure that it
4549 * doesn't start being used while we're killing it off.
4550 */
4551 BUG_ON(ss->root != &rootnode);
4552
4553 mutex_lock(&cgroup_mutex);
4554
4555 offline_css(ss, dummytop);
4556
4557 if (ss->use_id)
4558 idr_destroy(&ss->idr);
4559
4560 /* deassign the subsys_id */
4561 subsys[ss->subsys_id] = NULL;
4562
4563 /* remove subsystem from rootnode's list of subsystems */
4564 list_del_init(&ss->sibling);
4565
4566 /*
4567 * disentangle the css from all css_sets attached to the dummytop. as
4568 * in loading, we need to pay our respects to the hashtable gods.
4569 */
4570 write_lock(&css_set_lock);
4571 list_for_each_entry(link, &dummytop->css_sets, cgrp_link_list) {
4572 struct css_set *cg = link->cg;
4573 unsigned long key;
4574
4575 hash_del(&cg->hlist);
4576 cg->subsys[ss->subsys_id] = NULL;
4577 key = css_set_hash(cg->subsys);
4578 hash_add(css_set_table, &cg->hlist, key);
4579 }
4580 write_unlock(&css_set_lock);
4581
4582 /*
4583 * remove subsystem's css from the dummytop and free it - need to
4584 * free before marking as null because ss->css_free needs the
4585 * cgrp->subsys pointer to find its state. note that this also
4586 * takes care of freeing the css_id.
4587 */
4588 ss->css_free(dummytop);
4589 dummytop->subsys[ss->subsys_id] = NULL;
4590
4591 mutex_unlock(&cgroup_mutex);
4592 }
4593 EXPORT_SYMBOL_GPL(cgroup_unload_subsys);
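
/*
 * Illustrative module boilerplate for the load/unload pair above;
 * "foo_subsys" is a hypothetical modular controller:
 *
 *	static int __init foo_cgroup_init(void)
 *	{
 *		return cgroup_load_subsys(&foo_subsys);
 *	}
 *	module_init(foo_cgroup_init);
 *
 *	static void __exit foo_cgroup_exit(void)
 *	{
 *		cgroup_unload_subsys(&foo_subsys);
 *	}
 *	module_exit(foo_cgroup_exit);
 */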
4594
4595 /**
4596 * cgroup_init_early - cgroup initialization at system boot
4597 *
4598 * Initialize cgroups at system boot, and initialize any
4599 * subsystems that request early init.
4600 */
4601 int __init cgroup_init_early(void)
4602 {
4603 int i;
4604 atomic_set(&init_css_set.refcount, 1);
4605 INIT_LIST_HEAD(&init_css_set.cg_links);
4606 INIT_LIST_HEAD(&init_css_set.tasks);
4607 INIT_HLIST_NODE(&init_css_set.hlist);
4608 css_set_count = 1;
4609 init_cgroup_root(&rootnode);
4610 root_count = 1;
4611 init_task.cgroups = &init_css_set;
4612
4613 init_css_set_link.cg = &init_css_set;
4614 init_css_set_link.cgrp = dummytop;
4615 list_add(&init_css_set_link.cgrp_link_list,
4616 &rootnode.top_cgroup.css_sets);
4617 list_add(&init_css_set_link.cg_link_list,
4618 &init_css_set.cg_links);
4619
4620 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
4621 struct cgroup_subsys *ss = subsys[i];
4622
4623 /* at bootup time, we don't worry about modular subsystems */
4624 if (!ss || ss->module)
4625 continue;
4626
4627 BUG_ON(!ss->name);
4628 BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN);
4629 BUG_ON(!ss->css_alloc);
4630 BUG_ON(!ss->css_free);
4631 if (ss->subsys_id != i) {
4632 printk(KERN_ERR "cgroup: Subsys %s id == %d\n",
4633 ss->name, ss->subsys_id);
4634 BUG();
4635 }
4636
4637 if (ss->early_init)
4638 cgroup_init_subsys(ss);
4639 }
4640 return 0;
4641 }
4642
4643 /**
4644 * cgroup_init - cgroup initialization
4645 *
4646 * Register cgroup filesystem and /proc file, and initialize
4647 * any subsystems that didn't request early init.
4648 */
4649 int __init cgroup_init(void)
4650 {
4651 int err;
4652 int i;
4653 unsigned long key;
4654
4655 err = bdi_init(&cgroup_backing_dev_info);
4656 if (err)
4657 return err;
4658
4659 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
4660 struct cgroup_subsys *ss = subsys[i];
4661
4662 /* at bootup time, we don't worry about modular subsystems */
4663 if (!ss || ss->module)
4664 continue;
4665 if (!ss->early_init)
4666 cgroup_init_subsys(ss);
4667 if (ss->use_id)
4668 cgroup_init_idr(ss, init_css_set.subsys[ss->subsys_id]);
4669 }
4670
4671 /* Add init_css_set to the hash table */
4672 key = css_set_hash(init_css_set.subsys);
4673 hash_add(css_set_table, &init_css_set.hlist, key);
4674 BUG_ON(!init_root_id(&rootnode));
4675
4676 cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj);
4677 if (!cgroup_kobj) {
4678 err = -ENOMEM;
4679 goto out;
4680 }
4681
4682 err = register_filesystem(&cgroup_fs_type);
4683 if (err < 0) {
4684 kobject_put(cgroup_kobj);
4685 goto out;
4686 }
4687
4688 proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);
4689
4690 out:
4691 if (err)
4692 bdi_destroy(&cgroup_backing_dev_info);
4693
4694 return err;
4695 }
4696
4697 static int __init cgroup_wq_init(void)
4698 {
4699 /*
4700 * There isn't much point in executing the destruction path in
4701 * parallel. A good chunk of it is serialized by cgroup_mutex anyway.
4702 * Use 1 for @max_active.
4703 *
4704 * We would prefer to do this in cgroup_init() above, but that
4705 * is called before init_workqueues(), so leave this until after.
4706 */
4707 cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
4708 BUG_ON(!cgroup_destroy_wq);
4709 return 0;
4710 }
4711 core_initcall(cgroup_wq_init);
4712
4713 /*
4714 * proc_cgroup_show()
4715 * - Print task's cgroup paths into seq_file, one line for each hierarchy
4716 * - Used for /proc/<pid>/cgroup.
4717 * - No need to task_lock(tsk) on this tsk->cgroup reference, as it
4718 * doesn't really matter if tsk->cgroup changes after we read it,
4719 * and we take cgroup_mutex, keeping cgroup_attach_task() from changing it
4720 * anyway. No need to check that tsk->cgroup != NULL, thanks to
4721 * the_top_cgroup_hack in cgroup_exit(), which sets an exiting task's
4722 * cgroup to top_cgroup.
4723 */
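
/*
 * Sample output (illustrative; hierarchy ids, controllers and paths
 * vary), one "<id>:<subsystems>:<path>" line per active hierarchy:
 *
 *	3:cpu,cpuacct:/batch
 *	2:cpuset:/
 *	1:name=custom:/user1
 */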
4724
4725 /* TODO: Use a proper seq_file iterator */
4726 int proc_cgroup_show(struct seq_file *m, void *v)
4727 {
4728 struct pid *pid;
4729 struct task_struct *tsk;
4730 char *buf;
4731 int retval;
4732 struct cgroupfs_root *root;
4733
4734 retval = -ENOMEM;
4735 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
4736 if (!buf)
4737 goto out;
4738
4739 retval = -ESRCH;
4740 pid = m->private;
4741 tsk = get_pid_task(pid, PIDTYPE_PID);
4742 if (!tsk)
4743 goto out_free;
4744
4745 retval = 0;
4746
4747 mutex_lock(&cgroup_mutex);
4748
4749 for_each_active_root(root) {
4750 struct cgroup_subsys *ss;
4751 struct cgroup *cgrp;
4752 int count = 0;
4753
4754 seq_printf(m, "%d:", root->hierarchy_id);
4755 for_each_subsys(root, ss)
4756 seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
4757 if (strlen(root->name))
4758 seq_printf(m, "%sname=%s", count ? "," : "",
4759 root->name);
4760 seq_putc(m, ':');
4761 cgrp = task_cgroup_from_root(tsk, root);
4762 retval = cgroup_path(cgrp, buf, PAGE_SIZE);
4763 if (retval < 0)
4764 goto out_unlock;
4765 seq_puts(m, buf);
4766 seq_putc(m, '\n');
4767 }
4768
4769 out_unlock:
4770 mutex_unlock(&cgroup_mutex);
4771 put_task_struct(tsk);
4772 out_free:
4773 kfree(buf);
4774 out:
4775 return retval;
4776 }
4777
4778 /* Display information about each subsystem and each hierarchy */
4779 static int proc_cgroupstats_show(struct seq_file *m, void *v)
4780 {
4781 int i;
4782
4783 seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
4784 /*
4785 * ideally we don't want subsystems moving around while we do this.
4786 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
4787 * subsys/hierarchy state.
4788 */
4789 mutex_lock(&cgroup_mutex);
4790 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
4791 struct cgroup_subsys *ss = subsys[i];
4792 if (ss == NULL)
4793 continue;
4794 seq_printf(m, "%s\t%d\t%d\t%d\n",
4795 ss->name, ss->root->hierarchy_id,
4796 ss->root->number_of_cgroups, !ss->disabled);
4797 }
4798 mutex_unlock(&cgroup_mutex);
4799 return 0;
4800 }
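
/*
 * Sample /proc/cgroups output (illustrative values):
 *
 *	#subsys_name	hierarchy	num_cgroups	enabled
 *	cpuset		2		1		1
 *	cpu		3		14		1
 *	memory		0		1		0
 */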
4801
4802 static int cgroupstats_open(struct inode *inode, struct file *file)
4803 {
4804 return single_open(file, proc_cgroupstats_show, NULL);
4805 }
4806
4807 static const struct file_operations proc_cgroupstats_operations = {
4808 .open = cgroupstats_open,
4809 .read = seq_read,
4810 .llseek = seq_lseek,
4811 .release = single_release,
4812 };
4813
4814 /**
4815 * cgroup_fork - attach a newly forked task to its parent's cgroup.
4816 * @child: pointer to task_struct of the newly forked child process.
4817 *
4818 * Description: A task inherits its parent's cgroup at fork().
4819 *
4820 * A pointer to the shared css_set was automatically copied in
4821 * fork.c by dup_task_struct(). However, we ignore that copy, since
4822 * it was not made under the protection of RCU or cgroup_mutex, so it
4823 * might no longer be a valid css_set pointer. cgroup_attach_task() might
4824 * have already changed current->cgroups, allowing the previously
4825 * referenced css_set to be removed and freed.
4826 *
4827 * At the point that cgroup_fork() is called, 'current' is the parent
4828 * task, and the passed argument 'child' points to the child task.
4829 */
4830 void cgroup_fork(struct task_struct *child)
4831 {
4832 task_lock(current);
4833 child->cgroups = current->cgroups;
4834 get_css_set(child->cgroups);
4835 task_unlock(current);
4836 INIT_LIST_HEAD(&child->cg_list);
4837 }
4838
4839 /**
4840 * cgroup_post_fork - called on a new task after adding it to the task list
4841 * @child: the task in question
4842 *
4843 * Adds the task to the list running through its css_set if necessary and
4844 * calls the subsystem fork() callbacks. Has to be after the task is
4845 * visible on the task list in case we race with the first call to
4846 * cgroup_iter_start() - to guarantee that the new task ends up on its
4847 * list.
4848 */
4849 void cgroup_post_fork(struct task_struct *child)
4850 {
4851 int i;
4852
4853 /*
4854 * use_task_css_set_links is set to 1 before we walk the tasklist
4855 * under the tasklist_lock and we read it here after we added the child
4856 * to the tasklist under the tasklist_lock as well. If the child wasn't
4857 * yet in the tasklist when we walked through it from
4858 * cgroup_enable_task_cg_lists(), then use_task_css_set_links value
4859 * should be visible now due to the paired locking and barriers implied
4860 * by LOCK/UNLOCK: it is written before the tasklist_lock unlock
4861 * in cgroup_enable_task_cg_lists() and read here after the tasklist_lock
4862 * lock on fork.
4863 */
4864 if (use_task_css_set_links) {
4865 write_lock(&css_set_lock);
4866 task_lock(child);
4867 if (list_empty(&child->cg_list))
4868 list_add(&child->cg_list, &child->cgroups->tasks);
4869 task_unlock(child);
4870 write_unlock(&css_set_lock);
4871 }
4872
4873 /*
4874 * Call ss->fork(). This must happen after @child is linked on
4875 * css_set; otherwise, @child might change state between ->fork()
4876 * and addition to css_set.
4877 */
4878 if (need_forkexit_callback) {
4879 /*
4880 * fork/exit callbacks are supported only for builtin
4881 * subsystems, and the builtin section of the subsys
4882 * array is immutable, so we don't need to lock the
4883 * subsys array here. On the other hand, modular section
4884 * of the array can be freed at module unload, so we
4885 * can't touch that.
4886 */
4887 for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
4888 struct cgroup_subsys *ss = subsys[i];
4889
4890 if (ss->fork)
4891 ss->fork(child);
4892 }
4893 }
4894 }
4895
4896 /**
4897 * cgroup_exit - detach cgroup from exiting task
4898 * @tsk: pointer to task_struct of exiting process
4899 * @run_callbacks: run exit callbacks?
4900 *
4901 * Description: Detach cgroup from @tsk and release it.
4902 *
4903 * Note that cgroups marked notify_on_release force every task in
4904 * them to take the global cgroup_mutex mutex when exiting.
4905 * This could impact scaling on very large systems. Be reluctant to
4906 * use notify_on_release cgroups where very high task exit scaling
4907 * is required on large systems.
4908 *
4909 * the_top_cgroup_hack:
4910 *
4911 * Set the exiting task's cgroup to the root cgroup (top_cgroup).
4912 *
4913 * We call cgroup_exit() while the task is still competent to
4914 * handle notify_on_release(), then leave the task attached to the
4915 * root cgroup in each hierarchy for the remainder of its exit.
4916 *
4917 * To do this properly, we would increment the reference count on
4918 * top_cgroup, and near the very end of the kernel/exit.c do_exit()
4919 * code we would add a second cgroup function call, to drop that
4920 * reference. This would just create an unnecessary hot spot on
4921 * the top_cgroup reference count, to no avail.
4922 *
4923 * Normally, holding a reference to a cgroup without bumping its
4924 * count is unsafe. The cgroup could go away, or someone could
4925 * attach us to a different cgroup, decrementing the count on
4926 * the first cgroup that we never incremented. But in this case,
4927 * top_cgroup isn't going away, and either task has PF_EXITING set,
4928 * which wards off any cgroup_attach_task() attempts, or task is a failed
4929 * fork, never visible to cgroup_attach_task.
4930 */
4931 void cgroup_exit(struct task_struct *tsk, int run_callbacks)
4932 {
4933 struct css_set *cg;
4934 int i;
4935
4936 /*
4937 * Unlink from the css_set task list if necessary.
4938 * Optimistically check cg_list before taking
4939 * css_set_lock
4940 */
4941 if (!list_empty(&tsk->cg_list)) {
4942 write_lock(&css_set_lock);
4943 if (!list_empty(&tsk->cg_list))
4944 list_del_init(&tsk->cg_list);
4945 write_unlock(&css_set_lock);
4946 }
4947
4948 /* Reassign the task to the init_css_set. */
4949 task_lock(tsk);
4950 cg = tsk->cgroups;
4951 tsk->cgroups = &init_css_set;
4952
4953 if (run_callbacks && need_forkexit_callback) {
4954 /*
4955 * fork/exit callbacks are supported only for builtin
4956 * subsystems, see cgroup_post_fork() for details.
4957 */
4958 for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
4959 struct cgroup_subsys *ss = subsys[i];
4960
4961 if (ss->exit) {
4962 struct cgroup *old_cgrp =
4963 rcu_dereference_raw(cg->subsys[i])->cgroup;
4964 struct cgroup *cgrp = task_cgroup(tsk, i);
4965 ss->exit(cgrp, old_cgrp, tsk);
4966 }
4967 }
4968 }
4969 task_unlock(tsk);
4970
4971 put_css_set_taskexit(cg);
4972 }
4973
4974 static void check_for_release(struct cgroup *cgrp)
4975 {
4976 /* All of these checks rely on RCU to keep the cgroup
4977 * structure alive */
4978 if (cgroup_is_releasable(cgrp) &&
4979 !atomic_read(&cgrp->count) && list_empty(&cgrp->children)) {
4980 /*
4981 * Control Group is currently removable. If it's not
4982 * already queued for a userspace notification, queue
4983 * it now
4984 */
4985 int need_schedule_work = 0;
4986
4987 raw_spin_lock(&release_list_lock);
4988 if (!cgroup_is_removed(cgrp) &&
4989 list_empty(&cgrp->release_list)) {
4990 list_add(&cgrp->release_list, &release_list);
4991 need_schedule_work = 1;
4992 }
4993 raw_spin_unlock(&release_list_lock);
4994 if (need_schedule_work)
4995 schedule_work(&release_agent_work);
4996 }
4997 }
4998
4999 /* Caller must verify that the css is not for root cgroup */
5000 bool __css_tryget(struct cgroup_subsys_state *css)
5001 {
5002 while (true) {
5003 int t, v;
5004
5005 v = css_refcnt(css);
5006 t = atomic_cmpxchg(&css->refcnt, v, v + 1);
5007 if (likely(t == v))
5008 return true;
5009 else if (t < 0)
5010 return false;
5011 cpu_relax();
5012 }
5013 }
5014 EXPORT_SYMBOL_GPL(__css_tryget);
5015
5016 /* Caller must verify that the css is not for root cgroup */
5017 void __css_put(struct cgroup_subsys_state *css)
5018 {
5019 int v;
5020
5021 v = css_unbias_refcnt(atomic_dec_return(&css->refcnt));
5022 if (v == 0)
5023 queue_work(cgroup_destroy_wq, &css->dput_work);
5024 }
5025 EXPORT_SYMBOL_GPL(__css_put);
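
/*
 * Worked example of the biased refcounting above (illustrative
 * numbers): a live css with two extra users has refcnt == 3.
 * cgroup_destroy_locked() adds CSS_DEACT_BIAS, driving the count
 * negative, so __css_tryget() sees t < 0 and fails. Each __css_put()
 * still decrements; once css_unbias_refcnt() reports 0, the dput
 * work is queued on cgroup_destroy_wq.
 */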
5026
5027 /*
5028 * Notify userspace when a cgroup is released, by running the
5029 * configured release agent with the name of the cgroup (path
5030 * relative to the root of cgroup file system) as the argument.
5031 *
5032 * Most likely, this user command will try to rmdir this cgroup.
5033 *
5034 * This races with the possibility that some other task will be
5035 * attached to this cgroup before it is removed, or that some other
5036 * user task will 'mkdir' a child cgroup of this cgroup. That's ok.
5037 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
5038 * unused, and this cgroup will be reprieved from its death sentence,
5039 * to continue to serve a useful existence. Next time it's released,
5040 * we will get notified again, if it still has 'notify_on_release' set.
5041 *
5042 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
5043 * means only wait until the task is successfully execve()'d. The
5044 * separate release agent task is forked by call_usermodehelper(),
5045 * then control in this thread returns here, without waiting for the
5046 * release agent task. We don't bother to wait because the caller of
5047 * this routine has no use for the exit status of the release agent
5048 * task, so no sense holding our caller up for that.
5049 */
5050 static void cgroup_release_agent(struct work_struct *work)
5051 {
5052 BUG_ON(work != &release_agent_work);
5053 mutex_lock(&cgroup_mutex);
5054 raw_spin_lock(&release_list_lock);
5055 while (!list_empty(&release_list)) {
5056 char *argv[3], *envp[3];
5057 int i;
5058 char *pathbuf = NULL, *agentbuf = NULL;
5059 struct cgroup *cgrp = list_entry(release_list.next,
5060 struct cgroup,
5061 release_list);
5062 list_del_init(&cgrp->release_list);
5063 raw_spin_unlock(&release_list_lock);
5064 pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
5065 if (!pathbuf)
5066 goto continue_free;
5067 if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0)
5068 goto continue_free;
5069 agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
5070 if (!agentbuf)
5071 goto continue_free;
5072
5073 i = 0;
5074 argv[i++] = agentbuf;
5075 argv[i++] = pathbuf;
5076 argv[i] = NULL;
5077
5078 i = 0;
5079 /* minimal command environment */
5080 envp[i++] = "HOME=/";
5081 envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
5082 envp[i] = NULL;
5083
5084 /* Drop the lock while we invoke the usermode helper,
5085 * since the exec could involve hitting disk and hence
5086 * be a slow process */
5087 mutex_unlock(&cgroup_mutex);
5088 call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
5089 mutex_lock(&cgroup_mutex);
5090 continue_free:
5091 kfree(pathbuf);
5092 kfree(agentbuf);
5093 raw_spin_lock(&release_list_lock);
5094 }
5095 raw_spin_unlock(&release_list_lock);
5096 mutex_unlock(&cgroup_mutex);
5097 }
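
/*
 * Illustrative userspace setup for the notification path above
 * (mount point, agent path and group name are examples):
 *
 *	echo /sbin/my_release_agent > /sys/fs/cgroup/foo/release_agent
 *	echo 1 > /sys/fs/cgroup/foo/mygroup/notify_on_release
 *
 * When mygroup becomes empty, the agent is invoked roughly as
 * argv[0]="/sbin/my_release_agent", argv[1]="/mygroup".
 */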
5098
5099 static int __init cgroup_disable(char *str)
5100 {
5101 int i;
5102 char *token;
5103
5104 while ((token = strsep(&str, ",")) != NULL) {
5105 if (!*token)
5106 continue;
5107 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
5108 struct cgroup_subsys *ss = subsys[i];
5109
5110 /*
5111 * cgroup_disable, being at boot time, can't
5112 * know about module subsystems, so we don't
5113 * worry about them.
5114 */
5115 if (!ss || ss->module)
5116 continue;
5117
5118 if (!strcmp(token, ss->name)) {
5119 ss->disabled = 1;
5120 printk(KERN_INFO "Disabling %s control group"
5121 " subsystem\n", ss->name);
5122 break;
5123 }
5124 }
5125 }
5126 return 1;
5127 }
5128 __setup("cgroup_disable=", cgroup_disable);
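
/*
 * Example: booting with "cgroup_disable=memory" (or a comma separated
 * list such as "cgroup_disable=memory,cpuset") leaves the named
 * built-in controllers compiled in but marked ss->disabled.
 */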
5129
5130 /*
5131 * Functions for CSS ID.
5132 */
5133
5134 /*
5135 * To get an ID other than 0, this should be called when !cgroup_is_removed().
5136 */
5137 unsigned short css_id(struct cgroup_subsys_state *css)
5138 {
5139 struct css_id *cssid;
5140
5141 /*
5142 * css_id() can return a correct value when someone holds a refcnt
5143 * on the css or it is called under rcu_read_lock(). Once css->id is allocated,
5144 * it's unchanged until freed.
5145 */
5146 cssid = rcu_dereference_check(css->id, css_refcnt(css));
5147
5148 if (cssid)
5149 return cssid->id;
5150 return 0;
5151 }
5152 EXPORT_SYMBOL_GPL(css_id);
5153
5154 unsigned short css_depth(struct cgroup_subsys_state *css)
5155 {
5156 struct css_id *cssid;
5157
5158 cssid = rcu_dereference_check(css->id, css_refcnt(css));
5159
5160 if (cssid)
5161 return cssid->depth;
5162 return 0;
5163 }
5164 EXPORT_SYMBOL_GPL(css_depth);
5165
5166 /**
5167 * css_is_ancestor - test "root" css is an ancestor of "child"
5168 * @child: the css to be tested.
5169 * @root: the css supposed to be an ancestor of the child.
5170 *
5171 * Returns true if "root" is an ancestor of "child" in its hierarchy. Because
5172 * this function reads css->id, the caller must hold rcu_read_lock().
5173 * But, considering usual usage, the csses should be valid objects after the test.
5174 * Assuming that the caller will do some action to the child if this
5175 * returns true, the caller must take "child"'s reference count.
5176 * If "child" is a valid object and this returns true, "root" is valid, too.
5177 */
5178
5179 bool css_is_ancestor(struct cgroup_subsys_state *child,
5180 const struct cgroup_subsys_state *root)
5181 {
5182 struct css_id *child_id;
5183 struct css_id *root_id;
5184
5185 child_id = rcu_dereference(child->id);
5186 if (!child_id)
5187 return false;
5188 root_id = rcu_dereference(root->id);
5189 if (!root_id)
5190 return false;
5191 if (child_id->depth < root_id->depth)
5192 return false;
5193 if (child_id->stack[root_id->depth] != root_id->id)
5194 return false;
5195 return true;
5196 }
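
/*
 * Worked example (illustrative ids): for a chain A(id=2, depth=0) ->
 * B(id=5, depth=1) -> C(id=9, depth=2), C's id stack is {2, 5, 9}.
 * css_is_ancestor(C, B) checks stack[1] == 5 and returns true, while
 * css_is_ancestor(B, C) fails the depth comparison (1 < 2).
 */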
5197
5198 void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
5199 {
5200 struct css_id *id = css->id;
5201 /* When this is called before css_id initialization, id can be NULL */
5202 if (!id)
5203 return;
5204
5205 BUG_ON(!ss->use_id);
5206
5207 rcu_assign_pointer(id->css, NULL);
5208 rcu_assign_pointer(css->id, NULL);
5209 spin_lock(&ss->id_lock);
5210 idr_remove(&ss->idr, id->id);
5211 spin_unlock(&ss->id_lock);
5212 kfree_rcu(id, rcu_head);
5213 }
5214 EXPORT_SYMBOL_GPL(free_css_id);
5215
5216 /*
5217 * This is called by init or create(). Calls to this function are
5218 * always serialized (by cgroup_mutex at create()).
5219 */
5220
5221 static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
5222 {
5223 struct css_id *newid;
5224 int ret, size;
5225
5226 BUG_ON(!ss->use_id);
5227
5228 size = sizeof(*newid) + sizeof(unsigned short) * (depth + 1);
5229 newid = kzalloc(size, GFP_KERNEL);
5230 if (!newid)
5231 return ERR_PTR(-ENOMEM);
5232
5233 idr_preload(GFP_KERNEL);
5234 spin_lock(&ss->id_lock);
5235 /* Don't use 0; allocate an ID in the range 1-65535 */
5236 ret = idr_alloc(&ss->idr, newid, 1, CSS_ID_MAX + 1, GFP_NOWAIT);
5237 spin_unlock(&ss->id_lock);
5238 idr_preload_end();
5239
5240 /* Returns an error when there is no free space for a new ID. */
5241 if (ret < 0)
5242 goto err_out;
5243
5244 newid->id = ret;
5245 newid->depth = depth;
5246 return newid;
5247 err_out:
5248 kfree(newid);
5249 return ERR_PTR(ret);
5250
5251 }
5252
5253 static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss,
5254 struct cgroup_subsys_state *rootcss)
5255 {
5256 struct css_id *newid;
5257
5258 spin_lock_init(&ss->id_lock);
5259 idr_init(&ss->idr);
5260
5261 newid = get_new_cssid(ss, 0);
5262 if (IS_ERR(newid))
5263 return PTR_ERR(newid);
5264
5265 newid->stack[0] = newid->id;
5266 newid->css = rootcss;
5267 rootcss->id = newid;
5268 return 0;
5269 }
5270
5271 static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent,
5272 struct cgroup *child)
5273 {
5274 int subsys_id, i, depth = 0;
5275 struct cgroup_subsys_state *parent_css, *child_css;
5276 struct css_id *child_id, *parent_id;
5277
5278 subsys_id = ss->subsys_id;
5279 parent_css = parent->subsys[subsys_id];
5280 child_css = child->subsys[subsys_id];
5281 parent_id = parent_css->id;
5282 depth = parent_id->depth + 1;
5283
5284 child_id = get_new_cssid(ss, depth);
5285 if (IS_ERR(child_id))
5286 return PTR_ERR(child_id);
5287
5288 for (i = 0; i < depth; i++)
5289 child_id->stack[i] = parent_id->stack[i];
5290 child_id->stack[depth] = child_id->id;
5291 /*
5292 * The child_id->css pointer will be set after this cgroup is available;
5293 * see cgroup_populate_dir().
5294 */
5295 rcu_assign_pointer(child_css->id, child_id);
5296
5297 return 0;
5298 }
5299
5300 /**
5301 * css_lookup - lookup css by id
5302 * @ss: cgroup subsys to be looked into.
5303 * @id: the id
5304 *
5305 * Returns a pointer to the cgroup_subsys_state if a valid one with @id
5306 * exists, NULL otherwise. Must be called under rcu_read_lock().
5307 */
5308 struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id)
5309 {
5310 struct css_id *cssid = NULL;
5311
5312 BUG_ON(!ss->use_id);
5313 cssid = idr_find(&ss->idr, id);
5314
5315 if (unlikely(!cssid))
5316 return NULL;
5317
5318 return rcu_dereference(cssid->css);
5319 }
5320 EXPORT_SYMBOL_GPL(css_lookup);
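
/*
 * Illustrative caller sketch, assuming @id was obtained earlier from
 * css_id() on the same subsystem:
 *
 *	rcu_read_lock();
 *	css = css_lookup(ss, id);
 *	if (css && css_tryget(css)) {
 *		... use css ...
 *		css_put(css);
 *	}
 *	rcu_read_unlock();
 */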
5321
5322 /*
5323 * get corresponding css from file open on cgroupfs directory
5324 */
5325 struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id)
5326 {
5327 struct cgroup *cgrp;
5328 struct inode *inode;
5329 struct cgroup_subsys_state *css;
5330
5331 inode = file_inode(f);
5332 /* check in cgroup filesystem dir */
5333 if (inode->i_op != &cgroup_dir_inode_operations)
5334 return ERR_PTR(-EBADF);
5335
5336 if (id < 0 || id >= CGROUP_SUBSYS_COUNT)
5337 return ERR_PTR(-EINVAL);
5338
5339 /* get cgroup */
5340 cgrp = __d_cgrp(f->f_dentry);
5341 css = cgrp->subsys[id];
5342 return css ? css : ERR_PTR(-ENOENT);
5343 }
5344
5345 #ifdef CONFIG_CGROUP_DEBUG
5346 static struct cgroup_subsys_state *debug_css_alloc(struct cgroup *cont)
5347 {
5348 struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
5349
5350 if (!css)
5351 return ERR_PTR(-ENOMEM);
5352
5353 return css;
5354 }
5355
5356 static void debug_css_free(struct cgroup *cont)
5357 {
5358 kfree(cont->subsys[debug_subsys_id]);
5359 }
5360
5361 static u64 cgroup_refcount_read(struct cgroup *cont, struct cftype *cft)
5362 {
5363 return atomic_read(&cont->count);
5364 }
5365
5366 static u64 debug_taskcount_read(struct cgroup *cont, struct cftype *cft)
5367 {
5368 return cgroup_task_count(cont);
5369 }
5370
5371 static u64 current_css_set_read(struct cgroup *cont, struct cftype *cft)
5372 {
5373 return (u64)(unsigned long)current->cgroups;
5374 }
5375
5376 static u64 current_css_set_refcount_read(struct cgroup *cont,
5377 struct cftype *cft)
5378 {
5379 u64 count;
5380
5381 rcu_read_lock();
5382 count = atomic_read(&current->cgroups->refcount);
5383 rcu_read_unlock();
5384 return count;
5385 }
5386
5387 static int current_css_set_cg_links_read(struct cgroup *cont,
5388 struct cftype *cft,
5389 struct seq_file *seq)
5390 {
5391 struct cg_cgroup_link *link;
5392 struct css_set *cg;
5393
5394 read_lock(&css_set_lock);
5395 rcu_read_lock();
5396 cg = rcu_dereference(current->cgroups);
5397 list_for_each_entry(link, &cg->cg_links, cg_link_list) {
5398 struct cgroup *c = link->cgrp;
5399 const char *name;
5400
5401 if (c->dentry)
5402 name = c->dentry->d_name.name;
5403 else
5404 name = "?";
5405 seq_printf(seq, "Root %d group %s\n",
5406 c->root->hierarchy_id, name);
5407 }
5408 rcu_read_unlock();
5409 read_unlock(&css_set_lock);
5410 return 0;
5411 }
5412
5413 #define MAX_TASKS_SHOWN_PER_CSS 25
5414 static int cgroup_css_links_read(struct cgroup *cont,
5415 struct cftype *cft,
5416 struct seq_file *seq)
5417 {
5418 struct cg_cgroup_link *link;
5419
5420 read_lock(&css_set_lock);
5421 list_for_each_entry(link, &cont->css_sets, cgrp_link_list) {
5422 struct css_set *cg = link->cg;
5423 struct task_struct *task;
5424 int count = 0;
5425 seq_printf(seq, "css_set %p\n", cg);
5426 list_for_each_entry(task, &cg->tasks, cg_list) {
5427 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
5428 seq_puts(seq, " ...\n");
5429 break;
5430 } else {
5431 seq_printf(seq, " task %d\n",
5432 task_pid_vnr(task));
5433 }
5434 }
5435 }
5436 read_unlock(&css_set_lock);
5437 return 0;
5438 }
5439
5440 static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft)
5441 {
5442 return test_bit(CGRP_RELEASABLE, &cgrp->flags);
5443 }
5444
5445 static struct cftype debug_files[] = {
5446 {
5447 .name = "cgroup_refcount",
5448 .read_u64 = cgroup_refcount_read,
5449 },
5450 {
5451 .name = "taskcount",
5452 .read_u64 = debug_taskcount_read,
5453 },
5454
5455 {
5456 .name = "current_css_set",
5457 .read_u64 = current_css_set_read,
5458 },
5459
5460 {
5461 .name = "current_css_set_refcount",
5462 .read_u64 = current_css_set_refcount_read,
5463 },
5464
5465 {
5466 .name = "current_css_set_cg_links",
5467 .read_seq_string = current_css_set_cg_links_read,
5468 },
5469
5470 {
5471 .name = "cgroup_css_links",
5472 .read_seq_string = cgroup_css_links_read,
5473 },
5474
5475 {
5476 .name = "releasable",
5477 .read_u64 = releasable_read,
5478 },
5479
5480 { } /* terminate */
5481 };
5482
5483 struct cgroup_subsys debug_subsys = {
5484 .name = "debug",
5485 .css_alloc = debug_css_alloc,
5486 .css_free = debug_css_free,
5487 .subsys_id = debug_subsys_id,
5488 .base_cftypes = debug_files,
5489 };
5490 #endif /* CONFIG_CGROUP_DEBUG */