include/linux/cgroup.h
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/prio_heap.h>
#include <linux/rwsem.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <linux/xattr.h>

#ifdef CONFIG_CGROUPS

struct cgroupfs_root;
struct cgroup_subsys;
struct inode;
struct cgroup;
struct css_id;

extern int cgroup_init_early(void);
extern int cgroup_init(void);
extern void cgroup_lock(void);
extern int cgroup_lock_is_held(void);
extern bool cgroup_lock_live_group(struct cgroup *cgrp);
extern void cgroup_unlock(void);
extern void cgroup_fork(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
extern void cgroup_exit(struct task_struct *p, int run_callbacks);
extern int cgroupstats_build(struct cgroupstats *stats,
				struct dentry *dentry);
extern int cgroup_load_subsys(struct cgroup_subsys *ss);
extern void cgroup_unload_subsys(struct cgroup_subsys *ss);

extern int proc_cgroup_show(struct seq_file *, void *);

/* Define the enumeration of all builtin cgroup subsystems */
#define SUBSYS(_x) _x ## _subsys_id,
#define IS_SUBSYS_ENABLED(option) IS_ENABLED(option)
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
	CGROUP_SUBSYS_COUNT,
};
#undef IS_SUBSYS_ENABLED
#undef SUBSYS
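
/*
 * Illustrative note (not part of this header): each SUBSYS(name) entry in
 * cgroup_subsys.h expands to an enumerator above. For instance, a line
 * such as SUBSYS(cpuset), when its config option is enabled, would yield:
 *
 *	enum cgroup_subsys_id {
 *		cpuset_subsys_id,
 *		...
 *		CGROUP_SUBSYS_COUNT,
 *	};
 */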

/* Per-subsystem/per-cgroup state maintained by the system. */
struct cgroup_subsys_state {
	/*
	 * The cgroup that this subsystem is attached to. Useful
	 * for subsystems that want to know about the cgroup
	 * hierarchy structure
	 */
	struct cgroup *cgroup;

	/*
	 * State maintained by the cgroup system to allow subsystems
	 * to be "busy". Should be accessed via css_get(),
	 * css_tryget() and css_put().
	 */

	atomic_t refcnt;

	unsigned long flags;
	/* ID for this css, if possible */
	struct css_id __rcu *id;

	/* Used to put @cgroup->dentry on the last css_put() */
	struct work_struct dput_work;
};

/* bits in struct cgroup_subsys_state flags field */
enum {
	CSS_ROOT	= (1 << 0), /* this CSS is the root of the subsystem */
	CSS_ONLINE	= (1 << 1), /* between ->css_online() and ->css_offline() */
};

/* Caller must verify that the css is not for root cgroup */
static inline void __css_get(struct cgroup_subsys_state *css, int count)
{
	atomic_add(count, &css->refcnt);
}

/*
 * Call css_get() to hold a reference on the css; it can be used
 * for a reference obtained via:
 * - an existing ref-counted reference to the css
 * - task->cgroups for a locked task
 */

static inline void css_get(struct cgroup_subsys_state *css)
{
	/* We don't need to reference count the root state */
	if (!(css->flags & CSS_ROOT))
		__css_get(css, 1);
}

/*
 * Call css_tryget() to take a reference on a css if your existing
 * (known-valid) reference isn't already ref-counted. Returns false if
 * the css has been destroyed.
 */

extern bool __css_tryget(struct cgroup_subsys_state *css);
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
	if (css->flags & CSS_ROOT)
		return true;
	return __css_tryget(css);
}

/*
 * css_put() should be called to release a reference taken by
 * css_get() or css_tryget()
 */

extern void __css_put(struct cgroup_subsys_state *css);
static inline void css_put(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_ROOT))
		__css_put(css);
}
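
/*
 * Illustrative sketch (not part of this header): pinning a css across a
 * blocking operation. Assumes @css was obtained from a known-valid but
 * unaccounted source such as a locked task's task->cgroups:
 *
 *	if (css_tryget(css)) {
 *		do_something_that_may_sleep();
 *		css_put(css);
 *	}
 *
 * css_tryget() fails once the css has been destroyed, so the body only
 * runs against a live css.
 */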

/* bits in struct cgroup flags field */
enum {
	/* Control Group is dead */
	CGRP_REMOVED,
	/*
	 * Control Group has previously had a child cgroup or a task,
	 * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set)
	 */
	CGRP_RELEASABLE,
	/* Control Group requires release notifications to userspace */
	CGRP_NOTIFY_ON_RELEASE,
	/*
	 * Clone the parent's configuration when creating a new child
	 * cpuset cgroup. For historical reasons, this option can be
	 * specified at mount time and thus is implemented here.
	 */
	CGRP_CPUSET_CLONE_CHILDREN,
};

struct cgroup {
	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * count users of this cgroup. >0 means busy, but doesn't
	 * necessarily indicate the number of tasks in the cgroup
	 */
	atomic_t count;

	int id;				/* ida allocated in-hierarchy ID */

	/*
	 * We link our 'sibling' struct into our parent's 'children'.
	 * Our children link their 'sibling' into our 'children'.
	 */
	struct list_head sibling;	/* my parent's children */
	struct list_head children;	/* my children */
	struct list_head files;		/* my files */

	struct cgroup *parent;		/* my parent */
	struct dentry *dentry;		/* cgroup fs entry, RCU protected */

	/* Private pointers for each registered subsystem */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

	struct cgroupfs_root *root;
	struct cgroup *top_cgroup;

	/*
	 * List of cg_cgroup_links pointing at css_sets with
	 * tasks in this cgroup. Protected by css_set_lock
	 */
	struct list_head css_sets;

	struct list_head allcg_node;	/* cgroupfs_root->allcg_list */
	struct list_head cft_q_node;	/* used during cftype add/rm */

	/*
	 * Linked list running through all cgroups that can
	 * potentially be reaped by the release agent. Protected by
	 * release_list_lock
	 */
	struct list_head release_list;

	/*
	 * list of pidlists, up to two for each namespace (one for procs, one
	 * for tasks); created on demand.
	 */
	struct list_head pidlists;
	struct mutex pidlist_mutex;

	/* For RCU-protected deletion */
	struct rcu_head rcu_head;
	struct work_struct free_work;

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

	/* directory xattrs */
	struct simple_xattrs xattrs;
};

/*
 * A css_set is a structure holding pointers to a set of
 * cgroup_subsys_state objects. This saves space in the task struct
 * object and speeds up fork()/exit(), since a single inc/dec and a
 * list_add()/del() can bump the reference count on the entire cgroup
 * set for a task.
 */

struct css_set {

	/* Reference count */
	atomic_t refcount;

	/*
	 * List running through all cgroup groups in the same hash
	 * slot. Protected by css_set_lock
	 */
	struct hlist_node hlist;

	/*
	 * List running through all tasks using this cgroup
	 * group. Protected by css_set_lock
	 */
	struct list_head tasks;

	/*
	 * List of cg_cgroup_link objects on link chains from
	 * cgroups referenced from this css_set. Protected by
	 * css_set_lock
	 */
	struct list_head cg_links;

	/*
	 * Set of subsystem states, one for each subsystem. This array
	 * is immutable after creation apart from the init_css_set
	 * during subsystem registration (at boot time) and modular subsystem
	 * loading/unloading.
	 */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

	/* For RCU-protected deletion */
	struct rcu_head rcu_head;
};

/*
 * cgroup_map_cb is an abstract callback API for reporting map-valued
 * control files
 */

struct cgroup_map_cb {
	int (*fill)(struct cgroup_map_cb *cb, const char *key, u64 value);
	void *state;
};

/*
 * struct cftype: handler definitions for cgroup control files
 *
 * When reading/writing to a file:
 *	- the cgroup to use is file->f_dentry->d_parent->d_fsdata
 *	- the 'cftype' of the file is file->f_dentry->d_fsdata
 */

/* cftype->flags */
#define CFTYPE_ONLY_ON_ROOT	(1U << 0)	/* only create on root cg */
#define CFTYPE_NOT_ON_ROOT	(1U << 1)	/* don't create on root cg */

#define MAX_CFTYPE_NAME		64

struct cftype {
	/*
	 * By convention, the name should begin with the name of the
	 * subsystem, followed by a period. Zero length string indicates
	 * end of cftype array.
	 */
	char name[MAX_CFTYPE_NAME];
	int private;
	/*
	 * If not 0, file mode is set to this value, otherwise it will
	 * be figured out automatically
	 */
	umode_t mode;

	/*
	 * If non-zero, defines the maximum length of string that can
	 * be passed to write_string; defaults to 64
	 */
	size_t max_write_len;

	/* CFTYPE_* flags */
	unsigned int flags;

	/* file xattrs */
	struct simple_xattrs xattrs;

	int (*open)(struct inode *inode, struct file *file);
	ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft,
			struct file *file,
			char __user *buf, size_t nbytes, loff_t *ppos);
	/*
	 * read_u64() is a shortcut for the common case of returning a
	 * single integer. Use it in place of read()
	 */
	u64 (*read_u64)(struct cgroup *cgrp, struct cftype *cft);
	/*
	 * read_s64() is a signed version of read_u64()
	 */
	s64 (*read_s64)(struct cgroup *cgrp, struct cftype *cft);
	/*
	 * read_map() is used for defining a map of key/value
	 * pairs. It should call cb->fill(cb, key, value) for each
	 * entry. The key/value pairs (and their ordering) should not
	 * change between reboots.
	 */
	int (*read_map)(struct cgroup *cont, struct cftype *cft,
			struct cgroup_map_cb *cb);
	/*
	 * read_seq_string() is used for outputting a simple sequence
	 * using seqfile.
	 */
	int (*read_seq_string)(struct cgroup *cont, struct cftype *cft,
			       struct seq_file *m);

	ssize_t (*write)(struct cgroup *cgrp, struct cftype *cft,
			 struct file *file,
			 const char __user *buf, size_t nbytes, loff_t *ppos);

	/*
	 * write_u64() is a shortcut for the common case of accepting
	 * a single integer (as parsed by simple_strtoull) from
	 * userspace. Use in place of write(); return 0 or error.
	 */
	int (*write_u64)(struct cgroup *cgrp, struct cftype *cft, u64 val);
	/*
	 * write_s64() is a signed version of write_u64()
	 */
	int (*write_s64)(struct cgroup *cgrp, struct cftype *cft, s64 val);

	/*
	 * write_string() is passed a nul-terminated kernelspace
	 * buffer of maximum length determined by max_write_len.
	 * Returns 0 or a negative error code.
	 */
	int (*write_string)(struct cgroup *cgrp, struct cftype *cft,
			    const char *buffer);
	/*
	 * trigger() can be used to receive a simple kick from userspace
	 * when the actual string written doesn't matter at all. The
	 * private field can be used to determine the kick type for
	 * multiplexing.
	 */
	int (*trigger)(struct cgroup *cgrp, unsigned int event);

	int (*release)(struct inode *inode, struct file *file);

	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to the cftype. Implement it if
	 * you want to provide this functionality. Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct cgroup *cgrp, struct cftype *cft,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace
	 * closes the eventfd or on cgroup removal.
	 * This callback must be implemented if you want to provide
	 * notification functionality.
	 */
	void (*unregister_event)(struct cgroup *cgrp, struct cftype *cft,
				 struct eventfd_ctx *eventfd);
};

/*
 * cftype_sets describe cftypes belonging to a subsystem and are chained at
 * cgroup_subsys->cftsets. Each cftset points to an array of cftypes
 * terminated by zero length name.
 */
struct cftype_set {
	struct list_head node;	/* chained at subsys->cftsets */
	struct cftype *cfts;
};

struct cgroup_scanner {
	struct cgroup *cg;
	int (*test_task)(struct task_struct *p, struct cgroup_scanner *scan);
	void (*process_task)(struct task_struct *p,
			     struct cgroup_scanner *scan);
	struct ptr_heap *heap;
	void *data;
};
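
/*
 * Illustrative sketch (not part of this header): a subsystem might scan
 * the tasks of a cgroup with cgroup_scan_tasks() (declared below), e.g.:
 *
 *	static int my_test(struct task_struct *p, struct cgroup_scanner *scan)
 *	{
 *		return !(p->flags & PF_KTHREAD);	// pick tasks of interest
 *	}
 *
 *	static void my_process(struct task_struct *p, struct cgroup_scanner *scan)
 *	{
 *		// act on each selected task; css_set_lock is NOT held here
 *	}
 *
 *	struct cgroup_scanner scan = {
 *		.cg = cgrp,
 *		.test_task = my_test,
 *		.process_task = my_process,
 *	};
 *	cgroup_scan_tasks(&scan);
 *
 * The my_test() and my_process() names above are hypothetical.
 */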

int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
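
/*
 * Illustrative sketch (not part of this header): a subsystem typically
 * defines a cftype array terminated by a zero-length name and registers
 * it with cgroup_add_cftypes():
 *
 *	static u64 my_limit_read(struct cgroup *cgrp, struct cftype *cft)
 *	{
 *		return 0;	// return the current value
 *	}
 *
 *	static struct cftype my_files[] = {
 *		{
 *			.name = "mysubsys.limit",
 *			.read_u64 = my_limit_read,
 *		},
 *		{ }	// terminating entry
 *	};
 *
 *	cgroup_add_cftypes(&mysubsys_subsys, my_files);
 *
 * "mysubsys" and the handler above are hypothetical names.
 */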

int cgroup_is_removed(const struct cgroup *cgrp);

int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);

int cgroup_task_count(const struct cgroup *cgrp);

/* Return true if cgrp is a descendant of the task's cgroup */
int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task);

/*
 * Control Group taskset, used to pass around set of tasks to cgroup_subsys
 * methods.
 */
struct cgroup_taskset;
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset);
int cgroup_taskset_size(struct cgroup_taskset *tset);

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @skip_cgrp: skip if task's cgroup matches this, %NULL to iterate through all
 * @tset: taskset to iterate
 */
#define cgroup_taskset_for_each(task, skip_cgrp, tset)			\
	for ((task) = cgroup_taskset_first((tset)); (task);		\
	     (task) = cgroup_taskset_next((tset)))			\
		if (!(skip_cgrp) ||					\
		    cgroup_taskset_cur_cgroup((tset)) != (skip_cgrp))
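
/*
 * Illustrative sketch (not part of this header): an ->attach() method
 * might walk the taskset like so, skipping tasks already in @cgrp:
 *
 *	static void my_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *
 *		cgroup_taskset_for_each(task, cgrp, tset) {
 *			// apply per-task attach work here
 *		}
 *	}
 *
 * my_attach() is a hypothetical name.
 */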

/*
 * Control Group subsystem type.
 * See Documentation/cgroups/cgroups.txt for details
 */

struct cgroup_subsys {
	struct cgroup_subsys_state *(*css_alloc)(struct cgroup *cgrp);
	int (*css_online)(struct cgroup *cgrp);
	void (*css_offline)(struct cgroup *cgrp);
	void (*css_free)(struct cgroup *cgrp);

	int (*can_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
	void (*cancel_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
	void (*attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
	void (*fork)(struct task_struct *task);
	void (*exit)(struct cgroup *cgrp, struct cgroup *old_cgrp,
		     struct task_struct *task);
	void (*bind)(struct cgroup *root);

	int subsys_id;
	int active;
	int disabled;
	int early_init;
	/*
	 * True if this subsys uses ID. ID is not available before
	 * cgroup_init() (not available at early_init time).
	 */
	bool use_id;

	/*
	 * If %false, this subsystem is properly hierarchical -
	 * configuration, resource accounting and restriction on a parent
	 * cgroup cover those of its children. If %true, hierarchy support
	 * is broken in some ways - some subsystems ignore hierarchy
	 * completely while others are only implemented half-way.
	 *
	 * It's now disallowed to create nested cgroups if the subsystem is
	 * broken, and cgroup core will emit a warning message in such
	 * cases. Eventually, all subsystems will be made properly
	 * hierarchical and this will go away.
	 */
	bool broken_hierarchy;
	bool warned_broken_hierarchy;

#define MAX_CGROUP_TYPE_NAMELEN 32
	const char *name;

	/*
	 * Link to parent, and list entry in parent's children.
	 * Protected by cgroup_lock()
	 */
	struct cgroupfs_root *root;
	struct list_head sibling;
	/* used when use_id == true */
	struct idr idr;
	spinlock_t id_lock;

	/* list of cftype_sets */
	struct list_head cftsets;

	/* base cftypes, automatically [de]registered with subsys itself */
	struct cftype *base_cftypes;
	struct cftype_set base_cftset;

	/* should be defined only by modular subsystems */
	struct module *module;
};
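
/*
 * Illustrative sketch (not part of this header): a minimal subsystem
 * definition wires up allocation and freeing of its per-cgroup state:
 *
 *	struct my_state {
 *		struct cgroup_subsys_state css;	// embedded css
 *	};
 *
 *	static struct cgroup_subsys_state *my_css_alloc(struct cgroup *cgrp)
 *	{
 *		struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *		return st ? &st->css : ERR_PTR(-ENOMEM);
 *	}
 *
 *	static void my_css_free(struct cgroup *cgrp)
 *	{
 *		kfree(container_of(cgrp->subsys[my_subsys_id],
 *				   struct my_state, css));
 *	}
 *
 *	struct cgroup_subsys my_subsys = {
 *		.name = "my",
 *		.subsys_id = my_subsys_id,
 *		.css_alloc = my_css_alloc,
 *		.css_free = my_css_free,
 *	};
 *
 * All "my_*" identifiers above are hypothetical.
 */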

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
#include <linux/cgroup_subsys.h>
#undef IS_SUBSYS_ENABLED
#undef SUBSYS

static inline struct cgroup_subsys_state *cgroup_subsys_state(
	struct cgroup *cgrp, int subsys_id)
{
	return cgrp->subsys[subsys_id];
}

/*
 * function to get the cgroup_subsys_state which allows for extra
 * rcu_dereference_check() conditions, such as locks used during the
 * cgroup_subsys::attach() methods.
 */
#define task_subsys_state_check(task, subsys_id, __c)			\
	rcu_dereference_check(task->cgroups->subsys[subsys_id],		\
			      lockdep_is_held(&task->alloc_lock) ||	\
			      cgroup_lock_is_held() || (__c))

static inline struct cgroup_subsys_state *
task_subsys_state(struct task_struct *task, int subsys_id)
{
	return task_subsys_state_check(task, subsys_id, false);
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_subsys_state(task, subsys_id)->cgroup;
}
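
/*
 * Illustrative sketch (not part of this header): task_subsys_state() is
 * an RCU dereference, so a typical read-side lookup is bracketed by
 * rcu_read_lock():
 *
 *	rcu_read_lock();
 *	css = task_subsys_state(task, my_subsys_id);
 *	// use css; it may go away once the RCU read section ends
 *	rcu_read_unlock();
 *
 * my_subsys_id is a hypothetical subsystem ID.
 */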

/**
 * cgroup_for_each_child - iterate through children of a cgroup
 * @pos: the cgroup * to use as the loop cursor
 * @cgroup: cgroup whose children to walk
 *
 * Walk @cgroup's children. Must be called under rcu_read_lock(). A child
 * cgroup which hasn't finished ->css_online() or already has finished
 * ->css_offline() may show up during traversal and it's each subsystem's
 * responsibility to verify that each @pos is alive.
 *
 * If a subsystem synchronizes against the parent in its ->css_online()
 * and before it starts iterating, a cgroup which finished ->css_online()
 * is guaranteed to be visible in future iterations.
 */
#define cgroup_for_each_child(pos, cgroup)				\
	list_for_each_entry_rcu(pos, &(cgroup)->children, sibling)
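
/*
 * Illustrative sketch (not part of this header): walking children under
 * RCU while skipping cgroups that aren't fully online:
 *
 *	struct cgroup *child;
 *
 *	rcu_read_lock();
 *	cgroup_for_each_child(child, cgrp) {
 *		if (!my_state_is_online(child))	// subsystem-defined liveness check
 *			continue;
 *		// operate on @child
 *	}
 *	rcu_read_unlock();
 *
 * my_state_is_online() is a hypothetical per-subsystem check.
 */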

struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
					  struct cgroup *cgroup);
struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);

/**
 * cgroup_for_each_descendant_pre - pre-order walk of a cgroup's descendants
 * @pos: the cgroup * to use as the loop cursor
 * @cgroup: cgroup whose descendants to walk
 *
 * Walk @cgroup's descendants. Must be called under rcu_read_lock(). A
 * descendant cgroup which hasn't finished ->css_online() or already has
 * finished ->css_offline() may show up during traversal and it's each
 * subsystem's responsibility to verify that each @pos is alive.
 *
 * If a subsystem synchronizes against the parent in its ->css_online()
 * and before it starts iterating, and synchronizes against @pos on each
 * iteration, any descendant cgroup which finished ->css_online() is
 * guaranteed to be visible in future iterations.
 *
 * In other words, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@cgrp)
 * {
 *	Lock @cgrp->parent and @cgrp;
 *	Inherit state from @cgrp->parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@cgrp)
 * {
 *	Lock @cgrp;
 *	Update @cgrp's state;
 *	Unlock @cgrp;
 *
 *	cgroup_for_each_descendant_pre(@pos, @cgrp) {
 *		Lock @pos;
 *		Verify @pos is alive and inherit state from @pos->parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting. The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state. It's guaranteed that at least one
 * inheritance happens for any cgroup after the latest update to its
 * parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 */
#define cgroup_for_each_descendant_pre(pos, cgroup)			\
	for (pos = cgroup_next_descendant_pre(NULL, (cgroup)); (pos);	\
	     pos = cgroup_next_descendant_pre((pos), (cgroup)))

struct cgroup *cgroup_next_descendant_post(struct cgroup *pos,
					   struct cgroup *cgroup);

/**
 * cgroup_for_each_descendant_post - post-order walk of a cgroup's descendants
 * @pos: the cgroup * to use as the loop cursor
 * @cgroup: cgroup whose descendants to walk
 *
 * Similar to cgroup_for_each_descendant_pre() but performs post-order
 * traversal instead. Note that the visibility guarantee described for
 * the pre-order walk doesn't apply the same way to post-order walks.
 */
#define cgroup_for_each_descendant_post(pos, cgroup)			\
	for (pos = cgroup_next_descendant_post(NULL, (cgroup)); (pos);	\
	     pos = cgroup_next_descendant_post((pos), (cgroup)))
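
/*
 * Illustrative sketch (not part of this header): post-order is the
 * natural order for bottom-up teardown, since every descendant is
 * visited before its parent:
 *
 *	struct cgroup *pos;
 *
 *	rcu_read_lock();
 *	cgroup_for_each_descendant_post(pos, cgrp) {
 *		// release per-cgroup resources of @pos before its parent
 *	}
 *	rcu_read_unlock();
 */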

/* A cgroup_iter should be treated as an opaque object */
struct cgroup_iter {
	struct list_head *cg_link;
	struct list_head *task;
};

/*
 * To iterate across the tasks in a cgroup:
 *
 * 1) call cgroup_iter_start to initialize an iterator
 *
 * 2) call cgroup_iter_next() to retrieve member tasks until it
 *    returns NULL or until you want to end the iteration
 *
 * 3) call cgroup_iter_end() to destroy the iterator.
 *
 * Or, call cgroup_scan_tasks() to iterate through every task in a
 * cgroup - cgroup_scan_tasks() holds the css_set_lock when calling
 * the test_task() callback, but not while calling the process_task()
 * callback.
 */
void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it);
struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
				     struct cgroup_iter *it);
void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
int cgroup_scan_tasks(struct cgroup_scanner *scan);
int cgroup_attach_task(struct cgroup *, struct task_struct *);
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
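
/*
 * Illustrative sketch (not part of this header): the three-step iterator
 * protocol described above in code form:
 *
 *	struct cgroup_iter it;
 *	struct task_struct *task;
 *
 *	cgroup_iter_start(cgrp, &it);
 *	while ((task = cgroup_iter_next(cgrp, &it))) {
 *		// examine @task; avoid blocking while iterating
 *	}
 *	cgroup_iter_end(cgrp, &it);
 */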

/*
 * CSS ID is an ID for cgroup_subsys_state structs under a subsys. This
 * only works if cgroup_subsys.use_id == true. It can be used for looking
 * up and scanning. CSS ID is assigned automatically at cgroup allocation
 * (create) and removed when the subsys calls free_css_id(). This is
 * because the lifetime of cgroup_subsys_state is the subsys's
 * responsibility.
 *
 * Lookup and scanning functions should be called under rcu_read_lock().
 * Taking cgroup_mutex is not necessary for the following calls.
 * But the css returned by these routines can be "not populated yet" or
 * "being destroyed". The caller should check the css and cgroup's status.
 */

/*
 * Typically called at ->css_free(), or somewhere the subsys frees
 * cgroup_subsys_state.
 */
void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css);

/* Find a cgroup_subsys_state which has given ID */

struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id);

/*
 * Get a cgroup whose id is greater than or equal to id under the tree of
 * root. Returns a cgroup_subsys_state or NULL.
 */
struct cgroup_subsys_state *css_get_next(struct cgroup_subsys *ss, int id,
		struct cgroup_subsys_state *root, int *foundid);

/* Returns true if root is ancestor of cg */
bool css_is_ancestor(struct cgroup_subsys_state *cg,
		     const struct cgroup_subsys_state *root);

/* Get id and depth of css */
unsigned short css_id(struct cgroup_subsys_state *css);
unsigned short css_depth(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id);
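
/*
 * Illustrative sketch (not part of this header): scanning all csses under
 * @root with css_get_next(), advancing past each found ID:
 *
 *	struct cgroup_subsys_state *pos;
 *	int id = 1, found;
 *
 *	rcu_read_lock();
 *	while ((pos = css_get_next(ss, id, root, &found))) {
 *		// @pos may be half-created or dying; verify before use
 *		id = found + 1;
 *	}
 *	rcu_read_unlock();
 */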

#else /* !CONFIG_CGROUPS */

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p, int callbacks) {}

static inline void cgroup_lock(void) {}
static inline void cgroup_unlock(void) {}
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry)
{
	return -EINVAL;
}

/* No cgroups - nothing to do */
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t)
{
	return 0;
}

#endif /* !CONFIG_CGROUPS */

#endif /* _LINUX_CGROUP_H */