#ifndef __RES_COUNTER_H__
#define __RES_COUNTER_H__

/*
 * Resource Counters
 * Contain common data types and routines for resource accounting
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * See Documentation/controllers/resource_counter.txt for more
 * info about what this counter is.
 */

#include <linux/cgroup.h>
#include <linux/errno.h>

/*
 * The core object. A cgroup that wishes to account for some
 * resource may embed this counter in its structures and use
 * the helpers described below.
 */

struct res_counter {
        /*
         * the current resource consumption level
         */
        unsigned long long usage;
        /*
         * the maximal value of the usage from the counter creation
         */
        unsigned long long max_usage;
        /*
         * the limit that usage cannot exceed
         */
        unsigned long long limit;
        /*
         * the number of unsuccessful attempts to consume the resource
         */
        unsigned long long failcnt;
        /*
         * the lock to protect all of the above.
         * the routines below consider this to be IRQ-safe
         */
        spinlock_t lock;
};
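
/*
 * A minimal embedding sketch (illustrative only: "my_controller" and
 * its fields are hypothetical, not an existing kernel structure). A
 * controller embeds the counter and initializes it in its create path:
 *
 *	struct my_controller {
 *		struct cgroup_subsys_state css;
 *		struct res_counter res;
 *	};
 *
 *	static void my_controller_init(struct my_controller *ctrl)
 *	{
 *		res_counter_init(&ctrl->res);
 *	}
 */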

/**
 * Helpers to interact with userspace:
 * res_counter_read_u64() - returns the value of the specified member.
 * res_counter_read/_write - copy the specified fields of the
 * res_counter struct to/from user space
 *
 * @counter: the counter in question
 * @member: the field to work with (see RES_xxx below)
 * @buf: the buffer to operate on,...
 * @nbytes: its size...
 * @pos: and the offset.
 */

u64 res_counter_read_u64(struct res_counter *counter, int member);

ssize_t res_counter_read(struct res_counter *counter, int member,
		const char __user *buf, size_t nbytes, loff_t *pos,
		int (*read_strategy)(unsigned long long val, char *s));

typedef int (*write_strategy_fn)(const char *buf, unsigned long long *val);

int res_counter_memparse_write_strategy(const char *buf,
		unsigned long long *res);

int res_counter_write(struct res_counter *counter, int member,
		const char *buffer, write_strategy_fn write_strategy);

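/*
 * Sketch of a limit-write path (the "my_*" names and "ctrl" pointer
 * are hypothetical; only the res_counter_* identifiers come from this
 * header). The memparse strategy accepts human-readable suffixes such
 * as "4K" or "1G":
 *
 *	static int my_write_limit(struct my_controller *ctrl, const char *buf)
 *	{
 *		return res_counter_write(&ctrl->res, RES_LIMIT, buf,
 *				res_counter_memparse_write_strategy);
 *	}
 */
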
/*
 * The field descriptors. One for each member of res_counter.
 */

enum {
        RES_USAGE,
        RES_MAX_USAGE,
        RES_LIMIT,
        RES_FAILCNT,
};

/*
 * helpers for accounting
 */

void res_counter_init(struct res_counter *counter);

/*
 * charge - try to consume more resource.
 *
 * @counter: the counter
 * @val: the amount of the resource. each controller defines its own
 * units, e.g. numbers, bytes, Kbytes, etc
 *
 * returns 0 on success and <0 if counter->usage would exceed
 * counter->limit; the _locked call expects counter->lock to be held
 */

int __must_check res_counter_charge_locked(struct res_counter *counter,
		unsigned long val);
int __must_check res_counter_charge(struct res_counter *counter,
		unsigned long val);

/*
 * uncharge - tell that some portion of the resource is released
 *
 * @counter: the counter
 * @val: the amount of the resource
 *
 * these calls check for usage underflow and warn on the console;
 * the _locked call expects counter->lock to be held
 */

void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
void res_counter_uncharge(struct res_counter *counter, unsigned long val);
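
/*
 * Typical charge/uncharge pairing (a sketch; PAGE_SIZE as the unit and
 * the "ctrl" pointer are assumptions, each controller picks its own):
 *
 *	if (res_counter_charge(&ctrl->res, PAGE_SIZE))
 *		return -ENOMEM;
 *
 * A failed charge bumps failcnt and leaves usage untouched. Once the
 * resource is released again, drop the charge:
 *
 *	res_counter_uncharge(&ctrl->res, PAGE_SIZE);
 */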

static inline bool res_counter_limit_check_locked(struct res_counter *cnt)
{
        if (cnt->usage < cnt->limit)
                return true;

        return false;
}

/*
 * Helper function to detect whether the cgroup is within its limit or
 * not. It is currently called from cgroup_rss_prepare().
 */
static inline bool res_counter_check_under_limit(struct res_counter *cnt)
{
        bool ret;
        unsigned long flags;

        spin_lock_irqsave(&cnt->lock, flags);
        ret = res_counter_limit_check_locked(cnt);
        spin_unlock_irqrestore(&cnt->lock, flags);
        return ret;
}
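
/*
 * Typical use is a quick gate around charging, e.g. to decide whether
 * reclaim-style work is needed (sketch; the reclaim helper is
 * hypothetical):
 *
 *	if (!res_counter_check_under_limit(&ctrl->res))
 *		my_try_to_reclaim(ctrl);
 */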

static inline void res_counter_reset_max(struct res_counter *cnt)
{
        unsigned long flags;

        spin_lock_irqsave(&cnt->lock, flags);
        cnt->max_usage = cnt->usage;
        spin_unlock_irqrestore(&cnt->lock, flags);
}

static inline void res_counter_reset_failcnt(struct res_counter *cnt)
{
        unsigned long flags;

        spin_lock_irqsave(&cnt->lock, flags);
        cnt->failcnt = 0;
        spin_unlock_irqrestore(&cnt->lock, flags);
}

static inline int res_counter_set_limit(struct res_counter *cnt,
		unsigned long long limit)
{
        unsigned long flags;
        int ret = -EBUSY;

        spin_lock_irqsave(&cnt->lock, flags);
        if (cnt->usage <= limit) {
                cnt->limit = limit;
                ret = 0;
        }
        spin_unlock_irqrestore(&cnt->lock, flags);
        return ret;
}
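
/*
 * Usage sketch: shrinking the limit below the current usage is refused
 * with -EBUSY ("new_limit" would come from e.g. the write strategy
 * above):
 *
 *	if (res_counter_set_limit(&ctrl->res, new_limit))
 *		return -EBUSY;
 */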

#endif /* __RES_COUNTER_H__ */