#ifndef __RES_COUNTER_H__
#define __RES_COUNTER_H__

/*
 * Resource Counters
 * Contain common data types and routines for resource accounting
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * See Documentation/controllers/resource_counter.txt for more
 * info about what this counter is.
 */

#include <linux/cgroup.h>

/*
 * The core object. A cgroup that wishes to account for some
 * resource may include this counter in its structures and use
 * the helpers described below.
 */

struct res_counter {
	/*
	 * the current resource consumption level
	 */
	unsigned long long usage;
	/*
	 * the maximal value of the usage from the counter creation
	 */
	unsigned long long max_usage;
	/*
	 * the limit that usage cannot exceed
	 */
	unsigned long long limit;
	/*
	 * the number of unsuccessful attempts to consume the resource
	 */
	unsigned long long failcnt;
	/*
	 * the lock to protect all of the above.
	 * the routines below consider this to be IRQ-safe
	 */
	spinlock_t lock;
};
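
/*
 * Example (illustrative sketch, not part of this API): a controller
 * typically embeds the counter in its own per-group state.  The
 * "example_controller" type below is a hypothetical name used only
 * for illustration.
 */
struct example_controller {
	struct res_counter res;		/* accounting state for one group */
	/* ... controller-specific fields would follow ... */
};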

/**
 * Helpers to interact with userspace:
 * res_counter_read_u64() - returns the value of the specified member.
 * res_counter_read/_write - put/get the specified fields from the
 * res_counter struct to/from the user
 *
 * @counter: the counter in question
 * @member: the field to work with (see RES_xxx below)
 * @buf: the buffer to operate on,...
 * @nbytes: its size...
 * @pos: and the offset.
 */

u64 res_counter_read_u64(struct res_counter *counter, int member);

ssize_t res_counter_read(struct res_counter *counter, int member,
		const char __user *buf, size_t nbytes, loff_t *pos,
		int (*read_strategy)(unsigned long long val, char *s));

typedef int (*write_strategy_fn)(const char *buf, unsigned long long *val);

int res_counter_memparse_write_strategy(const char *buf,
		unsigned long long *res);

int res_counter_write(struct res_counter *counter, int member,
		const char *buffer, write_strategy_fn write_strategy);

/*
 * the field descriptors. one for each member of res_counter
 */

enum {
	RES_USAGE,
	RES_MAX_USAGE,
	RES_LIMIT,
	RES_FAILCNT,
};
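
/*
 * Example (illustrative sketch, not part of this API): how a
 * controller's cgroup file handlers could use the helpers above
 * together with the RES_xxx descriptors.  "example_read_usage" and
 * "example_write_limit" are hypothetical names; a controller that
 * must refuse a limit lower than the current usage would go through
 * res_counter_set_limit() further down instead of res_counter_write().
 */
static inline u64 example_read_usage(struct res_counter *counter)
{
	/* report the current consumption level to userspace */
	return res_counter_read_u64(counter, RES_USAGE);
}

static inline int example_write_limit(struct res_counter *counter,
		const char *buf)
{
	/*
	 * parse a user-supplied string such as "4096" or "64M"
	 * (memparse-style suffixes) and store it as the new limit
	 */
	return res_counter_write(counter, RES_LIMIT, buf,
			res_counter_memparse_write_strategy);
}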

/*
 * helpers for accounting
 */

void res_counter_init(struct res_counter *counter);

/*
 * charge - try to consume more resource.
 *
 * @counter: the counter
 * @val: the amount of the resource. each controller defines its own
 * units, e.g. numbers, bytes, Kbytes, etc
 *
 * returns 0 on success and <0 if the counter->usage will exceed the
 * counter->limit. The _locked call expects the counter->lock to be taken.
 */

int __must_check res_counter_charge_locked(struct res_counter *counter,
		unsigned long val);
int __must_check res_counter_charge(struct res_counter *counter,
		unsigned long val);
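
/*
 * Example (illustrative sketch, not part of this API): a hypothetical
 * allocation path in a controller that accounts in bytes.  Because of
 * __must_check above, the result of res_counter_charge() has to be
 * examined; a failed charge is what the failcnt field counts.
 */
static inline int example_try_alloc(struct res_counter *counter,
		unsigned long bytes)
{
	int ret;

	ret = res_counter_charge(counter, bytes);
	if (ret < 0)
		return ret;	/* usage would have exceeded the limit */

	/* ... hand out the resource here ... */
	return 0;
}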

/*
 * uncharge - tell that some portion of the resource is released
 *
 * @counter: the counter
 * @val: the amount of the resource
 *
 * these calls check for usage underflow and show a warning on the console.
 * The _locked call expects the counter->lock to be taken.
 */

void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
void res_counter_uncharge(struct res_counter *counter, unsigned long val);
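
/*
 * Example (illustrative sketch, not part of this API): the release
 * path matching example_try_alloc() above.  Each successful charge
 * should eventually be undone with the same amount; uncharging more
 * than was charged trips the underflow warning mentioned above.
 */
static inline void example_free(struct res_counter *counter,
		unsigned long bytes)
{
	/* ... release the underlying resource first ... */
	res_counter_uncharge(counter, bytes);
}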

static inline bool res_counter_limit_check_locked(struct res_counter *cnt)
{
	if (cnt->usage < cnt->limit)
		return true;

	return false;
}

/*
 * Helper function to detect if the cgroup is within its limit or
 * not. It's currently called from cgroup_rss_prepare()
 */
static inline bool res_counter_check_under_limit(struct res_counter *cnt)
{
	bool ret;
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	ret = res_counter_limit_check_locked(cnt);
	spin_unlock_irqrestore(&cnt->lock, flags);
	return ret;
}
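
/*
 * Example (illustrative sketch, not part of this API): a controller
 * that can reclaim its resource may poll this helper to decide when
 * to stop.  "example_reclaim_one" stands in for whatever
 * controller-specific reclaim callback exists; both names here are
 * hypothetical.
 */
static inline int example_shrink_until_under_limit(struct res_counter *cnt,
		int (*example_reclaim_one)(void *data), void *data)
{
	while (!res_counter_check_under_limit(cnt)) {
		/* stop if nothing more could be reclaimed */
		if (!example_reclaim_one(data))
			return -ENOMEM;
	}
	return 0;
}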

static inline void res_counter_reset_max(struct res_counter *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->max_usage = cnt->usage;
	spin_unlock_irqrestore(&cnt->lock, flags);
}

static inline void res_counter_reset_failcnt(struct res_counter *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->failcnt = 0;
	spin_unlock_irqrestore(&cnt->lock, flags);
}

static inline int res_counter_set_limit(struct res_counter *cnt,
		unsigned long long limit)
{
	unsigned long flags;
	int ret = -EBUSY;

	spin_lock_irqsave(&cnt->lock, flags);
	if (cnt->usage <= limit) {
		cnt->limit = limit;
		ret = 0;
	}
	spin_unlock_irqrestore(&cnt->lock, flags);
	return ret;
}
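
/*
 * Example (illustrative sketch, not part of this API): shrinking a
 * limit returns -EBUSY while the current usage is still above the
 * requested value, so a controller would typically reclaim and retry
 * a bounded number of times.  "example_reclaim_one" is again a
 * hypothetical controller-specific callback.
 */
static inline int example_resize_limit(struct res_counter *cnt,
		unsigned long long new_limit,
		int (*example_reclaim_one)(void *data), void *data)
{
	int retries = 5;	/* arbitrary bound for this sketch */
	int ret;

	while ((ret = res_counter_set_limit(cnt, new_limit)) == -EBUSY) {
		if (!retries-- || !example_reclaim_one(data))
			break;	/* give up; ret stays -EBUSY */
	}
	return ret;
}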

#endif