#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};
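
/*
 * For example, SLUB (roughly) walks DOWN -> PARTIAL -> UP during
 * kmem_cache_init() and create_kmalloc_caches(), and reaches FULL once
 * sysfs support is initialized late in boot.
 */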

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

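/*
 * Compute the effective alignment for a cache: the caller-requested align
 * is raised as needed for SLAB_HWCACHE_ALIGN and the architecture minimum
 * (see mm/slab_common.c).
 */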
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif


/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

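/*
 * Helpers for creating caches during early boot, before the regular
 * kmem_cache_create() path is usable.
 */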
extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

struct mem_cgroup;
#ifdef CONFIG_SLUB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif

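/*
 * Only SLUB implements cache merging: __kmem_cache_alias() may return an
 * existing compatible cache for kmem_cache_create() to reuse instead of
 * creating a new one; the stub above opts out by returning NULL.
 */
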
/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

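/*
 * kmem_cache_create() filters the caller's flags with CACHE_CREATE_MASK,
 * so internal allocator flags cannot be requested from outside.
 */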
int __kmem_cache_shutdown(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
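
/* The declarations above back the /proc/slabinfo interface. */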

#ifdef CONFIG_MEMCG_KMEM
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params || s->memcg_params->is_root_cache;
}
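
/*
 * nr_pages tracks how many pages are currently allocated to a per-memcg
 * cache (each slab accounts for 2^order pages), letting the memcg code
 * tell whether a cache still has outstanding slabs.
 */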
static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
	if (!is_root_cache(s))
		atomic_add(1 << order, &s->memcg_params->nr_pages);
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
	if (!is_root_cache(s))
		atomic_sub(1 << order, &s->memcg_params->nr_pages);
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return (p == s) ||
		(s->memcg_params && (p == s->memcg_params->root_cache));
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		return s->memcg_params->root_cache->name;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away. Since,
 * once created, a memcg's cache is destroyed only along with the root cache,
 * this is guaranteed if we are going to allocate from the cache or hold a
 * reference to the root cache by other means. Otherwise, we should hold
 * either the slab_mutex or the memcg's slab_caches_mutex while calling this
 * function and accessing the returned value.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_params *params;

	if (!s->memcg_params)
		return NULL;

	rcu_read_lock();
	params = rcu_dereference(s->memcg_params);
	cachep = params->memcg_caches[idx];
	rcu_read_unlock();

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_register_cache()).
	 */
	smp_read_barrier_depends();
	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params->root_cache;
}

static __always_inline int memcg_charge_slab(struct kmem_cache *s,
					     gfp_t gfp, int order)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return memcg_charge_kmem(s->memcg_params->memcg, gfp,
				 PAGE_SIZE << order);
}

static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
	if (!memcg_kmem_enabled())
		return;
	if (is_root_cache(s))
		return;
	memcg_uncharge_kmem(s->memcg_params->memcg, PAGE_SIZE << order);
}
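
/*
 * Rough usage sketch (simplified, not the exact allocator code): the page
 * allocation paths charge a slab of 2^order pages to the owning memcg up
 * front and uncharge it again if the allocation fails or the slab is freed:
 *
 *	if (memcg_charge_slab(s, gfp, order))
 *		return NULL;
 *	page = alloc_pages(gfp, order);
 *	if (!page)
 *		memcg_uncharge_slab(s, order);
 */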
#else
static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
}
#endif
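
/*
 * cache_from_obj() is used on the free path to find the cache that really
 * owns an object: with kmemcg enabled, the object may belong to a per-memcg
 * child of the cache the caller passed in.
 */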
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * not to do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, cachep->name, s->name);
	WARN_ON_ONCE(1);
	return s;
}
#endif


/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

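/* seq_file iteration helpers for /proc/slabinfo (see mm/slab_common.c). */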
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);