/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;	/* The original size of the object */
	unsigned int size;		/* The aligned/padded/added on size */
	unsigned int align;		/* Alignment as calculated */
	unsigned long flags;		/* Active flags on the slab */
	const char *name;		/* Slab name for sysfs */
	int refcount;			/* Use counter */
	void (*ctor)(void *);		/* Called on object slot creation */
	struct list_head list;		/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

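/*
 * Illustrative usage (not part of the original file): early-boot code
 * typically checks whether the allocator has bootstrapped far enough
 * (slab_is_available() reports slab_state >= UP) before relying on it:
 *
 *	if (slab_is_available())
 *		ptr = kmalloc(size, GFP_KERNEL);
 *	else
 *		ptr = memblock_virt_alloc(size, 0);
 */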
extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name;
	unsigned long size;
} kmalloc_info[];

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif


/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_NOTRACK | SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_NOTRACK | \
			      SLAB_ACCOUNT)

int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case, segments of the objects listed
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)

/* List of all root caches. */
extern struct list_head		slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)
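
/*
 * Illustrative usage (not part of the original file), with slab_mutex held;
 * do_something_with() is a placeholder:
 *
 *	struct kmem_cache *c;
 *
 *	for_each_memcg_cache(c, root_cache)
 *		do_something_with(c);
 */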

static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them with the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = lockless_dereference(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

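/*
 * Charge a freshly allocated slab page to the memory cgroup that owns this
 * (non-root) cache. Returns 0 if the charge succeeded or no charging was
 * required.
 */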
static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
}

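/* Uncharge a slab page that was charged via memcg_charge_slab(). */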
static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return;
	memcg_kmem_uncharge(page, order);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s);
extern void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
				void (*deact_fn)(struct kmem_cache *));

#else /* CONFIG_MEMCG && !CONFIG_SLOB */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s)
{
}

#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

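/*
 * Map an object back to its kmem_cache via the head page. When memcg or
 * consistency checking is active, the result is verified against the cache
 * the caller passed in; on a mismatch a warning is printed and the caller's
 * cache is used.
 */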
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}

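/*
 * Usable size of an object from this cache: how many bytes the caller may
 * actually touch. For SLUB this can exceed object_size when the padding is
 * free for use, or be limited to object_size when debugging metadata
 * follows the object.
 */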
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

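/*
 * Common pre-allocation hook shared by the allocators: masks the gfp flags,
 * exercises the reclaim-acquire annotations and fault injection, and
 * redirects the allocation to the matching per-memcg cache when kmem
 * accounting applies. Returns the cache to allocate from, or NULL if fault
 * injection decided this allocation should fail.
 */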
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

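/*
 * Common post-allocation hook: tells kmemcheck, kmemleak and KASAN about
 * every object that was just allocated and drops the per-memcg cache
 * reference taken in slab_pre_alloc_hook().
 */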
static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
		kasan_slab_alloc(s, object, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

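/* Return the kmem_cache_node of @s for the given NUMA node. */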
static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
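
/*
 * Illustrative usage (not part of the original file); do_something_with()
 * is a placeholder for any per-node work:
 *
 *	struct kmem_cache_node *n;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		do_something_with(n);
 */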

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

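/*
 * Freelist randomization: build or tear down the per-cache random sequence
 * used to shuffle object order within new slabs.
 */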
#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#endif /* MM_SLAB_H */