UPSTREAM: kasan: improve double-free reports
[GitHub/exynos8895/android_kernel_samsung_universal8895.git] mm/slab_common.c
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | SLAB_KASAN)

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | SLAB_NOTRACK)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */
static int slab_nomerge;

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = 1;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);
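
/*
 * Example (illustrative, not part of this file): cache merging can be
 * disabled from the kernel command line. With SLUB built in, the legacy
 * spelling is accepted as an alias via the __setup_param() above:
 *
 *	slab_nomerge
 *	slub_nomerge
 */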

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
	    size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module. Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif

void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++)
		kmem_cache_free(s, p[i]);
}

int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
			    void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		void *x = p[i] = kmem_cache_alloc(s, flags);
		if (!x) {
			__kmem_cache_free_bulk(s, i, p);
			return 0;
		}
	}
	return i;
}
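
/*
 * Illustrative sketch (not part of this file): the bulk API is
 * all-or-nothing. On failure everything allocated so far is freed again and
 * 0 is returned, so callers check the return value once and never clean up
 * a partial batch. "my_cache" is a hypothetical cache:
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(my_cache, GFP_KERNEL,
 *				   ARRAY_SIZE(objs), objs))
 *		return -ENOMEM;	// nothing to unwind
 *	...
 *	kmem_cache_free_bulk(my_cache, ARRAY_SIZE(objs), objs);
 */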

#ifdef CONFIG_MEMCG_KMEM
void slab_init_memcg_params(struct kmem_cache *s)
{
	s->memcg_params.is_root_cache = true;
	INIT_LIST_HEAD(&s->memcg_params.list);
	RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
}

static int init_memcg_params(struct kmem_cache *s,
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct memcg_cache_array *arr;

	if (memcg) {
		s->memcg_params.is_root_cache = false;
		s->memcg_params.memcg = memcg;
		s->memcg_params.root_cache = root_cache;
		return 0;
	}

	slab_init_memcg_params(s);

	if (!memcg_nr_cache_ids)
		return 0;

	arr = kzalloc(sizeof(struct memcg_cache_array) +
		      memcg_nr_cache_ids * sizeof(void *),
		      GFP_KERNEL);
	if (!arr)
		return -ENOMEM;

	RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
	return 0;
}

static void destroy_memcg_params(struct kmem_cache *s)
{
	if (is_root_cache(s))
		kfree(rcu_access_pointer(s->memcg_params.memcg_caches));
}

static int update_memcg_params(struct kmem_cache *s, int new_array_size)
{
	struct memcg_cache_array *old, *new;

	if (!is_root_cache(s))
		return 0;

	new = kzalloc(sizeof(struct memcg_cache_array) +
		      new_array_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	old = rcu_dereference_protected(s->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));
	if (old)
		memcpy(new->entries, old->entries,
		       memcg_nr_cache_ids * sizeof(void *));

	rcu_assign_pointer(s->memcg_params.memcg_caches, new);
	if (old)
		kfree_rcu(old, rcu);
	return 0;
}

int memcg_update_all_caches(int num_memcgs)
{
	struct kmem_cache *s;
	int ret = 0;

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_caches, list) {
		ret = update_memcg_params(s, num_memcgs);
		/*
		 * Instead of freeing the memory, we'll just leave the caches
		 * up to this point in an updated state.
		 */
		if (ret)
			break;
	}
	mutex_unlock(&slab_mutex);
	return ret;
}
#else
static inline int init_memcg_params(struct kmem_cache *s,
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	return 0;
}

static inline void destroy_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (!is_root_cache(s))
		return 1;

	if (s->ctor)
		return 1;

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge)
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name, NULL);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
			(align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}
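
/*
 * Worked example (illustrative): on a 64-bit box a request for a 52-byte
 * cache with default flags rounds up to 56 bytes and can be merged into an
 * existing mergeable 56-byte cache, but not into a 64-byte one: the wasted
 * tail (64 - 56 == 8) reaches sizeof(void *), so the
 * "s->size - size >= sizeof(void *)" test above rejects the match.
 */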

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater, use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}
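
/*
 * Worked example (illustrative): with a 64-byte cache line and
 * SLAB_HWCACHE_ALIGN, a 20-byte object halves ralign from 64 to 32
 * (20 <= 32) and stops there (20 > 16), so small objects share a cache
 * line rather than each taking a whole one:
 *
 *	calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 20)  == 32
 *	calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 100) == 64
 *
 * (assuming ARCH_SLAB_MINALIGN does not exceed these values).
 */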

static struct kmem_cache *create_cache(const char *name,
		size_t object_size, size_t size, size_t align,
		unsigned long flags, void (*ctor)(void *),
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->object_size = object_size;
	s->size = size;
	s->align = align;
	s->ctor = ctor;

	err = init_memcg_params(s, memcg, root_cache);
	if (err)
		goto out_free_cache;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	destroy_memcg_params(s);
	kmem_cache_free(kmem_cache, s);
	goto out;
}

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
		  unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

	get_online_cpus();
	get_online_mems();
	memcg_get_cache_ids();

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err) {
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = create_cache(cache_name, size, size,
			 calculate_alignment(flags, align, size),
			 flags, ctor, NULL, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	memcg_put_cache_ids();
	put_online_mems();
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d\n",
				name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create);
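
/*
 * Illustrative usage (not part of this file): a driver typically creates
 * its cache once at init and destroys it at exit. "struct foo",
 * "foo_cachep" and the init/exit hooks below are hypothetical:
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_cachep = kmem_cache_create("foo", sizeof(struct foo),
 *					       0, SLAB_HWCACHE_ALIGN, NULL);
 *		return foo_cachep ? 0 : -ENOMEM;
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		// all objects must have been freed back first
 *		kmem_cache_destroy(foo_cachep);
 *	}
 */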

static int shutdown_cache(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	if (__kmem_cache_shutdown(s) != 0)
		return -EBUSY;

	if (s->flags & SLAB_DESTROY_BY_RCU)
		*need_rcu_barrier = true;

	list_move(&s->list, release);
	return 0;
}

static void release_caches(struct list_head *release, bool need_rcu_barrier)
{
	struct kmem_cache *s, *s2;

	if (need_rcu_barrier)
		rcu_barrier();

	list_for_each_entry_safe(s, s2, release, list) {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_remove(s);
#else
		slab_kmem_cache_release(s);
#endif
	}
}

#ifdef CONFIG_MEMCG_KMEM
/*
 * memcg_create_kmem_cache - Create a cache for a memory cgroup.
 * @memcg: The memory cgroup the new cache is for.
 * @root_cache: The parent of the new cache.
 *
 * This function attempts to create a kmem cache that will serve allocation
 * requests going from @memcg to @root_cache. The new cache inherits properties
 * from its parent.
 */
void memcg_create_kmem_cache(struct mem_cgroup *memcg,
			     struct kmem_cache *root_cache)
{
	static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
	struct cgroup_subsys_state *css = &memcg->css;
	struct memcg_cache_array *arr;
	struct kmem_cache *s = NULL;
	char *cache_name;
	int idx;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	/*
	 * The memory cgroup could have been deactivated while the cache
	 * creation work was pending.
	 */
	if (!memcg_kmem_is_active(memcg))
		goto out_unlock;

	idx = memcg_cache_id(memcg);
	arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));

	/*
	 * Since per-memcg caches are created asynchronously on first
	 * allocation (see memcg_kmem_get_cache()), several threads can try to
	 * create the same cache, but only one of them may succeed.
	 */
	if (arr->entries[idx])
		goto out_unlock;

	cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
	cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
			       css->serial_nr, memcg_name_buf);
	if (!cache_name)
		goto out_unlock;

	s = create_cache(cache_name, root_cache->object_size,
			 root_cache->size, root_cache->align,
			 root_cache->flags, root_cache->ctor,
			 memcg, root_cache);
	/*
	 * If we could not create a memcg cache, do not complain, because
	 * that's not critical at all as we can always proceed with the root
	 * cache.
	 */
	if (IS_ERR(s)) {
		kfree(cache_name);
		goto out_unlock;
	}

	list_add(&s->memcg_params.list, &root_cache->memcg_params.list);

	/*
	 * Since readers won't lock (see cache_from_memcg_idx()), we need a
	 * barrier here to ensure nobody will see the kmem_cache partially
	 * initialized.
	 */
	smp_wmb();
	arr->entries[idx] = s;

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
{
	int idx;
	struct memcg_cache_array *arr;
	struct kmem_cache *s, *c;

	idx = memcg_cache_id(memcg);

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_caches, list) {
		if (!is_root_cache(s))
			continue;

		arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
						lockdep_is_held(&slab_mutex));
		c = arr->entries[idx];
		if (!c)
			continue;

		__kmem_cache_shrink(c, true);
		arr->entries[idx] = NULL;
	}
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

static int __shutdown_memcg_cache(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	BUG_ON(is_root_cache(s));

	if (shutdown_cache(s, release, need_rcu_barrier))
		return -EBUSY;

	list_del(&s->memcg_params.list);
	return 0;
}

void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
{
	LIST_HEAD(release);
	bool need_rcu_barrier = false;
	struct kmem_cache *s, *s2;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	list_for_each_entry_safe(s, s2, &slab_caches, list) {
		if (is_root_cache(s) || s->memcg_params.memcg != memcg)
			continue;
		/*
		 * The cgroup is about to be freed and therefore has no charges
		 * left. Hence, all its caches must be empty by now.
		 */
		BUG_ON(__shutdown_memcg_cache(s, &release, &need_rcu_barrier));
	}
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	release_caches(&release, need_rcu_barrier);
}

static int shutdown_memcg_caches(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	struct memcg_cache_array *arr;
	struct kmem_cache *c, *c2;
	LIST_HEAD(busy);
	int i;

	BUG_ON(!is_root_cache(s));

	/*
	 * First, shutdown active caches, i.e. caches that belong to online
	 * memory cgroups.
	 */
	arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));
	for_each_memcg_cache_index(i) {
		c = arr->entries[i];
		if (!c)
			continue;
		if (__shutdown_memcg_cache(c, release, need_rcu_barrier))
			/*
			 * The cache still has objects. Move it to a temporary
			 * list so as not to try to destroy it for a second
			 * time while iterating over inactive caches below.
			 */
			list_move(&c->memcg_params.list, &busy);
		else
			/*
			 * The cache is empty and will be destroyed soon. Clear
			 * the pointer to it in the memcg_caches array so that
			 * it will never be accessed even if the root cache
			 * stays alive.
			 */
			arr->entries[i] = NULL;
	}

	/*
	 * Second, shutdown all caches left from memory cgroups that are now
	 * offline.
	 */
	list_for_each_entry_safe(c, c2, &s->memcg_params.list,
				 memcg_params.list)
		__shutdown_memcg_cache(c, release, need_rcu_barrier);

	list_splice(&busy, &s->memcg_params.list);

	/*
	 * A cache being destroyed must be empty. In particular, this means
	 * that all per memcg caches attached to it must be empty too.
	 */
	if (!list_empty(&s->memcg_params.list))
		return -EBUSY;
	return 0;
}
#else
static inline int shutdown_memcg_caches(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	return 0;
}
#endif /* CONFIG_MEMCG_KMEM */

void slab_kmem_cache_release(struct kmem_cache *s)
{
	destroy_memcg_params(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	LIST_HEAD(release);
	bool need_rcu_barrier = false;
	int err;

	if (unlikely(!s))
		return;

	get_online_cpus();
	get_online_mems();

	kasan_cache_destroy(s);
	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	err = shutdown_memcg_caches(s, &release, &need_rcu_barrier);
	if (!err)
		err = shutdown_cache(s, &release, &need_rcu_barrier);

	if (err) {
		pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
		       s->name);
		dump_stack();
	}
out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	release_caches(&release, need_rcu_barrier);
}
EXPORT_SYMBOL(kmem_cache_destroy);

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;

	get_online_cpus();
	get_online_mems();
	kasan_cache_shrink(cachep);
	ret = __kmem_cache_shrink(cachep, false);
	put_online_mems();
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

bool slab_is_available(void)
{
	return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
		unsigned long flags)
{
	int err;

	s->name = name;
	s->size = s->object_size = size;
	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);

	slab_init_memcg_params(s);

	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
				unsigned long flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif

/*
 * Conversion table for small slabs sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static s8 size_index[24] = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}
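
/*
 * Worked example (illustrative): a request for 33 bytes gives
 * size_index_elem(33) == (33 - 1) / 8 == 4, and size_index[4] == 6, i.e.
 * kmalloc-64. Indices 1 and 2 are the non-power-of-two kmalloc-96 and
 * kmalloc-192 caches that make the table necessary below 192 bytes.
 */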

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	int index;

	if (unlikely(size > KMALLOC_MAX_SIZE)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else
		index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
	if (unlikely((flags & GFP_DMA)))
		return kmalloc_dma_caches[index];
#endif
	return kmalloc_caches[index];
}
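
/*
 * Worked example (illustrative): for size == 200 the table is skipped and
 * fls(199) == 8, so the request is served from kmalloc_caches[8]
 * (kmalloc-256). An exact power of two such as 256 also gives
 * fls(255) == 8, landing in the same cache with no slack.
 */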

/*
 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
 * kmalloc-67108864.
 */
static struct {
	const char *name;
	unsigned long size;
} const kmalloc_info[] __initconst = {
	{NULL, 0},			{"kmalloc-96", 96},
	{"kmalloc-192", 192},		{"kmalloc-8", 8},
	{"kmalloc-16", 16},		{"kmalloc-32", 32},
	{"kmalloc-64", 64},		{"kmalloc-128", 128},
	{"kmalloc-256", 256},		{"kmalloc-512", 512},
	{"kmalloc-1024", 1024},		{"kmalloc-2048", 2048},
	{"kmalloc-4096", 4096},		{"kmalloc-8192", 8192},
	{"kmalloc-16384", 16384},	{"kmalloc-32768", 32768},
	{"kmalloc-65536", 65536},	{"kmalloc-131072", 131072},
	{"kmalloc-262144", 262144},	{"kmalloc-524288", 524288},
	{"kmalloc-1048576", 1048576},	{"kmalloc-2097152", 2097152},
	{"kmalloc-4194304", 4194304},	{"kmalloc-8388608", 8388608},
	{"kmalloc-16777216", 16777216},	{"kmalloc-33554432", 33554432},
	{"kmalloc-67108864", 67108864}
};

/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This seems to be the case only for
 * MIPS. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN.
 */
void __init setup_kmalloc_cache_index_table(void)
{
	int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte size cache is not used if the alignment
		 * is 64 bytes.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;
	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
}

static void __init new_kmalloc_cache(int idx, unsigned long flags)
{
	kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
					kmalloc_info[idx].size, flags);
}

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(unsigned long flags)
{
	int i;

	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!kmalloc_caches[i])
			new_kmalloc_cache(i, flags);

		/*
		 * Caches that are not of the two-to-the-power-of size.
		 * These have to be created immediately after the
		 * earlier power of two caches.
		 */
		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
			new_kmalloc_cache(1, flags);
		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
			new_kmalloc_cache(2, flags);
	}

	/* Kmalloc array is now usable */
	slab_state = UP;

#ifdef CONFIG_ZONE_DMA
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];

		if (s) {
			int size = kmalloc_size(i);
			char *n = kasprintf(GFP_NOWAIT,
					    "dma-kmalloc-%d", size);

			BUG_ON(!n);
			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
				size, SLAB_CACHE_DMA | flags);
		}
	}
#endif
}
#endif /* !CONFIG_SLOB */

/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;
	struct page *page;

	flags |= __GFP_COMP;
	page = alloc_kmem_pages(flags, order);
	ret = page ? page_address(page) : NULL;
	kmemleak_alloc(ret, size, 1, flags);
	kasan_kmalloc_large(ret, size, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order);
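
/*
 * Worked example (illustrative): with 4 KiB pages on SLUB, where the
 * kmalloc caches stop at two pages, a 16 KiB kmalloc() is routed here with
 * order == get_order(16384) == 2, i.e. a compound allocation of four
 * contiguous pages; kfree() later recovers the order from the compound
 * page head to free it.
 */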

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);
	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_SLABINFO

#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
#define SLABINFO_RIGHTS S_IRUSR
#endif

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
	struct kmem_cache *c;
	struct slabinfo sinfo;

	if (!is_root_cache(s))
		return;

	for_each_memcg_cache(c, s) {
		memset(&sinfo, 0, sizeof(sinfo));
		get_slabinfo(c, &sinfo);

		info->active_slabs += sinfo.active_slabs;
		info->num_slabs += sinfo.num_slabs;
		info->shared_avail += sinfo.shared_avail;
		info->active_objs += sinfo.active_objs;
		info->num_objs += sinfo.num_objs;
	}
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	memcg_accumulate_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	if (is_root_cache(s))
		cache_show(s, m);
	return 0;
}

#ifdef CONFIG_MEMCG_KMEM
int memcg_slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	if (!is_root_cache(s) && s->memcg_params.memcg == memcg)
		cache_show(s, m);
	return 0;
}
#endif

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open = slabinfo_open,
	.read = seq_read,
	.write = slabinfo_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
		    &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size) {
		kasan_krealloc((void *)p, new_size, flags);
		return (void *)p;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer
 * immediately, as with RCU, for example.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);
}
EXPORT_SYMBOL(__krealloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
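
/*
 * Illustrative usage (not part of this file): as with userspace realloc(),
 * the old buffer is freed only on success, so overwriting the sole pointer
 * must go through a temporary. "buf" and "n" are hypothetical:
 *
 *	char *tmp = krealloc(buf, n * 2, GFP_KERNEL);
 *
 *	if (!tmp)
 *		return -ENOMEM;	// buf is still valid here
 *	buf = tmp;
 */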

/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);
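
/*
 * Illustrative usage (not part of this file): kzfree() suits buffers that
 * held secrets, since ksize() may exceed the requested size and the whole
 * slot is cleared. "key" and "key_len" are hypothetical:
 *
 *	u8 *key = kmalloc(key_len, GFP_KERNEL);
 *	...
 *	kzfree(key);	// wipes the full allocation, then frees it
 */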

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);