/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE               16      /* stack trace length */
#define MSECS_MIN_AGE           5000    /* minimum object age for reporting */
#define SECS_FIRST_SCAN         60      /* delay before the first scan */
#define SECS_SCAN_WAIT          600     /* subsequent auto scanning delay */
#define MAX_SCAN_SIZE           4096    /* maximum size of a scanned block */

#define BYTES_PER_POINTER       sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)  (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
                                 __GFP_NORETRY | __GFP_NOMEMALLOC | \
                                 __GFP_NOWARN)
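
/*
 * Illustrative note (wording mine, not from the original source): passing
 * e.g. GFP_NOFS through gfp_kmemleak_mask() keeps only the caller's
 * GFP_KERNEL/GFP_ATOMIC bits and ORs in __GFP_NORETRY | __GFP_NOMEMALLOC |
 * __GFP_NOWARN, so kmemleak's own metadata allocations fail fast and
 * quietly under memory pressure instead of warning or dipping into
 * emergency reserves.
 */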

/* scanning area inside a memory block */
struct kmemleak_scan_area {
        struct hlist_node node;
        unsigned long start;
        size_t size;
};

#define KMEMLEAK_GREY   0
#define KMEMLEAK_BLACK  -1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
        spinlock_t lock;
        unsigned int flags;             /* object status flags */
        struct list_head object_list;
        struct list_head gray_list;
        struct rb_node rb_node;
        struct rcu_head rcu;            /* object_list lockless traversal */
        /* object usage count; object freed when use_count == 0 */
        atomic_t use_count;
        unsigned long pointer;
        size_t size;
        /* pass surplus references to this pointer */
        unsigned long excess_ref;
        /* minimum number of pointers found before it is considered a leak */
        int min_count;
        /* the total number of pointers found pointing to this object */
        int count;
        /* checksum for detecting modified objects */
        u32 checksum;
        /* memory ranges to be scanned inside an object (empty for all) */
        struct hlist_head area_list;
        unsigned long trace[MAX_TRACE];
        unsigned int trace_len;
        unsigned long jiffies;          /* creation timestamp */
        pid_t pid;                      /* pid of the current task */
        char comm[TASK_COMM_LEN];       /* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED        (1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED         (1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN          (1 << 2)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE            16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE          1
/* include ASCII after the hex output */
#define HEX_ASCII               1
/* max number of lines to be printed */
#define HEX_MAX_LINES           2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* enables or disables early logging of the memory operations */
static int kmemleak_early_log = 1;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* If there are leaks that can be reported */
static bool kmemleak_found_leaks;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
        KMEMLEAK_ALLOC,
        KMEMLEAK_ALLOC_PERCPU,
        KMEMLEAK_FREE,
        KMEMLEAK_FREE_PART,
        KMEMLEAK_FREE_PERCPU,
        KMEMLEAK_NOT_LEAK,
        KMEMLEAK_IGNORE,
        KMEMLEAK_SCAN_AREA,
        KMEMLEAK_NO_SCAN,
        KMEMLEAK_SET_EXCESS_REF
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
        int op_type;                    /* kmemleak operation type */
        int min_count;                  /* minimum reference count */
        const void *ptr;                /* allocated/freed memory block */
        union {
                size_t size;            /* memory block size */
                unsigned long excess_ref; /* surplus reference passing */
        };
        unsigned long trace[MAX_TRACE]; /* stack trace */
        unsigned int trace_len;         /* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
        early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)     do {            \
        pr_warn(x);                             \
        dump_stack();                           \
        kmemleak_warning = 1;                   \
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)     do {    \
        kmemleak_warn(x);               \
        kmemleak_disable();             \
} while (0)

/*
 * Printing of the object's hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
                            struct kmemleak_object *object)
{
        const u8 *ptr = (const u8 *)object->pointer;
        size_t len;

        /* limit the number of lines to HEX_MAX_LINES */
        len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

        seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
        kasan_disable_current();
        seq_hex_dump(seq, "    ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
                     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
        kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *              sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *              (min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
        return object->count != KMEMLEAK_BLACK &&
                object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
        return object->min_count != KMEMLEAK_BLACK &&
                object->count >= object->min_count;
}
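
/*
 * Worked example (illustrative note, not from the original file): an
 * object created with min_count == 1 starts out with count == -1, i.e.
 * no color yet. A scan that finds no references leaves it at count == 0,
 * so count < min_count and the object is white (a leak candidate). A
 * single discovered pointer makes count == 1 >= min_count, i.e. gray.
 */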

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
        return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
                time_before_eq(object->jiffies + jiffies_min_age,
                               jiffies_last_scan);
}

/*
 * Printing of the unreferenced object's information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
                               struct kmemleak_object *object)
{
        int i;
        unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

        seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
                   object->pointer, object->size);
        seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
                   object->comm, object->pid, object->jiffies,
                   msecs_age / 1000, msecs_age % 1000);
        hex_dump_object(seq, object);
        seq_printf(seq, "  backtrace:\n");

        for (i = 0; i < object->trace_len; i++) {
                void *ptr = (void *)object->trace[i];
                seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
        }
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
        struct stack_trace trace;

        trace.nr_entries = object->trace_len;
        trace.entries = object->trace;

        pr_notice("Object 0x%08lx (size %zu):\n",
                  object->pointer, object->size);
        pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
                  object->comm, object->pid, object->jiffies);
        pr_notice("  min_count = %d\n", object->min_count);
        pr_notice("  count = %d\n", object->count);
        pr_notice("  flags = 0x%x\n", object->flags);
        pr_notice("  checksum = %u\n", object->checksum);
        pr_notice("  backtrace:\n");
        print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
        struct rb_node *rb = object_tree_root.rb_node;

        while (rb) {
                struct kmemleak_object *object =
                        rb_entry(rb, struct kmemleak_object, rb_node);
                if (ptr < object->pointer)
                        rb = object->rb_node.rb_left;
                else if (object->pointer + object->size <= ptr)
                        rb = object->rb_node.rb_right;
                else if (object->pointer == ptr || alias)
                        return object;
                else {
                        kmemleak_warn("Found object by alias at 0x%08lx\n",
                                      ptr);
                        dump_object_info(object);
                        break;
                }
        }
        return NULL;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing has already
 * been registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
        return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
        struct hlist_node *tmp;
        struct kmemleak_scan_area *area;
        struct kmemleak_object *object =
                container_of(rcu, struct kmemleak_object, rcu);

        /*
         * Once use_count is 0 (guaranteed by put_object), there is no other
         * code accessing this object, hence no need for locking.
         */
        hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
                hlist_del(&area->node);
                kmem_cache_free(scan_area_cache, area);
        }
        kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
        if (!atomic_dec_and_test(&object->use_count))
                return;

        /* should only get here after delete_object was called */
        WARN_ON(object->flags & OBJECT_ALLOCATED);

        call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
        unsigned long flags;
        struct kmemleak_object *object;

        rcu_read_lock();
        read_lock_irqsave(&kmemleak_lock, flags);
        object = lookup_object(ptr, alias);
        read_unlock_irqrestore(&kmemleak_lock, flags);

        /* check whether the object is still available */
        if (object && !get_object(object))
                object = NULL;
        rcu_read_unlock();

        return object;
}
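
/*
 * Illustrative sketch (not part of the original file, kept out of the
 * build): the object lifetime pattern described in the header comment.
 * find_and_get_object() takes a reference under rcu_read_lock() and
 * put_object() drops it, possibly scheduling the RCU freeing. This mirrors
 * what dump_str_object_info() below does.
 */
#if 0
static void example_dump_if_tracked(unsigned long ptr)
{
        unsigned long flags;
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);   /* use_count++ on success */
        if (!object)
                return;
        spin_lock_irqsave(&object->lock, flags);
        dump_object_info(object);               /* safe: object cannot go away */
        spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);                     /* may trigger free_object_rcu() */
}
#endif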

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
        unsigned long flags;
        struct kmemleak_object *object;

        write_lock_irqsave(&kmemleak_lock, flags);
        object = lookup_object(ptr, alias);
        if (object) {
                rb_erase(&object->rb_node, &object_tree_root);
                list_del_rcu(&object->object_list);
        }
        write_unlock_irqrestore(&kmemleak_lock, flags);

        return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
        struct stack_trace stack_trace;

        stack_trace.max_entries = MAX_TRACE;
        stack_trace.nr_entries = 0;
        stack_trace.entries = trace;
        stack_trace.skip = 2;
        save_stack_trace(&stack_trace);

        return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
                                             int min_count, gfp_t gfp)
{
        unsigned long flags;
        struct kmemleak_object *object, *parent;
        struct rb_node **link, *rb_parent;

        object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
        if (!object) {
                pr_warn("Cannot allocate a kmemleak_object structure\n");
                kmemleak_disable();
                return NULL;
        }

        INIT_LIST_HEAD(&object->object_list);
        INIT_LIST_HEAD(&object->gray_list);
        INIT_HLIST_HEAD(&object->area_list);
        spin_lock_init(&object->lock);
        atomic_set(&object->use_count, 1);
        object->flags = OBJECT_ALLOCATED;
        object->pointer = ptr;
        object->size = size;
        object->excess_ref = 0;
        object->min_count = min_count;
        object->count = 0;                      /* white color initially */
        object->jiffies = jiffies;
        object->checksum = 0;

        /* task information */
        if (in_irq()) {
                object->pid = 0;
                strncpy(object->comm, "hardirq", sizeof(object->comm));
        } else if (in_softirq()) {
                object->pid = 0;
                strncpy(object->comm, "softirq", sizeof(object->comm));
        } else {
                object->pid = current->pid;
                /*
                 * There is a small chance of a race with set_task_comm(),
                 * however using get_task_comm() here may cause locking
                 * dependency issues with current->alloc_lock. In the worst
                 * case, the command line is not correct.
                 */
                strncpy(object->comm, current->comm, sizeof(object->comm));
        }

        /* kernel backtrace */
        object->trace_len = __save_stack_trace(object->trace);

        write_lock_irqsave(&kmemleak_lock, flags);

        min_addr = min(min_addr, ptr);
        max_addr = max(max_addr, ptr + size);
        link = &object_tree_root.rb_node;
        rb_parent = NULL;
        while (*link) {
                rb_parent = *link;
                parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
                if (ptr + size <= parent->pointer)
                        link = &parent->rb_node.rb_left;
                else if (parent->pointer + parent->size <= ptr)
                        link = &parent->rb_node.rb_right;
                else {
                        kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
                                      ptr);
                        /*
                         * No need for parent->lock here since "parent" cannot
                         * be freed while the kmemleak_lock is held.
                         */
                        dump_object_info(parent);
                        kmem_cache_free(object_cache, object);
                        object = NULL;
                        goto out;
                }
        }
        rb_link_node(&object->rb_node, rb_parent, link);
        rb_insert_color(&object->rb_node, &object_tree_root);

        list_add_tail_rcu(&object->object_list, &object_list);
out:
        write_unlock_irqrestore(&kmemleak_lock, flags);
        return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
        unsigned long flags;

        WARN_ON(!(object->flags & OBJECT_ALLOCATED));
        WARN_ON(atomic_read(&object->use_count) < 1);

        /*
         * Locking here also ensures that the corresponding memory block
         * cannot be freed when it is being scanned.
         */
        spin_lock_irqsave(&object->lock, flags);
        object->flags &= ~OBJECT_ALLOCATED;
        spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
        struct kmemleak_object *object;

        object = find_and_remove_object(ptr, 0);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Freeing unknown object at 0x%08lx\n",
                              ptr);
#endif
                return;
        }
        __delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
        struct kmemleak_object *object;
        unsigned long start, end;

        object = find_and_remove_object(ptr, 1);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
                              ptr, size);
#endif
                return;
        }

        /*
         * Create one or two objects that may result from the memory block
         * split. Note that partial freeing is only done by free_bootmem() and
         * this happens before kmemleak_init() is called. The path below is
         * only executed during early log recording in kmemleak_init(), so
         * GFP_KERNEL is enough.
         */
        start = object->pointer;
        end = object->pointer + object->size;
        if (ptr > start)
                create_object(start, ptr - start, object->min_count,
                              GFP_KERNEL);
        if (ptr + size < end)
                create_object(ptr + size, end - ptr - size, object->min_count,
                              GFP_KERNEL);

        __delete_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
        object->min_count = color;
        if (color == KMEMLEAK_BLACK)
                object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
        unsigned long flags;

        spin_lock_irqsave(&object->lock, flags);
        __paint_it(object, color);
        spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
                              ptr,
                              (color == KMEMLEAK_GREY) ? "Grey" :
                              (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
                return;
        }
        paint_it(object, color);
        put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
        paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
        paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
        unsigned long flags;
        struct kmemleak_object *object;
        struct kmemleak_scan_area *area;

        object = find_and_get_object(ptr, 1);
        if (!object) {
                kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
                              ptr);
                return;
        }

        area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
        if (!area) {
                pr_warn("Cannot allocate a scan area\n");
                goto out;
        }

        spin_lock_irqsave(&object->lock, flags);
        if (size == SIZE_MAX) {
                size = object->pointer + object->size - ptr;
        } else if (ptr + size > object->pointer + object->size) {
                kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
                dump_object_info(object);
                kmem_cache_free(scan_area_cache, area);
                goto out_unlock;
        }

        INIT_HLIST_NODE(&area->node);
        area->start = ptr;
        area->size = size;

        hlist_add_head(&area->node, &object->area_list);
out_unlock:
        spin_unlock_irqrestore(&object->lock, flags);
out:
        put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
        unsigned long flags;
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
                              ptr);
                return;
        }

        spin_lock_irqsave(&object->lock, flags);
        object->excess_ref = excess_ref;
        spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to it
 * are searched.
 */
static void object_no_scan(unsigned long ptr)
{
        unsigned long flags;
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
                return;
        }

        spin_lock_irqsave(&object->lock, flags);
        object->flags |= OBJECT_NO_SCAN;
        spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
                             int min_count)
{
        unsigned long flags;
        struct early_log *log;

        if (kmemleak_error) {
                /* kmemleak stopped recording, just count the requests */
                crt_early_log++;
                return;
        }

        if (crt_early_log >= ARRAY_SIZE(early_log)) {
                crt_early_log++;
                kmemleak_disable();
                return;
        }

        /*
         * There is no need for locking since the kernel is still in UP mode
         * at this stage. Disabling the IRQs is enough.
         */
        local_irq_save(flags);
        log = &early_log[crt_early_log];
        log->op_type = op_type;
        log->ptr = ptr;
        log->size = size;
        log->min_count = min_count;
        log->trace_len = __save_stack_trace(log->trace);
        crt_early_log++;
        local_irq_restore(flags);
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
        struct kmemleak_object *object;
        unsigned long flags;
        int i;

        if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
                return;

        /*
         * RCU locking needed to ensure object is not freed via put_object().
         */
        rcu_read_lock();
        object = create_object((unsigned long)log->ptr, log->size,
                               log->min_count, GFP_ATOMIC);
        if (!object)
                goto out;
        spin_lock_irqsave(&object->lock, flags);
        for (i = 0; i < log->trace_len; i++)
                object->trace[i] = log->trace[i];
        object->trace_len = log->trace_len;
        spin_unlock_irqrestore(&object->lock, flags);
out:
        rcu_read_unlock();
}

/*
 * Log an early allocated percpu block and populate the stack trace for each CPU.
 */
static void early_alloc_percpu(struct early_log *log)
{
        unsigned int cpu;
        const void __percpu *ptr = log->ptr;

        for_each_possible_cpu(cpu) {
                log->ptr = per_cpu_ptr(ptr, cpu);
                early_alloc(log);
        }
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:        pointer to beginning of the object
 * @size:       size of the object
 * @min_count:  minimum number of references to this object. If during memory
 *              scanning a number of references less than @min_count is found,
 *              the object is reported as a memory leak. If @min_count is 0,
 *              the object is never reported as a leak. If @min_count is -1,
 *              the object is ignored (not scanned and not reported as a leak)
 * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
                          gfp_t gfp)
{
        pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                create_object((unsigned long)ptr, size, min_count, gfp);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:        __percpu pointer to beginning of the object
 * @size:       size of the object
 * @gfp:        flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
                                 gfp_t gfp)
{
        unsigned int cpu;

        pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

        /*
         * Percpu allocations are only scanned and not reported as leaks
         * (min_count is set to 0).
         */
        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                for_each_possible_cpu(cpu)
                        create_object((unsigned long)per_cpu_ptr(ptr, cpu),
                                      size, 0, gfp);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:       pointer to vm_struct
 * @size:       size of the object
 * @gfp:        __vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
        pr_debug("%s(0x%p, %zu)\n", __func__, area, size);

        /*
         * A min_count = 2 is needed because vm_struct contains a reference to
         * the virtual address of the vmalloc'ed block.
         */
        if (kmemleak_enabled) {
                create_object((unsigned long)area->addr, size, 2, gfp);
                object_set_excess_ref((unsigned long)area,
                                      (unsigned long)area->addr);
        } else if (kmemleak_early_log) {
                log_early(KMEMLEAK_ALLOC, area->addr, size, 2);
                /* reusing early_log.size for storing area->addr */
                log_early(KMEMLEAK_SET_EXCESS_REF,
                          area, (unsigned long)area->addr, 0);
        }
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:        pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
                delete_object_full((unsigned long)ptr);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);
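
/*
 * Illustrative sketch (hypothetical, kept out of the build): an allocator
 * that is not covered by the slab/vmalloc hooks would pair kmemleak_alloc()
 * and kmemleak_free() around its own blocks so that kmemleak can track
 * them. my_pool_grab()/my_pool_release() are made-up backend helpers.
 */
#if 0
static void *my_pool_alloc(size_t size, gfp_t gfp)
{
        void *ptr = my_pool_grab(size);

        if (ptr)
                /* min_count == 1: report the block if no references are found */
                kmemleak_alloc(ptr, size, 1, gfp);
        return ptr;
}

static void my_pool_free(void *ptr)
{
        kmemleak_free(ptr);
        my_pool_release(ptr);
}
#endif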

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:        pointer to the beginning or inside the object. This also
 *              represents the start of the range to be freed
 * @size:       size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                delete_object_part((unsigned long)ptr, size);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:        __percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
        unsigned int cpu;

        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
                for_each_possible_cpu(cpu)
                        delete_object_full((unsigned long)per_cpu_ptr(ptr,
                                                                      cpu));
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:        pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
        struct kmemleak_object *object;
        unsigned long flags;

        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
                return;

        object = find_and_get_object((unsigned long)ptr, 1);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Updating stack trace for unknown object at %p\n",
                              ptr);
#endif
                return;
        }

        spin_lock_irqsave(&object->lock, flags);
        object->trace_len = __save_stack_trace(object->trace);
        spin_unlock_irqrestore(&object->lock, flags);

        put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:        pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                make_gray_object((unsigned long)ptr);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:        pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                make_black_object((unsigned long)ptr);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:        pointer to beginning or inside the object. This also
 *              represents the start of the scan area
 * @size:       size of the scan area
 * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
                add_scan_area((unsigned long)ptr, size, gfp);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:        pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                object_no_scan((unsigned long)ptr);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);
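
/*
 * Illustrative sketch (hypothetical, kept out of the build): typical use of
 * the annotation API above. "struct my_ctx" and its fields are made-up
 * names for the sake of the example.
 */
#if 0
struct my_ctx {
        void *dma_buf;                  /* device data, never kernel pointers */
        struct list_head list;          /* the only field worth scanning */
};

static void my_ctx_annotate(struct my_ctx *ctx)
{
        /* the buffer cannot reference other objects: skip scanning it */
        kmemleak_no_scan(ctx->dma_buf);
        /* only ->list may hold references: narrow the scanned range */
        kmemleak_scan_area(&ctx->list, sizeof(ctx->list), GFP_KERNEL);
        /* ctx is reachable only via a hardware register: not a leak */
        kmemleak_not_leak(ctx);
}
#endif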

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *                       address argument
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
                               gfp_t gfp)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *                           physical address argument
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
 *                          address argument
 */
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *                        address argument
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
        u32 old_csum = object->checksum;

        kasan_disable_current();
        object->checksum = crc32(0, (void *)object->pointer, object->size);
        kasan_enable_current();

        return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
        if (!color_white(object)) {
                /* non-orphan, ignored or new */
                return;
        }

        /*
         * Increase the object's reference count (number of pointers to the
         * memory block). If this count reaches the required minimum, the
         * object's color will become gray and it will be added to the
         * gray_list.
         */
        object->count++;
        if (color_gray(object)) {
                /* put_object() called when removing from gray_list */
                WARN_ON(!get_object(object));
                list_add_tail(&object->gray_list, &gray_list);
        }
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such interrupt condition occurred.
 */
static int scan_should_stop(void)
{
        if (!kmemleak_enabled)
                return 1;

        /*
         * This function may be called from either process or kthread context,
         * hence the need to check for both stop conditions.
         */
        if (current->mm)
                return signal_pending(current);
        else
                return kthread_should_stop();

        return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
                       struct kmemleak_object *scanned)
{
        unsigned long *ptr;
        unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
        unsigned long *end = _end - (BYTES_PER_POINTER - 1);
        unsigned long flags;

        read_lock_irqsave(&kmemleak_lock, flags);
        for (ptr = start; ptr < end; ptr++) {
                struct kmemleak_object *object;
                unsigned long pointer;
                unsigned long excess_ref;

                if (scan_should_stop())
                        break;

                kasan_disable_current();
                pointer = *ptr;
                kasan_enable_current();

                if (pointer < min_addr || pointer >= max_addr)
                        continue;

                /*
                 * No need for get_object() here since we hold kmemleak_lock.
                 * object->use_count cannot be dropped to 0 while the object
                 * is still present in object_tree_root and object_list
                 * (with updates protected by kmemleak_lock).
                 */
                object = lookup_object(pointer, 1);
                if (!object)
                        continue;
                if (object == scanned)
                        /* self referenced, ignore */
                        continue;

                /*
                 * Avoid the lockdep recursive warning on object->lock being
                 * previously acquired in scan_object(). These locks are
                 * enclosed by scan_mutex.
                 */
                spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
                /* only pass surplus references (object already gray) */
                if (color_gray(object)) {
                        excess_ref = object->excess_ref;
                        /* no need for update_refs() if object already gray */
                } else {
                        excess_ref = 0;
                        update_refs(object);
                }
                spin_unlock(&object->lock);

                if (excess_ref) {
                        object = lookup_object(excess_ref, 0);
                        if (!object)
                                continue;
                        if (object == scanned)
                                /* circular reference, ignore */
                                continue;
                        spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
                        update_refs(object);
                        spin_unlock(&object->lock);
                }
        }
        read_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
static void scan_large_block(void *start, void *end)
{
        void *next;

        while (start < end) {
                next = min(start + MAX_SCAN_SIZE, end);
                scan_block(start, next, NULL);
                start = next;
                cond_resched();
        }
}

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
        struct kmemleak_scan_area *area;
        unsigned long flags;

        /*
         * Once the object->lock is acquired, the corresponding memory block
         * cannot be freed (the same lock is acquired in delete_object).
         */
        spin_lock_irqsave(&object->lock, flags);
        if (object->flags & OBJECT_NO_SCAN)
                goto out;
        if (!(object->flags & OBJECT_ALLOCATED))
                /* already freed object */
                goto out;
        if (hlist_empty(&object->area_list)) {
                void *start = (void *)object->pointer;
                void *end = (void *)(object->pointer + object->size);
                void *next;

                do {
                        next = min(start + MAX_SCAN_SIZE, end);
                        scan_block(start, next, object);

                        start = next;
                        if (start >= end)
                                break;

                        spin_unlock_irqrestore(&object->lock, flags);
                        cond_resched();
                        spin_lock_irqsave(&object->lock, flags);
                } while (object->flags & OBJECT_ALLOCATED);
        } else
                hlist_for_each_entry(area, &object->area_list, node)
                        scan_block((void *)area->start,
                                   (void *)(area->start + area->size),
                                   object);
out:
        spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
        struct kmemleak_object *object, *tmp;

        /*
         * The list traversal is safe for both tail additions and removals
         * from inside the loop. The kmemleak objects cannot be freed from
         * outside the loop because their use_count was incremented.
         */
        object = list_entry(gray_list.next, typeof(*object), gray_list);
        while (&object->gray_list != &gray_list) {
                cond_resched();

                /* may add new objects to the list */
                if (!scan_should_stop())
                        scan_object(object);

                tmp = list_entry(object->gray_list.next, typeof(*object),
                                 gray_list);

                /* remove the object from the list and release it */
                list_del(&object->gray_list);
                put_object(object);

                object = tmp;
        }
        WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
        unsigned long flags;
        struct kmemleak_object *object;
        int i;
        int new_leaks = 0;

        jiffies_last_scan = jiffies;

        /* prepare the kmemleak_object's */
        rcu_read_lock();
        list_for_each_entry_rcu(object, &object_list, object_list) {
                spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
                /*
                 * With a few exceptions there should be a maximum of
                 * 1 reference to any object at this point.
                 */
                if (atomic_read(&object->use_count) > 1) {
                        pr_debug("object->use_count = %d\n",
                                 atomic_read(&object->use_count));
                        dump_object_info(object);
                }
#endif
                /* reset the reference count (whiten the object) */
                object->count = 0;
                if (color_gray(object) && get_object(object))
                        list_add_tail(&object->gray_list, &gray_list);

                spin_unlock_irqrestore(&object->lock, flags);
        }
        rcu_read_unlock();

        /* data/bss scanning */
        scan_large_block(_sdata, _edata);
        scan_large_block(__bss_start, __bss_stop);
        scan_large_block(__start_ro_after_init, __end_ro_after_init);

#ifdef CONFIG_SMP
        /* per-cpu sections scanning */
        for_each_possible_cpu(i)
                scan_large_block(__per_cpu_start + per_cpu_offset(i),
                                 __per_cpu_end + per_cpu_offset(i));
#endif

        /*
         * Struct page scanning for each node.
         */
        get_online_mems();
        for_each_online_node(i) {
                unsigned long start_pfn = node_start_pfn(i);
                unsigned long end_pfn = node_end_pfn(i);
                unsigned long pfn;

                for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                        struct page *page;

                        if (!pfn_valid(pfn))
                                continue;
                        page = pfn_to_page(pfn);
                        /* only scan if page is in use */
                        if (page_count(page) == 0)
                                continue;
                        scan_block(page, page + 1, NULL);
                        if (!(pfn % (MAX_SCAN_SIZE / sizeof(*page))))
                                cond_resched();
                }
        }
        put_online_mems();

        /*
         * Scanning the task stacks (may introduce false negatives).
         */
        if (kmemleak_stack_scan) {
                struct task_struct *p, *g;

                read_lock(&tasklist_lock);
                do_each_thread(g, p) {
                        void *stack = try_get_task_stack(p);
                        if (stack) {
                                scan_block(stack, stack + THREAD_SIZE, NULL);
                                put_task_stack(p);
                        }
                } while_each_thread(g, p);
                read_unlock(&tasklist_lock);
        }

        /*
         * Scan the objects already referenced from the sections scanned
         * above.
         */
        scan_gray_list();

        /*
         * Check for new or unreferenced objects modified since the previous
         * scan and color them gray until the next scan.
         */
        rcu_read_lock();
        list_for_each_entry_rcu(object, &object_list, object_list) {
                spin_lock_irqsave(&object->lock, flags);
                if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
                    && update_checksum(object) && get_object(object)) {
                        /* color it gray temporarily */
                        object->count = object->min_count;
                        list_add_tail(&object->gray_list, &gray_list);
                }
                spin_unlock_irqrestore(&object->lock, flags);
        }
        rcu_read_unlock();

        /*
         * Re-scan the gray list for modified unreferenced objects.
         */
        scan_gray_list();

        /*
         * If scanning was stopped do not report any new unreferenced objects.
         */
        if (scan_should_stop())
                return;

        /*
         * Scanning result reporting.
         */
        rcu_read_lock();
        list_for_each_entry_rcu(object, &object_list, object_list) {
                spin_lock_irqsave(&object->lock, flags);
                if (unreferenced_object(object) &&
                    !(object->flags & OBJECT_REPORTED)) {
                        object->flags |= OBJECT_REPORTED;
                        new_leaks++;
                }
                spin_unlock_irqrestore(&object->lock, flags);
        }
        rcu_read_unlock();

        if (new_leaks) {
                kmemleak_found_leaks = true;

                pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
                        new_leaks);
        }

}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
        static int first_run = 1;

        pr_info("Automatic memory scanning thread started\n");
        set_user_nice(current, 10);

        /*
         * Wait before the first scan to allow the system to fully initialize.
         */
        if (first_run) {
                signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
                first_run = 0;
                while (timeout && !kthread_should_stop())
                        timeout = schedule_timeout_interruptible(timeout);
        }

        while (!kthread_should_stop()) {
                signed long timeout = jiffies_scan_wait;

                mutex_lock(&scan_mutex);
                kmemleak_scan();
                mutex_unlock(&scan_mutex);

                /* wait before the next scan */
                while (timeout && !kthread_should_stop())
                        timeout = schedule_timeout_interruptible(timeout);
        }

        pr_info("Automatic memory scanning thread ended\n");

        return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
        if (scan_thread)
                return;
        scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
        if (IS_ERR(scan_thread)) {
                pr_warn("Failed to create the scan thread\n");
                scan_thread = NULL;
        }
}

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
        if (scan_thread) {
                kthread_stop(scan_thread);
                scan_thread = NULL;
        }
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. The function triggers
 * a memory scan when the pos argument points to the first position.
1675 */
1676 static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1677 {
1678 struct kmemleak_object *object;
1679 loff_t n = *pos;
1680 int err;
1681
1682 err = mutex_lock_interruptible(&scan_mutex);
1683 if (err < 0)
1684 return ERR_PTR(err);
1685
1686 rcu_read_lock();
1687 list_for_each_entry_rcu(object, &object_list, object_list) {
1688 if (n-- > 0)
1689 continue;
1690 if (get_object(object))
1691 goto out;
1692 }
1693 object = NULL;
1694 out:
1695 return object;
1696 }
1697
1698 /*
1699 * Return the next object in the object_list. The function decrements the
1700 * use_count of the previous object and increases that of the next one.
1701 */
1702 static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1703 {
1704 struct kmemleak_object *prev_obj = v;
1705 struct kmemleak_object *next_obj = NULL;
1706 struct kmemleak_object *obj = prev_obj;
1707
1708 ++(*pos);
1709
1710 list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1711 if (get_object(obj)) {
1712 next_obj = obj;
1713 break;
1714 }
1715 }
1716
1717 put_object(prev_obj);
1718 return next_obj;
1719 }
1720
1721 /*
1722 * Decrement the use_count of the last object required, if any.
1723 */
1724 static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1725 {
1726 if (!IS_ERR(v)) {
1727 /*
1728 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1729 * waiting was interrupted, so only release it if !IS_ERR.
1730 */
1731 rcu_read_unlock();
1732 mutex_unlock(&scan_mutex);
1733 if (v)
1734 put_object(v);
1735 }
1736 }
1737
1738 /*
1739 * Print the information for an unreferenced object to the seq file.
1740 */
1741 static int kmemleak_seq_show(struct seq_file *seq, void *v)
1742 {
1743 struct kmemleak_object *object = v;
1744 unsigned long flags;
1745
1746 spin_lock_irqsave(&object->lock, flags);
1747 if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1748 print_unreferenced(seq, object);
1749 spin_unlock_irqrestore(&object->lock, flags);
1750 return 0;
1751 }
1752
1753 static const struct seq_operations kmemleak_seq_ops = {
1754 .start = kmemleak_seq_start,
1755 .next = kmemleak_seq_next,
1756 .stop = kmemleak_seq_stop,
1757 .show = kmemleak_seq_show,
1758 };
1759
1760 static int kmemleak_open(struct inode *inode, struct file *file)
1761 {
1762 return seq_open(file, &kmemleak_seq_ops);
1763 }

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}
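
/*
 * Reached via the "dump=<addr>" debugfs command, e.g. (the address below is
 * purely illustrative, typically taken from a previously reported leak):
 *
 *	echo dump=0xffff8800b2d9f2e0 > /sys/kernel/debug/kmemleak
 */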

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans, these black objects could
 * potentially contain references to newly allocated objects and we would end
 * up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds
 *		  (0 to disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all currently reported unreferenced kmemleak objects
 *		  as grey so that they are no longer printed, or free all
 *		  kmemleak objects if kmemleak has been disabled.
 *   dump=...	- dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EBUSY;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = kstrtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}
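
/*
 * Typical user-space interaction with the debugfs file (assuming debugfs is
 * mounted at /sys/kernel/debug):
 *
 *	echo scan > /sys/kernel/debug/kmemleak        (trigger a scan now)
 *	echo scan=600 > /sys/kernel/debug/kmemleak    (rescan every 600 seconds)
 *	cat /sys/kernel/debug/kmemleak                (list unreferenced objects)
 *	echo clear > /sys/kernel/debug/kmemleak       (suppress current reports)
 */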

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object_full(object->pointer);
	rcu_read_unlock();
}

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no memory leaks were found (otherwise, kmemleak may still hold useful
 * information on the reported memory leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	mutex_lock(&scan_mutex);
	/*
	 * Once it is made sure that kmemleak_scan has stopped, it is safe to no
	 * longer track object freeing. Ordering of the scan thread stopping and
	 * the memory accesses below is guaranteed by the kthread_stop()
	 * function.
	 */
	kmemleak_free_enabled = 0;
	mutex_unlock(&scan_mutex);

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}
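
/*
 * The cmpxchg() above is the usual run-once idiom: it returns the previous
 * value of kmemleak_error, so only the first caller observes 0 and carries
 * on; concurrent or repeated calls see 1 and return immediately.
 */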

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
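
/*
 * Selected on the kernel command line, e.g. "kmemleak=off" to disable the
 * detector at boot, or "kmemleak=on" to keep it enabled on kernels built
 * with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF (see kmemleak_init() below).
 */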

static void __init print_log_trace(struct early_log *log)
{
	struct stack_trace trace;

	trace.nr_entries = log->trace_len;
	trace.entries = log->trace;

	pr_notice("Early log backtrace:\n");
	print_stack_trace(&trace, 2);
}

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_early_log = 0;
		kmemleak_disable();
		return;
	}
#endif

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	if (crt_early_log > ARRAY_SIZE(early_log))
		pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n",
			crt_early_log);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	kmemleak_early_log = 0;
	if (kmemleak_error) {
		local_irq_restore(flags);
		return;
	} else {
		kmemleak_enabled = 1;
		kmemleak_free_enabled = 1;
	}
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_ALLOC_PERCPU:
			early_alloc_percpu(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_FREE_PERCPU:
			kmemleak_free_percpu(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		case KMEMLEAK_SET_EXCESS_REF:
			object_set_excess_ref((unsigned long)log->ptr,
					      log->excess_ref);
			break;
		default:
			kmemleak_warn("Unknown early log operation: %d\n",
				      log->op_type);
		}

		if (kmemleak_warning) {
			print_log_trace(log);
			kmemleak_warning = 0;
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	kmemleak_initialized = 1;

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warn("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);