/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
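
/*
 * Illustrative object lifecycle (an expository sketch added to this copy of
 * the file; it only summarises the functions defined below):
 *
 *	kmemleak_alloc() -> create_object(): the metadata enters object_list
 *		and object_tree_root with use_count == 1
 *	kmemleak_scan() -> scan_block(): the tracked memory is searched for
 *		values pointing into this object; each match increments
 *		object->count
 *	kmemleak_free() -> delete_object_full() -> __delete_object(): the
 *		metadata leaves the list and the tree; the final put_object()
 *		schedules free_object_rcu() via call_rcu()
 */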

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before the object is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* enables or disables early logging of the memory operations */
static int kmemleak_early_log = 1;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* set when "kmemleak=on" is passed on the command line, skipping the disable */
static int kmemleak_skip_disable;
/* set if there are leaks that can be reported */
static bool kmemleak_found_leaks;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_ALLOC_PERCPU,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_FREE_PERCPU,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long trace[MAX_TRACE];	/* stack trace */
	unsigned int trace_len;		/* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {	\
	pr_warning(x);			\
	dump_stack();			\
	kmemleak_warning = 1;		\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Printing of the object's hex dump to the seq file. The number of lines to
 * be printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	int i, len, remaining;
	unsigned char linebuf[HEX_ROW_SIZE * 5];

	/* limit the number of lines to HEX_MAX_LINES */
	remaining = len =
		min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));

	seq_printf(seq, "  hex dump (first %d bytes):\n", len);
	for (i = 0; i < len; i += HEX_ROW_SIZE) {
		int linelen = min(remaining, HEX_ROW_SIZE);

		remaining -= HEX_ROW_SIZE;
		hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
				   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
				   HEX_ASCII);
		seq_printf(seq, "    %s\n", linebuf);
	}
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}
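
/*
 * Worked examples of the color encoding above (expository; derived directly
 * from color_white()/color_gray()):
 *
 *	count == -1 (new object), min_count == 1  -> no color yet
 *	count ==  0,              min_count == 1  -> white (leak candidate)
 *	count ==  2,              min_count == 1  -> gray (referenced)
 *	count ==  0,              min_count == 0  -> gray (marked not a leak)
 *	min_count == KMEMLEAK_BLACK (-1)          -> black (never reported)
 */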

/*
 * Objects are considered unreferenced only if their color is white, they
 * have not been deleted and have a minimum age to avoid false positives
 * caused by pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced object's information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
		   object->comm, object->pid, object->jiffies,
		   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of the kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%lx\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look up the metadata (kmemleak_object) of a memory block in the object
 * search tree based on a pointer value. If alias is 0, only values pointing
 * to the beginning of the memory block are allowed. The kmemleak_lock must
 * be held when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;

	while (rb) {
		struct kmemleak_object *object =
			rb_entry(rb, struct kmemleak_object, rb_node);
		if (ptr < object->pointer)
			rb = object->rb_node.rb_left;
		else if (object->pointer + object->size <= ptr)
			rb = object->rb_node.rb_right;
		else if (object->pointer == ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise.
 * Note that once an object's use_count has reached 0, the RCU freeing has
 * already been registered and the object should no longer be used. This
 * function must be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object = NULL;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	if (ptr >= min_addr && ptr < max_addr)
		object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}
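
/*
 * Typical caller pattern for the helpers above (a minimal expository sketch;
 * the "..." stands for caller-specific code). This is the shape used by
 * paint_ptr(), object_no_scan() and similar functions below:
 *
 *	object = find_and_get_object(ptr, 0);
 *	if (object) {
 *		spin_lock_irqsave(&object->lock, flags);
 *		... read or modify the metadata ...
 *		spin_unlock_irqrestore(&object->lock, flags);
 *		put_object(object);	(drop the reference taken above)
 *	}
 */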

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;

	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
	if (!object) {
		pr_warning("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	write_lock_irqsave(&kmemleak_lock, flags);

	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &parent->rb_node.rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object "
				      "search tree (overlaps existing)\n",
				      ptr);
			kmem_cache_free(object_cache, object);
			object = parent;
			spin_lock(&object->lock);
			dump_object_info(object);
			spin_unlock(&object->lock);
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Remove the metadata (struct kmemleak_object) for a memory block from the
 * object_list and object_tree_root and decrement its use_count.
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	write_lock_irqsave(&kmemleak_lock, flags);
	rb_erase(&object->rb_node, &object_tree_root);
	list_del_rcu(&object->object_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 2);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_get_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
			      "(size %zu)\n", ptr, size);
#endif
		return;
	}
	__delete_object(object);

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called. The path below is
	 * only executed during early log recording in kmemleak_init(), so
	 * GFP_KERNEL is enough.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	put_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object "
			      "at 0x%08lx as %s\n", ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
	if (!area) {
		pr_warning("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (size == SIZE_MAX) {
		size = object->pointer + object->size - ptr;
	} else if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak, but references
 * to it are still searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count)
{
	unsigned long flags;
	struct early_log *log;

	if (kmemleak_error) {
		/* kmemleak stopped recording, just count the requests */
		crt_early_log++;
		return;
	}

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_ATOMIC);
	if (!object)
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}

/*
 * Log an early allocated __percpu block: register each CPU's copy and
 * populate the stack trace.
 */
static void early_alloc_percpu(struct early_log *log)
{
	unsigned int cpu;
	const void __percpu *ptr = log->ptr;

	for_each_possible_cpu(cpu) {
		log->ptr = per_cpu_ptr(ptr, cpu);
		early_alloc(log);
	}
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
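
/*
 * Expository example (hypothetical allocator, not from the upstream file):
 * an allocator that carves objects out of its own pre-allocated pool
 * bypasses the slab hooks, so it would pair kmemleak_alloc() and
 * kmemleak_free() itself:
 *
 *	void *my_pool_alloc(size_t size)
 *	{
 *		void *obj = my_pool_carve(size);	(hypothetical helper)
 *
 *		if (obj)
 *			kmemleak_alloc(obj, size, 1, GFP_KERNEL);
 *		return obj;
 *	}
 */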

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu). It assumes GFP_KERNEL
 * allocation.
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, GFP_KERNEL);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);
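
/*
 * Expository example (hypothetical structure, not from the upstream file):
 * if only one field of a large block can hold pointers, restricting the
 * scan avoids stale byte patterns elsewhere being counted as references:
 *
 *	struct big_buf {
 *		char payload[PAGE_SIZE];	(never holds pointers)
 *		struct list_head list;		(the only reference-carrying field)
 *	};
 *
 *	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
 *	kmemleak_scan_area(&buf->list, sizeof(buf->list), GFP_KERNEL);
 */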

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
		return false;

	kasan_disable_current();
	object->checksum = crc32(0, (void *)object->pointer, object->size);
	kasan_enable_current();

	return object->checksum != old_csum;
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned, int allow_resched)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);

	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long flags;
		unsigned long pointer;

		if (allow_resched)
			cond_resched();
		if (scan_should_stop())
			break;

		/* don't scan uninitialized memory */
		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
						  BYTES_PER_POINTER))
			continue;

		kasan_disable_current();
		pointer = *ptr;
		kasan_enable_current();

		object = find_and_get_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned) {
			/* self referenced, ignore */
			put_object(object);
			continue;
		}

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_irqsave_nested(&object->lock, flags,
					 SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock_irqrestore(&object->lock, flags);
			put_object(object);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object)) {
			list_add_tail(&object->gray_list, &gray_list);
			spin_unlock_irqrestore(&object->lock, flags);
			continue;
		}

		spin_unlock_irqrestore(&object->lock, flags);
		put_object(object);
	}
}

/*
 * Scan a memory block corresponding to a kmemleak_object. The caller must
 * ensure that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);

		while (start < end && (object->flags & OBJECT_ALLOCATED) &&
		       !(object->flags & OBJECT_NO_SCAN)) {
			scan_block(start, min(start + MAX_SCAN_SIZE, end),
				   object, 0);
			start += MAX_SCAN_SIZE;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		}
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object, 0);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object structures */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_block(_sdata, _edata, NULL, 1);
	scan_block(__bss_start, __bss_stop, NULL, 1);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_block(__per_cpu_start + per_cpu_offset(i),
			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_online_node(i) {
		unsigned long start_pfn = node_start_pfn(i);
		unsigned long end_pfn = node_end_pfn(i);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL, 1);
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			scan_block(task_stack_page(p), task_stack_page(p) +
				   THREAD_SIZE, NULL, 0);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see "
			"/sys/kernel/debug/kmemleak)\n", new_leaks);
	}
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warning("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans these black objects could
 * potentially contain references to newly allocated objects in the future and
 * we'd end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all currently reported unreferenced kmemleak objects as
 *		  grey to suppress printing them, or free all kmemleak objects
 *		  if kmemleak has been disabled
 *   dump=...	- dump information about the object found at the given address
 */
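/*
 * Example session exercising the commands above (expository):
 *
 *	# echo scan > /sys/kernel/debug/kmemleak	(trigger a scan now)
 *	# cat /sys/kernel/debug/kmemleak		(read the reports)
 *	# echo clear > /sys/kernel/debug/kmemleak	(grey out current reports)
 *	# echo scan=600 > /sys/kernel/debug/kmemleak	(scan every 10 minutes)
 */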
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EBUSY;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = kstrtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object_full(object->pointer);
	rcu_read_unlock();
}

/*
 * Stop the memory scanning thread and, if no leaks have been reported, free
 * the kmemleak internal objects (otherwise, kmemleak may still hold useful
 * information on the memory leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	mutex_lock(&scan_mutex);
	stop_scan_thread();

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. "
			"Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\"\n");
	mutex_unlock(&scan_mutex);
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);

static void __init print_log_trace(struct early_log *log)
{
	struct stack_trace trace;

	trace.nr_entries = log->trace_len;
	trace.entries = log->trace;

	pr_notice("Early log backtrace:\n");
	print_stack_trace(&trace, 2);
}

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_early_log = 0;
		kmemleak_disable();
		return;
	}
#endif

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	if (crt_early_log >= ARRAY_SIZE(early_log))
		pr_warning("Early log buffer exceeded (%d), please increase "
			   "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	kmemleak_early_log = 0;
	if (kmemleak_error) {
		local_irq_restore(flags);
		return;
	} else
		kmemleak_enabled = 1;
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_ALLOC_PERCPU:
			early_alloc_percpu(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_FREE_PERCPU:
			kmemleak_free_percpu(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			kmemleak_warn("Unknown early log operation: %d\n",
				      log->op_type);
		}

		if (kmemleak_warning) {
			print_log_trace(log);
			kmemleak_warning = 0;
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	kmemleak_initialized = 1;

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);