include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
mm/kmemcheck.c
#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmemcheck.h>
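
/*
 * Illustrative context (an assumption, not part of this file): under
 * CONFIG_KMEMCHECK, struct page in <linux/mm_types.h> carries an extra
 * field that the code below uses to link each data page to its shadow
 * page, roughly:
 *
 *	struct page {
 *		...
 *		void *shadow;	// kmemcheck shadow bits for this page
 *	};
 */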
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
	struct page *shadow;
	int pages;
	int i;

	pages = 1 << order;

	/*
	 * With kmemcheck enabled, we need to allocate a memory area for the
	 * shadow bits as well.
	 */
	shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
	if (!shadow) {
		if (printk_ratelimit())
			printk(KERN_ERR "kmemcheck: failed to allocate "
				"shadow bitmap\n");
		return;
	}

	for (i = 0; i < pages; ++i)
		page[i].shadow = page_address(&shadow[i]);

	/*
	 * Mark it as non-present for the MMU so that our accesses to
	 * this memory will trigger a page fault and let us analyze
	 * the memory accesses.
	 */
	kmemcheck_hide_pages(page, pages);
}

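/*
 * For illustration only (not part of this file): the page->shadow links
 * set up above are what the arch fault handler is assumed to consult to
 * translate a faulting address into its shadow byte. A minimal sketch of
 * that lookup, following kmemcheck_shadow_lookup() in
 * arch/x86/mm/kmemcheck/shadow.c:
 *
 *	void *shadow_lookup(unsigned long address)
 *	{
 *		struct page *page = virt_to_page(address);
 *
 *		if (!page->shadow)
 *			return NULL;
 *		return page->shadow + (address & (PAGE_SIZE - 1));
 *	}
 */
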
void kmemcheck_free_shadow(struct page *page, int order)
{
	struct page *shadow;
	int pages;
	int i;

	if (!kmemcheck_page_is_tracked(page))
		return;

	pages = 1 << order;

	kmemcheck_show_pages(page, pages);

	shadow = virt_to_page(page[0].shadow);

	for (i = 0; i < pages; ++i)
		page[i].shadow = NULL;

	__free_pages(shadow, order);
}

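/*
 * Sketch of the expected call site (an assumption, not part of this
 * file): the page freeing path in mm/page_alloc.c can call this hook
 * unconditionally, relying on the kmemcheck_page_is_tracked() check
 * above to make it a no-op for untracked pages:
 *
 *	static void __free_pages_ok(struct page *page, unsigned int order)
 *	{
 *		kmemcheck_free_shadow(page, order);
 *		...
 *	}
 */
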
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
			  size_t size)
{
	/*
	 * Has already been memset(), which initializes the shadow for us
	 * as well.
	 */
	if (gfpflags & __GFP_ZERO)
		return;

	/* No need to initialize the shadow of a non-tracked slab. */
	if (s->flags & SLAB_NOTRACK)
		return;

	if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) {
		/*
		 * Allow notracked objects to be allocated from
		 * tracked caches. Note however that these objects
		 * will still get page faults on access, they just
		 * won't ever be flagged as uninitialized. If page
		 * faults are not acceptable, the slab cache itself
		 * should be marked NOTRACK.
		 */
		kmemcheck_mark_initialized(object, size);
	} else if (!s->ctor) {
		/*
		 * New objects should be marked uninitialized before
		 * they're returned to the caller.
		 */
		kmemcheck_mark_uninitialized(object, size);
	}
}

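/*
 * Sketch of how a slab allocator is assumed to invoke this hook (for
 * illustration; the exact call sites live in mm/slab.c and mm/slub.c):
 * it runs after the object has been fetched, and after any __GFP_ZERO
 * memset, so the shadow state matches what the caller actually sees:
 *
 *	if (unlikely(gfpflags & __GFP_ZERO) && object)
 *		memset(object, 0, s->objsize);
 *	kmemcheck_slab_alloc(s, gfpflags, object, s->objsize);
 */
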
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
{
	/* TODO: RCU freeing is unsupported for now; hide false positives. */
	if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
		kmemcheck_mark_freed(object, size);
}

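/*
 * Corresponding free-side call, as assumed from the slab allocators of
 * this era (illustrative only):
 *
 *	kmemcheck_slab_free(s, object, s->objsize);
 *
 * Marking the object freed means any read of it before re-allocation
 * can be flagged as an access to uninitialized memory.
 */
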
void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
			       gfp_t gfpflags)
{
	int pages;

	if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
		return;

	pages = 1 << order;

	/*
	 * NOTE: We choose to track GFP_ZERO pages too; in fact, they
	 * can become uninitialized by copying uninitialized memory
	 * into them.
	 */

	/* XXX: Can use zone->node for node? */
	kmemcheck_alloc_shadow(page, order, gfpflags, -1);

	if (gfpflags & __GFP_ZERO)
		kmemcheck_mark_initialized_pages(page, pages);
	else
		kmemcheck_mark_uninitialized_pages(page, pages);
}
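
/*
 * Sketch of the allocation-side call site (an assumption based on the
 * page allocator hooks of this kernel generation, not part of this
 * file): prep_new_page() in mm/page_alloc.c gates the hook on the
 * global kmemcheck_enabled switch, while the __GFP_HIGHMEM and
 * __GFP_NOTRACK filtering is handled above:
 *
 *	if (kmemcheck_enabled)
 *		kmemcheck_pagealloc_alloc(page, order, gfp_flags);
 */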