kmemcheck: add the kmemcheck core
arch/x86/mm/kmemcheck/shadow.c
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include "pte.h"
#include "shadow.h"

/*
 * Return the shadow address for the given address. Returns NULL if the
 * address is not tracked.
 *
 * We need to be extremely careful not to follow any invalid pointers,
 * because this function can be called for *any* possible address.
 */
void *kmemcheck_shadow_lookup(unsigned long address)
{
	pte_t *pte;
	struct page *page;

	if (!virt_addr_valid(address))
		return NULL;

	pte = kmemcheck_pte_lookup(address);
	if (!pte)
		return NULL;

	page = virt_to_page(address);
	if (!page->shadow)
		return NULL;
	return page->shadow + (address & (PAGE_SIZE - 1));
}

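/*
 * Mark the shadow of the memory range [address, address + n) with the
 * given status. The range may cross page boundaries; because every
 * tracked page has its own shadow page, the shadow has to be looked up
 * again for each page the range touches.
 */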
static void mark_shadow(void *address, unsigned int n,
	enum kmemcheck_shadow status)
{
	unsigned long addr = (unsigned long) address;
	unsigned long last_addr = addr + n - 1;
	unsigned long page = addr & PAGE_MASK;
	unsigned long last_page = last_addr & PAGE_MASK;
	unsigned int first_n;
	void *shadow;

	/* If the memory range crosses a page boundary, stop there. */
	if (page == last_page)
		first_n = n;
	else
		first_n = page + PAGE_SIZE - addr;

	shadow = kmemcheck_shadow_lookup(addr);
	if (shadow)
		memset(shadow, status, first_n);

	addr += first_n;
	n -= first_n;

	/* Do full-page memset()s. */
	while (n >= PAGE_SIZE) {
		shadow = kmemcheck_shadow_lookup(addr);
		if (shadow)
			memset(shadow, status, PAGE_SIZE);

		addr += PAGE_SIZE;
		n -= PAGE_SIZE;
	}

	/* Do the remaining page, if any. */
	if (n > 0) {
		shadow = kmemcheck_shadow_lookup(addr);
		if (shadow)
			memset(shadow, status, n);
	}
}

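/*
 * The helpers below fill the shadow of a byte range with one of the
 * states from enum kmemcheck_shadow (see shadow.h): unallocated,
 * uninitialized, initialized or freed.
 */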
void kmemcheck_mark_unallocated(void *address, unsigned int n)
{
	mark_shadow(address, n, KMEMCHECK_SHADOW_UNALLOCATED);
}

void kmemcheck_mark_uninitialized(void *address, unsigned int n)
{
	mark_shadow(address, n, KMEMCHECK_SHADOW_UNINITIALIZED);
}

/*
 * Fill the shadow memory of the given address such that the memory at that
 * address is marked as being initialized.
 */
void kmemcheck_mark_initialized(void *address, unsigned int n)
{
	mark_shadow(address, n, KMEMCHECK_SHADOW_INITIALIZED);
}
EXPORT_SYMBOL_GPL(kmemcheck_mark_initialized);

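/*
 * Fill the shadow memory of the given address such that the memory at that
 * address is marked as having been freed.
 */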
void kmemcheck_mark_freed(void *address, unsigned int n)
{
	mark_shadow(address, n, KMEMCHECK_SHADOW_FREED);
}

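/*
 * Page variants: mark the shadow of n whole pages, starting at the given
 * struct page.
 */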
void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i)
		kmemcheck_mark_unallocated(page_address(&p[i]), PAGE_SIZE);
}

void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i)
		kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE);
}

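/*
 * Check the shadow for a memory access of the given size. Returns
 * KMEMCHECK_SHADOW_INITIALIZED if the access is allowed, otherwise the
 * shadow status of one of the offending bytes. With
 * CONFIG_KMEMCHECK_PARTIAL_OK it is enough that at least one of the
 * accessed bytes is initialized; otherwise all of them must be.
 */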
enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size)
{
	uint8_t *x;
	unsigned int i;

	x = shadow;

#ifdef CONFIG_KMEMCHECK_PARTIAL_OK
	/*
	 * Make sure _some_ bytes are initialized. Gcc frequently generates
	 * code to access neighboring bytes.
	 */
	for (i = 0; i < size; ++i) {
		if (x[i] == KMEMCHECK_SHADOW_INITIALIZED)
			return x[i];
	}
#else
	/* All bytes must be initialized. */
	for (i = 0; i < size; ++i) {
		if (x[i] != KMEMCHECK_SHADOW_INITIALIZED)
			return x[i];
	}
#endif

	return x[0];
}

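/*
 * Unconditionally mark all shadow bytes for the given access as
 * initialized.
 */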
void kmemcheck_shadow_set(void *shadow, unsigned int size)
{
	uint8_t *x;
	unsigned int i;

	x = shadow;
	for (i = 0; i < size; ++i)
		x[i] = KMEMCHECK_SHADOW_INITIALIZED;
}
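
/*
 * Illustrative usage, not part of the original file: a hypothetical
 * allocator hook could drive the helpers above roughly like the sketch
 * below, where "obj" and "size" stand in for the allocator's own object
 * pointer and object size:
 *
 *	right after allocation:
 *		kmemcheck_mark_uninitialized(obj, size);
 *	once the object has been fully written:
 *		kmemcheck_mark_initialized(obj, size);
 *	on free:
 *		kmemcheck_mark_freed(obj, size);
 */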