/* arch/x86/mm/kmemcheck/shadow.c */
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include "pte.h"
#include "shadow.h"

/*
 * Return the shadow address for the given address. Returns NULL if the
 * address is not tracked.
 *
 * We need to be extremely careful not to follow any invalid pointers,
 * because this function can be called for *any* possible address.
 */
void *kmemcheck_shadow_lookup(unsigned long address)
{
        pte_t *pte;
        struct page *page;

        if (!virt_addr_valid(address))
                return NULL;

        pte = kmemcheck_pte_lookup(address);
        if (!pte)
                return NULL;

        page = virt_to_page(address);
        if (!page->shadow)
                return NULL;
        return page->shadow + (address & (PAGE_SIZE - 1));
}

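/*
 * Set the shadow bytes for the n-byte range starting at address to the
 * given status. The range may span several pages; each page's shadow is
 * looked up and memset() separately, and pages without shadow are skipped.
 */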
static void mark_shadow(void *address, unsigned int n,
                        enum kmemcheck_shadow status)
{
        unsigned long addr = (unsigned long) address;
        unsigned long last_addr = addr + n - 1;
        unsigned long page = addr & PAGE_MASK;
        unsigned long last_page = last_addr & PAGE_MASK;
        unsigned int first_n;
        void *shadow;

        /* If the memory range crosses a page boundary, stop there. */
        if (page == last_page)
                first_n = n;
        else
                first_n = page + PAGE_SIZE - addr;

        shadow = kmemcheck_shadow_lookup(addr);
        if (shadow)
                memset(shadow, status, first_n);

        addr += first_n;
        n -= first_n;

        /* Do full-page memset()s. */
        while (n >= PAGE_SIZE) {
                shadow = kmemcheck_shadow_lookup(addr);
                if (shadow)
                        memset(shadow, status, PAGE_SIZE);

                addr += PAGE_SIZE;
                n -= PAGE_SIZE;
        }

        /* Do the remaining page, if any. */
        if (n > 0) {
                shadow = kmemcheck_shadow_lookup(addr);
                if (shadow)
                        memset(shadow, status, n);
        }
}

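/* Mark the shadow for the n bytes starting at address as unallocated. */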
void kmemcheck_mark_unallocated(void *address, unsigned int n)
{
        mark_shadow(address, n, KMEMCHECK_SHADOW_UNALLOCATED);
}

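/* Mark the shadow for the n bytes starting at address as uninitialized. */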
void kmemcheck_mark_uninitialized(void *address, unsigned int n)
{
        mark_shadow(address, n, KMEMCHECK_SHADOW_UNINITIALIZED);
}

/*
 * Fill the shadow memory of the given address such that the memory at that
 * address is marked as being initialized.
 */
void kmemcheck_mark_initialized(void *address, unsigned int n)
{
        mark_shadow(address, n, KMEMCHECK_SHADOW_INITIALIZED);
}
EXPORT_SYMBOL_GPL(kmemcheck_mark_initialized);

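/* Mark the shadow for the n bytes starting at address as freed. */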
void kmemcheck_mark_freed(void *address, unsigned int n)
{
        mark_shadow(address, n, KMEMCHECK_SHADOW_FREED);
}

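/* Mark the shadow for each of the n pages starting at p as unallocated. */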
void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n)
{
        unsigned int i;

        for (i = 0; i < n; ++i)
                kmemcheck_mark_unallocated(page_address(&p[i]), PAGE_SIZE);
}

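/* Mark the shadow for each of the n pages starting at p as uninitialized. */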
void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n)
{
        unsigned int i;

        for (i = 0; i < n; ++i)
                kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE);
}

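/*
 * Check the shadow bytes for a size-byte access. Returns
 * KMEMCHECK_SHADOW_INITIALIZED if the access is acceptable (every byte
 * initialized, or at least one byte when CONFIG_KMEMCHECK_PARTIAL_OK is
 * set); otherwise returns the status of an offending byte.
 */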
enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size)
{
        uint8_t *x;
        unsigned int i;

        x = shadow;

#ifdef CONFIG_KMEMCHECK_PARTIAL_OK
        /*
         * Make sure _some_ bytes are initialized. Gcc frequently generates
         * code to access neighboring bytes.
         */
        for (i = 0; i < size; ++i) {
                if (x[i] == KMEMCHECK_SHADOW_INITIALIZED)
                        return x[i];
        }
#else
        /* All bytes must be initialized. */
        for (i = 0; i < size; ++i) {
                if (x[i] != KMEMCHECK_SHADOW_INITIALIZED)
                        return x[i];
        }
#endif

        return x[0];
}

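/* Mark all size shadow bytes starting at shadow as initialized. */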
void kmemcheck_shadow_set(void *shadow, unsigned int size)
{
        uint8_t *x;
        unsigned int i;

        x = shadow;
        for (i = 0; i < size; ++i)
                x[i] = KMEMCHECK_SHADOW_INITIALIZED;
}