inet: switch IP ID generator to siphash
[GitHub/moto-9609/android_kernel_motorola_exynos9610.git] / kernel / cfi.c
CommitLineData
d590fd12
ST
1/*
2 * CFI (Control Flow Integrity) error and slowpath handling
3 *
4 * Copyright (C) 2017 Google, Inc.
5 */
6
7#include <linux/gfp.h>
8#include <linux/module.h>
9#include <linux/printk.h>
10#include <linux/ratelimit.h>
11#include <linux/rcupdate.h>
12#include <linux/spinlock.h>
13#include <asm/bug.h>
14#include <asm/cacheflush.h>
15#include <asm/memory.h>
16#include <asm/set_memory.h>
17
/*
 * Compiler-defined handler names. The handlers below are defined under
 * these names so they match the symbols CFI-instrumented code calls:
 * the diagnostic variants in permissive mode, the aborting/plain
 * variants when enforcing.
 */
#ifdef CONFIG_CFI_PERMISSIVE
#define cfi_failure_handler __ubsan_handle_cfi_check_fail
#define cfi_slowpath_handler __cfi_slowpath_diag
#else /* enforcing */
#define cfi_failure_handler __ubsan_handle_cfi_check_fail_abort
#define cfi_slowpath_handler __cfi_slowpath
#endif /* CONFIG_CFI_PERMISSIVE */
26
/*
 * Report a CFI violation for the given call target address.
 *
 * In permissive mode the failure is only logged (rate-limited to avoid
 * flooding the log); in enforcing mode it is logged and fatal (BUG()).
 */
static inline void handle_cfi_failure(void *ptr)
{
#ifdef CONFIG_CFI_PERMISSIVE
	WARN_RATELIMIT(1, "CFI failure (target: [<%px>] %pF):\n", ptr, ptr);
#else
	pr_err("CFI failure (target: [<%px>] %pF):\n", ptr, ptr);
	BUG();
#endif
}
36
37#ifdef CONFIG_MODULES
38#ifdef CONFIG_CFI_CLANG_SHADOW
/*
 * The shadow is a page-granular lookup table mapping module text pages
 * to the page holding the module's __cfi_check function. It provides a
 * fast path that avoids walking module ranges in the CFI slowpath.
 */
struct shadow_range {
	/* Module address range covered by this shadow */
	unsigned long mod_min_addr;
	unsigned long mod_max_addr;
	/* The same range expressed as page numbers */
	unsigned long min_page;
	unsigned long max_page;
};

/* The shadow occupies 2^SHADOW_ORDER physically contiguous pages */
#define SHADOW_ORDER 1
#define SHADOW_PAGES (1 << SHADOW_ORDER)
/* Number of u16 slots that fit after the header in SHADOW_PAGES pages */
#define SHADOW_SIZE \
	((SHADOW_PAGES * PAGE_SIZE - sizeof(struct shadow_range)) / sizeof(u16))
/* Sentinel for pages with no recorded __cfi_check function */
#define SHADOW_INVALID 0xFFFF

struct cfi_shadow {
	/* Page range covered by the shadow */
	struct shadow_range r;
	/* Page offsets to __cfi_check functions in modules */
	u16 shadow[SHADOW_SIZE];
};

/* Serializes shadow replacement in update_shadow(); readers use RCU */
static DEFINE_SPINLOCK(shadow_update_lock);
/* Current shadow; replaced wholesale on module add/remove */
static struct cfi_shadow __rcu *cfi_shadow __read_mostly = NULL;
63
64static inline int ptr_to_shadow(const struct cfi_shadow *s, unsigned long ptr)
65{
66 unsigned long index;
67 unsigned long page = ptr >> PAGE_SHIFT;
68
69 if (unlikely(page < s->r.min_page))
70 return -1; /* Outside of module area */
71
72 index = page - s->r.min_page;
73
74 if (index >= SHADOW_SIZE)
75 return -1; /* Cannot be addressed with shadow */
76
77 return (int)index;
78}
79
80static inline unsigned long shadow_to_ptr(const struct cfi_shadow *s,
81 int index)
82{
83 BUG_ON(index < 0 || index >= SHADOW_SIZE);
84
85 if (unlikely(s->shadow[index] == SHADOW_INVALID))
86 return 0;
87
88 return (s->r.min_page + s->shadow[index]) << PAGE_SHIFT;
89}
90
/*
 * Initialize the next shadow from the previous one. If the covered base
 * page is unchanged the table is copied verbatim; otherwise every valid
 * entry is translated to the new base.
 *
 * NOTE: this relies on the invariant maintained by add_module_to_shadow():
 * the slot for a module's __cfi_check page stores its own index, so
 * shadow_to_ptr(prev, prev->shadow[i]) recovers the check function's
 * address, which can then be re-encoded relative to the new base.
 */
static void prepare_next_shadow(const struct cfi_shadow __rcu *prev,
	struct cfi_shadow *next)
{
	int i, index, check;

	/* Mark everything invalid */
	memset(next->shadow, 0xFF, sizeof(next->shadow));

	if (!prev)
		return; /* No previous shadow */

	/* If the base address didn't change, update is not needed */
	if (prev->r.min_page == next->r.min_page) {
		memcpy(next->shadow, prev->shadow, sizeof(next->shadow));
		return;
	}

	/* Convert the previous shadow to the new address range */
	for (i = 0; i < SHADOW_SIZE; ++i) {
		if (prev->shadow[i] == SHADOW_INVALID)
			continue;

		/* This page's slot under the new base */
		index = ptr_to_shadow(next, shadow_to_ptr(prev, i));
		if (index < 0)
			continue;

		/* The check function's slot under the new base */
		check = ptr_to_shadow(next,
				shadow_to_ptr(prev, prev->shadow[i]));
		if (check < 0)
			continue;

		next->shadow[index] = (u16)check;
	}
}
125
126static void add_module_to_shadow(struct cfi_shadow *s, struct module *mod)
127{
128 unsigned long ptr;
129 unsigned long min_page_addr;
130 unsigned long max_page_addr;
131 unsigned long check = (unsigned long)mod->cfi_check;
132 int check_index = ptr_to_shadow(s, check);
133
134 BUG_ON((check & PAGE_MASK) != check); /* Must be page aligned */
135
136 if (check_index < 0)
137 return; /* Module not addressable with shadow */
138
139 min_page_addr = (unsigned long)mod->core_layout.base & PAGE_MASK;
140 max_page_addr = (unsigned long)mod->core_layout.base +
141 mod->core_layout.text_size;
142 max_page_addr &= PAGE_MASK;
143
144 /* For each page, store the check function index in the shadow */
145 for (ptr = min_page_addr; ptr <= max_page_addr; ptr += PAGE_SIZE) {
146 int index = ptr_to_shadow(s, ptr);
147 if (index >= 0) {
148 /* Assume a page only contains code for one module */
149 BUG_ON(s->shadow[index] != SHADOW_INVALID);
150 s->shadow[index] = (u16)check_index;
151 }
152 }
153}
154
155static void remove_module_from_shadow(struct cfi_shadow *s, struct module *mod)
156{
157 unsigned long ptr;
158 unsigned long min_page_addr;
159 unsigned long max_page_addr;
160
161 min_page_addr = (unsigned long)mod->core_layout.base & PAGE_MASK;
162 max_page_addr = (unsigned long)mod->core_layout.base +
163 mod->core_layout.text_size;
164 max_page_addr &= PAGE_MASK;
165
166 for (ptr = min_page_addr; ptr <= max_page_addr; ptr += PAGE_SIZE) {
167 int index = ptr_to_shadow(s, ptr);
168 if (index >= 0)
169 s->shadow[index] = SHADOW_INVALID;
170 }
171}
172
typedef void (*update_shadow_fn)(struct cfi_shadow *, struct module *);

/*
 * Replace the global shadow with a new one covering [min_addr, max_addr],
 * applying fn (add or remove) for mod. The new table is seeded from the
 * old one, made read-only, and published with rcu_assign_pointer(); the
 * old table is freed only after synchronize_rcu(), so lockless readers
 * never see it disappear under them.
 */
static void update_shadow(struct module *mod, unsigned long min_addr,
	unsigned long max_addr, update_shadow_fn fn)
{
	struct cfi_shadow *prev;
	struct cfi_shadow *next = (struct cfi_shadow *)
		__get_free_pages(GFP_KERNEL, SHADOW_ORDER);

	/* Allocation failure is fatal: the shadow must stay coherent */
	BUG_ON(!next);

	next->r.mod_min_addr = min_addr;
	next->r.mod_max_addr = max_addr;
	next->r.min_page = min_addr >> PAGE_SHIFT;
	next->r.max_page = max_addr >> PAGE_SHIFT;

	spin_lock(&shadow_update_lock);
	prev = rcu_dereference_protected(cfi_shadow, 1);
	prepare_next_shadow(prev, next);

	fn(next, mod);
	/* Protect the published table against stray writes */
	set_memory_ro((unsigned long)next, SHADOW_PAGES);
	rcu_assign_pointer(cfi_shadow, next);

	spin_unlock(&shadow_update_lock);
	synchronize_rcu();

	if (prev) {
		/* Make the retired table writable again before freeing */
		set_memory_rw((unsigned long)prev, SHADOW_PAGES);
		free_pages((unsigned long)prev, SHADOW_ORDER);
	}
}
205
/* Register a newly loaded module's address range in the CFI shadow */
void cfi_module_add(struct module *mod, unsigned long min_addr,
	unsigned long max_addr)
{
	update_shadow(mod, min_addr, max_addr, add_module_to_shadow);
}
EXPORT_SYMBOL(cfi_module_add);
212
/* Drop a module's address range from the CFI shadow on unload */
void cfi_module_remove(struct module *mod, unsigned long min_addr,
	unsigned long max_addr)
{
	update_shadow(mod, min_addr, max_addr, remove_module_from_shadow);
}
EXPORT_SYMBOL(cfi_module_remove);
219
220static inline cfi_check_fn ptr_to_check_fn(const struct cfi_shadow __rcu *s,
221 unsigned long ptr)
222{
223 int index;
f0c3da58 224// unsigned long check;
d590fd12
ST
225
226 if (unlikely(!s))
227 return NULL; /* No shadow available */
228
229 if (ptr < s->r.mod_min_addr || ptr > s->r.mod_max_addr)
230 return NULL; /* Not in a mapped module */
231
232 index = ptr_to_shadow(s, ptr);
233 if (index < 0)
234 return NULL; /* Cannot be addressed with shadow */
235
236 return (cfi_check_fn)shadow_to_ptr(s, index);
237}
238#endif /* CONFIG_CFI_CLANG_SHADOW */
239
240static inline cfi_check_fn find_module_cfi_check(void *ptr)
241{
242 struct module *mod;
243
244 preempt_disable();
245 mod = __module_address((unsigned long)ptr);
246 preempt_enable();
247
248 if (mod)
249 return mod->cfi_check;
250
251 return CFI_CHECK_FN;
252}
253
/*
 * Find the __cfi_check function that validates an indirect call to ptr.
 * Tries the RCU-protected shadow first when it is configured, then
 * falls back to the slower module walk.
 */
static inline cfi_check_fn find_cfi_check(void *ptr)
{
#ifdef CONFIG_CFI_CLANG_SHADOW
	cfi_check_fn f;

	if (!rcu_access_pointer(cfi_shadow))
		return CFI_CHECK_FN; /* No loaded modules */

	/* Look up the __cfi_check function to use */
	rcu_read_lock();
	f = ptr_to_check_fn(rcu_dereference(cfi_shadow), (unsigned long)ptr);
	rcu_read_unlock();

	if (f)
		return f;

	/*
	 * Fall back to find_module_cfi_check, which works also for a larger
	 * module address space, but is slower.
	 */
#endif /* CONFIG_CFI_CLANG_SHADOW */

	return find_module_cfi_check(ptr);
}
278
279void cfi_slowpath_handler(uint64_t id, void *ptr, void *diag)
280{
281 cfi_check_fn check = find_cfi_check(ptr);
282
283 if (likely(check))
284 check(id, ptr, diag);
285 else /* Don't allow unchecked modules */
83a9a284 286 handle_cfi_failure(ptr);
d590fd12
ST
287}
288EXPORT_SYMBOL(cfi_slowpath_handler);
289#endif /* CONFIG_MODULES */
290
/*
 * Failure entry point; the name expands per the handler-name macros at
 * the top of this file. Only the target pointer is reported.
 */
void cfi_failure_handler(void *data, void *ptr, void *vtable)
{
	handle_cfi_failure(ptr);
}
EXPORT_SYMBOL(cfi_failure_handler);
296
/*
 * Secondary failure entry point; delegates to the same reporting path.
 * NOTE(review): presumably reached from compiler-generated __cfi_check
 * failure paths — confirm against the toolchain's CFI ABI.
 */
void __cfi_check_fail(void *data, void *ptr)
{
	handle_cfi_failure(ptr);
}