/*
 * CFI (Control Flow Integrity) error and slowpath handling
 *
 * Copyright (C) 2017 Google, Inc.
 */
8 #include <linux/module.h>
9 #include <linux/printk.h>
10 #include <linux/ratelimit.h>
11 #include <linux/rcupdate.h>
12 #include <linux/spinlock.h>
14 #include <asm/cacheflush.h>
15 #include <asm/memory.h>
16 #include <asm/set_memory.h>
/*
 * Compiler-defined handler names.
 *
 * The compiler emits calls to fixed symbol names; map them onto the kernel's
 * handlers depending on whether CFI is permissive (warn and continue) or
 * enforcing (abort). The #else branch is required: without it both pairs of
 * definitions would apply, redefining the macros.
 */
#ifdef CONFIG_CFI_PERMISSIVE
#define cfi_failure_handler	__ubsan_handle_cfi_check_fail
#define cfi_slowpath_handler	__cfi_slowpath_diag
#else /* enforcement */
#define cfi_failure_handler	__ubsan_handle_cfi_check_fail_abort
#define cfi_slowpath_handler	__cfi_slowpath
#endif /* CONFIG_CFI_PERMISSIVE */
/*
 * Report a CFI violation for the given target address.
 *
 * In permissive builds this emits a rate-limited warning and lets execution
 * continue; in enforcing builds it logs the target and stops the kernel
 * with BUG().
 */
static inline void handle_cfi_failure(void *ptr)
{
#ifdef CONFIG_CFI_PERMISSIVE
	WARN_RATELIMIT(1, "CFI failure (target: [<%px>] %pF):\n", ptr, ptr);
#else
	pr_err("CFI failure (target: [<%px>] %pF):\n", ptr, ptr);
	BUG();
#endif
}
38 #ifdef CONFIG_CFI_CLANG_SHADOW
40 /* Module address range */
41 unsigned long mod_min_addr
;
42 unsigned long mod_max_addr
;
43 /* Module page range */
44 unsigned long min_page
;
45 unsigned long max_page
;
48 #define SHADOW_ORDER 1
49 #define SHADOW_PAGES (1 << SHADOW_ORDER)
51 ((SHADOW_PAGES * PAGE_SIZE - sizeof(struct shadow_range)) / sizeof(u16))
52 #define SHADOW_INVALID 0xFFFF
55 /* Page range covered by the shadow */
56 struct shadow_range r
;
57 /* Page offsets to __cfi_check functions in modules */
58 u16 shadow
[SHADOW_SIZE
];
61 static DEFINE_SPINLOCK(shadow_update_lock
);
62 static struct cfi_shadow __rcu
*cfi_shadow __read_mostly
= NULL
;
64 static inline int ptr_to_shadow(const struct cfi_shadow
*s
, unsigned long ptr
)
67 unsigned long page
= ptr
>> PAGE_SHIFT
;
69 if (unlikely(page
< s
->r
.min_page
))
70 return -1; /* Outside of module area */
72 index
= page
- s
->r
.min_page
;
74 if (index
>= SHADOW_SIZE
)
75 return -1; /* Cannot be addressed with shadow */
80 static inline unsigned long shadow_to_ptr(const struct cfi_shadow
*s
,
83 BUG_ON(index
< 0 || index
>= SHADOW_SIZE
);
85 if (unlikely(s
->shadow
[index
] == SHADOW_INVALID
))
88 return (s
->r
.min_page
+ s
->shadow
[index
]) << PAGE_SHIFT
;
91 static void prepare_next_shadow(const struct cfi_shadow __rcu
*prev
,
92 struct cfi_shadow
*next
)
96 /* Mark everything invalid */
97 memset(next
->shadow
, 0xFF, sizeof(next
->shadow
));
100 return; /* No previous shadow */
102 /* If the base address didn't change, update is not needed */
103 if (prev
->r
.min_page
== next
->r
.min_page
) {
104 memcpy(next
->shadow
, prev
->shadow
, sizeof(next
->shadow
));
108 /* Convert the previous shadow to the new address range */
109 for (i
= 0; i
< SHADOW_SIZE
; ++i
) {
110 if (prev
->shadow
[i
] == SHADOW_INVALID
)
113 index
= ptr_to_shadow(next
, shadow_to_ptr(prev
, i
));
117 check
= ptr_to_shadow(next
,
118 shadow_to_ptr(prev
, prev
->shadow
[i
]));
122 next
->shadow
[index
] = (u16
)check
;
126 static void add_module_to_shadow(struct cfi_shadow
*s
, struct module
*mod
)
129 unsigned long min_page_addr
;
130 unsigned long max_page_addr
;
131 unsigned long check
= (unsigned long)mod
->cfi_check
;
132 int check_index
= ptr_to_shadow(s
, check
);
134 BUG_ON((check
& PAGE_MASK
) != check
); /* Must be page aligned */
137 return; /* Module not addressable with shadow */
139 min_page_addr
= (unsigned long)mod
->core_layout
.base
& PAGE_MASK
;
140 max_page_addr
= (unsigned long)mod
->core_layout
.base
+
141 mod
->core_layout
.text_size
;
142 max_page_addr
&= PAGE_MASK
;
144 /* For each page, store the check function index in the shadow */
145 for (ptr
= min_page_addr
; ptr
<= max_page_addr
; ptr
+= PAGE_SIZE
) {
146 int index
= ptr_to_shadow(s
, ptr
);
148 /* Assume a page only contains code for one module */
149 BUG_ON(s
->shadow
[index
] != SHADOW_INVALID
);
150 s
->shadow
[index
] = (u16
)check_index
;
155 static void remove_module_from_shadow(struct cfi_shadow
*s
, struct module
*mod
)
158 unsigned long min_page_addr
;
159 unsigned long max_page_addr
;
161 min_page_addr
= (unsigned long)mod
->core_layout
.base
& PAGE_MASK
;
162 max_page_addr
= (unsigned long)mod
->core_layout
.base
+
163 mod
->core_layout
.text_size
;
164 max_page_addr
&= PAGE_MASK
;
166 for (ptr
= min_page_addr
; ptr
<= max_page_addr
; ptr
+= PAGE_SIZE
) {
167 int index
= ptr_to_shadow(s
, ptr
);
169 s
->shadow
[index
] = SHADOW_INVALID
;
/* Callback applied to the writable next shadow during an update */
typedef void (*update_shadow_fn)(struct cfi_shadow *, struct module *);
175 static void update_shadow(struct module
*mod
, unsigned long min_addr
,
176 unsigned long max_addr
, update_shadow_fn fn
)
178 struct cfi_shadow
*prev
;
179 struct cfi_shadow
*next
= (struct cfi_shadow
*)
180 __get_free_pages(GFP_KERNEL
, SHADOW_ORDER
);
184 next
->r
.mod_min_addr
= min_addr
;
185 next
->r
.mod_max_addr
= max_addr
;
186 next
->r
.min_page
= min_addr
>> PAGE_SHIFT
;
187 next
->r
.max_page
= max_addr
>> PAGE_SHIFT
;
189 spin_lock(&shadow_update_lock
);
190 prev
= rcu_dereference_protected(cfi_shadow
, 1);
191 prepare_next_shadow(prev
, next
);
194 set_memory_ro((unsigned long)next
, SHADOW_PAGES
);
195 rcu_assign_pointer(cfi_shadow
, next
);
197 spin_unlock(&shadow_update_lock
);
201 set_memory_rw((unsigned long)prev
, SHADOW_PAGES
);
202 free_pages((unsigned long)prev
, SHADOW_ORDER
);
206 void cfi_module_add(struct module
*mod
, unsigned long min_addr
,
207 unsigned long max_addr
)
209 update_shadow(mod
, min_addr
, max_addr
, add_module_to_shadow
);
211 EXPORT_SYMBOL(cfi_module_add
);
213 void cfi_module_remove(struct module
*mod
, unsigned long min_addr
,
214 unsigned long max_addr
)
216 update_shadow(mod
, min_addr
, max_addr
, remove_module_from_shadow
);
218 EXPORT_SYMBOL(cfi_module_remove
);
220 static inline cfi_check_fn
ptr_to_check_fn(const struct cfi_shadow __rcu
*s
,
224 // unsigned long check;
227 return NULL
; /* No shadow available */
229 if (ptr
< s
->r
.mod_min_addr
|| ptr
> s
->r
.mod_max_addr
)
230 return NULL
; /* Not in a mapped module */
232 index
= ptr_to_shadow(s
, ptr
);
234 return NULL
; /* Cannot be addressed with shadow */
236 return (cfi_check_fn
)shadow_to_ptr(s
, index
);
238 #endif /* CONFIG_CFI_CLANG_SHADOW */
240 static inline cfi_check_fn
find_module_cfi_check(void *ptr
)
245 mod
= __module_address((unsigned long)ptr
);
249 return mod
->cfi_check
;
254 static inline cfi_check_fn
find_cfi_check(void *ptr
)
256 #ifdef CONFIG_CFI_CLANG_SHADOW
259 if (!rcu_access_pointer(cfi_shadow
))
260 return CFI_CHECK_FN
; /* No loaded modules */
262 /* Look up the __cfi_check function to use */
264 f
= ptr_to_check_fn(rcu_dereference(cfi_shadow
), (unsigned long)ptr
);
271 * Fall back to find_module_cfi_check, which works also for a larger
272 * module address space, but is slower.
274 #endif /* CONFIG_CFI_CLANG_SHADOW */
276 return find_module_cfi_check(ptr
);
279 void cfi_slowpath_handler(uint64_t id
, void *ptr
, void *diag
)
281 cfi_check_fn check
= find_cfi_check(ptr
);
284 check(id
, ptr
, diag
);
285 else /* Don't allow unchecked modules */
286 handle_cfi_failure(ptr
);
288 EXPORT_SYMBOL(cfi_slowpath_handler
);
289 #endif /* CONFIG_MODULES */
/* Compiler-emitted entry point for a failed CFI check; only @ptr is used. */
void cfi_failure_handler(void *data, void *ptr, void *vtable)
{
	handle_cfi_failure(ptr);
}
EXPORT_SYMBOL(cfi_failure_handler);
/* Compiler-emitted entry point for __cfi_check failures; only @ptr is used. */
void __cfi_check_fail(void *data, void *ptr)
{
	handle_cfi_failure(ptr);
}