// SPDX-License-Identifier: GPL-2.0
/*
 * Clang Control Flow Integrity (CFI) error and slowpath handling.
 *
 * Copyright (C) 2021 Google LLC
 */

#include <linux/hardirq.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>
#include <linux/rcupdate.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/set_memory.h>

/* Compiler-defined handler names */
#ifdef CONFIG_CFI_PERMISSIVE
#define cfi_failure_handler __ubsan_handle_cfi_check_fail
#else
#define cfi_failure_handler __ubsan_handle_cfi_check_fail_abort
#endif

static inline void handle_cfi_failure(void *ptr)
{
        if (IS_ENABLED(CONFIG_CFI_PERMISSIVE))
                WARN_RATELIMIT(1, "CFI failure (target: %pS):\n", ptr);
        else
                panic("CFI failure (target: %pS)\n", ptr);
}

#ifdef CONFIG_MODULES
#ifdef CONFIG_CFI_CLANG_SHADOW
/*
 * Index type. A 16-bit index can address at most (2^16)-2 pages (taking
 * into account SHADOW_INVALID), i.e. ~256M with 4k pages.
 */
typedef u16 shadow_t;
#define SHADOW_INVALID ((shadow_t)~0UL)

struct cfi_shadow {
        /* Page index for the beginning of the shadow */
        unsigned long base;
        /* An array of __cfi_check locations (as indices to the shadow) */
        shadow_t shadow[1];
} __packed;
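
/*
 * Note: shadow[1] is the pre-C99 one-element flexible array idiom; the
 * SHADOW_SIZE allocation below provides SHADOW_ARR_SLOTS usable entries.
 * Each entry maps one module page to the shadow index of the page that
 * holds the owning module's __cfi_check function.
 */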

/*
 * The shadow covers ~128M from the beginning of the module region. If
 * the region is larger, we fall back to __module_address for the rest.
 */
#define __SHADOW_RANGE (_UL(SZ_128M) >> PAGE_SHIFT)

/* The in-memory size of struct cfi_shadow, always at least one page */
#define __SHADOW_PAGES ((__SHADOW_RANGE * sizeof(shadow_t)) >> PAGE_SHIFT)
#define SHADOW_PAGES max(1UL, __SHADOW_PAGES)
#define SHADOW_SIZE (SHADOW_PAGES << PAGE_SHIFT)

/* The actual size of the shadow array, minus metadata */
#define SHADOW_ARR_SIZE (SHADOW_SIZE - offsetof(struct cfi_shadow, shadow))
#define SHADOW_ARR_SLOTS (SHADOW_ARR_SIZE / sizeof(shadow_t))
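
/*
 * Worked example (illustrative, assuming 4k pages and a 64-bit base):
 * __SHADOW_RANGE = 128M / 4k = 32768 pages, so the raw array needs
 * 32768 * sizeof(shadow_t) = 64k, i.e. SHADOW_PAGES = 16 and
 * SHADOW_SIZE = 64k. Subtracting the 8-byte 'base' member leaves
 * SHADOW_ARR_SIZE = 65528 bytes, or SHADOW_ARR_SLOTS = 32764 entries.
 */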

static DEFINE_MUTEX(shadow_update_lock);
static struct cfi_shadow __rcu *cfi_shadow __read_mostly;

/* Returns the index in the shadow for the given address */
static inline int ptr_to_shadow(const struct cfi_shadow *s, unsigned long ptr)
{
        unsigned long index;
        unsigned long page = ptr >> PAGE_SHIFT;

        if (unlikely(page < s->base))
                return -1; /* Outside of module area */

        index = page - s->base;

        if (index >= SHADOW_ARR_SLOTS)
                return -1; /* Cannot be addressed with shadow */

        return (int)index;
}

/* Returns the page address for an index in the shadow */
static inline unsigned long shadow_to_ptr(const struct cfi_shadow *s,
                                          int index)
{
        if (unlikely(index < 0 || index >= SHADOW_ARR_SLOTS))
                return 0;

        return (s->base + index) << PAGE_SHIFT;
}

/* Returns the __cfi_check function address for the given shadow location */
static inline unsigned long shadow_to_check_fn(const struct cfi_shadow *s,
                                               int index)
{
        if (unlikely(index < 0 || index >= SHADOW_ARR_SLOTS))
                return 0;

        if (unlikely(s->shadow[index] == SHADOW_INVALID))
                return 0;

        /* __cfi_check is always page aligned */
        return (s->base + s->shadow[index]) << PAGE_SHIFT;
}
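
/*
 * Worked example (illustrative addresses): with s->base = 0x1000 and a
 * module whose __cfi_check sits at page 0x1001, every shadow slot for
 * that module's pages holds the value 1. A call target on page 0x1003
 * gives ptr_to_shadow() == 3, and shadow_to_check_fn(s, 3) returns
 * (0x1000 + s->shadow[3]) << PAGE_SHIFT, i.e. page 0x1001, the address
 * of __cfi_check.
 */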

static void prepare_next_shadow(const struct cfi_shadow __rcu *prev,
                                struct cfi_shadow *next)
{
        int i, index, check;

        /* Mark everything invalid */
        memset(next->shadow, 0xFF, SHADOW_ARR_SIZE);

        if (!prev)
                return; /* No previous shadow */

        /* If the base address didn't change, an update is not needed */
        if (prev->base == next->base) {
                memcpy(next->shadow, prev->shadow, SHADOW_ARR_SIZE);
                return;
        }

        /* Convert the previous shadow to the new address range */
        for (i = 0; i < SHADOW_ARR_SLOTS; ++i) {
                if (prev->shadow[i] == SHADOW_INVALID)
                        continue;

                index = ptr_to_shadow(next, shadow_to_ptr(prev, i));
                if (index < 0)
                        continue;

                check = ptr_to_shadow(next,
                                shadow_to_check_fn(prev, prev->shadow[i]));
                if (check < 0)
                        continue;

                next->shadow[index] = (shadow_t)check;
        }
}

static void add_module_to_shadow(struct cfi_shadow *s, struct module *mod,
                                 unsigned long min_addr, unsigned long max_addr)
{
        int check_index;
        unsigned long check = (unsigned long)mod->cfi_check;
        unsigned long ptr;

        if (unlikely(!PAGE_ALIGNED(check))) {
                pr_warn("cfi: not using shadow for module %s\n", mod->name);
                return;
        }

        check_index = ptr_to_shadow(s, check);
        if (check_index < 0)
                return; /* Module not addressable with shadow */

        /* For each page, store the check function index in the shadow */
        for (ptr = min_addr; ptr <= max_addr; ptr += PAGE_SIZE) {
                int index = ptr_to_shadow(s, ptr);

                if (index >= 0) {
                        /* Each page must only contain one module */
                        WARN_ON_ONCE(s->shadow[index] != SHADOW_INVALID);
                        s->shadow[index] = (shadow_t)check_index;
                }
        }
}

static void remove_module_from_shadow(struct cfi_shadow *s, struct module *mod,
                                      unsigned long min_addr, unsigned long max_addr)
{
        unsigned long ptr;

        for (ptr = min_addr; ptr <= max_addr; ptr += PAGE_SIZE) {
                int index = ptr_to_shadow(s, ptr);

                if (index >= 0)
                        s->shadow[index] = SHADOW_INVALID;
        }
}

typedef void (*update_shadow_fn)(struct cfi_shadow *, struct module *,
                                 unsigned long min_addr, unsigned long max_addr);

static void update_shadow(struct module *mod, unsigned long base_addr,
                          update_shadow_fn fn)
{
        struct cfi_shadow *prev;
        struct cfi_shadow *next;
        unsigned long min_addr, max_addr;

        next = vmalloc(SHADOW_SIZE);

        mutex_lock(&shadow_update_lock);
        prev = rcu_dereference_protected(cfi_shadow,
                                         mutex_is_locked(&shadow_update_lock));

        if (next) {
                next->base = base_addr >> PAGE_SHIFT;
                prepare_next_shadow(prev, next);

                min_addr = (unsigned long)mod->core_layout.base;
                max_addr = min_addr + mod->core_layout.text_size;
                fn(next, mod, min_addr & PAGE_MASK, max_addr & PAGE_MASK);

                set_memory_ro((unsigned long)next, SHADOW_PAGES);
        }

        rcu_assign_pointer(cfi_shadow, next);
        mutex_unlock(&shadow_update_lock);
        synchronize_rcu();

        if (prev) {
                set_memory_rw((unsigned long)prev, SHADOW_PAGES);
                vfree(prev);
        }
}

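/*
 * Module loader hooks. update_shadow() follows the classic RCU
 * copy-update-publish sequence: vmalloc() a writable copy, convert or
 * populate it, make it read-only, publish it with rcu_assign_pointer(),
 * and vfree() the old shadow only after synchronize_rcu() guarantees
 * that no reader still holds a reference to it.
 */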
void cfi_module_add(struct module *mod, unsigned long base_addr)
{
        update_shadow(mod, base_addr, add_module_to_shadow);
}

void cfi_module_remove(struct module *mod, unsigned long base_addr)
{
        update_shadow(mod, base_addr, remove_module_from_shadow);
}

static inline cfi_check_fn ptr_to_check_fn(const struct cfi_shadow __rcu *s,
                                           unsigned long ptr)
{
        int index;

        if (unlikely(!s))
                return NULL; /* No shadow available */

        index = ptr_to_shadow(s, ptr);
        if (index < 0)
                return NULL; /* Cannot be addressed with shadow */

        return (cfi_check_fn)shadow_to_check_fn(s, index);
}

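/*
 * Lockless read side of the shadow. The _notrace RCU variants are used
 * here on the assumption that CFI checks can fire from tracing and
 * instrumentation paths, where calling traced RCU helpers could recurse.
 */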
static inline cfi_check_fn find_shadow_check_fn(unsigned long ptr)
{
        cfi_check_fn fn;

        rcu_read_lock_sched_notrace();
        fn = ptr_to_check_fn(rcu_dereference_sched(cfi_shadow), ptr);
        rcu_read_unlock_sched_notrace();

        return fn;
}

#else /* !CONFIG_CFI_CLANG_SHADOW */

static inline cfi_check_fn find_shadow_check_fn(unsigned long ptr)
{
        return NULL;
}

#endif /* CONFIG_CFI_CLANG_SHADOW */

static inline cfi_check_fn find_module_check_fn(unsigned long ptr)
{
        cfi_check_fn fn = NULL;
        struct module *mod;

        rcu_read_lock_sched_notrace();
        mod = __module_address(ptr);
        if (mod)
                fn = mod->cfi_check;
        rcu_read_unlock_sched_notrace();

        return fn;
}

static inline cfi_check_fn find_check_fn(unsigned long ptr)
{
        cfi_check_fn fn = NULL;

        if (is_kernel_text(ptr))
                return __cfi_check;

        /*
         * Indirect call checks can happen when RCU is not watching. Both
         * the shadow and __module_address use RCU, so we need to wake it
         * up if necessary.
         */
        RCU_NONIDLE({
                if (IS_ENABLED(CONFIG_CFI_CLANG_SHADOW))
                        fn = find_shadow_check_fn(ptr);

                if (!fn)
                        fn = find_module_check_fn(ptr);
        });

        return fn;
}

void __cfi_slowpath_diag(uint64_t id, void *ptr, void *diag)
{
        cfi_check_fn fn = find_check_fn((unsigned long)ptr);

        if (likely(fn))
                fn(id, ptr, diag);
        else /* Don't allow unchecked modules */
                handle_cfi_failure(ptr);
}
EXPORT_SYMBOL(__cfi_slowpath_diag);
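
/*
 * Illustrative sketch (not actual compiler output): for a cross-DSO
 * indirect call that cannot be verified inline, Clang emits roughly
 *
 *      __cfi_slowpath_diag(callsite_type_id, target, diag_data);
 *
 * and the target module's __cfi_check() then validates that 'target'
 * is a valid function of the expected type for 'callsite_type_id'.
 */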

#else /* !CONFIG_MODULES */

void __cfi_slowpath_diag(uint64_t id, void *ptr, void *diag)
{
        handle_cfi_failure(ptr); /* No modules */
}
EXPORT_SYMBOL(__cfi_slowpath_diag);

#endif /* CONFIG_MODULES */

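/*
 * Entry point for CFI check failures. cfi_failure_handler expands to one
 * of the __ubsan_handle_cfi_check_fail* names selected at the top of this
 * file, which is the symbol the compiler calls when an inline check fails.
 */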
void cfi_failure_handler(void *data, void *ptr, void *vtable)
{
        handle_cfi_failure(ptr);
}
EXPORT_SYMBOL(cfi_failure_handler);